from django.db import models
class RiverOutfall(models.Model):
name = models.TextField()
lat = models.FloatField(null=True)
lon = models.FloatField(null=True)
class RiverCso(models.Model):
    # on_delete is required since Django 2.0; CASCADE is an assumed default here
    river_outfall = models.ForeignKey("RiverOutfall", on_delete=models.CASCADE)
open_time = models.DateTimeField()
close_time = models.DateTimeField()
class LakeOutfall(models.Model):
name = models.TextField()
lat = models.FloatField(null=True)
lon = models.FloatField(null=True)
class LakeReversal(models.Model):
    lake_outfall = models.ForeignKey("LakeOutfall", on_delete=models.CASCADE)
open_date = models.DateTimeField()
close_date = models.DateTimeField()
millions_of_gallons = models.FloatField()
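
# Hypothetical usage sketch, not part of the models above: aggregating the
# recorded reversal volume with the ORM. The alias "total_mg" is made up.
from django.db.models import Sum

def total_reversal_volume():
    """Sum millions_of_gallons across all recorded lake reversals."""
    return LakeReversal.objects.aggregate(
        total_mg=Sum("millions_of_gallons"))["total_mg"]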
|
width = 75
height = 75
data = [
0x00,0x00,0x00,0x00,0x00,0xe0,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x01,0xf0,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x03,0xf0,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x03,0xf8,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x07,0xf8,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x0f,0xf8,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x1f,0xfc,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x1f,0xfc,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x3f,0xfc,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x7f,0xfe,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x7f,0xfe,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0xff,0xfe,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x01,0xff,0xff,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x03,0xff,0xff,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x03,0xff,0xff,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x07,0xff,0xff,0x80,0x00,0x00,0x00,
0x00,0x00,0x00,0x07,0xff,0xff,0x80,0x00,0x00,0x00,
0x00,0x00,0x00,0x07,0xff,0xff,0x80,0x00,0x00,0x00,
0x00,0x00,0x00,0x0f,0xff,0xff,0x80,0x00,0x00,0x00,
0x00,0x00,0x00,0x0f,0xff,0xff,0x80,0x00,0x00,0x00,
0x7f,0xff,0xfc,0x0f,0xff,0xff,0x80,0x00,0x00,0x00,
0xff,0xff,0xff,0x0f,0xff,0xff,0x80,0x00,0x00,0x00,
0xff,0xff,0xff,0xcf,0xff,0xff,0x80,0x00,0x00,0x00,
0xff,0xff,0xff,0xef,0xff,0xff,0x80,0x00,0x00,0x00,
0x7f,0xff,0xff,0xf7,0xff,0xff,0x80,0x00,0x00,0x00,
0x3f,0xff,0xff,0xff,0xfb,0xff,0x00,0x00,0x00,0x00,
0x3f,0xff,0xff,0xff,0xf1,0xff,0x3f,0xf0,0x00,0x00,
0x1f,0xff,0xff,0xff,0xf1,0xfe,0xff,0xfe,0x00,0x00,
0x0f,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xc0,0x00,
0x0f,0xff,0xff,0xff,0xe1,0xff,0xff,0xff,0xf8,0x00,
0x07,0xff,0xff,0xff,0xe1,0xff,0xff,0xff,0xff,0x00,
0x03,0xff,0xff,0xff,0xe1,0xff,0xff,0xff,0xff,0xc0,
0x01,0xff,0xff,0x3f,0xe1,0xff,0xff,0xff,0xff,0xe0,
0x01,0xff,0xfe,0x07,0xe3,0xff,0xff,0xff,0xff,0xe0,
0x00,0xff,0xff,0x03,0xe3,0xff,0xff,0xff,0xff,0xe0,
0x00,0x7f,0xff,0x00,0xf7,0xff,0xff,0xff,0xff,0xc0,
0x00,0x3f,0xff,0xc0,0xff,0xc0,0x7f,0xff,0xff,0x80,
0x00,0x1f,0xff,0xf0,0xff,0x00,0x3f,0xff,0xff,0x00,
0x00,0x0f,0xff,0xff,0xff,0x00,0x7f,0xff,0xfc,0x00,
0x00,0x07,0xff,0xff,0xff,0x01,0xff,0xff,0xf8,0x00,
0x00,0x01,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0x00,
0x00,0x00,0x7f,0xff,0xff,0xff,0xff,0xff,0xc0,0x00,
0x00,0x00,0x1f,0xfc,0x7f,0xff,0xff,0xff,0x80,0x00,
0x00,0x00,0x7f,0xf8,0x78,0xff,0xff,0xfe,0x00,0x00,
0x00,0x00,0xff,0xf0,0x78,0x7f,0xff,0xfc,0x00,0x00,
0x00,0x01,0xff,0xe0,0xf8,0x7f,0xff,0xf0,0x00,0x00,
0x00,0x03,0xff,0xc0,0xf8,0x3f,0xdf,0xc0,0x00,0x00,
0x00,0x07,0xff,0xc1,0xfc,0x3f,0xe0,0x00,0x00,0x00,
0x00,0x07,0xff,0x87,0xfc,0x1f,0xf0,0x00,0x00,0x00,
0x00,0x0f,0xff,0xcf,0xfe,0x1f,0xf8,0x00,0x00,0x00,
0x00,0x0f,0xff,0xff,0xff,0x1f,0xf8,0x00,0x00,0x00,
0x00,0x1f,0xff,0xff,0xff,0x1f,0xfc,0x00,0x00,0x00,
0x00,0x1f,0xff,0xff,0xff,0xff,0xfc,0x00,0x00,0x00,
0x00,0x1f,0xff,0xff,0xff,0xff,0xfe,0x00,0x00,0x00,
0x00,0x3f,0xff,0xff,0xff,0xff,0xfe,0x00,0x00,0x00,
0x00,0x3f,0xff,0xff,0xff,0xff,0xfe,0x00,0x00,0x00,
0x00,0x3f,0xff,0xff,0x3f,0xff,0xfe,0x00,0x00,0x00,
0x00,0x7f,0xff,0xff,0x3f,0xff,0xfe,0x00,0x00,0x00,
0x00,0x7f,0xff,0xff,0x3f,0xff,0xfe,0x00,0x00,0x00,
0x00,0x7f,0xff,0xfe,0x3f,0xff,0xfe,0x00,0x00,0x00,
0x00,0xff,0xff,0xfc,0x1f,0xff,0xfe,0x00,0x00,0x00,
0x00,0xff,0xff,0xf8,0x1f,0xff,0xfe,0x00,0x00,0x00,
0x00,0xff,0xff,0xe0,0x0f,0xff,0xfe,0x00,0x00,0x00,
0x01,0xff,0xff,0x80,0x07,0xff,0xfe,0x00,0x00,0x00,
0x01,0xff,0xfc,0x00,0x03,0xff,0xfe,0x00,0x00,0x00,
0x01,0xff,0xe0,0x00,0x01,0xff,0xfe,0x00,0x00,0x00,
0x01,0xff,0x00,0x00,0x00,0xff,0xfe,0x00,0x00,0x00,
0x00,0xf8,0x00,0x00,0x00,0x7f,0xfe,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x1f,0xfe,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x0f,0xfe,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x07,0xfe,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x01,0xfe,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x7e,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x1c,0x00,0x00,0x00
]
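
# Sketch for eyeballing the bitmap above: each row is len(data) // height
# bytes (10 here, i.e. 80 bits padded out from width 75). MSB-first bit
# order is an assumption; flip the mask if the output looks mirrored.
def render_bitmap(width, height, data):
    bytes_per_row = len(data) // height
    for row in range(height):
        line = []
        for col in range(width):
            byte = data[row * bytes_per_row + col // 8]
            line.append('#' if byte & (0x80 >> (col % 8)) else ' ')
        print(''.join(line))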
|
import time
from pygame.locals import *
import gui
MOUSE_LEFT_BUTTON = 1
MOUSE_MIDDLE_BUTTON = 2
MOUSE_RIGHT_BUTTON = 3
MOUSE_WHEELUP = 4
MOUSE_WHEELDOWN = 5
class Screen(object):
"""Base gui screen class
every game screen class should inherit from this one
"""
    def __init__(self):
        # Keep these per-instance; class-level mutable defaults would be
        # shared across all Screen instances.
        self.__triggers = []
        self.__old_hover = None
        self.__hover = None
        self.__hover_changed = False
def log_info(self, message):
"""Prints an INFO message to standard output"""
ts = int(time.time())
print("# INFO %i ... %s" % (ts, message))
def log_error(self, message):
"""Prints an ERROR message to standard output"""
ts = int(time.time())
print("! ERROR %i ... %s" % (ts, message))
def reset_triggers_list(self):
"""Clears the screen's trigger list"""
self.__triggers = []
def add_trigger(self, trigger):
"""Appends given trigger to the end of screen's trigger list"""
        if 'hover_id' not in trigger:
trigger['hover_id'] = None
self.__triggers.append(trigger)
def list_triggers(self):
"""Returns the screen's list of triggers"""
return self.__triggers
    def get_timestamp(self, zoom=1):
        """Returns the current timestamp, scaled by the given zoom factor"""
return int(time.time() * zoom)
def get_image(self, img_key, subkey1 = None, subkey2 = None, subkey3 = None):
"""Returns an image object from GUI engine, identified by its key(s)"""
return gui.GUI.get_image(img_key, subkey1, subkey2, subkey3)
def redraw_flip(self):
"""Redraws the screen, takes care about mouse cursor and flips the graphic buffer to display"""
self.draw()
gui.GUI.highlight_triggers(self.list_triggers())
gui.GUI.flip()
def redraw_noflip(self):
"""Redraws the screen, takes care about mouse cursor but doesn't flip the buffer to display"""
self.draw()
gui.GUI.highlight_triggers(self.list_triggers())
def prepare(self):
"""This method should be implemented by screens that require some
special actions each time before the screen is run.
        For example, to reset the screen to a well-known state to prevent unexpected behaviour.
"""
pass
def draw(self):
"""All static graphic output should be implemented in this method.
Unless there is only a dynamic graphic (animations),
every screen should implement this method.
"""
pass
def animate(self):
"""Entry point for Screen animations, e.g. ship trajectory on MainScreen.
        The GUI engine calls this method periodically.
        Animations should be time-dependent - such screens have to implement the timing!
"""
pass
def get_escape_trigger(self):
"""Returns standard trigger for sending escape action"""
return {'action': "ESCAPE"}
def on_mousebuttonup(self, event):
"""Default implementation of mouse click event serving.
Checks the mouse wheel events (up and down scrolling) and regular mouse buttons.
        If the event's subject is the left mouse button, it checks the mouse position against the trigger list and
        returns the first trigger whose rectangle contains the mouse position.
There is a good chance that no screen would have to override this method.
"""
if event.button == MOUSE_MIDDLE_BUTTON:
            print(event)
elif event.button == MOUSE_WHEELUP:
return {'action': "SCROLL_UP"}
elif event.button == MOUSE_WHEELDOWN:
return {'action': "SCROLL_DOWN"}
else:
triggers_list = self.list_triggers()
for trigger in triggers_list:
if trigger['rect'].collidepoint(event.pos):
if event.button == MOUSE_LEFT_BUTTON:
trigger['mouse_pos'] = event.pos
return trigger
elif event.button == MOUSE_RIGHT_BUTTON:
return {'action': "help", 'help': trigger['action']}
def on_keydown(self, event):
"""Default implementation of a keyboard event handling.
If keypress is detected by a GUI engine it calls this method.
The pressed key is checked against the trigger list.
        Returns the first trigger whose key matches the pressed one, or a generic
        {'action': "key"} trigger if no trigger matches the keypress.
There is a good chance that no screen would have to override this method.
"""
print("@ screen.Screen::on_keydown()")
print(" scancode = %i" % event.scancode)
print(" key = %i" % event.key)
if event.key == K_ESCAPE:
return {'action': "ESCAPE"}
else:
triggers_list = self.list_triggers()
for trigger in triggers_list:
                if 'key' in trigger and trigger['key'] == event.key:
return trigger
return {'action': "key", 'key': event.key}
def update_hover(self, mouse_pos):
"""This method is invoked by a GUI engine on every pure mouse move
and right before the screen's on_mousemotion() method.
Mouse position is checked against screen's trigger list.
If hover is detected (=mouse position is inside the trigger's rectangle)
        the trigger is stored and can be retrieved via the get_hover() method.
        Also, if the previously stored value differs from the new one,
        the __hover_changed flag is set to True.
The idea is to handle mouse hover detection separately,
so other methods could rely on get_hover() and hover_changed() methods.
        Probably no screen will need to override this method.
"""
for trigger in self.list_triggers():
            if 'hover_id' in trigger and trigger['rect'].collidepoint(mouse_pos):
if self.__hover != trigger:
self.__hover_changed = True
self.__hover = trigger
break
def get_hover(self):
"""Returns the current hover trigger"""
return self.__hover
def hover_changed(self):
"""Returns True if screen's hover has changed since last call of this method"""
if self.__hover_changed:
self.__hover_changed = False
return True
else:
return False
def on_mousemotion(self, event):
"""Invoked by a GUI engine on every pure (non-dragging) mouse move.
        Currently no screen needs to override this empty implementation.
"""
pass
    def get_drag_item(self, mouse_pos):
        """Returns the 'drag_id' of the first trigger under the given mouse position, or None"""
for trigger in self.list_triggers():
            if 'drag_id' in trigger and trigger['rect'].collidepoint(mouse_pos):
return trigger['drag_id']
return None
def on_mousedrag(self, drag_item, pos, rel):
"""Invoked by a GUI engine when left mouse button is being held, drag item is set and mouse moves"""
pass
    def on_mousedrop(self, drag_item, pos):
        """Invoked by a GUI engine when mouse dragging stops
        (drag item was set and left mouse button was released).
        pos is the (mouse_x, mouse_y) drop position.
        """
pass
def process_trigger(self, trigger):
"""Empty implementation of a trigger handling
If a screen trigger is positively evaluated
(e.g. returned from on_mousebuttonup() or on_keydown() methods)
it's passed as a trigger argument to this method
Every screen should override this method to handle the proper actions.
"""
pass
def enter(self):
""" Called by GUI engine right before gui_client::run_screen() is invoked
        Suitable for saving an initial state that can be reversed by the screen's leave_cancel() method
"""
pass
def leave_confirm(self):
""" Called by GUI engine when CONFIRM trigger is activated
Every screen that sends data to the game server should implement this method
"""
pass
def leave_cancel(self):
""" Called by GUI engine when ESCAPE trigger is activated
This is the right place to implement things like getting the screen to state before any changes were made
"""
pass
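
# Minimal usage sketch (assumed, not from the original codebase): a screen
# that registers a single trigger and reacts to it. 'rect' is expected to
# be a pygame.Rect, as the collidepoint() calls above imply.
import pygame

class TitleScreen(Screen):
    def prepare(self):
        self.reset_triggers_list()
        self.add_trigger({'action': "START",
                          'rect': pygame.Rect(100, 100, 200, 40),
                          'key': K_RETURN})

    def process_trigger(self, trigger):
        if trigger['action'] == "START":
            self.log_info("starting game")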
|
'''
All things computer vision.
'''
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import cv2
from mousetrap.i18n import _
from mousetrap.image import Image
import mousetrap.plugins.interface as interface
import logging
LOGGER = logging.getLogger(__name__)
# Numeric values of cv2.CAP_PROP_FRAME_WIDTH and cv2.CAP_PROP_FRAME_HEIGHT
FRAME_WIDTH = 3
FRAME_HEIGHT = 4
class Camera(object):
S_CAPTURE_OPEN_ERROR = _(
'Device #%d does not support video capture interface')
S_CAPTURE_READ_ERROR = _('Error while capturing. Camera disconnected?')
def __init__(self, config):
self._config = config
self._device = \
self._new_capture_device(config['camera']['device_index'])
self.set_dimensions(
config['camera']['width'],
config['camera']['height'],
)
@classmethod
def _new_capture_device(cls, device_index):
capture = cv2.VideoCapture(device_index)
if not capture.isOpened():
capture.release()
raise IOError(cls.S_CAPTURE_OPEN_ERROR % device_index)
return capture
def set_dimensions(self, width, height):
self._device.set(FRAME_WIDTH, width)
self._device.set(FRAME_HEIGHT, height)
def read_image(self):
ret, image = self._device.read()
if not ret:
raise IOError(self.S_CAPTURE_READ_ERROR)
return Image(self._config, image)
class HaarLoader(object):
def __init__(self, config):
self._config = config
self._haar_files = config['haar_files']
self._haar_cache = {}
def from_name(self, name):
        if name not in self._haar_files:
raise HaarNameError(name)
haar_file = self._haar_files[name]
haar = self.from_file(haar_file, name)
return haar
def from_file(self, file_, cache_name=None):
if cache_name in self._haar_cache:
return self._haar_cache[cache_name]
current_dir = os.path.dirname(os.path.realpath(__file__))
haar_file = os.path.join(current_dir, file_)
haar = cv2.CascadeClassifier(haar_file)
        if cache_name is not None:
            if cache_name not in self._haar_cache:
self._haar_cache[cache_name] = haar
return haar
class HaarNameError(Exception):
pass
class FeatureDetector(object):
_INSTANCES = {}
@classmethod
def get_detector(cls, config, name, scale_factor=1.1, min_neighbors=3):
key = (name, scale_factor, min_neighbors)
if key in cls._INSTANCES:
LOGGER.info("Reusing %s detector.", key)
return cls._INSTANCES[key]
cls._INSTANCES[key] = FeatureDetector(
config, name, scale_factor, min_neighbors)
return cls._INSTANCES[key]
@classmethod
def clear_all_detection_caches(cls):
for instance in cls._INSTANCES.values():
instance.clear_cache()
def __init__(self, config, name, scale_factor=1.1, min_neighbors=3):
'''
name - name of feature to detect
scale_factor - how much the image size is reduced at each image scale
while searching. Default 1.1.
min_neighbors - how many neighbors each candidate rectangle should have
to retain it. Default 3.
'''
LOGGER.info("Building detector: %s",
(name, scale_factor, min_neighbors))
self._config = config
self._name = name
self._single = None
self._plural = None
self._image = None
self._cascade = HaarLoader(config).from_name(name)
self._scale_factor = scale_factor
self._min_neighbors = min_neighbors
self._last_attempt_successful = False
self._detect_cache = {}
def detect(self, image):
if image in self._detect_cache:
message = "Detection cache hit: %(image)d -> %(result)s" % \
{'image':id(image), 'result':self._detect_cache[image]}
LOGGER.debug(message)
if isinstance(self._detect_cache[image], FeatureNotFoundException):
message = str(self._detect_cache[image])
raise FeatureNotFoundException(message,
cause=self._detect_cache[image])
return self._detect_cache[image]
try:
self._image = image
self._detect_plural()
self._exit_if_none_detected()
self._unpack_first()
self._extract_image()
self._calculate_center()
self._detect_cache[image] = self._single
return self._detect_cache[image]
except FeatureNotFoundException as exception:
self._detect_cache[image] = exception
raise
def _detect_plural(self):
self._plural = self._cascade.detectMultiScale(
self._image.to_cv_grayscale(),
self._scale_factor,
self._min_neighbors,
)
def _exit_if_none_detected(self):
if len(self._plural) == 0:
message = _('Feature not detected: %s') % (self._name)
if self._last_attempt_successful:
self._last_attempt_successful = False
LOGGER.info(message)
raise FeatureNotFoundException(message)
else:
if not self._last_attempt_successful:
self._last_attempt_successful = True
message = _('Feature detected: %s') % (self._name)
LOGGER.info(message)
def _unpack_first(self):
self._single = dict(
zip(['x', 'y', 'width', 'height'],
self._plural[0]))
    def _calculate_center(self):
        # The center is the corner offset by half the size, not the
        # midpoint of (corner + size).
        self._single["center"] = {
            "x": self._single["x"] + (self._single["width"] // 2),
            "y": self._single["y"] + (self._single["height"] // 2),
        }
def _extract_image(self):
single = self._single
from_y = single['y']
to_y = single['y'] + single['height']
from_x = single['x']
to_x = single['x'] + single['width']
image_cv_grayscale = self._image.to_cv_grayscale()
single["image"] = Image(
self._config,
image_cv_grayscale[from_y:to_y, from_x:to_x],
is_grayscale=True,
)
def clear_cache(self):
self._detect_cache.clear()
class FeatureDetectorClearCachePlugin(interface.Plugin):
def __init__(self, config):
super(FeatureDetectorClearCachePlugin, self).__init__(config)
self._config = config
def run(self, app):
FeatureDetector.clear_all_detection_caches()
class FeatureNotFoundException(Exception):
def __init__(self, message, cause=None):
if cause is not None:
message = message + ', caused by ' + repr(cause)
self.cause = cause
super(FeatureNotFoundException, self).__init__(message)
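
# Illustrative sketch only. Assumptions: a config dict shaped like the
# accesses above (config['camera']['device_index'/'width'/'height'] and
# config['haar_files']) with a cascade registered under the name 'face'.
def locate_face(config):
    camera = Camera(config)
    detector = FeatureDetector.get_detector(config, 'face')
    try:
        face = detector.detect(camera.read_image())
        return face['center']  # {'x': ..., 'y': ...}
    except FeatureNotFoundException:
        return None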
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import libdnf
import hawkey
from dnf.i18n import _
import dnf.exceptions
import dnf.selector
import json
VERSION_MAJOR = 0
VERSION_MINOR = 0
VERSION = "%s.%s" % (VERSION_MAJOR, VERSION_MINOR)
"""
The version of the stored transaction.
MAJOR version denotes backwards incompatible changes (old dnf won't work with
new transaction JSON).
MINOR version denotes extending the format without breaking backwards
compatibility (old dnf can work with new transaction JSON). Forwards
compatibility needs to be handled by being able to process the old format as
well as the new one.
"""
class TransactionError(dnf.exceptions.Error):
def __init__(self, msg):
super(TransactionError, self).__init__(msg)
class TransactionReplayError(dnf.exceptions.Error):
def __init__(self, filename, errors):
"""
:param filename: The name of the transaction file being replayed
:param errors: a list of error classes or a string with an error description
"""
# store args in case someone wants to read them from a caught exception
self.filename = filename
if isinstance(errors, (list, tuple)):
self.errors = errors
else:
self.errors = [errors]
if filename:
msg = _('The following problems occurred while replaying the transaction from file "{filename}":').format(filename=filename)
else:
msg = _('The following problems occurred while running a transaction:')
for error in self.errors:
msg += "\n " + str(error)
super(TransactionReplayError, self).__init__(msg)
class IncompatibleTransactionVersionError(TransactionReplayError):
def __init__(self, filename, msg):
super(IncompatibleTransactionVersionError, self).__init__(filename, msg)
def _check_version(version, filename):
major, minor = version.split('.')
try:
major = int(major)
except ValueError as e:
raise TransactionReplayError(
filename,
_('Invalid major version "{major}", number expected.').format(major=major)
)
try:
int(minor) # minor is unused, just check it's a number
except ValueError as e:
raise TransactionReplayError(
filename,
_('Invalid minor version "{minor}", number expected.').format(minor=minor)
)
if major != VERSION_MAJOR:
raise IncompatibleTransactionVersionError(
filename,
_('Incompatible major version "{major}", supported major version is "{major_supp}".')
.format(major=major, major_supp=VERSION_MAJOR)
)
def serialize_transaction(transaction):
"""
Serializes a transaction to a data structure that is equivalent to the stored JSON format.
:param transaction: the transaction to serialize (an instance of dnf.db.history.TransactionWrapper)
"""
data = {
"version": VERSION,
}
rpms = []
groups = []
environments = []
if transaction is None:
return data
for tsi in transaction.packages():
if tsi.is_package():
rpms.append({
"action": tsi.action_name,
"nevra": tsi.nevra,
"reason": libdnf.transaction.TransactionItemReasonToString(tsi.reason),
"repo_id": tsi.from_repo
})
elif tsi.is_group():
group = tsi.get_group()
group_data = {
"action": tsi.action_name,
"id": group.getGroupId(),
"packages": [],
"package_types": libdnf.transaction.compsPackageTypeToString(group.getPackageTypes())
}
for pkg in group.getPackages():
group_data["packages"].append({
"name": pkg.getName(),
"installed": pkg.getInstalled(),
"package_type": libdnf.transaction.compsPackageTypeToString(pkg.getPackageType())
})
groups.append(group_data)
elif tsi.is_environment():
env = tsi.get_environment()
env_data = {
"action": tsi.action_name,
"id": env.getEnvironmentId(),
"groups": [],
"package_types": libdnf.transaction.compsPackageTypeToString(env.getPackageTypes())
}
for grp in env.getGroups():
env_data["groups"].append({
"id": grp.getGroupId(),
"installed": grp.getInstalled(),
"group_type": libdnf.transaction.compsPackageTypeToString(grp.getGroupType())
})
environments.append(env_data)
if rpms:
data["rpms"] = rpms
if groups:
data["groups"] = groups
if environments:
data["environments"] = environments
return data
class TransactionReplay(object):
"""
A class that encapsulates replaying a transaction. The transaction data are
    loaded and stored when the class is initialized. The transaction is run by
    calling the `run()` method. After the transaction is created (but before it
    is performed), the `post_transaction()` method needs to be called to verify no
    extra packages were pulled in and also to fix the reasons.
"""
def __init__(
self,
base,
filename="",
data=None,
ignore_extras=False,
ignore_installed=False,
skip_unavailable=False
):
"""
:param base: the dnf base
:param filename: the filename to load the transaction from (conflicts with the 'data' argument)
:param data: the dictionary to load the transaction from (conflicts with the 'filename' argument)
        :param ignore_extras: whether to ignore extra packages pulled into the transaction
:param ignore_installed: whether to ignore installed versions of packages
:param skip_unavailable: whether to skip transaction packages that aren't available
"""
self._base = base
self._filename = filename
self._ignore_installed = ignore_installed
self._ignore_extras = ignore_extras
self._skip_unavailable = skip_unavailable
if not self._base.conf.strict:
self._skip_unavailable = True
self._nevra_cache = set()
self._nevra_reason_cache = {}
self._warnings = []
if filename and data:
raise ValueError(_("Conflicting TransactionReplay arguments have been specified: filename, data"))
elif filename:
self._load_from_file(filename)
else:
self._load_from_data(data)
def _load_from_file(self, fn):
self._filename = fn
with open(fn, "r") as f:
try:
replay_data = json.load(f)
except json.decoder.JSONDecodeError as e:
raise TransactionReplayError(fn, str(e) + ".")
try:
self._load_from_data(replay_data)
except TransactionError as e:
raise TransactionReplayError(fn, e)
def _load_from_data(self, data):
self._replay_data = data
self._verify_toplevel_json(self._replay_data)
self._rpms = self._replay_data.get("rpms", [])
self._assert_type(self._rpms, list, "rpms", "array")
self._groups = self._replay_data.get("groups", [])
self._assert_type(self._groups, list, "groups", "array")
self._environments = self._replay_data.get("environments", [])
self._assert_type(self._environments, list, "environments", "array")
def _raise_or_warn(self, warn_only, msg):
if warn_only:
self._warnings.append(msg)
else:
raise TransactionError(msg)
def _assert_type(self, value, t, id, expected):
if not isinstance(value, t):
raise TransactionError(_('Unexpected type of "{id}", {exp} expected.').format(id=id, exp=expected))
def _verify_toplevel_json(self, replay_data):
fn = self._filename
if "version" not in replay_data:
            raise TransactionReplayError(fn, _('Missing key "{key}".').format(key="version"))
self._assert_type(replay_data["version"], str, "version", "string")
_check_version(replay_data["version"], fn)
def _replay_pkg_action(self, pkg_data):
try:
action = pkg_data["action"]
nevra = pkg_data["nevra"]
repo_id = pkg_data["repo_id"]
reason = libdnf.transaction.StringToTransactionItemReason(pkg_data["reason"])
except KeyError as e:
raise TransactionError(
_('Missing object key "{key}" in an rpm.').format(key=e.args[0])
)
except IndexError as e:
raise TransactionError(
_('Unexpected value of package reason "{reason}" for rpm nevra "{nevra}".')
.format(reason=pkg_data["reason"], nevra=nevra)
)
subj = hawkey.Subject(nevra)
parsed_nevras = subj.get_nevra_possibilities(forms=[hawkey.FORM_NEVRA])
if len(parsed_nevras) != 1:
raise TransactionError(_('Cannot parse NEVRA for package "{nevra}".').format(nevra=nevra))
parsed_nevra = parsed_nevras[0]
na = "%s.%s" % (parsed_nevra.name, parsed_nevra.arch)
query_na = self._base.sack.query().filter(name=parsed_nevra.name, arch=parsed_nevra.arch)
epoch = parsed_nevra.epoch if parsed_nevra.epoch is not None else 0
query = query_na.filter(epoch=epoch, version=parsed_nevra.version, release=parsed_nevra.release)
# In case the package is found in the same repo as in the original
# transaction, limit the query to that plus installed packages. IOW
# remove packages with the same NEVRA in case they are found in
# multiple repos and the repo the package came from originally is one
# of them.
# This can e.g. make a difference in the system-upgrade plugin, in case
# the same NEVRA is in two repos, this makes sure the same repo is used
# for both download and upgrade steps of the plugin.
if repo_id:
query_repo = query.filter(reponame=repo_id)
if query_repo:
query = query_repo.union(query.installed())
if not query:
self._raise_or_warn(self._skip_unavailable, _('Cannot find rpm nevra "{nevra}".').format(nevra=nevra))
return
# a cache to check no extra packages were pulled into the transaction
if action != "Reason Change":
self._nevra_cache.add(nevra)
        # store reasons for forward actions and "Removed"; reasons for the rest
        # of the actions should stay as they were determined by the transaction
if action in ("Install", "Upgrade", "Downgrade", "Reinstall", "Removed"):
self._nevra_reason_cache[nevra] = reason
if action in ("Install", "Upgrade", "Downgrade"):
if action == "Install" and query_na.installed() and not self._base._get_installonly_query(query_na):
self._raise_or_warn(self._ignore_installed,
_('Package "{na}" is already installed for action "{action}".').format(na=na, action=action))
sltr = dnf.selector.Selector(self._base.sack).set(pkg=query)
self._base.goal.install(select=sltr, optional=not self._base.conf.strict)
elif action == "Reinstall":
query = query.available()
if not query:
self._raise_or_warn(self._skip_unavailable,
_('Package nevra "{nevra}" not available in repositories for action "{action}".')
.format(nevra=nevra, action=action))
return
sltr = dnf.selector.Selector(self._base.sack).set(pkg=query)
self._base.goal.install(select=sltr, optional=not self._base.conf.strict)
elif action in ("Upgraded", "Downgraded", "Reinstalled", "Removed", "Obsoleted"):
query = query.installed()
if not query:
self._raise_or_warn(self._ignore_installed,
_('Package nevra "{nevra}" not installed for action "{action}".').format(nevra=nevra, action=action))
return
# erasing the original version (the reverse part of an action like
# e.g. upgrade) is more robust, but we can't do it if
# skip_unavailable is True, because if the forward part of the
# action is skipped, we would simply remove the package here
if not self._skip_unavailable or action == "Removed":
for pkg in query:
self._base.goal.erase(pkg, clean_deps=False)
elif action == "Reason Change":
self._base.history.set_reason(query[0], reason)
else:
raise TransactionError(
_('Unexpected value of package action "{action}" for rpm nevra "{nevra}".')
.format(action=action, nevra=nevra)
)
def _create_swdb_group(self, group_id, pkg_types, pkgs):
comps_group = self._base.comps._group_by_id(group_id)
if not comps_group:
self._raise_or_warn(self._skip_unavailable, _("Group id '%s' is not available.") % group_id)
return None
swdb_group = self._base.history.group.new(group_id, comps_group.name, comps_group.ui_name, pkg_types)
try:
for pkg in pkgs:
name = pkg["name"]
self._assert_type(name, str, "groups.packages.name", "string")
installed = pkg["installed"]
self._assert_type(installed, bool, "groups.packages.installed", "boolean")
package_type = pkg["package_type"]
self._assert_type(package_type, str, "groups.packages.package_type", "string")
try:
swdb_group.addPackage(name, installed, libdnf.transaction.stringToCompsPackageType(package_type))
except libdnf.error.Error as e:
raise TransactionError(str(e))
except KeyError as e:
raise TransactionError(
_('Missing object key "{key}" in groups.packages.').format(key=e.args[0])
)
return swdb_group
def _swdb_group_install(self, group_id, pkg_types, pkgs):
swdb_group = self._create_swdb_group(group_id, pkg_types, pkgs)
if swdb_group is not None:
self._base.history.group.install(swdb_group)
def _swdb_group_upgrade(self, group_id, pkg_types, pkgs):
if not self._base.history.group.get(group_id):
            self._raise_or_warn(self._ignore_installed, _("Group id '%s' is not installed.") % group_id)
return
swdb_group = self._create_swdb_group(group_id, pkg_types, pkgs)
if swdb_group is not None:
self._base.history.group.upgrade(swdb_group)
def _swdb_group_remove(self, group_id, pkg_types, pkgs):
if not self._base.history.group.get(group_id):
self._raise_or_warn(self._ignore_installed, _("Group id '%s' is not installed.") % group_id)
return
swdb_group = self._create_swdb_group(group_id, pkg_types, pkgs)
if swdb_group is not None:
self._base.history.group.remove(swdb_group)
def _create_swdb_environment(self, env_id, pkg_types, groups):
comps_env = self._base.comps._environment_by_id(env_id)
if not comps_env:
self._raise_or_warn(self._skip_unavailable, _("Environment id '%s' is not available.") % env_id)
return None
swdb_env = self._base.history.env.new(env_id, comps_env.name, comps_env.ui_name, pkg_types)
try:
for grp in groups:
id = grp["id"]
self._assert_type(id, str, "environments.groups.id", "string")
installed = grp["installed"]
self._assert_type(installed, bool, "environments.groups.installed", "boolean")
group_type = grp["group_type"]
self._assert_type(group_type, str, "environments.groups.group_type", "string")
try:
group_type = libdnf.transaction.stringToCompsPackageType(group_type)
except libdnf.error.Error as e:
raise TransactionError(str(e))
if group_type not in (
libdnf.transaction.CompsPackageType_MANDATORY,
libdnf.transaction.CompsPackageType_OPTIONAL
):
raise TransactionError(
_('Invalid value "{group_type}" of environments.groups.group_type, '
'only "mandatory" or "optional" is supported.'
).format(group_type=grp["group_type"])
)
swdb_env.addGroup(id, installed, group_type)
except KeyError as e:
raise TransactionError(
_('Missing object key "{key}" in environments.groups.').format(key=e.args[0])
)
return swdb_env
def _swdb_environment_install(self, env_id, pkg_types, groups):
swdb_env = self._create_swdb_environment(env_id, pkg_types, groups)
if swdb_env is not None:
self._base.history.env.install(swdb_env)
def _swdb_environment_upgrade(self, env_id, pkg_types, groups):
if not self._base.history.env.get(env_id):
            self._raise_or_warn(self._ignore_installed, _("Environment id '%s' is not installed.") % env_id)
return
swdb_env = self._create_swdb_environment(env_id, pkg_types, groups)
if swdb_env is not None:
self._base.history.env.upgrade(swdb_env)
def _swdb_environment_remove(self, env_id, pkg_types, groups):
if not self._base.history.env.get(env_id):
self._raise_or_warn(self._ignore_installed, _("Environment id '%s' is not installed.") % env_id)
return
swdb_env = self._create_swdb_environment(env_id, pkg_types, groups)
if swdb_env is not None:
self._base.history.env.remove(swdb_env)
def get_data(self):
"""
:returns: the loaded data of the transaction
"""
return self._replay_data
def get_warnings(self):
"""
:returns: an array of warnings gathered during the transaction replay
"""
return self._warnings
def run(self):
"""
Replays the transaction.
"""
fn = self._filename
errors = []
for pkg_data in self._rpms:
try:
self._replay_pkg_action(pkg_data)
except TransactionError as e:
errors.append(e)
for group_data in self._groups:
try:
action = group_data["action"]
group_id = group_data["id"]
try:
pkg_types = libdnf.transaction.stringToCompsPackageType(group_data["package_types"])
except libdnf.error.Error as e:
errors.append(TransactionError(str(e)))
continue
if action == "Install":
self._swdb_group_install(group_id, pkg_types, group_data["packages"])
elif action == "Upgrade":
self._swdb_group_upgrade(group_id, pkg_types, group_data["packages"])
elif action == "Removed":
self._swdb_group_remove(group_id, pkg_types, group_data["packages"])
else:
errors.append(TransactionError(
_('Unexpected value of group action "{action}" for group "{group}".')
.format(action=action, group=group_id)
))
except KeyError as e:
errors.append(TransactionError(
_('Missing object key "{key}" in a group.').format(key=e.args[0])
))
except TransactionError as e:
errors.append(e)
for env_data in self._environments:
try:
action = env_data["action"]
env_id = env_data["id"]
try:
pkg_types = libdnf.transaction.stringToCompsPackageType(env_data["package_types"])
except libdnf.error.Error as e:
errors.append(TransactionError(str(e)))
continue
if action == "Install":
self._swdb_environment_install(env_id, pkg_types, env_data["groups"])
elif action == "Upgrade":
self._swdb_environment_upgrade(env_id, pkg_types, env_data["groups"])
elif action == "Removed":
self._swdb_environment_remove(env_id, pkg_types, env_data["groups"])
else:
errors.append(TransactionError(
_('Unexpected value of environment action "{action}" for environment "{env}".')
.format(action=action, env=env_id)
))
except KeyError as e:
errors.append(TransactionError(
_('Missing object key "{key}" in an environment.').format(key=e.args[0])
))
except TransactionError as e:
errors.append(e)
if errors:
raise TransactionReplayError(fn, errors)
def post_transaction(self):
"""
Sets reasons in the transaction history to values from the stored transaction.
Also serves to check whether additional packages were pulled in by the
transaction, which results in an error (unless ignore_extras is True).
"""
if not self._base.transaction:
return
errors = []
for tsi in self._base.transaction:
try:
pkg = tsi.pkg
except KeyError as e:
# the transaction item has no package, happens for action == "Reason Change"
continue
nevra = str(pkg)
if nevra not in self._nevra_cache:
# if ignore_installed is True, we don't want to check for
# Upgraded/Downgraded/Reinstalled extras in the transaction,
# basically those may be installed and we are ignoring them
                if not self._ignore_installed or tsi.action not in (
libdnf.transaction.TransactionItemAction_UPGRADED,
libdnf.transaction.TransactionItemAction_DOWNGRADED,
libdnf.transaction.TransactionItemAction_REINSTALLED
):
msg = _('Package nevra "{nevra}", which is not present in the transaction file, was pulled '
'into the transaction.'
).format(nevra=nevra)
if not self._ignore_extras:
errors.append(TransactionError(msg))
else:
self._warnings.append(msg)
try:
replay_reason = self._nevra_reason_cache[nevra]
if tsi.action in (
libdnf.transaction.TransactionItemAction_INSTALL,
libdnf.transaction.TransactionItemAction_REMOVE
) or libdnf.transaction.TransactionItemReasonCompare(replay_reason, tsi.reason) > 0:
tsi.reason = replay_reason
except KeyError as e:
# if the pkg nevra wasn't found, we don't want to change the reason
pass
if errors:
raise TransactionReplayError(self._filename, errors)
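
# Hedged usage sketch: storing a transaction and replaying it later.
# serialize_transaction() and TransactionReplay come from this module;
# base.resolve() is the standard dnf depsolve call; the path is made up.
def store_transaction(transaction, path="/tmp/transaction.json"):
    with open(path, "w") as f:
        json.dump(serialize_transaction(transaction), f, indent=4)

def replay_transaction(base, path="/tmp/transaction.json"):
    replay = TransactionReplay(base, filename=path, skip_unavailable=True)
    replay.run()
    base.resolve()
    # per the class docstring: call after the transaction is created,
    # before it is performed
    replay.post_transaction()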
|
import tempfile
from Utils import runCommand
from SpecParser import SpecParser
from Base import Base
class RemoteSpecParser(Base):
def __init__(self, branch, package):
Base.__init__(self)
self.branch = branch
self.package = package
self.sp_obj = None
def parse(self):
f = tempfile.NamedTemporaryFile(delete=True)
cmd_str = "curl http://pkgs.fedoraproject.org/cgit/rpms/%s.git/plain/%s.spec > %s"
runCommand(cmd_str % (self.package, self.package, f.name))
self.sp_obj = SpecParser(f.name)
if not self.sp_obj.parse():
self.err = self.sp_obj.getError()
f.close()
return False
f.close()
return True
def getProvides(self):
"""Fetch a spec file from pkgdb and get provides from all its [sub]packages"""
        if self.sp_obj is None:
return {}
return self.sp_obj.getProvides()
def getPackageCommits(self):
        if self.sp_obj is None:
return ""
return self.sp_obj.getMacro("commit")
def getPkgURL(self):
        if self.sp_obj is None:
return ""
return self.sp_obj.getTag("url")
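
# Usage sketch; the package name is illustrative, and rsp.err is set by
# parse() on failure (see above). Utils, SpecParser and Base are the
# project-local modules imported at the top of this file.
if __name__ == '__main__':
    rsp = RemoteSpecParser("master", "golang")
    if rsp.parse():
        print(rsp.getProvides())
    else:
        print(rsp.err)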
|
from Components.Converter.Converter import Converter
from Components.Element import cached
from Tools.Directories import fileExists
from Poll import Poll
import time
import os
class YWeather(Poll, Converter, object):
weather_city = '711665'
    time_update = 20  # minutes between refreshes of the cached forecast XML
    time_update_ms = 30000  # poll interval in milliseconds
city = 0
country = 1
direction = 2
speed = 3
humidity = 4
visibility = 5
pressure = 6
pressurenm = 7
wtext = 8
temp = 9
picon = 10
wtext2 = 11
templow2 = 12
temphigh2 = 13
picon2 = 14
day2 = 15
date2 = 16
wtext3 = 17
templow3 = 18
temphigh3 = 19
picon3 = 20
day3 = 21
date3 = 22
wtext4 = 23
templow4 = 24
temphigh4 = 25
picon4 = 26
day4 = 27
date4 = 28
wtext5 = 29
templow5 = 30
temphigh5 = 31
picon5 = 32
day5 = 33
date5 = 34
    def __init__(self, type):
        Converter.__init__(self, type)
        Poll.__init__(self)
        # Map the skin-provided type string onto the matching class
        # constant. Only the "text*" names differ from their attribute
        # names ("wtext*"); unknown types fall back to the plain text.
        aliases = {'text': 'wtext', 'text2': 'wtext2', 'text3': 'wtext3',
                   'text4': 'wtext4', 'text5': 'wtext5'}
        self.type = getattr(self, aliases.get(type, type), self.wtext)
self.poll_interval = self.time_update_ms
self.poll_enabled = True
@cached
def getText(self):
xweather = {'ycity':"N/A", 'ycountry':"N/A", 'ydirection':"N/A", 'yspeed':"N/A", 'yhumidity':"N/A", 'yvisibility':"N/A", 'ypressure':"N/A", 'ytext':"N/A", 'ytemp':"N/A", 'ypicon':"3200",
'yday2':"N/A", 'yday3':"N/A", 'yday4':"N/A", 'yday5':"N/A",
'ypiconday2':"3200", 'ypiconday3':"3200", 'ypiconday4':"3200", 'ypiconday5':"3200",
'ydate2':"N/A", 'ydate3':"N/A", 'ydate4':"N/A", 'ydate5':"N/A",
'ytextday2':"N/A", 'ytextday3':"N/A", 'ytextday4':"N/A", 'ytextday5':"N/A",
'ytemphighday2':"N/A", 'ytemphighday3':"N/A", 'ytemphighday4':"N/A", 'ytemphighday5':"N/A",
'ytemplowday2':"N/A", 'ytemplowday3':"N/A", 'ytemplowday4':"N/A", 'ytemplowday5':"N/A"}
        direct = 0
        info = ""
        # Shared lookups used by the branches below
        day_names = {'Mon': _('Mon'), 'Tue': _('Tue'), 'Wed': _('Wed'),
                     'Thu': _('Thu'), 'Fri': _('Fri'), 'Sat': _('Sat'),
                     'Sun': _('Sun')}
        degree_sign = unichr(176).encode("latin-1")
if fileExists("/usr/lib/enigma2/python/Plugins/Extensions/iSkin/Weather/Config/Location_id"):
self.weather_city = open("/usr/lib/enigma2/python/Plugins/Extensions/iSkin/Weather/Config/Location_id").read()
elif fileExists("/usr/lib/enigma2/python/Plugins/Extensions/YahooWeather/Config/Location_id"):
self.weather_city = open("/usr/lib/enigma2/python/Plugins/Extensions/YahooWeather/Config/Location_id").read()
if fileExists("/tmp/yweather.xml"):
if int((time.time() - os.stat("/tmp/yweather.xml").st_mtime)/60) >= self.time_update:
os.system("rm /tmp/yweather.xml")
os.system("wget -P /tmp -T2 'https://query.yahooapis.com/v1/public/yql?q=select%20%2A%20from%20weather.forecast%20where%20woeid=%s%20AND%20u=%22c%22' -O /tmp/yweather.xml" % self.weather_city)
else:
os.system("wget -P /tmp -T2 'https://query.yahooapis.com/v1/public/yql?q=select%20%2A%20from%20weather.forecast%20where%20woeid=%s%20AND%20u=%22c%22' -O /tmp/yweather.xml" % self.weather_city)
if not fileExists("/tmp/yweather.xml"):
os.system("echo -e 'None' >> /tmp/yweather.xml")
return 'N/A'
if not fileExists("/tmp/yweather.xml"):
os.system("echo -e 'None' >> /tmp/yweather.xml")
return 'N/A'
wday = 1
for line in open("/tmp/yweather.xml"):
if line.find("<yweather:location") > -1:
xweather['ycity'] = line.split('city')[1].split('"')[1]
xweather['ycountry'] = line.split('country')[1].split('"')[1]
elif line.find("<yweather:wind") > -1:
xweather['ydirection'] = line.split('direction')[1].split('"')[1]
xweather['yspeed'] = line.split('speed')[1].split('"')[1]
elif line.find("<yweather:atmosphere") > -1:
xweather['yhumidity'] = line.split('humidity')[1].split('"')[1]
xweather['yvisibility'] = line.split('visibility')[1].split('"')[1]
xweather['ypressure'] = line.split('pressure')[1].split('"')[1]
elif line.find("<yweather:condition") > -1:
xweather['ytext'] = line.split('text')[1].split('"')[1]
xweather['ypicon'] = line.split('code')[1].split('"')[1]
xweather['ytemp'] = line.split('temp')[1].split('"')[1]
elif line.find('yweather:forecast') > -1:
if wday == 2:
xweather['yday2'] = line.split('day')[1].split('"')[1]
xweather['ydate2'] = line.split('date')[1].split('"')[1]
xweather['ytextday2'] = line.split('text')[1].split('"')[1]
xweather['ypiconday2'] = line.split('code')[1].split('"')[1]
xweather['ytemphighday2'] = line.split('high')[1].split('"')[1]
xweather['ytemplowday2'] = line.split('low')[1].split('"')[1]
elif wday == 3:
xweather['yday3'] = line.split('day')[1].split('"')[1]
xweather['ydate3'] = line.split('date')[1].split('"')[1]
xweather['ytextday3'] = line.split('text')[1].split('"')[1]
xweather['ypiconday3'] = line.split('code')[1].split('"')[1]
xweather['ytemphighday3'] = line.split('high')[1].split('"')[1]
xweather['ytemplowday3'] = line.split('low')[1].split('"')[1]
elif wday == 4:
xweather['yday4'] = line.split('day')[1].split('"')[1]
xweather['ydate4'] = line.split('date')[1].split('"')[1]
xweather['ytextday4'] = line.split('text')[1].split('"')[1]
xweather['ypiconday4'] = line.split('code')[1].split('"')[1]
xweather['ytemphighday4'] = line.split('high')[1].split('"')[1]
xweather['ytemplowday4'] = line.split('low')[1].split('"')[1]
elif wday == 5:
xweather['yday5'] = line.split('day')[1].split('"')[1]
xweather['ydate5'] = line.split('date')[1].split('"')[1]
xweather['ytextday5'] = line.split('text')[1].split('"')[1]
xweather['ypiconday5'] = line.split('code')[1].split('"')[1]
xweather['ytemphighday5'] = line.split('high')[1].split('"')[1]
xweather['ytemplowday5'] = line.split('low')[1].split('"')[1]
wday = wday + 1
if self.type == self.city:
info = xweather['ycity']
elif self.type == self.country:
info = xweather['ycountry']
elif self.type == self.direction:
if xweather['ydirection'] != "N/A":
direct = int(xweather['ydirection'])
if direct >= 0 and direct <= 20:
info = _('N')
elif direct >= 21 and direct <= 35:
info = _('nne')
elif direct >= 36 and direct <= 55:
info = _('ne')
elif direct >= 56 and direct <= 70:
info = _('ene')
elif direct >= 71 and direct <= 110:
info = _('E')
elif direct >= 111 and direct <= 125:
info = _('ese')
elif direct >= 126 and direct <= 145:
info = _('se')
elif direct >= 146 and direct <= 160:
info = _('sse')
elif direct >= 161 and direct <= 200:
info = _('S')
elif direct >= 201 and direct <= 215:
info = _('ssw')
elif direct >= 216 and direct <= 235:
info = _('sw')
elif direct >= 236 and direct <= 250:
info = _('wsw')
elif direct >= 251 and direct <= 290:
info = _('W')
elif direct >= 291 and direct <= 305:
info = _('wnw')
elif direct >= 306 and direct <= 325:
info = _('nw')
elif direct >= 326 and direct <= 340:
info = _('nnw')
elif direct >= 341 and direct <= 360:
info = _('N')
else:
info = "N/A"
elif self.type == self.speed:
info = xweather['yspeed'] + ' km/h'
elif self.type == self.humidity:
            info = xweather['yhumidity'] + ' %'  # humidity is a percentage, not a pressure
elif self.type == self.visibility:
info = xweather['yvisibility'] + ' km'
elif self.type == self.pressure:
info = xweather['ypressure'] + ' mb'
elif self.type == self.pressurenm:
if xweather['ypressure'] != "N/A":
info = "%d mmHg" % round(float(xweather['ypressure']) * 0.75)
else:
info = "N/A"
elif self.type == self.wtext:
info = xweather['ytext']
        elif self.type == self.temp:
            if xweather['ytemp'] != "N/A":
                info = xweather['ytemp'] + degree_sign
            else:
                info = xweather['ytemp']
elif self.type == self.picon:
info = xweather['ypicon']
elif self.type == self.wtext2:
info = xweather['ytextday2']
        elif self.type == self.templow2:
            if xweather['ytemplowday2'] != "N/A":
                info = xweather['ytemplowday2'] + degree_sign
            else:
                info = xweather['ytemplowday2']
        elif self.type == self.temphigh2:
            if xweather['ytemphighday2'] != "N/A":
                info = xweather['ytemphighday2'] + degree_sign
            else:
                info = xweather['ytemphighday2']
elif self.type == self.picon2:
info = xweather['ypiconday2']
        elif self.type == self.day2:
            info = day_names.get(xweather['yday2'], "N/A")
elif self.type == self.date2:
info = xweather['ydate2']
elif self.type == self.wtext3:
info = xweather['ytextday3']
        elif self.type == self.templow3:
            if xweather['ytemplowday3'] != "N/A":
                info = xweather['ytemplowday3'] + degree_sign
            else:
                info = xweather['ytemplowday3']
        elif self.type == self.temphigh3:
            if xweather['ytemphighday3'] != "N/A":
                info = xweather['ytemphighday3'] + degree_sign
            else:
                info = xweather['ytemphighday3']
elif self.type == self.picon3:
info = xweather['ypiconday3']
        elif self.type == self.day3:
            info = day_names.get(xweather['yday3'], "N/A")
elif self.type == self.date3:
info = xweather['ydate3']
elif self.type == self.wtext4:
info = xweather['ytextday4']
        elif self.type == self.templow4:
            if xweather['ytemplowday4'] != "N/A":
                info = xweather['ytemplowday4'] + degree_sign
            else:
                info = xweather['ytemplowday4']
        elif self.type == self.temphigh4:
            if xweather['ytemphighday4'] != "N/A":
                info = xweather['ytemphighday4'] + degree_sign
            else:
                info = xweather['ytemphighday4']
elif self.type == self.picon4:
info = xweather['ypiconday4']
        elif self.type == self.day4:
            info = day_names.get(xweather['yday4'], "N/A")
elif self.type == self.date4:
info = xweather['ydate4']
elif self.type == self.wtext5:
info = xweather['ytextday5']
        elif self.type == self.templow5:
            if xweather['ytemplowday5'] != "N/A":
                info = xweather['ytemplowday5'] + degree_sign
            else:
                info = xweather['ytemplowday5']
        elif self.type == self.temphigh5:
            if xweather['ytemphighday5'] != "N/A":
                info = xweather['ytemphighday5'] + degree_sign
            else:
                info = xweather['ytemphighday5']
elif self.type == self.picon5:
info = xweather['ypiconday5']
        elif self.type == self.day5:
            info = day_names.get(xweather['yday5'], "N/A")
elif self.type == self.date5:
info = xweather['ydate5']
return info
text = property(getText)
def changed(self, what):
Converter.changed(self, (self.CHANGED_POLL,))
|
import time
import os
try:
    import enigma
    from Components.config import config
except ImportError:
    print("Cannot import enigma")
from Directories import resolveFilename, SCOPE_HDD
def getTrashFolder():
# Returns trash folder without symlinks
return os.path.realpath(os.path.join(resolveFilename(SCOPE_HDD), ".Trash"))
def createTrashFolder():
trash = getTrashFolder()
if not os.path.isdir(trash):
os.mkdir(trash)
return trash
class Trashcan:
def __init__(self, session):
self.session = session
session.nav.record_event.append(self.gotRecordEvent)
self.gotRecordEvent(None, None)
def gotRecordEvent(self, service, event):
print "[Trashcan] gotRecordEvent", service, event
self.recordings = len(self.session.nav.getRecordings())
if (event == enigma.iRecordableService.evEnd):
self.cleanIfIdle()
def destroy(self):
if self.session is not None:
self.session.nav.record_event.remove(self.gotRecordEvent)
self.session = None
def __del__(self):
self.destroy()
def cleanIfIdle(self):
# RecordTimer calls this when preparing a recording. That is a
# nice moment to clean up.
if self.recordings:
print "[Trashcan] Recording in progress", self.recordings
return
try:
ctimeLimit = time.time() - (config.usage.movielist_trashcan_days.value * 3600 * 24)
reserveBytes = 1024*1024*1024 * int(config.usage.movielist_trashcan_reserve.value)
clean(ctimeLimit, reserveBytes)
        except Exception as e:
            print("[Trashcan] Weirdness: %s" % e)
def clean(ctimeLimit, reserveBytes):
# Remove expired items from trash, and attempt to have
# reserveBytes of free disk space.
trash = getTrashFolder()
if not os.path.isdir(trash):
print "[Trashcan] No trash.", trash
return 0
diskstat = os.statvfs(trash)
free = diskstat.f_bfree * diskstat.f_bsize
bytesToRemove = reserveBytes - free
candidates = []
print "[Trashcan] bytesToRemove", bytesToRemove
size = 0
for root, dirs, files in os.walk(trash, topdown=False):
for name in files:
try:
fn = os.path.join(root, name)
st = os.stat(fn)
if st.st_ctime < ctimeLimit:
print "[Trashcan] Too old:", name, st.st_ctime
enigma.eBackgroundFileEraser.getInstance().erase(fn)
bytesToRemove -= st.st_size
else:
candidates.append((st.st_ctime, fn, st.st_size))
size += st.st_size
            except Exception as e:
                print("[Trashcan] Failed to stat %s: %s" % (name, e))
# Remove empty directories if possible
for name in dirs:
            try:
                os.rmdir(os.path.join(root, name))
            except OSError:
                pass
candidates.sort()
# Now we have a list of ctime, candidates, size. Sorted by ctime (=deletion time)
print "[Trashcan] Bytes to remove:", bytesToRemove
print "[Trashcan] Size now:", size
for st_ctime, fn, st_size in candidates:
if bytesToRemove < 0:
break
enigma.eBackgroundFileEraser.getInstance().erase(fn)
bytesToRemove -= st_size
size -= st_size
print "[Trashcan] Size now:", size
def cleanAll():
trash = getTrashFolder()
if not os.path.isdir(trash):
print "[Trashcan] No trash.", trash
return 0
for root, dirs, files in os.walk(trash, topdown=False):
for name in files:
fn = os.path.join(root, name)
try:
enigma.eBackgroundFileEraser.getInstance().erase(fn)
            except Exception as e:
                print("[Trashcan] Failed to erase %s: %s" % (name, e))
# Remove empty directories if possible
for name in dirs:
            try:
                os.rmdir(os.path.join(root, name))
            except OSError:
                pass
def init(session):
global instance
instance = Trashcan(session)
if __name__ == '__main__':
class Fake:
def __init__(self):
self.record_event = []
self.nav = self
self.RecordTimer = self
self.usage = self
self.movielist_trashcan_days = self
self.movielist_trashcan_reserve = self
self.value = 1
self.eBackgroundFileEraser = self
self.iRecordableService = self
self.evEnd = None
def getInstance(self):
# eBackgroundFileEraser
return self
def erase(self, fn):
print "ERASE", fn
def getNextRecordingTime(self):
# RecordTimer
return time.time() + 500
def getRecordings(self):
return []
def destroy(self):
if self.record_event:
raise Exception, "record_event not empty" + str(self.record_event)
s = Fake()
createTrashFolder()
config = s
enigma = s
init(s)
diskstat = os.statvfs('/hdd/movie')
free = diskstat.f_bfree * diskstat.f_bsize
# Clean up one MB
clean(1264606758, free + 1000000)
cleanAll()
instance.destroy()
s.destroy()
|
from __future__ import division, absolute_import, print_function, unicode_literals
from awlsim.common.compat import *
from awlsim.core.instructions.main import * #@nocy
from awlsim.core.operators import *
class AwlInsn_ASSERT_LT(AwlInsn): #+cdef
__slots__ = ()
def __init__(self, cpu, rawInsn):
AwlInsn.__init__(self, cpu, AwlInsn.TYPE_ASSERT_LT, rawInsn)
self.assertOpCount(2)
def run(self):
s = self.cpu.statusWord
val0 = self.cpu.fetch(self.ops[0])
val1 = self.cpu.fetch(self.ops[1])
if not (val0 < val1):
raise AwlSimError("Assertion failed")
s.NER = 0
|
"""Implement registries for formatter."""
import os
from flask_registry import (
ModuleAutoDiscoveryRegistry,
PkgResourcesDirDiscoveryRegistry,
RegistryProxy,
)
from invenio.ext.registry import ModuleAutoDiscoverySubRegistry
from invenio.utils.datastructures import LazyDict
import yaml
format_templates_directories = RegistryProxy(
'format_templates_directories',
ModuleAutoDiscoveryRegistry,
'format_templates'
)
format_templates = RegistryProxy(
'format_templates',
PkgResourcesDirDiscoveryRegistry,
'.', registry_namespace=format_templates_directories
)
output_formats_directories = RegistryProxy(
'output_formats_directories',
ModuleAutoDiscoveryRegistry,
'output_formats'
)
output_formats_files = RegistryProxy(
'output_formats_files',
PkgResourcesDirDiscoveryRegistry,
'.', registry_namespace=output_formats_directories
)
template_context_functions = RegistryProxy(
'template_context_functions',
ModuleAutoDiscoverySubRegistry,
'template_context_functions'
)
def create_format_templates_lookup():
"""Create format templates."""
out = {}
def _register(path, level=1):
if level > 4:
return
normpath = os.path.normpath(path)
if os.path.isdir(normpath):
for p in os.listdir(normpath):
_register(os.path.join(normpath, p), level=level+1)
else:
parts = normpath.split(os.path.sep)
out[os.path.sep.join(parts[-level:])] = normpath
for t in reversed(format_templates):
_register(t)
return out
format_templates_lookup = LazyDict(create_format_templates_lookup)
def create_output_formats_lookup():
"""Create output formats."""
out = {}
for f in output_formats_files:
of = os.path.basename(f).lower()
data = {'names': {}}
if of.endswith('.yml'):
of = of[:-4]
            with open(f, 'r') as fh:
                # safe_load avoids executing arbitrary YAML tags; fh avoids
                # shadowing the loop variable f
                data.update(yaml.safe_load(fh) or {})
data['code'] = of
else:
continue # unknown filetype
if of in out:
continue
out[of] = data
return out
output_formats = LazyDict(create_output_formats_lookup)
export_formats = LazyDict(lambda: dict(
(code, of) for code, of in output_formats.items()
if of.get('content_type', '') != 'text/html' and of.get('visibility', 0)
))
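
# Illustrative lookup (assumes the dict-like interface of LazyDict; 'hb'
# is a made-up output-format code -- real codes come from the discovered
# .yml files):
#
#   of = output_formats.get('hb')
#   if of:
#       print(of['code'], of.get('content_type', 'text/html'))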
|
import os
import sys
import time
import linecache
import traceback
import logging
from os import path
from logging import Logger, Formatter, Handler, DEBUG, INFO, WARNING, ERROR
from conduct import colors
LOGFMT = '%(asctime)s : %(levelname)-7s : %(name)-25s: %(message)s'
DATEFMT = '%H:%M:%S'
DATESTAMP_FMT = '%Y-%m-%d'
SECONDS_PER_DAY = 60 * 60 * 24
LOGLEVELS = {'debug': DEBUG, 'info': INFO, 'warning': WARNING, 'error': ERROR}
INVLOGLEVELS = {value : key for key, value in LOGLEVELS.items()}
class ConductLogger(Logger):
maxLogNameLength = 0
def __init__(self, *args, **kwargs):
Logger.__init__(self, *args, **kwargs)
ConductLogger._storeLoggerNameLength(self)
def getChild(self, suffix, ownDir=False):
child = Logger.getChild(self, suffix)
child.setLevel(self.getEffectiveLevel())
if ownDir:
for handler in self._collectHandlers():
if isinstance(handler, LogfileHandler):
handler = handler.getChild(suffix)
child.addHandler(handler)
child.propagate = False
return child
def _collectHandlers(self):
result = []
log = self
while log is not None:
result += log.handlers
log = log.parent
return result
@staticmethod
def _storeLoggerNameLength(logObj):
# store max logger name length for formatting
if len(logObj.name) > ConductLogger.maxLogNameLength:
ConductLogger.maxLogNameLength = len(logObj.name)
class ConsoleFormatter(Formatter):
"""
A lightweight formatter for the interactive console, with optional
colored output.
"""
def __init__(self, fmt=None, datefmt=None, colorize=None):
Formatter.__init__(self, fmt, datefmt)
if colorize:
self.colorize = colorize
else:
self.colorize = lambda c, s: s
def formatException(self, exc_info):
return traceback.format_exception_only(*exc_info[0:2])[-1]
def formatTime(self, record, datefmt=None):
return time.strftime(datefmt or DATEFMT,
self.converter(record.created))
def format(self, record):
record.message = record.getMessage()
levelno = record.levelno
datefmt = self.colorize('lightgray', '[%(asctime)s] ')
namefmt = '%(name)-' + str(ConductLogger.maxLogNameLength) + 's: '
if levelno <= DEBUG:
fmtstr = self.colorize('darkgray', '%s%%(message)s' % namefmt)
elif levelno <= INFO:
fmtstr = '%s%%(message)s' % namefmt
elif levelno <= WARNING:
fmtstr = self.colorize('fuchsia', '%s%%(levelname)s: %%(message)s'
% namefmt)
else:
# Add exception type to error (if caused by exception)
msgPrefix = ''
if record.exc_info:
msgPrefix = '%s: ' % record.exc_info[0].__name__
fmtstr = self.colorize('red', '%s%%(levelname)s: %s%%(message)s'
% (namefmt, msgPrefix))
fmtstr = datefmt + fmtstr
if not getattr(record, 'nonl', False):
fmtstr += '\n'
record.asctime = self.formatTime(record, self.datefmt)
s = fmtstr % record.__dict__
# never output more exception info -- the exception message is already
# part of the log message because of our special logger behavior
# if record.exc_info:
# # *not* caching exception text on the record, since it's
# # only a short version
# s += self.formatException(record.exc_info)
return s
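# The colorize callback receives (color_name, text) and returns the
# decorated string; conduct.colors.colorize is used by ColoredConsoleHandler
# below, but any compatible callable works, e.g. (sketch only):
#
#     fmt = ConsoleFormatter(datefmt=DATEFMT,
#                            colorize=lambda c, s: '<%s>%s</%s>' % (c, s, c))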
def format_extended_frame(frame):
ret = []
for key, value in frame.f_locals.items():
try:
valstr = repr(value)[:256]
except Exception:
valstr = '<cannot be displayed>'
ret.append(' %-20s = %s\n' % (key, valstr))
ret.append('\n')
return ret
def format_extended_traceback(etype, value, tb):
ret = ['Traceback (most recent call last):\n']
while tb is not None:
frame = tb.tb_frame
filename = frame.f_code.co_filename
item = ' File "%s", line %d, in %s\n' % (filename, tb.tb_lineno,
frame.f_code.co_name)
linecache.checkcache(filename)
line = linecache.getline(filename, tb.tb_lineno, frame.f_globals)
if line:
item = item + ' %s\n' % line.strip()
ret.append(item)
if filename != '<script>':
ret += format_extended_frame(tb.tb_frame)
tb = tb.tb_next
ret += traceback.format_exception_only(etype, value)
return ''.join(ret).rstrip('\n')
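# Quick demonstration (sketch): besides the standard frames, the extended
# traceback appends each frame's locals, with every value truncated to 256
# characters by format_extended_frame:
#
#     try:
#         1 / 0
#     except ZeroDivisionError:
#         print(format_extended_traceback(*sys.exc_info()))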
class LogfileFormatter(Formatter):
"""
The standard Formatter does not support milliseconds with an explicit
datestamp format. It also doesn't show the full traceback for exceptions.
"""
extended_traceback = True
def formatException(self, ei):
if self.extended_traceback:
s = format_extended_traceback(*ei)
else:
s = ''.join(traceback.format_exception(ei[0], ei[1], ei[2],
sys.maxsize))
if s.endswith('\n'):
s = s[:-1]
return s
def formatTime(self, record, datefmt=None):
res = time.strftime(DATEFMT, self.converter(record.created))
res += ',%03d' % record.msecs
return res
class StreamHandler(Handler):
"""Reimplemented from logging: remove cruft, remove bare excepts."""
def __init__(self, stream=None):
Handler.__init__(self)
if stream is None:
stream = sys.stderr
self.stream = stream
def flush(self):
self.acquire()
try:
if self.stream and hasattr(self.stream, 'flush'):
self.stream.flush()
finally:
self.release()
def emit(self, record):
try:
msg = self.format(record)
try:
self.stream.write('%s\n' % msg)
except UnicodeEncodeError:
self.stream.write('%s\n' % msg.encode('utf-8'))
self.flush()
except Exception:
self.handleError(record)
class LogfileHandler(StreamHandler):
"""
Logs to log files with a date stamp appended, and rollover on midnight.
"""
def __init__(self, directory, filenameprefix, dayfmt=DATESTAMP_FMT):
self._directory = path.join(directory, filenameprefix)
if not path.isdir(self._directory):
os.makedirs(self._directory)
self._currentsymlink = path.join(self._directory, 'current')
self._filenameprefix = filenameprefix
self._pathnameprefix = path.join(self._directory, filenameprefix)
self._dayfmt = dayfmt
# today's logfile name
basefn = self._pathnameprefix + '-' + time.strftime(dayfmt) + '.log'
self.baseFilename = path.abspath(basefn)
self.mode = 'a'
StreamHandler.__init__(self, self._open())
# determine time of first midnight from now on
t = time.localtime()
self.rollover_at = time.mktime((t[0], t[1], t[2], 0, 0, 0,
t[6], t[7], t[8])) + SECONDS_PER_DAY
self.setFormatter(LogfileFormatter(LOGFMT, DATEFMT))
self.disabled = False
def getChild(self, name):
return LogfileHandler(self._directory, name)
def filter(self, record):
return not self.disabled
def emit(self, record):
try:
t = int(time.time())
if t >= self.rollover_at:
self.doRollover()
if self.stream is None:
self.stream = self._open()
StreamHandler.emit(self, record)
except Exception:
self.handleError(record)
def enable(self, enabled):
if enabled:
self.disabled = False
self.stream.close()
self.stream = self._open()
else:
self.disabled = True
def close(self):
self.acquire()
try:
if self.stream:
self.flush()
if hasattr(self.stream, 'close'):
self.stream.close()
StreamHandler.close(self)
self.stream = None
finally:
self.release()
def doRollover(self):
self.stream.close()
self.baseFilename = self._pathnameprefix + '-' + \
time.strftime(self._dayfmt) + '.log'
self.stream = self._open()
self.rollover_at += SECONDS_PER_DAY
def _open(self):
# update 'current' symlink upon open
try:
os.remove(self._currentsymlink)
except OSError:
# If the symlink does not exist yet, OSError is raised; this should
# happen at most once per installation.
pass
if hasattr(os, 'symlink'):
os.symlink(path.basename(self.baseFilename), self._currentsymlink)
# finally open the new logfile....
return open(self.baseFilename, self.mode)
class ColoredConsoleHandler(StreamHandler):
"""
A handler class that writes colorized records to standard output.
"""
def __init__(self):
StreamHandler.__init__(self, sys.stdout)
self.setFormatter(ConsoleFormatter(datefmt=DATEFMT,
colorize=colors.colorize))
def emit(self, record):
msg = self.format(record)
try:
self.stream.write(msg)
except UnicodeEncodeError:
self.stream.write(msg.encode('utf-8'))
self.stream.flush()
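# Minimal wiring sketch; the log directory and logger name are assumptions,
# not part of this module:
#
#     logging.setLoggerClass(ConductLogger)
#     root = logging.getLogger('conduct')
#     root.setLevel(LOGLEVELS['info'])
#     root.addHandler(ColoredConsoleHandler())
#     root.addHandler(LogfileHandler('log', 'conduct'))
#     root.info('started')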
|
from scapy.all import *
import sys, urllib, os, subprocess, random
from itertools import *
import Global_Vars
class pacifyIpv4Http:
def writeIPv4HttpRule(self, sid_id_http, http_method, http_uri_string, \
http_content_all, directory, src_name):
## create and write a "<sid_id_http>.rules" file
rule_file = open('%s/%s.rules' % (directory,sid_id_http), 'w+')
content_http_uri_string_ready_for_rule = ""
if (len(http_uri_string) > 250):
content_http_uri_string_array = [http_uri_string[i:i+250] for i in range(0, len(http_uri_string), 250)]
for i in content_http_uri_string_array:
i = i.replace('|', '|7C|').replace('"', '|22|').replace(';', '|3B|').\
replace(':', '|3A|').replace(' ', '|20|').replace('\\', '|5C|').\
replace('\'', '|27|').replace('\r', '|0d|').replace('\n', '|0a|')
content_http_uri_string_ready_for_rule = \
content_http_uri_string_ready_for_rule + \
("content:\"%s\"; http_raw_uri; " % (i))
else:
http_uri_string = http_uri_string.replace('|', '|7C|').\
replace('"', '|22|').replace(';', '|3B|').replace(':', '|3A|').\
replace(' ', '|20|').replace('\\', '|5C|').replace('\'', '|27|').\
replace('\r', '|0d|').replace('\n', '|0a|')
content_http_uri_string_ready_for_rule = \
("content:\"%s\"; http_raw_uri; " % (http_uri_string))
content_all_ready_for_rule = ""
if (len(http_content_all) > 250):
content_http_all_array = [http_content_all[i:i+250] for i in range(0, len(http_content_all), 250)]
for i in content_http_all_array:
i = i.replace('|', '|7C|').replace('"', '|22|').replace(';', '|3B|').\
replace(':', '|3A|').replace(' ', '|20|').replace('\\', '|5C|').\
replace('\'', '|27|').replace('\r', '|0d|').replace('\n', '|0a|')
content_all_ready_for_rule = \
content_all_ready_for_rule + \
("content:\"%s\"; " % (i))
else:
http_content_all = http_content_all.replace('|', '|7C|').\
replace('"', '|22|').replace(';', '|3B|').replace(':', '|3A|').\
replace(' ', '|20|').replace('\\', '|5C|').replace('\'', '|27|').\
replace('\r', '|0d|').replace('\n', '|0a|')
content_all_ready_for_rule = \
("content:\"%s\"; " % (http_content_all))
rule_file.write ( \
"alert http any any -> any any (msg:\"HTTP requests tests - sid %s , \
pcap - %s \"; \
content:\"%s\"; http_method; %s %s \
reference:url,%s; sid:%s; rev:1;)" % \
(sid_id_http, sid_id_http, http_method, \
content_http_uri_string_ready_for_rule, \
content_all_ready_for_rule, \
src_name, sid_id_http) )
rule_file.close()
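# The |XX| hex-escape chain above is repeated for every content buffer; an
# equivalent helper (sketch only, not used by this class) would be:
#
#     def _escape_rule_content(buf):
#         # '|' must be escaped first, exactly as above, otherwise the
#         # later |XX| replacements would be double-escaped
#         for ch, esc in (('|', '|7C|'), ('"', '|22|'), (';', '|3B|'),
#                         (':', '|3A|'), (' ', '|20|'), ('\\', '|5C|'),
#                         ("'", '|27|'), ('\r', '|0d|'), ('\n', '|0a|')):
#             buf = buf.replace(ch, esc)
#         return buf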
def rebuildIPv4HttpSessionExtraTcpSAs(self, packet, results_directory, \
sid_id_http, src_name, repo_name):
#We rebuild the HTTP session, but inject some extra SYN/ACKs (SAs)
session_packets = list()
session_packets_fragmented = list()
#print packet[TCP][Raw]
#print packet[Ether].src
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
seq_num = random.randint(1024,(2**32)-1)
ack_num = random.randint((2**10),(2**16))
# Make sure ack_num_extra_1/2 can never equal ack_num: the three
# random ranges below are disjoint
ack_num_extra_1 = random.randint((2**22)+1, (2**32)-1)
ack_num_extra_2 = random.randint((2**16)+1, (2**22)-1)
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
synack_extra_1 = Ether(src=packet[Ether].dst, dst=packet[Ether].src, \
type=0x800 )/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, \
dport=portsrc, seq=ack_num_extra_1, ack=syn.seq+1)
synack_extra_2 = Ether(src=packet[Ether].dst, dst=packet[Ether].src, \
type=0x800 )/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, \
dport=portsrc, seq=ack_num_extra_2, ack=syn.seq+1)
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
p_frag_synack = fragment(synack, fragsize=1 )
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
##This is the actual data packet that will be sent, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
##We need to ACK the packet
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(p.seq + len(p[Raw])))
##Now we build the FIN handshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
##
# Here we start ordering the stream so that we have 3 SAs. The extra ones are
# BEFORE the real one. For the purpose of thoroughness we also
# add cases where the real SA arrives fragmented.
##
#write the session - normal
session_packets.append(syn)
session_packets.append(synack_extra_1)
session_packets.append(synack_extra_2)
session_packets.append(synack)
session_packets.append(ack)
session_packets.append(p)
session_packets.append(returnAck)
session_packets.append(finAck)
session_packets.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_Real_SA-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets)
session_packets[:] = [] #empty the list
#write the session but with an ordered fragmented real SA
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack_extra_1)
session_packets_fragmented.append(synack_extra_2)
for p_fragment in p_frag_synack:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(ack)
session_packets_fragmented.append(p)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_Fragmented_Real_SA_Ordered-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack_extra_1)
session_packets_fragmented.append(synack_extra_2)
for p_fragment in reversed(p_frag_synack):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(ack)
session_packets_fragmented.append(p)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_Fragmented_Real_SA_Reversed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack_extra_1)
session_packets_fragmented.append(synack_extra_2)
random.shuffle(p_frag_synack)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_synack:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(ack)
session_packets_fragmented.append(p)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_Fragmented_Real_SA_Mixed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
##
# Here we start ordering the stream so that we have 3 SAs. The extra ones are
# AFTER the real one. For the purpose of thoroughness we also
# add cases where the real SA arrives fragmented.
##
#write the session - normal
session_packets.append(syn)
session_packets.append(synack)
session_packets.append(synack_extra_1)
session_packets.append(synack_extra_2)
session_packets.append(ack)
session_packets.append(p)
session_packets.append(returnAck)
session_packets.append(finAck)
session_packets.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_after_Real_SA-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets)
session_packets[:] = [] #empty the list
#write the session but with an ordered fragmented real SA
session_packets_fragmented.append(syn)
for p_fragment in p_frag_synack:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(synack_extra_1)
session_packets_fragmented.append(synack_extra_2)
session_packets_fragmented.append(ack)
session_packets_fragmented.append(p)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_after_Fragmented_Real_SA_Ordered-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
for p_fragment in reversed(p_frag_synack):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(synack_extra_1)
session_packets_fragmented.append(synack_extra_2)
session_packets_fragmented.append(ack)
session_packets_fragmented.append(p)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_after_Fragmented_Real_SA_Reversed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
random.shuffle(p_frag_synack)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_synack:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(synack_extra_1)
session_packets_fragmented.append(synack_extra_2)
session_packets_fragmented.append(ack)
session_packets_fragmented.append(p)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_after_Fragmented_Real_SA_Mixed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
##
# Here we start ordering the stream so that we have 3 SAs. The extra ones are
# BEFORE and AFTER the real one. For the purpose of thoroughness we also
# add cases where the real SA arrives fragmented.
##
#write the session - normal
session_packets.append(syn)
session_packets.append(synack_extra_1)
session_packets.append(synack)
session_packets.append(synack_extra_2)
session_packets.append(ack)
session_packets.append(p)
session_packets.append(returnAck)
session_packets.append(finAck)
session_packets.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_and_after_Real_SA-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets)
session_packets[:] = [] #empty the list
#write the session but with an ordered fragmented real SA
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack_extra_1)
for p_fragment in p_frag_synack:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(synack_extra_2)
session_packets_fragmented.append(ack)
session_packets_fragmented.append(p)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_and_after_Fragmented_Real_SA_Ordered-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack_extra_1)
for p_fragment in reversed(p_frag_synack):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(synack_extra_2)
session_packets_fragmented.append(ack)
session_packets_fragmented.append(p)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_and_after_Fragmented_Real_SA_Reversed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack_extra_1)
random.shuffle(p_frag_synack)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_synack:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(synack_extra_2)
session_packets_fragmented.append(ack)
session_packets_fragmented.append(p)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Tcp_Extra_SAs_before_and_after_Fragmented_Real_SA_Mixed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
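# The append/wrpcap/clear pattern above repeats for every variant; an
# equivalent helper (sketch only, not used by this class; the name and
# 'verdict' parameter are illustrative) would be:
#
#     def _write_session(self, packets, variant, results_directory, subdir,
#                        sid_id_http, src_name, repo_name, verdict):
#         wrpcap("%s/%s-%s-%s_%s-%s-%s-01.pcap"
#                % (os.path.join(results_directory, subdir), sid_id_http,
#                   self.incrementPcapId("byOne"), src_name, variant,
#                   repo_name, verdict), packets)
#         del packets[:]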
def rebuildIPv4HttpSession(self, packet, results_directory, sid_id_http, \
src_name, repo_name):
session_packets = list()
session_packets_fragmented = list()
#print packet[TCP][Raw]
#print packet[Ether].src
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
seq_num = random.randint(1024,(2**32)-1)
ack_num = random.randint(1024,(2**32)-1)
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
##This is the actual data packet that will be sent, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
##This is the actual data packet that will be sent containing the payload
#- fragmented
p_frag = fragment(p, fragsize=10 )
##We need to ACK the packet
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(p.seq + len(p[Raw])))
##Now we build the FIN handshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
#write the session - normal
session_packets.append(syn)
session_packets.append(synack)
session_packets.append(ack)
session_packets.append(p)
session_packets.append(returnAck)
session_packets.append(finAck)
session_packets.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets)
session_packets[:] = [] #empty the list
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag)
#shuffle JUST the fragments in the session
for p_fragment in p_frag:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
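# Sequence/ack relationships used above (standard RFC 793 bookkeeping):
#     syn       : seq = random ISN
#     synack    : seq = random, ack = syn.seq + 1
#     ack, p    : seq = syn.seq + 1, ack = synack.seq + 1
#     returnAck : seq = p.ack, ack = p.seq + len(payload)
#     finAck    : seq = returnAck.ack, ack = returnAck.seq
#     finalAck  : seq = finAck.ack, ack = finAck.seq + 1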
def rebuildIPv4HttpSessionDot1Q(self, packet, results_directory, \
sid_id_http, src_name, repo_name):
#Dot1Q VLAN tags
session_packets = list()
session_packets_fragmented = list()
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
seq_num = random.randint(1024,(2**32)-1)
ack_num = random.randint(1024,(2**32)-1)
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
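# NOTE: the .tags attribute and the Untagged() helper used in the Dot1Q/QinQ
# methods are not part of stock scapy; they are assumed to come from this
# project's VLAN-aware scapy patches or helpers.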
syn.tags = Dot1Q(vlan=1111)
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
synack.tags = Dot1Q(vlan=1111)
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
ack.tags = Dot1Q(vlan=1111)
##This is the actual data packet that will be sent, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p.tags = Dot1Q(vlan=1111)
##This is the actual data packet that will be sent containing the payload
#- fragmented
p_frag = fragment(p, fragsize=10 )
## This is the same original data packet - but no VLAN tags
p_Dot1Q_untagged = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_frag_Dot1Q_untagged = fragment(p_Dot1Q_untagged, fragsize=10)
# Dot1Q wrong VLAN tag - we change the VLAN tag in the data packet
# Everything else is the same and stays the same
p_Dot1Q_tagged_wrong = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_Dot1Q_tagged_wrong.tags = Dot1Q(vlan=3333)
##This is the actual data packet that will be sent containing the payload
#- fragmented.
p_frag_Dot1Q_tagged_wrong = fragment(p_Dot1Q_tagged_wrong, fragsize=10 )
##This is the data packet. From this data packet we will edit and tweak
# the VLAN tags for one or more fragments of the same data packet!
p_Dot1Q_data_frag = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_Dot1Q_data_frag.tags = Dot1Q(vlan=1111)
# We fragment the data packet, then we will play around with the fragments
# VLAN tags
p_frag_Dot1Q_data_frag_wrong = fragment(p_Dot1Q_data_frag, fragsize=10 )
p_frag_Dot1Q_data_frag_wrong[3].tags = Dot1Q(vlan=3333)
# We fragment the data packet, but we make one fragment untagged.
# VLAN tag missing
p_frag_Dot1Q_data_frag_missing = fragment(p_Dot1Q_data_frag, fragsize=10 )
p_frag_Dot1Q_data_frag_missing[3].tags = Untagged()
# We fragment the data packet, but we make ONLY one fragment tagged
# with the correct VLAN tag
p_frag_Dot1Q_data_frag_one_tagged = fragment(p_Dot1Q_data_frag, fragsize=10 )
for frag in p_frag_Dot1Q_data_frag_one_tagged:
frag.tags = Untagged()
p_frag_Dot1Q_data_frag_one_tagged[3].tags = Dot1Q(vlan=1111)
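# NOTE: the three per-fragment variants built just above are not written by
# this method (it writes p_frag, p_frag_Dot1Q_tagged_wrong and
# p_frag_Dot1Q_untagged); rebuildIPv4HttpSessionDot1QWrongTagInFragments()
# below rebuilds and writes them.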
#We need to ACK the packet
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(p.seq + len(p[Raw])))
returnAck.tags = Dot1Q(vlan=1111)
##Now we build the FIN handshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finAck.tags = Dot1Q(vlan=1111)
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
finalAck.tags = Dot1Q(vlan=1111)
#write the session - normal
session_packets.append(syn)
session_packets.append(synack)
session_packets.append(ack)
session_packets.append(p)
session_packets.append(returnAck)
session_packets.append(finAck)
session_packets.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Dot1Q-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets)
session_packets[:] = [] #empty the list
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_Dot1Q-%s-tp-01.pcap"\
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_Dot1Q-%s-tp-01.pcap"\
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag)
#shuffle JUST the fragments in the session
for p_fragment in p_frag:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_Dot1Q-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
##
# Here we start with the wrong Dot1Q VLAN tags in the data packet
# and the creation of the pcaps designed for not alerting
# due to changed (fake/hopped) VLAN tag in the same flow
##
#write the session - normal
session_packets.append(syn)
session_packets.append(synack)
session_packets.append(ack)
session_packets.append(p_Dot1Q_tagged_wrong)
session_packets.append(returnAck)
session_packets.append(finAck)
session_packets.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Dot1Q_tagged_wrong-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets)
session_packets[:] = [] #empty the list
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_Dot1Q_tagged_wrong:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_Dot1Q_tagged_wrong-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_Dot1Q_tagged_wrong):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_Dot1Q_tagged_wrong-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_Dot1Q_tagged_wrong)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_Dot1Q_tagged_wrong:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_Dot1Q_tagged_wrong-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
##
# Here we start with the missing Dot1Q VLAN tag in the data packet
# and the creation of the pcaps designed for not alerting
# due to missing VLAN tag in the same flow.
##
#write the session - normal
session_packets.append(syn)
session_packets.append(synack)
session_packets.append(ack)
session_packets.append(p_Dot1Q_untagged)
session_packets.append(returnAck)
session_packets.append(finAck)
session_packets.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Dot1Q_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets)
session_packets[:] = [] #empty the list
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_Dot1Q_untagged:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_Dot1Q_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_Dot1Q_untagged):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_Dot1Q_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_Dot1Q_untagged)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_Dot1Q_untagged:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_Dot1Q_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
def rebuildIPv4HttpSessionDot1QWrongTagInFragments(self, packet, \
results_directory, sid_id_http, src_name, repo_name):
#Dot1Q VLAN tags
#Here we will change the VLAN tags on one or more fragments
#of the data packet
session_packets = list()
session_packets_fragmented = list()
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
seq_num = random.randint(1024,(2**32)-1)
ack_num = random.randint(1024,(2**32)-1)
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
syn.tags = Dot1Q(vlan=1111)
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
synack.tags = Dot1Q(vlan=1111)
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
ack.tags = Dot1Q(vlan=1111)
##This is the actual data packet that will be sent, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p.tags = Dot1Q(vlan=1111)
##This is the actual data packet that will be sent containing the payload
#- fragmented
p_frag = fragment(p, fragsize=10 )
##This is the data packet. From this data packet we will edit and tweak
# the VLAN tags for one or more fragments of the same data packet!
p_Dot1Q_data_frag = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_Dot1Q_data_frag.tags = Dot1Q(vlan=1111)
# We fragment the data packet, then we will play around with the fragments
# VLAN tags - one fragment has the wrong VLAN tag
p_frag_Dot1Q_data_frag_wrong = fragment(p_Dot1Q_data_frag, fragsize=10 )
p_frag_Dot1Q_data_frag_wrong[3].tags = Dot1Q(vlan=3333)
# We fragment the data packet, but we make one fragment untagged.
# VLAN tag missing
p_frag_Dot1Q_data_frag_missing = fragment(p_Dot1Q_data_frag, fragsize=10 )
p_frag_Dot1Q_data_frag_missing[3].tags = Untagged()
# We fragment the data packet, but we make ONLY one fragment tagged
# with the correct VLAN tag
p_frag_Dot1Q_data_frag_one_tagged = fragment(p_Dot1Q_data_frag, fragsize=10 )
for frag in p_frag_Dot1Q_data_frag_one_tagged:
frag.tags = Untagged()
p_frag_Dot1Q_data_frag_one_tagged[3].tags = Dot1Q(vlan=1111)
#We need to ACK the packet
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(p.seq + len(p[Raw])))
returnAck.tags = Dot1Q(vlan=1111)
##Now we build the FIN handshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finAck.tags = Dot1Q(vlan=1111)
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
finalAck.tags = Dot1Q(vlan=1111)
##
# Here we start changing the Dot1Q VLAN tags in the FRAGMENTS of the data
# packet, and creating the pcaps designed for not alerting because one or
# more fragments in the same flow carry a missing or wrong VLAN tag.
##
## one fragment from the data packet has a missing VLAN tag
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_Dot1Q_data_frag_missing:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_Dot1Q_data_tag_missing_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_Dot1Q_data_frag_missing):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_Dot1Q_data_tag_missing_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_Dot1Q_data_frag_missing)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_Dot1Q_data_frag_missing:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_Dot1Q_data_tag_missing_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
## one fragment from the data packet has the wrong VLAN tag
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_Dot1Q_data_frag_wrong:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_Dot1Q_data_frag_wrong):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_Dot1Q_data_frag_wrong)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_Dot1Q_data_frag_wrong:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
## all fragments from the data packet have no VLAN tags except one
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_Dot1Q_data_frag_one_tagged:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_Dot1Q_data_tag_one_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_Dot1Q_data_frag_one_tagged):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_Dot1Q_data_tag_one_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_Dot1Q_data_frag_one_tagged)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_Dot1Q_data_frag_one_tagged:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_Dot1Q_data_tag_one_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
def rebuildIPv4HttpSessionQinQ(self, packet, results_directory, \
sid_id_http, src_name, repo_name):
#Dot1Q double tags (vlans) = QinQ
session_packets = list()
session_packets_fragmented = list()
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
seq_num = random.randint(1024,(2**32)-1)
ack_num = random.randint(1024,(2**32)-1)
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
syn.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
syn.tags[Dot1Q].tpid = 0x88a8
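# 0x88a8 is the IEEE 802.1ad (QinQ) TPID; it is forced onto the tag stack
# here, presumably so the written frames carry a proper S-tag/C-tag pair
# rather than two plain 0x8100 tags.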
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
synack.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
synack.tags[Dot1Q].tpid = 0x88a8
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
ack.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
ack.tags[Dot1Q].tpid = 0x88a8
##This is the actual data packet that will be sent, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
p.tags[Dot1Q].tpid = 0x88a8
##This is the actual data packet that will be sent containing the payload
#- fragmented
p_frag = fragment(p, fragsize=10 )
## This is the same original data packet - but no VLAN tags
p_QinQ_untagged = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_frag_QinQ_untagged = fragment(p_QinQ_untagged, fragsize=10)
# QinQ reversed - we reverse/switch the VLAN tags in the data packet
# Everything else is the same and stays the same
p_QinQ_tag_reversed = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_QinQ_tag_reversed.tags = Dot1AD(vlan=4094)/Dot1Q(vlan=666)
p_QinQ_tag_reversed.tags[Dot1Q].tpid = 0x88a8
##This is the actual data packet that will be sent containing the payload
#- fragmented, QinQ reversed/switched tags
p_frag_QinQ_tag_reversed = fragment(p_QinQ_tag_reversed, fragsize=10 )
##We need to ACK the packet
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(p.seq + len(p[Raw])))
returnAck.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
returnAck.tags[Dot1Q].tpid = 0x88a8
##Now we build the FIN handshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finAck.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
finAck.tags[Dot1Q].tpid = 0x88a8
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
finalAck.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
finalAck.tags[Dot1Q].tpid = 0x88a8
#write the session - normal
session_packets.append(syn)
session_packets.append(synack)
session_packets.append(ack)
session_packets.append(p)
session_packets.append(returnAck)
session_packets.append(finAck)
session_packets.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_QinQ-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets)
session_packets[:] = [] #empty the list
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag)
#shuffle JUST the fragments in the session
for p_fragment in p_frag:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
##
# Here we start with the reversed QinQ VLAN tags
# and the creation of the pcaps designed for not alerting
# due to switched (fake) VLAN tags in the same flow
##
#write the session - normal
session_packets.append(syn)
session_packets.append(synack)
session_packets.append(ack)
session_packets.append(p_QinQ_tag_reversed)
session_packets.append(returnAck)
session_packets.append(finAck)
session_packets.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_QinQ_tags_reversed-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets)
session_packets[:] = [] #empty the list
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_QinQ_tag_reversed:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_tags_reversed-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_tag_reversed):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_tags_reversed-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_QinQ_tag_reversed)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_tag_reversed:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_tags_reversed-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
##
# Here we start with the missing QinQ VLAN tags in the data packet
# and the creation of the pcaps designed for not alerting
# due to missing VLAN tags in the same flow
##
#write the session - normal
session_packets.append(syn)
session_packets.append(synack)
session_packets.append(ack)
session_packets.append(p_QinQ_untagged)
session_packets.append(returnAck)
session_packets.append(finAck)
session_packets.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_QinQ_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name),
session_packets)
session_packets[:] = [] #empty the list
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_QinQ_untagged:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_untagged):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_QinQ_untagged)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_untagged:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
def rebuildIPv4HttpSessionQinQWrongTagInFragments(self, packet, \
results_directory, sid_id_http, src_name, repo_name):
#QinQ VLAN tags - double tags
#Here we will change the VLAN tags on one or more frgaments
#of the QinQ data packet
session_packets = list()
session_packets_fragmented = list()
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
seq_num = random.randint(1024,(2**32)-1)
ack_num = random.randint(1024,(2**32)-1)
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
syn.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
syn.tags[Dot1Q].tpid = 0x88a8
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
synack.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
synack.tags[Dot1Q].tpid = 0x88a8
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
ack.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
ack.tags[Dot1Q].tpid = 0x88a8
##This is the actual data packet that will be send, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
p.tags[Dot1Q].tpid = 0x88a8
##This is the data packet. Fromt this data packet we will edit and tweek
# the VLAN tags (QinQ) for one or more fragments of the same data packet !
p_QinQ_data_frag = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_QinQ_data_frag.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
p_QinQ_data_frag.tags[Dot1Q].tpid = 0x88a8
## We fragment the data packet, then we will play around with the fragments
# VLAN tags in QinQ
# Here we change the VLAN tag of the inner Dot1Q layer
p_frag_QinQ_data_frag_wrong_dot1q = fragment(p_QinQ_data_frag, fragsize=10 )
p_frag_QinQ_data_frag_wrong_dot1q[3].tags = Dot1AD(vlan=666)/Dot1Q(vlan=777)
p_frag_QinQ_data_frag_wrong_dot1q[3].tags[Dot1Q].tpid = 0x88a8
## We fragment the data packet, then we will play around with the fragments
# VLAN tags in QinQ
# Here we change the VLAN tag of the outer 802.1AD layer
p_frag_QinQ_data_frag_wrong_dot1ad = fragment(p_QinQ_data_frag, fragsize=10 )
p_frag_QinQ_data_frag_wrong_dot1ad[3].tags = Dot1AD(vlan=777)/Dot1Q(vlan=4094)
p_frag_QinQ_data_frag_wrong_dot1ad[3].tags[Dot1Q].tpid = 0x88a8
## We fragment the data packet and make one fragment with both tags
# having the wrong VLAN IDs
p_frag_QinQ_data_frag_wrong_both = fragment(p_QinQ_data_frag, fragsize=10 )
p_frag_QinQ_data_frag_wrong_both[3].tags = Dot1AD(vlan=444)/Dot1Q(vlan=555)
p_frag_QinQ_data_frag_wrong_both[3].tags[Dot1Q].tpid = 0x88a8
## We fragment the data packet , but we make one fragment untagged.
# VLAN tags missing
p_frag_QinQ_data_frag_missing_tags = fragment(p_QinQ_data_frag, fragsize=10 )
p_frag_QinQ_data_frag_missing_tags[3].tags = Untagged()
## We fragment the data packet , but we make one fragment with reversed
# VLAN tags
p_frag_QinQ_data_frag_reversed_tags = fragment(p_QinQ_data_frag, fragsize=10 )
p_frag_QinQ_data_frag_reversed_tags[3].tags = \
Dot1AD(vlan=4094)/Dot1Q(vlan=666)
p_frag_QinQ_data_frag_reversed_tags[3].tags[Dot1Q].tpid = 0x88a8
## We fragment the data packet , but we make ONLY one fragment QinQ tagged
# with the correct VLAN tags
p_frag_QinQ_data_frag_one_tagged = fragment(p_QinQ_data_frag, fragsize=10 )
for frag in p_frag_QinQ_data_frag_one_tagged:
frag.tags = Untagged()
p_frag_QinQ_data_frag_one_tagged[3].tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
p_frag_QinQ_data_frag_one_tagged[3].tags[Dot1Q].tpid = 0x88a8
##We need to ACK the packet
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(p.seq + len(p[Raw])))
returnAck.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
returnAck.tags[Dot1Q].tpid = 0x88a8
##Now we build the Finshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finAck.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
finAck.tags[Dot1Q].tpid = 0x88a8
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
finalAck.tags = Dot1AD(vlan=666)/Dot1Q(vlan=4094)
finalAck.tags[Dot1Q].tpid = 0x88a8
##
# Here we start with chnaging the QinQ VLAN tags in the FRAGMENTS
# of the data packetand the creation of the pcaps designed for not alerting
# due to missing/reversed/nonexisting VLAN tags in the fragments of
# data in the same flow.
##
## one fragment from the data packet has a wrong VLAN tag - dot1Q tag.
# The other tag (dot1AD- S-VLAN/Carrier VLAN) is correct
# write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_wrong_dot1q:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_wrong_dot1q):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_wrong_dot1q)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_wrong_dot1q:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_data_frag_wrong_dot1q_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
## one fragment from the data packet has a wrong VLAN tag - dot1AD tag
# -> S-VLAN/Carrier VLAN. The other tag (dot1q) is correct
# write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_wrong_dot1ad:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_wrong_dot1ad):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_wrong_dot1ad)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_wrong_dot1ad:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_data_frag_wrong_dot1ad_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
## one frgament from the data packet has both VLAN tag IDs wrong
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_wrong_both:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_wrong_both):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_wrong_both)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_wrong_both:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
## one fragment of the data packet has NO VLAN tags
#write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_missing_tags:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_missing_tags):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_missing_tags)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_missing_tags:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
## one fragment of the data packet has both VLAN tags switched/reversed
# write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_reversed_tags:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_reversed_tags):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_reversed_tags)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_reversed_tags:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
## one fragment of the data packet has both VLAN tags correct.
# The rest do not.
# write the session but with an ordered fragmented payload
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_one_tagged:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Ordered_QinQ_data_frag_one_tagged_fragments-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_one_tagged):
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Reversed_QinQ_data_frag_one_tagged_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_fragmented.append(syn)
session_packets_fragmented.append(synack)
session_packets_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_one_tagged)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_one_tagged:
session_packets_fragmented.append(p_fragment)
session_packets_fragmented.append(returnAck)
session_packets_fragmented.append(finAck)
session_packets_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Fragmented_Mixed_QinQ_data_frag_one_tagged_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_fragmented)
session_packets_fragmented[:] = [] #empty the list
def rebuildIPv4HttpSeqOverSpill(self, packet, results_directory, \
sid_id_http, src_name, repo_name):
#rebuild session with overspilling seq numbers
# seq = 4294967294, 4294967295, 0, 1,....(as per RFC)
#seq overspill re-writing
session_packets_seq_overspill = list()
session_packets_seq_overspill_fragmented = list()
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
#maximum seq=4294967295
seq_num = 4294967294
ack_num = random.randint(1024,(2**32)-1)
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
##This is the actual data packet that will be send, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
##This is the actual data packet that will be sent containing the payload
#- fragmented
p_frag = fragment(p, fragsize=10 )
##We need to ACK the packet
#here we go to "ack=(len(p[Raw]) -1 )" !! - "the overspill"
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(len(p[Raw]) -1 ))
##Now we build the Finshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
#write the session - normal
session_packets_seq_overspill.append(syn)
session_packets_seq_overspill.append(synack)
session_packets_seq_overspill.append(ack)
session_packets_seq_overspill.append(p)
session_packets_seq_overspill.append(returnAck)
session_packets_seq_overspill.append(finAck)
session_packets_seq_overspill.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill)
session_packets_seq_overspill[:] = [] #empty the list
#write the fragmented packets - ordered
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write mix the fragmented packets
#shuffle/unsort/unorder/mix JUST the fragmented packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag)
#shuffle JUST the fragments in the session
for p_fragment in p_frag:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
def rebuildIPv4HttpSeqOverSpillDot1Q(self, packet, results_directory, \
sid_id_http, src_name, repo_name):
#Dot1Q - VLAN tags cases.
#rebuild session with overspilling seq numbers
# seq = 4294967294, 4294967295, 0, 1,....(as per RFC)
#seq overspill re-writing
session_packets_seq_overspill = list()
session_packets_seq_overspill_fragmented = list()
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
#maximum seq=4294967295
seq_num = 4294967294
ack_num = random.randint(1024,(2**32)-1)
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
syn.tags = Dot1Q(vlan=1155)
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
synack.tags = Dot1Q(vlan=1155)
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
ack.tags = Dot1Q(vlan=1155)
##This is the actual data packet that will be send, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p.tags = Dot1Q(vlan=1155)
##This is the actual data packet that will be sent containing the payload
#- fragmented
p_frag = fragment(p, fragsize=10 )
## This is the same original data packet - but no VLAN tags
p_Dot1Q_untagged = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_frag_Dot1Q_untagged = fragment(p_Dot1Q_untagged, fragsize=10)
# Dot1Q wrong VLAN tag - we change the VLAN tag in the data packet
# Everything else is the same and stays the same
p_Dot1Q_tagged_wrong = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_Dot1Q_tagged_wrong.tags = Dot1Q(vlan=3355)
##This is the actual data packet that will be sent containing the payload
#- fragmented, QinQ reversed/siwtched tags
p_frag_Dot1Q_tagged_wrong = fragment(p_Dot1Q_tagged_wrong, fragsize=10 )
##We need to ACK the packet
#here we go to "ack=(len(p[Raw]) -1 )" !! - "the overspill"
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(len(p[Raw]) -1 ))
returnAck.tags = Dot1Q(vlan=1155)
##Now we build the Finshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finAck.tags = Dot1Q(vlan=1155)
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
finalAck.tags = Dot1Q(vlan=1155)
#write the session - normal
session_packets_seq_overspill.append(syn)
session_packets_seq_overspill.append(synack)
session_packets_seq_overspill.append(ack)
session_packets_seq_overspill.append(p)
session_packets_seq_overspill.append(returnAck)
session_packets_seq_overspill.append(finAck)
session_packets_seq_overspill.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Dot1Q-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill)
session_packets_seq_overspill[:] = [] #empty the list
#write the fragmented packets - ordered
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_Dot1Q-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_Dot1Q-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write mix the fragmented packets
#shuffle/unsort/unorder/mix JUST the fragmented packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag)
#shuffle JUST the fragments in the session
for p_fragment in p_frag:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_Dot1Q-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
##
# Here we start with the wrong Dot1Q VLAN tags in the data packet
# and the creation of the pcaps designed for not alerting
# due to changed (fake/hopped) VLAN tag in the same flow
##
#write the session - normal
session_packets_seq_overspill.append(syn)
session_packets_seq_overspill.append(synack)
session_packets_seq_overspill.append(ack)
session_packets_seq_overspill.append(p_Dot1Q_tagged_wrong)
session_packets_seq_overspill.append(returnAck)
session_packets_seq_overspill.append(finAck)
session_packets_seq_overspill.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Dot1Q_tagged_wrong-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill)
session_packets_seq_overspill[:] = [] #empty the list
#write the fragmented packets - ordered
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_Dot1Q_tagged_wrong:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_Dot1Q_tagged_wrong-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_Dot1Q_tagged_wrong):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_Dot1Q_tagged_wrong-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write mix the fragmented packets
#shuffle/unsort/unorder/mix JUST the fragmented packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_Dot1Q_tagged_wrong)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_Dot1Q_tagged_wrong:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_Dot1Q_tagged_wrong-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
##
# Here we start with the missing Dot1Q VLAN tag in the data packet
# and the creation of the pcaps designed for not alerting
# due to missing VLAN tag in the same flow
##
#write the session - normal
session_packets_seq_overspill.append(syn)
session_packets_seq_overspill.append(synack)
session_packets_seq_overspill.append(ack)
session_packets_seq_overspill.append(p_Dot1Q_untagged)
session_packets_seq_overspill.append(returnAck)
session_packets_seq_overspill.append(finAck)
session_packets_seq_overspill.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Dot1Q_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill)
session_packets_seq_overspill[:] = [] #empty the list
#write the fragmented packets - ordered
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_Dot1Q_untagged:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_Dot1Q_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_Dot1Q_untagged):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_Dot1Q_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write mix the fragmented packets
#shuffle/unsort/unorder/mix JUST the fragmented packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_Dot1Q_untagged)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_Dot1Q_untagged:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_Dot1Q_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
def rebuildIPv4HttpSeqOverSpillDot1QWrongTagInFragments(self, packet, \
results_directory, sid_id_http, src_name, repo_name):
#Dot1Q - VLAN tags cases.
#rebuild session with overspilling seq numbers
# seq = 4294967294, 4294967295, 0, 1,....(as per RFC)
#seq overspill re-writing
session_packets_seq_overspill = list()
session_packets_seq_overspill_fragmented = list()
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
#maximum seq=4294967295
seq_num = 4294967294
ack_num = random.randint(1024,(2**32)-1)
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
syn.tags = Dot1Q(vlan=1155)
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
synack.tags = Dot1Q(vlan=1155)
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
ack.tags = Dot1Q(vlan=1155)
##This is the actual data packet that will be send, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p.tags = Dot1Q(vlan=1155)
##This is the actual data packet that will be sent containing the payload
#- fragmented
p_frag = fragment(p, fragsize=10 )
##This is the data packet. Fromt this data packet we will edit and tweek
# the VLAN tags for one or more fragments of the same data packet !
p_Dot1Q_data_frag = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_Dot1Q_data_frag.tags = Dot1Q(vlan=1155)
# We fragment the data packet, then we will play around with the fragments
# VLAN tags - one fragment has the wrong VLAN tag
p_frag_Dot1Q_data_frag_wrong = fragment(p_Dot1Q_data_frag, fragsize=10 )
p_frag_Dot1Q_data_frag_wrong[3].tags = Dot1Q(vlan=3333)
# We fragment the data packet , but we make one fragment untagged.
# VLAN tag missing
p_frag_Dot1Q_data_frag_missing = fragment(p_Dot1Q_data_frag, fragsize=10 )
p_frag_Dot1Q_data_frag_missing[3].tags = Untagged()
# We fragment the data packet , but we make ONLY one fragment tagged
# with the correct VLAN tag
p_frag_Dot1Q_data_frag_one_tagged = fragment(p_Dot1Q_data_frag, fragsize=10 )
for frag in p_frag_Dot1Q_data_frag_one_tagged:
frag.tags = Untagged()
p_frag_Dot1Q_data_frag_one_tagged[3].tags = Dot1Q(vlan=1155)
##We need to ACK the packet
#here we go to "ack=(len(p[Raw]) -1 )" !! - "the overspill"
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(len(p[Raw]) -1 ))
returnAck.tags = Dot1Q(vlan=1155)
##Now we build the Finshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finAck.tags = Dot1Q(vlan=1155)
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
finalAck.tags = Dot1Q(vlan=1155)
##
# Here we start with chnaging the Dot1Q VLAN tags in the FRAGMENTS
# of the data packetand the creation of the pcaps designed for not alerting
# due to missing VLAN tag in the fragments of data in the same flow.
##
## one fragment from the data packet has a missing VLAN tag
#write the session but with an ordered fragmented payload
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_Dot1Q_data_frag_missing:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_Dot1Q_data_tag_missing_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_Dot1Q_data_frag_missing):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_Dot1Q_data_tag_missing_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_Dot1Q_data_frag_missing)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_Dot1Q_data_frag_missing:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_Dot1Q_data_tag_missing_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
## one frgament from the data packet has the wrong VLAN tag
#write the session but with an ordered fragmented payload
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_Dot1Q_data_frag_wrong:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_Dot1Q_data_frag_wrong):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_Dot1Q_data_frag_wrong)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_Dot1Q_data_frag_wrong:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
## all frgaments from the data packet have no VLAN tags BUT one
#write the session but with an ordered fragmented payload
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_Dot1Q_data_frag_one_tagged:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_Dot1Q_data_tag_one_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_Dot1Q_data_frag_one_tagged):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_Dot1Q_data_tag_one_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_Dot1Q_data_frag_one_tagged)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_Dot1Q_data_frag_one_tagged:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_Dot1Q_data_tag_one_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
def rebuildIPv4HttpSeqOverSpillQinQ(self, packet, results_directory, \
sid_id_http, src_name, repo_name):
#QinQ - double VLAN tag cases.
#rebuild session with overspilling seq numbers
# seq = 4294967294, 4294967295, 0, 1,....(as per RFC)
#seq overspill re-writing
session_packets_seq_overspill = list()
session_packets_seq_overspill_fragmented = list()
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
#maximum seq=4294967295
seq_num = 4294967294
ack_num = random.randint(1024,(2**32)-1)
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
syn.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
syn.tags[Dot1Q].tpid = 0x88a8
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
synack.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
synack.tags[Dot1Q].tpid = 0x88a8
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
ack.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
ack.tags[Dot1Q].tpid = 0x88a8
##This is the actual data packet that will be send, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
p.tags[Dot1Q].tpid = 0x88a8
##This is the actual data packet that will be sent containing the payload
#- fragmented
p_frag = fragment(p, fragsize=10 )
## This is the same original data packet - but no VLAN tags
p_QinQ_untagged = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_frag_QinQ_untagged = fragment(p_QinQ_untagged, fragsize=10)
# Dot1Q wrong VLAN tag - we change the VLAN tag in the data packet
# Everything else is the same and stays the same
p_QinQ_tag_reversed = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_QinQ_tag_reversed.tags = Dot1AD(vlan=4000)/Dot1Q(vlan=777)
p_QinQ_tag_reversed.tags[Dot1Q].tpid = 0x88a8
##This is the actual data packet that will be sent containing the payload
#- fragmented, QinQ reversed/siwtched tags
p_frag_QinQ_tag_reversed = fragment(p_QinQ_tag_reversed, fragsize=10 )
## ONLY Dot1Q VLAN tag - present in the fragments (QinQ expected)
p_QinQ_tag_only_dot1q = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_QinQ_tag_only_dot1q.tags = Dot1Q(vlan=1234)
#The actual fragmentation - only one VLAN tag - QinQ expected
p_frag_QinQ_tag_only_dot1q = fragment(p_QinQ_tag_only_dot1q, fragsize=10 )
##We need to ACK the packet
#here we go to "ack=(len(p[Raw]) -1 )" !! - "the overspill"
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(len(p[Raw]) -1 ))
returnAck.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
returnAck.tags[Dot1Q].tpid = 0x88a8
##Now we build the Finshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finAck.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
finAck.tags[Dot1Q].tpid = 0x88a8
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
finalAck.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
finalAck.tags[Dot1Q].tpid = 0x88a8
#write the session - normal
session_packets_seq_overspill.append(syn)
session_packets_seq_overspill.append(synack)
session_packets_seq_overspill.append(ack)
session_packets_seq_overspill.append(p)
session_packets_seq_overspill.append(returnAck)
session_packets_seq_overspill.append(finAck)
session_packets_seq_overspill.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_QinQ-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill)
session_packets_seq_overspill[:] = [] #empty the list
#write the fragmented packets - ordered
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the mixed fragmented packets
#shuffle/unsort/unorder/mix JUST the fragmented packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag)
#shuffle JUST the fragments in the session
for p_fragment in p_frag:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
##
# Here we start with the reversed/switched QinQ VLAN tags in the data packet
# and the creation of the pcaps designed not to alert
# due to a changed (fake/hopped) VLAN tag in the same flow
##
#write the session - normal
session_packets_seq_overspill.append(syn)
session_packets_seq_overspill.append(synack)
session_packets_seq_overspill.append(ack)
session_packets_seq_overspill.append(p_QinQ_tag_reversed)
session_packets_seq_overspill.append(returnAck)
session_packets_seq_overspill.append(finAck)
session_packets_seq_overspill.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_QinQ_tags_reversed-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill)
session_packets_seq_overspill[:] = [] #empty the list
#write the fragmented packets - ordered
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_QinQ_tag_reversed:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_tags_reversed-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_tag_reversed):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_tags_reversed-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the mixed fragmented packets
#shuffle/unsort/unorder/mix JUST the fragmented packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_QinQ_tag_reversed)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_tag_reversed:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_tags_reversed-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
##
# Here we start with the missing QinQ VLAN tag in the data packet
# and the creation of the pcaps designed not to alert
# due to a missing VLAN tag in the same flow
##
#write the session - normal
session_packets_seq_overspill.append(syn)
session_packets_seq_overspill.append(synack)
session_packets_seq_overspill.append(ack)
session_packets_seq_overspill.append(p_QinQ_untagged)
session_packets_seq_overspill.append(returnAck)
session_packets_seq_overspill.append(finAck)
session_packets_seq_overspill.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_QinQ_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill)
session_packets_seq_overspill[:] = [] #empty the list
#write the fragmented packets - ordered
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_QinQ_untagged:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name) , session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_untagged):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the mixed fragmented packets
#shuffle/unsort/unorder/mix JUST the fragmented packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_QinQ_untagged)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_untagged:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_tag_missing-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
##
# Here we start with only one VLAN tag found in the data packet,
# while QinQ VLAN tags are expected
##
#write the session - normal
session_packets_seq_overspill.append(syn)
session_packets_seq_overspill.append(synack)
session_packets_seq_overspill.append(ack)
session_packets_seq_overspill.append(p_QinQ_tag_only_dot1q)
session_packets_seq_overspill.append(returnAck)
session_packets_seq_overspill.append(finAck)
session_packets_seq_overspill.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_QinQ_data_tag_only_dot1q-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill)
session_packets_seq_overspill[:] = [] #empty the list
#write the fragmented packets - ordered
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_QinQ_tag_only_dot1q:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_tag_only_dotq-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_tag_only_dot1q):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_tag_only_dot1q-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the mixed fragmented packets
#shuffle/unsort/unorder/mix JUST the fragmented packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_QinQ_tag_only_dot1q)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_tag_only_dot1q:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_tag_only_dot1q-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
def rebuildIPv4HttpSeqOverSpillQinQWrongTagInFragments(self, packet, \
results_directory, sid_id_http, src_name, repo_name):
#QinQ - double VLAN tag cases.
#rebuild session with overspilling seq numbers
# seq = 4294967294, 4294967295, 0, 1, ... (wrapping as per RFC 793)
#seq overspill re-writing
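# Worked wrap arithmetic for the values used below (32-bit sequence space):
#   (4294967294 + 1) % 2**32 == 4294967295
#   (4294967295 + 1) % 2**32 == 0
# so a payload sent at seq 4294967295 is ACKed at len(payload) - 1.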
session_packets_seq_overspill = list()
session_packets_seq_overspill_fragmented = list()
ipsrc = packet[IP].src
ipdst = packet[IP].dst
portsrc = packet[TCP].sport
portdst = packet[TCP].dport
#maximum seq=4294967295
seq_num = 4294967294
ack_num = random.randint(1024,(2**32)-1)
syn = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="S", sport=portsrc, dport=portdst, \
seq=seq_num)
syn.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
syn.tags[Dot1Q].tpid = 0x88a8
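# Rough on-wire intent of the tagging above (the .tags/Dot1AD/Untagged
# API comes from a VLAN-aware Scapy variant, so the exact tag ordering
# and tpid handling are fork-specific):
#   Ether / outer S-tag (802.1ad, TPID 0x88a8, VLAN 777)
#         / inner C-tag (802.1Q, TPID 0x8100, VLAN 4000) / IP / TCP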
synack = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="SA", sport=portdst, dport=portsrc, \
seq=ack_num, ack=syn.seq+1)
synack.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
synack.tags[Dot1Q].tpid = 0x88a8
ack = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="A", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)
ack.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
ack.tags[Dot1Q].tpid = 0x88a8
##This is the actual data packet that will be sent, containing the payload
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
p.tags[Dot1Q].tpid = 0x88a8
##This is the data packet. From this data packet we will edit and tweak
# the VLAN tags (QinQ) for one or more fragments of the same data packet!
p_QinQ_data_frag = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=portsrc, dport=portdst, \
seq=syn.seq+1, ack=synack.seq+1)/packet[TCP][Raw]
p_QinQ_data_frag.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
p_QinQ_data_frag.tags[Dot1Q].tpid = 0x88a8
## We fragment the data packet, then we will play around with the fragments'
# VLAN tags in QinQ
# Here we change the VLAN tag of the outer 802.1AD layer
p_frag_QinQ_data_frag_wrong_dot1ad = fragment(p_QinQ_data_frag, fragsize=10 )
p_frag_QinQ_data_frag_wrong_dot1ad[3].tags = Dot1AD(vlan=333)/Dot1Q(vlan=4000)
p_frag_QinQ_data_frag_wrong_dot1ad[3].tags[Dot1Q].tpid = 0x88a8
## We fragment the data packet, then we will play around with the fragments'
# VLAN tags in QinQ
# Here we change the VLAN tag of the inner Dot1Q layer
p_frag_QinQ_data_frag_wrong_dot1q = fragment(p_QinQ_data_frag, fragsize=10 )
p_frag_QinQ_data_frag_wrong_dot1q[3].tags = Dot1AD(vlan=777)/Dot1Q(vlan=888)
p_frag_QinQ_data_frag_wrong_dot1q[3].tags[Dot1Q].tpid = 0x88a8
## We fragment the data packet, then we will play around with the fragments'
# VLAN tags in QinQ
# Here we make one fragment tagged with only one VLAN
p_frag_QinQ_data_frag_only_dot1q = fragment(p_QinQ_data_frag, fragsize=10 )
p_frag_QinQ_data_frag_only_dot1q[3].tags = Dot1Q(vlan=1234)
## We fragment the data packet and make one fragment with both tags
# having the wrong VLAN IDs
p_frag_QinQ_data_frag_wrong_both = fragment(p_QinQ_data_frag, fragsize=10 )
p_frag_QinQ_data_frag_wrong_both[3].tags = Dot1AD(vlan=444)/Dot1Q(vlan=555)
p_frag_QinQ_data_frag_wrong_both[3].tags[Dot1Q].tpid = 0x88a8
## We fragment the data packet, but we make one fragment untagged.
# VLAN tags missing
p_frag_QinQ_data_frag_missing_tags = fragment(p_QinQ_data_frag, fragsize=10 )
p_frag_QinQ_data_frag_missing_tags[3].tags = Untagged()
## We fragment the data packet, but we make one fragment with reversed
# VLAN tags
p_frag_QinQ_data_frag_reversed_tags = fragment(p_QinQ_data_frag, fragsize=10 )
p_frag_QinQ_data_frag_reversed_tags[3].tags = \
Dot1AD(vlan=4000)/Dot1Q(vlan=777)
p_frag_QinQ_data_frag_reversed_tags[3].tags[Dot1Q].tpid = 0x88a8
## We fragment the data packet, but we make ONLY one fragment QinQ tagged
# with the correct VLAN tags
p_frag_QinQ_data_frag_one_tagged = fragment(p_QinQ_data_frag, fragsize=10 )
for frag in p_frag_QinQ_data_frag_one_tagged:
frag.tags = Untagged()
p_frag_QinQ_data_frag_one_tagged[3].tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
p_frag_QinQ_data_frag_one_tagged[3].tags[Dot1Q].tpid = 0x88a8
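# Each p_frag_QinQ_data_frag_* list above differs from a clean QinQ
# fragment train only in fragment [3]. For the "one_tagged" case the
# per-fragment tagging pattern is, as a sketch:
#   [Untagged, Untagged, Untagged, Dot1AD(777)/Dot1Q(4000), Untagged, ...]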
##We need to ACK the packet
#here the ACK number wraps past 2**32 to "ack=(len(p[Raw]) -1 )" - "the overspill"
returnAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src, type=0x800 ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=p.ack, ack=(len(p[Raw]) -1 ))
returnAck.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
returnAck.tags[Dot1Q].tpid = 0x88a8
##Now we build the FIN handshake
finAck = Ether(src=packet[Ether].src, dst=packet[Ether].dst, type=0x800 ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="FA", sport=portsrc, dport=portdst, \
seq=returnAck.ack, ack=returnAck.seq)
finAck.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
finAck.tags[Dot1Q].tpid = 0x88a8
finalAck = Ether(src=packet[Ether].dst, dst=packet[Ether].src ) \
/IP(src=ipdst, dst=ipsrc)/TCP(flags="A", sport=portdst, dport=portsrc, \
seq=finAck.ack, ack=finAck.seq+1)
finalAck.tags = Dot1AD(vlan=777)/Dot1Q(vlan=4000)
finalAck.tags[Dot1Q].tpid = 0x88a8
##
# Here we start with changing the QinQ VLAN tags in the FRAGMENTS
# of the data packet and the creation of the pcaps designed not to alert
# due to missing/reversed/nonexistent VLAN tags in the fragments of
# data in the same flow.
##
## one fragment from the data packet has a wrong VLAN tag - dot1Q tag.
# The other tag (dot1AD - S-VLAN/Carrier VLAN) is correct
# write the session but with an ordered fragmented payload
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_wrong_dot1q:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_wrong_dot1q):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_wrong_dot1q)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_wrong_dot1q:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
## one fragment from the data packet has a wrong VLAN tag - dot1AD tag
# -> S-VLAN/Carrier VLAN. The other tag (dot1q) is correct
# write the session but with an ordered fragmented payload
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_wrong_dot1ad:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_wrong_dot1ad):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_wrong_dot1ad)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_wrong_dot1ad:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
## We make one fragment with only one VLAN tag (not double)
# write the session but with an ordered fragmented payload
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_only_dot1q:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_frag_only_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_only_dot1q):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_frag_only_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_only_dot1q)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_only_dot1q:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_frag_only_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
## one fragment from the data packet has both VLAN tag IDs wrong
#write the session but with an ordered fragmented payload
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_wrong_both:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_wrong_both):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_wrong_both)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_wrong_both:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
## one fragment of the data packet has NO VLAN tags
#write the session but with an ordered fragmented payload
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_missing_tags:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_missing_tags):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_missing_tags)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_missing_tags:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
## one fragment of the data packet has both VLAN tags switched/reversed
# write the session but with an ordered fragmented payload
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_reversed_tags:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_reversed_tags):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_reversed_tags)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_reversed_tags:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
## one fragment of the data packet has both VLAN tags correct.
# The rest do not.
# write the session but with an ordered fragmented payload
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in p_frag_QinQ_data_frag_one_tagged:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Ordered_QinQ_data_frag_only_one_tagged_in_fragments-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session with reverse fragments order
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
for p_fragment in reversed(p_frag_QinQ_data_frag_one_tagged):
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Reversed_QinQ_data_frag_only_one_tagged_in_fragments-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
#write the session but with unordered/unsorted/mixed JUST fragmented
#payload packets
session_packets_seq_overspill_fragmented.append(syn)
session_packets_seq_overspill_fragmented.append(synack)
session_packets_seq_overspill_fragmented.append(ack)
random.shuffle(p_frag_QinQ_data_frag_one_tagged)
#shuffle JUST the fragments in the session
for p_fragment in p_frag_QinQ_data_frag_one_tagged:
session_packets_seq_overspill_fragmented.append(p_fragment)
session_packets_seq_overspill_fragmented.append(returnAck)
session_packets_seq_overspill_fragmented.append(finAck)
session_packets_seq_overspill_fragmented.append(finalAck)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Session_Seq_Overspill_Fragmented_Mixed_QinQ_data_frag_only_one_tagged_in_fragments-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), session_packets_seq_overspill_fragmented)
session_packets_seq_overspill_fragmented[:] = [] #empty the list
def midstreamIPv4Http(self, fragit, results_directory, sid_id_http, \
src_name, repo_name):
#forcing correct recalculation of the checksum
del fragit[IP].chksum
del fragit[TCP].chksum
fragit_done = fragment(fragit, fragsize=10 )
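# Notes on the two steps above: deleting the chksum fields makes Scapy
# recompute valid IP/TCP checksums when the packets are serialized, and
# fragment() slices the IP payload into fragments. Since IP fragment
# offsets are expressed in 8-byte units, Scapy rounds fragsize to a
# multiple of 8, so fragsize=10 effectively yields 8-byte payload slices
# (the exact rounding may vary between Scapy versions).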
#write the ordered fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done)
#reverse the fragments !!!
#permanent change to the list of fragments
fragit_done.reverse()
#write the reversed fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done)
#shuffle(unorder/mix) the fragmented payload packet and write
random.shuffle(fragit_done)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Regular'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done)
def midstreamIPv4HttpDot1Q(self, fragit, results_directory, sid_id_http, \
src_name, repo_name):
#Using VLAN Tag - Dot1Q
#forcing correct recalculation of the checksum
del fragit[IP].chksum
del fragit[TCP].chksum
fragit[Ether].tags = Dot1Q(vlan=2222)
#one midstream packet in Dot1Q
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Dot1Q-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit)
fragit_done = fragment(fragit, fragsize=10 )
#write the ordered fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_Dot1Q-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done)
#reverse the fragments !!!
#permanent change to the list of fragments
fragit_done.reverse()
#write the reversed fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_Dot1Q-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done)
#shuffle(unorder/mix) the fragmented payload packet and write
random.shuffle(fragit_done)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_Dot1Q-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done)
def midstreamIPv4HttpDot1QWrongTagInFragments(self, fragit, results_directory, \
sid_id_http, src_name, repo_name):
# Wrongly tagged fragments
# Using VLAN Tag - Dot1Q
#forcing correct recalculation of the checksum
del fragit[IP].chksum
del fragit[TCP].chksum
fragit[Ether].tags = Dot1Q(vlan=2222)
##
# one fragment has the wrong VLAN ID tag
##
fragit_done_wrong_dot1q_tag = fragment(fragit, fragsize=10 )
fragit_done_wrong_dot1q_tag[3].tags = Dot1Q(vlan=2299)
#write the ordered fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done_wrong_dot1q_tag)
#reverse the fragments !!!
#permanent change to the list of fragments
fragit_done_wrong_dot1q_tag.reverse()
#write the reversed fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done_wrong_dot1q_tag)
#shuffle(unorder/mix) the fragmented payload packet and write
random.shuffle(fragit_done_wrong_dot1q_tag)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_Dot1Q_data_tag_wrong_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done_wrong_dot1q_tag)
##
# one fragment has no VLAN ID tag
##
fragit_done_no_dot1q_tag = fragment(fragit, fragsize=10 )
fragit_done_no_dot1q_tag[3].tags = Untagged()
#write the ordered fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_Dot1Q_data_tag_none_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done_no_dot1q_tag)
#reverse the fragments !!!
#permanent change to the list of fragments
fragit_done_no_dot1q_tag.reverse()
#write the reversed fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_Dot1Q_data_tag_none_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done_no_dot1q_tag)
#shuffle(unorder/mix) the fragmented payload packet and write
random.shuffle(fragit_done_no_dot1q_tag)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_Dot1Q_data_tag_none_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Dot1Q'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done_no_dot1q_tag)
def midstreamIPv4HttpQinQ(self, fragit, results_directory, sid_id_http, \
src_name, repo_name):
#Using DOUBLE VLAN Tagging - QinQ
#Forcing correct recalculation of the checksum
del fragit[IP].chksum
del fragit[TCP].chksum
fragit.tags = Dot1AD(vlan=3333)/Dot1Q(vlan=1)
fragit.tags[Dot1Q].tpid = 0x88a8
#one midstream packet in QinQ
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_QinQ-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit)
fragit_done = fragment(fragit, fragsize=10 )
#write the ordered fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_QinQ-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done)
#reverse the fragments !!!
#permanent change to the list of fragments
fragit_done.reverse()
#write the reversed fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_QinQ-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done)
#shuffle(unorder/mix) the fragmented payload packet and write
random.shuffle(fragit_done)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_QinQ-%s-tp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), fragit_done)
def midstreamIPv4HttpQinQWrongTagInFragments(self, fragit, \
results_directory, sid_id_http, src_name, repo_name):
#Wrongly tagged fragments
#Using DOUBLE VLAN Tagging - QinQ
#forcing correct recalculation of the checksum
del fragit[IP].chksum
del fragit[TCP].chksum
fragit.tags = Dot1AD(vlan=3333)/Dot1Q(vlan=1)
fragit.tags[Dot1Q].tpid = 0x88a8
##
# We fragment the data packet, we change the VLAN tag of
# the outer 802.1AD layer
##
p_frag_QinQ_data_frag_wrong_dot1ad = fragment(fragit, fragsize=10 )
p_frag_QinQ_data_frag_wrong_dot1ad[3].tags = Dot1AD(vlan=777)/Dot1Q(vlan=1)
p_frag_QinQ_data_frag_wrong_dot1ad[3].tags[Dot1Q].tpid = 0x88a8
##
# We fragment the data packet, we change the VLAN tag of
# the inner Dot1Q layer
##
p_frag_QinQ_data_frag_wrong_dot1q = fragment(fragit, fragsize=10 )
p_frag_QinQ_data_frag_wrong_dot1q[3].tags = Dot1AD(vlan=3333)/Dot1Q(vlan=777)
p_frag_QinQ_data_frag_wrong_dot1q[3].tags[Dot1Q].tpid = 0x88a8
##
# We fragment the data packet, we make one fragment tagged only
# with one VLAN
##
p_frag_QinQ_data_frag_only_dot1q = fragment(fragit, fragsize=10 )
p_frag_QinQ_data_frag_only_dot1q[3].tags = Dot1Q(vlan=2345)
##
# We fragment the data packet and make one fragment with both tags
# having the wrong VLAN IDs
##
p_frag_QinQ_data_frag_wrong_both = fragment(fragit, fragsize=10 )
p_frag_QinQ_data_frag_wrong_both[3].tags = Dot1AD(vlan=111)/Dot1Q(vlan=222)
p_frag_QinQ_data_frag_wrong_both[3].tags[Dot1Q].tpid = 0x88a8
##
# We fragment the data packet, but we make one fragment untagged.
# VLAN tags missing
##
p_frag_QinQ_data_frag_missing_tags = fragment(fragit, fragsize=10 )
p_frag_QinQ_data_frag_missing_tags[3].tags = Untagged()
##
# We fragment the data packet, but we make one fragment with reversed
# VLAN tags
##
p_frag_QinQ_data_frag_reversed_tags = fragment(fragit, fragsize=10 )
p_frag_QinQ_data_frag_reversed_tags[3].tags = Dot1AD(vlan=1)/Dot1Q(vlan=3333)
p_frag_QinQ_data_frag_reversed_tags[3].tags[Dot1Q].tpid = 0x88a8
##
# We fragment the data packet, we change the VLAN tag of
# the outer 802.1AD layer
##
#write the ordered fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_wrong_dot1ad)
#reverse the fragments !!!
#permanent change to the list of fragments
p_frag_QinQ_data_frag_wrong_dot1ad.reverse()
#write the reversed fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_wrong_dot1ad)
#shuffle(unorder/mix) the fragmented payload packet and write
random.shuffle(p_frag_QinQ_data_frag_wrong_dot1ad)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_QinQ_data_frag_wrong_dot1ad_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_wrong_dot1ad)
##
# We fragment the data packet, we change the VLAN tag of
# the inner Dot1Q layer
##
#write the ordered fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_wrong_dot1q)
#reverse the fragments !!!
#permanent change to the list of fragments
p_frag_QinQ_data_frag_wrong_dot1q.reverse()
#write the reversed fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_wrong_dot1q)
#shuffle(unorder/mix) the fragmented payload packet and write
random.shuffle(p_frag_QinQ_data_frag_wrong_dot1q)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_QinQ_data_frag_wrong_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_wrong_dot1q)
##
# We fragment the data packet, we make one fragment tagged only
# with one VLAN
##
#write the ordered fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_QinQ_data_frag_only_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_only_dot1q)
#reverse the fragments !!!
#permanent change to the list of fragments
p_frag_QinQ_data_frag_only_dot1q.reverse()
#write the reversed fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_QinQ_data_frag_only_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_only_dot1q)
#shuffle(unorder/mix) the fragmented payload packet and write
random.shuffle(p_frag_QinQ_data_frag_only_dot1q)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_QinQ_data_frag_only_dot1q_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_only_dot1q)
##
# We fragment the data packet and make one fragment with both tags
# having the wrong VLAN IDs
##
#write the ordered fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_wrong_both)
#reverse the fragments !!!
#permanent change to the list of fragments
p_frag_QinQ_data_frag_wrong_both.reverse()
#write the reversed fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_wrong_both)
#shuffle(unorder/mix) the fragmented payload packet and write
random.shuffle(p_frag_QinQ_data_frag_wrong_both)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_QinQ_data_frag_wrong_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_wrong_both)
##
# We fragment the data packet, but we make one fragment untagged.
# VLAN tags missing
##
#write the ordered fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_missing_tags)
#reverse the fragments !!!
#permanent change to the list of fragments
p_frag_QinQ_data_frag_missing_tags.reverse()
#write the reversed fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_missing_tags)
#shuffle(unorder/mix) the fragmented payload packet and write
random.shuffle(p_frag_QinQ_data_frag_missing_tags)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_QinQ_data_frag_missing_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_missing_tags)
##
# We fragment the data packet, but we make one fragment with reversed
# VLAN tags
##
#write the ordered fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Ordered_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_reversed_tags)
#reverse the fragments !!!
#permanent change to the list of fragments
p_frag_QinQ_data_frag_reversed_tags.reverse()
#write the reversed fragmented payload packets
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Reversed_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_reversed_tags)
#shuffle(unorder/mix) the fragmented payload packet and write
random.shuffle(p_frag_QinQ_data_frag_reversed_tags)
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream_Fragmented_Mixed_QinQ_data_frag_reversed_tags_in_fragment-%s-fp-00.pcap" \
% (os.path.join(results_directory, 'Midstream', 'QinQ'), sid_id_http, self.incrementPcapId("byOne") \
, src_name, repo_name), p_frag_QinQ_data_frag_reversed_tags)
def reconstructIPv4HttpPacket(self, packet):
# here we rebuild the original HTTP packet as a plain IPv4 TCP packet
if packet.haslayer(IPv6):
ipsrc = "1.1.1.1"
ipdst = "9.9.9.9"
else:
ipsrc = packet[IP].src
ipdst = packet[IP].dst
p = Ether(src=packet[Ether].src, dst=packet[Ether].dst ) \
/IP(src=ipsrc, dst=ipdst)/TCP(flags="PA", sport=packet[TCP].sport, \
dport=packet[TCP].dport, seq=packet.seq, ack=packet.ack)/packet[TCP][Raw]
return p
def incrementPcapId(self, action):
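# pcap ids are rendered zero-padded to three digits:
# '{0:03}'.format(7) == '007', '{0:03}'.format(123) == '123'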
if action == "byOne":
Global_Vars.pcap_id = Global_Vars.pcap_id+1
return '{0:03}'.format(Global_Vars.pcap_id)
elif action == "clear":
Global_Vars.pcap_id = 0
return '{0:03}'.format(Global_Vars.pcap_id)
else:
sys.exit("Invalid argument for function incrementPcapId()")
def httpReWrite(self, scapy_load, FN, pcap_id, results_directory, \
source_name, sid_id_http, url_method, url_str, content_all, repository_name):
# writing the http request packet to pcap
# in regression script format
# 2002031-001-sandnet-public-tp-01.pcap - example
## 001 - starts here ##
ipv4_ready = self.reconstructIPv4HttpPacket(scapy_load[FN])
if Global_Vars.yaml_options['Protocols']['HTTP']['WriteRule']:
self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \
os.path.join(results_directory, 'Rules'), source_name)
if Global_Vars.yaml_options['Protocols']['HTTP']['Midstream']['Midstream']:
wrpcap("%s/%s-%s-%s_IPv4_HTTP_Midstream-%s-tp-01.pcap" \
% (os.path.join(results_directory, 'Midstream', 'Regular'), sid_id_http, self.incrementPcapId("byOne"), \
source_name, repository_name) , ipv4_ready)
self.midstreamIPv4Http(ipv4_ready, results_directory, sid_id_http, \
source_name, repository_name)
self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \
os.path.join(results_directory, 'Midstream', 'Regular'), source_name)
if Global_Vars.yaml_options['Protocols']['HTTP']['Midstream']['Dot1Q']:
self.midstreamIPv4HttpDot1Q(ipv4_ready, results_directory, sid_id_http, \
source_name, repository_name)
self.midstreamIPv4HttpDot1QWrongTagInFragments(ipv4_ready, \
results_directory, sid_id_http, source_name, repository_name)
self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \
os.path.join(results_directory, 'Midstream', 'Dot1Q'), source_name)
if Global_Vars.yaml_options['Protocols']['HTTP']['Midstream']['QinQ']:
self.midstreamIPv4HttpQinQ(ipv4_ready, results_directory, \
sid_id_http, source_name, repository_name)
self.midstreamIPv4HttpQinQWrongTagInFragments(ipv4_ready, \
results_directory, sid_id_http, source_name, repository_name)
self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \
os.path.join(results_directory, 'Midstream', 'QinQ'), source_name)
if Global_Vars.yaml_options['Protocols']['HTTP']['Session']['Session']:
self.rebuildIPv4HttpSession(ipv4_ready, results_directory, sid_id_http, \
source_name, repository_name)
self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \
os.path.join(results_directory, 'Regular'), source_name)
if Global_Vars.yaml_options['Protocols']['HTTP']['Session']['ExtraTcpSA']:
self.rebuildIPv4HttpSessionExtraTcpSAs(ipv4_ready, results_directory, \
sid_id_http, source_name, repository_name)
self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \
os.path.join(results_directory, 'Regular'), source_name)
if Global_Vars.yaml_options['Protocols']['HTTP']['Session']['Dot1Q']:
self.rebuildIPv4HttpSessionDot1Q(ipv4_ready, results_directory, \
sid_id_http, source_name, repository_name)
self.rebuildIPv4HttpSessionDot1QWrongTagInFragments(ipv4_ready, \
results_directory, sid_id_http, source_name, repository_name)
self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \
os.path.join(results_directory, 'Dot1Q'), source_name)
if Global_Vars.yaml_options['Protocols']['HTTP']['Session']['QinQ']:
self.rebuildIPv4HttpSessionQinQ(ipv4_ready, results_directory, \
sid_id_http, source_name, repository_name)
self.rebuildIPv4HttpSessionQinQWrongTagInFragments(ipv4_ready, \
results_directory, sid_id_http, source_name, repository_name)
self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \
os.path.join(results_directory,'QinQ'), source_name)
if Global_Vars.yaml_options['Protocols']['HTTP']['Session']['SeqOverspill']:
self.rebuildIPv4HttpSeqOverSpill(ipv4_ready, results_directory, \
sid_id_http, source_name, repository_name)
self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \
os.path.join(results_directory, 'Regular'), source_name)
if Global_Vars.yaml_options['Protocols']['HTTP']['Session']['Dot1Q']:
self.rebuildIPv4HttpSeqOverSpillDot1Q(ipv4_ready, results_directory, \
sid_id_http, source_name, repository_name)
self.rebuildIPv4HttpSeqOverSpillDot1QWrongTagInFragments(ipv4_ready, \
results_directory, sid_id_http, source_name, repository_name)
self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \
os.path.join(results_directory, 'Dot1Q'), source_name)
if Global_Vars.yaml_options['Protocols']['HTTP']['Session']['QinQ']:
self.rebuildIPv4HttpSeqOverSpillQinQ(ipv4_ready, \
results_directory, sid_id_http, source_name, repository_name)
self.rebuildIPv4HttpSeqOverSpillQinQWrongTagInFragments(ipv4_ready, \
results_directory, sid_id_http, source_name, repository_name)
self.writeIPv4HttpRule(sid_id_http, url_method, url_str, content_all, \
os.path.join(results_directory,'QinQ'), source_name)
def __init__(self, scapy_load, FN, pcap_id, results_directory, source_name, \
sid_id_http, url_method, url_str, content_all, repository_name):
self.scapy_load_to_pass = scapy_load
self.FN_to_pass = FN
self.pcap_id_to_pass = pcap_id
self.results_directory_to_pass = results_directory
self.source_name_to_pass = source_name
self.sid_id_http_to_pass = sid_id_http
self.url_method_to_pass = url_method
self.url_str_to_pass = url_str
self.content_all_to_pass = content_all
self.repository_name_to_pass = repository_name
# if HTTP over IPv4 is enabled in yaml
if Global_Vars.yaml_options['Protocols']['HTTP']['IPv4']:
self.httpReWrite( \
self.scapy_load_to_pass, self.FN_to_pass, self.pcap_id_to_pass, \
self.results_directory_to_pass, self.source_name_to_pass, \
self.sid_id_http_to_pass, self.url_method_to_pass, \
self.url_str_to_pass, self.content_all_to_pass, \
self.repository_name_to_pass )
|
from sos.plugins import Plugin, UbuntuPlugin
class Maas(Plugin, UbuntuPlugin):
"""Ubuntu Metal-As-A-Service
"""
plugin_name = 'maas'
profiles = ('sysmgmt',)
option_list = [
('profile-name',
'The name with which you will later refer to this remote', '', False),
('url', 'The URL of the remote API', '', False),
('credentials',
'The credentials, also known as the API key', '', False)
]
def _has_login_options(self):
return self.get_option("url") and self.get_option("credentials") \
and self.get_option("profile-name")
def _remote_api_login(self):
ret = self.call_ext_prog("maas login %s %s %s" % (
self.get_option("profile-name"),
self.get_option("url"),
self.get_option("credentials")))
return ret['status'] == 0
def setup(self):
self.add_copy_spec([
"/etc/squid-deb-proxy",
"/etc/maas",
"/var/lib/maas/dhcp*",
"/var/log/apache2*",
"/var/log/maas*",
"/var/log/upstart/maas-*",
])
self.add_cmd_output([
"apt-cache policy maas-*",
"apt-cache policy python-django-*",
])
if self.is_installed("maas-region-controller"):
self.add_cmd_output([
"maas-region-admin dumpdata",
])
if self._has_login_options():
if self._remote_api_login():
self.add_cmd_output("maas %s commissioning-results list" %
self.get_option("profile-name"))
else:
self._log_error(
"Cannot login into MAAS remote API with provided creds.")
|
"""Prepare release news from git log.
Prepares release news from git log messages, breaking release news
into (1) sections (e.g. Security fixes, detected from commit labels)
and (2) modules (e.g. search, detected from commit log headlines).
"""
from __future__ import absolute_import, print_function, unicode_literals
import itertools
import re
import sys
import textwrap
from collections import OrderedDict
from flask import current_app
from flask_script import Manager
from .check import _git_commits, _pygit2_commits
manager = Manager(usage=__doc__)
def analyse_body_paragraph(body_paragraph, labels=None):
"""Analyse commit body paragraph and return (label, message).
>>> analyse_body_paragraph('* BETTER Foo and bar.',
>>> ... {'BETTER': 'Improvements'})
('BETTER', 'Foo and bar.')
>>> analyse_body_paragraph('* Foo and bar.')
(None, 'Foo and bar.')
>>> analyse_body_paragraph('Foo and bar.')
(None, None)
"""
# try to find leading label first:
for label, dummy in labels:
if body_paragraph.startswith('* ' + label):
return (label, body_paragraph[len(label) + 3:].replace('\n ',
' '))
# no conformed leading label found; do we have leading asterisk?
if body_paragraph.startswith('* '):
return (None, body_paragraph[2:].replace('\n ', ' '))
# no leading asterisk found; ignore this paragraph silently:
return (None, None)
def remove_ticket_directives(message):
"""Remove ticket directives like "(closes #123).
>>> remove_ticket_directives('(closes #123)')
'(#123)'
>>> remove_ticket_directives('(foo #123)')
'(foo #123)'
"""
if message:
message = re.sub(r'closes #', '#', message)
message = re.sub(r'addresses #', '#', message)
message = re.sub(r'references #', '#', message)
return message
def amended_commits(commits):
"""Return those git commit sha1s that have been amended later."""
# which SHA1 are declared as amended later?
amended_sha1s = []
for message in commits.values():
amended_sha1s.extend(re.findall(r'AMENDS\s([0-f]+)', message))
return amended_sha1s
def enrich_git_log_dict(messages, labels):
"""Enrich git log with related information on tickets."""
for commit_sha1, message in messages.items():
# detect module and ticket numbers for each commit:
component = None
title = message.split('\n')[0]
try:
component, title = title.split(":", 1)
component = component.strip()
except ValueError:
pass # noqa
paragraphs = [analyse_body_paragraph(p, labels)
for p in message.split('\n\n')]
yield {
'sha1': commit_sha1,
'component': component,
'title': title.strip(),
'tickets': re.findall(r'\s(#\d+)', message),
'paragraphs': [
(label, remove_ticket_directives(message))
for label, message in paragraphs
],
}
@manager.option('repository', default='.', nargs='?', help='repository path')
@manager.option('commit', metavar='<sha or branch>', nargs='?',
default='HEAD', help='an integer for the accumulator')
@manager.option('-c', '--components', default=False, action="store_true",
help='group components', dest='group_components')
def release(commit='HEAD', repository='.', group_components=False):
"""Generate release notes."""
from ..kwalitee import get_options
from ..hooks import _read_local_kwalitee_configuration
options = get_options(current_app.config)
options.update(_read_local_kwalitee_configuration(directory=repository))
try:
sha = 'oid'
commits = _pygit2_commits(commit, repository)
except ImportError:
try:
sha = 'hexsha'
commits = _git_commits(commit, repository)
except ImportError:
print('To use this feature, please install pygit2. GitPython will '
'also work but is not recommended (python <= 2.7 only).',
file=sys.stderr)
return 2
messages = OrderedDict([(getattr(c, sha), c.message) for c in commits])
for commit_sha1 in amended_commits(messages):
if commit_sha1 in messages:
del messages[commit_sha1]
full_messages = list(
enrich_git_log_dict(messages, options.get('commit_msg_labels'))
)
indent = ' ' if group_components else ''
wrapper = textwrap.TextWrapper(
width=70,
initial_indent=indent + '- ',
subsequent_indent=indent + ' ',
)
for label, section in options.get('commit_msg_labels'):
if section is None:
continue
bullets = []
for commit in full_messages:
bullets += [
{'text': bullet, 'component': commit['component']}
for lbl, bullet in commit['paragraphs']
if lbl == label and bullet is not None
]
if len(bullets) > 0:
print(section)
print('-' * len(section))
print()
if group_components:
def key(cmt):
return cmt['component']
for component, bullets in itertools.groupby(
sorted(bullets, key=key), key):
bullets = list(bullets)
if len(bullets) > 0:
print('+ {}'.format(component))
print()
for bullet in bullets:
print(wrapper.fill(bullet['text']))
print()
else:
for bullet in bullets:
print(wrapper.fill(bullet['text']))
print()
return 0
|
import logging
from operator import methodcaller
from typing import List
from django.core.exceptions import ObjectDoesNotExist
from kobo.django.xmlrpc.decorators import user_passes_test
from tcms.issuetracker.models import Issue
from tcms.management.models import TCMSEnvValue, TestTag
from tcms.testcases.models import TestCase
from tcms.testruns.models import TestCaseRun, TestRun
from tcms.xmlrpc.decorators import log_call
from tcms.xmlrpc.utils import distinct_count, pre_process_estimated_time, pre_process_ids
__all__ = (
"add_cases",
"add_tag",
"create",
"env_value",
"filter",
"filter_count",
"get",
"get_issues",
"get_change_history",
"get_completion_report",
"get_env_values",
"get_tags",
"get_test_case_runs",
"get_test_cases",
"get_test_plan",
"link_env_value",
"remove_cases",
"remove_tag",
"unlink_env_value",
"update",
)
__xmlrpc_namespace__ = "TestRun"
logger = logging.getLogger(__name__)
@log_call(namespace=__xmlrpc_namespace__)
@user_passes_test(methodcaller("has_perm", "testruns.add_testcaserun"))
def add_cases(request, run_ids, case_ids):
"""Add one or more cases to the selected test runs.
:param run_ids: give one or more run IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a run ID.
:type run_ids: int, str or list
:param case_ids: give one or more case IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a case ID.
:type case_ids: int, str or list
:return: a list which is empty on success or a list of mappings with
failure codes if a failure occured.
:rtype: list
Example::
# Add case id 10 to run 1
TestRun.add_cases(1, 10)
# Add case ids list [10, 20] to run list [1, 2]
TestRun.add_cases([1, 2], [10, 20])
# Add case ids list '10, 20' to run list '1, 2' with String
TestRun.add_cases('1, 2', '10, 20')
"""
trs = TestRun.objects.filter(run_id__in=pre_process_ids(run_ids))
tcs = TestCase.objects.filter(case_id__in=pre_process_ids(case_ids))
for tr in trs.iterator():
for tc in tcs.iterator():
tr.add_case_run(case=tc)
@log_call(namespace=__xmlrpc_namespace__)
@user_passes_test(methodcaller("has_perm", "testruns.delete_testcaserun"))
def remove_cases(request, run_ids, case_ids):
"""Remove one or more cases from the selected test runs.
:param run_ids: give one or more run IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a run ID.
:type run_ids: int, str or list
:param case_ids: give one or more case IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a case ID.
:type case_ids: int, str or list
:return: a list which is empty on success or a list of mappings with
failure codes if a failure occured.
:rtype: list
Example::
# Remove case 10 from run 1
TestRun.remove_cases(1, 10)
# Remove case ids list [10, 20] from run list [1, 2]
TestRun.remove_cases([1, 2], [10, 20])
# Remove case ids list '10, 20' from run list '1, 2' with String
TestRun.remove_cases('1, 2', '10, 20')
"""
trs = TestRun.objects.filter(run_id__in=pre_process_ids(run_ids))
for tr in trs.iterator():
crs = TestCaseRun.objects.filter(run=tr, case__in=pre_process_ids(case_ids))
crs.delete()
@log_call(namespace=__xmlrpc_namespace__)
@user_passes_test(methodcaller("has_perm", "testruns.add_testruntag"))
def add_tag(request, run_ids, tags):
"""Add one or more tags to the selected test runs.
:param run_ids: give one or more run IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a run ID.
:type run_ids: int, str or list
:param tags: tag name or a list of tag names to remove.
:type tags: str or list
:return: a list which is empty on success or a list of mappings with
failure codes if a failure occured.
:rtype: list
Example::
# Add tag 'foobar' to run 1
TestPlan.add_tag(1, 'foobar')
# Add tag list ['foo', 'bar'] to run list [1, 2]
TestPlan.add_tag([1, 2], ['foo', 'bar'])
# Add tag list ['foo', 'bar'] to run list [1, 2] with String
TestPlan.add_tag('1, 2', 'foo, bar')
"""
trs = TestRun.objects.filter(pk__in=pre_process_ids(value=run_ids))
tags: List[str] = TestTag.string_to_list(tags)
for tag in tags:
t, _ = TestTag.objects.get_or_create(name=tag)
tr: TestRun
for tr in trs.iterator():
tr.add_tag(tag=t)
@log_call(namespace=__xmlrpc_namespace__)
@user_passes_test(methodcaller("has_perm", "testruns.add_testrun"))
def create(request, values):
"""Creates a new Test Run object and stores it in the database.
:param dict values: a mapping containing these data to create a test run.
* plan: (int) **Required** ID of test plan
* build: (int)/(str) **Required** ID of Build
* manager: (int) **Required** ID of run manager
* summary: (str) **Required**
* product: (int) **Required** ID of product
* product_version: (int) **Required** ID of product version
* default_tester: (int) optional ID of run default tester
* plan_text_version: (int) optional
* estimated_time: (str) optional, could be in format ``2h30m30s``, which is recommended or ``HH:MM:SS``.
* notes: (str) optional
* status: (int) optional 0:RUNNING 1:STOPPED (default 0)
* case: list or (str) optional list of case ids to add to the run
* tag: list or (str) optional list of tag to add to the run
:return: a mapping representing newly created :class:`TestRun`.
:rtype: dict
.. versionchanged:: 4.5
Argument ``errata_id`` is removed.
Example::
values = {
'build': 2,
'manager': 1,
'plan': 1,
'product': 1,
'product_version': 2,
'summary': 'Testing XML-RPC for TCMS',
}
TestRun.create(values)
"""
from datetime import datetime
from tcms.core import forms
from tcms.testruns.forms import XMLRPCNewRunForm
if not values.get("product"):
raise ValueError("Value of product is required")
# TODO: XMLRPC only accept HH:MM:SS rather than DdHhMm
if values.get("estimated_time"):
values["estimated_time"] = pre_process_estimated_time(values.get("estimated_time"))
if values.get("case"):
values["case"] = pre_process_ids(value=values["case"])
form = XMLRPCNewRunForm(values)
form.populate(product_id=values["product"])
if form.is_valid():
tr = TestRun.objects.create(
product_version=form.cleaned_data["product_version"],
plan_text_version=form.cleaned_data["plan_text_version"],
stop_date=form.cleaned_data["status"] and datetime.now() or None,
summary=form.cleaned_data["summary"],
notes=form.cleaned_data["notes"],
estimated_time=form.cleaned_data["estimated_time"],
plan=form.cleaned_data["plan"],
build=form.cleaned_data["build"],
manager=form.cleaned_data["manager"],
default_tester=form.cleaned_data["default_tester"],
)
if form.cleaned_data["case"]:
for c in form.cleaned_data["case"]:
tr.add_case_run(case=c)
del c
if form.cleaned_data["tag"]:
tags = form.cleaned_data["tag"]
tags = [c.strip() for c in tags.split(",") if c]
for tag in tags:
t, c = TestTag.objects.get_or_create(name=tag)
tr.add_tag(tag=t)
del tag, t, c
else:
raise ValueError(forms.errors_to_list(form))
return tr.serialize()
def __env_value_operation(request, action: str, run_ids, env_value_ids):
trs = TestRun.objects.filter(pk__in=pre_process_ids(value=run_ids))
evs = TCMSEnvValue.objects.filter(pk__in=pre_process_ids(value=env_value_ids))
for tr in trs.iterator():
for ev in evs.iterator():
try:
func = getattr(tr, action + "_env_value")
func(env_value=ev)
except ObjectDoesNotExist:
logger.debug(
"User %s wants to remove property value %r from test run %r, "
"however this test run does not have that value.",
request.user,
ev,
tr,
)
@log_call(namespace=__xmlrpc_namespace__)
@user_passes_test(methodcaller("has_perm", "testruns.change_tcmsenvrunvaluemap"))
def env_value(request, action, run_ids, env_value_ids):
"""
Add or remove env values to the given runs, function is same as
link_env_value or unlink_env_value
:param str action: what action to do, ``add`` or ``remove``.
:param run_ids: give one or more run IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a run ID.
:type run_ids: int, str or list
:param env_value_ids: give one or more environment value IDs. It could be
an integer, a string containing comma separated IDs, or a list of int
each of them is a environment value ID.
:type env_value_ids: int, str or list
:return: a list which is empty on success or a list of mappings with
failure codes if a failure occured.
:rtype: list
Example::
# Add env value 20 to run id 8
TestRun.env_value('add', 8, 20)
"""
__env_value_operation(request, action, run_ids, env_value_ids)
@log_call(namespace=__xmlrpc_namespace__)
def filter(request, values={}):
"""Performs a search and returns the resulting list of test runs.
:param dict values: a mapping containing these criteria.
* build: ForeignKey: TestBuild
* cc: ForeignKey: Auth.User
* env_value: ForeignKey: Environment Value
* default_tester: ForeignKey: Auth.User
* run_id: (int)
* manager: ForeignKey: Auth.User
* notes: (str)
* plan: ForeignKey: TestPlan
* summary: (str)
* tag: ForeignKey: Tag
* product_version: ForeignKey: Version
:return: list of mappings of found :class:`TestRun`.
:rtype: list
Example::
# Get all of runs contain 'TCMS' in summary
TestRun.filter({'summary__icontain': 'TCMS'})
# Get all of runs managed by xkuang
TestRun.filter({'manager__username': 'xkuang'})
# Get all of runs the manager name starts with x
TestRun.filter({'manager__username__startswith': 'x'})
# Get runs contain the case ID 1, 2, 3
TestRun.filter({'case_run__case__case_id__in': [1, 2, 3]})
"""
return TestRun.to_xmlrpc(values)
@log_call(namespace=__xmlrpc_namespace__)
def filter_count(request, values={}):
"""Performs a search and returns the resulting count of runs.
:param dict values: a mapping containing criteria. See also
:meth:`TestRun.filter <tcms.xmlrpc.api.testrun.filter>`.
:return: total matching runs.
:rtype: int
.. seealso::
See examples of :meth:`TestRun.filter <tcms.xmlrpc.api.testrun.filter>`.
"""
return distinct_count(TestRun, values)
@log_call(namespace=__xmlrpc_namespace__)
def get(request, run_id):
"""Used to load an existing test run from the database.
:param int run_id: test run ID.
:return: a mapping representing found :class:`TestRun`.
:rtype: dict
Example::
TestRun.get(1)
"""
try:
tr = TestRun.objects.get(run_id=run_id)
except TestRun.DoesNotExist as error:
return error
response = tr.serialize()
# get the xmlrpc tags
tag_ids = tr.tag.values_list("id", flat=True)
query = {"id__in": tag_ids}
tags = TestTag.to_xmlrpc(query)
# cut 'id' attribute off, only leave 'name' here
tags_without_id = [tag["name"] for tag in tags]
# replace tag_id list in the serialize return data
response["tag"] = tags_without_id
return response
@log_call(namespace=__xmlrpc_namespace__)
def get_issues(request, run_ids):
"""Get the list of issues attached to this run.
:param run_ids: give one or more run IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a run ID.
:type run_ids: int, str or list
:return: a list of mappings of :class:`Issue <tcms.issuetracker.models.Issue>`.
:rtype: list[dict]
Example::
# Get issues belonging to ID 12345
TestRun.get_issues(1)
# Get issues belonging to run ids list [1, 2]
TestRun.get_issues([1, 2])
# Get issues belonging to run ids list 1 and 2 with string
TestRun.get_issues('1, 2')
"""
query = {"case_run__run__in": pre_process_ids(run_ids)}
return Issue.to_xmlrpc(query)
@log_call(namespace=__xmlrpc_namespace__)
def get_change_history(request, run_id):
"""Get the list of changes to the fields of this run.
:param int run_id: run ID.
:return: list of mapping with changed fields and their details.
:rtype: list
.. warning::
NOT IMPLEMENTED - History is different than before.
"""
raise NotImplementedError("Not implemented RPC method") # pragma: no cover
@log_call(namespace=__xmlrpc_namespace__)
def get_completion_report(request, run_ids):
"""Get a report of the current status of the selected runs combined.
:param run_ids: give one or more run IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a run ID.
:type run_ids: int, str or list
:return: A mapping containing counts and percentages of the combined totals
of case-runs in the run. Counts only the most recently statused
case-run for a given build and environment.
:rtype: dict
.. warning::
NOT IMPLEMENTED
"""
raise NotImplementedError("Not implemented RPC method") # pragma: no cover
@log_call(namespace=__xmlrpc_namespace__)
def get_env_values(request, run_id):
"""Get the list of env values to this run.
:param int run_id: run ID.
:return: a list of mappings representing found :class:`TCMSEnvValue`.
:rtype: List[dict]
Example::
TestRun.get_env_values(8)
"""
from tcms.management.models import TCMSEnvValue
# FIXME: return [] if run_id is None or ""
query = {"testrun__pk": run_id}
return TCMSEnvValue.to_xmlrpc(query)
@log_call(namespace=__xmlrpc_namespace__)
def get_tags(request, run_id):
"""Get the list of tags attached to this run.
:param int run_id: run ID.
:return: a mapping representing found :class:`TestTag`.
:rtype: dict
Example::
TestRun.get_tags(1)
"""
tr = TestRun.objects.get(run_id=run_id)
tag_ids = tr.tag.values_list("id", flat=True)
query = {"id__in": tag_ids}
return TestTag.to_xmlrpc(query)
@log_call(namespace=__xmlrpc_namespace__)
def get_test_case_runs(request, run_id):
"""Get the list of cases that this run is linked to.
:param int run_id: run ID.
:return: a list of mappings of found :class:`TestCaseRun`.
:rtype: list[dict]
Example::
# Get all of case runs
TestRun.get_test_case_runs(1)
"""
return TestCaseRun.to_xmlrpc({"run__run_id": run_id})
@log_call(namespace=__xmlrpc_namespace__)
def get_test_cases(request, run_id):
"""Get the list of cases that this run is linked to.
:param int run_id: run ID.
:return: a list of mappings of found :class:`TestCase`.
:rtype: list[dict]
Example::
TestRun.get_test_cases(1)
"""
tcs_serializer = TestCase.to_xmlrpc(query={"case_run__run_id": run_id})
qs = TestCaseRun.objects.filter(run_id=run_id).values("case", "pk", "case_run_status__name")
extra_info = {row["case"]: row for row in qs.iterator()}
for case in tcs_serializer:
info = extra_info[case["case_id"]]
case["case_run_id"] = info["pk"]
case["case_run_status"] = info["case_run_status__name"]
return tcs_serializer
@log_call(namespace=__xmlrpc_namespace__)
def get_test_plan(request, run_id):
"""Get the plan that this run is associated with.
:param int run_id: run ID.
:return: a mapping of found :class:`TestPlan`.
:rtype: dict
Example::
TestRun.get_test_plan(1)
"""
return TestRun.objects.select_related("plan").get(run_id=run_id).plan.serialize()
@log_call(namespace=__xmlrpc_namespace__)
@user_passes_test(methodcaller("has_perm", "testruns.delete_testruntag"))
def remove_tag(request, run_ids, tags):
"""Remove a tag from a run.
:param run_ids: give one or more run IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a run ID.
:type run_ids: int, str or list
:param tags: tag name or a list of tag names to remove.
:type tags: str or list
:return: a list which is empty on success.
:rtype: list
Example::
# Remove tag 'foo' from run 1
TestRun.remove_tag(1, 'foo')
# Remove tag 'foo' and 'bar' from run list [1, 2]
TestRun.remove_tag([1, 2], ['foo', 'bar'])
# Remove tag 'foo' and 'bar' from run list '1, 2' with String
TestRun.remove_tag('1, 2', 'foo, bar')
"""
trs = TestRun.objects.filter(run_id__in=pre_process_ids(value=run_ids))
tgs = TestTag.objects.filter(name__in=TestTag.string_to_list(tags))
tr: TestRun
for tr in trs.iterator():
for tg in tgs.iterator():
tr.remove_tag(tag=tg)
@log_call(namespace=__xmlrpc_namespace__)
@user_passes_test(methodcaller("has_perm", "testruns.change_testrun"))
def update(request, run_ids, values):
"""Updates the fields of the selected test run.
:param run_ids: give one or more run IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a run ID.
:type run_ids: int, str or list
:param dict values: a mapping containing these data to update specified
runs.
* plan: (int) TestPlan.plan_id
* product: (int) Product.id
* build: (int) Build.id
* manager: (int) Auth.User.id
* default_tester: Intege Auth.User.id
* summary: (str)
* estimated_time: (TimeDelta) in format ``2h30m30s`` which is recommended or ``HH:MM:SS``.
* product_version: (int)
* plan_text_version: (int)
* notes: (str)
* status: (int) 0:RUNNING 1:FINISHED
:return: list of mappings of the updated test runs.
:rtype: list[dict]
.. versionchanged:: 4.5
Argument ``errata_id`` is removed.
Example::
# Update status to finished for run 1 and 2
TestRun.update([1, 2], {'status': 1})
"""
from datetime import datetime
from tcms.core import forms
from tcms.testruns.forms import XMLRPCUpdateRunForm
if values.get("product_version") and not values.get("product"):
raise ValueError('Field "product" is required by product_version')
if values.get("estimated_time"):
values["estimated_time"] = pre_process_estimated_time(values.get("estimated_time"))
form = XMLRPCUpdateRunForm(values)
if values.get("product_version"):
form.populate(product_id=values["product"])
if form.is_valid():
trs = TestRun.objects.filter(pk__in=pre_process_ids(value=run_ids))
_values = dict()
if form.cleaned_data["plan"]:
_values["plan"] = form.cleaned_data["plan"]
if form.cleaned_data["build"]:
_values["build"] = form.cleaned_data["build"]
if form.cleaned_data["manager"]:
_values["manager"] = form.cleaned_data["manager"]
if "default_tester" in values:
default_tester = form.cleaned_data["default_tester"]
if values.get("default_tester") and default_tester:
_values["default_tester"] = default_tester
else:
_values["default_tester"] = None
if form.cleaned_data["summary"]:
_values["summary"] = form.cleaned_data["summary"]
if values.get("estimated_time") is not None:
_values["estimated_time"] = form.cleaned_data["estimated_time"]
if form.cleaned_data["product_version"]:
_values["product_version"] = form.cleaned_data["product_version"]
if "notes" in values:
if values["notes"] in (None, ""):
_values["notes"] = values["notes"]
if form.cleaned_data["notes"]:
_values["notes"] = form.cleaned_data["notes"]
if form.cleaned_data["plan_text_version"]:
_values["plan_text_version"] = form.cleaned_data["plan_text_version"]
if isinstance(form.cleaned_data["status"], int):
if form.cleaned_data["status"]:
_values["stop_date"] = datetime.now()
else:
_values["stop_date"] = None
trs.update(**_values)
else:
raise ValueError(forms.errors_to_list(form))
query = {"pk__in": trs.values_list("pk", flat=True)}
return TestRun.to_xmlrpc(query)
@log_call(namespace=__xmlrpc_namespace__)
@user_passes_test(methodcaller("has_perm", "testruns.add_tcmsenvrunvaluemap"))
def link_env_value(request, run_ids, env_value_ids):
"""Link env values to the given runs.
:param run_ids: give one or more run IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a run ID.
:type run_ids: int, str or list
:param env_value_ids: give one or more environment value IDs. It could be
an integer, a string containing comma separated IDs, or a list of int
each of them is a environment value ID.
:type env_value_ids: int, str or list
:return: a list which is empty on success or a list of mappings with
failure codes if a failure occured.
:rtype: list
Example::
# Add env value 1 to run id 2
TestRun.link_env_value(2, 1)
"""
return __env_value_operation(request, "add", run_ids, env_value_ids)
@log_call(namespace=__xmlrpc_namespace__)
@user_passes_test(methodcaller("has_perm", "testruns.delete_tcmsenvrunvaluemap"))
def unlink_env_value(request, run_ids, env_value_ids):
"""Unlink env values to the given runs.
:param run_ids: give one or more run IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a run ID.
:type run_ids: int, str or list
:param env_value_ids: give one or more environment value IDs. It could be
an integer, a string containing comma separated IDs, or a list of int
each of them is a environment value ID.
:type env_value_ids: int, str or list
:return: a list which is empty on success or a list of mappings with
failure codes if a failure occured.
:rtype: list
Example::
# Unlink env value 1 to run id 2
TestRun.unlink_env_value(2, 1)
"""
return __env_value_operation(request, "remove", run_ids, env_value_ids)
|
import virtconv.formats as formats
import virtconv.vmcfg as vmcfg
import virtconv.diskcfg as diskcfg
import virtconv.netdevcfg as netdevcfg
from virtinst import virtimage
from xml.sax.saxutils import escape
import re
import logging
ide_letters = list("abcdefghijklmnopqrstuvwxyz")
pv_boot_template = \
""" <boot type="xen">
<guest>
<arch>%(arch)s</arch>
<features>
<pae/>
</features>
</guest>
<os>
<loader>pygrub</loader>
</os>
%(disks)s
</boot>"""
hvm_boot_template = \
""" <boot type="hvm">
<guest>
<arch>%(arch)s</arch>
</guest>
<os>
<loader dev="hd"/>
</os>
%(disks)s
</boot>"""
image_template = \
"""<image>
<name>%(name)s</name>
<label>%(name)s</label>
<description>%(description)s</description>
<domain>
%(boot_template)s
<devices>
<vcpu>%(nr_vcpus)s</vcpu>
<memory>%(memory)s</memory>
%(interface)s
<graphics/>
</devices>
</domain>
<storage>
%(storage)s
</storage>
</image>
"""
def export_os_params(vm):
"""
Export OS-specific parameters.
"""
from virtinst import osdict
os = osdict.lookup_os(vm.os_variant)
def get_os_val(key, default):
val = None
if os:
val = os.to_dict().get(key)
if val is None:
val = default
return val
acpi = ""
if vm.noacpi is False and get_os_val("acpi", True):
acpi = "<acpi />"
apic = ""
if vm.noapic is False and get_os_val("apic", False):
apic = "<apic />"
return acpi, apic
def export_disks(vm):
"""
Export code for the disks. Slightly tricky for two reasons.
We can't handle duplicate disks: some vmx files define SCSI/IDE devices
that point to the same storage, and Xen isn't happy about that. We
just ignore any entries that have duplicate paths.
Since there is no SCSI support in rombios, and the SCSI emulation is
troublesome with Solaris, we forcibly switch the disks to IDE, and expect
the guest OS to cope (which at least Linux does admirably).
Note that we even go beyond hdd: above that work if the domU has PV
drivers.
"""
paths = []
disks = {}
for (bus, instance), disk in sorted(vm.disks.iteritems()):
if disk.path and disk.path in paths:
continue
if bus == "scsi":
instance = 0
while disks.get(("ide", instance)):
instance += 1
disks[("ide", instance)] = disk
if disk.path:
paths += [disk.path]
diskout = []
storage = []
for (bus, instance), disk in sorted(disks.iteritems()):
# virt-image XML cannot handle an empty CD device
if not disk.path:
continue
path = disk.path
drive_nr = ide_letters[int(instance) % 26]
disk_prefix = "xvd"
if vm.type == vmcfg.VM_TYPE_HVM:
if bus == "ide":
disk_prefix = "hd"
else:
disk_prefix = "sd"
# FIXME: needs updating for later Xen enhancements; need to
# implement capabilities checking for max disks etc.
diskout.append(""" <drive disk="%s" target="%s%s"/>\n""" %
(path, disk_prefix, drive_nr))
typ = "raw"
if disk.format in diskcfg.qemu_formats:
typ = diskcfg.qemu_formats[disk.format]
elif disk.typ == diskcfg.DISK_TYPE_ISO:
typ = "iso"
storage.append(
""" <disk file="%s" use="system" format="%s"/>\n""" %
(path, typ))
return storage, diskout
class virtimage_parser(formats.parser):
"""
Support for virt-install's image format (see virt-image man page).
"""
name = "virt-image"
suffix = ".virt-image.xml"
can_import = True
can_export = True
can_identify = True
@staticmethod
def identify_file(input_file):
"""
Return True if the given file is of this format.
"""
try:
f = file(input_file, "r")
output = f.read()
f.close()
virtimage.parse(output, input_file)
except RuntimeError:
return False
return True
@staticmethod
def import_file(input_file):
"""
Import a configuration file. Raises if the file couldn't be
opened, or parsing otherwise failed.
"""
vm = vmcfg.vm()
try:
f = file(input_file, "r")
output = f.read()
f.close()
logging.debug("Importing virt-image XML:\n%s", output)
config = virtimage.parse(output, input_file)
except Exception, e:
raise ValueError(_("Couldn't import file '%s': %s") %
(input_file, e))
domain = config.domain
boot = domain.boots[0]
if not config.name:
raise ValueError(_("No Name defined in '%s'") % input_file)
vm.name = config.name
vm.arch = boot.arch
vm.memory = int(config.domain.memory / 1024)
if config.descr:
vm.description = config.descr
vm.nr_vcpus = config.domain.vcpu
bus = "ide"
nr_disk = 0
for d in boot.drives:
disk = d.disk
format_mappings = {
virtimage.Disk.FORMAT_RAW: diskcfg.DISK_FORMAT_RAW,
virtimage.Disk.FORMAT_VMDK: diskcfg.DISK_FORMAT_VMDK,
virtimage.Disk.FORMAT_QCOW: diskcfg.DISK_FORMAT_QCOW,
virtimage.Disk.FORMAT_QCOW2: diskcfg.DISK_FORMAT_QCOW2,
virtimage.Disk.FORMAT_VDI: diskcfg.DISK_FORMAT_VDI,
}
fmt = None
if disk.format in format_mappings:
fmt = format_mappings[disk.format]
else:
raise ValueError(_("Unknown disk format '%s'"), disk.format)
devid = (bus, nr_disk)
vm.disks[devid] = diskcfg.disk(bus=bus,
typ=diskcfg.DISK_TYPE_DISK)
vm.disks[devid].format = fmt
vm.disks[devid].path = disk.file
nr_disk = nr_disk + 1
nics = domain.interface
nic_idx = 0
while nic_idx in range(0, nics):
# XXX Eventually need to add support for mac addresses if given
vm.netdevs[nic_idx] = netdevcfg.netdev(
typ=netdevcfg.NETDEV_TYPE_UNKNOWN)
nic_idx = nic_idx + 1
vm.validate()
return vm
@staticmethod
def export(vm):
"""
Export a configuration file as a string.
@vm vm configuration instance
Raises ValueError if configuration is not suitable.
"""
if not vm.memory:
raise ValueError(_("VM must have a memory setting"))
# xend wants the name to match r'^[A-Za-z0-9_\-\.\:\/\+]+$', and
# the schema agrees.
vmname = re.sub(r'[^A-Za-z0-9_\-\.:\/\+]+', '_', vm.name)
# Hmm. Any interface is a good interface?
interface = None
if len(vm.netdevs):
interface = " <interface/>"
acpi, apic = export_os_params(vm)
if vm.type == vmcfg.VM_TYPE_PV:
boot_template = pv_boot_template
else:
boot_template = hvm_boot_template
(storage, disks) = export_disks(vm)
boot_xml = boot_template % {
"disks" : "".join(disks).strip("\n"),
"arch" : vm.arch,
"acpi" : acpi,
"apic" : apic,
}
out = image_template % {
"boot_template": boot_xml,
"name" : vmname,
"description" : escape(vm.description),
"nr_vcpus" : vm.nr_vcpus,
# Mb to Kb
"memory" : int(vm.memory) * 1024,
"interface" : interface,
"storage" : "".join(storage).strip("\n"),
}
return out
formats.register_parser(virtimage_parser)
|
"""Zenodo JSON schema."""
from __future__ import absolute_import, print_function, unicode_literals
from flask_babelex import lazy_gettext as _
from invenio_pidrelations.serializers.utils import serialize_relations
from invenio_pidstore.models import PersistentIdentifier
from marshmallow import Schema, ValidationError, fields, missing, \
validates_schema
from werkzeug.routing import BuildError
from zenodo.modules.records.utils import is_deposit
from zenodo.modules.stats.utils import get_record_stats
from ...models import AccessRight, ObjectType
from . import common
class StrictKeysSchema(Schema):
"""Ensure only valid keys exists."""
@validates_schema(pass_original=True)
def check_unknown_fields(self, data, original_data):
"""Check for unknown keys."""
for key in original_data:
if key not in self.fields:
raise ValidationError('Unknown field name {}'.format(key))
class ResourceTypeSchema(StrictKeysSchema):
"""Resource type schema."""
type = fields.Str(
required=True,
error_messages=dict(
required=_('Type must be specified.')
),
)
subtype = fields.Str()
openaire_subtype = fields.Str()
title = fields.Method('get_title', dump_only=True)
def get_title(self, obj):
"""Get title."""
obj = ObjectType.get_by_dict(obj)
return obj['title']['en'] if obj else missing
@validates_schema
def validate_data(self, data):
"""Validate resource type."""
obj = ObjectType.get_by_dict(data)
if obj is None:
raise ValidationError(_('Invalid resource type.'))
def dump_openaire_type(self, obj):
"""Get OpenAIRE subtype."""
acc = obj.get('access_right')
if acc:
return AccessRight.as_category(acc)
return missing
class JournalSchemaV1(StrictKeysSchema):
"""Schema for a journal."""
issue = fields.Str()
pages = fields.Str()
title = fields.Str()
volume = fields.Str()
year = fields.Str()
class MeetingSchemaV1(StrictKeysSchema):
"""Schema for a meeting."""
title = fields.Str()
acronym = fields.Str()
dates = fields.Str()
place = fields.Str()
url = fields.Str()
session = fields.Str()
session_part = fields.Str()
class ImprintSchemaV1(StrictKeysSchema):
"""Schema for imprint."""
publisher = fields.Str()
place = fields.Str()
isbn = fields.Str()
class PartOfSchemaV1(StrictKeysSchema):
"""Schema for imprint."""
pages = fields.Str()
title = fields.Str()
class ThesisSchemaV1(StrictKeysSchema):
"""Schema for thesis."""
university = fields.Str()
supervisors = fields.Nested(common.PersonSchemaV1, many=True)
class FunderSchemaV1(StrictKeysSchema):
"""Schema for a funder."""
doi = fields.Str()
name = fields.Str(dump_only=True)
acronyms = fields.List(fields.Str(), dump_only=True)
links = fields.Method('get_funder_url', dump_only=True)
def get_funder_url(self, obj):
"""Get grant url."""
return dict(self=common.api_link_for('funder', id=obj['doi']))
class GrantSchemaV1(StrictKeysSchema):
"""Schema for a grant."""
title = fields.Str(dump_only=True)
code = fields.Str()
program = fields.Str(dump_only=True)
acronym = fields.Str(dump_only=True)
funder = fields.Nested(FunderSchemaV1)
links = fields.Method('get_grant_url', dump_only=True)
def get_grant_url(self, obj):
"""Get grant url."""
return dict(self=common.api_link_for('grant', id=obj['internal_id']))
class CommunitiesSchemaV1(StrictKeysSchema):
"""Schema for communities."""
id = fields.Function(lambda x: x)
class ActionSchemaV1(StrictKeysSchema):
"""Schema for a actions."""
prereserve_doi = fields.Str(load_only=True)
class FilesSchema(Schema):
"""Files metadata schema."""
type = fields.String()
checksum = fields.String()
size = fields.Integer()
bucket = fields.String()
key = fields.String()
links = fields.Method('get_links')
def get_links(self, obj):
"""Get links."""
return {
'self': common.api_link_for(
'object', bucket=obj['bucket'], key=obj['key'])
}
class OwnerSchema(StrictKeysSchema):
"""Schema for owners.
Allows us to later introduce more properties for an owner.
"""
id = fields.Function(lambda x: x)
class LicenseSchemaV1(StrictKeysSchema):
"""Schema for license.
Allows us to later introduce more properties for an owner.
"""
id = fields.Str(attribute='id')
class MetadataSchemaV1(common.CommonMetadataSchemaV1):
"""Schema for a record."""
resource_type = fields.Nested(ResourceTypeSchema)
access_right_category = fields.Method(
'dump_access_right_category', dump_only=True)
license = fields.Nested(LicenseSchemaV1)
communities = fields.Nested(CommunitiesSchemaV1, many=True)
grants = fields.Nested(GrantSchemaV1, many=True)
journal = fields.Nested(JournalSchemaV1)
meeting = fields.Nested(MeetingSchemaV1)
imprint = fields.Nested(ImprintSchemaV1)
part_of = fields.Nested(PartOfSchemaV1)
thesis = fields.Nested(ThesisSchemaV1)
relations = fields.Method('dump_relations')
def dump_access_right_category(self, obj):
"""Get access right category."""
acc = obj.get('access_right')
if acc:
return AccessRight.as_category(acc)
return missing
def dump_relations(self, obj):
"""Dump the relations to a dictionary."""
if 'relations' in obj:
return obj['relations']
if is_deposit(obj):
pid = self.context['pid']
return serialize_relations(pid)
else:
pid = self.context['pid']
return serialize_relations(pid)
class RecordSchemaV1(common.CommonRecordSchemaV1):
"""Schema for records v1 in JSON."""
files = fields.Nested(
FilesSchema, many=True, dump_only=True, attribute='files')
metadata = fields.Nested(MetadataSchemaV1)
owners = fields.List(
fields.Integer, attribute='metadata.owners', dump_only=True)
revision = fields.Integer(dump_only=True)
updated = fields.Str(dump_only=True)
stats = fields.Method('dump_stats')
def dump_stats(self, obj):
"""Dump the stats to a dictionary."""
if '_stats' in obj.get('metadata', {}):
return obj['metadata'].get('_stats', {})
else:
pid = self.context.get('pid')
if isinstance(pid, PersistentIdentifier):
return get_record_stats(pid.object_uuid, False)
else:
return None
class DepositSchemaV1(RecordSchemaV1):
"""Deposit schema.
Same as the Record schema except for some few extra additions.
"""
files = None
owners = fields.Nested(
OwnerSchema, dump_only=True, attribute='metadata._deposit.owners',
many=True)
status = fields.Str(dump_only=True, attribute='metadata._deposit.status')
recid = fields.Str(dump_only=True, attribute='metadata.recid')
|
import logging
from django.core.management.base import BaseCommand
from payment.postfinance_connector import ISO2022Parser
log = logging.getLogger('tq')
class Command(BaseCommand):
help = '(re)parse ISO 20022 files, ignoring duplicates'
def add_arguments(self, parser):
parser.add_argument(
'--dry-run',
action='store_true',
dest='dry_run',
default=False,
help='dry run',
)
parser.add_argument(
'--reparse',
action='store_true',
dest='reparse',
default=False,
help='parse file also if already processed',
)
def handle(self, *args, **options):
log.info('run management command: {}'.format(__file__))
parser = ISO2022Parser()
count = parser.parse(reparse=options['reparse'], dry_run=options['dry_run'])
log.info('found and parsed {} new transactions'.format(count))
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import xml.etree.ElementTree
from xml.etree.cElementTree import ElementTree, Element, SubElement
from xml.etree.cElementTree import fromstring, tostring
import fs_uae_launcher.fsui as fsui
from ..Config import Config
from ..Settings import Settings
from ..I18N import _, ngettext
class XMLControl(fsui.TextArea):
def __init__(self, parent):
fsui.TextArea.__init__(self, parent, horizontal_scroll=True)
self.path = ""
def connect_game(self, info):
tree = self.get_tree()
root = tree.getroot()
if not root.tag == "config":
return
game_node = self.find_or_create_node(root, "game")
game_node.set("uuid", info["uuid"])
game_name_node = self.find_or_create_node(game_node, "name")
game_name_node.text = info["name"]
self.set_tree(tree)
def find_or_create_node(self, element, name):
node = element.find(name)
if node is None:
node = SubElement(element, name)
return node
def set_path(self, path):
if not os.path.exists(path):
path = ""
self.path = path
if path:
self.load_xml(path)
else:
self.set_text("")
def get_tree(self):
text = self.get_text().strip()
try:
root = fromstring(text.encode("UTF-8"))
except Exception:
# FIXME: show message
import traceback
traceback.print_exc()
return
tree = ElementTree(root)
indent_tree(root)
return tree
def set_tree(self, tree):
data = tostring(tree.getroot(), encoding="UTF-8").decode("UTF-8")
std_decl = "<?xml version='1.0' encoding='UTF-8'?>"
if data.startswith(std_decl):
data = data[len(std_decl):].strip()
self.set_text(data)
def load_xml(self, path):
with open(path, "rb") as f:
data = f.read()
self.set_text(data)
def save(self):
if not self.path:
print("no path to save XML to")
return
self.save_xml(self.path)
def save_xml(self, path):
self.get_tree().write(self.path)
def indent_tree(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent_tree(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
|
from ABE_ADCDACPi import ADCDACPi
import time
import math
"""
================================================
ABElectronics ADCDAC Pi 2-Channel ADC, 2-Channel DAC | DAC sine wave generator demo
Version 1.0 Created 17/05/2014
Version 1.1 16/11/2014 updated code and functions to PEP8 format
run with: python demo-dacsinewave.py
================================================
"""
adcdac = ADCDACPi(1) # create an instance of the ADCDAC Pi with a DAC gain set to 1
DACLookup_FullSine_12Bit = \
[2048, 2073, 2098, 2123, 2148, 2174, 2199, 2224,
2249, 2274, 2299, 2324, 2349, 2373, 2398, 2423,
2448, 2472, 2497, 2521, 2546, 2570, 2594, 2618,
2643, 2667, 2690, 2714, 2738, 2762, 2785, 2808,
2832, 2855, 2878, 2901, 2924, 2946, 2969, 2991,
3013, 3036, 3057, 3079, 3101, 3122, 3144, 3165,
3186, 3207, 3227, 3248, 3268, 3288, 3308, 3328,
3347, 3367, 3386, 3405, 3423, 3442, 3460, 3478,
3496, 3514, 3531, 3548, 3565, 3582, 3599, 3615,
3631, 3647, 3663, 3678, 3693, 3708, 3722, 3737,
3751, 3765, 3778, 3792, 3805, 3817, 3830, 3842,
3854, 3866, 3877, 3888, 3899, 3910, 3920, 3930,
3940, 3950, 3959, 3968, 3976, 3985, 3993, 4000,
4008, 4015, 4022, 4028, 4035, 4041, 4046, 4052,
4057, 4061, 4066, 4070, 4074, 4077, 4081, 4084,
4086, 4088, 4090, 4092, 4094, 4095, 4095, 4095,
4095, 4095, 4095, 4095, 4094, 4092, 4090, 4088,
4086, 4084, 4081, 4077, 4074, 4070, 4066, 4061,
4057, 4052, 4046, 4041, 4035, 4028, 4022, 4015,
4008, 4000, 3993, 3985, 3976, 3968, 3959, 3950,
3940, 3930, 3920, 3910, 3899, 3888, 3877, 3866,
3854, 3842, 3830, 3817, 3805, 3792, 3778, 3765,
3751, 3737, 3722, 3708, 3693, 3678, 3663, 3647,
3631, 3615, 3599, 3582, 3565, 3548, 3531, 3514,
3496, 3478, 3460, 3442, 3423, 3405, 3386, 3367,
3347, 3328, 3308, 3288, 3268, 3248, 3227, 3207,
3186, 3165, 3144, 3122, 3101, 3079, 3057, 3036,
3013, 2991, 2969, 2946, 2924, 2901, 2878, 2855,
2832, 2808, 2785, 2762, 2738, 2714, 2690, 2667,
2643, 2618, 2594, 2570, 2546, 2521, 2497, 2472,
2448, 2423, 2398, 2373, 2349, 2324, 2299, 2274,
2249, 2224, 2199, 2174, 2148, 2123, 2098, 2073,
2048, 2023, 1998, 1973, 1948, 1922, 1897, 1872,
1847, 1822, 1797, 1772, 1747, 1723, 1698, 1673,
1648, 1624, 1599, 1575, 1550, 1526, 1502, 1478,
1453, 1429, 1406, 1382, 1358, 1334, 1311, 1288,
1264, 1241, 1218, 1195, 1172, 1150, 1127, 1105,
1083, 1060, 1039, 1017, 995, 974, 952, 931,
910, 889, 869, 848, 828, 808, 788, 768,
749, 729, 710, 691, 673, 654, 636, 618,
600, 582, 565, 548, 531, 514, 497, 481,
465, 449, 433, 418, 403, 388, 374, 359,
345, 331, 318, 304, 291, 279, 266, 254,
242, 230, 219, 208, 197, 186, 176, 166,
156, 146, 137, 128, 120, 111, 103, 96,
88, 81, 74, 68, 61, 55, 50, 44,
39, 35, 30, 26, 22, 19, 15, 12,
10, 8, 6, 4, 2, 1, 1, 0,
0, 0, 1, 1, 2, 4, 6, 8,
10, 12, 15, 19, 22, 26, 30, 35,
39, 44, 50, 55, 61, 68, 74, 81,
88, 96, 103, 111, 120, 128, 137, 146,
156, 166, 176, 186, 197, 208, 219, 230,
242, 254, 266, 279, 291, 304, 318, 331,
345, 359, 374, 388, 403, 418, 433, 449,
465, 481, 497, 514, 531, 548, 565, 582,
600, 618, 636, 654, 673, 691, 710, 729,
749, 768, 788, 808, 828, 848, 869, 889,
910, 931, 952, 974, 995, 1017, 1039, 1060,
1083, 1105, 1127, 1150, 1172, 1195, 1218, 1241,
1264, 1288, 1311, 1334, 1358, 1382, 1406, 1429,
1453, 1478, 1502, 1526, 1550, 1575, 1599, 1624,
1648, 1673, 1698, 1723, 1747, 1772, 1797, 1822,
1847, 1872, 1897, 1922, 1948, 1973, 1998, 2023]
while True:
for val in DACLookup_FullSine_12Bit:
adcdac.set_dac_raw(1, val)
|
import re
from ANNarchy.core import Global
from ANNarchy.core.PopulationView import PopulationView
from ANNarchy.models.Synapses import DefaultSpikingSynapse, DefaultRateCodedSynapse
reserved_variables = [
't',
'dt',
't_pre',
't_post',
't_last',
'last_spike',
'rk_post',
'rk_pre',
'i',
'j',
'active',
'refractory',
'size',
]
def check_structure(populations, projections):
"""
Checks the structure before compilation to display more useful error messages.
"""
from ANNarchy.extensions.convolution.Transpose import Transpose
# Check variable names
_check_reserved_names(populations, projections)
# Check that projections are created before compile
for proj in projections:
if isinstance(proj, Transpose):
continue
if not proj._connection_method:
Global._error('The projection between populations', proj.pre.id, 'and', proj.post.id, 'has not been connected.',
' Call a connector method before compiling the network.')
# Check if the storage formats are valid for the selected paradigm
_check_storage_formats(projections)
# Check that synapses access existing variables in the pre or post neurons
_check_prepost(populations, projections)
# Check locality of variable is respected
_check_locality(populations, projections)
def check_experimental_features(populations, projections):
"""
The idea behind this method, is to check if new experimental features are used. This
should help also the user to be aware of changes.
"""
# CPU-related formats
if Global.config['paradigm'] == "openmp":
for proj in projections:
if proj._storage_format == "csr" and proj._storage_order == "pre_to_post":
Global._warning("Compressed sparse row (CSR) and pre_to_post ordering representation is an experimental feature, we greatly appreciate bug reports.")
break
for proj in projections:
if proj._storage_format == "bsr":
Global._warning("Blocked sparse row (BSR) representation is an experimental feature, we greatly appreciate bug reports.")
break
for proj in projections:
if proj._storage_format == "coo":
Global._warning("Coordinate (COO) representation is an experimental feature, we greatly appreciate bug reports.")
break
for proj in projections:
if proj._storage_format == "ellr":
Global._warning("ELLPACK-R (ELLR) representation is an experimental feature, we greatly appreciate bug reports.")
break
for proj in projections:
if proj._storage_format == "ell":
Global._warning("ELLPACK (ELL) representation is an experimental feature, we greatly appreciate bug reports.")
break
for proj in projections:
if proj._storage_format == "hyb":
Global._warning("Hybrid (ELL + COO) representation is an experimental feature, we greatly appreciate bug reports.")
break
# GPU-related formats
elif Global.config['paradigm'] == "cuda":
for pop in populations:
if pop.neuron_type.description['type'] == "spike":
Global._warning('Spiking neurons on GPUs is an experimental feature. We greatly appreciate bug reports.')
break
for proj in projections:
if proj._storage_format == "ellr":
Global._warning("ELLPACK-R (ELLR) representation is an experimental feature, we greatly appreciate bug reports.")
break
for proj in projections:
if proj._storage_format == "bsr":
Global._warning("Blocked sparse row (BSR) representation is an experimental feature, we greatly appreciate bug reports.")
break
for proj in projections:
if proj._storage_format == "coo":
Global._warning("Coordinate (COO) representation is an experimental feature, we greatly appreciate bug reports.")
break
for proj in projections:
if proj._storage_format == "hyb":
Global._warning("Hybrid (ELL + COO) representation is an experimental feature, we greatly appreciate bug reports.")
break
else:
pass
def _check_reserved_names(populations, projections):
"""
Checks no reserved variable names is redefined
"""
# Check populations
for pop in populations:
# Reserved variable names
for term in reserved_variables:
if term in pop.attributes:
Global._print(pop.neuron_type.parameters)
Global._print(pop.neuron_type.equations)
Global._error(term + ' is a reserved variable name')
# Check projections
for proj in projections:
# Reserved variable names
for term in reserved_variables:
if term in proj.attributes:
Global._print(proj.synapse_type.parameters)
Global._print(proj.synapse_type.equations)
Global._error(term + ' is a reserved variable name')
def _check_storage_formats(projections):
"""
ANNarchy 4.7 introduced a set of sparse matrix formats. Some of them are not implemented for
all paradigms or might not support specific optimizations.
"""
for proj in projections:
# Most of the sparse matrix formats are not trivially invertable and therefore we can not implement
# spiking models with them
if proj.synapse_type.type == "spike" and proj._storage_format in ["ell", "ellr", "coo", "hyb"]:
raise Global.ANNarchyException("Using 'storage_format="+ proj._storage_format + "' is not allowed for spiking synapses.", True)
# For some of the sparse matrix formats we don't implemented plasticity yet.
if proj.synapse_type.type == "spike" and proj._storage_format in ["dense"] and not isinstance(proj.synapse_type, DefaultSpikingSynapse):
raise Global.ANNarchyException("Using 'storage_format="+ proj._storage_format + "' is only allowed for default spiking synapses yet.", True)
# For some of the sparse matrix formats we don't implemented plasticity yet.
if proj.synapse_type.type == "rate" and proj._storage_format in ["coo", "hyb"] and not isinstance(proj.synapse_type, DefaultRateCodedSynapse):
raise Global.ANNarchyException("Using 'storage_format="+ proj._storage_format + "' is only allowed for default rate-coded synapses yet.", True)
# OpenMP disabled?
if proj._storage_format in ["bsr"] and Global.config["num_threads"]>1:
raise Global.ANNarchyException("Using 'storage_format="+ proj._storage_format + "' is not available for OpenMP yet.", True)
# Single weight optimization available?
if proj._has_single_weight() and proj._storage_format in ["dense"]:
raise Global.ANNarchyException("Using 'storage_format="+ proj._storage_format + "' is not allowed for single weight projections.", True)
# Slicing available?
if isinstance(proj.post, PopulationView) and proj._storage_format in ["dense"]:
raise Global.ANNarchyException("Using 'storage_format="+ proj._storage_format + "' is not allowed for PopulationViews as target.", True)
# In some cases we don't allow the usage of non-unifom delay
if (proj.max_delay > 1 and proj.uniform_delay == -1):
if Global._check_paradigm("cuda"):
raise Global.ANNarchyException("Using non-uniform delays is not available for CUDA devices.", True)
else:
if proj._storage_format == "ellr":
raise Global.ANNarchyException("Using 'storage_format="+ proj._storage_format + "' is and non-uniform delays is not implemented.", True)
if Global._check_paradigm("cuda") and proj._storage_format == "lil":
proj._storage_format = "csr"
Global._info("LIL-type projections are not available for GPU devices ... default to CSR")
if Global._check_paradigm("cuda") and proj._storage_format == "ell":
Global._info("We would recommend to use ELLPACK-R (format=ellr) on GPUs.")
def _check_prepost(populations, projections):
"""
Checks that when a synapse uses pre.x r post.x, the variable x exists in the corresponding neuron
"""
for proj in projections:
for dep in proj.synapse_type.description['dependencies']['pre']:
if dep.startswith('sum('):
target = re.findall(r'\(([\s\w]+)\)', dep)[0].strip()
if not target in proj.pre.targets:
Global._print(proj.synapse_type.equations)
Global._error('The pre-synaptic population ' + proj.pre.name + ' receives no projection with the type ' + target)
continue
if not dep in proj.pre.attributes:
Global._print(proj.synapse_type.equations)
Global._error('The pre-synaptic population ' + proj.pre.name + ' has no variable called ' + dep)
for dep in proj.synapse_type.description['dependencies']['post']:
if dep.startswith('sum('):
target = re.findall(r'\(([\s\w]+)\)', dep)[0].strip()
if not target in proj.post.targets:
Global._print(proj.synapse_type.equations)
Global._error('The post-synaptic population ' + proj.post.name + ' receives no projection with the type ' + target)
continue
if not dep in proj.post.attributes:
Global._print(proj.synapse_type.equations)
Global._error('The post-synaptic population ' + proj.post.name + ' has no variable called ' + dep)
def _check_locality(populations, projections):
"""
Checks that a global variable does not depend on local ones.
"""
for proj in projections:
for var in proj.synapse_type.description['variables']:
if var['locality'] == 'global': # cannot depend on local or semiglobal variables
# Inside the equation
for v in var['dependencies']:
if _get_locality(v, proj.synapse_type.description) in ['local', 'semiglobal']:
Global._print(var['eq'])
Global._error('The global variable', var['name'], 'cannot depend on a synapse-specific/post-synaptic one:', v)
# As pre/post dependencies
deps = var['prepost_dependencies']
if len(deps['pre']) > 0 or len(deps['post']) > 0 :
Global._print(proj.synapse_type.equations)
Global._error('The global variable', var['name'], 'cannot depend on pre- or post-synaptic variables.')
if var['locality'] == 'semiglobal': # cannot depend on pre-synaptic variables
# Inside the equation
for v in var['dependencies']:
if _get_locality(v, proj.synapse_type.description) == 'local':
Global._print(var['eq'])
Global._error('The postsynaptic variable', var['name'], 'cannot depend on a synapse-specific one:', v)
# As pre/post dependencies
deps = var['prepost_dependencies']
if len(deps['pre']) > 0 :
Global._print(proj.synapse_type.equations)
Global._error('The postsynaptic variable', var['name'], 'cannot depend on pre-synaptic ones (e.g. pre.r).')
def _get_locality(name, description):
"Returns the locality of an attribute based on its name"
for var in description['variables'] + description['parameters']:
if var['name'] == name:
return var['locality']
return 'local'
|
QgsMapLayer.LayerType = QgsMapLayerType
QgsMapLayer.VectorLayer = QgsMapLayerType.VectorLayer
QgsMapLayer.VectorLayer.__doc__ = ""
QgsMapLayer.RasterLayer = QgsMapLayerType.RasterLayer
QgsMapLayer.RasterLayer.__doc__ = ""
QgsMapLayer.PluginLayer = QgsMapLayerType.PluginLayer
QgsMapLayer.PluginLayer.__doc__ = ""
QgsMapLayer.MeshLayer = QgsMapLayerType.MeshLayer
QgsMapLayer.MeshLayer.__doc__ = "Added in 3.2"
QgsMapLayer.VectorTileLayer = QgsMapLayerType.VectorTileLayer
QgsMapLayer.VectorTileLayer.__doc__ = "Added in 3.14"
QgsMapLayer.AnnotationLayer = QgsMapLayerType.AnnotationLayer
QgsMapLayer.AnnotationLayer.__doc__ = "Contains freeform, georeferenced annotations. Added in QGIS 3.16"
QgsMapLayerType.__doc__ = 'Types of layers that can be added to a map\n\n.. versionadded:: 3.8\n\n' + '* ``VectorLayer``: ' + QgsMapLayerType.VectorLayer.__doc__ + '\n' + '* ``RasterLayer``: ' + QgsMapLayerType.RasterLayer.__doc__ + '\n' + '* ``PluginLayer``: ' + QgsMapLayerType.PluginLayer.__doc__ + '\n' + '* ``MeshLayer``: ' + QgsMapLayerType.MeshLayer.__doc__ + '\n' + '* ``VectorTileLayer``: ' + QgsMapLayerType.VectorTileLayer.__doc__ + '\n' + '* ``AnnotationLayer``: ' + QgsMapLayerType.AnnotationLayer.__doc__
QgsMapLayer.LayerFlag.baseClass = QgsMapLayer
QgsMapLayer.LayerFlags.baseClass = QgsMapLayer
LayerFlags = QgsMapLayer # dirty hack since SIP seems to introduce the flags in module
QgsMapLayer.StyleCategory.baseClass = QgsMapLayer
QgsMapLayer.StyleCategories.baseClass = QgsMapLayer
StyleCategories = QgsMapLayer # dirty hack since SIP seems to introduce the flags in module
|
from distutils.core import setup, Extension
from distutils.sysconfig import parse_makefile
from DistUtilsExtra.command import *
import glob, os, string
files = map(lambda source: "python/"+source,
string.split(parse_makefile("python/makefile")["APT_PKG_SRC"]))
apt_pkg = Extension("apt_pkg", files, libraries=["apt-pkg"]);
files = map(lambda source: "python/"+source,
string.split(parse_makefile("python/makefile")["APT_INST_SRC"]))
apt_inst = Extension("apt_inst", files, libraries=["apt-pkg","apt-inst"]);
templates = []
if not os.path.exists("build/data/templates/"):
os.makedirs("build/data/templates")
for template in glob.glob('data/templates/*.info.in'):
source = open(template, "r")
build = open(os.path.join("build", template[:-3]), "w")
lines = source.readlines()
for line in lines:
build.write(line.lstrip("_"))
source.close()
build.close()
setup(name="python-apt",
version="0.6.17",
description="Python bindings for APT",
author="APT Development Team",
author_email="deity@lists.debian.org",
ext_modules=[apt_pkg,apt_inst],
packages=['apt', 'aptsources'],
data_files = [('share/python-apt/templates',
glob.glob('build/data/templates/*.info')),
('share/python-apt/templates',
glob.glob('data/templates/*.mirrors'))],
cmdclass = { "build" : build_extra.build_extra,
"build_i18n" : build_i18n.build_i18n },
license = 'GNU GPL',
platforms = 'posix'
)
|
"""
Script to fetch test status info from sqlit data base. Before use this
script, avocado We must be lanuch with '--journal' option.
"""
import os
import sys
import sqlite3
import argparse
from avocado.core import data_dir
from dateutil import parser as dateparser
def colour_result(result):
"""Colour result in the test status info"""
colours_map = {"PASS": "\033[92mPASS\033[00m",
"ERROR": "\033[93mERROR\033[00m",
"FAIL": "\033[91mFAIL\033[00m"}
return colours_map.get(result) or result
def summarise_records(records):
"""Summarise test records and print it in cyan"""
num_row = len(records[0])
rows = tuple([("row%s" % x) for x in xrange(num_row)])
records_summary = {}
for rows in records:
records_summary[rows[1]] = records_summary.get(rows[1], 0) + 1
records_summary[rows[4]] = records_summary.get(rows[4], 0) + 1
res = ", ".join("%s=%r" % (
key, val) for (key, val) in records_summary.iteritems())
print "\033[96mSummary: \n" + res + "\033[00m"
def get_total_seconds(td):
""" Alias for get total_seconds in python2.6 """
if hasattr(td, 'total_seconds'):
return td.total_seconds()
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
def fetch_data(db_file=".journal.sqlite"):
""" Fetch tests status info from journal database"""
records = []
con = sqlite3.connect(db_file)
try:
cur = con.cursor()
cur.execute("select tag, time, action, status from test_journal")
while True:
# First record contation start info, second contain end info
# merged start info and end info into one record.
data = cur.fetchmany(2)
if not data:
break
tag = data[0][0]
result = "N/A"
status = "Running"
end_time = None
end_str = None
elapsed = None
start_time = dateparser.parse(data[0][1])
start_str = start_time.strftime("%Y-%m-%d %X")
if len(data) > 1:
status = "Finshed"
result = data[1][3]
end_time = dateparser.parse(data[1][1])
time_delta = end_time - start_time
elapsed = get_total_seconds(time_delta)
end_str = end_time.strftime("%Y-%m-%d %X")
record = (tag, status, start_str, end_str, result, elapsed)
records.append(record)
finally:
con.close()
return records
def print_data(records, skip_timestamp=False):
""" Print formated tests status info"""
if not records:
return
if not skip_timestamp:
print "%-40s %-15s %-15s %-15s %-10s %-10s" % (
"CaseName", "Status", "StartTime",
"EndTime", "Result", "TimeElapsed")
else:
print "%-40s %-15s %-10s" % ("CaseName", "Status", "Result")
for row in records:
if not skip_timestamp:
print "%s %s %s %s %s %s" % (
row[0], row[1], row[2], row[3], colour_result(row[4]), row[5])
else:
print "%s %s %s" % (row[0], row[1], colour_result(row[4]))
summarise_records(records)
if __name__ == "__main__":
default_results_dir = os.path.join(data_dir.get_logs_dir(), 'latest')
parser = argparse.ArgumentParser(description="Avocado journal dump tool")
parser.add_argument(
'-d',
'--test-results-dir',
action='store',
default=default_results_dir,
dest='results_dir',
help="avocado test results dir, Default: %s" %
default_results_dir)
parser.add_argument(
'-s',
'--skip-timestamp',
action='store_true',
default=False,
dest='skip_timestamp',
help="skip timestamp output (leaving status and result enabled)")
parser.add_argument(
'-v',
'--version',
action='version',
version='%(prog)s 1.0')
arguments = parser.parse_args()
db_file = os.path.join(arguments.results_dir, '.journal.sqlite')
if not os.path.isfile(db_file):
print "`.journal.sqlite` DB not found in results directory, "
print "Please start avocado with option '--journal'."
parser.print_help()
sys.exit(1)
data = fetch_data(db_file)
print_data(data, arguments.skip_timestamp)
|
from __future__ import unicode_literals
__author__ = "mozman <mozman@gmx.at>"
from .headervars import VARMAP
from ..ac1018 import AC1018Factory
class AC1021Factory(AC1018Factory):
HEADERVARS = dict(VARMAP)
|
"""
clang/llvm detection.
"""
import os, sys
from waflib import Configure, Options, Utils
from waflib.Tools import ccroot, ar
from waflib.Configure import conf
@conf
def find_clang(conf):
"""
Find the program clang, and if present, try to detect its version number
"""
cc = conf.find_program(['clang', 'cc'], var='CC')
cc = conf.cmd_to_list(cc)
conf.get_cc_version(cc, gcc=True)
conf.env.CC_NAME = 'clang'
conf.env.CC = cc
@conf
def clang_common_flags(conf):
"""
Common flags for clang on nearly all platforms
"""
v = conf.env
v['CC_SRC_F'] = []
v['CC_TGT_F'] = ['-c', '-o']
# linker
if not v['LINK_CC']: v['LINK_CC'] = v['CC']
v['CCLNK_SRC_F'] = []
v['CCLNK_TGT_F'] = ['-o']
v['CPPPATH_ST'] = '-I%s'
v['DEFINES_ST'] = '-D%s'
v['LIB_ST'] = '-l%s' # template for adding libs
v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
v['STLIB_ST'] = '-l%s'
v['STLIBPATH_ST'] = '-L%s'
v['RPATH_ST'] = '-Wl,-rpath,%s'
v['SONAME_ST'] = '-Wl,-h,%s'
v['SHLIB_MARKER'] = '-Wl,-Bdynamic'
v['STLIB_MARKER'] = '-Wl,-Bstatic'
# program
v['cprogram_PATTERN'] = '%s'
# shared librar
v['CFLAGS_cshlib'] = ['-fPIC']
v['LINKFLAGS_cshlib'] = ['-shared']
v['cshlib_PATTERN'] = 'lib%s.so'
# static lib
v['LINKFLAGS_cstlib'] = ['-Wl,-Bstatic']
v['cstlib_PATTERN'] = 'lib%s.a'
# osx stuff
v['LINKFLAGS_MACBUNDLE'] = ['-bundle', '-undefined', 'dynamic_lookup']
v['CFLAGS_MACBUNDLE'] = ['-fPIC']
v['macbundle_PATTERN'] = '%s.bundle'
@conf
def clang_modifier_win32(conf):
"""Configuration flags for executing clang on Windows"""
v = conf.env
v['cprogram_PATTERN'] = '%s.exe'
v['cshlib_PATTERN'] = '%s.dll'
v['implib_PATTERN'] = 'lib%s.dll.a'
v['IMPLIB_ST'] = '-Wl,--out-implib,%s'
v['CFLAGS_cshlib'] = []
v.append_value('CFLAGS_cshlib', ['-DDLL_EXPORT']) # TODO adding nonstandard defines like this DLL_EXPORT is not a good idea
# Auto-import is enabled by default even without this option,
# but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages
# that the linker emits otherwise.
v.append_value('LINKFLAGS', ['-Wl,--enable-auto-import'])
@conf
def clang_modifier_cygwin(conf):
"""Configuration flags for executing clang on Cygwin"""
clang_modifier_win32(conf)
v = conf.env
v['cshlib_PATTERN'] = 'cyg%s.dll'
v.append_value('LINKFLAGS_cshlib', ['-Wl,--enable-auto-image-base'])
v['CFLAGS_cshlib'] = []
@conf
def clang_modifier_darwin(conf):
"""Configuration flags for executing clang on MacOS"""
v = conf.env
v['CFLAGS_cshlib'] = ['-fPIC', '-compatibility_version', '1', '-current_version', '1']
v['LINKFLAGS_cshlib'] = ['-dynamiclib']
v['cshlib_PATTERN'] = 'lib%s.dylib'
v['FRAMEWORKPATH_ST'] = '-F%s'
v['FRAMEWORK_ST'] = ['-framework']
v['ARCH_ST'] = ['-arch']
v['LINKFLAGS_cstlib'] = []
v['SHLIB_MARKER'] = []
v['STLIB_MARKER'] = []
v['SONAME_ST'] = []
@conf
def clang_modifier_aix(conf):
"""Configuration flags for executing clang on AIX"""
v = conf.env
v['LINKFLAGS_cprogram'] = ['-Wl,-brtl']
v['LINKFLAGS_cshlib'] = ['-shared','-Wl,-brtl,-bexpfull']
v['SHLIB_MARKER'] = []
@conf
def clang_modifier_hpux(conf):
v = conf.env
v['SHLIB_MARKER'] = []
v['CFLAGS_cshlib'] = ['-fPIC','-DPIC']
v['cshlib_PATTERN'] = 'lib%s.sl'
@conf
def clang_modifier_platform(conf):
"""Execute platform-specific functions based on *clang_modifier_+NAME*"""
# * set configurations specific for a platform.
# * the destination platform is detected automatically by looking at the macros the compiler predefines,
# and if it's not recognised, it fallbacks to sys.platform.
clang_modifier_func = getattr(conf, 'clang_modifier_' + conf.env.DEST_OS, None)
if clang_modifier_func:
clang_modifier_func()
def configure(conf):
"""
Configuration for clang
"""
conf.find_clang()
conf.find_ar()
conf.clang_common_flags()
conf.clang_modifier_platform()
conf.cc_load_tools()
conf.cc_add_flags()
conf.link_add_flags()
|
import math
import wx
import eos.db
import gui.mainFrame
from gui import globalEvents as GE
from gui.fitCommands.calc.cargo.remove import CalcRemoveCargoCommand
from gui.fitCommands.helpers import CargoInfo, InternalCommandHistory
from service.market import Market
class GuiRemoveCargosCommand(wx.Command):
def __init__(self, fitID, itemIDs):
wx.Command.__init__(self, True, 'Remove Cargos')
self.internalHistory = InternalCommandHistory()
self.fitID = fitID
self.itemIDs = itemIDs
def Do(self):
sMkt = Market.getInstance()
results = []
for itemID in self.itemIDs:
cmd = CalcRemoveCargoCommand(
fitID=self.fitID,
cargoInfo=CargoInfo(itemID=itemID, amount=math.inf))
results.append(self.internalHistory.submit(cmd))
sMkt.storeRecentlyUsed(itemID)
success = any(results)
eos.db.commit()
wx.PostEvent(gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,)))
return success
def Undo(self):
success = self.internalHistory.undoAll()
eos.db.commit()
wx.PostEvent(gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,)))
return success
|
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import urllib.parse
import pycurl
from io import StringIO,BytesIO
import re
import random
import subprocess
from subprocess import check_output
from bs4 import BeautifulSoup
import os.path
from subprocess import check_output
from player_functions import send_notification,ccurl
try:
import libtorrent as lt
from stream import ThreadServer,TorrentThread,get_torrent_info
except:
notify_txt = 'python3 bindings for libtorrent are broken\nTorrent Streaming feature will be disabled'
send_notification(notify_txt)
import shutil
try:
from headlessBrowser import BrowseUrl
except:
from headlessBrowser_webkit import BrowseUrl
def cloudfare(url,quality,nyaa_c):
web = BrowseUrl(url,quality,nyaa_c)
class Nyaa():
def __init__(self,tmp):
self.hdr = 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:45.0) Gecko/20100101 Firefox/45.0'
self.tmp_dir = tmp
self.cookie_file = os.path.join(tmp,'nyaa.txt')
if not os.path.exists(self.cookie_file):
f = open(self.cookie_file,'w')
f.close()
def getOptions(self):
criteria = ['Date','Seeders','Leechers','Downloads','History','LocalStreaming']
return criteria
def ccurlN(self,url):
content = ccurl(url+'#-b#'+self.cookie_file)
#print(content)
if 'checking_browser' in content:
if os.path.exists(self.cookie_file):
os.remove(self.cookie_file)
cloudfare(url,'',self.cookie_file)
content = ccurl(url+'#-b#'+self.cookie_file)
return content
def process_page(self,url):
content = self.ccurlN(url)
soup = BeautifulSoup(content,'lxml')
#print(soup.prettify())
unit_element = soup.findAll('td',{'colspan':'2'})
#print(unit_element[0])
s = []
for i in unit_element:
try:
element = i.findAll('a')
for index in element:
et = index['href']
if '#comment' not in et:
elem = index
j = elem['title']
try:
k = elem['href'].split('/')[-1]
except:
k = 'Download Not Available'
break
td = i.findNext('td', {'class':'text-center'})
sz = td.findNext('td', {'class':'text-center'})
dt = sz.findNext('td', {'class':'text-center'})
se = dt.findNext('td', {'class':'text-center'})
le = se.findNext('td', {'class':'text-center'})
down = le.findNext('td', {'class':'text-center'})
try:
tmp = j.replace('_',' ')+' id='+k+'|Size='+sz.text+'|Seeds='+se.text+'|Leechers='+le.text+'|Total Downloads='+down.text
except:
tmp = 'Not Available'
print(tmp)
s.append(tmp)
except Exception as e:
print(e,'--98---')
return s
def search(self,name):
strname = str(name)
print(strname)
url = "https://nyaa.si/?f=0&c=1_2&s=seeders&o=desc&q="+str(strname)
m = self.process_page(url)
return m
def getCompleteList(self,opt,genre_num,ui,tmp_dir,hist_folder):
global tmp_working_dir
instr = "Press . or > for next page -1"
tmp_working_dir = tmp_dir
if opt == 'Date':
url = 'https://nyaa.si/?c=1_2'
elif opt == 'Seeders':
url = 'https://nyaa.si/?c=1_2&s=seeders&o=desc'
elif opt == 'Leechers':
url = 'https://nyaa.si/?c=1_2&s=leechers&o=desc'
elif opt == 'Downloads':
url = 'https://nyaa.si/?c=1_2&s=downloads&o=desc'
print(opt,url)
m = self.process_page(url)
m.append(instr)
return m
def getEpnList(self,name,opt,depth_list,extra_info,siteName,category):
if extra_info == '-1':
arr = []
return (arr,'Instructions','No.jpg',False,depth_list)
else:
print(extra_info)
name_id = (re.search('id=[^|]*',extra_info).group()).split('=')[1]
url = "https://nyaa.si/download/" + name_id + '.torrent'
print(url)
summary = ""
torrent_dest = os.path.join(siteName,name+'.torrent')
if not os.path.exists(torrent_dest):
ccurl(url+'#'+'-o'+'#'+torrent_dest,self.cookie_file)
info = lt.torrent_info(torrent_dest)
file_arr = []
for f in info.files():
file_path = f.path
file_path = os.path.basename(file_path)
file_arr.append(file_path)
record_history = True
return (file_arr,'Summary Not Available','No.jpg',record_history,depth_list)
def getNextPage(self,opt,pgn,genre_num,name):
if opt == 'Date':
url = 'https://nyaa.si/?c=1_2'
elif opt == 'Seeders':
url = 'https://nyaa.si/?c=1_2&s=seeders&o=desc'
elif opt == 'Leechers':
url = 'https://nyaa.si/?c=1_2&s=leechers&o=desc'
elif opt == 'Downloads':
url = 'https://nyaa.si/?c=1_2&s=downloads&o=desc'
elif opt == 'Search':
url = "https://nyaa.si/?f=0&c=1_2&s=seeders&o=desc&q="+str(name)
url = url + '&p='+str(pgn)
print(url)
m = self.process_page(url)
return m
|
"""
Waf tool for ChibiOS build
"""
from waflib import Errors, Logs, Task, Utils
from waflib.TaskGen import after_method, before_method, feature
import os
import shutil
import sys
import re
import pickle
_dynamic_env_data = {}
def _load_dynamic_env_data(bld):
bldnode = bld.bldnode.make_node('modules/ChibiOS')
tmp_str = bldnode.find_node('include_dirs').read()
tmp_str = tmp_str.replace(';\n','')
tmp_str = tmp_str.replace('-I','') #remove existing -I flags
# split, coping with separator
idirs = re.split('; ', tmp_str)
# create unique list, coping with relative paths
idirs2 = []
for d in idirs:
if d.startswith('../'):
# relative paths from the make build are relative to BUILDROOT
d = os.path.join(bld.env.BUILDROOT, d)
d = os.path.normpath(d)
if not d in idirs2:
idirs2.append(d)
_dynamic_env_data['include_dirs'] = idirs2
@feature('ch_ap_library', 'ch_ap_program')
@before_method('process_source')
def ch_dynamic_env(self):
# The generated files from configuration possibly don't exist if it's just
# a list command (TODO: figure out a better way to address that).
if self.bld.cmd == 'list':
return
if not _dynamic_env_data:
_load_dynamic_env_data(self.bld)
self.use += ' ch'
self.env.append_value('INCLUDES', _dynamic_env_data['include_dirs'])
class upload_fw(Task.Task):
color='BLUE'
always_run = True
def run(self):
upload_tools = self.env.get_flat('UPLOAD_TOOLS')
src = self.inputs[0]
return self.exec_command("python '{}/px_uploader.py' '{}'".format(upload_tools, src))
def exec_command(self, cmd, **kw):
kw['stdout'] = sys.stdout
return super(upload_fw, self).exec_command(cmd, **kw)
def keyword(self):
return "Uploading"
class set_default_parameters(Task.Task):
color='CYAN'
always_run = True
def keyword(self):
return "apj_tool"
def run(self):
rel_default_parameters = self.env.get_flat('DEFAULT_PARAMETERS')
abs_default_parameters = os.path.join(self.env.SRCROOT, rel_default_parameters)
apj_tool = self.env.APJ_TOOL
sys.path.append(os.path.dirname(apj_tool))
from apj_tool import embedded_defaults
defaults = embedded_defaults(self.inputs[0].abspath())
if not defaults.find():
print("Error: Param defaults support not found in firmware")
sys.exit(1)
defaults.set_file(abs_default_parameters)
defaults.save()
class generate_bin(Task.Task):
color='CYAN'
run_str="${OBJCOPY} -O binary ${SRC} ${TGT}"
always_run = True
def keyword(self):
return "Generating"
def __str__(self):
return self.outputs[0].path_from(self.generator.bld.bldnode)
class generate_apj(Task.Task):
'''generate an apj firmware file'''
color='CYAN'
always_run = True
def keyword(self):
return "apj_gen"
def run(self):
import json, time, base64, zlib
img = open(self.inputs[0].abspath(),'rb').read()
d = {
"board_id": int(self.env.APJ_BOARD_ID),
"magic": "APJFWv1",
"description": "Firmware for a %s board" % self.env.APJ_BOARD_TYPE,
"image": base64.b64encode(zlib.compress(img,9)).decode('utf-8'),
"build_time": int(time.time()),
"summary": self.env.BOARD,
"version": "0.1",
"image_size": len(img),
"git_identity": self.generator.bld.git_head_hash(short=True),
"board_revision": 0
}
apj_file = self.outputs[0].abspath()
f = open(apj_file, "w")
f.write(json.dumps(d, indent=4))
f.close()
class build_abin(Task.Task):
'''build an abin file for skyviper firmware upload via web UI'''
color='CYAN'
run_str='${TOOLS_SCRIPTS}/make_abin.sh ${SRC}.bin ${SRC}.abin'
always_run = True
def keyword(self):
return "Generating"
def __str__(self):
return self.outputs[0].path_from(self.generator.bld.bldnode)
class build_intel_hex(Task.Task):
'''build an intel hex file for upload with DFU'''
color='CYAN'
run_str='${TOOLS_SCRIPTS}/make_intel_hex.py ${SRC} ${FLASH_RESERVE_START_KB}'
always_run = True
def keyword(self):
return "Generating"
def __str__(self):
return self.outputs[0].path_from(self.generator.bld.bldnode)
@feature('ch_ap_program')
@after_method('process_source')
def chibios_firmware(self):
self.link_task.always_run = True
link_output = self.link_task.outputs[0]
bin_target = self.bld.bldnode.find_or_declare('bin/' + link_output.change_ext('.bin').name)
apj_target = self.bld.bldnode.find_or_declare('bin/' + link_output.change_ext('.apj').name)
generate_bin_task = self.create_task('generate_bin', src=link_output, tgt=bin_target)
generate_bin_task.set_run_after(self.link_task)
generate_apj_task = self.create_task('generate_apj', src=bin_target, tgt=apj_target)
generate_apj_task.set_run_after(generate_bin_task)
if self.env.BUILD_ABIN:
abin_target = self.bld.bldnode.find_or_declare('bin/' + link_output.change_ext('.abin').name)
abin_task = self.create_task('build_abin', src=link_output, tgt=abin_target)
abin_task.set_run_after(generate_apj_task)
bootloader_bin = self.bld.srcnode.make_node("Tools/bootloaders/%s_bl.bin" % self.env.BOARD)
if os.path.exists(bootloader_bin.abspath()) and self.bld.env.HAVE_INTEL_HEX:
hex_target = self.bld.bldnode.find_or_declare('bin/' + link_output.change_ext('.hex').name)
hex_task = self.create_task('build_intel_hex', src=[bin_target, bootloader_bin], tgt=hex_target)
hex_task.set_run_after(generate_bin_task)
if self.env.DEFAULT_PARAMETERS:
default_params_task = self.create_task('set_default_parameters',
src=link_output)
default_params_task.set_run_after(self.link_task)
generate_bin_task.set_run_after(default_params_task)
if self.bld.options.upload:
_upload_task = self.create_task('upload_fw', src=apj_target)
_upload_task.set_run_after(generate_apj_task)
def setup_can_build(cfg):
'''enable CAN build. By doing this here we can auto-enable CAN in
the build based on the presence of CAN pins in hwdef.dat'''
env = cfg.env
env.AP_LIBRARIES += [
'AP_UAVCAN',
'modules/uavcan/libuavcan/src/**/*.cpp',
'modules/uavcan/libuavcan_drivers/stm32/driver/src/*.cpp'
]
env.CFLAGS += ['-DUAVCAN_STM32_CHIBIOS=1',
'-DUAVCAN_STM32_NUM_IFACES=2']
env.CXXFLAGS += [
'-Wno-error=cast-align',
'-DUAVCAN_STM32_CHIBIOS=1',
'-DUAVCAN_STM32_NUM_IFACES=2'
]
env.DEFINES += [
'UAVCAN_CPP_VERSION=UAVCAN_CPP03',
'UAVCAN_NO_ASSERTIONS=1',
'UAVCAN_NULLPTR=nullptr'
]
env.INCLUDES += [
cfg.srcnode.find_dir('modules/uavcan/libuavcan/include').abspath(),
cfg.srcnode.find_dir('modules/uavcan/libuavcan_drivers/stm32/driver/include').abspath()
]
cfg.get_board().with_uavcan = True
def load_env_vars(env):
'''optionally load extra environment variables from env.py in the build directory'''
print("Checking for env.py")
env_py = os.path.join(env.BUILDROOT, 'env.py')
if not os.path.exists(env_py):
print("No env.py found")
return
e = pickle.load(open(env_py, 'rb'))
for k in e.keys():
v = e[k]
if k == 'ROMFS_FILES':
env.ROMFS_FILES += v
continue
if k in env:
if isinstance(env[k], dict):
a = v.split('=')
env[k][a[0]] = '='.join(a[1:])
print("env updated %s=%s" % (k, v))
elif isinstance(env[k], list):
env[k].append(v)
print("env appended %s=%s" % (k, v))
else:
env[k] = v
print("env added %s=%s" % (k, v))
else:
env[k] = v
print("env set %s=%s" % (k, v))
def configure(cfg):
cfg.find_program('make', var='MAKE')
#cfg.objcopy = cfg.find_program('%s-%s'%(cfg.env.TOOLCHAIN,'objcopy'), var='OBJCOPY', mandatory=True)
cfg.find_program('arm-none-eabi-objcopy', var='OBJCOPY')
env = cfg.env
bldnode = cfg.bldnode.make_node(cfg.variant)
def srcpath(path):
return cfg.srcnode.make_node(path).abspath()
def bldpath(path):
return bldnode.make_node(path).abspath()
env.AP_PROGRAM_FEATURES += ['ch_ap_program']
kw = env.AP_LIBRARIES_OBJECTS_KW
kw['features'] = Utils.to_list(kw.get('features', [])) + ['ch_ap_library']
env.CH_ROOT = srcpath('modules/ChibiOS')
env.AP_HAL_ROOT = srcpath('libraries/AP_HAL_ChibiOS')
env.BUILDDIR = bldpath('modules/ChibiOS')
env.BUILDROOT = bldpath('')
env.SRCROOT = srcpath('')
env.PT_DIR = srcpath('Tools/ardupilotwaf/chibios/image')
env.UPLOAD_TOOLS = srcpath('Tools/ardupilotwaf')
env.CHIBIOS_SCRIPTS = srcpath('libraries/AP_HAL_ChibiOS/hwdef/scripts')
env.TOOLS_SCRIPTS = srcpath('Tools/scripts')
env.APJ_TOOL = srcpath('Tools/scripts/apj_tool.py')
env.SERIAL_PORT = srcpath('/dev/serial/by-id/*_STLink*')
# relative paths to pass to make, relative to directory that make is run from
env.CH_ROOT_REL = os.path.relpath(env.CH_ROOT, env.BUILDROOT)
env.AP_HAL_REL = os.path.relpath(env.AP_HAL_ROOT, env.BUILDROOT)
env.BUILDDIR_REL = os.path.relpath(env.BUILDDIR, env.BUILDROOT)
mk_custom = srcpath('libraries/AP_HAL_ChibiOS/hwdef/%s/chibios_board.mk' % env.BOARD)
mk_common = srcpath('libraries/AP_HAL_ChibiOS/hwdef/common/chibios_board.mk')
# see if there is a board specific make file
if os.path.exists(mk_custom):
env.BOARD_MK = mk_custom
else:
env.BOARD_MK = mk_common
if cfg.options.default_parameters:
cfg.msg('Default parameters', cfg.options.default_parameters, color='YELLOW')
env.DEFAULT_PARAMETERS = srcpath(cfg.options.default_parameters)
# we need to run chibios_hwdef.py at configure stage to generate the ldscript.ld
# that is needed by the remaining configure checks
import subprocess
if env.BOOTLOADER:
env.HWDEF = srcpath('libraries/AP_HAL_ChibiOS/hwdef/%s/hwdef-bl.dat' % env.BOARD)
env.BOOTLOADER_OPTION="--bootloader"
else:
env.HWDEF = srcpath('libraries/AP_HAL_ChibiOS/hwdef/%s/hwdef.dat' % env.BOARD)
env.BOOTLOADER_OPTION=""
hwdef_script = srcpath('libraries/AP_HAL_ChibiOS/hwdef/scripts/chibios_hwdef.py')
hwdef_out = env.BUILDROOT
if not os.path.exists(hwdef_out):
os.mkdir(hwdef_out)
try:
cmd = "python '{0}' -D '{1}' '{2}' {3}".format(hwdef_script, hwdef_out, env.HWDEF, env.BOOTLOADER_OPTION)
ret = subprocess.call(cmd, shell=True)
except Exception:
cfg.fatal("Failed to process hwdef.dat")
if ret != 0:
cfg.fatal("Failed to process hwdef.dat ret=%d" % ret)
load_env_vars(cfg.env)
if env.HAL_WITH_UAVCAN:
setup_can_build(cfg)
def pre_build(bld):
'''pre-build hook to change dynamic sources'''
load_env_vars(bld.env)
if bld.env.HAL_WITH_UAVCAN:
bld.get_board().with_uavcan = True
def build(bld):
bld(
# build hwdef.h from hwdef.dat. This is needed after a waf clean
source=bld.path.ant_glob(bld.env.HWDEF),
rule="python '${AP_HAL_ROOT}/hwdef/scripts/chibios_hwdef.py' -D '${BUILDROOT}' '%s' %s" % (bld.env.HWDEF, bld.env.BOOTLOADER_OPTION),
group='dynamic_sources',
target=[bld.bldnode.find_or_declare('hwdef.h'),
bld.bldnode.find_or_declare('ldscript.ld')]
)
bld(
# create the file modules/ChibiOS/include_dirs
rule="touch Makefile && BUILDDIR=${BUILDDIR_REL} CHIBIOS=${CH_ROOT_REL} AP_HAL=${AP_HAL_REL} ${CHIBIOS_BUILD_FLAGS} ${CHIBIOS_BOARD_NAME} ${MAKE} pass -f '${BOARD_MK}'",
group='dynamic_sources',
target=bld.bldnode.find_or_declare('modules/ChibiOS/include_dirs')
)
common_src = [bld.bldnode.find_or_declare('hwdef.h'),
bld.bldnode.find_or_declare('modules/ChibiOS/include_dirs')]
common_src += bld.path.ant_glob('libraries/AP_HAL_ChibiOS/hwdef/common/*.[ch]')
common_src += bld.path.ant_glob('libraries/AP_HAL_ChibiOS/hwdef/common/*.mk')
common_src += bld.path.ant_glob('modules/ChibiOS/os/hal/**/*.[ch]')
common_src += bld.path.ant_glob('modules/ChibiOS/os/hal/**/*.mk')
if bld.env.ROMFS_FILES:
common_src += [bld.bldnode.find_or_declare('ap_romfs_embedded.h')]
ch_task = bld(
# build libch.a from ChibiOS sources and hwdef.h
rule="BUILDDIR='${BUILDDIR_REL}' CHIBIOS='${CH_ROOT_REL}' AP_HAL=${AP_HAL_REL} ${CHIBIOS_BUILD_FLAGS} ${CHIBIOS_BOARD_NAME} '${MAKE}' lib -f '${BOARD_MK}'",
group='dynamic_sources',
source=common_src,
target=bld.bldnode.find_or_declare('modules/ChibiOS/libch.a')
)
ch_task.name = "ChibiOS_lib"
bld.env.LIB += ['ch']
bld.env.LIBPATH += ['modules/ChibiOS/']
wraplist = ['strerror_r', 'fclose', 'freopen', 'fread']
for w in wraplist:
bld.env.LINKFLAGS += ['-Wl,--wrap,%s' % w]
|
import sys,os
from email.Utils import COMMASPACE, formatdate
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.MIMEImage import MIMEImage
from email.MIMEImage import MIMEImage
from email.MIMEBase import MIMEBase
from email import Encoders
import smtplib
import XmlDict
function=sys.argv[1]
user=sys.argv[2]
filename=sys.argv[3]
conf = XmlDict.loadXml("global.xml")
for option in conf["menu"]["option"]:
if ((option["type"].lower()==function.lower()) and (option["name"]==user)):
option_selected = option
msg = MIMEMultipart()
msg['Subject'] = conf["subject"]
msg['From'] = conf["source"]
msg['To'] = COMMASPACE.join([option_selected["config"]])
msg['Date'] = formatdate(localtime=True)
text = "Your scanner happely delivered this pdf to your mailbox.\n"
msg.attach( MIMEText(text) )
part = MIMEBase('application', "pdf")
part.set_payload( open(filename,"rb").read() )
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(filename) )
msg.attach(part)
mailer = smtplib.SMTP(conf["smtp"])
mailer.sendmail(conf["source"],option_selected["config"] , msg.as_string())
mailer.close()
|
"""
Just for backwards-compatibility
"""
from indico.util.contextManager import *
|
from elan import *
Configurator.Start()
Configurator.Wait()
sleep(3)
Configurator.media.Click()
Configurator.interfacetemplates.Click()
for i in range(100):
try:
Configurator.ComboBox.Select(0,1)
break
except:
sleep(2)
print("Try again")
Configurator.apply.Click()
Configurator.CloseAndClean()
|
from Cryptodome.Util.py3compat import bord
from Cryptodome.Util._raw_api import (load_pycryptodome_raw_lib,
VoidPointer, SmartPointer,
create_string_buffer,
get_raw_buffer, c_size_t,
c_uint8_ptr)
from Cryptodome.Hash.keccak import _raw_keccak_lib
class SHAKE256_XOF(object):
"""A SHAKE256 hash object.
Do not instantiate directly.
Use the :func:`new` function.
:ivar oid: ASN.1 Object ID
:vartype oid: string
"""
# ASN.1 Object ID
oid = "2.16.840.1.101.3.4.2.12"
def __init__(self, data=None):
state = VoidPointer()
result = _raw_keccak_lib.keccak_init(state.address_of(),
c_size_t(64),
0x1F)
if result:
raise ValueError("Error %d while instantiating SHAKE256"
% result)
self._state = SmartPointer(state.get(),
_raw_keccak_lib.keccak_destroy)
self._is_squeezing = False
if data:
self.update(data)
def update(self, data):
"""Continue hashing of a message by consuming the next chunk of data.
Args:
data (byte string/byte array/memoryview): The next chunk of the message being hashed.
"""
if self._is_squeezing:
raise TypeError("You cannot call 'update' after the first 'read'")
result = _raw_keccak_lib.keccak_absorb(self._state.get(),
c_uint8_ptr(data),
c_size_t(len(data)))
if result:
raise ValueError("Error %d while updating SHAKE256 state"
% result)
return self
def read(self, length):
"""
Compute the next piece of XOF output.
.. note::
You cannot use :meth:`update` anymore after the first call to
:meth:`read`.
Args:
length (integer): the amount of bytes this method must return
:return: the next piece of XOF output (of the given length)
:rtype: byte string
"""
self._is_squeezing = True
bfr = create_string_buffer(length)
result = _raw_keccak_lib.keccak_squeeze(self._state.get(),
bfr,
c_size_t(length))
if result:
raise ValueError("Error %d while extracting from SHAKE256"
% result)
return get_raw_buffer(bfr)
def new(self, data=None):
return type(self)(data=data)
def new(data=None):
"""Return a fresh instance of a SHAKE256 object.
Args:
data (byte string/byte array/memoryview):
The very first chunk of the message to hash.
It is equivalent to an early call to :meth:`update`.
Optional.
:Return: A :class:`SHAKE256_XOF` object
"""
return SHAKE256_XOF(data=data)
|
"""Testing i18n template search and interpolation."""
from __future__ import absolute_import, print_function, unicode_literals
__metaclass__ = type
__all__ = [
]
import os
import shutil
import tempfile
import unittest
from pkg_resources import resource_filename
from zope.component import getUtility
from mailman.app.lifecycle import create_list
from mailman.config import config
from mailman.interfaces.languages import ILanguageManager
from mailman.testing.layers import ConfigLayer
from mailman.utilities.i18n import TemplateNotFoundError, find, make, search
class TestSearchOrder(unittest.TestCase):
"""Test internal search order for language templates."""
layer = ConfigLayer
def setUp(self):
self.var_dir = tempfile.mkdtemp()
config.push('no template dir', """\
[mailman]
default_language: fr
[paths.testing]
var_dir: {0}
""".format(self.var_dir))
language_manager = getUtility(ILanguageManager)
language_manager.add('de', 'utf-8', 'German')
language_manager.add('it', 'utf-8', 'Italian')
self.mlist = create_list('l@example.com')
self.mlist.preferred_language = 'de'
def tearDown(self):
config.pop('no template dir')
shutil.rmtree(self.var_dir)
def _stripped_search_order(self, template_file,
mailing_list=None, language=None):
# Return the search path order for a given template, possibly using
# the mailing list and the language as context. Note that this only
# returns the search path, and does not check for whether the paths
# exist or not.
#
# Replace the tempdir prefix with a placeholder for more readable and
# reproducible tests. Essentially the paths below are rooted at
# $var_dir, except those files that live within Mailman's source
# tree. The former will use /v/ as the root and the latter will use
# /m/ as the root.
in_tree = os.path.dirname(resource_filename('mailman', 'templates'))
raw_search_order = search(template_file, mailing_list, language)
for path in raw_search_order:
if path.startswith(self.var_dir):
path = '/v' + path[len(self.var_dir):]
elif path.startswith(in_tree):
path = '/m' + path[len(in_tree):]
else:
# This will cause tests to fail, so keep the full bogus
# pathname for better debugging.
pass
yield path
def test_fully_specified_search_order(self):
search_order = self._stripped_search_order('foo.txt', self.mlist, 'it')
# For convenience.
def nexteq(path):
self.assertEqual(next(search_order), path)
# 1: Use the given language argument
nexteq('/v/templates/lists/l@example.com/it/foo.txt')
nexteq('/v/templates/domains/example.com/it/foo.txt')
nexteq('/v/templates/site/it/foo.txt')
# 2: Use mlist.preferred_language
nexteq('/v/templates/lists/l@example.com/de/foo.txt')
nexteq('/v/templates/domains/example.com/de/foo.txt')
nexteq('/v/templates/site/de/foo.txt')
# 3: Use the site's default language
nexteq('/v/templates/lists/l@example.com/fr/foo.txt')
nexteq('/v/templates/domains/example.com/fr/foo.txt')
nexteq('/v/templates/site/fr/foo.txt')
# 4: English
nexteq('/v/templates/lists/l@example.com/en/foo.txt')
nexteq('/v/templates/domains/example.com/en/foo.txt')
nexteq('/v/templates/site/en/foo.txt')
# 5: After all the site-admin override paths have been searched, the
# Mailman in-tree paths are searched. Note that Mailman only ships
# one set of English templates.
nexteq('/m/templates/en/foo.txt')
def test_no_language_argument_search_order(self):
search_order = self._stripped_search_order('foo.txt', self.mlist)
# For convenience.
def nexteq(path):
self.assertEqual(next(search_order), path)
# 1: Use mlist.preferred_language
nexteq('/v/templates/lists/l@example.com/de/foo.txt')
nexteq('/v/templates/domains/example.com/de/foo.txt')
nexteq('/v/templates/site/de/foo.txt')
# 2: Use the site's default language
nexteq('/v/templates/lists/l@example.com/fr/foo.txt')
nexteq('/v/templates/domains/example.com/fr/foo.txt')
nexteq('/v/templates/site/fr/foo.txt')
# 3: English
nexteq('/v/templates/lists/l@example.com/en/foo.txt')
nexteq('/v/templates/domains/example.com/en/foo.txt')
nexteq('/v/templates/site/en/foo.txt')
# 4: After all the site-admin override paths have been searched, the
# Mailman in-tree paths are searched. Note that Mailman only ships
# one set of English templates.
nexteq('/m/templates/en/foo.txt')
def test_no_mailing_list_argument_search_order(self):
search_order = self._stripped_search_order('foo.txt', language='it')
# For convenience.
def nexteq(path):
self.assertEqual(next(search_order), path)
# 1: Use the given language argument
nexteq('/v/templates/site/it/foo.txt')
# 2: Use the site's default language
nexteq('/v/templates/site/fr/foo.txt')
# 3: English
nexteq('/v/templates/site/en/foo.txt')
# 4: After all the site-admin override paths have been searched, the
# Mailman in-tree paths are searched. Note that Mailman only ships
# one set of English templates.
nexteq('/m/templates/en/foo.txt')
def test_no_optional_arguments_search_order(self):
search_order = self._stripped_search_order('foo.txt')
# For convenience.
def nexteq(path):
self.assertEqual(next(search_order), path)
# 1: Use the site's default language
nexteq('/v/templates/site/fr/foo.txt')
# 2: English
nexteq('/v/templates/site/en/foo.txt')
# 3: After all the site-admin override paths have been searched, the
# Mailman in-tree paths are searched. Note that Mailman only ships
# one set of English templates.
nexteq('/m/templates/en/foo.txt')
class TestFind(unittest.TestCase):
"""Test template search."""
layer = ConfigLayer
def setUp(self):
self.var_dir = tempfile.mkdtemp()
config.push('template config', """\
[paths.testing]
var_dir: {0}
""".format(self.var_dir))
# The following MUST happen AFTER the push() above since pushing a new
# config also clears out the language manager.
getUtility(ILanguageManager).add('xx', 'utf-8', 'Xlandia')
self.mlist = create_list('test@example.com')
self.mlist.preferred_language = 'xx'
self.fp = None
# Populate the template directories with a few fake templates.
def write(text, path):
os.makedirs(os.path.dirname(path))
with open(path, 'w') as fp:
fp.write(text)
self.xxsite = os.path.join(
self.var_dir, 'templates', 'site', 'xx', 'site.txt')
write('Site template', self.xxsite)
self.xxdomain = os.path.join(
self.var_dir, 'templates',
'domains', 'example.com', 'xx', 'domain.txt')
write('Domain template', self.xxdomain)
self.xxlist = os.path.join(
self.var_dir, 'templates',
'lists', 'test@example.com', 'xx', 'list.txt')
write('List template', self.xxlist)
def tearDown(self):
if self.fp is not None:
self.fp.close()
config.pop('template config')
shutil.rmtree(self.var_dir)
def test_find_site_template(self):
filename, self.fp = find('site.txt', language='xx')
self.assertEqual(filename, self.xxsite)
self.assertEqual(self.fp.read(), 'Site template')
def test_find_domain_template(self):
filename, self.fp = find('domain.txt', self.mlist)
self.assertEqual(filename, self.xxdomain)
self.assertEqual(self.fp.read(), 'Domain template')
def test_find_list_template(self):
filename, self.fp = find('list.txt', self.mlist)
self.assertEqual(filename, self.xxlist)
self.assertEqual(self.fp.read(), 'List template')
def test_template_not_found(self):
with self.assertRaises(TemplateNotFoundError) as cm:
find('missing.txt', self.mlist)
self.assertEqual(cm.exception.template_file, 'missing.txt')
class TestMake(unittest.TestCase):
"""Test template interpolation."""
layer = ConfigLayer
def setUp(self):
self.var_dir = tempfile.mkdtemp()
config.push('template config', """\
[paths.testing]
var_dir: {0}
""".format(self.var_dir))
# The following MUST happen AFTER the push() above since pushing a new
# config also clears out the language manager.
getUtility(ILanguageManager).add('xx', 'utf-8', 'Xlandia')
self.mlist = create_list('test@example.com')
self.mlist.preferred_language = 'xx'
# Populate the template directories with a few fake templates.
path = os.path.join(self.var_dir, 'templates', 'site', 'xx')
os.makedirs(path)
with open(os.path.join(path, 'nosub.txt'), 'w') as fp:
print("""\
This is a global template.
It has no substitutions.
It will be wrapped.
""", file=fp)
with open(os.path.join(path, 'subs.txt'), 'w') as fp:
print("""\
This is a $kind template.
It has $howmany substitutions.
It will be wrapped.
""", file=fp)
with open(os.path.join(path, 'nowrap.txt'), 'w') as fp:
print("""\
This is a $kind template.
It has $howmany substitutions.
It will not be wrapped.
""", file=fp)
def tearDown(self):
config.pop('template config')
shutil.rmtree(self.var_dir)
def test_no_substitutions(self):
self.assertEqual(make('nosub.txt', self.mlist), """\
This is a global template. It has no substitutions. It will be
wrapped.""")
def test_substitutions(self):
self.assertEqual(make('subs.txt', self.mlist,
kind='very nice',
howmany='a few'), """\
This is a very nice template. It has a few substitutions. It will be
wrapped.""")
def test_substitutions_no_wrap(self):
self.assertEqual(make('nowrap.txt', self.mlist, wrap=False,
kind='very nice',
howmany='a few'), """\
This is a very nice template.
It has a few substitutions.
It will not be wrapped.
""")
|
import time
import os
import sys
import json
import ConfigParser
import base64
import argparse
import requests
from flask import Flask
from flask import request
from faraday.proxyio import faradaybasicproxyio
from faraday.proxyio import faradaycommands
from faraday.proxyio import deviceconfig
from classes import helper
configTruthFile = "deviceconfiguration.sample.ini"
configFile = "deviceconfiguration.ini"
faradayTruthFile = "faraday_config.sample.ini"
faradayFile = "faraday_config.ini"
faradayHelper = helper.Helper("DeviceConfiguration")
logger = faradayHelper.getLogger()
deviceConfigPath = os.path.join(faradayHelper.path, configFile)
faradayConfigPath = os.path.join(faradayHelper.path, faradayFile)
deviceConfigurationConfig = ConfigParser.RawConfigParser()
deviceConfigurationConfig.read(deviceConfigPath)
parser = argparse.ArgumentParser(description='Device Configuration application provides a Flask server to program Faraday radios via an API')
parser.add_argument('--init-config', dest='init', action='store_true', help='Initialize Device Configuration configuration file')
parser.add_argument('--init-faraday-config', dest='initfaraday', action='store_true', help='Initialize Faraday configuration file')
parser.add_argument('--start', action='store_true', help='Start Device Configuration server')
parser.add_argument('--faradayconfig', action='store_true', help='Display Faraday configuration file contents')
parser.add_argument('--callsign', help='Set Faraday radio callsign')
parser.add_argument('--nodeid', type=int, help='Set Faraday radio nodeid', default=1)
parser.add_argument('--redledtxon', action='store_true', help='Set Faraday radio RED LED during RF transmissions ON')
parser.add_argument('--redledtxoff', action='store_true', help='Set Faraday radio RED LED during RF transmissions OFF')
parser.add_argument('--greenledrxon', action='store_true', help='Set Faraday radio GREEN LED during RF reception ON')
parser.add_argument('--greenledrxoff', action='store_true', help='Set Faraday radio GREEN LED during RF reception OFF')
parser.add_argument('--unitconfigured', action='store_true', help='Set Faraday radio configured bit ON')
parser.add_argument('--unitunconfigured', action='store_true', help='Set Faraday radio configured bit OFF')
parser.add_argument('--gpiop3on', type=int, help='Set Faraday radio GPIO port 3 bits on, specify bit to turn ON')
parser.add_argument('--gpiop3off', type=int, help='Set Faraday radio GPIO port 3 bits on, specify bit to turn OFF')
parser.add_argument('--gpiop3clear', action='store_true', help='Reset Faraday radio GPIO port 3 bits to OFF')
parser.add_argument('--gpiop4on', type=int, help='Set Faraday radio GPIO port 4 bits on, specify bit to turn ON')
parser.add_argument('--gpiop4off', type=int, help='Set Faraday radio GPIO port 4 bits on, specify bit to turn OFF')
parser.add_argument('--gpiop4clear', action='store_true', help='Reset Faraday radio GPIO port 4 bits to OFF')
parser.add_argument('--gpiop5on', type=int, help='Set Faraday radio GPIO port 5 bits on, specify bit to turn ON')
parser.add_argument('--gpiop5off', type=int, help='Set Faraday radio GPIO port 5 bits on, specify bit to turn OFF')
parser.add_argument('--gpiop5clear', action='store_true', help='Reset Faraday radio GPIO port 5 bits to OFF')
parser.add_argument('--gpiop5', type=int, help='Set Faraday radio fgpio_p5')
parser.add_argument('--bootfrequency', type=float, help='Set Faraday radio boot frequency', default=914.5)
parser.add_argument('--bootrfpower', type=int, help='Set Faraday radio boot RF power', default=20)
parser.add_argument('--latitude', type=float, help='Set Faraday radio default latitude. Format \"ddmm.mmmm\"')
parser.add_argument('--longitude', type=float, help='Set Faraday radio default longitude. Format \"dddmm.mmmm\"')
parser.add_argument('--latitudedir', help='Set Faraday radio default latitude direction (N/S)')
parser.add_argument('--longitudedir', help='Set Faraday radio default longitude direction (E/W)')
parser.add_argument('--altitude', type=float, help='Set Faraday radio default altitude in meters. Maximum of 17999.99 Meters')
parser.add_argument('--gpsbooton', action='store_true', help='Set Faraday radio GPS boot power ON')
parser.add_argument('--gpsbootoff', action='store_true', help='Set Faraday radio GPS boot power OFF')
parser.add_argument('--gpsenabled', action='store_true', help='Set Faraday radio GPS use ON')
parser.add_argument('--gpsdisabled', action='store_true', help='Set Faraday radio GPS use OFF')
parser.add_argument('--uarttelemetryenabled', action='store_true', help='Set Faraday radio UART Telemetry ON')
parser.add_argument('--uarttelemetrydisabled', action='store_true', help='Set Faraday radio UART Telemetry OFF')
parser.add_argument('--rftelemetryenabled', action='store_true', help='Set Faraday radio RF Telemetry ON')
parser.add_argument('--rftelemetrydisabled', action='store_true', help='Set Faraday radio RF Telemetry OFF')
parser.add_argument('--uartinterval', type=int, help='Set Faraday radio UART telemetry interval in seconds', default=5)
parser.add_argument('--rfinterval', type=int, help='Set Faraday radio RF telemetry interval in seconds', default=3)
args = parser.parse_args()
def proxyConfig(host, port):
r = requests.get("http://{0}:{1}/config".format(host, port))
return r.json()
def initializeDeviceConfigurationConfig():
'''
Initialize device configuration configuration file from deviceconfiguration.sample.ini
:return: None, exits program
'''
faradayHelper.initializeConfig(configTruthFile, configFile)
sys.exit(0)
def initializeFaradayConfig():
'''
Initialize Faraday radio configuration file from faraday_config.sample.ini
:return: None, exits program
'''
faradayHelper.initializeConfig(faradayTruthFile, faradayFile)
sys.exit(0)
def programFaraday(deviceConfigurationConfigPath):
'''
Programs Faraday by generating a HTTP POST query that Proxy uses to send data to the CC430 FLASH memory.
:param deviceConfigurationConfigPath: Path to deviceconfiguration.ini file
:return: None
'''
config = ConfigParser.RawConfigParser()
config.read(deviceConfigPath)
# Variables
local_device_callsign = config.get("DEVICES", "CALLSIGN")
local_device_node_id = config.get("DEVICES", "NODEID")
local_device_callsign = str(local_device_callsign).upper()
hostname = config.get("PROXY", "HOST")
port = config.get("PROXY", "PORT")
cmdPort = config.get("PROXY", "CMDPORT")
# Send POST data to Proxy to configure unit
try:
r = requests.post('http://{0}:{1}'.format(hostname, port),
params={'callsign': str(local_device_callsign), 'nodeid': int(local_device_node_id), 'port': cmdPort})
logger.info(r.url)
logger.info("Sent Programming Request")
except requests.exceptions.RequestException as e:
# Some error occurred
logger.error(e)
logger.error(r.text)
def displayConfig(faradayConfigPath):
'''
Prints out the Faraday Configuration file
:param faradayConfigPath: path to faraday configuration file
:return: None
'''
with open(faradayConfigPath, 'r') as configFile:
print configFile.read()
sys.exit(0)
def eightBitListToInt(list):
'''
Turn an eight bit list of integers into an integer
:param list: list to convert to an integer
:return: integer
'''
if len(list) == 8:
return int(''.join(str(e) for e in list), 2)
def configureDeviceConfiguration(args, faradayConfigPath):
'''
Configure device configuration configuration file from command line
:param args: argparse arguments
:return: None
'''
config = ConfigParser.RawConfigParser()
config.read(deviceConfigPath)
fconfig = ConfigParser.RawConfigParser()
fconfig.read(faradayConfigPath)
# Obtain proxy configuration
# TODO: Not hardcode
proxyConfiguration = proxyConfig("127.0.0.1", 8000)
#Only works for UNIT0 at this time
config.set('DEVICES', 'CALLSIGN', proxyConfiguration["UNIT0"].get("callsign"))
config.set('DEVICES', 'NODEID', proxyConfiguration["UNIT0"].get("nodeid"))
# Faraday radio configuration
if args.callsign is not None:
fconfig.set('BASIC', 'CALLSIGN', args.callsign)
if args.nodeid is not None:
fconfig.set('BASIC', 'ID', args.nodeid)
# Obtain configboot bitmask options
if args.redledtxon:
fconfig.set('BASIC', 'REDLEDTX', 1)
if args.redledtxoff:
fconfig.set('BASIC', 'REDLEDTX', 0)
if args.greenledrxon:
fconfig.set('BASIC', 'GREENLEDRX', 1)
if args.greenledrxoff:
fconfig.set('BASIC', 'GREENLEDRX', 0)
if args.unitconfigured:
fconfig.set('BASIC', 'UNITCONFIGURED', 1)
if args.unitunconfigured:
fconfig.set('BASIC', 'UNITCONFIGURED', 0)
# Create configuration boot bitmask integer
bootmask = [0] * 8
redledtx = fconfig.get('BASIC', 'REDLEDTX')
greenledrx = fconfig.get('BASIC', 'GREENLEDRX')
unitconfigured = fconfig.get('BASIC', 'UNITCONFIGURED')
bootmask[5] = greenledrx
bootmask[6] = redledtx
bootmask[7] = unitconfigured
configbootbitmask = eightBitListToInt(bootmask)
fconfig.set('BASIC', 'CONFIGBOOTBITMASK', configbootbitmask)
# Detect and set GPIO P3 settings, create bitmask
if args.gpiop3on >= 0 and args.gpiop3on <= 7:
if args.gpiop3on is not None:
fconfig.set('BASIC', 'GPIO_P3_' + str(args.gpiop3on), 1)
if args.gpiop3off >= 0 and args.gpiop3off <= 7:
if args.gpiop3off is not None:
fconfig.set('BASIC', 'GPIO_P3_' + str(args.gpiop3off), 0)
gpiomask = [0] * 8
if not args.gpiop3clear:
gpio0 = fconfig.get('BASIC', 'GPIO_P3_0')
gpio1 = fconfig.get('BASIC', 'GPIO_P3_1')
gpio2 = fconfig.get('BASIC', 'GPIO_P3_2')
gpio3 = fconfig.get('BASIC', 'GPIO_P3_3')
gpio4 = fconfig.get('BASIC', 'GPIO_P3_4')
gpio5 = fconfig.get('BASIC', 'GPIO_P3_5')
gpio6 = fconfig.get('BASIC', 'GPIO_P3_6')
gpio7 = fconfig.get('BASIC', 'GPIO_P3_7')
gpiomask = [gpio7, gpio6, gpio5, gpio4, gpio3, gpio2, gpio1, gpio0]
if args.gpiop3clear:
fconfig.set('BASIC', 'GPIO_P3_0', 0)
fconfig.set('BASIC', 'GPIO_P3_1', 0)
fconfig.set('BASIC', 'GPIO_P3_2', 0)
fconfig.set('BASIC', 'GPIO_P3_3', 0)
fconfig.set('BASIC', 'GPIO_P3_4', 0)
fconfig.set('BASIC', 'GPIO_P3_5', 0)
fconfig.set('BASIC', 'GPIO_P3_6', 0)
fconfig.set('BASIC', 'GPIO_P3_7', 0)
gpiop3bitmask = eightBitListToInt(gpiomask)
fconfig.set('BASIC', 'GPIO_P3', gpiop3bitmask)
# Detect and set GPIO P4 settings, create bitmask
if args.gpiop4on >= 0 and args.gpiop4on <= 7:
if args.gpiop4on is not None:
fconfig.set('BASIC', 'GPIO_P4_' + str(args.gpiop4on), 1)
if args.gpiop4off >= 0 and args.gpiop4off <= 7:
if args.gpiop4off is not None:
fconfig.set('BASIC', 'GPIO_P4_' + str(args.gpiop4off), 0)
gpiomask = [0] * 8
if not args.gpiop4clear:
gpio0 = fconfig.get('BASIC', 'GPIO_P4_0')
gpio1 = fconfig.get('BASIC', 'GPIO_P4_1')
gpio2 = fconfig.get('BASIC', 'GPIO_P4_2')
gpio3 = fconfig.get('BASIC', 'GPIO_P4_3')
gpio4 = fconfig.get('BASIC', 'GPIO_P4_4')
gpio5 = fconfig.get('BASIC', 'GPIO_P4_5')
gpio6 = fconfig.get('BASIC', 'GPIO_P4_6')
gpio7 = fconfig.get('BASIC', 'GPIO_P4_7')
gpiomask = [gpio7, gpio6, gpio5, gpio4, gpio3, gpio2, gpio1, gpio0]
if args.gpiop4clear:
fconfig.set('BASIC', 'GPIO_P4_0', 0)
fconfig.set('BASIC', 'GPIO_P4_1', 0)
fconfig.set('BASIC', 'GPIO_P4_2', 0)
fconfig.set('BASIC', 'GPIO_P4_3', 0)
fconfig.set('BASIC', 'GPIO_P4_4', 0)
fconfig.set('BASIC', 'GPIO_P4_5', 0)
fconfig.set('BASIC', 'GPIO_P4_6', 0)
fconfig.set('BASIC', 'GPIO_P4_7', 0)
gpiop4bitmask = eightBitListToInt(gpiomask)
fconfig.set('BASIC', 'GPIO_P4', gpiop4bitmask)
# Detect and set GPIO P5 settings, create bitmask
if args.gpiop5on >= 0 and args.gpiop5on <= 7:
if args.gpiop5on is not None:
fconfig.set('BASIC', 'GPIO_P5_' + str(args.gpiop5on), 1)
if args.gpiop5off >= 0 and args.gpiop5off <= 7:
if args.gpiop5off is not None:
fconfig.set('BASIC', 'GPIO_P5_' + str(args.gpiop5off), 0)
gpiomask = [0] * 8
if not args.gpiop5clear:
gpio0 = fconfig.get('BASIC', 'GPIO_P5_0')
gpio1 = fconfig.get('BASIC', 'GPIO_P5_1')
gpio2 = fconfig.get('BASIC', 'GPIO_P5_2')
gpio3 = fconfig.get('BASIC', 'GPIO_P5_3')
gpio4 = fconfig.get('BASIC', 'GPIO_P5_4')
gpio5 = fconfig.get('BASIC', 'GPIO_P5_5')
gpio6 = fconfig.get('BASIC', 'GPIO_P5_6')
gpio7 = fconfig.get('BASIC', 'GPIO_P5_7')
gpiomask = [gpio7, gpio6, gpio5, gpio4, gpio3, gpio2, gpio1, gpio0]
if args.gpiop5clear:
fconfig.set('BASIC', 'GPIO_P5_0', 0)
fconfig.set('BASIC', 'GPIO_P5_1', 0)
fconfig.set('BASIC', 'GPIO_P5_2', 0)
fconfig.set('BASIC', 'GPIO_P5_3', 0)
fconfig.set('BASIC', 'GPIO_P5_4', 0)
fconfig.set('BASIC', 'GPIO_P5_5', 0)
fconfig.set('BASIC', 'GPIO_P5_6', 0)
fconfig.set('BASIC', 'GPIO_P5_7', 0)
gpiop5bitmask = eightBitListToInt(gpiomask)
fconfig.set('BASIC', 'GPIO_P5', gpiop5bitmask)
if args.bootfrequency is not None:
fconfig.set('RF', 'boot_frequency_mhz', args.bootfrequency)
if args.bootrfpower is not None:
fconfig.set('RF', 'boot_rf_power', args.bootrfpower)
if args.latitude is not None:
fconfig.set('GPS', 'default_latitude', args.latitude)
if args.longitude is not None:
fconfig.set('GPS', 'default_longitude', args.longitude)
if args.latitudedir is not None:
fconfig.set('GPS', 'default_latitude_direction', args.latitudedir)
if args.longitudedir is not None:
fconfig.set('GPS', 'default_longitude_direction', args.longitudedir)
if args.altitude is not None:
fconfig.set('GPS', 'default_altitude', args.altitude)
if args.gpsbooton:
fconfig.set('GPS', 'gps_boot_bit', 1)
if args.gpsbootoff:
fconfig.set('GPS', 'gps_boot_bit', 0)
if args.gpsenabled:
fconfig.set('GPS', 'gps_present_bit', 1)
if args.gpsdisabled:
fconfig.set('GPS', 'gps_present_bit', 0)
if args.uarttelemetryenabled:
fconfig.set('TELEMETRY', 'uart_telemetry_boot_bit', 1)
if args.uarttelemetrydisabled:
fconfig.set('TELEMETRY', 'uart_telemetry_boot_bit', 0)
if args.rftelemetryenabled:
fconfig.set('TELEMETRY', 'rf_telemetry_boot_bit', 1)
if args.rftelemetrydisabled:
fconfig.set('TELEMETRY', 'rf_telemetry_boot_bit', 0)
if args.uartinterval is not None and args.uartinterval > 0:
fconfig.set('TELEMETRY', 'telemetry_default_uart_interval', args.uartinterval)
if args.rfinterval is not None and args.rfinterval > 0:
fconfig.set('TELEMETRY', 'telemetry_default_rf_interval', args.rfinterval)
# Save device configuration
with open(deviceConfigPath, 'wb') as configfile:
config.write(configfile)
# Save Faraday configuration
with open(faradayConfigPath, 'wb') as configfile:
fconfig.write(configfile)
if args.init:
initializeDeviceConfigurationConfig()
if args.initfaraday:
initializeFaradayConfig()
if args.faradayconfig:
displayConfig(faradayConfigPath)
if not os.path.isfile(deviceConfigPath):
logger.error("Please initialize device configuration with \'--init-config\' option")
sys.exit(0)
if not os.path.isfile(faradayConfigPath):
logger.error("Please initialize Faraday configuration with \'--init-faraday-config\' option")
sys.exit(0)
configureDeviceConfiguration(args, faradayConfigPath)
if not args.start:
logger.warning("--start option not present, exiting Device Configuration server!")
sys.exit(0)
UART_PORT_APP_COMMAND = 2
proxy = faradaybasicproxyio.proxyio()
faradayCmd = faradaycommands.faraday_commands()
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def unitconfig():
"""
This function is called when the RESTful API GET or POST call is made to the '/' of the operating port. Querying a
GET will command the local and queried unit's device configuration in Flash memory and return the information as a
JSON dictionary. Issuing a POST will cause the local .INI file configuration to be loaded into the respective units
Flash memory device configuration.
"""
if request.method == "POST":
try:
print "test POST"
# Obtain URL parameters (for local unit device callsign/ID assignment)
callsign = request.args.get("callsign", "%")
nodeid = request.args.get("nodeid", "%")
# Obtain configuration values
config = ConfigParser.RawConfigParser()
config.read(deviceConfigPath)
hostname = config.get("PROXY", "HOST")
# Read Faraday device configuration file
# Read configuration file
faradayConfig = ConfigParser.RawConfigParser()
faradayConfig.read(faradayConfigPath)
# Create dictionaries of each config section
device_basic_dict = dict()
device_basic_dict['CONFIGBOOTBITMASK'] = faradayConfig.get("BASIC", 'CONFIGBOOTBITMASK')
device_basic_dict['CALLSIGN'] = faradayConfig.get("BASIC", 'CALLSIGN')
device_basic_dict['ID'] = faradayConfig.get("BASIC", 'ID')
device_basic_dict['GPIO_P3'] = faradayConfig.get("BASIC", 'GPIO_P3')
device_basic_dict['GPIO_P4'] = faradayConfig.get("BASIC", 'GPIO_P4')
device_basic_dict['GPIO_P5'] = faradayConfig.get("BASIC", 'GPIO_P5')
device_rf_dict = dict()
device_rf_dict['BOOT_FREQUENCY_MHZ'] = faradayConfig.get("RF", 'BOOT_FREQUENCY_MHZ')
device_rf_dict['BOOT_RF_POWER'] = faradayConfig.get("RF", 'BOOT_RF_POWER')
device_gps_dict = dict()
device_gps_dict['DEFAULT_LATITUDE'] = faradayConfig.get("GPS", 'DEFAULT_LATITUDE')
device_gps_dict['DEFAULT_LATITUDE_DIRECTION'] = faradayConfig.get("GPS", 'DEFAULT_LATITUDE_DIRECTION')
device_gps_dict['DEFAULT_LONGITUDE'] = faradayConfig.get("GPS", 'DEFAULT_LONGITUDE')
device_gps_dict['DEFAULT_LONGITUDE_DIRECTION'] = faradayConfig.get("GPS", 'DEFAULT_LONGITUDE_DIRECTION')
device_gps_dict['DEFAULT_ALTITUDE'] = faradayConfig.get("GPS", 'DEFAULT_ALTITUDE')
device_gps_dict['DEFAULT_ALTITUDE_UNITS'] = faradayConfig.get("GPS", 'DEFAULT_ALTITUDE_UNITS')
device_gps_dict['GPS_BOOT_BIT'] = faradayConfig.get("GPS", 'GPS_BOOT_BIT')
device_gps_dict['GPS_PRESENT_BIT'] = faradayConfig.get("GPS", 'GPS_PRESENT_BIT')
device_telemetry_dict = dict()
device_telemetry_dict['UART_TELEMETRY_BOOT_BIT'] = faradayConfig.get("TELEMETRY", 'UART_TELEMETRY_BOOT_BIT')
device_telemetry_dict['RF_TELEMETRY_BOOT_BIT'] = faradayConfig.get("TELEMETRY", 'RF_TELEMETRY_BOOT_BIT')
device_telemetry_dict['TELEMETRY_DEFAULT_UART_INTERVAL'] = faradayConfig.get("TELEMETRY", 'TELEMETRY_DEFAULT_UART_INTERVAL')
device_telemetry_dict['TELEMETRY_DEFAULT_RF_INTERVAL'] = faradayConfig.get("TELEMETRY", 'TELEMETRY_DEFAULT_RF_INTERVAL')
# Create device configuration module object to use for programming packet creation
device_config_object = deviceconfig.DeviceConfigClass()
# Update the device configuration object with the fields obtained from the INI configuration files loaded
config_bitmask = device_config_object.create_bitmask_configuration(int(device_basic_dict['CONFIGBOOTBITMASK']))
status_basic = device_config_object.update_basic(
config_bitmask, str(device_basic_dict['CALLSIGN']),
int(device_basic_dict['ID']), int(device_basic_dict['GPIO_P3']),
int(device_basic_dict['GPIO_P4']), int(device_basic_dict['GPIO_P5']))
status_rf = device_config_object.update_rf(
float(device_rf_dict['BOOT_FREQUENCY_MHZ']),
int(device_rf_dict['BOOT_RF_POWER']))
status_gps = device_config_object.update_gps(
device_config_object.update_bitmask_gps_boot(int(device_gps_dict['GPS_PRESENT_BIT']),
int(device_gps_dict['GPS_BOOT_BIT'])),
device_gps_dict['DEFAULT_LATITUDE'], device_gps_dict['DEFAULT_LATITUDE_DIRECTION'],
device_gps_dict['DEFAULT_LONGITUDE'], device_gps_dict['DEFAULT_LONGITUDE_DIRECTION'],
device_gps_dict['DEFAULT_ALTITUDE'], device_gps_dict['DEFAULT_ALTITUDE_UNITS'])
status_telem = device_config_object.update_telemetry(device_config_object.update_bitmask_telemetry_boot(
int(device_telemetry_dict['RF_TELEMETRY_BOOT_BIT']),
int(device_telemetry_dict['UART_TELEMETRY_BOOT_BIT'])),
int(device_telemetry_dict['TELEMETRY_DEFAULT_UART_INTERVAL']),
int(device_telemetry_dict['TELEMETRY_DEFAULT_RF_INTERVAL']))
if (status_basic and status_gps and status_rf and status_telem):
# Create the raw device configuration packet to send to unit
device_config_packet = device_config_object.create_config_packet()
# Transmit device configuration to local unit as supplied by the function arguments
proxy.POST(hostname, str(callsign), int(nodeid), UART_PORT_APP_COMMAND,
faradayCmd.CommandLocal(faradayCmd.CMD_DEVICECONFIG, device_config_packet))
return '', 204 # nothing to return but successful transmission
else:
logger.error('Failed to create configuration packet!')
return 'Failed to create configuration packet!', 400
except ValueError as e:
logger.error("ValueError: " + str(e))
return json.dumps({"error": str(e)}), 400
except IndexError as e:
logger.error("IndexError: " + str(e))
return json.dumps({"error": str(e)}), 400
except KeyError as e:
logger.error("KeyError: " + str(e))
return json.dumps({"error": str(e)}), 400
else: # If a GET command
"""
Provides a RESTful interface to device-configuration at URL '/'
"""
try:
# Obtain URL parameters
callsign = request.args.get("callsign", "%")
nodeid = request.args.get("nodeid", "%")
# Obtain configuration values
config = ConfigParser.RawConfigParser()
config.read(deviceConfigPath)
hostname = config.get("PROXY", "HOST")
callsign = str(callsign).upper()
nodeid = str(nodeid)
# Flush all old data from recieve buffer of local unit
proxy.FlushRxPort(callsign, nodeid, proxy.CMD_UART_PORT)
proxy.POST(hostname, str(callsign), int(nodeid), UART_PORT_APP_COMMAND,
faradayCmd.CommandLocalSendReadDeviceConfig())
# Wait enough time for Faraday to respond to commanded memory read.
time.sleep(2)
try:
# Retrieve the next device configuration read packet to arrive
data = proxy.GETWait(hostname, str(callsign), str(nodeid), proxy.CMD_UART_PORT, 2)
# Create device configuration module object
device_config_object = deviceconfig.DeviceConfigClass()
# Decode BASE64 JSON data packet into
data = proxy.DecodeRawPacket(data[0]["data"]) # Get first item
data = device_config_object.extract_config_packet(data)
# Parse device configuration into dictionary
parsed_config_dict = device_config_object.parse_config_packet(data)
# Encoded dictionary data for save network transit
pickled_parsed_config_dict = json.dumps(parsed_config_dict)
pickled_parsed_config_dict_b64 = base64.b64encode(pickled_parsed_config_dict)
except ValueError as e:
print e
except IndexError as e:
print e
except KeyError as e:
print e
except StandardError as e:
print e
except ValueError as e:
logger.error("ValueError: " + str(e))
return json.dumps({"error": str(e)}), 400
except IndexError as e:
logger.error("IndexError: " + str(e))
return json.dumps({"error": str(e)}), 400
except KeyError as e:
logger.error("KeyError: " + str(e))
return json.dumps({"error": str(e)}), 400
return json.dumps({"data": pickled_parsed_config_dict_b64}, indent=1), 200, \
{'Content-Type': 'application/json'}
def main():
"""Main function which starts deviceconfiguration Flask server."""
logger.info('Starting deviceconfiguration server')
# Start the flask server
deviceConfigHost = deviceConfigurationConfig.get("FLASK", "HOST")
deviceConfigPort = deviceConfigurationConfig.getint("FLASK", "PORT")
#proxyConfiguration = proxyConfig("127.0.0.1", 8000)
app.run(host=deviceConfigHost, port=deviceConfigPort, threaded=True)
if __name__ == '__main__':
main()
|
import re
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def read(relpath):
"""
Return string containing the contents of the file at *relpath* relative to
this file.
"""
cwd = os.path.dirname(__file__)
abspath = os.path.join(cwd,os.path.normpath(relpath))
with open(abspath) as f:
return f.read()
PACKAGE = os.path.basename(os.getcwd())
PACKAGES = [PACKAGE]
PROVIDES = [PACKAGE]
PACKAGE_DIR = {PACKAGE: PACKAGE}
SCRIPT_FILE = PACKAGE_DIR[PACKAGE] + '/__init__.py'
ENTRY_POINTS = {
# 'console_scripts': [PACKAGE + '=' + PACKAGE + '.' + PACKAGE + ':main'],
'console_scripts': ['{0}={0}.{0}:main'.format(PACKAGE)],
}
PLATFORMS = ['Linux']
KEYWORDS = 'ipsec ike'
INSTALL_REQUIRES = [
x.replace('-','_') for x in read('requirements.txt').split('\n') if x != ''
]
main_py = open(SCRIPT_FILE).read()
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", main_py))
docstrings = re.findall('"""(.*?)"""', main_py, re.DOTALL)
VERSION = metadata['version']
WEBSITE = metadata['website']
LICENSE = metadata['license']
AUTHOR_EMAIL = metadata['author']
AUTHOR, EMAIL = re.match(r'(.*) <(.*)>', AUTHOR_EMAIL).groups()
DESCRIPTION = docstrings[0].strip()
if '\n\n' in DESCRIPTION:
DESCRIPTION, LONG_DESCRIPTION = DESCRIPTION.split('\n\n', 1)
else:
LONG_DESCRIPTION = None
CLASSIFIERS = [
'Development Status :: 3 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: GPL',
'Operating System :: OS Independent',
'Operating System :: POSIX :: Linux',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
]
PARAMS = {
'platforms': PLATFORMS,
'name': PACKAGE,
'version': VERSION,
'description': DESCRIPTION,
'keywords': KEYWORDS,
'long_description': LONG_DESCRIPTION,
'author': AUTHOR,
'author_email': EMAIL,
'url': WEBSITE,
'license': LICENSE,
'packages': PACKAGES,
'package_dir': PACKAGE_DIR,
#'scripts': SCRIPTS,
'entry_points': ENTRY_POINTS,
'provides': PROVIDES,
'requires': INSTALL_REQUIRES,
'install_requires': INSTALL_REQUIRES,
'classifiers': CLASSIFIERS,
}
setup(**PARAMS)
|
"""
Python script 'process_NCEI_03_prcp_180d.py'
by Matthew Garcia, PhD student
Dept. of Forest and Wildlife Ecology
University of Wisconsin - Madison
matt.e.garcia@gmail.com
Copyright (C) 2015-2016 by Matthew Garcia
Licensed Gnu GPL v3; see 'LICENSE_GnuGPLv3.txt' for complete terms
Send questions, bug reports, any related requests to matt.e.garcia@gmail.com
See also 'README.md', 'DISCLAIMER.txt', 'CITATION.txt', 'ACKNOWLEDGEMENTS.txt'
Treat others as you would be treated. Pay it forward. Valar dohaeris.
PURPOSE: Temporal calculation of PRCP 180-day accumulation
DEPENDENCIES: h5py, numpy
'process_NCEI_03_aux' module has its own requirements
USAGE: '$ python process_NCEI_03_prcp_180d.py NCEI_WLS_1983 1983 ./grids'
INPUT: copied '.h5' file from process_NCEI_03_preprocess.py
(with the naming convention 'grids/[YYYYMMDD]_NCEI_grids_2.h5')
OUTPUT: updated daily '.h5' file with new accumulation grid
(with the naming convention 'grids/[YYYYMMDD]_NCEI_grids_2.h5')
year-end '.h5' and '.pickle' files with rolling accounted variable
"""
import sys
import datetime
import glob
import h5py as hdf
import numpy as np
from process_NCEI_03_aux import get_stn_lists, write_stn_lists, \
write_to_file, cube_sum
def message(char_string):
"""
prints a string to the terminal and flushes the buffer
"""
print char_string
sys.stdout.flush()
return
message(' ')
message('process_NCEI_03_prcp_180d.py started at %s' %
datetime.datetime.now().isoformat())
message(' ')
if len(sys.argv) < 4:
message('input warning: no input directory indicated,, using ./grids')
path = './grids'
else:
path = sys.argv[3]
if len(sys.argv) < 3:
message('input error: need year to process')
sys.exit(1)
else:
this_year = int(sys.argv[2])
if len(sys.argv) < 2:
message('input error: need prefix for weather data h5 file')
sys.exit(1)
else:
NCEIfname = sys.argv[1]
h5infname = '%s/../data/%s_processed.h5' % (path, NCEIfname)
message('reading dates information from %s' % h5infname)
with hdf.File(h5infname, 'r') as h5infile:
all_dates = np.copy(h5infile['dates'])
message('- information for %d total dates found' % len(all_dates))
dates = sorted([j for j in all_dates if int(j // 1E4) == this_year])
message('- processing %d dates in %d' % (len(dates), this_year))
message(' ')
prev_year = this_year - 1
vars_files = sorted(glob.glob('%s/*_year_end_prcp_180d.h5' % path))
use_vars_file = False
if len(vars_files) > 0:
for vars_file in vars_files:
if str(prev_year) in vars_file:
use_vars_file = True
varfname = vars_file
break
if use_vars_file:
message('extracting prcp_180d datacube from %s' % varfname)
with hdf.File(varfname, 'r') as h5infile:
nrows = np.copy(h5infile['nrows'])
ncols = np.copy(h5infile['ncols'])
prcp_180d = np.copy(h5infile['prcp_180d'])
message('extracting station lists')
prcp_180d_stns = get_stn_lists(path, prev_year, 'prcp_180d_stns')
else: # otherwise, initialize the variable space(s)
h5infname = '%s/%d_NCEI_grids_2.h5' % (path, dates[0])
message('extracting grid information from %s' % h5infname)
with hdf.File(h5infname, 'r') as h5infile:
nrows = np.copy(h5infile['grid/nrows'])
ncols = np.copy(h5infile['grid/ncols'])
message('establishing prcp_180d datacube')
prcp_180d = np.zeros((180, nrows, ncols))
prcp_180d_stns = []
message(' ')
for date in dates:
h5infname = '%s/%d_NCEI_grids_2.h5' % (path, date)
message('extracting PRCP grid from %s' % h5infname)
with hdf.File(h5infname, 'r') as h5infile:
prcp_stns = np.copy(h5infile['stns/prcp_stns'])
prcp = np.copy(h5infile['grid_prcp'])
#
year = date // 10000
month = (date - (year * 10000)) // 100
day = date - (year * 10000) - (month * 100)
#
grid_prcp_180d, prcp_180d_stns_all, prcp_180d, prcp_180d_stns = \
cube_sum(180, prcp_180d, prcp, prcp_180d_stns, prcp_stns)
message('- calculated updated 180-day running precipitation total, \
mean %.1f' % np.mean(grid_prcp_180d))
#
h5outfname = '%s/%d_NCEI_grids_2.h5' % (path, date)
message('saving grids to %s' % h5outfname)
with hdf.File(h5outfname, 'r+') as h5outfile:
del h5outfile['meta/last_updated']
h5outfile.create_dataset('meta/last_updated',
data=datetime.datetime.now().isoformat())
del h5outfile['meta/at']
outstr = 'prcp_180d'
h5outfile.create_dataset('meta/at', data=outstr)
write_to_file(h5outfile, 'prcp_180d_sum', grid_prcp_180d,
'prcp_180d_stns', prcp_180d_stns_all)
message(' ')
varfname = '%s/%d_year_end_prcp_180d.h5' % (path, this_year)
message('saving variable datacube to %s' % varfname)
with hdf.File(varfname, 'w') as h5outfile:
h5outfile.create_dataset('nrows', data=nrows)
h5outfile.create_dataset('ncols', data=ncols)
h5outfile.create_dataset('prcp_180d', data=prcp_180d,
dtype=np.float32, compression='gzip')
message('saving station lists')
write_stn_lists(path, this_year, 'prcp_180d_stns', prcp_180d_stns)
message('process_NCEI_03_prcp_180d.py completed at %s' %
datetime.datetime.now().isoformat())
message(' ')
sys.exit(0)
|
from gnuradio import gr, gr_unittest
import random, numpy
from gnuradio import digital, blocks, channels
class qa_linear_equalizer(gr_unittest.TestCase):
def unpack_values(self, values_in, bits_per_value, bits_per_symbol):
# verify that 8 is divisible by bits_per_symbol
m = bits_per_value / bits_per_symbol
# print(m)
mask = 2**(bits_per_symbol)-1
if bits_per_value != m*bits_per_symbol:
print("error - bits per symbols must fit nicely into bits_per_value bit values")
return []
num_values = len(values_in)
num_symbols = int(num_values*( m) )
cur_byte = 0
cur_bit = 0
out = []
for i in range(num_symbols):
s = (values_in[cur_byte] >> (bits_per_value-bits_per_symbol-cur_bit)) & mask
out.append(s)
cur_bit += bits_per_symbol
if cur_bit >= bits_per_value:
cur_bit = 0
cur_byte += 1
return out
def map_symbols_to_constellation(self, symbols, cons):
l = list(map(lambda x: cons.points()[x], symbols))
return l
def setUp(self):
random.seed(987654)
self.tb = gr.top_block()
self.num_data = num_data = 10000
self.sps = sps = 4
self.eb = eb = 0.35
self.preamble = preamble = [0x27,0x2F,0x18,0x5D,0x5B,0x2A,0x3F,0x71,0x63,0x3C,0x17,0x0C,0x0A,0x41,0xD6,0x1F,0x4C,0x23,0x65,0x68,0xED,0x1C,0x77,0xA7,0x0E,0x0A,0x9E,0x47,0x82,0xA4,0x57,0x24,]
self.payload_size = payload_size = 300 # bytes
self.data = data = [0]*4+[random.getrandbits(8) for i in range(payload_size)]
self.gain = gain = .001 # LMS gain
self.corr_thresh = corr_thresh = 3e6
self.num_taps = num_taps = 16
def tearDown(self):
self.tb = None
def transform(self, src_data, gain, const):
SRC = blocks.vector_source_c(src_data, False)
EQU = digital.lms_dd_equalizer_cc(4, gain, 1, const.base())
DST = blocks.vector_sink_c()
self.tb.connect(SRC, EQU, DST)
self.tb.run()
return DST.data()
def test_001_identity(self):
# Constant modulus signal so no adjustments
const = digital.constellation_qpsk()
src_data = const.points()*1000
N = 100 # settling time
expected_data = src_data[N:]
result = self.transform(src_data, 0.1, const)[N:]
N = -500
self.assertComplexTuplesAlmostEqual(expected_data[N:], result[N:], 5)
def test_qpsk_3tap_lms_training(self):
# set up fg
gain = 0.01 # LMS gain
num_taps = 16
num_samp = 2000
num_test = 500
cons = digital.constellation_qpsk().base()
rxmod = digital.generic_mod(cons, False, self.sps, True, self.eb, False, False)
modulated_sync_word_pre = digital.modulate_vector_bc(rxmod.to_basic_block(), self.preamble+self.preamble, [1])
modulated_sync_word = modulated_sync_word_pre[86:(512+86)] # compensate for the RRC filter delay
corr_max = numpy.abs(numpy.dot(modulated_sync_word,numpy.conj(modulated_sync_word)))
corr_calc = self.corr_thresh/(corr_max*corr_max)
preamble_symbols = self.map_symbols_to_constellation(self.unpack_values(self.preamble, 8, 2), cons)
alg = digital.adaptive_algorithm_lms(cons, gain).base()
evm = digital.meas_evm_cc(cons, digital.evm_measurement_t.EVM_PERCENT)
leq = digital.linear_equalizer(num_taps, self.sps, alg, False, preamble_symbols, 'corr_est')
correst = digital.corr_est_cc(modulated_sync_word, self.sps, 12, corr_calc, digital.THRESHOLD_ABSOLUTE)
constmod = digital.generic_mod(
constellation=cons,
differential=False,
samples_per_symbol=4,
pre_diff_code=True,
excess_bw=0.35,
verbose=False,
log=False)
chan = channels.channel_model(
noise_voltage=0.0,
frequency_offset=0.0,
epsilon=1.0,
taps=(1.0 + 1.0j, 0.63-.22j, -.1+.07j),
noise_seed=0,
block_tags=False)
vso = blocks.vector_source_b(self.preamble+self.data, True, 1, [])
head = blocks.head(gr.sizeof_float*1, num_samp)
vsi = blocks.vector_sink_f()
self.tb.connect(vso, constmod, chan, correst, leq, evm, head, vsi)
self.tb.run()
# look at the last 1000 samples, should converge quickly, below 5% EVM
upper_bound = list(20.0*numpy.ones((num_test,)))
lower_bound = list(0.0*numpy.zeros((num_test,)))
output_data = vsi.data()
output_data = output_data[-num_test:]
self.assertLess(output_data, upper_bound)
self.assertGreater(output_data, lower_bound)
if __name__ == '__main__':
gr_unittest.run(qa_linear_equalizer)
|
from __future__ import absolute_import
import six
import github.GithubObject
import github.HookResponse
class Hook(github.GithubObject.CompletableGithubObject):
"""
This class represents Hooks. The reference can be found here http://developer.github.com/v3/repos/hooks
"""
def __repr__(self):
return self.get__repr__({"id": self._id.value, "url": self._url.value})
@property
def active(self):
"""
:type: bool
"""
self._completeIfNotSet(self._active)
return self._active.value
@property
def config(self):
"""
:type: dict
"""
self._completeIfNotSet(self._config)
return self._config.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def events(self):
"""
:type: list of string
"""
self._completeIfNotSet(self._events)
return self._events.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def last_response(self):
"""
:type: :class:`github.HookResponse.HookResponse`
"""
self._completeIfNotSet(self._last_response)
return self._last_response.value
@property
def name(self):
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def test_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._test_url)
return self._test_url.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
@property
def ping_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._ping_url)
return self._ping_url.value
def delete(self):
"""
:calls: `DELETE /repos/:owner/:repo/hooks/:id <http://developer.github.com/v3/repos/hooks>`_
:rtype: None
"""
headers, data = self._requester.requestJsonAndCheck("DELETE", self.url)
def edit(
self,
name,
config,
events=github.GithubObject.NotSet,
add_events=github.GithubObject.NotSet,
remove_events=github.GithubObject.NotSet,
active=github.GithubObject.NotSet,
):
"""
:calls: `PATCH /repos/:owner/:repo/hooks/:id <http://developer.github.com/v3/repos/hooks>`_
:param name: string
:param config: dict
:param events: list of string
:param add_events: list of string
:param remove_events: list of string
:param active: bool
:rtype: None
"""
assert isinstance(name, (str, six.text_type)), name
assert isinstance(config, dict), config
assert events is github.GithubObject.NotSet or all(
isinstance(element, (str, six.text_type)) for element in events
), events
assert add_events is github.GithubObject.NotSet or all(
isinstance(element, (str, six.text_type)) for element in add_events
), add_events
assert remove_events is github.GithubObject.NotSet or all(
isinstance(element, (str, six.text_type)) for element in remove_events
), remove_events
assert active is github.GithubObject.NotSet or isinstance(active, bool), active
post_parameters = {
"name": name,
"config": config,
}
if events is not github.GithubObject.NotSet:
post_parameters["events"] = events
if add_events is not github.GithubObject.NotSet:
post_parameters["add_events"] = add_events
if remove_events is not github.GithubObject.NotSet:
post_parameters["remove_events"] = remove_events
if active is not github.GithubObject.NotSet:
post_parameters["active"] = active
headers, data = self._requester.requestJsonAndCheck(
"PATCH", self.url, input=post_parameters
)
self._useAttributes(data)
def test(self):
"""
:calls: `POST /repos/:owner/:repo/hooks/:id/tests <http://developer.github.com/v3/repos/hooks>`_
:rtype: None
"""
headers, data = self._requester.requestJsonAndCheck("POST", self.url + "/tests")
def ping(self):
"""
:calls: `POST /repos/:owner/:repo/hooks/:id/pings <http://developer.github.com/v3/repos/hooks>`_
:rtype: None
"""
headers, data = self._requester.requestJsonAndCheck("POST", self.url + "/pings")
def _initAttributes(self):
self._active = github.GithubObject.NotSet
self._config = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._events = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._last_response = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._test_url = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
self._ping_url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "active" in attributes: # pragma no branch
self._active = self._makeBoolAttribute(attributes["active"])
if "config" in attributes: # pragma no branch
self._config = self._makeDictAttribute(attributes["config"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "events" in attributes: # pragma no branch
self._events = self._makeListOfStringsAttribute(attributes["events"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "last_response" in attributes: # pragma no branch
self._last_response = self._makeClassAttribute(
github.HookResponse.HookResponse, attributes["last_response"]
)
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "test_url" in attributes: # pragma no branch
self._test_url = self._makeStringAttribute(attributes["test_url"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "ping_url" in attributes: # pragma no branch
self._ping_url = self._makeStringAttribute(attributes["ping_url"])
|
def interactiveConsole(a,b=None):
'''
Useful function for debugging
Placing interactiveConsole(locals(),globals()) into code will
drop into an interactive console when run
'''
import code
d = {}
if b:
d.update(b)
d.update(a)
c=code.InteractiveConsole(locals=d)
c.interact()
class SecureDict(object):
__slots__ = ('__items__',)
def __init__(self, *a, **kw):
self.__items__ = dict(*a, **kw)
def getItem(self, item, default = None):
if default!=None:
return self.__items__.get(item,default)
try:
return self.__items__[item]
except KeyError:
raise KeyError('Key Error: %s'%item)
def setItem(self, item, value):
self.__items__[item] = value
def __len__(self):
return len(self.__items__)
def __repr__(self):
return 'SecureDict(%r)' % self.__items__
__str__ = __repr__
def keys(self):
return self.__items__.keys()
def values(self):
return self.__items__.values()
def pop(self,key):
return self.__items__.pop(key)
|
import os
import sys
import string
from samba.net import Net
from samba import enable_net_export_keytab
from samba import tests
from samba.param import LoadParm
enable_net_export_keytab()
def open_bytes(filename):
if sys.version_info[0] == 3:
return open(filename, errors='ignore')
else:
return open(filename, 'rb')
class DCKeytabTests(tests.TestCase):
def setUp(self):
super(DCKeytabTests, self).setUp()
self.lp = LoadParm()
self.lp.load_default()
self.creds = self.insta_creds(template=self.get_credentials())
self.ktfile = os.path.join(self.lp.get('private dir'), 'test.keytab')
self.principal = self.creds.get_principal()
def tearDown(self):
super(DCKeytabTests, self).tearDown()
os.remove(self.ktfile)
def test_export_keytab(self):
net = Net(None, self.lp)
net.export_keytab(keytab=self.ktfile, principal=self.principal)
assert os.path.exists(self.ktfile), 'keytab was not created'
with open_bytes(self.ktfile) as bytes_kt:
result = ''
for c in bytes_kt.read():
if c in string.printable:
result += c
principal_parts = self.principal.split('@')
assert principal_parts[0] in result and \
principal_parts[1] in result, \
'Principal not found in generated keytab'
|
import andbug.command, andbug.screed
import andbug.vm
def stepComplete(t):
t = t[0]
with andbug.screed.section("Single step complete in %s, suspended." % t):
showCallStack(t, 1)
def showCallStack(t, count = 0):
if count >= len(t.frames) or count <= 0:
count = len(t.frames)
for f in t.frames[0:count]:
name = str(f.loc)
f.loc.method.firstLoc
if f.native:
name += ' <native>'
andbug.screed.item(name)
def printValues(dist, name = None):
if name == None:
for key in dist.keys():
print key + ' : ' + str(dist[key])
else :
if dist[name] != None:
print name + ' : ' + str(dist[name])
if (isinstance(dist[name], andbug.vm.Object)):
print "{"
printValues(dist[name].fields)
print "}"
else:
print 'not found \"' + name + '\" variable'
@andbug.command.action('', aliases=('vs',))
def values(ctxt, name = None):
'if you suspend, you print the values.'
with andbug.screed.section('values'):
if ctxt.sess.getSuspendState().isSuspend:
t = ctxt.sess.getSuspendState().getThread()
printValues(t.frames[0].values, name)
else :
print 'Not suspend, you can\'t print values'
@andbug.command.action('<variable name> <value>', aliases=('set', 'sv', ))
def setValues(ctxt, name = None, value = None):
'if you suspend, you can set the values.'
if name == None or value == None:
print 'parameter not enough'
return
with andbug.screed.section('values'):
if ctxt.sess.getSuspendState().isSuspend:
t = ctxt.sess.getSuspendState().getThread()
t.frames[0].setValue(name, value)
else :
print 'Not suspend, you can\'t print values'
@andbug.command.action('[<count/all>]', aliases=('bt',))
def backtrace(ctxt, count = None):
'if you suspend, you print the backtrace.'
with andbug.screed.section('Back Trace'):
if ctxt.sess.getSuspendState().isSuspend:
t = ctxt.sess.getSuspendState().getThread()
if count == 'all' or count == None:
showCallStack(t)
if count.isdigit():
showCallStack(t, int(count))
else :
print 'Not suspend, you can\'t print backtrace'
@andbug.command.action('', aliases=('s',))
def stepover(ctxt, expr=None):
'if you suspend, you can step over.'
with andbug.screed.section('Step Over'):
if ctxt.sess.getSuspendState().isSuspend:
t = ctxt.sess.getSuspendState().getThread()
t.singleStep(func = stepComplete)
else :
print 'Not suspend, you can\'t step'
@andbug.command.action('', aliases=('si',))
def stepinto(ctxt, expr=None):
'if you suspend, you can step into.'
with andbug.screed.section('Step Into'):
if ctxt.sess.getSuspendState().isSuspend:
t = ctxt.sess.getSuspendState().getThread()
t.singleStep(func = stepComplete, stepdepth = 0)
else :
print 'Not suspend, you can\'t step into'
@andbug.command.action('', aliases=('so',))
def stepout(ctxt, expr=None):
'if you suspend, you can step out.'
with andbug.screed.section('Step Out'):
if ctxt.sess.getSuspendState().isSuspend:
t = ctxt.sess.getSuspendState().getThread()
t.singleStep(func = stepComplete, stepdepth = 2)
else :
print 'Not suspend, you can\'t step out'
|
from __future__ import print_function
import sys
from DIRAC.Core.Base import Script
from DIRAC.FrameworkSystem.Client.ProxyUpload import CLIParams, uploadProxy
__RCSID__ = "$Id$"
if __name__ == "__main__":
cliParams = CLIParams()
cliParams.registerCLISwitches()
Script.parseCommandLine()
retVal = uploadProxy(cliParams)
if not retVal['OK']:
print(retVal['Message'])
sys.exit(1)
sys.exit(0)
|
type = "passive"
def handler(fit, container, context):
level = container.level if "skill" in context else 1
fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Missile Launcher Operation"),
"cpu", container.getModifiedItemAttr("cpuNeedBonus") * level)
|
import mpi4py, petsc4py
from petsc4py import PETSc
import numpy as np
import pytest
import gridPy
import geometryPy
petsc4py.init()
petscComm = petsc4py.PETSc.COMM_WORLD
comm = petscComm.tompi4py()
rank = comm.Get_rank()
numProcs = comm.Get_size()
PETSc.Sys.Print("Using %d procs" % numProcs)
N1 = int(pytest.config.getoption('N1'))
N2 = int(pytest.config.getoption('N2'))
N3 = int(pytest.config.getoption('N3'))
dim = int(pytest.config.getoption('dim'))
blackHoleSpin = float(pytest.config.getoption('blackHoleSpin'))
hSlope = float(pytest.config.getoption('hSlope'))
numGhost = 3
X1Start = 0.; X1End = 1.
X2Start = 0.; X2End = 1.
X3Start = 0.; X3End = 1.
periodicBoundariesX1 = False
periodicBoundariesX2 = False
periodicBoundariesX3 = False
XCoords = gridPy.coordinatesGridPy(N1, N2, N3,
dim, numGhost,
X1Start, X1End,
X2Start, X2End,
X3Start, X3End
)
X1Coords, X2Coords, X3Coords = XCoords.getCoords(gridPy.CENTER)
geomMinkowski = geometryPy.geometryPy(geometryPy.MINKOWSKI,
0., 0.,
XCoords
)
def test_minkowski_params():
np.testing.assert_equal(N1, geomMinkowski.N1)
np.testing.assert_equal(N2, geomMinkowski.N2)
np.testing.assert_equal(N3, geomMinkowski.N3)
np.testing.assert_equal(dim, geomMinkowski.dim)
np.testing.assert_equal(numGhost, geomMinkowski.numGhost)
def test_minkowski_gCov():
np.testing.assert_allclose(geomMinkowski.gCov[0][0], -1.)
np.testing.assert_allclose(geomMinkowski.gCov[0][1], 0.)
np.testing.assert_allclose(geomMinkowski.gCov[0][2], 0.)
np.testing.assert_allclose(geomMinkowski.gCov[0][3], 0.)
np.testing.assert_allclose(geomMinkowski.gCov[1][0], 0.)
np.testing.assert_allclose(geomMinkowski.gCov[1][1], 1.)
np.testing.assert_allclose(geomMinkowski.gCov[1][2], 0.)
np.testing.assert_allclose(geomMinkowski.gCov[1][3], 0.)
np.testing.assert_allclose(geomMinkowski.gCov[2][0], 0.)
np.testing.assert_allclose(geomMinkowski.gCov[2][1], 0.)
np.testing.assert_allclose(geomMinkowski.gCov[2][2], 1.)
np.testing.assert_allclose(geomMinkowski.gCov[2][3], 0.)
np.testing.assert_allclose(geomMinkowski.gCov[3][0], 0.)
np.testing.assert_allclose(geomMinkowski.gCov[3][1], 0.)
np.testing.assert_allclose(geomMinkowski.gCov[3][2], 0.)
np.testing.assert_allclose(geomMinkowski.gCov[3][3], 1.)
def test_minkowski_gCon():
np.testing.assert_allclose(geomMinkowski.gCon[0][0], -1.)
np.testing.assert_allclose(geomMinkowski.gCon[0][1], 0.)
np.testing.assert_allclose(geomMinkowski.gCon[0][2], 0.)
np.testing.assert_allclose(geomMinkowski.gCon[0][3], 0.)
np.testing.assert_allclose(geomMinkowski.gCon[1][0], 0.)
np.testing.assert_allclose(geomMinkowski.gCon[1][1], 1.)
np.testing.assert_allclose(geomMinkowski.gCon[1][2], 0.)
np.testing.assert_allclose(geomMinkowski.gCon[1][3], 0.)
np.testing.assert_allclose(geomMinkowski.gCon[2][0], 0.)
np.testing.assert_allclose(geomMinkowski.gCon[2][1], 0.)
np.testing.assert_allclose(geomMinkowski.gCon[2][2], 1.)
np.testing.assert_allclose(geomMinkowski.gCon[2][3], 0.)
np.testing.assert_allclose(geomMinkowski.gCon[3][0], 0.)
np.testing.assert_allclose(geomMinkowski.gCon[3][1], 0.)
np.testing.assert_allclose(geomMinkowski.gCon[3][2], 0.)
np.testing.assert_allclose(geomMinkowski.gCon[3][3], 1.)
def test_minkowski_g():
np.testing.assert_allclose(geomMinkowski.g, 1.)
def test_minkowski_alpha():
np.testing.assert_allclose(geomMinkowski.g, 1.)
geomKerrSchild = geometryPy.geometryPy(geometryPy.MODIFIED_KERR_SCHILD,
blackHoleSpin, hSlope,
XCoords
)
r = np.exp(X1Coords)
theta = np.pi*X2Coords + 0.5*(1. - hSlope)*np.sin(2.*np.pi*X2Coords)
phi = 2*np.pi*X3Coords
sigma = r**2. + (blackHoleSpin*np.cos(theta) )**2.
delta = r**2. - 2*r + blackHoleSpin**2.
A = (r**2. + blackHoleSpin**2.)**2.
sigmaMinus = r**2. - (blackHoleSpin*np.cos(theta) )**2.
dr_dX1 = np.exp(X1Coords)
dtheta_dX2 = np.pi*(1. + (1. - hSlope)*np.cos(2.*np.pi*X2Coords))
d2theta_dX22 = -2.*np.pi*np.pi*(1-hSlope)*np.sin(2.*np.pi*X2Coords);
N1Total = XCoords.N1Total
N2Total = XCoords.N2Total
N3Total = XCoords.N3Total
gCovCheck = np.zeros([4, 4, N3Total, N2Total, N1Total])
gConCheck = np.zeros([4, 4, N3Total, N2Total, N1Total])
gCheck = np.zeros([N3Total, N2Total, N1Total])
gCovCheck[0][0] = -(1. - 2*r/sigma) # dt^2
gCovCheck[0][1] = (2*r/sigma) * dr_dX1 # dt dX1
gCovCheck[0][2] = 0. # dt dX2
gCovCheck[0][3] = -(2.*blackHoleSpin*r*np.sin(theta)**2./sigma) # dt dphi
gCovCheck[1][0] = gCovCheck[0][1]
gCovCheck[1][1] = (1. + 2*r/sigma) * dr_dX1**2. # dX1 dX1
gCovCheck[1][2] = 0.
gCovCheck[1][3] = -blackHoleSpin * (1. + 2*r/sigma)*np.sin(theta)**2. \
* dr_dX1 # dX1 dphi
gCovCheck[2][0] = gCovCheck[0][2]
gCovCheck[2][1] = gCovCheck[1][2]
gCovCheck[2][2] = sigma * dtheta_dX2 * dtheta_dX2 # dX2 dX2
gCovCheck[2][3] = 0. # dX2 dphi
gCovCheck[3][0] = gCovCheck[0][3]
gCovCheck[3][1] = gCovCheck[1][3]
gCovCheck[3][2] = gCovCheck[2][3]
gCovCheck[3][3] = np.sin(theta)**2. \
* (sigma + blackHoleSpin**2. \
* (1. + 2.*r/sigma)*np.sin(theta)**2. \
) # dphi dphi
gCovPerZone = np.zeros([4, 4])
for k in xrange(N3Total):
for j in xrange(N2Total):
for i in xrange(N1Total):
gCovPerZone[0, 0] = gCovCheck[0][0][k, j, i]
gCovPerZone[0, 1] = gCovCheck[0][1][k, j, i]
gCovPerZone[0, 2] = gCovCheck[0][2][k, j, i]
gCovPerZone[0, 3] = gCovCheck[0][3][k, j, i]
gCovPerZone[1, 0] = gCovCheck[1][0][k, j, i]
gCovPerZone[1, 1] = gCovCheck[1][1][k, j, i]
gCovPerZone[1, 2] = gCovCheck[1][2][k, j, i]
gCovPerZone[1, 3] = gCovCheck[1][3][k, j, i]
gCovPerZone[2, 0] = gCovCheck[2][0][k, j, i]
gCovPerZone[2, 1] = gCovCheck[2][1][k, j, i]
gCovPerZone[2, 2] = gCovCheck[2][2][k, j, i]
gCovPerZone[2, 3] = gCovCheck[2][3][k, j, i]
gCovPerZone[3, 0] = gCovCheck[3][0][k, j, i]
gCovPerZone[3, 1] = gCovCheck[3][1][k, j, i]
gCovPerZone[3, 2] = gCovCheck[3][2][k, j, i]
gCovPerZone[3, 3] = gCovCheck[3][3][k, j, i]
gConPerZone = np.linalg.inv(gCovPerZone)
gCheck[k, j, i] = np.sqrt(-np.linalg.det(gCovPerZone))
gConCheck[0][0][k, j, i] = gConPerZone[0, 0]
gConCheck[0][1][k, j, i] = gConPerZone[0, 1]
gConCheck[0][2][k, j, i] = gConPerZone[0, 2]
gConCheck[0][3][k, j, i] = gConPerZone[0, 3]
gConCheck[1][0][k, j, i] = gConPerZone[1, 0]
gConCheck[1][1][k, j, i] = gConPerZone[1, 1]
gConCheck[1][2][k, j, i] = gConPerZone[1, 2]
gConCheck[1][3][k, j, i] = gConPerZone[1, 3]
gConCheck[2][0][k, j, i] = gConPerZone[2, 0]
gConCheck[2][1][k, j, i] = gConPerZone[2, 1]
gConCheck[2][2][k, j, i] = gConPerZone[2, 2]
gConCheck[2][3][k, j, i] = gConPerZone[2, 3]
gConCheck[3][0][k, j, i] = gConPerZone[3, 0]
gConCheck[3][1][k, j, i] = gConPerZone[3, 1]
gConCheck[3][2][k, j, i] = gConPerZone[3, 2]
gConCheck[3][3][k, j, i] = gConPerZone[3, 3]
alphaCheck = 1./np.sqrt(-gConCheck[0][0])
geomKerrSchild.computeConnectionCoeffs()
gammaUpDownDownCheck = np.zeros([4, 4, 4, N3Total, N2Total, N1Total])
gammaUpDownDownCheck[0][0][0] = 2.*r*sigmaMinus / sigma**3.
gammaUpDownDownCheck[0][0][1] = r * (2*r + sigma) * sigmaMinus / sigma**3.
gammaUpDownDownCheck[0][0][2] = -blackHoleSpin**2. * r * np.sin(2.*theta) \
* dtheta_dX2 / sigma**2.
gammaUpDownDownCheck[0][0][3] = -2. * blackHoleSpin * r * np.sin(theta)**2. \
* sigmaMinus / sigma**3.
gammaUpDownDownCheck[0][1][0] = gammaUpDownDownCheck[0][0][1]
gammaUpDownDownCheck[0][1][1] = 2.*r**2.*(r**4. + r*sigmaMinus
- (blackHoleSpin*np.cos(theta))**4.
) / sigma**3.
gammaUpDownDownCheck[0][1][2] = -blackHoleSpin**2. * r**2. * np.sin(2.*theta) \
* dtheta_dX2 / sigma**2.
gammaUpDownDownCheck[0][1][3] = blackHoleSpin * r * (-r*(r**3. + 2*sigmaMinus)
+ ( blackHoleSpin
* np.cos(theta)
)**4.
) * np.sin(theta)**2. \
/ sigma**3.
gammaUpDownDownCheck[0][2][0] = gammaUpDownDownCheck[0][0][2]
gammaUpDownDownCheck[0][2][1] = gammaUpDownDownCheck[0][1][2]
gammaUpDownDownCheck[0][2][2] = -2. * r**2. * dtheta_dX2**2. / sigma
gammaUpDownDownCheck[0][2][3] = blackHoleSpin**3. * r * np.sin(theta)**2. \
* np.sin(2.*theta) * dtheta_dX2 / sigma**2.
gammaUpDownDownCheck[0][3][0] = gammaUpDownDownCheck[0][0][3]
gammaUpDownDownCheck[0][3][1] = gammaUpDownDownCheck[0][1][3]
gammaUpDownDownCheck[0][3][2] = gammaUpDownDownCheck[0][2][3]
gammaUpDownDownCheck[0][3][3] = 2.*r*np.sin(theta)**2. \
* (-r*sigma**2. +
blackHoleSpin**2.*np.sin(theta)**2.*sigmaMinus
) / sigma**3.
gammaUpDownDownCheck[1][0][0] = (blackHoleSpin**2. + r*(-2. + r)) \
* sigmaMinus / (r * sigma**3.)
gammaUpDownDownCheck[1][0][1] = sigmaMinus \
* ( -2.*r + (blackHoleSpin*np.sin(theta))**2.) \
/ sigma**3.
gammaUpDownDownCheck[1][0][2] = 0.
gammaUpDownDownCheck[1][0][3] = -blackHoleSpin * np.sin(theta)**2. \
* (blackHoleSpin**2. + r*(-2. + r)) * sigmaMinus \
/ (r * sigma**3.)
gammaUpDownDownCheck[1][1][0] = gammaUpDownDownCheck[1][0][1]
gammaUpDownDownCheck[1][1][1] = \
( r**4.*(-2. + r)*(1. + r)
+ blackHoleSpin**2. * ( blackHoleSpin**2.*r*(1. + 3.*r)*np.cos(theta)**4. \
+ (blackHoleSpin*np.cos(theta))**4. * np.cos(theta)**2. \
+ r**3.*np.sin(theta)**2. \
+ r*np.cos(theta)**2. \
*(2.*r + 3.*r**3. - (blackHoleSpin*np.sin(theta))**2.)
)
) / sigma**3.
gammaUpDownDownCheck[1][1][2] = -blackHoleSpin**2. * dtheta_dX2 \
* np.sin(2.*theta) \
/ (blackHoleSpin**2. + 2.*r**2.
+ blackHoleSpin**2.*np.cos(2.*theta)
)
gammaUpDownDownCheck[1][1][3] = \
blackHoleSpin * np.sin(theta)**2. * (blackHoleSpin**4. * r * np.cos(theta)**4.
+ r**2*(2.*r + r**3.
-(blackHoleSpin*np.sin(theta))**2.
) \
+ (blackHoleSpin*np.cos(theta))**2. \
* (2.*r*(-1. + r**2.)
+ (blackHoleSpin*np.sin(theta))**2.
)
) / sigma**3.
gammaUpDownDownCheck[1][2][0] = gammaUpDownDownCheck[1][0][2]
gammaUpDownDownCheck[1][2][1] = gammaUpDownDownCheck[1][1][2]
gammaUpDownDownCheck[1][2][2] = -(blackHoleSpin**2. + r*(-2. + r)) \
* dtheta_dX2**2. / sigma
gammaUpDownDownCheck[1][2][3] = 0.
gammaUpDownDownCheck[1][3][0] = gammaUpDownDownCheck[1][0][3]
gammaUpDownDownCheck[1][3][1] = gammaUpDownDownCheck[1][1][3]
gammaUpDownDownCheck[1][3][2] = gammaUpDownDownCheck[1][2][3]
gammaUpDownDownCheck[1][3][3] = \
-(blackHoleSpin**2. + r*(-2. + r) ) * np.sin(theta)**2. \
* (r * sigma**2. -
blackHoleSpin**2.*sigmaMinus*np.sin(theta)**2.
) / (r * sigma**3.)
gammaUpDownDownCheck[2][0][0] = -blackHoleSpin**2. * r * np.sin(2.*theta) \
/ sigma**3. / dtheta_dX2
gammaUpDownDownCheck[2][0][1] = r * gammaUpDownDownCheck[2][0][0]
gammaUpDownDownCheck[2][0][2] = 0.
gammaUpDownDownCheck[2][0][3] = blackHoleSpin*r*(blackHoleSpin**2. + r**2.) \
* np.sin(2.*theta) / sigma**3. / dtheta_dX2
gammaUpDownDownCheck[2][1][0] = gammaUpDownDownCheck[2][0][1]
gammaUpDownDownCheck[2][1][1] = r**2. * gammaUpDownDownCheck[2][0][0]
gammaUpDownDownCheck[2][1][2] = r**2. / sigma
gammaUpDownDownCheck[2][1][3] = (blackHoleSpin*r*np.cos(theta)*np.sin(theta)
*(r**3.*(2. + r)
+ blackHoleSpin**2.
*( 2.*r*(1. + r)*np.cos(theta)**2.
+ blackHoleSpin**2.*np.cos(theta)**4.
+ 2.*r*np.sin(theta)**2.
)
)
) / sigma**3. / dtheta_dX2
gammaUpDownDownCheck[2][2][0] = gammaUpDownDownCheck[2][0][2]
gammaUpDownDownCheck[2][2][1] = gammaUpDownDownCheck[2][1][2]
gammaUpDownDownCheck[2][2][2] = -blackHoleSpin**2.*np.cos(theta)*np.sin(theta) \
*dtheta_dX2/sigma + d2theta_dX22/dtheta_dX2
gammaUpDownDownCheck[2][2][3] = 0.
gammaUpDownDownCheck[2][3][0] = gammaUpDownDownCheck[2][0][3]
gammaUpDownDownCheck[2][3][1] = gammaUpDownDownCheck[2][1][3]
gammaUpDownDownCheck[2][3][2] = gammaUpDownDownCheck[2][2][3]
gammaUpDownDownCheck[2][3][3] = \
-np.cos(theta)*np.sin(theta) \
*(sigma**3. + (blackHoleSpin*np.sin(theta))**2. \
* sigma*(r*(4. + r) + (blackHoleSpin*np.cos(theta)**2.)) \
+ 2.*r*(blackHoleSpin * np.sin(theta))**4. \
) / sigma**3. / dtheta_dX2
gammaUpDownDownCheck[3][0][0] = blackHoleSpin * sigmaMinus / sigma**3.
gammaUpDownDownCheck[3][0][1] = r * gammaUpDownDownCheck[3][0][0]
gammaUpDownDownCheck[3][0][2] = -2.*blackHoleSpin*r*np.cos(theta) \
* dtheta_dX2 / (np.sin(theta) * sigma**2.)
gammaUpDownDownCheck[3][0][3] = -blackHoleSpin**2. * np.sin(theta)**2. \
* sigmaMinus / sigma**3.
gammaUpDownDownCheck[3][1][0] = gammaUpDownDownCheck[3][0][1]
gammaUpDownDownCheck[3][1][1] = blackHoleSpin * r**2. * sigmaMinus \
/ sigma**3.
gammaUpDownDownCheck[3][1][2] = -2.*blackHoleSpin*r \
*(blackHoleSpin**2. + 2.*r*(2. + r)
+ blackHoleSpin**2. * np.cos(2.*theta)
) * np.cos(theta) * dtheta_dX2 \
/ (np.sin(theta) \
* (blackHoleSpin**2. + 2.*r**2.
+ blackHoleSpin**2.*np.cos(2.*theta)
)**2.
)
gammaUpDownDownCheck[3][1][3] = \
r*(r*sigma**2. - (blackHoleSpin*np.sin(theta))**2.*sigmaMinus)/sigma**3.
gammaUpDownDownCheck[3][2][0] = gammaUpDownDownCheck[3][0][2]
gammaUpDownDownCheck[3][2][1] = gammaUpDownDownCheck[3][1][2]
gammaUpDownDownCheck[3][2][2] = -blackHoleSpin * r * dtheta_dX2**2./sigma
gammaUpDownDownCheck[3][2][3] = \
dtheta_dX2*(.25*(blackHoleSpin**2.
+ 2.*r**2. + blackHoleSpin**2.*np.cos(2.*theta)
)**2. * np.cos(theta)/np.sin(theta)
+ blackHoleSpin**2. * r * np.sin(2.*theta)
)/sigma**2.
gammaUpDownDownCheck[3][3][0] = gammaUpDownDownCheck[3][0][3]
gammaUpDownDownCheck[3][3][1] = gammaUpDownDownCheck[3][1][3]
gammaUpDownDownCheck[3][3][2] = gammaUpDownDownCheck[3][2][3]
gammaUpDownDownCheck[3][3][3] = \
(-blackHoleSpin * r * np.sin(theta)**2. * sigma**2. \
+ blackHoleSpin**3. * np.sin(theta)**4. * sigmaMinus) / sigma**3.
def test_modifiedKerrSchild_params():
np.testing.assert_equal(N1, geomKerrSchild.N1)
np.testing.assert_equal(N2, geomKerrSchild.N2)
np.testing.assert_equal(N3, geomKerrSchild.N3)
np.testing.assert_equal(dim, geomKerrSchild.dim)
np.testing.assert_equal(numGhost, geomKerrSchild.numGhost)
def test_modifiedKerrSchild_xCoords():
np.testing.assert_allclose(r, geomKerrSchild.xCoords[0])
np.testing.assert_allclose(theta, geomKerrSchild.xCoords[1])
np.testing.assert_allclose(phi, geomKerrSchild.xCoords[2])
def test_modifiedKerrSchild_gCov():
np.testing.assert_allclose(gCovCheck[0][0], geomKerrSchild.gCov[0][0])
np.testing.assert_allclose(gCovCheck[0][1], geomKerrSchild.gCov[0][1])
np.testing.assert_allclose(gCovCheck[0][2], geomKerrSchild.gCov[0][2])
np.testing.assert_allclose(gCovCheck[0][3], geomKerrSchild.gCov[0][3])
np.testing.assert_allclose(gCovCheck[1][0], geomKerrSchild.gCov[1][0])
np.testing.assert_allclose(gCovCheck[1][1], geomKerrSchild.gCov[1][1])
np.testing.assert_allclose(gCovCheck[1][2], geomKerrSchild.gCov[1][2])
np.testing.assert_allclose(gCovCheck[1][3], geomKerrSchild.gCov[1][3])
np.testing.assert_allclose(gCovCheck[2][0], geomKerrSchild.gCov[2][0])
np.testing.assert_allclose(gCovCheck[2][1], geomKerrSchild.gCov[2][1])
np.testing.assert_allclose(gCovCheck[2][2], geomKerrSchild.gCov[2][2])
np.testing.assert_allclose(gCovCheck[2][3], geomKerrSchild.gCov[2][3])
np.testing.assert_allclose(gCovCheck[3][0], geomKerrSchild.gCov[3][0])
np.testing.assert_allclose(gCovCheck[3][1], geomKerrSchild.gCov[3][1])
np.testing.assert_allclose(gCovCheck[3][2], geomKerrSchild.gCov[3][2])
np.testing.assert_allclose(gCovCheck[3][3], geomKerrSchild.gCov[3][3])
def test_modifiedKerrSchild_gCon():
np.testing.assert_allclose(gConCheck[0][0], geomKerrSchild.gCon[0][0])
np.testing.assert_allclose(gConCheck[0][1], geomKerrSchild.gCon[0][1])
np.testing.assert_allclose(gConCheck[0][2], geomKerrSchild.gCon[0][2])
np.testing.assert_allclose(gConCheck[0][3], geomKerrSchild.gCon[0][3],
atol=1e-14
)
np.testing.assert_allclose(gConCheck[1][0], geomKerrSchild.gCon[1][0])
np.testing.assert_allclose(gConCheck[1][1], geomKerrSchild.gCon[1][1])
np.testing.assert_allclose(gConCheck[1][2], geomKerrSchild.gCon[1][2])
np.testing.assert_allclose(gConCheck[1][3], geomKerrSchild.gCon[1][3])
np.testing.assert_allclose(gConCheck[2][0], geomKerrSchild.gCon[2][0])
np.testing.assert_allclose(gConCheck[2][1], geomKerrSchild.gCon[2][1])
np.testing.assert_allclose(gConCheck[2][2], geomKerrSchild.gCon[2][2])
np.testing.assert_allclose(gConCheck[2][3], geomKerrSchild.gCon[2][3])
np.testing.assert_allclose(gConCheck[3][0], geomKerrSchild.gCon[3][0],
atol=1e-14
)
np.testing.assert_allclose(gConCheck[3][1], geomKerrSchild.gCon[3][1])
np.testing.assert_allclose(gConCheck[3][2], geomKerrSchild.gCon[3][2])
np.testing.assert_allclose(gConCheck[3][3], geomKerrSchild.gCon[3][3])
def test_modifiedKerrSchild_g():
np.testing.assert_allclose(gCheck, geomKerrSchild.g)
def test_modifiedKerrSchild_alpha():
np.testing.assert_allclose(alphaCheck, geomKerrSchild.alpha)
def test_modifiedKerrSchild_gammaUpDownDown():
np.testing.assert_allclose( gammaUpDownDownCheck[0][0][0],
geomKerrSchild.gammaUpDownDown[0][0][0]
)
np.testing.assert_allclose( gammaUpDownDownCheck[0][0][1],
geomKerrSchild.gammaUpDownDown[0][0][1]
)
np.testing.assert_allclose( gammaUpDownDownCheck[0][0][2],
geomKerrSchild.gammaUpDownDown[0][0][2],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[0][0][3],
geomKerrSchild.gammaUpDownDown[0][0][3]
)
np.testing.assert_allclose( gammaUpDownDownCheck[0][1][0],
geomKerrSchild.gammaUpDownDown[0][1][0]
)
np.testing.assert_allclose( gammaUpDownDownCheck[0][1][1],
geomKerrSchild.gammaUpDownDown[0][1][1]
)
np.testing.assert_allclose( gammaUpDownDownCheck[0][1][2],
geomKerrSchild.gammaUpDownDown[0][1][2],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[0][1][3],
geomKerrSchild.gammaUpDownDown[0][1][3]
)
np.testing.assert_allclose( gammaUpDownDownCheck[0][2][0],
geomKerrSchild.gammaUpDownDown[0][2][0],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[0][2][1],
geomKerrSchild.gammaUpDownDown[0][2][1],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[0][2][2],
geomKerrSchild.gammaUpDownDown[0][2][2]
)
np.testing.assert_allclose( gammaUpDownDownCheck[0][2][3],
geomKerrSchild.gammaUpDownDown[0][2][3],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[0][3][0],
geomKerrSchild.gammaUpDownDown[0][3][0]
)
np.testing.assert_allclose( gammaUpDownDownCheck[0][3][1],
geomKerrSchild.gammaUpDownDown[0][3][1],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[0][3][2],
geomKerrSchild.gammaUpDownDown[0][3][2],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[0][3][3],
geomKerrSchild.gammaUpDownDown[0][3][3]
)
np.testing.assert_allclose( gammaUpDownDownCheck[1][0][0],
geomKerrSchild.gammaUpDownDown[1][0][0]
)
np.testing.assert_allclose( gammaUpDownDownCheck[1][0][1],
geomKerrSchild.gammaUpDownDown[1][0][1]
)
np.testing.assert_allclose( gammaUpDownDownCheck[1][0][2],
geomKerrSchild.gammaUpDownDown[1][0][2],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[1][0][3],
geomKerrSchild.gammaUpDownDown[1][0][3]
)
np.testing.assert_allclose( gammaUpDownDownCheck[1][1][0],
geomKerrSchild.gammaUpDownDown[1][1][0]
)
np.testing.assert_allclose( gammaUpDownDownCheck[1][1][1],
geomKerrSchild.gammaUpDownDown[1][1][1],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[1][1][2],
geomKerrSchild.gammaUpDownDown[1][1][2],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[1][1][3],
geomKerrSchild.gammaUpDownDown[1][1][3]
)
np.testing.assert_allclose( gammaUpDownDownCheck[1][2][0],
geomKerrSchild.gammaUpDownDown[1][2][0],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[1][2][1],
geomKerrSchild.gammaUpDownDown[1][2][1],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[1][2][2],
geomKerrSchild.gammaUpDownDown[1][2][2]
)
np.testing.assert_allclose( gammaUpDownDownCheck[1][2][3],
geomKerrSchild.gammaUpDownDown[1][2][3],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[1][3][0],
geomKerrSchild.gammaUpDownDown[1][3][0]
)
np.testing.assert_allclose( gammaUpDownDownCheck[1][3][1],
geomKerrSchild.gammaUpDownDown[1][3][1],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[1][3][2],
geomKerrSchild.gammaUpDownDown[1][3][2],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[1][3][3],
geomKerrSchild.gammaUpDownDown[1][3][3]
)
np.testing.assert_allclose( gammaUpDownDownCheck[2][0][0],
geomKerrSchild.gammaUpDownDown[2][0][0],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[2][0][1],
geomKerrSchild.gammaUpDownDown[2][0][1],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[2][0][2],
geomKerrSchild.gammaUpDownDown[2][0][2]
)
np.testing.assert_allclose( gammaUpDownDownCheck[2][0][3],
geomKerrSchild.gammaUpDownDown[2][0][3],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[2][1][0],
geomKerrSchild.gammaUpDownDown[2][1][0],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[2][1][1],
geomKerrSchild.gammaUpDownDown[2][1][1],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[2][1][2],
geomKerrSchild.gammaUpDownDown[2][1][2]
)
np.testing.assert_allclose( gammaUpDownDownCheck[2][1][3],
geomKerrSchild.gammaUpDownDown[2][1][3],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[2][2][0],
geomKerrSchild.gammaUpDownDown[2][2][0],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[2][2][1],
geomKerrSchild.gammaUpDownDown[2][2][1],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[2][2][2],
geomKerrSchild.gammaUpDownDown[2][2][2]
)
np.testing.assert_allclose( gammaUpDownDownCheck[2][2][3],
geomKerrSchild.gammaUpDownDown[2][2][3],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[2][3][0],
geomKerrSchild.gammaUpDownDown[2][3][0],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[2][3][1],
geomKerrSchild.gammaUpDownDown[2][3][1],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[2][3][2],
geomKerrSchild.gammaUpDownDown[2][3][2]
)
np.testing.assert_allclose( gammaUpDownDownCheck[2][3][3],
geomKerrSchild.gammaUpDownDown[2][3][3],
atol=3e-3
)
np.testing.assert_allclose( gammaUpDownDownCheck[3][0][0],
geomKerrSchild.gammaUpDownDown[3][0][0]
)
np.testing.assert_allclose( gammaUpDownDownCheck[3][0][1],
geomKerrSchild.gammaUpDownDown[3][0][1],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[3][0][2],
geomKerrSchild.gammaUpDownDown[3][0][2]
)
np.testing.assert_allclose( gammaUpDownDownCheck[3][0][3],
geomKerrSchild.gammaUpDownDown[3][0][3]
)
np.testing.assert_allclose( gammaUpDownDownCheck[3][1][0],
geomKerrSchild.gammaUpDownDown[3][1][0]
)
np.testing.assert_allclose( gammaUpDownDownCheck[3][1][1],
geomKerrSchild.gammaUpDownDown[3][1][1],
atol=1e-7
)
np.testing.assert_allclose( gammaUpDownDownCheck[3][1][2],
geomKerrSchild.gammaUpDownDown[3][1][2]
)
np.testing.assert_allclose( gammaUpDownDownCheck[3][1][3],
geomKerrSchild.gammaUpDownDown[3][1][3]
)
np.testing.assert_allclose( gammaUpDownDownCheck[3][2][0],
geomKerrSchild.gammaUpDownDown[3][2][0]
)
np.testing.assert_allclose( gammaUpDownDownCheck[3][2][1],
geomKerrSchild.gammaUpDownDown[3][2][1]
)
np.testing.assert_allclose( gammaUpDownDownCheck[3][2][2],
geomKerrSchild.gammaUpDownDown[3][2][2]
)
np.testing.assert_allclose( gammaUpDownDownCheck[3][2][3],
geomKerrSchild.gammaUpDownDown[3][2][3]
)
np.testing.assert_allclose( gammaUpDownDownCheck[3][3][0],
geomKerrSchild.gammaUpDownDown[3][3][0]
)
np.testing.assert_allclose( gammaUpDownDownCheck[3][3][1],
geomKerrSchild.gammaUpDownDown[3][3][1]
)
np.testing.assert_allclose( gammaUpDownDownCheck[3][3][2],
geomKerrSchild.gammaUpDownDown[3][3][2]
)
np.testing.assert_allclose( gammaUpDownDownCheck[3][3][3],
geomKerrSchild.gammaUpDownDown[3][3][3]
)
|
import math
from gi.repository import Gtk
class Eye(Gtk.DrawingArea):
def __init__(self, fill_color):
Gtk.DrawingArea.__init__(self)
self.connect("draw", self.draw)
self.x, self.y = 0, 0
self.fill_color = fill_color
def has_padding(self):
return True
def has_left_center_right(self):
return False
def look_at(self, x, y):
self.x = x
self.y = y
self.queue_draw()
def look_ahead(self):
self.x = None
self.y = None
self.queue_draw()
# Thanks to xeyes :)
def computePupil(self):
a = self.get_allocation()
if self.x is None or self.y is None:
# look ahead, but not *directly* in the middle
pw = self.get_parent().get_allocation().width
if a.x + a.width // 2 < pw // 2:
cx = a.width * 0.6
else:
cx = a.width * 0.4
return cx, a.height * 0.6
EYE_X, EYE_Y = self.translate_coordinates(
self.get_toplevel(), a.width // 2, a.height // 2)
EYE_HWIDTH = a.width
EYE_HHEIGHT = a.height
BALL_DIST = EYE_HWIDTH / 4
dx = self.x - EYE_X
dy = self.y - EYE_Y
if dx or dy:
angle = math.atan2(dy, dx)
cosa = math.cos(angle)
sina = math.sin(angle)
h = math.hypot(EYE_HHEIGHT * cosa, EYE_HWIDTH * sina)
x = (EYE_HWIDTH * EYE_HHEIGHT) * cosa / h
y = (EYE_HWIDTH * EYE_HHEIGHT) * sina / h
dist = BALL_DIST * math.hypot(x, y)
if dist < math.hypot(dx, dy):
dx = dist * cosa
dy = dist * sina
return a.width // 2 + dx, a.height // 2 + dy
def draw(self, widget, cr):
bounds = self.get_allocation()
eyeSize = min(bounds.width, bounds.height)
outlineWidth = eyeSize / 20.0
pupilSize = eyeSize / 10.0
pupilX, pupilY = self.computePupil()
dX = pupilX - bounds.width / 2.
dY = pupilY - bounds.height / 2.
distance = math.sqrt(dX * dX + dY * dY)
limit = eyeSize // 2 - outlineWidth * 2 - pupilSize
if distance > limit:
pupilX = bounds.width // 2 + dX * limit // distance
pupilY = bounds.height // 2 + dY * limit // distance
# background
cr.set_source_rgba(*self.fill_color.get_rgba())
cr.rectangle(0, 0, bounds.width, bounds.height)
cr.fill()
# eye ball
cr.arc(bounds.width // 2, bounds.height // 2,
eyeSize // 2 - outlineWidth // 2, 0, 2 * math.pi)
cr.set_source_rgb(1, 1, 1)
cr.fill()
# outline
cr.set_line_width(outlineWidth)
cr.arc(bounds.width // 2, bounds.height // 2,
eyeSize // 2 - outlineWidth // 2, 0, 2 * math.pi)
cr.set_source_rgb(0, 0, 0)
cr.stroke()
# pupil
cr.arc(pupilX, pupilY, pupilSize, 0, 2 * math.pi)
cr.set_source_rgb(0, 0, 0)
cr.fill()
return True
|
'''This module contains the ChatTextEdit class'''
import logging
import PyQt4.QtGui as QtGui
import PyQt4.QtCore as QtCore
from e3.common import MessageFormatter
import gui
from gui.base import Plus
from gui.base import Desktop
log = logging.getLogger('qt4ui.widgets.ChatOutput')
class ChatOutput (gui.base.OutputText, QtGui.QTextBrowser):
'''A widget which displays various messages of a conversation'''
NAME = 'Output Text'
DESCRIPTION = _('A widget to display the conversation messages')
AUTHOR = 'Gabriele "Whisky" Visconti'
WEBSITE = ''
search_request = QtCore.pyqtSignal(basestring)
def __init__(self, config, parent=None):
'''Constructor'''
QtGui.QTextBrowser.__init__(self, parent)
gui.base.OutputText.__init__(self, config)
self.formatter = MessageFormatter()
self._chat_text = QtCore.QString('')
self.setOpenLinks(False)
self.anchorClicked.connect(self._on_link_clicked)
self.clear()
def _on_link_clicked(self, url):
href = unicode(url.toString())
if href.startswith("search://"):
self.search_request.emit(href)
return
if not href.startswith("file://"):
Desktop.open(href)
return
def clear(self, source="", target="", target_display="",
source_img="", target_img=""):
'''clear the content'''
QtGui.QTextBrowser.clear(self)
self._chat_text = QtCore.QString('')
gui.base.OutputText.clear(self)
def add_message(self, msg, scroll):
if msg.type == "status":
msg.message = Plus.msnplus_strip(msg.message)
text = self.formatter.format_information(msg.message)
else:
msg.alias = Plus.msnplus_strip(msg.alias)
msg.display_name = Plus.msnplus_strip(msg.display_name)
text = self.formatter.format(msg)
self._append_to_chat(text, scroll)
def _append_to_chat(self, html_string, scroll):
'''Method that appends an html string to the chat view'''
vert_scroll_bar = self.verticalScrollBar()
position = vert_scroll_bar.value()
self._chat_text.append(html_string)
self.setText(self._chat_text)
if scroll:
vert_scroll_bar.setValue(vert_scroll_bar.maximum())
else:
vert_scroll_bar.setValue(position)
def update_p2p(self, account, _type, *what):
''' new p2p data has been received (custom emoticons) '''
#FIXME:
pass
|
from __future__ import (unicode_literals, division, absolute_import,
print_function)
'''
Created on 29 Jun 2012
@author: charles
'''
import socket, select, json, os, traceback, time, sys, random
import posixpath
from collections import defaultdict
import hashlib, threading
import Queue
from functools import wraps
from errno import EAGAIN, EINTR
from threading import Thread
from calibre import prints
from calibre.constants import numeric_version, DEBUG, cache_dir
from calibre.devices.errors import (OpenFailed, OpenFeedback, ControlError, TimeoutError,
InitialConnectionError, PacketError)
from calibre.devices.interface import DevicePlugin, currently_connected_device
from calibre.devices.usbms.books import Book, CollectionsBookList
from calibre.devices.usbms.deviceconfig import DeviceConfig
from calibre.devices.usbms.driver import USBMS
from calibre.devices.utils import build_template_regexp
from calibre.ebooks import BOOK_EXTENSIONS
from calibre.ebooks.metadata import title_sort
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.book.json_codec import JsonCodec
from calibre.library import current_library_name
from calibre.library.server import server_config as content_server_config
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.ipc import eintr_retry_call
from calibre.utils.config_base import tweaks
from calibre.utils.filenames import ascii_filename as sanitize, shorten_components_to
from calibre.utils.mdns import (publish as publish_zeroconf, unpublish as
unpublish_zeroconf, get_all_ips)
from calibre.utils.socket_inheritance import set_socket_inherit
def synchronous(tlockname):
"""A decorator to place an instance based lock around a method """
def _synched(func):
@wraps(func)
def _synchronizer(self, *args, **kwargs):
with self.__getattribute__(tlockname):
return func(self, *args, **kwargs)
return _synchronizer
return _synched
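# A minimal usage sketch (illustration only, not part of this driver): any
# method decorated with @synchronous('sync_lock') acquires self.sync_lock
# before running, so calls from the GUI thread and the device-manager thread
# are serialized.
#
#     class Example(object):
#         def __init__(self):
#             self.sync_lock = threading.RLock()
#
#         @synchronous('sync_lock')
#         def critical(self):
#             return 'only one thread at a time executes this'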
class ConnectionListener(Thread):
def __init__(self, driver):
Thread.__init__(self)
self.daemon = True
self.driver = driver
self.keep_running = True
self.all_ip_addresses = dict()
def stop(self):
self.keep_running = False
def run(self):
device_socket = None
get_all_ips(reinitialize=True)
while self.keep_running:
try:
time.sleep(1)
except:
# Happens during interpreter shutdown
break
if not self.keep_running:
break
if not self.all_ip_addresses:
self.all_ip_addresses = get_all_ips()
if self.all_ip_addresses:
self.driver._debug("All IP addresses", self.all_ip_addresses)
if not self.driver.connection_queue.empty():
d = currently_connected_device.device
if d is not None:
self.driver._debug('queue not serviced', d.get_gui_name())
try:
sock = self.driver.connection_queue.get_nowait()
s = self.driver._json_encode(
self.driver.opcodes['CALIBRE_BUSY'],
{'otherDevice': d.get_gui_name()})
                        self.driver._send_byte_string(sock, (b'%d' % len(s)) + s)
sock.close()
except Queue.Empty:
pass
if getattr(self.driver, 'broadcast_socket', None) is not None:
while True:
ans = select.select((self.driver.broadcast_socket,), (), (), 0)
if len(ans[0]) > 0:
try:
packet = self.driver.broadcast_socket.recvfrom(100)
remote = packet[1]
content_server_port = b''
try :
content_server_port = \
str(content_server_config().parse().port)
except:
pass
message = str(self.driver.ZEROCONF_CLIENT_STRING + b' (on ' +
str(socket.gethostname().partition('.')[0]) +
b');' + content_server_port +
b',' + str(self.driver.port))
self.driver._debug('received broadcast', packet, message)
self.driver.broadcast_socket.sendto(message, remote)
except:
pass
else:
break
if self.driver.connection_queue.empty() and \
getattr(self.driver, 'listen_socket', None) is not None:
ans = select.select((self.driver.listen_socket,), (), (), 0)
if len(ans[0]) > 0:
# timeout in 100 ms to detect rare case where the socket goes
# away between the select and the accept
try:
self.driver._debug('attempt to open device socket')
device_socket = None
self.driver.listen_socket.settimeout(0.100)
device_socket, ign = eintr_retry_call(
self.driver.listen_socket.accept)
set_socket_inherit(device_socket, False)
self.driver.listen_socket.settimeout(None)
device_socket.settimeout(None)
try:
self.driver.connection_queue.put_nowait(device_socket)
except Queue.Full:
device_socket.close()
device_socket = None
self.driver._debug('driver is not answering')
except socket.timeout:
pass
except socket.error:
x = sys.exc_info()[1]
self.driver._debug('unexpected socket exception', x.args[0])
device_socket.close()
device_socket = None
class SDBook(Book):
def __init__(self, prefix, lpath, size=None, other=None):
Book.__init__(self, prefix, lpath, size=size, other=other)
path = getattr(self, 'path', lpath)
self.path = path.replace('\\', '/')
class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
name = 'SmartDevice App Interface'
gui_name = _('Wireless Device')
gui_name_template = '%s: %s'
icon = I('devices/tablet.png')
description = _('Communicate with Smart Device apps')
supported_platforms = ['windows', 'osx', 'linux']
author = 'Charles Haley'
version = (0, 0, 1)
# Invalid USB vendor information so the scanner will never match
VENDOR_ID = [0xffff]
PRODUCT_ID = [0xffff]
BCD = [0xffff]
FORMATS = list(BOOK_EXTENSIONS)
ALL_FORMATS = list(BOOK_EXTENSIONS)
HIDE_FORMATS_CONFIG_BOX = True
USER_CAN_ADD_NEW_FORMATS = False
DEVICE_PLUGBOARD_NAME = 'SMART_DEVICE_APP'
CAN_SET_METADATA = []
CAN_DO_DEVICE_DB_PLUGBOARD = False
SUPPORTS_SUB_DIRS = True
MUST_READ_METADATA = True
NEWS_IN_FOLDER = True
SUPPORTS_USE_AUTHOR_SORT = False
WANTS_UPDATED_THUMBNAILS = True
MANAGES_DEVICE_PRESENCE = True
# Guess about the max length on windows. This number will be reduced by
# the length of the path on the client, and by the fudge factor below. We
# use this on all platforms because the device might be connected to windows
# in the future.
MAX_PATH_LEN = 250
# guess of length of MTP name. The length of the full path to the folder
# on the device is added to this. That path includes the device's mount point
# making this number effectively around 10 to 15 larger.
PATH_FUDGE_FACTOR = 40
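    # Worked example (comment added for clarity): with the defaults above and
    # a device that reports no per-extension path length, the budget computed
    # in _create_upload_path below is
    #     maxlen = MAX_PATH_LEN - (PATH_FUDGE_FACTOR + PATH_FUDGE_FACTOR)
    #            = 250 - (40 + 40) = 170
    # characters for the template-generated part of the file name.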
THUMBNAIL_HEIGHT = 160
DEFAULT_THUMBNAIL_HEIGHT = 160
THUMBNAIL_COMPRESSION_QUALITY = 75
DEFAULT_THUMBNAIL_COMPRESSION_QUALITY = 75
PREFIX = ''
BACKLOADING_ERROR_MESSAGE = None
SAVE_TEMPLATE = '{title} - {authors} ({id})'
# Some network protocol constants
BASE_PACKET_LEN = 4096
PROTOCOL_VERSION = 1
MAX_CLIENT_COMM_TIMEOUT = 300.0 # Wait at most N seconds for an answer
MAX_UNSUCCESSFUL_CONNECTS = 5
SEND_NOOP_EVERY_NTH_PROBE = 5
DISCONNECT_AFTER_N_SECONDS = 30*60 # 30 minutes
PURGE_CACHE_ENTRIES_DAYS = 30
CURRENT_CC_VERSION = 128
ZEROCONF_CLIENT_STRING = b'calibre wireless device client'
# A few "random" port numbers to use for detecting clients using broadcast
# The clients are expected to broadcast a UDP 'hi there' on all of these
# ports when they attempt to connect. Calibre will respond with the port
# number the client should use. This scheme backs up mdns. And yes, we
# must hope that no other application on the machine is using one of these
# ports in datagram mode.
# If you change the ports here, all clients will also need to change.
BROADCAST_PORTS = [54982, 48123, 39001, 44044, 59678]
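    # A hedged sketch (assumption, not shipped client code) of the discovery
    # probe a client might send. ConnectionListener.run() above answers with
    # "<client string> (on <hostname>);<content server port>,<driver port>",
    # so the client can parse the TCP port to connect to from the reply.
    #
    #     s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    #     s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    #     for p in (54982, 48123, 39001, 44044, 59678):
    #         s.sendto(b'hi there', ('<broadcast>', p))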
opcodes = {
'NOOP' : 12,
'OK' : 0,
'BOOK_DONE' : 11,
'CALIBRE_BUSY' : 18,
'SET_LIBRARY_INFO' : 19,
'DELETE_BOOK' : 13,
'DISPLAY_MESSAGE' : 17,
'FREE_SPACE' : 5,
'GET_BOOK_FILE_SEGMENT' : 14,
'GET_BOOK_METADATA' : 15,
'GET_BOOK_COUNT' : 6,
'GET_DEVICE_INFORMATION' : 3,
'GET_INITIALIZATION_INFO': 9,
'SEND_BOOKLISTS' : 7,
'SEND_BOOK' : 8,
'SEND_BOOK_METADATA' : 16,
'SET_CALIBRE_DEVICE_INFO': 1,
'SET_CALIBRE_DEVICE_NAME': 2,
'TOTAL_SPACE' : 4,
}
reverse_opcodes = dict([(v, k) for k,v in opcodes.iteritems()])
MESSAGE_PASSWORD_ERROR = 1
MESSAGE_UPDATE_NEEDED = 2
MESSAGE_SHOW_TOAST = 3
ALL_BY_TITLE = _('All by title')
ALL_BY_AUTHOR = _('All by author')
ALL_BY_SOMETHING = _('All by something')
EXTRA_CUSTOMIZATION_MESSAGE = [
_('Enable connections at startup') + ':::<p>' +
_('Check this box to allow connections when calibre starts') + '</p>',
'',
_('Security password') + ':::<p>' +
_('Enter a password that the device app must use to connect to calibre') + '</p>',
'',
_('Use fixed network port') + ':::<p>' +
_('If checked, use the port number in the "Port" box, otherwise '
'the driver will pick a random port') + '</p>',
_('Port number: ') + ':::<p>' +
_('Enter the port number the driver is to use if the "fixed port" box is checked') + '</p>',
_('Print extra debug information') + ':::<p>' +
_('Check this box if requested when reporting problems') + '</p>',
'',
_('Comma separated list of metadata fields '
'to turn into collections on the device.') + ':::<p>' +
_('Possibilities include: series, tags, authors, etc' +
'. Three special collections are available: %(abt)s:%(abtv)s, '
'%(aba)s:%(abav)s, and %(abs)s:%(absv)s. Add '
'these values to the list to enable them. The collections will be '
'given the name provided after the ":" character.')%dict(
abt='abt', abtv=ALL_BY_TITLE, aba='aba', abav=ALL_BY_AUTHOR,
abs='abs', absv=ALL_BY_SOMETHING),
'',
_('Enable the no-activity timeout') + ':::<p>' +
_('If this box is checked, calibre will automatically disconnect if '
'a connected device does nothing for %d minutes. Unchecking this '
' box disables this timeout, so calibre will never automatically '
'disconnect.')%(DISCONNECT_AFTER_N_SECONDS/60,) + '</p>',
_('Use this IP address') + ':::<p>' +
_('Use this option if you want to force the driver to listen on a '
'particular IP address. The driver will listen only on the '
              'entered address, and this address will be the one advertised '
'over mDNS (bonjour).') + '</p>',
_('Replace books with same calibre ID') + ':::<p>' +
_('Use this option to overwrite a book on the device if that book '
'has the same calibre identifier as the book being sent. The file name of the '
'book will not change even if the save template produces a '
'different result. Using this option in most cases prevents '
'having multiple copies of a book on the device.') + '</p>',
_('Cover thumbnail compression quality') + ':::<p>' +
_('Use this option to control the size and quality of the cover '
'file sent to the device. It must be between 50 and 99. '
'The larger the number the higher quality the cover, but also '
'the larger the file. For example, changing this from 70 to 90 '
'results in a much better cover that is approximately 2.5 '
'times as big. To see the changes you must force calibre '
'to resend metadata to the device, either by changing '
'the metadata for the book (updating the last modification '
'time) or resending the book itself.') + '</p>',
_('Use metadata cache') + ':::<p>' +
_('Setting this option allows calibre to keep a copy of metadata '
'on the device, speeding up device connections. Unsetting this '
'option disables keeping the copy, forcing the device to send '
'metadata to calibre on every connect. Unset this option if '
'you think that the cache might not be operating correctly.') + '</p>',
]
EXTRA_CUSTOMIZATION_DEFAULT = [
False, '',
'', '',
False, '9090',
False, '',
'', '',
False, '',
True, '75',
True
]
OPT_AUTOSTART = 0
OPT_PASSWORD = 2
OPT_USE_PORT = 4
OPT_PORT_NUMBER = 5
OPT_EXTRA_DEBUG = 6
OPT_COLLECTIONS = 8
OPT_AUTODISCONNECT = 10
OPT_FORCE_IP_ADDRESS = 11
OPT_OVERWRITE_BOOKS_UUID = 12
OPT_COMPRESSION_QUALITY = 13
OPT_USE_METADATA_CACHE = 14
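    # Illustration (comment added for clarity): the OPT_* constants index into
    # the saved settings / EXTRA_CUSTOMIZATION_DEFAULT above, e.g.
    #     self.settings().extra_customization[self.OPT_PORT_NUMBER]  -> '9090'
    #     self.settings().extra_customization[self.OPT_AUTOSTART]    -> False
    # The gaps in the numbering (1, 3, 7, 9) correspond to the '' spacer rows
    # in EXTRA_CUSTOMIZATION_MESSAGE.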
def __init__(self, path):
self.sync_lock = threading.RLock()
self.noop_counter = 0
self.debug_start_time = time.time()
self.debug_time = time.time()
self.is_connected = False
def _debug(self, *args):
# manual synchronization so we don't lose the calling method name
import inspect
with self.sync_lock:
if not DEBUG:
return
total_elapsed = time.time() - self.debug_start_time
elapsed = time.time() - self.debug_time
print('SMART_DEV (%7.2f:%7.3f) %s'%(total_elapsed, elapsed,
inspect.stack()[1][3]), end='')
for a in args:
try:
if isinstance(a, dict):
printable = {}
for k,v in a.iteritems():
if isinstance(v, (str, unicode)) and len(v) > 50:
printable[k] = 'too long'
else:
printable[k] = v
prints('', printable, end='')
else:
prints('', a, end='')
except:
prints('', 'value too long', end='')
print()
self.debug_time = time.time()
# local utilities
# copied from USBMS. Perhaps this could be a classmethod in usbms?
def _update_driveinfo_record(self, dinfo, prefix, location_code, name=None):
from calibre.utils.date import isoformat, now
import uuid
if not isinstance(dinfo, dict):
dinfo = {}
if dinfo.get('device_store_uuid', None) is None:
dinfo['device_store_uuid'] = unicode(uuid.uuid4())
if dinfo.get('device_name') is None:
dinfo['device_name'] = self.get_gui_name()
if name is not None:
dinfo['device_name'] = name
dinfo['location_code'] = location_code
dinfo['last_library_uuid'] = getattr(self, 'current_library_uuid', None)
dinfo['calibre_version'] = '.'.join([unicode(i) for i in numeric_version])
dinfo['date_last_connected'] = isoformat(now())
dinfo['prefix'] = self.PREFIX
return dinfo
# copied with changes from USBMS.Device. In particular, we needed to
# remove the 'path' argument and all its uses. Also removed the calls to
# filename_callback and sanitize_path_components
def _create_upload_path(self, mdata, fname, create_dirs=True):
fname = sanitize(fname)
ext = os.path.splitext(fname)[1]
try:
# If we have already seen this book's UUID, use the existing path
if self.settings().extra_customization[self.OPT_OVERWRITE_BOOKS_UUID]:
existing_book = self._uuid_in_cache(mdata.uuid, ext)
if (existing_book and existing_book.lpath and
self.known_metadata.get(existing_book.lpath, None)):
return existing_book.lpath
# If the device asked for it, try to use the UUID as the file name.
            # Fall back to the template if the UUID doesn't exist.
if self.client_wants_uuid_file_names and mdata.uuid:
return (mdata.uuid + ext)
except:
pass
dotless_ext = ext[1:] if len(ext) > 0 else ext
maxlen = (self.MAX_PATH_LEN - (self.PATH_FUDGE_FACTOR +
self.exts_path_lengths.get(dotless_ext, self.PATH_FUDGE_FACTOR)))
special_tag = None
if mdata.tags:
for t in mdata.tags:
if t.startswith(_('News')) or t.startswith('/'):
special_tag = t
break
settings = self.settings()
template = self.save_template()
if mdata.tags and _('News') in mdata.tags:
try:
p = mdata.pubdate
date = (p.year, p.month, p.day)
except:
today = time.localtime()
date = (today[0], today[1], today[2])
template = "{title}_%d-%d-%d" % date
use_subdirs = self.SUPPORTS_SUB_DIRS and settings.use_subdirs
from calibre.library.save_to_disk import get_components
from calibre.library.save_to_disk import config
opts = config().parse()
if not isinstance(template, unicode):
template = template.decode('utf-8')
app_id = str(getattr(mdata, 'application_id', ''))
id_ = mdata.get('id', fname)
extra_components = get_components(template, mdata, id_,
timefmt=opts.send_timefmt, length=maxlen-len(app_id)-1,
last_has_extension=False)
if not extra_components:
extra_components.append(sanitize(fname))
else:
extra_components[-1] = sanitize(extra_components[-1]+ext)
if extra_components[-1] and extra_components[-1][0] in ('.', '_'):
extra_components[-1] = 'x' + extra_components[-1][1:]
if special_tag is not None:
name = extra_components[-1]
extra_components = []
tag = special_tag
if tag.startswith(_('News')):
if self.NEWS_IN_FOLDER:
extra_components.append('News')
else:
for c in tag.split('/'):
c = sanitize(c)
if not c:
continue
extra_components.append(c)
extra_components.append(name)
if not use_subdirs:
# Leave this stuff here in case we later decide to use subdirs
extra_components = extra_components[-1:]
def remove_trailing_periods(x):
ans = x
while ans.endswith('.'):
ans = ans[:-1].strip()
if not ans:
ans = 'x'
return ans
extra_components = list(map(remove_trailing_periods, extra_components))
components = shorten_components_to(maxlen, extra_components)
filepath = posixpath.join(*components)
self._debug('lengths', dotless_ext, maxlen,
self.exts_path_lengths.get(dotless_ext, self.PATH_FUDGE_FACTOR),
len(filepath))
return filepath
def _strip_prefix(self, path):
if self.PREFIX and path.startswith(self.PREFIX):
return path[len(self.PREFIX):]
return path
# JSON booklist encode & decode
# If the argument is a booklist or contains a book, use the metadata json
# codec to first convert it to a string dict
def _json_encode(self, op, arg):
res = {}
for k,v in arg.iteritems():
if isinstance(v, (Book, Metadata)):
res[k] = self.json_codec.encode_book_metadata(v)
series = v.get('series', None)
if series:
tsorder = tweaks['save_template_title_series_sorting']
series = title_sort(series, order=tsorder)
else:
series = ''
self._debug('series sort = ', series)
res[k]['_series_sort_'] = series
else:
res[k] = v
from calibre.utils.config import to_json
return json.dumps([op, res], encoding='utf-8', default=to_json)
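    # A hedged sketch of the wire format implied by _json_encode,
    # _send_byte_string and _read_string_from_net (inferred from this file,
    # not a separate spec): each packet is the ASCII decimal length of a JSON
    # payload followed immediately by the payload, itself a two-element array
    # [opcode, arguments]. For example, a NOOP (opcode 12) with empty
    # arguments goes on the wire as:
    #
    #     payload = json.dumps([12, {}])             # '[12, {}]', 8 bytes
    #     packet = (b'%d' % len(payload)) + payload  # '8[12, {}]'
    #
    # The reader scans for the first '[' to know where the length digits end.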
# Network functions
def _read_binary_from_net(self, length):
try:
self.device_socket.settimeout(self.MAX_CLIENT_COMM_TIMEOUT)
v = self.device_socket.recv(length)
self.device_socket.settimeout(None)
return v
except:
self._close_device_socket()
raise
def _read_string_from_net(self):
data = bytes(0)
while True:
dex = data.find(b'[')
if dex >= 0:
break
# recv seems to return a pointer into some internal buffer.
# Things get trashed if we don't make a copy of the data.
v = self._read_binary_from_net(2)
if len(v) == 0:
return '' # documentation says the socket is broken permanently.
data += v
total_len = int(data[:dex])
data = data[dex:]
pos = len(data)
while pos < total_len:
v = self._read_binary_from_net(total_len - pos)
if len(v) == 0:
return '' # documentation says the socket is broken permanently.
data += v
pos += len(v)
return data
def _send_byte_string(self, sock, s):
if not isinstance(s, bytes):
self._debug('given a non-byte string!')
self._close_device_socket()
raise PacketError("Internal error: found a string that isn't bytes")
sent_len = 0
total_len = len(s)
while sent_len < total_len:
try:
sock.settimeout(self.MAX_CLIENT_COMM_TIMEOUT)
if sent_len == 0:
amt_sent = sock.send(s)
else:
amt_sent = sock.send(s[sent_len:])
sock.settimeout(None)
if amt_sent <= 0:
raise IOError('Bad write on socket')
sent_len += amt_sent
except socket.error as e:
self._debug('socket error', e, e.errno)
if e.args[0] != EAGAIN and e.args[0] != EINTR:
self._close_device_socket()
raise
                time.sleep(0.1)  # let's not hammer the OS too hard
except:
self._close_device_socket()
raise
# This must be protected by a lock because it is called from the GUI thread
# (the sync stuff) and the device manager thread
@synchronous('sync_lock')
def _call_client(self, op, arg, print_debug_info=True, wait_for_response=True):
if op != 'NOOP':
self.noop_counter = 0
extra_debug = self.settings().extra_customization[self.OPT_EXTRA_DEBUG]
if print_debug_info or extra_debug:
if extra_debug:
self._debug(op, 'wfr', wait_for_response, arg)
else:
self._debug(op, 'wfr', wait_for_response)
if self.device_socket is None:
return None, None
try:
s = self._json_encode(self.opcodes[op], arg)
if print_debug_info and extra_debug:
self._debug('send string', s)
self._send_byte_string(self.device_socket, (b'%d' % len(s)) + s)
if not wait_for_response:
return None, None
return self._receive_from_client(print_debug_info=print_debug_info)
except socket.timeout:
self._debug('timeout communicating with device')
self._close_device_socket()
raise TimeoutError('Device did not respond in reasonable time')
except socket.error:
self._debug('device went away')
self._close_device_socket()
raise ControlError(desc='Device closed the network connection')
except:
self._debug('other exception')
traceback.print_exc()
self._close_device_socket()
raise
raise ControlError(desc='Device responded with incorrect information')
def _receive_from_client(self, print_debug_info=True):
from calibre.utils.config import from_json
extra_debug = self.settings().extra_customization[self.OPT_EXTRA_DEBUG]
try:
v = self._read_string_from_net()
if print_debug_info and extra_debug:
self._debug('received string', v)
if v:
v = json.loads(v, object_hook=from_json)
if print_debug_info and extra_debug:
self._debug('receive after decode') # , v)
return (self.reverse_opcodes[v[0]], v[1])
self._debug('protocol error -- empty json string')
except socket.timeout:
self._debug('timeout communicating with device')
self._close_device_socket()
raise TimeoutError('Device did not respond in reasonable time')
except socket.error:
self._debug('device went away')
self._close_device_socket()
raise ControlError(desc='Device closed the network connection')
except:
self._debug('other exception')
traceback.print_exc()
self._close_device_socket()
raise
raise ControlError(desc='Device responded with incorrect information')
# Write a file to the device as a series of binary strings.
def _put_file(self, infile, lpath, book_metadata, this_book, total_books):
close_ = False
if not hasattr(infile, 'read'):
infile, close_ = lopen(infile, 'rb'), True
infile.seek(0, os.SEEK_END)
length = infile.tell()
book_metadata.size = length
infile.seek(0)
opcode, result = self._call_client('SEND_BOOK', {'lpath': lpath, 'length': length,
'metadata': book_metadata, 'thisBook': this_book,
'totalBooks': total_books,
'willStreamBooks': True,
'willStreamBinary' : True,
'wantsSendOkToSendbook' : self.can_send_ok_to_sendbook,
'canSupportLpathChanges': True},
print_debug_info=False,
wait_for_response=self.can_send_ok_to_sendbook)
if self.can_send_ok_to_sendbook:
lpath = result.get('lpath', lpath)
book_metadata.lpath = lpath
self._set_known_metadata(book_metadata)
pos = 0
failed = False
with infile:
while True:
b = infile.read(self.max_book_packet_len)
blen = len(b)
if not b:
break
self._send_byte_string(self.device_socket, b)
pos += blen
self.time = None
if close_:
infile.close()
return (-1, None) if failed else (length, lpath)
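    # A hedged summary of the transfer protocol implemented above (inferred
    # from _put_file, not a separate spec): the SEND_BOOK packet announces the
    # lpath, total byte length and metadata; the file body then follows as raw
    # binary chunks of at most max_book_packet_len bytes with no per-chunk
    # framing. The client knows when to stop reading because it was told the
    # total length up front.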
def _get_smartdevice_option_number(self, opt_string):
if opt_string == 'password':
return self.OPT_PASSWORD
elif opt_string == 'autostart':
return self.OPT_AUTOSTART
elif opt_string == 'use_fixed_port':
return self.OPT_USE_PORT
elif opt_string == 'port_number':
return self.OPT_PORT_NUMBER
elif opt_string == 'force_ip_address':
return self.OPT_FORCE_IP_ADDRESS
elif opt_string == 'thumbnail_compression_quality':
return self.OPT_COMPRESSION_QUALITY
else:
return None
def _metadata_in_cache(self, uuid, ext_or_lpath, lastmod):
from calibre.utils.date import now, parse_date
try:
key = self._make_metadata_cache_key(uuid, ext_or_lpath)
if isinstance(lastmod, unicode):
if lastmod == 'None':
return None
lastmod = parse_date(lastmod)
if key in self.device_book_cache and self.device_book_cache[key]['book'].last_modified == lastmod:
self.device_book_cache[key]['last_used'] = now()
return self.device_book_cache[key]['book'].deepcopy(lambda : SDBook('', ''))
except:
traceback.print_exc()
return None
def _metadata_already_on_device(self, book):
try:
v = self.known_metadata.get(book.lpath, None)
if v is not None:
# Metadata is the same if the uuids match, if the last_modified dates
# match, and if the height of the thumbnails is the same. The last
# is there to allow a device to demand a different thumbnail size
if (v.get('uuid', None) == book.get('uuid', None) and
v.get('last_modified', None) == book.get('last_modified', None)):
v_thumb = v.get('thumbnail', None)
b_thumb = book.get('thumbnail', None)
if bool(v_thumb) != bool(b_thumb):
return False
return not v_thumb or v_thumb[1] == b_thumb[1]
except:
traceback.print_exc()
return False
def _uuid_in_cache(self, uuid, ext):
try:
for b in self.device_book_cache.itervalues():
metadata = b['book']
if metadata.get('uuid', '') != uuid:
continue
if metadata.get('lpath', '').endswith(ext):
return metadata
except:
traceback.print_exc()
return None
def _read_metadata_cache(self):
self._debug('device uuid', self.device_uuid)
from calibre.utils.config import from_json
try:
old_cache_file_name = os.path.join(cache_dir(),
'device_drivers_' + self.__class__.__name__ +
'_metadata_cache.pickle')
if os.path.exists(old_cache_file_name):
os.remove(old_cache_file_name)
except:
pass
try:
old_cache_file_name = os.path.join(cache_dir(),
'device_drivers_' + self.__class__.__name__ +
'_metadata_cache.json')
if os.path.exists(old_cache_file_name):
os.remove(old_cache_file_name)
except:
pass
cache_file_name = os.path.join(cache_dir(),
'wireless_device_' + self.device_uuid +
'_metadata_cache.json')
self.device_book_cache = defaultdict(dict)
self.known_metadata = {}
try:
count = 0
if os.path.exists(cache_file_name):
with lopen(cache_file_name, mode='rb') as fd:
while True:
rec_len = fd.readline()
if len(rec_len) != 8:
break
raw = fd.read(int(rec_len))
book = json.loads(raw.decode('utf-8'), object_hook=from_json)
key = book.keys()[0]
metadata = self.json_codec.raw_to_book(book[key]['book'],
SDBook, self.PREFIX)
book[key]['book'] = metadata
self.device_book_cache.update(book)
lpath = metadata.get('lpath')
self.known_metadata[lpath] = metadata
count += 1
self._debug('loaded', count, 'cache items')
except:
traceback.print_exc()
self.device_book_cache = defaultdict(dict)
self.known_metadata = {}
try:
if os.path.exists(cache_file_name):
os.remove(cache_file_name)
except:
traceback.print_exc()
def _write_metadata_cache(self):
self._debug()
from calibre.utils.date import now
now_ = now()
from calibre.utils.config import to_json
try:
purged = 0
count = 0
prefix = os.path.join(cache_dir(),
'wireless_device_' + self.device_uuid + '_metadata_cache')
with lopen(prefix + '.tmp', mode='wb') as fd:
for key,book in self.device_book_cache.iteritems():
if (now_ - book['last_used']).days > self.PURGE_CACHE_ENTRIES_DAYS:
purged += 1
continue
json_metadata = defaultdict(dict)
json_metadata[key]['book'] = self.json_codec.encode_book_metadata(book['book'])
json_metadata[key]['last_used'] = book['last_used']
result = json.dumps(json_metadata, indent=2, default=to_json)
fd.write("%0.7d\n"%(len(result)+1))
fd.write(result)
fd.write('\n')
count += 1
self._debug('wrote', count, 'entries, purged', purged, 'entries')
from calibre.utils.filenames import atomic_rename
atomic_rename(fd.name, prefix + '.json')
except:
traceback.print_exc()
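    # A hedged sketch of the cache file layout used above (inferred from
    # _write_metadata_cache and _read_metadata_cache): each record is a
    # zero-padded 7-digit byte count plus a newline, followed by that many
    # bytes of JSON (including its trailing newline). A minimal reader:
    #
    #     with lopen(cache_file_name, 'rb') as fd:
    #         while True:
    #             rec_len = fd.readline()            # e.g. '0000123\n'
    #             if len(rec_len) != 8:
    #                 break
    #             record = json.loads(fd.read(int(rec_len)).decode('utf-8'))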
def _make_metadata_cache_key(self, uuid, lpath_or_ext):
key = None
if uuid and lpath_or_ext:
key = uuid + lpath_or_ext
return key
def _set_known_metadata(self, book, remove=False):
from calibre.utils.date import now
lpath = book.lpath
ext = os.path.splitext(lpath)[1]
uuid = book.get('uuid', None)
if self.client_cache_uses_lpaths:
key = self._make_metadata_cache_key(uuid, lpath)
else:
key = self._make_metadata_cache_key(uuid, ext)
if remove:
self.known_metadata.pop(lpath, None)
if key:
self.device_book_cache.pop(key, None)
else:
# Check if we have another UUID with the same lpath. If so, remove it
# Must try both the extension and the lpath because of the cache change
existing_uuid = self.known_metadata.get(lpath, {}).get('uuid', None)
if existing_uuid and existing_uuid != uuid:
self.device_book_cache.pop(self._make_metadata_cache_key(existing_uuid, ext), None)
self.device_book_cache.pop(self._make_metadata_cache_key(existing_uuid, lpath), None)
new_book = book.deepcopy()
self.known_metadata[lpath] = new_book
if key:
self.device_book_cache[key]['book'] = new_book
self.device_book_cache[key]['last_used'] = now()
def _close_device_socket(self):
if self.device_socket is not None:
try:
self.device_socket.close()
except:
pass
self.device_socket = None
self._write_metadata_cache()
self.is_connected = False
def _attach_to_port(self, sock, port):
try:
ip_addr = self.settings().extra_customization[self.OPT_FORCE_IP_ADDRESS]
self._debug('try ip address "'+ ip_addr + '"', 'on port', port)
if ip_addr:
sock.bind((ip_addr, port))
else:
sock.bind(('', port))
except socket.error:
self._debug('socket error on port', port)
port = 0
except:
self._debug('Unknown exception while attaching port to socket')
traceback.print_exc()
raise
return port
def _close_listen_socket(self):
self.listen_socket.close()
self.listen_socket = None
self.is_connected = False
if getattr(self, 'broadcast_socket', None) is not None:
self.broadcast_socket.close()
self.broadcast_socket = None
def _read_file_metadata(self, temp_file_name):
from calibre.ebooks.metadata.meta import get_metadata
from calibre.customize.ui import quick_metadata
ext = temp_file_name.rpartition('.')[-1].lower()
with lopen(temp_file_name, 'rb') as stream:
with quick_metadata:
return get_metadata(stream, stream_type=ext,
force_read_metadata=True,
pattern=build_template_regexp(self.save_template()))
# The public interface methods.
@synchronous('sync_lock')
def detect_managed_devices(self, devices_on_system, force_refresh=False):
if getattr(self, 'listen_socket', None) is None:
self.is_connected = False
if self.is_connected:
self.noop_counter += 1
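            # Probe cadence (comment added for clarity): a full NOOP round
            # trip happens on the first few probes and thereafter whenever
            # noop_counter % SEND_NOOP_EVERY_NTH_PROBE == 1; all other probes
            # just select() on the socket to catch a disconnect notification.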
if (self.noop_counter > self.SEND_NOOP_EVERY_NTH_PROBE and
(self.noop_counter % self.SEND_NOOP_EVERY_NTH_PROBE) != 1):
try:
ans = select.select((self.device_socket,), (), (), 0)
if len(ans[0]) == 0:
return self
# The socket indicates that something is there. Given the
# protocol, this can only be a disconnect notification. Fall
# through and actually try to talk to the client.
# This will usually toss an exception if the socket is gone.
except:
pass
if (self.settings().extra_customization[self.OPT_AUTODISCONNECT] and
self.noop_counter > self.DISCONNECT_AFTER_N_SECONDS):
self._close_device_socket()
self._debug('timeout -- disconnected')
else:
try:
if self._call_client('NOOP', dict())[0] is None:
self._close_device_socket()
except:
self._close_device_socket()
return self if self.is_connected else None
if getattr(self, 'listen_socket', None) is not None:
try:
ans = self.connection_queue.get_nowait()
self.device_socket = ans
self.is_connected = True
try:
peer = self.device_socket.getpeername()[0]
attempts = self.connection_attempts.get(peer, 0)
if attempts >= self.MAX_UNSUCCESSFUL_CONNECTS:
self._debug('too many connection attempts from', peer)
self._close_device_socket()
raise InitialConnectionError(_('Too many connection attempts from %s') % peer)
else:
self.connection_attempts[peer] = attempts + 1
except InitialConnectionError:
raise
except:
pass
except Queue.Empty:
self.is_connected = False
return self if self.is_connected else None
return None
@synchronous('sync_lock')
def debug_managed_device_detection(self, devices_on_system, output):
from functools import partial
p = partial(prints, file=output)
if self.is_connected:
p("A wireless device is connected")
return True
all_ip_addresses = get_all_ips()
if all_ip_addresses:
p("All IP addresses", all_ip_addresses)
else:
p("No IP addresses found")
p("No device is connected")
return False
@synchronous('sync_lock')
def open(self, connected_device, library_uuid):
from calibre.utils.date import isoformat, now
self._debug()
if not self.is_connected:
# We have been called to retry the connection. Give up immediately
raise ControlError(desc='Attempt to open a closed device')
self.current_library_uuid = library_uuid
self.current_library_name = current_library_name()
self.device_uuid = ''
try:
password = self.settings().extra_customization[self.OPT_PASSWORD]
if password:
challenge = isoformat(now())
hasher = hashlib.new('sha1')
hasher.update(password.encode('UTF-8'))
hasher.update(challenge.encode('UTF-8'))
hash_digest = hasher.hexdigest()
else:
challenge = ''
hash_digest = ''
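            # Sketch of the challenge/response scheme used below (inferred
            # from this method): calibre sends the plain-text challenge (an
            # ISO timestamp) and the client must return
            # sha1(password + challenge) as 'passwordHash', which is compared
            # against hash_digest computed above. For example, for password
            # 'pw' and challenge '2012-06-29T10:00:00+00:00' the client sends
            #     hashlib.sha1(b'pw2012-06-29T10:00:00+00:00').hexdigest()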
opcode, result = self._call_client('GET_INITIALIZATION_INFO',
{'serverProtocolVersion': self.PROTOCOL_VERSION,
'validExtensions': self.ALL_FORMATS,
'passwordChallenge': challenge,
'currentLibraryName': self.current_library_name,
'currentLibraryUUID': library_uuid,
'pubdateFormat': tweaks['gui_pubdate_display_format'],
'timestampFormat': tweaks['gui_timestamp_display_format'],
'lastModifiedFormat': tweaks['gui_last_modified_display_format'],
'calibre_version': numeric_version,
'canSupportUpdateBooks': True,
'canSupportLpathChanges': True})
if opcode != 'OK':
# Something wrong with the return. Close the socket
# and continue.
self._debug('Protocol error - Opcode not OK')
self._close_device_socket()
return False
if not result.get('versionOK', False):
# protocol mismatch
self._debug('Protocol error - protocol version mismatch')
self._close_device_socket()
return False
if result.get('maxBookContentPacketLen', 0) <= 0:
# protocol mismatch
self._debug('Protocol error - bogus book packet length')
self._close_device_socket()
return False
# Set up to recheck the sync columns
self.have_checked_sync_columns = False
client_can_stream_books = result.get('canStreamBooks', False)
self._debug('Device can stream books', client_can_stream_books)
client_can_stream_metadata = result.get('canStreamMetadata', False)
self._debug('Device can stream metadata', client_can_stream_metadata)
client_can_receive_book_binary = result.get('canReceiveBookBinary', False)
self._debug('Device can receive book binary', client_can_receive_book_binary)
client_can_delete_multiple = result.get('canDeleteMultipleBooks', False)
self._debug('Device can delete multiple books', client_can_delete_multiple)
if not (client_can_stream_books and
client_can_stream_metadata and
client_can_receive_book_binary and
client_can_delete_multiple):
self._debug('Software on device too old')
self._close_device_socket()
raise OpenFeedback(_('The app on your device is too old and is no '
'longer supported. Update it to a newer version.'))
self.client_can_use_metadata_cache = result.get('canUseCachedMetadata', False)
self._debug('Device can use cached metadata', self.client_can_use_metadata_cache)
self.client_cache_uses_lpaths = result.get('cacheUsesLpaths', False)
self._debug('Cache uses lpaths', self.client_cache_uses_lpaths)
self.can_send_ok_to_sendbook = result.get('canSendOkToSendbook', False)
self._debug('Can send OK to sendbook', self.can_send_ok_to_sendbook)
self.can_accept_library_info = result.get('canAcceptLibraryInfo', False)
self._debug('Can accept library info', self.can_accept_library_info)
self.will_ask_for_update_books = result.get('willAskForUpdateBooks', False)
self._debug('Will ask for update books', self.will_ask_for_update_books)
self.set_temp_mark_when_syncing_read = \
result.get('setTempMarkWhenReadInfoSynced', False)
self._debug('Will set temp mark when syncing read',
self.set_temp_mark_when_syncing_read)
if not self.settings().extra_customization[self.OPT_USE_METADATA_CACHE]:
self.client_can_use_metadata_cache = False
self._debug('metadata caching disabled by option')
self.client_device_kind = result.get('deviceKind', '')
self._debug('Client device kind', self.client_device_kind)
self.client_device_name = result.get('deviceName', self.client_device_kind)
self._debug('Client device name', self.client_device_name)
self.client_app_name = result.get('appName', "")
self._debug('Client app name', self.client_app_name)
self.app_version_number = result.get('ccVersionNumber', '0')
self._debug('App version #:', self.app_version_number)
try:
if (self.client_app_name == 'CalibreCompanion' and
self.app_version_number < self.CURRENT_CC_VERSION):
self._debug('Telling client to update')
self._call_client("DISPLAY_MESSAGE",
{'messageKind': self.MESSAGE_UPDATE_NEEDED,
'lastestKnownAppVersion': self.CURRENT_CC_VERSION})
except:
pass
self.max_book_packet_len = result.get('maxBookContentPacketLen',
self.BASE_PACKET_LEN)
self._debug('max_book_packet_len', self.max_book_packet_len)
exts = result.get('acceptedExtensions', None)
if exts is None or not isinstance(exts, list) or len(exts) == 0:
self._debug('Protocol error - bogus accepted extensions')
self._close_device_socket()
return False
self.client_wants_uuid_file_names = result.get('useUuidFileNames', False)
self._debug('Device wants UUID file names', self.client_wants_uuid_file_names)
config = self._configProxy()
config['format_map'] = exts
self._debug('selected formats', config['format_map'])
self.exts_path_lengths = result.get('extensionPathLengths', {})
self._debug('extension path lengths', self.exts_path_lengths)
self.THUMBNAIL_HEIGHT = result.get('coverHeight', self.DEFAULT_THUMBNAIL_HEIGHT)
self._debug('cover height', self.THUMBNAIL_HEIGHT)
if 'coverWidth' in result:
# Setting this field forces the aspect ratio
self.THUMBNAIL_WIDTH = result.get('coverWidth',
(self.DEFAULT_THUMBNAIL_HEIGHT/3) * 4)
self._debug('cover width', self.THUMBNAIL_WIDTH)
elif hasattr(self, 'THUMBNAIL_WIDTH'):
delattr(self, 'THUMBNAIL_WIDTH')
self.is_read_sync_col = result.get('isReadSyncCol', None)
self._debug('Device is_read sync col', self.is_read_sync_col)
self.is_read_date_sync_col = result.get('isReadDateSyncCol', None)
self._debug('Device is_read_date sync col', self.is_read_date_sync_col)
if password:
returned_hash = result.get('passwordHash', None)
                if returned_hash is None:
# protocol mismatch
self._debug('Protocol error - missing password hash')
self._close_device_socket()
return False
if returned_hash != hash_digest:
# bad password
self._debug('password mismatch')
try:
self._call_client("DISPLAY_MESSAGE",
{'messageKind': self.MESSAGE_PASSWORD_ERROR,
'currentLibraryName': self.current_library_name,
'currentLibraryUUID': library_uuid})
except:
pass
self._close_device_socket()
# Don't bother with a message. The user will be informed on
# the device.
raise OpenFailed('')
try:
peer = self.device_socket.getpeername()[0]
self.connection_attempts[peer] = 0
except:
pass
return True
except socket.timeout:
self._close_device_socket()
except socket.error:
x = sys.exc_info()[1]
self._debug('unexpected socket exception', x.args[0])
self._close_device_socket()
raise
return False
def get_gui_name(self):
if getattr(self, 'client_device_name', None):
return self.gui_name_template%(self.gui_name, self.client_device_name)
if getattr(self, 'client_device_kind', None):
return self.gui_name_template%(self.gui_name, self.client_device_kind)
return self.gui_name
def config_widget(self):
from calibre.gui2.device_drivers.configwidget import ConfigWidget
cw = ConfigWidget(self.settings(), self.FORMATS, self.SUPPORTS_SUB_DIRS,
self.MUST_READ_METADATA, self.SUPPORTS_USE_AUTHOR_SORT,
self.EXTRA_CUSTOMIZATION_MESSAGE, self)
return cw
@synchronous('sync_lock')
def get_device_information(self, end_session=True):
self._debug()
self.report_progress(1.0, _('Get device information...'))
opcode, result = self._call_client('GET_DEVICE_INFORMATION', dict())
if opcode == 'OK':
self.driveinfo = result['device_info']
self._update_driveinfo_record(self.driveinfo, self.PREFIX, 'main')
self.device_uuid = self.driveinfo['device_store_uuid']
self._call_client('SET_CALIBRE_DEVICE_INFO', self.driveinfo)
self._read_metadata_cache()
return (self.get_gui_name(), result['device_version'],
result['version'], '', {'main':self.driveinfo})
return (self.get_gui_name(), '', '', '')
@synchronous('sync_lock')
def set_driveinfo_name(self, location_code, name):
self._update_driveinfo_record(self.driveinfo, "main", name)
self._call_client('SET_CALIBRE_DEVICE_NAME',
{'location_code': 'main', 'name':name})
@synchronous('sync_lock')
def reset(self, key='-1', log_packets=False, report_progress=None,
detected_device=None) :
self._debug()
self.set_progress_reporter(report_progress)
@synchronous('sync_lock')
def set_progress_reporter(self, report_progress):
self._debug()
self.report_progress = report_progress
if self.report_progress is None:
self.report_progress = lambda x, y: x
@synchronous('sync_lock')
def card_prefix(self, end_session=True):
self._debug()
return (None, None)
@synchronous('sync_lock')
def total_space(self, end_session=True):
self._debug()
opcode, result = self._call_client('TOTAL_SPACE', {})
if opcode == 'OK':
return (result['total_space_on_device'], 0, 0)
# protocol error if we get here
return (0, 0, 0)
@synchronous('sync_lock')
def free_space(self, end_session=True):
self._debug()
opcode, result = self._call_client('FREE_SPACE', {})
if opcode == 'OK':
return (result['free_space_on_device'], 0, 0)
# protocol error if we get here
return (0, 0, 0)
@synchronous('sync_lock')
def books(self, oncard=None, end_session=True):
self._debug(oncard)
if oncard is not None:
return CollectionsBookList(None, None, None)
opcode, result = self._call_client('GET_BOOK_COUNT',
{'canStream':True,
'canScan':True,
'willUseCachedMetadata': self.client_can_use_metadata_cache,
'supportsSync': (bool(self.is_read_sync_col) or
bool(self.is_read_date_sync_col)),
'canSupportBookFormatSync': True})
bl = CollectionsBookList(None, self.PREFIX, self.settings)
if opcode == 'OK':
count = result['count']
will_use_cache = self.client_can_use_metadata_cache
if will_use_cache:
books_on_device = []
self._debug('caching. count=', count)
for i in range(0, count):
opcode, result = self._receive_from_client(print_debug_info=False)
books_on_device.append(result)
self._debug('received all books. count=', count)
books_to_send = []
lpaths_on_device = set()
for r in books_on_device:
if r.get('lpath', None):
book = self._metadata_in_cache(r['uuid'], r['lpath'],
r['last_modified'])
else:
book = self._metadata_in_cache(r['uuid'], r['extension'],
r['last_modified'])
if book:
if self.client_cache_uses_lpaths:
lpaths_on_device.add(r.get('lpath'))
bl.add_book_extended(book, replace_metadata=True,
check_for_duplicates=not self.client_cache_uses_lpaths)
book.set('_is_read_', r.get('_is_read_', None))
book.set('_sync_type_', r.get('_sync_type_', None))
book.set('_last_read_date_', r.get('_last_read_date_', None))
book.set('_format_mtime_', r.get('_format_mtime_', None))
else:
books_to_send.append(r['priKey'])
self._debug('processed cache. count=', len(books_on_device))
count_of_cache_items_deleted = 0
if self.client_cache_uses_lpaths:
for lpath in tuple(self.known_metadata.iterkeys()):
if lpath not in lpaths_on_device:
try:
uuid = self.known_metadata[lpath].get('uuid', None)
if uuid is not None:
key = self._make_metadata_cache_key(uuid, lpath)
self.device_book_cache.pop(key, None)
self.known_metadata.pop(lpath, None)
count_of_cache_items_deleted += 1
except:
self._debug('Exception while deleting book from caches', lpath)
traceback.print_exc()
self._debug('removed', count_of_cache_items_deleted, 'books from caches')
count = len(books_to_send)
self._debug('caching. Need count from device', count)
self._call_client('NOOP', {'count': count},
print_debug_info=False, wait_for_response=False)
for priKey in books_to_send:
self._call_client('NOOP', {'priKey':priKey},
print_debug_info=False, wait_for_response=False)
for i in range(0, count):
if (i % 100) == 0:
self._debug('getting book metadata. Done', i, 'of', count)
opcode, result = self._receive_from_client(print_debug_info=False)
if opcode == 'OK':
try:
if '_series_sort_' in result:
del result['_series_sort_']
book = self.json_codec.raw_to_book(result, SDBook, self.PREFIX)
book.set('_is_read_', result.get('_is_read_', None))
book.set('_sync_type_', result.get('_sync_type_', None))
book.set('_last_read_date_', result.get('_last_read_date_', None))
bl.add_book_extended(book, replace_metadata=True,
check_for_duplicates=not self.client_cache_uses_lpaths)
if '_new_book_' in result:
book.set('_new_book_', True)
else:
self._set_known_metadata(book)
except:
self._debug('exception retrieving metadata for book', result.get('title', 'Unknown'))
traceback.print_exc()
else:
raise ControlError(desc='book metadata not returned')
total = 0
for book in bl:
if book.get('_new_book_', None):
total += 1
count = 0
for book in bl:
if book.get('_new_book_', None):
paths = [book.lpath]
self._set_known_metadata(book, remove=True)
self.prepare_addable_books(paths, this_book=count, total_books=total)
book.smart_update(self._read_file_metadata(paths[0]))
del book._new_book_
count += 1
self._debug('finished getting book metadata')
return bl
@synchronous('sync_lock')
def sync_booklists(self, booklists, end_session=True):
colattrs = [x.strip() for x in
self.settings().extra_customization[self.OPT_COLLECTIONS].split(',')]
self._debug('collection attributes', colattrs)
coldict = {}
if colattrs:
collections = booklists[0].get_collections(colattrs)
for k,v in collections.iteritems():
lpaths = []
for book in v:
lpaths.append(book.lpath)
coldict[k] = lpaths
# If we ever do device_db plugboards, this is where it will go. We will
# probably need to send two booklists, one with calibre's data that is
# given back by "books", and one that has been plugboarded.
books_to_send = []
for book in booklists[0]:
if (book.get('_force_send_metadata_', None) or
not self._metadata_already_on_device(book)):
books_to_send.append(book)
count = len(books_to_send)
self._call_client('SEND_BOOKLISTS', {'count': count,
'collections': coldict,
'willStreamMetadata': True,
'supportsSync': (bool(self.is_read_sync_col) or
bool(self.is_read_date_sync_col))},
wait_for_response=False)
if count:
for i,book in enumerate(books_to_send):
self._debug('sending metadata for book', book.lpath, book.title)
self._set_known_metadata(book)
opcode, result = self._call_client(
'SEND_BOOK_METADATA',
{'index': i, 'count': count, 'data': book,
'supportsSync': (bool(self.is_read_sync_col) or
bool(self.is_read_date_sync_col))},
print_debug_info=False,
wait_for_response=False)
if not self.have_bad_sync_columns:
# Update the local copy of the device's read info just in case
# the device is re-synced. This emulates what happens on the device
# when the metadata is received.
try:
if bool(self.is_read_sync_col):
book.set('_is_read_', book.get(self.is_read_sync_col, None))
except:
self._debug('failed to set local copy of _is_read_')
traceback.print_exc()
try:
if bool(self.is_read_date_sync_col):
book.set('_last_read_date_',
book.get(self.is_read_date_sync_col, None))
except:
self._debug('failed to set local copy of _last_read_date_')
traceback.print_exc()
# Write the cache here so that if we are interrupted on disconnect then the
# almost-latest info will be available.
self._write_metadata_cache()
@synchronous('sync_lock')
def eject(self):
self._debug()
self._close_device_socket()
@synchronous('sync_lock')
def post_yank_cleanup(self):
self._debug()
@synchronous('sync_lock')
def upload_books(self, files, names, on_card=None, end_session=True,
metadata=None):
if self.settings().extra_customization[self.OPT_EXTRA_DEBUG]:
self._debug(names)
else:
self._debug()
paths = []
names = iter(names)
metadata = iter(metadata)
for i, infile in enumerate(files):
mdata, fname = metadata.next(), names.next()
lpath = self._create_upload_path(mdata, fname, create_dirs=False)
self._debug('lpath', lpath)
if not hasattr(infile, 'read'):
infile = USBMS.normalize_path(infile)
book = SDBook(self.PREFIX, lpath, other=mdata)
length, lpath = self._put_file(infile, lpath, book, i, len(files))
if length < 0:
raise ControlError(desc='Sending book %s to device failed' % lpath)
paths.append((lpath, length))
# No need to deal with covers. The client will get the thumbnails
# in the mi structure
self.report_progress((i + 1) / float(len(files)), _('Transferring books to device...'))
self.report_progress(1.0, _('Transferring books to device...'))
self._debug('finished uploading %d books' % (len(files)))
return paths
@synchronous('sync_lock')
def add_books_to_metadata(self, locations, metadata, booklists):
self._debug('adding metadata for %d books' % (len(metadata)))
metadata = iter(metadata)
for i, location in enumerate(locations):
self.report_progress((i + 1) / float(len(locations)),
_('Adding books to device metadata listing...'))
info = metadata.next()
lpath = location[0]
length = location[1]
lpath = self._strip_prefix(lpath)
book = SDBook(self.PREFIX, lpath, other=info)
if book.size is None:
book.size = length
b = booklists[0].add_book(book, replace_metadata=True)
if b:
b._new_book = True
from calibre.utils.date import isoformat, now
b.set('_format_mtime_', isoformat(now()))
self.report_progress(1.0, _('Adding books to device metadata listing...'))
self._debug('finished adding metadata')
@synchronous('sync_lock')
def delete_books(self, paths, end_session=True):
if self.settings().extra_customization[self.OPT_EXTRA_DEBUG]:
self._debug(paths)
else:
self._debug()
new_paths = []
for path in paths:
new_paths.append(self._strip_prefix(path))
opcode, result = self._call_client('DELETE_BOOK', {'lpaths': new_paths})
for i in range(0, len(new_paths)):
opcode, result = self._receive_from_client(False)
self._debug('removed book with UUID', result['uuid'])
self._debug('removed', len(new_paths), 'books')
@synchronous('sync_lock')
def remove_books_from_metadata(self, paths, booklists):
if self.settings().extra_customization[self.OPT_EXTRA_DEBUG]:
self._debug(paths)
else:
self._debug()
for i, path in enumerate(paths):
path = self._strip_prefix(path)
self.report_progress((i + 1) / float(len(paths)), _('Removing books from device metadata listing...'))
for bl in booklists:
for book in bl:
if path == book.path:
bl.remove_book(book)
self._set_known_metadata(book, remove=True)
self.report_progress(1.0, _('Removing books from device metadata listing...'))
self._debug('finished removing metadata for %d books' % (len(paths)))
@synchronous('sync_lock')
def get_file(self, path, outfile, end_session=True, this_book=None, total_books=None):
if self.settings().extra_customization[self.OPT_EXTRA_DEBUG]:
self._debug(path)
else:
self._debug()
eof = False
position = 0
while not eof:
opcode, result = self._call_client('GET_BOOK_FILE_SEGMENT',
{'lpath' : path, 'position': position,
'thisBook': this_book, 'totalBooks': total_books,
'canStream':True, 'canStreamBinary': True},
print_debug_info=False)
if opcode == 'OK':
length = result.get('fileLength')
remaining = length
while remaining > 0:
v = self._read_binary_from_net(min(remaining, self.max_book_packet_len))
outfile.write(v)
remaining -= len(v)
eof = True
else:
raise ControlError(desc='request for book data failed')
@synchronous('sync_lock')
def prepare_addable_books(self, paths, this_book=None, total_books=None):
for idx, path in enumerate(paths):
(ign, ext) = os.path.splitext(path)
with PersistentTemporaryFile(suffix=ext) as tf:
self.get_file(path, tf, this_book=this_book, total_books=total_books)
paths[idx] = tf.name
tf.name = path
return paths
@synchronous('sync_lock')
def set_plugboards(self, plugboards, pb_func):
self._debug()
self.plugboards = plugboards
self.plugboard_func = pb_func
@synchronous('sync_lock')
def set_library_info(self, library_name, library_uuid, field_metadata):
self._debug(library_name, library_uuid)
if self.can_accept_library_info:
self._call_client('SET_LIBRARY_INFO',
{'libraryName' : library_name,
'libraryUuid': library_uuid,
'fieldMetadata': field_metadata.all_metadata()},
print_debug_info=True)
@synchronous('sync_lock')
def specialize_global_preferences(self, device_prefs):
device_prefs.set_overrides(manage_device_metadata='on_connect')
def _show_message(self, message):
self._call_client("DISPLAY_MESSAGE",
{'messageKind': self.MESSAGE_SHOW_TOAST,
'message': message})
def _check_if_format_send_needed(self, db, id_, book):
if not self.will_ask_for_update_books:
return (None, False)
from calibre.utils.date import parse_date, isoformat
try:
if not hasattr(book, '_format_mtime_'):
return (None, False)
ext = posixpath.splitext(book.lpath)[1][1:]
fmt_metadata = db.new_api.format_metadata(id_, ext)
if fmt_metadata:
calibre_mtime = fmt_metadata['mtime']
if calibre_mtime > self.now:
if not self.have_sent_future_dated_book_message:
self.have_sent_future_dated_book_message = True
self._show_message(_('You have book formats in your library '
'with dates in the future. See calibre '
'for details'))
return (None, True)
cc_mtime = parse_date(book.get('_format_mtime_'), as_utc=True)
self._debug(book.title, 'cal_mtime', calibre_mtime, 'cc_mtime', cc_mtime)
if cc_mtime < calibre_mtime:
book.set('_format_mtime_', isoformat(self.now))
return (posixpath.basename(book.lpath), False)
except:
self._debug('exception checking if must send format', book.title)
traceback.print_exc()
return (None, False)
@synchronous('sync_lock')
def synchronize_with_db(self, db, id_, book, first_call):
from calibre.utils.date import parse_date, is_date_undefined, now
if first_call:
self.have_sent_future_dated_book_message = False
self.now = now()
if self.have_bad_sync_columns or not (self.is_read_sync_col or
self.is_read_date_sync_col):
# Not syncing or sync columns are invalid
return (None, self._check_if_format_send_needed(db, id_, book))
# Check the validity of the columns once per connection. We do it
# here because we have access to the db to get field_metadata
if not self.have_checked_sync_columns:
fm = db.field_metadata.custom_field_metadata()
if self.is_read_sync_col:
if self.is_read_sync_col not in fm:
self._debug('is_read_sync_col not in field_metadata')
self._show_message(_("The read sync column %s is "
"not in calibre's library")%self.is_read_sync_col)
self.have_bad_sync_columns = True
elif fm[self.is_read_sync_col]['datatype'] != 'bool':
self._debug('is_read_sync_col not bool type')
self._show_message(_("The read sync column %s is "
"not a Yes/No column")%self.is_read_sync_col)
self.have_bad_sync_columns = True
if self.is_read_date_sync_col:
if self.is_read_date_sync_col not in fm:
self._debug('is_read_date_sync_col not in field_metadata')
self._show_message(_("The read date sync column %s is "
"not in calibre's library")%self.is_read_date_sync_col)
self.have_bad_sync_columns = True
elif fm[self.is_read_date_sync_col]['datatype'] != 'datetime':
self._debug('is_read_date_sync_col not date type')
self._show_message(_("The read date sync column %s is "
"not a Date column")%self.is_read_date_sync_col)
self.have_bad_sync_columns = True
self.have_checked_sync_columns = True
if self.have_bad_sync_columns:
return (None, self._check_if_format_send_needed(db, id_, book))
# if we are marking synced books, clear all the current marks
if self.set_temp_mark_when_syncing_read:
self._debug('clearing temp marks')
db.set_marked_ids(())
sync_type = book.get('_sync_type_', None)
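        # sync_type values, as handled below (summary added for clarity):
        #   3     - the device built the metadata from the book file itself;
        #           never sync it to calibre, instead force calibre's
        #           metadata back to the device.
        #   2     - the user just set up a sync column; the device value wins
        #           when it is not None, otherwise calibre's value wins.
        #   other - the standard case: a device-side change wins, otherwise
        #           calibre's value flows to the device through the normal
        #           modification-date mechanism.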
# We need to check if our attributes are in the book. If they are not
# then this is metadata coming from calibre to the device for the first
# time, in which case we must not sync it.
if hasattr(book, '_is_read_'):
is_read = book.get('_is_read_', None)
has_is_read = True
else:
has_is_read = False
if hasattr(book, '_last_read_date_'):
# parse_date returns UNDEFINED_DATE if the value is None
is_read_date = parse_date(book.get('_last_read_date_', None))
if is_date_undefined(is_read_date):
is_read_date = None
has_is_read_date = True
else:
has_is_read_date = False
force_return_changed_books = False
changed_books = set()
if sync_type == 3:
# The book metadata was built by the device from metadata in the
# book file itself. It must not be synced, because the metadata is
# almost surely wrong. However, the fact that we got here means that
# book matching has succeeded. Arrange that calibre's metadata is
# sent back to the device. This isn't strictly necessary as sending
# back the info will be arranged in other ways.
self._debug('Book with device-generated metadata', book.get('title', 'huh?'))
book.set('_force_send_metadata_', True)
force_return_changed_books = True
elif sync_type == 2:
# This is a special case where the user just set a sync column. In
# this case the device value wins if it is not None, otherwise the
# calibre value wins.
# Check is_read
if has_is_read and self.is_read_sync_col:
try:
calibre_val = db.new_api.field_for(self.is_read_sync_col,
id_, default_value=None)
if is_read is not None:
# The CC value wins. Check if it is different from calibre's
# value to avoid updating the db to the same value
if is_read != calibre_val:
self._debug('special update calibre to is_read',
book.get('title', 'huh?'), 'to', is_read, calibre_val)
changed_books = db.new_api.set_field(self.is_read_sync_col,
{id_: is_read})
if self.set_temp_mark_when_syncing_read:
db.data.toggle_marked_ids({id_})
elif calibre_val is not None:
# Calibre value wins. Force the metadata for the
# book to be sent to the device even if the mod
# dates haven't changed.
self._debug('special update is_read to calibre value',
book.get('title', 'huh?'), 'to', calibre_val)
book.set('_force_send_metadata_', True)
force_return_changed_books = True
except:
self._debug('exception special syncing is_read', self.is_read_sync_col)
traceback.print_exc()
# Check is_read_date.
if has_is_read_date and self.is_read_date_sync_col:
try:
# The db method returns None for undefined dates.
calibre_val = db.new_api.field_for(self.is_read_date_sync_col,
id_, default_value=None)
if is_read_date is not None:
if is_read_date != calibre_val:
self._debug('special update calibre to is_read_date',
book.get('title', 'huh?'), 'to', is_read_date, calibre_val)
changed_books |= db.new_api.set_field(self.is_read_date_sync_col,
{id_: is_read_date})
if self.set_temp_mark_when_syncing_read:
db.data.toggle_marked_ids({id_})
elif calibre_val is not None:
self._debug('special update is_read_date to calibre value',
book.get('title', 'huh?'), 'to', calibre_val)
book.set('_force_send_metadata_', True)
force_return_changed_books = True
except:
self._debug('exception special syncing is_read_date',
self.is_read_sync_col)
traceback.print_exc()
else:
# This is the standard sync case. If the CC value has changed, it
# wins, otherwise the calibre value is synced to CC in the normal
# fashion (mod date)
if has_is_read and self.is_read_sync_col:
try:
orig_is_read = book.get(self.is_read_sync_col, None)
if is_read != orig_is_read:
# The value in the device's is_read checkbox is not the
# same as the last one that came to the device from
# calibre during the last connect, meaning that the user
# changed it. Write the one from the device to calibre's
# db.
self._debug('standard update is_read', book.get('title', 'huh?'),
'to', is_read, 'was', orig_is_read)
changed_books = db.new_api.set_field(self.is_read_sync_col,
{id_: is_read})
if self.set_temp_mark_when_syncing_read:
db.data.toggle_marked_ids({id_})
except:
self._debug('exception standard syncing is_read', self.is_read_sync_col)
traceback.print_exc()
if has_is_read_date and self.is_read_date_sync_col:
try:
orig_is_read_date = book.get(self.is_read_date_sync_col, None)
if is_date_undefined(orig_is_read_date):
orig_is_read_date = None
if is_read_date != orig_is_read_date:
self._debug('standard update is_read_date', book.get('title', 'huh?'),
'to', is_read_date, 'was', orig_is_read_date)
changed_books |= db.new_api.set_field(self.is_read_date_sync_col,
{id_: is_read_date})
if self.set_temp_mark_when_syncing_read:
db.data.toggle_marked_ids({id_})
except:
self._debug('Exception standard syncing is_read_date',
self.is_read_date_sync_col)
traceback.print_exc()
if changed_books or force_return_changed_books:
# One of the two values was synced, giving a (perhaps empty) list of
# changed books. Return that.
return (changed_books, self._check_if_format_send_needed(db, id_, book))
# Nothing was synced. The user might have changed the value in calibre.
# If so, that value will be sent to the device in the normal way. Note
# that because any updated value has already been synced and so will
# also be sent, the device should put the calibre value into its
# checkbox (or whatever it uses)
return (None, self._check_if_format_send_needed(db, id_, book))
@synchronous('sync_lock')
def startup(self):
self.listen_socket = None
self.is_connected = False
@synchronous('sync_lock')
def startup_on_demand(self):
if getattr(self, 'listen_socket', None) is not None:
# we are already running
return
if len(self.opcodes) != len(self.reverse_opcodes):
self._debug(self.opcodes, self.reverse_opcodes)
self.is_connected = False
self.listen_socket = None
self.device_socket = None
self.json_codec = JsonCodec()
self.known_metadata = {}
self.device_book_cache = defaultdict(dict)
self.debug_time = time.time()
self.debug_start_time = time.time()
self.max_book_packet_len = 0
self.noop_counter = 0
self.connection_attempts = {}
self.client_wants_uuid_file_names = False
self.is_read_sync_col = None
self.is_read_date_sync_col = None
self.have_checked_sync_columns = False
self.have_bad_sync_columns = False
self.have_sent_future_dated_book_message = False
self.now = None
message = None
compression_quality_ok = True
try:
cq = int(self.settings().extra_customization[self.OPT_COMPRESSION_QUALITY])
if cq < 50 or cq > 99:
compression_quality_ok = False
else:
self.THUMBNAIL_COMPRESSION_QUALITY = cq
except:
compression_quality_ok = False
if not compression_quality_ok:
self.THUMBNAIL_COMPRESSION_QUALITY = 70
message = _('Bad compression quality setting. It must be a number '
'between 50 and 99. Forced to be %d.')%self.DEFAULT_THUMBNAIL_COMPRESSION_QUALITY
self._debug(message)
self.set_option('thumbnail_compression_quality',
str(self.DEFAULT_THUMBNAIL_COMPRESSION_QUALITY))
try:
self.listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
set_socket_inherit(self.listen_socket, False)
except:
traceback.print_exc()
message = 'creation of listen socket failed'
self._debug(message)
return message
i = 0
if self.settings().extra_customization[self.OPT_USE_PORT]:
try:
opt_port = int(self.settings().extra_customization[self.OPT_PORT_NUMBER])
except:
message = _('Invalid port in options: %s')% \
self.settings().extra_customization[self.OPT_PORT_NUMBER]
self._debug(message)
self._close_listen_socket()
return message
port = self._attach_to_port(self.listen_socket, opt_port)
if port == 0:
message = _('Failed to connect to port %d. Try a different value.')%opt_port
self._debug(message)
self._close_listen_socket()
return message
else:
while i < 100: # try 9090 then up to 99 random port numbers
i += 1
port = self._attach_to_port(self.listen_socket,
9090 if i == 1 else random.randint(8192, 32000))
if port != 0:
break
if port == 0:
message = _('Failed to allocate a random port')
self._debug(message)
self._close_listen_socket()
return message
try:
self.listen_socket.listen(0)
except:
message = 'listen on port %d failed' % port
self._debug(message)
self._close_listen_socket()
return message
try:
ip_addr = self.settings().extra_customization[self.OPT_FORCE_IP_ADDRESS]
publish_zeroconf('calibre smart device client',
'_calibresmartdeviceapp._tcp', port, {},
use_ip_address=ip_addr)
except:
self._debug('registration with bonjour failed')
traceback.print_exc()
self._debug('listening on port', port)
self.port = port
# Now try to open a UDP socket to receive broadcasts on
try:
self.broadcast_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
except:
message = 'creation of broadcast socket failed. This is not fatal.'
self._debug(message)
self.broadcast_socket = None
else:
for p in self.BROADCAST_PORTS:
port = self._attach_to_port(self.broadcast_socket, p)
if port != 0:
self._debug('broadcast socket listening on port', port)
break
if port == 0:
self.broadcast_socket.close()
self.broadcast_socket = None
message = 'attaching port to broadcast socket failed. This is not fatal.'
self._debug(message)
self.connection_queue = Queue.Queue(1)
self.connection_listener = ConnectionListener(self)
self.connection_listener.start()
return message
@synchronous('sync_lock')
def shutdown(self):
self._close_device_socket()
if getattr(self, 'listen_socket', None) is not None:
self.connection_listener.stop()
try:
unpublish_zeroconf('calibre smart device client',
'_calibresmartdeviceapp._tcp', self.port, {})
except:
self._debug('deregistration with bonjour failed')
traceback.print_exc()
self._close_listen_socket()
# Methods for dynamic control
@synchronous('sync_lock')
def is_dynamically_controllable(self):
return 'smartdevice'
@synchronous('sync_lock')
def start_plugin(self):
return self.startup_on_demand()
@synchronous('sync_lock')
def stop_plugin(self):
self.shutdown()
@synchronous('sync_lock')
def get_option(self, opt_string, default=None):
opt = self._get_smartdevice_option_number(opt_string)
if opt is not None:
return self.settings().extra_customization[opt]
return default
@synchronous('sync_lock')
def set_option(self, opt_string, value):
opt = self._get_smartdevice_option_number(opt_string)
if opt is not None:
config = self._configProxy()
ec = config['extra_customization']
ec[opt] = value
config['extra_customization'] = ec
@synchronous('sync_lock')
def is_running(self):
return getattr(self, 'listen_socket', None) is not None
|
import subprocess
from distutils.version import LooseVersion
from unittest import SkipTest
from django.core.cache import cache
from django.test import TestCase
from django.test.utils import override_settings
import weblate.vcs.gpg
from weblate.utils.checks import check_data_writable
from weblate.utils.unittest import tempdir_setting
from weblate.vcs.gpg import (
generate_gpg_key,
get_gpg_key,
get_gpg_public_key,
get_gpg_sign_key,
)
class GPGTest(TestCase):
gpg_error = None
@classmethod
def setUpClass(cls):
"""Check whether we can use gpg."""
super().setUpClass()
try:
result = subprocess.run(
["gpg", "--version"],
check=True,
text=True,
capture_output=True,
)
version = result.stdout.splitlines()[0].strip().rsplit(None, 1)[-1]
if LooseVersion(version) < LooseVersion("2.1"):
cls.gpg_error = "gpg too old"
except (subprocess.CalledProcessError, OSError):
cls.gpg_error = "gpg not found"
def setUp(self):
if self.gpg_error:
raise SkipTest(self.gpg_error)
def check_errors(self):
self.assertEqual(weblate.vcs.gpg.GPG_ERRORS, {})
@tempdir_setting("DATA_DIR")
@override_settings(
WEBLATE_GPG_IDENTITY="Weblate <weblate@example.com>", WEBLATE_GPG_ALGO="rsa512"
)
def test_generate(self):
self.assertEqual(check_data_writable(), [])
self.assertIsNone(get_gpg_key(silent=True))
key = generate_gpg_key()
self.check_errors()
self.assertIsNotNone(key)
self.assertEqual(key, get_gpg_key())
@tempdir_setting("DATA_DIR")
@override_settings(
WEBLATE_GPG_IDENTITY="Weblate <weblate@example.com>", WEBLATE_GPG_ALGO="rsa512"
)
def test_get(self):
self.assertEqual(check_data_writable(), [])
# This will generate new key
key = get_gpg_sign_key()
self.check_errors()
self.assertIsNotNone(key)
# Check cache access
self.assertEqual(key, get_gpg_sign_key())
# Check empty cache
cache.delete("gpg-key-id")
self.assertEqual(key, get_gpg_sign_key())
@tempdir_setting("DATA_DIR")
@override_settings(
WEBLATE_GPG_IDENTITY="Weblate <weblate@example.com>", WEBLATE_GPG_ALGO="rsa512"
)
def test_public(self):
self.assertEqual(check_data_writable(), [])
# This will generate new key
key = get_gpg_public_key()
self.check_errors()
self.assertIsNotNone(key)
# Check cache access
self.assertEqual(key, get_gpg_public_key())
|
import factory
from api import models
class ClientFactory(factory.DjangoModelFactory):
class Meta:
model = models.Client
name = 'Coaxis'
@factory.django.mute_signals(models.post_save)
class UserFactory(factory.DjangoModelFactory):
class Meta:
model = models.MyUser
email = factory.Sequence(lambda n: 'u{0}@coaxis.com'.format(n))
password = factory.PostGenerationMethodCall('set_password', 'password')
is_staff = False
class EmployeeFactory(factory.DjangoModelFactory):
class Meta:
model = models.Employee
user = factory.SubFactory(UserFactory)
is_technician = False
@factory.post_generation
def clients(self, create, extracted, **kwargs):
if not create: # Simple build, do nothing.
return
if extracted: # A list of objects were passed in, use them
for client in extracted:
self.clients.add(client)
class TechnicianFactory(EmployeeFactory):
is_technician = True
class DaemonFactory(factory.DjangoModelFactory):
class Meta:
model = models.Daemon
client = factory.SubFactory(ClientFactory)
|
import logging
import re
import salt.client
from netaddr import IPNetwork, IPAddress
log = logging.getLogger(__name__)
def ping(cluster = None, exclude = None, **kwargs):
"""
Ping all addresses from all addresses on all minions. If cluster is passed,
restrict addresses to public and cluster networks.
Note: Some optimizations could be done here in the multi module (such as
skipping the source and destination when they are the same). However, the
unoptimized version is taking ~2.5 seconds on 18 minions with 72 addresses
for success. Failures take between 6 to 12 seconds. Optimizations should
focus there.
TODO: Convert commented out print statements to log.debug
CLI Example: (Before DeepSea with a cluster configuration)
.. code-block:: bash
sudo salt-run net.ping
or you can run it with exclude
.. code-block:: bash
sudo salt-run net.ping exclude="E@host*,host-osd-name*,192.168.1.1"
(After DeepSea with a cluster configuration)
.. code-block:: bash
sudo salt-run net.ping cluster=ceph
sudo salt-run net.ping ceph
"""
exclude_string = exclude_iplist = None
if exclude:
exclude_string, exclude_iplist = _exclude_filter(exclude)
extra_kwargs = _skip_dunder(kwargs)
if _skip_dunder(kwargs):
print "Unsupported parameters: {}".format(" ,".join(extra_kwargs.keys()))
text = re.sub(re.compile("^ {12}", re.MULTILINE), "", '''
salt-run net.ping [cluster] [exclude]
Ping all addresses from all addresses on all minions.
If cluster is specified, restrict addresses to cluster and public networks.
If exclude is specified, remove matching addresses. See Salt compound matchers.
within exclude individual ip address will be remove a specific target interface
instead of ping from, the ping to interface will be removed
Examples:
salt-run net.ping
salt-run net.ping ceph
salt-run net.ping ceph L@mon1.ceph
salt-run net.ping cluster=ceph exclude=L@mon1.ceph
salt-run net.ping exclude=S@192.168.21.254
salt-run net.ping exclude=S@192.168.21.0/29
salt-run net.ping exclude="E@host*,host-osd-name*,192.168.1.1"
''')
print text
return
local = salt.client.LocalClient()
if cluster:
search = "I@cluster:{}".format(cluster)
if exclude_string:
search += " and not ( " + exclude_string + " )"
log.debug( "ping: search {} ".format(search))
networks = local.cmd(search , 'pillar.item', [ 'cluster_network', 'public_network' ], expr_form="compound")
#print networks
total = local.cmd(search , 'grains.get', [ 'ipv4' ], expr_form="compound")
#print addresses
addresses = []
for host in sorted(total.iterkeys()):
if 'cluster_network' in networks[host]:
addresses.extend(_address(total[host], networks[host]['cluster_network']))
if 'public_network' in networks[host]:
addresses.extend(_address(total[host], networks[host]['public_network']))
else:
search = "*"
if exclude_string:
search += " and not ( " + exclude_string + " )"
log.debug( "ping: search {} ".format(search))
addresses = local.cmd(search , 'grains.get', [ 'ipv4' ], expr_form="compound")
addresses = _flatten(addresses.values())
# Lazy loopback removal - use ipaddress when adding IPv6
try:
if addresses:
addresses.remove('127.0.0.1')
if exclude_iplist:
for ex_ip in exclude_iplist:
log.debug( "ping: removing {} ip ".format(ex_ip))
addresses.remove(ex_ip)
except ValueError:
log.debug( "ping: remove {} ip doesn't exist".format(ex_ip))
pass
#print addresses
results = local.cmd(search, 'multi.ping', addresses, expr_form="compound")
#print results
_summarize(len(addresses), results)
def _address(addresses, network):
"""
Return all addresses in the given network
Note: list comprehension vs. netaddr vs. simple
"""
matched = []
for address in addresses:
if IPAddress(address) in IPNetwork(network):
matched.append(address)
return matched
def _exclude_filter(excluded):
"""
Internal exclude_filter return string in compound format
Compound format = {'G': 'grain', 'P': 'grain_pcre', 'I': 'pillar',
'J': 'pillar_pcre', 'L': 'list', 'N': None,
'S': 'ipcidr', 'E': 'pcre'}
IPV4 address = "255.255.255.255"
hostname = "myhostname"
"""
log.debug( "_exclude_filter: excluding {}".format(excluded))
excluded = excluded.split(",")
log.debug( "_exclude_filter: split ',' {}".format(excluded))
pattern_compound = re.compile("^.*([GPIJLNSE]\@).*$")
pattern_iplist = re.compile( "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$" )
pattern_ipcidr = re.compile( "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/([0-9]|[1-2][0-9]|3[0-2]))$")
pattern_hostlist = re.compile( "^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9-]*[a-zA-Z0-9]).)*([A-Za-z]|[A-Za-z][A-Za-z0-9-]*[A-Za-z0-9])$")
compound = []
ipcidr = []
iplist = []
hostlist = []
regex_list = []
for para in excluded:
if pattern_compound.match(para):
log.debug( "_exclude_filter: Compound {}".format(para))
compound.append(para)
elif pattern_iplist.match(para):
log.debug( "_exclude_filter: ip {}".format(para))
iplist.append(para)
elif pattern_ipcidr.match(para):
log.debug( "_exclude_filter: ipcidr {}".format(para))
ipcidr.append("S@"+para)
elif pattern_hostlist.match(para):
hostlist.append("L@"+para)
log.debug( "_exclude_filter: hostname {}".format(para))
else:
regex_list.append("E@"+para)
log.debug( "_exclude_filter: not sure but likely Regex host {}".format(para))
#if ipcidr:
# log.debug("_exclude_filter ip subnet is not working yet ... = {}".format(ipcidr))
new_compound_excluded = " or ".join(compound + hostlist + regex_list + ipcidr)
log.debug("_exclude_filter new formed compound excluded list = {}".format(new_compound_excluded))
if new_compound_excluded and iplist:
return new_compound_excluded, iplist
elif new_compound_excluded:
return new_compound_excluded, None
elif iplist:
return None, iplist
else:
return None, None
def _flatten(l):
"""
Flatten a array of arrays
"""
log.debug( "_flatten: {}".format(l))
return list(set(item for sublist in l for item in sublist))
def _summarize(total, results):
"""
Summarize the successes, failures and errors across all minions
"""
success = []
failed = []
errored = []
slow = []
log.debug( "_summarize: results {}".format(results))
for host in sorted(results.iterkeys()):
if results[host]['succeeded'] == total:
success.append(host)
if 'failed' in results[host]:
failed.append("{} from {}".format(results[host]['failed'], host))
if 'errored' in results[host]:
errored.append("{} from {}".format(results[host]['errored'], host))
if 'slow' in results[host]:
slow.append("{} from {} average rtt {}".format(results[host]['slow'], host, "{0:.2f}".format(results[host]['avg'])))
if success:
avg = sum( results[host].get('avg') for host in results) / len(results)
else:
avg = 0
print "Succeeded: {} addresses from {} minions average rtt {} ms".format(total, len(success), "{0:.2f}".format(avg))
if slow:
print "Warning: \n {}".format("\n ".join(slow))
if failed:
print "Failed: \n {}".format("\n ".join(failed))
if errored:
print "Errored: \n {}".format("\n ".join(errored))
def _skip_dunder(settings):
"""
Skip double underscore keys
"""
return {k:v for k,v in settings.iteritems() if not k.startswith('__')}
|
"DiskCache: disk and file backed cache."
from .core import Cache, Disk, UnknownFileWarning, EmptyDirWarning, Timeout
from .core import DEFAULT_SETTINGS, EVICTION_POLICY
from .fanout import FanoutCache
from .persistent import Deque, Index
__all__ = [
'Cache',
'Disk',
'UnknownFileWarning',
'EmptyDirWarning',
'Timeout',
'DEFAULT_SETTINGS',
'EVICTION_POLICY',
'FanoutCache',
'Deque',
'Index',
]
try:
from .djangocache import DjangoCache # pylint: disable=wrong-import-position
__all__.append('DjangoCache')
except Exception: # pylint: disable=broad-except
# Django not installed or not setup so ignore.
pass
__title__ = 'diskcache'
__version__ = '2.9.0'
__build__ = 0x020900
__author__ = 'Grant Jenks'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2016 Grant Jenks'
|
from .models import *
from django.contrib import admin
from django.db import models
from website.base.form import TinyMCEAdminMixin
from django.utils.translation import ugettext_lazy as _
from mediastore.admin import ModelAdmin
class SessionAdmin(TinyMCEAdminMixin, ModelAdmin):
list_display = ('title','day_of_week','cost','is_public','is_featured','sort_value',)
list_editable = ('is_public','is_featured','sort_value')
fieldsets = (
(_('Session'), {
'fields':(
'title',
'list_description',
'day_of_week',
'club',
)
}),
(_('Description'),{
'fields':(
'description',
)
}),
(_('Cost'),{
'fields':(
'cost',
)
}),
(_('Location'),{
'fields':(
'location',
)
}),
(_('Settings'), {
'fields':(
'sort_value',
'is_public',
'is_featured',
)
})
)
admin.site.register(Session,SessionAdmin)
|
from cuon.Databases.SingleData import SingleData
import logging
import pygtk
pygtk.require('2.0')
import gtk
import gtk.glade
import gobject
class SingleProposalMisc(SingleData):
def __init__(self, allTables):
SingleData.__init__(self)
# tables.dbd and address
self.sNameOfTable = "proposalmisc"
self.xmlTableDef = 0
# self.loadTable()
# self.saveTable()
self.loadTable(allTables)
#self.setStore( gtk.ListStore(gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_UINT) )
#self.listHeader['names'] = ['number', 'designation', 'ID']
#self.listHeader['size'] = [25,10,25,25,10]
#print "number of Columns "
#print len(self.table.Columns)
#
self.ordernumber = 0
#self.statusfields = ['lastname', 'firstname']
def readNonWidgetEntries(self, dicValues):
print 'readNonWidgetEntries(self) by SingleorderGets'
dicValues['orderid'] = [self.ordernumber, 'int']
return dicValues
|
import wx
import eos.db
import gui.mainFrame
from gui import globalEvents as GE
from gui.fitCommands.calc.module.projectedAdd import CalcAddProjectedModuleCommand
from gui.fitCommands.helpers import InternalCommandHistory, ModuleInfo
from service.fit import Fit
class GuiAddProjectedModuleCommand(wx.Command):
def __init__(self, fitID, itemID):
wx.Command.__init__(self, True, 'Add Projected Module')
self.internalHistory = InternalCommandHistory()
self.fitID = fitID
self.itemID = itemID
def Do(self):
cmd = CalcAddProjectedModuleCommand(fitID=self.fitID, modInfo=ModuleInfo(itemID=self.itemID))
success = self.internalHistory.submit(cmd)
sFit = Fit.getInstance()
if cmd.needsGuiRecalc:
eos.db.flush()
sFit.recalc(self.fitID)
sFit.fill(self.fitID)
eos.db.commit()
wx.PostEvent(gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,)))
return success
def Undo(self):
success = self.internalHistory.undoAll()
eos.db.flush()
sFit = Fit.getInstance()
sFit.recalc(self.fitID)
sFit.fill(self.fitID)
eos.db.commit()
wx.PostEvent(gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,)))
return success
|
from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns(
'',
url(r'^$', views.home, name='home'),
url(r'^(?P<pk>\d+)/edit/$', views.edit, name='edit'),
url(r'^new/$', views.new, name='new'),
)
|
'''u413 - an open-source BBS/transmit/PI-themed forum
Copyright (C) 2012 PiMaster
Copyright (C) 2012 EnKrypt
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.'''
import command
import user
import database as db
import util
import bbcode
def user_id(uname):
user=db.query("SELECT username,id FROM users WHERE LCASE(username)='%s';"%db.escape(uname.lower()))
if len(user)==0:
return None
return int(user[0]["id"])
def user_exists(uname):
user=user_id(uname)
if user==None:
return False
return True
def nmsg_func(args,u413):
if "step" in u413.cmddata:
if u413.cmddata["step"]==1:
u413.cmddata["step"]=2
args=args.strip().split()[0]
to=user_id(args)
if to==None:
u413.type('"%s" is not a u413 user.'%args)
return
u413.cmddata["to"]=to
u413.type("Enter the topic:")
u413.set_context("TOPIC")
u413.continue_cmd()
elif u413.cmddata["step"]==2:
u413.cmddata["step"]=3
u413.cmddata["topic"]=args
u413.type("Enter your message:")
u413.set_context("MESSAGE")
u413.continue_cmd()
elif u413.cmddata["step"]==3:
db.query("INSERT INTO messages(sender,receiver,topic,msg,sent,seen) VALUES(%i,%i,'%s','%s',NOW(),FALSE);"%(u413.user.userid,u413.cmddata["to"],db.escape(u413.cmddata["topic"]),db.escape(args)))
u413.type("Message sent.")
u413.set_context('')
else:
params=args.split(' ',1)
if len(args)==0:
u413.cmddata["step"]=1
u413.type("Enter the receiver:")
u413.set_context("USER")
u413.continue_cmd()
elif len(params)==1:
u413.cmddata["step"]=2
args=params[0].strip().split()[0]
to=user_id(args)
if to==None:
u413.type('"%s" is not a u413 user.'%args)
return
u413.cmddata["to"]=to
u413.type("Enter the topic:")
u413.set_context("TOPIC")
u413.continue_cmd()
else:
u413.cmddata["step"]=3
args=params[0].strip().split()[0]
to=user_id(args)
if to==None:
u413.type('"%s" is not a u413 user.'%args)
return
u413.cmddata["to"]=to
u413.cmddata["topic"]=params[1]
u413.type("Enter your message:")
u413.set_context("MESSAGE")
u413.continue_cmd()
command.Command("NEWMESSAGE","[user [topic]]",{"id":"The ID of the PM"},"Sends a private message to another user.",nmsg_func,user.User.member)
|
from .backend import RegionsjobBackend
__all__ = ['RegionsjobBackend']
|
import pypom
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from features.pages.common import CommonPageMixin
from features.fields.fields import InputField, SelectField, ButtonField
class SearchEntityPage(CommonPageMixin, pypom.Page):
URL_TEMPLATE = '/entities/'
acronym = InputField(By.ID, 'id_acronym')
title = InputField(By.ID, 'id_title')
entity_type = SelectField(By.ID, "id_entity_type")
search = ButtonField(By.ID, "bt_submit_entity_search")
def find_acronym_in_table(self, row: int = 1):
return self.find_element(By.ID, 'td_entity_%d' % row).text
class SearchOrganizationPage(CommonPageMixin, pypom.Page):
URL_TEMPLATE = '/organizations/'
acronym = InputField(By.ID, 'id_acronym')
name = InputField(By.ID, 'id_name')
type = SelectField(By.ID, "id_type")
search = ButtonField(By.ID, "bt_submit_organization_search")
def find_acronym_in_table(self, row: int = 1):
return self.find_element(By.ID, 'td_organization_%d' % row).text
class SearchStudentPage(CommonPageMixin, pypom.Page):
URL_TEMPLATE = '/students/'
registration_id = InputField(By.ID, 'id_registration_id')
name = InputField(By.ID, 'id_name')
search = ButtonField(By.ID, "bt_submit_student_search")
def find_registration_id_in_table(self, row: int = 1):
return self.find_element(By.ID, 'td_student_%d' % row).text
def find_name_in_table(self):
names = []
row = 1
last = False
while not last:
try:
elt = self.find_element(By.ID, 'spn_student_name_%d' % row)
names.append(elt.text)
row += 1
except NoSuchElementException as e:
return names
return names
|
import unittest
from .volume_cubic_inches_to_metric import cubic_inches_to_metric
class VolumeTestCase(unittest.TestCase):
def test(self):
text = (
"Total volume is 100.50 cubic inches for this land. "
"Total volume is 15.7 cubic in for this land. "
"Total volume is 1 Cubic Inch for this land. "
"Total volume is 1-16 cu-in for this land. "
"Total volume is 1-16 cb. in for this land. "
"Total volume is 16.7-Cubic-in for this land. "
"Total volume is 16,500-cu. in. for this land. "
)
item = {"body_html": text}
res, diff = cubic_inches_to_metric(item)
self.assertEqual(diff["100.50 cubic inches"], "100.50 cubic inches (1,647 cubic centimeter)")
self.assertEqual(diff["15.7 cubic in"], "15.7 cubic in (257.3 cubic centimeter)")
self.assertEqual(diff["1 Cubic Inch"], "1 Cubic Inch (16 cubic centimeter)")
self.assertEqual(diff["1-16 cu-in"], "1-16 cu-in (16-262 cubic centimeter)")
self.assertEqual(diff["1-16 cb. in"], "1-16 cb. in (16-262 cubic centimeter)")
self.assertEqual(diff["16.7-Cubic-in"], "16.7-Cubic-in (273.7 cubic centimeter)")
self.assertEqual(diff["16,500-cu. in"], "16,500-cu. in (0.3 cubic meter)")
self.assertEqual(res["body_html"], item["body_html"])
|
from rest_framework import serializers
from models import SurveyDraft
from taggit.models import Tag
class WritableJSONField(serializers.Field):
""" Serializer for JSONField -- required to make field writable"""
""" ALSO REQUIRED because the default JSONField serialization includes the
`u` prefix on strings when running Django 1.8, resulting in invalid JSON
"""
def __init__(self, **kwargs):
self.allow_blank= kwargs.pop('allow_blank', False)
super(WritableJSONField, self).__init__(**kwargs)
def to_internal_value(self, data):
if (not data) and (not self.required):
return None
else:
try:
return json.loads(data)
except Exception as e:
raise serializers.ValidationError(
u'Unable to parse JSON: {}'.format(e))
def to_representation(self, value):
return value
class ListSurveyDraftSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = SurveyDraft
fields = ('id', 'name', 'asset_type', 'summary', 'date_modified', 'description')
summary = WritableJSONField(required=False)
class DetailSurveyDraftSerializer(serializers.HyperlinkedModelSerializer):
tags = serializers.SerializerMethodField('get_tag_names')
summary = WritableJSONField(required=False)
class Meta:
model = SurveyDraft
fields = ('id', 'name', 'body', 'summary', 'date_modified', 'description', 'tags')
def get_tag_names(self, obj):
return obj.tags.names()
class TagSerializer(serializers.HyperlinkedModelSerializer):
count = serializers.SerializerMethodField()
label = serializers.CharField(source='name')
class Meta:
model = Tag
fields = ('id', 'label', 'count')
def get_count(self, obj):
return SurveyDraft.objects.filter(tags__name__in=[obj.name])\
.filter(user=self.context.get('request', None).user)\
.filter(asset_type='question')\
.count()
|
"""
This file blocks all the routes defined automatically by cms_form.
"""
from odoo import http
from odoo.addons.cms_form.controllers.main import (
CMSFormController,
CMSWizardFormController,
CMSSearchFormController,
)
class UwantedCMSFormController(CMSFormController):
@http.route()
def cms_form(self, model, model_id=None, **kw):
return http.request.render("website.404")
class UnwantedCMSWizardFormController(CMSWizardFormController):
@http.route()
def cms_wiz(self, wiz_model, model_id=None, **kw):
return http.request.render("website.404")
class UnwantedCMSSearchFormController(CMSSearchFormController):
@http.route()
def cms_form(self, model, **kw):
return http.request.render("website.404")
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('orentapp', '0016_auto_20150422_1803'),
]
operations = [
migrations.AddField(
model_name='product',
name='step',
field=models.DecimalField(max_digits=8, null=True, decimal_places=2),
preserve_default=True,
),
]
|
from openerp import fields, models,osv
from base_olims_model import BaseOLiMSModel
from openerp.tools.translate import _
from fields.string_field import StringField
from fields.text_field import TextField
from fields.widget.widget import TextAreaWidget
schema = (StringField('Title',
required=1,
),
TextField('Description',
widget=TextAreaWidget(
label=_('Description'),
description=_('Used in item listings and search results.')),
),
fields.One2many('olims.instrument',
'Type',
string='Type')
)
class InstrumentType(models.Model, BaseOLiMSModel):#(BaseContent):
_name = 'olims.instrument_type'
_rec_name = 'Title'
InstrumentType.initialze(schema)
|
import mysite.profile.models
import django.contrib.auth.models
for user in django.contrib.auth.models.User.objects.all():
user.email = ''
user.password = ''
user.save()
for pfe in mysite.profile.models.PortfolioEntry.objects.all():
if pfe.is_deleted:
pfe.delete()
for citation in mysite.profile.models.Citation.objects.all():
if citation.is_deleted:
citation.delete()
for wr in mysite.customs.models.WebResponse.objects.all():
wr.delete()
|
from superdesk.resource import Resource
from content_api import MONGO_PREFIX
class CompaniesResource(Resource):
"""
Company schema
"""
schema = {
"name": {"type": "string", "unique": True, "required": True},
"sd_subscriber_id": {"type": "string"},
"is_enabled": {"type": "boolean", "default": True},
"contact_name": {"type": "string"},
"phone": {"type": "string"},
"country": {"type": "string"},
}
datasource = {"source": "companies", "default_sort": [("name", 1)]}
item_methods = ["GET", "PATCH", "PUT"]
resource_methods = ["GET", "POST"]
mongo_prefix = MONGO_PREFIX
|
from . import computed_purchase_order
|
"""Mercator proposal."""
from adhocracy_core.resources import add_resource_type_to_registry
from adhocracy_core.resources import process
from adhocracy_core.resources import proposal
from adhocracy_core.sheets.geo import IPoint
from adhocracy_core.sheets.geo import ILocationReference
from adhocracy_core.sheets.image import IImageReference
import adhocracy_meinberlin.sheets.kiezkassen
class IProposalVersion(proposal.IProposalVersion):
"""Kiezkassen proposal version."""
proposal_version_meta = proposal.proposal_version_meta._replace(
iresource=IProposalVersion,
)._add(extended_sheets=(adhocracy_meinberlin.sheets.kiezkassen.IProposal,
IPoint))
class IProposal(proposal.IProposal):
"""Kiezkassen proposal versions pool."""
proposal_meta = proposal.proposal_meta._replace(
iresource=IProposal,
element_types=(IProposalVersion,),
item_type=IProposalVersion,
)
class IProcess(process.IProcess):
"""Kiezkassen participation process."""
process_meta = process.process_meta._replace(
content_name='KiezkassenProcess',
iresource=IProcess,
element_types=(IProposal,
),
is_implicit_addable=True,
extended_sheets=(
ILocationReference,
IImageReference,
),
default_workflow='kiezkassen',
)
def includeme(config):
"""Add resource type to content."""
add_resource_type_to_registry(proposal_meta, config)
add_resource_type_to_registry(proposal_version_meta, config)
add_resource_type_to_registry(process_meta, config)
|
import sys
from pts.core.simulation.simulation import createsimulations
from pts.core.plot.wavemovie import makewavemovie
xlim = None
ylim = None
from_percentile = 30
to_percentile = 100
print "Starting makewavemovie..."
argument = sys.argv[1] if len(sys.argv) > 1 else ""
for simulation in createsimulations(argument):
makewavemovie(simulation, xlim=xlim, ylim=ylim, from_percentile=from_percentile, to_percentile=to_percentile)
print "Finished makewavemovie"
|
import copy
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django_countries import countries
import accounts
import third_party_auth
from edxmako.shortcuts import marketing_link
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.user_api.helpers import FormDescription
from openedx.features.enterprise_support.api import enterprise_customer_for_request
from student.forms import get_registration_extension_form
from student.models import UserProfile
def get_password_reset_form():
"""Return a description of the password reset form.
This decouples clients from the API definition:
if the API decides to modify the form, clients won't need
to be updated.
See `user_api.helpers.FormDescription` for examples
of the JSON-encoded form description.
Returns:
HttpResponse
"""
form_desc = FormDescription("post", reverse("password_change_request"))
# Translators: This label appears above a field on the password reset
# form meant to hold the user's email address.
email_label = _(u"Email")
# Translators: This example email address is used as a placeholder in
# a field on the password reset form meant to hold the user's email address.
email_placeholder = _(u"username@domain.com")
# Translators: These instructions appear on the password reset form,
# immediately below a field meant to hold the user's email address.
email_instructions = _(u"The email address you used to register with {platform_name}").format(
platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
)
form_desc.add_field(
"email",
field_type="email",
label=email_label,
placeholder=email_placeholder,
instructions=email_instructions,
restrictions={
"min_length": accounts.EMAIL_MIN_LENGTH,
"max_length": accounts.EMAIL_MAX_LENGTH,
}
)
return form_desc
def get_login_session_form():
"""Return a description of the login form.
This decouples clients from the API definition:
if the API decides to modify the form, clients won't need
to be updated.
See `user_api.helpers.FormDescription` for examples
of the JSON-encoded form description.
Returns:
HttpResponse
"""
form_desc = FormDescription("post", reverse("user_api_login_session"))
# Translators: This label appears above a field on the login form
# meant to hold the user's email address.
email_label = _(u"Email")
# Translators: This example email address is used as a placeholder in
# a field on the login form meant to hold the user's email address.
email_placeholder = _(u"username@domain.com")
# Translators: These instructions appear on the login form, immediately
# below a field meant to hold the user's email address.
email_instructions = _("The email address you used to register with {platform_name}").format(
platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
)
form_desc.add_field(
"email",
field_type="email",
label=email_label,
placeholder=email_placeholder,
instructions=email_instructions,
restrictions={
"min_length": accounts.EMAIL_MIN_LENGTH,
"max_length": accounts.EMAIL_MAX_LENGTH,
}
)
# Translators: This label appears above a field on the login form
# meant to hold the user's password.
password_label = _(u"Password")
form_desc.add_field(
"password",
label=password_label,
field_type="password",
restrictions={
"max_length": accounts.PASSWORD_MAX_LENGTH,
}
)
form_desc.add_field(
"remember",
field_type="checkbox",
label=_("Remember me"),
default=False,
required=False,
)
return form_desc
class RegistrationFormFactory(object):
"""HTTP end-points for creating a new user. """
DEFAULT_FIELDS = ["email", "name", "username", "password"]
EXTRA_FIELDS = [
"confirm_email",
"first_name",
"last_name",
"city",
"state",
"country",
"gender",
"year_of_birth",
"level_of_education",
"company",
"title",
"mailing_address",
"goals",
"honor_code",
"terms_of_service",
"profession",
"specialty",
]
def _is_field_visible(self, field_name):
"""Check whether a field is visible based on Django settings. """
return self._extra_fields_setting.get(field_name) in ["required", "optional"]
def _is_field_required(self, field_name):
"""Check whether a field is required based on Django settings. """
return self._extra_fields_setting.get(field_name) == "required"
def __init__(self):
# Backwards compatibility: Honor code is required by default, unless
# explicitly set to "optional" in Django settings.
self._extra_fields_setting = copy.deepcopy(configuration_helpers.get_value('REGISTRATION_EXTRA_FIELDS'))
if not self._extra_fields_setting:
self._extra_fields_setting = copy.deepcopy(settings.REGISTRATION_EXTRA_FIELDS)
self._extra_fields_setting["honor_code"] = self._extra_fields_setting.get("honor_code", "required")
# Check that the setting is configured correctly
for field_name in self.EXTRA_FIELDS:
if self._extra_fields_setting.get(field_name, "hidden") not in ["required", "optional", "hidden"]:
msg = u"Setting REGISTRATION_EXTRA_FIELDS values must be either required, optional, or hidden."
raise ImproperlyConfigured(msg)
# Map field names to the instance method used to add the field to the form
self.field_handlers = {}
valid_fields = self.DEFAULT_FIELDS + self.EXTRA_FIELDS
for field_name in valid_fields:
handler = getattr(self, "_add_{field_name}_field".format(field_name=field_name))
self.field_handlers[field_name] = handler
field_order = configuration_helpers.get_value('REGISTRATION_FIELD_ORDER')
if not field_order:
field_order = settings.REGISTRATION_FIELD_ORDER or valid_fields
# Check that all of the valid_fields are in the field order and vice versa, if not set to the default order
if set(valid_fields) != set(field_order):
field_order = valid_fields
self.field_order = field_order
def get_registration_form(self, request):
"""Return a description of the registration form.
This decouples clients from the API definition:
if the API decides to modify the form, clients won't need
to be updated.
This is especially important for the registration form,
since different edx-platform installations might
collect different demographic information.
See `user_api.helpers.FormDescription` for examples
of the JSON-encoded form description.
Arguments:
request (HttpRequest)
Returns:
HttpResponse
"""
form_desc = FormDescription("post", reverse("user_api_registration"))
self._apply_third_party_auth_overrides(request, form_desc)
# Custom form fields can be added via the form set in settings.REGISTRATION_EXTENSION_FORM
custom_form = get_registration_extension_form()
if custom_form:
# Default fields are always required
for field_name in self.DEFAULT_FIELDS:
self.field_handlers[field_name](form_desc, required=True)
for field_name, field in custom_form.fields.items():
restrictions = {}
if getattr(field, 'max_length', None):
restrictions['max_length'] = field.max_length
if getattr(field, 'min_length', None):
restrictions['min_length'] = field.min_length
field_options = getattr(
getattr(custom_form, 'Meta', None), 'serialization_options', {}
).get(field_name, {})
field_type = field_options.get('field_type', FormDescription.FIELD_TYPE_MAP.get(field.__class__))
if not field_type:
raise ImproperlyConfigured(
"Field type '{}' not recognized for registration extension field '{}'.".format(
field_type,
field_name
)
)
form_desc.add_field(
field_name, label=field.label,
default=field_options.get('default'),
field_type=field_options.get('field_type', FormDescription.FIELD_TYPE_MAP.get(field.__class__)),
placeholder=field.initial, instructions=field.help_text, required=field.required,
restrictions=restrictions,
options=getattr(field, 'choices', None), error_messages=field.error_messages,
include_default_option=field_options.get('include_default_option'),
)
# Extra fields configured in Django settings
# may be required, optional, or hidden
for field_name in self.EXTRA_FIELDS:
if self._is_field_visible(field_name):
self.field_handlers[field_name](
form_desc,
required=self._is_field_required(field_name)
)
else:
# Go through the fields in the fields order and add them if they are required or visible
for field_name in self.field_order:
if field_name in self.DEFAULT_FIELDS:
self.field_handlers[field_name](form_desc, required=True)
elif self._is_field_visible(field_name):
self.field_handlers[field_name](
form_desc,
required=self._is_field_required(field_name)
)
return form_desc
def _add_email_field(self, form_desc, required=True):
"""Add an email field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's email address.
email_label = _(u"Email")
# Translators: This example email address is used as a placeholder in
# a field on the registration form meant to hold the user's email address.
email_placeholder = _(u"username@domain.com")
# Translators: These instructions appear on the registration form, immediately
# below a field meant to hold the user's email address.
email_instructions = _(u"This is what you will use to login.")
form_desc.add_field(
"email",
field_type="email",
label=email_label,
placeholder=email_placeholder,
instructions=email_instructions,
restrictions={
"min_length": accounts.EMAIL_MIN_LENGTH,
"max_length": accounts.EMAIL_MAX_LENGTH,
},
required=required
)
def _add_confirm_email_field(self, form_desc, required=True):
"""Add an email confirmation field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to confirm the user's email address.
email_label = _(u"Confirm Email")
error_msg = accounts.REQUIRED_FIELD_CONFIRM_EMAIL_MSG
form_desc.add_field(
"confirm_email",
label=email_label,
required=required,
error_messages={
"required": error_msg
}
)
def _add_name_field(self, form_desc, required=True):
"""Add a name field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's full name.
name_label = _(u"Full Name")
# Translators: This example name is used as a placeholder in
# a field on the registration form meant to hold the user's name.
name_placeholder = _(u"Jane Q. Learner")
# Translators: These instructions appear on the registration form, immediately
# below a field meant to hold the user's full name.
name_instructions = _(u"This name will be used on any certificates that you earn.")
form_desc.add_field(
"name",
label=name_label,
placeholder=name_placeholder,
instructions=name_instructions,
restrictions={
"max_length": accounts.NAME_MAX_LENGTH,
},
required=required
)
def _add_username_field(self, form_desc, required=True):
"""Add a username field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's public username.
username_label = _(u"Public Username")
username_instructions = _(
# Translators: These instructions appear on the registration form, immediately
# below a field meant to hold the user's public username.
u"The name that will identify you in your courses. "
u"It cannot be changed later."
)
# Translators: This example username is used as a placeholder in
# a field on the registration form meant to hold the user's username.
username_placeholder = _(u"Jane_Q_Learner")
form_desc.add_field(
"username",
label=username_label,
instructions=username_instructions,
placeholder=username_placeholder,
restrictions={
"min_length": accounts.USERNAME_MIN_LENGTH,
"max_length": accounts.USERNAME_MAX_LENGTH,
},
required=required
)
def _add_password_field(self, form_desc, required=True):
"""Add a password field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's password.
password_label = _(u"Password")
form_desc.add_field(
"password",
label=password_label,
field_type="password",
restrictions={
"min_length": accounts.PASSWORD_MIN_LENGTH,
"max_length": accounts.PASSWORD_MAX_LENGTH,
},
required=required
)
def _add_level_of_education_field(self, form_desc, required=True):
"""Add a level of education field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a dropdown menu on the registration
# form used to select the user's highest completed level of education.
education_level_label = _(u"Highest level of education completed")
error_msg = accounts.REQUIRED_FIELD_LEVEL_OF_EDUCATION_MSG
# The labels are marked for translation in UserProfile model definition.
options = [(name, _(label)) for name, label in UserProfile.LEVEL_OF_EDUCATION_CHOICES] # pylint: disable=translation-of-non-string
form_desc.add_field(
"level_of_education",
label=education_level_label,
field_type="select",
options=options,
include_default_option=True,
required=required,
error_messages={
"required": error_msg
}
)
def _add_gender_field(self, form_desc, required=True):
"""Add a gender field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a dropdown menu on the registration
# form used to select the user's gender.
gender_label = _(u"Gender")
# The labels are marked for translation in UserProfile model definition.
options = [(name, _(label)) for name, label in UserProfile.GENDER_CHOICES] # pylint: disable=translation-of-non-string
form_desc.add_field(
"gender",
label=gender_label,
field_type="select",
options=options,
include_default_option=True,
required=required
)
def _add_year_of_birth_field(self, form_desc, required=True):
"""Add a year of birth field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a dropdown menu on the registration
# form used to select the user's year of birth.
yob_label = _(u"Year of birth")
options = [(unicode(year), unicode(year)) for year in UserProfile.VALID_YEARS]
form_desc.add_field(
"year_of_birth",
label=yob_label,
field_type="select",
options=options,
include_default_option=True,
required=required
)
def _add_field_with_configurable_select_options(self, field_name, field_label, form_desc, required=False):
"""Add a field to a form description.
If select options are given for this field, it will be a select type
otherwise it will be a text type.
Arguments:
field_name: name of field
field_label: label for the field
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to False
"""
extra_field_options = configuration_helpers.get_value('EXTRA_FIELD_OPTIONS')
if extra_field_options is None or extra_field_options.get(field_name) is None:
field_type = "text"
include_default_option = False
options = None
error_msg = ''
exec("error_msg = accounts.REQUIRED_FIELD_%s_TEXT_MSG" % (field_name.upper()))
else:
field_type = "select"
include_default_option = True
field_options = extra_field_options.get(field_name)
options = [(unicode(option.lower()), option) for option in field_options]
error_msg = ''
exec("error_msg = accounts.REQUIRED_FIELD_%s_SELECT_MSG" % (field_name.upper()))
form_desc.add_field(
field_name,
label=field_label,
field_type=field_type,
options=options,
include_default_option=include_default_option,
required=required,
error_messages={
"required": error_msg
}
)
def _add_profession_field(self, form_desc, required=False):
"""Add a profession field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to False
"""
# Translators: This label appears above a dropdown menu on the registration
# form used to select the user's profession
profession_label = _("Profession")
self._add_field_with_configurable_select_options('profession', profession_label, form_desc, required=required)
def _add_specialty_field(self, form_desc, required=False):
"""Add a specialty field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to False
"""
# Translators: This label appears above a dropdown menu on the registration
# form used to select the user's specialty
specialty_label = _("Specialty")
self._add_field_with_configurable_select_options('specialty', specialty_label, form_desc, required=required)
def _add_mailing_address_field(self, form_desc, required=True):
"""Add a mailing address field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's mailing address.
mailing_address_label = _(u"Mailing address")
error_msg = accounts.REQUIRED_FIELD_MAILING_ADDRESS_MSG
form_desc.add_field(
"mailing_address",
label=mailing_address_label,
field_type="textarea",
required=required,
error_messages={
"required": error_msg
}
)
def _add_goals_field(self, form_desc, required=True):
"""Add a goals field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This phrase appears above a field on the registration form
# meant to hold the user's reasons for registering with edX.
goals_label = _(u"Tell us why you're interested in {platform_name}").format(
platform_name=configuration_helpers.get_value("PLATFORM_NAME", settings.PLATFORM_NAME)
)
error_msg = accounts.REQUIRED_FIELD_GOALS_MSG
form_desc.add_field(
"goals",
label=goals_label,
field_type="textarea",
required=required,
error_messages={
"required": error_msg
}
)
def _add_city_field(self, form_desc, required=True):
"""Add a city field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# which allows the user to input the city in which they live.
city_label = _(u"City")
error_msg = accounts.REQUIRED_FIELD_CITY_MSG
form_desc.add_field(
"city",
label=city_label,
required=required,
error_messages={
"required": error_msg
}
)
def _add_state_field(self, form_desc, required=False):
"""Add a State/Province/Region field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to False
"""
# Translators: This label appears above a field on the registration form
# which allows the user to input the State/Province/Region in which they live.
state_label = _(u"State/Province/Region")
form_desc.add_field(
"state",
label=state_label,
required=required
)
def _add_company_field(self, form_desc, required=False):
"""Add a Company field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to False
"""
# Translators: This label appears above a field on the registration form
# which allows the user to input the Company
company_label = _(u"Company")
form_desc.add_field(
"company",
label=company_label,
required=required
)
def _add_title_field(self, form_desc, required=False):
"""Add a Title field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to False
"""
# Translators: This label appears above a field on the registration form
# which allows the user to input the Title
title_label = _(u"Title")
form_desc.add_field(
"title",
label=title_label,
required=required
)
def _add_first_name_field(self, form_desc, required=False):
"""Add a First Name field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to False
"""
# Translators: This label appears above a field on the registration form
# which allows the user to input the First Name
first_name_label = _(u"First Name")
form_desc.add_field(
"first_name",
label=first_name_label,
required=required
)
def _add_last_name_field(self, form_desc, required=False):
"""Add a Last Name field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to False
"""
# Translators: This label appears above a field on the registration form
# which allows the user to input the First Name
last_name_label = _(u"Last Name")
form_desc.add_field(
"last_name",
label=last_name_label,
required=required
)
def _add_country_field(self, form_desc, required=True):
"""Add a country field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a dropdown menu on the registration
# form used to select the country in which the user lives.
country_label = _(u"Country or Region of Residence")
country_instructions = _(
# Translators: These instructions appear on the registration form, immediately
# below a field meant to hold the user's country.
u"The country or region where you live."
)
error_msg = accounts.REQUIRED_FIELD_COUNTRY_MSG
# If we set a country code, make sure it's uppercase for the sake of the form.
default_country = form_desc._field_overrides.get('country', {}).get('defaultValue')
if default_country:
form_desc.override_field_properties(
'country',
default=default_country.upper()
)
form_desc.add_field(
"country",
label=country_label,
instructions=country_instructions,
field_type="select",
options=list(countries),
include_default_option=True,
required=required,
error_messages={
"required": error_msg
}
)
def _add_honor_code_field(self, form_desc, required=True):
"""Add an honor code field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Separate terms of service and honor code checkboxes
if self._is_field_visible("terms_of_service"):
terms_label = _(u"Honor Code")
terms_link = marketing_link("HONOR")
terms_text = _(u"Review the Honor Code")
# Combine terms of service and honor code checkboxes
else:
# Translators: This is a legal document users must agree to
# in order to register a new account.
terms_label = _(u"Terms of Service and Honor Code")
terms_link = marketing_link("HONOR")
terms_text = _(u"Review the Terms of Service and Honor Code")
# Translators: "Terms of Service" is a legal document users must agree to
# in order to register a new account.
label = _(u"I agree to the {platform_name} {terms_of_service}").format(
platform_name=configuration_helpers.get_value("PLATFORM_NAME", settings.PLATFORM_NAME),
terms_of_service=terms_label
)
# Translators: "Terms of Service" is a legal document users must agree to
# in order to register a new account.
error_msg = _(u"You must agree to the {platform_name} {terms_of_service}").format(
platform_name=configuration_helpers.get_value("PLATFORM_NAME", settings.PLATFORM_NAME),
terms_of_service=terms_label
)
form_desc.add_field(
"honor_code",
label=label,
field_type="checkbox",
default=False,
required=required,
error_messages={
"required": error_msg
},
supplementalLink=terms_link,
supplementalText=terms_text
)
def _add_terms_of_service_field(self, form_desc, required=True):
"""Add a terms of service field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This is a legal document users must agree to
# in order to register a new account.
terms_label = _(u"Terms of Service")
terms_link = marketing_link("TOS")
terms_text = _(u"Review the Terms of Service")
# Translators: "Terms of service" is a legal document users must agree to
# in order to register a new account.
label = _(u"I agree to the {platform_name} {terms_of_service}").format(
platform_name=configuration_helpers.get_value("PLATFORM_NAME", settings.PLATFORM_NAME),
terms_of_service=terms_label
)
# Translators: "Terms of service" is a legal document users must agree to
# in order to register a new account.
error_msg = _(u"You must agree to the {platform_name} {terms_of_service}").format(
platform_name=configuration_helpers.get_value("PLATFORM_NAME", settings.PLATFORM_NAME),
terms_of_service=terms_label
)
form_desc.add_field(
"terms_of_service",
label=label,
field_type="checkbox",
default=False,
required=required,
error_messages={
"required": error_msg
},
supplementalLink=terms_link,
supplementalText=terms_text
)
def _apply_third_party_auth_overrides(self, request, form_desc):
"""Modify the registration form if the user has authenticated with a third-party provider.
If a user has successfully authenticated with a third-party provider,
but does not yet have an account with EdX, we want to fill in
the registration form with any info that we get from the
provider.
This will also hide the password field, since we assign users a default
(random) password on the assumption that they will be using
third-party auth to log in.
Arguments:
request (HttpRequest): The request for the registration form, used
to determine if the user has successfully authenticated
with a third-party provider.
form_desc (FormDescription): The registration form description
"""
if third_party_auth.is_enabled():
running_pipeline = third_party_auth.pipeline.get(request)
if running_pipeline:
current_provider = third_party_auth.provider.Registry.get_from_pipeline(running_pipeline)
if current_provider:
# Override username / email / full name
field_overrides = current_provider.get_register_form_data(
running_pipeline.get('kwargs')
)
# When the TPA Provider is configured to skip the registration form and we are in an
# enterprise context, we need to hide all fields except for terms of service and
# ensure that the user explicitly checks that field.
hide_registration_fields_except_tos = (current_provider.skip_registration_form and
enterprise_customer_for_request(request))
for field_name in self.DEFAULT_FIELDS + self.EXTRA_FIELDS:
if field_name in field_overrides:
form_desc.override_field_properties(
field_name, default=field_overrides[field_name]
)
if (field_name not in ['terms_of_service', 'honor_code']
and field_overrides[field_name]
and hide_registration_fields_except_tos):
form_desc.override_field_properties(
field_name,
field_type="hidden",
label="",
instructions="",
)
# Hide the password field
form_desc.override_field_properties(
"password",
default="",
field_type="hidden",
required=False,
label="",
instructions="",
restrictions={}
)
                    # hidden field used to identify that the request is running third-party social auth
form_desc.add_field(
"social_auth_provider",
field_type="hidden",
label="",
default=current_provider.name if current_provider.name else "Third Party",
required=False,
)
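# Hedged sketch (illustration only, not edX code): the override pattern above
# mutates a form description in place -- a field is added, then re-declared as
# hidden with a default pulled from the auth pipeline. A minimal stand-in
# FormDescription (method names mirror the usage above; the real signatures
# are assumptions) showing the mechanics:
class _SketchFormDescription(object):
    def __init__(self):
        self.fields = {}
    def add_field(self, name, **properties):
        self.fields[name] = properties
    def override_field_properties(self, name, **overrides):
        self.fields.setdefault(name, {}).update(overrides)
if __name__ == "__main__":
    desc = _SketchFormDescription()
    desc.add_field("email", field_type="email", required=True)
    # a third-party provider supplied a value, so pre-fill and hide the field
    desc.override_field_properties("email", default="user@example.com", field_type="hidden", label="")
    print(desc.fields["email"])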
|
from unittest import mock
from django.test import SimpleTestCase
from program_management.ddd import command
from program_management.ddd.domain.service.identity_search import ProgramTreeVersionIdentitySearch
from program_management.ddd.repositories.program_tree_version import ProgramTreeVersionRepository
from program_management.ddd.service.read import get_program_tree_version_from_node_service
class TestGetProgramTreeVersionFromNodeService(SimpleTestCase):
@mock.patch.object(ProgramTreeVersionIdentitySearch, 'get_from_node_identity')
@mock.patch.object(ProgramTreeVersionRepository, 'get')
    def test_domain_service_is_called(self, mock_repository_get, mock_domain_service):
        # mock.patch decorators apply bottom-up, so the innermost patch
        # (ProgramTreeVersionRepository.get) is injected as the first argument.
cmd = command.GetProgramTreeVersionFromNodeCommand(code="LDROI1200", year=2018)
get_program_tree_version_from_node_service.get_program_tree_version_from_node(cmd)
self.assertTrue(mock_domain_service.called)
self.assertTrue(mock_repository_get.called)
|
import logging
from lxml import etree
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE, FORMATS, FORMAT
from superdesk.etree import parse_html
from superdesk.text_utils import get_text
from superdesk.publish import registered_transmitters
formatters = []
logger = logging.getLogger(__name__)
class FormatterRegistry(type):
"""Registry metaclass for formatters."""
def __init__(cls, name, bases, attrs):
"""Register sub-classes of Formatter class when defined."""
super(FormatterRegistry, cls).__init__(name, bases, attrs)
if name != "Formatter":
formatters.append(cls)
class Formatter(metaclass=FormatterRegistry):
"""Base Formatter class for all types of Formatters like News ML 1.2, News ML G2, NITF, etc."""
def __init__(self):
self.can_preview = False
self.can_export = False
self.destination = None
self.subscriber = None
def format(self, article, subscriber, codes=None):
"""Formats the article and returns the transformed string"""
raise NotImplementedError()
def export(self, article, subscriber, codes=None):
"""Formats the article and returns the output string for export"""
raise NotImplementedError()
def can_format(self, format_type, article):
"""Test if formatter can format for given article."""
raise NotImplementedError()
def append_body_footer(self, article):
"""
Checks if the article has any Public Service Announcements and if available appends each of them to the body.
:return: body with public service announcements.
"""
try:
article["body_html"] = article["body_html"].replace("<br>", "<br/>")
except KeyError:
pass
body = ""
if article[ITEM_TYPE] in [CONTENT_TYPE.TEXT, CONTENT_TYPE.PREFORMATTED]:
body = article.get("body_html", "")
elif article[ITEM_TYPE] in [CONTENT_TYPE.AUDIO, CONTENT_TYPE.PICTURE, CONTENT_TYPE.VIDEO]:
body = article.get("description", "")
if body and article.get(FORMAT, "") == FORMATS.PRESERVED:
body = body.replace("\n", "\r\n").replace("\r\r", "\r")
parsed = parse_html(body, content="html")
for br in parsed.xpath("//br"):
br.tail = "\r\n" + br.tail if br.tail else "\r\n"
etree.strip_elements(parsed, "br", with_tail=False)
body = etree.tostring(parsed, encoding="unicode")
if body and article.get("body_footer"):
footer = article.get("body_footer")
if article.get(FORMAT, "") == FORMATS.PRESERVED:
body = "{}\r\n{}".format(body, get_text(footer))
else:
body = "{}{}".format(body, footer)
return body
def append_legal(self, article, truncate=False):
"""
Checks if the article has the legal flag on and adds 'Legal:' to the slugline
:param article: article having the slugline
:param truncate: truncates the slugline to 24 characters
:return: updated slugline
"""
slugline = article.get("slugline", "") or ""
if article.get("flags", {}).get("marked_for_legal", False):
slugline = "{}: {}".format("Legal", slugline)
if truncate:
slugline = slugline[:24]
return slugline
def map_html_to_xml(self, element, html):
"""
Map the html text tags to xml
:param etree.Element element: The xml element to populate
:param str html: the html to parse the text from
:return:
"""
root = parse_html(html, content="html")
        # if there are no p tags, only br tags
if not len(root.xpath("//p")) and len(root.xpath("//br")):
para = etree.SubElement(element, "p")
for br in root.xpath("//br"):
etree.SubElement(para, "br").text = br.text
for p in root.xpath("//p"):
para = etree.SubElement(element, "p")
if len(p.xpath(".//br")) > 0:
for br in p.xpath(".//br"):
etree.SubElement(para, "br").text = br.text
para.text = etree.tostring(p, encoding="unicode", method="text")
        # there are neither p tags nor br tags
if len(list(element)) == 0:
etree.SubElement(element, "p").text = etree.tostring(root, encoding="unicode", method="text")
def set_destination(self, destination=None, subscriber=None):
self.destination = destination
self.subscriber = subscriber
def get_formatter(format_type, article):
for formatter_cls in formatters:
formatter_instance = formatter_cls()
if formatter_instance.can_format(format_type, article):
return formatter_instance
def get_all_formatters():
"""Return all formatters registered."""
return [formatter_cls() for formatter_cls in formatters]
from .nitf_formatter import NITFFormatter # NOQA
from .ninjs_formatter import NINJSFormatter, NINJS2Formatter # NOQA
from .newsml_1_2_formatter import NewsML12Formatter # NOQA
from .newsml_g2_formatter import NewsMLG2Formatter # NOQA
from .email_formatter import EmailFormatter # NOQA
from .ninjs_newsroom_formatter import NewsroomNinjsFormatter # NOQA
from .idml_formatter import IDMLFormatter # NOQA
from .ninjs_ftp_formatter import FTPNinjsFormatter # NOQA
from .imatrics import IMatricsFormatter # NOQA
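if __name__ == "__main__":
    # Hedged demo: FormatterRegistry registers every Formatter subclass at
    # class-definition time, so merely defining one adds it to `formatters`.
    class DemoFormatter(Formatter):
        def can_format(self, format_type, article):
            return format_type == "demo"
    print(any(cls is DemoFormatter for cls in formatters))  # True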
|
import os, sys
oldvers = '4.3.0 "Cardinal"'
newvers = '5.0.0 "Raven"'
os.system('rm -rf version.txt')
os.system("grep -IFwr '%s' *|grep -vF '.svn' |grep -v ISC > version.txt" % oldvers)
filelist = []
with open('version.txt', 'r') as f:
    for line in f.readlines():
        candidate = line.split(':')[0]
        if candidate not in filelist and candidate.find(sys.argv[0]) < 0:
            filelist.append(candidate)
print(filelist)
yorn = ''
while yorn.lower() != 'y':
    yorn = input('Replace %s with %s in the listed files? [Y/N]: ' % (oldvers, newvers))
    if yorn.lower() == 'n':
        print('The file version.txt contains the matches of oldvers')
        sys.exit()
for fname in filelist:
    with open(fname, 'r') as f:
        s = f.read()
    s_new = s.replace(oldvers, newvers)
    with open(fname, 'w') as f:
        f.write(s_new)
os.system('rm -rf version.txt')
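# Usage sketch (invocation assumed, not part of the script): run it from the
# root of the source tree, e.g. `python bump_version.py` (script name is
# hypothetical). The grep pre-pass writes candidate files to version.txt,
# which is removed again once the in-place replacement has run.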
|
from spack import *
import sys
import os
class Nwchem(Package):
"""High-performance computational chemistry software"""
homepage = "http://www.nwchem-sw.org"
url = "http://www.nwchem-sw.org/images/Nwchem-6.6.revision27746-src.2015-10-20.tar.gz"
tags = ['ecp', 'ecp-apps']
version('6.8', '50b18116319f4c15d1cb7eaa1b433006',
url='https://github.com/nwchemgit/nwchem/archive/v6.8-release.tar.gz')
version('6.6', 'c581001c004ea5e5dfacb783385825e3',
url='http://www.nwchem-sw.org/images/Nwchem-6.6.revision27746-src.2015-10-20.tar.gz')
depends_on('blas')
depends_on('lapack')
depends_on('mpi')
depends_on('scalapack')
depends_on('python@2.7:2.8', type=('build', 'link', 'run'))
# first hash is sha256 of the patch (required for URL patches),
# second is sha256 for the archive.
# patches for 6.6-27746:
urls_for_patches = {
'@6.6': [
('http://www.nwchem-sw.org/images/Tddft_mxvec20.patch.gz', 'ae04d4754c25fc324329dab085d4cc64148c94118ee702a7e14fce6152b4a0c5', 'cdfa8a5ae7d6ee09999407573b171beb91e37e1558a3bfb2d651982a85f0bc8f'),
('http://www.nwchem-sw.org/images/Tools_lib64.patch.gz', 'ef2eadef89c055c4651ea807079577bd90e1bc99ef6c89f112f1f0e7560ec9b4', '76b8d3e1b77829b683234c8307fde55bc9249b87410914b605a76586c8f32dae'),
('http://www.nwchem-sw.org/images/Config_libs66.patch.gz', '56f9c4bab362d82fb30d97564469e77819985a38e15ccaf04f647402c1ee248e', 'aa17f03cbb22ad7d883e799e0fddad1b5957f5f30b09f14a1a2caeeb9663cc07'),
('http://www.nwchem-sw.org/images/Cosmo_meminit.patch.gz', 'f05f09ca235ad222fe47d880bfd05a1b88d0148b990ca8c7437fa231924be04b', '569c5ee528f3922ee60ca831eb20ec6591633a36f80efa76cbbe41cabeb9b624'),
('http://www.nwchem-sw.org/images/Sym_abelian.patch.gz', 'e3470fb5786ab30bf2eda3bb4acc1e4c48fb5e640a09554abecf7d22b315c8fd', 'aa693e645a98dbafbb990e26145d65b100d6075254933f36326cf00bac3c29e0'),
('http://www.nwchem-sw.org/images/Xccvs98.patch.gz', '75540e0436c12e193ed0b644cff41f5036d78c101f14141846083f03ad157afa', '1c0b0f1293e3b9b05e9e51e7d5b99977ccf1edb4b072872c8316452f6cea6f13'),
('http://www.nwchem-sw.org/images/Dplot_tolrho.patch.gz', '8c30f92730d15f923ec8a623e3b311291eb2ba8b9d5a9884716db69a18d14f24', '2ebb1a5575c44eef4139da91f0e1e60057b2eccdba7f57a8fb577e840c326cbb'),
('http://www.nwchem-sw.org/images/Driver_smalleig.patch.gz', 'a040df6f1d807402ce552ba6d35c9610d5efea7a9d6342bbfbf03c8d380a4058', 'dd65bfbae6b472b94c8ee81d74f6c3ece37c8fc8766ff7a3551d8005d44815b8'),
('http://www.nwchem-sw.org/images/Ga_argv.patch.gz', '6fcd3920978ab95083483d5ed538cd9a6f2a80c2cafa0c5c7450fa5621f0a314', '8a78cb2af14314b92be9d241b801e9b9fed5527b9cb47a083134c7becdfa7cf1'),
('http://www.nwchem-sw.org/images/Raman_displ.patch.gz', 'ca4312cd3ed1ceacdc3a7d258bb05b7824c393bf44f44c28a789ebeb29a8dba4', '6a16f0f589a5cbb8d316f68bd2e6a0d46cd47f1c699a4b256a3973130061f6c3'),
('http://www.nwchem-sw.org/images/Ga_defs.patch.gz', 'f8ac827fbc11f7d2a9d8ec840c6f79d4759ef782bd4d291f2e88ec81b1b230aa', 'c6f1a48338d196e1db22bcfc6087e2b2e6eea50a34d3a2b2d3e90cccf43742a9'),
('http://www.nwchem-sw.org/images/Zgesvd.patch.gz', 'c333a94ceb2c35a490f24b007485ac6e334e153b03cfc1d093b6037221a03517', '4af592c047dc3e0bc4962376ae2c6ca868eb7a0b40a347ed9b88e887016ad9ed'),
('http://www.nwchem-sw.org/images/Cosmo_dftprint.patch.gz', '449d59983dc68c23b34e6581370b2fb3d5ea425b05c3182f0973e5b0e1a62651', 'd3b73431a68d6733eb7b669d471e18a83e03fa8e40c48e536fe8edecd99250ff'),
('http://www.nwchem-sw.org/images/Txs_gcc6.patch.gz', '1dab87f23b210e941c765f7dd7cc2bed06d292a2621419dede73f10ba1ca1bcd', '139692215718cd7414896470c0cc8b7817a73ece1e4ca93bf752cf1081a195af'),
('http://www.nwchem-sw.org/images/Gcc6_optfix.patch.gz', '8f8a5f8246bc1e42ef0137049acab4448a2e560339f44308703589adf753c148', '15cff43ab0509e0b0e83c49890032a848d6b7116bd6c8e5678e6c933f2d051ab'),
('http://www.nwchem-sw.org/images/Util_gnumakefile.patch.gz', '173e17206a9099c3512b87e3f42441f5b089db82be1d2b306fe2a0070e5c8fad', '5dd82b9bd55583152295c999a0e4d72dd9d5c6ab7aa91117c2aae57a95a14ba1'),
('http://www.nwchem-sw.org/images/Util_getppn.patch.gz', 'c4a23592fdcfb1fb6b65bc6c1906ac36f9966eec4899c4329bc8ce12015d2495', '8be418e1f8750778a31056f1fdf2a693fa4a12ea86a531f1ddf6f3620421027e'),
('http://www.nwchem-sw.org/images/Gcc6_macs_optfix.patch.gz', 'ff33d5f1ccd33385ffbe6ce7a18ec1506d55652be6e7434dc8065af64c879aaa', 'fade16098a1f54983040cdeb807e4e310425d7f66358807554e08392685a7164'),
('http://www.nwchem-sw.org/images/Notdir_fc.patch.gz', '54c722fa807671d6bf1a056586f0923593319d09c654338e7dd461dcd29ff118', 'a6a233951eb254d8aff5b243ca648def21fa491807a66c442f59c437f040ee69')
]
}
# Iterate over patches
for __condition, __urls in urls_for_patches.items():
for __url, __sha256, __archive_sha256 in __urls:
patch(__url, when=__condition, level=0, sha256=__sha256, archive_sha256=__archive_sha256)
def install(self, spec, prefix):
scalapack = spec['scalapack'].libs
lapack = spec['lapack'].libs
blas = spec['blas'].libs
# see http://www.nwchem-sw.org/index.php/Compiling_NWChem
args = []
args.extend([
'NWCHEM_TOP=%s' % self.stage.source_path,
# NWCHEM is picky about FC and CC. They should NOT be full path.
# see http://www.nwchem-sw.org/index.php/Special:AWCforum/sp/id7524
'CC=%s' % os.path.basename(spack_cc),
'FC=%s' % os.path.basename(spack_fc),
'USE_MPI=y',
'MPI_LOC=%s' % spec['mpi'].prefix,
'USE_PYTHONCONFIG=y',
'PYTHONVERSION=%s' % spec['python'].version.up_to(2),
'PYTHONHOME=%s' % spec['python'].home,
'BLASOPT=%s' % ((lapack + blas).ld_flags),
'BLAS_LIB=%s' % blas.ld_flags,
'LAPACK_LIB=%s' % lapack.ld_flags,
'USE_SCALAPACK=y',
'SCALAPACK=%s' % scalapack.ld_flags,
'NWCHEM_MODULES=all python',
'NWCHEM_LONG_PATHS=Y' # by default NWCHEM_TOP is 64 char max
])
# TODO: query if blas/lapack/scalapack uses 64bit Ints
# A flag to distinguish between 32bit and 64bit integers in linear
# algebra (Blas, Lapack, Scalapack)
use_32_bit_lin_alg = True
if use_32_bit_lin_alg:
args.extend([
'USE_64TO32=y',
'BLAS_SIZE=4',
'LAPACK_SIZE=4',
'SCALAPACK_SIZE=4'
])
else:
            args.extend([
                'BLAS_SIZE=8',
                'LAPACK_SIZE=8',
                'SCALAPACK_SIZE=8'
            ])
if sys.platform == 'darwin':
target = 'MACX64'
args.extend([
'CFLAGS_FORGA=-DMPICH_NO_ATTR_TYPE_TAGS'
])
else:
target = 'LINUX64'
args.extend(['NWCHEM_TARGET=%s' % target])
with working_dir('src'):
make('nwchem_config', *args)
if use_32_bit_lin_alg:
make('64_to_32', *args)
make(*args)
# need to install by hand. Follow Ubuntu:
# http://packages.ubuntu.com/trusty/all/nwchem-data/filelist
# http://packages.ubuntu.com/trusty/amd64/nwchem/filelist
share_path = join_path(prefix, 'share', 'nwchem')
mkdirp(prefix.bin)
install_tree('data', share_path)
install_tree(join_path('basis', 'libraries'),
join_path(share_path, 'libraries'))
install_tree(join_path('nwpw', 'libraryps'),
join_path(share_path, 'libraryps'))
b_path = join_path(self.stage.source_path, 'bin',
target, 'nwchem')
chmod = which('chmod')
chmod('+x', b_path)
install(b_path, prefix.bin)
# Finally, make user's life easier by creating a .nwchemrc file
# to point to the required data files.
nwchemrc = """\
nwchem_basis_library {data}/libraries/
nwchem_nwpw_library {data}/libraryps/
ffield amber
amber_1 {data}/amber_s/
amber_2 {data}/amber_q/
amber_3 {data}/amber_x/
amber_4 {data}/amber_u/
spce {data}/solvents/spce.rst
charmm_s {data}/charmm_s/
charmm_x {data}/charmm_x/
""".format(data=share_path)
with open(".nwchemrc", 'w') as f:
f.write(nwchemrc)
install(".nwchemrc", share_path)
|
import sys, os, glob, distutils.file_util
DOCS_PATH = "../../docs/latex/wx"
def scanTexFiles(callback):
    count = 0
    for f in glob.glob(DOCS_PATH + '/*.tex'):
        try:
            file = open(f, "r")
        except IOError:
            print("could not open %s" % f)
            continue
        print("opened file %s" % f)
        count = count + 1
        # search \class tags
        content = file.readlines()
        file.close()
        classdecl = 0
        for i in range(len(content)):
            line = content[i]
            if r"\class{" in line:
                classdecl = classdecl + 1
                # polish the class name
                classname = line
                classname = classname[classname.find(r"\class{"):]
                classname = classname[classname.find("{") + 1:classname.find("}")]
                print("  the class declared is named '%s'" % classname)
                # process this \class
                if not callback(classname, f, content, i):
                    return count
        print("  file %s contains %d class declarations" % (f, classdecl))
    return count
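if __name__ == "__main__":
    # Hedged usage sketch: a callback that just collects class names; returning
    # True keeps the scan going, returning False stops it early.
    found = []
    def collect(classname, filename, content, index):
        found.append(classname)
        return True
    scanned = scanTexFiles(collect)
    print("scanned %d files, found %d classes" % (scanned, len(found)))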
|
from django.db import models
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
import datetime
class Article(models.Model):
title = models.CharField(max_length=255)
brief = models.CharField(null=True,blank=True,max_length=255)
category = models.ForeignKey("Category")
    content = models.TextField(u"article content")
author = models.ForeignKey("UserProfile")
pub_date = models.DateTimeField(blank=True,null=True)
last_modify = models.DateTimeField(auto_now=True)
    priority = models.IntegerField(u"priority", default=1000)
    head_img = models.ImageField(u"article title image", upload_to="uploads")
    status_choices = (('draft', u"draft"),
                      ('published', u"published"),
                      ('hidden', u"hidden"),
                      )
status = models.CharField(choices=status_choices,default='published',max_length=32)
def __str__(self):
return self.title
def clean(self):
# Don't allow draft entries to have a pub_date.
if self.status == 'draft' and self.pub_date is not None:
            raise ValidationError('Draft entries may not have a publication date.')
# Set the pub_date for published items if it hasn't been set already.
if self.status == 'published' and self.pub_date is None:
            self.pub_date = datetime.datetime.now()  # DateTimeField expects a datetime, not a date
class Comment(models.Model):
    article = models.ForeignKey(Article, verbose_name=u"owning article")
parent_comment = models.ForeignKey('self',related_name='my_children',blank=True,null=True)
    comment_choices = ((1, u'comment'),
                       (2, u'like'))
comment_type = models.IntegerField(choices=comment_choices,default=1)
user = models.ForeignKey("UserProfile")
comment = models.TextField(blank=True,null=True)
date = models.DateTimeField(auto_now_add=True)
def clean(self):
        if self.comment_type == 1 and not self.comment:
            raise ValidationError(u'Comment content cannot be empty')
def __str__(self):
return "C:%s" %(self.comment)
class Category(models.Model):
name = models.CharField(max_length=64,unique=True)
brief = models.CharField(null=True,blank=True,max_length=255)
set_as_top_menu = models.BooleanField(default=False)
position_index = models.SmallIntegerField()
admins = models.ManyToManyField("UserProfile",blank=True)
def __str__(self):
return self.name
class UserProfile(models.Model):
user = models.OneToOneField(User)
name =models.CharField(max_length=32)
signature= models.CharField(max_length=255,blank=True,null=True)
    head_img = models.ImageField(blank=True, null=True)  # height_field/width_field take model field names, not pixel sizes
def __str__(self):
return self.name
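# Hedged usage sketch: Model.clean() only runs through full_clean() (ModelForm
# validation or an explicit call); a bare save() skips it, e.g.
#   article = Article(title="Demo", status='draft', pub_date=datetime.datetime.now())
#   article.full_clean()  # raises ValidationError: drafts may not have a pub_date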
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('womprat')
mobileTemplate.setLevel(4)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setMeatType("Wild Meat")
mobileTemplate.setMeatAmount(4)
mobileTemplate.setHideType("Leathery Hide")
mobileTemplate.setBoneAmount(3)
mobileTemplate.setBoneType("Animal Bone")
mobileTemplate.setHideAmount(2)
mobileTemplate.setSocialGroup("womprat")
mobileTemplate.setAssistRange(6)
mobileTemplate.setStalker(False)
mobileTemplate.setOptionsBitmask(Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_womp_rat.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_bite_1')
attacks.add('bm_bolster_armor_1')
attacks.add('bm_enfeeble_1')
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('womprat', mobileTemplate)
return
|
import os
import json
import logging
import unittest
from pycaching import Geocaching
from pycaching.utfgrid import UTFGrid, GridCoordinateBlock
from pycaching.errors import Error
from test.test_geocaching import _username, _password
_this_folder = os.path.dirname(__file__)
sample_files = {i: os.path.join(_this_folder, i) for i in ["sample_caches.csv", "sample_utfgrid.json"]}
class TestUTFGrid(unittest.TestCase):
def setUp(self):
self.grid = UTFGrid(Geocaching(), 8800, 5574, 14)
def test_download(self):
"""Test if downloading a tile goes nice without errors"""
self.grid._gc.login(_username, _password)
with self.subTest("Not getting .png tile first"):
list(self.grid.download())
with self.subTest("Getting .png tile first"):
list(self.grid.download(get_png_first=True))
def test_parse(self):
"""Parse locally stored grid and compare to expected results"""
expected_caches = {}
with open(sample_files["sample_caches.csv"]) as f:
for row in f:
wp, lat, lon = row.split(',')
expected_caches[wp] = (float(lat), float(lon))
with open(sample_files["sample_utfgrid.json"]) as f:
j = json.loads(f.read())
caches = self.grid._parse_utfgrid(j)
for c in caches:
with self.subTest("Cache " + wp):
self.assertIn(c.wp, expected_caches)
self.assertAlmostEqual(c.location.latitude, expected_caches[c.wp][0])
self.assertAlmostEqual(c.location.longitude, expected_caches[c.wp][1])
expected_caches.pop(c.wp)
self.assertEqual(len(expected_caches), 0)
class TestGridCoordinateBlock(unittest.TestCase):
# {descriptor: [points, midpoint, x_lim, y_lim]}
good_cases = {9: [[(1, 1), (1, 2), (1, 3),
(2, 1), (2, 2), (2, 3),
(3, 1), (3, 2), (3, 3)],
[2.0, 2.0],
(1, 3), (1, 3)],
6: [[(1, 0), (1, 1),
(2, 0), (2, 1),
(3, 0), (3, 1)],
[2.0, 0.0],
(1, 3), (-1, 1)],
4: [[(62, 62), (62, 63),
(63, 62), (63, 63)],
[63.0, 63.0],
(62, 64), (62, 64)],
3: [[(63, 30), (63, 31), (63, 32)],
[64.0, 31.0],
(63, 65), (30, 32)],
2: [[(62, 0),
(63, 0)],
[63.0, -1.0],
(62, 64), (-2, 0)],
1: [[(0, 63)],
[-1.0, 64.0],
(-2, 0), (63, 65)],
}
    bad_cases = {'too many points':
[(1, 1), (1, 2), (1, 3),
(2, 1), (2, 2), (2, 3),
(3, 1), (3, 2), (3, 3), (3, 4)],
                 'still too many points':
[(63, 30), (63, 31), (63, 32), (63, 33)],
'point missing: 9':
[(1, 1), (1, 3),
(2, 1), (2, 2), (2, 3),
(3, 1), (3, 2), (3, 3)],
'point missing: 6':
[(1, 0), (1, 1),
(2, 0),
(3, 0), (3, 1)],
'points not aligned':
[(1, 1), (1, 2), (1, 3),
(2, 1), (2, 3), (2, 4),
(3, 1), (3, 2), (3, 3)],
}
def setUp(self):
self.grid = UTFGrid(Geocaching(), 8800, 5574, 14)
self.grid.size = 64
self.cb = GridCoordinateBlock(self.grid)
    def test_determine_block_size(self):
with self.subTest("Initial value"):
self.assertEqual(GridCoordinateBlock.size, 3)
with self.subTest("Initial value of instance"):
self.assertEqual(GridCoordinateBlock(self.grid).size, 3)
with self.subTest("No changes: same value"):
sizes = [100] * 9 + [4] * 3 + [1]
GridCoordinateBlock.determine_block_size(*sizes)
self.assertEqual(GridCoordinateBlock.size, 3)
with self.subTest("No changes: no input"):
GridCoordinateBlock.determine_block_size()
self.assertEqual(GridCoordinateBlock.size, 3)
with self.subTest("Should change to 16"):
sizes = [16] * 21 + [4]
with self.assertLogs(level=logging.WARNING):
GridCoordinateBlock.determine_block_size(*sizes)
self.assertEqual(GridCoordinateBlock.size, 4)
with self.subTest("New value of instance"):
self.assertEqual(GridCoordinateBlock(self.grid).size, 4)
# Set back to initial value
GridCoordinateBlock.size = 3
def test_add_point(self):
"""Test passing points at initialization"""
with self.subTest("Zero points"):
self.assertEqual(self.cb.points,
GridCoordinateBlock(self.grid).points)
with self.subTest("One point"):
self.cb.points = []
self.cb.add((3, 4))
self.assertEqual(self.cb.points,
GridCoordinateBlock(self.grid, (3, 4)).points)
with self.subTest("Multiple points: pass directly"):
points = [(0, 0), (1, 2), (3, 4), (1, 2), (5, 6)]
self.cb.points = points
self.assertEqual(self.cb.points,
GridCoordinateBlock(self.grid, *points).points)
with self.subTest("Multiple points: update"):
self.cb.points = []
points = [(0, 0), (1, 2), (3, 4), (1, 2), (5, 6)]
self.cb.update(points)
self.assertEqual(self.cb.points,
GridCoordinateBlock(self.grid, *points).points)
def test_get_middle_point(self):
"""Check that correct middle points are returned"""
for case in [self.good_cases, self.bad_cases]:
for i in case:
if case is self.good_cases:
points, mid_point, xlim, ylim = self.good_cases[i]
with self.subTest('{} points'.format(i)):
self.cb.points = points
self.assertEqual(self.cb._get_middle_point(),
mid_point)
else:
with self.subTest('Malformed input: {}'.format(i)):
with self.assertRaises(Error):
self.cb.points = self.bad_cases[i]
self.cb._get_middle_point()
def test_check_block(self):
"""Test block form with various passes and fails"""
for case in [self.good_cases, self.bad_cases]:
for i in case:
if case is self.good_cases:
self.cb.points = case[i][0]
with self.subTest(i):
if i == 9:
self.assertEqual(self.cb._check_block(), 1, i)
else:
self.assertEqual(self.cb._check_block(), 2, i)
else:
self.cb.points = case[i]
with self.subTest(i):
self.assertEqual(self.cb._check_block(), 0, i)
def test_find_limits(self):
"""Check calculation of block limits when going out of the border"""
for i in self.good_cases:
points, mid_point, xlim, ylim = self.good_cases[i]
self.cb.points = points
for axis, limits in zip(['x', 'y'], [xlim, ylim]):
with self.subTest('{} points, {} axis'.format(i, axis)):
self.assertEqual(self.cb._find_limits(axis), limits)
|
import unittest
from magnumfe import *
set_log_active(False)
class CacheTest(unittest.TestCase):
def test_initial_update(self):
mesh = UnitCubeMesh(1,1,1)
state = State(mesh)
cache = Cache()
self.assertTrue(cache.requires_update(state))
def test_change_state(self):
mesh = UnitCubeMesh(1,1,1)
state1 = State(mesh)
state2 = State(mesh)
cache = Cache()
count = 0
if cache.requires_update(state1): count += 1
if cache.requires_update(state1): count += 1
self.assertEqual(1, count)
if cache.requires_update(state2): count += 1
self.assertEqual(2, count)
def test_update_required(self):
mesh = UnitCubeMesh(2, 2, 2)
state = State(mesh, m = Constant((1.0, 0.0, 0.0)), j = Constant((0.0, 0.0, 0.0)))
cache = Cache("m", "t")
count = 0
if cache.requires_update(state): count += 1
self.assertEqual(1, count)
if cache.requires_update(state): count += 1
self.assertEqual(1, count)
state.t = 1.0
if cache.requires_update(state): count += 1
self.assertEqual(2, count)
if cache.requires_update(state): count += 1
self.assertEqual(2, count)
state.m = Constant((0.0, 1.0, 0.0))
if cache.requires_update(state): count += 1
self.assertEqual(3, count)
if cache.requires_update(state): count += 1
self.assertEqual(3, count)
state.j = Constant((1.0, 0.0, 0.0))
if cache.requires_update(state): count += 1
self.assertEqual(3, count)
if __name__ == '__main__':
unittest.main()
|
from graphicsUtils import *
import math, time
from game import Directions
DEFAULT_GRID_SIZE = 30.0
INFO_PANE_HEIGHT = 35
BACKGROUND_COLOR = formatColor(0,0,0)
WALL_COLOR = formatColor(0.0/255.0, 51.0/255.0, 255.0/255.0)
INFO_PANE_COLOR = formatColor(.4,.4,0)
SCORE_COLOR = formatColor(.9, .9, .9)
PACMAN_OUTLINE_WIDTH = 2
PACMAN_CAPTURE_OUTLINE_WIDTH = 4
GHOST_COLORS = []
GHOST_COLORS.append(formatColor(.9,0,0)) # Red
GHOST_COLORS.append(formatColor(0,.3,.9)) # Blue
GHOST_COLORS.append(formatColor(.98,.41,.07)) # Orange
GHOST_COLORS.append(formatColor(.1,.75,.7)) # Green
GHOST_COLORS.append(formatColor(1.0,0.6,0.0)) # Yellow
GHOST_COLORS.append(formatColor(.4,0.13,0.91)) # Purple
TEAM_COLORS = GHOST_COLORS[:2]
GHOST_SHAPE = [
( 0, 0.3 ),
( 0.25, 0.75 ),
( 0.5, 0.3 ),
( 0.75, 0.75 ),
( 0.75, -0.5 ),
( 0.5, -0.75 ),
(-0.5, -0.75 ),
(-0.75, -0.5 ),
(-0.75, 0.75 ),
(-0.5, 0.3 ),
(-0.25, 0.75 )
]
GHOST_SIZE = 0.65
SCARED_COLOR = formatColor(1,1,1)
GHOST_VEC_COLORS = list(map(colorToVector, GHOST_COLORS))
PACMAN_COLOR = formatColor(255.0/255.0,255.0/255.0,61.0/255)
PACMAN_SCALE = 0.5
FOOD_COLOR = formatColor(1,1,1)
FOOD_SIZE = 0.1
LASER_COLOR = formatColor(1,0,0)
LASER_SIZE = 0.02
CAPSULE_COLOR = formatColor(1,1,1)
CAPSULE_SIZE = 0.25
WALL_RADIUS = 0.15
class InfoPane:
def __init__(self, layout, gridSize):
self.gridSize = gridSize
self.width = (layout.width) * gridSize
self.base = (layout.height + 1) * gridSize
self.height = INFO_PANE_HEIGHT
self.fontSize = 24
self.textColor = PACMAN_COLOR
self.drawPane()
def toScreen(self, pos, y = None):
"""
Translates a point relative from the bottom left of the info pane.
"""
        if y is None:
x,y = pos
else:
x = pos
x = self.gridSize + x # Margin
y = self.base + y
return x,y
def drawPane(self):
self.scoreText = text( self.toScreen(0, 0 ), self.textColor, "SCORE: 0", "Times", self.fontSize, "bold")
def initializeGhostDistances(self, distances):
self.ghostDistanceText = []
size = 20
if self.width < 240:
size = 12
if self.width < 160:
size = 10
for i, d in enumerate(distances):
t = text( self.toScreen(self.width/2 + self.width/8 * i, 0), GHOST_COLORS[i+1], d, "Times", size, "bold")
self.ghostDistanceText.append(t)
def updateScore(self, score):
changeText(self.scoreText, "SCORE: % 4d" % score)
    def setTeam(self, isBlue):
        # use a local name that does not shadow the graphicsUtils text() helper
        label = "BLUE TEAM" if isBlue else "RED TEAM"
        self.teamText = text(self.toScreen(300, 0), self.textColor, label, "Times", self.fontSize, "bold")
def updateGhostDistances(self, distances):
if len(distances) == 0: return
if 'ghostDistanceText' not in dir(self): self.initializeGhostDistances(distances)
else:
for i, d in enumerate(distances):
changeText(self.ghostDistanceText[i], d)
def drawGhost(self):
pass
def drawPacman(self):
pass
def drawWarning(self):
pass
def clearIcon(self):
pass
def updateMessage(self, message):
pass
def clearMessage(self):
pass
class PacmanGraphics:
def __init__(self, zoom=1.0, frameTime=0.0, capture=False):
self.have_window = 0
self.currentGhostImages = {}
self.pacmanImage = None
self.zoom = zoom
self.gridSize = DEFAULT_GRID_SIZE * zoom
self.capture = capture
self.frameTime = frameTime
def initialize(self, state, isBlue = False):
self.isBlue = isBlue
self.startGraphics(state)
# self.drawDistributions(state)
self.distributionImages = None # Initialized lazily
self.drawStaticObjects(state)
self.drawAgentObjects(state)
# Information
self.previousState = state
def startGraphics(self, state):
self.layout = state.layout
layout = self.layout
self.width = layout.width
self.height = layout.height
self.make_window(self.width, self.height)
self.infoPane = InfoPane(layout, self.gridSize)
self.currentState = layout
def drawDistributions(self, state):
walls = state.layout.walls
dist = []
for x in range(walls.width):
distx = []
dist.append(distx)
for y in range(walls.height):
( screen_x, screen_y ) = self.to_screen( (x, y) )
block = square( (screen_x, screen_y),
0.5 * self.gridSize,
color = BACKGROUND_COLOR,
filled = 1, behind=2)
distx.append(block)
self.distributionImages = dist
def drawStaticObjects(self, state):
layout = self.layout
self.drawWalls(layout.walls)
self.food = self.drawFood(layout.food)
self.capsules = self.drawCapsules(layout.capsules)
refresh()
def drawAgentObjects(self, state):
self.agentImages = [] # (agentState, image)
for index, agent in enumerate(state.agentStates):
if agent.isPacman:
image = self.drawPacman(agent, index)
self.agentImages.append( (agent, image) )
else:
image = self.drawGhost(agent, index)
self.agentImages.append( (agent, image) )
refresh()
def swapImages(self, agentIndex, newState):
"""
        Changes an image from a ghost to a pacman or vice versa (for capture)
"""
prevState, prevImage = self.agentImages[agentIndex]
for item in prevImage: remove_from_screen(item)
if newState.isPacman:
image = self.drawPacman(newState, agentIndex)
self.agentImages[agentIndex] = (newState, image )
else:
image = self.drawGhost(newState, agentIndex)
self.agentImages[agentIndex] = (newState, image )
refresh()
def update(self, newState):
agentIndex = newState._agentMoved
agentState = newState.agentStates[agentIndex]
if self.agentImages[agentIndex][0].isPacman != agentState.isPacman: self.swapImages(agentIndex, agentState)
prevState, prevImage = self.agentImages[agentIndex]
if agentState.isPacman:
self.animatePacman(agentState, prevState, prevImage)
else:
self.moveGhost(agentState, agentIndex, prevState, prevImage)
self.agentImages[agentIndex] = (agentState, prevImage)
        if newState._foodEaten is not None:
self.removeFood(newState._foodEaten, self.food)
        if newState._capsuleEaten is not None:
self.removeCapsule(newState._capsuleEaten, self.capsules)
self.infoPane.updateScore(newState.score)
if 'ghostDistances' in dir(newState):
self.infoPane.updateGhostDistances(newState.ghostDistances)
def make_window(self, width, height):
grid_width = (width-1) * self.gridSize
grid_height = (height-1) * self.gridSize
screen_width = 2*self.gridSize + grid_width
screen_height = 2*self.gridSize + grid_height + INFO_PANE_HEIGHT
begin_graphics(screen_width,
screen_height,
BACKGROUND_COLOR,
"AI Pacman")
def drawPacman(self, pacman, index):
position = self.getPosition(pacman)
screen_point = self.to_screen(position)
endpoints = self.getEndpoints(self.getDirection(pacman))
width = PACMAN_OUTLINE_WIDTH
outlineColor = PACMAN_COLOR
fillColor = PACMAN_COLOR
if self.capture:
outlineColor = TEAM_COLORS[index % 2]
fillColor = GHOST_COLORS[index]
width = PACMAN_CAPTURE_OUTLINE_WIDTH
return [circle(screen_point, PACMAN_SCALE * self.gridSize,
fillColor = fillColor, outlineColor = outlineColor,
endpoints = endpoints,
width = width)]
def getEndpoints(self, direction, position=(0,0)):
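        # Descriptive note: `pos` is the fractional offset within a grid cell,
        # so the mouth opening oscillates as 30 + 80*sin(pi*pos) degrees while
        # Pacman moves; the returned (start, end) arc angles centre the gap on
        # the travel direction (East=0, North=90, West=180, South=270).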
x, y = position
pos = x - int(x) + y - int(y)
width = 30 + 80 * math.sin(math.pi* pos)
delta = width / 2
if (direction == 'West'):
endpoints = (180+delta, 180-delta)
elif (direction == 'North'):
endpoints = (90+delta, 90-delta)
elif (direction == 'South'):
endpoints = (270+delta, 270-delta)
else:
endpoints = (0+delta, 0-delta)
return endpoints
def movePacman(self, position, direction, image):
screenPosition = self.to_screen(position)
endpoints = self.getEndpoints( direction, position )
r = PACMAN_SCALE * self.gridSize
moveCircle(image[0], screenPosition, r, endpoints)
refresh()
def animatePacman(self, pacman, prevPacman, image):
if self.frameTime < 0:
print('Press any key to step forward, "q" to play')
keys = wait_for_keys()
if 'q' in keys:
self.frameTime = 0.1
if self.frameTime > 0.01 or self.frameTime < 0:
start = time.time()
fx, fy = self.getPosition(prevPacman)
px, py = self.getPosition(pacman)
frames = 4.0
for i in range(1,int(frames) + 1):
pos = px*i/frames + fx*(frames-i)/frames, py*i/frames + fy*(frames-i)/frames
self.movePacman(pos, self.getDirection(pacman), image)
refresh()
sleep(abs(self.frameTime) / frames)
else:
self.movePacman(self.getPosition(pacman), self.getDirection(pacman), image)
refresh()
def getGhostColor(self, ghost, ghostIndex):
if ghost.scaredTimer > 0:
return SCARED_COLOR
else:
return GHOST_COLORS[ghostIndex]
def drawGhost(self, ghost, agentIndex):
pos = self.getPosition(ghost)
dir = self.getDirection(ghost)
(screen_x, screen_y) = (self.to_screen(pos) )
coords = []
for (x, y) in GHOST_SHAPE:
coords.append((x*self.gridSize*GHOST_SIZE + screen_x, y*self.gridSize*GHOST_SIZE + screen_y))
colour = self.getGhostColor(ghost, agentIndex)
body = polygon(coords, colour, filled = 1)
WHITE = formatColor(1.0, 1.0, 1.0)
BLACK = formatColor(0.0, 0.0, 0.0)
dx = 0
dy = 0
if dir == 'North':
dy = -0.2
if dir == 'South':
dy = 0.2
if dir == 'East':
dx = 0.2
if dir == 'West':
dx = -0.2
leftEye = circle((screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2, WHITE, WHITE)
rightEye = circle((screen_x+self.gridSize*GHOST_SIZE*(0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2, WHITE, WHITE)
leftPupil = circle((screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08, BLACK, BLACK)
rightPupil = circle((screen_x+self.gridSize*GHOST_SIZE*(0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08, BLACK, BLACK)
ghostImageParts = []
ghostImageParts.append(body)
ghostImageParts.append(leftEye)
ghostImageParts.append(rightEye)
ghostImageParts.append(leftPupil)
ghostImageParts.append(rightPupil)
return ghostImageParts
def moveEyes(self, pos, dir, eyes):
(screen_x, screen_y) = (self.to_screen(pos) )
dx = 0
dy = 0
if dir == 'North':
dy = -0.2
if dir == 'South':
dy = 0.2
if dir == 'East':
dx = 0.2
if dir == 'West':
dx = -0.2
moveCircle(eyes[0],(screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2)
moveCircle(eyes[1],(screen_x+self.gridSize*GHOST_SIZE*(0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2)
moveCircle(eyes[2],(screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08)
moveCircle(eyes[3],(screen_x+self.gridSize*GHOST_SIZE*(0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08)
def moveGhost(self, ghost, ghostIndex, prevGhost, ghostImageParts):
old_x, old_y = self.to_screen(self.getPosition(prevGhost))
new_x, new_y = self.to_screen(self.getPosition(ghost))
delta = new_x - old_x, new_y - old_y
for ghostImagePart in ghostImageParts:
move_by(ghostImagePart, delta)
refresh()
if ghost.scaredTimer > 0:
color = SCARED_COLOR
else:
color = GHOST_COLORS[ghostIndex]
edit(ghostImageParts[0], ('fill', color), ('outline', color))
self.moveEyes(self.getPosition(ghost), self.getDirection(ghost), ghostImageParts[-4:])
refresh()
def getPosition(self, agentState):
        if agentState.configuration is None: return (-1000, -1000)
return agentState.getPosition()
def getDirection(self, agentState):
        if agentState.configuration is None: return Directions.STOP
return agentState.configuration.getDirection()
def finish(self):
end_graphics()
def to_screen(self, point):
( x, y ) = point
#y = self.height - y
x = (x + 1)*self.gridSize
y = (self.height - y)*self.gridSize
return ( x, y )
# Fixes some TK issue with off-center circles
def to_screen2(self, point):
( x, y ) = point
#y = self.height - y
x = (x + 1)*self.gridSize
y = (self.height - y)*self.gridSize
return ( x, y )
def drawWalls(self, wallMatrix):
wallColor = WALL_COLOR
for xNum, x in enumerate(wallMatrix):
if self.capture and (xNum * 2) < wallMatrix.width: wallColor = TEAM_COLORS[0]
if self.capture and (xNum * 2) >= wallMatrix.width: wallColor = TEAM_COLORS[1]
for yNum, cell in enumerate(x):
if cell: # There's a wall here
pos = (xNum, yNum)
screen = self.to_screen(pos)
screen2 = self.to_screen2(pos)
# draw each quadrant of the square based on adjacent walls
wIsWall = self.isWall(xNum-1, yNum, wallMatrix)
eIsWall = self.isWall(xNum+1, yNum, wallMatrix)
nIsWall = self.isWall(xNum, yNum+1, wallMatrix)
sIsWall = self.isWall(xNum, yNum-1, wallMatrix)
nwIsWall = self.isWall(xNum-1, yNum+1, wallMatrix)
swIsWall = self.isWall(xNum-1, yNum-1, wallMatrix)
neIsWall = self.isWall(xNum+1, yNum+1, wallMatrix)
seIsWall = self.isWall(xNum+1, yNum-1, wallMatrix)
# NE quadrant
if (not nIsWall) and (not eIsWall):
# inner circle
circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (0,91), 'arc')
if (nIsWall) and (not eIsWall):
# vertical line
line(add(screen, (self.gridSize*WALL_RADIUS, 0)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-0.5)-1)), wallColor)
if (not nIsWall) and (eIsWall):
# horizontal line
line(add(screen, (0, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5+1, self.gridSize*(-1)*WALL_RADIUS)), wallColor)
if (nIsWall) and (eIsWall) and (not neIsWall):
# outer circle
circle(add(screen2, (self.gridSize*2*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (180,271), 'arc')
line(add(screen, (self.gridSize*2*WALL_RADIUS-1, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5+1, self.gridSize*(-1)*WALL_RADIUS)), wallColor)
line(add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS+1)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-0.5))), wallColor)
# NW quadrant
if (not nIsWall) and (not wIsWall):
# inner circle
circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (90,181), 'arc')
if (nIsWall) and (not wIsWall):
# vertical line
line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, 0)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-0.5)-1)), wallColor)
if (not nIsWall) and (wIsWall):
# horizontal line
line(add(screen, (0, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5)-1, self.gridSize*(-1)*WALL_RADIUS)), wallColor)
if (nIsWall) and (wIsWall) and (not nwIsWall):
# outer circle
circle(add(screen2, (self.gridSize*(-2)*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (270,361), 'arc')
line(add(screen, (self.gridSize*(-2)*WALL_RADIUS+1, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5), self.gridSize*(-1)*WALL_RADIUS)), wallColor)
line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS+1)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-0.5))), wallColor)
# SE quadrant
if (not sIsWall) and (not eIsWall):
# inner circle
circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (270,361), 'arc')
if (sIsWall) and (not eIsWall):
# vertical line
line(add(screen, (self.gridSize*WALL_RADIUS, 0)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(0.5)+1)), wallColor)
if (not sIsWall) and (eIsWall):
# horizontal line
line(add(screen, (0, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5+1, self.gridSize*(1)*WALL_RADIUS)), wallColor)
if (sIsWall) and (eIsWall) and (not seIsWall):
# outer circle
circle(add(screen2, (self.gridSize*2*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (90,181), 'arc')
line(add(screen, (self.gridSize*2*WALL_RADIUS-1, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5, self.gridSize*(1)*WALL_RADIUS)), wallColor)
line(add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS-1)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(0.5))), wallColor)
# SW quadrant
if (not sIsWall) and (not wIsWall):
# inner circle
circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (180,271), 'arc')
if (sIsWall) and (not wIsWall):
# vertical line
line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, 0)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(0.5)+1)), wallColor)
if (not sIsWall) and (wIsWall):
# horizontal line
line(add(screen, (0, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5)-1, self.gridSize*(1)*WALL_RADIUS)), wallColor)
if (sIsWall) and (wIsWall) and (not swIsWall):
# outer circle
circle(add(screen2, (self.gridSize*(-2)*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (0,91), 'arc')
line(add(screen, (self.gridSize*(-2)*WALL_RADIUS+1, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5), self.gridSize*(1)*WALL_RADIUS)), wallColor)
line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS-1)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(0.5))), wallColor)
def isWall(self, x, y, walls):
if x < 0 or y < 0:
return False
if x >= walls.width or y >= walls.height:
return False
return walls[x][y]
def drawFood(self, foodMatrix ):
foodImages = []
color = FOOD_COLOR
for xNum, x in enumerate(foodMatrix):
if self.capture and (xNum * 2) <= foodMatrix.width: color = TEAM_COLORS[0]
if self.capture and (xNum * 2) > foodMatrix.width: color = TEAM_COLORS[1]
imageRow = []
foodImages.append(imageRow)
for yNum, cell in enumerate(x):
if cell: # There's food here
screen = self.to_screen((xNum, yNum ))
dot = circle( screen,
FOOD_SIZE * self.gridSize,
outlineColor = color, fillColor = color,
width = 1)
imageRow.append(dot)
else:
imageRow.append(None)
return foodImages
def drawCapsules(self, capsules ):
capsuleImages = {}
for capsule in capsules:
( screen_x, screen_y ) = self.to_screen(capsule)
dot = circle( (screen_x, screen_y),
CAPSULE_SIZE * self.gridSize,
outlineColor = CAPSULE_COLOR,
fillColor = CAPSULE_COLOR,
width = 1)
capsuleImages[capsule] = dot
return capsuleImages
def removeFood(self, cell, foodImages ):
x, y = cell
remove_from_screen(foodImages[x][y])
def removeCapsule(self, cell, capsuleImages ):
x, y = cell
remove_from_screen(capsuleImages[(x, y)])
def drawExpandedCells(self, cells):
"""
Draws an overlay of expanded grid positions for search agents
"""
n = float(len(cells))
baseColor = [1.0, 0.0, 0.0]
self.clearExpandedCells()
self.expandedCells = []
for k, cell in enumerate(cells):
screenPos = self.to_screen( cell)
cellColor = formatColor(*[(n-k) * c * .5 / n + .25 for c in baseColor])
block = square(screenPos,
0.5 * self.gridSize,
color = cellColor,
filled = 1, behind=2)
self.expandedCells.append(block)
if self.frameTime < 0:
refresh()
def clearExpandedCells(self):
if 'expandedCells' in dir(self) and len(self.expandedCells) > 0:
for cell in self.expandedCells:
remove_from_screen(cell)
def updateDistributions(self, distributions):
"Draws an agent's belief distributions"
        if self.distributionImages is None:
self.drawDistributions(self.previousState)
for x in range(len(self.distributionImages)):
for y in range(len(self.distributionImages[0])):
image = self.distributionImages[x][y]
weights = [dist[ (x,y) ] for dist in distributions]
if sum(weights) != 0:
pass
# Fog of war
color = [0.0,0.0,0.0]
colors = GHOST_VEC_COLORS[1:] # With Pacman
if self.capture: colors = GHOST_VEC_COLORS
for weight, gcolor in zip(weights, colors):
color = [min(1.0, c + 0.95 * g * weight ** .3) for c,g in zip(color, gcolor)]
changeColor(image, formatColor(*color))
refresh()
class FirstPersonPacmanGraphics(PacmanGraphics):
def __init__(self, zoom = 1.0, showGhosts = True, capture = False, frameTime=0):
PacmanGraphics.__init__(self, zoom, frameTime=frameTime)
self.showGhosts = showGhosts
self.capture = capture
def initialize(self, state, isBlue = False):
self.isBlue = isBlue
PacmanGraphics.startGraphics(self, state)
# Initialize distribution images
walls = state.layout.walls
dist = []
self.layout = state.layout
# Draw the rest
self.distributionImages = None # initialize lazily
self.drawStaticObjects(state)
self.drawAgentObjects(state)
# Information
self.previousState = state
def lookAhead(self, config, state):
        if config.getDirection() == 'Stop':
            return
# Draw relevant ghosts
allGhosts = state.getGhostStates()
visibleGhosts = state.getVisibleGhosts()
for i, ghost in enumerate(allGhosts):
if ghost in visibleGhosts:
self.drawGhost(ghost, i)
else:
self.currentGhostImages[i] = None
def getGhostColor(self, ghost, ghostIndex):
return GHOST_COLORS[ghostIndex]
def getPosition(self, ghostState):
if not self.showGhosts and not ghostState.isPacman and ghostState.getPosition()[1] > 1:
return (-1000, -1000)
else:
return PacmanGraphics.getPosition(self, ghostState)
def add(x, y):
return (x[0] + y[0], x[1] + y[1])
SAVE_POSTSCRIPT = False
POSTSCRIPT_OUTPUT_DIR = 'frames'
FRAME_NUMBER = 0
import os
def saveFrame():
"Saves the current graphical output as a postscript file"
global SAVE_POSTSCRIPT, FRAME_NUMBER, POSTSCRIPT_OUTPUT_DIR
if not SAVE_POSTSCRIPT: return
if not os.path.exists(POSTSCRIPT_OUTPUT_DIR): os.mkdir(POSTSCRIPT_OUTPUT_DIR)
name = os.path.join(POSTSCRIPT_OUTPUT_DIR, 'frame_%08d.ps' % FRAME_NUMBER)
FRAME_NUMBER += 1
writePostscript(name) # writes the current canvas
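# Hedged usage sketch: frame capture is opt-in. A driver would flip the flag
# and call saveFrame() after every display update, e.g.
#   graphicsDisplay.SAVE_POSTSCRIPT = True   # module name assumed
#   graphicsDisplay.saveFrame()              # writes frames/frame_00000000.ps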
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import pytest
from pants.backend.core.tasks.list_goals import ListGoals
from pants.backend.core.tasks.task import Task
from pants.goal.goal import Goal
from pants.goal.task_registrar import TaskRegistrar
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
class ListGoalsTest(ConsoleTaskTestBase):
_INSTALLED_HEADER = 'Installed goals:'
_UNDOCUMENTED_HEADER = 'Undocumented goals:'
_LIST_GOALS_NAME = 'goals'
_LIST_GOALS_DESC = 'List all documented goals.'
_LLAMA_NAME = 'llama'
_LLAMA_DESC = 'With such handsome fiber, no wonder everyone loves Llamas.'
_ALPACA_NAME = 'alpaca'
@classmethod
def task_type(cls):
return ListGoals
class LlamaTask(Task):
pass
class AlpacaTask(Task):
pass
def test_list_goals(self):
Goal.clear()
self.assert_console_output(self._INSTALLED_HEADER)
TaskRegistrar(name=self._LIST_GOALS_NAME, action=ListGoals)\
.install().with_description(self._LIST_GOALS_DESC)
self.assert_console_output(
self._INSTALLED_HEADER,
' {0}: {1}'.format(self._LIST_GOALS_NAME, self._LIST_GOALS_DESC),
)
TaskRegistrar(name=self._LLAMA_NAME, action=ListGoalsTest.LlamaTask)\
.install().with_description(self._LLAMA_DESC)
self.assert_console_output(
self._INSTALLED_HEADER,
' {0}: {1}'.format(self._LIST_GOALS_NAME, self._LIST_GOALS_DESC),
' {0}: {1}'.format(self._LLAMA_NAME, self._LLAMA_DESC),
)
TaskRegistrar(name=self._ALPACA_NAME, action=ListGoalsTest.AlpacaTask, dependencies=[self._LLAMA_NAME])\
.install()
self.assert_console_output(
self._INSTALLED_HEADER,
' {0}: {1}'.format(self._LIST_GOALS_NAME, self._LIST_GOALS_DESC),
' {0}: {1}'.format(self._LLAMA_NAME, self._LLAMA_DESC),
)
def test_list_goals_all(self):
Goal.clear()
TaskRegistrar(name=self._LIST_GOALS_NAME, action=ListGoals)\
.install().with_description(self._LIST_GOALS_DESC)
TaskRegistrar(name=self._LLAMA_NAME, action=ListGoalsTest.LlamaTask)\
.install().with_description(self._LLAMA_DESC)
TaskRegistrar(name=self._ALPACA_NAME, action=ListGoalsTest.AlpacaTask, dependencies=[self._LLAMA_NAME])\
.install()
self.assert_console_output(
self._INSTALLED_HEADER,
' {0}: {1}'.format(self._LIST_GOALS_NAME, self._LIST_GOALS_DESC),
' {0}: {1}'.format(self._LLAMA_NAME, self._LLAMA_DESC),
'',
self._UNDOCUMENTED_HEADER,
' {0}'.format(self._ALPACA_NAME),
options={ 'all': True }
)
# TODO(John Sirois): Re-enable when fixing up ListGoals `--graph` in
# https://github.com/pantsbuild/pants/issues/918
@pytest.mark.xfail
def test_list_goals_graph(self):
Goal.clear()
TaskRegistrar(name=self._LIST_GOALS_NAME, action=ListGoals)\
.install().with_description(self._LIST_GOALS_DESC)
TaskRegistrar(name=self._LLAMA_NAME, action=ListGoalsTest.LlamaTask)\
.install().with_description(self._LLAMA_DESC)
TaskRegistrar(name=self._ALPACA_NAME, action=ListGoalsTest.AlpacaTask, dependencies=[self._LLAMA_NAME])\
.install()
self.assert_console_output(
'digraph G {\n rankdir=LR;\n graph [compound=true];',
' subgraph cluster_goals {\n node [style=filled];\n color = blue;\n label = "goals";',
' goals_goals [label="goals"];',
' }',
' subgraph cluster_llama {\n node [style=filled];\n color = blue;\n label = "llama";',
' llama_llama [label="llama"];',
' }',
' subgraph cluster_alpaca {\n node [style=filled];\n color = blue;\n label = "alpaca";',
' alpaca_alpaca [label="alpaca"];',
' }',
' alpaca_alpaca -> llama_llama [ltail=cluster_alpaca lhead=cluster_llama];',
'}',
options={ 'graph': True }
)
|
import os
from flask import request
from website.addons.dataverse.client import get_study, get_files, \
get_dataverse, connect_from_settings
from website.project.decorators import must_be_contributor_or_public
from website.project.decorators import must_have_addon
from website.util import rubeus
def dataverse_hgrid_root(node_addon, auth, state=None, **kwargs):
node = node_addon.owner
user_settings = node_addon.user_settings
default_state = 'released'
state = 'released' if not node.can_edit(auth) else state or default_state
connection = connect_from_settings(user_settings)
# Quit if no study linked
if node_addon.study_hdl is None or connection is None:
return []
dataverse = get_dataverse(connection, node_addon.dataverse_alias)
study = get_study(dataverse, node_addon.study_hdl)
# Quit if hdl does not produce a study
if study is None:
return []
released_files = get_files(study, released=True)
authorized = node.can_edit(auth)
# Produce draft version or quit if no released version is available
if not released_files:
if authorized:
state = 'draft'
else:
return []
study_name = node_addon.study
if len(study_name) > 23:
study_name = u'{0}...'.format(study_name[:20])
permissions = {
'edit': node.can_edit(auth) and not node.is_registration,
'view': node.can_view(auth)
}
urls = {
'upload': node.api_url_for('dataverse_upload_file'),
'fetch': node.api_url_for('dataverse_hgrid_data_contents', state=state),
'state': node.api_url_for('dataverse_root_folder_public'),
'release': node.api_url_for('dataverse_release_study'),
}
buttons = [rubeus.build_addon_button(
'<i class="fa fa-globe"></i> Release Study',
'releaseStudy')] if state == 'draft' else None
return [rubeus.build_addon_root(
node_addon,
study_name,
urls=urls,
permissions=permissions,
buttons=buttons,
study=study_name,
doi=study.doi,
dataverse=dataverse.title,
citation=study.citation,
hasReleasedFiles=bool(released_files),
state=state,
)]
@must_be_contributor_or_public
@must_have_addon('dataverse', 'node')
def dataverse_root_folder_public(node_addon, auth, **kwargs):
state = request.args['state']
return dataverse_hgrid_root(node_addon, auth=auth, state=state)
@must_be_contributor_or_public
@must_have_addon('dataverse', 'node')
def dataverse_hgrid_data_contents(node_addon, auth, **kwargs):
node = node_addon.owner
user_settings = node_addon.user_settings
state = request.args.get('state')
default_state = 'released'
state = 'released' if not node.can_edit(auth) else state or default_state
released = state == 'released'
can_edit = node.can_edit(auth) and not node.is_registration and not released
can_view = node.can_view(auth)
connection = connect_from_settings(user_settings)
if node_addon.study_hdl is None or connection is None:
return []
dataverse = get_dataverse(connection, node_addon.dataverse_alias)
study = get_study(dataverse, node_addon.study_hdl)
# Quit if hdl does not produce a study
if study is None:
return []
info = []
for f in get_files(study, released):
item = {
'addon': 'dataverse',
'provider': 'dataverse',
rubeus.KIND: 'file',
'name': f.name,
'path': f.name,
'file_id': f.id,
'ext': os.path.splitext(f.name)[1],
'urls': {
'view': node.web_url_for('dataverse_view_file',
path=f.id),
'download': node.web_url_for('dataverse_download_file',
path=f.id),
'delete': node.api_url_for('dataverse_delete_file',
path=f.id),
},
'permissions': {
'view': can_view,
'edit': can_edit,
},
}
info.append(item)
return {'data': info}
|
from django.contrib.contenttypes.models import ContentType
import json
from django.http import Http404, HttpResponse
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, redirect, render
from guardian.decorators import permission_required
from guardian.shortcuts import get_objects_for_user
from account.models import DepartmentGroup
from backend.tasks import TestConnectionTask
from event.models import NotificationPreferences
from .models import Application, Department, Environment, Server, ServerRole
from task.models import Execution
@login_required
def index(request):
data = {}
executions = Execution.objects.filter(task__application__department_id=request.current_department_id)
if not executions.count():
return redirect(reverse('first_steps_page'))
return render(request, 'page/index.html', data)
@permission_required('core.view_application', (Application, 'id', 'application_id'))
def application_page(request, application_id):
data = {}
data['application'] = get_object_or_404(Application, pk=application_id)
return render(request, 'page/application.html', data)
@permission_required('core.view_environment', (Environment, 'id', 'environment_id'))
def environment_page(request, environment_id):
data = {}
data['environment'] = get_object_or_404(Environment, pk=environment_id)
data['servers'] = list(Server.objects.filter(environment_id=environment_id).prefetch_related('roles'))
return render(request, 'page/environment.html', data)
@permission_required('core.view_environment', (Environment, 'servers__id', 'server_id'))
def server_test(request, server_id):
data = {}
data['server'] = get_object_or_404(Server, pk=server_id)
data['task_id'] = TestConnectionTask().delay(server_id).id
return render(request, 'partial/server_test.html', data)
@login_required
def server_test_ajax(request, task_id):
data = {}
task = TestConnectionTask().AsyncResult(task_id)
if task.status == 'SUCCESS':
status, output = task.get()
data['status'] = status
data['output'] = output
    elif task.status == 'FAILURE':  # Celery's failure state
data['status'] = False
else:
data['status'] = None
return HttpResponse(json.dumps(data), content_type="application/json")
@login_required
def first_steps_page(request):
data = {}
return render(request, 'page/first_steps.html', data)
@login_required
def settings_page(request, section='user', subsection='profile'):
data = {}
data['section'] = section
data['subsection'] = subsection
data['department'] = Department(pk=request.current_department_id)
data['on_settings'] = True
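    # Handlers are resolved dynamically: e.g. section='account',
    # subsection='profile' dispatches to _settings_account_profile below.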
handler = '_settings_%s_%s' % (section, subsection)
if section == 'system' and request.user.is_superuser is not True:
return redirect('index')
if section == 'department' and not request.user.has_perm('core.change_department', obj=data['department']):
return redirect('index')
if handler in globals():
data = globals()[handler](request, data)
else:
raise Http404
return render(request, 'page/settings.html', data)
def _settings_account_profile(request, data):
data['subsection_template'] = 'partial/account_profile.html'
from account.forms import account_create_form
form = account_create_form('user_profile', request, request.user.id)
form.fields['email'].widget.attrs['readonly'] = True
data['form'] = form
if request.method == 'POST':
if form.is_valid():
form.save()
data['user'] = form.instance
messages.success(request, 'Saved')
return data
def _settings_account_password(request, data):
data['subsection_template'] = 'partial/account_password.html'
from account.forms import account_create_form
form = account_create_form('user_password', request, request.user.id)
data['form'] = form
if request.method == 'POST':
if form.is_valid():
user = form.save(commit=False)
user.set_password(user.password)
user.save()
data['user'] = form.instance
messages.success(request, 'Saved')
return data
def _settings_account_notifications(request, data):
data['subsection_template'] = 'partial/account_notifications.html'
data['applications'] = get_objects_for_user(request.user, 'core.view_application')
content_type = ContentType.objects.get_for_model(Application)
if request.method == 'POST':
for application in data['applications']:
key = 'notification[%s]' % application.id
notification, created = NotificationPreferences.objects.get_or_create(
user=request.user,
event_type='ExecutionFinish',
content_type=content_type,
object_id=application.id)
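            # A checked box submits its key; only save when the stored
            # preference actually changes.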
if notification.is_active != (key in request.POST):
notification.is_active = key in request.POST
notification.save()
messages.success(request, 'Saved')
data['notifications'] = NotificationPreferences.objects.filter(
user=request.user,
event_type='ExecutionFinish',
content_type=content_type.id).values_list('object_id', 'is_active')
data['notifications'] = dict(data['notifications'])
return data
def _settings_department_applications(request, data):
data['subsection_template'] = 'partial/application_list.html'
data['applications'] = Application.objects.filter(department_id=request.current_department_id)
    data['empty'] = not data['applications'].exists()
return data
def _settings_department_users(request, data):
data['subsection_template'] = 'partial/user_list.html'
from guardian.shortcuts import get_users_with_perms
department = Department.objects.get(pk=request.current_department_id)
data['users'] = get_users_with_perms(department).prefetch_related('groups__departmentgroup').order_by('name')
data['department_user_list'] = True
data['form_name'] = 'user'
return data
def _settings_department_groups(request, data):
data['subsection_template'] = 'partial/group_list.html'
data['groups'] = DepartmentGroup.objects.filter(department_id=request.current_department_id)
return data
def _settings_department_serverroles(request, data):
data['subsection_template'] = 'partial/serverrole_list.html'
data['serverroles'] = ServerRole.objects.filter(department_id=request.current_department_id)
    data['empty'] = not data['serverroles'].exists()
return data
@user_passes_test(lambda u: u.is_superuser)
def _settings_system_departments(request, data):
data['subsection_template'] = 'partial/department_list.html'
data['departments'] = Department.objects.all()
return data
@user_passes_test(lambda u: u.is_superuser)
def _settings_system_users(request, data):
data['subsection_template'] = 'partial/user_list.html'
data['users'] = get_user_model().objects.exclude(id=-1).prefetch_related('groups__departmentgroup__department').order_by('name')
data['form_name'] = 'usersystem'
return data
def department_switch(request, id):
department = get_object_or_404(Department, pk=id)
if request.user.has_perm('core.view_department', department):
request.session['current_department_id'] = int(id)
else:
messages.error(request, 'Access forbidden')
return redirect('index')
def handle_403(request):
messages.error(request, 'Access forbidden')
return redirect('index')
|
"""TensorSignature class and utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
class TensorSignature(collections.namedtuple(
"TensorSignature", ["dtype", "shape", "is_sparse"])):
"""Signature of the `Tensor` object.
Useful to check compatibility of tensors.
Attributes:
dtype: `DType` object.
    shape: `TensorShape` object.
    is_sparse: `bool`, True if the tensor was a `SparseTensor`.
  """
def __new__(cls, tensor):
if isinstance(tensor, ops.SparseTensor):
return super(TensorSignature, cls).__new__(
cls, dtype=tensor.values.dtype, shape=None, is_sparse=True)
return super(TensorSignature, cls).__new__(
cls, dtype=tensor.dtype, shape=tensor.get_shape(), is_sparse=False)
def is_compatible_with(self, other):
"""Returns True if signatures are compatible."""
def _shape_is_compatible_0dim(this, other):
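      # Dim 0 (typically the batch size) is always treated as compatible;
      # only the remaining dims must match.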
other = tensor_shape.as_shape(other)
if this.ndims != other.ndims:
return False
for dim, (x_dim, y_dim) in enumerate(zip(this.dims, other.dims)):
if dim == 0:
continue
if not x_dim.is_compatible_with(y_dim):
return False
return True
if other.is_sparse:
return self.is_sparse and self.dtype.is_compatible_with(other.dtype)
return (self.dtype.is_compatible_with(other.dtype) and
_shape_is_compatible_0dim(self.shape, other.shape) and
not self.is_sparse)
def get_placeholder(self):
if self.is_sparse:
return array_ops.sparse_placeholder(dtype=self.dtype)
return array_ops.placeholder(dtype=self.dtype, shape=self.shape)
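# Illustrative usage sketch (assuming the TF 0.x-era API this module is
# written against):
#   x = array_ops.placeholder(dtypes.float32, shape=[None, 10])
#   TensorSignature(x).is_compatible_with(TensorSignature(x))  # -> True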
def tensors_compatible(tensors, signatures):
"""Check that tensors are compatible with signatures.
Args:
tensors: Dict of `Tensor` objects or single `Tensor` object.
signatures: Dict of `TensorSignature` objects or
single `TensorSignature` object.
Returns:
True if all tensors are compatible, False otherwise.
"""
# Dict of Tensors as input.
if isinstance(tensors, dict):
if not isinstance(signatures, dict):
return False
for key in signatures:
if key not in tensors:
return False
if not TensorSignature(tensors[key]).is_compatible_with(signatures[key]):
return False
return True
# Single tensor as input.
if isinstance(signatures, dict):
return False
return TensorSignature(tensors).is_compatible_with(signatures)
def create_signatures(tensors):
"""Creates TensorSignature objects for given tensors.
Args:
tensors: Dict of `Tensor` objects or single `Tensor`.
Returns:
Dict of `TensorSignature` objects or single `TensorSignature`.
"""
if isinstance(tensors, dict):
return {
key: TensorSignature(tensors[key]) for key in tensors}
return TensorSignature(tensors)
def create_placeholders_from_signatures(signatures):
"""Creates placeholders from given signatures.
Args:
signatures: Dict of `TensorSignature` objects or single `TensorSignature`.
Returns:
Dict of `tf.placeholder` objects or single `tf.placeholder`.
"""
if not isinstance(signatures, dict):
return signatures.get_placeholder()
return {
key: signatures[key].get_placeholder()
for key in signatures}
|
import unittest
class Test0017(unittest.TestCase):
def test_problem(self):
one_to_nine = [3, 3, 5, 4, 4, 3, 5, 5, 4]
ten_to_nineteen = [3, 6, 6, 8, 8, 7, 7, 9, 8, 8]
twenty_to_ninety = [6, 6, 5, 5, 5, 7, 6, 6]
words_len = 0
sum_1_to_9 = sum(one_to_nine)
sum_10_to_19 = sum(ten_to_nineteen)
sum_20_to_90 = sum(twenty_to_ninety)
        # letters used by 1-9 and 10-19
sum_1_to_99 = sum_1_to_9 + sum_10_to_19
        # letters used by 20-99
        sum_1_to_99 += (len(twenty_to_ninety) * sum_1_to_9 +
                        (len(one_to_nine) + 1) * sum_20_to_90)
        # running total for 1-99
words_len += sum_1_to_99
        # 100-999: 'hundredand' contributes 10 letters per use
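        # Each of the 9 hundreds digits prefixes every 1-99 value
        # (9 * sum_1_to_99); 'X hundredand' appears 100 times per digit
        # (100 * (sum_1_to_9 + 10 * 9)); the exact hundreds drop the
        # 3-letter 'and' (hence - 3 * 9).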
words_len += len(one_to_nine) * sum_1_to_99 + 100 * (
sum_1_to_9 + 10 * len(one_to_nine)) - 3 * len(one_to_nine)
        # 1000: 'onethousand' is 11 letters
words_len += 11
self.assertEqual(words_len, 21124)
|
from oslo_log import log as logging
from tempest_lib import exceptions as lib_exc
from tempest.common import fixed_network
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestLargeOpsScenario(manager.ScenarioTest):
"""
Test large operations.
    This test:
    * Spins up multiple instances in one nova call, repeated three times,
      as a regular user
    * TODO: do the same for cinder
"""
@classmethod
def skip_checks(cls):
super(TestLargeOpsScenario, cls).skip_checks()
if CONF.scenario.large_ops_number < 1:
raise cls.skipException("large_ops_number not set to multiple "
"instances")
@classmethod
def setup_credentials(cls):
cls.set_network_resources()
super(TestLargeOpsScenario, cls).setup_credentials()
@classmethod
def resource_setup(cls):
super(TestLargeOpsScenario, cls).resource_setup()
# list of cleanup calls to be executed in reverse order
cls._cleanup_resources = []
@classmethod
def resource_cleanup(cls):
while cls._cleanup_resources:
function, args, kwargs = cls._cleanup_resources.pop(-1)
try:
function(*args, **kwargs)
except lib_exc.NotFound:
pass
super(TestLargeOpsScenario, cls).resource_cleanup()
@classmethod
def addCleanupClass(cls, function, *arguments, **keywordArguments):
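        # Class-level analogue of addCleanup: registered entries are popped
        # and executed in reverse order by resource_cleanup().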
cls._cleanup_resources.append((function, arguments, keywordArguments))
def _wait_for_server_status(self, status):
for server in self.servers:
# Make sure nova list keeps working throughout the build process
self.servers_client.list_servers()
waiters.wait_for_server_status(self.servers_client,
server['id'], status)
def nova_boot(self):
name = data_utils.rand_name('scenario-server')
flavor_id = CONF.compute.flavor_ref
# Explicitly create secgroup to avoid cleanup at the end of testcases.
# Since no traffic is tested, we don't need to actually add rules to
# secgroup
secgroup = self.security_groups_client.create_security_group(
name='secgroup-%s' % name, description='secgroup-desc-%s' % name)
self.addCleanupClass(self.security_groups_client.delete_security_group,
secgroup['id'])
create_kwargs = {
'min_count': CONF.scenario.large_ops_number,
'security_groups': [{'name': secgroup['name']}]
}
network = self.get_tenant_network()
create_kwargs = fixed_network.set_networks_kwarg(network,
create_kwargs)
#self.servers_client.create_server(
self.create_server(
name,
'',
flavor_id,
**create_kwargs)
# needed because of bug 1199788
params = {'name': name}
server_list = self.servers_client.list_servers(**params)
self.servers = server_list['servers']
for server in self.servers:
            # Register the termination waits first so that, with cleanups
            # running in reverse order, we wait for each server to clear
            # after it has been deleted
self.addCleanupClass(self.servers_client.
wait_for_server_termination,
server['id'])
for server in self.servers:
self.addCleanupClass(self.servers_client.delete_server,
server['id'])
self._wait_for_server_status('ACTIVE')
def _large_ops_scenario(self):
#self.glance_image_create()
self.nova_boot()
@test.idempotent_id('14ba0e78-2ed9-4d17-9659-a48f4756ecb3')
@test.services('compute', 'image')
def test_large_ops_scenario_1(self):
self._large_ops_scenario()
@test.idempotent_id('b9b79b88-32aa-42db-8f8f-dcc8f4b4ccfe')
@test.services('compute', 'image')
def test_large_ops_scenario_2(self):
self._large_ops_scenario()
@test.idempotent_id('3aab7e82-2de3-419a-9da1-9f3a070668fb')
@test.services('compute', 'image')
def test_large_ops_scenario_3(self):
self._large_ops_scenario()
|
import copy
import django
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IgnoreArg # noqa
from mox3.mox import IsA # noqa
import six
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
class RouterMixin(object):
@test.create_stubs({
api.neutron: ('router_get', 'port_list',
'network_get', 'is_extension_supported',
'list_l3_agent_hosting_router'),
})
def _get_detail(self, router, extraroute=True, lookup_l3=False):
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'extraroute')\
.MultipleTimes().AndReturn(extraroute)
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
api.neutron.port_list(IsA(http.HttpRequest),
device_id=router.id)\
.AndReturn([self.ports.first()])
self._mock_external_network_get(router)
if lookup_l3:
agent = self.agents.list()[1]
api.neutron.list_l3_agent_hosting_router(IsA(http.HttpRequest), router.id)\
.AndReturn([agent])
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:%s'
':routers:detail' % self.DASHBOARD,
args=[router.id]))
return res
def _mock_external_network_list(self, alter_ids=False):
search_opts = {'router:external': True}
ext_nets = [n for n in self.networks.list() if n['router:external']]
if alter_ids:
for ext_net in ext_nets:
ext_net.id += 'some extra garbage'
api.neutron.network_list(
IsA(http.HttpRequest),
**search_opts).AndReturn(ext_nets)
def _mock_external_network_get(self, router):
ext_net_id = router.external_gateway_info['network_id']
ext_net = self.networks.list()[2]
api.neutron.network_get(IsA(http.HttpRequest), ext_net_id,
expand_subnet=False).AndReturn(ext_net)
def _mock_network_list(self, tenant_id):
api.neutron.network_list(
IsA(http.HttpRequest),
shared=False,
tenant_id=tenant_id).AndReturn(self.networks.list())
api.neutron.network_list(
IsA(http.HttpRequest),
shared=True).AndReturn([])
class RouterTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
DETAIL_PATH = 'horizon:%s:routers:detail' % DASHBOARD
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_index(self):
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, '%s/routers/index.html' % self.DASHBOARD)
routers = res.context['table'].data
self.assertItemsEqual(routers, self.routers.list())
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_index_router_list_exception(self):
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).MultipleTimes().AndRaise(
self.exceptions.neutron)
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, '%s/routers/index.html' % self.DASHBOARD)
self.assertEqual(len(res.context['table'].data), 0)
self.assertMessageCount(res, error=1)
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_set_external_network_empty(self):
router = self.routers.first()
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).MultipleTimes().AndReturn([router])
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list(alter_ids=True)
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
table_data = res.context['table'].data
self.assertEqual(len(table_data), 1)
self.assertIn('(Not Found)',
table_data[0]['external_gateway_info']['network'])
self.assertTemplateUsed(res, '%s/routers/index.html' % self.DASHBOARD)
self.assertMessageCount(res, error=1)
def test_router_detail(self):
router = self.routers.first()
res = self._get_detail(router)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
ports = res.context['interfaces_table'].data
self.assertItemsEqual(ports, [self.ports.first()])
@test.create_stubs({api.neutron: ('router_get',)})
def test_router_detail_exception(self):
router = self.routers.first()
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:%s'
':routers:detail' % self.DASHBOARD,
args=[router.id]))
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_list', 'network_list',
'port_list', 'router_delete',),
quotas: ('tenant_quota_usages',)})
def test_router_delete(self):
router = self.routers.first()
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
self._mock_external_network_list()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
self._mock_external_network_list()
api.neutron.port_list(IsA(http.HttpRequest),
device_id=router.id, device_owner=IgnoreArg())\
.AndReturn([])
api.neutron.router_delete(IsA(http.HttpRequest), router.id)
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
formData = {'action': 'routers__delete__' + router.id}
res = self.client.post(self.INDEX_URL, formData, follow=True)
self.assertNoFormErrors(res)
self.assertMessageCount(response=res, success=1)
self.assertIn('Deleted Router: ' + router.name,
res.content.decode('utf-8'))
@test.create_stubs({api.neutron: ('router_list', 'network_list',
'port_list', 'router_remove_interface',
'router_delete',),
quotas: ('tenant_quota_usages',)})
def test_router_with_interface_delete(self):
router = self.routers.first()
ports = self.ports.list()
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
self._mock_external_network_list()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
self._mock_external_network_list()
api.neutron.port_list(IsA(http.HttpRequest),
device_id=router.id, device_owner=IgnoreArg())\
.AndReturn(ports)
for port in ports:
api.neutron.router_remove_interface(IsA(http.HttpRequest),
router.id, port_id=port.id)
api.neutron.router_delete(IsA(http.HttpRequest), router.id)
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
formData = {'action': 'routers__delete__' + router.id}
res = self.client.post(self.INDEX_URL, formData, follow=True)
self.assertNoFormErrors(res)
self.assertMessageCount(response=res, success=1)
self.assertIn('Deleted Router: ' + router.name,
res.content.decode('utf-8'))
class RouterActionTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
DETAIL_PATH = 'horizon:%s:routers:detail' % DASHBOARD
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',
'network_list')})
def test_router_create_post(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.AndReturn(False)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.AndReturn(False)
api.neutron.network_list(IsA(http.HttpRequest))\
.AndReturn(self.networks.list())
params = {'name': router.name,
'admin_state_up': str(router.admin_state_up)}
api.neutron.router_create(IsA(http.HttpRequest), **params)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'name': router.name,
'admin_state_up': router.admin_state_up}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',
'network_list')})
def test_router_create_post_mode_server_default(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.AndReturn(True)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.AndReturn(True)
api.neutron.network_list(IsA(http.HttpRequest))\
.AndReturn(self.networks.list())
params = {'name': router.name,
'admin_state_up': str(router.admin_state_up)}
api.neutron.router_create(IsA(http.HttpRequest), **params)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'name': router.name,
'mode': 'server_default',
'ha': 'server_default',
'admin_state_up': router.admin_state_up}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',
'network_list')})
def test_dvr_ha_router_create_post(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.MultipleTimes().AndReturn(True)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.MultipleTimes().AndReturn(True)
api.neutron.network_list(IsA(http.HttpRequest))\
.AndReturn(self.networks.list())
param = {'name': router.name,
'distributed': True,
'ha': True,
'admin_state_up': str(router.admin_state_up)}
api.neutron.router_create(IsA(http.HttpRequest), **param)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'name': router.name,
'mode': 'distributed',
'ha': 'enabled',
'admin_state_up': router.admin_state_up}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',
'network_list')})
def test_router_create_post_exception_error_case_409(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.MultipleTimes().AndReturn(False)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.AndReturn(False)
self.exceptions.neutron.status_code = 409
api.neutron.network_list(IsA(http.HttpRequest))\
.MultipleTimes().AndReturn(self.networks.list())
params = {'name': router.name,
'admin_state_up': str(router.admin_state_up)}
api.neutron.router_create(IsA(http.HttpRequest), **params)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'name': router.name,
'admin_state_up': router.admin_state_up}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',
'network_list')})
def test_router_create_post_exception_error_case_non_409(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.MultipleTimes().AndReturn(False)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.MultipleTimes().AndReturn(False)
self.exceptions.neutron.status_code = 999
api.neutron.network_list(IsA(http.HttpRequest))\
.MultipleTimes().AndReturn(self.networks.list())
params = {'name': router.name,
'admin_state_up': str(router.admin_state_up)}
api.neutron.router_create(IsA(http.HttpRequest), **params)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'name': router.name,
'admin_state_up': router.admin_state_up}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_get',
'get_feature_permission')})
def _test_router_update_get(self, dvr_enabled=False,
current_dvr=False,
ha_enabled=False):
router = [r for r in self.routers.list()
if r.distributed == current_dvr][0]
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "update")\
.AndReturn(dvr_enabled)
# TODO(amotoki): Due to Neutron Bug 1378525, Neutron disables
# PUT operation. It will be fixed in Kilo cycle.
# api.neutron.get_feature_permission(IsA(http.HttpRequest),
# "l3-ha", "update")\
# .AndReturn(ha_enabled)
self.mox.ReplayAll()
url = reverse('horizon:%s:routers:update' % self.DASHBOARD,
args=[router.id])
return self.client.get(url)
def test_router_update_get_dvr_disabled(self):
res = self._test_router_update_get(dvr_enabled=False)
self.assertTemplateUsed(res, 'project/routers/update.html')
self.assertNotContains(res, 'Router Type')
self.assertNotContains(res, 'id="id_mode"')
def test_router_update_get_dvr_enabled_mode_centralized(self):
res = self._test_router_update_get(dvr_enabled=True, current_dvr=False)
self.assertTemplateUsed(res, 'project/routers/update.html')
self.assertContains(res, 'Router Type')
        # Check that both menu choices are displayed.
self.assertContains(
res,
'<option value="centralized" selected="selected">'
'Centralized</option>',
html=True)
self.assertContains(
res,
'<option value="distributed">Distributed</option>',
html=True)
def test_router_update_get_dvr_enabled_mode_distributed(self):
res = self._test_router_update_get(dvr_enabled=True, current_dvr=True)
self.assertTemplateUsed(res, 'project/routers/update.html')
self.assertContains(res, 'Router Type')
if django.VERSION >= (1, 10):
pattern = ('<input class="form-control" id="id_mode" name="mode" '
'readonly="readonly" type="text" value="distributed" '
'required/>')
else:
pattern = ('<input class="form-control" id="id_mode" name="mode" '
'readonly="readonly" type="text" '
'value="distributed" />')
self.assertContains(res, pattern, html=True)
self.assertNotContains(res, 'centralized')
@test.create_stubs({api.neutron: ('router_get',
'router_update',
'get_feature_permission')})
def test_router_update_post_dvr_ha_disabled(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "update")\
.AndReturn(False)
# TODO(amotoki): Due to Neutron Bug 1378525, Neutron disables
# PUT operation. It will be fixed in Kilo cycle.
# api.neutron.get_feature_permission(IsA(http.HttpRequest),
# "l3-ha", "update")\
# .AndReturn(False)
api.neutron.router_update(IsA(http.HttpRequest), router.id,
name=router.name,
admin_state_up=router.admin_state_up)\
.AndReturn(router)
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'name': router.name,
'admin_state': router.admin_state_up}
url = reverse('horizon:%s:routers:update' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_get',
'router_update',
'get_feature_permission')})
def test_router_update_post_dvr_ha_enabled(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "update")\
.AndReturn(True)
# TODO(amotoki): Due to Neutron Bug 1378525, Neutron disables
# PUT operation. It will be fixed in Kilo cycle.
# api.neutron.get_feature_permission(IsA(http.HttpRequest),
# "l3-ha", "update")\
# .AndReturn(True)
api.neutron.router_update(IsA(http.HttpRequest), router.id,
name=router.name,
admin_state_up=router.admin_state_up,
# ha=True,
distributed=True).AndReturn(router)
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'name': router.name,
'admin_state': router.admin_state_up,
'mode': 'distributed',
'ha': True}
url = reverse('horizon:%s:routers:update' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
def _test_router_addinterface(self, raise_error=False):
router = self.routers.first()
subnet = self.subnets.first()
port = self.ports.first()
add_interface = api.neutron.router_add_interface(
IsA(http.HttpRequest), router.id, subnet_id=subnet.id)
if raise_error:
add_interface.AndRaise(self.exceptions.neutron)
else:
add_interface.AndReturn({'subnet_id': subnet.id,
'port_id': port.id})
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndReturn(port)
self._check_router_addinterface(router, subnet)
def _check_router_addinterface(self, router, subnet, ip_address=''):
# mock APIs used to show router detail
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
api.neutron.port_list(IsA(http.HttpRequest), device_id=router.id)\
.AndReturn([])
self._mock_network_list(router['tenant_id'])
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'router_name': router.name,
'subnet_id': subnet.id,
'ip_address': ip_address}
url = reverse('horizon:%s:routers:addinterface' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = reverse(self.DETAIL_PATH, args=[router.id])
self.assertRedirectsNoFollow(res, detail_url)
@test.create_stubs({api.neutron: ('router_get',
'router_add_interface',
'port_get',
'network_list',
'port_list')})
def test_router_addinterface(self):
self._test_router_addinterface()
@test.create_stubs({api.neutron: ('router_get',
'router_add_interface',
'network_list',
'port_list')})
def test_router_addinterface_exception(self):
self._test_router_addinterface(raise_error=True)
def _test_router_addinterface_ip_addr(self, errors=None):
errors = errors or []
router = self.routers.first()
subnet = self.subnets.first()
port = self.ports.first()
ip_addr = port['fixed_ips'][0]['ip_address']
self._setup_mock_addinterface_ip_addr(router, subnet, port,
ip_addr, errors)
self._check_router_addinterface(router, subnet, ip_addr)
def _setup_mock_addinterface_ip_addr(self, router, subnet, port,
ip_addr, errors=None):
errors = errors or []
subnet_get = api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)
if 'subnet_get' in errors:
subnet_get.AndRaise(self.exceptions.neutron)
return
subnet_get.AndReturn(subnet)
params = {'network_id': subnet.network_id,
'fixed_ips': [{'subnet_id': subnet.id,
'ip_address': ip_addr}]}
port_create = api.neutron.port_create(IsA(http.HttpRequest), **params)
if 'port_create' in errors:
port_create.AndRaise(self.exceptions.neutron)
return
port_create.AndReturn(port)
add_inf = api.neutron.router_add_interface(
IsA(http.HttpRequest), router.id, port_id=port.id)
if 'add_interface' not in errors:
return
add_inf.AndRaise(self.exceptions.neutron)
port_delete = api.neutron.port_delete(IsA(http.HttpRequest), port.id)
if 'port_delete' in errors:
port_delete.AndRaise(self.exceptions.neutron)
@test.create_stubs({api.neutron: ('router_add_interface', 'subnet_get',
'port_create',
'router_get', 'network_list',
'port_list')})
def test_router_addinterface_ip_addr(self):
self._test_router_addinterface_ip_addr()
@test.create_stubs({api.neutron: ('subnet_get', 'router_get',
'network_list', 'port_list')})
def test_router_addinterface_ip_addr_exception_subnet_get(self):
self._test_router_addinterface_ip_addr(errors=['subnet_get'])
@test.create_stubs({api.neutron: ('subnet_get', 'port_create',
'router_get', 'network_list',
'port_list')})
def test_router_addinterface_ip_addr_exception_port_create(self):
self._test_router_addinterface_ip_addr(errors=['port_create'])
@test.create_stubs({api.neutron: ('router_add_interface', 'subnet_get',
'port_create', 'port_delete',
'router_get', 'network_list',
'port_list')})
def test_router_addinterface_ip_addr_exception_add_interface(self):
self._test_router_addinterface_ip_addr(errors=['add_interface'])
@test.create_stubs({api.neutron: ('router_add_interface', 'subnet_get',
'port_create', 'port_delete',
'router_get', 'network_list',
'port_list')})
def test_router_addinterface_ip_addr_exception_port_delete(self):
self._test_router_addinterface_ip_addr(errors=['add_interface',
'port_delete'])
@test.create_stubs({api.neutron: ('router_get',
'router_add_gateway',
'network_list')})
def test_router_add_gateway(self):
router = self.routers.first()
network = self.networks.first()
api.neutron.router_add_gateway(
IsA(http.HttpRequest),
router.id,
network.id).AndReturn(None)
api.neutron.router_get(
IsA(http.HttpRequest), router.id).AndReturn(router)
search_opts = {'router:external': True}
api.neutron.network_list(
IsA(http.HttpRequest), **search_opts).AndReturn([network])
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'router_name': router.name,
'network_id': network.id}
url = reverse('horizon:%s:routers:setgateway' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = self.INDEX_URL
self.assertRedirectsNoFollow(res, detail_url)
@test.create_stubs({api.neutron: ('router_get',
'router_add_gateway',
'network_list')})
def test_router_add_gateway_exception(self):
router = self.routers.first()
network = self.networks.first()
api.neutron.router_add_gateway(
IsA(http.HttpRequest),
router.id,
network.id).AndRaise(self.exceptions.neutron)
api.neutron.router_get(
IsA(http.HttpRequest), router.id).AndReturn(router)
search_opts = {'router:external': True}
api.neutron.network_list(
IsA(http.HttpRequest), **search_opts).AndReturn([network])
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'router_name': router.name,
'network_id': network.id}
url = reverse('horizon:%s:routers:setgateway' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = self.INDEX_URL
self.assertRedirectsNoFollow(res, detail_url)
class RouterRouteTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
DETAIL_PATH = 'horizon:%s:routers:detail' % DASHBOARD
def test_extension_hides_without_routes(self):
router = self.routers_with_routes.first()
res = self._get_detail(router, extraroute=False)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
self.assertNotIn('extra_routes_table', res.context)
def test_routerroute_detail(self):
router = self.routers_with_routes.first()
res = self._get_detail(router, extraroute=True)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
routes = res.context['extra_routes_table'].data
routes_dict = [r._apidict for r in routes]
self.assertItemsEqual(routes_dict, router['routes'])
@test.create_stubs({api.neutron: ('router_get', 'router_update')})
def _test_router_addrouterroute(self, raise_error=False):
pre_router = self.routers_with_routes.first()
post_router = copy.deepcopy(pre_router)
route = {'nexthop': '10.0.0.5', 'destination': '40.0.1.0/24'}
post_router['routes'].insert(0, route)
api.neutron.router_get(IsA(http.HttpRequest), pre_router.id)\
.MultipleTimes().AndReturn(pre_router)
params = {}
params['routes'] = post_router['routes']
router_update = api.neutron.router_update(IsA(http.HttpRequest),
pre_router.id, **params)
if raise_error:
router_update.AndRaise(self.exceptions.neutron)
else:
router_update.AndReturn({'router': post_router})
self.mox.ReplayAll()
form_data = copy.deepcopy(route)
form_data['router_id'] = pre_router.id
url = reverse('horizon:%s:routers:addrouterroute' % self.DASHBOARD,
args=[pre_router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = reverse(self.DETAIL_PATH, args=[pre_router.id])
self.assertRedirectsNoFollow(res, detail_url)
def test_router_addrouterroute(self):
if self.DASHBOARD == 'project':
self._test_router_addrouterroute()
self.assertMessageCount(success=1)
def test_router_addrouterroute_exception(self):
if self.DASHBOARD == 'project':
self._test_router_addrouterroute(raise_error=True)
self.assertMessageCount(error=1)
@test.create_stubs({api.neutron: ('router_get', 'router_update',
'network_get', 'port_list',
'is_extension_supported')})
def test_router_removeroute(self):
if self.DASHBOARD == 'admin':
return
pre_router = self.routers_with_routes.first()
post_router = copy.deepcopy(pre_router)
route = post_router['routes'].pop()
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'extraroute')\
.MultipleTimes().AndReturn(True)
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(pre_router)
params = {}
params['routes'] = post_router['routes']
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(pre_router)
router_update = api.neutron.router_update(IsA(http.HttpRequest),
pre_router.id, **params)
router_update.AndReturn({'router': post_router})
api.neutron.port_list(IsA(http.HttpRequest),
device_id=pre_router.id)\
.AndReturn([self.ports.first()])
self._mock_external_network_get(pre_router)
self.mox.ReplayAll()
form_route_id = route['nexthop'] + ":" + route['destination']
form_data = {'action': 'extra_routes__delete__%s' % form_route_id}
url = reverse(self.DETAIL_PATH, args=[pre_router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
class RouterViewTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_create_button_disabled_when_quota_exceeded(self):
quota_data = self.neutron_quota_usages.first()
quota_data['routers']['available'] = 0
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, 'project/routers/index.html')
routers = res.context['routers_table'].data
self.assertItemsEqual(routers, self.routers.list())
create_action = self.getAndAssertTableAction(res, 'routers', 'create')
self.assertIn('disabled', create_action.classes,
'Create button is not disabled')
self.assertEqual('Create Router (Quota exceeded)',
create_action.verbose_name)
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_create_button_shown_when_quota_disabled(self):
quota_data = self.neutron_quota_usages.first()
quota_data['routers'].pop('available')
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, 'project/routers/index.html')
routers = res.context['routers_table'].data
self.assertItemsEqual(routers, self.routers.list())
create_action = self.getAndAssertTableAction(res, 'routers', 'create')
self.assertFalse('disabled' in create_action.classes,
'Create button should not be disabled')
self.assertEqual('Create Router',
create_action.verbose_name)
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_create_button_attributes(self):
quota_data = self.neutron_quota_usages.first()
quota_data['routers']['available'] = 10
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, 'project/routers/index.html')
routers = res.context['routers_table'].data
self.assertItemsEqual(routers, self.routers.list())
create_action = self.getAndAssertTableAction(res, 'routers', 'create')
self.assertEqual(set(['ajax-modal']), set(create_action.classes))
self.assertEqual('Create Router',
six.text_type(create_action.verbose_name))
self.assertEqual('horizon:project:routers:create', create_action.url)
self.assertEqual((('network', 'create_router'),),
create_action.policy_rules)
|
from __future__ import absolute_import
from __future__ import print_function
from chaco.label import Label
from six.moves import map
from pychron.core.ui import set_qt
set_qt()
from chaco.abstract_overlay import AbstractOverlay
from kiva.fonttools import str_to_font
from traits.api import HasTraits, Instance, Float, File, Property, Str, List
from traitsui.api import View, Controller, UItem
from chaco.api import OverlayPlotContainer
from enable.component_editor import ComponentEditor
from pyface.api import FileDialog, OK
from lxml.etree import ElementTree, Element
from chaco.plot import Plot
from chaco.array_plot_data import ArrayPlotData
from numpy import linspace, cos, sin, pi
import os
import csv
from chaco.data_label import DataLabel
from pychron.paths import paths
from chaco.plot_graphics_context import PlotGraphicsContext
from traitsui.menu import Action
import math
from pychron.core.helpers.strtools import to_bool
class myDataLabel(DataLabel):
show_label_coords = False
marker_visible = False
label_position = "center"
border_visible = False
class LabelsOverlay(AbstractOverlay):
labels = List
def overlay(self, other_component, gc, view_bounds=None, mode="normal"):
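        # Map each (x, y) data point to screen coordinates and draw its
        # label centered horizontally, slightly above the point.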
with gc:
gc.set_font(str_to_font(None, None, "7"))
for x, y, l in self.labels:
ll = Label(x=x, y=y, text=l, font="modern 7")
w, h = ll.get_bounding_box(gc)
x, y = other_component.map_screen([(x, y)])[0]
gc.set_text_position(x - w / 2.0, y + 5)
gc.show_text(l)
class RotatingContainer(OverlayPlotContainer):
rotation = Float(0)
def _draw(self, gc, *args, **kw):
with gc:
w2 = self.width / 2
h2 = self.height / 2
# gc.translate_ctm(w2, h2)
# gc.rotate_ctm(math.radians(self.rotation))
# gc.translate_ctm(-w2, -h2)
super(RotatingContainer, self)._draw(gc, *args, **kw)
class GraphicGeneratorController(Controller):
def save(self, info):
self.model.save()
def traits_view(self):
w, h = 750, 750
v = View(
UItem("srcpath"),
# Item('rotation'),
UItem("container", editor=ComponentEditor(), style="custom"),
width=w + 2,
height=h + 56,
resizable=True,
buttons=[Action(name="Save", action="save"), "OK", "Cancel"],
)
return v
class GraphicModel(HasTraits):
srcpath = File
xmlpath = File
container = Instance(OverlayPlotContainer)
name = Property
_name = Str
rotation = Float(enter_set=True, auto_set=False)
initialized = False
def _get_name(self):
return os.path.splitext(
self._name if self._name else os.path.basename(self.srcpath)
)[0]
def save(self, path=None):
# print self.container.bounds
if path is None:
dlg = FileDialog(action="save as", default_directory=paths.data_dir or "")
if dlg.open() == OK:
path = dlg.path
if path is not None:
_, tail = os.path.splitext(path)
c = self.container
if tail == ".pdf":
from chaco.pdf_graphics_context import PdfPlotGraphicsContext
gc = PdfPlotGraphicsContext(filename=path, pagesize="letter")
else:
if not tail in (".png", ".jpg", ".tiff"):
path = "{}.png".format(path)
gc = PlotGraphicsContext((int(c.outer_width), int(c.outer_height)))
# c.use_backbuffer = False
# for ci in c.components:
# try:
# ci.x_axis.visible = False
# ci.y_axis.visible = False
# except Exception:
# pass
# c.use_backbuffer = False
from reportlab.lib.pagesizes import LETTER
c.do_layout(size=(LETTER[1], LETTER[1]), force=True)
gc.render_component(c)
# c.use_backbuffer = True
gc.save(path)
self._name = os.path.basename(path)
def load(self, path):
parser = ElementTree(file=open(path, "r"))
circles = parser.find("circles")
outline = parser.find("outline")
bb = outline.find("bounding_box")
bs = bb.find("width"), bb.find("height")
w, h = [float(b.text) for b in bs]
use_label = parser.find("use_label")
if use_label is not None:
use_label = to_bool(use_label.text.strip())
else:
use_label = True
data = ArrayPlotData()
p = Plot(data=data, padding=10)
p.x_grid.visible = False
p.y_grid.visible = False
p.x_axis.visible = False
p.y_axis.visible = False
p.x_axis.title = "X cm"
p.y_axis.title = "Y cm"
p.index_range.low_setting = -w / 2
p.index_range.high_setting = w / 2
p.value_range.low_setting = -h / 2
p.value_range.high_setting = h / 2
thetas = linspace(0, 2 * pi)
radius = circles.find("radius").text
radius = float(radius)
face_color = circles.find("face_color")
if face_color is not None:
face_color = face_color.text
else:
face_color = "white"
labels = []
for i, pp in enumerate(circles.findall("point")):
x, y, l = pp.find("x").text, pp.find("y").text, pp.find("label").text
# print i, pp, x, y
# load hole specific attrs
r = pp.find("radius")
if r is None:
r = radius
else:
r = float(r.text)
fc = pp.find("face_color")
if fc is None:
fc = face_color
else:
fc = fc.text
x, y = list(map(float, (x, y)))
xs = x + r * sin(thetas)
ys = y + r * cos(thetas)
xn, yn = "px{:03d}".format(i), "py{:03d}".format(i)
data.set_data(xn, xs)
data.set_data(yn, ys)
plot = p.plot((xn, yn), face_color=fc, type="polygon")[0]
labels.append((x, y, l))
# if use_label:
# label = myDataLabel(component=plot,
# data_point=(x, y),
# label_text=l,
# bgcolor='transparent')
# plot.overlays.append(label)
if use_label:
p.overlays.append(LabelsOverlay(component=plot, labels=labels))
self.container.add(p)
self.container.invalidate_and_redraw()
def _srcpath_changed(self):
# default_radius=radius,
# default_bounds=bounds,
# convert_mm=convert_mm,
# use_label=use_label,
# make=make,
# rotate=rotate)
self._reload()
def _rotation_changed(self):
self._reload()
def _reload(self):
if self.initialized:
self.container = self._container_factory()
print(os.path.isfile(self.srcpath), self.srcpath)
if os.path.isfile(self.srcpath):
p = make_xml(
self.srcpath,
default_bounds=(2.54, 2.54),
default_radius=0.0175 * 2.54,
rotate=self.rotation,
convert_mm=True,
)
self.load(p)
def _container_default(self):
return self._container_factory()
def _container_factory(self):
return RotatingContainer(bgcolor="white")
def make_xml(
path,
offset=100,
default_bounds=(50, 50),
default_radius=3.0,
convert_mm=False,
make=True,
use_label=True,
rotate=0,
):
"""
    Convert a csv into an xml.
    A blank line acts as a group marker; circle labels are offset by
    ``offset * group_id``, e.g. group 0 -> 1, 2, 3 and group 1 -> 101, 102, 103.
"""
out = "{}_from_csv.xml".format(os.path.splitext(path)[0])
if not make:
return out
root = Element("root")
ul = Element("use_label")
ul.text = "True" if use_label else "False"
root.append(ul)
outline = Element("outline")
bb = Element("bounding_box")
width, height = Element("width"), Element("height")
width.text, height.text = list(map(str, default_bounds))
bb.append(width)
bb.append(height)
outline.append(bb)
root.append(outline)
circles = Element("circles")
radius = Element("radius")
radius.text = str(default_radius)
circles.append(radius)
face_color = Element("face_color")
face_color.text = "white"
circles.append(face_color)
root.append(circles)
i = 0
off = 0
reader = csv.reader(open(path, "r"), delimiter=",")
# writer = open(path + 'angles.txt', 'w')
nwriter = None
if rotate:
nwriter = csv.writer(open(path + "rotated_{}.txt".format(rotate), "w"))
header = next(reader)
if nwriter:
nwriter.writerow(header)
theta = math.radians(rotate)
for k, row in enumerate(reader):
# print k, row
row = list(map(str.strip, row))
if row:
e = Element("point")
x, y, l = Element("x"), Element("y"), Element("label")
xx, yy = float(row[1]), float(row[2])
try:
r = float(row[4])
rr = Element("radius")
if convert_mm:
r *= 2.54
rr.text = str(r)
e.append(rr)
except IndexError:
r = None
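            # Standard 2D rotation of (xx, yy) about the origin by
            # ``rotate`` degrees.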
px = math.cos(theta) * xx - math.sin(theta) * yy
py = math.sin(theta) * xx + math.cos(theta) * yy
xx, yy = px, py
if nwriter:
data = ["{:0.4f}".format(xx), "{:0.4f}".format(yy)]
if r is not None:
data.append("{:0.4f}".format(r))
nwriter.writerow(data)
if convert_mm:
xx = xx * 2.54
yy = yy * 2.54
xx *= 1.1
yy *= 1.1
x.text = str(xx)
y.text = str(yy)
# a = math.degrees(math.atan2(yy, xx))
# writer.write('{} {}\n'.format(k + 1, a))
l.text = str(i + 1 + off)
e.append(l)
e.append(x)
e.append(y)
circles.append(e)
i += 1
else:
# use blank rows as group markers
off += offset
i = 0
tree = ElementTree(root)
tree.write(out, xml_declaration=True, method="xml", pretty_print=True)
return out
def open_txt(
p, bounds, radius, use_label=True, convert_mm=False, make=True, rotate=None
):
gm = GraphicModel(srcpath=p, rotation=rotate or 0)
p = make_xml(
p,
offset=0,
default_radius=radius,
default_bounds=bounds,
convert_mm=convert_mm,
use_label=use_label,
make=make,
rotate=rotate,
)
# p = '/Users/ross/Sandbox/graphic_gen_from_csv.xml'
gm.load(p)
gm.initialized = True
gcc = GraphicGeneratorController(model=gm)
return gcc, gm
if __name__ == "__main__":
gm = GraphicModel()
# p = '/Users/ross/Sandbox/2mmirrad.txt'
# p = '/Users/ross/Sandbox/2mmirrad_ordered.txt'
# p = '/Users/ross/Sandbox/1_75mmirrad_ordered.txt'
# p = '/Users/ross/Sandbox/1_75mmirrad_ordered.txt'
# p = '/Users/ross/Pychrondata_diode/setupfiles/irradiation_tray_maps/0_75mmirrad_ordered1.txt'
# p = '/Users/ross/Sandbox/1_75mmirrad.txt'
p = "/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/construction/newtrays/1_75mmirrad_continuous.txt"
# p = '/Users/ross/Pychrondata_diode/setupfiles/irradiation_tray_maps/0_75mmirrad.txt'
# p = '/Users/ross/Pychrondata_diode/setupfiles/irradiation_tray_maps/0_75mmirrad_continuous.txt'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/newtrays/2mmirrad_continuous.txt'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/newtrays/40_no_spokes.txt'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/newtrays/26_spokes.txt'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/newtrays/26_no_spokes.txt'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/construction/newtrays/40_spokes.txt'
# p = '/Users/ross/Desktop/72_spokes'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/construction/newtrays/16_40_ms.txt'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/construction/newtrays/40_spokes_rev2.txt'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/construction/newtrays/40_spokes-5.txt'
p = "/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/construction/newtrays/24_spokes.txt"
p = "/Users/ross/PychronDev/data/o2inch.txt"
p = "/Users/ross/PychronDev/data/421.txt"
gcc, gm = open_txt(p, (51, 51), 0.95, convert_mm=False, make=True, rotate=0)
# p2 = '/Users/ross/Pychrondata_diode/setupfiles/irradiation_tray_maps/newtrays/TX_6-Hole.txt'
# gcc, gm2 = open_txt(p2, (2.54, 2.54), .1, make=False)
# p2 = '/Users/ross/Pychrondata_diode/setupfiles/irradiation_tray_maps/newtrays/TX_20-Hole.txt'
# gcc, gm2 = open_txt(p2, (2.54, 2.54), .1, make=False)
# gm2.container.bgcolor = 'transparent'
# gm2.container.add(gm.container)
gcc.configure_traits()
|
import errno
import os
import stat
import warnings
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers import remotefs as remotefs_drv
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.StrOpt('glusterfs_shares_config',
default='/etc/cinder/glusterfs_shares',
help='File with the list of available gluster shares'),
cfg.StrOpt('glusterfs_mount_point_base',
default='$state_path/mnt',
help='Base dir containing mount points for gluster shares.'),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver, driver.CloneableVD,
driver.ExtendVD):
"""Gluster based cinder driver.
Creates file on Gluster share for using it as block device on hypervisor.
Operations such as create/delete/extend volume/snapshot use locking on a
per-process basis to prevent multiple threads from modifying qcow2 chains
or the snapshot .info file simultaneously.
"""
driver_volume_type = 'glusterfs'
driver_prefix = 'glusterfs'
volume_backend_name = 'GlusterFS'
VERSION = '1.3.0'
def __init__(self, execute=processutils.execute, *args, **kwargs):
self._remotefsclient = None
super(GlusterfsDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(volume_opts)
root_helper = utils.get_root_helper()
self.base = getattr(self.configuration,
'glusterfs_mount_point_base',
CONF.glusterfs_mount_point_base)
self._remotefsclient = remotefs_brick.RemoteFsClient(
'glusterfs', root_helper, execute,
glusterfs_mount_point_base=self.base)
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
super(GlusterfsDriver, self).do_setup(context)
config = self.configuration.glusterfs_shares_config
if not config:
msg = (_("There's no Gluster config file configured (%s)") %
'glusterfs_shares_config')
LOG.warning(msg)
raise exception.GlusterfsException(msg)
if not os.path.exists(config):
msg = (_("Gluster config file at %(config)s doesn't exist") %
{'config': config})
LOG.warning(msg)
raise exception.GlusterfsException(msg)
self.shares = {}
try:
self._execute('mount.glusterfs', check_exit_code=False)
except OSError as exc:
if exc.errno == errno.ENOENT:
raise exception.GlusterfsException(
_('mount.glusterfs is not installed'))
else:
raise
self._refresh_mounts()
def _unmount_shares(self):
self._load_shares_config(self.configuration.glusterfs_shares_config)
for share in self.shares.keys():
try:
self._do_umount(True, share)
except Exception as exc:
LOG.warning(_LW('Exception during unmounting %s'), exc)
def _do_umount(self, ignore_not_mounted, share):
mount_path = self._get_mount_point_for_share(share)
command = ['umount', mount_path]
try:
self._execute(*command, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ignore_not_mounted and 'not mounted' in exc.stderr:
LOG.info(_LI("%s is already umounted"), share)
else:
LOG.error(_LE("Failed to umount %(share)s, reason=%(stderr)s"),
{'share': share, 'stderr': exc.stderr})
raise
def _refresh_mounts(self):
try:
self._unmount_shares()
except processutils.ProcessExecutionError as exc:
if 'target is busy' in exc.stderr:
LOG.warning(_LW("Failed to refresh mounts, reason=%s"),
exc.stderr)
else:
raise
self._ensure_shares_mounted()
def _qemu_img_info(self, path, volume_name):
return super(GlusterfsDriver, self)._qemu_img_info_base(
path, volume_name, self.configuration.glusterfs_mount_point_base)
def check_for_setup_error(self):
"""Just to override parent behavior."""
pass
def _local_volume_dir(self, volume):
hashed = self._get_hash_str(volume['provider_location'])
path = '%s/%s' % (self.configuration.glusterfs_mount_point_base,
hashed)
return path
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
super(GlusterfsDriver, self)._update_volume_stats()
data = self._stats
global_capacity = data['total_capacity_gb']
global_free = data['free_capacity_gb']
thin_enabled = self.configuration.nas_volume_prov_type == 'thin'
if thin_enabled:
provisioned_capacity = self._get_provisioned_capacity()
else:
provisioned_capacity = round(global_capacity - global_free, 2)
data['provisioned_capacity_gb'] = provisioned_capacity
data['max_over_subscription_ratio'] = (
self.configuration.max_over_subscription_ratio)
data['thin_provisioning_support'] = thin_enabled
data['thick_provisioning_support'] = not thin_enabled
self._stats = data
@remotefs_drv.locked_volume_id_operation
def create_volume(self, volume):
"""Creates a volume."""
self._ensure_shares_mounted()
volume['provider_location'] = self._find_share(volume['size'])
        LOG.info(_LI('cast to %s'), volume['provider_location'])
self._do_create_volume(volume)
return {'provider_location': volume['provider_location']}
def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
"""Copy data from snapshot to destination volume.
This is done with a qemu-img convert to raw/qcow2 from the snapshot
qcow2.
"""
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, "
"volume_size: %(size)s",
{'snap': snapshot['id'],
'vol': volume['id'],
'size': volume_size})
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path)
vol_path = self._local_volume_dir(snapshot['volume'])
forward_file = snap_info[snapshot['id']]
forward_path = os.path.join(vol_path, forward_file)
# Find the file which backs this file, which represents the point
# when this snapshot was created.
img_info = self._qemu_img_info(forward_path,
snapshot['volume']['name'])
path_to_snap_img = os.path.join(vol_path, img_info.backing_file)
path_to_new_vol = self._local_path_volume(volume)
LOG.debug("will copy from snapshot at %s", path_to_snap_img)
if self.configuration.nas_volume_prov_type == 'thin':
out_format = 'qcow2'
else:
out_format = 'raw'
image_utils.convert_image(path_to_snap_img,
path_to_new_vol,
out_format)
self._set_rw_permissions_for_all(path_to_new_vol)
@remotefs_drv.locked_volume_id_operation
def delete_volume(self, volume):
"""Deletes a logical volume."""
if not volume['provider_location']:
LOG.warning(_LW('Volume %s does not have '
'provider_location specified, '
'skipping'), volume['name'])
return
self._ensure_share_mounted(volume['provider_location'])
volume_dir = self._local_volume_dir(volume)
mounted_path = os.path.join(volume_dir,
self.get_active_image_from_info(volume))
self._execute('rm', '-f', mounted_path, run_as_root=True)
# If an exception (e.g. timeout) occurred during delete_snapshot, the
# base volume may linger around, so just delete it if it exists
base_volume_path = self._local_path_volume(volume)
fileutils.delete_if_exists(base_volume_path)
info_path = self._local_path_volume_info(volume)
fileutils.delete_if_exists(info_path)
def _get_matching_backing_file(self, backing_chain, snapshot_file):
return next(f for f in backing_chain
if f.get('backing-filename', '') == snapshot_file)
def ensure_export(self, ctx, volume):
"""Synchronously recreates an export for a logical volume."""
self._ensure_share_mounted(volume['provider_location'])
def create_export(self, ctx, volume, connector):
"""Exports the volume."""
pass
def remove_export(self, ctx, volume):
"""Removes an export for a logical volume."""
pass
def validate_connector(self, connector):
pass
@remotefs_drv.locked_volume_id_operation
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info."""
# Find active qcow2 file
active_file = self.get_active_image_from_info(volume)
path = '%s/%s/%s' % (self.configuration.glusterfs_mount_point_base,
self._get_hash_str(volume['provider_location']),
active_file)
data = {'export': volume['provider_location'],
'name': active_file}
if volume['provider_location'] in self.shares:
data['options'] = self.shares[volume['provider_location']]
# Test file for raw vs. qcow2 format
info = self._qemu_img_info(path, volume['name'])
data['format'] = info.file_format
if data['format'] not in ['raw', 'qcow2']:
msg = _('%s must be a valid raw or qcow2 image.') % path
raise exception.InvalidVolume(msg)
return {
'driver_volume_type': 'glusterfs',
'data': data,
'mount_point_base': self._get_mount_point_base()
}
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
pass
@remotefs_drv.locked_volume_id_operation
def extend_volume(self, volume, size_gb):
volume_path = self.local_path(volume)
info = self._qemu_img_info(volume_path, volume['name'])
backing_fmt = info.file_format
if backing_fmt not in ['raw', 'qcow2']:
msg = _('Unrecognized backing format: %s')
raise exception.InvalidVolume(msg % backing_fmt)
# qemu-img can resize both raw and qcow2 files
image_utils.resize_image(volume_path, size_gb)
def _do_create_volume(self, volume):
"""Create a volume on given glusterfs_share.
:param volume: volume reference
"""
volume_path = self.local_path(volume)
volume_size = volume['size']
LOG.debug("creating new volume at %s", volume_path)
if os.path.exists(volume_path):
msg = _('file already exists at %s') % volume_path
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if self.configuration.nas_volume_prov_type == 'thin':
self._create_qcow2_file(volume_path, volume_size)
else:
try:
self._fallocate(volume_path, volume_size)
except processutils.ProcessExecutionError as exc:
if 'Operation not supported' in exc.stderr:
                    warnings.warn('Fallocate not supported by current '
                                  'version of glusterfs; falling back '
                                  'to dd.')
self._create_regular_file(volume_path, volume_size)
else:
fileutils.delete_if_exists(volume_path)
raise
self._set_rw_permissions_for_all(volume_path)
def _ensure_shares_mounted(self):
"""Mount all configured GlusterFS shares."""
self._mounted_shares = []
self._load_shares_config(self.configuration.glusterfs_shares_config)
for share in self.shares.keys():
try:
self._ensure_share_mounted(share)
self._mounted_shares.append(share)
except Exception as exc:
LOG.error(_LE('Exception during mounting %s'), exc)
LOG.debug('Available shares: %s', self._mounted_shares)
def _ensure_share_mounted(self, glusterfs_share):
"""Mount GlusterFS share.
:param glusterfs_share: string
"""
mount_path = self._get_mount_point_for_share(glusterfs_share)
self._mount_glusterfs(glusterfs_share)
# Ensure we can write to this share
group_id = os.getegid()
current_group_id = utils.get_file_gid(mount_path)
current_mode = utils.get_file_mode(mount_path)
if group_id != current_group_id:
cmd = ['chgrp', group_id, mount_path]
self._execute(*cmd, run_as_root=True)
if not (current_mode & stat.S_IWGRP):
cmd = ['chmod', 'g+w', mount_path]
self._execute(*cmd, run_as_root=True)
self._ensure_share_writable(mount_path)
def _find_share(self, volume_size_for):
"""Choose GlusterFS share among available ones for given volume size.
        Current implementation looks for the share with the greatest
        free capacity.
:param volume_size_for: int size in GB
"""
if not self._mounted_shares:
raise exception.GlusterfsNoSharesMounted()
greatest_size = 0
greatest_share = None
for glusterfs_share in self._mounted_shares:
capacity = self._get_available_capacity(glusterfs_share)[0]
if capacity > greatest_size:
greatest_share = glusterfs_share
greatest_size = capacity
if volume_size_for * units.Gi > greatest_size:
raise exception.GlusterfsNoSuitableShareFound(
volume_size=volume_size_for)
return greatest_share
def _mount_glusterfs(self, glusterfs_share):
"""Mount GlusterFS share to mount path."""
mnt_flags = []
if self.shares.get(glusterfs_share) is not None:
mnt_flags = self.shares[glusterfs_share].split()
try:
self._remotefsclient.mount(glusterfs_share, mnt_flags)
except processutils.ProcessExecutionError:
LOG.error(_LE("Mount failure for %(share)s."),
{'share': glusterfs_share})
raise
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume.
Allow a backup to occur only if no snapshots exist.
Check both Cinder and the file on-disk. The latter is only
a safety mechanism to prevent further damage if the snapshot
information is already inconsistent.
"""
snapshots = self.db.snapshot_get_all_for_volume(context,
backup['volume_id'])
snap_error_msg = _('Backup is not supported for GlusterFS '
'volumes with snapshots.')
if len(snapshots) > 0:
raise exception.InvalidVolume(snap_error_msg)
volume = self.db.volume_get(context, backup['volume_id'])
volume_dir = self._local_volume_dir(volume)
active_file_path = os.path.join(
volume_dir,
self.get_active_image_from_info(volume))
info = self._qemu_img_info(active_file_path, volume['name'])
if info.backing_file is not None:
LOG.error(_LE('No snapshots found in database, but %(path)s has '
'backing file %(backing_file)s!'),
{'path': active_file_path,
'backing_file': info.backing_file})
raise exception.InvalidVolume(snap_error_msg)
if info.file_format != 'raw':
msg = _('Backup is only supported for raw-formatted '
'GlusterFS volumes.')
raise exception.InvalidVolume(msg)
return super(GlusterfsDriver, self).backup_volume(
context, backup, backup_service)
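# A minimal sketch (not from the driver source) of the shares file that
# do_setup() reads via glusterfs_shares_config and _load_shares_config()
# parses: one Gluster volume per line as <host>:/<volname>, optionally
# followed by mount options that _mount_glusterfs() splits into flags.
# The hostnames and volume names below are hypothetical:
#
#   192.168.1.10:/cinder_vol1
#   gluster.example.org:/cinder_vol2 -o backup-volfile-servers=192.168.1.11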
|
import os
import shutil
from subprocess import call
def main():
# Clean the build directory
if os.path.isdir('./build'):
shutil.rmtree('./build')
# Freeze it
    call(['python', 'setup.py', 'build'])
# Zip it up - 7-zip provides better compression than the zipfile module
# Make sure the 7-zip folder is on your path
file_name = 'simulation_standalone'
if os.path.isfile('{}.zip'.format(file_name)):
os.remove('{}.zip'.format(file_name))
    call(['7z', 'a', '-tzip', '{}.zip'.format(file_name), 'simulation.xlsm'])
    call(['7z', 'a', '-tzip', '{}.zip'.format(file_name), 'LICENSE.txt'])
    call(['7z', 'a', '-tzip', '{}.zip'.format(file_name), 'build'])
if __name__ == '__main__':
main()
|
"""Python API for composing notebook elements
The Python representation of a notebook is a nested structure of
dictionary subclasses that support attribute access
(IPython.utils.ipstruct.Struct). The functions in this module are merely
helpers to build the structs in the right form.
"""
from ipython_genutils.py3compat import cast_unicode

from ..notebooknode import from_dict, NotebookNode
nbformat = 4
nbformat_minor = 1
nbformat_schema = 'nbformat.v4.schema.json'
def validate(node, ref=None):
"""validate a v4 node"""
from .. import validate
return validate(node, ref=ref, version=nbformat)
def new_output(output_type, data=None, **kwargs):
"""Create a new output, to go in the ``cell.outputs`` list of a code cell."""
output = NotebookNode(output_type=output_type)
# populate defaults:
if output_type == 'stream':
output.name = u'stdout'
output.text = u''
elif output_type in {'execute_result', 'display_data'}:
output.metadata = NotebookNode()
output.data = NotebookNode()
# load from args:
output.update(from_dict(kwargs))
if data is not None:
output.data = from_dict(data)
# validate
validate(output, output_type)
return output
def output_from_msg(msg):
"""Create a NotebookNode for an output from a kernel's IOPub message.
Returns
-------
NotebookNode: the output as a notebook node.
Raises
------
ValueError: if the message is not an output message.
"""
msg_type = msg['header']['msg_type']
content = msg['content']
if msg_type == 'execute_result':
return new_output(output_type=msg_type,
metadata=content['metadata'],
data=content['data'],
execution_count=content['execution_count'],
)
elif msg_type == 'stream':
return new_output(output_type=msg_type,
name=content['name'],
text=content['text'],
)
elif msg_type == 'display_data':
return new_output(output_type=msg_type,
metadata=content['metadata'],
data=content['data'],
)
elif msg_type == 'error':
return new_output(output_type=msg_type,
ename=content['ename'],
evalue=content['evalue'],
traceback=content['traceback'],
)
else:
raise ValueError("Unrecognized output msg type: %r" % msg_type)
def new_code_cell(source='', **kwargs):
"""Create a new code cell"""
cell = NotebookNode(
cell_type='code',
metadata=NotebookNode(),
execution_count=None,
source=source,
outputs=[],
)
cell.update(from_dict(kwargs))
validate(cell, 'code_cell')
return cell
def new_markdown_cell(source='', **kwargs):
"""Create a new markdown cell"""
cell = NotebookNode(
cell_type='markdown',
source=source,
metadata=NotebookNode(),
)
cell.update(from_dict(kwargs))
validate(cell, 'markdown_cell')
return cell
def new_raw_cell(source='', **kwargs):
"""Create a new raw cell"""
cell = NotebookNode(
cell_type='raw',
source=source,
metadata=NotebookNode(),
)
cell.update(from_dict(kwargs))
validate(cell, 'raw_cell')
return cell
def new_worksheet(name=None, cells=None, metadata=None):
"""Create a worksheet by name with with a list of cells."""
ws = NotebookNode()
if cells is None:
ws.cells = []
else:
ws.cells = list(cells)
ws.metadata = NotebookNode(metadata or {})
return ws
def new_notebook(name=None, metadata=None, worksheets=None):
"""Create a notebook by name, id and a list of worksheets."""
nb = NotebookNode()
nb.nbformat = nbformat
nb.nbformat_minor = nbformat_minor
if worksheets is None:
nb.worksheets = []
else:
nb.worksheets = list(worksheets)
if metadata is None:
nb.metadata = new_metadata()
else:
nb.metadata = NotebookNode(metadata)
if name is not None:
nb.metadata.name = cast_unicode(name)
return nb
def new_metadata(name=None, authors=None, license=None, created=None,
modified=None, gistid=None):
"""Create a new metadata node."""
metadata = NotebookNode()
if name is not None:
metadata.name = cast_unicode(name)
if authors is not None:
metadata.authors = list(authors)
if created is not None:
metadata.created = cast_unicode(created)
if modified is not None:
metadata.modified = cast_unicode(modified)
if license is not None:
metadata.license = cast_unicode(license)
if gistid is not None:
metadata.gistid = cast_unicode(gistid)
return metadata
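# A minimal usage sketch of the constructors above; the helper name is
# hypothetical and added only for illustration, not part of the API.
def _example_notebook():
    """Compose a one-cell notebook with a captured stdout output."""
    cell = new_code_cell(source='print(1 + 1)')
    cell.outputs.append(new_output('stream', name='stdout', text='2\n'))
    return new_notebook(name='demo',
                        worksheets=[new_worksheet(cells=[cell])])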
|
"""Show the current failures in the repository."""
import sys
from cliff import command
import testtools
from stestr import output
from stestr.repository import util
from stestr import results
from stestr import user_config
class Failing(command.Command):
"""Show the current failures known by the repository.
Without --subunit, the process exit code will be non-zero if the
previous test run was not successful and test failures are shown. But,
with --subunit, the process exit code is non-zero only if the subunit
stream could not be generated successfully from any failures. The test
results and run status are included in the subunit stream emitted for
    the failed tests, so the stream should be used for interpreting the
failing tests. If no subunit stream is emitted with --subunit and a
zero exit code then there were no failures in the most recent run in
the repository.
"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
"--subunit", action="store_true",
default=False, help="Show output as a subunit stream.")
parser.add_argument(
"--list", action="store_true",
default=False, help="Show only a list of failing tests.")
return parser
def take_action(self, parsed_args):
user_conf = user_config.get_user_config(self.app_args.user_config)
args = parsed_args
if getattr(user_conf, 'failing', False):
list_opt = args.list or user_conf.failing.get('list', False)
else:
list_opt = args.list
return failing(repo_type=self.app_args.repo_type,
repo_url=self.app_args.repo_url,
list_tests=list_opt, subunit=args.subunit)
def _show_subunit(run):
stream = run.get_subunit_stream()
if getattr(sys.stdout, 'buffer', False):
sys.stdout.buffer.write(stream.read())
else:
sys.stdout.write(stream.read())
return 0
def _make_result(repo, list_tests=False, stdout=sys.stdout):
if list_tests:
list_result = testtools.StreamSummary()
return list_result, list_result
else:
def _get_id():
return repo.get_latest_run().get_id()
output_result = results.CLITestResult(_get_id,
stdout, None)
summary_result = output_result.get_summary()
return output_result, summary_result
def failing(repo_type='file', repo_url=None, list_tests=False, subunit=False,
stdout=sys.stdout):
"""Print the failing tests from the most recent run in the repository
This function will print to STDOUT whether there are any tests that failed
in the last run. It optionally will print the test_ids for the failing
tests if ``list_tests`` is true. If ``subunit`` is true a subunit stream
with just the failed tests will be printed to STDOUT.
    Note this function depends on the cwd for the repository: if `repo_type`
    is set to file and `repo_url` is not specified, it will use the
    repository located at CWD/.stestr.
:param str repo_type: This is the type of repository to use. Valid choices
are 'file' and 'sql'.
:param str repo_url: The url of the repository to use.
    :param bool list_tests: Show only a list of failing tests.
:param bool subunit: Show output as a subunit stream.
:param file stdout: The output file to write all output to. By default
this is sys.stdout
:return return_code: The exit code for the command. 0 for success and > 0
for failures.
:rtype: int
"""
if repo_type not in ['file', 'sql']:
        stdout.write('Repository type %s is not a valid type' % repo_type)
return 1
repo = util.get_repo_open(repo_type, repo_url)
run = repo.get_failing()
if subunit:
return _show_subunit(run)
case = run.get_test()
failed = False
result, summary = _make_result(repo, list_tests=list_tests)
result.startTestRun()
try:
case.run(result)
finally:
result.stopTestRun()
failed = not results.wasSuccessful(summary)
if failed:
result = 1
else:
result = 0
if list_tests:
failing_tests = [
test for test, _ in summary.errors + summary.failures]
output.output_tests(failing_tests, output=stdout)
return result
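# A minimal usage sketch (hypothetical, not part of stestr): call
# failing() programmatically against the repository in the current
# working directory and list the failing test ids on stdout.
def _example_list_failing():
    """Illustrative helper; assumes a .stestr repository exists in CWD."""
    return failing(repo_type='file', list_tests=True)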
|
from .user import User
|
'''
1. create private vpc router network with cidr
2. check dhcp ip address
@author Antony WeiJiang
'''
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.net_operations as net_ops
import test_stub_for_dhcp_ip as test_stub_dhcp
import random
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
l2_query_resource = res_ops.L2_NETWORK
type_l2 = ["L2NoVlanNetwork","L2VlanNetwork"]
l3_name = "test_dhcp_server"
ip_range_name = "dhcp_ip_range"
ip_Version = [4,6]
networkcidr = "192.168.1.0/24"
dhcp_ip_for_private_vpc = "192.168.1.3"
dhcp_system_tags = ["flatNetwork::DhcpServer::"+dhcp_ip_for_private_vpc+"::ipUuid::null"]
def test():
test_util.test_logger("start dhcp test for l3 public network")
test_util.test_dsc("get no vlan network uuid")
private_vpcnetwork = test_stub_dhcp.VpcNetwork_IP_For_Dhcp()
private_vpcnetwork.set_l2_query_resource(l2_query_resource)
private_vpcnetwork.set_l2_type(type_l2[1])
l2_no_vlan_uuid = private_vpcnetwork.get_l2uuid()
test_util.test_logger("antony @@@debug : %s" %(l2_no_vlan_uuid))
test_util.test_logger("create l3 network")
private_vpcnetwork.set_ipVersion(ip_Version[0])
private_vpcnetwork.create_l3uuid(l3_name)
test_util.test_logger("antony @@@debug : %s" %(private_vpcnetwork.get_l3uuid()))
private_vpcnetwork.add_service_to_l3_vpcnetwork()
test_util.test_logger("add ip v4 range to l3 network")
private_vpcnetwork.add_ip_by_networkcidr(ip_range_name, networkcidr, dhcp_system_tags)
if private_vpcnetwork.check_dhcp_ipaddress().find(dhcp_ip_for_private_vpc) == -1:
test_util.test_fail("dhcp server ip create fail")
test_util.test_logger("delete l3 network")
private_vpcnetwork.del_l3uuid()
test_util.test_pass("dhcp server ip create successfully")
'''
to be defined
'''
def error_cleanup():
pass
'''
to be defined
'''
def env_recover():
pass
|
import sys
import unittest
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import DNSFileFixtures
from libcloud.test.secrets import DNS_PARAMS_BUDDYNS
from libcloud.dns.drivers.buddyns import BuddyNSDNSDriver
from libcloud.utils.py3 import httplib
from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError
from libcloud.dns.base import Zone
class BuddyNSDNSTests(unittest.TestCase):
def setUp(self):
BuddyNSMockHttp.type = None
BuddyNSDNSDriver.connectionCls.conn_class = BuddyNSMockHttp
self.driver = BuddyNSDNSDriver(*DNS_PARAMS_BUDDYNS)
self.test_zone = Zone(
id="test.com",
type="master",
ttl=None,
domain="test.com",
extra={},
driver=self,
)
def test_list_zones_empty(self):
BuddyNSMockHttp.type = "EMPTY_ZONES_LIST"
zones = self.driver.list_zones()
self.assertEqual(zones, [])
def test_list_zones_success(self):
BuddyNSMockHttp.type = "LIST_ZONES"
zones = self.driver.list_zones()
self.assertEqual(len(zones), 2)
zone = zones[0]
self.assertEqual(zone.id, "microsoft.com")
self.assertIsNone(zone.type)
self.assertEqual(zone.domain, "microsoft.com")
self.assertIsNone(zone.ttl)
zone = zones[1]
self.assertEqual(zone.id, "google.de")
self.assertIsNone(zone.type)
self.assertEqual(zone.domain, "google.de")
self.assertIsNone(zone.ttl)
def test_delete_zone_zone_does_not_exist(self):
BuddyNSMockHttp.type = "DELETE_ZONE_ZONE_DOES_NOT_EXIST"
try:
self.driver.delete_zone(zone=self.test_zone)
except ZoneDoesNotExistError as e:
self.assertEqual(e.zone_id, self.test_zone.id)
else:
self.fail("Exception was not thrown")
def test_delete_zone_success(self):
BuddyNSMockHttp.type = "DELETE_ZONE_SUCCESS"
status = self.driver.delete_zone(zone=self.test_zone)
self.assertTrue(status)
def test_get_zone_zone_does_not_exist(self):
BuddyNSMockHttp.type = "GET_ZONE_ZONE_DOES_NOT_EXIST"
try:
self.driver.get_zone(zone_id="zonedoesnotexist.com")
except ZoneDoesNotExistError as e:
self.assertEqual(e.zone_id, "zonedoesnotexist.com")
else:
self.fail("Exception was not thrown")
def test_get_zone_success(self):
BuddyNSMockHttp.type = "GET_ZONE_SUCCESS"
zone = self.driver.get_zone(zone_id="myexample.com")
self.assertEqual(zone.id, "myexample.com")
self.assertEqual(zone.domain, "myexample.com")
self.assertIsNone(zone.type)
self.assertIsNone(zone.ttl)
self.assertEqual(zone.driver, self.driver)
def test_create_zone_success(self):
BuddyNSMockHttp.type = "CREATE_ZONE_SUCCESS"
zone = self.driver.create_zone(domain="microsoft.com")
self.assertEqual(zone.id, "microsoft.com")
self.assertEqual(zone.domain, "microsoft.com")
        self.assertIsNone(zone.type)
self.assertIsNone(zone.ttl)
def test_create_zone_zone_already_exists(self):
BuddyNSMockHttp.type = "CREATE_ZONE_ZONE_ALREADY_EXISTS"
try:
self.driver.create_zone(domain="newzone.com", extra={"master": "13.0.0.1"})
except ZoneAlreadyExistsError as e:
self.assertEqual(e.zone_id, "newzone.com")
else:
self.fail("Exception was not thrown")
class BuddyNSMockHttp(MockHttp):
fixtures = DNSFileFixtures("buddyns")
def _api_v2_zone_EMPTY_ZONES_LIST(self, method, url, body, headers):
body = self.fixtures.load("empty_zones_list.json")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _api_v2_zone_LIST_ZONES(self, method, url, body, headers):
body = self.fixtures.load("list_zones.json")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _api_v2_zone_zonedoesnotexist_com_GET_ZONE_ZONE_DOES_NOT_EXIST(
self, method, url, body, headers
):
body = self.fixtures.load("zone_does_not_exist.json")
        return (httplib.NOT_FOUND, body, {},
                httplib.responses[httplib.NOT_FOUND])
def _api_v2_zone_myexample_com_GET_ZONE_SUCCESS(self, method, url, body, headers):
body = self.fixtures.load("get_zone_success.json")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _api_v2_zone_test_com_DELETE_ZONE_SUCCESS(self, method, url, body, headers):
body = self.fixtures.load("delete_zone_success.json")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _api_v2_zone_test_com_DELETE_ZONE_ZONE_DOES_NOT_EXIST(
self, method, url, body, headers
):
body = self.fixtures.load("zone_does_not_exist.json")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _api_v2_zone_CREATE_ZONE_SUCCESS(self, method, url, body, headers):
body = self.fixtures.load("create_zone_success.json")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _api_v2_zone_CREATE_ZONE_ZONE_ALREADY_EXISTS(self, method, url, body, headers):
body = self.fixtures.load("zone_already_exists.json")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
if __name__ == "__main__":
sys.exit(unittest.main())
|
import os
import random
from neutron.agent.linux import ovs_lib
from neutron.agent.linux import utils
from neutron.common import constants as n_const
from neutron.tests import base
BR_PREFIX = 'test-br'
class BaseLinuxTestCase(base.BaseTestCase):
def setUp(self, root_helper='sudo'):
super(BaseLinuxTestCase, self).setUp()
self.root_helper = root_helper
def check_command(self, cmd, error_text, skip_msg):
try:
utils.execute(cmd)
except RuntimeError as e:
if error_text in str(e):
self.skipTest(skip_msg)
raise
def check_sudo_enabled(self):
if os.environ.get('OS_SUDO_TESTING') not in base.TRUE_STRING:
self.skipTest('testing with sudo is not enabled')
def get_rand_name(self, max_length, prefix='test'):
name = prefix + str(random.randint(1, 0x7fffffff))
return name[:max_length]
def create_resource(self, name_prefix, creation_func, *args, **kwargs):
"""Create a new resource that does not already exist.
:param name_prefix: The prefix for a randomly generated name
:param creation_func: A function taking the name of the resource
            to be created as its first argument. An error is assumed
to indicate a name collision.
        :param args, kwargs: These will be passed to the creation function.
"""
while True:
name = self.get_rand_name(n_const.DEV_NAME_MAX_LEN, name_prefix)
try:
return creation_func(name, *args, **kwargs)
except RuntimeError:
continue
class BaseOVSLinuxTestCase(BaseLinuxTestCase):
def setUp(self, root_helper='sudo'):
super(BaseOVSLinuxTestCase, self).setUp(root_helper)
self.ovs = ovs_lib.BaseOVS(self.root_helper)
def create_ovs_bridge(self, br_prefix=BR_PREFIX):
br = self.create_resource(br_prefix, self.ovs.add_bridge)
self.addCleanup(br.destroy)
return br
|
import numpy as np
from scipy.io.wavfile import write
a = np.fromfile('/tmp/file.raw', dtype='int16')
write('/tmp/file.wav', 16000, a)
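# A quick round-trip sanity check (a sketch, assuming the same hard-coded
# paths as above): read the .wav back and confirm rate and samples match.
from scipy.io.wavfile import read

rate, b = read('/tmp/file.wav')
assert rate == 16000
assert np.array_equal(a, b)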
|
import git
import os
import string
def latest_commit_sha(repo, path):
    """Return the last commit sha for a given path in repo."""
    # Ask git for only the hash so the log output needs no text parsing.
    commit_sha = repo.git.log("-1", "--format=%H", path)
    return commit_sha
def parse_manifest(manifest, repo, repo_name):
# For each release
for release_name, release_data in list(manifest['release_names'].items()):
print('release_name: ', release_name)
# For each os supported
at_least_one_tag = False
for os_name, os_data in list(release_data['os_names'].items()):
print('os_name: ', os_name)
# For each os code name supported
for os_code_name, os_code_data in list(os_data['os_code_names'].items()):
print('os_code_name: ', os_code_name)
if os_code_data['tag_names']:
at_least_one_tag = True
for tag_name, tag_data in os_code_data['tag_names'].items():
print('tag_name: ', tag_name)
tags = []
for alias_pattern in tag_data['aliases']:
alias_template = string.Template(alias_pattern)
alias = alias_template.substitute(
release_name=release_name,
os_name=os_name,
os_code_name=os_code_name)
tags.append(alias)
commit_path = os.path.join(
repo_name, release_name,
os_name, os_code_name, tag_name)
commit_sha = latest_commit_sha(repo, commit_path)
print('tags: ', tags)
tag_data['Tags'] = tags
tag_data['Architectures'] = os_code_data['archs']
tag_data['GitCommit'] = commit_sha
tag_data['Directory'] = commit_path
if not at_least_one_tag:
del manifest['release_names'][release_name]
return manifest
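# A minimal sketch of the manifest shape parse_manifest() expects,
# inferred from the traversal above; every name here is hypothetical.
EXAMPLE_MANIFEST = {
    'release_names': {
        'melodic': {
            'os_names': {
                'ubuntu': {
                    'os_code_names': {
                        'bionic': {
                            'archs': ['amd64', 'arm64'],
                            'tag_names': {
                                'ros-core': {
                                    'aliases': [
                                        '$release_name-$os_code_name',
                                    ],
                                },
                            },
                        },
                    },
                },
            },
        },
    },
}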
|
"""Test Home Assistant json utility functions."""
from json import JSONEncoder
import os
import sys
from tempfile import mkdtemp
import unittest
from unittest.mock import Mock
import pytest
from homeassistant.exceptions import HomeAssistantError
from homeassistant.util.json import SerializationError, load_json, save_json
TEST_JSON_A = {"a": 1, "B": "two"}
TEST_JSON_B = {"a": "one", "B": 2}
TEST_BAD_OBJECT = {("A",): 1}
TEST_BAD_SERIALIZED = "THIS IS NOT JSON\n"
TMP_DIR = None
def setup():
"""Set up for tests."""
global TMP_DIR
TMP_DIR = mkdtemp()
def teardown():
"""Clean up after tests."""
for fname in os.listdir(TMP_DIR):
os.remove(os.path.join(TMP_DIR, fname))
os.rmdir(TMP_DIR)
def _path_for(leaf_name):
return os.path.join(TMP_DIR, leaf_name + ".json")
def test_save_and_load():
"""Test saving and loading back."""
fname = _path_for("test1")
save_json(fname, TEST_JSON_A)
data = load_json(fname)
assert data == TEST_JSON_A
@unittest.skipIf(
sys.platform.startswith("win"), "private permissions not supported on Windows"
)
def test_save_and_load_private():
"""Test we can load private files and that they are protected."""
fname = _path_for("test2")
save_json(fname, TEST_JSON_A, private=True)
data = load_json(fname)
assert data == TEST_JSON_A
stats = os.stat(fname)
assert stats.st_mode & 0o77 == 0
def test_overwrite_and_reload():
"""Test that we can overwrite an existing file and read back."""
fname = _path_for("test3")
save_json(fname, TEST_JSON_A)
save_json(fname, TEST_JSON_B)
data = load_json(fname)
assert data == TEST_JSON_B
def test_save_bad_data():
"""Test error from trying to save unserialisable data."""
fname = _path_for("test4")
with pytest.raises(SerializationError):
save_json(fname, TEST_BAD_OBJECT)
def test_load_bad_data():
"""Test error from trying to load unserialisable data."""
fname = _path_for("test5")
with open(fname, "w") as fh:
        fh.write(TEST_BAD_SERIALIZED)
with pytest.raises(HomeAssistantError):
load_json(fname)
def test_custom_encoder():
"""Test serializing with a custom encoder."""
class MockJSONEncoder(JSONEncoder):
"""Mock JSON encoder."""
def default(self, o):
"""Mock JSON encode method."""
return "9"
fname = _path_for("test6")
save_json(fname, Mock(), encoder=MockJSONEncoder)
data = load_json(fname)
assert data == "9"
|
"""Libvirt volume driver for iSCSI"""
from os_brick import exception as os_brick_exception
from os_brick.initiator import connector
from oslo_log import log as logging
import nova.conf
from nova import utils
from nova.virt.libvirt.volume import volume as libvirt_volume
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class LibvirtISCSIVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, host):
super(LibvirtISCSIVolumeDriver, self).__init__(host,
is_block_dev=True)
# Call the factory here so we can support
# more than x86 architectures.
self.connector = connector.InitiatorConnector.factory(
'ISCSI', utils.get_root_helper(),
use_multipath=CONF.libvirt.volume_use_multipath,
device_scan_attempts=CONF.libvirt.num_volume_scan_tries,
transport=self._get_transport())
def _get_transport(self):
if CONF.libvirt.iscsi_iface:
transport = CONF.libvirt.iscsi_iface
else:
transport = 'default'
return transport
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtISCSIVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
conf.driver_io = "native"
return conf
def connect_volume(self, connection_info, disk_info, instance):
"""Attach the volume to instance_name."""
LOG.debug("Calling os-brick to attach iSCSI Volume")
device_info = self.connector.connect_volume(connection_info['data'])
LOG.debug("Attached iSCSI volume %s", device_info)
connection_info['data']['device_path'] = device_info['path']
def disconnect_volume(self, connection_info, disk_dev, instance):
"""Detach the volume from instance_name."""
LOG.debug("calling os-brick to detach iSCSI Volume")
try:
self.connector.disconnect_volume(connection_info['data'], None)
except os_brick_exception.VolumeDeviceNotFound as exc:
LOG.warning('Ignoring VolumeDeviceNotFound: %s', exc)
return
LOG.debug("Disconnected iSCSI Volume %s", disk_dev)
super(LibvirtISCSIVolumeDriver,
self).disconnect_volume(connection_info, disk_dev, instance)
def extend_volume(self, connection_info, instance):
"""Extend the volume."""
LOG.debug("calling os-brick to extend iSCSI Volume", instance=instance)
new_size = self.connector.extend_volume(connection_info['data'])
LOG.debug("Extend iSCSI Volume %s; new_size=%s",
connection_info['data']['device_path'],
new_size, instance=instance)
return new_size
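# A sketch (not from Nova) of the connection_info dict that
# connect_volume() above hands to os-brick; the target values are
# hypothetical examples of standard iSCSI connection properties.
EXAMPLE_CONNECTION_INFO = {
    'driver_volume_type': 'iscsi',
    'data': {
        'target_iqn': 'iqn.2010-10.org.openstack:volume-0001',
        'target_portal': '192.168.0.5:3260',
        'target_lun': 1,
        'auth_method': 'CHAP',
        'auth_username': 'user',
        'auth_password': 'secret',
    },
}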
|