code
stringlengths 1
199k
|
|---|
from django.core.management.base import BaseCommand
from ideascube.search.utils import reindex_content
class Command(BaseCommand):
    """Management command that rebuilds the search index."""

    help = 'Reindex all the searchable objects'

    def handle(self, *args, **kwargs):
        """Reindex every searchable object, reporting each indexed kind."""
        for name, count in reindex_content().items():
            if not count:
                continue
            self.stdout.write('Indexed {} content.'.format(name))
        self.stdout.write('Done reindexing.')
|
from odoo import api, fields, models
from datetime import datetime
from odoo.addons import decimal_precision as dp
from odoo.tools import float_compare, float_round
# Decimal precision used for all quantity fields declared below.
UNIT = dp.get_precision('Product Unit of Measure')
class StockWarehouseOrderpoint(models.Model):
    """Extend reordering rules with procurement recommendation fields."""

    _inherit = 'stock.warehouse.orderpoint'

    # Quantity the user is advised to procure, derived from the min/max rule.
    procure_recommended_qty = fields.Float(
        string='Procure Recommendation',
        compute="_compute_procure_recommended",
        digits=UNIT,
    )
    # Suggested date at which the procurement should be requested.
    procure_recommended_date = fields.Date(
        string='Recommended Request Date',
        compute="_compute_procure_recommended",
    )

    @api.multi
    def _get_procure_recommended_qty(self, virtual_qty, op_qtys):
        """Return the quantity to procure for this single order point.

        :param virtual_qty: forecasted quantity at the order point location.
        :param op_qtys: mapping of order point id to quantity already in
            progress (as returned by ``_quantity_in_progress``).
        :return: recommended quantity, 0.0 when nothing should be procured.
        """
        self.ensure_one()
        procure_recommended_qty = 0.0
        qty = max(self.product_min_qty, self.product_max_qty) - virtual_qty
        # Round the needed quantity up to the next multiple of qty_multiple.
        remainder = \
            self.qty_multiple > 0 and qty % self.qty_multiple or 0.0
        if float_compare(
                remainder, 0.0,
                precision_rounding=self.product_uom.rounding) > 0:
            qty += self.qty_multiple - remainder
        if float_compare(
                qty, 0.0,
                precision_rounding=self.product_uom.rounding) <= 0:
            return procure_recommended_qty
        # Subtract the quantity already being procured for this order point.
        qty -= op_qtys[self.id]
        qty_rounded = float_round(
            qty, precision_rounding=self.product_uom.rounding)
        if qty_rounded > 0:
            procure_recommended_qty = qty_rounded
        return procure_recommended_qty

    @api.multi
    @api.depends("product_min_qty", "product_id", "qty_multiple")
    def _compute_procure_recommended(self):
        """Compute recommended procurement quantity and request date."""
        op_qtys = self._quantity_in_progress()
        for op in self:
            qty = 0.0
            virtual_qty = op.with_context(
                location=op.location_id.id).product_id.virtual_available
            # Only recommend procurement when the forecast drops below the
            # configured minimum.
            if float_compare(virtual_qty, op.product_min_qty,
                             precision_rounding=op.product_uom.rounding) < 0:
                qty = op._get_procure_recommended_qty(virtual_qty, op_qtys)
            op.procure_recommended_qty = qty
            op.procure_recommended_date = op._get_date_planned(
                qty, datetime.today())
|
from . import test_stock_report_quantity_by_location
|
"""
RSS/Atom feeds for the forum app.
"""
from django.core.urlresolvers import reverse_lazy
from django.contrib.syndication.views import Feed
from django.utils.feedgenerator import Atom1Feed
from django.utils.translation import ugettext_lazy as _
from .models import (Forum,
ForumThread,
ForumThreadPost)
from .settings import (NB_FORUM_THREADS_IN_FEEDS,
NB_FORUM_THREAD_POSTS_IN_FEEDS)
class ForumThreadsBaseFeed(Feed):
    """Common behaviour shared by every forum-threads feed."""

    def items(self):
        """Subclasses must return the threads to publish."""
        raise NotImplementedError()

    def item_title(self, item):
        """The thread's title."""
        return item.title

    def item_description(self, item):
        """The HTML content of the thread's opening post."""
        return item.first_post.content_html

    def item_author_name(self, item):
        """The opening post's author name, or a placeholder when inactive."""
        author = item.first_post.author
        if author.is_active:
            return author.username
        return _('Anonymous')

    def item_pubdate(self, item):
        """When the thread's opening post was published."""
        return item.first_post.pub_date

    def item_updateddate(self, item):
        """When the thread's last post was last modified (or published)."""
        last = item.last_post
        return last.last_content_modification_date or last.pub_date
class ForumPostsBaseFeed(Feed):
    """Common behaviour shared by every forum-posts feed."""

    def items(self):
        """Subclasses must return the posts to publish."""
        raise NotImplementedError()

    def item_title(self, item):
        """Title of the thread the post belongs to."""
        return item.parent_thread.title

    def item_description(self, item):
        """The post's HTML content."""
        return item.content_html

    def item_author_name(self, item):
        """The post author's name, or a placeholder when inactive."""
        author = item.author
        if author.is_active:
            return author.username
        return _('Anonymous')

    def item_pubdate(self, item):
        """When the post was published."""
        return item.pub_date

    def item_updateddate(self, item):
        """When the post was last modified (falls back to publication)."""
        return item.last_content_modification_date or item.pub_date
class LatestForumThreadsFeed(ForumThreadsBaseFeed):
    """Feed of the most recent public threads, across all forums."""

    title = _('Latest forum threads')
    link = reverse_lazy('forum:index')
    feed_url = reverse_lazy('forum:latest_forum_threads_rss')
    description = _('Latest forum threads, all forums together')

    def items(self):
        """The N most recent public threads."""
        queryset = ForumThread.objects.public_threads()
        queryset = queryset.select_related('first_post__author', 'last_post')
        return queryset[:NB_FORUM_THREADS_IN_FEEDS]
class LatestForumThreadsAtomFeed(LatestForumThreadsFeed):
    """Atom flavour of the latest-threads feed."""

    feed_type = Atom1Feed
    feed_url = reverse_lazy('forum:latest_forum_threads_atom')
    subtitle = LatestForumThreadsFeed.description
class LatestForumPostsFeed(ForumPostsBaseFeed):
    """Feed of the most recent public posts, across all threads."""

    title = _('Latest forum posts')
    link = reverse_lazy('forum:index')
    feed_url = reverse_lazy('forum:latest_forum_thread_posts_rss')
    description = _('Latest forum posts, all threads together')

    def items(self):
        """The N most recent public posts."""
        queryset = ForumThreadPost.objects.public_published()
        queryset = queryset.select_related('parent_thread__last_post',
                                           'parent_thread__first_post',
                                           'author')
        return queryset[:NB_FORUM_THREAD_POSTS_IN_FEEDS]
class LatestForumPostsAtomFeed(LatestForumPostsFeed):
    """Atom flavour of the latest-posts feed."""

    feed_type = Atom1Feed
    feed_url = reverse_lazy('forum:latest_forum_thread_posts_atom')
    subtitle = LatestForumPostsFeed.description
class LatestForumThreadsForForumFeed(ForumThreadsBaseFeed):
    """Feed of the most recent public threads of a single forum."""

    def get_object(self, request, *args, **kwargs):
        """Resolve the target (public) forum from its slug hierarchy."""
        hierarchy = kwargs.pop('hierarchy')
        assert hierarchy is not None
        return Forum.objects.get(slug_hierarchy=hierarchy, private=False)

    def title(self, obj):
        """Feed title, derived from the forum title."""
        return _('Latest forum threads in forum "%s"') % obj.title

    def link(self, obj):
        """Permalink of the forum."""
        return obj.get_absolute_url()

    def feed_url(self, obj):
        """Permalink of this feed."""
        return obj.get_latest_threads_rss_feed_url()

    def description(self, obj):
        """Forum description, with a generic fallback label."""
        fallback = _('Latest forum threads in forum "%s"') % obj.title
        return obj.description or fallback

    def items(self, obj):
        """The N most recent public threads of the forum."""
        queryset = obj.threads.public_threads()
        queryset = queryset.select_related('first_post__author', 'last_post')
        return queryset[:NB_FORUM_THREADS_IN_FEEDS]
class LatestForumThreadsForForumAtomFeed(LatestForumThreadsForForumFeed):
    """Atom flavour of the per-forum latest-threads feed."""

    feed_type = Atom1Feed
    subtitle = LatestForumThreadsForForumFeed.description

    def feed_url(self, obj):
        """Permalink of this feed."""
        return obj.get_latest_threads_atom_feed_url()
class LatestForumPostsForForumFeed(ForumPostsBaseFeed):
    """
    Feed of the latest posts published in a given forum.
    """

    def get_object(self, request, *args, **kwargs):
        """Resolve the target (public) forum from its slug hierarchy."""
        hierarchy = kwargs.pop('hierarchy')
        assert hierarchy is not None
        return Forum.objects.get(slug_hierarchy=hierarchy, private=False)

    def title(self, obj):
        """Feed title, derived from the forum title."""
        return _('Latest forum posts in forum "%s"') % obj.title

    def link(self, obj):
        """Permalink of the forum."""
        return obj.get_absolute_url()

    def feed_url(self, obj):
        """Permalink of this feed."""
        return obj.get_latest_posts_rss_feed_url()

    def description(self, obj):
        """Forum description, with a generic fallback label."""
        return obj.description or _('Latest forum posts in forum "%s"') % obj.title

    def items(self, obj):
        """
        Return the N most recent (public) posts in the given forum.

        Bug fix: the queryset was previously not restricted to ``obj`` at
        all, so this per-forum feed listed the latest posts of *every*
        forum (compare LatestForumThreadsForForumFeed.items, which scopes
        to ``obj.threads``).
        """
        return ForumThreadPost.objects.public_published() \
            .filter(parent_thread__parent_forum=obj) \
            .select_related('parent_thread__last_post',
                            'parent_thread__first_post',
                            'author')[:NB_FORUM_THREAD_POSTS_IN_FEEDS]
class LatestForumPostsForForumAtomFeed(LatestForumPostsForForumFeed):
    """Atom flavour of the per-forum latest-posts feed."""

    feed_type = Atom1Feed
    subtitle = LatestForumPostsForForumFeed.description

    def feed_url(self, obj):
        """Permalink of this feed."""
        return obj.get_latest_posts_atom_feed_url()
class LatestForumPostsForThreadFeed(ForumPostsBaseFeed):
    """Feed of the most recent posts of a single thread."""

    def get_object(self, request, *args, **kwargs):
        """Resolve the target thread (must live in a public forum)."""
        pk = kwargs.pop('pk')
        slug = kwargs.pop('slug')
        assert pk is not None
        assert slug is not None
        return ForumThread.objects.get(pk=pk, slug=slug, parent_forum__private=False)

    def title(self, obj):
        """Feed title, derived from the thread title."""
        return _('Latest posts in forum thread "%s"') % obj.title

    def link(self, obj):
        """Permalink of the thread."""
        return obj.get_absolute_url()

    def feed_url(self, obj):
        """Permalink of this feed."""
        return obj.get_latest_posts_rss_feed_url()

    def description(self, obj):
        """Feed description (same label as the title)."""
        return _('Latest posts in forum thread "%s"') % obj.title

    def items(self, obj):
        """The N most recent published posts of the thread."""
        queryset = obj.posts.published()
        queryset = queryset.select_related('parent_thread__last_post',
                                           'parent_thread__first_post',
                                           'author')
        return queryset[:NB_FORUM_THREAD_POSTS_IN_FEEDS]
class LatestForumPostsForThreadAtomFeed(LatestForumPostsForThreadFeed):
    """Atom flavour of the per-thread latest-posts feed."""

    feed_type = Atom1Feed
    subtitle = LatestForumPostsForThreadFeed.description

    def feed_url(self, obj):
        """Permalink of this feed."""
        return obj.get_latest_posts_atom_feed_url()
|
from aiohttp.web import HTTPBadRequest
from aiohttp.web import HTTPFound
from aiohttp.web import HTTPMethodNotAllowed
from aiohttp_babel.middlewares import _
import aiohttp_jinja2
from aiohttp_security import authorized_userid
from molb.auth import require
from molb.views.auth.email_form import EmailForm
from molb.views.auth.token import get_token_data
from molb.views.send_message import send_confirmation
from molb.views.utils import flash
from molb.views.utils import generate_csrf_meta
@require("client")
@aiohttp_jinja2.template("auth/email-email.html")
async def handler(request):
    """Display and process the "change my email address" form.

    GET renders the form; POST validates it, refuses an address already in
    use by any client, then emails a confirmation link to the new address
    (the change only takes effect when the token is confirmed).
    """
    login = await authorized_userid(request)
    async with request.app["db-pool"].acquire() as conn:
        q = "SELECT id, email_address FROM client WHERE login = $1"
        client = await conn.fetchrow(q, login)
        if request.method == "POST":
            form = EmailForm(await request.post(), meta=await generate_csrf_meta(request))
            if form.validate():
                data = dict(form.data.items())
                email_address = data["email_address"]
                # Reject an address that is already used by any client row.
                q = "SELECT COUNT(*) FROM client WHERE email_address = $1"
                if await conn.fetchval(q, email_address) != 0:
                    flash(request, ("danger", _("Veuillez choisir une autre adresse email")))
                    return {"form": form, "email": client["email_address"]}
                # Send the tokenized confirmation email; the DB is not
                # updated here, only in the confirm() handler below.
                await send_confirmation(
                    request,
                    email_address,
                    {"id": client["id"], "email_address": email_address},
                    "confirm_email",
                    _("Changement d'adresse email"),
                    "email-confirmation"
                )
                flash(
                    request,
                    (
                        "info",
                        _("Un email de confirmation a été envoyé à {}").format(
                            email_address
                        )
                    )
                )
                return HTTPFound(request.app.router["home"].url_for())
            else:
                flash(request, ("danger", _("Le formulaire contient des erreurs.")))
                return {"form": form, "email": client["email_address"]}
        elif request.method == "GET":
            form = EmailForm(meta=await generate_csrf_meta(request))
            return {"form": form, "email": client["email_address"]}
        else:
            raise HTTPMethodNotAllowed()
async def confirm(request):
    """Apply an email-address change once its confirmation link is visited.

    The token carries the client id and the new address; on success the
    client row is updated and the user is redirected home (or to login when
    not authenticated).
    """
    token = request.match_info["token"]
    try:
        token_data = get_token_data(token, request.app["config"]["application"]["secret_key"])
        id_ = token_data["id"]
        email_address = token_data["email_address"]
    except Exception:
        # Bad signature, malformed payload, or expired token.
        flash(request, ("danger", _("Le lien est invalide ou a expiré")))
        raise HTTPBadRequest()
    async with request.app["db-pool"].acquire() as conn:
        q = "UPDATE client SET email_address = $1 WHERE id = $2"
        try:
            await conn.execute(q, email_address, id_)
        except Exception:
            # Update failed (e.g. constraint violation); tell the user
            # rather than crash.
            flash(request, ("danger", _("Votre adresse email ne peut pas être modifiée")))
        else:
            flash(request, ("info", _("Votre adresse email a été modifiée")))
    login = await authorized_userid(request)
    if login:
        return HTTPFound(request.app.router["home"].url_for())
    else:
        return HTTPFound(request.app.router["login"].url_for())
|
import sys
import subprocess
print("D FRIENDLY LINKER INVOKED")

# Wrapper that re-drives the D compiler (ldc2) as the final link step,
# forwarding object files and C++ libraries from our own argv.
linker = "ldc2"

# BUG FIX: on Python 3, filter() returns a lazy iterator, so the original
# "filteredObjectFiles + passthroughLibraries" raised a TypeError (iterator
# + list). Build plain lists instead; this works on both Python 2 and 3.
filteredCppLibrary = [arg for arg in sys.argv if arg.startswith("-l")]
filteredObjectFiles = [arg for arg in sys.argv if arg.endswith(".o")]

# Forward each C++ library to ldc2 through its -L passthrough flag.
passthroughLibraries = ["-L" + lib for lib in filteredCppLibrary]

parameterList = filteredObjectFiles + passthroughLibraries
subprocess.call([linker] + parameterList + ["-of=PtrEngine"])
|
import account_tax
import account_invoice_tax
|
from rest_framework import serializers
from sigma_core.importer import load_ressource
import re
GroupFieldValue = load_ressource("GroupFieldValue")
GroupField = load_ressource("GroupField")
class GroupFieldValueSerializer(serializers.ModelSerializer):
    """Serializer for the value a membership assigns to a group field.

    ``validate`` dispatches on ``field.type``, used as an index into
    ``validate_methods`` (0=number, 1=string, 2=choice, 3=email), and checks
    ``value`` against the field's ``accept`` specification string.
    """

    class Meta:
        model = GroupFieldValue.model
        read_only_fields = ('membership', 'field')
        fields = ('membership', 'field', 'value')

    #*********************************************************************************************#
    #**                                       Validators                                        **#
    #*********************************************************************************************#

    # Matches an optionally signed base-10 integer.
    number_re = re.compile(r'^[+-]?[0-9]+$')

    def is_number_valid(self, accept, value):
        # ``accept`` encodes an inclusive range "min_max"; either bound may
        # be empty ("5_", "_10", "_" = unbounded).
        # NOTE(review): a value that is not numeric at all passes this check
        # (returns True) — presumably numeric syntax is enforced elsewhere;
        # confirm against the GroupField model.
        if accept == '' or (not GroupFieldValueSerializer.number_re.match(value)):
            return True
        value = int(value)
        a,b = accept.split('_')
        if not a == '':
            if not b == '':
                # Both bounds given: normalize their ordering, then check.
                a, b = int(a), int(b)
                a, b = min(a,b), max(a,b)
                return a <= value and value <= b
            else:
                return int(a) <= value
        else:
            if not b == '':
                return value <= int(b)
            else:
                return True

    def is_string_valid(self, accept, value):
        # Free-form strings are always accepted.
        return True

    def is_choice_valid(self, accept, value):
        # ``accept`` is a ';'-separated list of the allowed choices.
        if accept == '':
            return False
        choices = accept.split(';')
        return value in choices

    def is_email_valid(self, accept, value):
        # ``accept`` is a whitespace-separated list of allowed address
        # suffixes (e.g. domains); empty means any address is accepted.
        if accept != '':
            suffixes = accept.split()
            return value.endswith(tuple(suffixes))
        return True

    def validate(self, data):
        """Cross-field validation: group coherence first, then the value."""
        group_field = data['field']
        mship = data['membership']
        # First, check that the membership group correspond to the field group
        if group_field.group != mship.group:
            raise serializers.ValidationError("Condition (field.group == membership.group) is not verified.")
        # Then, check that the content is valid
        validate_methods = [
            self.is_number_valid,
            self.is_string_valid,
            self.is_choice_valid,
            self.is_email_valid
        ]
        if validate_methods[group_field.type](group_field.accept, data['value']):
            return data
        else:
            raise serializers.ValidationError('Les données entrées sont invalides')
|
# Odoo addon manifest for the ESC/POS point-of-sale hardware driver.
{
    'name': 'ESC/POS Hardware Driver',
    'version': '1.0',
    'category': 'Hardware Drivers',
    'sequence': 6,
    'website': 'https://www.odoo.com/page/point-of-sale',
    'summary': 'Hardware Driver for ESC/POS Printers and Cashdrawers',
    'description': """
ESC/POS Hardware Driver
=======================
This module allows openerp to print with ESC/POS compatible printers and
to open ESC/POS controlled cashdrawers in the point of sale and other modules
that would need such functionality.
""",
    'author': 'OpenERP SA',
    'depends': ['hw_proxy'],
    # pyusb (usb.core) is needed to talk to the printer over USB.
    'external_dependencies': {
        'python' : ['usb.core'],
    },
    'test': [
    ],
    'installable': True,
    'auto_install': False,
}
|
from odoo import fields, models
class ReportHotelRestaurantStatus(models.Model):
    """Read-only report counting restaurant reservations grouped by state."""

    _name = "report.hotel.restaurant.status"
    _description = "Reservation By State"
    # Backed by the SQL view created in init(); no physical table.
    _auto = False

    reservation_id = fields.Char('Reservation No', size=64, readonly=True)
    # BUG FIX: label was garbled ('Reservatioorder_datan' — 'order_data'
    # accidentally pasted inside 'Reservation').
    nbr = fields.Integer('Reservation', readonly=True)
    state = fields.Selection([('draft', 'Draft'), ('confirm', 'Confirm'),
                              ('done', 'Done')], 'State', size=16,
                             readonly=True)

    def init(self):
        """
        Create (or replace) the SQL view backing this reporting model.
        @param self: The object pointer
        """
        self.env.cr.execute("""
            create or replace view report_hotel_restaurant_status as (
                select
                    min(c.id) as id,
                    c.reservation_id,
                    c.state,
                    count(*) as nbr
                from
                    hotel_restaurant_reservation c
                group by c.state,c.reservation_id
            )""")
|
from django.contrib import admin
from .models import CreditCard
class CreditCardAdmin(admin.ModelAdmin):
    """Admin for credit cards that never exposes the raw number or CCV.

    The sensitive columns are excluded from the form and replaced by the
    masked, read-only renderings below.
    """

    exclude = ('credit_no', 'ccv')
    readonly_fields = ('id', 'imp_credit_no', 'imp_ccv', 'owner',
                       'expire_date', 'customer')

    def imp_credit_no(self, obj):
        """Masked card number showing only the last four digits.

        NOTE(review): assumes a 16-digit card number — confirm against the
        CreditCard model's field definition.
        """
        return "**** **** **** " + obj.credit_no[12:16]
    imp_credit_no.short_description = 'Credit Number'

    def imp_ccv(self, obj):
        """Fully masked CCV."""
        return "***"
    imp_ccv.short_description = 'CCV'

    # Cleanup: removed the inner ``class Meta`` — ModelAdmin does not read a
    # Meta inner class (the model is bound at admin.site.register time), so
    # it was dead, misleading code.
admin.site.register(CreditCard, CreditCardAdmin)
|
"""add GroupMember table
Revision ID: 529e38736245
Revises: 27a8d9bcbd1b
Create Date: 2014-12-03 14:34:56.909429
"""
revision = '529e38736245'
down_revision = '27a8d9bcbd1b'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``group_member`` association table."""
    columns = [
        sa.Column('groupid', sa.Integer(), nullable=False),
        sa.Column('userid', sa.Integer(), nullable=False),
        sa.Column('role', sa.Integer(), nullable=True),
    ]
    constraints = [
        sa.ForeignKeyConstraint(['groupid'], ['group.id']),
        sa.ForeignKeyConstraint(['userid'], ['users.id']),
        sa.PrimaryKeyConstraint('groupid', 'userid'),
    ]
    op.create_table('group_member', *(columns + constraints))
def downgrade():
    """Drop the ``group_member`` table (reverts upgrade())."""
    op.drop_table('group_member')
|
from odoo import fields, models
class System(models.Model):
    """Minimal master-data model naming a TMC system."""

    _name = "tmc.system"
    _description = "System"

    # Display name of the system.
    name = fields.Char()
|
from .model import Model
from .fields import *
from .validators import *
|
"""
The class to support ISO9660 Directory Records.
"""
from __future__ import absolute_import
import bisect
import struct
from pycdlib import dates
from pycdlib import inode
from pycdlib import pycdlibexception
from pycdlib import rockridge
from pycdlib import utils
if False: # pylint: disable=using-constant-test
from typing import BinaryIO, List, Optional, Tuple, Union # NOQA pylint: disable=unused-import
# NOTE: these imports have to be here to avoid circular deps
from pycdlib import headervd # NOQA pylint: disable=unused-import
from pycdlib import path_table_record # NOQA pylint: disable=unused-import
class XARecord(object):
    """
    An ISO9660 Extended Attribute record, as defined in the Philips Yellow
    Book standard.
    """
    __slots__ = ('_initialized', '_group_id', '_user_id', '_attributes',
                 '_filenum', '_pad_size')

    FMT = '=HHH2sB5s'

    def __init__(self):
        # type: () -> None
        self._pad_size = 0
        self._initialized = False

    def parse(self, xastr, len_fi):
        # type: (bytes, int) -> bool
        """
        Parse an Extended Attribute Record out of a string.

        Parameters:
         xastr - The string to parse.
         len_fi - The length of the file identifier for this record.
        Returns:
         True if the data contains an XA record, False otherwise.
        """
        if self._initialized:
            raise pycdlibexception.PyCdlibInternalError('This XARecord is already initialized')

        # Usually the XA record immediately follows the DR record, but some
        # ISOs (e.g. Windows 98 SE) pad by the file identifier length
        # rounded up to an even number, so probe both candidate offsets.
        record_size = struct.calcsize(self.FMT)
        padded_len = len_fi + (len_fi % 2)
        found = False
        for candidate in (0, padded_len):
            chunk = xastr[candidate:]
            if len(chunk) < record_size:
                return False
            (self._group_id, self._user_id, self._attributes, signature,
             self._filenum, unused) = struct.unpack_from(self.FMT, chunk, 0)
            if signature != b'XA':
                continue
            if unused != b'\x00\x00\x00\x00\x00':
                raise pycdlibexception.PyCdlibInvalidISO('Unused fields should be 0')
            self._pad_size = candidate
            found = True
            break
        if not found:
            return False

        self._initialized = True
        return True

    def new(self):
        # type: () -> None
        """
        Create a new Extended Attribute Record.

        Parameters:
         None.
        Returns:
         Nothing.
        """
        if self._initialized:
            raise pycdlibexception.PyCdlibInternalError('This XARecord is already initialized')

        # FIXME: we should allow the user to set these
        self._user_id = 0
        self._group_id = 0
        self._attributes = 0
        self._filenum = 0
        self._initialized = True

    def record(self):
        # type: () -> bytes
        """
        Record this Extended Attribute Record.

        Parameters:
         None.
        Returns:
         A string representing this Extended Attribute Record.
        """
        if not self._initialized:
            raise pycdlibexception.PyCdlibInternalError('This XARecord is not initialized')

        packed_fields = (self._group_id, self._user_id, self._attributes,
                         b'XA', self._filenum, b'\x00' * 5)
        return b'\x00' * self._pad_size + struct.pack(self.FMT, *packed_fields)

    @staticmethod
    def length():
        # type: () -> int
        """
        A static method to return the size of an Extended Attribute Record.

        Parameters:
         None.
        Returns:
         The size of an Extended Attribute Record.
        """
        return 14
class DirectoryRecord(object):
"""A class that represents an ISO9660 directory record."""
__slots__ = ('initialized', 'new_extent_loc', 'ptr', 'extents_to_here',
'offset_to_here', 'data_continuation', 'vd', 'children',
'rr_children', 'inode', '_printable_name', 'date',
'index_in_parent', 'dr_len', 'xattr_len', 'file_flags',
'file_unit_size', 'interleave_gap_size', 'len_fi', 'isdir',
'orig_extent_loc', 'data_length', 'seqnum', 'is_root',
'parent', 'rock_ridge', 'xa_record', 'file_ident')
FILE_FLAG_EXISTENCE_BIT = 0
FILE_FLAG_DIRECTORY_BIT = 1
FILE_FLAG_ASSOCIATED_FILE_BIT = 2
FILE_FLAG_RECORD_BIT = 3
FILE_FLAG_PROTECTION_BIT = 4
FILE_FLAG_MULTI_EXTENT_BIT = 7
FMT = '<BBLLLL7sBBBHHB'
    def __init__(self):
        # type: () -> None
        """Initialize an empty, not-yet-parsed directory record."""
        self.initialized = False
        # -1 means 'no new extent location has been assigned yet'.
        self.new_extent_loc = -1
        self.ptr = None  # type: Optional[path_table_record.PathTableRecord]
        self.extents_to_here = 1
        self.offset_to_here = 0
        self.data_continuation = None  # type: Optional[DirectoryRecord]
        self.children = []  # type: List[DirectoryRecord]
        self.rr_children = []  # type: List[DirectoryRecord]
        self.index_in_parent = -1
        self.is_root = False
        self.isdir = False
        self.rock_ridge = None  # type: Optional[rockridge.RockRidge]
        self.xa_record = None  # type: Optional[XARecord]
        self.inode = None  # type: Optional[inode.Inode]
    def parse(self, vd, record, parent):
        # type: (headervd.PrimaryOrSupplementaryVD, bytes, Optional[DirectoryRecord]) -> str
        """
        Parse a directory record out of a string.

        Parameters:
         vd - The Volume Descriptor this record is part of.
         record - The string to parse for this record.
         parent - The parent of this record.
        Returns:
         The Rock Ridge version as a string if this Directory Record has Rock
         Ridge, '' otherwise.
        """
        if self.initialized:
            raise pycdlibexception.PyCdlibInternalError('Directory Record already initialized')
        if len(record) > 255:
            # Since the length is supposed to be 8 bits, this should never
            # happen.
            raise pycdlibexception.PyCdlibInvalidISO('Directory record longer than 255 bytes!')
        # According to http://www.dubeyko.com/development/FileSystems/ISO9960/ISO9960.html,
        # the xattr_len is the number of bytes at the *beginning* of the file
        # extent. Since this is only a byte, it is necessarily limited to 255
        # bytes.
        (self.dr_len, self.xattr_len, extent_location_le, extent_location_be,
         data_length_le, data_length_be_unused, dr_date, self.file_flags,
         self.file_unit_size, self.interleave_gap_size, seqnum_le, seqnum_be,
         self.len_fi) = struct.unpack_from(self.FMT, record[:33], 0)
        # In theory we should have a check here that checks to make sure that
        # the length of the record we were passed in matches the data record
        # length. However, we have seen ISOs in the wild where this is
        # incorrect, so we elide the check here.
        if extent_location_le != utils.swab_32bit(extent_location_be):
            raise pycdlibexception.PyCdlibInvalidISO('Little-endian (%d) and big-endian (%d) extent location disagree' % (extent_location_le, utils.swab_32bit(extent_location_be)))
        self.orig_extent_loc = extent_location_le
        # Theoretically, we should check to make sure that the little endian
        # data length is the same as the big endian data length. In practice,
        # though, we've seen ISOs where this is wrong. Skip the check, and just
        # pick the little-endian as the 'actual' size, and hope for the best.
        self.data_length = data_length_le
        if seqnum_le != utils.swab_16bit(seqnum_be):
            raise pycdlibexception.PyCdlibInvalidISO('Little-endian and big-endian seqnum disagree')
        self.seqnum = seqnum_le
        self.date = dates.DirectoryRecordDate()
        self.date.parse(dr_date)
        # OK, we've unpacked what we can from the beginning of the string. Now
        # we have to use the len_fi to get the rest.
        self.parent = parent
        self.vd = vd
        if self.parent is None:
            self.is_root = True
            # A root directory entry should always be exactly 34 bytes.
            # However, we have seen ISOs in the wild that get this wrong, so we
            # elide a check for it.
            self.file_ident = bytes(bytearray([record[33]]))
            # A root directory entry should always have 0 as the identifier.
            # However, we have seen ISOs in the wild that don't have this set
            # properly to 0. In that case, we override what we parsed out from
            # the original with the correct value (\x00), and hope for the best.
            if self.file_ident != b'\x00':
                self.file_ident = b'\x00'
            self.isdir = True
        else:
            record_offset = 33
            self.file_ident = record[record_offset:record_offset + self.len_fi]
            record_offset += self.len_fi
            if self.file_flags & (1 << self.FILE_FLAG_DIRECTORY_BIT):
                self.isdir = True
            # An even-length identifier is followed by a padding byte.
            if self.len_fi % 2 == 0:
                record_offset += 1
            # An optional XA record may follow the identifier (and padding).
            xa_rec = XARecord()
            if xa_rec.parse(record[record_offset:], self.len_fi):
                self.xa_record = xa_rec
                record_offset += len(self.xa_record.record())
            # A Rock Ridge extension starts with one of these two-byte tags.
            if len(record[record_offset:]) >= 2 and \
               record[record_offset:record_offset + 2] in (b'SP', b'RR', b'CE', b'PX', b'ER', b'ES', b'PN', b'SL', b'NM', b'CL', b'PL', b'TF', b'SF', b'RE', b'AL'):
                self.rock_ridge = rockridge.RockRidge()
                is_first_dir_record_of_root = False
                # bytes_to_skip comes from the 'dot' child's SP entry, except
                # for the very first record of the root which defines it.
                if self.parent.is_root:
                    if self.file_ident == b'\x00':
                        is_first_dir_record_of_root = True
                        bytes_to_skip = 0
                    else:
                        if not self.parent.children:
                            raise pycdlibexception.PyCdlibInvalidISO('Parent has no dot child')
                        if self.parent.children[0].rock_ridge is None:
                            raise pycdlibexception.PyCdlibInvalidISO('Dot child does not have Rock Ridge; ISO is corrupt')
                        bytes_to_skip = self.parent.children[0].rock_ridge.bytes_to_skip
                else:
                    if self.parent.rock_ridge is None:
                        raise pycdlibexception.PyCdlibInvalidISO('Parent does not have Rock Ridge; ISO is corrupt')
                    bytes_to_skip = self.parent.rock_ridge.bytes_to_skip
                self.rock_ridge.parse(record[record_offset:],
                                      is_first_dir_record_of_root,
                                      bytes_to_skip,
                                      False)
        if self.xattr_len != 0:
            if self.file_flags & (1 << self.FILE_FLAG_RECORD_BIT):
                raise pycdlibexception.PyCdlibInvalidISO('Record Bit not allowed with Extended Attributes')
            if self.file_flags & (1 << self.FILE_FLAG_PROTECTION_BIT):
                raise pycdlibexception.PyCdlibInvalidISO('Protection Bit not allowed with Extended Attributes')
        if self.rock_ridge is None:
            ret = ''
        else:
            ret = self.rock_ridge.rr_version
        # Compute the human-readable name for this record.
        if self.is_root:
            self._printable_name = '/'.encode(vd.encoding)
        elif self.file_ident == b'\x00':
            self._printable_name = '.'.encode(vd.encoding)
        elif self.file_ident == b'\x01':
            self._printable_name = '..'.encode(vd.encoding)
        else:
            self._printable_name = self.file_ident
        self.initialized = True
        return ret
    def _rr_new(self, rr_version, rr_name, rr_symlink_target, rr_relocated_child,
                rr_relocated, rr_relocated_parent, file_mode):
        # type: (str, bytes, bytes, bool, bool, bool, int) -> None
        """
        Internal method to add Rock Ridge to a Directory Record.

        Parameters:
         rr_version - A string containing the version of Rock Ridge to use for
                      this record.
         rr_name - The Rock Ridge name to associate with this directory record.
         rr_symlink_target - The target for the symlink, if this is a symlink
                             record (otherwise, None).
         rr_relocated_child - True if this is a directory record for a rock
                              ridge relocated child.
         rr_relocated - True if this is a directory record for a relocated
                        entry.
         rr_relocated_parent - True if this is a directory record for a rock
                               ridge relocated parent.
         file_mode - The Unix file mode for this Rock Ridge entry.
        Returns:
         Nothing.
        """
        if self.parent is None:
            raise pycdlibexception.PyCdlibInternalError('Invalid call to create new Rock Ridge on root directory')
        self.rock_ridge = rockridge.RockRidge()
        is_first_dir_record_of_root = self.file_ident == b'\x00' and self.parent.is_root
        bytes_to_skip = 0
        # An XA record, when present, sits between the DR and the RR data.
        if self.xa_record is not None:
            bytes_to_skip = XARecord.length()
        self.dr_len = self.rock_ridge.new(is_first_dir_record_of_root, rr_name,
                                          file_mode, rr_symlink_target,
                                          rr_version, rr_relocated_child,
                                          rr_relocated, rr_relocated_parent,
                                          bytes_to_skip, self.dr_len, {})
        # For files, we are done
        if not self.isdir:
            return
        # If this is a directory, we have to manipulate the file links
        # appropriately.
        if self.parent.is_root:
            if self.file_ident == b'\x00' or self.file_ident == b'\x01':
                # For the dot and dotdot children of the root, add one
                # directly to their Rock Ridge links.
                self.rock_ridge.add_to_file_links()
            else:
                # For all other children of the root, make sure to add one
                # to each of the dot and dotdot entries.
                if len(self.parent.children) < 2:
                    raise pycdlibexception.PyCdlibInvalidISO('Expected at least 2 children of the root directory record, saw %d' % (len(self.parent.children)))
                if self.parent.children[0].rock_ridge is None:
                    raise pycdlibexception.PyCdlibInvalidISO('Dot child of directory has no Rock Ridge; ISO is corrupt')
                self.parent.children[0].rock_ridge.add_to_file_links()
                if self.parent.children[1].rock_ridge is None:
                    raise pycdlibexception.PyCdlibInvalidISO('Dot-dot child of directory has no Rock Ridge; ISO is corrupt')
                self.parent.children[1].rock_ridge.add_to_file_links()
        else:
            if self.parent.rock_ridge is None:
                raise pycdlibexception.PyCdlibInvalidISO('Parent of the entry did not have Rock Ridge, ISO is corrupt')
            if self.file_ident == b'\x00':
                # If we are adding the dot directory, increment the parent
                # file links and our file links.
                self.parent.rock_ridge.add_to_file_links()
                self.rock_ridge.add_to_file_links()
            elif self.file_ident == b'\x01':
                # If we are adding the dotdot directory, copy the file links
                # from the dot directory of the grandparent.
                if self.parent.parent is None:
                    raise pycdlibexception.PyCdlibInternalError('Grandparent of the entry did not exist; this cannot be')
                if not self.parent.children:
                    raise pycdlibexception.PyCdlibInvalidISO('Grandparent of the entry did not have a dot entry; ISO is corrupt')
                if self.parent.parent.children[0].rock_ridge is None:
                    raise pycdlibexception.PyCdlibInvalidISO('Grandparent dotdot entry did not have Rock Ridge; ISO is corrupt')
                self.rock_ridge.copy_file_links(self.parent.parent.children[0].rock_ridge)
            else:
                # For all other entries, increment the parents file links
                # and the parents dot file links.
                self.parent.rock_ridge.add_to_file_links()
                if not self.parent.children:
                    raise pycdlibexception.PyCdlibInvalidISO('Parent of the entry did not have a dot entry; ISO is corrupt')
                if self.parent.children[0].rock_ridge is None:
                    raise pycdlibexception.PyCdlibInvalidISO('Dot child of the parent did not have a dot entry; ISO is corrupt')
                self.parent.children[0].rock_ridge.add_to_file_links()
    def _new(self, vd, name, parent, seqnum, isdir, length, xa):
        # type: (headervd.PrimaryOrSupplementaryVD, bytes, Optional[DirectoryRecord], int, bool, int, bool) -> None
        """
        Internal method to create a new Directory Record.

        Parameters:
         vd - The Volume Descriptor this record is part of.
         name - The name (raw ISO9660 identifier bytes) for this record.
         parent - The parent of this directory record, or None for the root.
         seqnum - The sequence number to associate with this directory record.
         isdir - Whether this directory record represents a directory.
         length - The length of the data for this directory record.
         xa - True if this is an Extended Attribute record.
        Returns:
         Nothing.
        """
        # Adding a new time should really be done when we are going to write
        # the ISO (in record()). Ecma-119 9.1.5 says:
        #
        # 'This field shall indicate the date and the time of the day at which
        # the information in the Extent described by the Directory Record was
        # recorded.'
        #
        # We create it here just to have something in the field, but we'll
        # redo the whole thing when we are mastering.
        self.date = dates.DirectoryRecordDate()
        self.date.new()
        # The data length field is a 32-bit quantity on disk, so larger
        # files cannot be represented in a single directory record.
        if length > 2**32 - 1:
            raise pycdlibexception.PyCdlibInvalidInput('Maximum supported file length is 2^32-1')
        self.data_length = length
        self.file_ident = name
        self.isdir = isdir
        self.seqnum = seqnum
        # For a new directory record entry, there is no original_extent_loc,
        # so we leave it at None.
        self.orig_extent_loc = None
        self.len_fi = len(self.file_ident)
        # Base record length: the fixed-size header plus the file identifier.
        # XA and padding adjustments are applied further below.
        self.dr_len = struct.calcsize(self.FMT) + self.len_fi
        # From Ecma-119, 9.1.6, the file flag bits are:
        #
        # Bit 0 - Existence - 0 for existence known, 1 for hidden
        # Bit 1 - Directory - 0 for file, 1 for directory
        # Bit 2 - Associated File - 0 for not associated, 1 for associated
        # Bit 3 - Record - 0=structure not in xattr, 1=structure in xattr
        # Bit 4 - Protection - 0=no owner and group, 1=owner and group in xattr
        # Bit 5 - Reserved
        # Bit 6 - Reserved
        # Bit 7 - Multi-extent - 0=final directory record, 1=not final directory record
        self.file_flags = 0
        if self.isdir:
            self.file_flags |= (1 << self.FILE_FLAG_DIRECTORY_BIT)
        self.file_unit_size = 0 # FIXME: we don't support setting file unit size for now
        self.interleave_gap_size = 0 # FIXME: we don't support setting interleave gap size for now
        self.xattr_len = 0 # FIXME: we don't support xattrs for now
        self.parent = parent
        # NOTE(review): is_root is presumed to default to False elsewhere
        # (e.g. in __init__, outside this view); it is only set True here.
        if parent is None:
            # If no parent, then this is the root
            self.is_root = True
        if xa:
            self.xa_record = XARecord()
            self.xa_record.new()
            self.dr_len += XARecord.length()
        # Pad the record length out to an even number of bytes.
        self.dr_len += (self.dr_len % 2)
        # Precompute the human-readable name: '/' for the root, '.' and '..'
        # for the reserved b'\x00'/b'\x01' identifiers, else the raw name.
        if self.is_root:
            self._printable_name = '/'.encode(vd.encoding)
        elif self.file_ident == b'\x00':
            self._printable_name = '.'.encode(vd.encoding)
        elif self.file_ident == b'\x01':
            self._printable_name = '..'.encode(vd.encoding)
        else:
            self._printable_name = self.file_ident
        self.vd = vd
        self.initialized = True
def new_symlink(self, vd, name, parent, rr_target, seqnum, rock_ridge,
rr_name, xa):
# type: (headervd.PrimaryOrSupplementaryVD, bytes, DirectoryRecord, bytes, int, str, bytes, bool) -> None
"""
Create a new symlink Directory Record. This implies that the new
record will be Rock Ridge.
Parameters:
vd - The Volume Descriptor this record is part of.
name - The name for this directory record.
parent - The parent of this directory record.
rr_target - The symlink target for this directory record.
seqnum - The sequence number for this directory record.
rock_ridge - The version of Rock Ridge to use for this directory record.
rr_name - The Rock Ridge name for this directory record.
xa - True if this is an Extended Attribute record.
Returns:
Nothing.
"""
if self.initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record already initialized')
self._new(vd, name, parent, seqnum, False, 0, xa)
if rock_ridge:
self._rr_new(rock_ridge, rr_name, rr_target, False, False, False,
0o0120555)
def new_file(self, vd, length, isoname, parent, seqnum, rock_ridge, rr_name,
xa, file_mode):
# type: (headervd.PrimaryOrSupplementaryVD, int, bytes, DirectoryRecord, int, str, bytes, bool, int) -> None
"""
Create a new file Directory Record.
Parameters:
vd - The Volume Descriptor this record is part of.
length - The length of the data.
isoname - The name for this directory record.
parent - The parent of this directory record.
seqnum - The sequence number for this directory record.
rock_ridge - Whether to make this a Rock Ridge directory record.
rr_name - The Rock Ridge name for this directory record.
xa - True if this is an Extended Attribute record.
file_mode - The POSIX file mode for this entry.
Returns:
Nothing.
"""
if self.initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record already initialized')
self._new(vd, isoname, parent, seqnum, False, length, xa)
if rock_ridge:
self._rr_new(rock_ridge, rr_name, b'', False, False, False,
file_mode)
def new_root(self, vd, seqnum, log_block_size):
# type: (headervd.PrimaryOrSupplementaryVD, int, int) -> None
"""
Create a new root Directory Record.
Parameters:
vd - The Volume Descriptor this record is part of.
seqnum - The sequence number for this directory record.
log_block_size - The logical block size to use.
Returns:
Nothing.
"""
if self.initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record already initialized')
self._new(vd, b'\x00', None, seqnum, True, log_block_size, False)
def new_dot(self, vd, parent, seqnum, rock_ridge, log_block_size, xa,
file_mode):
# type: (headervd.PrimaryOrSupplementaryVD, DirectoryRecord, int, str, int, bool, int) -> None
"""
Create a new 'dot' Directory Record.
Parameters:
vd - The Volume Descriptor this record is part of.
parent - The parent of this directory record.
seqnum - The sequence number for this directory record.
rock_ridge - Whether to make this a Rock Ridge directory record.
log_block_size - The logical block size to use.
xa - True if this is an Extended Attribute record.
file_mode - The POSIX file mode to set for this directory.
Returns:
Nothing.
"""
if self.initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record already initialized')
self._new(vd, b'\x00', parent, seqnum, True, log_block_size, xa)
if rock_ridge:
self._rr_new(rock_ridge, b'', b'', False, False, False, file_mode)
def new_dotdot(self, vd, parent, seqnum, rock_ridge, log_block_size,
rr_relocated_parent, xa, file_mode):
# type: (headervd.PrimaryOrSupplementaryVD, DirectoryRecord, int, str, int, bool, bool, int) -> None
"""
Create a new 'dotdot' Directory Record.
Parameters:
vd - The Volume Descriptor this record is part of.
parent - The parent of this directory record.
seqnum - The sequence number for this directory record.
rock_ridge - Whether to make this a Rock Ridge directory record.
log_block_size - The logical block size to use.
rr_relocated_parent - True if this is a Rock Ridge relocated parent.
xa - True if this is an Extended Attribute record.
file_mode - The POSIX file mode to set for this directory.
Returns:
Nothing.
"""
if self.initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record already initialized')
self._new(vd, b'\x01', parent, seqnum, True, log_block_size, xa)
if rock_ridge:
self._rr_new(rock_ridge, b'', b'', False, False, rr_relocated_parent, file_mode)
def new_dir(self, vd, name, parent, seqnum, rock_ridge, rr_name,
log_block_size, rr_relocated_child, rr_relocated, xa, file_mode):
# type: (headervd.PrimaryOrSupplementaryVD, bytes, DirectoryRecord, int, str, bytes, int, bool, bool, bool, int) -> None
"""
Create a new directory Directory Record.
Parameters:
vd - The Volume Descriptor this record is part of.
name - The name for this directory record.
parent - The parent of this directory record.
seqnum - The sequence number for this directory record.
rock_ridge - Whether to make this a Rock Ridge directory record.
rr_name - The Rock Ridge name for this directory record.
log_block_size - The logical block size to use.
rr_relocated_child - True if this is a Rock Ridge relocated child.
rr_relocated - True if this is a Rock Ridge relocated entry.
xa - True if this is an Extended Attribute record.
file_mode - The POSIX file mode to set for this directory.
Returns:
Nothing.
"""
if self.initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record already initialized')
self._new(vd, name, parent, seqnum, True, log_block_size, xa)
if rock_ridge:
self._rr_new(rock_ridge, rr_name, b'', rr_relocated_child,
rr_relocated, False, file_mode)
if rr_relocated_child and self.rock_ridge:
# Relocated Rock Ridge entries are not exactly treated as
# directories, so fix things up here.
self.isdir = False
self.file_flags = 0
self.rock_ridge.add_to_file_links()
def change_existence(self, is_hidden):
# type: (bool) -> None
"""
Change the ISO9660 existence flag of this Directory Record.
Parameters:
is_hidden - True if this Directory Record should be hidden,
False otherwise.
Returns:
Nothing.
"""
if not self.initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not initialized')
if is_hidden:
self.file_flags |= (1 << self.FILE_FLAG_EXISTENCE_BIT)
else:
self.file_flags &= ~(1 << self.FILE_FLAG_EXISTENCE_BIT)
def _recalculate_extents_and_offsets(self, index, logical_block_size):
# type: (int, int) -> Tuple[int, int]
"""
Internal method to recalculate the extents and offsets associated with
children of this directory record.
Parameters:
index - The index at which to start the recalculation.
logical_block_size - The block size to use for comparisons.
Returns:
A tuple where the first element is the total number of extents required
by the children and where the second element is the offset into the
last extent currently being used.
"""
if index == 0:
dirrecord_offset = 0
num_extents = 1
else:
dirrecord_offset = self.children[index - 1].offset_to_here
num_extents = self.children[index - 1].extents_to_here
for i in range(index, len(self.children)):
c = self.children[i]
dirrecord_len = c.dr_len
if (dirrecord_offset + dirrecord_len) > logical_block_size:
num_extents += 1
dirrecord_offset = 0
dirrecord_offset += dirrecord_len
c.extents_to_here = num_extents
c.offset_to_here = dirrecord_offset
c.index_in_parent = i
return num_extents, dirrecord_offset
    def _add_child(self, child, logical_block_size, allow_duplicate,
                   check_overflow):
        # type: (DirectoryRecord, int, bool, bool) -> bool
        """
        An internal method to add a child to this object.  Note that this is
        called both during parsing and when adding a new object to the system,
        so it shouldn't have any functionality that is not appropriate for both.

        Parameters:
         child - The child directory record object to add.
         logical_block_size - The size of a logical block for this volume
                              descriptor.
         allow_duplicate - Whether to allow duplicate names, as there are
                          situations where duplicate children are allowed.
         check_overflow - Whether to check for overflow; if we are parsing, we
                          don't want to do this.
        Returns:
         True if adding this child caused the directory to overflow into another
         extent, False otherwise.
        """
        if not self.isdir:
            raise pycdlibexception.PyCdlibInvalidInput('Trying to add a child to a record that is not a directory')
        # First ensure that this is not a duplicate. For speed purposes, we
        # recognize that bisect_left will always choose an index to the *left*
        # of a duplicate child. Thus, to check for duplicates we only need to
        # see if the child to be added is a duplicate with the entry that
        # bisect_left returned.
        index = bisect.bisect_left(self.children, child)
        if index != len(self.children) and self.children[index].file_ident == child.file_ident:
            if not self.children[index].is_associated_file() and not child.is_associated_file():
                if not (self.rock_ridge is not None and self.file_identifier() == b'RR_MOVED'):
                    if not allow_duplicate:
                        raise pycdlibexception.PyCdlibInvalidISO('Failed adding duplicate name to parent')
                    # A permitted duplicate is a multi-extent file: chain the
                    # new record off the existing one and mark the existing
                    # record as not-final (multi-extent bit, Ecma-119 9.1.6
                    # bit 7), then insert the new record right after it.
                    self.children[index].data_continuation = child
                    self.children[index].file_flags |= (1 << self.FILE_FLAG_MULTI_EXTENT_BIT)
                    index += 1
        self.children.insert(index, child)
        # Keep rr_children sorted by Rock Ridge name with a manual binary
        # search (bisect cannot take a key here); dot/dotdot entries are
        # excluded from the Rock Ridge child list.
        if child.rock_ridge is not None and not child.is_dot() and not child.is_dotdot():
            lo = 0
            hi = len(self.rr_children)
            while lo < hi:
                mid = (lo + hi) // 2
                rr = self.rr_children[mid].rock_ridge
                if rr is not None:
                    if rr.name() < child.rock_ridge.name():
                        lo = mid + 1
                    else:
                        hi = mid
                else:
                    raise pycdlibexception.PyCdlibInternalError('Expected all children to have Rock Ridge, but one did not')
            rr_index = lo
            self.rr_children.insert(rr_index, child)
        # We now have to check if we need to add another logical block.
        # We have to iterate over the entire list again, because where we
        # placed this last entry may rearrange the empty spaces in the blocks
        # that we've already allocated.
        num_extents, offset_unused = self._recalculate_extents_and_offsets(index,
                                                                           logical_block_size)
        overflowed = False
        if check_overflow and (num_extents * logical_block_size > self.data_length):
            overflowed = True
            # When we overflow our data length, we always add a full block.
            self.data_length += logical_block_size
            # We also have to make sure to update the length of the dot child,
            # as that should always reflect the length.
            self.children[0].data_length = self.data_length
            # We also have to update all of the dotdot entries. If this is
            # the root directory record (no parent), we first update the root
            # dotdot entry. In all cases, we update the dotdot entry of all
            # children that are directories.
            if self.parent is None:
                self.children[1].data_length = self.data_length
            for c in self.children:
                if not c.is_dir():
                    continue
                if len(c.children) > 1:
                    c.children[1].data_length = self.data_length
        return overflowed
def add_child(self, child, logical_block_size, allow_duplicate=False):
# type: (DirectoryRecord, int, bool) -> bool
"""
Add a new child to this directory record.
Parameters:
child - The child directory record object to add.
logical_block_size - The size of a logical block for this volume
descriptor.
allow_duplicate - Whether to allow duplicate names, as there are
situations where duplicate children are allowed.
Returns:
True if adding this child caused the directory to overflow into another
extent, False otherwise.
"""
if not self.initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not initialized')
return self._add_child(child, logical_block_size, allow_duplicate, True)
def track_child(self, child, logical_block_size, allow_duplicate=False):
# type: (DirectoryRecord, int, bool) -> None
"""
Track an existing child of this directory record.
Parameters:
child - The child directory record object to add.
logical_block_size - The size of a logical block for this volume
descriptor.
allow_duplicate - Whether to allow duplicate names, as there are
situations where duplicate children are allowed.
Returns:
Nothing.
"""
if not self.initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not initialized')
self._add_child(child, logical_block_size, allow_duplicate, False)
    def remove_child(self, child, index, logical_block_size):
        # type: (DirectoryRecord, int, int) -> bool
        """
        Remove a child from this Directory Record.

        Parameters:
         child - The child DirectoryRecord object to remove.
         index - The index of the child into this DirectoryRecord children list.
         logical_block_size - The size of a logical block on this volume
                              descriptor.
        Returns:
         True if removing this child caused an underflow, False otherwise.
        """
        if not self.initialized:
            raise pycdlibexception.PyCdlibInternalError('Directory Record not initialized')
        if index < 0:
            # This should never happen
            raise pycdlibexception.PyCdlibInternalError('Invalid child index to remove')
        # Unfortunately, Rock Ridge specifies that a CL 'directory' is replaced
        # by a *file*, not another directory.  Thus, we can't just depend on
        # whether this child is marked as a directory by the file flags during
        # parse time.  Instead, we check if this is either a true directory,
        # or a Rock Ridge CL entry, and in either case try to manipulate the
        # file links.
        if child.rock_ridge is not None:
            if child.isdir or child.rock_ridge.child_link_record_exists():
                if len(self.children) < 2:
                    raise pycdlibexception.PyCdlibInvalidISO('Expected a dot and dotdot entry, but missing; ISO is corrupt')
                if self.children[0].rock_ridge is None or self.children[1].rock_ridge is None:
                    raise pycdlibexception.PyCdlibInvalidISO('Missing Rock Ridge entry on dot or dotdot; ISO is corrupt')
                if self.parent is None:
                    # Removing a directory from the root: drop one file link
                    # from the root's own dot and dotdot entries.
                    self.children[0].rock_ridge.remove_from_file_links()
                    self.children[1].rock_ridge.remove_from_file_links()
                else:
                    if self.rock_ridge is None:
                        raise pycdlibexception.PyCdlibInvalidISO('Child has Rock Ridge, but parent does not; ISO is corrupt')
                    # Mirror of the add path: drop one file link from this
                    # (the parent) record and from its dot entry.
                    self.rock_ridge.remove_from_file_links()
                    self.children[0].rock_ridge.remove_from_file_links()
        del self.children[index]
        # We now have to check if we need to remove a logical block.
        # We have to iterate over the entire list again, because where we
        # removed this last entry may rearrange the empty spaces in the blocks
        # that we've already allocated.
        num_extents, dirrecord_offset = self._recalculate_extents_and_offsets(index,
                                                                              logical_block_size)
        underflow = False
        # Shrink by one block only when a whole block's worth of slack has
        # opened up between the actual layout size and data_length.
        total_size = (num_extents - 1) * logical_block_size + dirrecord_offset
        if (self.data_length - total_size) > logical_block_size:
            self.data_length -= logical_block_size
            # We also have to make sure to update the length of the dot child,
            # as that should always reflect the length.
            self.children[0].data_length = self.data_length
            # We also have to update all of the dotdot entries.  If this is
            # the root directory record (no parent), we first update the root
            # dotdot entry.  In all cases, we update the dotdot entry of all
            # children that are directories.
            if self.parent is None:
                self.children[1].data_length = self.data_length
            for c in self.children:
                if not c.is_dir():
                    continue
                if len(c.children) > 1:
                    c.children[1].data_length = self.data_length
            underflow = True
        return underflow
def is_dir(self):
# type: () -> bool
"""
Determine whether this Directory Record is a directory.
Parameters:
None.
Returns:
True if this DirectoryRecord object is a directory, False otherwise.
"""
if not self.initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not initialized')
return self.isdir
def is_file(self):
# type: () -> bool
"""
Determine whether this Directory Record is a file.
Parameters:
None.
Returns:
True if this DirectoryRecord object is a file, False otherwise.
"""
if not self.initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not initialized')
return not self.isdir
def is_symlink(self):
# type: () -> bool
"""
Determine whether this Directory Record is a Rock Ridge
symlink. If using this to distinguish between symlinks, files, and
directories, it is important to call this API *first*; symlinks are
also considered files.
Parameters:
None.
Returns:
True if this Directory Record object is a symlink, False otherwise.
"""
if not self.initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not initialized')
return self.rock_ridge is not None and self.rock_ridge.is_symlink()
def is_dot(self):
# type: () -> bool
"""
Determine whether this Directory Record is a 'dot' entry.
Parameters:
None.
Returns:
True if this DirectoryRecord object is a 'dot' entry, False otherwise.
"""
if not self.initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not initialized')
return self.file_ident == b'\x00'
def is_dotdot(self):
# type: () -> bool
"""
Determine whether this Directory Record is a 'dotdot' entry.
Parameters:
None.
Returns:
True if this DirectoryRecord object is a 'dotdot' entry, False otherwise.
"""
if not self.initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not initialized')
return self.file_ident == b'\x01'
def directory_record_length(self):
# type: () -> int
"""
Determine the length of this Directory Record.
Parameters:
None.
Returns:
The length of this Directory Record.
"""
if not self.initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not initialized')
return self.dr_len
def _extent_location(self):
"""
An internal method to get the location of this Directory Record on the
ISO.
Parameters:
None.
Returns:
Extent location of this Directory Record on the ISO.
"""
if self.new_extent_loc < 0:
return self.orig_extent_loc
return self.new_extent_loc
def extent_location(self):
# type: () -> int
"""
Get the location of this Directory Record on the ISO.
Parameters:
None.
Returns:
Extent location of this Directory Record on the ISO.
"""
if not self.initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not initialized')
return self._extent_location()
def file_identifier(self):
# type: () -> bytes
"""
Get the identifier of this Directory Record.
Parameters:
None.
Returns:
String representing the identifier of this Directory Record.
"""
if not self.initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not initialized')
return self._printable_name
    def record(self):
        # type: () -> bytes
        """
        Serialize this Directory Record into its on-disk bytes form.

        Parameters:
         None.
        Returns:
         Bytes representing this Directory Record.
        """
        if not self.initialized:
            raise pycdlibexception.PyCdlibInternalError('Directory Record not initialized')
        # Ecma-119 9.1.5 says the date should reflect the time when the
        # record was written, so we make a new date now and use that to
        # write out the record.
        self.date = dates.DirectoryRecordDate()
        self.date.new()
        # A single padding byte follows the file identifier when the fixed
        # header plus identifier length is odd, keeping the record even.
        padlen = struct.calcsize(self.FMT) + self.len_fi
        padstr = b'\x00' * (padlen % 2)
        extent_loc = self._extent_location()
        # Optional System Use area pieces: the XA record (if any) comes
        # first, followed by any Rock Ridge directory record entries.
        xa_rec = b''
        if self.xa_record is not None:
            xa_rec = self.xa_record.record()
        rr_rec = b''
        if self.rock_ridge is not None:
            rr_rec = self.rock_ridge.record_dr_entries()
        # Multi-byte numeric fields are written in both byte orders (the
        # swab_* calls provide the opposite-endian copies).
        outlist = [struct.pack(self.FMT, self.dr_len, self.xattr_len,
                               extent_loc, utils.swab_32bit(extent_loc),
                               self.data_length, utils.swab_32bit(self.data_length),
                               self.date.record(), self.file_flags,
                               self.file_unit_size, self.interleave_gap_size,
                               self.seqnum, utils.swab_16bit(self.seqnum),
                               self.len_fi) + self.file_ident + padstr + xa_rec + rr_rec]
        # Pad the complete record out to an even number of bytes.
        outlist.append(b'\x00' * (len(outlist[0]) % 2))
        return b''.join(outlist)
def is_associated_file(self):
# type: () -> bool
"""
Determine whether this file is 'associated' with another file
on the ISO.
Parameters:
None.
Returns:
True if this file is associated with another file on the ISO, False
otherwise.
"""
if not self.initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not initialized')
return self.file_flags & (1 << self.FILE_FLAG_ASSOCIATED_FILE_BIT)
def set_ptr(self, ptr):
# type: (path_table_record.PathTableRecord) -> None
"""
Set the Path Table Record associated with this Directory Record.
Parameters:
ptr - The path table record to associate with this Directory Record.
Returns:
Nothing.
"""
if not self.initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not initialized')
self.ptr = ptr
def set_data_location(self, current_extent, tag_location): # pylint: disable=unused-argument
# type: (int, int) -> None
"""
Set the new extent location that the data for this Directory Record
should live at.
Parameters:
current_extent - The new extent.
Returns:
Nothing.
"""
if not self.initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not initialized')
self.new_extent_loc = current_extent
if self.ptr is not None:
self.ptr.update_extent_location(current_extent)
def get_data_length(self):
# type: () -> int
"""
Get the length of the data that this Directory Record points to.
Parameters:
None.
Returns:
The length of the data that this Directory Record points to.
"""
if not self.initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not initialized')
if self.inode is not None:
return self.inode.get_data_length()
return self.data_length
def set_data_length(self, length):
# type: (int) -> None
"""
Set the length of the data that this Directory Record points to.
Parameters:
length - The new length for the data.
Returns:
The length of the data that this Directory Record points to.
"""
if not self.initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not initialized')
self.data_length = length
############# START BACKWARDS COMPATIBILITY ###############################
# We have a few downstream users that are using 'data_fp',
# 'original_data_location', 'DATA_ON_ORIGINAL_ISO', 'DATA_IN_EXTERNAL_FP',
# and 'fp_offset' directly. For backwards compatibility
# we define properties here that access these. Note that this won't work
# in all circumstances, but is good enough for a read-only client.
@property
def data_fp(self):
# type: () -> Optional[Union[BinaryIO, str]]
"""Backwards compatibility property for 'data_fp'."""
if self.inode is None:
return None
return self.inode.data_fp
@property
def original_data_location(self):
# type: () -> Optional[int]
"""Backwards compatibility property for 'original_data_location'."""
if self.inode is None:
return None
return self.inode.original_data_location
    @property
    def DATA_ON_ORIGINAL_ISO(self):
        # type: () -> int
        """Backwards compatibility property for 'DATA_ON_ORIGINAL_ISO'."""
        # Mirrors the constant now living on the Inode class.
        return inode.Inode.DATA_ON_ORIGINAL_ISO
    @property
    def DATA_IN_EXTERNAL_FP(self):
        # type: () -> int
        """Backwards compatibility property for 'DATA_IN_EXTERNAL_FP'."""
        # Mirrors the constant now living on the Inode class.
        return inode.Inode.DATA_IN_EXTERNAL_FP
@property
def fp_offset(self):
# type: () -> Optional[int]
"""Backwards compatibility property for 'fp_offset'."""
if self.inode is None:
return None
return self.inode.fp_offset
############# END BACKWARDS COMPATIBILITY #################################
def __lt__(self, other):
# This method is used for the bisect.insort_left() when adding a child.
# It needs to return whether self is less than other. Here we use the
# ISO9660 sorting order which is essentially:
#
# 1. The \x00 is always the 'dot' record, and is always first.
# 2. The \x01 is always the 'dotdot' record, and is always second.
# 3. Other entries are sorted lexically; this does not exactly match
# the sorting method specified in Ecma-119, but does OK for now.
#
# Ecma-119 Section 9.3 specifies that we need to pad out the shorter of
# the two files with 0x20 (spaces), then compare byte-by-byte until
# they differ. However, we can more easily just do the string equality
# comparison, since it will always be the case that 0x20 will be less
# than any of the other allowed characters in the strings.
if self.file_ident == b'\x00':
if other.file_ident == b'\x00':
return False
return True
if other.file_ident == b'\x00':
return False
if self.file_ident == b'\x01':
if other.file_ident == b'\x00':
return False
return True
if other.file_ident == b'\x01':
# If self.file_ident was '\x00', it would have been caught above.
return False
return self.file_ident < other.file_ident
def __ne__(self, other):
# type: (object) -> bool
if not isinstance(other, DirectoryRecord):
return NotImplemented
# Note that we very specifically do not check the extent_location when
# comparing directory records. In a lazy-extent assigning world, the
# extents are not reliable, so we just rely on the rest of the fields to
# tell us if two directory records are the same.
return self.dr_len != other.dr_len or self.xattr_len != other.xattr_len or \
self.data_length != other.data_length or self.date != other.date or \
self.file_flags != other.file_flags or \
self.file_unit_size != other.file_unit_size or \
self.interleave_gap_size != other.interleave_gap_size or \
self.seqnum != other.seqnum or self.len_fi != other.len_fi or \
self.file_ident != other.file_ident
def __eq__(self, other):
# type: (object) -> bool
return not self.__ne__(other)
|
"""Test of table output."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(KeyComboAction("End"))
sequence.append(KeyComboAction("<Shift>Right"))
sequence.append(KeyComboAction("Down"))
sequence.append(KeyComboAction("Return"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
"Table Where Am I",
["BRAILLE LINE: 'gtk-demo application Shopping list frame table Number column header 3 bottles of coke '",
" VISIBLE: '3 bottles of coke ', cursor=1",
"SPEECH OUTPUT: 'table Number table cell 3 column 1 of 3 row 1 of 5'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Next row",
["BRAILLE LINE: 'gtk-demo application Shopping list frame table Number column header 5 packages of noodles '",
" VISIBLE: '5 packages of noodles ', cursor=1",
"SPEECH OUTPUT: '5 packages of noodles'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
"Table Where Am I (again)",
["BRAILLE LINE: 'gtk-demo application Shopping list frame table Number column header 5 packages of noodles '",
" VISIBLE: '5 packages of noodles ', cursor=1",
"SPEECH OUTPUT: 'table Number table cell 5 column 1 of 3 row 2 of 5'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("F11"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"Turn row reading off",
["BRAILLE LINE: 'Speak cell'",
" VISIBLE: 'Speak cell', cursor=0",
"SPEECH OUTPUT: 'Speak cell'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
"Table Right to the Product column in the packages of noodles row",
["BRAILLE LINE: 'gtk-demo application Shopping list frame table Number column header 5 packages of noodles '",
" VISIBLE: '5 packages of noodles ', cursor=1",
"BRAILLE LINE: 'gtk-demo application Shopping list frame table Product column header packages of noodles table cell'",
" VISIBLE: 'packages of noodles table cell', cursor=1",
"SPEECH OUTPUT: 'Product column header packages of noodles'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Table up to bottles of coke",
["BRAILLE LINE: 'gtk-demo application Shopping list frame table Product column header bottles of coke table cell'",
" VISIBLE: 'bottles of coke table cell', cursor=1",
"SPEECH OUTPUT: 'bottles of coke'"]))
sequence.append(KeyComboAction("<Alt>F4"))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
import sys
import os.path
import xml.dom.minidom
from getopt import gnu_getopt
from libglibcodegen import Signature, type_to_gtype, cmp_by_name, \
get_docstring, xml_escape, get_deprecated
# XML namespace of the Telepathy D-Bus spec extensions; the generator reads
# tp:-prefixed attributes (e.g. tp:name-for-bindings, tp:type) from it.
NS_TP = "http://telepathy.freedesktop.org/wiki/DbusSpec#extensions-v0"
class Generator(object):
def __init__(self, dom, prefix, basename, opts):
self.dom = dom
self.__header = []
self.__body = []
self.__docs = []
self.prefix_lc = prefix.lower()
self.prefix_uc = prefix.upper()
self.prefix_mc = prefix.replace('_', '')
self.basename = basename
self.group = opts.get('--group', None)
self.iface_quark_prefix = opts.get('--iface-quark-prefix', None)
self.tp_proxy_api = tuple(map(int,
opts.get('--tp-proxy-api', '0').split('.')))
self.proxy_cls = opts.get('--subclass', 'TpProxy') + ' *'
self.proxy_arg = opts.get('--subclass', 'void') + ' *'
self.proxy_assert = opts.get('--subclass-assert', 'TP_IS_PROXY')
self.proxy_doc = ('A #%s or subclass'
% opts.get('--subclass', 'TpProxy'))
if self.proxy_arg == 'void *':
self.proxy_arg = 'gpointer '
self.reentrant_symbols = set()
try:
filename = opts['--generate-reentrant']
with open(filename, 'r') as f:
for line in f.readlines():
self.reentrant_symbols.add(line.strip())
except KeyError:
pass
self.deprecate_reentrant = opts.get('--deprecate-reentrant', None)
self.deprecation_attribute = opts.get('--deprecation-attribute',
'G_GNUC_DEPRECATED')
def h(self, s):
if isinstance(s, unicode):
s = s.encode('utf-8')
self.__header.append(s)
def b(self, s):
if isinstance(s, unicode):
s = s.encode('utf-8')
self.__body.append(s)
def d(self, s):
if isinstance(s, unicode):
s = s.encode('utf-8')
self.__docs.append(s)
def get_iface_quark(self):
assert self.iface_dbus is not None
assert self.iface_uc is not None
if self.iface_quark_prefix is None:
return 'g_quark_from_static_string (\"%s\")' % self.iface_dbus
else:
return '%s_%s' % (self.iface_quark_prefix, self.iface_uc)
def do_signal(self, iface, signal):
    """Emit everything needed to bind one D-Bus signal.

    iface: interface name with the common prefix stripped (e.g.
        'Connection')
    signal: the <signal> DOM element

    Generates: the user-visible callback typedef (header + gtk-doc),
    the dbus-glib argument collector, the invoker that calls the user
    callback, and the public ..._connect_to_<signal> function.

    Fix: the G_TYPE_INT64 collector branch previously emitted
    g_value_set_int (silently truncating 64-bit values); it now emits
    g_value_set_int64, matching the UINT64 branch.

    Raises AssertionError if tp:name-for-bindings does not match the
    signal's real name.
    """
    iface_lc = iface.lower()

    member = signal.getAttribute('name')
    member_lc = signal.getAttribute('tp:name-for-bindings')
    if member != member_lc.replace('_', ''):
        raise AssertionError('Signal %s tp:name-for-bindings (%s) does '
            'not match' % (member, member_lc))
    member_lc = member_lc.lower()

    # Collect the signal arguments; unnamed ones get positional names.
    arg_count = 0
    args = []
    for arg in signal.getElementsByTagName('arg'):
        name = arg.getAttribute('name')
        type = arg.getAttribute('type')
        tp_type = arg.getAttribute('tp:type')

        if not name:
            name = 'arg%u' % arg_count
            arg_count += 1
        else:
            name = 'arg_%s' % name

        info = type_to_gtype(type)
        args.append((name, info, tp_type, arg))

    callback_name = ('%s_%s_signal_callback_%s'
        % (self.prefix_lc, iface_lc, member_lc))
    collect_name = ('_%s_%s_collect_args_of_%s'
        % (self.prefix_lc, iface_lc, member_lc))
    invoke_name = ('_%s_%s_invoke_callback_for_%s'
        % (self.prefix_lc, iface_lc, member_lc))

    # Example:
    #
    # typedef void (*tp_cli_connection_signal_callback_new_channel)
    # (TpConnection *proxy, const gchar *arg_object_path,
    # const gchar *arg_channel_type, guint arg_handle_type,
    # guint arg_handle, gboolean arg_suppress_handler,
    # gpointer user_data, GObject *weak_object);

    self.d('/**')
    self.d(' * %s:' % callback_name)
    self.d(' * @proxy: The proxy on which %s_%s_connect_to_%s ()'
        % (self.prefix_lc, iface_lc, member_lc))
    self.d(' * was called')
    for arg in args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        docs = get_docstring(elt) or '(Undocumented)'
        if ctype == 'guint ' and tp_type != '':
            docs += ' (#%s)' % ('Tp' + tp_type.replace('_', ''))
        self.d(' * @%s: %s' % (name, xml_escape(docs)))
    self.d(' * @user_data: User-supplied data')
    self.d(' * @weak_object: User-supplied weakly referenced object')
    self.d(' *')
    self.d(' * Represents the signature of a callback for the signal %s.'
        % member)
    self.d(' */')
    self.d('')

    self.h('typedef void (*%s) (%sproxy,'
        % (callback_name, self.proxy_cls))
    for arg in args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        const = pointer and 'const ' or ''
        self.h(' %s%s%s,' % (const, ctype, name))
    self.h(' gpointer user_data, GObject *weak_object);')

    # Argument collector: only needed when the signal has arguments.
    if args:
        self.b('static void')
        self.b('%s (DBusGProxy *proxy G_GNUC_UNUSED,' % collect_name)
        for arg in args:
            name, info, tp_type, elt = arg
            ctype, gtype, marshaller, pointer = info
            const = pointer and 'const ' or ''
            self.b(' %s%s%s,' % (const, ctype, name))
        self.b(' TpProxySignalConnection *sc)')
        self.b('{')
        self.b(' GValueArray *args = g_value_array_new (%d);' % len(args))
        self.b(' GValue blank = { 0 };')
        self.b(' guint i;')
        self.b('')
        self.b(' g_value_init (&blank, G_TYPE_INT);')
        self.b('')
        self.b(' for (i = 0; i < %d; i++)' % len(args))
        self.b(' g_value_array_append (args, &blank);')
        self.b('')
        for i, arg in enumerate(args):
            name, info, tp_type, elt = arg
            ctype, gtype, marshaller, pointer = info
            self.b(' g_value_unset (args->values + %d);' % i)
            self.b(' g_value_init (args->values + %d, %s);' % (i, gtype))
            if gtype == 'G_TYPE_STRING':
                self.b(' g_value_set_string (args->values + %d, %s);'
                    % (i, name))
            elif marshaller == 'BOXED':
                self.b(' g_value_set_boxed (args->values + %d, %s);'
                    % (i, name))
            elif gtype == 'G_TYPE_UCHAR':
                self.b(' g_value_set_uchar (args->values + %d, %s);'
                    % (i, name))
            elif gtype == 'G_TYPE_BOOLEAN':
                self.b(' g_value_set_boolean (args->values + %d, %s);'
                    % (i, name))
            elif gtype == 'G_TYPE_INT':
                self.b(' g_value_set_int (args->values + %d, %s);'
                    % (i, name))
            elif gtype == 'G_TYPE_UINT':
                self.b(' g_value_set_uint (args->values + %d, %s);'
                    % (i, name))
            elif gtype == 'G_TYPE_INT64':
                # FIX: was g_value_set_int, which truncated 64-bit values
                self.b(' g_value_set_int64 (args->values + %d, %s);'
                    % (i, name))
            elif gtype == 'G_TYPE_UINT64':
                self.b(' g_value_set_uint64 (args->values + %d, %s);'
                    % (i, name))
            elif gtype == 'G_TYPE_DOUBLE':
                self.b(' g_value_set_double (args->values + %d, %s);'
                    % (i, name))
            else:
                assert False, ("Don't know how to put %s in a GValue"
                    % gtype)
        self.b('')
        self.b(' tp_proxy_signal_connection_v0_take_results (sc, args);')
        self.b('}')

    # Invoker: unpacks the GValueArray and calls the user's callback.
    self.b('static void')
    self.b('%s (TpProxy *tpproxy,' % invoke_name)
    self.b(' GError *error G_GNUC_UNUSED,')
    self.b(' GValueArray *args,')
    self.b(' GCallback generic_callback,')
    self.b(' gpointer user_data,')
    self.b(' GObject *weak_object)')
    self.b('{')
    self.b(' %s callback =' % callback_name)
    self.b(' (%s) generic_callback;' % callback_name)
    self.b('')
    self.b(' if (callback != NULL)')
    self.b(' callback (g_object_ref (tpproxy),')

    # FIXME: factor out into a function
    for i, arg in enumerate(args):
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info

        if marshaller == 'BOXED':
            self.b(' g_value_get_boxed (args->values + %d),' % i)
        elif gtype == 'G_TYPE_STRING':
            self.b(' g_value_get_string (args->values + %d),' % i)
        elif gtype == 'G_TYPE_UCHAR':
            self.b(' g_value_get_uchar (args->values + %d),' % i)
        elif gtype == 'G_TYPE_BOOLEAN':
            self.b(' g_value_get_boolean (args->values + %d),' % i)
        elif gtype == 'G_TYPE_UINT':
            self.b(' g_value_get_uint (args->values + %d),' % i)
        elif gtype == 'G_TYPE_INT':
            self.b(' g_value_get_int (args->values + %d),' % i)
        elif gtype == 'G_TYPE_UINT64':
            self.b(' g_value_get_uint64 (args->values + %d),' % i)
        elif gtype == 'G_TYPE_INT64':
            self.b(' g_value_get_int64 (args->values + %d),' % i)
        elif gtype == 'G_TYPE_DOUBLE':
            self.b(' g_value_get_double (args->values + %d),' % i)
        else:
            assert False, "Don't know how to get %s from a GValue" % gtype

    self.b(' user_data,')
    self.b(' weak_object);')
    self.b('')
    if len(args) > 0:
        self.b(' g_value_array_free (args);')
    else:
        self.b(' if (args != NULL)')
        self.b(' g_value_array_free (args);')
    self.b('')
    self.b(' g_object_unref (tpproxy);')
    self.b('}')

    # Example:
    #
    # TpProxySignalConnection *
    # tp_cli_connection_connect_to_new_channel
    # (TpConnection *proxy,
    # tp_cli_connection_signal_callback_new_channel callback,
    # gpointer user_data,
    # GDestroyNotify destroy);
    #
    # destroy is invoked when the signal becomes disconnected. This
    # is either because the signal has been disconnected explicitly
    # by the user, because the TpProxy has become invalid and
    # emitted the 'invalidated' signal, or because the weakly referenced
    # object has gone away.

    self.d('/**')
    self.d(' * %s_%s_connect_to_%s:'
        % (self.prefix_lc, iface_lc, member_lc))
    self.d(' * @proxy: %s' % self.proxy_doc)
    self.d(' * @callback: Callback to be called when the signal is')
    self.d(' * received')
    self.d(' * @user_data: User-supplied data for the callback')
    self.d(' * @destroy: Destructor for the user-supplied data, which')
    self.d(' * will be called when this signal is disconnected, or')
    self.d(' * before this function returns %NULL')
    self.d(' * @weak_object: A #GObject which will be weakly referenced; ')
    self.d(' * if it is destroyed, this callback will automatically be')
    self.d(' * disconnected')
    self.d(' * @error: If not %NULL, used to raise an error if %NULL is')
    self.d(' * returned')
    self.d(' *')
    self.d(' * Connect a handler to the signal %s.' % member)
    self.d(' *')
    self.d(' * %s' % xml_escape(get_docstring(signal) or '(Undocumented)'))
    self.d(' *')
    self.d(' * Returns: a #TpProxySignalConnection containing all of the')
    self.d(' * above, which can be used to disconnect the signal; or')
    self.d(' * %NULL if the proxy does not have the desired interface')
    self.d(' * or has become invalid.')
    self.d(' */')
    self.d('')

    self.h('TpProxySignalConnection *%s_%s_connect_to_%s (%sproxy,'
        % (self.prefix_lc, iface_lc, member_lc, self.proxy_arg))
    self.h(' %s callback,' % callback_name)
    self.h(' gpointer user_data,')
    self.h(' GDestroyNotify destroy,')
    self.h(' GObject *weak_object,')
    self.h(' GError **error);')
    self.h('')

    self.b('TpProxySignalConnection *')
    self.b('%s_%s_connect_to_%s (%sproxy,'
        % (self.prefix_lc, iface_lc, member_lc, self.proxy_arg))
    self.b(' %s callback,' % callback_name)
    self.b(' gpointer user_data,')
    self.b(' GDestroyNotify destroy,')
    self.b(' GObject *weak_object,')
    self.b(' GError **error)')
    self.b('{')
    self.b(' GType expected_types[%d] = {' % (len(args) + 1))
    for arg in args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        self.b(' %s,' % gtype)
    self.b(' G_TYPE_INVALID };')
    self.b('')
    self.b(' g_return_val_if_fail (%s (proxy), NULL);'
        % self.proxy_assert)
    self.b(' g_return_val_if_fail (callback != NULL, NULL);')
    self.b('')
    self.b(' return tp_proxy_signal_connection_v0_new ((TpProxy *) proxy,')
    self.b(' %s, \"%s\",' % (self.get_iface_quark(), member))
    self.b(' expected_types,')
    if args:
        self.b(' G_CALLBACK (%s),' % collect_name)
    else:
        self.b(' NULL, /* no args => no collector function */')
    self.b(' %s,' % invoke_name)
    self.b(' G_CALLBACK (callback), user_data, destroy,')
    self.b(' weak_object, error);')
    self.b('}')
    self.b('')
def do_method(self, iface, method):
    """Emit everything needed to bind one D-Bus method.

    iface: interface name with the common prefix stripped
    method: the <method> DOM element

    Generates: the async reply callback typedef (header + gtk-doc), the
    dbus-glib result collector, the invoker that calls the user
    callback, the public async ..._call_<method> stub, and (via
    do_method_reentrant) an optional blocking run_* variant.

    Fixes: the G_TYPE_INT64 / G_TYPE_UINT64 collector branches
    previously emitted g_value_set_int / g_value_set_uint (truncating
    64-bit results); they now emit g_value_set_int64 /
    g_value_set_uint64.

    Raises AssertionError if tp:name-for-bindings does not match the
    method's real name.
    """
    iface_lc = iface.lower()

    member = method.getAttribute('name')
    member_lc = method.getAttribute('tp:name-for-bindings')
    if member != member_lc.replace('_', ''):
        raise AssertionError('Method %s tp:name-for-bindings (%s) does '
            'not match' % (member, member_lc))
    member_lc = member_lc.lower()

    # Partition arguments by direction; unnamed ones get positional names.
    in_count = 0
    ret_count = 0
    in_args = []
    out_args = []
    for arg in method.getElementsByTagName('arg'):
        name = arg.getAttribute('name')
        direction = arg.getAttribute('direction')
        type = arg.getAttribute('type')
        tp_type = arg.getAttribute('tp:type')

        if direction != 'out':
            if not name:
                name = 'in%u' % in_count
                in_count += 1
            else:
                name = 'in_%s' % name
        else:
            if not name:
                name = 'out%u' % ret_count
                ret_count += 1
            else:
                name = 'out_%s' % name

        info = type_to_gtype(type)
        if direction != 'out':
            in_args.append((name, info, tp_type, arg))
        else:
            out_args.append((name, info, tp_type, arg))

    # Async reply callback type
    # Example:
    # void (*tp_cli_properties_interface_callback_for_get_properties)
    # (TpProxy *proxy,
    # const GPtrArray *out0,
    # const GError *error,
    # gpointer user_data,
    # GObject *weak_object);

    self.d('/**')
    self.d(' * %s_%s_callback_for_%s:'
        % (self.prefix_lc, iface_lc, member_lc))
    self.d(' * @proxy: the proxy on which the call was made')
    for arg in out_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        docs = xml_escape(get_docstring(elt) or '(Undocumented)')
        if ctype == 'guint ' and tp_type != '':
            docs += ' (#%s)' % ('Tp' + tp_type.replace('_', ''))
        self.d(' * @%s: Used to return an \'out\' argument if @error is '
            '%%NULL: %s'
            % (name, docs))
    self.d(' * @error: %NULL on success, or an error on failure')
    self.d(' * @user_data: user-supplied data')
    self.d(' * @weak_object: user-supplied object')
    self.d(' *')
    self.d(' * Signature of the callback called when a %s method call'
        % member)
    self.d(' * succeeds or fails.')
    deprecated = method.getElementsByTagName('tp:deprecated')
    if deprecated:
        d = deprecated[0]
        self.d(' *')
        self.d(' * Deprecated: %s' % xml_escape(get_deprecated(d)))
    self.d(' */')
    self.d('')

    callback_name = '%s_%s_callback_for_%s' % (self.prefix_lc, iface_lc,
        member_lc)

    self.h('typedef void (*%s) (%sproxy,'
        % (callback_name, self.proxy_cls))
    for arg in out_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        const = pointer and 'const ' or ''
        self.h(' %s%s%s,' % (const, ctype, name))
    self.h(' const GError *error, gpointer user_data,')
    self.h(' GObject *weak_object);')
    self.h('')

    # Async callback implementation
    invoke_callback = '_%s_%s_invoke_callback_%s' % (self.prefix_lc,
        iface_lc,
        member_lc)
    collect_callback = '_%s_%s_collect_callback_%s' % (self.prefix_lc,
        iface_lc,
        member_lc)

    # The callback called by dbus-glib; this ends the call and collects
    # the results into a GValueArray.
    self.b('static void')
    self.b('%s (DBusGProxy *proxy,' % collect_callback)
    self.b(' DBusGProxyCall *call,')
    self.b(' gpointer user_data)')
    self.b('{')
    self.b(' GError *error = NULL;')
    if len(out_args) > 0:
        self.b(' GValueArray *args;')
        self.b(' GValue blank = { 0 };')
        self.b(' guint i;')
        for arg in out_args:
            name, info, tp_type, elt = arg
            ctype, gtype, marshaller, pointer = info
            # "We handle variants specially; the caller is expected to
            # have already allocated storage for them". Thanks,
            # dbus-glib...
            if gtype == 'G_TYPE_VALUE':
                self.b(' GValue *%s = g_new0 (GValue, 1);' % name)
            else:
                self.b(' %s%s;' % (ctype, name))
    self.b('')
    self.b(' dbus_g_proxy_end_call (proxy, call, &error,')
    for arg in out_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        if gtype == 'G_TYPE_VALUE':
            self.b(' %s, %s,' % (gtype, name))
        else:
            self.b(' %s, &%s,' % (gtype, name))
    self.b(' G_TYPE_INVALID);')
    if len(out_args) == 0:
        self.b(' tp_proxy_pending_call_v0_take_results (user_data, error,'
            'NULL);')
    else:
        self.b('')
        self.b(' if (error != NULL)')
        self.b(' {')
        self.b(' tp_proxy_pending_call_v0_take_results (user_data, error,')
        self.b(' NULL);')
        for arg in out_args:
            name, info, tp_type, elt = arg
            ctype, gtype, marshaller, pointer = info
            if gtype == 'G_TYPE_VALUE':
                self.b(' g_free (%s);' % name)
        self.b(' return;')
        self.b(' }')
        self.b('')
        self.b(' args = g_value_array_new (%d);' % len(out_args))
        self.b(' g_value_init (&blank, G_TYPE_INT);')
        self.b('')
        self.b(' for (i = 0; i < %d; i++)' % len(out_args))
        self.b(' g_value_array_append (args, &blank);')
        for i, arg in enumerate(out_args):
            name, info, tp_type, elt = arg
            ctype, gtype, marshaller, pointer = info
            self.b('')
            self.b(' g_value_unset (args->values + %d);' % i)
            self.b(' g_value_init (args->values + %d, %s);' % (i, gtype))
            if gtype == 'G_TYPE_STRING':
                self.b(' g_value_take_string (args->values + %d, %s);'
                    % (i, name))
            elif marshaller == 'BOXED':
                self.b(' g_value_take_boxed (args->values + %d, %s);'
                    % (i, name))
            elif gtype == 'G_TYPE_UCHAR':
                self.b(' g_value_set_uchar (args->values + %d, %s);'
                    % (i, name))
            elif gtype == 'G_TYPE_BOOLEAN':
                self.b(' g_value_set_boolean (args->values + %d, %s);'
                    % (i, name))
            elif gtype == 'G_TYPE_INT':
                self.b(' g_value_set_int (args->values + %d, %s);'
                    % (i, name))
            elif gtype == 'G_TYPE_UINT':
                self.b(' g_value_set_uint (args->values + %d, %s);'
                    % (i, name))
            elif gtype == 'G_TYPE_INT64':
                # FIX: was g_value_set_int, which truncated 64-bit values
                self.b(' g_value_set_int64 (args->values + %d, %s);'
                    % (i, name))
            elif gtype == 'G_TYPE_UINT64':
                # FIX: was g_value_set_uint, which truncated 64-bit values
                self.b(' g_value_set_uint64 (args->values + %d, %s);'
                    % (i, name))
            elif gtype == 'G_TYPE_DOUBLE':
                self.b(' g_value_set_double (args->values + %d, %s);'
                    % (i, name))
            else:
                assert False, ("Don't know how to put %s in a GValue"
                    % gtype)
        self.b(' tp_proxy_pending_call_v0_take_results (user_data, '
            'NULL, args);')
    self.b('}')

    # Invoker: unpacks the collected results and calls the user callback.
    self.b('static void')
    self.b('%s (TpProxy *self,' % invoke_callback)
    self.b(' GError *error,')
    self.b(' GValueArray *args,')
    self.b(' GCallback generic_callback,')
    self.b(' gpointer user_data,')
    self.b(' GObject *weak_object)')
    self.b('{')
    self.b(' %s callback = (%s) generic_callback;'
        % (callback_name, callback_name))
    self.b('')
    self.b(' if (error != NULL)')
    self.b(' {')
    self.b(' callback ((%s) self,' % self.proxy_cls)
    for arg in out_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        if marshaller == 'BOXED' or pointer:
            self.b(' NULL,')
        elif gtype == 'G_TYPE_DOUBLE':
            self.b(' 0.0,')
        else:
            self.b(' 0,')
    self.b(' error, user_data, weak_object);')
    self.b(' g_error_free (error);')
    self.b(' return;')
    self.b(' }')
    self.b(' callback ((%s) self,' % self.proxy_cls)

    # FIXME: factor out into a function
    for i, arg in enumerate(out_args):
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        if marshaller == 'BOXED':
            self.b(' g_value_get_boxed (args->values + %d),' % i)
        elif gtype == 'G_TYPE_STRING':
            self.b(' g_value_get_string (args->values + %d),' % i)
        elif gtype == 'G_TYPE_UCHAR':
            self.b(' g_value_get_uchar (args->values + %d),' % i)
        elif gtype == 'G_TYPE_BOOLEAN':
            self.b(' g_value_get_boolean (args->values + %d),' % i)
        elif gtype == 'G_TYPE_UINT':
            self.b(' g_value_get_uint (args->values + %d),' % i)
        elif gtype == 'G_TYPE_INT':
            self.b(' g_value_get_int (args->values + %d),' % i)
        elif gtype == 'G_TYPE_UINT64':
            self.b(' g_value_get_uint64 (args->values + %d),' % i)
        elif gtype == 'G_TYPE_INT64':
            self.b(' g_value_get_int64 (args->values + %d),' % i)
        elif gtype == 'G_TYPE_DOUBLE':
            self.b(' g_value_get_double (args->values + %d),' % i)
        else:
            assert False, "Don't know how to get %s from a GValue" % gtype

    self.b(' error, user_data, weak_object);')
    self.b('')
    if len(out_args) > 0:
        self.b(' g_value_array_free (args);')
    else:
        self.b(' if (args != NULL)')
        self.b(' g_value_array_free (args);')
    self.b('}')
    self.b('')

    # Async stub
    # Example:
    # TpProxyPendingCall *
    # tp_cli_properties_interface_call_get_properties
    # (gpointer proxy,
    # gint timeout_ms,
    # const GArray *in_properties,
    # tp_cli_properties_interface_callback_for_get_properties callback,
    # gpointer user_data,
    # GDestroyNotify *destructor);
    self.h('TpProxyPendingCall *%s_%s_call_%s (%sproxy,'
        % (self.prefix_lc, iface_lc, member_lc, self.proxy_arg))
    self.h(' gint timeout_ms,')

    self.d('/**')
    self.d(' * %s_%s_call_%s:'
        % (self.prefix_lc, iface_lc, member_lc))
    self.d(' * @proxy: the #TpProxy')
    self.d(' * @timeout_ms: the timeout in milliseconds, or -1 to use the')
    self.d(' * default')
    for arg in in_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        docs = xml_escape(get_docstring(elt) or '(Undocumented)')
        if ctype == 'guint ' and tp_type != '':
            docs += ' (#%s)' % ('Tp' + tp_type.replace('_', ''))
        self.d(' * @%s: Used to pass an \'in\' argument: %s'
            % (name, docs))
    self.d(' * @callback: called when the method call succeeds or fails;')
    self.d(' * may be %NULL to make a "fire and forget" call with no ')
    self.d(' * reply tracking')
    self.d(' * @user_data: user-supplied data passed to the callback;')
    self.d(' * must be %NULL if @callback is %NULL')
    self.d(' * @destroy: called with the user_data as argument, after the')
    self.d(' * call has succeeded, failed or been cancelled;')
    self.d(' * must be %NULL if @callback is %NULL')
    self.d(' * @weak_object: If not %NULL, a #GObject which will be ')
    self.d(' * weakly referenced; if it is destroyed, this call ')
    self.d(' * will automatically be cancelled. Must be %NULL if ')
    self.d(' * @callback is %NULL')
    self.d(' *')
    self.d(' * Start a %s method call.' % member)
    self.d(' *')
    self.d(' * %s' % xml_escape(get_docstring(method) or '(Undocumented)'))
    self.d(' *')
    self.d(' * Returns: a #TpProxyPendingCall representing the call in')
    self.d(' * progress. It is borrowed from the object, and will become')
    self.d(' * invalid when the callback is called, the call is')
    self.d(' * cancelled or the #TpProxy becomes invalid.')
    deprecated = method.getElementsByTagName('tp:deprecated')
    if deprecated:
        d = deprecated[0]
        self.d(' *')
        self.d(' * Deprecated: %s' % xml_escape(get_deprecated(d)))
    self.d(' */')
    self.d('')

    self.b('TpProxyPendingCall *\n%s_%s_call_%s (%sproxy,'
        % (self.prefix_lc, iface_lc, member_lc, self.proxy_arg))
    self.b(' gint timeout_ms,')
    for arg in in_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        const = pointer and 'const ' or ''
        self.h(' %s%s%s,' % (const, ctype, name))
        self.b(' %s%s%s,' % (const, ctype, name))
    self.h(' %s callback,' % callback_name)
    self.h(' gpointer user_data,')
    self.h(' GDestroyNotify destroy,')
    self.h(' GObject *weak_object);')
    self.h('')
    self.b(' %s callback,' % callback_name)
    self.b(' gpointer user_data,')
    self.b(' GDestroyNotify destroy,')
    self.b(' GObject *weak_object)')
    self.b('{')
    self.b(' GError *error = NULL;')
    self.b(' GQuark interface = %s;' % self.get_iface_quark())
    self.b(' DBusGProxy *iface;')
    self.b('')
    self.b(' g_return_val_if_fail (%s (proxy), NULL);'
        % self.proxy_assert)
    self.b(' g_return_val_if_fail (callback != NULL || '
        'user_data == NULL, NULL);')
    self.b(' g_return_val_if_fail (callback != NULL || '
        'destroy == NULL, NULL);')
    self.b(' g_return_val_if_fail (callback != NULL || '
        'weak_object == NULL, NULL);')
    self.b('')
    self.b(' iface = tp_proxy_borrow_interface_by_id (')
    self.b(' (TpProxy *) proxy,')
    self.b(' interface, &error);')
    self.b('')
    self.b(' if (iface == NULL)')
    self.b(' {')
    self.b(' if (callback != NULL)')
    self.b(' callback (proxy,')
    for arg in out_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        if pointer:
            self.b(' NULL,')
        else:
            self.b(' 0,')
    self.b(' error, user_data, weak_object);')
    self.b('')
    self.b(' if (destroy != NULL)')
    self.b(' destroy (user_data);')
    self.b('')
    self.b(' g_error_free (error);')
    self.b(' return NULL;')
    self.b(' }')
    self.b('')
    self.b(' if (callback == NULL)')
    self.b(' {')
    self.b(' dbus_g_proxy_call_no_reply (iface, "%s",' % member)
    for arg in in_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        self.b(' %s, %s,' % (gtype, name))
    self.b(' G_TYPE_INVALID);')
    self.b(' return NULL;')
    self.b(' }')
    self.b(' else')
    self.b(' {')
    self.b(' TpProxyPendingCall *data;')
    self.b('')
    self.b(' data = tp_proxy_pending_call_v0_new ((TpProxy *) proxy,')
    self.b(' interface, "%s", iface,' % member)
    self.b(' %s,' % invoke_callback)
    self.b(' G_CALLBACK (callback), user_data, destroy,')
    self.b(' weak_object, FALSE);')
    self.b(' tp_proxy_pending_call_v0_take_pending_call (data,')
    self.b(' dbus_g_proxy_begin_call_with_timeout (iface,')
    self.b(' "%s",' % member)
    self.b(' %s,' % collect_callback)
    self.b(' data,')
    self.b(' tp_proxy_pending_call_v0_completed,')
    self.b(' timeout_ms,')
    for arg in in_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        self.b(' %s, %s,' % (gtype, name))
    self.b(' G_TYPE_INVALID));')
    self.b('')
    self.b(' return data;')
    self.b(' }')
    self.b('}')
    self.b('')

    self.do_method_reentrant(method, iface_lc, member, member_lc,
        in_args, out_args, collect_callback)

    # leave a gap for the end of the method
    self.d('')
    self.b('')
    self.h('')
def do_method_reentrant(self, method, iface_lc, member, member_lc, in_args,
        out_args, collect_callback):
    """Emit a blocking run_* variant of a method, if requested.

    Only emitted when the symbol appears in self.reentrant_symbols
    (the --generate-reentrant file).  The generated C function starts
    the async call, then spins a private GMainLoop until the reply
    arrives, copying 'out' arguments into caller-supplied pointers.

    method: the <method> DOM element
    iface_lc / member / member_lc: precomputed name forms from do_method
    in_args / out_args: (name, type-info, tp:type, element) tuples
    collect_callback: name of the collector emitted by do_method,
        reused here to gather the reply values
    """
    # Reentrant blocking calls
    # Example:
    # gboolean tp_cli_properties_interface_run_get_properties
    # (gpointer proxy,
    # gint timeout_ms,
    # const GArray *in_properties,
    # GPtrArray **out0,
    # GError **error,
    # GMainLoop **loop);
    run_method_name = '%s_%s_run_%s' % (self.prefix_lc, iface_lc, member_lc)
    if run_method_name not in self.reentrant_symbols:
        return
    # Per-call state shared between the stub and the finish callback.
    self.b('typedef struct {')
    self.b(' GMainLoop *loop;')
    self.b(' GError **error;')
    for arg in out_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        self.b(' %s*%s;' % (ctype, name))
    self.b(' unsigned success:1;')
    self.b(' unsigned completed:1;')
    self.b('} _%s_%s_run_state_%s;'
        % (self.prefix_lc, iface_lc, member_lc))
    reentrant_invoke = '_%s_%s_finish_running_%s' % (self.prefix_lc,
        iface_lc,
        member_lc)
    # Finish callback: records success/failure, quits the loop, and
    # copies each requested 'out' argument out of the GValueArray.
    self.b('static void')
    self.b('%s (TpProxy *self G_GNUC_UNUSED,' % reentrant_invoke)
    self.b(' GError *error,')
    self.b(' GValueArray *args,')
    self.b(' GCallback unused G_GNUC_UNUSED,')
    self.b(' gpointer user_data G_GNUC_UNUSED,')
    self.b(' GObject *unused2 G_GNUC_UNUSED)')
    self.b('{')
    self.b(' _%s_%s_run_state_%s *state = user_data;'
        % (self.prefix_lc, iface_lc, member_lc))
    self.b('')
    self.b(' state->success = (error == NULL);')
    self.b(' state->completed = TRUE;')
    self.b(' g_main_loop_quit (state->loop);')
    self.b('')
    self.b(' if (error != NULL)')
    self.b(' {')
    self.b(' if (state->error != NULL)')
    self.b(' *state->error = error;')
    self.b(' else')
    self.b(' g_error_free (error);')
    self.b('')
    self.b(' return;')
    self.b(' }')
    self.b('')
    for i, arg in enumerate(out_args):
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        self.b(' if (state->%s != NULL)' % name)
        if marshaller == 'BOXED':
            self.b(' *state->%s = g_value_dup_boxed ('
                'args->values + %d);' % (name, i))
        elif marshaller == 'STRING':
            self.b(' *state->%s = g_value_dup_string '
                '(args->values + %d);' % (name, i))
        elif marshaller in ('UCHAR', 'BOOLEAN', 'INT', 'UINT',
                'INT64', 'UINT64', 'DOUBLE'):
            # marshaller.lower() yields the matching g_value_get_* suffix
            self.b(' *state->%s = g_value_get_%s (args->values + %d);'
                % (name, marshaller.lower(), i))
        else:
            assert False, "Don't know how to copy %s" % gtype
    self.b('')
    if len(out_args) > 0:
        self.b(' g_value_array_free (args);')
    else:
        self.b(' if (args != NULL)')
        self.b(' g_value_array_free (args);')
    self.b('}')
    self.b('')
    if self.deprecate_reentrant:
        self.h('#ifndef %s' % self.deprecate_reentrant)
    self.h('gboolean %s (%sproxy,'
        % (run_method_name, self.proxy_arg))
    self.h(' gint timeout_ms,')
    self.d('/**')
    self.d(' * %s:' % run_method_name)
    self.d(' * @proxy: %s' % self.proxy_doc)
    self.d(' * @timeout_ms: Timeout in milliseconds, or -1 for default')
    for arg in in_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        docs = xml_escape(get_docstring(elt) or '(Undocumented)')
        if ctype == 'guint ' and tp_type != '':
            docs += ' (#%s)' % ('Tp' + tp_type.replace('_', ''))
        self.d(' * @%s: Used to pass an \'in\' argument: %s'
            % (name, docs))
    for arg in out_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        self.d(' * @%s: Used to return an \'out\' argument if %%TRUE is '
            'returned: %s'
            % (name, xml_escape(get_docstring(elt) or '(Undocumented)')))
    self.d(' * @error: If not %NULL, used to return errors if %FALSE ')
    self.d(' * is returned')
    self.d(' * @loop: If not %NULL, set before re-entering ')
    self.d(' * the main loop, to point to a #GMainLoop ')
    self.d(' * which can be used to cancel this call with ')
    self.d(' * g_main_loop_quit(), causing a return of ')
    self.d(' * %FALSE with @error set to %TP_DBUS_ERROR_CANCELLED')
    self.d(' *')
    self.d(' * Call the method %s and run the main loop' % member)
    self.d(' * until it returns. Before calling this method, you must')
    self.d(' * add a reference to any borrowed objects you need to keep,')
    self.d(' * and generally ensure that everything is in a consistent')
    self.d(' * state.')
    self.d(' *')
    self.d(' * %s' % xml_escape(get_docstring(method) or '(Undocumented)'))
    self.d(' *')
    self.d(' * Returns: TRUE on success, FALSE and sets @error on error')
    deprecated = method.getElementsByTagName('tp:deprecated')
    if deprecated:
        d = deprecated[0]
        self.d(' *')
        self.d(' * Deprecated: %s' % xml_escape(get_deprecated(d)))
    self.d(' */')
    self.d('')
    self.b('gboolean\n%s (%sproxy,'
        % (run_method_name, self.proxy_arg))
    self.b(' gint timeout_ms,')
    # Header and body signatures are built in lockstep.
    for arg in in_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        const = pointer and 'const ' or ''
        self.h(' %s%s%s,' % (const, ctype, name))
        self.b(' %s%s%s,' % (const, ctype, name))
    for arg in out_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        self.h(' %s*%s,' % (ctype, name))
        self.b(' %s*%s,' % (ctype, name))
    self.h(' GError **error,')
    if self.deprecate_reentrant:
        self.h(' GMainLoop **loop) %s;' % self.deprecation_attribute)
        self.h('#endif /* not %s */' % self.deprecate_reentrant)
    else:
        self.h(' GMainLoop **loop);')
    self.h('')
    self.b(' GError **error,')
    self.b(' GMainLoop **loop)')
    self.b('{')
    self.b(' DBusGProxy *iface;')
    self.b(' GQuark interface = %s;' % self.get_iface_quark())
    self.b(' TpProxyPendingCall *pc;')
    self.b(' _%s_%s_run_state_%s state = {'
        % (self.prefix_lc, iface_lc, member_lc))
    self.b(' NULL /* loop */, error,')
    for arg in out_args:
        name, info, tp_type, elt = arg
        self.b(' %s,' % name)
    # NOTE(review): the /* completed */ and /* success */ labels appear
    # swapped relative to the struct's field order (success is declared
    # first); both initialisers are FALSE so behaviour is unaffected —
    # confirm against upstream before "fixing" the generated comment.
    self.b(' FALSE /* completed */, FALSE /* success */ };')
    self.b('')
    self.b(' g_return_val_if_fail (%s (proxy), FALSE);'
        % self.proxy_assert)
    self.b('')
    self.b(' iface = tp_proxy_borrow_interface_by_id')
    self.b(' ((TpProxy *) proxy, interface, error);')
    self.b('')
    self.b(' if (iface == NULL)')
    self.b(' return FALSE;')
    self.b('')
    self.b(' state.loop = g_main_loop_new (NULL, FALSE);')
    self.b('')
    self.b(' pc = tp_proxy_pending_call_v0_new ((TpProxy *) proxy,')
    self.b(' interface, "%s", iface,' % member)
    self.b(' %s,' % reentrant_invoke)
    self.b(' NULL, &state, NULL, NULL, TRUE);')
    self.b('')
    self.b(' if (loop != NULL)')
    self.b(' *loop = state.loop;')
    self.b('')
    self.b(' tp_proxy_pending_call_v0_take_pending_call (pc,')
    self.b(' dbus_g_proxy_begin_call_with_timeout (iface,')
    self.b(' "%s",' % member)
    self.b(' %s,' % collect_callback)
    self.b(' pc,')
    self.b(' tp_proxy_pending_call_v0_completed,')
    self.b(' timeout_ms,')
    for arg in in_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        const = pointer and 'const ' or ''
        self.b(' %s, %s,' % (gtype, name))
    self.b(' G_TYPE_INVALID));')
    self.b('')
    # The reply may already have arrived re-entrantly during begin_call.
    self.b(' if (!state.completed)')
    self.b(' g_main_loop_run (state.loop);')
    self.b('')
    self.b(' if (!state.completed)')
    self.b(' tp_proxy_pending_call_cancel (pc);')
    self.b('')
    self.b(' if (loop != NULL)')
    self.b(' *loop = NULL;')
    self.b('')
    self.b(' g_main_loop_unref (state.loop);')
    self.b('')
    self.b(' return state.success;')
    self.b('}')
    self.b('')
def do_signal_add(self, signal):
    """Emit a dbus_g_proxy_add_signal() call for one <signal> element.

    Cleanup: removed the unused locals 'marshaller_items' and 'name',
    and the local 'type' which shadowed the builtin.
    """
    gtypes = []
    for arg in signal.getElementsByTagName('arg'):
        # type_to_gtype -> (ctype, GType name, marshaller, is-pointer);
        # only the GType name is needed here.
        gtypes.append(type_to_gtype(arg.getAttribute('type'))[1])
    self.b(' dbus_g_proxy_add_signal (proxy, "%s",'
        % signal.getAttribute('name'))
    for gtype in gtypes:
        self.b(' %s,' % gtype)
    self.b(' G_TYPE_INVALID);')
def do_interface(self, node):
ifaces = node.getElementsByTagName('interface')
assert len(ifaces) == 1
iface = ifaces[0]
name = node.getAttribute('name').replace('/', '')
self.iface = name
self.iface_lc = name.lower()
self.iface_uc = name.upper()
self.iface_mc = name.replace('_', '')
self.iface_dbus = iface.getAttribute('name')
signals = node.getElementsByTagName('signal')
methods = node.getElementsByTagName('method')
if signals:
self.b('static inline void')
self.b('%s_add_signals_for_%s (DBusGProxy *proxy)'
% (self.prefix_lc, name.lower()))
self.b('{')
if self.tp_proxy_api >= (0, 7, 6):
self.b(' if (!tp_proxy_dbus_g_proxy_claim_for_signal_adding '
'(proxy))')
self.b(' return;')
for signal in signals:
self.do_signal_add(signal)
self.b('}')
self.b('')
self.b('')
for signal in signals:
self.do_signal(name, signal)
for method in methods:
self.do_method(name, method)
self.iface_dbus = None
def __call__(self):
    """Run the generator over every <node> and write the three output
    files (<basename>.h, <basename>-body.h, <basename>-gtk-doc.h).

    Fixes: the output files are now written via context managers so
    they are flushed and closed even on error, and the node sort uses
    functools.cmp_to_key so it works on both Python 2.7 and Python 3
    (list.sort() lost its positional cmp argument in Python 3).
    """
    from functools import cmp_to_key

    self.h('G_BEGIN_DECLS')
    self.h('')
    self.b('/* We don\'t want gtkdoc scanning this file, it\'ll get')
    self.b(' * confused by seeing function definitions, so mark it as: */')
    self.b('/*<private_header>*/')
    self.b('')

    nodes = self.dom.getElementsByTagName('node')
    nodes.sort(key=cmp_to_key(cmp_by_name))

    for node in nodes:
        self.do_interface(node)

    if self.group is not None:
        self.b('/*')
        self.b(' * %s_%s_add_signals:' % (self.prefix_lc, self.group))
        self.b(' * @self: the #TpProxy')
        self.b(' * @quark: a quark whose string value is the interface')
        self.b(' * name whose signals should be added')
        self.b(' * @proxy: the D-Bus proxy to which to add the signals')
        self.b(' * @unused: not used for anything')
        self.b(' *')
        self.b(' * Tell dbus-glib that @proxy has the signatures of all')
        self.b(' * signals on the given interface, if it\'s one we')
        self.b(' * support.')
        self.b(' *')
        self.b(' * This function should be used as a signal handler for')
        self.b(' * #TpProxy::interface-added.')
        self.b(' */')
        self.b('static void')
        self.b('%s_%s_add_signals (TpProxy *self G_GNUC_UNUSED,'
            % (self.prefix_lc, self.group))
        self.b(' guint quark,')
        self.b(' DBusGProxy *proxy,')
        self.b(' gpointer unused G_GNUC_UNUSED)')
        self.b('{')
        for node in nodes:
            iface = node.getElementsByTagName('interface')[0]
            self.iface_dbus = iface.getAttribute('name')
            signals = node.getElementsByTagName('signal')
            if not signals:
                continue
            name = node.getAttribute('name').replace('/', '').lower()
            self.iface_uc = name.upper()
            self.b(' if (quark == %s)' % self.get_iface_quark())
            self.b(' %s_add_signals_for_%s (proxy);'
                % (self.prefix_lc, name))
        self.b('}')
        self.b('')

    self.h('G_END_DECLS')
    self.h('')

    with open(self.basename + '.h', 'w') as f:
        f.write('\n'.join(self.__header))
    with open(self.basename + '-body.h', 'w') as f:
        f.write('\n'.join(self.__body))
    with open(self.basename + '-gtk-doc.h', 'w') as f:
        f.write('\n'.join(self.__docs))
def types_to_gtypes(types):
    """Map a list of D-Bus type signatures to their GType names."""
    return [result[1] for result in map(type_to_gtype, types)]
if __name__ == '__main__':
    # Accepted long options; every one takes a value.
    long_opts = ['group=', 'subclass=', 'subclass-assert=',
                 'iface-quark-prefix=', 'tp-proxy-api=',
                 'generate-reentrant=', 'deprecate-reentrant=',
                 'deprecation-attribute=']
    options, argv = gnu_getopt(sys.argv[1:], '', long_opts)
    # gnu_getopt yields (option, value) pairs; later duplicates win,
    # exactly as the manual loop did.
    opts = dict(options)
    dom = xml.dom.minidom.parse(argv[0])
    Generator(dom, argv[1], argv[2], opts)()
|
import os
import signal
import xapi
import image
import xapi.storage.api.volume
from xapi.storage.common import call
from xapi.storage import log
import pickle
import urlparse
# Device-node prefix for blktap2 tapdisk block devices (minor is appended).
blktap2_prefix = "/dev/xen/blktap-2/tapdev"
# Unix-socket path prefixes used to talk to a tapdisk's NBD client/server.
nbdclient_prefix = "/var/run/blktap-control/nbdclient"
nbdserver_prefix = "/var/run/blktap-control/nbdserver"
# Host-local, non-persistent directory holding per-VDI tapdisk metadata.
TD_PROC_METADATA_DIR = "/var/run/nonpersistent/dp-tapdisk"
TD_PROC_METADATA_FILE = "meta.pickle"
class Tapdisk:
    """Handle on a running tapdisk process, identified by (minor, pid).

    f is the image currently opened by the tapdisk (an image.Vhd or
    image.Raw), or None when nothing is open.
    """

    def __init__(self, minor, pid, f):
        self.minor = minor
        self.pid = pid
        self.f = f
        self.secondary = None  # mirror destination

    def __repr__(self):
        return "Tapdisk(%s, %s, %s)" % (self.minor, self.pid, self.f)

    def destroy(self, dbg):
        """Pause and then tear down the tapdisk process entirely."""
        self.pause(dbg)
        call(dbg,
             ["tap-ctl",
              "destroy",
              "-m",
              str(self.minor),
              "-p",
              str(self.pid)])

    def close(self, dbg):
        """Close the currently-open image, leaving the tapdisk running."""
        call(dbg,
             ["tap-ctl",
              "close",
              "-m",
              str(self.minor),
              "-p",
              str(self.pid)])
        self.f = None

    def open(self, dbg, f, o_direct=True):
        """Open image f in this tapdisk; -D disables O_DIRECT access."""
        assert (isinstance(f, image.Vhd) or isinstance(f, image.Raw))
        args = ["tap-ctl", "open", "-m", str(self.minor),
                "-p", str(self.pid), "-a", str(f)]
        if not o_direct:
            args.append("-D")
        call(dbg, args)
        self.f = f

    def pause(self, dbg):
        call(dbg,
             ["tap-ctl",
              "pause",
              "-m",
              str(self.minor),
              "-p",
              str(self.pid)])

    def unpause(self, dbg):
        cmd = ["tap-ctl", "unpause", "-m",
               str(self.minor), "-p", str(self.pid)]
        if self.secondary is not None:
            # Bug fix: the flag was previously "-2 " (trailing space),
            # which tap-ctl's option parser would reject.
            cmd = cmd + ["-2", self.secondary]
        call(dbg, cmd)

    def block_device(self):
        """Path of the /dev node backed by this tapdisk."""
        return blktap2_prefix + str(self.minor)

    """
    ToDo: fdsend needs to be imported
    def start_mirror(self, dbg, fd):
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.connect(nbdclient_prefix + str(self.pid))
        token = "token"
        fdsend.sendfds(sock, token, fds=[fd])
        sock.close()
        self.secondary = "nbd:" + token
        self.pause(dbg)
        self.unpause(dbg)
    """

    def stop_mirror(self, dbg):
        """Drop the mirror destination; pause/unpause applies the change."""
        self.secondary = None
        self.pause(dbg)
        self.unpause(dbg)

    """
    ToDo: fdsend needs to be imported
    def receive_nbd(self, dbg, fd):
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.connect("%s%d.%d" % (nbdserver_prefix, self.pid, self.minor))
        token = "token"
        fdsend.sendfds(sock, token, fds=[fd])
        sock.close()
    """
def create(dbg):
    """Spawn a fresh tapdisk, allocate a minor for it and attach the two."""
    pid = int(call(dbg, ["tap-ctl", "spawn"]).strip())
    output = call(dbg, ["tap-ctl", "allocate"]).strip()
    prefix = blktap2_prefix
    if output.startswith(prefix):
        minor = int(output[len(prefix):])
    else:
        # Allocation failed: reap the orphaned process before reporting.
        os.kill(pid, signal.SIGQUIT)
        raise xapi.InternalError("tap-ctl allocate returned unexpected " +
                                 "output: %s" % (output))
    call(dbg, ["tap-ctl", "attach", "-m", str(minor), "-p", str(pid)])
    return Tapdisk(minor, pid, None)
def find_by_file(dbg, f):
    """Return the Tapdisk this host recorded for image path f, else None."""
    log.debug("%s: find_by_file f=%s" % (dbg, f))
    assert (isinstance(f, image.Path))
    # See whether this host has any metadata about this file
    try:
        log.debug("%s: find_by_file trying uri=%s" % (dbg, f.path))
        tap = load_tapdisk_metadata(dbg, f.path)
    except xapi.storage.api.volume.Volume_does_not_exist:
        # No metadata recorded on this host for that path.
        return None
    log.debug("%s: returning td %s" % (dbg, tap))
    return tap
def _metadata_dir(path):
    """Host-local metadata directory for the VDI at the given path."""
    resolved = os.path.realpath(path)
    return "%s/%s" % (TD_PROC_METADATA_DIR, resolved)
def save_tapdisk_metadata(dbg, path, tap):
    """ Record the tapdisk metadata for this VDI in host-local storage """
    dirname = _metadata_dir(path)
    try:
        # 0o755 (the old '0755' literal is a syntax error on Python 3).
        os.makedirs(dirname, mode=0o755)
    except OSError as e:
        if e.errno != 17:  # 17 == EEXIST, which is harmless
            raise e
    # Binary mode: pickle writes bytes, and binary pickles written here
    # must round-trip through load_tapdisk_metadata on any Python.
    with open(dirname + "/" + TD_PROC_METADATA_FILE, "wb") as fd:
        pickle.dump(tap.__dict__, fd)
def load_tapdisk_metadata(dbg, path):
    """Recover the tapdisk metadata for this VDI from host-local
    storage.

    Raises Volume_does_not_exist when this host has no record of the path.
    """
    dirname = _metadata_dir(path)
    log.debug("%s: load_tapdisk_metadata: trying '%s'" % (dbg, dirname))
    filename = dirname + "/" + TD_PROC_METADATA_FILE
    if not(os.path.exists(filename)):
        # XXX throw a better exception
        raise xapi.storage.api.volume.Volume_does_not_exist(dirname)
    # Binary mode to match the pickle written by save_tapdisk_metadata
    # (text mode breaks pickle reads on Python 3).
    with open(filename, "rb") as fd:
        meta = pickle.load(fd)
    tap = Tapdisk(meta['minor'], meta['pid'], meta['f'])
    tap.secondary = meta['secondary']
    return tap
def forget_tapdisk_metadata(dbg, path):
    """Delete the tapdisk metadata for this VDI from host-local storage."""
    dirname = _metadata_dir(path)
    try:
        os.unlink(dirname + "/" + TD_PROC_METADATA_FILE)
    except OSError:
        # Best-effort: nothing to do when the file is already gone.
        # (Previously a bare 'except:' which also hid e.g. KeyboardInterrupt.)
        pass
|
import tempfile
import threading
import unittest
from dlg import exceptions
from dlg.manager import constants
from dlg.manager.client import NodeManagerClient, DataIslandManagerClient
from dlg.manager.node_manager import NodeManager
from dlg.manager.rest import NMRestServer, CompositeManagerRestServer
from dlg.restutils import RestClient
from dlg.manager.composite_manager import DataIslandManager
from dlg.exceptions import InvalidGraphException
from dlg.common import Categories
# Host on which both REST servers are started and queried.
hostname = 'localhost'
class TestRest(unittest.TestCase):
    """End-to-end tests of the NodeManager and DataIslandManager REST APIs.

    setUp starts a real NM REST server and a DIM REST server on
    localhost, each in its own thread; tearDown stops both and checks
    the serving threads actually terminated.
    """

    def setUp(self):
        unittest.TestCase.setUp(self)
        self.dm = NodeManager(False)
        self._dm_server = NMRestServer(self.dm)
        self._dm_t = threading.Thread(target=self._dm_server.start, args=(hostname, constants.NODE_DEFAULT_REST_PORT))
        self._dm_t.start()
        self.dim = DataIslandManager(dmHosts=[hostname])
        self._dim_server = CompositeManagerRestServer(self.dim)
        self._dim_t = threading.Thread(target=self._dim_server.start, args=(hostname, constants.ISLAND_DEFAULT_REST_PORT))
        self._dim_t.start()

    def tearDown(self):
        unittest.TestCase.tearDown(self)
        # Stop each server, join its thread, then shut the manager down;
        # the is_alive checks guard against a hung server thread.
        self._dm_server.stop()
        self._dm_t.join()
        self.dm.shutdown()
        self.assertFalse(self._dm_t.is_alive())
        self._dim_server.stop()
        self._dim_t.join()
        self.dim.shutdown()
        self.assertFalse(self._dim_t.is_alive())

    def test_index(self):
        """Smoke-test the HTML endpoints of the NM server."""
        # Just check that the HTML pages load properly
        with RestClient(hostname, constants.NODE_DEFAULT_REST_PORT, timeout=10) as c:
            c._GET('/')
            c._GET('/session')

    def test_errtype(self):
        """Each invalid request must surface as its specific exception type."""
        sid = 'lala'
        c = NodeManagerClient(hostname)
        c.createSession(sid)
        # already exists
        self.assertRaises(exceptions.SessionAlreadyExistsException, c.createSession, sid)
        # different session
        self.assertRaises(exceptions.NoSessionException, c.addGraphSpec, sid + "x", [{}])
        # invalid dropspec, it has no oid/type (is completely empty actually)
        self.assertRaises(exceptions.InvalidGraphException, c.addGraphSpec, sid, [{}])
        # invalid dropspec, app doesn't exist
        self.assertRaises(exceptions.InvalidGraphException, c.addGraphSpec, sid, [{'oid': 'a', "type": 'app', 'app': 'doesnt.exist'}])
        # invalid state, the graph status is only queried when the session is running
        self.assertRaises(exceptions.InvalidSessionState, c.getGraphStatus, sid)
        # valid dropspec, but the socket listener app doesn't allow inputs
        c.addGraphSpec(sid, [{"type": 'socket', 'oid': 'a', 'inputs': ['b']}, {'oid': 'b', "type": 'plain', 'storage': Categories.MEMORY}])
        self.assertRaises(exceptions.InvalidRelationshipException, c.deploySession, sid)
        # And here we point to an unexisting file, making an invalid drop
        c.destroySession(sid)
        c.createSession(sid)
        fname = tempfile.mktemp()
        c.addGraphSpec(sid, [{"type": 'plain', 'storage': Categories.FILE, 'oid': 'a', 'filepath': fname, 'check_filepath_exists': True}])
        self.assertRaises(exceptions.InvalidDropException, c.deploySession, sid)

    def test_recursive(self):
        """NM-level errors must pass through the DIM as SubManagerException."""
        sid = 'lala'
        c = DataIslandManagerClient(hostname)
        c.createSession(sid)
        # invalid dropspec, app doesn't exist
        # This is not checked at the DIM level but only at the NM level
        # The exception should still pass through though
        with self.assertRaises(exceptions.SubManagerException) as cm:
            c.addGraphSpec(sid, [{'oid': 'a', "type": 'app', 'app': 'doesnt.exist', 'node': hostname}])
        ex = cm.exception
        # The wrapped exception is keyed by the failing host.
        self.assertTrue(hostname in ex.args[0])
        self.assertTrue(isinstance(ex.args[0][hostname], InvalidGraphException))
|
"""
This module is meant to contain the lava log filtering functions.
These functions are used to filter out the content of the LAVA log when LAVA
callback is being processed.
Filtering functions should follow some rules:
- Name of the filtering function must start with `filter_`
- It can take only one argument which is a log line text to process
- It should return boolean value:
- True - log line stays in the log
- False - log line will be deleted
`LAVA_FILTERS` is a list of filtering functions (function objects)
and it gets automatically populated during module import.
"""
import re
import inspect
# Matches LAVA control-signal lines, e.g. "<LAVA_SIGNAL_STARTRUN ...>",
# anchored at the start of the message.
LAVA_SIGNAL_PATTERN = re.compile(
    r'^<LAVA_.+>')
def filter_log_levels(log_line):
    """Keep only lines whose level is 'target' (device-under-test output)."""
    level = log_line['lvl']
    return level == 'target'
def filter_lava_signal(log_line):
    """Drop lines that carry a <LAVA_...> control signal."""
    return LAVA_SIGNAL_PATTERN.match(log_line['msg']) is None
def _get_lava_filters():
filters = []
for name, obj in globals().items():
if name.startswith('filter') and inspect.isfunction(obj):
filters.append(obj)
return filters
LAVA_FILTERS = _get_lava_filters()
|
from __future__ import absolute_import
import sys
# Prepend the project root so in-tree packages win over installed copies
# when this module is run from its subdirectory.
sys.path[:0] = ['../..']
|
from zope.interface import implements, Attribute
from flumotion.inhouse import log
from flumotion.transcoder.admin import interfaces
from flumotion.transcoder.admin.proxy import base
class IWorkerDefinition(interfaces.IAdminInterface):
    """Interface of objects that describe a worker (running or not)."""

    def getName(self):
        """Return the worker's name."""
        pass

    def getWorkerContext(self):
        """Return the worker's configuration context."""
        pass
class IWorkerProxy(IWorkerDefinition, base.IBaseProxy):
    """Interface of proxies for running workers."""

    def getHost(self):
        """Return the host the worker is running on."""
        pass
class WorkerDefinition(object):
    """
    Used to represent non-running workers.
    """

    implements(IWorkerDefinition)

    def __init__(self, workerName, workerCtx):
        # Bug fix: getName() reads self._name, but the constructor used to
        # assign self.name, making getName() raise AttributeError.
        self._name = workerName
        self._workerCtx = workerCtx

    ## IWorkerDefinition Methods ##

    def getName(self):
        return self._name

    def getWorkerContext(self):
        return self._workerCtx
class WorkerProxy(base.BaseProxy):
    """Proxy for a running worker; wraps the manager's worker state."""

    implements(IWorkerProxy)

    def __init__(self, logger, parentPxy, identifier,
                 managerPxy, workerCtx, workerState):
        base.BaseProxy.__init__(self, logger, parentPxy, identifier, managerPxy)
        # workerState is the manager-side state object; set to None once
        # the worker is discarded (asserted by every accessor below).
        self._workerState = workerState
        self._workerCtx = workerCtx

    ## IWorkerDefinition and IFlumotionProxxyRO Methods ##

    def getName(self):
        assert self._workerState, "Worker has been removed"
        return self._workerState.get('name')

    def getHost(self):
        assert self._workerState, "Worker has been removed"
        return self._workerState.get('host')

    def getWorkerContext(self):
        return self._workerCtx

    ## Overridden Methods ##

    def _onRemoved(self):
        assert self._workerState, "Worker has already been removed"

    def _doDiscard(self):
        assert self._workerState, "Worker has already been discarded"
        # Drop the state reference so later accesses fail loudly.
        self._workerState = None

    ## Protected Methods ##

    def _callRemote(self, methodName, *args, **kwargs):
        """Forward a remote call to this worker through the manager proxy."""
        assert self._workerState, "Worker has been removed"
        workerName = self._workerState.get('name')
        return self._managerPxy._workerCallRemote(workerName,
                                                  methodName,
                                                  *args, **kwargs)
def instantiate(logger, parentPxy, identifier, managerPxy,
                workerContext, workerState, *args, **kwargs):
    """Factory entry point used by the proxy machinery."""
    proxy = WorkerProxy(logger, parentPxy, identifier, managerPxy,
                        workerContext, workerState, *args, **kwargs)
    return proxy
|
import pytest
import webtest
from static import u
@pytest.fixture
def _cling():
    """Bare Cling WSGI app serving static files from testdata/pub."""
    from static import Cling
    return Cling(root="testdata/pub")
@pytest.fixture
def _shock():
    """Shock app configured with one template 'magic' per engine."""
    from static import Shock, StringMagic, KidMagic, GenshiMagic
    magics = (StringMagic(title="String Test"),
              KidMagic(title="Kid Test"), GenshiMagic(title="Genshi Test"))
    return Shock(root="testdata/pub", magics=magics)
@pytest.fixture
def ascii_shock(_shock):
    """Shock app forced to ASCII output, wrapped for HTTP testing."""
    _shock.encoding = 'ascii'
    return webtest.TestApp(_shock)
@pytest.fixture
def cling(_cling):
    """HTTP test wrapper around the bare Cling app."""
    return webtest.TestApp(_cling)
@pytest.fixture
def shock(_shock):
    """HTTP test wrapper around the Shock app."""
    return webtest.TestApp(_shock)
# The kid templating engine is Python-2 only, hence the skip on 3.x.
@pytest.mark.skipif("sys.version_info >= (3,0)")
def test_kid(shock):
    """A .kid template renders with its configured title and env vars."""
    response = shock.get("/kid.html.kid")
    assert "Title: Kid Test" in response
    assert "REQUEST_METHOD" in response
def test_genshi(shock):
    """A Genshi template renders with its configured title and env vars."""
    response = shock.get("/test.html")
    assert "Title: Genshi Test" in response
    assert "REQUEST_METHOD" in response
def test_string(shock):
    """A string-template (.stp) page interpolates title and path info."""
    response = shock.get("/sub.html.stp")
    assert "<h1>String Test</h1>" in response
    assert "Path info: /sub.html." in response
def test_static_cling(cling):
    """Cling serves plain files with correct content types and encoding."""
    response = cling.get("/index.html")
    assert "Mixed Content" in response
    assert response.content_type == "text/html"
    response = cling.get("/test.xml")
    assert "green" in response
    # dependes on the mimetypes version, both is fine
    assert (response.content_type == "text/xml"
            or response.content_type == "application/xml")
    # Non-ASCII content must decode correctly.
    response = cling.get("/unicode.html")
    assert u('\u00f6\u00e4\u00fc') in response
def test_cling_head(cling):
    """HEAD returns headers but an empty body."""
    response = cling.head("/index.html")
    assert response.content_type == "text/html"
    assert response.body == b''
def test_shock_head(shock):
    """HEAD on the Shock app also returns headers with an empty body."""
    response = shock.head("/index.html")
    assert response.content_type == "text/html"
    assert response.body == b''
def test_gzip_cling(cling):
    """Requesting gzip yields readable content and a Vary header.

    NOTE(review): the response is expected to carry NO Content-Encoding
    even though the client advertised gzip -- presumably webtest (or the
    app) delivers the decoded body here; confirm against static's gzip
    handling.
    """
    response = cling.get("/gzipped.html", headers=[('Accept-Encoding',
                                                    'gzip, deflate')])
    assert "Mixed Content" in response
    assert response.content_type == "text/html"
    assert 'Content-Encoding' not in response.headers
    assert response.headers['Vary'] == 'Accept-Encoding'
def test_headers_cling():
    """Extra headers are applied only to matching responses.

    Each rule dict carries one selector key ('type', 'ext' or 'prefix')
    plus the headers to add; /index.html matches each selector while
    /test.xml matches none.
    """
    from static import Cling

    def get(headers, url='/index.html'):
        # Build a fresh app per request so each rule set is isolated.
        app = webtest.TestApp(Cling(root="testdata/pub", headers=headers))
        return app.get(url)

    # Match by content type.
    response = get([{'type': 'text/html', 'Cache-Control': 'max-age=10'}])
    assert response.headers['Cache-Control'] == 'max-age=10'
    response = get(
        [{'type': 'text/html', 'Cache-Control': 'max-age=10'}], '/test.xml')
    assert 'Cache-Control' not in response.headers.keys()
    # Match by file extension.
    response = get([{'ext': 'html', 'Cache-Control': 'max-age=10'}])
    assert response.headers['Cache-Control'] == 'max-age=10'
    response = get(
        [{'ext': 'html', 'Cache-Control': 'max-age=10'}], '/test.xml')
    assert 'Cache-Control' not in response.headers.keys()
    # Match by URL prefix.
    response = get([{'prefix': '/index', 'Cache-Control': 'max-age=10'}])
    assert response.headers['Cache-Control'] == 'max-age=10'
    response = get(
        [{'prefix': '/index', 'Cache-Control': 'max-age=10'}], '/test.xml')
    assert 'Cache-Control' not in response.headers.keys()
def test_static_shock(shock):
    """Shock also serves plain (non-template) files."""
    response = shock.get("/index.html")
    assert "Mixed Content" in response
# Python 2: encoding non-ASCII output as ASCII fails on encode.
@pytest.mark.skipif("sys.version_info >= (3,0)")
def test_encoding(ascii_shock):
    with pytest.raises(UnicodeEncodeError):
        ascii_shock.get("/encoding.html")
# Python 3: reading non-ASCII bytes as ASCII fails on decode instead.
@pytest.mark.skipif("sys.version_info < (3,0)")
def test_decoding(ascii_shock):
    with pytest.raises(UnicodeDecodeError):
        ascii_shock.get("/encoding.html")
|
# Defaults for the external OpenGL visualisation client.
GL_CLIENT_EXECUTABLE = 'glclient'
GL_COLORMAP_DIR = 'colormaps'
# Names of the bundled colormaps.
GL_COLORMAP_RAINBOW2 = 'rainbow2'
GL_COLORMAP_HOT = 'hot'
GL_COLORMAP_GREY = 'grey'
GL_COLORMAP_REDHOT = 'redhot'
GL_DEFAULT_COLORMAP = 'colormaps/rainbow2'
GL_PORT = '9999'
import os
import sys
import tempfile
import logging
from PyQt4.Qt import Qt
from PyQt4 import QtGui, QtCore
# Lazily created QSettings singleton; see get_settings().
settings = None
TEMPDIR = tempfile.gettempdir()
# QSettings key names used throughout the GUI.
KEY_FIRSTTIME = 'firsttime'
KEY_GL_COLORMAP = 'glclient/colormap'
KEY_GL_PORT = 'glclient/port'
KEY_GL_CLIENT_EXECUTABLE = 'glclient/executable'
KEY_GL_BACKGROUND_COLOR = 'glclient/bgcolor'
# NOTE(review): looks like a group name rather than a full key -- confirm.
KEY_HOME_DIR = 'main'
KEY_WINDOW_GEOMETRY = 'main/geometry'
KEY_WINDOW_LAYOUT = 'main/layout'
KEY_RUNTIME_AUTOHIDE = 'main/rtautohide'
KEY_DEMOS_DIR = 'main/demosdir'
# Qt version split into numeric components, e.g. '4.8.7' -> major 4, minor 8.
QT_VERSION = str(QtCore.QT_VERSION_STR).split('.')
QT_MAJOR_VERSION = int(QT_VERSION[0])
QT_MINOR_VERSION = int(QT_VERSION[1])
MOOSE_DOC_URL = 'http://moose.ncbs.res.in/content/view/5/6/'
MOOSE_REPORT_BUG_URL = 'http://sourceforge.net/tracker/?func=add&group_id=165660&atid=836272'
def get_settings():
    '''Initializes the QSettings for the application and returns it.'''
    global settings
    if settings:
        return settings
    # First call: register the application identity, then create the
    # QSettings object that all later calls will share.
    QtCore.QCoreApplication.setOrganizationName('NCBS')
    QtCore.QCoreApplication.setOrganizationDomain('ncbs.res.in')
    QtCore.QCoreApplication.setApplicationName('MOOSE')
    settings = QtCore.QSettings()
    return settings
# Application-wide logging: everything goes to stdout in one verbose format.
LOG_LEVEL = logging.ERROR
logging.basicConfig(stream=sys.stdout, level=LOG_LEVEL, filemode='w', format='%(asctime)s %(levelname)s %(name)s %(filename)s %(funcName)s: %(lineno)d: %(message)s')
LOGGER = logging.getLogger('moose')
BENCHMARK_LOGGER = logging.getLogger('moose.benchmark')
# Benchmark messages stay visible even though the app level is ERROR.
BENCHMARK_LOGGER.setLevel(logging.INFO)
|
import unittest
import sys
from helper import adjust_filename
from PySide2.QtCore import QObject, QUrl, Slot, QTimer
from PySide2.QtGui import QGuiApplication
from PySide2.QtQml import QQmlIncubationController, VolatileBool
from PySide2.QtQuick import QQuickView
class CustomIncubationController(QObject, QQmlIncubationController):
    """QML incubation controller that can be interrupted via a VolatileBool.

    Drives incubation from a QObject timer; interrupter() flips the
    VolatileBool so the in-progress incubateWhile() call stops early.
    """

    def __init__(self, test):
        QObject.__init__(self)
        QQmlIncubationController.__init__(self)
        self.test = test
        self.interrupted = False
        # Incubate every 50 milliseconds
        self.startTimer(50)
        self.incubationShouldContinue = VolatileBool(True)
        self.test.assertEqual(self.incubationShouldContinue.get(), True)

    @Slot()
    def interrupter(self):
        if not self.interrupted:
            self.interrupted = True
            # Flipping the flag makes the current incubateWhile() return.
            self.incubationShouldContinue.set(False)
            self.test.assertEqual(self.incubationShouldContinue.get(), False)
            # Quit the event loop on the next iteration.
            QTimer.singleShot(0, QGuiApplication.instance().quit)

    def timerEvent(self, ev):
        # Incubate items for 2000 milliseconds, or until the volatile bool is set to false.
        self.incubateWhile(self.incubationShouldContinue, 2000)
class TestBug(unittest.TestCase):
    """Regression test: incubateWhile honours its VolatileBool argument."""

    def testIncubateWhileCall(self):
        app = QGuiApplication(sys.argv)
        view = QQuickView()
        controller = CustomIncubationController(self)
        view.engine().setIncubationController(controller)
        view.setResizeMode(QQuickView.SizeRootObjectToView)
        view.setSource(QUrl.fromLocalFile(adjust_filename('qqmlincubator_incubateWhile.qml',
                                                          __file__)))
        view.show()
        root = view.rootObject()
        # The QML code will issue an interrupt signal after half of its items are loaded.
        root.shouldInterrupt.connect(controller.interrupter)
        res = app.exec_()
        # The interrupt fires at the halfway point, so only half of the
        # items should have been incubated while the loop ran.
        itemsToCreate = root.property("itemsToCreate")
        loadedItems = root.property("loadedItems")
        self.assertEqual(loadedItems, itemsToCreate / 2)
        # Finish incubating the remaining items.
        controller.incubateFor(1000)
        loadedItems = root.property("loadedItems")
        self.assertEqual(loadedItems, itemsToCreate)
        # Deleting the view before it goes out of scope is required to make sure all child QML
        # instances are destroyed in the correct order.
        del view
        del app
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
from pycp2k.inputsection import InputSection
class _xalpha3(InputSection):
    """Auto-generated CP2K input section for the XALPHA functional.

    _keywords maps Python attribute names to the CP2K keyword spelling.
    """

    def __init__(self):
        InputSection.__init__(self)
        self.Section_parameters = None
        self.Xa = None       # CP2K keyword XA
        self.Scale_x = None  # CP2K keyword SCALE_X
        self._name = "XALPHA"
        self._keywords = {'Xa': 'XA', 'Scale_x': 'SCALE_X'}
        self._attributes = ['Section_parameters']
|
"""network_appdata
Revision ID: cec2b77ad85e
Revises: 1d3ab26415ec
Create Date: 2021-02-12 10:10:35.646470
"""
import logging
from alembic import op
import sqlalchemy as sa
log = logging.getLogger(__name__)
# Alembic revision identifiers.
revision = 'cec2b77ad85e'
down_revision = '1d3ab26415ec'
branch_labels = None
depends_on = None
def upgrade():
    """Add the tNetwork.appdata column (MySQL only, as LONGTEXT).

    Failures are logged at critical level and swallowed so the migration
    is best-effort -- NOTE(review): confirm swallowing is intentional.
    """
    if op.get_bind().dialect.name == 'mysql':
        try:
            op.add_column('tNetwork',
                          sa.Column('appdata',
                                    sa.Text().with_variant(sa.dialects.mysql.LONGTEXT, 'mysql')))
        except Exception as e:
            log.critical(e)
def downgrade():
    """Drop the tNetwork.appdata column (MySQL only); errors are logged,
    not raised, mirroring upgrade()."""
    if op.get_bind().dialect.name == 'mysql':
        try:
            op.drop_column('tNetwork', 'appdata')
        except Exception as e:
            log.critical(e)
|
from OracleDatabase import OracleDatabase
from time import sleep
from itertools import product
import logging, string
from Tnscmd import Tnscmd
from Constants import *
from Utils import stringToLinePadded
class ServiceNameGuesser (OracleDatabase):
    '''
    Service Name guesser: finds valid Oracle Service Names on a target
    listener, first from a wordlist and then by brute force.
    '''

    def __init__(self, args, serviceNameFile, timeSleep=0):
        '''
        Constructor.

        serviceNameFile is a wordlist file (one Service Name per line);
        timeSleep is the pause between connection attempts, in seconds.
        '''
        logging.debug("ServiceNameGuesser object created")
        OracleDatabase.__init__(self,args)
        self.serviceNameFile = serviceNameFile
        self.serviceNames = []
        self.validServiceNames = []
        # Guessing never attempts privileged connections.
        self.args['SYSDBA'] = False
        self.args['SYSOPER'] = False
        self.timeSleep = timeSleep
        # Server error fragments that prove the Service Name is NOT valid.
        self.NO_GOOD_SERVICE_NAME_STRING_LIST = ["listener does not currently know of service requested",
                                                 "listener does not currently know of SID",
                                                 "connection to server failed"]

    def getValidServiceNames(self):
        '''
        Return the list of valid Service Names found so far.
        '''
        return self.validServiceNames

    def appendValidServiceName (self, serviceName):
        '''
        Append serviceName to self.validServiceNames if it is not
        already in the list.
        '''
        if serviceName not in self.validServiceNames:
            self.validServiceNames.append(serviceName)

    def __getServiceNamesFromFile__(self):
        '''
        Return the sorted list of Service Names read from the wordlist file.
        '''
        serviceNames = []
        logging.info('Load Service Names stored in the {0} file'.format(self.serviceNameFile))
        f = open(self.serviceNameFile)
        for l in f:
            serviceNames.append(l.replace('\n','').replace('\t',''))
        f.close()
        return sorted(serviceNames)

    def __testIfAGoodServiceName__(self):
        '''
        Try to connect with self.args['serviceName']; record it as valid
        unless the server's answer contains a known "bad name" message.
        '''
        no_good_service_name_found = False
        # Random credentials: we only care about the listener's reaction
        # to the Service Name, not about authenticating.
        self.__generateConnectionString__(username=self.__generateRandomString__(nb=15), password=self.__generateRandomString__(nb=5))
        logging.debug("Try to connect with the {0} Service Name ({1})".format(self.args['serviceName'], self.args['connectionStr']))
        status = self.connection()
        if self.__needRetryConnection__(status) == True:
            status = self.__retryConnect__(nbTry=4)
        if status != None :
            for aNoGoodString in self.NO_GOOD_SERVICE_NAME_STRING_LIST:
                if aNoGoodString in str(status):
                    no_good_service_name_found = True
                    break
            if no_good_service_name_found == False:
                self.appendValidServiceName(self.args['serviceName'])
                logging.info("'{0}' is a valid Service Name (Server message: {1})".format(self.args['serviceName'], str(status)))
                self.args['print'].goodNews(stringToLinePadded("'{0}' is a valid Service Name. Continue... ".format(self.args['serviceName'])))
        self.close()

    def searchKnownServiceNames(self):
        '''
        Search valid Service Names using the well-known wordlist.
        '''
        self.args['print'].subtitle("Searching valid Service Names thanks to a well known Service Name list on the {0}:{1} server".format(self.args['server'], self.args['port']))
        self.serviceNames += self.__getServiceNamesFromFile__()
        pbar,nb = self.getStandardBarStarted(len(self.serviceNames)), 0
        logging.info('Start the research')
        # Make sure connections are built from serviceName, not sid.
        self.args['sid'] = None
        for aServiceName in self.serviceNames :
            nb += 1
            pbar.update(nb)
            self.args['serviceName'] = aServiceName
            self.__testIfAGoodServiceName__()
            sleep(self.timeSleep)
        pbar.finish()
        return True

    def bruteforceServiceNames(self, size=4, charset=string.ascii_uppercase):
        '''
        Brute-force Service Names of the given length over charset.
        '''
        self.args['print'].subtitle("Searching valid Service Names thanks to a brute-force attack on {2} chars now ({0}:{1})".format(self.args['server'], self.args['port'], size))
        pbar,nb = self.getStandardBarStarted(len(charset)**size), 0
        logging.info('Start the research')
        self.args['sid'] = None
        for aServiceName in product(list(charset), repeat=size):
            nb +=1
            pbar.update(nb)
            self.args['serviceName'] = ''.join(aServiceName)
            self.__testIfAGoodServiceName__()
            sleep(self.timeSleep)
        pbar.finish()
        return True

    def loadServiceNameFromListenerAlias(self):
        '''
        Append the listener's ALIAS entries to the Service Name list, to
        try each ALIAS as a Service Name.
        '''
        logging.info('Put listener ALIAS into the Service Name list to try ALIAS like Service Name')
        tnscmd = Tnscmd(self.args)
        tnscmd.getInformation()
        self.serviceNames += tnscmd.getAlias()
def runServiceNameGuesserModule(args):
    '''
    Run the ServiceNameGuesser module: listener aliases first, then the
    wordlist, then brute force over the configured size range.
    Returns the list of valid Service Names found (possibly empty).
    '''
    args['print'].title("Searching valid Service Names")
    serviceNameGuesser = ServiceNameGuesser(args, args['service-name-file'], timeSleep=args['timeSleep'])
    serviceNameGuesser.loadServiceNameFromListenerAlias()
    serviceNameGuesser.searchKnownServiceNames()
    for aServiceNameSize in range(args['service-name-min-size'], args['service-name-max-size']+1):
        serviceNameGuesser.bruteforceServiceNames(size=aServiceNameSize, charset=args['service-name-charset'])
    validServiceNameList = serviceNameGuesser.getValidServiceNames()
    if validServiceNameList == []:
        # Bug fix: the old message ("No found a valid Service Name") never
        # used the format placeholders it was given.
        args['print'].badNews("No valid Service Name found on the {0}:{1} server".format(args['server'], args['port']))
    else :
        args['print'].goodNews("Service Name(s) found on the {0}:{1} server: {2}".format(args['server'], args['port'], ','.join(validServiceNameList)))
    return validServiceNameList
|
# Python 2 demo: trial division using the for-loop's else-clause,
# which runs only when the inner loop was not exited via break.
for n in range(2, 100):
    for x in range(2, n):
        if n % x == 0:
            print n, 'equals', x, '*', n/x
            break
    else:
        # loop fell through without finding a factor
        print n, 'is a prime number'
|
"""
Config for the grok processes to be run
"""
from collectors.etc import yaml_conf
# All grok collector settings live under the 'collector' key of grok.yml.
GROK_CONFIG = yaml_conf.load_collector_configuration('grok.yml')['collector']
GROK_EXPORTER_DIR = GROK_CONFIG['grok_exporter_dir']
# Optional debug flag; defaults to off when absent (dict.get replaces the
# old membership-test-plus-if/else with identical behaviour).
GROK_EXPORTER_DEBUG = GROK_CONFIG.get('grok_exporter_debug', False)
GROK_SCRAPER_CONFIG_DIR = GROK_CONFIG['grok_scraper_config_dir']
def get_grok_exporter_dir():
    """Return the directory containing the grok_exporter installation."""
    return GROK_EXPORTER_DIR
def get_grok_exporter_debug():
    """Return whether grok_exporter debug output is enabled (default False)."""
    return GROK_EXPORTER_DEBUG
def get_grok_scraper_config_dir():
    """Return the directory holding the grok scraper configuration files."""
    return GROK_SCRAPER_CONFIG_DIR
|
import flextls
from OpenSSL import SSL
def convert_version2method(protocol_version):
    """
    Convert internal protocol version ID to OpenSSL method.

    :param Integer protocol_version: Version ID
    :return: OpenSSL method or None if not found
    :rtype: OpenSSL method or None
    """
    # Checked sequentially so each SSL.* constant is only touched for the
    # matching version -- NOTE(review): recent pyOpenSSL releases drop
    # SSLv2_METHOD/SSLv3_METHOD, so referencing a missing constant would
    # raise AttributeError; confirm the supported pyOpenSSL range.
    if protocol_version == flextls.registry.version.SSLv2:
        return SSL.SSLv2_METHOD
    if protocol_version == flextls.registry.version.SSLv3:
        return SSL.SSLv3_METHOD
    if protocol_version == flextls.registry.version.TLSv10:
        return SSL.TLSv1_METHOD
    if protocol_version == flextls.registry.version.TLSv11:
        return SSL.TLSv1_1_METHOD
    if protocol_version == flextls.registry.version.TLSv12:
        return SSL.TLSv1_2_METHOD
    return None
def convert_versions2methods(protocol_versions):
    """
    Convert list of internal protocol version to OpenSSL methods.

    :param List protocol_version: List of internal protocol version IDs
    :return: List of methods
    :rtype: List
    """
    # Unknown versions map to None and are filtered out.
    candidates = (convert_version2method(v) for v in protocol_versions)
    return [method for method in candidates if method is not None]
|
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
import time
import mysql.connector as mysql
from lib3.decorator.safe_run import safe_run_wrap
now = int(time.time())
# Midnight today in UTC+8 as a Unix timestamp. Floor division keeps the
# result an int on Python 3 (plain '/' would yield a float there, while
# '//' matches the old Python 2 integer-division behaviour).
today = int(now+8*3600)//86400*86400-8*3600
dayts = 86400   # seconds per day
hourts = 3600   # seconds per hour
mints = 60      # seconds per minute
yesterday = today - dayts
def YMD(ts):
    """Format a Unix timestamp as local-time 'YYYYMMDD'."""
    local = time.localtime(ts)
    return time.strftime("%Y%m%d", local)
def YM(ts):
    """Format a Unix timestamp as local-time 'YYYYMM'."""
    local = time.localtime(ts)
    return time.strftime("%Y%m", local)
def DAY(ts):
    """Format a Unix timestamp as the zero-padded local day of month."""
    local = time.localtime(ts)
    return time.strftime("%d", local)
@safe_run_wrap
def MySQLDB(host, user, port=3306, pw="", db=""):
    """Open a MySQL connection; safe_run_wrap guards against exceptions
    (presumably returning None on failure -- confirm in
    lib3.decorator.safe_run)."""
    return mysql.connect(host=host, port=port, user=user, password=pw, database=db)
@safe_run_wrap
def MySQLDBv2(dic):
    """Connect using a parameter dict and cache the connection in it.

    Expects keys host and user; port, pw and database are optional.
    Stores the live connection under 'cnx' and returns the same dict,
    which MySQLRun accepts for its auto-reconnect mode.
    """
    host = dic.get('host')
    user = dic.get('user')
    port = dic.get('port', 3306)
    pw = dic.get('pw', "")
    db = dic.get('database', "")
    cnx = mysql.connect(host=host, port=port, user=user, password=pw, database=db)
    dic.update({"cnx": cnx})
    return dic
@safe_run_wrap
def MySQLRun(db, query):
    """Execute query and return cursor.fetchall().

    db is either an open connection (one-shot use) or a parameter dict
    ({host, user[, port, pw], database[, cnx]}); the dict form caches
    the connection under 'cnx' and reconnects when the session timed
    out. Returns None for an unsupported db argument.
    """
    if type(db) is dict:
        # This mode enables reconnect while timeout
        # {cnx: conn, host: x, port: x, user: x, pw: x, database: x}
        host = db["host"]
        port = db.get("port", 3306)
        user = db["user"]
        pw = db.get('pw', "")
        database = db["database"]
        # Bug fix: dict.get with an eager default used to dial a brand-new
        # connection on EVERY call, even when 'cnx' was already cached.
        if "cnx" not in db:
            db["cnx"] = MySQLDB(host, user, port, pw, database)
        cnx = db["cnx"]
        try:
            cnx.ping(reconnect=True, attempts=3, delay=5)
        except Exception:
            # Session is gone for good: open a fresh connection.
            cnx = MySQLDB(host, user, port, pw, database)
            db["cnx"] = cnx
        conn = cnx
    elif type(db) is mysql.connection_cext.CMySQLConnection:
        # one time run
        conn = db
    else:
        return None
    # start run
    cursor = conn.cursor()
    cursor.execute(query)
    return cursor.fetchall()
def main():
    """Demonstrate both calling conventions of MySQLRun."""
    # One time run scripts, a bit simple to maintain.
    db=MySQLDB(host="172.20.82.5", user="live", pw="", db="account")
    print (MySQLRun(db, "select * from account limit 1;"))
    # Using this method makes it possible to reconnect when session times out
    TDB_ACNT = {"host": "172.20.82.5", "user":"live", "pw": "", "database": "account"}
    newdb = MySQLDBv2(TDB_ACNT)
    print (MySQLRun(newdb, "select * from account limit 2;"))
if __name__ == "__main__":
main()
|
from __future__ import unicode_literals
from ast import literal_eval
from decimal import Decimal
import re
from weboob.browser.pages import LoggedPage, JsonPage, HTMLPage
from weboob.browser.elements import ItemElement, DictElement, method
from weboob.browser.filters.standard import Date, Eval, CleanText, Field, CleanDecimal
from weboob.browser.filters.json import Dict
from weboob.capabilities.bank import Account, Transaction
from weboob.capabilities.base import NotAvailable
from weboob.tools.json import json
from weboob.tools.compat import basestring
from weboob.exceptions import ActionNeeded, BrowserUnavailable
def float_to_decimal(f):
    """Build a Decimal from a float by way of its string form."""
    as_text = str(f)
    return Decimal(as_text)
def parse_decimal(s):
    """Parse an amount string whose decimal separator is unknown.

    Heuristic: several commas and no dot means commas are thousands
    separators; otherwise the right-most of ',' / '.' is taken to be the
    decimal separator.
    """
    # we might get 1,399,680 in rupie indonésienne
    if s.count(',') > 1 and not s.count('.'):
        return CleanDecimal(replace_dots=(',', '.')).filter(s)
    # we don't know which decimal format this account will use
    comma = s.rfind(',') > s.rfind('.')
    return CleanDecimal(replace_dots=comma).filter(s)
class WrongLoginPage(HTMLPage):
    """Page shown when the submitted credentials are rejected."""
    pass
class AccountSuspendedPage(HTMLPage):
    """Page shown when the user's account has been suspended."""
    pass
class NoCardPage(HTMLPage):
    """Reached when no card is attached to the account; user action needed."""

    def on_load(self):
        raise ActionNeeded()
class NotFoundPage(HTMLPage):
    """Error page: surface the site's own alert text as unavailability."""

    def on_load(self):
        alert_header = CleanText('//h1[@class="alert-header"]/span')(self.doc)
        alert_content = CleanText('//p[@class="alert-subtitle"]/span')(self.doc)
        raise BrowserUnavailable(alert_header, alert_content)
class LoginPage(HTMLPage):
    def login(self, username, password):
        """Fill and submit the SSO login form.

        Credentials are set under both field spellings because the form
        carries duplicate inputs (UserID/USERID, Password/PWD).
        """
        form = self.get_form(name='ssoform')
        form['UserID'] = username
        form['USERID'] = username
        form['Password'] = password
        form['PWD'] = password
        form.submit()
class AccountsPage(LoggedPage, HTMLPage):
    def iter_accounts(self):
        """Yield card Accounts parsed from the page's serialized state.

        The data comes from the window.__INITIAL_STATE__ blob embedded in
        the 'initial-state' script tag: a JSON string (hence the
        literal_eval + json.loads) holding deeply nested positional
        lists. All index positions below were reverse-engineered from
        that structure -- NOTE(review): fragile against site changes.
        """
        for line in self.doc.xpath('//script[@id="initial-state"]')[0].text.split('\n'):
            m = re.search('window.__INITIAL_STATE__ = (.*);', line)
            if m:
                data = json.loads(literal_eval(m.group(1)))
                break
        else:
            assert False, "data was not found"
        assert data[13] == 'core'
        assert len(data[14]) == 3
        # search for products to get products list
        for index, el in enumerate(data[14][2]):
            if 'products' in el:
                accounts_data = data[14][2][index+1]
                assert len(accounts_data) == 2
                assert accounts_data[1][4] == 'productsList'
                accounts_data = accounts_data[1][5]
        token = []
        for account_data in accounts_data:
            # A bare string entry is the balances token for the accounts
            # that follow it; list entries are the accounts themselves
            # (skipping cancelled cards).
            if isinstance(account_data, basestring):
                balances_token = account_data
            elif isinstance(account_data, list) and not account_data[4][2][0]=="Canceled":
                acc = Account()
                if len(account_data) > 15:
                    token.append(account_data[-11])
                    acc._idforJSON = account_data[10][-1]
                else:
                    acc._idforJSON = account_data[-5][-1]
                acc._idforJSON = re.sub('\s+', ' ', acc._idforJSON)
                acc.number = '-%s' % account_data[2][2]
                acc.label = '%s %s' % (account_data[6][4], account_data[10][-1])
                acc._balances_token = acc.id = balances_token
                acc._token = token[-1]
                acc.type = Account.TYPE_CARD
                yield acc
class JsonBalances(LoggedPage, JsonPage):
    def set_balances(self, accounts):
        """Fill coming/balance on accounts matched by their balances token.

        Amounts are negated: the API reports debits as positive values.
        """
        by_token = {a._balances_token: a for a in accounts}
        for d in self.doc:
            # coming is what should be refunded at a futur deadline
            by_token[d['account_token']].coming = -float_to_decimal(d['total_debits_balance_amount'])
            # balance is what is currently due
            by_token[d['account_token']].balance = -float_to_decimal(d['remaining_statement_balance_amount'])
class JsonBalances2(LoggedPage, JsonPage):
    def set_balances(self, accounts):
        """Alternative balances endpoint: totals live under a 'total' key."""
        by_token = {a._balances_token: a for a in accounts}
        for d in self.doc:
            by_token[d['account_token']].balance = -float_to_decimal(d['total']['payments_credits_total_amount'])
            by_token[d['account_token']].coming = -float_to_decimal(d['total']['debits_total_amount'])
            # warning: payments_credits_total_amount is not the coming value here
class CurrencyPage(LoggedPage, JsonPage):
    def get_currency(self):
        """Return the account's currency code from the JSON payload."""
        return self.doc['currency']
class JsonPeriods(LoggedPage, JsonPage):
    def get_periods(self):
        """Return the statement end dates of all available periods."""
        return [p['statement_end_date'] for p in self.doc]
class JsonHistory(LoggedPage, JsonPage):
    """Paginated JSON list of card transactions."""

    def get_count(self):
        # Total number of transactions across all pages.
        return self.doc['total_count']

    @method
    class iter_history(DictElement):
        item_xpath = 'transactions'

        class item(ItemElement):
            klass = Transaction

            def obj_type(self):
                # Monthly debit summaries are recognised by their label;
                # positive amounts are credits (orders), the rest are
                # deferred card debits.
                if Field('raw')(self) in self.page.browser.SUMMARY_CARD_LABEL:
                    return Transaction.TYPE_CARD_SUMMARY
                elif Field('amount')(self) > 0:
                    return Transaction.TYPE_ORDER
                else:
                    return Transaction.TYPE_DEFERRED_CARD

            obj_raw = CleanText(Dict('description', default=''))
            obj_date = Date(Dict('statement_end_date', default=None), default=None)
            obj_rdate = Date(Dict('charge_date'))
            obj_vdate = obj_bdate = Date(Dict('post_date', default=None), default=NotAvailable)
            # The API reports amounts from the bank's point of view; negate.
            obj_amount = Eval(lambda x: -float_to_decimal(x), Dict('amount'))
            obj_original_currency = Dict('foreign_details/iso_alpha_currency_code', default=NotAvailable)
            obj_commission = CleanDecimal(Dict('foreign_details/commission_amount', default=NotAvailable), sign=lambda x: -1, default=NotAvailable)
            obj__owner = CleanText(Dict('embossed_name'))
            obj_id = Dict('reference_id', default=NotAvailable)

            def obj_original_amount(self):
                """Transaction amount in its original currency, signed like
                the converted amount."""
                # amount in the account's currency
                amount = Field("amount")(self)
                # amount in the transaction's currency
                original_amount = Dict('foreign_details/amount', default=NotAvailable)(self)
                # XAF amounts: '.' is treated as a grouping character here
                # -- NOTE(review): confirm that formatting assumption.
                if Field("original_currency")(self) == "XAF":
                    original_amount = abs(CleanDecimal(replace_dots=('.')).filter(original_amount))
                elif not original_amount:
                    return NotAvailable
                else:
                    original_amount = abs(parse_decimal(original_amount))
                if amount < 0:
                    return -original_amount
                else:
                    return original_amount

            # obj__ref = Dict('reference_id')
            obj__ref = Dict('identifier')
|
from ajenti.com import *
from ajenti.api import *
from ajenti.utils import *
from ajenti.ui import UI
from ajenti import apis
import os
import time
class Daemons(Plugin):
    """Persistence layer for managed daemons, stored in the 'daemons' config section."""

    def list_all(self):
        """Return all configured daemons as Daemon objects, sorted by name.

        Each option in the 'daemons' config section maps a daemon name to its
        serialized option string (see Daemon.__init__ for the format).
        """
        daemons = []
        if self.app.config.has_section('daemons'):
            for name in self.app.config.options('daemons'):
                spec = self.app.config.get('daemons', name)
                daemons.append(Daemon(name, spec))
        return sorted(daemons, key=lambda d: d.name)

    def save(self, items):
        """Serialize the given Daemon objects into the config and persist it.

        The 'daemons' section is rebuilt from scratch.  Options whose value is
        None are stored as bare flags, the others as key="value" pairs.
        """
        self.app.config.remove_section('daemons')
        self.app.config.add_section('daemons')
        for daemon in items:
            parts = []
            for key in daemon.opts.keys():
                # Skip empty/placeholder keys that can result from parsing noise.
                if not key:
                    continue
                if daemon.opts[key] is None:
                    parts.append(key)
                else:
                    parts.append('%s="%s"' % (key, daemon.opts[key].strip(' "')))
            self.app.config.set('daemons', daemon.name, ','.join(parts))
        self.app.config.save()
class Daemon:
    """One entry supervised via the daemon(1) command-line tool.

    The option string has the form ``key="value",flag,key2="value2"``:
    bare tokens become flags (value None); ``key=value`` tokens are split on
    the first ``=`` and the value is stripped of surrounding quotes/spaces.
    """

    def __init__(self, name, s):
        self.name = name
        self.opts = {}
        for token in s.split(','):
            value = None
            if '=' in token:
                key, value = token.split('=', 1)
                value = value.strip(' "')
            else:
                key = token
            self.opts[key.strip()] = value

    def _user_arg(self):
        """Return the ' --user="..."' argument fragment, or '' if no user is set."""
        if 'user' in self.opts:
            return ' --user="%s"' % self.opts['user']
        return ''

    @property
    def running(self):
        """True if daemon(1) reports this daemon as currently running."""
        return shell_status('daemon --running --name "%s"%s'
                            % (self.name, self._user_arg())) == 0

    def start(self):
        """Start the daemon, passing every configured option on the command line."""
        cmd = ''
        for k in self.opts.keys():
            if not k:
                continue
            if self.opts[k] is None:
                cmd += ' --%s' % k  # bare flag
            else:
                cmd += ' --%s "%s"' % (k, self.opts[k])
        shell('daemon --name "%s" %s' % (self.name, cmd))
        time.sleep(0.5)  # give the daemon a moment to come up

    def restart(self):
        """Restart the daemon via daemon(1)."""
        shell('daemon --restart --name "%s"%s' % (self.name, self._user_arg()))
        # BUGFIX: the original called self.running() here, which raised
        # TypeError because `running` is a property returning a bool;
        # the status probe was a no-op and has been removed.
        time.sleep(0.5)

    def stop(self):
        """Stop the daemon via daemon(1)."""
        shell('daemon --stop --name "%s"%s' % (self.name, self._user_arg()))
        # (The original's bare `self.running` attribute read was a no-op.)
        time.sleep(0.5)

    # daemon(1) options editable through the UI.
    options = [
        'command',
        'user',
        'chroot',
        'chdir',
        'umask',
        'attempts',
        'delay',
        'limit',
        'output',
        'stdout',
        'stderr',
    ]
|
# Route all socket traffic through a local SOCKS5 proxy, then fetch a page.
# Requires the third-party PySocks package (`socks`).
import socks
import socket
from urllib.request import urlopen
# Point every new socket at the SOCKS5 proxy listening on localhost:1080.
socks.set_default_proxy(socks.SOCKS5, "127.0.0.1", 1080)
# Monkey-patch the socket module so urllib (and everything else) uses the proxy.
socket.socket = socks.socksocket
print(urlopen("https://www.google.com").read())
|
import copy
import dolfin
import ffc
import ufl
from exceptions import *
from versions import *
# Public API of this module: names re-exported by `from <module> import *`.
# Keep in sync with the helpers defined below.
__all__ = \
    [
        "QForm",
        "apply_bcs",
        "differentiate_expr",
        "enforce_bcs",
        "extract_test_and_trial",
        "evaluate_expr",
        "expand",
        "expand_expr",
        "expand_linear_solver_parameters",
        "form_quadrature_degree",
        "form_rank",
        "is_empty_form",
        "is_general_constant",
        "is_r0_function",
        "is_r0_function_space",
        "is_self_adjoint_form",
        "is_zero_rhs",
        "lumped_mass"
    ]
class QForm(ufl.form.Form):
    """
    A quadrature degree aware Form. A QForm records the quadrature degree with
    which the Form is to be assembled, and the quadrature degree is considered
    in all rich comparison. Hence two QForm s, which as Form s which would be
    deemed equal, are non-equal if their quadrature degrees differ. Constructor
    arguments are identical to the Form constructor, with the addition of a
    required quadrature_degree keyword argument, equal to the requested quadrature
    degree.
    """

    def __init__(self, arg, quadrature_degree):
        # Accept either a Form (whose integrals are reused) or whatever the
        # base ufl.form.Form constructor accepts (a list of integrals).
        if isinstance(arg, ufl.form.Form):
            arg = arg.integrals()
        if not isinstance(quadrature_degree, int) or quadrature_degree < 0:
            raise InvalidArgumentException("quadrature_degree must be a non-negative integer")
        ufl.form.Form.__init__(self, arg)
        self.__quadrature_degree = quadrature_degree
        return

    def __hash__(self):
        # Mix the quadrature degree into the hash so equal Forms with
        # different degrees hash differently, consistent with equals().
        return hash((self.__quadrature_degree, ufl.form.Form.__hash__(self)))

    def equals(self, other):
        # NOTE(review): equality is exposed through equals() rather than
        # __eq__; presumably this is the comparison hook of the targeted UFL
        # version -- confirm plain == behaves as the class docstring claims.
        return form_quadrature_degree(self) == form_quadrature_degree(other) and bool(ufl.form.Form.__eq__(ufl.form.Form(self.integrals()), ufl.form.Form(other.integrals())))

    def __repr__(self):
        return "%s, quadrature degree %i" % (ufl.form.Form.__repr__(self), self.__quadrature_degree)

    def __add__(self, other):
        # Addition is only defined between Forms with matching quadrature degree.
        if not isinstance(other, ufl.form.Form):
            raise InvalidArgumentException("other must be a Form")
        if not self.__quadrature_degree == form_quadrature_degree(other):
            raise InvalidArgumentException("Unable to add Forms: Quadrature degrees differ")
        return QForm(ufl.form.Form.__add__(self, other), quadrature_degree = self.__quadrature_degree)

    def __sub__(self, other):
        # a - b implemented as a + (-b).
        return self.__add__(-other)

    def __mul__(self, other):
        raise NotImplementedException("__mul__ method not implemented")

    def __rmul__(self, other):
        raise NotImplementedException("__rmul__ method not implemented")

    def __neg__(self):
        # Negation preserves the quadrature degree.
        return QForm(ufl.form.Form.__neg__(self), quadrature_degree = self.__quadrature_degree)

    def quadrature_degree(self):
        """
        Return the quadrature degree.
        """
        return self.__quadrature_degree

    def form_compiler_parameters(self):
        """
        Return a dictionary of form compiler parameters.
        """
        return {"quadrature_degree":self.__quadrature_degree}
def form_quadrature_degree(form):
    """
    Determine the quadrature degree with which the supplied Form is to be
    assembled. If form is a QForm, return the quadrature degree of the QForm.
    Otherwise, return the default quadrature degree if one is set, or return
    the quadrature degree that would be selected by FFC. The final case
    duplicates the internal behaviour of FFC.
    """
    if isinstance(form, QForm):
        return form.quadrature_degree()
    elif isinstance(form, ufl.form.Form):
        # A global default (> 0) in the DOLFIN parameter system wins.
        if dolfin.parameters["form_compiler"]["quadrature_degree"] > 0:
            quadrature_degree = dolfin.parameters["form_compiler"]["quadrature_degree"]
        else:
            # This is based upon code from _analyze_form and
            # _attach_integral_metadata in analysis.py, FFC bzr trunk revision 1761
            form_data = extract_form_data(copy.copy(form))
            # Take the maximum of FFC's per-integral automatic selections.
            quadrature_degree = -1
            for integral in form.integrals():
                rep = dolfin.parameters["form_compiler"]["representation"]
                if rep == "auto":
                    rep = ffc.analysis._auto_select_representation(integral, form_data.unique_sub_elements, form_data.function_replace_map)
                quadrature_degree = max(quadrature_degree, ffc.analysis._auto_select_quadrature_degree(integral, rep, form_data.unique_sub_elements, form_data.element_replace_map))
        return quadrature_degree
    else:
        raise InvalidArgumentException("form must be a Form")
def extract_form_data(form):
    """
    Wrapper for the form.form_data and form.compute_form_data methods of Form s.
    Calls the latter only if the former returns None.
    """
    if not isinstance(form, ufl.form.Form):
        raise InvalidArgumentException("form must be a Form")
    cached = form.form_data()
    return cached if cached is not None else form.compute_form_data()
def form_rank(form):
    """
    Return the rank of the supplied Form (its number of arguments).
    """
    if not isinstance(form, ufl.form.Form):
        raise InvalidArgumentException("form must be a Form")
    arguments = ufl.algorithms.extract_arguments(form)
    return len(arguments)
def is_general_constant(c):
    """
    Return whether the supplied object is a Constant or a ListTensor containing
    Constant s.
    """
    if isinstance(c, ufl.tensors.ListTensor):
        # Every component of the tensor must itself be a Constant.
        return all(isinstance(component, dolfin.Constant) for component in c)
    return isinstance(c, dolfin.Constant)
def is_r0_function(fn):
    """
    Return whether the supplied Function is R0 (i.e. a Real over the mesh).
    """
    if not isinstance(fn, dolfin.Function):
        raise InvalidArgumentException("fn must be a Function")
    space = fn.function_space()
    return is_r0_function_space(space)
def is_r0_function_space(space):
    """
    Return whether the supplied FunctionSpace is R0 (i.e. a Real over the mesh).
    """
    if not isinstance(space, dolfin.FunctionSpaceBase):
        raise InvalidArgumentException("space must be a FunctionSpace")
    element = space.ufl_element()
    # R0 <=> a degree-0 element of the "Real" family.
    return (element.family(), element.degree()) == ("Real", 0)
def evaluate_expr(expr, copy = False):
    """
    Evaluate the supplied expression, and return either a float or GenericVector.
    If copy is False then an existing GenericVector may be returned -- it is
    expected in this case that the return value will never be modified.
    """
    if not isinstance(expr, ufl.expr.Expr):
        raise InvalidArgumentException("expr must be an Expr")
    if isinstance(expr, ufl.algebra.Product):
        ops = expr.operands()
        assert(len(ops) > 0)
        # With more than one operand the accumulator is updated in place, so a
        # copy must be forced to avoid clobbering a Function's live vector.
        val = evaluate_expr(ops[0], copy = copy or len(ops) > 1)
        for op in ops[1:]:
            nval = evaluate_expr(op)
            # Skip multiplication by the float identity 1.0.
            if not isinstance(nval, float) or not nval == 1.0:
                val *= nval
    elif isinstance(expr, ufl.algebra.Sum):
        ops = expr.operands()
        assert(len(ops) > 0)
        val = evaluate_expr(ops[0], copy = copy or len(ops) > 1)
        for op in ops[1:]:
            nval = evaluate_expr(op)
            # Skip addition of the float identity 0.0.
            if not isinstance(nval, float) or not nval == 0.0:
                val += nval
    elif isinstance(expr, ufl.algebra.Division):
        ops = expr.operands()
        assert(len(ops) == 2)
        val = evaluate_expr(ops[0]) / evaluate_expr(ops[1])
    elif isinstance(expr, ufl.constantvalue.Zero):
        return 0.0
    elif isinstance(expr, dolfin.Function):
        if is_r0_function(expr):
            # A Real-valued Function holds a single scalar: sum its dof vector.
            val = expr.vector().sum()
        else:
            val = expr.vector()
            if copy:
                val = val.copy()
    elif isinstance(expr, (dolfin.Constant, ufl.constantvalue.ConstantValue)):
        val = float(expr)
    elif isinstance(expr, ufl.differentiation.CoefficientDerivative):
        # Expand the derivative symbolically, then evaluate the result.
        val = evaluate_expr(ufl.algorithms.expand_derivatives(expr))
    else:
        raise NotImplementedException("Expr type %s not implemented" % expr.__class__)
    return val
def differentiate_expr(expr, u, expand = True):
    """
    Wrapper for the UFL derivative function. This chooses an argument equal to
    Constant(1.0). Form s should be differentiated using the derivative function.
    """
    if not isinstance(expr, ufl.expr.Expr):
        raise InvalidArgumentException("expr must be an Expr")
    if isinstance(u, ufl.indexed.Indexed):
        # Differentiation with respect to a component of a Constant/Function.
        op = u.operands()
        assert(len(op) == 2)
        if not isinstance(op[0], (dolfin.Constant, dolfin.Function)):
            raise InvalidArgumentException("Invalid Indexed")
    elif not isinstance(u, (dolfin.Constant, dolfin.Function)):
        raise InvalidArgumentException("u must be an Indexed, Constant, or Function")
    if expr is u:
        # d(u)/d(u) = 1, without invoking UFL differentiation machinery.
        der = ufl.constantvalue.IntValue(1)
    else:
        # Differentiate against a Constant(1.0) placeholder argument, then
        # substitute the literal 1 so the result is an ordinary Expr.
        unity = dolfin.Constant(1.0)
        der = dolfin.replace(ufl.derivative(expr, u, argument = unity), {unity:ufl.constantvalue.IntValue(1)})
        if expand:
            # Based on code from expand_derivatives1 in UFL file ad.py, (see e.g. bzr
            # 1.1.x branch revision 1484)
            cell = der.cell()
            if cell is None:
                dim = 0
            else:
                dim = der.cell().geometric_dimension()
            der = ufl.algorithms.expand_derivatives(der, dim = dim)
    return der
def expand_expr(expr):
    """
    Recursively expand the supplied Expr into the largest possible Sum.

    Returns a list of Expr terms whose sum is equivalent to expr.
    """
    if not isinstance(expr, ufl.expr.Expr):
        raise InvalidArgumentException("expr must be an Expr")
    if isinstance(expr, ufl.algebra.Sum):
        # Flatten sums by expanding each summand.
        terms = []
        for term in expr.operands():
            terms += expand_expr(term)
        return terms
    elif isinstance(expr, ufl.algebra.Product):
        # Distribute the product over the expanded factors:
        # (a + b)(c + d) -> ac + ad + bc + bd. Operands beyond the first are
        # folded into a single second factor first.
        ops = expr.operands()
        fact1 = ops[0]
        fact2 = ops[1]
        for op in ops[2:]:
            fact2 *= op
        fact1_terms = expand_expr(fact1)
        fact2_terms = expand_expr(fact2)
        terms = []
        for term1 in fact1_terms:
            for term2 in fact2_terms:
                terms.append(term1 * term2)
        return terms
    elif isinstance(expr, ufl.indexed.Indexed):
        # Push the indexing inside each expanded term.
        ops = expr.operands()
        assert(len(ops) == 2)
        return [ufl.indexed.Indexed(term, ops[1]) for term in expand_expr(ops[0])]
    elif isinstance(expr, ufl.tensors.ComponentTensor):
        ops = expr.operands()
        assert(len(ops) == 2)
        return [ufl.tensors.ComponentTensor(term, ops[1]) for term in expand_expr(ops[0])]
    elif isinstance(expr, ufl.algebra.Division):
        # Only the numerator is expanded; the divisor is kept intact.
        ops = expr.operands()
        assert(len(ops) == 2)
        return [ufl.algebra.Division(term, ops[1]) for term in expand_expr(ops[0])]
    elif isinstance(expr, ufl.restriction.PositiveRestricted):
        ops = expr.operands()
        assert(len(ops) == 1)
        return [ufl.restriction.PositiveRestricted(term) for term in expand_expr(ops[0])]
    elif isinstance(expr, ufl.restriction.NegativeRestricted):
        ops = expr.operands()
        assert(len(ops) == 1)
        return [ufl.restriction.NegativeRestricted(term) for term in expand_expr(ops[0])]
    elif isinstance(expr, ufl.differentiation.Grad):
        # grad is linear, so it distributes over the expanded terms.
        ops = expr.operands()
        assert(len(ops) == 1)
        return [ufl.differentiation.Grad(term) for term in expand_expr(ops[0])]
    elif isinstance(expr, (ufl.tensoralgebra.Dot,
                           ufl.tensoralgebra.Inner,
                           ufl.differentiation.CoefficientDerivative,
                           ufl.differentiation.VariableDerivative)):
        # Lower compound/derivative nodes with expand() first, then recurse.
        return expand_expr(expand(expr))
    # Expr types white-list. These cannot be expanded.
    elif isinstance(expr, (ufl.constantvalue.ConstantValue,
                           ufl.argument.Argument,
                           dolfin.Expression,
                           dolfin.Function,
                           dolfin.Constant,
                           ufl.geometry.Circumradius,
                           ufl.algebra.Abs,
                           ufl.geometry.FacetNormal,
                           ufl.mathfunctions.Sqrt,
                           ufl.classes.Variable,
                           ufl.mathfunctions.Exp,
                           ufl.algebra.Power,
                           ufl.indexing.MultiIndex,
                           ufl.classes.Label)):
        return [expr]
    # Expr types grey-list. It might be possible to expand these, but just ignore
    # them at present.
    elif isinstance(expr, (ufl.tensors.ListTensor,
                           ufl.classes.Conditional,
                           ufl.indexsum.IndexSum)):
        return [expr]
    else:
        dolfin.warning("Expr type %s not expanded by expand_expr" % expr.__class__)
        return [expr]
def lumped_mass(space):
    """
    Return a linear form which can be assembled to yield a lumped mass matrix.
    """
    if not isinstance(space, dolfin.FunctionSpaceBase):
        raise InvalidArgumentException("space must be a FunctionSpace")
    v = dolfin.TestFunction(space)
    return v * dolfin.dx
def expand(form, dim = None):
    """
    Expand the supplied Expr or Form. This attempts to yield a canonical form.
    """
    if not isinstance(form, (ufl.expr.Expr, ufl.form.Form)):
        raise InvalidArgumentException("form must be an Expr or Form")
    # Derivatives first, then compound operators, then index notation.
    expanded = ufl.algorithms.expand_derivatives(form, dim = dim)
    expanded = ufl.algorithms.expand_compounds(expanded)
    expanded = ufl.algorithms.expand_indices(expanded)
    if isinstance(form, QForm):
        # Preserve the quadrature degree of a quadrature-aware Form.
        expanded = QForm(expanded, quadrature_degree = form_quadrature_degree(form))
    return expanded
if dolfin_version() < (1, 4, 0):
    def extract_test_and_trial(form):
        """
        Extract the test and trial function from a bi-linear form.
        """
        if not isinstance(form, ufl.form.Form):
            raise InvalidArgumentException("form must be a Form")
        args = ufl.algorithms.extract_arguments(form)
        if not len(args) == 2:
            raise InvalidArgumentException("form must be a bi-linear Form")
        test, trial = args
        # Pre-1.4 DOLFIN orders arguments by count(); the test function has
        # the lower count.
        if test.count() > trial.count():
            test, trial = trial, test
        assert(test.count() == trial.count() - 1)
        return test, trial
else:
    def extract_test_and_trial(form):
        """
        Extract the test and trial function from a bi-linear form.
        """
        if not isinstance(form, ufl.form.Form):
            raise InvalidArgumentException("form must be a Form")
        args = ufl.algorithms.extract_arguments(form)
        if not len(args) == 2:
            raise InvalidArgumentException("form must be a bi-linear Form")
        test, trial = args
        # DOLFIN >= 1.4 replaced Argument.count() with number().
        if test.number() > trial.number():
            test, trial = trial, test
        assert(test.number() == trial.number() - 1)
        return test, trial
def is_self_adjoint_form(form):
    """
    Return True if the supplied Form is self-adjoint. May return false negatives.
    """
    if not isinstance(form, ufl.form.Form):
        raise InvalidArgumentException("form must be a Form")
    adjoint_form = dolfin.adjoint(form)
    test, trial = extract_test_and_trial(form)
    a_test, a_trial = extract_test_and_trial(adjoint_form)
    # The adjoint swaps test and trial spaces, so the elements must match
    # pairwise across the swap.
    if not test.element() == a_trial.element():
        return False
    if not trial.element() == a_test.element():
        return False
    adjoint_form = dolfin.replace(adjoint_form, {a_test: trial, a_trial: test})
    return expand(form) == expand(adjoint_form)
def apply_bcs(a, bcs, L = None, symmetric_bcs = False):
    """
    Apply boundary conditions to the supplied LHS matrix and (optionally) RHS
    vector. If symmetric_bcs is true then the boundary conditions are applied so as
    to yield a symmetric matrix. If the boundary conditions are not homogeneous
    then a RHS vector should be supplied, although the lack of a RHS in this case
    is not treated as an error.
    """
    if not isinstance(a, dolfin.GenericMatrix):
        raise InvalidArgumentException("a must be a square GenericMatrix")
    elif not a.size(0) == a.size(1):
        raise InvalidArgumentException("a must be a square GenericMatrix")
    if not isinstance(bcs, list):
        raise InvalidArgumentException("bcs must be a list of DirichletBC s")
    for bc in bcs:
        if not isinstance(bc, dolfin.cpp.DirichletBC):
            raise InvalidArgumentException("bcs must be a list of DirichletBC s")
    if not L is None and not isinstance(L, dolfin.GenericVector):
        raise InvalidArgumentException("L must be a GenericVector")
    if L is None:
        for bc in bcs:
            bc.apply(a)
        if symmetric_bcs:
            # Symmetrisation needs a vector to receive the eliminated column
            # contributions; create a scratch one matching a's row layout.
            L = a.factory().create_vector()
            L.resize(a.local_range(0))
            for bc in bcs:
                bc.zero_columns(a, L, 1.0)
    else:
        for bc in bcs:
            bc.apply(a, L)
        if symmetric_bcs:
            # Zero the BC columns as well, moving their contribution to L.
            for bc in bcs:
                bc.zero_columns(a, L, 1.0)
    return
def enforce_bcs(x, bcs):
    """
    Enforce boundary conditions on the supplied GenericVector.
    """
    if not isinstance(x, dolfin.GenericVector):
        raise InvalidArgumentException("x must be a GenericVector")
    if not isinstance(bcs, list):
        raise InvalidArgumentException("bcs must be a list of DirichletBC s")
    if not all(isinstance(bc, dolfin.cpp.DirichletBC) for bc in bcs):
        raise InvalidArgumentException("bcs must be a list of DirichletBC s")
    for bc in bcs:
        bc.apply(x)
    return
def is_zero_rhs(rhs):
    """
    Return whether the input can be used to indicate a zero RHS.

    Only values comparing equal to the scalars 0 / 0.0 qualify.
    """
    # Membership test rather than `rhs == 0` so that objects overloading
    # __eq__ to return non-booleans are coerced through bool(), matching the
    # original behaviour; also avoids the redundant if/else around a boolean.
    return rhs in (0, 0.0)
def apply_default_parameters(parameters, default):
    """
    Return a new parameters dictionary equal to `parameters` with any missing
    keys filled in from `default`. Nested dictionaries are merged recursively.
    Default values that are applied are deep-copied, so mutating the result
    cannot alter `default`.
    """
    merged = {}
    for key in parameters:
        value = parameters[key]
        if not isinstance(value, dict):
            merged[key] = value
        else:
            # Recurse, using the matching default sub-dictionary if present.
            merged[key] = apply_default_parameters(value, default.get(key, {}))
    for key in default:
        if key not in merged:
            merged[key] = copy.deepcopy(default[key])
    return merged
def expand_linear_solver_parameters(linear_solver_parameters, default_linear_solver_parameters = None):
    """
    Return an expanded dictionary of linear solver parameters with all defaults
    explicitly specified. The optional default_linear_solver_parameters argument
    can be used to override global defaults.
    """
    # BUGFIX(idiom): the original used a mutable default argument ({}); None
    # is used as the sentinel instead, which is call-compatible.
    if default_linear_solver_parameters is None:
        default_linear_solver_parameters = {}
    if not isinstance(linear_solver_parameters, dict):
        raise InvalidArgumentException("linear_solver_parameters must be a dictionary")
    if not isinstance(default_linear_solver_parameters, dict):
        raise InvalidArgumentException("default_linear_solver_parameters must be a dictionary")
    linear_solver_parameters = apply_default_parameters(linear_solver_parameters, default_linear_solver_parameters)
    linear_solver_parameters = apply_default_parameters(linear_solver_parameters,
        {"linear_solver":"default",
         "preconditioner":"default",
         "lu_solver":dolfin.parameters["lu_solver"].to_dict(),
         "krylov_solver":dolfin.parameters["krylov_solver"].to_dict()
        })
    # LU-type solvers take no preconditioner/Krylov settings, and vice versa.
    if linear_solver_parameters["linear_solver"] in ["default", "lu"] or dolfin.has_lu_solver_method(linear_solver_parameters["linear_solver"]):
        del(linear_solver_parameters["preconditioner"])
        del(linear_solver_parameters["krylov_solver"])
    else:
        del(linear_solver_parameters["lu_solver"])
    return linear_solver_parameters
def is_empty_form(form):
    """
    Return whether the supplied form is "empty" (i.e. contains no terms).
    """
    if not isinstance(form, ufl.form.Form):
        raise InvalidArgumentException("form must be a Form")
    integrals = form.integrals()
    if len(integrals) == 0:
        return True
    # A form whose every integrand is the Zero constant is empty.
    if all(isinstance(integral.integrand(), ufl.constantvalue.Zero) for integral in integrals):
        return True
    # Fall back on the form data, which drops integrals that simplify away.
    return len(extract_form_data(copy.copy(form)).integral_data) == 0
|
'''Ethernet protocol decoder
'''
from __future__ import print_function, division
import ripyl
import ripyl.decode as decode
import ripyl.sigproc as sigp
import ripyl.streaming as stream
from ripyl.util.enum import Enum
from ripyl.util.bitops import split_bits, join_bits
from ripyl.manchester import manchester_encode, manchester_decode, ManchesterStates, diff_encode
from copy import copy
import itertools
from ripyl.util.eng import eng_si
# Mapping from EtherType field values (the length_type field of an
# Ethernet II frame, >= 0x600) to human-readable protocol names.
ethertypes = {
    0x0800: 'IPv4',
    0x0806: 'ARP',
    0x0842: 'Wake-on-LAN',
    0x22F3: 'IETF TRILL Protocol',
    0x6003: 'DECnet Phase IV',
    0x8035: 'Reverse Address Resolution Protocol',
    0x809B: 'AppleTalk',
    0x80F3: 'AppleTalk Address Resolution Protocol',
    0x8100: 'VLAN-tagged frame',
    0x8137: 'IPX',
    0x8138: 'IPX',
    0x8204: 'QNX Qnet',
    0x86DD: 'IPv6',
    0x8808: 'Ethernet flow control',
    0x8809: 'Slow Protocols (IEEE 802.3)',
    0x8819: 'CobraNet',
    0x8847: 'MPLS unicast',
    0x8848: 'MPLS multicast',
    0x8863: 'PPPoE Discovery Stage',
    0x8864: 'PPPoE Session Stage',
    0x8870: 'Jumbo Frame',
    0x887B: 'HomePlug 1.0 MME',
    0x888E: 'EAP over LAN (IEEE 802.1X)',
    0x8892: 'PROFINET',
    0x889A: 'HyperSCSI',
    0x88A2: 'ATA over Ethernet',
    0x88A4: 'EtherCAT Protocol',
    0x88A8: 'Provider Bridging',
    0x88AB: 'Ethernet Powerlink',
    0x88CC: 'LLDP',
    0x88CD: 'SERCOS III',
    0x88E1: 'HomePlug AV MME[citation needed]',
    0x88E3: 'Media Redundancy Protocol (IEC62439-2)',
    0x88E5: 'MAC security (IEEE 802.1AE)',
    0x88F7: 'Precision Time Protocol (IEEE 1588)',
    0x8902: 'IEEE 802.1ag Connectivity Fault Management (CFM)',
    0x8906: 'Fibre Channel over Ethernet (FCoE)',
    0x8914: 'FCoE Initialization Protocol',
    0x8915: 'RDMA over Converged Ethernet (RoCE)',
    0x892F: 'High-availability Seamless Redundancy (HSR)',
    0x9000: 'Ethernet Configuration Testing Protocol',
    0x9100: 'Q-in-Q',
    0xCAFE: 'Veritas Low Latency Transport'
}
class EthernetTag(object):
    '''Tag object representing 802.1Q tag'''
    def __init__(self, tpid, tci):
        self.tpid = tpid  # Tag Protocol Identifier (16 bits)
        self.tci = tci    # Tag Control Information (16 bits)

    def __repr__(self):
        tpid_hex = hex(self.tpid)
        tci_hex = hex(self.tci)
        return 'EthernetTag({}, {})'.format(tpid_hex, tci_hex)

    @property
    def pcp(self):
        '''Priority Code Point: top 3 bits of the TCI.'''
        return self.tci >> 13

    @property
    def dei(self):
        '''Drop Eligible Indicator: bit 12 of the TCI.'''
        return (self.tci >> 12) & 0x01

    @property
    def vid(self):
        '''VLAN identifier: low 12 bits of the TCI.'''
        return self.tci & 0xFFF

    @property
    def bytes(self):
        '''Tag as four bytes: TPID big-endian followed by TCI big-endian.'''
        return ((self.tpid >> 8) & 0xFF, self.tpid & 0xFF,
                (self.tci >> 8) & 0xFF, self.tci & 0xFF)

    def __eq__(self, other):
        return vars(self) == vars(other)

    def __ne__(self, other):
        return not (self == other)
class MACAddr(object):
    '''Ethernet MAC address'''
    def __init__(self, addr):
        '''
        addr (str or sequence of int)
            The address can be specified as in two formats: a list of int or a string. The string
            is a series of hex digits with optional colon separators on byte boundaries.

        Raises ValueError if the address does not contain 6 bytes.
        '''
        if isinstance(addr, str):
            if ':' in addr:
                self.bytes = [int(b, 16) for b in addr.split(':')]
            else:
                # Split an unseparated hex string into 2-digit byte chunks.
                # BUGFIX(compat): use range() instead of Python-2-only xrange();
                # on Python 2 the two are equivalent here.
                hex_bytes = [addr[i:i+2] for i in range(0, len(addr), 2)]
                self.bytes = [int(b, 16) for b in hex_bytes]
        else:
            self.bytes = addr

        if len(self.bytes) != 6:
            raise ValueError('Wrong size for Ethernet MAC address')

    def __getitem__(self, i):
        return self.bytes[i]

    def __len__(self):
        return len(self.bytes)

    def __str__(self):
        # Canonical colon-separated upper-case hex form, e.g. '01:23:45:67:89:AB'.
        return ':'.join('{:02X}'.format(b) for b in self.bytes)

    def __eq__(self, other):
        return vars(self) == vars(other)

    def __ne__(self, other):
        return not (self == other)
class EthernetFrame(object):
    '''Ethernet frame object'''
    # Ethernet II frame: length_type field >= 0x600 (type code)
    # 802.3 frame: length_type field < 0x600 (length code)
    # 802.3 SNAP frame: 802.3 frame + LLC field = 0xaaaa03
    def __init__(self, dest, source, data, length_type=None, tags=None, crc=None):
        '''
        dest (MACAddr, str, or sequence of int)
            The destination address of the frame.

        source (MACAddr, str, or sequence of int)
            The source address of the frame.

        data (sequence of int)
            The data for the frame. Padding is not necessary.

        length_type (int or None)
            The Ethertype / length field. Ethertypes should be >= 0x600.

        tags (sequence of EthernetTag or None)
            Optional sequence of 802.1Q tags to insert into frame.

        crc (int or None)
            The decoded CRC for the frame. Leave as None to generate CRC automatically.
        '''
        # Coerce address arguments into MACAddr objects.
        if not isinstance(dest, MACAddr):
            dest = MACAddr(dest)
        if not isinstance(source, MACAddr):
            source = MACAddr(source)
        self.dest = dest
        self.source = source
        self.tags = tags # 802.1Q and 802.1ad header
        self._length_type = length_type
        self.data = data
        self._crc = crc

    def __repr__(self):
        return 'EthernetFrame("{}", "{}", {}, {}, {}, {})'.format(self.dest, self.source, self.data, \
            hex(self.length_type), self.tags, hex(self.crc))

    @property
    def length_type(self):
        # Fall back to the payload length when no explicit Ethertype was given
        # (802.3-style length field).
        if self._length_type is None:
            return len(self.data)
        else:
            return self._length_type

    @length_type.setter
    def length_type(self, value):
        self._length_type = value & 0xFFFF

    @property
    def crc(self):
        # If no decoded CRC was supplied, reassemble the big-endian CRC from
        # the last four generated frame bytes.
        if self._crc is None:
            crc_bytes = self.bytes[-4:]
            crc = 0
            for b in crc_bytes:
                crc <<= 8
                crc += b
            return crc
        else:
            return self._crc

    @crc.setter
    def crc(self, value):
        self._crc = value

    def crc_is_valid(self, recv_crc=None):
        '''Check if a decoded CRC is valid.

        recv_crc (int or None)
            The decoded CRC to check against. If None, the CRC passed in the constructor is used.

        Returns True when the CRC is correct.
        '''
        if recv_crc is None:
            recv_crc = self._crc
        # Recompute the CRC from the generated frame bytes and compare.
        data_crc = 0
        for b in self.bytes[-4:]:
            data_crc <<= 8
            data_crc += b
        return recv_crc == data_crc

    @property
    def bytes(self):
        '''Get the bytes for this frame.

        Returns a series of bytes representing the header, payload, and CRC. This does not
        include the SOF and SFD sequence.
        '''
        tag_bytes = []
        if self.tags is not None:
            for t in self.tags:
                tag_bytes.extend(t.bytes)
        len_type_bytes = [self.length_type >> 8 & 0xFF, self.length_type & 0xFF]

        # Add padding for short payloads
        # (a 4-byte 802.1Q tag reduces the minimum payload from 46 to 42 bytes).
        pad_bytes = []
        min_data_size = 42 if len(tag_bytes) >= 4 else 46
        if len(self.data) < min_data_size:
            pad_bytes = [0] * (min_data_size - len(self.data))

        check_bytes = self.dest.bytes + self.source.bytes + tag_bytes + len_type_bytes + self.data + pad_bytes
        crc = table_ethernet_crc32(check_bytes)
        # Serialize the CRC little-endian (least significant byte first).
        # NOTE: xrange is Python-2-only; this module targets Python 2.
        crc_bytes = [0] * 4
        for i in xrange(4):
            crc_bytes[i] = crc & 0xFF
            crc >>= 8
        return check_bytes + crc_bytes

    def bit_stream(self):
        '''Get the sequence of raw bits for the frame.

        This includes the SOF and SFD at the start and the IDL phase at end of frame.
        '''
        # Preamble (7 x 0x55) plus start-of-frame delimiter (0xD5), MSB first.
        for b in [0x55] * 7 + [0xD5]: # SOF + SFD
            for bit in reversed(split_bits(b, 8)):
                yield bit
        for b in self.bytes:
            for bit in reversed(split_bits(b, 8)):
                yield bit
        # IDL = high for 3 bit times -> 6 half-bit times
        for bit in [ManchesterStates.High] * 6:
            yield bit
        yield ManchesterStates.Idle

    def __eq__(self, other):
        if not isinstance(other, EthernetFrame): return False
        # Compare on the *effective* crc and length_type values so that a
        # frame with an explicit CRC equals one whose CRC is auto-generated.
        s_vars = copy(vars(self))
        s_vars['_crc'] = self.crc
        s_vars['_length_type'] = self.length_type
        o_vars = copy(vars(other))
        o_vars['_crc'] = other.crc
        o_vars['_length_type'] = other.length_type
        #print('## s_vars:')
        #for k in sorted(s_vars.iterkeys()):
        #    print('  {}: {}'.format(k, s_vars[k]))
        #print('## o_vars:')
        #for k in sorted(o_vars.iterkeys()):
        #    print('  {}: {}'.format(k, o_vars[k]))
        return s_vars == o_vars

    def __ne__(self, other):
        return not (self == other)
class EthernetLinkCode(object):
    '''Representation of the pulses in a 100Mbps Ethernet autonegotiation'''
    def __init__(self, selector, tech_ability, rem_fault, ack, next_page):
        '''
        selector (int)
            Identify which standard is in use (5 bits).

        tech_ability (int)
            Technology ability. Identifies possible modes of operation (8 bits).

        rem_fault (int or bool)
            Flag indicating a link failure.

        ack (int or bool)
            Flag to indicate reception of the base link code word.

        next_page (int or bool)
            Flag to indicate intention to send other link code words.
        '''
        self.selector = selector & 0x1F
        self.tech_ability = tech_ability & 0xFF
        self.rem_fault = int(bool(rem_fault))
        self.ack = int(bool(ack))
        self.next_page = int(bool(next_page))

    @property
    def word(self):
        '''Pack the fields into the 16-bit base link code word.'''
        code = self.selector
        # Shift each field in, widest first: 5 + 8 + 1 + 1 + 1 = 16 bits.
        for field, width in ((self.tech_ability, 8),
                             (self.rem_fault, 1),
                             (self.ack, 1),
                             (self.next_page, 1)):
            code = (code << width) | field
        return code
class EthernetLinkTest(object):
    '''An link test pulse or auto-negotiation pulse stream'''
    def __init__(self, link_code=None):
        '''
        link_code (int or None)
            When None, this object represents a single link test pulse.
            When an int, this object represents a series of pulses for the link code
        '''
        self.link_code = link_code

    def edges(self, bit_period):
        '''Get the edges for this object

        bit_period (float)
            The period of a single bit.

        Returns a list of (float, int) edges representing the pulse(s) for this object
        '''
        if self.link_code is None:
            # Single link test pulse: one bit-period high, then idle.
            return [(0.0, 1), (bit_period, ManchesterStates.Idle), (2*bit_period, ManchesterStates.Idle)]
        else:
            #print('## code word:', '{:016b}'.format(self.link_code.word))
            # Fast Link Pulse burst: a framing pulse every 125us, with an
            # extra data pulse 62.5us later whenever the code bit is 1.
            code_bits = reversed(split_bits(self.link_code.word, 16))
            edges = []
            t = 0.0
            for b in code_bits:
                # Framing pulse for this bit position.
                edges.extend([(t, 1), (t + bit_period, ManchesterStates.Idle)])
                if b == 1:
                    t += 62.5e-6
                    # Data pulse midway between framing pulses marks a 1.
                    edges.extend([(t, 1), (t + bit_period, ManchesterStates.Idle)])
                    t += 62.5e-6
                else: # 0
                    t += 125.0e-6
            # Last framing pulse
            edges.extend([(t, 1), (t + bit_period, ManchesterStates.Idle), (t + 2*bit_period, ManchesterStates.Idle)])
            return edges
class EthernetStreamStatus(Enum):
    '''Enumeration for EthernetStreamFrame status codes'''
    # Distinct error code above the generic stream Error value.
    CRCError = stream.StreamStatus.Error + 1
class EthernetStreamFrame(stream.StreamSegment):
    '''Encapsulates an EthernetFrame object into a StreamSegment'''
    def __init__(self, bounds, frame, status=stream.StreamStatus.Ok):
        # bounds: (start, end) times of the frame; frame: EthernetFrame payload.
        stream.StreamSegment.__init__(self, bounds, data=frame, status=status)
        self.kind = 'Ethernet frame'
        # Hidden top-level annotation; sub-elements carry visible annotations.
        self.annotate('frame', {}, stream.AnnotationFormat.Hidden)
def ethernet_decode(rxtx, tag_ethertypes=None, logic_levels=None, stream_type=stream.StreamType.Samples):
    '''Decode an ethernet data stream

    This is a generator function that can be used in a pipeline of waveform
    procesing operations.

    Sample streams are a sequence of SampleChunk Objects. Edge streams are a sequence
    of 2-tuples of (time, int) pairs. The type of stream is identified by the stream_type
    parameter. Sample streams will be analyzed to find edge transitions representing
    0 and 1 logic states of the waveforms. With sample streams, an initial block of data
    is consumed to determine the most likely logic levels in the signal.

    rxtx (iterable of SampleChunk objects or (float, int) pairs)
        A sample stream or edge stream representing a differential ethernet signal.

    tag_ethertypes (sequence of int or None)
        The ethertypes to use for identifying 802.1Q tags. Default is 0x8100, 0x88a8, and 0x9100.

    logic_levels ((float, float) or None)
        Optional pair that indicates (low, high) logic levels of the sample
        stream. When present, auto level detection is disabled. This has no effect on
        edge streams.

    stream_type (streaming.StreamType)
        A StreamType value indicating that the can parameter represents either Samples
        or Edges

    Yields a series of EthernetStreamFrame objects. Each frame contains subrecords marking the location
    of sub-elements within the frame. CRC errors are recorded as an error status in their
    respective subrecords.

    Raises AutoLevelError if stream_type = Samples and the logic levels cannot
    be determined.

    Raises StreamError if ethernet speed cannot be determined.
    '''
    if stream_type == stream.StreamType.Samples:
        # Convert raw samples into a multi-level edge stream first.
        if logic_levels is None:
            s_rxtx_it, logic_levels = decode.check_logic_levels(rxtx)
        else:
            s_rxtx_it = rxtx
        hyst_thresholds = decode.gen_hyst_thresholds(logic_levels, expand=3, hysteresis=0.05)
        rxtx_it = decode.find_multi_edges(s_rxtx_it, hyst_thresholds)
        #print('## logic levels:', logic_levels, hyst_thresholds)
    else: # The streams are already lists of edges
        rxtx_it = rxtx

    # Detect speed of ethernet
    buf_edges = 150
    min_edges = 100
    # tee off an iterator to determine speed class
    rxtx_it, speed_check_it = itertools.tee(rxtx_it)
    # Remove Diff-0's #FIX: need to modify to work with 100Mb and 1Gb Enet
    speed_check_it = (edge for edge in speed_check_it if edge[1] != 0)
    symbol_rate_edges = itertools.islice(speed_check_it, buf_edges)

    # We need to ensure that we can pull out enough edges from the iterator slice
    # Just consume them all for a count
    sre_list = list(symbol_rate_edges)
    if len(sre_list) < min_edges:
        raise stream.StreamError('Unable to determine Ethernet speed (not enough edge transitions)')
    del speed_check_it

    #print('## sym. rate edges len:', len(sre_list))
    raw_symbol_rate = decode.find_symbol_rate(iter(sre_list), spectra=2)
    #print('### raw sym rate:', raw_symbol_rate)

    # For 10baseT (10MHz Manchester) the symbol rate will be 20MHz
    # For 100BaseTX the symbol rate will be 31.25MHz?
    if raw_symbol_rate < 25e6:
        bit_period = 1.0 / 10.0e6
    else:
        raise stream.StreamError('Unsupported Ethernet speed: {}'.format(eng_si(raw_symbol_rate, 'Hz')))

    if stream_type == stream.StreamType.Samples:
        # We needed the bus speed before we could properly strip just
        # the anomalous SE0s
        min_se0 = bit_period * 0.2
        rxtx_it = decode.remove_transitional_states(rxtx_it, min_se0)

    # Manchester-decode the edge stream and assemble frames.
    mstates = manchester_decode(rxtx_it, bit_period)

    for r in _ethernet_generic_decode(mstates, tag_ethertypes=tag_ethertypes):
        yield r
def _ethernet_generic_decode(mstates, tag_ethertypes):
    '''Decode Manchester states into ethernet frames

    mstates is an iterator of (time, state) pairs produced by manchester_decode().
    Yields StreamSegment objects (kind='LTP') for link test pulses and
    EthernetStreamFrame objects for complete frames.
    '''
    while True:
        try:
            cur_edge = next(mstates)
        except StopIteration:
            break

        if cur_edge[1] == ManchesterStates.High:
            # Possible link test pulse
            ltp_start = cur_edge[0]
            while True:
                try:
                    cur_edge = next(mstates)
                except StopIteration:
                    break
                if cur_edge[1] != ManchesterStates.High:
                    if 90.0e-9 < cur_edge[0] - ltp_start < 110.0e-9: # Pulse should be nominally 100ns wide
                        # Found a LTP
                        ltp = stream.StreamSegment((ltp_start, cur_edge[0]), kind='LTP')
                        ltp.annotate('misc', {})
                        yield ltp
                    break
            continue
        elif cur_edge[1] not in (0, 1):
            # Not a data bit; keep scanning for the start of a frame
            continue

        frame_start = cur_edge[0]
        #print('## frame start:', frame_start)

        # Get preamble bits
        get_preamble = True # NOTE(review): flag is never read; appears vestigial
        prev_bit = cur_edge[1]
        preamble_count = 7*8 + 6 + 1

        # Get alternating 1's and 0's until we see a break in the pattern
        # that indicates we've reached the SFD.
        while preamble_count > 0:
            try:
                cur_edge = next(mstates)
            except StopIteration:
                break
            if cur_edge[1] != 1 - prev_bit:
                break
            prev_bit = cur_edge[1]
            preamble_count -= 1

        # Verify we have the SFD
        if not (prev_bit == 1 and cur_edge[1] == 1):
            # Restart search for a frame
            continue

        # Move to first bit of frame header
        try:
            cur_edge = next(mstates)
        except StopIteration:
            break

        header_start = cur_edge[0]
        frame_bits = []
        bit_start_times = []
        # Get all frame bits
        while cur_edge[1] in (0, 1):
            frame_bits.append(cur_edge[1])
            bit_start_times.append(cur_edge[0])
            try:
                cur_edge = next(mstates)
            except StopIteration:
                break
        crc_end_time = cur_edge[0]

        # Find end of frame
        while True:
            try:
                cur_edge = next(mstates)
            except StopIteration:
                break
            if cur_edge[1] == ManchesterStates.Idle:
                break
        end_time = cur_edge[0]
        #print('## got frame bits:', len(frame_bits))

        # Verify we have a multiple of 8 bits
        if len(frame_bits) % 8 != 0:
            continue

        # Verify we have the minimum of 64 bytes for a frame
        if len(frame_bits) < 64 * 8:
            continue

        # Convert bits to bytes; each octet is transmitted LSB-first, hence reversed()
        frame_bytes = []
        for i in xrange(0, len(frame_bits), 8):
            frame_bytes.append(join_bits(reversed(frame_bits[i:i+8])))
        byte_start_times = [t for t in bit_start_times[::8]]
        #print('## got bytes:', ['{:02x}'.format(b) for b in frame_bytes])

        # Create frame object
        if tag_ethertypes is None:
            tag_ethertypes = [0x8100, 0x88a8, 0x9100]

        # Collect any 802.1Q tags that precede the length/type field
        tags = []
        lt_start = 12
        length_type = frame_bytes[lt_start] * 256 + frame_bytes[lt_start + 1]
        while length_type in tag_ethertypes: # This is a tag
            tpid = length_type
            tci = frame_bytes[lt_start + 2] * 256 + frame_bytes[lt_start + 3]
            tags.append(EthernetTag(tpid, tci))
            lt_start += 4
            length_type = frame_bytes[lt_start] * 256 + frame_bytes[lt_start + 1]
        if len(tags) == 0: # No tags
            tags = None

        data_bytes = frame_bytes[lt_start+2:-4]

        # Last four bytes are the FCS, assembled big-endian
        crc = 0
        for b in frame_bytes[-4:]:
            crc <<= 8
            crc += b

        ef = EthernetFrame(frame_bytes[0:6], frame_bytes[6:12], tags=tags, length_type=length_type, data=data_bytes, crc=crc)
        # NOTE(review): this 'status' is recomputed below for the CRC subrecord
        # and is not passed to EthernetStreamFrame here -- confirm intended.
        status = EthernetStreamStatus.CRCError if not ef.crc_is_valid() else stream.StreamStatus.Ok
        sf = EthernetStreamFrame((frame_start, end_time), ef)

        # Annotate fields
        bounds = (byte_start_times[0], byte_start_times[6])
        sf.subrecords.append(stream.StreamSegment(bounds, str(ef.dest), kind='dest'))
        sf.subrecords[-1].annotate('addr', {'_bits':48}, stream.AnnotationFormat.Small)

        bounds = (byte_start_times[6], byte_start_times[12])
        sf.subrecords.append(stream.StreamSegment(bounds, str(ef.source), kind='source'))
        sf.subrecords[-1].annotate('addr', {'_bits':48}, stream.AnnotationFormat.Small)

        # Tags
        if tags is not None:
            for i, t in enumerate(tags):
                bounds = (byte_start_times[12 + 4*i], byte_start_times[12 + 4*i + 4])
                sf.subrecords.append(stream.StreamSegment(bounds, 'tag', kind='tag'))
                sf.subrecords[-1].annotate('ctrl', {}, stream.AnnotationFormat.String)

        # Ethertype / length
        bounds = (byte_start_times[lt_start], byte_start_times[lt_start+2])
        length_type = ef.length_type
        if length_type >= 0x600:
            # Values >= 0x600 identify a protocol rather than a length
            kind = 'ethertype'
            if length_type in ethertypes:
                value = ethertypes[length_type]
            else:
                value = 'Unknown: {:04X}'.format(length_type)
            text_format = stream.AnnotationFormat.Small
        else:
            kind = 'length'
            value = length_type
            text_format = stream.AnnotationFormat.Int
        sf.subrecords.append(stream.StreamSegment(bounds, value, kind=kind))
        sf.subrecords[-1].annotate('ctrl', {'_bits':16}, text_format)

        # Data
        bounds = (byte_start_times[lt_start+2], byte_start_times[-4])
        sf.subrecords.append(stream.StreamSegment(bounds, 'Payload, {} bytes'.format(len(data_bytes)), kind='data'))
        sf.subrecords[-1].annotate('data', {}, stream.AnnotationFormat.String)

        # CRC
        bounds = (byte_start_times[-4], crc_end_time)
        status = EthernetStreamStatus.CRCError if not ef.crc_is_valid() else stream.StreamStatus.Ok
        #print('## CRC bytes:', [hex(b) for b in frame_bytes[-4:]])
        sf.subrecords.append(stream.StreamSegment(bounds, frame_bytes[-4:], kind='CRC', status=status))
        sf.subrecords[-1].annotate('check', {}, stream.AnnotationFormat.Hex)

        yield sf
def add_overshoot(bits, duration, overshoot=0.75, undershoot=0.8):
    '''Add simulated overshoot to an edge stream

    This function is intended to simulate the overshoot behavior produced by the
    output drivers and magnetics of 10Base-T ethernet. This is done crudely by scaling
    the edge stream values by the overshoot and undershoot factors. This results in a
    non-standard edge stream that can be processed by synth_wave() to create a
    realistic sampled waveform but is otherwise not useful.

    bits (iterable of (float, int))
        A differential edge stream to add overshoot to.

    duration (float)
        The amount of time to add overshoot after each edge transition.

    overshoot (float)
        The fraction of a high-level that the overshoot extends past.

    undershoot (float)
        The fraction of the overshoot that the undershoot extends past. Only used for
        transitions to idle.

    Yields an edge stream with overshoot transitions inserted.
    '''
    # undershoot is specified relative to the overshoot magnitude
    undershoot = undershoot * overshoot
    # convert the overshoot fraction into a multiplicative scale factor
    overshoot = 1 + overshoot
    prev_bit = None
    prev_fall = False
    for b in bits:
        if prev_bit is not None:
            if b[0] - prev_bit[0] > duration:
                # Interval is long enough: emit the scaled overshoot level
                # followed by a settling edge back to the nominal level.
                if prev_bit[1] != 0:
                    yield (prev_bit[0], prev_bit[1] * overshoot)
                    yield (prev_bit[0] + duration, prev_bit[1])
                else: # 0, generate undershoot before idle state
                    if prev_fall:
                        yield (prev_bit[0], -undershoot)
                        yield (prev_bit[0] + duration, prev_bit[1])
                    else:
                        yield prev_bit
            else: # Bit time is shorter than overshoot duration
                yield (prev_bit[0], prev_bit[1] * overshoot)
            prev_fall = True if b[1] < prev_bit[1] else False
        prev_bit = b
    # NOTE(review): yields None when 'bits' is empty -- callers appear to
    # always supply a non-empty stream; confirm.
    yield prev_bit
def ethernet_synth(frames, overshoot=None, idle_start=0.0, frame_interval=0.0, idle_end=0.0):
    '''Generate synthesized Ethernet frames

    frames (sequence of EthernetFrame)
        Frames to be synthesized.

    overshoot (None or (float, float))
        When a pair of floats is provided these indicate the overshoot parameters to add
        to the waveform. The first number is the fraction of a bit period that the overshoot
        covers. This should be less than 0.5. The second number is the fraction of a high-level
        that the overshoot extends past. When used, the edge stream must be converted to a
        sample stream with low-pass filtering by synth_wave() before it accurately represents
        overshoot.

    idle_start (float)
        The amount of idle time before the transmission of frames begins.

    frame_interval (float)
        The amount of time between frames.

    idle_end (float)
        The amount of idle time after the last frame.

    Yields an edge stream of (float, int) pairs. The first element in the iterator
      is the initial state of the stream.
    '''
    bit_period = 1.0 / 10.0e6 #FIX set speed
    frame_its = []
    for i, frame in enumerate(frames):
        # idle padding only applies to the first and last frames
        istart = idle_start if i == 0 else 0.0
        iend = idle_end if i == len(frames)-1 else bit_period
        if hasattr(frame, 'edges'): # Link pulse
            edges = iter(frame.edges(bit_period))
        else: # A proper frame
            edges = manchester_encode(frame.bit_stream(), bit_period, idle_start=istart, idle_end=iend)
        if overshoot is not None and len(overshoot) == 2:
            # NOTE(review): the docstring describes overshoot[0] as the bit-period
            # fraction and overshoot[1] as the level fraction, but this call uses
            # overshoot[1] for the duration and overshoot[0] for the level --
            # confirm the intended ordering.
            frame_its.append(add_overshoot(diff_encode(edges), bit_period * overshoot[1], overshoot[0]))
        else:
            frame_its.append(diff_encode(edges))

    return sigp.chain_edges(frame_interval, *frame_its)
def _crc32_table_gen():
poly = 0xedb88320
mask = 0xffffffff
tbl = [0] * 256
for i in xrange(len(tbl)):
sreg = i
for j in xrange(8):
if sreg & 0x01 != 0:
sreg = poly ^ (sreg >> 1)
else:
sreg >>= 1;
tbl[i] = sreg & mask
return tbl
_crc32_table = _crc32_table_gen()
def table_ethernet_crc32(d):
'''Calculate Ethernet CRC-32 on data
This is a table-based byte-wise implementation
d (sequence of int)
Array of integers representing bytes
Returns an integer with the CRC value.
'''
sreg = 0xffffffff
mask = 0xffffffff
tbl = _crc32_table
for byte in d:
tidx = (sreg ^ byte) & 0xff
sreg = (sreg >> 8) ^ tbl[tidx] & mask
return sreg ^ mask
|
from django.urls import path, re_path, include, reverse
from django.utils.translation import gettext_lazy as _
from pytigon_lib.schviews import generic_table_start, gen_tab_action, gen_row_action
from django.views.generic import TemplateView
from . import views
# URL patterns for the schsimplescripts app: one row action plus an explicit
# route for running a script by name.
urlpatterns = [
    gen_row_action("Scripts", "run", views.run),
    re_path(
        # raw string so the \w escape is not interpreted by Python
        # (non-raw "\w" emits an invalid-escape warning on Python 3)
        r"run/(?P<script_name>\w+)/$",
        views.run_script_by_name,
        {},
        name="schsimplescripts_run_script_by_name",
    ),
]

# Register the generic table views (list/edit/actions) for the Scripts table.
gen = generic_table_start(urlpatterns, "schsimplescripts", views)
gen.standard("Scripts", _("Scripts"), _("Scripts"))
|
import serial
import sys
# Serial line configuration: 115200 baud; PARITY=True selects even parity
# (PARITY_NONE otherwise) in configure_serial() below.
BAUDRATE = 115200
PARITY = True
def configure_serial(serial_port):
    """Open *serial_port* using the module-level line settings.

    Uses BAUDRATE and PARITY from module scope; 8 data bits, two stop bits
    and a 1 second read timeout. Returns the opened serial.Serial object.
    """
    parity_mode = serial.PARITY_EVEN if PARITY else serial.PARITY_NONE
    settings = {
        'port': serial_port,
        'baudrate': BAUDRATE,
        'parity': parity_mode,
        'stopbits': serial.STOPBITS_TWO,
        'bytesize': serial.EIGHTBITS,
        'timeout': 1,
    }
    return serial.Serial(**settings)
if __name__ == "__main__":
if len(sys.argv)!=2:
print "Give serial port address as a command line argument."
exit()
try:
ser = configure_serial(sys.argv[1])
if not ser.isOpen():
raise Exception
except:
print 'Opening serial port {} failed.'.format(sys.argv[1])
raise
exit()
while True:
c = raw_input()
for i in c:
ser.write(i)
print ser.read(1)
|
r"""
Plotting module file for the "tictactoe" game. It defines the plotting functions.
Notes
-----
"""
import doctest
import unittest
from matplotlib.patches import Polygon, Rectangle, Wedge
from dstauffman2.games.tictactoe.constants import COLOR, PLAYER, SIZES
def plot_cur_move(ax, move):
    r"""
    Plots the piece corresponding the current players move.

    Parameters
    ----------
    ax : object
        Axis to plot on
    move : int
        current player to move

    Examples
    --------
    >>> from dstauffman2.games.tictactoe import plot_cur_move, PLAYER
    >>> import matplotlib.pyplot as plt
    >>> plt.ioff()
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> _ = ax.set_xlim(-0.5, 0.5)
    >>> _ = ax.set_ylim(-0.5, 0.5)
    >>> plot_cur_move(ax, PLAYER['x'])
    >>> plt.show(block=False) # doctest: +SKIP
    >>> plt.close(fig)
    """
    # background square behind the piece
    half_box = SIZES['square'] / 2
    background = Rectangle((-half_box, -half_box), SIZES['square'], SIZES['square'],
        facecolor=COLOR['board'], edgecolor='k')
    ax.add_patch(background)
    # draw the piece for whoever moves next; 'none' draws nothing
    if move == PLAYER['none']:
        pass
    elif move == PLAYER['x']:
        plot_piece(ax, 0, 0, SIZES['piece'], COLOR['x'], shape=PLAYER['x'])
    elif move == PLAYER['o']:
        plot_piece(ax, 0, 0, SIZES['piece'], COLOR['o'], shape=PLAYER['o'])
    else:
        raise ValueError('Unexpected player.')
    # turn the axes back off (they get reinitialized at some point)
    ax.set_axis_off()
def plot_piece(ax, vc, hc, size, color, shape, thick=True):
    r"""
    Plots a piece on the board.

    Parameters
    ----------
    ax : object
        Axis to plot on
    vc : float
        Vertical center (Y-axis or board row)
    hc : float
        Horizontal center (X-axis or board column)
    size : float
        size
    color : 3-tuple
        RGB triplet color
    shape : int
        type of piece to plot
    thick : bool, optional
        whether to draw with thick (0.2) or thin (0.1) lines, default is True

    Returns
    -------
    piece : list of matplotlib patches
        The patches that were added to the axis

    Examples
    --------
    >>> from dstauffman2.games.tictactoe import plot_piece, PLAYER
    >>> import matplotlib.pyplot as plt
    >>> plt.ioff()
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> _ = ax.set_xlim(-0.5, 2.5)
    >>> _ = ax.set_ylim(-0.5, 2.5)
    >>> ax.invert_yaxis()
    >>> _ = plot_piece(ax, 1, 1, 0.9, (0, 0, 1), PLAYER['x'])
    >>> plt.show(block=False) # doctest: +SKIP
    >>> plt.close(fig)
    """
    # line width in normalized units
    if thick:
        width = 0.2
    else:
        width = 0.1
    # corner coordinates for the two crossing bars of an X (also used by 'draw')
    if shape != PLAYER['o']:
        coords1 = [(c[0]*size/2+hc, c[1]*size/2+vc) for c in [(1, 1), (-1+width, -1), (-1, -1), (1-width, 1), (1, 1)]]
        coords2 = [(c[0]*size/2+hc, c[1]*size/2+vc) for c in [(-1, 1), (-1+width, 1), (1, -1), (1-width, -1), (-1, 1)]]
    if shape == PLAYER['o']:
        # plot an O
        patch1 = Wedge((hc, vc), size/2, 0, 360, width=size*width/2, facecolor=color, edgecolor='k')
        piece = [patch1]
    elif shape == PLAYER['x']:
        # plot an X (a stray no-op `ax` expression statement was removed here)
        patch1 = Polygon(coords1, True, facecolor=color, edgecolor='k')
        patch2 = Polygon(coords2, True, facecolor=color, edgecolor='k')
        piece = [patch1, patch2]
    elif shape == PLAYER['draw']:
        # plot a combined O and X
        patch1 = Wedge((hc, vc), size/2, 0, 360, width=size*width/2, facecolor=color, edgecolor='k')
        patch2 = Polygon(coords1, True, facecolor=color, edgecolor='k')
        patch3 = Polygon(coords2, True, facecolor=color, edgecolor='k')
        piece = [patch1, patch2, patch3]
    else:
        raise ValueError('Unexpected shape.')
    # plot piece
    for this_patch in piece:
        ax.add_patch(this_patch)
    return piece
def plot_board(ax, board):
    r"""
    Plots the board (and the current player move).

    Parameters
    ----------
    ax : object
        Axis to plot on
    board : 2D int ndarray
        Board position

    Examples
    --------
    >>> from dstauffman2.games.tictactoe import plot_board, PLAYER
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> plt.ioff()
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111, aspect='equal')
    >>> _ = ax.set_xlim(-0.5, 2.5)
    >>> _ = ax.set_ylim(-0.5, 2.5)
    >>> ax.invert_yaxis()
    >>> board = np.full((3, 3), PLAYER['none'], dtype=int)
    >>> board[0, 0:2] = PLAYER['x']
    >>> plot_board(ax, board)
    >>> plt.show(block=False) # doctest: +SKIP
    >>> plt.close(fig)
    """
    # get axes limits
    (m, n) = board.shape
    s = SIZES['square']/2
    # NOTE(review): m (rows) sets the X extent and n (columns) the Y extent;
    # these coincide for square boards -- confirm for non-square ones.
    xmin = 0 - s
    xmax = m - 1 + s
    ymin = 0 - s
    ymax = n - 1 + s
    # fill background
    # NOTE(review): with s = 0.5 the anchor (-xmin-1, -ymin-1) equals
    # (xmin, ymin) = (-0.5, -0.5); the -1 offsets only differ for other
    # square sizes -- confirm intended.
    ax.add_patch(Rectangle((-xmin-1, -ymin-1), xmax-xmin, ymax-ymin, facecolor=COLOR['board'], \
        edgecolor=None))
    # draw minor vertical grid lines (constant x)
    ax.plot([1-s, 1-s], [ymin, ymax], color=COLOR['edge'], linewidth=2)
    ax.plot([2-s, 2-s], [ymin, ymax], color=COLOR['edge'], linewidth=2)
    # draw minor horizontal grid lines (constant y)
    ax.plot([xmin, xmax], [1-s, 1-s], color=COLOR['edge'], linewidth=2)
    ax.plot([xmin, xmax], [2-s, 2-s], color=COLOR['edge'], linewidth=2)
    # loop through and place pieces
    for i in range(m):
        for j in range(n):
            if board[i, j] == PLAYER['none']:
                pass
            elif board[i, j] == PLAYER['o']:
                plot_piece(ax, i, j, SIZES['piece'], COLOR['o'], PLAYER['o'])
            elif board[i, j] == PLAYER['x']:
                plot_piece(ax, i, j, SIZES['piece'], COLOR['x'], PLAYER['x'])
            else:
                raise ValueError('Bad board position.')
def plot_win(ax, mask, board):
    r"""
    Plots the winning pieces in red.

    Parameters
    ----------
    ax : object
        Axis to plot on
    mask : 2D bool ndarray
        Mask for which squares to plot the win
    board : 2D int ndarray
        Board position

    Examples
    --------
    >>> from dstauffman2.games.tictactoe import plot_win, PLAYER
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> plt.ioff()
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111, aspect='equal')
    >>> _ = ax.set_xlim(-0.5, 2.5)
    >>> _ = ax.set_ylim(-0.5, 2.5)
    >>> ax.invert_yaxis()
    >>> mask = np.zeros((3, 3), dtype=bool)
    >>> mask[0, 0:2] = True
    >>> board = np.full((3, 3), PLAYER['none'], dtype=int)
    >>> board[0, 0:2] = PLAYER['x']
    >>> plot_win(ax, mask, board)
    >>> plt.show(block=False) # doctest: +SKIP
    >>> plt.close(fig)
    """
    num_rows, num_cols = mask.shape
    # overlay a thin 'win' colored piece on every masked square
    for row in range(num_rows):
        for col in range(num_cols):
            if not mask[row, col]:
                continue
            plot_piece(ax, row, col, SIZES['piece'], COLOR['win'], board[row, col], thick=False)
def plot_possible_win(ax, o_moves, x_moves):
    r"""
    Plots the possible wins on the board.

    Parameters
    ----------
    ax : object
        Axis to plot on
    o_moves : list of moves
        Candidate moves for the O player, best (highest power) move first
    x_moves : list of moves
        Candidate moves for the X player, best (highest power) move first

    Notes
    -----
    Draws nothing for a player whose move list is empty (previously this
    raised an IndexError).

    Examples
    --------
    >>> from dstauffman2.games.tictactoe import plot_possible_win, find_moves
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> plt.ioff()
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111, aspect='equal')
    >>> _ = ax.set_xlim(-0.5, 2.5)
    >>> _ = ax.set_ylim(-0.5, 2.5)
    >>> ax.invert_yaxis()
    >>> board = np.array([[1, 1, 0], [0, 0, 0], [0, 0, 0]], dtype=int)
    >>> (o_moves, x_moves) = find_moves(board)
    >>> plot_possible_win(ax, o_moves, x_moves)
    >>> plt.show(block=False) # doctest: +SKIP
    >>> plt.close(fig)
    """
    # find the set of moves tied with the best move for each player
    # (assumes each list is sorted with the highest power first)
    pos_o = set()
    if o_moves:
        best_power = o_moves[0].power
        pos_o = {move for move in o_moves if move.power >= best_power}
    pos_x = set()
    if x_moves:
        best_power = x_moves[0].power
        pos_x = {move for move in x_moves if move.power >= best_power}
    # find intersecting positions
    pos_both = pos_o & pos_x
    # plot the whole pieces (set difference replaces the equivalent ^ on a subset)
    for pos in pos_o - pos_both:
        plot_piece(ax, pos.row, pos.column, SIZES['piece'], COLOR['win_o'], PLAYER['o'], thick=False)
    for pos in pos_x - pos_both:
        plot_piece(ax, pos.row, pos.column, SIZES['piece'], COLOR['win_x'], PLAYER['x'], thick=False)
    # plot the pieces that would win for either player
    for pos in pos_both:
        plot_piece(ax, pos.row, pos.column, SIZES['piece'], COLOR['win_ox'], PLAYER['draw'], thick=False)
def plot_powers(ax, board, o_moves, x_moves):
    r"""
    Plots the powers of each move visually on the board.

    Note: `board` is accepted but not used by this function; the move lists
    carry the positions and powers that are drawn.

    Examples
    --------
    >>> from dstauffman2.games.tictactoe import plot_powers, find_moves, plot_board
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> plt.ioff()
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111, aspect='equal')
    >>> _ = ax.set_xlim(-0.5, 2.5)
    >>> _ = ax.set_ylim(-0.5, 2.5)
    >>> ax.invert_yaxis()
    >>> board = np.array([[-1, 0, 0], [0, 1, 0], [0, 1, 0]], dtype=int)
    >>> plot_board(ax, board)
    >>> (o_moves, x_moves) = find_moves(board)
    >>> plot_powers(ax, board, o_moves, x_moves)
    >>> plt.show(block=False) # doctest: +SKIP
    >>> plt.close(fig)
    """
    shared_opts = dict(xycoords='data', verticalalignment='center', fontsize=15)
    # O powers: blue, anchored left, offset to (col-0.4, row-0.4)
    for move in o_moves:
        ax.annotate('{}'.format(move.power), xy=(move.column - 0.4, move.row - 0.4),
            horizontalalignment='left', color='b', **shared_opts)
    # X powers: black, anchored right, offset to (col+0.4, row+0.4)
    for move in x_moves:
        ax.annotate('{}'.format(move.power), xy=(move.column + 0.4, move.row + 0.4),
            horizontalalignment='right', color='k', **shared_opts)
if __name__ == '__main__':
    # run the unit tests for this module, then its doctests
    unittest.main(module='dstauffman2.games.tictactoe.tests.test_plotting', exit=False)
    doctest.testmod(verbose=False)
|
import copy, re
from tools.commons import enum, UsageError, OrderedDict
from tools.metadata import getArguments, getIterators
from tools.patterns import regexPatterns
from machinery.commons import conversionOptions, \
getSymbolAccessStringAndRemainder, \
implement, \
replaceEarlyExits
from symbol import DeclarationType, FrameworkArray, frameworkArrayName, limitLength, uniqueIdentifier
# Classification of declaration regions used when rendering imports and
# declarations: module level, kernel-calling routines, and everything else.
RegionType = enum(
    "MODULE_DECLARATION",
    "KERNEL_CALLER_DECLARATION",
    "OTHER"
)
def implementSymbolAccessStringAndRemainder(
    line,
    suffix,
    symbol,
    iterators=None,
    parallelRegionTemplate=None,
    callee=None,
    useDeviceVersionIfAvailable=True
):
    '''Return (symbolAccessString, remainder) for 'symbol' as accessed on 'line'.

    line: the source line being rewritten
    suffix: the text following the symbol on the line
    iterators: parallel-domain iterator names; None means no iterators (the
        previous mutable [] default argument has been replaced)
    parallelRegionTemplate, callee: context for device/kernel access rewriting
    useDeviceVersionIfAvailable: prefer the device copy of the symbol

    Raises UsageError, augmented with the offending line, on failure.
    '''
    if iterators is None:
        iterators = []  # avoid the shared mutable default argument
    isPointerAssignment = regexPatterns.pointerAssignmentPattern.match(line) is not None
    try:
        symbolAccessString, remainder = getSymbolAccessStringAndRemainder(
            symbol,
            iterators,
            parallelRegionTemplate,
            suffix,
            callee,
            isPointerAssignment,
            useDeviceVersionIfAvailable=useDeviceVersionIfAvailable
        )
    except UsageError as e:
        raise UsageError("%s; Print of Line: %s" %(str(e), line))
    return symbolAccessString, remainder
def regionWithInertCode(routine, codeLines):
    '''Create a plain Region preloaded with 'codeLines' verbatim.

    routine is kept in the signature for caller compatibility but is not
    needed to construct the region.
    '''
    # BUG FIX: Region.__init__ takes no arguments (see the Region class), so
    # the previous Region(routine) call raised a TypeError.
    region = Region()
    for line in codeLines:
        region.loadLine(line)
    return region
class Region(object):
    '''A contiguous block of code lines (each with its associated symbols)
    inside a routine. Base class for the specialised region types below.'''

    def __init__(self):
        # list of (strippedLine, [symbolsOnLine]) tuples in source order
        self._linesAndSymbols = []

    def __contains__(self, text):
        # substring search over all loaded lines
        for line, _ in self._linesAndSymbols:
            if text in line:
                return True
        return False

    @property
    def usedSymbolNames(self):
        # names of all symbols referenced on any line of this region
        return set([symbol.name for symbol in sum([symbols for (_, symbols) in self._linesAndSymbols], [])])

    @property
    def linesAndSymbols(self):
        return self._linesAndSymbols

    @property
    def isCallingKernel(self):
        # plain regions never call kernels; overridden by CallRegion
        return False

    def _sanitize(self, text, skipDebugPrint=False):
        # wrap rendered text in Fortran-comment debug markers when enabled
        if not conversionOptions.debugPrint or skipDebugPrint:
            return text.strip() + "\n"
        return "!<--- %s\n%s\n!--->\n" %(
            type(self),
            text.strip()
        )

    def clone(self):
        # shallow copy: the line tuples themselves are shared with the source
        region = self.__class__()
        region._linesAndSymbols = copy.copy(self._linesAndSymbols)
        return region

    def loadLine(self, line, symbolsOnCurrentLine=None):
        # blank lines are silently dropped
        stripped = line.strip()
        if stripped == "":
            return
        self._linesAndSymbols.append((
            stripped,
            symbolsOnCurrentLine if symbolsOnCurrentLine else []
        ))

    def firstAccessTypeOfScalar(self, symbol):
        # Returns "w" or "r" depending on whether the first access of the
        # scalar 'symbol' in this region is a write or a read; None when the
        # symbol is never accessed here.
        if symbol.domains:
            raise Exception("non scalars not supported for this operation")
        for line, symbols in self._linesAndSymbols:
            if not symbol in symbols:
                continue
            if symbol.scalarWriteAccessPattern.match(line):
                return "w"
            return "r"
        return None

    def implemented(self, parentRoutine, parentRegion=None, skipDebugPrint=False):
        # Render this region's lines into backend-specific code, rewriting
        # symbol accesses and early exits for the target implementation.
        parallelRegionTemplate = None
        if isinstance(parentRegion, ParallelRegion):
            parallelRegionTemplate = parentRegion.template
        iterators = parentRoutine.implementation.getIterators(parallelRegionTemplate) \
            if parallelRegionTemplate else []
        text = "\n".join([
            implement(
                replaceEarlyExits(line, parentRoutine.implementation, parentRoutine.node.getAttribute('parallelRegionPosition')),
                symbols,
                implementSymbolAccessStringAndRemainder,
                iterators,
                parallelRegionTemplate,
                useDeviceVersionIfAvailable=parentRoutine.implementation.onDevice
            )
            for (line, symbols) in self._linesAndSymbols
        ])
        if text == "":
            return ""
        return self._sanitize(text, skipDebugPrint)
class CallRegion(Region):
    '''A region representing a single subroutine call, including the
    framework-added arguments that must be passed along with it.'''

    def __init__(self):
        super(CallRegion, self).__init__()
        self._callee = None  # routine object being called; set via loadCallee
        self._passedInSymbolsByName = None  # set via loadPassedInSymbolsByName

    @property
    def isCallingKernel(self):
        # true when the callee contains a parallel region (i.e. is a kernel)
        if self._callee \
        and hasattr(self._callee, "node") \
        and self._callee.node.getAttribute("parallelRegionPosition") == "within":
            return True
        return False

    @property
    def usedSymbolNames(self):
        # Symbols used on the call line itself plus everything passed to the
        # callee; compacted framework arrays are expanded to their members.
        compactedSymbols = sum([
            s.compactedSymbols for s in self._callee._additionalArguments
            if isinstance(s, FrameworkArray)
        ], []) if hasattr(self._callee, "_additionalArguments") and self._callee._additionalArguments else []
        additionalArgumentSymbols = [
            s for s in self._callee._additionalArguments
            if not isinstance(s, FrameworkArray)
        ] if hasattr(self._callee, "_additionalArguments") and self._callee._additionalArguments else []
        return super(CallRegion, self).usedSymbolNames \
            | set([a.split("(")[0].strip() for a in self._callee.programmerArguments]) \
            | set([s.name for s in compactedSymbols + additionalArgumentSymbols])

    def _adjustedArguments(self, arguments, parentRoutine, parentRegion=None):
        # Rewrite the programmer-specified arguments for the callee's backend.
        def adjustArgument(argument, parallelRegionTemplate, iterators):
            return implement(
                argument,
                parentRoutine.symbolsByName.values(),
                implementSymbolAccessStringAndRemainder,
                iterators,
                parallelRegionTemplate,
                self._callee,
                useDeviceVersionIfAvailable=parentRoutine.implementation.onDevice
            )

        if not hasattr(self._callee, "implementation"):
            # callee is not a converted routine - pass arguments unchanged
            return arguments
        parallelRegionTemplate = None
        if isinstance(parentRegion, ParallelRegion):
            parallelRegionTemplate = parentRegion.template
        iterators = self._callee.implementation.getIterators(parallelRegionTemplate) \
            if parallelRegionTemplate else []
        return [
            adjustArgument(argument, parallelRegionTemplate, iterators)
            for argument in arguments
        ]

    def loadCallee(self, callee):
        self._callee = callee

    def loadPassedInSymbolsByName(self, symbolsByName):
        self._passedInSymbolsByName = copy.copy(symbolsByName)

    def clone(self):
        clone = super(CallRegion, self).clone()
        clone.loadCallee(self._callee)
        clone.loadPassedInSymbolsByName(self._passedInSymbolsByName)
        return clone

    def implemented(self, parentRoutine, parentRegion=None, skipDebugPrint=False):
        # Renders the call: symbol-compaction assignments, optional kernel
        # launch preparation/configuration, the argument list (framework
        # arguments first, then programmer arguments) and any post-call code.
        if not self._callee:
            raise Exception("call not loaded for call region in %s" %(parentRegion.name))
        text = ""

        calleeName = parentRoutine._adjustedCalleeNamesByName[self._callee.name]
        # assignments packing real scalars into the callee's framework array
        usedCompactedParameters = [
            s for s in sorted(parentRoutine._packedRealSymbolsByCalleeName.get(calleeName, []))
            if s.name in self._callee.usedSymbolNames
        ]
        for idx, symbol in enumerate(usedCompactedParameters):
            text += "%s(%i) = %s" %(
                limitLength(frameworkArrayName(calleeName)),
                idx+1,
                symbol.nameInScope()
            ) + " ! type %i symbol compaction for callee %s\n" %(symbol.declarationType, calleeName)

        parallelRegionPosition = None
        if hasattr(self._callee, "implementation"):
            parallelRegionPosition = self._callee.node.getAttribute("parallelRegionPosition")
        isForeignModuleCall = parentRoutine.parentModuleName != self._callee.parentModuleName
        if hasattr(self._callee, "implementation") and parallelRegionPosition == "within" and not isForeignModuleCall:
            # same-module kernel call - needs launch preparation and config
            if not self._callee.parallelRegionTemplates \
            or len(self._callee.parallelRegionTemplates) == 0:
                raise Exception("No parallel region templates found for subroutine %s" %(
                    self._callee.name
                ))
            text += "%s call %s %s" %(
                self._callee.implementation.kernelCallPreparation(
                    self._callee.parallelRegionTemplates[0],
                    calleeNode=self._callee.node
                ),
                calleeName,
                self._callee.implementation.kernelCallConfig()
            )
        else:
            text += "call " + calleeName
        text += "("
        if hasattr(self._callee, "implementation"):
            # framework-added arguments are emitted before programmer arguments
            requiredAdditionalArgumentSymbols = [
                symbol for symbol in self._callee.additionalArgumentSymbols
                if symbol.name in self._callee.usedSymbolNames \
                and (not symbol.isTypeParameter or symbol.isDimensionParameter)
            ]
            if len(requiredAdditionalArgumentSymbols) > 0:
                text += " &\n"
            bridgeStr1 = "&\n&"  # Fortran line continuation between arguments
            numOfProgrammerSpecifiedArguments = len(self._callee.programmerArguments)
            for symbolNum, symbol in enumerate(requiredAdditionalArgumentSymbols):
                # resolve the symbol in the caller's scope: scoped name first,
                # then the plain name as a fallback
                symbolInCurrentContext = parentRoutine.symbolsByName.get(symbol.nameInScope(useDeviceVersionIfAvailable=False))
                if not symbolInCurrentContext:
                    symbolInCurrentContext = parentRoutine.symbolsByName.get(symbol.name)
                if not symbolInCurrentContext:
                    raise Exception("%s not found in context. All context keys: %s; Names in Scope: %s" %(
                        symbol.nameInScope(useDeviceVersionIfAvailable=False),
                        parentRoutine.symbolsByName.keys(),
                        [s.nameInScope(useDeviceVersionIfAvailable=False) for s in parentRoutine.symbolsByName.values()]
                    ))
                hostName = symbolInCurrentContext.nameInScope()
                text += hostName
                if symbolNum < len(requiredAdditionalArgumentSymbols) - 1 or numOfProgrammerSpecifiedArguments > 0:
                    text += ", %s" %(bridgeStr1)
        text += ", ".join(self._adjustedArguments(self._callee.programmerArguments, parentRoutine, parentRegion)) + ")\n"
        if hasattr(self._callee, "implementation") \
        and not self._callee.implementation.allowsMixedHostAndDeviceCode \
        and not isForeignModuleCall:
            # device-only kernels may need synchronization / state handling
            # after the call returns
            activeSymbolsByName = dict(
                (symbol.name, symbol)
                for symbol in self._callee.additionalArgumentSymbols + self._passedInSymbolsByName.values()
                if symbol.name in self._callee.usedSymbolNamesInKernels
            )
            text += self._callee.implementation.kernelCallPost(
                activeSymbolsByName,
                self._callee.node
            )
        return self._sanitize(text, skipDebugPrint)
class ParallelRegion(Region):
    '''A region wrapping one or more sub-regions whose code executes inside
    a parallel region (kernel) of the routine.'''

    def __init__(self):
        self._currRegion = Region()  # sub-region currently receiving lines
        self._subRegions = [self._currRegion]
        self._activeTemplate = None  # parallel region template XML node
        super(ParallelRegion, self).__init__()

    def __contains__(self, text):
        # substring search across all sub-regions
        for region in self._subRegions:
            if text in region:
                return True
        return False

    @property
    def isCallingKernel(self):
        for region in self._subRegions:
            if region.isCallingKernel:
                return True
        return False

    @property
    def linesAndSymbols(self):
        # flattened lines of all sub-regions
        return sum([
            region.linesAndSymbols
            for region in self._subRegions
        ], [])

    @property
    def currRegion(self):
        return self._currRegion

    @property
    def template(self):
        return self._activeTemplate

    @property
    def usedSymbolNames(self):
        return set(sum([
            list(region.usedSymbolNames)
            for region in self._subRegions
        ], []))

    def switchToRegion(self, region):
        # subsequent loadLine calls are directed to 'region'
        self._currRegion = region
        self._subRegions.append(region)

    def loadLine(self, line, symbolsOnCurrentLine=None):
        # delegate to the currently active sub-region
        self._currRegion.loadLine(line, symbolsOnCurrentLine)

    def loadActiveParallelRegionTemplate(self, templateNode):
        self._activeTemplate = templateNode

    def clone(self):
        # clones every sub-region; the current region resets to the first one
        clone = super(ParallelRegion, self).clone()
        if self._activeTemplate:
            clone.loadActiveParallelRegionTemplate(self._activeTemplate)
        clone._subRegions = []
        for region in self._subRegions:
            clonedRegion = region.clone()
            clone._subRegions.append(clonedRegion)
        clone._currRegion = clone._subRegions[0]
        return clone

    def firstAccessTypeOfScalar(self, symbol):
        # first sub-region that accesses the symbol wins
        for region in self._subRegions:
            accessType = region.firstAccessTypeOfScalar(symbol)
            if accessType != None:
                return accessType
        return None

    def implemented(self, parentRoutine, parentRegion=None, skipDebugPrint=False):
        # Wrap the sub-regions' rendered code in parallel-region begin/end
        # markers, or in stub markers when only @exit handling is required.
        text = ""
        hasAtExits = None
        routineHasKernels = parentRoutine.node.getAttribute('parallelRegionPosition') == 'within'
        if routineHasKernels and self._activeTemplate:
            text += parentRoutine.implementation.parallelRegionBegin(
                parentRoutine,
                [s for s in parentRoutine.symbolsByName.values() if s.name in self.usedSymbolNames],
                self._activeTemplate
            ).strip() + "\n"
        else:
            hasAtExits = "@exit" in self
            if hasAtExits:
                text += parentRoutine.implementation.parallelRegionStubBegin()
        text += "\n".join([
            region.implemented(
                parentRoutine,
                parentRegion=self,
                skipDebugPrint=skipDebugPrint
            )
            for region in self._subRegions
        ])
        if routineHasKernels and self._activeTemplate:
            text += parentRoutine.implementation.parallelRegionEnd(self._activeTemplate, parentRoutine).strip() + "\n"
        elif hasAtExits:
            text += parentRoutine.implementation.parallelRegionStubEnd()
        return self._sanitize(text, skipDebugPrint)
class RoutineSpecificationRegion(Region):
    def __init__(self):
        super(RoutineSpecificationRegion, self).__init__()
        # context objects populated later via loadAdditionalContext() and the
        # other load* methods below; None until loaded
        self._additionalParametersByKernelName = None
        self._symbolsToAdd = None
        self._compactionDeclarationPrefixByCalleeName = None
        self._currAdditionalCompactedSubroutineParameters = None
        self._allImports = None
        self._typeParameterSymbolsByName = None
        self._dataSpecificationLines = []
    @property
    def usedSymbolNames(self):
        # Declaration lines only "use" the dimension-defining type parameters
        # of the symbols they declare.
        result = []
        for symbol in sum([symbols for _, symbols in self._linesAndSymbols], []):
            result += [tp.name for tp in symbol.usedTypeParameters if tp.isDimensionParameter]
        return set(result)
    def clone(self):
        # carry over all loaded context so the clone renders identically
        clone = super(RoutineSpecificationRegion, self).clone()
        clone.loadAdditionalContext(
            self._additionalParametersByKernelName,
            self._symbolsToAdd,
            self._compactionDeclarationPrefixByCalleeName,
            self._currAdditionalCompactedSubroutineParameters,
            self._allImports
        )
        clone._dataSpecificationLines = copy.copy(self._dataSpecificationLines)
        clone._typeParameterSymbolsByName = copy.copy(self._typeParameterSymbolsByName)
        return clone
    def loadDataSpecificationLine(self, line):
        # data specification lines are collected separately from ordinary lines
        self._dataSpecificationLines.append(line)
    def loadAdditionalContext(
        self,
        additionalParametersByKernelName,
        symbolsToAdd,
        compactionDeclarationPrefixByCalleeName,
        currAdditionalCompactedSubroutineParameters,
        allImports
    ):
        # Shallow-copies of the routine-level context needed to render this
        # region's declarations.
        self._additionalParametersByKernelName = copy.copy(additionalParametersByKernelName)
        self._symbolsToAdd = copy.copy(symbolsToAdd)
        self._compactionDeclarationPrefixByCalleeName = copy.copy(compactionDeclarationPrefixByCalleeName)
        self._currAdditionalCompactedSubroutineParameters = copy.copy(currAdditionalCompactedSubroutineParameters)
        self._allImports = copy.copy(allImports)
def loadTypeParameterSymbolsByName(self, typeParameterSymbolsByName):
self._typeParameterSymbolsByName = copy.copy(typeParameterSymbolsByName)
def firstAccessTypeOfScalar(self, symbol):
return None
def implemented(self, parentRoutine, parentRegion=None, skipDebugPrint=False):
def getImportLine(importedSymbols, parentRoutine):
return parentRoutine.implementation.getImportSpecification(
importedSymbols,
RegionType.KERNEL_CALLER_DECLARATION if parentRoutine.isCallingKernel else RegionType.OTHER,
parentRoutine.node.getAttribute('parallelRegionPosition'),
parentRoutine.parallelRegionTemplates
)
declarationRegionType = RegionType.OTHER
if parentRoutine.isCallingKernel:
declarationRegionType = RegionType.KERNEL_CALLER_DECLARATION
if self._additionalParametersByKernelName == None \
or self._symbolsToAdd == None \
or self._compactionDeclarationPrefixByCalleeName == None \
or self._currAdditionalCompactedSubroutineParameters == None:
raise Exception("additional context not properly loaded for routine specification region in %s" %(
parentRoutine.name
))
importsFound = False
declaredSymbolsByScopedName = OrderedDict()
textForKeywords = ""
textBeforeDeclarations = ""
textAfterDeclarations = ""
declarations = ""
symbolsToAddByScopedName = dict(
(symbol.nameInScope(), symbol)
for symbol in self._symbolsToAdd
)
iterators = set(getIterators(
parentRoutine.node,
parentRoutine.parallelRegionTemplates,
parentRoutine.implementation.architecture
))
for (line, symbols) in self._linesAndSymbols:
if not symbols or len(symbols) == 0:
allImportMatch = regexPatterns.importAllPattern.match(line)
selectiveImportMatch = regexPatterns.importPattern.match(line)
if allImportMatch:
importsFound = True
elif selectiveImportMatch:
importsFound = True
elif not importsFound:
textForKeywords += line.strip() + "\n"
elif len(declaredSymbolsByScopedName.keys()) == 0:
textBeforeDeclarations += line.strip() + "\n"
else:
textAfterDeclarations += line.strip() + "\n"
continue
for symbol in symbols:
if symbol.nameInScope() in symbolsToAddByScopedName:
continue
if symbol.isCompacted:
continue #compacted symbols are handled as part of symbolsToAdd
if symbol.name in iterators:
continue #will be added to declarations by implementation class
specTuple = symbol.getSpecificationTuple(line)
if specTuple[0]:
declaredSymbolsByScopedName[symbol.nameInScope(useDeviceVersionIfAvailable=False)] = symbol
symbol.loadDeclaration(
specTuple,
parentRoutine.programmerArguments,
parentRoutine.name
)
continue
match = symbol.importPattern.match(line)
if not match:
match = symbol.importMapPattern.match(line)
if match:
importsFound = True
continue
raise Exception("symbol %s expected to be referenced in line '%s', but all matchings have failed" %(
symbol.name,
line
))
text = ""
if len(self._typeParameterSymbolsByName.keys()) > 0 \
and conversionOptions.debugPrint \
and not skipDebugPrint:
text += "!<----- type parameters --\n"
for typeParameterSymbol in self._typeParameterSymbolsByName.values():
if typeParameterSymbol.sourceModule in parentRoutine.moduleNamesCompletelyImported:
continue
if typeParameterSymbol.isDimensionParameter:
continue
text += getImportLine([typeParameterSymbol], parentRoutine)
if self._allImports:
if len(self._allImports.keys()) > 0 and conversionOptions.debugPrint and not skipDebugPrint:
text += "!<----- synthesized imports --\n"
for (sourceModule, nameInScope) in self._allImports:
if not nameInScope:
text += getImportLine(sourceModule, parentRoutine)
continue
if sourceModule in parentRoutine.moduleNamesCompletelyImported:
continue
if nameInScope in self._typeParameterSymbolsByName \
and not self._typeParameterSymbolsByName[nameInScope].isDimensionParameter:
continue
sourceName = self._allImports[(sourceModule, nameInScope)]
symbol = parentRoutine.symbolsByName.get(sourceName)
if symbol != None and symbol.sourceModule == parentRoutine.parentModuleName:
continue
if symbol != None:
text += getImportLine([symbol], parentRoutine)
else:
adjustedSourceName = parentRoutine._adjustedCalleeNamesByName.get(sourceName, sourceName)
adjustedNameInScope = parentRoutine._adjustedCalleeNamesByName.get(nameInScope, nameInScope)
importSpecification = "use %s, only: %s => %s" %(sourceModule, adjustedNameInScope, adjustedSourceName) \
if adjustedNameInScope != adjustedSourceName \
else "use %s, only: %s" %(sourceModule, adjustedNameInScope)
text += importSpecification
if conversionOptions.debugPrint and not skipDebugPrint:
text += " ! resynthesizing user input - no associated HF aware symbol found"
text += "\n"
if textForKeywords != "" and conversionOptions.debugPrint and not skipDebugPrint:
text += "!<----- other imports and specs: ------\n"
text += textForKeywords
if textBeforeDeclarations != "" and conversionOptions.debugPrint and not skipDebugPrint:
text += "!<----- before declarations: --\n"
text += textBeforeDeclarations
if len(declaredSymbolsByScopedName.keys()) > 0:
if conversionOptions.debugPrint and not skipDebugPrint:
text += "!<----- declarations: -------\n"
declarations = "\n".join([
parentRoutine.implementation.adjustDeclarationForDevice(
symbol.getDeclarationLine(parentRoutine, purgeList=[]),
[symbol],
parentRoutine,
declarationRegionType,
parentRoutine.node.getAttribute('parallelRegionPosition')
).strip()
for symbol in declaredSymbolsByScopedName.values()
]).strip() + "\n"
text += declarations
if len(self._dataSpecificationLines) > 0 and conversionOptions.debugPrint and not skipDebugPrint:
text += "!<----- data specifications: --\n"
if len(self._dataSpecificationLines) > 0:
text += "\n".join(self._dataSpecificationLines) + "\n"
if textAfterDeclarations != "" and conversionOptions.debugPrint and not skipDebugPrint:
text += "!<----- after declarations: --\n"
text += textAfterDeclarations
#$$$ this needs to be adjusted for the unused symbols
numberOfAdditionalDeclarations = (
len(sum([
self._additionalParametersByKernelName[kname][1]
for kname in self._additionalParametersByKernelName
], [])) + len(self._symbolsToAdd) + len(parentRoutine._packedRealSymbolsByCalleeName.keys())
)
if numberOfAdditionalDeclarations > 0 and conversionOptions.debugPrint and not skipDebugPrint:
text += "!<----- auto emul symbols : --\n"
defaultPurgeList = ['intent', 'public', 'parameter', 'allocatable', 'save']
for symbol in self._symbolsToAdd:
if not symbol.name in parentRoutine.usedSymbolNames:
continue
if symbol.isTypeParameter and not symbol.isDimensionParameter:
continue
if isinstance(symbol, FrameworkArray):
symbol.compactedSymbols = [
s for s in symbol.compactedSymbols
if s.name in parentRoutine.usedSymbolNames
]
symbol.domains = [("hfauto", str(len(symbol.compactedSymbols)))]
purgeList = defaultPurgeList
if not symbol.isCompacted:
purgeList=['public', 'parameter', 'allocatable', 'save']
text += parentRoutine.implementation.adjustDeclarationForDevice(
symbol.getDeclarationLine(parentRoutine, purgeList).strip(),
[symbol],
parentRoutine,
declarationRegionType,
parentRoutine.node.getAttribute('parallelRegionPosition')
).rstrip() + " ! type %i symbol added for this subroutine\n" %(symbol.declarationType)
for callee in parentRoutine.callees:
#this hasattr is used to test the callee for analyzability without circular imports
if not hasattr(callee, "implementation"):
continue
additionalImports, additionalDeclarations = self._additionalParametersByKernelName.get(
callee.name,
([], [])
)
additionalImportSymbolsByName = {}
for symbol in additionalImports:
additionalImportSymbolsByName[symbol.name] = symbol
implementation = callee.implementation
for symbol in parentRoutine.filterOutSymbolsAlreadyAliveInCurrentScope(additionalDeclarations):
if symbol.declarationType not in [DeclarationType.LOCAL_ARRAY, DeclarationType.LOCAL_SCALAR]:
# only symbols that are local to the kernel actually need to be declared here.
# Everything else we should have in our own scope already, either through additional imports or
# through module association (we assume the kernel and its wrapper reside in the same module)
continue
if not symbol.name in parentRoutine.usedSymbolNames:
continue
if symbol.isTypeParameter and not symbol.isDimensionParameter:
continue
if symbol.nameInScope(useDeviceVersionIfAvailable=False) in declaredSymbolsByScopedName:
continue
#in case the array uses domain sizes in the declaration that are additional symbols themselves
#we need to fix them.
adjustedDomains = []
for (domName, domSize) in symbol.domains:
domSizeSymbol = additionalImportSymbolsByName.get(domSize)
if domSizeSymbol is None:
adjustedDomains.append((domName, domSize))
continue
adjustedDomains.append((domName, domSizeSymbol.nameInScope()))
symbol.domains = adjustedDomains
text += implementation.adjustDeclarationForDevice(
symbol.getDeclarationLine(parentRoutine, defaultPurgeList).strip(),
[symbol],
parentRoutine,
declarationRegionType,
parentRoutine.node.getAttribute('parallelRegionPosition')
).rstrip() + " ! type %i symbol added for callee %s\n" %(symbol.declarationType, callee.name)
toBeCompacted = [
symbol for symbol in parentRoutine._packedRealSymbolsByCalleeName.get(callee.name, [])
if symbol.name in callee.usedSymbolNames
]
if len(toBeCompacted) > 0:
#TODO: generalize for cases where we don't want this to be on the device
#(e.g. put this into Implementation class)
compactedArray = FrameworkArray(
callee.name,
self._compactionDeclarationPrefixByCalleeName[callee.name],
domains=[("hfauto", str(len(toBeCompacted)))],
isOnDevice=True
)
text += implementation.adjustDeclarationForDevice(
compactedArray.getDeclarationLine(parentRoutine).strip(),
[compactedArray],
parentRoutine,
declarationRegionType,
parentRoutine.node.getAttribute('parallelRegionPosition')
).rstrip() + " ! compaction array added for callee %s\n" %(callee.name)
declarationEndText = parentRoutine.implementation.declarationEnd(
[
s for s in parentRoutine.symbolsByName.values() + parentRoutine.additionalImports
if s.isToBeTransfered or s.name in parentRoutine.usedSymbolNames
],
parentRoutine
)
if len(declarationEndText) > 0:
text += "!<----- impl. specific decl end : --\n"
text += declarationEndText
usedCompactedParameters = [
s for s in sorted(self._currAdditionalCompactedSubroutineParameters)
if s.name in parentRoutine.usedSymbolNames
]
for idx, symbol in enumerate(usedCompactedParameters):
text += "%s = %s(%i)" %(
symbol.nameInScope(),
limitLength(frameworkArrayName(parentRoutine.name)),
idx+1
) + " ! additional type %i symbol compaction\n" %(symbol.declarationType)
return self._sanitize(text, skipDebugPrint)
class RoutineEarlyExitRegion(Region):
    """Region representing an early exit point inside a routine."""
    def implemented(self, parentRoutine, parentRegion=None, skipDebugPrint=False):
        """Render the exit-point code followed by the region's own lines."""
        transferredOrUsed = [
            symbol
            for symbol in parentRoutine.symbolsByName.values()
            if symbol.isToBeTransfered or symbol.name in parentRoutine.usedSymbolNames
        ]
        text = parentRoutine.implementation.subroutineExitPoint(
            transferredOrUsed,
            parentRoutine.isCallingKernel,
            isSubroutineEnd=False
        )
        text += super(RoutineEarlyExitRegion, self).implemented(
            parentRoutine,
            parentRegion=parentRegion,
            skipDebugPrint=True
        )
        return self._sanitize(text, skipDebugPrint)
|
"""
GAVIP Example AVIS: Data Sharing AVI
Django models used by the AVI pipeline
"""
import datetime
from django.db import models
from pipeline.models import AviJob, AviJobRequest
class SharedDataModel(AviJob):
    """
    Stores the input parameters for the AVI pipeline.

    Field names here intentionally mirror the variable names used by the
    pipeline itself: an AviJob model must contain every field required by
    the intended pipeline class (ProcessVOTable in this case).
    """
    sharedfile = models.CharField(max_length=1000)
    outputFile = models.CharField(default="", max_length=100)
    pipeline_task = "ProcessVOTable"
    def get_absolute_url(self):
        """Return the job's relative URL, rooted at its primary key."""
        return "{}/".format(self.pk)
|
import ethtool
import glob
import os
# Base sysfs directory listing every network interface.
NET_PATH = '/sys/class/net'
# Glob patterns: presence of the per-device subdirectory identifies the kind.
NIC_PATH = '/sys/class/net/*/device'      # physical NICs
BRIDGE_PATH = '/sys/class/net/*/bridge'   # bridge devices
BONDING_PATH = '/sys/class/net/*/bonding' # bonding masters
WLAN_PATH = '/sys/class/net/*/wireless'   # wireless interfaces
# Per-interface probe paths (filled in with the interface name).
NET_BRPORT = '/sys/class/net/%s/brport'   # exists when enslaved to a bridge
NET_MASTER = '/sys/class/net/%s/master'   # exists when enslaved to a bond
NET_STATE = '/sys/class/net/%s/carrier'   # carrier (link) state file
# VLAN bookkeeping exposed by the kernel.
PROC_NET_VLAN = '/proc/net/vlan/'
BONDING_SLAVES = '/sys/class/net/%s/bonding/slaves'
BRIDGE_PORTS = '/sys/class/net/%s/brif'
def wlans():
    """Return the names of all wireless interfaces."""
    return [entry.split('/')[-2] for entry in glob.glob(WLAN_PATH)]
def is_wlan(iface):
    """True when *iface* is a wireless interface."""
    return any(name == iface for name in wlans())
def nics():
    """Names of physical NICs, excluding wireless interfaces."""
    wired = set(entry.split('/')[-2] for entry in glob.glob(NIC_PATH))
    return list(wired - set(wlans()))
def is_nic(iface):
    """True when *iface* is a (non-wireless) physical NIC."""
    return any(name == iface for name in nics())
def bondings():
    """Return the names of all bonding master interfaces."""
    return [entry.split('/')[-2] for entry in glob.glob(BONDING_PATH)]
def is_bonding(iface):
    """True when *iface* is a bonding master."""
    return any(name == iface for name in bondings())
def vlans():
    """Interfaces that appear both in sysfs and in /proc/net/vlan."""
    sysfs_names = set(entry.split('/')[-1] for entry in glob.glob(NET_PATH + '/*'))
    proc_names = set(entry.split('/')[-1] for entry in glob.glob(PROC_NET_VLAN + '*'))
    return list(sysfs_names & proc_names)
def is_vlan(iface):
    """True when *iface* is a VLAN interface."""
    return any(name == iface for name in vlans())
def bridges():
    """Return the names of all bridge interfaces."""
    return [entry.split('/')[-2] for entry in glob.glob(BRIDGE_PATH)]
def is_bridge(iface):
    """True when *iface* is a bridge."""
    return any(name == iface for name in bridges())
def all_interfaces():
    """Names of every interface known to sysfs."""
    return [entry.rsplit("/", 1)[-1] for entry in glob.glob(NET_PATH + '/*')]
def slaves(bonding):
    """Return the slave interface names of bonding master *bonding*.

    Fix: the sysfs file is now opened through a context manager so the
    descriptor is closed deterministically (previously it leaked until GC).
    """
    with open(BONDING_SLAVES % bonding) as slaves_file:
        return slaves_file.readline().split()
def ports(bridge):
    """Interfaces attached to *bridge* (entries of its brif directory)."""
    brif_dir = BRIDGE_PORTS % bridge
    return os.listdir(brif_dir)
def is_brport(nic):
    """True when *nic* is enslaved to a bridge (brport dir exists)."""
    probe = NET_BRPORT % nic
    return os.path.exists(probe)
def is_bondlave(nic):
    """True when *nic* is a bond slave (sysfs master link exists)."""
    probe = NET_MASTER % nic
    return os.path.exists(probe)
def operstate(dev):
    """Return 'up' when a carrier state can be read for *dev*, else 'down'."""
    if link_detected(dev) == "n/a":
        return "down"
    return "up"
def link_detected(dev):
    """Return 'yes'/'no' for cable attached/detached, or 'n/a' if *dev* is down.

    Fix: the carrier file is now opened through a context manager so the
    descriptor is closed deterministically (previously it leaked until GC).
    """
    # try to read interface carrier (link) status
    try:
        with open(NET_STATE % dev) as carrier_file:
            carrier = carrier_file.readline().strip()
    # when IOError is raised, interface is down
    except IOError:
        return "n/a"
    # if value is 1, interface up with cable connected
    # 0 corresponds to interface up with cable disconnected
    return "yes" if carrier == '1' else "no"
def get_vlan_device(vlan):
    """Return the underlying device of the given VLAN, or None.

    Fix: the /proc file is now opened through a context manager so the
    descriptor is closed deterministically (previously it leaked until GC).
    """
    dev = None
    if os.path.exists(PROC_NET_VLAN + vlan):
        with open(PROC_NET_VLAN + vlan) as vlan_file:
            for line in vlan_file:
                if "Device:" in line:
                    dummy, dev = line.split()
                    break
    return dev
def get_bridge_port_device(bridge):
    """Return the nics list that belongs to bridge.

    Raises ValueError for an unknown bridge.

    Bug fix: the bonding check used to be an independent `if`, so a VLAN
    port fell through to the trailing `else` and its name was appended a
    second time next to the resolved underlying device. It is now an
    `elif`, so each port is classified exactly once.
    """
    # br --- v --- bond --- nic1
    if bridge not in bridges():
        raise ValueError('unknown bridge %s' % bridge)
    nics = []
    for port in ports(bridge):
        if port in vlans():
            device = get_vlan_device(port)
            if device in bondings():
                nics.extend(slaves(device))
            else:
                nics.append(device)
        elif port in bondings():
            nics.extend(slaves(port))
        else:
            nics.append(port)
    return nics
def aggregated_bridges():
    """Bridges whose port set includes at least one physical NIC."""
    return [
        candidate for candidate in bridges()
        if set(get_bridge_port_device(candidate)) & set(nics())
    ]
def bare_nics():
    "The nic is not a port of a bridge or a slave of bond."
    result = []
    for nic in nics():
        if not is_brport(nic) and not is_bondlave(nic):
            result.append(nic)
    return result
def is_bare_nic(iface):
    """True when *iface* is a NIC that is neither bridged nor bonded."""
    return any(name == iface for name in bare_nics())
def all_favored_interfaces():
    """Interfaces worth exposing: NIC-backed bridges, bare NICs and bonds."""
    favored = aggregated_bridges()
    favored += bare_nics()
    favored += bondings()
    return favored
def get_interface_type(iface):
    """Classify *iface* as 'nic', 'bonding', 'bridge', 'vlan' or 'unknown'."""
    # FIXME if we want to get more device type
    # just support nic, bridge, bondings and vlan, for we just
    # want to expose this 4 kinds of interface
    checks = (
        ("nic", is_nic),
        ("bonding", is_bonding),
        ("bridge", is_bridge),
        ("vlan", is_vlan),
    )
    try:
        for type_name, predicate in checks:
            if predicate(iface):
                return type_name
        return 'unknown'
    except IOError:
        return 'unknown'
def get_interface_info(iface):
    """Collect name/type/status/link/ip/netmask details for *iface*.

    Raises ValueError when ethtool does not know the interface.
    """
    if iface not in ethtool.get_devices():
        raise ValueError('unknown interface: %s' % iface)
    ipaddr = ''
    netmask = ''
    try:
        ipaddr = ethtool.get_ipaddr(iface)
        netmask = ethtool.get_netmask(iface)
    except IOError:
        # interface has no address configured; keep the empty defaults
        pass
    link = link_detected(iface)
    return {
        'name': iface,
        'type': get_interface_type(iface),
        'status': 'active' if link != "n/a" else "inactive",
        'link_detected': link,
        'ipaddr': ipaddr,
        'netmask': netmask,
    }
|
from weboob.capabilities.housing import CapHousing, Housing, HousingPhoto
from weboob.tools.backend import Module
from .browser import SeLogerBrowser
__all__ = ['SeLogerModule']
class SeLogerModule(Module, CapHousing):
    """Weboob backend for the French housing website seloger.com."""
    NAME = 'seloger'
    MAINTAINER = u'Romain Bignon'
    EMAIL = 'romain@weboob.org'
    VERSION = '2.1'
    DESCRIPTION = 'French housing website'
    LICENSE = 'AGPLv3+'
    ICON = 'http://static.poliris.com/z/portail/svx/portals/sv6_gen/favicon.png'
    BROWSER = SeLogerBrowser
    def search_housings(self, query):
        """Search housings matching *query*; only cities of this backend count."""
        cities = [c.id for c in query.cities if c.backend == self.name]
        if len(cities) == 0:
            return list([])
        return self.browser.search_housings(query.type, cities, query.nb_rooms,
                                            query.area_min, query.area_max,
                                            query.cost_min, query.cost_max,
                                            query.house_types,
                                            query.advert_types)
    def get_housing(self, housing):
        """Fetch a housing by object or by id."""
        if isinstance(housing, Housing):
            id = housing.id
        else:
            id = housing
            housing = None
        return self.browser.get_housing(id, housing)
    def search_city(self, pattern):
        """Look up cities matching *pattern*."""
        return self.browser.search_geo(pattern)
    def fill_photo(self, photo, fields):
        """Lazily download a photo's binary data when requested."""
        if 'data' in fields and photo.url and not photo.data:
            photo.data = self.browser.open(photo.url).content
        return photo
    def fill_housing(self, housing, fields):
        """Lazily complete a housing's missing fields.

        Bug fix: DPE/GES were previously removed unconditionally, so asking
        for only one of them raised ValueError on the absent other; now only
        the fields actually present are removed.
        """
        if 'DPE' in fields or 'GES' in fields:
            housing = self.browser.get_housing_detail(housing)
            for energy_field in ('DPE', 'GES'):
                if energy_field in fields:
                    fields.remove(energy_field)
        if len(fields) > 0:
            housing = self.browser.get_housing(housing.id, housing)
        return housing
    OBJECTS = {HousingPhoto: fill_photo, Housing: fill_housing}
|
"Demonstrating function evaluation at arbitrary points."
from __future__ import print_function
from dolfin import *
from numpy import array
# Build a coarse unit-cube mesh and pick an arbitrary interior point.
mesh = UnitCubeMesh(8, 8, 8)  # fixed: removed the stray C-style semicolon
x = (0.31, 0.32, 0.33)
# Quadratic scalar and vector Lagrange function spaces on the mesh.
Vs = FunctionSpace(mesh, "CG", 2)
Vv = VectorFunctionSpace(mesh, "CG", 2)
# User-defined scalar expression and a 3-component vector expression.
fs = Expression("sin(3.0*x[0])*sin(3.0*x[1])*sin(3.0*x[2])", degree=2)
fv = Expression(("sin(3.0*x[0])*sin(3.0*x[1])*sin(3.0*x[2])",
                 "1.0 + 3.0*x[0] + 4.0*x[1] + 0.5*x[2]", "2"), element=Vv.ufl_element())
# Discrete counterpart of fs obtained by projection onto Vs.
g = project(fs, V=Vs)
print("""
Evaluate user-defined scalar function fs
fs(x) = %f
Evaluate discrete function g (projection of fs)
g(x) = %f
Evaluate user-defined vector valued function fv
fs(x) = %s""" % (fs(x), g(x), str(fv(x))))
|
import shutil
import os
import hashlib
import psutil
import urllib.request
import tarfile
from getpass import getuser
from os import getenv, mkdir
from os.path import expanduser, dirname, isdir, isfile, islink, \
join, lexists, normpath, realpath, relpath
from subprocess import check_call, check_output, CalledProcessError, STDOUT
from stat import ST_MODE
from tempfile import NamedTemporaryFile
# Shared sink used to silence subprocess output; kept open for module lifetime.
DEVNULL = open(os.devnull, 'w')
ENTRYFILE = "__init__.py"  # presumably marks python packages -- confirm usage
GITCONFIG_PATH = join(getenv("HOME"), ".gitconfig")  # user's git configuration
WORKDIR = realpath(join(dirname(__file__)))  # directory containing this script
def add_git_config_entry(params):
    """Run `git config --global <params>` and report whether the file changed."""
    config_path = realpath(GITCONFIG_PATH)
    before = checksum(config_path)
    check_call(["git", "config", "--global"] + params)
    after = checksum(config_path)
    msg = "Added \"{}\" to git config".format(" ".join(params))
    if before == after:
        ok_print(msg)
    else:
        changed_print(msg)
def add_user_to_group(user, group):
    """Ensure *user* belongs to *group*, adding it via gpasswd when missing."""
    msg = "current user is in group {}".format(group)
    sudo_user = getenv("SUDO_USER", "")
    if sudo_user:
        # run `groups` as the invoking (pre-sudo) user
        output = check_output(
            "sudo -u {} groups".format(sudo_user),
            shell=True).decode("utf-8")
    else:
        output = check_output(["groups"], universal_newlines=True)
    if group in output.strip().split():
        ok_print(msg)
        return
    launch_silent(["sudo", "gpasswd", "-a", user, group])
    changed_print(msg + ". Please relogin to take effect.")
def changed_print(msg):
    """Print *msg* behind a yellow 'Changed' tag."""
    print("\033[93mChanged\033[0m " + msg)
def change_java(new_version):
    """Make *new_version* the default archlinux java environment."""
    msg = "{} set to default".format(new_version)
    current = check_output(
        ["sudo", "archlinux-java", "get"],
        universal_newlines=True).strip()
    if current != new_version:
        launch_silent(["sudo", "archlinux-java", "set", new_version])
        changed_print(msg)
    else:
        ok_print(msg)
def checksum(file, algorithm="md5"):
    """Return the hex digest of *file* using *algorithm* ('md5' or 'sha256').

    Returns 0 when the path does not exist (historic sentinel kept so callers
    comparing before/after hashes still behave the same).

    Fixes: an unsupported algorithm used to fall through and raise a
    confusing NameError on the uninitialized local (which also shadowed the
    builtin `hash`); the file handle is now closed deterministically.

    Raises:
        ValueError: for an unsupported *algorithm*.
    """
    if not lexists(file):
        return 0
    if algorithm == "md5":
        digest = hashlib.md5()
    elif algorithm == "sha256":
        digest = hashlib.sha256()
    else:
        raise ValueError("unsupported hash algorithm: {}".format(algorithm))
    with open(file, "rb") as handle:
        digest.update(handle.read())
    return digest.hexdigest()
def chmod(path, mode):
    """Ensure *path* has the octal-string permission *mode*."""
    msg = "{} mode changed to {}".format(path, mode)
    if get_mode(path) != mode:
        launch_silent(["sudo", "chmod", mode, path])
        changed_print(msg)
    else:
        ok_print(msg)
def chsh(user, shell):
    """Ensure *user*'s login shell is *shell*, checking /etc/passwd first."""
    msg = "{}'s shell set to {}".format(user, shell)
    with open("/etc/passwd", "r") as passwd_file:
        for line in passwd_file:
            fields = line.strip().split(":")
            if fields[0] != user:
                continue
            if fields[-1] == shell:
                ok_print(msg)
            else:
                launch_silent(["sudo", "chsh", "-s", shell, user])
                changed_print(msg)
def clone(url, path):
    """Clone *url* into *path*, or pull/rebase when already cloned."""
    path = realpath(path)
    sudo_user = getenv("SUDO_USER", "")
    msg = "repo {} is cloned and up-to-date into {}".format(url, path)
    already_cloned = lexists(join(path, ".git")) and repo_origin(path) == url
    if already_cloned:
        cmd = ["git", "-C", path, "pull", "--recurse-submodules", "--rebase"]
    else:
        cmd = ["git", "clone", "--recurse-submodules", url, path]
    if sudo_user:
        # demote to the invoking user so ownership stays correct
        cmd = ["sudo", "-u", sudo_user] + cmd
    launch_silent(cmd)
    ok_print(msg)
def create_dir(dst):
    """Ensure directory *dst* exists, replacing a regular file in its way."""
    norm_dst = normpath(dst)
    msg = "{} created".format(norm_dst)
    if isdir(norm_dst):
        ok_print(msg)
        return
    if isfile(norm_dst):
        # a plain file occupies the target path; replace it with a directory
        os.remove(norm_dst)
    mkdir_p(norm_dst)
    changed_print(msg)
def daemon_reload():
    """Run `systemctl daemon-reload`; return False when it exits with code 1.

    Bug fix: unexpected exit codes now re-raise the original exception via a
    bare `raise`; the old `raise CalledProcessError` raised the class without
    arguments, producing a TypeError instead of the real error.
    """
    try:
        launch_silent(["systemctl", "daemon-reload"])
    except CalledProcessError as e:
        if e.returncode == 1:
            return False
        raise
    return True
def enable_service(service):
    """Ensure systemd *service* is enabled."""
    msg = "{} is enabled".format(service)
    if not service_enabled(service):
        launch_silent(["sudo", "systemctl", "enable", service])
        changed_print(msg)
    else:
        ok_print(msg)
def error_print(msg):
    """Print *msg* behind a red 'ERROR' tag."""
    print("\033[91mERROR\033[0m " + msg)
def file_lines(path):
    """Yield stripped, non-empty, non-comment lines from *path*."""
    with open(path, "r") as content:
        for raw_line in content:
            stripped = raw_line.strip()
            if not stripped or stripped.startswith("#"):
                continue
            yield stripped
def filter_temp(path):
    """True when *path* is not a hidden/temporary entry ('.' or '_' prefix)."""
    return not (path.startswith(".") or path.startswith("_"))
def get_cmd(line):
    """Split *line* into [command, remaining-arguments-as-one-string]."""
    tokens = line.split()
    return [tokens[0], " ".join(tokens[1:])]
def get_mode(path):
    """Return the 3-digit octal permission string of *path* (lstat: no deref)."""
    mode_bits = os.lstat(path)[ST_MODE]
    return oct(mode_bits)[-3:]
def install_helm_repo(name, url):
    """Register helm chart repository *name* at *url* as the pre-sudo user."""
    changed_print(f"Add helm repo {name}, url {url}")
    sudo_user = getenv("SUDO_USER", "")
    helm_cmd = f"sudo -u {sudo_user} helm repo add {name} {url}"
    launch_silent(cmd=helm_cmd, shell=True)
def git_crypt_install():
    """Build and install git-crypt from the bundled sources when missing."""
    msg = "git-crypt installed"
    if isfile("/usr/local/bin/git-crypt"):
        ok_print(msg)
        return
    src_dir = join(WORKDIR, "..", "identity", "git-crypt")
    launch_silent(["make"], cwd=src_dir)
    launch_silent(["sudo", "make", "install"], cwd=src_dir)
    changed_print(msg)
def git_crypt_unlock(repo_path, key_path):
    """Unlock a git-crypt protected repo when it is still encrypted."""
    msg = "Private repo {} unlocked".format(repo_path)
    # probe a known encrypted file: git-crypt blobs show up as generic binary
    probe = join(repo_path, "ssh_config")
    file_type = check_output(
        ["file", "-b", "--mime-type", probe],
        universal_newlines=True).strip()
    if file_type == "application/octet-stream":
        check_call(["git-crypt", "unlock", key_path], cwd=repo_path)
        changed_print(msg)
    else:
        ok_print(msg)
def info_print(msg):
    """Print *msg* entirely in blue."""
    print("\033[94m" + msg + "\033[0m")
def install_file(src, dst):
    """Symlink *dst* -> *src*, replacing a wrong file or a dangling link."""
    norm_src = normpath(src)
    norm_dst = normpath(dst)
    msg = "{} -> {}".format(norm_src, norm_dst)
    if isfile(norm_dst):
        if realpath(norm_dst) == realpath(norm_src):
            # already points at the right target
            ok_print(msg)
            return
        os.remove(norm_dst)
    elif islink(norm_dst):
        # Dangling link
        os.remove(norm_dst)
    os.symlink(norm_src, norm_dst)
    changed_print(msg)
def install_remote_archive(url, reference_checksum, path):
    """Download a .tar.gz from *url*, verify its sha256, extract into *path*.

    NOTE(review): idempotence is detected via a hard-coded 'google-cloud-sdk'
    directory, so this only short-circuits for that particular archive.

    Bug fixes: the mismatch message is now actually formatted (the old code
    passed the template and arguments as separate Exception args, and passed
    the `checksum` function instead of the reference value); the temp file is
    flushed before hashing so the digest covers all written bytes.
    """
    msg = "{} archive installed in {}".format(url, path)
    if isdir(join(path, "google-cloud-sdk")):
        ok_print(msg)
        return
    with NamedTemporaryFile() as temp_file:
        with urllib.request.urlopen(url) as download_url:
            temp_file.write(download_url.read())
        temp_file.flush()  # ensure buffered bytes reach disk before hashing
        file_checksum = checksum(temp_file.name, "sha256")
        if file_checksum != reference_checksum:
            raise Exception(
                "Checksum mismatch!!!\n"
                "Downloaded file checksum: {}\n"
                "Reference checksum: {}".format(file_checksum, reference_checksum)
            )
        with tarfile.open(temp_file.name, "r:gz") as archive:
            archive.extractall(path)
    sudo_user = getenv("SUDO_USER", "")
    launch_silent(["chown", "-R", sudo_user, path])
    changed_print(msg)
def install(package):
    """Install *package* via yaourt when it is not already present.

    Bug fix: the non-sudo branch passed the whole command string to
    launch_silent without shell=True, so check_call looked for an executable
    literally named "yaourt -S --noconfirm <pkg>". The string now always
    runs through the shell.
    """
    msg = "{} installed".format(package)
    if is_installed(package):
        ok_print(msg)
        return
    changed_print(msg)
    sudo_user = getenv("SUDO_USER", "")
    cmd = "yaourt -S --noconfirm {}".format(package)
    # Handles demotion since yaourt refuses to be launched as root
    if sudo_user:
        cmd = "sudo -u {} {}".format(sudo_user, cmd)
    launch_silent(cmd, shell=True)
def install_tree(src,
                 dst,
                 hidden=False,
                 file_filter=filter_temp,
                 dir_filter=filter_temp):
    """Mirror *src* into *dst* (symlinked files, created dirs).

    When *hidden* is true, every installed name gets a leading dot.
    """
    create_dir(dst)
    for root, dirs, files in os.walk(src):
        dst_path = join(dst, relpath(root, src))
        for name in files:
            if not file_filter(name):
                continue
            target_name = ".{}".format(name) if hidden else name
            install_file(join(root, name), join(dst_path, target_name))
        for name in dirs:
            if not dir_filter(name):
                continue
            target_name = ".{}".format(name) if hidden else name
            create_dir(join(dst_path, target_name))
def is_installed(package):
    """True when pacman reports *package* as installed.

    Bug fix: unexpected pacman exit codes now re-raise the original
    exception via a bare `raise` instead of `raise CalledProcessError`,
    which raised the class without arguments (a TypeError).
    """
    try:
        launch_silent(["pacman", "-Qs", "^{}$".format(package)])
    except CalledProcessError as e:
        if e.returncode == 1:
            return False
        raise
    return True
def is_laptop():
    """Heuristic: a machine exposing a battery sensor is a laptop."""
    battery = psutil.sensors_battery()
    return battery is not None
def launch_silent(cmd, cwd=None, shell=False):
    """Run *cmd* discarding all output; raise CalledProcessError on failure."""
    check_call(cmd, cwd=cwd, shell=shell, stdout=DEVNULL, stderr=STDOUT)
def line_in_file(path, line):
    """Append *line* to *path* unless an identical line already exists."""
    path = realpath(expanduser(path))
    msg = "\"{}\" in file \"{}\"".format(line, path)
    with open(path, 'a+') as f:
        f.seek(0)
        # iterating leaves the file position at EOF, so a write appends
        if any(existing.strip() == line for existing in f):
            ok_print(msg)
            return
        f.write(line + "\n")
    changed_print(msg)
def mkdir_p(path):
    """Create *path* and any missing parents (like `mkdir -p`).

    Replaces the manual per-component walk with os.makedirs; exist_ok makes
    it idempotent exactly as the old lexists() check did.
    """
    os.makedirs(path, exist_ok=True)
def modprobe(module):
    """Load kernel *module* (always reported as changed)."""
    changed_print("{} module loaded".format(module))
    launch_silent(["sudo", "modprobe", module])
def ok_print(msg):
    """Print *msg* behind a green 'OK' tag."""
    print("\033[92mOK\033[0m " + msg)
def pacman_refresh():
    """Force-sync the pacman database and refresh the signing keys."""
    steps = (
        (["sudo", "pacman", "-Syy"], "Pacman database sync'ed"),
        (["sudo", "pacman-key", "--refresh-keys"], "Pacman keys refreshed"),
    )
    for cmd, msg in steps:
        launch_silent(cmd)
        changed_print(msg)
def remove_file(target):
    """Delete the file *target*; refuses to delete directories."""
    norm_target = normpath(target)
    msg = "{} removed".format(norm_target)
    if isdir(norm_target):
        error_print(
            "{} can't remove since it is not a file.".format(norm_target))
        return
    if not lexists(norm_target):
        ok_print(msg)
        return
    os.remove(norm_target)
    changed_print(msg)
def remove_matching_files(src, dst, hidden=False, file_filter=filter_temp):
    """Remove from *dst* every file that mirrors one under *src*.

    Directories are deliberately left untouched (too dangerous to delete).
    """
    for root, dirs, files in os.walk(src):
        dst_path = join(dst, relpath(root, src))
        for name in files:
            if not file_filter(name):
                continue
            dst_name = ".{}".format(name) if hidden else name
            remove_file(join(dst_path, dst_name))
def remove(package):
    """Uninstall *package* via yaourt when present."""
    msg = "{} removed".format(package)
    if not is_installed(package):
        ok_print(msg)
        return
    changed_print(msg)
    launch_silent(["yaourt", "-R", "--noconfirm", package])
def remove_tree(target):
    """Recursively delete *target* when it exists."""
    msg = "{} removed".format(target)
    if not lexists(target):
        ok_print(msg)
        return
    shutil.rmtree(target)
    changed_print(msg)
def repo_origin(path):
    """Return the origin remote URL of the git repository at *path*."""
    output = check_output(
        ["git", "-C", path, "remote", "get-url", "origin"],
        universal_newlines=True
    )
    return output.strip()
def service_enabled(service):
    """True when `systemctl is-enabled` reports *service* as enabled.

    Bug fix: unexpected exit codes now re-raise the original exception via a
    bare `raise` instead of `raise CalledProcessError` (class without args).
    """
    try:
        launch_silent(["systemctl", "is-enabled", service])
    except CalledProcessError as e:
        if e.returncode == 1:
            return False
        raise
    return True
def stop_process(name):
    """Kill the first process named *name* owned by the current user.

    Bug fix: the bare `except` (which also swallowed KeyboardInterrupt and
    SystemExit) is narrowed to the failures pgrep can actually produce: a
    non-zero exit when nothing matches, or OSError when pgrep is missing.
    """
    msg = "{} stopped".format(name)
    try:
        stdout = check_output(["pgrep", "-u", getuser(), name])
    except (CalledProcessError, OSError):
        # pgrep found nothing (or is unavailable): treat as already stopped
        ok_print(msg)
        return
    pid = stdout.splitlines()[0]
    check_call(["kill", pid])
    changed_print(msg)
def vim_cmd(cmd):
    """Run `vim <cmd>` (demoted to $SUDO_USER when set) and report the result.

    Bug fix: the bare `except` wrapped the whole body, so a failure before
    the command string existed crashed with UnboundLocalError in the error
    path, and KeyboardInterrupt was swallowed too. Only the actual vim
    invocation is guarded now, against the exceptions check_call can raise.
    """
    sudo_user = getenv("SUDO_USER", "")
    if sudo_user:
        full_cmd = "sudo -u {} vim {}".format(sudo_user, cmd)
    else:
        full_cmd = "vim {}".format(cmd)
    try:
        check_call(full_cmd, shell=True)
    except (CalledProcessError, OSError):
        error_print("vim {} failed".format(full_cmd))
    else:
        changed_print("vim {} succeeded".format(cmd))
|
import mcl_platform.tasking
from tasking_dsz import *
# Resolve the active tasking framework and re-export its RPC constants.
_fw = mcl_platform.tasking.GetFramework()
if _fw == 'dsz':
    # `dsz` is provided by the star import from tasking_dsz above.
    RPC_INFO_STATUS = dsz.RPC_INFO_STATUS
    RPC_INFO_GET_FILTER = dsz.RPC_INFO_GET_FILTER
    RPC_INFO_VALIDATE_FILTER = dsz.RPC_INFO_VALIDATE_FILTER
    RPC_INFO_SEND_CONTROL = dsz.RPC_INFO_SEND_CONTROL
else:
    # fail fast at import time rather than with missing names later
    raise RuntimeError('Unsupported framework (%s)' % _fw)
|
# Interactive demo: prompt for a name and a password, then greet and gate.
print('Hello and by the way, what\'s your name?')
yourName = input()
print('So you say...what would be your password, if you are in fact the person who belongs here?')
password = input()
# NOTE(review): the two checks are independent -- a non-Mary user with the
# right password still sees 'Access granted.'; confirm this is intended.
if yourName == 'Mary':
    print('Hello Mary')
else:
    print('Getouttahere')
if password == 'swordfish':
    print('Access granted.')
else:
    print('Wrong password.')
|
import eventlet
from eventlet import backdoor
import signal, code, traceback
import threading
# Feature toggles for the two debugging entry points below.
DEBUG_BACKDOOR = True
DEBUG_SIGINT = False
# Namespace exposed inside the interactive debugging sessions.
debug_locals = {}
if DEBUG_BACKDOOR:
    # Serve an eventlet backdoor REPL on localhost:3000 in a daemon thread
    # so it never blocks interpreter shutdown.
    backdoor_th = threading.Thread(target=backdoor.backdoor_server, args=(eventlet.listen(('localhost', 3000)), debug_locals))
    backdoor_th.daemon = True
    backdoor_th.start()
def signal_handler(signal_number):
    """
    Decorator factory: register the decorated function as the handler for
    *signal_number*. The function itself is returned unchanged so it stays
    directly callable.
    """
    def __decorator(function):
        signal.signal(signal_number, function)
        return function
    return __decorator
if DEBUG_SIGINT:
    # Only installed when the toggle above is set: SIGINT drops into a REPL
    # instead of terminating the process.
    @signal_handler(signal.SIGINT)
    def debug_handler(sig, frame):
        """Interrupt running process, and provide a python prompt for
        interactive debugging."""
        debug_locals["_frame"] = frame  # Allow access to frame object.
        i = code.InteractiveConsole(debug_locals)
        message = "Signal recieved : entering python shell.\nTraceback:\n"
        message += ''.join(traceback.format_stack(frame))
        i.interact(message)
|
from netaddr import *
from datetime import datetime
import blescan
import time
import sys
import bluetooth._bluetooth as bluez
from Kitty import Kitty
from CheckKittys import CheckKittys
from BLESerialScanner import BLESerialScanner
import SendMail
import config
def process(mac, rssi):
    """Handle one BLE advertisement: mark the matching kitty as heard.

    Python 2 module. Updates the kitty's lastHeard timestamp, mails a
    "reacquired" notice when it had previously timed out (ttw != 180 --
    presumably a time-to-wait countdown reset to 180; confirm in
    CheckKittys), and logs unknown MAC addresses.
    """
    found = False
    for k in config.kittys:
        if mac == k.mac:
            k.lastHeard = datetime.now()
            print 'Heard ' , k.name , ' at ' + str(rssi) + 'dBm!'
            if k.ttw != 180:
                # the kitty had gone quiet before; announce its return
                SendMail.sendMail(k.name + ' reacquired')
            k.ttw = 180
            found = True
            break
    if not found:
        print 'Unkown mac: ' , mac
    # flush so output shows up promptly when stdout is a pipe/log
    sys.stdout.flush()
def main():
    """Start the watchdog thread and BLE scanner, then idle until Ctrl-C.

    Python 2 module. The CheckKittys daemon thread watches for kitties that
    stop being heard; BLESerialScanner feeds advertisements into process().
    """
    running = True
    kittyChecker = CheckKittys()
    scanner = BLESerialScanner(process)
    kittyChecker.daemon = True
    kittyChecker.kittys = config.kittys
    kittyChecker.running = True
    kittyChecker.start()
    scanner.start()
    message = "Kitty Tracker Active! Now tracking " + ", ".join(str(k.name) for k in config.kittys)
    print message
    SendMail.sendMail(message)
    try:
        # main thread just sleeps; the work happens in the two threads above
        while running:
            time.sleep(1)
    except KeyboardInterrupt:
        # signal both threads to wind down before exiting
        running = False
        kittyChecker.running = False
        scanner.running = False
        print "Terminating..."
if __name__ == '__main__':
    main()
|
from tempfile import mkstemp
from shutil import move
import glob, os , os.path
def find(name, directory):
    """Walk *directory* and return (dirpath, filename) for the first file
    whose name ends with *name*; None when nothing matches."""
    for root, dirs, files in os.walk(directory):
        for candidate in files:
            if candidate.endswith(name):
                return (root, candidate)
def findFiles(name, directory, parent):
    """Collect (dirpath, filename) pairs for files ending with *name* that
    live under any directory ending with *parent* inside *directory*."""
    matches = []
    for root, dirs, files in os.walk(directory):
        for sub_dir in dirs:
            if not sub_dir.endswith(parent):
                continue
            parent_path = root + ("/" + sub_dir)
            for sub_root, _, sub_files in os.walk(parent_path):
                matches.extend(
                    (sub_root, file_name)
                    for file_name in sub_files
                    if file_name.endswith(name)
                )
    return matches
def replace(file_path, pattern, subst):
    """Replace every occurrence of *pattern* with *subst* inside *file_path*.

    Writes to a temp file first, then atomically moves it over the original.

    Bug fixes: `close(fh)` and `remove(file_path)` referenced undefined
    names and raised NameError; they are now `os.close` / `os.remove`.
    """
    # Create temp file
    fh, abs_path = mkstemp()
    with open(abs_path, 'w') as new_file:
        with open(file_path) as old_file:
            for line in old_file:
                new_file.write(line.replace(pattern, subst))
    os.close(fh)  # release the descriptor mkstemp handed us
    # Remove original file
    os.remove(file_path)
    # Move new file
    move(abs_path, file_path)
|
from relier.web.views import AuthenticatedView
from relier.models import Event
from flask import render_template, g, abort, redirect, request
class DeleteEvent(AuthenticatedView):
    # Admin-only endpoint: deletes one event (and, recursively, rows that
    # reference it) then redirects to the event list.
    # NOTE(review): performing a destructive delete on GET is unconventional
    # (crawlers/prefetchers can trigger it) -- confirm this is intended.
    def get(self, event_id):
        """Delete the event with primary key *event_id*.

        Responds 403 for non-admins, 404 when the event does not exist,
        otherwise redirects to /events/.
        """
        if not g.user.is_admin:
            abort(403)
        # existence check first so a missing id yields 404, not a DB error
        if Event.select().where(Event.id == event_id).count() == 0:
            abort(404)
        event = Event.get(Event.id == event_id)
        # recursive=True also removes dependent rows referencing this event
        event.delete_instance(recursive=True)
        return redirect('/events/')
|
""" Unit tests for `weblayer.bootstrap`.
"""
import unittest
try: # pragma: no cover
    from mock import Mock
except ImportError: # pragma: no cover
    # Bug fix: only a missing `mock` package should be tolerated here; the
    # previous bare `except:` silently hid every other import-time failure.
    pass
class TestInitBootstrapper(unittest.TestCase):
    """ Exercise the constructor logic of `Bootstrapper.__init__`.
    """
    def make_one(self, *args, **kwargs):
        from weblayer.bootstrap import Bootstrapper
        return Bootstrapper(*args, **kwargs)
    def test_init_settings(self):
        """ The `settings` argument is stored as `self._user_settings`.
        """
        fake_settings = Mock()
        instance = self.make_one(settings=fake_settings)
        self.assertTrue(instance._user_settings == fake_settings)
    def test_init_url_map(self):
        """ The `url_mapping` argument is stored as `self._url_mapping`.
        """
        fake_mapping = Mock()
        instance = self.make_one(url_mapping=fake_mapping)
        self.assertTrue(instance._url_mapping == fake_mapping)
class TestCallBootstrapper(unittest.TestCase):
    """ Test the logic of `Bootstrap.__call__`.
    """

    def setUp(self):
        # Swap the module-level component registry for a mock for the
        # duration of each test; tearDown puts the real one back.
        from weblayer import bootstrap
        self.__registry = bootstrap.registry
        self.registry = Mock()
        self.registry.getUtility.return_value = 'registered utility'
        bootstrap.registry = self.registry

    def tearDown(self):
        from weblayer import bootstrap
        bootstrap.registry = self.__registry

    def make_one(self, *args, **kwargs):
        from weblayer.bootstrap import Bootstrapper
        instance = Bootstrapper(*args, **kwargs)
        instance.require_settings = Mock(return_value='required settings')
        instance.register_components = Mock()
        return instance

    def _require_settings_kwarg(self, instance, key):
        # Keyword argument `key` of the most recent `require_settings` call.
        return instance.require_settings.call_args[1][key]

    def test_register_components_settings_kwarg(self):
        """ Calls `self.register_components(settings={'a': 'b'})`.
        """
        instance = self.make_one()
        instance(settings={'a': 'b'})
        instance.register_components.assert_called_with(settings={'a': 'b'})

    def test_require_settings_register_components(self):
        """ Calls `self.register_components(settings=settings)`.
        """
        instance = self.make_one()
        instance(require_settings=True)
        instance.register_components.assert_called_with(
            settings='required settings')

    def test_require_settings_by_default(self):
        """ Calls `self.register_components(settings=settings)` by default.
        """
        instance = self.make_one()
        instance()
        instance.register_components.assert_called_with(
            settings='required settings')

    def test_require_settings_false_register_components(self):
        """ Calls `self.register_components(settings=None)`.
        """
        instance = self.make_one()
        instance(require_settings=False)
        instance.register_components.assert_called_with(settings=None)

    def test_register_components_path_router_kwarg(self):
        """ Calls `self.register_components(path_router='path router')`.
        """
        instance = self.make_one()
        instance(path_router='path router')
        instance.register_components.assert_called_with(
            settings='required settings',
            path_router='path router')

    def test_require_settings_packages(self):
        """ Passes `packages` through to `self.require_settings`, defaulting
        to `None`.
        """
        instance = self.make_one()
        instance()
        self.assertTrue(
            self._require_settings_kwarg(instance, 'packages') is None)
        instance(packages=['foo'])
        self.assertEqual(
            self._require_settings_kwarg(instance, 'packages'), ['foo'])

    def test_require_settings_scan_framework(self):
        """ Passes `scan_framework` through to `self.require_settings`,
        defaulting to `True`.
        """
        instance = self.make_one()
        instance()
        self.assertTrue(
            self._require_settings_kwarg(instance, 'scan_framework') is True)
        instance(scan_framework=False)
        self.assertTrue(
            self._require_settings_kwarg(instance, 'scan_framework') is False)

    def test_require_settings_extra_categories(self):
        """ Passes `extra_categories` through to `self.require_settings`,
        defaulting to `None`.
        """
        instance = self.make_one()
        instance()
        self.assertTrue(
            self._require_settings_kwarg(instance, 'extra_categories') is None)
        instance(extra_categories=['a', 'b', 'c'])
        self.assertEqual(
            self._require_settings_kwarg(instance, 'extra_categories'),
            ['a', 'b', 'c'])

    def test_returns_registered_settings_and_path_router(self):
        """ Returns `settings, path_router`.
        """
        from weblayer.interfaces import IPathRouter
        from weblayer.interfaces import ISettings
        instance = self.make_one()
        settings, path_router = instance()
        utility_calls = self.registry.getUtility.call_args_list
        self.assertEqual(utility_calls[0][0][0], ISettings)
        self.assertEqual(settings, 'registered utility')
        self.assertEqual(utility_calls[1][0][0], IPathRouter)
        self.assertEqual(path_router, 'registered utility')
class TestBootstrapperRequireSettings(unittest.TestCase):
    """ Test the logic of `bootstrapper.require_settings`.
    """

    # Module names the fake `sys.modules` exposes: the full weblayer
    # framework plus two user packages, 'a' and 'b'.  Each name maps to the
    # fake package object '<name> package'.
    _FAKE_MODULE_NAMES = [
        'weblayer', 'weblayer.auth', 'weblayer.base', 'weblayer.bootstrap',
        'weblayer.component', 'weblayer.cookie', 'weblayer.interfaces',
        'weblayer.method', 'weblayer.normalise', 'weblayer.request',
        'weblayer.route', 'weblayer.settings', 'weblayer.static',
        'weblayer.template', 'weblayer.utils', 'weblayer.wsgi',
        'a', 'b',
    ]

    def setUp(self):
        # Patch both `sys` and `RequirableSettings` inside the bootstrap
        # module; tearDown restores the originals.
        from weblayer import bootstrap
        self.__sys = bootstrap.sys
        self.__RequirableSettings = bootstrap.RequirableSettings
        self.sys = Mock()
        self.sys.modules = dict(
            (name, '%s package' % name) for name in self._FAKE_MODULE_NAMES)
        self.RequirableSettings = Mock(return_value='requirable settings')
        bootstrap.sys = self.sys
        bootstrap.RequirableSettings = self.RequirableSettings

    def tearDown(self):
        from weblayer import bootstrap
        bootstrap.sys = self.__sys
        bootstrap.RequirableSettings = self.__RequirableSettings

    def make_one(self, *args, **kwargs):
        from weblayer.bootstrap import Bootstrapper
        return Bootstrapper(*args, **kwargs)

    def _last_kwarg(self, key):
        # Keyword argument `key` of the latest `RequirableSettings` call.
        return self.RequirableSettings.call_args[1][key]

    def test_packages(self):
        """ If `packages` is not `None`, which it defaults to, passes each
        item in `packages` through to `RequirableSettings`, preceded by
        the 'weblayer' framework packages if `scan_framework` is True,
        which is the default.
        """
        self.make_one().require_settings(packages=None, scan_framework=False)
        self.assertEqual(self._last_kwarg('packages'), [])
        self.make_one().require_settings(scan_framework=False)
        self.assertEqual(self._last_kwarg('packages'), [])
        self.make_one().require_settings()
        scanned = self._last_kwarg('packages')
        self.assertEqual(scanned[0], 'weblayer.auth package')
        self.assertEqual(scanned[-1], 'weblayer.wsgi package')
        self.make_one().require_settings(packages=['a', 'b'])
        scanned = self._last_kwarg('packages')
        self.assertEqual(scanned[0], 'weblayer.auth package')
        self.assertEqual(scanned[-3], 'weblayer.wsgi package')
        self.assertEqual(scanned[-2], 'a package')
        self.assertEqual(scanned[-1], 'b package')

    def test_extra_categories(self):
        """ `extra_categories` is passed to `RequirableSettings`
        defaulting to `None`.
        """
        self.make_one().require_settings(extra_categories=None)
        self.assertTrue(self._last_kwarg('extra_categories') is None)
        self.make_one().require_settings()
        self.assertTrue(self._last_kwarg('extra_categories') is None)
        self.make_one().require_settings(extra_categories=['a', 'b', 'c'])
        self.assertEqual(self._last_kwarg('extra_categories'), ['a', 'b', 'c'])

    def test_returns_settings(self):
        """ Returns `settings`.
        """
        self.assertEqual(
            self.make_one().require_settings(), 'requirable settings')
def _was_registered(m, interface):
    """Return ``True`` if any recorded call on mock `m` received
    `interface` as its final positional argument.

    >>> m = Mock()
    >>> _ = m('a', 'IFoo')
    >>> _ = m('b', 'c', 'd', 'e', 'IBar')
    >>> _was_registered(m, 'IFoo')
    True
    >>> _was_registered(m, 'IBar')
    True
    >>> _was_registered(m, 'IBaz')
    False

    """
    return any(
        recorded[0][-1] == interface for recorded in m.call_args_list
    )
def _was_called_with(m, *args, **kwargs):
    """Return ``True`` if mock `m` was called at least once with exactly
    the given positional and keyword arguments.

    >>> m = Mock()
    >>> _ = m('a', foo='bar')
    >>> _ = m('b', baz='blah')
    >>> _was_called_with(m, 'b', baz='blah')
    True
    >>> _was_called_with(m, 'a', foo='bar')
    True
    >>> _was_called_with(m, 'a')
    False
    >>> _was_called_with(m, baz='blah')
    False

    """
    return any(
        recorded[0] == args and recorded[1] == kwargs
        for recorded in m.call_args_list
    )
class TestBootstrapperRegisterComponents(unittest.TestCase):
    """ Test the logic of `bootstrapper.register_components`.
    """

    def setUp(self):
        from weblayer import bootstrap
        self.__registry = bootstrap.registry
        self.registry = Mock()
        self.registry.registerUtility.return_value = 'registered utility'
        self.registry.registerAdapter.return_value = 'registered adapter'
        bootstrap.registry = self.registry

    def tearDown(self):
        from weblayer import bootstrap
        bootstrap.registry = self.__registry

    def make_one(self, *args, **kwargs):
        from weblayer.bootstrap import Bootstrapper
        return Bootstrapper(*args, **kwargs)

    def _patch_bootstrap(self, name, replacement):
        """ Replace `weblayer.bootstrap.<name>` with `replacement` for the
        duration of the test, restoring the original on cleanup.

        The original tests restored each patched attribute by hand and, in
        two places, restored it under the wrong name
        (`MemoryCachedSecureCookieWrapper` instead of
        `SignedSecureCookieWrapper`); the `settings` tests never restored
        `RequirableSettings` at all, leaking mocks into later tests.
        `addCleanup` makes the restore automatic and always correct.
        """
        from weblayer import bootstrap
        original = getattr(bootstrap, name)
        setattr(bootstrap, name, replacement)
        self.addCleanup(setattr, bootstrap, name, original)
        return replacement

    def _assert_utility_registered(self, component, interface):
        """ Assert `component` was registered as a utility for `interface`. """
        self.assertTrue(
            _was_called_with(
                self.registry.registerUtility,
                component,
                interface
            )
        )

    def _assert_adapter_registered(self, component, required, provided):
        """ Assert `component` was registered as an adapter from the
        `required` interfaces to the `provided` interface.
        """
        self.assertTrue(
            _was_called_with(
                self.registry.registerAdapter,
                component,
                required=required,
                provided=provided
            )
        )

    def _check_adapter_not_registered(self, kwarg, provided):
        """ Calling `register_components(<kwarg>=False)` must register
        nothing for `provided`.
        """
        self.make_one().register_components(**{kwarg: False})
        self.assertTrue(
            not _was_registered(self.registry.registerAdapter, provided)
        )

    def _check_adapter_defaulted(self, attr, required, provided,
                                 **register_kwargs):
        """ The default implementation `bootstrap.<attr>` is registered as
        an adapter from `required` to `provided`.
        """
        component = self._patch_bootstrap(attr, Mock())
        self.make_one().register_components(**register_kwargs)
        self._assert_adapter_registered(component, required, provided)

    def test_settings_false(self):
        """ If `settings` is `False`, nothing is registered.
        """
        from weblayer.interfaces import ISettings
        self.make_one().register_components(settings=False)
        self.assertTrue(
            not _was_registered(self.registry.registerUtility, ISettings)
        )

    def _check_settings_defaulted(self, **register_kwargs):
        """ Shared body for the `settings=None` and default cases: a fresh
        `RequirableSettings` must be instantiated, called with
        `self._user_settings` and registered.
        """
        from weblayer.interfaces import ISettings
        RequirableSettings = self._patch_bootstrap(
            'RequirableSettings', Mock())
        requirable_settings = RequirableSettings.return_value
        bootstrapper = self.make_one()
        bootstrapper.register_components(**register_kwargs)
        RequirableSettings.assert_called_with()
        requirable_settings.assert_called_with(bootstrapper._user_settings)
        self._assert_utility_registered(requirable_settings, ISettings)

    def test_settings_none(self):
        """ If `settings` is `None`, init `RequirableSettings`,
        call with `self._user_settings` and register `self._settings`.
        """
        self._check_settings_defaulted(settings=None)

    def test_settings_default_to_none(self):
        """ Init `RequirableSettings`, call with
        `self._user_settings` and register `self._settings`.
        """
        self._check_settings_defaulted()

    def test_settings_passed_in(self):
        """ If `settings` is neither `False` nor `None`, it's called
        with `self._user_settings` and registered.
        """
        from weblayer.interfaces import ISettings
        settings = Mock()
        bootstrapper = self.make_one()
        bootstrapper.register_components(settings=settings)
        settings.assert_called_with(bootstrapper._user_settings)
        self._assert_utility_registered(settings, ISettings)

    def test_path_router_false(self):
        """ If `path_router` is `False`, nothing is registered.
        """
        from weblayer.interfaces import IPathRouter
        self.make_one().register_components(path_router=False)
        self.assertTrue(
            not _was_registered(self.registry.registerUtility, IPathRouter)
        )

    def _check_path_router_defaulted(self, **register_kwargs):
        """ Shared body for the `path_router=None` and default cases:
        `RegExpPathRouter` is called with `self._url_mapping` and the
        result registered.
        """
        from weblayer.interfaces import IPathRouter
        RegExpPathRouter = self._patch_bootstrap(
            'RegExpPathRouter', Mock(return_value='path router'))
        bootstrapper = self.make_one()
        bootstrapper.register_components(**register_kwargs)
        RegExpPathRouter.assert_called_with(bootstrapper._url_mapping)
        self._assert_utility_registered('path router', IPathRouter)

    def test_path_router_none(self):
        """ If `path_router` is `None`, call `RegExpPathRouter` with
        `self._url_mapping` and register the resulting `path_router`.
        """
        self._check_path_router_defaulted(path_router=None)

    def test_path_router_defaults_to_none(self):
        """ Call `RegExpPathRouter` with `self._url_mapping` and register
        the resulting `path_router`.
        """
        self._check_path_router_defaulted()

    def test_path_router_passed_in(self):
        """ If `path_router` is neither `False` nor `None`, it's registered.
        """
        from weblayer.interfaces import IPathRouter
        path_router = Mock()
        self.make_one().register_components(path_router=path_router)
        self._assert_utility_registered(path_router, IPathRouter)

    def test_template_renderer_false(self):
        """ If `TemplateRenderer` is `False`, nothing is registered.
        """
        from weblayer.interfaces import ITemplateRenderer
        self._check_adapter_not_registered(
            'TemplateRenderer', ITemplateRenderer)

    def test_template_renderer_none(self):
        """ If `TemplateRenderer` is `None`, register `MakoTemplateRenderer`.
        """
        from weblayer.interfaces import ISettings
        from weblayer.interfaces import ITemplateRenderer
        self._check_adapter_defaulted(
            'MakoTemplateRenderer', [ISettings], ITemplateRenderer,
            TemplateRenderer=None)

    def test_template_renderer_defaults_to_none(self):
        """ Register `MakoTemplateRenderer`.
        """
        from weblayer.interfaces import ISettings
        from weblayer.interfaces import ITemplateRenderer
        self._check_adapter_defaulted(
            'MakoTemplateRenderer', [ISettings], ITemplateRenderer)

    def test_template_renderer_passed_in(self):
        """ If `TemplateRenderer` is neither `False` nor `None`,
        it's registered.
        """
        from weblayer.interfaces import ISettings
        from weblayer.interfaces import ITemplateRenderer
        TemplateRenderer = Mock()
        self.make_one().register_components(TemplateRenderer=TemplateRenderer)
        self._assert_adapter_registered(
            TemplateRenderer, [ISettings], ITemplateRenderer)

    def test_authentication_manager_false(self):
        """ If `AuthenticationManager` is `False`, nothing is registered.
        """
        from weblayer.interfaces import IAuthenticationManager
        self._check_adapter_not_registered(
            'AuthenticationManager', IAuthenticationManager)

    def test_authentication_manager_none(self):
        """ If `AuthenticationManager` is `None`, register
        `TrivialAuthenticationManager`.
        """
        from weblayer.interfaces import IRequest
        from weblayer.interfaces import IAuthenticationManager
        self._check_adapter_defaulted(
            'TrivialAuthenticationManager', [IRequest],
            IAuthenticationManager, AuthenticationManager=None)

    def test_authentication_manager_defaults_to_none(self):
        """ Register `TrivialAuthenticationManager`.
        """
        from weblayer.interfaces import IRequest
        from weblayer.interfaces import IAuthenticationManager
        self._check_adapter_defaulted(
            'TrivialAuthenticationManager', [IRequest],
            IAuthenticationManager)

    def test_authentication_manager_passed_in(self):
        """ If `AuthenticationManager` is neither `False` nor `None`,
        it's registered.
        """
        from weblayer.interfaces import IRequest
        from weblayer.interfaces import IAuthenticationManager
        AuthenticationManager = Mock()
        self.make_one().register_components(
            AuthenticationManager=AuthenticationManager
        )
        self._assert_adapter_registered(
            AuthenticationManager, [IRequest], IAuthenticationManager)

    def test_static_url_generator_false(self):
        """ If `StaticURLGenerator` is `False`, nothing is registered.
        """
        from weblayer.interfaces import IStaticURLGenerator
        self._check_adapter_not_registered(
            'StaticURLGenerator', IStaticURLGenerator)

    def test_static_url_generator_none(self):
        """ If `StaticURLGenerator` is `None`, register
        `MemoryCachedStaticURLGenerator`.
        """
        from weblayer.interfaces import IRequest
        from weblayer.interfaces import ISettings
        from weblayer.interfaces import IStaticURLGenerator
        self._check_adapter_defaulted(
            'MemoryCachedStaticURLGenerator', [IRequest, ISettings],
            IStaticURLGenerator, StaticURLGenerator=None)

    def test_static_url_generator_defaults_none(self):
        """ Register `MemoryCachedStaticURLGenerator`.
        """
        from weblayer.interfaces import IRequest
        from weblayer.interfaces import ISettings
        from weblayer.interfaces import IStaticURLGenerator
        self._check_adapter_defaulted(
            'MemoryCachedStaticURLGenerator', [IRequest, ISettings],
            IStaticURLGenerator)

    def test_static_url_generator_passed_in(self):
        """ If `StaticURLGenerator` is neither `False` nor `None`,
        it's registered.
        """
        from weblayer.interfaces import IRequest
        from weblayer.interfaces import ISettings
        from weblayer.interfaces import IStaticURLGenerator
        StaticURLGenerator = Mock()
        self.make_one().register_components(
            StaticURLGenerator=StaticURLGenerator)
        self._assert_adapter_registered(
            StaticURLGenerator, [IRequest, ISettings], IStaticURLGenerator)

    def test_secure_cookie_wrapper_false(self):
        """ If `SecureCookieWrapper` is `False`, nothing is registered.
        """
        from weblayer.interfaces import ISecureCookieWrapper
        self._check_adapter_not_registered(
            'SecureCookieWrapper', ISecureCookieWrapper)

    def test_secure_cookie_wrapper_none(self):
        """ If `SecureCookieWrapper` is `None`, register
        `SignedSecureCookieWrapper`.
        """
        from weblayer.interfaces import IRequest, IResponse
        from weblayer.interfaces import ISettings
        from weblayer.interfaces import ISecureCookieWrapper
        self._check_adapter_defaulted(
            'SignedSecureCookieWrapper',
            [IRequest, IResponse, ISettings], ISecureCookieWrapper,
            SecureCookieWrapper=None)

    def test_secure_cookie_wrapper_defaults_none(self):
        """ Register `SignedSecureCookieWrapper`.
        """
        from weblayer.interfaces import IRequest, IResponse
        from weblayer.interfaces import ISettings
        from weblayer.interfaces import ISecureCookieWrapper
        self._check_adapter_defaulted(
            'SignedSecureCookieWrapper',
            [IRequest, IResponse, ISettings], ISecureCookieWrapper)

    def test_secure_cookie_wrapper_passed_in(self):
        """ If `SecureCookieWrapper` is neither `False` nor `None`,
        it's registered.
        """
        from weblayer.interfaces import IRequest, IResponse
        from weblayer.interfaces import ISettings
        from weblayer.interfaces import ISecureCookieWrapper
        SecureCookieWrapper = Mock()
        self.make_one().register_components(
            SecureCookieWrapper=SecureCookieWrapper)
        self._assert_adapter_registered(
            SecureCookieWrapper, [IRequest, IResponse, ISettings],
            ISecureCookieWrapper)

    def test_method_selector_false(self):
        """ If `MethodSelector` is `False`, nothing is registered.
        """
        from weblayer.interfaces import IMethodSelector
        self._check_adapter_not_registered('MethodSelector', IMethodSelector)

    def test_method_selector_none(self):
        """ If `MethodSelector` is `None`, register
        `ExposedMethodSelector`.
        """
        from weblayer.interfaces import IRequestHandler
        from weblayer.interfaces import IMethodSelector
        self._check_adapter_defaulted(
            'ExposedMethodSelector', [IRequestHandler], IMethodSelector,
            MethodSelector=None)

    def test_method_selector_defaults_to_none(self):
        """ Register `ExposedMethodSelector`.
        """
        from weblayer.interfaces import IRequestHandler
        from weblayer.interfaces import IMethodSelector
        self._check_adapter_defaulted(
            'ExposedMethodSelector', [IRequestHandler], IMethodSelector)

    def test_method_selector_passed_in(self):
        """ If `MethodSelector` is neither `False` nor `None`,
        it's registered.
        """
        from weblayer.interfaces import IRequestHandler
        from weblayer.interfaces import IMethodSelector
        MethodSelector = Mock()
        self.make_one().register_components(MethodSelector=MethodSelector)
        self._assert_adapter_registered(
            MethodSelector, [IRequestHandler], IMethodSelector)

    def test_response_normaliser_false(self):
        """ If `ResponseNormaliser` is `False`, nothing is registered.
        """
        from weblayer.interfaces import IResponseNormaliser
        self._check_adapter_not_registered(
            'ResponseNormaliser', IResponseNormaliser)

    def test_response_normaliser_none(self):
        """ If `ResponseNormaliser` is `None`, register
        `DefaultToJSONResponseNormaliser`.
        """
        from weblayer.interfaces import IResponse
        from weblayer.interfaces import IResponseNormaliser
        self._check_adapter_defaulted(
            'DefaultToJSONResponseNormaliser', [IResponse],
            IResponseNormaliser, ResponseNormaliser=None)

    def test_response_normaliser_defaults_to_none(self):
        """ Register `DefaultToJSONResponseNormaliser`.
        """
        from weblayer.interfaces import IResponse
        from weblayer.interfaces import IResponseNormaliser
        self._check_adapter_defaulted(
            'DefaultToJSONResponseNormaliser', [IResponse],
            IResponseNormaliser)

    def test_response_normaliser_passed_in(self):
        """ If `ResponseNormaliser` is neither `False` nor `None`,
        it's registered.
        """
        from weblayer.interfaces import IResponse
        from weblayer.interfaces import IResponseNormaliser
        ResponseNormaliser = Mock()
        self.make_one().register_components(
            ResponseNormaliser=ResponseNormaliser)
        self._assert_adapter_registered(
            ResponseNormaliser, [IResponse], IResponseNormaliser)
|
from somecards import app
# Script entry point: run the Flask development server.
# NOTE(review): debug=True enables the interactive debugger / reloader and
# should not be used in production.
if __name__ == '__main__':
    app.run(debug=True)
|
def name():
    """Prompt the player for their name until they confirm it.

    Returns the confirmed name as a string.  Non-numeric confirmation
    answers (which crashed the original with ValueError) simply re-prompt.
    """
    while True:
        print("Before you enter the game, please present identification.")
        char_name = str(input("Enter your name now: "))
        print("Is ", char_name, "your name? If so press 1. If not press 2.")
        try:
            name_correct = int(input("Is it correct?:"))
        except ValueError:
            # The original crashed here on non-numeric input.
            print("Alright, let's try again...")
            continue
        if name_correct == 1:
            return char_name
        elif name_correct == 2:
            print("Alright, let's try again...")
        # Any other number silently re-prompts, as before.
|
"""The setup script."""
from setuptools import setup, find_packages
# Build the PyPI long description from the README plus the changelog.
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read()
requirements = [
    'colorlog',
    'docker==4.2.2',
    'PyYAML',
    'python-dateutil',
]
setup_requirements = [
    'pytest-runner',
    # TODO(dimaspivak): put setup requirements (distutils extensions, etc.) here
]
test_requirements = [
    'pytest',
    # TODO: put package test requirements here
]
setup(
    name='clusterdock',
    version='2.3.0',
    description="clusterdock is a framework for creating Docker-based container clusters",
    long_description=readme + '\n\n' + history,
    # The long description is assembled from .rst sources, so declare
    # reStructuredText: the original said 'text/markdown', which makes PyPI
    # render the RST as literal Markdown text.
    long_description_content_type='text/x-rst',
    author="Dima Spivak",
    author_email='dima@spivak.ch',
    url='https://github.com/clusterdock/clusterdock',
    packages=find_packages(),
    entry_points={'console_scripts': ['clusterdock = clusterdock.cli:main']},
    include_package_data=True,
    install_requires=requirements,
    license="Apache Software License 2.0",
    zip_safe=False,
    keywords='clusterdock',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    test_suite='tests',
    tests_require=test_requirements,
    setup_requires=setup_requirements,
)
|
import time
import connexion
import anchore_engine.apis
import anchore_engine.common.helpers
import anchore_engine.configuration.localconfig
import anchore_engine.subsys.servicestatus
from anchore_engine.apis.authorization import INTERNAL_SERVICE_ALLOWED, get_authorizer
from anchore_engine.subsys import locking, logger, simplequeue
authorizer = get_authorizer()
def normalize_errors(f):
    """Decorator that converts any exception raised by `f` into a standard
    anchore error-response tuple of (error body, HTTP status code).
    """
    import functools

    # functools.wraps preserves f's __name__/__doc__ on the wrapper; the
    # original returned an anonymous `decorator` function, which broke
    # introspection of the wrapped handler's metadata.
    @functools.wraps(f)
    def decorator(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception as err:
            logger.exception("API Error for {}".format(f.__name__))
            resp = anchore_engine.common.helpers.make_response_error(
                err, in_httpcode=500
            )
            return resp, resp["httpcode"]

    return decorator
@normalize_errors
@authorizer.requires_account(with_types=INTERNAL_SERVICE_ALLOWED)
def status():
    """Return this service's status record.

    :return: tuple of (status object, HTTP status code)
    """
    logger.info("Hitting status!")
    # Request prep retained for its side effects (TODO confirm); the
    # returned value was never used.  The commented-out try/except that
    # followed in the original is gone — errors now flow through the
    # `normalize_errors` decorator instead.
    anchore_engine.apis.do_request_prep(
        connexion.request, default_params={}
    )

    service_record = anchore_engine.subsys.servicestatus.get_my_service_record()
    return_object = anchore_engine.subsys.servicestatus.get_status(service_record)
    httpcode = 200

    logger.info("Exiting: {} {}".format(return_object, httpcode))
    return return_object, httpcode
@normalize_errors
@authorizer.requires_account(with_types=INTERNAL_SERVICE_ALLOWED)
def is_inqueue(queuename, bodycontent):
    """Report whether `bodycontent` is already present in `queuename`.

    :return: tuple of (result or error string, HTTP status code)
    """
    anchore_engine.apis.do_request_prep(
        connexion.request, default_params={}
    )
    try:
        result = simplequeue.is_inqueue(queuename, bodycontent)
        status_code = 200
    except Exception as err:
        result = str(err)
        status_code = 500
    return result, status_code
@authorizer.requires_account(with_types=INTERNAL_SERVICE_ALLOWED)
def update_queueid(queuename, bodycontent):
    """Re-key queued messages from ``src_queueId`` to ``dst_queueId``.

    :return: tuple of (update count as string, or error string, HTTP code)
    """
    anchore_engine.apis.do_request_prep(
        connexion.request, default_params={}
    )
    try:
        count = simplequeue.update_queueid(
            queuename,
            src_queueId=bodycontent.get("src_queueId", None),
            dst_queueId=bodycontent.get("dst_queueId", None),
        )
        result, status_code = str(count), 200
    except Exception as err:
        result, status_code = str(err), 500
    return result, status_code
@authorizer.requires_account(with_types=INTERNAL_SERVICE_ALLOWED)
def qlen(queuename):
    """Return the number of messages currently in `queuename`, as a string.

    :return: tuple of (length as string, or error string, HTTP status code)
    """
    anchore_engine.apis.do_request_prep(
        connexion.request, default_params={}
    )
    try:
        # Local renamed from `qlen` so it no longer shadows this function.
        length = simplequeue.qlen(queuename)
        result, status_code = str(length), 200
    except Exception as err:
        result, status_code = str(err), 500
    return result, status_code
@authorizer.requires_account(with_types=INTERNAL_SERVICE_ALLOWED)
def enqueue(queuename, bodycontent, forcefirst=None, qcount=0):
    """Add `bodycontent` to the named queue.

    :return: tuple of (enqueue result or error string, HTTP status code)
    """
    anchore_engine.apis.do_request_prep(
        connexion.request,
        default_params={"forcefirst": forcefirst, "qcount": qcount},
    )
    try:
        result = simplequeue.enqueue(
            queuename, bodycontent, qcount=qcount, forcefirst=forcefirst
        )
        status_code = 200
    except Exception as err:
        result = str(err)
        status_code = 500
    return result, status_code
@authorizer.requires_account(with_types=INTERNAL_SERVICE_ALLOWED)
def dequeue(queuename, wait_max_seconds=0, visibility_timeout=0):
    """Pop the next message off the named queue, optionally long-polling.

    :param queuename: name of the queue to read from
    :param wait_max_seconds: max seconds to wait for a message to appear
    :param visibility_timeout: seconds the dequeued message stays hidden
        from other consumers
    :return: tuple of (message, None, or error string; http status code)
    """
    request_inputs = anchore_engine.apis.do_request_prep(
        connexion.request, default_params={}
    )
    wait_expired = False
    # Poll every 0.5s below, so the interval budget is 2x the seconds.
    wait_intervals = wait_max_seconds * 2
    return_object = None
    while not wait_expired:
        try:
            return_object = simplequeue.dequeue(
                queuename, visibility_timeout=visibility_timeout
            )
            if return_object:
                return return_object, 200
            else:
                # A very rough way to do long-polling, but occupies a thread during the wait
                if wait_intervals > 0:
                    wait_intervals -= 1
                    time.sleep(0.5)
                else:
                    wait_expired = True
        except Exception as err:
            return_object = str(err)
            return return_object, 500
    # The wait budget expired without a message; return an empty 200.
    return return_object, 200
@authorizer.requires_account(with_types=INTERNAL_SERVICE_ALLOWED)
def delete_message(queuename, receipt_handle):
    """Delete the message identified by receipt_handle from the queue.

    The handle must match the currently outstanding handle for the
    message; otherwise a 404 is returned.
    """
    request_inputs = anchore_engine.apis.do_request_prep(
        connexion.request, default_params={}
    )
    try:
        deleted = simplequeue.delete_msg(queuename, receipt_handle)
    except Exception as err:
        return str(err), 500
    # 404 means the handle did not match an outstanding message.
    return None, 200 if deleted else 404
@authorizer.requires_account(with_types=INTERNAL_SERVICE_ALLOWED)
def update_message_visibility_timeout(queuename, receipt_handle, visibility_timeout):
    """
    Update the visibility timeout of an outstanding message in the given
    queue. The receipt_handle must match the currently outstanding handle
    for the message.

    :param queuename: name of the queue containing the message
    :param receipt_handle: handle identifying the outstanding message
    :param visibility_timeout: new visibility timeout to apply
    :return: tuple of (result, None, or error string; http status code)
    """
    request_inputs = anchore_engine.apis.do_request_prep(
        connexion.request, default_params={}
    )
    try:
        result = simplequeue.update_visibility_timeout(
            queuename, receipt_handle, visibility_timeout
        )
        if result:
            return_object = result
            httpcode = 200
        else:
            # A falsy result means the update was rejected (e.g. stale handle).
            httpcode = 400
            return_object = None
    except Exception as err:
        return_object = str(err)
        httpcode = 500
    return return_object, httpcode
@authorizer.requires_account(with_types=INTERNAL_SERVICE_ALLOWED)
def queues():
    """List the names of all known queues."""
    request_inputs = anchore_engine.apis.do_request_prep(
        connexion.request, default_params={}
    )
    try:
        names = simplequeue.get_queuenames()
    except Exception as err:
        return str(err), 500
    return names, 200
@authorizer.requires_account(with_types=INTERNAL_SERVICE_ALLOWED)
def create_lease(lease_id):
    """Initialize a new lease with the given id."""
    request_inputs = anchore_engine.apis.do_request_prep(
        connexion.request, default_params={}
    )
    try:
        lease = locking.manager().init_lease(lease_id)
    except Exception as err:
        return str(err), 500
    return lease, 200
@authorizer.requires_account(with_types=INTERNAL_SERVICE_ALLOWED)
def list_leases():
    """List all known leases."""
    request_inputs = anchore_engine.apis.do_request_prep(
        connexion.request, default_params={}
    )
    try:
        leases = locking.manager().list()
    except Exception as err:
        return str(err), 500
    return leases, 200
@authorizer.requires_account(with_types=INTERNAL_SERVICE_ALLOWED)
def describe_lease(lease_id):
    """Fetch the state of a single lease by id."""
    request_inputs = anchore_engine.apis.do_request_prep(
        connexion.request, default_params={}
    )
    try:
        lease = locking.manager().get(lease_id)
    except Exception as err:
        return str(err), 500
    return lease, 200
@authorizer.requires_account(with_types=INTERNAL_SERVICE_ALLOWED)
def acquire_lease(lease_id, client_id, ttl):
    """Attempt to acquire the lease for the given client with a ttl."""
    request_inputs = anchore_engine.apis.do_request_prep(
        connexion.request, default_params={}
    )
    try:
        lease = locking.manager().acquire_lease(lease_id, client_id, ttl)
    except Exception as err:
        return str(err), 500
    return lease, 200
@authorizer.requires_account(with_types=INTERNAL_SERVICE_ALLOWED)
def release_lease(lease_id, client_id, epoch):
    """Release the lease held by the given client at the given epoch."""
    request_inputs = anchore_engine.apis.do_request_prep(
        connexion.request, default_params={}
    )
    try:
        lease = locking.manager().release_lease(lease_id, client_id, epoch)
    except Exception as err:
        return str(err), 500
    return lease, 200
@authorizer.requires_account(with_types=INTERNAL_SERVICE_ALLOWED)
def refresh_lease(lease_id, client_id, ttl, epoch):
    """Extend the ttl of a held lease without releasing it."""
    request_inputs = anchore_engine.apis.do_request_prep(
        connexion.request, default_params={}
    )
    try:
        # Note the backend takes (lease_id, client_id, epoch, ttl) order.
        lease = locking.manager().refresh(lease_id, client_id, epoch, ttl)
    except Exception as err:
        return str(err), 500
    return lease, 200
|
from .test_helper import argv_kiwi_tests
import mock
from mock import patch
from azurectl.account.service import AzureAccount
from azurectl.config.parser import Config
from collections import namedtuple
import azurectl
from pytest import raises
from azurectl.azurectl_exceptions import (
AzureConfigVariableNotFound,
AzureConfigVariableNotFound,
AzureManagementCertificateNotFound,
AzureServiceManagementError,
AzureServiceManagementError,
AzureServiceManagementUrlNotFound,
AzureSubscriptionCertificateDecodeError,
AzureSubscriptionIdNotFound,
AzureSubscriptionIdNotFound,
AzureSubscriptionIdNotFound,
AzureSubscriptionParseError,
AzureSubscriptionParseError,
AzureSubscriptionPKCS12DecodeError,
AzureSubscriptionPrivateKeyDecodeError,
AzureUnrecognizedManagementUrl
)
class TestAzureAccount:
    """Unit tests for azurectl's AzureAccount service wrapper.

    ``setup`` builds an account from the checked-in test config and stubs
    out PKCS12 certificate loading; individual tests either point the
    account at specialized config fixtures or mock the Azure management
    service layer via ``__mock_management_service``.
    """
    @patch('azurectl.account.service.NamedTemporaryFile')
    def setup(self, mock_temp):
        # Any NamedTemporaryFile the account creates reports the fixed
        # name 'tempfile' so tests can assert on it.
        tempfile = mock.Mock()
        tempfile.name = 'tempfile'
        mock_temp.return_value = tempfile
        self.account = AzureAccount(
            Config(
                region_name='East US 2', filename='../data/config'
            )
        )
        azurectl.account.service.load_pkcs12 = mock.Mock()
    def __mock_management_service(
        self, endpoint, service_response=None, side_effect=None
    ):
        # Replace account.get_management_service with a mock whose
        # ``endpoint`` method returns ``service_response`` (or raises
        # ``side_effect`` when given).
        mock_service_function = mock.Mock()
        if side_effect:
            mock_service_function.side_effect = side_effect
        else:
            mock_service_function.return_value = service_response
        mock_service = mock.Mock(**{endpoint: mock_service_function})
        self.account.get_management_service = mock.Mock(
            return_value=mock_service
        )
    @patch('azurectl.account.service.ServiceManagementService')
    @patch('azurectl.account.service.dump_privatekey')
    @patch('azurectl.account.service.dump_certificate')
    @patch('azurectl.account.service.AzureAccount.get_management_url')
    @patch('azurectl.account.service.AzureAccount.certificate_filename')
    def test_service_error(
        self, mock_mgmt_cert, mock_mgmt_url, mock_dump_certificate,
        mock_dump_pkey, mock_service
    ):
        mock_mgmt_cert.return_value = 'certfile'
        mock_mgmt_url.return_value = 'test.url'
        mock_dump_certificate.return_value = 'abc'
        mock_dump_pkey.return_value = 'abc'
        mock_service.side_effect = AzureServiceManagementError
        with raises(AzureServiceManagementError):
            self.account.storage_names()
    def test_storage_name(self):
        assert self.account.storage_name() == 'bob'
    def test_storage_container(self):
        assert self.account.storage_container() == 'foo'
    @patch('azurectl.account.service.dump_privatekey')
    @patch('azurectl.account.service.dump_certificate')
    def test_subscription_cert_decode_error(
        self, mock_dump_certificate, mock_dump_pkey
    ):
        mock_dump_pkey.return_value = b'abc'
        mock_dump_certificate.side_effect = \
            AzureSubscriptionCertificateDecodeError
        with raises(AzureSubscriptionCertificateDecodeError):
            self.account.get_management_service()
    def test_subscription_management_cert_not_found(self):
        account_invalid = AzureAccount(
            Config(
                region_name='East US 2',
                filename='../data/config.missing_publishsettings_cert'
            )
        )
        with raises(AzureManagementCertificateNotFound):
            account_invalid.get_management_service()
    @patch('azurectl.account.service.load_pkcs12')
    @patch('azurectl.account.service.dump_privatekey')
    @patch('azurectl.account.service.dump_certificate')
    @patch('base64.b64decode')
    def test_subscription_id_missing(
        self, base64_decode, mock_dump_certificate,
        mock_dump_pkey, mock_pkcs12
    ):
        account_invalid = AzureAccount(
            Config(
                region_name='East US 2',
                filename='../data/config.missing_publishsettings_id'
            )
        )
        with raises(AzureSubscriptionIdNotFound):
            account_invalid.get_management_service()
    def test_get_management_url(self):
        mgmt_url = self.account.get_management_url()
        assert mgmt_url == 'test.url'
    def test_get_management_url_missing(self):
        account_invalid = AzureAccount(
            Config(
                region_name='East US 2',
                filename='../data/config.missing_mgmt_url'
            )
        )
        with raises(AzureServiceManagementUrlNotFound):
            account_invalid.get_management_url()
    @patch('azurectl.account.service.AzureAccount.get_management_url')
    def test_get_blob_service_host_base(self, mock_mgmt_url):
        mock_mgmt_url.return_value = 'management.test.url'
        host_base = self.account.get_blob_service_host_base()
        assert host_base == 'test.url'
    @patch('azurectl.account.service.AzureAccount.get_management_url')
    def test_get_blob_service_host_base_with_bad_url(self, mock_mgmt_url):
        mock_mgmt_url.return_value = 'invalid.test.url'
        with raises(AzureUnrecognizedManagementUrl):
            self.account.get_blob_service_host_base()
    def test_subscription_pkcs12_error(self):
        account_invalid = AzureAccount(
            Config(
                region_name='East US 2',
                filename='../data/config.corrupted_p12_cert'
            )
        )
        with raises(AzureSubscriptionPKCS12DecodeError):
            account_invalid.get_management_service()
    def test_empty_publishsettings(self):
        account_invalid = AzureAccount(
            Config(
                region_name='East US 2',
                filename='../data/config.empty_publishsettings'
            )
        )
        with raises(AzureSubscriptionParseError):
            account_invalid.get_management_url()
    def test_missing_publishsettings(self):
        account_invalid = AzureAccount(
            Config(
                region_name='East US 2',
                filename='../data/config.missing_publishsettings'
            )
        )
        with raises(AzureSubscriptionParseError):
            account_invalid.get_management_url()
    @patch('azurectl.account.service.dump_privatekey')
    @patch('azurectl.account.service.dump_certificate')
    @patch('azurectl.account.service.AzureAccount.get_management_url')
    def test_publishsettings_with_multiple_subscriptions_defaults_to_first(
        self,
        mock_mgmt_url,
        mock_dump_certificate,
        mock_dump_pkey
    ):
        account = AzureAccount(
            Config(
                region_name='East US 2',
                filename='../data/config.multiple_subscriptions_no_id'
            )
        )
        assert account.subscription_id() == 'first'
    @patch('azurectl.account.service.dump_privatekey')
    @patch('azurectl.account.service.dump_certificate')
    @patch('azurectl.account.service.AzureAccount.get_management_url')
    def test_config_specifies_subscription_in_publishsettings(
        self,
        mock_mgmt_url,
        mock_dump_certificate,
        mock_dump_pkey
    ):
        account = AzureAccount(
            Config(
                region_name='East US 2',
                filename='../data/config.multiple_subscriptions_set_id'
            )
        )
        assert account.subscription_id() == 'second'
    def test_publishsettings_invalid_cert(self):
        account_invalid = AzureAccount(
            Config(
                region_name='East US 2',
                filename='../data/config.invalid_publishsettings_cert'
            )
        )
        with raises(AzureSubscriptionPrivateKeyDecodeError):
            account_invalid.certificate_filename()
    def test_config_subscription_id_not_found_in_publishsettings(self):
        account_invalid = AzureAccount(
            Config(
                region_name='East US 2',
                filename='../data/config.missing_set_subscription_id'
            )
        )
        with raises(AzureSubscriptionIdNotFound):
            account_invalid.get_management_url()
    def test_config_subscription_id_missing(self):
        account_invalid = AzureAccount(
            Config(
                region_name='East US 2',
                filename='../data/config.set_subscription_id_missing_id'
            )
        )
        with raises(AzureSubscriptionIdNotFound):
            account_invalid.get_management_url()
    def test_config_without_publishsettings(self):
        account = AzureAccount(
            Config(
                region_name='East US 2',
                filename='../data/config.no_publishsettings'
            )
        )
        assert account.get_management_url() == 'test.url'
        assert account.certificate_filename() == '../data/pemfile'
        assert account.subscription_id() == 'id1234'
    @patch('azurectl.account.service.dump_privatekey')
    @patch('azurectl.account.service.dump_certificate')
    def test_config_create_cert_from_publishsettings(
        self, mock_dump_certificate, mock_dump_pkey
    ):
        mock_dump_pkey.return_value = b'abc'
        mock_dump_certificate.return_value = b'cert'
        assert self.account.certificate_filename() == 'tempfile'
    def test_config_must_have_management_url_or_publishsettings(self):
        account = AzureAccount(
            Config(
                filename='../data/config.publishsettings_undefined'
            )
        )
        with raises(AzureConfigVariableNotFound):
            account.get_management_url()
    def test_config_must_have_management_pem_file_or_publishsettings(self):
        account = AzureAccount(
            Config(
                filename='../data/config.publishsettings_undefined'
            )
        )
        with raises(AzureConfigVariableNotFound):
            account.certificate_filename()
    def test_storage_key(self):
        primary = namedtuple(
            'primary', 'primary'
        )
        keys = namedtuple(
            'storage_service_keys', 'storage_service_keys'
        )
        self.__mock_management_service(
            'get_storage_account_keys',
            keys(storage_service_keys=primary(primary='foo'))
        )
        assert self.account.storage_key() == 'foo'
    def test_storage_key_error(self):
        self.__mock_management_service(
            'get_storage_account_keys', None, side_effect=Exception
        )
        with raises(AzureServiceManagementError):
            self.account.storage_key()
    @patch('azurectl.account.service.ServiceManagementService')
    def test_get_management_service(self, mock_service):
        self.account.subscription_id = mock.Mock()
        self.account.certificate_filename = mock.Mock()
        self.account.get_management_url = mock.Mock()
        service = self.account.get_management_service()
        assert self.account._AzureAccount__service == service
        assert self.account.subscription_id.called
        assert self.account.certificate_filename.called
        assert self.account.get_management_url.called
    @patch('azurectl.account.service.ServiceManagementService.list_storage_accounts')
    def test_storage_names(self, mock_service):
        names = namedtuple(
            'service_name', 'service_name'
        )
        service_result = [names(service_name='foo')]
        self.__mock_management_service('list_storage_accounts', service_result)
        assert self.account.storage_names() == ['foo']
    def test_instance_types(self):
        # given
        names = namedtuple(
            'names',
            'name memory_in_mb cores max_data_disk_count \
            virtual_machine_resource_disk_size_in_mb'
        )
        service_result = [names(
            name='foo',
            memory_in_mb=1,
            cores=2,
            max_data_disk_count=3,
            virtual_machine_resource_disk_size_in_mb=4
        )]
        self.__mock_management_service('list_role_sizes', service_result)
        # when
        # NOTE(review): this result is unused; the assertion below calls
        # instance_types() a second time.
        x = self.account.instance_types()
        # then
        assert self.account.instance_types() == [
            {'foo': {
                'cores': 2,
                'max_disk_count': 3,
                'disk_size': '4MB',
                'memory': '1MB'
            }}
        ]
    def test_locations(self):
        # given
        mock_location = mock.Mock(
            compute_capabilities={
                'virtual_machines_role_sizes': [],
                'web_worker_role_sizes': []
            },
            display_name='Mock Region',
            available_services=['Compute',
                                'Storage',
                                'PersistentVMRole',
                                'HighMemory']
        )
        # 'name' collides with Mock's constructor arg, so set it afterwards.
        mock_location.configure_mock(name='Mock Region')
        self.__mock_management_service('list_locations', [mock_location])
        # when
        result = self.account.locations()
        # then
        assert result == ['Mock Region']
    def test_filtered_locations(self):
        # given
        mock_location = mock.Mock(
            compute_capabilities={
                'virtual_machines_role_sizes': [],
                'web_worker_role_sizes': []
            },
            display_name='Mock Region',
            available_services=['Compute',
                                'Storage',
                                'PersistentVMRole',
                                'HighMemory']
        )
        mock_location.configure_mock(name='Mock Region')
        self.__mock_management_service('list_locations', [mock_location])
        self.account.certificate_filename = mock.Mock()
        # when
        result = self.account.locations('Compute')
        # then
        assert result == ['Mock Region']
        # when
        result = self.account.locations('foo')
        # then
        assert result == []
|
from __future__ import unicode_literals
from io import StringIO
from mock import Mock, patch
import pytest
from flaky import flaky
from flaky import _flaky_plugin
from flaky.flaky_pytest_plugin import (
FlakyPlugin,
FlakyCallInfo,
FlakyXdist,
PLUGIN,
pytest_sessionfinish,
)
from flaky.names import FlakyNames
from flaky.utils import unicode_type
@pytest.fixture
def mock_io(monkeypatch):
    # Patch the flaky plugin's StringIO factory so the plugin writes its
    # report into a StringIO the test can later inspect.
    mock_string_io = StringIO()
    def string_io():
        return mock_string_io
    monkeypatch.setattr(_flaky_plugin, 'StringIO', string_io)
    return mock_string_io
@pytest.fixture
def string_io():
    # Fresh StringIO used by tests to build the expected report text.
    return StringIO()
@pytest.fixture
def flaky_plugin(mock_io):
    # Depends on mock_io so the plugin's stream is already patched.
    # pylint:disable=unused-argument
    return FlakyPlugin()
@pytest.fixture
def mock_plugin_rerun(monkeypatch, flaky_plugin):
    # Replace the plugin's _rerun_test with a recorder; the fixture value
    # is a callable returning the list of tests scheduled for rerun.
    calls = []
    def rerun_test(test):
        calls.append(test)
    monkeypatch.setattr(flaky_plugin, '_rerun_test', rerun_test)
    def get_calls():
        return calls
    return get_calls
@pytest.fixture(params=['instance', 'module', 'parent'])
def flaky_test(request):
    # Build a MockTestItem whose owning object is attached via whichever
    # attribute the param names (instance, module, or parent).
    def test_function():
        pass
    test_owner = Mock()
    setattr(test_owner, 'test_method', test_function)
    setattr(test_owner, 'obj', test_owner)
    kwargs = {request.param: test_owner}
    test = MockTestItem(**kwargs)
    setattr(test, 'owner', test_owner)
    return test
@pytest.fixture
def call_info(flaky_test):
    # CallInfo for the 'call' phase of the parametrized flaky test.
    return MockFlakyCallInfo(flaky_test, 'call')
@pytest.fixture
def mock_error():
    # Fresh fake error triple per test.
    return MockError()
class MockError(object):
    # Minimal stand-in exposing the (type, value, traceback) attributes the
    # flaky plugin reads from an error.
    def __init__(self):
        super(MockError, self).__init__()
        self.type = Mock()
        self.value = Mock()
        self.value.message = 'failed'
        self.traceback = Mock()
class MockTestItem(object):
    """Minimal pytest-item stand-in with an owner slot and a no-op run."""
    # Class-level defaults; constructor arguments override per instance.
    name = 'test_method'
    instance = None
    module = None
    parent = None
    def __init__(self, instance=None, module=None, parent=None):
        # Only set the attributes that were actually supplied, so the
        # class-level None defaults remain visible otherwise.
        for attr_name, value in (('instance', instance),
                                 ('module', module),
                                 ('parent', parent)):
            if value is not None:
                setattr(self, attr_name, value)
    def runtest(self):
        pass
class MockFlakyCallInfo(FlakyCallInfo):
    # FlakyCallInfo subclass that skips the real constructor and only
    # records the item and phase the tests care about.
    def __init__(self, item, when):
        # pylint:disable=super-init-not-called
        # super init not called because it has unwanted side effects
        self.when = when
        self._item = item
def test_flaky_plugin_report(flaky_plugin, mock_io, string_io):
    # The terminal summary must wrap whatever was written to the plugin's
    # stream in the flaky report header/footer.
    flaky_report = 'Flaky tests passed; others failed. ' \
                   'No more tests; that ship has sailed.'
    expected_string_io = StringIO()
    expected_string_io.write('===Flaky Test Report===\n\n')
    expected_string_io.write(flaky_report)
    expected_string_io.write('\n===End Flaky Test Report===\n')
    mock_io.write(flaky_report)
    flaky_plugin.terminal_summary(string_io)
    assert string_io.getvalue() == expected_string_io.getvalue()
@pytest.fixture(params=(
    {},
    {'flaky_report': ''},
    {'flaky_report': 'ŝȁḿҏľȅ ƭȅхƭ'},
))
def mock_xdist_node_slaveoutput(request):
    # Possible slaveoutput payloads: missing key, empty, and non-ASCII.
    return request.param
@pytest.fixture(params=(None, object()))
def mock_xdist_error(request):
    # Node-down error argument: absent or an opaque object.
    return request.param
@pytest.mark.parametrize('assign_slaveoutput', (True, False))
def test_flaky_xdist_nodedown(
    mock_xdist_node_slaveoutput,
    assign_slaveoutput,
    mock_xdist_error
):
    # The xdist hook should copy a node's flaky_report into the plugin
    # stream exactly once, and only when the key is present.
    flaky_xdist = FlakyXdist()
    node = Mock()
    if assign_slaveoutput:
        node.slaveoutput = mock_xdist_node_slaveoutput
    else:
        delattr(node, 'slaveoutput')
    mock_stream = Mock(StringIO)
    with patch.object(PLUGIN, '_stream', mock_stream):
        flaky_xdist.pytest_testnodedown(node, mock_xdist_error)
    if assign_slaveoutput and 'flaky_report' in mock_xdist_node_slaveoutput:
        mock_stream.write.assert_called_once_with(
            mock_xdist_node_slaveoutput['flaky_report'],
        )
    else:
        assert not mock_stream.write.called
# Sample report strings; the second exercises non-ASCII handling.
_REPORT_TEXT1 = 'Flaky report text'
_REPORT_TEXT2 = 'Ḿőŕȅ ƒľȁƙŷ ŕȅҏőŕƭ ƭȅхƭ'
@pytest.mark.parametrize('initial_report,stream_report,expected_report', (
    ('', '', ''),
    ('', _REPORT_TEXT1, _REPORT_TEXT1),
    (_REPORT_TEXT1, '', _REPORT_TEXT1),
    (_REPORT_TEXT1, _REPORT_TEXT2, _REPORT_TEXT1 + _REPORT_TEXT2),
    (_REPORT_TEXT2, _REPORT_TEXT1, _REPORT_TEXT2 + _REPORT_TEXT1),
))
def test_flaky_session_finish_copies_flaky_report(
    initial_report,
    stream_report,
    expected_report,
):
    # Session finish must append the plugin's stream contents to any
    # flaky_report already present in slaveoutput.
    PLUGIN.stream.seek(0)
    PLUGIN.stream.truncate()
    PLUGIN.stream.write(stream_report)
    PLUGIN.config = Mock()
    PLUGIN.config.slaveoutput = {'flaky_report': initial_report}
    pytest_sessionfinish()
    assert PLUGIN.config.slaveoutput['flaky_report'] == expected_report
def test_flaky_plugin_can_suppress_success_report(
    flaky_test,
    flaky_plugin,
    call_info,
    string_io,
    mock_io,
):
    # With _flaky_success_report disabled, a success must not be handled
    # by the plugin and nothing should be written to its stream.
    flaky()(flaky_test)
    # pylint:disable=protected-access
    flaky_plugin._flaky_success_report = False
    # pylint:enable=protected-access
    call_info.when = 'call'
    actual_plugin_handles_success = flaky_plugin.add_success(
        call_info,
        flaky_test,
    )
    assert actual_plugin_handles_success is False
    assert string_io.getvalue() == mock_io.getvalue()
class TestFlakyPytestPlugin(object):
    """Tests for FlakyPlugin's success/failure handling and reporting.

    The ``_test_flaky_plugin_handles_*`` helpers drive the plugin with a
    configurable pass/run state and compare its stream output against an
    expected report built in parallel.
    """
    # Name the mock test items report under.
    _test_method_name = 'test_method'
    def test_flaky_plugin_handles_success(
        self,
        flaky_test,
        flaky_plugin,
        call_info,
        string_io,
        mock_io,
    ):
        self._test_flaky_plugin_handles_success(
            flaky_test,
            flaky_plugin,
            call_info,
            string_io,
            mock_io,
        )
    def test_flaky_plugin_handles_success_for_needs_rerun(
        self,
        flaky_test,
        flaky_plugin,
        call_info,
        string_io,
        mock_io,
        mock_plugin_rerun,
    ):
        self._test_flaky_plugin_handles_success(
            flaky_test,
            flaky_plugin,
            call_info,
            string_io,
            mock_io,
            min_passes=2,
        )
        assert mock_plugin_rerun()[0] == flaky_test
    def test_flaky_plugin_ignores_success_for_non_flaky_test(
        self,
        flaky_plugin,
        flaky_test,
        call_info,
        string_io,
        mock_io,
    ):
        flaky_plugin.add_success(call_info, flaky_test)
        self._assert_test_ignored(mock_io, string_io, call_info)
    def test_flaky_plugin_ignores_failure_for_non_flaky_test(
        self,
        flaky_plugin,
        flaky_test,
        call_info,
        string_io,
        mock_io,
    ):
        flaky_plugin.add_failure(call_info, flaky_test, None)
        self._assert_test_ignored(mock_io, string_io, call_info)
    def test_flaky_plugin_handles_failure(
        self,
        flaky_test,
        flaky_plugin,
        call_info,
        string_io,
        mock_io,
        mock_error,
        mock_plugin_rerun,
    ):
        self._test_flaky_plugin_handles_failure(
            flaky_test,
            flaky_plugin,
            call_info,
            string_io,
            mock_io,
            mock_error,
        )
        assert mock_plugin_rerun()[0] == flaky_test
    def test_flaky_plugin_handles_failure_for_no_more_retries(
        self,
        flaky_test,
        flaky_plugin,
        call_info,
        string_io,
        mock_io,
        mock_error,
    ):
        self._test_flaky_plugin_handles_failure(
            flaky_test,
            flaky_plugin,
            call_info,
            string_io,
            mock_io,
            mock_error,
            max_runs=1,
        )
    def test_flaky_plugin_handles_additional_failures(
        self,
        flaky_test,
        flaky_plugin,
        call_info,
        string_io,
        mock_io,
        mock_error,
        mock_plugin_rerun,
    ):
        self._test_flaky_plugin_handles_failure(
            flaky_test,
            flaky_plugin,
            call_info,
            string_io,
            mock_io,
            mock_error,
            current_errors=[None],
        )
        assert mock_plugin_rerun()[0] == flaky_test
    def _assert_flaky_attributes_contains(
        self,
        expected_flaky_attributes,
        test,
    ):
        # Subset check: every expected flaky attribute must be present with
        # the expected value on the test item.
        actual_flaky_attributes = self._get_flaky_attributes(test)
        assert all(
            item in actual_flaky_attributes.items()
            for item in expected_flaky_attributes.items()
        )
    def test_flaky_plugin_exits_after_false_rerun_filter(
        self,
        flaky_test,
        flaky_plugin,
        call_info,
        string_io,
        mock_io,
        mock_error,
        mock_plugin_rerun,
    ):
        err_tuple = (mock_error.type, mock_error.value, mock_error.traceback)
        def rerun_filter(err, name, test, plugin):
            # The filter must be called with the full failure context.
            assert err == err_tuple
            assert name == flaky_test.name
            assert test is flaky_test
            assert plugin is flaky_plugin
            return False
        flaky(rerun_filter=rerun_filter)(flaky_test)
        call_info.when = 'call'
        actual_plugin_handles_failure = flaky_plugin.add_failure(
            call_info,
            flaky_test,
            mock_error,
        )
        assert actual_plugin_handles_failure is False
        assert not mock_plugin_rerun()
        string_io.writelines([
            self._test_method_name,
            ' failed and was not selected for rerun.',
            '\n\t',
            unicode_type(mock_error.type),
            '\n\t',
            unicode_type(mock_error.value),
            '\n\t',
            unicode_type(mock_error.traceback),
            '\n',
        ])
        assert string_io.getvalue() == mock_io.getvalue()
    @staticmethod
    def _assert_test_ignored(mock_io, string_io, call_info):
        # An ignored test must leave the plugin's stream untouched.
        assert call_info
        assert mock_io.getvalue() == string_io.getvalue()
    def _test_flaky_plugin_handles_success(
        self,
        test,
        plugin,
        info,
        stream,
        mock_stream,
        current_passes=0,
        current_runs=0,
        max_runs=2,
        min_passes=1,
    ):
        # Mark the test flaky, seed its pass/run counters, then verify the
        # plugin's handling decision, attribute updates, and report text.
        flaky(max_runs, min_passes)(test)
        setattr(
            test,
            FlakyNames.CURRENT_PASSES,
            current_passes,
        )
        setattr(
            test,
            FlakyNames.CURRENT_RUNS,
            current_runs,
        )
        too_few_passes = current_passes + 1 < min_passes
        retries_remaining = current_runs + 1 < max_runs
        expected_plugin_handles_success = too_few_passes and retries_remaining
        info.when = 'call'
        actual_plugin_handles_success = plugin.add_success(
            info,
            test,
        )
        assert expected_plugin_handles_success == actual_plugin_handles_success
        self._assert_flaky_attributes_contains(
            {
                FlakyNames.CURRENT_PASSES: current_passes + 1,
                FlakyNames.CURRENT_RUNS: current_runs + 1,
            },
            test,
        )
        stream.writelines([
            self._test_method_name,
            " passed {0} out of the required {1} times. ".format(
                current_passes + 1, min_passes,
            ),
        ])
        if expected_plugin_handles_success:
            stream.write(
                'Running test again until it passes {0} times.\n'.format(
                    min_passes,
                ),
            )
        else:
            stream.write('Success!\n')
        assert stream.getvalue() == mock_stream.getvalue()
    def _test_flaky_plugin_handles_failure(
        self,
        test,
        plugin,
        info,
        stream,
        mock_stream,
        mock_error,
        current_errors=None,
        current_passes=0,
        current_runs=0,
        max_runs=2,
        min_passes=1,
        rerun_filter=None,
    ):
        # Mark the test flaky, seed its error/pass/run state, then verify
        # the plugin's handling decision, attribute updates, and report.
        flaky(max_runs, min_passes, rerun_filter)(test)
        if current_errors is None:
            current_errors = [None]
        else:
            current_errors.append(None)
        setattr(
            test,
            FlakyNames.CURRENT_ERRORS,
            current_errors,
        )
        setattr(
            test,
            FlakyNames.CURRENT_PASSES,
            current_passes,
        )
        setattr(
            test,
            FlakyNames.CURRENT_RUNS,
            current_runs,
        )
        too_few_passes = current_passes < min_passes
        retries_remaining = current_runs + 1 < max_runs
        expected_plugin_handles_failure = too_few_passes and retries_remaining
        info.when = 'call'
        actual_plugin_handles_failure = plugin.add_failure(
            info,
            test,
            mock_error,
        )
        assert expected_plugin_handles_failure == actual_plugin_handles_failure
        self._assert_flaky_attributes_contains(
            {
                FlakyNames.CURRENT_RUNS: current_runs + 1,
                FlakyNames.CURRENT_ERRORS: current_errors
            },
            test,
        )
        if expected_plugin_handles_failure:
            stream.writelines([
                self._test_method_name,
                ' failed ({0} runs remaining out of {1}).'.format(
                    max_runs - current_runs - 1, max_runs
                ),
                '\n\t',
                unicode_type(mock_error.type),
                '\n\t',
                unicode_type(mock_error.value),
                '\n\t',
                unicode_type(mock_error.traceback),
                '\n',
            ])
        else:
            message = ' failed; it passed {0} out of the required {1} times.'
            stream.writelines([
                self._test_method_name,
                message.format(
                    current_passes,
                    min_passes
                ),
                '\n\t',
                unicode_type(mock_error.type),
                '\n\t',
                unicode_type(mock_error.value),
                '\n\t',
                unicode_type(mock_error.traceback),
                '\n',
            ])
        assert stream.getvalue() == mock_stream.getvalue()
    def test_flaky_plugin_handles_excinfo_set(
        self,
        flaky_plugin,
        flaky_test,
        call_info,
        string_io,
        mock_io,
        mock_error,
        mock_plugin_rerun,
    ):
        # NOTE(review): empty stub — requests every fixture but asserts
        # nothing; either implement or remove.
        pass
    @staticmethod
    def _get_flaky_attributes(test):
        # Snapshot all flaky-tracking attributes from the test item,
        # defaulting to None for any that are unset.
        actual_flaky_attributes = dict((
            (attr, getattr(
                test,
                attr,
                None,
            )) for attr in FlakyNames()
        ))
        return actual_flaky_attributes
|
import functools
import random
import eventlet
import netaddr
from neutron_lib import exceptions
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
import ryu.app.ofctl.api as ofctl_api
import ryu.exception as ryu_exc
from ryu.lib import ofctl_string
from ryu.ofproto import ofproto_parser
import six
from neutron._i18n import _
from neutron.agent.common import ovs_lib
LOG = logging.getLogger(__name__)
# OpenFlow bundle ids are 32-bit values; used as the modulus when
# generating/wrapping bundle ids.
BUNDLE_ID_WIDTH = 1 << 32
# Sentinel meaning "use this bridge's default cookie" (distinct from any
# real cookie value, including ovs_lib.COOKIE_ANY).
COOKIE_DEFAULT = object()
class ActiveBundleRunning(exceptions.NeutronException):
    """Raised when a bundle operation starts while another is still active."""
    message = _("Another active bundle 0x%(bundle_id)x is running")
class OpenFlowSwitchMixin(object):
    """Mixin to provide common convenient routines for an openflow switch.
    NOTE(yamamoto): super() points to ovs_lib.OVSBridge.
    See ovs_bridge.py how this class is actually used.
    """
    @staticmethod
    def _cidr_to_ryu(ip):
        # Convert a CIDR string into the form Ryu match fields expect:
        # (address, netmask) for a real prefix, or a bare address string
        # when the hostmask is zero (netaddr yields a falsy hostmask for a
        # full-length prefix — TODO confirm for IPv6 /128).
        n = netaddr.IPNetwork(ip)
        if n.hostmask:
            return (str(n.ip), str(n.netmask))
        return str(n.ip)
    def __init__(self, *args, **kwargs):
        # 'ryu_app' is consumed here; remaining args go to ovs_lib.OVSBridge.
        self._app = kwargs.pop('ryu_app')
        # ids of OpenFlow bundles currently open on this switch.
        self.active_bundles = set()
        super(OpenFlowSwitchMixin, self).__init__(*args, **kwargs)
    def _get_dp_by_dpid(self, dpid_int):
        """Get Ryu datapath object for the switch.

        Polls until the switch connects or of_connect_timeout elapses;
        raises RuntimeError on timeout.
        """
        timeout_sec = cfg.CONF.OVS.of_connect_timeout
        start_time = timeutils.now()
        while True:
            dp = ofctl_api.get_datapath(self._app, dpid_int)
            if dp is not None:
                break
            # The switch has not established a connection to us.
            # Wait for a little.
            if timeutils.now() > start_time + timeout_sec:
                m = _("Switch connection timeout")
                LOG.error(m)
                # NOTE(yamamoto): use RuntimeError for compat with ovs_lib
                raise RuntimeError(m)
            eventlet.sleep(1)
        return dp
    def _send_msg(self, msg, reply_cls=None, reply_multi=False,
                  active_bundle=None):
        """Send an OpenFlow message via ofctl, optionally inside a bundle.

        :param msg: the OpenFlow message to send
        :param reply_cls: expected reply class, if a reply is wanted
        :param reply_multi: whether multiple reply parts are expected
        :param active_bundle: dict with 'id' and 'bundle_flags' to wrap the
            message in an ONFBundleAddMsg
        :raises RuntimeError: on ofctl error or request timeout
        """
        timeout_sec = cfg.CONF.OVS.of_request_timeout
        timeout = eventlet.Timeout(seconds=timeout_sec)
        if active_bundle is not None:
            (dp, ofp, ofpp) = self._get_dp()
            msg = ofpp.ONFBundleAddMsg(dp, active_bundle['id'],
                                       active_bundle['bundle_flags'], msg, [])
        try:
            result = ofctl_api.send_msg(self._app, msg, reply_cls, reply_multi)
        except ryu_exc.RyuException as e:
            m = _("ofctl request %(request)s error %(error)s") % {
                "request": msg,
                "error": e,
            }
            LOG.error(m)
            # NOTE(yamamoto): use RuntimeError for compat with ovs_lib
            raise RuntimeError(m)
        except eventlet.Timeout as e:
            with excutils.save_and_reraise_exception() as ctx:
                # Only swallow our own timeout; unrelated Timeouts re-raise.
                if e is timeout:
                    ctx.reraise = False
                    m = _("ofctl request %(request)s timed out") % {
                        "request": msg,
                    }
                    LOG.error(m)
                    # NOTE(yamamoto): use RuntimeError for compat with ovs_lib
                    raise RuntimeError(m)
        finally:
            timeout.cancel()
        LOG.debug("ofctl request %(request)s result %(result)s",
                  {"request": msg, "result": result})
        return result
    @staticmethod
    def _match(_ofp, ofpp, match, **match_kwargs):
        # Use an explicit match object if given; otherwise build one from
        # the keyword arguments.
        if match is not None:
            return match
        return ofpp.OFPMatch(**match_kwargs)
    def uninstall_flows(self, table_id=None, strict=False, priority=0,
                        cookie=COOKIE_DEFAULT, cookie_mask=0,
                        match=None, active_bundle=None, **match_kwargs):
        """Delete flows matching the given criteria.

        cookie defaults to this bridge's own cookie (full-mask match);
        ovs_lib.COOKIE_ANY means "any cookie" and forbids a cookie_mask.
        """
        (dp, ofp, ofpp) = self._get_dp()
        if table_id is None:
            table_id = ofp.OFPTT_ALL
        if cookie == ovs_lib.COOKIE_ANY:
            cookie = 0
            if cookie_mask != 0:
                raise Exception("cookie=COOKIE_ANY but cookie_mask set to %s" %
                                cookie_mask)
        elif cookie == COOKIE_DEFAULT:
            cookie = self._default_cookie
            cookie_mask = ovs_lib.UINT64_BITMASK
        match = self._match(ofp, ofpp, match, **match_kwargs)
        if strict:
            cmd = ofp.OFPFC_DELETE_STRICT
        else:
            cmd = ofp.OFPFC_DELETE
        msg = ofpp.OFPFlowMod(dp,
                              command=cmd,
                              cookie=cookie,
                              cookie_mask=cookie_mask,
                              table_id=table_id,
                              match=match,
                              priority=priority,
                              out_group=ofp.OFPG_ANY,
                              out_port=ofp.OFPP_ANY)
        self._send_msg(msg, active_bundle=active_bundle)
    def dump_flows(self, table_id=None):
        """Return the flow stats entries of the given table (all by default)."""
        (dp, ofp, ofpp) = self._get_dp()
        if table_id is None:
            table_id = ofp.OFPTT_ALL
        msg = ofpp.OFPFlowStatsRequest(dp, table_id=table_id)
        replies = self._send_msg(msg,
                                 reply_cls=ofpp.OFPFlowStatsReply,
                                 reply_multi=True)
        flows = []
        for rep in replies:
            flows += rep.body
        return flows
    def cleanup_flows(self):
        """Remove flows whose cookies are not reserved on this bridge."""
        cookies = set([f.cookie for f in self.dump_flows()]) - \
            self.reserved_cookies
        LOG.debug("Reserved cookies for %s: %s", self.br_name,
                  self.reserved_cookies)
        for c in cookies:
            LOG.warning("Deleting flow with cookie 0x%(cookie)x",
                        {'cookie': c})
            self.uninstall_flows(cookie=c, cookie_mask=ovs_lib.UINT64_BITMASK)
    def install_goto_next(self, table_id, active_bundle=None):
        """Install a goto to the numerically next table."""
        self.install_goto(table_id=table_id, dest_table_id=table_id + 1,
                          active_bundle=active_bundle)
    def install_output(self, port, table_id=0, priority=0,
                       match=None, **match_kwargs):
        """Install a flow that outputs matching packets to the given port."""
        (_dp, ofp, ofpp) = self._get_dp()
        actions = [ofpp.OFPActionOutput(port, 0)]
        instructions = [ofpp.OFPInstructionActions(
            ofp.OFPIT_APPLY_ACTIONS, actions)]
        self.install_instructions(table_id=table_id, priority=priority,
                                  instructions=instructions,
                                  match=match, **match_kwargs)
    def install_normal(self, table_id=0, priority=0,
                       match=None, **match_kwargs):
        """Install a flow that hands matching packets to NORMAL switching."""
        (_dp, ofp, _ofpp) = self._get_dp()
        self.install_output(port=ofp.OFPP_NORMAL,
                            table_id=table_id, priority=priority,
                            match=match, **match_kwargs)
    def install_goto(self, dest_table_id, table_id=0, priority=0,
                     match=None, **match_kwargs):
        """Install a flow that jumps matching packets to dest_table_id."""
        (_dp, _ofp, ofpp) = self._get_dp()
        instructions = [ofpp.OFPInstructionGotoTable(table_id=dest_table_id)]
        self.install_instructions(table_id=table_id, priority=priority,
                                  instructions=instructions,
                                  match=match, **match_kwargs)
    def install_drop(self, table_id=0, priority=0, match=None, **match_kwargs):
        """Install a flow with no instructions, i.e. drop matching packets."""
        self.install_instructions(table_id=table_id, priority=priority,
                                  instructions=[], match=match, **match_kwargs)
    def install_instructions(self, instructions,
                             table_id=0, priority=0,
                             match=None, active_bundle=None, **match_kwargs):
        """Install a flow with the given instructions.

        instructions may also be an ofctl-style string, which is converted
        to instruction objects for compatibility.
        """
        (dp, ofp, ofpp) = self._get_dp()
        match = self._match(ofp, ofpp, match, **match_kwargs)
        if isinstance(instructions, six.string_types):
            # NOTE: instructions must be str for the ofctl of_interface.
            # After the ofctl driver is removed, a deprecation warning
            # could be added here.
            jsonlist = ofctl_string.ofp_instruction_from_str(
                ofp, instructions)
            instructions = ofproto_parser.ofp_instruction_from_jsondict(
                dp, jsonlist)
        msg = ofpp.OFPFlowMod(dp,
                              table_id=table_id,
                              cookie=self.default_cookie,
                              match=match,
                              priority=priority,
                              instructions=instructions)
        self._send_msg(msg, active_bundle=active_bundle)
    def install_apply_actions(self, actions,
                              table_id=0, priority=0,
                              match=None, **match_kwargs):
        """Install a flow applying the given actions to matching packets."""
        (dp, ofp, ofpp) = self._get_dp()
        instructions = [
            ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions),
        ]
        self.install_instructions(table_id=table_id,
                                  priority=priority,
                                  match=match,
                                  instructions=instructions,
                                  **match_kwargs)
    def bundled(self, atomic=False, ordered=False):
        """Return a bundle-aware wrapper around this bridge."""
        return BundledOpenFlowBridge(self, atomic, ordered)
class BundledOpenFlowBridge(object):
    """Context-manager proxy that routes a bridge's install_*/uninstall_*
    calls through a single OpenFlow (ONF extension) bundle, committing on
    clean exit and discarding when an exception escapes the with-block.
    """
    def __init__(self, br, atomic, ordered):
        # br: the underlying bridge object whose methods we proxy.
        self.br = br
        # None means "no bundle open"; holds the bundle id while active.
        self.active_bundle = None
        self.bundle_flags = 0
        if not atomic and not ordered:
            return
        (dp, ofp, ofpp) = self.br._get_dp()
        if atomic:
            self.bundle_flags |= ofp.ONF_BF_ATOMIC
        if ordered:
            self.bundle_flags |= ofp.ONF_BF_ORDERED
    def __getattr__(self, name):
        # Only flow-modifying methods may be proxied; while a bundle is
        # open, curry the bundle id/flags into the underlying call.
        if name.startswith('install') or name.startswith('uninstall'):
            under = getattr(self.br, name)
            if self.active_bundle is None:
                return under
            return functools.partial(under, active_bundle=dict(
                id=self.active_bundle, bundle_flags=self.bundle_flags))
        raise AttributeError("Only install_* or uninstall_* methods "
                             "can be used")
    def __enter__(self):
        if self.active_bundle is not None:
            raise ActiveBundleRunning(bundle_id=self.active_bundle)
        # Reserve a random bundle id not already in use on this bridge.
        while True:
            self.active_bundle = random.randrange(BUNDLE_ID_WIDTH)
            if self.active_bundle not in self.br.active_bundles:
                self.br.active_bundles.add(self.active_bundle)
                break
        try:
            (dp, ofp, ofpp) = self.br._get_dp()
            msg = ofpp.ONFBundleCtrlMsg(dp, self.active_bundle,
                                        ofp.ONF_BCT_OPEN_REQUEST,
                                        self.bundle_flags, [])
            reply = self.br._send_msg(msg, reply_cls=ofpp.ONFBundleCtrlMsg)
            if reply.type != ofp.ONF_BCT_OPEN_REPLY:
                raise RuntimeError(
                    "Unexpected reply type %d != ONF_BCT_OPEN_REPLY" %
                    reply.type)
            return self
        except Exception:
            # OPEN failed: release the reserved id so it can be reused.
            self.br.active_bundles.remove(self.active_bundle)
            self.active_bundle = None
            raise
    def __exit__(self, type, value, traceback):
        (dp, ofp, ofpp) = self.br._get_dp()
        if type is None:
            # Clean exit: commit all flow mods queued in the bundle.
            ctrl_type = ofp.ONF_BCT_COMMIT_REQUEST
            expected_reply = ofp.ONF_BCT_COMMIT_REPLY
        else:
            # An exception escaped the with-block: discard, don't commit.
            ctrl_type = ofp.ONF_BCT_DISCARD_REQUEST
            expected_reply = ofp.ONF_BCT_DISCARD_REPLY
            LOG.warning(
                "Discarding bundle with ID 0x%(id)x due to an exception",
                {'id': self.active_bundle})
        try:
            msg = ofpp.ONFBundleCtrlMsg(dp, self.active_bundle,
                                        ctrl_type,
                                        self.bundle_flags, [])
            reply = self.br._send_msg(msg, reply_cls=ofpp.ONFBundleCtrlMsg)
            if reply.type != expected_reply:
                # The bundle ID may be in a bad state. Let's leave it
                # in active_bundles so that we will never use it again.
                raise RuntimeError("Unexpected reply type %d" % reply.type)
            self.br.active_bundles.remove(self.active_bundle)
        finally:
            # It is possible the bundle is kept open, but this must be
            # cleared or all subsequent __enter__ will fail.
            self.active_bundle = None
|
from os import listdir
from os.path import isfile, join
import random
import subprocess
def main():
    """Quiz the user on five random receipts; three or more misses fails."""
    receiptDir = "receipts/"
    receipts = [name for name in listdir(receiptDir)
                if isfile(join(receiptDir, name))]
    misses = 0
    for _ in range(5):
        misses += display_receipt(receiptDir + random.choice(receipts))
    if misses >= 3:
        playSound("sorry_no_more_deals.ogg")
        return
    playSound("youre_a_deal_master.ogg")
    return
def playSound(fName):
    """Play an audio file via the external `play` command, muting its stderr."""
    cmd = ['play', fName]
    subprocess.call(cmd, stderr=subprocess.DEVNULL)
def playDeal(isDeal):
    """Announce the ground truth: deal or terrible price."""
    sound = "wow_what_a_deal.ogg" if isDeal else "what_a_terrible_price.ogg"
    playSound(sound)
def display_receipt(fName):
    """Show a receipt, ask whether it is a deal, and score the answer.

    The first line of the receipt file holds the ground truth (contains
    "true" when it is a deal); the rest of the file is the receipt body.

    Returns 0 for a correct guess and 1 for a wrong one.
    """
    with open(fName) as fHnd:
        isDeal = "true" in fHnd.readline().lower()
        receiptBody = fHnd.read()
    print(receiptBody)
    print("Is this a deal? (y/n)")
    # BUG FIX: normalize case once so "Y"/"N" are scored the same as "y"/"n"
    # (previously "Y" passed validation but was scored as a wrong answer).
    answer = input().lower()
    while answer != "y" and answer != "n":
        # BUG FIX: re-show the prompt *before* reading again (the prompt
        # used to be printed after the input call, so retries ran blind).
        print("Is this a deal? (y/n)")
        answer = input().lower()
    playDeal(isDeal)
    if (answer == "y") == isDeal:
        return 0
    return 1
# Entry point: run the quiz only when executed as a script.
if __name__ == '__main__':
    main()
|
"""Utilities related to layer/model functionality.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.conv_utils import convert_kernel
from tensorflow.python.util.tf_export import tf_export
def count_params(weights):
  """Count the total number of scalars composing the weights.

  Duplicate weight objects are counted once (deduplicated via a set).

  Arguments:
      weights: An iterable containing the weights on which to compute params

  Returns:
      The total number of scalars composing the weights
  """
  total = 0
  for weight in set(weights):
    total += int(np.prod(weight.get_shape().as_list()))
  return total
def print_summary(model, line_length=None, positions=None, print_fn=None):
  """Prints a summary of a model.

  Arguments:
      model: Keras model instance.
      line_length: Total length of printed lines
          (e.g. set this to adapt the display to different
          terminal window sizes).
      positions: Relative or absolute positions of log elements in each line.
          If not provided, defaults to `[.33, .55, .67, 1.]`.
      print_fn: Print function to use.
          It will be called on each line of the summary.
          You can set it to a custom function
          in order to capture the string summary.
          It defaults to `print` (prints to stdout).
  """
  if print_fn is None:
    print_fn = print
  # Choose between the narrow 3-column layout (sequential-like models) and
  # the wide 4-column layout that also lists inbound connections.
  if model.__class__.__name__ == 'Sequential':
    sequential_like = True
  elif not model._is_graph_network:
    # We treat subclassed models as a simple sequence of layers, for logging
    # purposes.
    sequential_like = True
  else:
    sequential_like = True
    nodes_by_depth = model._nodes_by_depth.values()
    nodes = []
    for v in nodes_by_depth:
      if (len(v) > 1) or (len(v) == 1 and len(v[0].inbound_layers) > 1):
        # if the model has multiple nodes
        # or if the nodes have multiple inbound_layers
        # the model is no longer sequential
        sequential_like = False
        break
      nodes += v
    if sequential_like:
      # search for shared layers
      for layer in model.layers:
        flag = False
        for node in layer._inbound_nodes:
          if node in nodes:
            # A layer seen via two distinct nodes is shared -> not sequential.
            if flag:
              sequential_like = False
              break
            else:
              flag = True
        if not sequential_like:
          break
  if sequential_like:
    line_length = line_length or 65
    positions = positions or [.45, .85, 1.]
    if positions[-1] <= 1:
      # Relative positions: scale them into absolute column indices.
      positions = [int(line_length * p) for p in positions]
    # header names for the different log elements
    to_display = ['Layer (type)', 'Output Shape', 'Param #']
  else:
    line_length = line_length or 98
    positions = positions or [.33, .55, .67, 1.]
    if positions[-1] <= 1:
      positions = [int(line_length * p) for p in positions]
    # header names for the different log elements
    to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Connected to']
    # Nodes belonging to the current network; used to filter out inbound
    # nodes that come from other (enclosing) models.
    relevant_nodes = []
    for v in model._nodes_by_depth.values():
      relevant_nodes += v
  def print_row(fields, positions):
    # Render one table row: each field is truncated/padded to its column edge.
    line = ''
    for i in range(len(fields)):
      if i > 0:
        line = line[:-1] + ' '
      line += str(fields[i])
      line = line[:positions[i]]
      line += ' ' * (positions[i] - len(line))
    print_fn(line)
  print_fn('_' * line_length)
  print_row(to_display, positions)
  print_fn('=' * line_length)
  def print_layer_summary(layer):
    """Prints a summary for a single layer (3-column layout).

    Arguments:
        layer: target layer.
    """
    try:
      output_shape = layer.output_shape
    except AttributeError:
      output_shape = 'multiple'
    except RuntimeError:  # output_shape unknown in Eager mode.
      output_shape = '?'
    name = layer.name
    cls_name = layer.__class__.__name__
    fields = [name + ' (' + cls_name + ')', output_shape, layer.count_params()]
    print_row(fields, positions)
  def print_layer_summary_with_connections(layer):
    """Prints a summary for a single layer (including topological connections).

    Arguments:
        layer: target layer.
    """
    try:
      output_shape = layer.output_shape
    except AttributeError:
      output_shape = 'multiple'
    connections = []
    for node in layer._inbound_nodes:
      if relevant_nodes and node not in relevant_nodes:
        # node is not part of the current network
        continue
      for i in range(len(node.inbound_layers)):
        inbound_layer = node.inbound_layers[i].name
        inbound_node_index = node.node_indices[i]
        inbound_tensor_index = node.tensor_indices[i]
        connections.append(inbound_layer + '[' + str(inbound_node_index) +
                           '][' + str(inbound_tensor_index) + ']')
    name = layer.name
    cls_name = layer.__class__.__name__
    if not connections:
      first_connection = ''
    else:
      first_connection = connections[0]
    fields = [
        name + ' (' + cls_name + ')', output_shape,
        layer.count_params(), first_connection
    ]
    print_row(fields, positions)
    # Additional connections get their own rows with blank leading columns.
    if len(connections) > 1:
      for i in range(1, len(connections)):
        fields = ['', '', '', connections[i]]
        print_row(fields, positions)
  layers = model.layers
  for i in range(len(layers)):
    if sequential_like:
      print_layer_summary(layers[i])
    else:
      print_layer_summary_with_connections(layers[i])
    if i == len(layers) - 1:
      print_fn('=' * line_length)
    else:
      print_fn('_' * line_length)
  model._check_trainable_weights_consistency()
  # Prefer the collected trainable weights when the model has been compiled.
  if hasattr(model, '_collected_trainable_weights'):
    trainable_count = count_params(model._collected_trainable_weights)
  else:
    trainable_count = count_params(model.trainable_weights)
  non_trainable_count = count_params(model.non_trainable_weights)
  print_fn('Total params: {:,}'.format(trainable_count + non_trainable_count))
  print_fn('Trainable params: {:,}'.format(trainable_count))
  print_fn('Non-trainable params: {:,}'.format(non_trainable_count))
  print_fn('_' * line_length)
@tf_export('keras.utils.convert_all_kernels_in_model')
def convert_all_kernels_in_model(model):
  """Converts all convolution kernels in a model from Theano to TensorFlow.

  Also works from TensorFlow to Theano.

  Arguments:
      model: target model for the conversion.
  """
  # Note: SeparableConvolution not included since only supported by TF.
  conv_classes = {
      'Conv1D',
      'Conv2D',
      'Conv3D',
      'Conv2DTranspose',
  }
  to_assign = []
  for layer in model.layers:
    if layer.__class__.__name__ not in conv_classes:
      continue
    flipped = convert_kernel(K.get_value(layer.kernel))
    to_assign.append((layer.kernel, flipped))
  K.batch_set_value(to_assign)
def convert_dense_weights_data_format(dense,
                                      previous_feature_map_shape,
                                      target_data_format='channels_first'):
  """Utility useful when changing a convnet's `data_format`.

  When porting the weights of a convnet from one data format to the other,
  if the convnet includes a `Flatten` layer
  (applied to the last convolutional feature map)
  followed by a `Dense` layer, the weights of that `Dense` layer
  should be updated to reflect the new dimension ordering.

  Arguments:
      dense: The target `Dense` layer.
      previous_feature_map_shape: A shape tuple of 3 integers,
          e.g. `(512, 7, 7)`. The shape of the convolutional
          feature map right before the `Flatten` layer that
          came before the target `Dense` layer.
      target_data_format: One of "channels_last", "channels_first".
          Set it "channels_last"
          if converting a "channels_first" model to "channels_last",
          or reciprocally.
  """
  assert target_data_format in {'channels_last', 'channels_first'}
  kernel, bias = dense.get_weights()
  # Work out the source feature-map layout and the axis permutation once.
  if target_data_format == 'channels_first':
    c, h, w = previous_feature_map_shape
    source_shape = (h, w, c)
    perm = (2, 0, 1)  # last -> first
  else:
    h, w, c = previous_feature_map_shape
    source_shape = (c, h, w)
    perm = (1, 2, 0)  # first -> last
  flat_size = np.prod(previous_feature_map_shape)
  # Re-order each kernel column as if the flattened feature map had been
  # laid out in the target data format.
  for col in range(kernel.shape[1]):
    feature_map = kernel[:, col].reshape(source_shape)
    kernel[:, col] = np.transpose(feature_map, perm).reshape((flat_size,))
  dense.set_weights([kernel, bias])
|
import mxnet as mx
import mxnext as X
from mxnext import dwconv, conv, relu6, add, global_avg_pool, sigmoid, to_fp16, to_fp32
from mxnext.backbone.resnet_v1b_helper import resnet_unit
from symbol.builder import Backbone
def _make_divisible(dividend, divisor):
if dividend % divisor == 0:
return dividend
else:
return (dividend // divisor + 1) * divisor
round32 = lambda dividend: _make_divisible(dividend, 32)
def se(input, prefix, f_down, f_up):
    """Squeeze-and-excitation gate: global-pool -> fc1 -> relu6 -> fc2 -> sigmoid,
    then scale ``input`` channel-wise by the resulting attention map."""
    with mx.name.Prefix(prefix + "_"):
        pooled = mx.sym.mean(input, axis=-1, keepdims=True)
        pooled = mx.sym.mean(pooled, axis=-2, keepdims=True)
        squeezed = relu6(conv(pooled, name="fc1", filter=f_down), name="fc1_relu")
        expanded = conv(squeezed, name="fc2", filter=f_up)
        gate = sigmoid(expanded, name="sigmoid")
        return mx.sym.broadcast_mul(input, gate, name="mul")
def convnormrelu(input, prefix, kernel, f_in, f_out, stride, proj, norm, **kwargs):
    """Plain conv -> norm -> relu6 unit.

    ``f_in``, ``proj`` and extra kwargs are accepted only for signature
    compatibility with the mbconv-style blocks; they are unused here.
    """
    with mx.name.Prefix(prefix + "_"):
        x = conv(input, name="conv1", filter=f_out, kernel=kernel,
                 stride=stride, no_bias=False)
        x = norm(x, name="bn1")
        return relu6(x, name="relu1")
def mbconv(input, prefix, kernel, f_in, f_out, stride, proj, bottleneck_ratio, norm, **kwargs):
    """Mobile inverted-bottleneck block with squeeze-and-excitation.

    When ``proj`` is True the residual shortcut is skipped (projection
    unit); otherwise the block output is added to ``input``.
    """
    expanded = f_in * bottleneck_ratio
    with mx.name.Prefix(prefix + "_"):
        if bottleneck_ratio == 1:
            x = input
        else:
            # 1x1 expansion conv.
            x = conv(input, name="conv1", filter=expanded, no_bias=False)
            x = norm(x, name="bn1")
            x = relu6(x, name="relu1")
        # Depthwise conv carries the spatial kernel/stride.
        x = dwconv(x, name="conv2", filter=expanded,
                   kernel=kernel, stride=stride, no_bias=False)
        x = norm(x, name="bn2")
        x = relu6(x, name="relu2")
        x = se(x, prefix=prefix + "_se2", f_down=f_in // 4, f_up=expanded)
        # 1x1 projection back down to f_out channels.
        x = conv(x, name="conv3", filter=f_out, no_bias=False)
        x = norm(x, name="bn3")
        if proj:
            return x
        return x + input
def mbc1(input, prefix, kernel, f_in, f_out, stride, proj, norm, **kwargs):
    """MBConv block with expansion ratio 1 (PEP 8 E731: def, not lambda)."""
    return mbconv(input, prefix, kernel, f_in, f_out, stride, proj, 1, norm, **kwargs)


def mbc6(input, prefix, kernel, f_in, f_out, stride, proj, norm, **kwargs):
    """MBConv block with expansion ratio 6 (PEP 8 E731: def, not lambda)."""
    return mbconv(input, prefix, kernel, f_in, f_out, stride, proj, 6, norm, **kwargs)
def efficientnet_helper(data, norm, us, fos, fis, ss, ks, cs):
    """Chain the per-stage block builders and collect each stage's output.

    us/fos/fis/ss/ks/cs are parallel per-stage lists: unit counts, output
    filters, input filters, strides, kernel sizes and block constructors.
    Only the first unit of a stage uses the stage stride / input filters
    and acts as a projection unit.
    """
    stages = []
    stage_confs = zip(us, fos, fis, ss, ks, cs)
    for stage_idx, (units, f_out, f_in, stride, kernel, block_fn) in \
            enumerate(stage_confs, start=1):
        for unit_idx in range(1, units + 1):
            first = unit_idx == 1
            data = block_fn(data,
                            prefix="stage%s_unit%s" % (stage_idx, unit_idx),
                            f_in=f_in if first else f_out,
                            f_out=f_out,
                            kernel=kernel,
                            stride=stride if first else 1,
                            proj=first,
                            norm=norm)
        stages.append(data)
    return stages
def efficientnet_b4(data, norm, **kwargs):
    """EfficientNet-B4 backbone (~1.5 GFLOPs); returns the per-stage symbols."""
    units = [1, 2, 4, 4, 6, 6, 8, 2, 1]
    f_outs = [48, 24, 32, 56, 112, 160, 272, 448, 1792]
    f_ins = [0] + f_outs[:-1]
    strides = [2, 1, 2, 2, 2, 1, 2, 1, 1]
    kernels = [3, 3, 3, 5, 3, 5, 5, 3, 1]
    blocks = [convnormrelu, mbc1] + [mbc6] * 6 + [convnormrelu]
    return efficientnet_helper(data, norm, units, f_outs, f_ins,
                               strides, kernels, blocks)
def efficientnet_b5(data, norm, **kwargs):
    """EfficientNet-B5 backbone (~2.3 GFLOPs); returns the per-stage symbols."""
    units = [1, 3, 5, 5, 7, 7, 9, 3, 1]
    f_outs = [48, 24, 40, 64, 128, 172, 304, 512, 2048]
    f_ins = [0] + f_outs[:-1]
    strides = [2, 1, 2, 2, 2, 1, 2, 1, 1]
    kernels = [3, 3, 3, 5, 3, 5, 5, 3, 1]
    blocks = [convnormrelu, mbc1] + [mbc6] * 6 + [convnormrelu]
    return efficientnet_helper(data, norm, units, f_outs, f_ins,
                               strides, kernels, blocks)
def efficientnet_b6(data, norm, **kwargs):
    """EfficientNet-B6 backbone (~3.3 GFLOPs); returns the per-stage symbols."""
    units = [1, 3, 6, 6, 8, 8, 11, 3, 1]
    f_outs = [56, 32, 40, 72, 144, 200, 344, 576, 2304]
    f_ins = [0] + f_outs[:-1]
    strides = [2, 1, 2, 2, 2, 1, 2, 1, 1]
    kernels = [3, 3, 3, 5, 3, 5, 5, 3, 1]
    blocks = [convnormrelu, mbc1] + [mbc6] * 6 + [convnormrelu]
    return efficientnet_helper(data, norm, units, f_outs, f_ins,
                               strides, kernels, blocks)
def efficientnet_b7(data, norm, **kwargs):
    """EfficientNet-B7 backbone (~5.1 GFLOPs); returns the per-stage symbols."""
    units = [1, 4, 7, 7, 10, 10, 13, 4, 1]
    f_outs = [64, 32, 48, 80, 160, 224, 384, 640, 2560]
    f_ins = [0] + f_outs[:-1]
    strides = [2, 1, 2, 2, 2, 1, 2, 1, 1]
    kernels = [3, 3, 3, 5, 3, 5, 5, 3, 1]
    blocks = [convnormrelu, mbc1] + [mbc6] * 6 + [convnormrelu]
    return efficientnet_helper(data, norm, units, f_outs, f_ins,
                               strides, kernels, blocks)
def efficientnet_fpn_builder(efficientnet):
    """Wrap a stage-builder function into a Backbone exposing FPN features."""
    class EfficientNetFPN(Backbone):
        def __init__(self, pBackbone):
            super().__init__(pBackbone)
            params = self.p
            data = X.var("data")
            if params.fp16:
                data = data.astype("float16")
            stages = efficientnet(data, params.normalizer, params=params)
            # Stage outputs used as pyramid inputs (indices 2, 3, 5, 8).
            self.symbol = tuple(stages[idx] for idx in (2, 3, 5, 8))
        def get_rpn_feature(self):
            return self.symbol
        def get_rcnn_feature(self):
            return self.symbol
    return EfficientNetFPN
# Concrete Backbone classes, one per EfficientNet variant.
EfficientNetB4FPN = efficientnet_fpn_builder(efficientnet_b4)
EfficientNetB5FPN = efficientnet_fpn_builder(efficientnet_b5)
EfficientNetB6FPN = efficientnet_fpn_builder(efficientnet_b6)
EfficientNetB7FPN = efficientnet_fpn_builder(efficientnet_b7)
if __name__ == "__main__":
data = X.var("data")
norm = X.normalizer_factory()
*_, last = efficientnet_b4(data, norm)
mx.viz.print_summary(last, shape={"data": (1, 3, 224, 224)})
|
"""This example updates the given client buyer's status."""
import argparse
import os
import pprint
import sys
sys.path.insert(0, os.path.abspath('..'))
from googleapiclient.errors import HttpError
import samples_util
# Placeholder defaults; replace them here or override via the CLI flags below.
DEFAULT_ACCOUNT_ID = 'ENTER_ACCOUNT_ID_HERE'
DEFAULT_CLIENT_BUYER_ID = 'ENTER_CLIENT_BUYER_ID_HERE'
DEFAULT_CLIENT_NAME = 'ENTER_CLIENT_NAME_HERE'
DEFAULT_ENTITY_ID = 'ENTER_ENTITY_ID_HERE'
DEFAULT_ENTITY_TYPE = 'ENTER_ENTITY_TYPE_HERE'
DEFAULT_ROLE = 'ENTER_ROLE_HERE'
DEFAULT_STATUS = 'ENTER_STATUS_HERE'
DEFAULT_VISIBLE_TO_SELLER = 'ENTER_SELLER_VISIBILITY_HERE'
# Accepted values for the validated CLI arguments defined in __main__.
VALID_ENTITY_TYPES = ('ADVERTISER', 'BRAND', 'AGENCY')
VALID_ROLES = ('CLIENT_DEAL_VIEWER', 'CLIENT_DEAL_NEGOTIATOR',
               'CLIENT_DEAL_APPROVER')
VALID_STATUS = ('ACTIVE', 'DISABLED')
def main(ad_exchange_buyer, account_id, client_buyer_id, body):
  """Update the client buyer identified by account_id/client_buyer_id.

  Prints the updated client resource on success, or the HttpError on failure.
  """
  try:
    # Construct and execute the request.
    request = ad_exchange_buyer.accounts().clients().update(
        accountId=account_id, clientAccountId=client_buyer_id, body=body)
    client = request.execute()
    print(f'Client buyer with account ID: "{account_id}" and client buyer ID '
          f'"{client_buyer_id}" has been updated.')
    pprint.pprint(client)
  except HttpError as e:
    print(e)
if __name__ == '__main__':
  def entity_type(s):
    """argparse validator for --entity_type."""
    if s not in VALID_ENTITY_TYPES:
      raise argparse.ArgumentTypeError(f'Invalid value "{s}".')
    return s
  def status(s):
    """argparse validator for --status."""
    if s not in VALID_STATUS:
      raise argparse.ArgumentTypeError(f'Invalid value "{s}".')
    return s
  def role(s):
    """argparse validator for --role."""
    if s not in VALID_ROLES:
      raise argparse.ArgumentTypeError(f'Invalid value "{s}".')
    return s
  def seller_visibility(s):
    """argparse converter for --visible_to_seller.

    BUG FIX: the argument previously used type=bool, which treats ANY
    non-empty string (including "False") as True. Parse the text
    explicitly instead. Note the placeholder default will now be rejected
    with a clear usage error instead of silently becoming True.
    """
    lowered = str(s).lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
      return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
      return False
    raise argparse.ArgumentTypeError(f'Invalid value "{s}".')
  parser = argparse.ArgumentParser(
      # BUG FIX: the description previously said "Lists client buyers";
      # this sample updates an existing client buyer.
      description='Updates a client buyer for a given Ad Exchange account id.')
  parser.add_argument(
      '-a', '--account_id', default=DEFAULT_ACCOUNT_ID, type=int,
      help='The integer id of the Authorized Buyers account.')
  parser.add_argument(
      '-c', '--client_buyer_id', default=DEFAULT_CLIENT_BUYER_ID, type=int,
      help='The integer id of the client buyer.')
  parser.add_argument(
      '-cn', '--client_name', default=DEFAULT_CLIENT_NAME,
      help='The name used to represent this client to publishers.')
  parser.add_argument(
      '-ei', '--entity_id', default=DEFAULT_ENTITY_ID,
      help=('The integer id representing the client entity. This is a '
            'unique id that can be found in the advertisers.txt, '
            'brands.txt, or agencies.txt dictionary files depending on the '
            'entity type. These files can be found on the following page: '
            'https://developers.google.com/authorized-buyers/rtb/data'))
  parser.add_argument(
      '-et', '--entity_type', default=DEFAULT_ENTITY_TYPE, type=entity_type,
      help=('The type of the client entity. This can be set to any of the '
            'following: %s' % str(VALID_ENTITY_TYPES)))
  parser.add_argument(
      '-r', '--role', default=DEFAULT_ROLE, type=role,
      help=('The desired role to be assigned to the client buyer. This can '
            'be set to any of the following: %s' % str(VALID_ROLES)))
  parser.add_argument(
      '-s', '--status', default=DEFAULT_STATUS, type=status,
      help=('The desired update to the client buyer\'s status. This can be '
            'set to any of the following: %s' % str(VALID_STATUS)))
  parser.add_argument(
      '-v', '--visible_to_seller', default=DEFAULT_VISIBLE_TO_SELLER,
      type=seller_visibility,
      help=('Whether the client buyer will be visible to sellers.'))
  args = parser.parse_args()
  # Create a body containing the required fields.
  BODY = {
      'clientName': args.client_name,
      'entityId': args.entity_id,
      'entityType': args.entity_type,
      'role': args.role,
      'status': args.status,
      'visibleToSeller': args.visible_to_seller
  }
  try:
    service = samples_util.GetService('v2beta1')
  except IOError as ex:
    print(f'Unable to create adexchangebuyer service - {ex}')
    print('Did you specify the key file in samples_util.py?')
    sys.exit(1)
  main(service, args.account_id, args.client_buyer_id, BODY)
|
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
import Status.ttypes
import beeswaxd.ttypes
import cli_service.ttypes
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
# The C-accelerated thrift (de)serializer is optional; fall back to the
# pure-Python protocol path when the extension module is unavailable.
try:
  from thrift.protocol import fastbinary
except:
  fastbinary = None
class TImpalaQueryOptions(object):
  """Thrift-generated enum of Impala query options.

  The integer values are the wire representation; the two dicts below give
  value<->name lookups in both directions.
  """
  ABORT_ON_ERROR = 0
  MAX_ERRORS = 1
  DISABLE_CODEGEN = 2
  BATCH_SIZE = 3
  MEM_LIMIT = 4
  NUM_NODES = 5
  MAX_SCAN_RANGE_LENGTH = 6
  MAX_IO_BUFFERS = 7
  NUM_SCANNER_THREADS = 8
  ALLOW_UNSUPPORTED_FORMATS = 9
  DEFAULT_ORDER_BY_LIMIT = 10
  DEBUG_ACTION = 11
  ABORT_ON_DEFAULT_LIMIT_EXCEEDED = 12
  _VALUES_TO_NAMES = {
    0: "ABORT_ON_ERROR",
    1: "MAX_ERRORS",
    2: "DISABLE_CODEGEN",
    3: "BATCH_SIZE",
    4: "MEM_LIMIT",
    5: "NUM_NODES",
    6: "MAX_SCAN_RANGE_LENGTH",
    7: "MAX_IO_BUFFERS",
    8: "NUM_SCANNER_THREADS",
    9: "ALLOW_UNSUPPORTED_FORMATS",
    10: "DEFAULT_ORDER_BY_LIMIT",
    11: "DEBUG_ACTION",
    12: "ABORT_ON_DEFAULT_LIMIT_EXCEEDED",
  }
  _NAMES_TO_VALUES = {
    "ABORT_ON_ERROR": 0,
    "MAX_ERRORS": 1,
    "DISABLE_CODEGEN": 2,
    "BATCH_SIZE": 3,
    "MEM_LIMIT": 4,
    "NUM_NODES": 5,
    "MAX_SCAN_RANGE_LENGTH": 6,
    "MAX_IO_BUFFERS": 7,
    "NUM_SCANNER_THREADS": 8,
    "ALLOW_UNSUPPORTED_FORMATS": 9,
    "DEFAULT_ORDER_BY_LIMIT": 10,
    "DEBUG_ACTION": 11,
    "ABORT_ON_DEFAULT_LIMIT_EXCEEDED": 12,
  }
class TInsertResult(object):
  """Thrift-generated struct: result of an insert operation.

  NOTE: generated Python 2 code (xrange/iteritems); do not hand-edit the
  (de)serialization statement order — it is the wire format.

  Attributes:
   - rows_appended: map<string, i64> of rows appended per target (required)
  """
  thrift_spec = (
    None, # 0
    (1, TType.MAP, 'rows_appended', (TType.STRING,None,TType.I64,None), None, ), # 1
  )
  def __init__(self, rows_appended=None,):
    self.rows_appended = rows_appended
  def read(self, iprot):
    # Fast path: C-accelerated decode when the protocol/transport allow it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.MAP:
          self.rows_appended = {}
          (_ktype1, _vtype2, _size0 ) = iprot.readMapBegin()
          for _i4 in xrange(_size0):
            _key5 = iprot.readString();
            _val6 = iprot.readI64();
            self.rows_appended[_key5] = _val6
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TInsertResult')
    if self.rows_appended is not None:
      oprot.writeFieldBegin('rows_appended', TType.MAP, 1)
      oprot.writeMapBegin(TType.STRING, TType.I64, len(self.rows_appended))
      for kiter7,viter8 in self.rows_appended.items():
        oprot.writeString(kiter7)
        oprot.writeI64(viter8)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # rows_appended is a required field in the IDL.
    if self.rows_appended is None:
      raise TProtocol.TProtocolException(message='Required field rows_appended is unset!')
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TPingImpalaServiceResp(object):
  """Thrift-generated struct: response to a PingImpalaService RPC.

  Attributes:
   - version: server version string (optional on the wire)
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'version', None, None, ), # 1
  )
  def __init__(self, version=None,):
    self.version = version
  def read(self, iprot):
    # Fast path: C-accelerated decode when the protocol/transport allow it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.version = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TPingImpalaServiceResp')
    if self.version is not None:
      oprot.writeFieldBegin('version', TType.STRING, 1)
      oprot.writeString(self.version)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TResetTableReq(object):
  """Thrift-generated struct identifying a table by database and table name.

  Attributes:
   - db_name: database of the target table (required)
   - table_name: name of the target table (required)
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'db_name', None, None, ), # 1
    (2, TType.STRING, 'table_name', None, None, ), # 2
  )
  def __init__(self, db_name=None, table_name=None,):
    self.db_name = db_name
    self.table_name = table_name
  def read(self, iprot):
    # Fast path: C-accelerated decode when the protocol/transport allow it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.db_name = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.table_name = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TResetTableReq')
    if self.db_name is not None:
      oprot.writeFieldBegin('db_name', TType.STRING, 1)
      oprot.writeString(self.db_name)
      oprot.writeFieldEnd()
    if self.table_name is not None:
      oprot.writeFieldBegin('table_name', TType.STRING, 2)
      oprot.writeString(self.table_name)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Both fields are required in the IDL.
    if self.db_name is None:
      raise TProtocol.TProtocolException(message='Required field db_name is unset!')
    if self.table_name is None:
      raise TProtocol.TProtocolException(message='Required field table_name is unset!')
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TResetCatalogResp(object):
  """Thrift-generated struct: response carrying a TStatus for ResetCatalog.

  Attributes:
   - status: Status.ttypes.TStatus of the operation (required)
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'status', (Status.ttypes.TStatus, Status.ttypes.TStatus.thrift_spec), None, ), # 1
  )
  def __init__(self, status=None,):
    self.status = status
  def read(self, iprot):
    # Fast path: C-accelerated decode when the protocol/transport allow it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.status = Status.ttypes.TStatus()
          self.status.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TResetCatalogResp')
    if self.status is not None:
      oprot.writeFieldBegin('status', TType.STRUCT, 1)
      self.status.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    if self.status is None:
      raise TProtocol.TProtocolException(message='Required field status is unset!')
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TResetTableResp(object):
  """Thrift-generated struct: response carrying a TStatus for ResetTable.

  Attributes:
   - status: Status.ttypes.TStatus of the operation (required)
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'status', (Status.ttypes.TStatus, Status.ttypes.TStatus.thrift_spec), None, ), # 1
  )
  def __init__(self, status=None,):
    self.status = status
  def read(self, iprot):
    # Fast path: C-accelerated decode when the protocol/transport allow it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.status = Status.ttypes.TStatus()
          self.status.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TResetTableResp')
    if self.status is not None:
      oprot.writeFieldBegin('status', TType.STRUCT, 1)
      self.status.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    if self.status is None:
      raise TProtocol.TProtocolException(message='Required field status is unset!')
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
|
from cheetax.sql import Query
from cheetax.metadata import Column, SourceType, DataType, ExaDatatype, ExaSchemaType
from voluptuous import Schema, Required, All, Any, Range, Optional, ALLOW_EXTRA
from cheetax.logger import GLOBAL_LOGGER as logger
from cheetax.util import normalize_name
from cheetax import __staging_schema__ as staging_schema
class Base(object):
# Functions that needs to be present
# in the adapter classifiers
    def get_prefix_name(self):
        """returns a string with the prefix placed before the table name"""
        raise NotImplementedError('get_prefix_name is not implemented in the adapter')
def get_table_name(self):
"""returns a string with the table name that is placed after prefix name"""
raise NotImplementedError('get_metadata_sql is not implemented in the adapter')
def get_validation_schema(self):
"""returns a query that return the metadata for a table"""
raise NotImplementedError('get_validation_schema is not implemented in the adapter')
def get_test_sql(self):
"""returns a query that return the metadata for a table"""
raise NotImplementedError('get_metadata_sql is not implemented in the adapter')
def get_source_sql(self):
"""returns a query that return the metadata for a table"""
raise NotImplementedError('get_metadata_sql is not implemented in the adapter')
def get_metadata_sql(self):
"""returns a query that return the metadata for a table"""
raise NotImplementedError('get_metadata_sql is not implemented in the adapter')
def map_column(self):
"""return a Column object based on a single list from the metadata list"""
raise NotImplementedError('map_column is not implemented in the adapter')
def is_primary(self):
"""returns true or false if column is primary key based on input value"""
raise NotImplementedError('is_primary is not implemented in the adapter')
def map_character_set(self):
"""returns a string either ASCII or UTF8 based on input value"""
raise NotImplementedError('map_character_set is not implemented in the adapter')
def map_datatype(self):
"""returns a DataType object based on a single list from the metadata list"""
raise NotImplementedError('map_datatype is not implemented in the adapter')
# Helper functions that can be used
# in the adapter class
def quote_select(self, select):
column_sort_list = []
if select != '*':
for col in select.split(','):
column = "''{column}''".format(column=col)
column_sort_list.append(column)
return ','.join(column_sort_list)
return None
def double_quote_select(self, select):
column_sort_list = []
if select != '*':
for col in select.split(','):
column = '"{column}"'.format(column=col)
column_sort_list.append(column)
return ','.join(column_sort_list)
return None
def get_column_list(self, columns):
column_list = []
for column in columns:
column_list.append(self.map_column(column))
return column_list
# Exasol standard functions
def mapTimezone(self, column_name):
column = self.source.get('column_options')
if not column[column_name]:
raise KeyError('No column_option entry for {}'.format(column))
if not column[column_name]['format']:
raise KeyError('No format for column_option {}'.format(column))
if not column[column_name]['timezone']:
raise KeyError('No timezone for column_option {}'.format(column))
return column[column_name]['timezone']
def _exaWarehouseMetadata(self, source, table_name):
table = Table(source, ExaSchemaType.EXTRACT, table_name)
metaDataQuery = str(Query.select_(self.getColumns())
.from_('EXA_ALL_COLUMNS')
.where_("column_table = '{table}'".format(table=table.getFullTableName()))
.where_("column_schema = '{schema}'".format(schema=ExaSchemaType.EXTRACT.value)))
columns = self.profile.readData(metaDataQuery)
table.columns = self._exaMapColumns(columns)
return table
def exaMapColumns(self, columns):
columnsList = []
for column in columns:
name = column[0]
dataType = self._exaColumnMap(column)
primaryKey = self._exaIsPrimary(column[6])
timezone = self.mapTimezone(name) if dataType.getType() == ExaDatatype.TIMESTAMP else None
columnsList.append(Column(name, dataType, primaryKey, timezone))
return columnsList
def _exaIsPrimary(self, key):
if key == 'PRI':
return True
return False
def _exaCharacterSetMap(self, charSet):
if charSet.upper() == 'UTF8':
return 'UTF8'
else:
return 'ANSCII'
def _exaColumnMap(self, column):
dataType = column[1]
size = column[3]
precision = column[4]
scale = column[5]
charSet = self._exaCharacterSetMap(column[1].split()[-1])
### string types ###
if dataType.startswith('VARCHAR'):
return DataType.createVarChar(size, charSet)
elif dataType.startswith('CHAR'):
return DataType.createChar(size, charSet)
elif dataType.startswith('DECIMAL'):
return DataType.createDecimal(precision, scale)
elif dataType.startswith('DOUBLE'):
return DataType.createDouble()
elif dataType.startswith('DATE'):
return DataType.createDate()
elif dataType.startswith('BOOLEAN'):
return DataType.createBool()
elif dataType.startswith('TIMESTAMP'):
return DataType.createTimestamp()
elif dataType.startswith('GEOMETRY'):
return DataType.createTimestamp()
else:
raise ValueError('{} is not supported'.format(dataType))
# @classmethod
# def schema_exists(self, ws, schema):
#
# with ws.cursor() as c:
#
# c.execute(" \
# SELECT 1 FROM SYS.EXA_ALL_OBJECTS \
# WHERE OBJECT_TYPE = 'SCHEMA' \
# AND OBJECT_NAME = '{schema}' \
# "
# .format(schema=schema.upper()))
#
# if len(c) == 0:
# return False
#
# return True
#
# @classmethod
# def get_table_name(source, table=None):
# if table is not None:
# return '{}_{}'.format(source, table)
#
# return source
#
# @classmethod
# def validate(self, schema, dictionary):
# return common.validate(schema, dictionary)
#
# @classmethod
# def insert(self):
# statement = " \
# "
#
# @classmethod
# def create_missing_schemas(self, ws):
# # Create Schemas
# self.create_schema(ws, project.datawarehouse_schema)
# self.create_schema(ws, project.archive_schema)
# self.create_schema(ws, project.extract_schema)
#
# @classmethod
# def get_columns(self, ws, table_name):
# """Get extract table columns"""
# with ws.cursor() as c:
# c.execute(" \
# SELECT COLUMN_NAME FROM SYS.EXA_ALL_COLUMNS \
# WHERE COLUMN_SCHEMA = '{schema}' \
# AND COLUMN_TABLE = '{table}'".format(
# schema=project.extract_schema,
# table=table_name.upper()
# )
# )
#
# columns = []
# for column in c.fetchall():
# columns.append('"{column}"'.format(column=column[0]))
#
# return columns
#
# @classmethod
# def format_string(self, text):
# # define desired replacements here
# dic = {
# 'æ': 'ae',
# 'å': 'aa',
# 'ø': 'oe',
# ' ': '_'
# }
#
# for i, j in dic.items():
# text = text.replace(i, j)
#
# return text.upper()
#
# @classmethod
# def create_schema(self, ws, schema):
# """Create schema"""
# if not self.schema_exists(ws, schema):
# with ws.cursor() as c:
# c.execute("CREATE SCHEMA {schema}".format(schema=schema))
# self.ws.commit()
#
# @classmethod
# def create_extract_table(self, ws, table, source, select, statement):
# """Create a extract table"""
#
# source_table = "{source}_{table}".format(table=table,source=source)
# complete_table = self.format_string(source_table)
#
# if not self.table_exists(ws, project.extract_schema, complete_table):
# with ws.cursor() as c:
# c.execute("CREATE TABLE {schema}.{table} AS SELECT {select} FROM ({statement})"
# .format(
# schema=project.extract_schema,
# table=complete_table,
# source=source,
# select=select,
# statement=statement
# )
# )
# ws.commit()
# @classmethod
# def create_archive_table(self, ws, table, source):
# """Create a archive table"""
#
# complete_table = "{source}_{table}".format(table=table,source=source)
#
# if not self.table_exists(ws, project.archive_schema, complete_table.upper()):
#
# columns = ','.join(self.get_columns(ws, table.upper()))
#
# with self.ws.cursor() as c:
# # Create table
# c.execute("CREATE TABLE {archive_schema}.{table} \
# AS SELECT {table_format} FROM \
# ( \
# SELECT {select} FROM {extract_schema}.{table} \
# where 1=0 \
# ) \
# ".format(
# archive_schema=project.archive_schema
# extract_schema=project.extract_schema
# table=table,
# table_format=table_format,
# select=select
# )
# )
#
# def __init__(self, profile):
#
# self.project = Project(profile)
#
# # Objects
# self.sourceValidatorObj = sourceValidatorObj
# self.profileValidatorObj = profileValidatorObj
#
# self.source = source
#
#
# # Profile variables
# self.profile_host = profileValidatorObj.get_key('host')
# self.profile_port = profileValidatorObj.get_key('port')
# self.profile_username = profileValidatorObj.get_key('username')
# self.profile_password = profileValidatorObj.get_key('password')
#
#
# # Initialize and connect
# self.ws = exasol.connect(
# 'ws://{}:{}'.format(self.profile_host, self.profile_port),
# self.profile_username,
# self.profile_password,
# autocommit = False,
# useCompression = True
# )
#
# # Create Schemas
# self.create_schema('CHEETAX_AUDIT')
# self.create_schema('CHEETAX_METADATA')
# self.create_schema('CHEETAX_DATAWAREHOUSE')
# self.create_schema('CHEETAX_ARCHIVE')
# self.create_schema('CHEETAX_EXTRACT')
#
# # Create Audit Tables
#
#
#
# def schema_exists(self, schema):
#
# with self.ws.cursor() as c:
#
# c.execute("""
# SELECT 1 FROM SYS.EXA_ALL_OBJECTS
# WHERE OBJECT_TYPE = 'SCHEMA'
# AND OBJECT_NAME = '{0}'
# """.format(schema))
#
# if len(c) == 0:
# return False
#
# return True
#
# def table_exists(self, schema, table):
#
# with self.ws.cursor() as c:
# c.execute("""
# SELECT 1 FROM SYS.EXA_ALL_OBJECTS
# WHERE OBJECT_TYPE = 'TABLE'
# AND OBJECT_NAME = '{0}'
# AND ROOT_NAME ='{1}'
# """.format(table, schema))
#
# if len(c) == 0:
# return False
#
# return True
#
#
# def create_schema(self, schema):
# """Create schema"""
# if not self.schema_exists(schema):
# with self.ws.cursor() as c:
# c.execute("CREATE SCHEMA {}".format(schema))
# self.ws.commit()
#
#
# def format_name(self, text):
# # define desired replacements here
# dic = {
# 'æ': 'ae',
# 'å': 'aa',
# 'ø': 'oe',
# ' ': '_'
# }
#
# for i, j in dic.items():
# text = text.replace(i, j)
#
# return text.upper()
#
#
#
#
#
#
# # Add housekeeping columns
# c.execute("ALTER TABLE CHEETAX_ARCHIVE.{} ADD CHEETAX_KEY DECIMAL(18, 0) IDENTITY".format(table))
# c.execute("ALTER TABLE CHEETAX_ARCHIVE.{} ADD CHEETAX_ACTION CHAR(1)".format(table))
# c.execute("ALTER TABLE CHEETAX_ARCHIVE.{} ADD CHEETAX_CHECKSUM CHAR(32)".format(table))
# c.execute("ALTER TABLE CHEETAX_ARCHIVE.{} ADD CHEETAX_FROM TIMESTAMP".format(table))
# c.execute("ALTER TABLE CHEETAX_ARCHIVE.{} ADD CHEETAX_TO TIMESTAMP".format(table))
#
# self.ws.commit()
#
# def insert_to_extract(self, table, table_format, statement):
# with self.ws.cursor() as c:
# c.execute('TRUNCATE TABLE CHEETAX_EXTRACT.{table}'.format(table=table))
# c.execute('INSERT INTO CHEETAX_EXTRACT.{table} select {table_format} from ({statement})'.format(
# table=table,
# statement=statement,
# table_format=table_format
# )
# )
# self.ws.commit()
#
# def merge_to_archive(self, table, primary_key):
# print(merge)
|
from __future__ import print_function
import sys
import os
import tempfile
import subprocess
import random
import string
import glob
import struct
import atexit
import six
import pysam
from six.moves import urllib
from . import cbedtools
from . import settings
from . import filenames
from . import genome_registry
from .logger import logger
from .cbedtools import create_interval_from_list
# Buffer size passed as bufsize= to the subprocess.Popen calls below.
BUFSIZE = 1
# Module-level registry of tagged objects; searched by find_tagged().
_tags = {}
def _check_for_bedtools(program_to_check='intersectBed', force_check=False):
    """
    Checks installation as well as version (based on whether or not "bedtools
    intersect" works, or just "intersectBed")

    Side effects: sets settings._bedtools_installed and settings._v_2_15_plus.
    Returns True when a previous successful check is cached; otherwise
    returns None after probing.  Raises OSError when no BEDTools binary can
    be launched at all.
    """
    # Fast path: trust the cached probe result unless forced to re-check.
    if settings._bedtools_installed and not force_check:
        return True
    try:
        # New-style invocation ("bedtools <subcommand>") implies >= 2.15.
        p = subprocess.Popen(
            [os.path.join(settings._bedtools_path, 'bedtools'),
             settings._prog_names[program_to_check]],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        settings._bedtools_installed = True
        settings._v_2_15_plus = True
    except (OSError, KeyError) as err:
        try:
            # Fall back to the pre-2.15 per-program binaries (e.g. intersectBed).
            p = subprocess.Popen(
                [os.path.join(settings._bedtools_path, program_to_check)],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            settings._bedtools_installed = True
            settings._v_2_15_plus = False
        except OSError as err:
            # errno 2 == ENOENT: the executable was not found.
            if err.errno == 2:
                if settings._bedtools_path:
                    add_msg = "(tried path '%s')" % settings._bedtools_path
                else:
                    add_msg = ""
                raise OSError("Please make sure you have installed BEDTools"
                              "(https://github.com/arq5x/bedtools) and that "
                              "it's on the path. %s" % add_msg)
            # NOTE(review): OSErrors with other errno values fall through
            # silently here, leaving the settings flags unset — confirm
            # this is intended.
            # NOTE(review): the spawned probe processes are never waited on.
def _check_for_R():
    """Probe for a working R executable and record success in settings.

    Raises ValueError when R cannot be launched.
    """
    r_exe = os.path.join(settings._R_path, 'R')
    try:
        subprocess.Popen(
            [r_exe, '--version'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError:
        add_msg = "(tried path '%s')" % settings._R_path if settings._R_path else ""
        raise ValueError(
            'Please install R and ensure it is on your path %s' % add_msg)
    settings._R_installed = True
class Error(Exception):
    """Base class for this module's exceptions."""
class BEDToolsError(Error):
    """Raised when a BEDTools invocation reports a fatal error."""

    def __init__(self, cmd, msg):
        # Store both as strings so __str__ never fails on odd inputs.
        self.cmd = str(cmd)
        self.msg = str(msg)

    def __str__(self):
        return ('\nCommand was:\n\n\t' + self.cmd + '\n'
                + '\nError message was:\n' + self.msg)
def isGZIP(fn):
    """Return True when file *fn* starts with the gzip magic bytes."""
    with open(fn, 'rb') as handle:
        return handle.read(3) == b"\x1f\x8b\x08"
def isBGZIP(fn):
    """
    Reads a filename to see if it's a BGZIPed file or not.

    Checks the 15-byte BGZF header: gzip magic (31, 139), deflate (8),
    FEXTRA flag (4), and the BC extra subfield (66, 67) of length 2.

    BUG FIX: the original opened the file without ever closing it; a
    ``with`` block releases the handle deterministically.
    """
    with open(fn, 'rb') as handle:
        header_str = handle.read(15)
    if len(header_str) < 15:
        return False
    header = struct.unpack_from('BBBBiBBHBBB', header_str)
    id1, id2, cm, flg, mtime, xfl, os_, xlen, si1, si2, slen = header
    return (id1 == 31 and id2 == 139 and cm == 8 and flg == 4
            and si1 == 66 and si2 == 67 and slen == 2)
def isBAM(fn):
    """Return True when *fn* is a BAM file (a BGZF container pysam can parse)."""
    if not isBGZIP(fn):
        return False
    # BAM is BGZF-compressed; distinguish it from plain BGZF by asking
    # pysam to read the header.
    try:
        pysam.Samfile(fn, 'rb')
    except ValueError:
        return False
    return True
def find_tagged(tag):
    """
    Return the bedtool object tagged with *tag*; raise ValueError when no
    tracked object carries that tag.  Useful for tracking down bedtools
    made previously.
    """
    missing = object()
    for item in _tags.values():
        if getattr(item, '_tag', missing) == tag:
            return item
    raise ValueError('tag "%s" not found' % tag)
def _flatten_list(x):
nested = True
while nested:
check_again = False
flattened = []
for element in x:
if isinstance(element, list):
flattened.extend(element)
check_again = True
else:
flattened.append(element)
nested = check_again
x = flattened[:]
return x
def set_tempdir(tempdir):
    """
    Set the directory for temp files (sets tempfile.tempdir).

    Useful for clusters with a /scratch partition instead of /tmp.
    Raises ValueError when *tempdir* does not exist.
    """
    if not os.path.exists(tempdir):
        raise ValueError(
            'The tempdir you specified, %s, does not exist' % tempdir)
    tempfile.tempdir = tempdir
def get_tempdir():
    """Return the directory currently used for this module's temp files."""
    return tempfile.gettempdir()
def cleanup(verbose=False, remove_all=False):
    """
    Delete temp files from the current session; with *remove_all*, delete
    every file matching "pybedtools.*.tmp" in the temp dir.

    With *verbose*, report each file being removed.  No-op when
    settings.KEEP_TEMPFILES is set.
    """
    if settings.KEEP_TEMPFILES:
        return
    for tmpfile in filenames.TEMPFILES:
        if verbose:
            print('removing', tmpfile)
        if os.path.exists(tmpfile):
            os.unlink(tmpfile)
    if remove_all:
        pattern = os.path.join(get_tempdir(), 'pybedtools.*.tmp')
        for tmpfile in glob.glob(pattern):
            os.unlink(tmpfile)
def _version_2_15_plus_names(prog_name):
    """Translate *prog_name* into the argv prefix for the installed BEDTools.

    Pre-2.15 installs get the legacy single-binary name; newer installs get
    ["<path>/bedtools", "<subcommand>"].  Raises BEDToolsError for names not
    known in either naming scheme.
    """
    if not settings._bedtools_installed:
        _check_for_bedtools()
    if not settings._v_2_15_plus:
        return [prog_name]
    try:
        prog_name = settings._prog_names[prog_name]
    except KeyError:
        # BUG FIX: the original executed `pass` for new-style names and then
        # raised unconditionally, so valid new-style names always failed;
        # the message was also missing a space after the program name.
        if prog_name not in settings._new_names:
            raise BEDToolsError(
                prog_name, prog_name + ' not a recognized BEDTools program')
    return [os.path.join(settings._bedtools_path, 'bedtools'), prog_name]
def call_bedtools(cmds, tmpfn=None, stdin=None, check_stderr=None, decode_output=True, encode_input=True):
    """
    Use subprocess.Popen to call BEDTools and catch any errors.
    Output goes to *tmpfn*, or, if None, output stays in subprocess.PIPE and
    can be iterated over.
    *stdin* is an optional file-like object that will be sent to
    subprocess.Popen.
    Prints some useful help upon getting common errors.
    *check_stderr* is a function that takes the stderr string as input and
    returns True if it's OK (that is, it's not really an error). This is
    needed, e.g., for calling fastaFromBed which will report that it has to
    make a .fai for a fasta file.
    *decode_output* should be set to False when you are iterating over a BAM
    file, where the data represent binary rather than text data.
    """
    # Dispatch below on the four (stream|file) input x output combinations.
    input_is_stream = stdin is not None
    output_is_stream = tmpfn is None
    # Translate the program name for the installed BEDTools version.
    _orig_cmds = cmds[:]
    cmds = []
    cmds.extend(_version_2_15_plus_names(_orig_cmds[0]))
    cmds.extend(_orig_cmds[1:])
    try:
        # coming from an iterator, sending as iterator
        if input_is_stream and output_is_stream:
            logger.debug(
                'helpers.call_bedtools(): input is stream, output is '
                'stream')
            logger.debug(
                'helpers.call_bedtools(): cmds=%s', ' '.join(cmds))
            p = subprocess.Popen(cmds,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE,
                                 bufsize=BUFSIZE)
            if encode_input:
                for line in stdin:
                    p.stdin.write(line.encode())
            else:
                for line in stdin:
                    p.stdin.write(line)
            # This is important to prevent deadlocks
            p.stdin.close()
            if decode_output:
                output = (i.decode('UTF-8') for i in p.stdout)
            else:
                output = (i for i in p.stdout)
            stderr = None
        # coming from an iterator, writing to file
        if input_is_stream and not output_is_stream:
            logger.debug(
                'helpers.call_bedtools(): input is stream, output is file')
            logger.debug(
                'helpers.call_bedtools(): cmds=%s', ' '.join(cmds))
            outfile = open(tmpfn, 'wb')
            p = subprocess.Popen(cmds,
                                 stdout=outfile,
                                 stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE,
                                 bufsize=BUFSIZE)
            if hasattr(stdin, 'read'):
                stdout, stderr = p.communicate(stdin.read())
            else:
                for item in stdin:
                    p.stdin.write(item.encode())
                stdout, stderr = p.communicate()
            output = tmpfn
            outfile.close()
        # coming from a file, sending as iterator
        if not input_is_stream and output_is_stream:
            logger.debug(
                'helpers.call_bedtools(): input is filename, '
                'output is stream')
            logger.debug(
                'helpers.call_bedtools(): cmds=%s', ' '.join(cmds))
            p = subprocess.Popen(cmds,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 bufsize=BUFSIZE)
            if decode_output:
                output = (i.decode('UTF-8') for i in p.stdout)
            else:
                output = (i for i in p.stdout)
            stderr = None
        # file-to-file
        if not input_is_stream and not output_is_stream:
            logger.debug(
                'helpers.call_bedtools(): input is filename, output '
                'is filename (%s)', tmpfn)
            logger.debug(
                'helpers.call_bedtools(): cmds=%s', ' '.join(cmds))
            outfile = open(tmpfn, 'wb')
            p = subprocess.Popen(cmds,
                                 stdout=outfile,
                                 stderr=subprocess.PIPE,
                                 bufsize=BUFSIZE)
            stdout, stderr = p.communicate()
            output = tmpfn
            outfile.close()
        # Check if it's OK using a provided function to check stderr. If it's
        # OK, dump it to sys.stderr so it's printed, and reset it to None so we
        # don't raise an exception
        if check_stderr is not None:
            if isinstance(stderr, bytes):
                stderr = stderr.decode('UTF_8')
            if check_stderr(stderr):
                sys.stderr.write(stderr)
                stderr = None
        if stderr:
            # Fix for issue #147. In general, we consider warnings to not be
            # fatal, so just show 'em and continue on.
            #
            # bedtools source has several different ways of showing a warning,
            # but they seem to all have "WARNING" in the first 20 or so
            # characters
            if isinstance(stderr, bytes):
                stderr = stderr.decode('UTF_8')
            if len(stderr) > 20 and "WARNING" in stderr[:20]:
                sys.stderr.write(stderr)
            else:
                raise BEDToolsError(subprocess.list2cmdline(cmds), stderr)
    except (OSError, IOError) as err:
        print('%s: %s' % (type(err), os.strerror(err.errno)))
        print('The command was:\n\n\t%s\n' % subprocess.list2cmdline(cmds))
        problems = {
            2: ('* Did you spell the command correctly?',
                '* Do you have BEDTools installed and on the path?'),
            13: ('* Do you have permission to write '
                 'to the output file ("%s")?' % tmpfn,),
            24: ('* Too many files open -- please submit '
                 'a bug report so that this can be fixed',)
        }
        # NOTE(review): errno values other than 2/13/24 would raise KeyError
        # from problems[err.errno] below — confirm this is acceptable.
        print('Things to check:')
        print('\n\t' + '\n\t'.join(problems[err.errno]))
        raise OSError('See above for commands that gave the error')
    # Either a generator over stdout lines or the tmpfn path, per above.
    return output
def set_bedtools_path(path=""):
    """
    Explicitly set the directory containing the BEDTools executables
    (intersectBed, subtractBed, etc) when they are not on the system path.
    Call with no argument or path="" to reset to the default system path.
    """
    settings._bedtools_path = path
def set_R_path(path=""):
    """
    Explicitly set the directory of the `R` installation when R is not on
    the system path.  Use path="" to reset to the default system path.
    """
    settings._R_path = path
def _check_sequence_stderr(x):
"""
If stderr created by fastaFromBed starts with 'index file', then don't
consider it an error.
"""
if isinstance(x, bytes):
x = x.decode('UTF-8')
if x.startswith('index file'):
return True
if x.startswith("WARNING"):
return True
return False
def _call_randomintersect(_self, other, iterations, intersect_kwargs,
shuffle_kwargs, report_iterations, debug,
_orig_processes):
"""
Helper function that list-ifies the output from randomintersection, s.t.
it can be pickled across a multiprocess Pool.
"""
return list(
_self.randomintersection(
other, iterations,
intersect_kwargs=intersect_kwargs,
shuffle_kwargs=shuffle_kwargs,
report_iterations=report_iterations,
debug=False, processes=None,
_orig_processes=_orig_processes)
)
def close_or_delete(*args):
    """
    Dispose of BedTool objects regardless of flavor: unlink file-based
    ones, close (and stop) stream-based ones.
    """
    for bt in args:
        fn = bt.fn
        if isinstance(fn, six.string_types):
            os.unlink(fn)
        elif hasattr(fn, 'close'):
            fn.close()
        if hasattr(fn, 'throw'):
            fn.throw(StopIteration)
def n_open_fds():
    """Count file descriptors held by the current process, via lsof.

    Counts lsof field lines of the form 'f<digits>' (one per open fd).
    """
    pid = os.getpid()
    procs = subprocess.check_output(
        ['lsof', '-w', '-Ff', '-p', str(pid)])
    # BUG FIX: check_output returns bytes on Python 3, where iterating
    # yields bytes lines and i[0] is an int — the "i[0] == 'f'" comparison
    # was always False, so the count was always 0.  Decode first.
    if isinstance(procs, bytes):
        procs = procs.decode('UTF-8')
    nprocs = 0
    for i in procs.splitlines():
        if i[1:].isdigit() and i[0] == 'f':
            nprocs += 1
    return nprocs
import re
coord_re = re.compile(
r"""
(?P<chrom>.+):
(?P<start>\d+)-
(?P<stop>\d+)
(?:\[(?P<strand>.)\])?""", re.VERBOSE)
def string_to_interval(s):
    """
    Convert "chrom:start-stop" or "chrom:start-stop[strand]" (zero-based
    coords) into an interval.  Non-string inputs (already intervals) are
    returned unchanged.
    """
    if not isinstance(s, six.string_types):
        return s
    m = coord_re.search(s)
    fields = [m.group('chrom'), m.group('start'), m.group('stop')]
    if m.group('strand'):
        # Pad with placeholder name/score so strand lands in field 6.
        fields.extend(['.', '0', m.group('strand')])
    return create_interval_from_list(fields)
class FisherOutput(object):
    """Parsed result of ``bedtools fisher``: contingency table + p-values."""

    def __init__(self, s, **kwargs):
        """
        fisher returns text results like::
            # Contingency Table
            #_________________________________________
            #           | not in -b    | in -b        |
            # not in -a | 3137160615   | 503          |
            #     in -a | 100          | 46           |
            #_________________________________________
            # p-values for fisher's exact test
            left    right   two-tail        ratio
            1.00000 0.00000 0.00000 2868973.922

        *s* may be a filename or an iterable of lines.
        """
        if isinstance(s, str):
            s = open(s).read()
        # BUG FIX: the 'next' check only matched Python 2 iterators;
        # Python 3 iterators expose __next__.
        if hasattr(s, 'next') or hasattr(s, '__next__'):
            s = ''.join(i for i in s)
        table = {
            'not in -a': {
                'not in -b': None,
                'in -b': None
            },
            'in -a': {
                'not in -b': None,
                'in -b': None,
            },
        }
        self.text = s
        lines = s.splitlines()
        for i in lines:
            # Per the header, the first data column is 'not in -b' and the
            # second is 'in -b'.
            # BUG FIX: the original unpacked the columns in swapped order,
            # storing each count under the wrong key.
            if 'not in -a' in i:
                _, not_in_b, in_b, _ = i.strip().split('|')
                table['not in -a']['not in -b'] = int(not_in_b)
                table['not in -a']['in -b'] = int(in_b)
            elif ' in -a' in i:
                _, not_in_b, in_b, _ = i.strip().split('|')
                table['in -a']['not in -b'] = int(not_in_b)
                table['in -a']['in -b'] = int(in_b)
        self.table = table
        # Last line holds the four statistics.
        left, right, two_tail, ratio = lines[-1].split()
        self.left_tail = float(left)
        self.right_tail = float(right)
        self.two_tail = float(two_tail)
        self.ratio = float(ratio)

    def __str__(self):
        return self.text

    def __repr__(self):
        return '<%s at %s>\n%s' % (self.__class__.__name__, id(self), self.text)
def internet_on(timeout=1):
    """Return True when genome.ucsc.edu answers within *timeout* seconds."""
    try:
        urllib.request.urlopen('http://genome.ucsc.edu', timeout=timeout)
    except urllib.error.URLError:
        return False
    return True
def get_chromsizes_from_ucsc(genome, saveas=None, mysql='mysql', timeout=None):
    """
    Download chrom size info for *genome* from UCSC and returns the dictionary.
    If you need the file, then specify a filename with *saveas* (the dictionary
    will still be returned as well).
    If ``mysql`` is not on your path, specify where to find it with
    *mysql=<path to mysql executable>*.
    *timeout* is how long to wait for a response; mostly used for testing.
    Example usage:
    >>> dm3_chromsizes = get_chromsizes_from_ucsc('dm3')
    >>> for i in sorted(dm3_chromsizes.items()):
    ...     print('{0}: {1}'.format(*i))
    chr2L: (0, 23011544)
    chr2LHet: (0, 368872)
    chr2R: (0, 21146708)
    chr2RHet: (0, 3288761)
    chr3L: (0, 24543557)
    chr3LHet: (0, 2555491)
    chr3R: (0, 27905053)
    chr3RHet: (0, 2517507)
    chr4: (0, 1351857)
    chrM: (0, 19517)
    chrU: (0, 10049037)
    chrUextra: (0, 29004656)
    chrX: (0, 22422827)
    chrXHet: (0, 204112)
    chrYHet: (0, 347038)
    """
    if not internet_on(timeout=timeout):
        raise ValueError('It appears you don\'t have an internet connection '
                         '-- unable to get chromsizes from UCSC')
    # Query UCSC's public MySQL mirror (anonymous read-only access).
    cmds = [mysql,
            '--user=genome',
            '--host=genome-mysql.cse.ucsc.edu',
            '-A',
            '-e',
            'select chrom, size from %s.chromInfo' % genome]
    try:
        p = subprocess.Popen(cmds,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             bufsize=1)
        stdout, stderr = p.communicate()
        # NOTE(review): stderr is reported but not treated as fatal here.
        if stderr:
            print(stderr)
            print('Commands were:\n')
            print((subprocess.list2cmdline(cmds)))
        # Skip the header row ("chrom size").
        lines = stdout.splitlines()[1:]
        d = {}
        for line in lines:
            if isinstance(line, bytes):
                line = line.decode('UTF-8')
            chrom, size = line.split()
            d[chrom] = (0, int(size))
        if saveas is not None:
            chromsizes_to_file(d, saveas)
        return d
    except OSError as err:
        # errno 2 == ENOENT: mysql executable not found.
        if err.errno == 2:
            raise OSError("Can't find mysql -- if you don't have it "
                          "installed, you'll have to get chromsizes "
                          " manually, or "
                          "specify the path with the 'mysql' kwarg.")
        else:
            raise
def chromsizes_to_file(chrom_sizes, fn=None):
    """
    Write a *chrom_sizes* dictionary to a file, one "chrom<TAB>stop" line
    per chromosome, sorted by name.  If *fn* is None a tempfile is created
    (deletable with pybedtools.cleanup()).  If *chrom_sizes* is a genome
    name, it is looked up first.  Returns the filename.
    """
    if fn is None:
        handle = tempfile.NamedTemporaryFile(prefix='pybedtools.',
                                             suffix='.tmp', delete=False)
        fn = handle.name
        filenames.TEMPFILES.append(fn)
    if isinstance(chrom_sizes, str):
        chrom_sizes = chromsizes(chrom_sizes)
    with open(fn, 'wt') as fout:
        for chrom, bounds in sorted(chrom_sizes.items()):
            fout.write('%s\t%s\n' % (chrom, bounds[1]))
    return fn
def chromsizes(genome):
    """
    Return the chromsizes dict for *genome*: looked up first in the bundled
    genome registry, then fetched from UCSC when not registered.

    Each value is a (start, stop) tuple rather than a bare length, so that
    randomization can be restricted to specified regions (e.g. the extent
    of a tiling array).
    """
    _missing = object()
    registered = getattr(genome_registry, genome, _missing)
    if registered is _missing:
        return get_chromsizes_from_ucsc(genome)
    return registered
# Delete this session's temp files when the interpreter exits.
atexit.register(cleanup)
|
import argparse
import datetime
from jflow.config_reader import JFlowConfigReader
def date(datestr):
    """argparse type: parse *datestr* using the JFlow-configured date format.

    Raises argparse.ArgumentTypeError when parsing (or reading the
    configured format) fails.
    """
    try:
        return datetime.datetime.strptime(
            datestr, JFlowConfigReader().get_date_format())
    except Exception:
        # BUG FIX: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; argparse only needs failures wrapped.
        raise argparse.ArgumentTypeError("'" + datestr + "' is an invalid date!")
|
"""@package dewberry
@brief script that identifies the incorrect CSHORE output
This software is provided free of charge under the New BSD License. Please see
the following license information:
Copyright (c) 2014, Dewberry
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Dewberry nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE DEWBERRY
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@author(s) Janghwoan Choi <jchoi@dewberry.com>
Jared Dorvinen <jdorvinen@dewberry.com>
4/13/2017 - Reformatted, rewrote directory walking method, added commentary, added logfile, and
updated syntax to be compatible with Python 3.x - JD
"""
import sys
import os
def is_number(string):
    """ Check whether *string* represents a valid number.

    Returns False for 'nan', 'inf' and '-inf'; True for anything float()
    or complex() can parse (so '1+2j' counts as a number).
    ref:
        http://stackoverflow.com/questions/354038/how-do-i-check-if-a-string-is-a-number-in-python
    """
    try:
        normalized = str(float(string))
    except ValueError:
        # Not parseable as a float -- it may still be a complex number.
        try:
            complex(string)
        except ValueError:
            return False
        return True
    # float() succeeded; reject the non-finite spellings.
    return normalized not in ("nan", "inf", "-inf")
def CheckCShoreOutput(inputfolderpath):
    """ CheckCShoreOutput(inputfolderpath)
    Examine every CSHORE ODOC output file under *inputfolderpath* and report
    any model simulation that did not execute through all input timesteps.
    Input:
        inputfolderpath - parent folder containing all CSHORE model outputs
    Outputs:
        writes checkCSHOREoutput.log in *inputfolderpath* and prints the
        path of any simulation that did not completely finish.
    """
    # BUG FIX: the original probed `if errormessage in locals()` inside a
    # try/except UnboundLocalError, which tested the *message string*
    # against the locals keys and only worked by accident; an explicit
    # flag states the intent and is checked once, after the whole walk.
    errors_found = False
    with open(os.path.join(inputfolderpath, 'checkCSHOREoutput.log'), 'w') as logfile:
        # Walk the directory structure from the bottom up.
        for root, dirs, files in os.walk(inputfolderpath, topdown=False):
            for filed in files:
                # 'ODOC' is the CSHORE output file we're checking.
                if filed != 'ODOC':
                    continue
                odocfile = os.path.join(root, filed)
                with open(odocfile, 'r') as odoc:
                    time_step_to_compare = ''
                    last_time_step = ''
                    for line in odoc:
                        parsed = line.strip().split()
                        # Six-token lines starting with a number are
                        # computed-timestep records.
                        if len(parsed) == 6 and is_number(parsed[0]):
                            last_time_step = parsed[0]
                        elif "on input bottom profile at TIME" in line:
                            time_step_to_compare = parsed[7]
                        elif "on bottom profile computed at TIME" in line:
                            time_step_to_compare = parsed[8]
                # Mismatch => the simulation stopped before the last timestep.
                if last_time_step != time_step_to_compare:
                    errors_found = True
                    errormessage = 'error: {0} {1} {2}\n'.format(odocfile,
                                                                 last_time_step,
                                                                 time_step_to_compare)
                    logfile.write(errormessage)
                    print(errormessage)
        if not errors_found:
            message = 'No errors found in CSHORE output files contained in directory\n{}'
            logfile.write(message.format(inputfolderpath))
            print(message.format(inputfolderpath))
    print("completed...")
# Command-line entry point: argv[1] is the parent folder of CSHORE outputs.
if __name__ == '__main__':
    INPUTFOLDERPATH = sys.argv[1]
    CheckCShoreOutput(INPUTFOLDERPATH)
|
import collections
from senlin.common import exception as exc
from senlin.common.i18n import _
class BaseConstraint(collections.abc.Mapping):
    """Base class for schema constraints, exposed as a read-only mapping.

    Subclasses supply ``desc()``, ``_validate()``, ``_error()`` and
    ``_constraint()``; this base maps the keys 'type' and 'constraint'
    onto the class name and the serialized constraint respectively.
    """
    KEYS = (
        TYPE, CONSTRAINT,
    ) = (
        'type', 'constraint',
    )
    def __str__(self):
        """Utility method for generating schema docs."""
        return self.desc()
    def validate(self, value, schema=None, context=None):
        """Validate *value*, raising ValueError when the check fails."""
        ok = self._validate(value, schema=schema, context=context)
        if not ok:
            raise ValueError(self._error(value))
    @classmethod
    def _name(cls):
        # The constraint "type" is simply the concrete class name.
        return cls.__name__
    def __getitem__(self, key):
        if key == self.TYPE:
            return self._name()
        if key == self.CONSTRAINT:
            return self._constraint()
        raise KeyError(key)
    def __iter__(self):
        # Yield only the keys that can actually be resolved on this instance.
        for key in self.KEYS:
            try:
                self[key]
            except KeyError:
                continue
            yield key
    def __len__(self):
        return sum(1 for _ in iter(self))
class AllowedValues(BaseConstraint):
    """Constraint restricting a value to a fixed set of allowed values."""
    def __init__(self, allowed_values):
        """Initialize from *allowed_values*, a non-string sequence.

        :param allowed_values: sequence (list/tuple) of permitted values.
            A bare string is rejected because it would be treated as a
            sequence of characters.
        :raises exc.ESchema: if allowed_values is not an acceptable sequence.
        """
        if (not isinstance(allowed_values, collections.abc.Sequence) or
                isinstance(allowed_values, str)):
            # Strings are explicitly rejected above, so the message must not
            # suggest they are acceptable (the old text said "or a string").
            msg = _('AllowedValues must be a list')
            raise exc.ESchema(message=msg)
        self.allowed = tuple(allowed_values)
    def desc(self):
        """Return a human readable description of the constraint."""
        values = ', '.join(str(v) for v in self.allowed)
        return _('Allowed values: %s') % values
    def _error(self, value):
        # Failure text used by BaseConstraint.validate().
        values = ', '.join(str(v) for v in self.allowed)
        return _("'%(value)s' must be one of the allowed values: "
                 "%(allowed)s") % dict(value=value, allowed=values)
    def _validate(self, value, schema=None, context=None):
        """Return True when *value* (or each of its items) is allowed."""
        if isinstance(value, list):
            # Every element of a list value must be allowed.
            return all(v in self.allowed for v in value)
        # try implicit type conversion
        if schema is not None:
            _allowed = tuple(schema.to_schema_type(v)
                             for v in self.allowed)
            return schema.to_schema_type(value) in _allowed
        return value in self.allowed
    def _constraint(self):
        # Serializable form of the constraint for schema documentation.
        return list(self.allowed)
|
import logging
from cim.common import h
from cim.common import LoggingObject
from cim import CIM
from cim import is_index_page_number_valid
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("cim.grapher")
class Grapher(LoggingObject):
    """Emit a Graphviz "dot" description of a CIM repository index tree.

    All output is written to stdout via print(); the caller is expected
    to pipe it into `dot`.  NOTE: uses `xrange`, so this is Python 2 code.
    """
    def __init__(self, cim):
        # cim: CIM repository object exposing `logical_index_store`.
        super(Grapher, self).__init__()
        self._cim = cim
    @staticmethod
    def _format_index_page(page):
        """Build the record-shaped node label for one index page.

        The label defines a <header> port plus one <child_i> port per key
        so that edges can attach to individual entries.
        """
        ret = []
        ret.append("<header> logical page: {:s} | physical page: {:s} | count: {:s}".format(
            h(page.logical_page_number),
            h(page.physical_page_number),
            h(page.key_count)))
        for i in xrange(page.key_count):
            key = page.get_key(i)
            ret.append(" | {{ {key:s} | <child_{i:s}> {child:s} }}".format(
                key=key.human_format,
                i=h(i),
                child=h(page.get_child(i))))
        return "".join(ret)
    def _graph_index_page_rec(self, page):
        """Print this page's node, recurse into children, then print edges."""
        print(" \"node{:s}\" [".format(h(page.logical_page_number)))
        print(" label = \"{:s}\"".format(self._format_index_page(page)))
        print(" shape = \"record\"")
        print(" ];")
        key_count = page.key_count
        # A page with N keys has N+1 children; recurse into each valid one.
        for i in xrange(key_count + 1):
            child_page_number = page.get_child(i)
            if not is_index_page_number_valid(child_page_number):
                continue
            child_page = self._cim.logical_index_store.get_page(child_page_number)
            self._graph_index_page_rec(child_page)
        for i in xrange(key_count):
            child_page_number = page.get_child(i)
            if not is_index_page_number_valid(child_page_number):
                continue
            print(" \"node{num:s}\":child_{i:s} -> \"node{child:s}\"".format(
                num=h(page.logical_page_number),
                i=h(i),
                child=h(child_page_number)))
        # last entry has two links, to both less and greater children nodes
        # NOTE(review): the final (greater-than) child attaches to port
        # child_{key_count-1} because the label only defines ports 0..N-1.
        final_child_index = page.get_child(key_count)
        if is_index_page_number_valid(final_child_index):
            print(" \"node{num:s}\":child_{i:s} -> \"node{child:s}\"".format(
                num=h(page.logical_page_number),
                i=h(key_count - 1),
                child=h(final_child_index)))
    def graph_index_from_page(self, page):
        """Print a complete digraph rooted at *page*."""
        print("digraph g {")
        print(" graph [ rankdir = \"LR\" ];")
        print(" node [")
        print(" fontsize = \"16\"")
        print(" shape = \"ellipse\"")
        print(" ];")
        print(" edge [];")
        self._graph_index_page_rec(page)
        print("}")
    def graph_index(self):
        """Graph the whole index starting from the root page."""
        root = self._cim.logical_index_store.root_page
        self.graph_index_from_page(root)
def main(type_, path, page_number=None):
    """Render a Graphviz graph of a CIM repository index to stdout.

    type_: mapping type, "xp" or "win7".
    path: path to the CIM repository.
    page_number: optional logical page to start from; defaults to the
        whole index (root page).
    """
    if type_ not in ("xp", "win7"):
        raise RuntimeError("Invalid mapping type: {:s}".format(type_))
    repo = CIM(type_, path)
    grapher = Grapher(repo)
    # Touch the root page up front (kept from the original, where it also
    # served the commented-out debug prints).
    root = repo.logical_index_store.root_page
    if page_number is None:
        grapher.graph_index()
    else:
        store = repo.logical_index_store
        page = store.get_page(int(page_number))
        grapher.graph_index_from_page(page)
if __name__ == "__main__":
    # NOTE(review): logging was already configured at import time above;
    # this second basicConfig() call is a no-op on a configured root logger.
    logging.basicConfig(level=logging.DEBUG)
    import sys
    # Usage: main(type_, path[, page_number]) from the command line.
    main(*sys.argv[1:])
|
"""Simple setup script"""
import os
import subprocess
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
# NOTE(review): `subprocess` and `CppExtension` are imported but unused here.
# Directory containing this setup.py, used to build absolute source paths.
abspath = os.path.dirname(os.path.realpath(__file__))
# Runtime dependencies, one requirement per line.
with open("requirements.txt") as f:
    requirements = f.read().splitlines()
print(find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]))
setup(name="dlrm",
      package_dir={'dlrm': 'dlrm'},
      version="1.0.0",
      description="Reimplementation of Facebook's DLRM",
      packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
      install_requires=requirements,
      zip_safe=False,
      # Custom CUDA kernels (dot-based interaction and gather ops).
      ext_modules=[
          CUDAExtension(name="dlrm.cuda_ext",
                        sources=[
                            os.path.join(abspath, "src/pytorch_ops.cpp"),
                            os.path.join(abspath, "src/dot_based_interact_pytorch_types.cu"),
                            os.path.join(abspath, "src/gather_gpu.cu")
                        ],
                        extra_compile_args={
                            'cxx': [],
                            # Targets Volta (sm_70) and Ampere (sm_80).
                            'nvcc' : [
                                '-DCUDA_HAS_FP16=1',
                                '-D__CUDA_NO_HALF_OPERATORS__',
                                '-D__CUDA_NO_HALF_CONVERSIONS__',
                                '-D__CUDA_NO_HALF2_OPERATORS__',
                                '-gencode', 'arch=compute_70,code=sm_70',
                                '-gencode', 'arch=compute_70,code=compute_70',
                                '-gencode', 'arch=compute_80,code=sm_80']
                        })
      ],
      cmdclass={"build_ext": BuildExtension})
|
import gnupg
import logging
import tempfile
import time
import urllib2
from ss2config import *
import selfserve.exceptions
logger = logging.getLogger("%s.lib.keys" % LOGGER_NAME)
HTTP_NOT_FOUND = 404
def fetch_key(availid):
    """Fetch the PGP public key text for AVAILID from the key server.

    KEY_FOR_AVAILID_URL comes from the ss2config star-import.  Returns the
    raw response body, or None when the server answers 404 (no key on
    file).  Any other HTTP error propagates to the caller.
    """
    try:
        return urllib2.urlopen(KEY_FOR_AVAILID_URL % availid).read()
    except urllib2.HTTPError, he:
        if he.getcode() == HTTP_NOT_FOUND:
            return None
        else:
            raise
def _fingerprint_for_gpg(fingerprint):
# Note: this works even if a slash is not present
slash = fingerprint.find('/')
return fingerprint[slash+1:].replace(' ', '')
def maybe_encrypt(plaintext, fingerprints, keys):
    """If possible, encrypt PLAINTEXT to the subset of the given KEYS that
    are also present in FINGERPRINTS. Return the new text and a boolean
    indicating whether encryption was done."""
    # Can we encrypt?
    if keys is None or fingerprints is None:
        return (plaintext, False)
    # NOTE(review): the timestamp is only used to name the throwaway GnuPG
    # home dir; presumably some other job reaps stale selfserve-gnupghome.*
    # dirs under STATE_DIR -- confirm.
    expiry = time.time() - 60 # one minute
    homedir = tempfile.mkdtemp(suffix='.%d' % expiry, dir=STATE_DIR, prefix="selfserve-gnupghome.")
    pgp = gnupg.GPG(gnupghome=homedir)
    # Load the candidate public keys into the temporary keyring.
    pgp.import_keys(keys)
    # Normalize "keyid/fingerprint" strings into bare hex fingerprints.
    fingerprints = map(_fingerprint_for_gpg, fingerprints)
    ciphertext = pgp.encrypt(plaintext, fingerprints, always_trust=True)
    if not ciphertext:
        # python-gnupg returns a falsy result object when encryption fails.
        raise selfserve.exceptions.EncryptionError(ciphertext)
    return (str(ciphertext), True)
|
class whrandom:
    """Wichmann-Hill pseudo-random number generator (legacy Python 2 code).

    Three coupled linear congruential generators are combined modulo 1.0.
    Not suitable for security purposes.
    """
    #
    # Initialize an instance.
    # Without arguments, initialize from current time.
    # With arguments (x, y, z), initialize from them.
    #
    def __init__(self, x = 0, y = 0, z = 0):
        self.seed(x, y, z)
    #
    # Set the seed from (x, y, z).
    # These must be integers in the range [0, 256).
    #
    def seed(self, x = 0, y = 0, z = 0):
        if not type(x) == type(y) == type(z) == type(0):
            raise TypeError, 'seeds must be integers'
        if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256):
            raise ValueError, 'seeds must be in range(0, 256)'
        if 0 == x == y == z:
            # Initialize from current time
            import time
            t = long(time.time() * 256)
            # Fold the high bits into the low 24 bits, then split into
            # three byte-sized seed components.
            t = int((t&0xffffff) ^ (t>>24))
            t, x = divmod(t, 256)
            t, y = divmod(t, 256)
            t, z = divmod(t, 256)
        # Zero is a poor seed, so substitute 1
        self._seed = (x or 1, y or 1, z or 1)
    #
    # Get the next random number in the range [0.0, 1.0).
    #
    def random(self):
        x, y, z = self._seed
        #
        x = (171 * x) % 30269
        y = (172 * y) % 30307
        z = (170 * z) % 30323
        #
        self._seed = x, y, z
        #
        return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0
    #
    # Get a random number in the range [a, b).
    #
    def uniform(self, a, b):
        return a + (b-a) * self.random()
    #
    # Get a random integer in the range [a, b] including both end points.
    #
    def randint(self, a, b):
        return a + int(self.random() * (b+1-a))
    #
    # Choose a random element from a non-empty sequence.
    #
    def choice(self, seq):
        return seq[int(self.random() * len(seq))]
# Module-level convenience API: the functions below all share one hidden
# generator instance, mirroring the historical `whrandom` module interface.
_inst = whrandom()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
randint = _inst.randint
choice = _inst.choice
|
import glob
import os
import sys

# Line-ending byte sequences.
CR = b'\r'
CRLF = b'\r\n'
LF = b'\n'


def sanitycheck(pattern, allow_utf8 = False, allow_eol = (CRLF, LF), indent = 1):
    """Check every file matching *pattern* for whitespace/encoding hygiene.

    Detected problems: empty file, missing final newline, TAB characters,
    non-ASCII bytes, bare-CR or inconsistent/disallowed line endings,
    indentation that is not a multiple of *indent* spaces, and trailing
    whitespace.  Failing files are reported on stderr.

    :param pattern: glob pattern; '**' is matched recursively.
    :param allow_utf8: permit a UTF-8 BOM at the start of the first line.
    :param allow_eol: tuple of acceptable line endings (CRLF and/or LF).
    :param indent: indentation unit in spaces; 0 forbids leading spaces.
    :return: the number of files that failed at least one check.
    """
    error_count = 0
    for filename in glob.glob(pattern, recursive=True):
        if not os.path.isfile(filename):
            continue
        with open(filename, 'rb') as file:
            content = file.read()
        error = []
        eol = None
        lineno = 1
        if not content:
            error.append(' Empty file found')
        elif content[-1] != 10: # LF
            error.append(' Missing a blank line before EOF')
        for line in content.splitlines(True):
            # Drop a permitted BOM on line 1 so the non-ASCII check below
            # does not flag it.
            if allow_utf8 and lineno == 1 and line.startswith(b'\xef\xbb\xbf'):
                line = line[3:]
            # 9 == ord('\t'); the previous code compared against 7 (BEL)
            # and therefore never detected TAB characters.
            if any(b == 9 for b in line):
                error.append(' TAB found at Ln:{} {}'.format(lineno, line))
            if any(b > 127 for b in line):
                error.append(' Non-ASCII character found at Ln:{} {}'.format(lineno, line))
            # Classify and strip the line ending; the first line fixes the
            # expected ending for the rest of the file.
            if line[-2:] == CRLF:
                if not eol:
                    eol = CRLF
                elif eol != CRLF:
                    error.append(' Inconsistent line ending found at Ln:{} {}'.format(lineno, line))
                line = line[:-2]
            elif line[-1:] == LF:
                if not eol:
                    eol = LF
                elif eol != LF:
                    error.append(' Inconsistent line ending found at Ln:{} {}'.format(lineno, line))
                line = line[:-1]
            elif line[-1:] == CR:
                error.append(' CR found at Ln:{} {}'.format(lineno, line))
                line = line[:-1]
            if eol:
                if eol not in allow_eol:
                    # Disallowed ending: report once and stop scanning.
                    error.append(' Line ending {} not allowed at Ln:{}'.format(eol, lineno))
                    break
            if line.startswith(b' '):
                spc_count = 0
                for c in line:
                    if c != 32:
                        break
                    spc_count += 1
                if not indent or spc_count % indent:
                    error.append(' {} SPC found at Ln:{} {}'.format(spc_count, lineno, line))
            if line[-1:] == b' ' or line[-1:] == b'\t':
                error.append(' Trailing space found at Ln:{} {}'.format(lineno, line))
            lineno += 1
        if error:
            error_count += 1
            print('{} [FAIL]'.format(filename), file=sys.stderr)
            for msg in error:
                print(msg, file=sys.stderr)
        else:
            # print('{} [PASS]'.format(filename))
            pass
    return error_count
# Run every configured check, accumulate the number of failing files, and
# use that count as the process exit status (0 == clean tree).
_CHECKS = (
    ('.editorconfig', dict(allow_eol=(LF,), indent=0)),
    ('**/Dockerfile', dict(allow_eol=(LF,), indent=2)),
    ('**/*.cmd', dict(allow_eol=(CRLF,), indent=2)),
    ('**/*.config', dict(allow_utf8=True, allow_eol=(LF,), indent=2)),
    ('**/*.cs', dict(allow_utf8=True, allow_eol=(LF,))),
    ('**/*.cshtml', dict(allow_utf8=True, allow_eol=(LF,), indent=4)),
    ('**/*.csproj', dict(allow_utf8=True, allow_eol=(LF,), indent=2)),
    ('**/*.htm', dict(allow_eol=(LF,), indent=4)),
    ('**/*.html', dict(allow_eol=(LF,), indent=4)),
    ('**/*.md', dict(allow_eol=(LF,))),
    ('**/*.proj', dict(allow_eol=(LF,), indent=2)),
    ('**/*.props', dict(allow_eol=(LF,), indent=2)),
    ('**/*.py', dict(allow_eol=(LF,), indent=4)),
    ('**/*.ruleset', dict(allow_utf8=True, allow_eol=(LF,), indent=2)),
    ('**/*.sln', dict(allow_utf8=True, allow_eol=(LF,), indent=4)),
    ('**/*.targets', dict(allow_eol=(LF,), indent=2)),
    ('**/*.xml', dict(allow_eol=(LF,), indent=4)),
    ('**/*.yml', dict(allow_eol=(LF,), indent=2)),
)
retval = 0
for _pattern, _kwargs in _CHECKS:
    retval += sanitycheck(_pattern, **_kwargs)
sys.exit(retval)
|
""" Make reference distribution over the range 1 to 100
Usage:
> cumulative_dist.dat
simeon@RottenApple ld4l-cul-usage>git mv cumulative_dist.dat reference_dist.dat
simeon@RottenApple ld4l-cul-usage>./make_reference_dist.py analysis/harvard_stackscore_distribution.dat > reference_dist.dat
simeon@RottenApple ld4l-cul-usage>head reference_dist.dat
10.98078307
20.00551701
30.00350775
40.00177471
50.00128962
60.00095700
"""
import sys
def read_dist(file):
    """Read a StackScore distribution file; return (dist, total).

    Expected format (comment lines start with '#'):
    # Distribution of StackScore values at Harvard, 2015-06-24
    #
    #record_count stackscore fraction_of_records
    13560805 1 0.98078307
    76281 2 0.00551701
    ...
    140 100 0.00001013

    dist maps stackscore (1..100) -> record count; total is the sum of
    all counts.  Raises Exception if a stackscore is out of range.
    """
    dist = {}
    total = 0
    # Context manager guarantees the handle is closed; the original code
    # opened the file and never closed it.
    with open(file, 'r') as fh:
        for line in fh:
            if (line.startswith('#')):
                continue
            (count, stackscore, frac) = line.split()
            count = int(count)
            stackscore = int(stackscore)
            if (stackscore < 1 or stackscore > 100):
                raise Exception("Stackscore out of range in: %s" % line)
            dist[stackscore] = count
            total += count
    return (dist, total)
# Script entry: read the distribution named on the command line and write
# the normalized reference distribution (score -> fraction) to stdout.
dfile = sys.argv[1]
(dist,total) = read_dist(dfile)
# NOTE(review): `cumulative` is initialized but never updated or printed;
# presumably left over from an earlier cumulative-output version.
cumulative = 0
print "# Reference distribution over StackScore 1..100"
print "# (derived from distribution in %s)" % (dfile)
print "#\n#stackscore fraction"
# Assumes the input file covers every score 1..100 (KeyError otherwise).
for ss in xrange(1,101):
    print "%d\t%.8f" % (ss,float(dist[ss])/total)
|
def is_palindrome(str):
    """Return True if the sequence reads the same forwards and backwards.

    Works on any sliceable sequence (str, list, tuple); comparison is
    case-sensitive.  (The parameter name shadows the builtin ``str``;
    kept as-is for backward compatibility with keyword callers.)
    """
    # A sequence is a palindrome iff it equals its own reverse.  This
    # replaces the original recursion, which copied O(n) slices per level
    # (O(n^2) total) and raised RecursionError on long inputs.
    return str == str[::-1]
|
import httplib
import copy
import socket
import random
import os
import logging
import threading
import sys
from time import sleep
if not sys.version.startswith('2.4'):
from urlparse import urlparse
else:
# python 2.4
from windmill.tools.urlparse_25 import urlparse
logger = logging.getLogger(__name__)
import windmill
from windmill.server import proxy
from windmill.dep import wsgi_jsonrpc
from windmill.dep import wsgi_xmlrpc
from windmill.dep import wsgi_fileserver
import jsmin
START_DST_PORT = 32000
CURRENT_DST_PORT = [random.randint(32000, 34000)]
def reconstruct_url(environ):
    """Rebuild the full request URL from a WSGI environ (PEP 333 recipe).

    The result is also cached in environ['reconstructed_url'] so later
    handlers can reuse it without recomputing.
    """
    # From WSGI spec, PEP 333.  (The unused `from urllib import quote`
    # import was removed.)
    url = environ['wsgi.url_scheme']+'://'
    if environ.get('HTTP_HOST'):
        url += environ['HTTP_HOST']
    else:
        url += environ['SERVER_NAME']
        # Append the port only when it differs from the scheme default.
        if environ['wsgi.url_scheme'] == 'https':
            if environ['SERVER_PORT'] != '443':
                url += ':' + environ['SERVER_PORT']
        else:
            if environ['SERVER_PORT'] != '80':
                url += ':' + environ['SERVER_PORT']
    url += environ.get('SCRIPT_NAME','')
    url += environ.get('PATH_INFO','')
    # Fix ;arg=value in url
    # `!= -1` instead of `is not -1`: identity comparison against an int
    # literal relies on CPython small-int caching and is not guaranteed.
    if url.find('%3B') != -1:
        url, arg = url.split('%3B', 1)
        url = ';'.join([url, arg.replace('%3D', '=')])
    # Stick query string back in
    if environ.get('QUERY_STRING'):
        url += '?' + environ['QUERY_STRING']
    # Stick it in environ for convenience
    environ['reconstructed_url'] = url
    return url
# Wire the proxy application up to the plain HTTP connection class;
# `add_namespace` is populated later by make_windmill_server().
HTTPConnection = httplib.HTTPConnection
WindmillProxyApplication = proxy.WindmillProxyApplication
WindmillProxyApplication.ConnectionClass = HTTPConnection
add_namespace = None
class WindmillChooserApplication(object):
    """Application to handle choosing the proper application to handle each request"""
    def __init__(self, apps, proxy):
        """Index *apps* by their `ns` attribute; unmatched requests go to *proxy*."""
        self.namespaces = dict([ (arg.ns, arg) for arg in apps ])
        self.proxy = proxy
    def add_namespace(self, name, application):
        """Add an application to a specific url namespace in windmill"""
        self.namespaces[name] = application
    def handler(self, environ, start_response):
        """Windmill app chooser"""
        # NOTE(review): throttles every request by 200ms; presumably a
        # workaround for a timing issue elsewhere -- confirm before removing.
        sleep(.2)
        reconstruct_url(environ)
        for key in self.namespaces:
            # `!= -1` instead of `is not -1`: identity comparison with an
            # int literal is implementation-defined.
            if environ['PATH_INFO'].find('/'+key+'/') != -1:
                logger.debug('dispatching request %s to %s' % (environ['reconstructed_url'], key))
                return self.namespaces[key](environ, start_response)
        logger.debug('dispatching request %s to WindmillProxyApplication' % reconstruct_url(environ))
        response = self.proxy(environ, start_response)
        return response
    def __call__(self, environ, start_response):
        # Iterate so the response body is streamed regardless of whether the
        # chosen app returned a list or a generator.
        response = self.handler(environ, start_response)
        for x in response:
            yield x
class WindmillCompressor(object):
    """Full JavaScript Compression Library"""
    # Ordered list of JS sources (relative to js_path); order matters
    # because later files depend on symbols defined by earlier ones.
    js_file_list = [
        ('lib', 'firebug', 'pi.js',),
        ('lib', 'firebug', 'firebug-lite.js',),
        ('lib', 'json2.js',),
        ('lib', 'browserdetect.js',),
        ('wm', 'windmill.js',), # fleegix
        ('lib', 'getXPath.js',),
        ('lib', 'elementslib.js',),
        ('lib', 'js-xpath.js',),
        ('controller', 'controller.js',),
        ('controller', 'commands.js',),
        ('controller', 'asserts.js',),
        ('controller', 'waits.js',), # fleegix
        ('controller', 'flex.js',),
        ('wm', 'registry.js',),
        ('extensions', 'extensions.js',),
        ('wm', 'utils.js',), # fleegix
        ('wm', 'ide', 'ui.js',), # fleegix
        ('wm', 'ide', 'recorder.js',), # fleegix
        ('wm', 'ide', 'remote.js',), # fleegix
        ('wm', 'ide', 'dx.js',), # fleegix
        ('wm', 'ide', 'ax.js',), # fleegix
        ('wm', 'ide', 'results.js',),
        ('wm', 'xhr.js',), # fleegix
        ('wm', 'metrics.js',),
        ('wm', 'events.js',),
        ('wm', 'global.js',), # fleegix
        ('wm', 'jstest.js',), # fleegix
        ('wm', 'load.js',),
    ]
    def __init__(self, js_path, enabled=True):
        # Kick off minification on a background thread so server startup is
        # not blocked; __call__ waits for the result when first requested.
        self.enabled = enabled
        self.js_path = js_path
        self.compressed_windmill = None
        if enabled:
            self._thread = threading.Thread(target=self.compress_file)
            self._thread.start()
    def compress_file(self):
        """Concatenate and jsmin-compress every file in js_file_list."""
        compressed_windmill = ''
        for filename in self.js_file_list:
            compressed_windmill += jsmin.jsmin(open(os.path.join(self.js_path, *filename), 'r').read())
        self.compressed_windmill = compressed_windmill
    def __call__(self, environ, start_response):
        """WSGI entry point: serve the compressed script, or 404 if disabled."""
        if not self.enabled:
            start_response('404 Not Found', [('Content-Type', 'text/plain',), ('Content-Length', '0',)])
            return ['']
        # if self.compressed_windmill is None:
        #     self.compressed_windmill = ''
        #     for filename in self.js_file_list:
        #         self.compressed_windmill += jsmin.jsmin(open(os.path.join(self.js_path, *filename), 'r').read())
        # Busy-wait until the background compression thread has finished.
        while not self.compressed_windmill:
            sleep(.15)
        start_response('200 Ok', [('Content-Type', 'application/x-javascript',),
                                  ('Content-Length', str(len(self.compressed_windmill)),)])
        return [self.compressed_windmill]
def make_windmill_server(http_port=None, js_path=None, compression_enabled=None):
    """Assemble and return the windmill HTTP(S) server instance.

    Arguments left as None fall back to the corresponding values in
    windmill.settings.  The returned httpd also carries the controller
    queue and RPC helper instances as attributes for convenience.
    """
    if http_port is None:
        http_port = windmill.settings['SERVER_HTTP_PORT']
    if js_path is None:
        js_path = windmill.settings['JS_PATH']
    if compression_enabled is None:
        compression_enabled = not windmill.settings['DISABLE_JS_COMPRESS']
    # Start up all the convergence objects
    import convergence
    test_resolution_suite = convergence.TestResolutionSuite()
    command_resolution_suite = convergence.CommandResolutionSuite()
    queue = convergence.ControllerQueue(command_resolution_suite, test_resolution_suite)
    xmlrpc_methods_instance = convergence.XMLRPCMethods(queue, test_resolution_suite, command_resolution_suite)
    jsonrpc_methods_instance = convergence.JSONRPCMethods(queue, test_resolution_suite, command_resolution_suite)
    # Start up all the wsgi applications
    windmill_serv_app = wsgi_fileserver.WSGIFileServerApplication(root_path=js_path, mount_point='/windmill-serv/')
    # NOTE(review): windmill_proxy_app is created but never registered below
    # (the server gets WindmillHTTPSProxyApplication instead) -- confirm.
    windmill_proxy_app = WindmillProxyApplication()
    windmill_xmlrpc_app = wsgi_xmlrpc.WSGIXMLRPCApplication(instance=xmlrpc_methods_instance)
    windmill_jsonrpc_app = wsgi_jsonrpc.WSGIJSONRPCApplication(instance=jsonrpc_methods_instance)
    windmill_compressor_app = WindmillCompressor(os.path.join(js_path, 'js'), compression_enabled)
    # `ns` is the URL namespace the chooser application dispatches on.
    windmill_serv_app.ns = 'windmill-serv'
    windmill_xmlrpc_app.ns = 'windmill-xmlrpc'
    windmill_jsonrpc_app.ns = 'windmill-jsonrpc'
    windmill_compressor_app.ns = 'windmill-compressor'
    global add_namespace
    import https
    if windmill.has_ssl:
        import certificate
        cc = certificate.CertificateCreator()
    else:
        # Without SSL support there is no certificate creator.
        cc = None
    httpd = https.WindmillHTTPServer(('0.0.0.0', http_port),
                                     https.WindmillHTTPRequestHandler, cc,
                                     apps=[windmill_serv_app, windmill_jsonrpc_app,
                                           windmill_xmlrpc_app, windmill_compressor_app],
                                     proxy=https.WindmillHTTPSProxyApplication())
    # Re-export the server's namespace registration hook at module level.
    add_namespace = httpd.add_namespace
    # Attach some objects to httpd for convenience
    httpd.controller_queue = queue
    httpd.test_resolution_suite = test_resolution_suite
    httpd.command_resolution_suite = command_resolution_suite
    httpd.xmlrpc_methods_instance = xmlrpc_methods_instance
    httpd.jsonrpc_methods_instance = jsonrpc_methods_instance
    return httpd
|
"""The backups api."""
import webob
from webob import exc
from cinder.api import common
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.views import backups as backup_views
from cinder.api import xmlutil
from cinder import backup as backupAPI
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import utils
LOG = logging.getLogger(__name__)
def make_backup(elem):
    """Attach the standard backup attributes to an XML template element."""
    for attr in ('id', 'status', 'size', 'container', 'volume_id',
                 'object_count', 'availability_zone', 'created_at',
                 'name', 'description', 'fail_reason'):
        elem.set(attr)
def make_backup_restore(elem):
    """Attach the restore-response attributes to an XML template element."""
    for attr in ('backup_id', 'volume_id'):
        elem.set(attr)
class BackupTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single backup (show/create)."""
    def construct(self):
        # The selector picks the 'backup' sub-dict out of the response.
        root = xmlutil.TemplateElement('backup', selector='backup')
        make_backup(root)
        alias = Backups.alias
        namespace = Backups.namespace
        return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace})
class BackupsTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a list of backups (index/detail)."""
    def construct(self):
        root = xmlutil.TemplateElement('backups')
        # One <backup> element per entry in the 'backups' list.
        elem = xmlutil.SubTemplateElement(root, 'backup', selector='backups')
        make_backup(elem)
        alias = Backups.alias
        namespace = Backups.namespace
        return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace})
class BackupRestoreTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a backup-restore response."""
    def construct(self):
        root = xmlutil.TemplateElement('restore', selector='restore')
        make_backup_restore(root)
        alias = Backups.alias
        namespace = Backups.namespace
        return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace})
class CreateDeserializer(wsgi.MetadataXMLDeserializer):
    """Deserialize an XML backup-create request body."""
    def default(self, string):
        """Parse *string* and return the request body dict."""
        dom = utils.safe_minidom_parse_string(string)
        return {'body': {'backup': self._extract_backup(dom)}}
    def _extract_backup(self, node):
        """Collect the recognized, non-empty attributes of <backup>."""
        backup_node = self.find_first_child_named(node, 'backup')
        attributes = ('container', 'display_name',
                      'display_description', 'volume_id')
        return dict((attr, backup_node.getAttribute(attr))
                    for attr in attributes
                    if backup_node.getAttribute(attr))
class RestoreDeserializer(wsgi.MetadataXMLDeserializer):
    """Deserialize an XML backup-restore request body."""
    def default(self, string):
        """Parse *string* and return the request body dict."""
        dom = utils.safe_minidom_parse_string(string)
        return {'body': {'restore': self._extract_restore(dom)}}
    def _extract_restore(self, node):
        """Collect volume_id (when present) off the <restore> element."""
        restore_node = self.find_first_child_named(node, 'restore')
        volume_id = restore_node.getAttribute('volume_id')
        return {'volume_id': volume_id} if volume_id else {}
class BackupsController(wsgi.Controller):
    """The Backups API controller for the OpenStack API."""
    _view_builder_class = backup_views.ViewBuilder
    def __init__(self):
        # Service-layer API used by every handler below.
        self.backup_api = backupAPI.API()
        super(BackupsController, self).__init__()
    @wsgi.serializers(xml=BackupTemplate)
    def show(self, req, id):
        """Return data about the given backup."""
        LOG.debug(_('show called for member %s'), id)
        context = req.environ['cinder.context']
        try:
            backup = self.backup_api.get(context, backup_id=id)
        except exception.BackupNotFound as error:
            # 404 when the id does not match an existing backup.
            raise exc.HTTPNotFound(explanation=unicode(error))
        return self._view_builder.detail(req, backup)
    def delete(self, req, id):
        """Delete a backup.  Returns 202; the delete runs asynchronously."""
        LOG.debug(_('delete called for member %s'), id)
        context = req.environ['cinder.context']
        LOG.audit(_('Delete backup with id: %s'), id, context=context)
        try:
            self.backup_api.delete(context, id)
        except exception.BackupNotFound as error:
            raise exc.HTTPNotFound(explanation=unicode(error))
        except exception.InvalidBackup as error:
            # e.g. the backup is not in a deletable state.
            raise exc.HTTPBadRequest(explanation=unicode(error))
        return webob.Response(status_int=202)
    @wsgi.serializers(xml=BackupsTemplate)
    def index(self, req):
        """Returns a summary list of backups."""
        return self._get_backups(req, is_detail=False)
    @wsgi.serializers(xml=BackupsTemplate)
    def detail(self, req):
        """Returns a detailed list of backups."""
        return self._get_backups(req, is_detail=True)
    def _get_backups(self, req, is_detail):
        """Returns a list of backups, transformed through view builder."""
        context = req.environ['cinder.context']
        backups = self.backup_api.get_all(context)
        # Honor pagination limit/marker parameters from the request.
        limited_list = common.limited(backups, req)
        if is_detail:
            backups = self._view_builder.detail_list(req, limited_list)
        else:
            backups = self._view_builder.summary_list(req, limited_list)
        return backups
    # TODO(frankm): Add some checks here including
    # - whether requested volume_id exists so we can return some errors
    # immediately
    # - maybe also do validation of swift container name
    @wsgi.response(202)
    @wsgi.serializers(xml=BackupTemplate)
    @wsgi.deserializers(xml=CreateDeserializer)
    def create(self, req, body):
        """Create a new backup."""
        LOG.debug(_('Creating new backup %s'), body)
        if not self.is_valid_body(body, 'backup'):
            raise exc.HTTPBadRequest()
        context = req.environ['cinder.context']
        try:
            backup = body['backup']
            volume_id = backup['volume_id']
        except KeyError:
            msg = _("Incorrect request body format")
            raise exc.HTTPBadRequest(explanation=msg)
        # Optional creation parameters.
        container = backup.get('container', None)
        name = backup.get('name', None)
        description = backup.get('description', None)
        LOG.audit(_("Creating backup of volume %(volume_id)s in container"
                    " %(container)s"),
                  {'volume_id': volume_id, 'container': container},
                  context=context)
        try:
            new_backup = self.backup_api.create(context, name, description,
                                                volume_id, container)
        except exception.InvalidVolume as error:
            raise exc.HTTPBadRequest(explanation=unicode(error))
        except exception.VolumeNotFound as error:
            raise exc.HTTPNotFound(explanation=unicode(error))
        except exception.ServiceNotFound as error:
            # No backup service available to handle the request.
            raise exc.HTTPInternalServerError(explanation=unicode(error))
        retval = self._view_builder.summary(req, dict(new_backup.iteritems()))
        return retval
    @wsgi.response(202)
    @wsgi.serializers(xml=BackupRestoreTemplate)
    @wsgi.deserializers(xml=RestoreDeserializer)
    def restore(self, req, id, body):
        """Restore an existing backup to a volume."""
        LOG.debug(_('Restoring backup %(backup_id)s (%(body)s)'),
                  {'backup_id': id, 'body': body})
        if not self.is_valid_body(body, 'restore'):
            raise exc.HTTPBadRequest()
        context = req.environ['cinder.context']
        try:
            restore = body['restore']
        except KeyError:
            msg = _("Incorrect request body format")
            raise exc.HTTPBadRequest(explanation=msg)
        # volume_id is optional in the restore request body.
        volume_id = restore.get('volume_id', None)
        LOG.audit(_("Restoring backup %(backup_id)s to volume %(volume_id)s"),
                  {'backup_id': id, 'volume_id': volume_id},
                  context=context)
        try:
            new_restore = self.backup_api.restore(context,
                                                  backup_id=id,
                                                  volume_id=volume_id)
        except exception.InvalidInput as error:
            raise exc.HTTPBadRequest(explanation=unicode(error))
        except exception.InvalidVolume as error:
            raise exc.HTTPBadRequest(explanation=unicode(error))
        except exception.InvalidBackup as error:
            raise exc.HTTPBadRequest(explanation=unicode(error))
        except exception.BackupNotFound as error:
            raise exc.HTTPNotFound(explanation=unicode(error))
        except exception.VolumeNotFound as error:
            raise exc.HTTPNotFound(explanation=unicode(error))
        except exception.VolumeSizeExceedsAvailableQuota as error:
            # 413 with Retry-After so clients may retry once quota frees up.
            raise exc.HTTPRequestEntityTooLarge(
                explanation=error.message, headers={'Retry-After': 0})
        except exception.VolumeLimitExceeded as error:
            raise exc.HTTPRequestEntityTooLarge(
                explanation=error.message, headers={'Retry-After': 0})
        retval = self._view_builder.restore_summary(
            req, dict(new_restore.iteritems()))
        return retval
class Backups(extensions.ExtensionDescriptor):
    """Backups support."""
    name = 'Backups'
    alias = 'backups'
    namespace = 'http://docs.openstack.org/volume/ext/backups/api/v1'
    updated = '2012-12-12T00:00:00+00:00'
    def get_resources(self):
        """Expose the backups controller as an API resource extension."""
        backup_resource = extensions.ResourceExtension(
            Backups.alias, BackupsController(),
            collection_actions={'detail': 'GET'},
            member_actions={'restore': 'POST'})
        return [backup_resource]
|
import os
import mox
from nova import context
from nova import db
from nova.network import linux_net
from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import test
from nova import utils
CONF = cfg.CONF
CONF.import_opt('network_driver', 'nova.config')
LOG = logging.getLogger(__name__)
HOST = "testhost"
# Fixture: two fake instances keyed by UUID, each "hosted" on its own fake
# host so host filtering in the network queries can be exercised.
instances = {'00000000-0000-0000-0000-0000000000000000':
                 {'id': 0,
                  'uuid': '00000000-0000-0000-0000-0000000000000000',
                  'host': 'fake_instance00',
                  'created_at': 'fakedate',
                  'updated_at': 'fakedate',
                  'hostname': 'fake_instance00'},
             '00000000-0000-0000-0000-0000000000000001':
                 {'id': 1,
                  'uuid': '00000000-0000-0000-0000-0000000000000001',
                  'host': 'fake_instance01',
                  'created_at': 'fakedate',
                  'updated_at': 'fakedate',
                  'hostname': 'fake_instance01'}}
# Fixture: a small pool of IPv4 addresses.
addresses = [{"address": "10.0.0.1"},
             {"address": "10.0.0.2"},
             {"address": "10.0.0.3"},
             {"address": "10.0.0.4"},
             {"address": "10.0.0.5"},
             {"address": "10.0.0.6"}]
# Fixture: two networks -- test0 (single-host) and test1 (multi_host) --
# each configured with both IPv4 and IPv6 addressing.
networks = [{'id': 0,
             'uuid': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
             'label': 'test0',
             'injected': False,
             'multi_host': False,
             'cidr': '192.168.0.0/24',
             'cidr_v6': '2001:db8::/64',
             'gateway_v6': '2001:db8::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': 'fa0',
             'bridge_interface': 'fake_fa0',
             'gateway': '192.168.0.1',
             'broadcast': '192.168.0.255',
             'dns1': '192.168.0.1',
             'dns2': '192.168.0.2',
             'dhcp_server': '0.0.0.0',
             'dhcp_start': '192.168.100.1',
             'vlan': None,
             'host': None,
             'project_id': 'fake_project',
             'vpn_public_address': '192.168.0.2'},
            {'id': 1,
             'uuid': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
             'label': 'test1',
             'injected': False,
             'multi_host': True,
             'cidr': '192.168.1.0/24',
             'cidr_v6': '2001:db9::/64',
             'gateway_v6': '2001:db9::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': 'fa1',
             'bridge_interface': 'fake_fa1',
             'gateway': '192.168.1.1',
             'broadcast': '192.168.1.255',
             'dns1': '192.168.0.1',
             'dns2': '192.168.0.2',
             'dhcp_server': '0.0.0.0',
             'dhcp_start': '192.168.100.1',
             'vlan': None,
             'host': None,
             'project_id': 'fake_project',
             'vpn_public_address': '192.168.1.2'}]
# Fixture: six allocated fixed IPs; each links into `vifs` through
# virtual_interface_id and into `instances` through instance_uuid.
fixed_ips = [{'id': 0,
              'network_id': 0,
              'address': '192.168.0.100',
              'instance_id': 0,
              'allocated': True,
              'virtual_interface_id': 0,
              'instance_uuid': '00000000-0000-0000-0000-0000000000000000',
              'floating_ips': []},
             {'id': 1,
              'network_id': 1,
              'address': '192.168.1.100',
              'instance_id': 0,
              'allocated': True,
              'virtual_interface_id': 1,
              'instance_uuid': '00000000-0000-0000-0000-0000000000000000',
              'floating_ips': []},
             {'id': 2,
              'network_id': 1,
              'address': '192.168.0.101',
              'instance_id': 1,
              'allocated': True,
              'virtual_interface_id': 2,
              'instance_uuid': '00000000-0000-0000-0000-0000000000000001',
              'floating_ips': []},
             {'id': 3,
              'network_id': 0,
              'address': '192.168.1.101',
              'instance_id': 1,
              'allocated': True,
              'virtual_interface_id': 3,
              'instance_uuid': '00000000-0000-0000-0000-0000000000000001',
              'floating_ips': []},
             {'id': 4,
              'network_id': 0,
              'address': '192.168.0.102',
              'instance_id': 0,
              'allocated': True,
              'virtual_interface_id': 4,
              'instance_uuid': '00000000-0000-0000-0000-0000000000000000',
              'floating_ips': []},
             {'id': 5,
              'network_id': 1,
              'address': '192.168.1.102',
              'instance_id': 1,
              'allocated': True,
              'virtual_interface_id': 5,
              'instance_uuid': '00000000-0000-0000-0000-0000000000000001',
              'floating_ips': []}]
# Fixture: one virtual interface per fixed IP above; the list index equals
# the id, so vifs[fixed_ip['virtual_interface_id']] resolves the right VIF.
vifs = [{'id': 0,
         'address': 'DE:AD:BE:EF:00:00',
         'uuid': '00000000-0000-0000-0000-0000000000000000',
         'network_id': 0,
         'instance_uuid': '00000000-0000-0000-0000-0000000000000000'},
        {'id': 1,
         'address': 'DE:AD:BE:EF:00:01',
         'uuid': '00000000-0000-0000-0000-0000000000000001',
         'network_id': 1,
         'instance_uuid': '00000000-0000-0000-0000-0000000000000000'},
        {'id': 2,
         'address': 'DE:AD:BE:EF:00:02',
         'uuid': '00000000-0000-0000-0000-0000000000000002',
         'network_id': 1,
         'instance_uuid': '00000000-0000-0000-0000-0000000000000001'},
        {'id': 3,
         'address': 'DE:AD:BE:EF:00:03',
         'uuid': '00000000-0000-0000-0000-0000000000000003',
         'network_id': 0,
         'instance_uuid': '00000000-0000-0000-0000-0000000000000001'},
        {'id': 4,
         'address': 'DE:AD:BE:EF:00:04',
         'uuid': '00000000-0000-0000-0000-0000000000000004',
         'network_id': 0,
         'instance_uuid': '00000000-0000-0000-0000-0000000000000000'},
        {'id': 5,
         'address': 'DE:AD:BE:EF:00:05',
         'uuid': '00000000-0000-0000-0000-0000000000000005',
         'network_id': 1,
         'instance_uuid': '00000000-0000-0000-0000-0000000000000001'}]
def get_associated(context, network_id, host=None):
    """Fake of db.network_get_associated_fixed_ips.

    Returns cleaned-up records for every allocated fixed IP on
    *network_id* that has both an instance and a vif, optionally
    restricted to instances on *host*.
    """
    associated = []
    for fixed_ip in fixed_ips:
        # Skip entries that are not fully associated with this network.
        if fixed_ip['network_id'] != network_id:
            continue
        if not fixed_ip['allocated']:
            continue
        if fixed_ip['instance_uuid'] is None:
            continue
        if fixed_ip['virtual_interface_id'] is None:
            continue
        instance = instances[fixed_ip['instance_uuid']]
        if host and host != instance['host']:
            continue
        vif = vifs[fixed_ip['virtual_interface_id']]
        associated.append({
            'address': fixed_ip['address'],
            'instance_uuid': fixed_ip['instance_uuid'],
            'network_id': fixed_ip['network_id'],
            'vif_id': fixed_ip['virtual_interface_id'],
            'vif_address': vif['address'],
            'instance_hostname': instance['hostname'],
            'instance_updated': instance['updated_at'],
            'instance_created': instance['created_at'],
        })
    return associated
class LinuxNetworkTestCase(test.TestCase):
    """Tests for the Linux network driver.

    Exercises dnsmasq hosts/opts file generation, bridge plugging,
    gateway device initialization and IptablesManager deferred-apply
    behaviour against the fake fixed_ips/vifs fixtures defined above.
    """

    def setUp(self):
        super(LinuxNetworkTestCase, self).setUp()
        network_driver = CONF.network_driver
        self.driver = importutils.import_module(network_driver)
        self.driver.db = db
        self.context = context.RequestContext('testuser', 'testproject',
                                              is_admin=True)

        def get_vifs(_context, instance_uuid):
            # Fake db.virtual_interface_get_by_instance.
            return [vif for vif in vifs if vif['instance_uuid'] ==
                    instance_uuid]

        def get_instance(_context, instance_id):
            # Fake db.instance_get.
            return instances[instance_id]

        self.stubs.Set(db, 'virtual_interface_get_by_instance', get_vifs)
        self.stubs.Set(db, 'instance_get', get_instance)
        self.stubs.Set(db, 'network_get_associated_fixed_ips', get_associated)

    def test_update_dhcp_for_nw00(self):
        """update_dhcp() on network 0 writes config files and fixes modes."""
        self.flags(use_single_default_gateway=True)
        self.mox.StubOutWithMock(self.driver, 'write_to_file')
        self.mox.StubOutWithMock(fileutils, 'ensure_tree')
        self.mox.StubOutWithMock(os, 'chmod')

        self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
        self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        os.chmod(mox.IgnoreArg(), mox.IgnoreArg())
        os.chmod(mox.IgnoreArg(), mox.IgnoreArg())

        self.mox.ReplayAll()

        self.driver.update_dhcp(self.context, "eth0", networks[0])

    def test_update_dhcp_for_nw01(self):
        """update_dhcp() on network 1 writes config files and fixes modes."""
        self.flags(use_single_default_gateway=True)
        self.mox.StubOutWithMock(self.driver, 'write_to_file')
        self.mox.StubOutWithMock(fileutils, 'ensure_tree')
        self.mox.StubOutWithMock(os, 'chmod')

        self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
        self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        fileutils.ensure_tree(mox.IgnoreArg())
        os.chmod(mox.IgnoreArg(), mox.IgnoreArg())
        os.chmod(mox.IgnoreArg(), mox.IgnoreArg())

        self.mox.ReplayAll()

        # BUG FIX: this test previously passed networks[0], which made it an
        # exact duplicate of test_update_dhcp_for_nw00 and left network 1
        # completely unexercised.
        self.driver.update_dhcp(self.context, "eth0", networks[1])

    def test_get_dhcp_hosts_for_nw00(self):
        """dnsmasq hosts entries for network 0 with single default gateway."""
        self.flags(use_single_default_gateway=True)

        expected = (
                "DE:AD:BE:EF:00:00,fake_instance00.novalocal,"
                "192.168.0.100,net:NW-0\n"
                "DE:AD:BE:EF:00:03,fake_instance01.novalocal,"
                "192.168.1.101,net:NW-3\n"
                "DE:AD:BE:EF:00:04,fake_instance00.novalocal,"
                "192.168.0.102,net:NW-4"
        )
        actual_hosts = self.driver.get_dhcp_hosts(self.context, networks[0])

        self.assertEquals(actual_hosts, expected)

    def test_get_dhcp_hosts_for_nw01(self):
        """dnsmasq hosts entries for network 1, filtered by host."""
        self.flags(use_single_default_gateway=True)
        self.flags(host='fake_instance01')

        expected = (
                "DE:AD:BE:EF:00:02,fake_instance01.novalocal,"
                "192.168.0.101,net:NW-2\n"
                "DE:AD:BE:EF:00:05,fake_instance01.novalocal,"
                "192.168.1.102,net:NW-5"
        )
        actual_hosts = self.driver.get_dhcp_hosts(self.context, networks[1])

        self.assertEquals(actual_hosts, expected)

    def test_get_dns_hosts_for_nw00(self):
        """DNS hosts file entries for network 0."""
        expected = (
                "192.168.0.100\tfake_instance00.novalocal\n"
                "192.168.1.101\tfake_instance01.novalocal\n"
                "192.168.0.102\tfake_instance00.novalocal"
        )
        actual_hosts = self.driver.get_dns_hosts(self.context, networks[0])

        self.assertEquals(actual_hosts, expected)

    def test_get_dns_hosts_for_nw01(self):
        """DNS hosts file entries for network 1."""
        expected = (
                "192.168.1.100\tfake_instance00.novalocal\n"
                "192.168.0.101\tfake_instance01.novalocal\n"
                "192.168.1.102\tfake_instance01.novalocal"
        )
        actual_hosts = self.driver.get_dns_hosts(self.context, networks[1])

        self.assertEquals(actual_hosts, expected)

    def test_get_dhcp_opts_for_nw00(self):
        """dnsmasq opts entries for network 0."""
        expected_opts = 'NW-3,3\nNW-4,3'
        actual_opts = self.driver.get_dhcp_opts(self.context, networks[0])

        self.assertEquals(actual_opts, expected_opts)

    def test_get_dhcp_opts_for_nw01(self):
        """dnsmasq opts entries for network 1, filtered by host."""
        self.flags(host='fake_instance01')
        expected_opts = "NW-5,3"
        actual_opts = self.driver.get_dhcp_opts(self.context, networks[1])

        self.assertEquals(actual_opts, expected_opts)

    def test_dhcp_opts_not_default_gateway_network(self):
        """Per-host dhcp opt for a fixed ip on a non-default-gateway net."""
        expected = "NW-0,3"
        data = get_associated(self.context, 0)[0]
        actual = self.driver._host_dhcp_opts(data)
        self.assertEquals(actual, expected)

    def test_host_dhcp_without_default_gateway_network(self):
        """Host dhcp entry omits the net: tag without a default gateway."""
        expected = ','.join(['DE:AD:BE:EF:00:00',
                             'fake_instance00.novalocal',
                             '192.168.0.100'])
        data = get_associated(self.context, 0)[0]
        actual = self.driver._host_dhcp(data)
        self.assertEquals(actual, expected)

    def test_host_dns_without_default_gateway_network(self):
        """Host DNS entry maps address to instance hostname."""
        expected = "192.168.0.100\tfake_instance00.novalocal"
        data = get_associated(self.context, 0)[0]
        actual = self.driver._host_dns(data)
        self.assertEquals(actual, expected)

    def test_linux_bridge_driver_plug(self):
        """Makes sure plug doesn't drop FORWARD by default.

        Ensures bug 890195 doesn't reappear.
        """
        def fake_execute(*args, **kwargs):
            return "", ""
        self.stubs.Set(utils, 'execute', fake_execute)

        def verify_add_rule(chain, rule):
            # Every rule added to FORWARD must be an ACCEPT rule.
            self.assertEqual(chain, 'FORWARD')
            self.assertIn('ACCEPT', rule)
        self.stubs.Set(linux_net.iptables_manager.ipv4['filter'],
                       'add_rule', verify_add_rule)
        driver = linux_net.LinuxBridgeInterfaceDriver()
        driver.plug({"bridge": "br100", "bridge_interface": "eth0"},
                    "fakemac")

    def test_vlan_override(self):
        """Makes sure vlan_interface flag overrides network bridge_interface.

        Allows heterogeneous networks a la bug 833426.
        """
        driver = linux_net.LinuxBridgeInterfaceDriver()

        info = {}

        @classmethod
        def test_ensure(_self, vlan, bridge, interface, network, mac_address):
            # Capture the interface the driver chose.
            info['passed_interface'] = interface

        self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
                       'ensure_vlan_bridge', test_ensure)

        network = {
                "bridge": "br100",
                "bridge_interface": "base_interface",
                "vlan": "fake"
        }
        self.flags(vlan_interface="")
        driver.plug(network, "fakemac")
        self.assertEqual(info['passed_interface'], "base_interface")
        self.flags(vlan_interface="override_interface")
        driver.plug(network, "fakemac")
        self.assertEqual(info['passed_interface'], "override_interface")
        driver.plug(network, "fakemac")

    def test_flat_override(self):
        """Makes sure flat_interface flag overrides network bridge_interface.

        Allows heterogeneous networks a la bug 833426.
        """
        driver = linux_net.LinuxBridgeInterfaceDriver()

        info = {}

        @classmethod
        def test_ensure(_self, bridge, interface, network, gateway):
            # Capture the interface the driver chose.
            info['passed_interface'] = interface

        self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
                       'ensure_bridge', test_ensure)

        network = {
                "bridge": "br100",
                "bridge_interface": "base_interface",
        }
        driver.plug(network, "fakemac")
        self.assertEqual(info['passed_interface'], "base_interface")
        self.flags(flat_interface="override_interface")
        driver.plug(network, "fakemac")
        self.assertEqual(info['passed_interface'], "override_interface")

    def test_isolated_host(self):
        """Plugging with share_dhcp_address installs DHCP isolation rules."""
        self.flags(fake_network=False,
                   share_dhcp_address=True)
        # NOTE(vish): use a fresh copy of the manager for each test
        self.stubs.Set(linux_net, 'iptables_manager',
                       linux_net.IptablesManager())
        self.stubs.Set(linux_net, 'binary_name', 'test')
        executes = []
        inputs = []

        def fake_execute(*args, **kwargs):
            executes.append(args)
            process_input = kwargs.get('process_input')
            if process_input:
                inputs.append(process_input)
            return "", ""

        self.stubs.Set(utils, 'execute', fake_execute)

        driver = linux_net.LinuxBridgeInterfaceDriver()

        @classmethod
        def fake_ensure(_self, bridge, interface, network, gateway):
            return bridge

        self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
                       'ensure_bridge', fake_ensure)

        iface = 'eth0'
        dhcp = '192.168.1.1'
        network = {'dhcp_server': dhcp,
                   'bridge': 'br100',
                   'bridge_interface': iface}
        driver.plug(network, 'fakemac')
        expected = [
            ('ebtables', '-D', 'INPUT', '-p', 'ARP', '-i', iface,
             '--arp-ip-dst', dhcp, '-j', 'DROP'),
            ('ebtables', '-I', 'INPUT', '-p', 'ARP', '-i', iface,
             '--arp-ip-dst', dhcp, '-j', 'DROP'),
            ('ebtables', '-D', 'OUTPUT', '-p', 'ARP', '-o', iface,
             '--arp-ip-src', dhcp, '-j', 'DROP'),
            ('ebtables', '-I', 'OUTPUT', '-p', 'ARP', '-o', iface,
             '--arp-ip-src', dhcp, '-j', 'DROP'),
            ('iptables-save', '-c', '-t', 'filter'),
            ('iptables-restore', '-c'),
            ('iptables-save', '-c', '-t', 'nat'),
            ('iptables-restore', '-c'),
            ('ip6tables-save', '-c', '-t', 'filter'),
            ('ip6tables-restore', '-c'),
        ]
        self.assertEqual(executes, expected)
        expected_inputs = [
             '-A test-FORWARD -m physdev --physdev-in %s '
             '-d 255.255.255.255 -p udp --dport 67 -j DROP' % iface,
             '-A test-FORWARD -m physdev --physdev-out %s '
             '-d 255.255.255.255 -p udp --dport 67 -j DROP' % iface,
             '-A test-FORWARD -m physdev --physdev-in %s '
             '-d 192.168.1.1 -j DROP' % iface,
             '-A test-FORWARD -m physdev --physdev-out %s '
             '-s 192.168.1.1 -j DROP' % iface,
        ]
        for inp in expected_inputs:
            self.assertTrue(inp in inputs[0])

    def _test_initialize_gateway(self, existing, expected, routes=''):
        """Run initialize_gateway_device and compare the executed commands.

        :param existing: canned output for 'ip addr show'
        :param expected: full ordered list of expected command tuples
        :param routes: canned output for 'ip route show'
        """
        self.flags(fake_network=False)
        executes = []

        def fake_execute(*args, **kwargs):
            executes.append(args)
            if args[0] == 'ip' and args[1] == 'addr' and args[2] == 'show':
                return existing, ""
            if args[0] == 'ip' and args[1] == 'route' and args[2] == 'show':
                return routes, ""

        self.stubs.Set(utils, 'execute', fake_execute)
        network = {'dhcp_server': '192.168.1.1',
                   'cidr': '192.168.1.0/24',
                   'broadcast': '192.168.1.255',
                   'cidr_v6': '2001:db8::/64'}
        self.driver.initialize_gateway_device('eth0', network)
        self.assertEqual(executes, expected)

    def test_initialize_gateway_moves_wrong_ip(self):
        """An existing foreign IP is removed and re-added after the gateway."""
        existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
                    " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
                    "    link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
                    "    inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n"
                    "    inet6 dead::beef:dead:beef:dead/64 scope link\n"
                    "       valid_lft forever preferred_lft forever\n")
        expected = [
            ('sysctl', '-w', 'net.ipv4.ip_forward=1'),
            ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
            ('ip', 'route', 'show', 'dev', 'eth0'),
            ('ip', 'addr', 'del', '192.168.0.1/24',
             'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
            ('ip', 'addr', 'add', '192.168.1.1/24',
             'brd', '192.168.1.255', 'dev', 'eth0'),
            ('ip', 'addr', 'add', '192.168.0.1/24',
             'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
            ('ip', '-f', 'inet6', 'addr', 'change',
             '2001:db8::/64', 'dev', 'eth0'),
        ]
        self._test_initialize_gateway(existing, expected)

    def test_initialize_gateway_resets_route(self):
        """Existing routes are deleted and restored around the re-plumb."""
        routes = ("default via 192.168.0.1 dev eth0\n"
                  "192.168.100.0/24 via 192.168.0.254 dev eth0 proto static\n")
        existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
                    " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
                    "    link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
                    "    inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n"
                    "    inet6 dead::beef:dead:beef:dead/64 scope link\n"
                    "       valid_lft forever preferred_lft forever\n")
        expected = [
            ('sysctl', '-w', 'net.ipv4.ip_forward=1'),
            ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
            ('ip', 'route', 'show', 'dev', 'eth0'),
            ('ip', 'route', 'del', 'default', 'dev', 'eth0'),
            ('ip', 'route', 'del', '192.168.100.0/24', 'dev', 'eth0'),
            ('ip', 'addr', 'del', '192.168.0.1/24',
             'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
            ('ip', 'addr', 'add', '192.168.1.1/24',
             'brd', '192.168.1.255', 'dev', 'eth0'),
            ('ip', 'addr', 'add', '192.168.0.1/24',
             'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
            ('ip', 'route', 'add', 'default', 'via', '192.168.0.1',
             'dev', 'eth0'),
            ('ip', 'route', 'add', '192.168.100.0/24', 'via', '192.168.0.254',
             'dev', 'eth0', 'proto', 'static'),
            ('ip', '-f', 'inet6', 'addr', 'change',
             '2001:db8::/64', 'dev', 'eth0'),
        ]
        self._test_initialize_gateway(existing, expected, routes)

    def test_initialize_gateway_no_move_right_ip(self):
        """Nothing is re-plumbed when the gateway IP is already first."""
        existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
                    " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
                    "    link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
                    "    inet 192.168.1.1/24 brd 192.168.1.255 scope global eth0\n"
                    "    inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n"
                    "    inet6 dead::beef:dead:beef:dead/64 scope link\n"
                    "       valid_lft forever preferred_lft forever\n")
        expected = [
            ('sysctl', '-w', 'net.ipv4.ip_forward=1'),
            ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
            ('ip', '-f', 'inet6', 'addr', 'change',
             '2001:db8::/64', 'dev', 'eth0'),
        ]
        self._test_initialize_gateway(existing, expected)

    def test_initialize_gateway_add_if_blank(self):
        """The gateway IP is added when the device has no IPv4 address."""
        existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
                    " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
                    "    link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
                    "    inet6 dead::beef:dead:beef:dead/64 scope link\n"
                    "       valid_lft forever preferred_lft forever\n")
        expected = [
            ('sysctl', '-w', 'net.ipv4.ip_forward=1'),
            ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
            ('ip', 'route', 'show', 'dev', 'eth0'),
            ('ip', 'addr', 'add', '192.168.1.1/24',
             'brd', '192.168.1.255', 'dev', 'eth0'),
            ('ip', '-f', 'inet6', 'addr', 'change',
             '2001:db8::/64', 'dev', 'eth0'),
        ]
        self._test_initialize_gateway(existing, expected)

    def test_apply_ran(self):
        """apply() runs _apply immediately when deferral is off."""
        manager = linux_net.IptablesManager()
        manager.iptables_apply_deferred = False
        self.mox.StubOutWithMock(manager, '_apply')
        manager._apply()
        self.mox.ReplayAll()
        empty_ret = manager.apply()
        self.assertEqual(empty_ret, None)

    def test_apply_not_run(self):
        """apply() does nothing while deferral is on."""
        manager = linux_net.IptablesManager()
        manager.iptables_apply_deferred = True
        self.mox.StubOutWithMock(manager, '_apply')
        self.mox.ReplayAll()
        manager.apply()

    def test_deferred_unset_apply_ran(self):
        """defer_apply_off() flushes the deferred rules and clears the flag."""
        manager = linux_net.IptablesManager()
        manager.iptables_apply_deferred = True
        self.mox.StubOutWithMock(manager, '_apply')
        manager._apply()
        self.mox.ReplayAll()
        manager.defer_apply_off()
        self.assertFalse(manager.iptables_apply_deferred)
|
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_endpoint_address import V1EndpointAddress
class TestV1EndpointAddress(unittest.TestCase):
    """Unit test stubs for the V1EndpointAddress model."""

    def setUp(self):
        """No fixtures are needed for these stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testV1EndpointAddress(self):
        """
        Test V1EndpointAddress

        Smoke-test: the model class can be instantiated without arguments.
        """
        model = V1EndpointAddress()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
from sublime_plugin import WindowCommand
from ..platformio.compile import Compile
class DeviotCompileSketchCommand(WindowCommand):
    """Sublime Text window command that compiles the current sketch.

    Invoked by the Deviot plugin menu/command palette; the actual build
    is delegated entirely to the PlatformIO Compile helper.
    """

    def run(self):
        # Instantiating Compile kicks off the PlatformIO build.
        Compile()
|
# ---- Arduino pin assignment for the six hand servos (5 fingers + wrist) ----
servoPin01 = 2
servoPin02 = 3
servoPin03 = 4
servoPin04 = 5
servoPin05 = 6
servoPin06 = 7

# ---- Travel limits (degrees) for each servo ----
servo01Max = 180
servo01Min = 0
servo02Max = 180
servo02Min = 0
servo03Max = 180
servo03Min = 0
servo04Max = 180
servo04Min = 0
servo05Max = 180
servo05Min = 0
servo06Max = 180
servo06Min = 0

# Serial port the Arduino is attached to.
comPort = "COM9"

# ---- Portuguese voice-command keywords recognized by onText() ----
rHandAbrir_keyw = "abrir"
rHandFechar_keyw = "fechar"
rHandCentro_keyw = "centro"
rHandMetal_keyw = "metal"
rHandDedo_keyw = "dedo"
rHandEsquerda_keyw = "esquerda"
rHandDireita_keyw = "direita"
rHandGosto_keyw = "gosto"
rHandContar_keyw = "contar"

# ---- Start the MyRobotLab services ----
webkitspeechrecognition = Runtime.start("webkitspeechrecognition","WebkitSpeechRecognition")
arduino = Runtime.start("arduino","Arduino")
speech = Runtime.start("speech","AcapelaSpeech")
webgui = Runtime.start("webgui","WebGui")
arduino.connect(comPort)

# ---- Start one Servo service per finger/wrist and attach it to its pin ----
servo01 = Runtime.start("servo01","Servo")
servo01.attach(arduino.getName(), servoPin01)
servo02 = Runtime.start("servo02","Servo")
servo02.attach(arduino.getName(), servoPin02)
servo03 = Runtime.start("servo03","Servo")
servo03.attach(arduino.getName(), servoPin03)
servo04 = Runtime.start("servo04","Servo")
servo04.attach(arduino.getName(), servoPin04)
servo05 = Runtime.start("servo05","Servo")
servo05.attach(arduino.getName(), servoPin05)
servo06 = Runtime.start("servo06","Servo")
servo06.attach(arduino.getName(), servoPin06)

# Recognize Portuguese and speak with Acapela's "Celia" voice.
webkitspeechrecognition.setLanguage("pt-PT")
speech.setVoice("Celia")
def rHandAttach():
    """Re-attach every hand servo to its Arduino pin."""
    for servo, pin in ((servo01, servoPin01), (servo02, servoPin02),
                       (servo03, servoPin03), (servo04, servoPin04),
                       (servo05, servoPin05), (servo06, servoPin06)):
        servo.attach(arduino.getName(), pin)
def rHandDetach():
    """Detach every hand servo so it stops holding position."""
    for servo in (servo01, servo02, servo03, servo04, servo05, servo06):
        servo.detach()
def rHandAbrir():
    """Open the hand: drive all five finger servos to their minimum."""
    rHandAttach()
    for servo, minimum in ((servo01, servo01Min), (servo02, servo02Min),
                           (servo03, servo03Min), (servo04, servo04Min),
                           (servo05, servo05Min)):
        servo.moveTo(minimum)
    rHandDetach()
def rHandFechar():
    """Close the hand: drive all five finger servos to their maximum."""
    rHandAttach()
    for servo, maximum in ((servo01, servo01Max), (servo02, servo02Max),
                           (servo03, servo03Max), (servo04, servo04Max),
                           (servo05, servo05Max)):
        servo.moveTo(maximum)
    rHandDetach()
def rHandMetal():
    """Devil-horns gesture: index and pinky open, other fingers closed."""
    for servo, position in ((servo01, servo01Max), (servo02, servo02Min),
                            (servo03, servo03Max), (servo04, servo04Max),
                            (servo05, servo05Min)):
        servo.moveTo(position)
def rHandDedo():
    """Middle-finger gesture: only the middle finger extended."""
    for servo, position in ((servo01, servo01Max), (servo02, servo02Max),
                            (servo03, servo03Min), (servo04, servo04Max),
                            (servo05, servo05Max)):
        servo.moveTo(position)
def rHandGosto():
    """Thumbs-up ("like") gesture: thumb open, fingers closed."""
    for servo, position in ((servo01, servo01Min), (servo02, servo02Max),
                            (servo03, servo03Max), (servo04, servo04Max),
                            (servo05, servo05Max)):
        servo.moveTo(position)
def rHandEsquerda():
    # Rotate the wrist servo fully to the left.
    servo06.moveTo(servo06Min)
def rHandDireita():
    # Rotate the wrist servo fully to the right.
    servo06.moveTo(servo06Max)
def rHandCentro():
    """Move every finger servo to its mid-travel (90 degree) position."""
    for servo in (servo01, servo02, servo03, servo04, servo05):
        servo.moveTo(90)
def rHandContar():
    """Count to five: close the hand, then open one finger per second."""
    rHandDireita()
    rHandFechar()
    sleep(1)
    for servo, minimum in ((servo01, servo01Min), (servo02, servo02Min),
                           (servo03, servo03Min), (servo04, servo04Min),
                           (servo05, servo05Min)):
        servo.moveTo(minimum)
        sleep(1)
def rHandPos(data):
print data
servo01.moveTo(int(data))
servo02.moveTo(int(data))
servo03.moveTo(int(data))
servo04.moveTo(int(data))
servo05.moveTo(int(data))
def onText(data):
print data
# speech.speakBlocking(data)
if (data == rHandAbrir_keyw):
print "a abrir a mao"
speech.speakBlocking("abrir")
rHandAbrir()
elif (data == rHandFechar_keyw):
print "a fechar a mao"
rHandFechar()
elif (data == rHandCentro_keyw):
print "ao centro"
rHandCentro()
elif (data == rHandMetal_keyw):
print "Metal"
rHandMetal()
elif (data == rHandDedo_keyw):
print "Dedo do meio"
rHandDedo()
elif (data == rHandEsquerda_keyw):
print "Rodar o pulso a esquerda"
rHandEsquerda()
elif (data == rHandDireita_keyw):
print "Rodar o pulso a direita"
rHandDireita()
elif (data == rHandGosto_keyw):
print "Gosto"
rHandGosto()
elif (data == rHandContar_keyw):
print "Contar"
rHandContar()
elif (data >= 0 or data <= 180):
print "ir para: "
rHandPos(data)
webkitspeechrecognition.addListener("publishText","python","onText")
|
import os
class JVMNotFoundException(RuntimeError):
    """Raised when no usable JVM shared library could be located."""
    pass
class JVMFinder(object):
    """
    JVM library finder base class.

    Locates the JVM shared library (libjvm.so) by first honouring the
    JAVA_HOME environment variable and then scanning a set of predefined
    installation locations.
    """
    def __init__(self):
        """
        Sets up members
        """
        # Library file name
        self._libfile = "libjvm.so"

        # Predefined locations
        self._locations = ("/usr/lib/jvm", "/usr/java")

        # Search methods, tried in order by get_jvm_path()
        self._methods = (self._get_from_java_home,
                         self._get_from_known_locations)

    def find_libjvm(self, java_home):
        """
        Recursively looks for the JVM library file under *java_home*.

        :param java_home: A Java home folder
        :return: The first found file path
        :raise JVMNotFoundException: No usable JVM library was found
        """
        found_jamvm = False

        # Look for the file
        for root, _, names in os.walk(java_home):
            if self._libfile in names:
                # Found it, but check for jamvm: that implementation is
                # known to be broken, so remember it and keep searching.
                if os.path.split(root)[1] == "jamvm":
                    found_jamvm = True
                    continue  # maybe we will find another one?
                return os.path.join(root, self._libfile)

        if found_jamvm:
            # Only JamVM was found: refuse it explicitly.
            raise JVMNotFoundException("Sorry JamVM is known to be broken."
                                       " Please ensure your JAVA_HOME"
                                       " contains at least another JVM "
                                       "implementation (eg. server)")

        # File not found
        raise JVMNotFoundException("Sorry no JVM could be found."
                                   " Please ensure your JAVA_HOME"
                                   " environment variable is pointing"
                                   " to correct installation.")

    def find_possible_homes(self, parents):
        """
        Generator that looks for the first-level children folders that could be
        Java installations, according to their name

        :param parents: A list of parent directories
        :return: The possible JVM installation folders
        """
        homes = []
        java_names = ('jre', 'jdk', 'java')

        for parent in parents:
            # ROBUSTNESS FIX: a predefined location (e.g. /usr/java) may not
            # exist on this system; without this guard os.listdir() raises
            # OSError and aborts the whole search instead of letting
            # get_jvm_path() fall through to its "not found" error.
            if not os.path.isdir(parent):
                continue
            for childname in sorted(os.listdir(parent)):
                # Compute the real path
                path = os.path.realpath(os.path.join(parent, childname))
                if path in homes or not os.path.isdir(path):
                    # Already known path, or not a directory -> ignore
                    continue

                # Check if the path seems OK
                real_name = os.path.basename(path).lower()
                for java_name in java_names:
                    if java_name in real_name:
                        # Correct JVM folder name
                        homes.append(path)
                        yield path
                        break

    def get_jvm_path(self):
        """
        Retrieves the path to the default or first found JVM library

        :return: The path to the JVM shared library file
        :raise ValueError: No JVM library found
        """
        for method in self._methods:
            try:
                jvm = method()
            except NotImplementedError:
                # Ignore missing implementations
                pass
            else:
                if jvm is not None:
                    return jvm
        else:
            raise ValueError("No JVM shared library file ({0}) found. "
                             "Try setting up the JAVA_HOME environment "
                             "variable properly.".format(self._libfile))

    def _get_from_java_home(self):
        """
        Retrieves the Java library path according to the JAVA_HOME environment
        variable

        :return: The path to the JVM library, or None
        """
        # Get the environment variable
        java_home = os.getenv("JAVA_HOME")
        if java_home and os.path.exists(java_home):
            # Get the real installation path
            java_home = os.path.realpath(java_home)

            # Look for the library file
            return self.find_libjvm(java_home)

    def _get_from_known_locations(self):
        """
        Retrieves the first existing Java library path in the predefined known
        locations

        :return: The path to the JVM library, or None
        """
        for home in self.find_possible_homes(self._locations):
            jvm = self.find_libjvm(home)
            if jvm is not None:
                return jvm
|
"""Package Alias admin handler."""
import httplib
import json
from simian.mac import admin
from simian.mac import common
from simian.mac import models
from simian.mac.common import auth
class PackageAlias(admin.AdminHandler):
  """Handler for /admin/package_alias.

  Lets admins create/edit PackageAlias entities (mapping an alias name to
  a Munki package name) and toggle them enabled/disabled; any
  authenticated user may view the report.
  """

  @admin.AdminHandler.XsrfProtected('manifests_aliases')
  def post(self):
    """POST handler: routes to create or toggle based on form fields."""
    # Mutations are admin-only; viewing (GET) is not.
    if not self.IsAdminUser():
      self.response.set_status(httplib.FORBIDDEN)
      return

    if self.request.get('create_package_alias'):
      self._CreatePackageAlias()
    elif self.request.get('enabled'):
      self._TogglePackageAlias()
    else:
      self.response.set_status(httplib.NOT_FOUND)

  def _CreatePackageAlias(self):
    """Creates a new or edits an existing package alias, with verification.

    Reads 'package_alias' and 'munki_pkg_name' from the request, validates
    them, then saves (keyed by the alias name, so an existing alias with
    the same name is overwritten) and redirects back with a status message.
    """
    package_alias = self.request.get('package_alias').strip()
    munki_pkg_name = self.request.get('munki_pkg_name').strip()

    if not package_alias:
      msg = 'Package Alias is required.'
      self.redirect('/admin/package_alias?msg=%s' % msg)
      return

    if not munki_pkg_name:
      # An empty target is allowed; the alias is saved disabled (below).
      munki_pkg_name = None
    elif not models.PackageInfo.all().filter('name =', munki_pkg_name).get():
      # Refuse to point the alias at a package that does not exist.
      msg = 'Munki pkg %s does not exist.' % munki_pkg_name
      self.redirect('/admin/package_alias?msg=%s' % msg)
      return

    alias = models.PackageAlias(
        key_name=package_alias, munki_pkg_name=munki_pkg_name)
    if not munki_pkg_name:
      alias.enabled = False
    alias.put()
    msg = 'Package Alias successfully saved.'
    self.redirect('/admin/package_alias?msg=%s' % msg)

  def _TogglePackageAlias(self):
    """Sets an existing PackageAlias as enabled/disabled.

    Responds with a JSON body echoing the new state.
    """
    key_name = self.request.get('key_name')
    enabled = self.request.get('enabled') == '1'
    # NOTE(review): get_by_key_name may return None for an unknown
    # key_name, which would raise AttributeError below — confirm callers
    # only pass existing aliases.
    alias = models.PackageAlias.get_by_key_name(key_name)
    alias.enabled = enabled
    alias.put()
    data = {'enabled': enabled, 'key_name': key_name}
    self.response.headers['Content-Type'] = 'application/json'
    self.response.out.write(json.dumps(data))

  def get(self, report=None, product_id=None):
    """GET handler.

    Args:
      report: unused; kept for URL-routing signature compatibility.
      product_id: unused; kept for URL-routing signature compatibility.
    """
    auth.DoUserAuth()
    self._DisplayMain()

  def _DisplayMain(self):
    """Displays the main Package Alias report."""
    package_aliases = models.PackageAlias.all()
    is_admin = self.IsAdminUser()
    # TODO(user): generate PackageInfo dict so admin select box can use display
    # names, munki package names can link to installs, etc.
    if is_admin:
      # Admins get the list of selectable target packages for the form.
      munki_pkg_names = models.PackageInfo.GetManifestModPkgNames(
          common.MANIFEST_MOD_ADMIN_GROUP)
    else:
      munki_pkg_names = None
    data = {
        'package_aliases': package_aliases,
        'munki_pkg_names': munki_pkg_names,
        'report_type': 'manifests_aliases',
    }
    self.Render('package_alias.html', data)
|
import mmcv
import torch
from mmdet.models.dense_heads import GFLHead, LDHead
def test_ld_head_loss():
    """Tests LD head loss when truth is empty and non-empty.

    The LD (localization distillation) head is trained against soft
    targets produced by a GFL teacher head.
    """
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(
            assigner=dict(type='ATSSAssigner', topk=9, ignore_iof_thr=0.1),
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    # NOTE: named `self` to mirror method-style head tests elsewhere in
    # mmdet; it is the student head under test.
    self = LDHead(
        num_classes=4,
        in_channels=1,
        train_cfg=train_cfg,
        loss_ld=dict(type='KnowledgeDistillationKLDivLoss', loss_weight=1.0),
        loss_cls=dict(
            type='QualityFocalLoss',
            use_sigmoid=True,
            beta=2.0,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]))
    # Teacher head: same anchor/loss setup, but a plain GFL head.
    teacher_model = GFLHead(
        num_classes=4,
        in_channels=1,
        train_cfg=train_cfg,
        loss_cls=dict(
            type='QualityFocalLoss',
            use_sigmoid=True,
            beta=2.0,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]))
    # One random single-channel feature map per FPN level.
    feat = [
        torch.rand(1, 1, s // feat_size, s // feat_size)
        for feat_size in [4, 8, 16, 32, 64]
    ]
    cls_scores, bbox_preds = self.forward(feat)
    # Teacher bbox predictions serve as the soft distillation targets.
    rand_soft_target = teacher_model.forward(feat)[1]

    # Test that empty ground truth encourages the network to predict
    # background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                                rand_soft_target, img_metas, gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero, ld loss should
    # be non-negative but there should be no box loss.
    empty_cls_loss = sum(empty_gt_losses['loss_cls'])
    empty_box_loss = sum(empty_gt_losses['loss_bbox'])
    empty_ld_loss = sum(empty_gt_losses['loss_ld'])
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')
    assert empty_ld_loss.item() >= 0, 'ld loss should be non-negative'

    # When truth is non-empty then both cls and box loss should be nonzero
    # for random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                              rand_soft_target, img_metas, gt_bboxes_ignore)
    onegt_cls_loss = sum(one_gt_losses['loss_cls'])
    onegt_box_loss = sum(one_gt_losses['loss_bbox'])
    assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert onegt_box_loss.item() > 0, 'box loss should be non-zero'

    gt_bboxes_ignore = gt_bboxes
    # When truth is non-empty but ignored then the cls loss should be nonzero,
    # but there should be no box loss.
    ignore_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                                 rand_soft_target, img_metas, gt_bboxes_ignore)
    ignore_cls_loss = sum(ignore_gt_losses['loss_cls'])
    ignore_box_loss = sum(ignore_gt_losses['loss_bbox'])
    assert ignore_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert ignore_box_loss.item() == 0, 'gt bbox ignored loss should be zero'

    # When truth is non-empty and not ignored then both cls and box loss should
    # be nonzero for random inputs
    gt_bboxes_ignore = [torch.randn(1, 4)]
    not_ignore_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes,
                                     gt_labels, rand_soft_target, img_metas,
                                     gt_bboxes_ignore)
    not_ignore_cls_loss = sum(not_ignore_gt_losses['loss_cls'])
    not_ignore_box_loss = sum(not_ignore_gt_losses['loss_bbox'])
    assert not_ignore_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert not_ignore_box_loss.item(
    ) > 0, 'gt bbox not ignored loss should be non-zero'
|
import os
import six
import sys
import tempfile
from mock import patch, mock
from airflow import configuration as conf
from airflow.configuration import mkdir_p
from airflow.exceptions import AirflowConfigException
if six.PY2:
# Need `assertWarns` back-ported from unittest2
import unittest2 as unittest
else:
import unittest
SETTINGS_FILE_VALID = """
LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'airflow.task': {
'format': '[%%(asctime)s] {{%%(filename)s:%%(lineno)d}} %%(levelname)s - %%(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'airflow.task',
'stream': 'ext://sys.stdout'
},
'task': {
'class': 'logging.StreamHandler',
'formatter': 'airflow.task',
'stream': 'ext://sys.stdout'
},
},
'loggers': {
'airflow': {
'handlers': ['console'],
'level': 'INFO',
'propagate': False
},
'airflow.task': {
'handlers': ['task'],
'level': 'INFO',
'propagate': False,
},
}
}
"""
SETTINGS_FILE_INVALID = """
LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'airflow.task': {
'format': '[%%(asctime)s] {{%%(filename)s:%%(lineno)d}} %%(levelname)s - %%(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'airflow.task',
'stream': 'ext://sys.stdout'
}
},
'loggers': {
'airflow': {
'handlers': ['file.handler'], # this handler does not exists
'level': 'INFO',
'propagate': False
}
}
}
"""
SETTINGS_FILE_EMPTY = """
"""
SETTINGS_DEFAULT_NAME = 'custom_airflow_local_settings'
class settings_context(object):
    """
    Sets a settings file and puts it in the Python classpath

    :param content:
        The content of the settings file
    """

    def __init__(self, content, dir=None, name='LOGGING_CONFIG'):
        self.content = content
        # Each context gets its own temp dir so parallel tests don't clash.
        self.settings_root = tempfile.mkdtemp()
        filename = "{}.py".format(SETTINGS_DEFAULT_NAME)
        if dir:
            # Replace slashes by dots
            self.module = dir.replace('/', '.') + '.' + SETTINGS_DEFAULT_NAME + '.' + name
            # Create the directory structure
            dir_path = os.path.join(self.settings_root, dir)
            mkdir_p(dir_path)

            # Add the __init__ for the directories
            # This is required for Python 2.7
            basedir = self.settings_root
            for part in dir.split('/'):
                open(os.path.join(basedir, '__init__.py'), 'w').close()
                basedir = os.path.join(basedir, part)
            open(os.path.join(basedir, '__init__.py'), 'w').close()

            self.settings_file = os.path.join(dir_path, filename)
        else:
            self.module = SETTINGS_DEFAULT_NAME + '.' + name
            self.settings_file = os.path.join(self.settings_root, filename)

    def __enter__(self):
        # Write the settings file, make it importable, and point airflow's
        # logging_config_class at it. Returns the file path.
        with open(self.settings_file, 'w') as handle:
            handle.writelines(self.content)
        sys.path.append(self.settings_root)
        conf.set(
            'core',
            'logging_config_class',
            self.module
        )
        return self.settings_file

    def __exit__(self, *exc_info):
        #shutil.rmtree(self.settings_root)
        # Reset config
        conf.set('core', 'logging_config_class', '')
        sys.path.remove(self.settings_root)
class TestLoggingSettings(unittest.TestCase):
    """Tests for ``airflow.logging_config.configure_logging`` against
    valid, invalid, empty and missing user-supplied settings modules."""

    # Make sure that the configure_logging is not cached
    def setUp(self):
        # Snapshot sys.modules so modules imported during a test can be
        # purged afterwards.
        self.old_modules = dict(sys.modules)

    def tearDown(self):
        # Remove any new modules imported during the test run. This lets us
        # import the same source files for more than one test.
        for m in [m for m in sys.modules if m not in self.old_modules]:
            del sys.modules[m]

    # When we try to load an invalid config file, we expect an error
    def test_loading_invalid_local_settings(self):
        from airflow.logging_config import configure_logging, log
        with settings_context(SETTINGS_FILE_INVALID):
            with patch.object(log, 'warning') as mock_info:
                # Load config
                with self.assertRaises(ValueError):
                    configure_logging()
                mock_info.assert_called_with(
                    'Unable to load the config, contains a configuration error.'
                )

    def test_loading_valid_complex_local_settings(self):
        # Test what happens when the config is somewhere in a subfolder
        module_structure = 'etc.airflow.config'
        dir_structure = module_structure.replace('.', '/')
        with settings_context(SETTINGS_FILE_VALID, dir_structure):
            from airflow.logging_config import configure_logging, log
            with patch.object(log, 'info') as mock_info:
                configure_logging()
                mock_info.assert_called_with(
                    'Successfully imported user-defined logging config from %s',
                    'etc.airflow.config.{}.LOGGING_CONFIG'.format(
                        SETTINGS_DEFAULT_NAME
                    )
                )

    # When we try to load a valid config
    def test_loading_valid_local_settings(self):
        with settings_context(SETTINGS_FILE_VALID):
            from airflow.logging_config import configure_logging, log
            with patch.object(log, 'info') as mock_info:
                configure_logging()
                mock_info.assert_called_with(
                    'Successfully imported user-defined logging config from %s',
                    '{}.LOGGING_CONFIG'.format(
                        SETTINGS_DEFAULT_NAME
                    )
                )

    # When we load an empty file, it should go to default
    def test_loading_no_local_settings(self):
        with settings_context(SETTINGS_FILE_EMPTY):
            from airflow.logging_config import configure_logging
            with self.assertRaises(ImportError):
                configure_logging()

    # When the key is not available in the configuration
    def test_when_the_config_key_does_not_exists(self):
        from airflow import logging_config
        conf_get = conf.get

        def side_effect(*args):
            # Simulate only the [core] logging_config_class key missing;
            # every other option reads through to the real config.
            if args[1] == 'logging_config_class':
                raise AirflowConfigException
            else:
                return conf_get(*args)

        logging_config.conf.get = mock.Mock(side_effect=side_effect)
        with patch.object(logging_config.log, 'debug') as mock_debug:
            logging_config.configure_logging()
            mock_debug.assert_any_call(
                'Could not find key logging_config_class in config'
            )

    # Just default
    def test_loading_local_settings_without_logging_config(self):
        from airflow.logging_config import configure_logging, log
        with patch.object(log, 'debug') as mock_info:
            configure_logging()
            mock_info.assert_called_with(
                'Unable to load custom logging, using default config instead'
            )

    def test_1_9_config(self):
        # 'file.task' is the Airflow 1.9 task-log-reader name; loading the
        # config should warn and rewrite it to 'task'.
        from airflow.logging_config import configure_logging
        conf.set('core', 'task_log_reader', 'file.task')
        try:
            with self.assertWarnsRegex(DeprecationWarning, r'file.task'):
                configure_logging()
            self.assertEqual(conf.get('core', 'task_log_reader'), 'task')
        finally:
            conf.remove_option('core', 'task_log_reader', remove_default=False)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
import wx
# Command ids for the ListBox demo's buttons.
ID_NEW = 1
ID_RENAME = 2
ID_CLEAR = 3
ID_DELETE = 4
class ListBox(wx.Frame):
    """Frame demonstrating a wx.ListBox managed with New/Rename/Delete/Clear
    buttons (double-clicking an item also triggers rename)."""

    def __init__(self, parent, id, title):
        wx.Frame.__init__(self, parent, id, title, size=(350, 220))

        panel = wx.Panel(self, -1)
        hbox = wx.BoxSizer(wx.HORIZONTAL)

        self.listbox = wx.ListBox(panel, -1)
        hbox.Add(self.listbox, 1, wx.EXPAND | wx.ALL, 20)

        btnPanel = wx.Panel(panel, -1)
        vbox = wx.BoxSizer(wx.VERTICAL)
        new = wx.Button(btnPanel, ID_NEW, 'New', size=(90, 30))
        ren = wx.Button(btnPanel, ID_RENAME, 'Rename', size=(90, 30))
        dlt = wx.Button(btnPanel, ID_DELETE, 'Delete', size=(90, 30))
        clr = wx.Button(btnPanel, ID_CLEAR, 'Clear', size=(90, 30))

        self.Bind(wx.EVT_BUTTON, self.NewItem, id=ID_NEW)
        self.Bind(wx.EVT_BUTTON, self.OnRename, id=ID_RENAME)
        self.Bind(wx.EVT_BUTTON, self.OnDelete, id=ID_DELETE)
        self.Bind(wx.EVT_BUTTON, self.OnClear, id=ID_CLEAR)
        self.Bind(wx.EVT_LISTBOX_DCLICK, self.OnRename)

        vbox.Add((-1, 20))
        vbox.Add(new)
        vbox.Add(ren, 0, wx.TOP, 5)
        vbox.Add(dlt, 0, wx.TOP, 5)
        vbox.Add(clr, 0, wx.TOP, 5)

        btnPanel.SetSizer(vbox)
        # Sizer proportions must be integers; the original passed 0.6, which
        # modern wxPython (Phoenix) rejects with a TypeError.
        hbox.Add(btnPanel, 1, wx.EXPAND | wx.RIGHT, 20)
        panel.SetSizer(hbox)

        self.Centre()
        self.Show(True)

    def NewItem(self, event):
        """Prompt for a new item and append it to the list."""
        text = wx.GetTextFromUser('Enter a new item', 'Insert dialog')
        if text != '':
            self.listbox.Append(text)

    def OnRename(self, event):
        """Rename the selected item; no-op when nothing is selected."""
        sel = self.listbox.GetSelection()
        if sel == -1:
            # GetSelection() returns wx.NOT_FOUND (-1) with no selection;
            # the original crashed here calling GetString(-1).
            return
        text = self.listbox.GetString(sel)
        renamed = wx.GetTextFromUser('Rename item', 'Rename dialog', text)
        if renamed != '':
            self.listbox.Delete(sel)
            self.listbox.Insert(renamed, sel)

    def OnDelete(self, event):
        """Delete the selected item, if any."""
        sel = self.listbox.GetSelection()
        if sel != -1:
            self.listbox.Delete(sel)

    def OnClear(self, event):
        """Remove all items from the list."""
        self.listbox.Clear()
# Create the application object, build the frame, and run the event loop.
app = wx.App()
ListBox(None, -1, 'ListBox')
app.MainLoop()
|
'''
Copyright 2011 SRI International
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import bencode, sign, tests, util, verify, errors
__all__ = ["bencode", "sign", "tests", "util", "verify", "errors", "cmd"]
|
from .common import random_str
from rancher import ApiError
from .conftest import wait_for
import time
import pytest
def test_workload_image_change_private_registry(admin_pc):
    """A workload's imagePullSecrets must follow the registry that matches
    the container image (docker.io credential first, quay.io after update)."""
    client = admin_pc.client

    # Credential for the default docker.io registry.
    registry1_name = random_str()
    registries = {'index.docker.io': {
        'username': 'testuser',
        'password': 'foobarbaz',
    }}
    registry1 = client.create_dockerCredential(name=registry1_name,
                                               registries=registries)
    assert registry1.name == registry1_name

    # Second credential for quay.io.
    registry2_name = random_str()
    registries = {'quay.io': {
        'username': 'testuser',
        'password': 'foobarbaz',
    }}
    registry2 = client.create_dockerCredential(name=registry2_name,
                                               registries=registries)

    ns = admin_pc.cluster.client.create_namespace(
        name=random_str(),
        projectId=admin_pc.project.id)
    name = random_str()
    # Unqualified image -> docker.io, so registry1 should be picked up.
    workload = client.create_workload(
        name=name,
        namespaceId=ns.id,
        scale=1,
        containers=[{
            'name': 'one',
            'image': 'testuser/testimage',
        }])
    assert workload.name == name
    assert len(workload.imagePullSecrets) == 1
    for secret in workload.imagePullSecrets:
        assert secret['name'] == registry1_name

    # Switching the image to quay.io must swap the pull secret to registry2.
    containers = [{
        'name': 'one',
        'image': 'quay.io/testuser/testimage',
    }]

    workload = client.update(workload, containers=containers)
    for container in workload.containers:
        assert container['image'] == 'quay.io/testuser/testimage'
    assert len(workload.imagePullSecrets) == 1
    assert workload.imagePullSecrets[0]['name'] == registry2_name

    client.delete(registry1)
    client.delete(registry2)
    client.delete(ns)
def test_workload_ports_change(admin_pc):
    """Adding/removing container ports must toggle the generated service
    between headless (no cluster IP) and ClusterIP."""
    client = admin_pc.client

    ns = admin_pc.cluster.client.create_namespace(name=random_str(),
                                                  projectId=admin_pc.
                                                  project.id)

    # create workload with no ports assigned
    # and verify headless service is created
    name = random_str()
    workload = client.create_workload(
        name=name,
        namespaceId=ns.id,
        scale=1,
        containers=[{
            'name': 'one',
            'image': 'nginx',
        }])
    svc = wait_for_service_create(client, name)
    assert svc.clusterIp is None
    assert svc.name == workload.name
    assert svc.kind == "ClusterIP"

    # update workload with port, and validate cluster ip is set
    ports = [{
        'sourcePort': '0',
        'containerPort': '80',
        'kind': 'ClusterIP',
        'protocol': 'TCP', }]
    # NOTE: the original ended this statement (and the one below) with a
    # stray trailing comma, silently wrapping the unused result in a 1-tuple.
    client.update(workload,
                  namespaceId=ns.id,
                  scale=1,
                  containers=[{
                      'name': 'one',
                      'image': 'nginx',
                      'ports': ports,
                  }])
    svc = wait_for_service_cluserip_set(client, name)
    assert svc.clusterIp is not None

    # update workload with no ports, and validate cluster ip is reset
    client.update(workload,
                  namespaceId=ns.id,
                  scale=1,
                  containers=[{
                      'name': 'one',
                      'image': 'nginx',
                      'ports': [],
                  }])
    svc = wait_for_service_cluserip_reset(client, name)
    assert svc.clusterIp is None

    client.delete(ns)
def test_workload_probes(admin_pc):
    """Liveness/readiness probe settings round-trip through workload create
    and update."""
    client = admin_pc.client
    ns = admin_pc.cluster.client.create_namespace(
        name=random_str(),
        projectId=admin_pc.project.id)

    # create workload with probes
    name = random_str()
    # HTTP liveness probe plus TCP readiness probe, both against localhost.
    container = {
        'name': 'one',
        'image': 'nginx',
        'livenessProbe': {
            'failureThreshold': 3,
            'initialDelaySeconds': 10,
            'periodSeconds': 2,
            'successThreshold': 1,
            'tcp': False,
            'timeoutSeconds': 2,
            'host': 'localhost',
            'path': '/healthcheck',
            'port': 80,
            'scheme': 'HTTP',
        },
        'readinessProbe': {
            'failureThreshold': 3,
            'initialDelaySeconds': 10,
            'periodSeconds': 2,
            'successThreshold': 1,
            'timeoutSeconds': 2,
            'tcp': True,
            'host': 'localhost',
            'port': 80,
        },
    }
    workload = client.create_workload(name=name,
                                      namespaceId=ns.id,
                                      scale=1,
                                      containers=[container])
    assert workload.containers[0].livenessProbe.host == 'localhost'
    assert workload.containers[0].readinessProbe.host == 'localhost'

    # Probe fields must be updatable as well.
    container['livenessProbe']['host'] = 'updatedhost'
    container['readinessProbe']['host'] = 'updatedhost'

    workload = client.update(workload,
                             namespaceId=ns.id,
                             scale=1,
                             containers=[container])
    assert workload.containers[0].livenessProbe.host == 'updatedhost'
    assert workload.containers[0].readinessProbe.host == 'updatedhost'

    client.delete(ns)
def test_workload_scheduling(admin_pc):
    """Workload scheduling options can be set at creation and updated."""
    client = admin_pc.client

    ns = admin_pc.cluster.client.create_namespace(name=random_str(),
                                                  projectId=admin_pc.
                                                  project.id)

    name = random_str()
    workload = client.create_workload(
        name=name,
        namespaceId=ns.id,
        scale=1,
        scheduling={
            "scheduler": "some-scheduler",
        },
        containers=[{
            'name': 'one',
            'image': 'nginx',
        }])
    assert workload.scheduling.scheduler == "some-scheduler"

    # The original ended this call with a stray trailing comma, turning
    # `workload` into a 1-tuple and forcing a confusing `workload[0]` below.
    workload = client.update(workload,
                             namespaceId=ns.id,
                             scale=1,
                             scheduling={
                                 "scheduler": "test-scheduler",
                             },
                             containers=[{
                                 'name': 'one',
                                 'image': 'nginx',
                             }])
    assert workload.scheduling.scheduler == "test-scheduler"

    client.delete(ns)
def test_statefulset_workload_volumemount_subpath(admin_pc):
    """volumeMounts.subPath must be rejected (HTTP 422) when it is absolute
    or contains '..', on both create and update of a statefulset.

    NOTE(review): this test uses the hard-coded 'default' namespace and does
    not delete the workload it creates — confirm that is intentional.
    """
    client = admin_pc.client

    # setup
    name = random_str()

    # valid volumeMounts
    volumeMounts = [{
        'name': 'vol1',
        'mountPath': 'var/lib/mysql',
        'subPath': 'mysql',
    }]
    containers = [{
        'name': 'mystatefulset',
        'image': 'ubuntu:xenial',
        'volumeMounts': volumeMounts,
    }]

    # invalid volumeMounts
    volumeMounts_one = [{
        'name': 'vol1',
        'mountPath': 'var/lib/mysql',
        'subPath': '/mysql',
    }]
    containers_one = [{
        'name': 'mystatefulset',
        'image': 'ubuntu:xenial',
        'volumeMounts': volumeMounts_one,
    }]
    volumeMounts_two = [{
        'name': 'vol1',
        'mountPath': 'var/lib/mysql',
        'subPath': '../mysql',
    }]
    containers_two = [{
        'name': 'mystatefulset',
        'image': 'ubuntu:xenial',
        'volumeMounts': volumeMounts_two,
    }]

    statefulSetConfig = {
        'podManagementPolicy': 'OrderedReady',
        'revisionHistoryLimit': 10,
        'strategy': 'RollingUpdate',
        'type': 'statefulSetConfig',
    }

    volumes = [{
        'name': 'vol1',
        'persistentVolumeClaim': {
            'persistentVolumeClaimId': "default: myvolume",
            'readOnly': False,
            'type': 'persistentVolumeClaimVolumeSource',
        },
        'type': 'volume',
    }]

    # 1. validate volumeMounts.subPath when workload creating
    # invalid volumeMounts.subPath: absolute path
    with pytest.raises(ApiError) as e:
        client.create_workload(name=name,
                               namespaceId='default',
                               scale=1,
                               containers=containers_one,
                               statefulSetConfig=statefulSetConfig,
                               volumes=volumes)
    assert e.value.error.status == 422

    # invalid volumeMounts.subPath: contains '..'
    with pytest.raises(ApiError) as e:
        client.create_workload(name=name,
                               namespaceId='default',
                               scale=1,
                               containers=containers_two,
                               statefulSetConfig=statefulSetConfig,
                               volumes=volumes)
    assert e.value.error.status == 422

    # 2. validate volumeMounts.subPath when workload update
    # create a validate workload then update
    workload = client.create_workload(name=name,
                                      namespaceId='default',
                                      scale=1,
                                      containers=containers,
                                      statefulSetConfig=statefulSetConfig,
                                      volumes=volumes)
    with pytest.raises(ApiError) as e:
        client.update(workload,
                      namespaceId='default',
                      scale=1,
                      containers=containers_one,
                      statefulSetConfig=statefulSetConfig,
                      volumes=volumes)
    assert e.value.error.status == 422
    with pytest.raises(ApiError) as e:
        client.update(workload,
                      namespaceId='default',
                      scale=1,
                      containers=containers_two,
                      statefulSetConfig=statefulSetConfig,
                      volumes=volumes)
    assert e.value.error.status == 422
def test_workload_redeploy(admin_pc, remove_resource):
    """The 'redeploy' action must (re)set the cattle.io/timestamp
    annotation on the workload."""
    client = admin_pc.client

    ns = admin_pc.cluster.client.create_namespace(
        name=random_str(),
        projectId=admin_pc.project.id)
    remove_resource(ns)

    name = random_str()
    workload = client.create_workload(
        name=name,
        namespaceId=ns.id,
        scale=1,
        containers=[{
            'name': 'one',
            'image': 'nginx',
        }])
    remove_resource(workload)

    client.action(workload, 'redeploy')

    def _timestamp_reset():
        # Refetch the workload and check the annotation is present.
        workloads = client.list_workload(uuid=workload.uuid).data
        return len(workloads) > 0 and workloads[0].annotations[
            'cattle.io/timestamp'] is not None

    wait_for(_timestamp_reset,
             fail_handler=lambda: 'Timed out waiting for timestamp reset')
def wait_for_service_create(client, name, timeout=30):
    """Poll every 0.5s until a ClusterIP service named ``name`` exists.

    :returns: the service object
    :raises Exception: after ``timeout`` seconds without the service
    """
    start = time.time()
    services = client.list_service(name=name, kind="ClusterIP")
    while len(services) == 0:
        time.sleep(.5)
        services = client.list_service(name=name, kind="ClusterIP")
        # NOTE(review): timeout is checked after the refresh, so this can
        # raise even when the final listing just succeeded — confirm.
        if time.time() - start > timeout:
            raise Exception('Timeout waiting for workload service')
    return services.data[0]
def wait_for_service_cluserip_set(client, name, timeout=30):
    """Poll until the ClusterIP service named ``name`` has a cluster IP.

    (The 'cluserip' typo in the name is kept for existing callers.)

    :raises Exception: after ``timeout`` seconds
    """
    start = time.time()
    services = client.list_service(name=name, kind="ClusterIP")
    while len(services) == 0 or services.data[0].clusterIp is None:
        time.sleep(.5)
        services = client.list_service(name=name, kind="ClusterIP")
        if time.time() - start > timeout:
            raise Exception('Timeout waiting for workload service')
    return services.data[0]
def wait_for_service_cluserip_reset(client, name, timeout=30):
    """Poll until the ClusterIP service named ``name`` has no cluster IP.

    (The 'cluserip' typo in the name is kept for existing callers.)

    :raises Exception: after ``timeout`` seconds
    """
    start = time.time()
    services = client.list_service(name=name, kind="ClusterIP")
    while len(services) == 0 or services.data[0].clusterIp is not None:
        time.sleep(.5)
        services = client.list_service(name=name, kind="ClusterIP")
        if time.time() - start > timeout:
            raise Exception('Timeout waiting for workload service')
    return services.data[0]
|
import mailbox
import os
# Show each message's subdirectory ('new'/'cur') before the update.
print('Before:')
mbox = mailbox.Maildir('Example')
mbox.lock()
try:
    for message_id, message in mbox.iteritems():
        print('{:6} "{}"'.format(message.get_subdir(),
                                 message['subject']))
        # Move every message from 'new' to 'cur'.
        message.set_subdir('cur')
        # Tell the mailbox to update the message.
        mbox[message_id] = message
finally:
    mbox.flush()
    mbox.close()

# Re-open the maildir and show its state after the move.
print('\nAfter:')
mbox = mailbox.Maildir('Example')
for message in mbox:
    print('{:6} "{}"'.format(message.get_subdir(),
                             message['subject']))

# Dump the on-disk layout of the maildir.
print()
for dirname, subdirs, files in os.walk('Example'):
    print(dirname)
    print(' Directories:', subdirs)
    for name in files:
        fullname = os.path.join(dirname, name)
        print(fullname)
|
"""This example gets all proposal line items.
"""
from googleads import ad_manager
def main(client):
    """Page through all proposal line items and print their id and name."""
    # Initialize appropriate service.
    proposal_line_item_service = client.GetService(
        'ProposalLineItemService', version='v202111')

    # Create a statement to select proposal line items.
    statement = ad_manager.StatementBuilder(version='v202111')

    # Retrieve a small amount of proposal line items at a time, paging
    # through until all proposal line items have been retrieved.
    while True:
        response = proposal_line_item_service.getProposalLineItemsByStatement(
            statement.ToStatement())
        if 'results' in response and len(response['results']):
            for proposal_line_item in response['results']:
                # Print out some information for each proposal line item.
                print('Proposal line item with ID "%d" and name "%s" was found.\n' %
                      (proposal_line_item['id'], proposal_line_item['name']))
            # Advance to the next page.
            statement.offset += statement.limit
        else:
            break

    print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
    # Initialize client object.
    ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
    main(ad_manager_client)
|
__source__ = 'https://leetcode.com/problems/all-oone-data-structure/#/description'
import unittest
class Node(object):
    """Doubly-linked-list node: a count bucket holding a set of keys."""

    def __init__(self, value, keys):
        # Payload: the count this bucket represents and the keys at it.
        self.value, self.keys = value, keys
        # Neighbour links, filled in when the node is spliced into a list.
        self.prev = self.next = None
class LinkedList(object):
    """Doubly linked list of count buckets, delimited by sentinel nodes."""

    def __init__(self):
        # Two sentinels; an empty list is simply head <-> tail.
        self.head = Node(0, set())
        self.tail = Node(0, set())
        self.head.next = self.tail
        self.tail.prev = self.head

    def insert(self, pos, node):
        """Splice ``node`` in just before ``pos`` and return it."""
        before = pos.prev
        node.prev = before
        node.next = pos
        before.next = node
        pos.prev = node
        return node

    def erase(self, node):
        """Unlink ``node`` from the list."""
        node.prev.next = node.next
        node.next.prev = node.prev

    def empty(self):
        """True when only the sentinels remain."""
        return self.head.next is self.tail

    def begin(self):
        """First real node (== end() when the list is empty)."""
        return self.head.next

    def end(self):
        """Past-the-end sentinel."""
        return self.tail

    def front(self):
        """First real node."""
        return self.head.next

    def back(self):
        """Last real node."""
        return self.tail.prev
class AllOne(object):
    """All-O(1) structure: inc/dec per-key counts, query a min/max key.

    Keys with equal count share one bucket (a node of a doubly linked list
    kept sorted by count), which makes every operation O(1).
    """

    def __init__(self):
        """Initialize your data structure here."""
        # key -> the bucket (list node) currently holding that key
        self.bucket_of_key = {}
        # buckets ordered by ascending count
        self.buckets = LinkedList()

    def inc(self, key):
        """
        Inserts a new key <Key> with value 1. Or increments an existing key by 1.
        :type key: str
        :rtype: void
        """
        if key not in self.bucket_of_key:
            # New keys start from a transient value-0 bucket at the front.
            self.bucket_of_key[key] = self.buckets.insert(self.buckets.begin(), Node(0, set([key])))

        bucket, next_bucket = self.bucket_of_key[key], self.bucket_of_key[key].next
        if next_bucket is self.buckets.end() or next_bucket.value > bucket.value + 1:
            next_bucket = self.buckets.insert(next_bucket, Node(bucket.value + 1, set()))
        next_bucket.keys.add(key)
        self.bucket_of_key[key] = next_bucket

        bucket.keys.remove(key)
        if not bucket.keys:
            self.buckets.erase(bucket)

    def dec(self, key):
        """
        Decrements an existing key by 1. If Key's value is 1, remove it from the data structure.
        :type key: str
        :rtype: void
        """
        if key not in self.bucket_of_key:
            return

        bucket, prev_bucket = self.bucket_of_key[key], self.bucket_of_key[key].prev
        self.bucket_of_key.pop(key, None)
        if bucket.value > 1:
            if bucket is self.buckets.begin() or prev_bucket.value < bucket.value - 1:
                prev_bucket = self.buckets.insert(bucket, Node(bucket.value - 1, set()))
            prev_bucket.keys.add(key)
            self.bucket_of_key[key] = prev_bucket

        bucket.keys.remove(key)
        if not bucket.keys:
            self.buckets.erase(bucket)

    def getMaxKey(self):
        """
        Returns one of the keys with maximal value.
        :rtype: str
        """
        if self.buckets.empty():
            return ""
        # next(iter(...)) works on Python 2 and 3; the original used
        # iter(...).next(), which raises AttributeError on Python 3.
        return next(iter(self.buckets.back().keys))

    def getMinKey(self):
        """
        Returns one of the keys with Minimal value.
        :rtype: str
        """
        if self.buckets.empty():
            return ""
        return next(iter(self.buckets.front().keys))
class TestMethods(unittest.TestCase):
    """Placeholder test case; AllOne has no dedicated assertions here."""
    def test_Local(self):
        self.assertEqual(1, 1)
# Run the (placeholder) unit tests when executed directly.
if __name__ == '__main__':
    unittest.main()
Java = '''
Main idea is to maintain a list of Bucket's,
each Bucket contains all keys with the same count.
head and tail can ensure both getMaxKey() and getMaxKey() be done in O(1).
keyCountMap maintains the count of keys, countBucketMap provides O(1) access to a specific Bucket with given count.
Deleting and adding a Bucket in the Bucket list cost O(1), so both inc() and dec() take strict O(1) time.
public class AllOne {
// maintain a doubly linked list of Buckets
private Bucket head;
private Bucket tail;
// for accessing a specific Bucket among the Bucket list in O(1) time
private Map<Integer, Bucket> countBucketMap;
// keep track of count of keys
private Map<String, Integer> keyCountMap;
// each Bucket contains all the keys with the same count
private class Bucket {
int count;
Set<String> keySet;
Bucket next;
Bucket pre;
public Bucket(int cnt) {
count = cnt;
keySet = new HashSet<>();
}
}
/** Initialize your data structure here. */
public AllOne() {
head = new Bucket(Integer.MIN_VALUE);
tail = new Bucket(Integer.MAX_VALUE);
head.next = tail;
tail.pre = head;
countBucketMap = new HashMap<>();
keyCountMap = new HashMap<>();
}
/** Inserts a new key <Key> with value 1. Or increments an existing key by 1. */
public void inc(String key) {
if (keyCountMap.containsKey(key)) {
changeKey(key, 1);
} else {
keyCountMap.put(key, 1);
if (head.next.count != 1)
addBucketAfter(new Bucket(1), head);
head.next.keySet.add(key);
countBucketMap.put(1, head.next);
}
}
/** Decrements an existing key by 1. If Key's value is 1, remove it from the data structure. */
public void dec(String key) {
if (keyCountMap.containsKey(key)) {
int count = keyCountMap.get(key);
if (count == 1) {
keyCountMap.remove(key);
removeKeyFromBucket(countBucketMap.get(count), key);
} else {
changeKey(key, -1);
}
}
}
/** Returns one of the keys with maximal value. */
public String getMaxKey() {
return tail.pre == head ? "" : (String) tail.pre.keySet.iterator().next();
}
/** Returns one of the keys with Minimal value. */
public String getMinKey() {
return head.next == tail ? "" : (String) head.next.keySet.iterator().next();
}
// helper function to make change on given key according to offset
private void changeKey(String key, int offset) {
int count = keyCountMap.get(key);
keyCountMap.put(key, count + offset);
Bucket curBucket = countBucketMap.get(count);
Bucket newBucket;
if (countBucketMap.containsKey(count + offset)) {
// target Bucket already exists
newBucket = countBucketMap.get(count + offset);
} else {
// add new Bucket
newBucket = new Bucket(count + offset);
countBucketMap.put(count + offset, newBucket);
addBucketAfter(newBucket, offset == 1 ? curBucket : curBucket.pre);
}
newBucket.keySet.add(key);
removeKeyFromBucket(curBucket, key);
}
private void removeKeyFromBucket(Bucket bucket, String key) {
bucket.keySet.remove(key);
if (bucket.keySet.size() == 0) {
removeBucketFromList(bucket);
countBucketMap.remove(bucket.count);
}
}
private void removeBucketFromList(Bucket bucket) {
bucket.pre.next = bucket.next;
bucket.next.pre = bucket.pre;
bucket.next = null;
bucket.pre = null;
}
// add newBucket after preBucket
private void addBucketAfter(Bucket newBucket, Bucket preBucket) {
newBucket.pre = preBucket;
newBucket.next = preBucket.next;
preBucket.next.pre = newBucket;
preBucket.next = newBucket;
}
}
'''
|
from itertools import ifilter
from cinderclient.v1 import client as cinder_client
from cinderclient import exceptions as cinder_exc
from cloudferrylib.base import storage
from cloudferrylib.os.identity import keystone
from cloudferrylib.os.storage import filters as cinder_filters
from cloudferrylib.utils import filters
from cloudferrylib.utils import log
from cloudferrylib.utils import proxy_client
from cloudferrylib.utils import retrying
from cloudferrylib.utils import utils
import re
# Extracts the host part from a URL like 'scheme://host:port/path'.
RE_EXTRACT_HOST = re.compile(r'//([^:^/]*)')

# Cinder volume statuses used throughout this module.
AVAILABLE = 'available'
IN_USE = "in-use"

# Name of the cinder volume service.
CINDER_VOLUME = "cinder-volume"

LOG = log.getLogger(__name__)

# Common column / attribute names used for volumes and DB rows.
ID = 'id'
DISPLAY_NAME = 'display_name'
PROJECT_ID = 'project_id'
STATUS = 'status'
TENANT_ID = 'tenant_id'
USER_ID = 'user_id'
DELETED = 'deleted'
HOST = 'host'

# Tables excluded from processing — consumers are outside this chunk;
# presumably skipped during DB copy (TODO confirm).
IGNORED_TBL_LIST = ('quotas', 'quota_usages')

# Quota-related cinder DB tables.
QUOTA_TABLES = (
    'quotas',
    'quota_classes',
    'quota_usages',
)

# Marker for the source cloud position (compared against cloud.position).
SRC = 'src'

# Unique-key columns per cinder DB table — presumably used to deduplicate
# rows when merging databases (consumers outside this chunk; confirm).
TABLE_UNIQ_KEYS = {
    'volumes': ['id'],
    'quotas': ['project_id', 'resource'],
    'quota_classes': ['class_name', 'resource'],
    'quota_usages': ['project_id', 'resource'],
    'reservations': ['project_id', 'resource', 'usage_id'],
    'volume_metadata': ['volume_id', 'key'],
    'volume_glance_metadata': ['volume_id', 'snapshot_id', 'key'],
}

# Statuses accepted by filter_volumes() as migratable.
VALID_STATUSES = ['available', 'in-use', 'attaching', 'detaching']

# Metadata key set on destination volumes pointing back at the source
# volume id (see create_volume_from_volume / get_migrated_volume).
MIGRATED_VOLUMES_METADATA_KEY = 'src_volume_id'
class CinderStorage(storage.Storage):
"""Implements basic functionality around cinder client"""
    def __init__(self, config, cloud):
        """
        :param config: cloud-specific configuration (``cloud``/``mysql``)
        :param cloud: cloud object providing resources and DB connectors
        """
        super(CinderStorage, self).__init__(config)
        self.ssh_host = config.cloud.ssh_host
        # Fall back to the SSH host when no dedicated MySQL host is set.
        self.mysql_host = config.mysql.db_host \
            if config.mysql.db_host else self.ssh_host
        self.cloud = cloud
        self.identity_client = cloud.resources[utils.IDENTITY_RESOURCE]
        self.mysql_connector = cloud.mysql_connector('cinder')
        # Lazily built in get_filter().
        self.volume_filter = None
        # Tenant scope for quota reads; set in _read_info_resources().
        self.filter_tenant_id = None
    @property
    def cinder_client(self):
        """A proxied cinder client authenticated with the configured tenant."""
        return self.proxy(self.get_client(self.config), self.config)
    def get_client(self, params=None, tenant=None):
        """Build a cinder v1 client.

        :param params: configuration to use instead of ``self.config``
        :param tenant: tenant name overriding the configured one
        """
        params = params or self.config

        return cinder_client.Client(
            params.cloud.user,
            params.cloud.password,
            tenant or params.cloud.tenant,
            params.cloud.auth_url,
            cacert=params.cloud.cacert,
            insecure=params.cloud.insecure,
            region_name=params.cloud.region
        )
def get_filter(self):
if self.volume_filter is None:
with open(self.config.migrate.filter_path, 'r') as f:
filter_yaml = filters.FilterYaml(f)
filter_yaml.read()
self.volume_filter = cinder_filters.CinderFilters(
self.cinder_client, filter_yaml)
return self.volume_filter
def read_info(self, target='volumes', **kwargs):
if target == 'volumes':
return self._read_info_volumes(**kwargs)
if target == 'resources':
return self._read_info_resources(**kwargs)
def _read_info_resources(self, **kwargs):
res = dict()
self.filter_tenant_id = kwargs.get('tenant_id', None)
res['quotas'] = self._read_info_quota()
return res
    def _read_info_volumes(self, **kwargs):
        """Collect migratable volumes (and optionally their snapshots and
        the raw cinder DB tables), keyed by volume id."""
        info = {utils.VOLUMES_TYPE: {}}
        for vol in self.get_volumes_list(search_opts=kwargs):
            # Only stable volumes are migrated here.
            if vol.status not in ['available', 'in-use']:
                continue
            volume = self.convert_volume(vol, self.config, self.cloud)
            snapshots = {}
            if self.config.migrate.keep_volume_snapshots:
                search_opts = {'volume_id': volume['id']}
                for snap in self.get_snapshots_list(search_opts=search_opts):
                    snapshot = self.convert_snapshot(snap,
                                                     volume,
                                                     self.config,
                                                     self.cloud)
                    snapshots[snapshot['id']] = snapshot
            info[utils.VOLUMES_TYPE][vol.id] = {utils.VOLUME_BODY: volume,
                                                'snapshots': snapshots,
                                                utils.META_INFO: {
                                                }}
        if self.config.migrate.keep_volume_storage:
            # Dump volume tables from the cinder DB to /tmp/volumes.
            info['volumes_db'] = {utils.VOLUMES_TYPE: '/tmp/volumes'}

            # cleanup db
            self.cloud.ssh_util.execute('rm -rf /tmp/volumes',
                                        host_exec=self.mysql_host)

            for table_name, file_name in info['volumes_db'].iteritems():
                self.download_table_from_db_to_file(table_name, file_name)
        return info
    def _read_info_quota(self):
        """Read per-tenant cinder quotas.

        On the source cloud with tenant filtering enabled, only the admin,
        service and filtered tenants are inspected; otherwise every tenant
        except the service tenant is.

        :returns: list of dicts with quota values plus ``tenant_id`` and
            ``tenant_name`` bookkeeping keys
        """
        admin_tenant_id = self.identity_client.get_tenant_id_by_name(
            self.config.cloud.tenant)
        service_tenant_id = self.identity_client.get_tenant_id_by_name(
            self.config.cloud.service_tenant)
        if self.cloud.position == 'src' and self.filter_tenant_id:
            # Restrict to admin + service + explicitly filtered tenants.
            tmp_list = \
                [admin_tenant_id, service_tenant_id]
            tmp_list.extend(self.filter_tenant_id)
            tmp_list = list(set(tmp_list))
            tenant_ids = \
                [tenant.id for tenant in
                 self.identity_client.get_tenants_list()
                 if tenant.id in tmp_list]
        else:
            # Everything except the service tenant.
            tenant_ids = \
                [tenant.id for tenant in
                 self.identity_client.get_tenants_list()
                 if tenant.id != service_tenant_id]
        quotas = []
        for tenant_id in tenant_ids:
            quota = self.cinder_client.quotas.get(tenant_id)
            quota_conv = self.convert_quota(quota)
            quota_conv['tenant_id'] = tenant_id
            quota_conv['tenant_name'] = self.identity_client\
                .try_get_tenant_name_by_id(tenant_id)
            quotas.append(quota_conv)
        return quotas
    def _deploy_quota(self, quotas):
        """Apply quota values to the matching destination tenants.

        Only keys the destination cinder reports in its quota defaults are
        sent; bookkeeping keys are stripped first.
        """
        quotas_res = []
        for quota in quotas:
            tenant_id = self.identity_client\
                .get_tenant_id_by_name(quota['tenant_name'])
            # Strip bookkeeping fields before calling cinder.
            quota_arg = {k: v for k, v in quota.items()
                         if k not in ['tenant_id', 'tenant_name']}
            quota_defaults = self.convert_quota(
                self.cinder_client.quotas.defaults(tenant_id))
            # Keep only keys the destination knows about.
            quota_res = {k: quota_arg[k] for k in list(set(quota_arg) &
                                                      set(quota_defaults))}
            quota_obj = self.convert_quota(
                self.cinder_client.quotas.update(tenant_id, **quota_res))
            quotas_res.append(quota_obj)
        return quotas_res
def _deploy_resources(self, info):
res = {
'volumes': {
'quotas': self._deploy_quota(info['quotas'])
}
}
return res
def deploy(self, info, target='volumes'):
if target == 'resources':
return self._deploy_resources(info)
if target == 'volumes':
return self.deploy_volumes(info)
def attach_volume_to_instance(self, volume_info):
if 'instance' in volume_info[utils.META_INFO]:
if volume_info[utils.META_INFO]['instance']:
self.attach_volume(
volume_info[utils.VOLUME_BODY]['id'],
volume_info[utils.META_INFO]['instance']['instance']['id'],
volume_info[utils.VOLUME_BODY]['device'])
    def filter_volumes(self, volumes):
        """Apply configured volume filters (source cloud only) and keep
        only volumes whose status is in VALID_STATUSES.

        :param volumes: iterable of volume objects or plain dicts
        """
        # Filtering rules only apply when reading from the source cloud.
        filtering_enabled = self.cloud.position == SRC

        if filtering_enabled:
            flts = self.get_filter().get_filters()
            for f in flts:
                volumes = ifilter(f, volumes)
            volumes = list(volumes)

        def get_name(volume):
            # Volumes may be plain dicts (DB rows) or client objects.
            if isinstance(volume, dict):
                return volume.get(DISPLAY_NAME, volume['id'])
            return getattr(volume, DISPLAY_NAME, None) or volume.id

        LOG.info("Filtered volumes: %s",
                 ", ".join((str(get_name(i)) for i in volumes)))

        return [vol for vol in volumes
                if cinder_filters.CinderFilters.get_col(vol, 'status').lower()
                in VALID_STATUSES]
def get_volumes_list(self, detailed=True, search_opts=None):
search_opts = search_opts or {}
search_opts['all_tenants'] = 1
volumes = self.cinder_client.volumes.list(detailed, search_opts)
volumes = self.filter_volumes(volumes)
return volumes
def get_migrated_volume(self, volume_id):
""":returns: volume which was created from another volume using
:create_volume_from_volume: method"""
for v in self.get_volumes_list():
if v.metadata.get(MIGRATED_VOLUMES_METADATA_KEY) == volume_id:
return v
    def get_snapshots_list(self, detailed=True, search_opts=None):
        """List volume snapshots via the cinder API."""
        return self.cinder_client.volume_snapshots.list(detailed, search_opts)
    def create_snapshot(self, volume_id, force=False,
                        display_name=None, display_description=None):
        """Create a snapshot of the given volume.

        :param force: snapshot even when the volume is attached
        """
        return self.cinder_client.volume_snapshots.create(volume_id,
                                                          force,
                                                          display_name,
                                                          display_description)
    def create_volume_from_volume(self, volume, tenant_id):
        """Creates volume based on values from :param volume: and adds
        metadata in order to not copy already copied volumes

        :param volume: CF volume object (dict)
        :param tenant_id: destination tenant that will own the new volume

        :raises: retrying.TimeoutExceeded if volume did not become available
        in migrate.storage_backend_timeout time
        """
        glance = self.cloud.resources[utils.IMAGE_RESOURCE]
        compute = self.cloud.resources[utils.COMPUTE_RESOURCE]
        az_mapper = compute.attr_override
        metadata = volume.get('metadata', {})
        # Tag the new volume with its source id so it is not copied twice
        # (see get_migrated_volume()).
        metadata[MIGRATED_VOLUMES_METADATA_KEY] = volume['id']

        image_id = None
        if volume['bootable']:
            # Bootable volumes are recreated from the matching glance image.
            image_metadata = volume['volume_image_metadata']
            dst_image = glance.get_matching_image(
                uuid=image_metadata['image_id'],
                size=image_metadata['size'],
                name=image_metadata['image_name'],
                checksum=image_metadata['checksum'])
            if dst_image:
                image_id = dst_image.id

        src_az = compute.get_availability_zone(volume['availability_zone'])
        created_volume = self.create_volume(
            size=volume['size'],
            project_id=tenant_id,
            display_name=volume['display_name'],
            display_description=volume['display_description'],
            availability_zone=src_az or az_mapper.get_attr(
                volume, 'availability_zone'),
            metadata=metadata,
            imageRef=image_id)
        # Block until the backend reports the new volume as available.
        timeout = self.config.migrate.storage_backend_timeout
        retryer = retrying.Retry(max_time=timeout,
                                 predicate=lambda v: v.status == 'available',
                                 predicate_retval_as_arg=True,
                                 retry_message="Volume is not available")
        retryer.run(self.get_volume_by_id, created_volume.id)
        return created_volume
    def create_volume(self, size, **kwargs):
        """Creates volume of given size

        :param size: volume size in GB
        :param kwargs: forwarded to cinder ``volumes.create``; ``project_id``
            selects the tenant the volume is created in
        :raises: OverLimit in case quota exceeds for tenant
        """
        cinder = self.cinder_client
        tenant_id = kwargs.get('project_id')

        # if volume needs to be created in non-admin tenant, re-auth is
        # required in that tenant
        if tenant_id:
            identity = self.cloud.resources[utils.IDENTITY_RESOURCE]
            ks = identity.keystone_client
            user = self.config.cloud.user
            # Temporarily grant the admin user access to the target tenant
            # so the re-authenticated client can create the volume there.
            with keystone.AddAdminUserToNonAdminTenant(ks, user, tenant_id):
                tenant = ks.tenants.get(tenant_id)
                cinder = self.proxy(self.get_client(tenant=tenant.name),
                                    self.config)

                with proxy_client.expect_exception(cinder_exc.OverLimit):
                    return cinder.volumes.create(size, **kwargs)
        else:
            with proxy_client.expect_exception(cinder_exc.OverLimit):
                return cinder.volumes.create(size, **kwargs)
def delete_volume(self, volume_id):
volume = self.get_volume_by_id(volume_id)
self.cinder_client.volumes.delete(volume)
    def get_volume_by_id(self, volume_id):
        """Fetch a single volume object by id."""
        return self.cinder_client.volumes.get(volume_id)
    def update_volume(self, volume_id, **kwargs):
        """Update volume attributes via the cinder API."""
        volume = self.get_volume_by_id(volume_id)
        return self.cinder_client.volumes.update(volume, **kwargs)
    def attach_volume(self, volume_id, instance_id, mountpoint, mode='rw'):
        """Mark the volume as attached to ``instance_id`` at ``mountpoint``."""
        volume = self.get_volume_by_id(volume_id)
        return self.cinder_client.volumes.attach(volume,
                                                 instance_uuid=instance_id,
                                                 mountpoint=mountpoint,
                                                 mode=mode)
    def detach_volume(self, volume_id):
        """Mark the volume as detached."""
        return self.cinder_client.volumes.detach(volume_id)
def finish(self, vol):
    """Restore the ``bootable`` flag on a freshly deployed volume.

    Tries the cinder API first; older APIs reject set_bootable with
    BadRequest, in which case the flag is patched directly in the DB.
    """
    volume_id = vol[utils.VOLUME_BODY]['id']
    bootable = vol[utils.VOLUME_BODY]['bootable']
    try:
        with proxy_client.expect_exception(cinder_exc.BadRequest):
            self.cinder_client.volumes.set_bootable(volume_id, bootable)
    except cinder_exc.BadRequest:
        LOG.info("Can't update bootable flag of volume with id = %s "
                 "using API, trying to use DB...", volume_id)
        self.__patch_option_bootable_of_volume(volume_id, bootable)
def upload_volume_to_image(self, volume_id, force, image_name,
                           container_format, disk_format):
    """Export a volume to glance and return ``(response, image_id)``."""
    vol = self.get_volume_by_id(volume_id)
    resp, image = self.cinder_client.volumes.upload_to_image(
        volume=vol,
        force=force,
        image_name=image_name,
        container_format=container_format,
        disk_format=disk_format)
    # The new image id is nested inside the upload response body.
    return resp, image['os-volume_upload_image']['image_id']
def get_status(self, resource_id):
    """Return the current cinder status string for ``resource_id``."""
    volume = self.cinder_client.volumes.get(resource_id)
    return volume.status
def deploy_volumes(self, info):
    """Create destination volumes for every source volume in ``info``.

    :returns: mapping of new (destination) volume id -> source volume id
    """
    new_ids = {}
    for src_id, vol in info[utils.VOLUMES_TYPE].iteritems():
        params = self.convert_to_params(vol)
        created = self.create_volume(**params)
        vol[utils.VOLUME_BODY]['id'] = created.id
        # Wait for cinder to report the volume usable before finishing.
        self.try_wait_for_status(created.id, self.get_status, AVAILABLE)
        self.finish(vol)
        new_ids[created.id] = src_id
    return new_ids
@staticmethod
def convert_quota(quota):
    """Flatten a cinder quota object into a plain dict.

    Private attributes and the client ``manager`` back-reference are
    dropped; only real quota fields survive.
    """
    return {key: value
            for key, value in quota.__dict__.items()
            if not key.startswith('_') and key != 'manager'}
@staticmethod
def convert_volume(vol, cfg, cloud):
    """Serialize a cinder volume object into a plain migration dict.

    Besides generic attributes, fills in backend-specific location data:
    for Ceph, the RBD pool path and storage host; for iSCSI, the libvirt
    block-device path on the compute node the volume is attached to
    (only when it has attachments).

    :param vol: cinder v1-style volume object (display_name, bootable, ...)
    :param cfg: configuration namespace with ``storage`` / ``cloud`` sections
    :param cloud: cloud resource container, used to reach the compute API
    :returns: dict of volume metadata plus ``host`` and ``path``
    """
    compute = cloud.resources[utils.COMPUTE_RESOURCE]
    volume = {
        'id': vol.id,
        'size': vol.size,
        'display_name': vol.display_name,
        'display_description': vol.display_description,
        # cinder can return the literal string u'None'; normalize it.
        'volume_type': (
            None if vol.volume_type == u'None' else vol.volume_type),
        'availability_zone': vol.availability_zone,
        'device': vol.attachments[0][
            'device'] if vol.attachments else None,
        # 'bootable' is reported by the API as a string ('true'/'false').
        'bootable': vol.bootable.lower() == 'true',
        'volume_image_metadata': {},
        'host': None,
        'path': None,
        'project_id': getattr(vol, 'os-vol-tenant-attr:tenant_id'),
        'metadata': vol.metadata
    }
    # Present only for image-backed volumes.
    if 'volume_image_metadata' in vol.__dict__:
        volume['volume_image_metadata'] = {
            'image_id': vol.volume_image_metadata['image_id'],
            'checksum': vol.volume_image_metadata['checksum'],
            'image_name': vol.volume_image_metadata.get('image_name'),
            'size': int(vol.volume_image_metadata.get('size', 0))
        }
    if cfg.storage.backend == utils.CEPH:
        # RBD image path: <pool>/<volume name template><id>.
        volume['path'] = "%s/%s%s" % (
            cfg.storage.rbd_pool, cfg.storage.volume_name_template, vol.id)
        volume['host'] = (cfg.storage.host
                          if cfg.storage.host
                          else cfg.cloud.ssh_host)
    elif vol.attachments and (cfg.storage.backend == utils.ISCSI):
        # Look up the attached instance to learn which hypervisor holds
        # the block device, then resolve the device path via libvirt.
        instance = compute.read_info(
            search_opts={'id': vol.attachments[0]['server_id']})
        instance = instance[utils.INSTANCES_TYPE]
        # NOTE: .values()[0] is Python 2 only (dict views in py3).
        instance_info = instance.values()[0][utils.INSTANCE_BODY]
        volume['host'] = instance_info['host']
        list_disk = utils.get_libvirt_block_info(
            instance_info['instance_name'],
            cfg.cloud.ssh_host,
            instance_info['host'],
            cfg.cloud.ssh_user,
            cfg.cloud.ssh_sudo_password)
        volume['path'] = utils.find_element_by_in(list_disk, vol.id)
    return volume
@staticmethod
def convert_snapshot(snap, volume, cfg, cloud):
    """Serialize a cinder snapshot into a plain migration dict.

    ``volume`` is the already-converted dict of the parent volume; its
    backend path is carried over so Ceph snapshots can be located.
    """
    snapshot = {
        'id': snap.id,
        'volume_id': snap.volume_id,
        'tenant_id': snap.project_id,
        'display_name': snap.display_name,
        'display_description': snap.display_description,
        'created_at': snap.created_at,
        'size': snap.size,
        'vol_path': volume['path'],
    }
    if cfg.storage.backend == utils.CEPH:
        # RBD snapshots live at <volume path>@<snapshot name>.
        snapshot['name'] = "%s%s" % (cfg.storage.snapshot_name_template,
                                     snap.id)
        snapshot['path'] = "%s@%s" % (snapshot['vol_path'],
                                      snapshot['name'])
        snapshot['host'] = cfg.storage.host or cfg.cloud.ssh_host
    return snapshot
@staticmethod
def convert_to_params(vol):
    """Build the kwargs for ``create_volume`` from a migrated volume dict."""
    body = vol[utils.VOLUME_BODY]
    params = {
        'size': body['size'],
        'display_name': body['display_name'],
        'display_description': body['display_description'],
        'volume_type': body['volume_type'],
        'availability_zone': body['availability_zone'],
    }
    image = vol[utils.META_INFO].get('image')
    if image:
        # Source volume was image-backed: recreate it from the same image.
        params['imageRef'] = image['id']
    return params
def __patch_option_bootable_of_volume(self, volume_id, bootable):
    # Fallback for cinder APIs that reject set_bootable: flip the flag
    # straight in the cinder DB. int(bootable) maps True/False -> 1/0.
    # NOTE(review): query is built via string interpolation; volume_id
    # comes from cinder itself here, but this is not injection-safe.
    cmd = ('UPDATE volumes SET volumes.bootable=%s WHERE '
           'volumes.id="%s"') % (int(bootable), volume_id)
    self.mysql_connector.execute(cmd)
def download_table_from_db_to_file(self, table_name, file_name):
    """Dump an entire DB table into a server-side file via OUTFILE."""
    query = "SELECT * FROM %s INTO OUTFILE '%s';" % (table_name, file_name)
    self.mysql_connector.execute(query)
def upload_table_to_db(self, table_name, file_name):
    """Load a previously dumped server-side file back into a DB table."""
    query = "LOAD DATA INFILE '%s' INTO TABLE %s" % (file_name, table_name)
    self.mysql_connector.execute(query)
def update_column_with_condition(self, table_name, column,
                                 old_value, new_value):
    """Replace ``old_value`` with ``new_value`` in a single column."""
    query = "UPDATE %s SET %s='%s' WHERE %s='%s'" % (
        table_name, column, new_value, column, old_value)
    self.mysql_connector.execute(query)
def update_column(self, table_name, column_name, new_value):
    """Set every row of ``column_name`` to ``new_value``."""
    query = "UPDATE %s SET %s='%s'" % (table_name, column_name, new_value)
    self.mysql_connector.execute(query)
def get_volume_path_iscsi(self, vol_id):
    """Build the /dev/disk/by-path device path for an iSCSI volume.

    Reads ``provider_location`` from the cinder DB (format appears to be
    "<ip>,<port> <target_iqn> <lun>" -- confirm against the backend) and
    formats it into the udev by-path symlink name.

    :raises Exception: if the volume id is not present in the cinder DB
    """
    cmd = "SELECT provider_location FROM volumes WHERE id='%s';" % vol_id
    result = self.cloud.mysql_connector.execute(cmd)
    if not result:
        # Fixed typo in the error message: "raw" -> "row".
        raise Exception('There is no such row in Cinder DB with the '
                        'specified volume_id=%s' % vol_id)
    provider_location = result.fetchone()[0]
    tokens = provider_location.split()
    iscsi_target_id = tokens[1]
    lun = tokens[2]
    # First token is "<ip>:<port>,<target id>"; keep only the portal part.
    ip = tokens[0].split(',')[0]
    volume_path = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-%s' % (
        ip,
        iscsi_target_id,
        lun)
    return volume_path
|
"""
This bot uses the python library `unirest` which is not a
dependency of Zulip. To use this module, you will have to
install it in your local machine. In your terminal, enter
the following command:
$ sudo pip install unirest --upgrade
Note:
* You might have to use `pip3` if you are using python 3.
* The install command would also download any dependency
required by `unirest`.
"""
from __future__ import print_function
import os
import logging
import ssl
import sys
# unirest is an optional third-party dependency (see module docstring);
# bail out early with a clear error when it is missing.
try:
    import unirest
except ImportError:
    logging.error("Dependency missing!!\n%s" % (__doc__))
    # Fixed: exit non-zero -- a missing dependency is a failure, and
    # callers/scripts checking the exit status must not see success.
    sys.exit(1)
HELP_MESSAGE = '''
This bot allows users to translate a sentence into
'Yoda speak'.
Users should preface messages with '@yoda'.
Before running this, make sure to get a Mashape Api token.
Instructions are in the 'readme-yoda-bot.md' file.
Store it in the 'yoda_api_key.txt' file.
The 'yoda_api_key.txt' file should be located at '~/yoda_api_key.txt'.
Example input:
@yoda You will learn how to speak like me someday.
'''
class ApiKeyError(Exception):
    '''Raised when the Mashape API rejects the key (HTTP 403) or the key
    is otherwise invalid.'''
class YodaSpeakHandler(object):
    '''
    This bot will allow users to translate a sentence into 'Yoda speak'.
    It looks for messages starting with '@yoda'.
    '''
    def usage(self):
        return '''
            This bot will allow users to translate a sentence into
            'Yoda speak'.
            Users should preface messages with '@yoda'.

            Before running this, make sure to get a Mashape Api token.
            Instructions are in the 'readme-yoda-bot.md' file.
            Store it in the 'yoda_api_key.txt' file.
            The 'yoda_api_key.txt' file should be located at '~/yoda_api_key.txt'.
            Example input:
            @yoda You will learn how to speak like me someday.
            '''

    def triage_message(self, message):
        # Only messages addressed to the bot via '@yoda' are interesting.
        return message['content'].startswith('@yoda')

    def handle_message(self, message, client, state_handler):
        content = message['content']
        if not content.startswith('@yoda'):
            return
        # Translate (or show help) and reply into the same stream/topic.
        handle_input(client, content,
                     message['display_recipient'], message['subject'])
handler_class = YodaSpeakHandler
def send_to_yoda_api(sentence, api_key):
    """Query the Mashape Yoda API and return the translated sentence.

    :param sentence: '+'-joined words (see format_input)
    :param api_key: Mashape API key
    :raises ApiKeyError: on HTTP 403 (bad or missing key)
    :returns: translated text on success, else a human-readable error string
    """
    response = unirest.get("https://yoda.p.mashape.com/yoda?sentence=" + sentence,
                           headers={
                               "X-Mashape-Key": api_key,
                               "Accept": "text/plain"
                           }
                           )
    if response.code == 200:
        return response.body
    if response.code == 403:
        raise ApiKeyError
    # Any other status: log the API's message and return a readable error.
    error_message = response.body['message']
    logging.error(error_message)
    # Fixed: response.code is an int; without str() this concatenation
    # raised TypeError instead of reporting the error to the user.
    error_message = (error_message + 'Error code: ' + str(response.code) +
                     ' Did you follow the instructions in the `readme-yoda-bot.md` file?')
    return error_message
def format_input(original_content):
    """Strip the '@yoda' mention and encode spaces as '+' for the API."""
    # Drop the mention and surrounding whitespace, then join the words
    # with '+' as the query-string format requires.
    without_mention = original_content.replace('@yoda', '').strip()
    return without_mention.replace(' ', '+')
def handle_input(client, original_content, stream, subject):
    """Translate the user's message (or show help) and post the reply."""
    if is_help(original_content):
        send_message(client, HELP_MESSAGE, stream, subject)
        return
    sentence = format_input(original_content)
    try:
        reply_message = send_to_yoda_api(sentence, get_api_key())
    # Fixed: 'except ssl.SSLError or TypeError' only caught SSLError,
    # because 'or' evaluates to the first class; a tuple catches both.
    except (ssl.SSLError, TypeError):
        reply_message = 'The service is temporarily unavailable, please try again.'
        logging.error(reply_message)
    except ApiKeyError:
        reply_message = 'Invalid Api Key. Did you follow the instructions in the ' \
                        '`readme-yoda-bot.md` file?'
        logging.error(reply_message)
    send_message(client, reply_message, stream, subject)
def get_api_key():
    """Read the Mashape API key from '~/yoda_api_key.txt'."""
    key_path = os.path.expanduser('~') + '/yoda_api_key.txt'
    with open(key_path) as key_file:
        return key_file.read().strip()
def send_message(client, message, stream, subject):
    """Post ``message`` to the given stream/subject via the zulip client."""
    payload = {
        'type': 'stream',
        'to': stream,
        'subject': subject,
        'content': message,
    }
    client.send_message(payload)
def is_help(original_content):
    """Return True when the message (minus the '@yoda' mention) is 'help'."""
    # Idiom fix: return the comparison directly instead of
    # 'if cond: return True else: return False'.
    return original_content.replace('@yoda', '').strip() == 'help'
|
"""Example using opt_list with TF1.0."""
from absl import app
from opt_list import tf_opt_list
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
def main(_):
    """Train a small MLP on random data using an opt_list optimizer."""
    # A batch of random inputs with a deterministic per-sample target.
    inputs = tf.random.normal([512, 2]) / 4.
    targets = tf.math.tanh(1 / (1e-6 + inputs))

    # Three-layer MLP.
    hidden = tf.layers.dense(inputs, 1024, activation="relu")
    hidden = tf.layers.dense(hidden, 1024, activation="relu")
    predictions = tf.layers.dense(hidden, 2, activation="linear")

    # Mean squared error.
    loss = tf.reduce_mean(tf.square(predictions - targets))

    # Total number of optimization steps.
    training_iters = 200

    # TF1 optimizers track progress (e.g. for schedules) via the global
    # step, so create it and hand it to the opt_list optimizer.
    global_step = tf.train.get_or_create_global_step()
    opt = tf_opt_list.optimizer_for_idx(0, training_iters,
                                        iteration=global_step)
    train_op = opt.minimize(loss)

    # Run the training loop, logging the loss every 10 steps.
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(training_iters):
            _, loss_value = sess.run([train_op, loss])
            if step % 10 == 0:
                print(step, loss_value)
if __name__ == "__main__":
app.run(main)
|
"""
Tests for filetolist.py
filetolist.py:
http://www.github.com/samjabrahams/anchorhub/lib/filetolist.py
"""
from anchorhub.lib.filetolist import FileToList
from anchorhub.util.getanchorhubpath import get_anchorhub_path
from anchorhub.compatibility import get_path_separator
def test_file_to_list_basic():
    """FileToList.to_list() returns one entry per line, newlines kept."""
    sep = get_path_separator()
    path = sep.join([get_anchorhub_path(), 'lib', 'tests',
                     'test_data', 'filelist'])
    expected = ['Hello!\n', 'My name\n', 'is\n', 'AnchorHub']
    assert FileToList.to_list(path) == expected
|
import subutai
def subutaistart():
    """Register the Linux installer configurations with subutai.

    Each entry is (name, description, installer script); registration
    order matches the original: Tray Client, P2P, Browser Plugin.
    """
    configurations = (
        ("Tray Client",
         "Tray application is used to connect to your peers via SSH. Along with it a p2p client will be installed",
         "tray_install_linux"),
        ("P2P",
         "A peer-to-peer application that helps you to connect to your peers",
         "p2p_install_linux"),
        ("Browser Plugin",
         "Plugin to manage your peers",
         "e2e_install_linux"),
    )
    for name, description, script in configurations:
        subutai.NewConfiguration(name)
        subutai.SetConfigurationDesc(name, description)
        subutai.SetConfigurationFile(name, script)
    return 0
|
from jnpr.junos import Device
from jnpr.junos.cfg.resource import Resource
from jnpr.junos.utils.config import Config
from jinja2 import Template
class JunosDevice():
    """
    Thin wrapper around a PyEZ ``Device`` that simplifies opening NETCONF
    sessions, loading Jinja2-templated configuration, and committing.

    Args:
        :host: string containing the host to connect to
        :username: string containing the username to authenticate with
        :password: string containing the password to authenticate with

    NOTE(review): every method catches ``Exception`` and prints it rather
    than raising (best-effort style, Python 2 ``print`` statements), so
    callers cannot detect failures programmatically.

    Examples:
        Basic device connection:

        .. code-block:: python

            from pyJunosManager import JunosDevice

            dev = JunosDevice(host="1.2.3.4",username="root",password="Juniper")
            dev.open()
            print dev.get_facts()
            dev.close()
    """
    def __init__(self,host="",username="",password=""):
        # Bind the Config utility so templated loads can use self.dev.cu.
        self.dev = Device(host=host,user=username,password=password)
        self.dev.bind(cu=Config)

    def open(self):
        """
        Opens a NETCONF session to the specified Junos-based device.
        Connection errors are printed, not raised.

        Example:

        .. code-block:: python

            dev = JunosDevice(host="1.2.3.4",username="root",password="Juniper")
            dev.open()
        """
        try:
            self.dev.open()
        except Exception as err:
            print err

    def close(self):
        """
        Closes the NETCONF session to the device. Errors are printed,
        not raised.
        """
        try:
            self.dev.close()
        except Exception as err:
            print err

    def get_facts(self):
        """
        Returns the device facts (as gathered by PyEZ at connect time)
        as a Python dict.
        """
        return self.dev.facts

    def open_config(self,type="shared"):
        """
        Opens the configuration database of the connected device.

        Args:
            :type: configuration mode to open: **exclusive**, **private**,
                or **shared** (default). The string is interpolated into
                the RPC unvalidated, so any mode the device supports works.
        """
        try:
            #attempt to open a configuration
            output = self.dev.rpc("<open-configuration><{0}/></open-configuration>".format(type))
        except Exception as err:
            #output an error if the configuration is not availble
            print err

    def close_config(self):
        """
        Closes the currently opened configuration. Errors are printed,
        not raised.
        """
        try:
            self.dev.rpc.close_configuration()
        except Exception as err:
            print err

    def load_config_template(self,template,template_vars,type="text"):
        """
        Renders a Jinja2 template and merge-loads the result into the
        candidate configuration.

        Args:
            :template: a templated string using Jinja2 syntax
            :template_vars: dict of variables referenced by :template:
            :type: configuration format to load: "text" (default, curly-brace
                config), "set" for set-style commands, "xml" for XML

        Uses standard `Jinja2`_ Templating.

        .. _`Jinja2`: http://jinja.pocoo.org/

        NOTE(review): the original examples called this with keyword args
        (e.g. ``hostname="foo"``) but the signature takes a single dict --
        pass ``{"hostname": "foo"}`` as ``template_vars``.
        """
        new_template = Template(template)
        final_template = new_template.render(template_vars)
        try:
            output = self.dev.cu.load(final_template,format=type,merge=True)
        except Exception as err:
            print err

    def commit_config(self):
        """
        Commits the candidate configuration. Errors are printed, not
        raised. The configuration remains open afterwards.
        """
        try:
            self.dev.rpc.commit_configuration()
        except Exception as err:
            print err

    def commit_and_quit(self):
        """
        Commits the candidate configuration and then closes it, saving a
        separate close_config() call. Errors are printed, not raised.
        """
        try:
            self.dev.rpc.commit_configuration()
            self.close_config()
        except Exception as err:
            print err
|
import logging
import netaddr
from tempest.api.orchestration import base
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class NeutronResourcesTestJSON(base.BaseOrchestrationTest):
    """Verify the Neutron resources created by the 'neutron_basic' Heat
    template: network, subnet, router, router interface and a server.

    The stack is created once in resource_setup(); each test then checks
    one created resource against the template definition.

    NOTE(review): relies on Python 2 behaviour (``len()`` on ``filter()``
    results, generator ``.next()``); not Python 3 compatible as written.
    """

    @classmethod
    def skip_checks(cls):
        # These tests are meaningless without Neutron available.
        super(NeutronResourcesTestJSON, cls).skip_checks()
        if not CONF.service_available.neutron:
            raise cls.skipException("Neutron support is required")

    @classmethod
    def setup_credentials(cls):
        super(NeutronResourcesTestJSON, cls).setup_credentials()
        cls.os = clients.Manager()

    @classmethod
    def setup_clients(cls):
        super(NeutronResourcesTestJSON, cls).setup_clients()
        cls.network_client = cls.os.network_client
        cls.subnets_client = cls.os.subnets_client

    @classmethod
    def resource_setup(cls):
        """Create the Heat stack once for all tests in this class."""
        super(NeutronResourcesTestJSON, cls).resource_setup()
        cls.neutron_basic_template = cls.load_template('neutron_basic')
        cls.stack_name = data_utils.rand_name('heat')
        template = cls.read_template('neutron_basic')
        # Reuse a configured keypair when available, else create one.
        cls.keypair_name = (CONF.orchestration.keypair_name or
                            cls._create_keypair()['name'])
        cls.external_network_id = CONF.network.public_network_id

        # Carve the first subnet of the requested size out of the
        # configured tenant network range (py2: generator .next()).
        tenant_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
        mask_bits = CONF.network.tenant_network_mask_bits
        cls.subnet_cidr = tenant_cidr.subnet(mask_bits).next()

        # create the stack
        cls.stack_identifier = cls.create_stack(
            cls.stack_name,
            template,
            parameters={
                'KeyName': cls.keypair_name,
                'InstanceType': CONF.orchestration.instance_type,
                'ImageId': CONF.compute.image_ref,
                'ExternalNetworkId': cls.external_network_id,
                'timeout': CONF.orchestration.build_timeout,
                'DNSServers': CONF.network.dns_servers,
                'SubNetCidr': str(cls.subnet_cidr)
            })
        # stack_identifier is '<name>/<id>'; keep the id part.
        cls.stack_id = cls.stack_identifier.split('/')[1]
        try:
            cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')
            resources = (cls.client.list_resources(cls.stack_identifier)
                         ['resources'])
        except exceptions.TimeoutException as e:
            if CONF.compute_feature_enabled.console_output:
                # attempt to log the server console to help with debugging
                # the cause of the server not signalling the waitcondition
                # to heat.
                body = cls.client.show_resource(cls.stack_identifier,
                                                'Server')
                server_id = body['physical_resource_id']
                LOG.debug('Console output for %s', server_id)
                output = cls.servers_client.get_console_output(
                    server_id, None)['output']
                LOG.debug(output)
            raise e

        # Index the created resources by logical id for easy lookup.
        cls.test_resources = {}
        for resource in resources:
            cls.test_resources[resource['logical_resource_id']] = resource

    @test.idempotent_id('f9e2664c-bc44-4eef-98b6-495e4f9d74b3')
    def test_created_resources(self):
        """Verifies created neutron resources."""
        resources = [('Network', self.neutron_basic_template['resources'][
                      'Network']['type']),
                     ('Subnet', self.neutron_basic_template['resources'][
                      'Subnet']['type']),
                     ('RouterInterface', self.neutron_basic_template[
                      'resources']['RouterInterface']['type']),
                     ('Server', self.neutron_basic_template['resources'][
                      'Server']['type'])]
        for resource_name, resource_type in resources:
            resource = self.test_resources.get(resource_name, None)
            self.assertIsInstance(resource, dict)
            self.assertEqual(resource_name, resource['logical_resource_id'])
            self.assertEqual(resource_type, resource['resource_type'])
            self.assertEqual('CREATE_COMPLETE', resource['resource_status'])

    @test.idempotent_id('c572b915-edb1-4e90-b196-c7199a6848c0')
    @test.services('network')
    def test_created_network(self):
        """Verifies created network."""
        network_id = self.test_resources.get('Network')['physical_resource_id']
        body = self.networks_client.show_network(network_id)
        network = body['network']
        self.assertIsInstance(network, dict)
        self.assertEqual(network_id, network['id'])
        self.assertEqual(self.neutron_basic_template['resources'][
            'Network']['properties']['name'], network['name'])

    @test.idempotent_id('e8f84b96-f9d7-4684-ad5f-340203e9f2c2')
    @test.services('network')
    def test_created_subnet(self):
        """Verifies created subnet."""
        subnet_id = self.test_resources.get('Subnet')['physical_resource_id']
        body = self.subnets_client.show_subnet(subnet_id)
        subnet = body['subnet']
        network_id = self.test_resources.get('Network')['physical_resource_id']
        self.assertEqual(subnet_id, subnet['id'])
        self.assertEqual(network_id, subnet['network_id'])
        self.assertEqual(self.neutron_basic_template['resources'][
            'Subnet']['properties']['name'], subnet['name'])
        self.assertEqual(sorted(CONF.network.dns_servers),
                         sorted(subnet['dns_nameservers']))
        self.assertEqual(self.neutron_basic_template['resources'][
            'Subnet']['properties']['ip_version'], subnet['ip_version'])
        self.assertEqual(str(self.subnet_cidr), subnet['cidr'])

    @test.idempotent_id('96af4c7f-5069-44bc-bdcf-c0390f8a67d1')
    @test.services('network')
    def test_created_router(self):
        """Verifies created router."""
        router_id = self.test_resources.get('Router')['physical_resource_id']
        body = self.network_client.show_router(router_id)
        router = body['router']
        self.assertEqual(self.neutron_basic_template['resources'][
            'Router']['properties']['name'], router['name'])
        self.assertEqual(self.external_network_id,
                         router['external_gateway_info']['network_id'])
        self.assertEqual(True, router['admin_state_up'])

    @test.idempotent_id('89f605bd-153e-43ee-a0ed-9919b63423c5')
    @test.services('network')
    def test_created_router_interface(self):
        """Verifies created router interface."""
        router_id = self.test_resources.get('Router')['physical_resource_id']
        network_id = self.test_resources.get('Network')['physical_resource_id']
        subnet_id = self.test_resources.get('Subnet')['physical_resource_id']
        body = self.network_client.list_ports()
        ports = body['ports']
        # Narrow down to the single port our router owns on our network
        # (py2: filter() returns a list here).
        router_ports = filter(lambda port: port['device_id'] ==
                              router_id, ports)
        created_network_ports = filter(lambda port: port['network_id'] ==
                                       network_id, router_ports)
        self.assertEqual(1, len(created_network_ports))
        router_interface = created_network_ports[0]
        fixed_ips = router_interface['fixed_ips']
        subnet_fixed_ips = filter(lambda port: port['subnet_id'] ==
                                  subnet_id, fixed_ips)
        self.assertEqual(1, len(subnet_fixed_ips))
        router_interface_ip = subnet_fixed_ips[0]['ip_address']
        # The router interface should hold the first usable host address.
        self.assertEqual(str(self.subnet_cidr.iter_hosts().next()),
                         router_interface_ip)

    @test.idempotent_id('75d85316-4ac2-4c0e-a1a9-edd2148fc10e')
    @test.services('compute', 'network')
    def test_created_server(self):
        """Verifies created sever."""
        server_id = self.test_resources.get('Server')['physical_resource_id']
        server = self.servers_client.show_server(server_id)['server']
        self.assertEqual(self.keypair_name, server['key_name'])
        self.assertEqual('ACTIVE', server['status'])
        network = server['addresses'][self.neutron_basic_template['resources'][
            'Network']['properties']['name']][0]
        self.assertEqual(4, network['version'])
        self.assertIn(netaddr.IPAddress(network['addr']), self.subnet_cidr)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.