code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
|---|---|---|---|---|---|
../../../../../../../share/pyshared/orca/scripts/toolkits/WebKitGtk/script_utilities.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/toolkits/WebKitGtk/script_utilities.py
|
Python
|
gpl-3.0
| 87
|
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def runchecks(include_extended=False):
    """
    Run the configured heartbeat system checks.

    Iterates through the dotted-path check names in
    ``settings.HEARTBEAT_CHECKS`` (plus ``settings.HEARTBEAT_EXTENDED_CHECKS``
    when *include_extended* is True) and returns a dict mapping each check's
    name to a dict with a boolean ``status`` and a descriptive ``message``.

    Raises ``ImproperlyConfigured`` when a check path cannot be imported or
    does not name a callable attribute.
    """
    response_dict = {}
    list_of_checks = list(settings.HEARTBEAT_CHECKS)
    if include_extended:
        list_of_checks += settings.HEARTBEAT_EXTENDED_CHECKS
    for path in list_of_checks:
        module, _, attr = path.rpartition('.')
        try:
            # startswith() is safe for an empty module string; the previous
            # module[0] check raised an uncaught IndexError for a configured
            # path that contained no dot at all.
            if module.startswith('.'):  # Relative path, assume relative to this app
                mod = import_module(module, __package__)
            else:
                mod = import_module(module)
            func = getattr(mod, attr)
            # Each check callable returns (name, ok_flag, message).
            check_name, is_ok, message = func()
            response_dict[check_name] = {
                'status': is_ok,
                'message': message
            }
        except ImportError as e:
            raise ImproperlyConfigured(u'Error importing module %s: "%s"' % (module, e))
        except AttributeError:
            raise ImproperlyConfigured(u'Module "%s" does not define a "%s" callable' % (module, attr))
    return response_dict
|
jolyonb/edx-platform
|
openedx/core/djangoapps/heartbeat/runchecks.py
|
Python
|
agpl-3.0
| 1,359
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: test_config.proto
import sys
# Py2/Py3 shim: on Python 3 the serialized descriptor literal below must be
# re-encoded to latin-1 bytes; on Python 2 it is already a byte string.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()

# File-level descriptor for test_config.proto. The serialized_pb blob is the
# protoc-compiled FileDescriptorProto; do not edit it by hand — regenerate
# from the .proto source instead.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='test_config.proto',
  package='config_service',
  syntax='proto2',
  serialized_pb=_b('\n\x11test_config.proto\x12\x0e\x63onfig_service\"\x17\n\x06\x43onfig\x12\r\n\x05param\x18\x01 \x01(\t')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Descriptor for the single message `config_service.Config`, which has one
# optional string field `param` (field number 1).
_CONFIG = _descriptor.Descriptor(
  name='Config',
  full_name='config_service.Config',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='param', full_name='config_service.Config.param', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=37,
  serialized_end=60,
)

DESCRIPTOR.message_types_by_name['Config'] = _CONFIG

# Concrete message class built from the descriptor by the generated-message
# metaclass; instances behave like normal protobuf messages.
Config = _reflection.GeneratedProtocolMessageType('Config', (_message.Message,), dict(
  DESCRIPTOR = _CONFIG,
  __module__ = 'test_config_pb2'
  # @@protoc_insertion_point(class_scope:config_service.Config)
  ))
_sym_db.RegisterMessage(Config)

# @@protoc_insertion_point(module_scope)
|
luci/luci-py
|
appengine/components/components/config/test_config_pb2.py
|
Python
|
apache-2.0
| 1,863
|
# Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This project incorporates work covered by the following copyright and permission notice:
# Copyright (c) 2009, Julien Fache
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Plugins for CMS"""
import itertools
from django.conf import settings
from django.utils.translation import ugettext as _
from tagging.models import TaggedItem
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from gstudio.models import Nodetype
from gstudio.models import Author
from gstudio.managers import tags_published
from gstudio.plugins.models import RandomNodetypesPlugin
from gstudio.plugins.models import LatestNodetypesPlugin
from gstudio.plugins.models import SelectedNodetypesPlugin
class CMSLatestNodetypesPlugin(CMSPluginBase):
    """Django-cms plugin listing the most recent published nodetypes,
    optionally filtered by metatype, author and tag."""
    module = _('nodetypes')
    model = LatestNodetypesPlugin
    name = _('Latest nodetypes')
    render_template = 'gstudio/cms/nodetype_list.html'
    filter_horizontal = ['metatypes', 'authors', 'tags']
    fieldsets = (
        (None, {
            'fields': ('number_of_nodetypes', 'template_to_render'),
        }),
        (_('Sorting'), {
            'fields': ('metatypes', 'authors', 'tags'),
            'classes': ('collapse',),
        }),
        (_('Advanced'), {
            'fields': ('submetatypes',),
        }),
    )
    text_enabled = True

    def formfield_for_manytomany(self, db_field, request, **kwargs):
        """Restrict the authors/tags admin widgets to published objects."""
        if db_field.name == 'authors':
            kwargs['queryset'] = Author.published.all()
        elif db_field.name == 'tags':
            kwargs['queryset'] = tags_published()
        parent = super(CMSLatestNodetypesPlugin, self)
        return parent.formfield_for_manytomany(db_field, request, **kwargs)

    def render(self, context, instance, placeholder):
        """Build the filtered queryset of latest nodetypes for the template."""
        queryset = Nodetype.published.all()
        if instance.metatypes.count():
            selected = instance.metatypes.all()
            if instance.submetatypes:
                # Include every descendant metatype of each selected one.
                descendants = [meta.get_descendants() for meta in selected]
                selected = itertools.chain(selected, *descendants)
            queryset = queryset.filter(metatypes__in=selected)
        if instance.authors.count():
            queryset = queryset.filter(authors__in=instance.authors.all())
        if instance.tags.count():
            queryset = TaggedItem.objects.get_union_by_model(
                queryset, instance.tags.all())
        latest = queryset.distinct()[:instance.number_of_nodetypes]
        context.update({
            'nodetypes': latest,
            'object': instance,
            'placeholder': placeholder,
        })
        return context

    def icon_src(self, instance):
        """Icon source of the plugin"""
        return settings.STATIC_URL + u'gstudio/img/plugin.png'
class CMSSelectedNodetypesPlugin(CMSPluginBase):
    """Django-cms plugin rendering a hand-picked selection of nodetypes."""
    module = _('nodetypes')
    model = SelectedNodetypesPlugin
    name = _('Selected nodetypes')
    render_template = 'gstudio/cms/nodetype_list.html'
    fields = ('nodetypes', 'template_to_render')
    filter_horizontal = ['nodetypes']
    text_enabled = True

    def render(self, context, instance, placeholder):
        """Expose the selected nodetypes to the template context."""
        extra = {
            'nodetypes': instance.nodetypes.all(),
            'object': instance,
            'placeholder': placeholder,
        }
        context.update(extra)
        return context

    def icon_src(self, instance):
        """Icon source of the plugin"""
        return settings.STATIC_URL + u'gstudio/img/plugin.png'
class CMSRandomNodetypesPlugin(CMSPluginBase):
    """Django-cms plugin for random nodetypes."""
    module = _('nodetypes')
    model = RandomNodetypesPlugin
    name = _('Random node types')
    render_template = 'gstudio/cms/random_nodetypes.html'
    fields = ('number_of_nodetypes', 'template_to_render')
    text_enabled = True

    def render(self, context, instance, placeholder):
        """Update the context with plugin's data.

        Bug fix: the original used ``str(instance.template_to_render) or
        default`` — but ``str(None)`` is the truthy string ``'None'``, so the
        default template was never applied. Fall back only when the field is
        actually empty.
        """
        if instance.template_to_render:
            template = str(instance.template_to_render)
        else:
            template = 'gstudio/tags/random_nodetypes.html'
        context.update(
            {'number_of_nodetypes': instance.number_of_nodetypes,
             'template_to_render': template})
        return context

    def icon_src(self, instance):
        """Icon source of the plugin"""
        return settings.STATIC_URL + u'gstudio/img/plugin.png'
# Make the three plugins available in the django-cms plugin picker;
# registration happens as a module-import side effect.
plugin_pool.register_plugin(CMSLatestNodetypesPlugin)
plugin_pool.register_plugin(CMSSelectedNodetypesPlugin)
plugin_pool.register_plugin(CMSRandomNodetypesPlugin)
|
gnowledge/ncert_nroer
|
gstudio/plugins/cms_plugins.py
|
Python
|
agpl-3.0
| 7,879
|
# -*- coding: utf-8 -*-
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
"""
TODO:
IDEAS:
LATER:
ISSUES:
Bugs:
Seg-faults when unregistering addon...
Mites:
* History back button does not light up on first cursor move.
It does light up on the second, or when mouse enters the tool-area
* Switching between local and global view triggers new cursor position in history trace.
* Each consecutive click on the linex operator triggers new cursor position in history trace.
(2011-01-16) Was not able to fix this because of some strange script behaviour
while trying to clear linexChoice from addHistoryLocation
QUESTIONS:
"""
import bpy
import bgl
import math
from mathutils import Vector, Matrix
from mathutils import geometry
from misc_utils import *
from constants_utils import *
from cursor_utils import *
from ui_utils import *
class CursorHistoryData(bpy.types.PropertyGroup):
    """Scene-attached store of recent 3D-cursor locations with undo/redo
    style navigation (previousLocation / nextLocation)."""
    # History tracker
    historyDraw = bpy.props.BoolProperty(description="Draw history trace in 3D view",default=1)
    # Maximum number of remembered locations; oldest entries are dropped.
    historyDepth = 144
    # Number of trace segments drawn around the current position in the 3D view.
    historyWindow = 12
    historyPosition = [-1] # Integer must be in a list or else it can not be written to
    historyLocation = []
    #historySuppression = [False] # Boolean must be in a list or else it can not be written to

    def addHistoryLocation(self, l):
        """Record cursor location `l`, discarding any 'redo' tail first.

        A repeat of the current location is ignored, so calling this on every
        UI redraw is cheap.
        """
        # First location ever recorded: seed the history.
        if(self.historyPosition[0]==-1):
            self.historyLocation.append(l.copy())
            self.historyPosition[0]=0
            return
        # Cursor did not move: nothing to record.
        if(l==self.historyLocation[self.historyPosition[0]]):
            return
        #if self.historySuppression[0]:
        #self.historyPosition[0] = self.historyPosition[0] - 1
        #else:
        #self.hideLinexChoice()
        # A new move invalidates any entries beyond the current position
        # (like typing after an undo in a text editor).
        while(len(self.historyLocation)>self.historyPosition[0]+1):
            self.historyLocation.pop(self.historyPosition[0]+1)
        #self.historySuppression[0] = False
        self.historyLocation.append(l.copy())
        # Enforce the depth cap by discarding the oldest entry.
        if(len(self.historyLocation)>self.historyDepth):
            self.historyLocation.pop(0)
        self.historyPosition[0] = len(self.historyLocation)-1
        #print (self.historyLocation)

    #def enableHistorySuppression(self):
        #self.historySuppression[0] = True

    def previousLocation(self):
        """Step back one history entry and move the 3D cursor there."""
        if(self.historyPosition[0]<=0):
            return
        self.historyPosition[0] = self.historyPosition[0] - 1
        CursorAccess.setCursor(self.historyLocation[self.historyPosition[0]].copy())

    def nextLocation(self):
        """Step forward one history entry and move the 3D cursor there."""
        if(self.historyPosition[0]<0):
            return
        # Already at the newest entry: nothing to redo.
        if(self.historyPosition[0]+1==len(self.historyLocation)):
            return
        self.historyPosition[0] = self.historyPosition[0] + 1
        CursorAccess.setCursor(self.historyLocation[self.historyPosition[0]].copy())
class VIEW3D_OT_cursor_previous(bpy.types.Operator):
    """Previous cursor location"""
    bl_idname = "view3d.cursor_previous"
    bl_label = "Previous cursor location"
    bl_options = {'REGISTER'}

    def modal(self, context, event):
        # No modal interaction is needed; finish immediately.
        return {'FINISHED'}

    def execute(self, context):
        # Step the shared history back one entry; the history object moves
        # the 3D cursor itself.
        history = context.scene.cursor_history
        history.previousLocation()
        return {'FINISHED'}
class VIEW3D_OT_cursor_next(bpy.types.Operator):
    """Next cursor location"""
    bl_idname = "view3d.cursor_next"
    bl_label = "Next cursor location"
    bl_options = {'REGISTER'}

    def modal(self, context, event):
        # No modal interaction is needed; finish immediately.
        return {'FINISHED'}

    def execute(self, context):
        # Step the shared history forward one entry; the history object
        # moves the 3D cursor itself.
        history = context.scene.cursor_history
        history.nextLocation()
        return {'FINISHED'}
class VIEW3D_OT_cursor_history_show(bpy.types.Operator):
    """Show cursor trace"""
    bl_idname = "view3d.cursor_history_show"
    bl_label = "Show cursor trace"
    bl_options = {'REGISTER'}

    def modal(self, context, event):
        # No modal phase; complete immediately.
        return {'FINISHED'}

    def execute(self, context):
        # Enable trace drawing and force the viewport to repaint so the
        # change is visible at once.
        history = context.scene.cursor_history
        history.historyDraw = True
        BlenderFake.forceRedraw()
        return {'FINISHED'}
class VIEW3D_OT_cursor_history_hide(bpy.types.Operator):
    """Hide cursor trace"""
    bl_idname = "view3d.cursor_history_hide"
    bl_label = "Hide cursor trace"
    bl_options = {'REGISTER'}

    def modal(self, context, event):
        # No modal phase; complete immediately.
        return {'FINISHED'}

    def execute(self, context):
        # Disable trace drawing and force the viewport to repaint so the
        # change is visible at once.
        history = context.scene.cursor_history
        history.historyDraw = False
        BlenderFake.forceRedraw()
        return {'FINISHED'}
class VIEW3D_PT_cursor_history(bpy.types.Panel):
    """Sidebar panel for navigating the recorded cursor history."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_label = "Cursor History"
    bl_options = {'DEFAULT_CLOSED'}

    @classmethod
    def poll(self, context):
        # Display in object or edit mode.
        # NOTE(review): poll() also *records* the current cursor location as a
        # side effect, so the history only grows while this panel is eligible
        # to be shown — confirm this coupling is intentional.
        cc = context.scene.cursor_history
        cc.addHistoryLocation(CursorAccess.getCursor())
        if (context.area.type == 'VIEW_3D' and
            (context.mode == 'EDIT_MESH'
             or context.mode == 'OBJECT')):
            return 1
        return 0

    def draw_header(self, context):
        # Eye icon in the header toggles drawing of the history trace.
        layout = self.layout
        cc = context.scene.cursor_history
        if cc.historyDraw:
            GUI.drawIconButton(True, layout, 'RESTRICT_VIEW_OFF', "view3d.cursor_history_hide", False)
        else:
            GUI.drawIconButton(True, layout, 'RESTRICT_VIEW_ON' , "view3d.cursor_history_show", False)

    def draw(self, context):
        layout = self.layout
        sce = context.scene
        cc = context.scene.cursor_history
        row = layout.row()
        row.label("Navigation: ")
        # Back button is only enabled when there is something to go back to.
        GUI.drawIconButton(cc.historyPosition[0]>0, row, 'PLAY_REVERSE', "view3d.cursor_previous")
        #if(cc.historyPosition[0]<0):
        #row.label(" -- ")
        #else:
        #row.label(" "+str(cc.historyPosition[0])+" ")
        # Forward button is enabled only when a 'redo' tail exists.
        GUI.drawIconButton(cc.historyPosition[0]<len(cc.historyLocation)-1, row, 'PLAY', "view3d.cursor_next")
        row = layout.row()
        col = row.column()
        col.prop(CursorAccess.findSpace(), "cursor_location")
class VIEW3D_PT_cursor_history_init(bpy.types.Panel):
    """Invisible helper panel whose poll() installs the draw callback once.

    Blender calls Panel.poll frequently while building the UI; this class
    exploits that to lazily register the POST_PIXEL draw handler, then keeps
    returning False so the panel itself is never shown.
    """
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_label = "Register callback"
    bl_options = {'DEFAULT_CLOSED'}
    # One-shot flag so the draw handler is only installed once per session.
    initDone = False
    # Handle returned by draw_handler_add; kept for later removal.
    _handle = None

    @staticmethod
    def handle_add(self, context):
        # NOTE(review): declared @staticmethod yet takes `self`; poll() calls
        # it as handle_add(cls, context), so `self` is actually the class —
        # confirm this is intentional.
        VIEW3D_PT_cursor_history_init._handle = bpy.types.SpaceView3D.draw_handler_add(
            cursor_history_draw, (self, context), 'WINDOW', 'POST_PIXEL')

    @staticmethod
    def handle_remove():
        # Safe to call repeatedly: only removes a handler that exists.
        if VIEW3D_PT_cursor_history_init._handle is not None:
            bpy.types.SpaceView3D.draw_handler_remove(VIEW3D_PT_cursor_history_init._handle, 'WINDOW')
            VIEW3D_PT_cursor_history_init._handle = None

    @classmethod
    def poll(cls, context):
        if VIEW3D_PT_cursor_history_init.initDone:
            return False
        print ("Cursor History draw-callback registration...")
        sce = context.scene
        if context.area.type == 'VIEW_3D':
            VIEW3D_PT_cursor_history_init.handle_add(cls, context)
            VIEW3D_PT_cursor_history_init.initDone = True
            print ("Cursor History draw-callback registered")
            # Unregister to prevent double registration...
            # Started to fail after v2.57
            # bpy.types.unregister(VIEW3D_PT_cursor_history_init)
        else:
            print("View3D not found, cannot run operator")
        return False

    def draw_header(self, context):
        pass

    def draw(self, context):
        pass
def cursor_history_draw(cls, context):
    """Draw-handler callback: render the cursor history trace in the 3D view.

    Draws a line strip through up to ``historyWindow + 1`` recorded locations
    centred on the current history position — black for the current/past
    segment, red for the 'redo' tail. Removed the dead ``ccc`` counter that
    the original incremented but never used.
    """
    cc = context.scene.cursor_history
    # getattr guards against the property group not being registered yet.
    if not getattr(cc, "historyDraw", 0):
        return
    bgl.glEnable(bgl.GL_BLEND)
    bgl.glShadeModel(bgl.GL_FLAT)
    alpha = 1 - PHI_INV
    # History Trace: nothing to draw before the first recorded location.
    if cc.historyPosition[0] < 0:
        return
    bgl.glBegin(bgl.GL_LINE_STRIP)
    half_window = int(cc.historyWindow / 2)
    for iii in range(cc.historyWindow + 1):
        ix_rel = iii - half_window
        ix = cc.historyPosition[0] + ix_rel
        if ix < 0 or ix >= len(cc.historyLocation):
            continue
        ppp = region3d_get_2d_coordinates(context, cc.historyLocation[ix])
        if ix_rel <= 0:
            bgl.glColor4f(0, 0, 0, alpha)  # current position and older: black
        else:
            bgl.glColor4f(1, 0, 0, alpha)  # redo tail: red
        bgl.glVertex2f(ppp[0], ppp[1])
    bgl.glEnd()
|
PLyczkowski/Sticky-Keymap
|
2.74/scripts/addons_contrib/cursor_control/history.py
|
Python
|
gpl-2.0
| 9,295
|
from lib20k.primitives import VERSION
# Write the packaged version string. A context manager guarantees the file is
# flushed and closed deterministically; the original left the handle open.
with open("version.txt", "wt", encoding="utf-8") as version_file:
    version_file.write("VERSION={}.{}.{}\n".format(*VERSION))
|
20kly/20kly
|
pyinstaller/linux/version_setup.py
|
Python
|
gpl-2.0
| 135
|
# -*- coding: utf-8 -*-
#
# Tuxemon
# Copyright (c) 2014-2017 William Edwards <shadowapex@gmail.com>,
# Benjamin Bean <superman2k5@gmail.com>
#
# This file is part of Tuxemon
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
import logging
from core.components.event.conditions import get_npc
from core.components.event.eventcondition import EventCondition
# Create a logger for optional handling of debug messages.
logger = logging.getLogger(__name__)
class NPCFacingTileCondition(EventCondition):
    """ Checks to see if an NPC is facing a tile position
    """
    name = "npc_facing_tile"

    def test(self, game, condition):
        """ Checks to see if an NPC is facing a tile position

        :param game: The main game object that contains all the game's variables.
        :param condition: A dictionary of condition details. See :py:func:`core.components.map.Map.loadevents`
            for the format of the dictionary.

        :type game: core.control.Control
        :type condition: Dictionary

        :rtype: Boolean
        :returns: True or False

        **Examples:**

        >>> condition.__dict__
        {
            "type": "npc_facing_tile",
            "parameters": ["npc_maple"],
            "width": 1,
            "height": 1,
            "operator": "is",
            "x": 6,
            "y": 9,
            ...
        }
        """
        # Get the NPC object from the game; missing NPC means the
        # condition cannot hold.
        npc = get_npc(game, condition.parameters[0])
        if not npc:
            return False
        coordinates = (condition.x, condition.y)
        # Relative direction from the NPC to the tile; stays None unless
        # the tile is exactly one step away in a cardinal direction.
        tile_location = None
        # Same row: the tile can only be immediately left or right.
        if coordinates[1] == npc.tile_pos[1]:
            if coordinates[0] == npc.tile_pos[0] - 1:
                logger.debug("Tile is to the left of the NPC")
                tile_location = "left"
            elif coordinates[0] == npc.tile_pos[0] + 1:
                logger.debug("Tile is to the right of the player")
                tile_location = "right"
        # Same column: the tile can only be immediately above or below.
        if coordinates[0] == npc.tile_pos[0]:
            if coordinates[1] == npc.tile_pos[1] - 1:
                logger.debug("Tile is above the player")
                tile_location = "up"
            elif coordinates[1] == npc.tile_pos[1] + 1:
                logger.debug("Tile is below the player")
                tile_location = "down"
        # Facing the tile exactly when the NPC's facing matches the computed
        # direction; a non-adjacent tile leaves tile_location None -> False.
        return npc.facing == tile_location
|
treetrunk/Tuxemon
|
tuxemon/core/components/event/conditions/npc_facing_tile.py
|
Python
|
gpl-3.0
| 3,443
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Administrator
#
# Created: 08/10/2011
# Copyright: (c) Administrator 2011
# Licence: <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python
class Gol01:
    """Game-of-Life kata: neighbour counting over a 2D grid.

    The grid is expected in ``self.field`` as a nested sequence of truthy
    cell values, assigned externally before the count methods are called.
    """

    def __init__(self):
        pass

    def getNeighbours_old(self, xCoordinate, yCoordinate):
        """Original kata stub, kept for history: always reports 3."""
        return 3

    def getNeighbours(self, xCoordinate, yCoordinate):
        """Count the live neighbours of the cell at (xCoordinate, yCoordinate).

        Bug fix: the original scanned the fixed window field[0..2][0..2]
        regardless of the requested coordinates and silenced every failure
        with a bare ``except`` (its Python-2 debug prints also broke on
        Python 3). This version scans the 3x3 window centred on the cell,
        clamps at the grid edges, and excludes the cell itself.
        """
        anzahlNachbarn = 0
        for ii in range(xCoordinate - 1, xCoordinate + 2):
            for jj in range(yCoordinate - 1, yCoordinate + 2):
                # Negative indices would wrap around in Python; skip them.
                if ii < 0 or jj < 0:
                    continue
                try:
                    if self.field[ii][jj]:
                        anzahlNachbarn += 1
                except IndexError:
                    # Beyond the right/bottom edge of the grid.
                    pass
        if self.field[xCoordinate][yCoordinate]:
            # The cell itself was counted in the window scan; undo that.
            anzahlNachbarn -= 1
        return anzahlNachbarn

    def isNextAlive(self, numberOfNeigbours):
        """Stub: survival rule not implemented yet.

        TODO(review): apply the Conway rules here instead of always True.
        """
        return True


if __name__ == '__main__':
    pass
|
hemmerling/codingdojo
|
src/game_of_life/python_coderetreat_socramob/cr_socramob09/gol01.py
|
Python
|
apache-2.0
| 1,168
|
import math, threading, time
from . matrix import Matrix
from .. util import deprecated
if deprecated.allowed(): # pragma: no cover
    # Legacy shim: layout.POV moved to animation.POV. The class is only
    # defined while deprecated names are allowed, and any attempt to
    # instantiate it fails loudly with a pointer to the replacement.
    class POV(Matrix):
        def __init__(self, *args, **kwds):
            raise ValueError('layout.POV has been removed. Use animation.POV')
    # Historical second name for the same removed class.
    LEDPOV = POV
|
rec/BiblioPixel
|
bibliopixel/layout/pov.py
|
Python
|
mit
| 297
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from tempfile import gettempdir
from subprocess import check_output
def test_free_songs():
    '''Tries to download a list of free songs'''
    # Download the `test_songs` list into the system temp directory and
    # check the CLI's success summary.
    command = 'yt-songs get -v test_songs ' + gettempdir()
    output = run_cmd(command)
    assert '3/3 songs downloaded successfully' in output
def run_cmd(cmd):
    '''Run a shell command `cmd` and return its output.'''
    # shell=True because callers pass a single command string with
    # arguments; output is decoded from the subprocess's raw bytes.
    raw = check_output(cmd, shell=True)
    return raw.decode('utf-8')
|
MinikOlsen/yt-songs
|
test_yt-songs.py
|
Python
|
mit
| 467
|
# Capture the original matplotlib rcParams
import matplotlib as mpl
_orig_rc_params = mpl.rcParams.copy()
# Import seaborn objects
from .rcmod import *
from .utils import *
from .palettes import *
from .linearmodels import *
from .categorical import *
from .distributions import *
from .timeseries import *
from .matrix import *
from .miscplot import *
from .axisgrid import *
from .widgets import *
from .xkcd_rgb import xkcd_rgb
from .crayons import crayons
# Set default aesthetics
# NOTE: this calls seaborn's own `set()` (re-exported from .rcmod above),
# which applies the default seaborn theme as an import side effect; at
# module level it shadows the `set` builtin.
set()

__version__ = "0.8.dev"
|
JWarmenhoven/seaborn
|
seaborn/__init__.py
|
Python
|
bsd-3-clause
| 518
|
import binascii
import logging
import multiprocessing
import queue

from .DistantIOProtocol import distantio_protocol
from .FrameProtocol import Protocol
class Worker(multiprocessing.Process):
    """Background process that decodes incoming byte frames.

    Bytes are pulled from `input_queue`, fed through the framing protocol,
    and each fully decoded frame is translated by the DistantIO decoder and
    sent down `output_connection`.
    """

    def __init__(self, input_queue, output_connection, new_data_condition, stop_condition):
        multiprocessing.Process.__init__(self)
        self.input_queue = input_queue
        self.output_connection = output_connection
        # NOTE(review): stored but never waited on in run() — confirm whether
        # callers still rely on this condition object.
        self.wait_condition = new_data_condition
        self.stop_condition = stop_condition
        self.protocol = Protocol(self.on_frame_decoded_callback)
        self.decoder = distantio_protocol()

    def run(self):
        """Main loop: decode queued data until stop_condition is set."""
        logging.info("Worker process started")
        while not self.stop_condition.is_set():
            # Blocking get with a timeout replaces the original busy-wait on
            # input_queue.empty(), which spun the CPU while idle. The short
            # timeout keeps the loop responsive to stop_condition.
            try:
                data = self.input_queue.get(timeout=0.1)
            except queue.Empty:
                continue
            for c in list(memoryview(data)):
                self.protocol.decode(c)
        logging.info("Worker process stopped")

    def on_frame_decoded_callback(self, frame):
        """Translate a decoded frame and forward the resulting instruction.

        Malformed frames are logged and dropped instead of crashing the
        worker process.
        """
        try:
            instruction = self.decoder.process(frame)
        except (IndexError, ValueError) as e:
            # Both error types were handled identically in two duplicated
            # except blocks; merged into one.
            logging.warning("received error "+str(e)+" with frame : %s", binascii.hexlify(frame))
            return
        else:
            self.output_connection.send(instruction)
|
Overdrivr/DistantIO
|
distantio/Worker.py
|
Python
|
mit
| 1,489
|
# -*- coding: utf-8 -*-
#
# Copyright © 2016 Mark Wolf
#
# This file is part of Xanespy.
#
# Xanespy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Xanespy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Xanespy. If not, see <http://www.gnu.org/licenses/>.
"""Functions and classes that prepare experiments at specific
synchrotron beamlines."""
from typing import List, Tuple, Iterable, Sequence
from collections import namedtuple
import os
import numpy as np
import pandas as pd
from .edges import KEdge
from .utilities import position
# (x, y, z) stage coordinates paired with the beam energy at which they apply.
ZoneplatePoint = namedtuple('ZoneplatePoint', ('x', 'y', 'z', 'energy'))
DetectorPoint = namedtuple('DetectorPoint', ('x', 'y', 'z', 'energy'))
class Zoneplate():
    """Type of focusing optic used in X-ray microscopy. It must be moved
    with changing energy to properly focus the beam. In order to
    properly predict zoneplate positions, it needs either two
    position-energy pairs or one position-energy pair and a
    step. Passing two position-energy pairs is preferred because this
    allows x, y and z to be set properly instead of just z.

    Parameters
    ----------
    start : tuple
        The first zoneplate position-energy pair.
    z_step : int, optional
        Adjustment in z-position for every positive change of 1 eV
        of beam energy.
    end : tuple, optional
        The second zoneplate position-energy pair.
    """
    def __init__(self,
                 start: ZoneplatePoint,
                 x_step: int=None,
                 y_step: int=None,
                 z_step: int=None,
                 end: ZoneplatePoint=None):
        self.start = start
        steps = (x_step, y_step, z_step)
        # Check sanity of arguments
        if None in steps and end is None:
            msg = "Either x, y and z steps or `end` is required."
            raise ValueError(msg)
        elif any(s is not None for s in steps) and end is not None:
            # Bug fix: the original only rejected the z_step+end combination,
            # silently ignoring an x_step or y_step passed alongside `end`.
            msg = "Passing both `step` or `end` is confusing."
            raise ValueError(msg)
        elif end is not None:
            # Calculate the per-eV step from the start and end points.
            self.step = position(
                x=(end.x - start.x) / (end.energy - start.energy),
                y=(end.y - start.y) / (end.energy - start.energy),
                z=(end.z - start.z) / (end.energy - start.energy),
            )
        else:
            # Use the steps given by the user
            self.step = position(x=x_step, y=y_step, z=z_step)

    def position(self, energy: float):
        """Predict the x, y and z position of the zoneplate for the given
        energy, by linear extrapolation from the start point.
        """
        pos = position(
            x=self.start.x + self.step.x * (energy - self.start.energy),
            y=self.start.y + self.step.y * (energy - self.start.energy),
            z=self.start.z + self.step.z * (energy - self.start.energy),
        )
        return pos
class Detector(Zoneplate):
    """A calibration object for the position of the detector.

    Behaves exactly like Zoneplate: the detector must also track the beam
    energy, so the same position/step interpolation applies unchanged.
    """
    pass
def write_scaninfo_header(f, abba_mode, repetitions, ref_repetitions):
    """Write the fixed ScanInfo preamble (TXM Wizard format) to *f*.

    Only the ABBA flag and the sample/reference repetition counts vary;
    every other header field is constant.
    """
    header_lines = [
        'VERSION 1',
        'ENERGY 1',
        'TOMO 0',
        'MOSAIC 0',
        'MULTIEXPOSURE 4',
        'NREPEATSCAN 1',
        'WAITNSECS 0',
        'NEXPOSURES {}'.format(repetitions),
        'AVERAGEONTHEFLY 0',
        'REFNEXPOSURES {}'.format(ref_repetitions),
        'REF4EVERYEXPOSURES {}'.format(repetitions),
        'REFABBA {}'.format(1 if abba_mode else 0),
        'REFAVERAGEONTHEFLY 0',
        'MOSAICUP 1',
        'MOSAICDOWN 1',
        'MOSAICLEFT 1',
        'MOSAICRIGHT 1',
        'MOSAICOVERLAP 0.20',
        'MOSAICCENTRALTILE 1',
        'FILES',
    ]
    # One write of the joined lines produces byte-identical output to the
    # original per-line writes.
    f.write('\n'.join(header_lines) + '\n')
def ssrl6_xanes_script(dest,
                       edge: KEdge,
                       zoneplate: Zoneplate,
                       positions: List[position],
                       reference_position: position,
                       iterations: Sequence,
                       iteration_rest: int=0,
                       frame_rest: int=0,
                       binning: int=2,
                       exposure=0.5,
                       repetitions: int=5,
                       ref_repetitions: int=10,
                       abba_mode: bool=True):
    """Prepare a script file for running multiple consecutive XANES
    framesets on the transmission x-ray micrscope at the Advanced
    Photon Source beamline 8-BM-B. Both `iteration_rest` and
    `frame_rest` can be used to give the material time to recover from
    X-ray damage.

    NOTE(review): the function name says SSRL 6 but the summary above
    mentions APS 8-BM-B — confirm which beamline this script targets.

    Arguments
    ---------
    dest
        A file-like object that will hold the resulting script
    edge : Edge
        Description of the absorption edge.
    binning : int, optional
        how many CCD pixels to combine into one image pixel (eg. 2 means
        2x2 CCD pixels become 1 image pixel.
    exposure : float, optional
        How many seconds to collect for per frame
    positions
        Locations to move the x, y (and z) axes to in order to capture
        the image.
    reference_position : tuple
        Single x, y, z location to capture a reference frame.
    iteration_rest : int, optional
        Time (in seconds) to wait between iterations. Beam will wait at
        reference location before starting next XANES set.
    frame_rest : int, optional
        Time (in seconds) to wait between frames. Beam will wait at
        reference location before starting next energy frame.
    zoneplate : Zoneplate
        Calibration details for the Fresnel zone-plate.
    detector : Detector
        Like zoneplate, but for detector.
    iterations : Iterable
        Contains an identifier for each XANES dataset.
    repetitions : int, optional
        How many images to collect for each location/energy. These
        frames will then be averaged during analysis.
    ref_repetitions : int, optional
        Same as `repetitions` but for reference frames.
    abba_mode : bool, optional
        If True, script will alternate sample and reference locations
        first to save time. Eg: reference, sample, change-energy,
        sample, reference, change-energy, etc. Not compatible with
        `frame_rest` argument.
    """
    # Sanity checks for arguments
    if frame_rest and abba_mode:
        raise ValueError("Cannot use `frame_rest` and `abba_mode` together.")
    # Save a template for writing string and sample frames
    ref_template = 'ref_{name}_{energy:07.1f}_eV_{ctr:03d}of{total:03d}.xrm\n'
    sam_template = '{name}_fov{fov}_{energy:07.1f}_eV_{ctr:03d}of{total:03d}.xrm\n'
    pos_template = 'moveto x {x:.2f}\nmoveto y {y:.2f}\nmoveto z {z:.2f}\n'
    # Command to set the exposure and binning (gets used repeatedly)
    exposure_str = 'setexp {exp:.2f}\nsetbinning {binning}\n'
    exposure_str = exposure_str.format(exp=exposure,
                                       binning=binning)
    # Prepare a scan info file for TXM Wizard, written alongside `dest`.
    dirname, filename = os.path.split(dest.name)
    scaninfo = os.path.join(dirname, "ScanInfo_" + filename)
    scaninfo = open(scaninfo, mode='w')
    write_scaninfo_header(f=scaninfo, abba_mode=abba_mode,
                          repetitions=repetitions,
                          ref_repetitions=ref_repetitions)
    for fs_name in iterations:
        # Flag to keep track of ABBA mode status: True means references are
        # collected before the sample frames at this energy step.
        ref_first = True
        # Start writing header in file
        dest.write(';; 2D XANES ;;\n')
        # Step through each energy and write the commands
        for E in edge.all_energies():
            dest.write(';;;; set the MONO and the ZP\n')
            dest.write('sete {:.02f}\n'.format(E))
            # Determine correct zoneplate settings
            zp_pos = zoneplate.position(energy=E)
            dest.write('moveto zpx {:.2f}\n'.format(zp_pos.x))
            dest.write('moveto zpy {:.2f}\n'.format(zp_pos.y))
            dest.write('moveto zpz {:.2f}\n'.format(zp_pos.z))
            # Determine commands for reference frame (but delay writing):
            # the block is emitted before or after the sample frames below,
            # depending on the current ABBA state.
            ref_s = ';;;; Move to reference position\n'
            ref_s += pos_template.format(x=reference_position.x,
                                         y=reference_position.y,
                                         z=reference_position.z)
            # Pause to let material recover
            if frame_rest and ref_first:
                ref_s += 'wait {:d}\n'.format(frame_rest)
            ref_s += ';;;; Collect reference frames\n'
            ref_s += exposure_str
            for rep in range(0, ref_repetitions):
                xrmfile = ref_template.format(name=fs_name,
                                              energy=E,
                                              ctr=rep,
                                              total=ref_repetitions)
                ref_s += "collect {}".format(xrmfile)
                scaninfo.write(xrmfile)
            # Pause to let material recover
            if frame_rest and not ref_first:
                ref_s += 'wait {:d}\n'.format(frame_rest)
            # Write commands for collecting reference frames (if appropriate)
            if ref_first:
                dest.write(ref_s)
            # Write commands for collecting sample frames
            if ref_first:
                pos_list = positions
            else:
                # Go backwards if even step in ABBA mode
                pos_list = positions[::-1]
            for idx, sam_pos in enumerate(pos_list):
                if ref_first:
                    pos_idx = idx
                else:
                    # Keep the reported position index stable even when the
                    # traversal order is reversed.
                    pos_idx = len(pos_list) - idx - 1
                dest.write(';;;; Move to sample position {}\n'.format(pos_idx))
                dest.write(pos_template.format(x=sam_pos.x, y=sam_pos.y, z=sam_pos.z))
                dest.write(';;;; Collect frames sample position {}\n'.format(pos_idx))
                dest.write(exposure_str)
                for rep in range(0, repetitions):
                    xrmfile = sam_template.format(name=fs_name,
                                                  energy=E,
                                                  fov=pos_idx,
                                                  ctr=rep,
                                                  total=repetitions)
                    dest.write("collect {}".format(xrmfile))
                    scaninfo.write(xrmfile)
            # Write commands for collecting reference frames (if in ABBA mode)
            if not ref_first:
                dest.write(ref_s)
            # Toggle ABBA mode state if abba_mode is set; when abba_mode is
            # False this expression leaves ref_first permanently True.
            ref_first = not (ref_first and abba_mode)
        # Move to reference position to avoid beam damage
        dest.write(';;;; Park at reference position\n')
        dest.write(pos_template.format(x=reference_position.x,
                                       y=reference_position.y,
                                       z=reference_position.z))
        # Pause to let the material recover (unless this is the last dataset)
        if iteration_rest and fs_name != iterations[-1]:
            dest.write('wait {:d}\n'.format(iteration_rest))
    # Close ScanInfo file
    scaninfo.close()
def sector8_xanes_script(dest,
                         edge,
                         zoneplate,
                         detector,
                         sample_positions,
                         names,
                         iterations=range(0, 1),
                         binning=1,
                         exposure=30,
                         abba_mode=True):
    """Prepare a script file for running multiple consecutive XANES
    framesets on the transmission x-ray microscope at the Advanced
    Photon Source beamline 8-BM-B. This function also creates a
    tab-separated-values (tsv) file, next to the script, which contains
    each sample filename and its associated meta-data. This can then be
    used for real-time processing.

    Arguments
    ---------
    dest
        file-like object (with a ``name`` attribute) that will hold the
        resulting script
    edge : KEdge
        Description of the absorption edge.
    zoneplate : Zoneplate
        Calibration details for the Fresnel zone-plate.
    detector : Detector
        Like zoneplate, but for detector.
    sample_positions
        Locations to move the x, y (and z) axes to in order to capture
        the image.
    names
        sample name to use in file names. Should match
        `sample_positions` in length. A name containing "ref" marks a
        reference position.
    iterations : iterable
        contains an identifier for each full set of xanes locations
        with reference.
    binning : int, optional
        how many CCD pixels to combine into one image pixel
        (eg. 2 means 2x2 CCD pixels become 1 image pixel).
    exposure : float
        How many seconds to collect for per frame
    abba_mode : bool, optional
        If True, the script will visit locations forward and backwards
        on alternating energies to save time. Eg: reference, sample,
        change-energy, sample, reference, change-energy, etc.
    """
    # Accumulate file metadata rows and build the DataFrame once at the
    # end: repeated DataFrame.append is quadratic and was removed in
    # pandas 2.0.
    rows = []
    # Set the binning and exposure for the experiment
    dest.write("setbinning {}\n".format(binning))
    dest.write("setexp {}\n".format(exposure))
    energies = edge.all_energies()
    starting_energy = energies[0]
    for iteration in iterations:
        # Status flag for alternating direction when in ABBA mode
        reverse_positions = False
        # Approach the first target energy from below (reduces motor
        # hysteresis in the monochromator)
        for energy in np.arange(starting_energy - 100, starting_energy, 2):
            dest.write("moveto energy {:.2f}\n".format(energy))
        for energy in energies:
            # Set energy
            dest.write("moveto energy {:.2f}\n".format(energy))
            # Set zoneplate (position computed once and reused)
            zp_pos = zoneplate.position(energy)
            dest.write("moveto zpz {:.2f}\n".format(zp_pos.z))
            # Set detector
            det_pos = detector.position(energy)
            dest.write("moveto detx {:.2f}\n".format(det_pos.x))
            dest.write("moveto dety {:.2f}\n".format(det_pos.y))
            dest.write("moveto detz {:.2f}\n".format(det_pos.z))
            # Prepare range of sample positions, reversed on alternate
            # energies when in ABBA mode
            if reverse_positions and abba_mode:
                position_indexes = range(len(sample_positions) - 1, -1, -1)
            else:
                position_indexes = range(0, len(sample_positions))
            reverse_positions = not reverse_positions
            # Cycle through sample positions and collect data
            fnames = []
            ref_name = ""
            for idx in position_indexes:
                position = sample_positions[idx]
                name = names[idx]
                # Move to x, y, z
                dest.write("moveto x {:.2f}\n".format(position.x))
                dest.write("moveto y {:.2f}\n".format(position.y))
                dest.write("moveto z {:.2f}\n".format(position.z))
                # Turn energy float into "8250_0eV" string form
                energy_str = "{:.1f}eV".format(energy).replace('.', '_')
                # Collect frame
                filename = "{name}_xanes{iter}_{energy}.xrm"
                filename = filename.format(name=name,
                                           iter=iteration,
                                           energy=energy_str)
                # Check if we're on a sample or reference frame
                if "ref" in name:
                    ref_name = filename
                else:
                    fnames.append(filename)
                # BUG FIX: the collect command previously had no format
                # placeholder, so the filename was never written out
                dest.write("collect {filename}\n".format(filename=filename))
            # Record the collected sample files with their reference
            for fname in fnames:
                rows.append({'filename': fname, 'eV': energy,
                             'iteration': iteration, 'reference': ref_name})
    # Save the metadata as a tab-separated value file next to the script
    df = pd.DataFrame(rows,
                      columns=['filename', 'eV', 'iteration', 'reference'])
    tsv_filename = os.path.splitext(dest.name)[0] + '.tsv'
    df.to_csv(tsv_filename, sep='\t')
def monitor_sector8(tsv_filename):
    """Monitors a list of files and displays them as they are collected
    from the instrument. A matplotlib axes is displayed and is updated
    with each new frame that is detected. This function will block
    until all files in the file list are accounted for.

    Parameters
    ----------
    tsv_filename : string
        The name of the file that contains a tab-separated-values list
        of filenames. This file is automatically generated by the
        ``sector8_xanes_script`` function.
    """
    # NOTE(review): not yet implemented -- the docstring describes the
    # intended behavior, but the body is currently a stub.
    pass
|
canismarko/xanespy
|
xanespy/beamlines.py
|
Python
|
gpl-3.0
| 17,275
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
from future import standard_library
from pyload.requests.bucket import Bucket
from pyload.requests.curl.download import CurlDownload
from pyload.requests.curl.request import CurlRequest
from pyload.utils.fs import lopen
# needed to register globals
from tests.helper import stubs
from unittest2 import TestCase
# Patch py2 stdlib names to their py3 locations (python-future shim);
# must run before the tests below import anything that relies on it.
standard_library.install_aliases()
class TestCurlRequest(TestCase):
    """Integration tests for the curl-based request/download classes.

    NOTE(review): these tests contact the live pyload.net server and
    write files to /tmp and the working directory.
    """

    # Remote endpoint used to exercise cookie handling
    cookie_url = "https://pyload.net"

    def setUp(self):
        # Fresh download handler for every test
        self.dl = CurlDownload(Bucket())

    def tearDown(self):
        # Release the curl resources held by the handler
        self.dl.close()

    def test_download(self):
        assert self.dl.context is not None
        logo_url = ("https://pyload.net/lib/tpl/pyload/images/"
                    "pyload-logo-edited3.5-new-font-small.png")
        target = "/tmp/random.bin"
        self.dl.download(logo_url, target)
        print(self.dl.size, self.dl.arrived)
        # Everything reported downloaded, and the file on disk agrees
        assert self.dl.size == self.dl.arrived > 0
        assert os.stat(target).st_size == self.dl.size

    def test_cookies(self):
        req = CurlRequest({})
        req.load(self.cookie_url)
        assert len(req.cj) > 0
        dl = CurlDownload(Bucket(), req)
        # Download shares the request's curl context
        assert req.context is dl.context is not None
        dl.download(self.cookie_url + "/cookies.php", "cookies.txt")
        with lopen("cookies.txt", mode='rb') as fp:
            cookie_lines = fp.read().splitlines()
            self.assertEqual(len(cookie_lines), len(dl.context))
            for line in cookie_lines:
                key, _value = line.strip().split(":")
                self.assertIn(key, req.cj)

    def test_attributes(self):
        assert self.dl.size == 0
        assert self.dl.speed == 0
        assert self.dl.arrived == 0
|
pyblub/pyload
|
tests/misc/test_curldownload.py
|
Python
|
agpl-3.0
| 1,704
|
# Copyright (C) 2018 DataArt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from .handler import Handler
from .device_hive import DeviceHive
from .device_hive_api import DeviceHiveApi
from .transports.transport import TransportError
from .api_request import ApiRequestError
from .api_response import ApiResponseError
from .device import DeviceError
from .network import NetworkError
from .device_type import DeviceTypeError
from .subscription import SubscriptionError
from .user import UserError
|
devicehive/devicehive-python
|
devicehive/__init__.py
|
Python
|
apache-2.0
| 1,077
|
import os
import warnings
import inspect
import shutil
import locale
import types
from copy import deepcopy
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.testing.compare import compare_images
from plotnine import ggplot, theme
# Image-comparison defaults shared by all plotnine image tests.
TOLERANCE = 2  # Default tolerance for the tests (RMS difference)
DPI = 72  # Default DPI for the tests

# This partial theme modifies all themes that are used in
# the test. It is limited to setting the size of the test
# images. Should a test require a larger or smaller figure
# size, the dpi or aspect_ratio should be modified.
test_theme = theme(figure_size=(640/DPI, 480/DPI))

# Fail fast at import time when the baseline images are missing
# (e.g. an installation without the test data) -- every image test
# would fail anyway, with a less helpful message.
if not os.path.exists(os.path.join(
        os.path.dirname(__file__), 'baseline_images')):
    raise IOError(
        "The baseline image directory does not exist. "
        "This is most likely because the test data is not installed. "
        "You may need to install plotnine from source to get the "
        "test data.")
def raise_no_baseline_image(filename):
    """Raise a short error for a missing baseline image.

    Keeping the raise in its own small function keeps pytest's
    failure output terse.
    """
    msg = "Baseline image {} is missing".format(filename)
    raise Exception(msg)
def ggplot_equals(gg, right):
    """
    Compare ggplot object to image determined by `right`

    Parameters
    ----------
    gg : ggplot
        ggplot object
    right : str | tuple
        Identifier. If a tuple, then first element is the
        identifier and the second element is a `dict`.
        The `dict` can have two keys

        - tol - tolerance for the image comparison, a float.
        - savefig_kwargs - Parameter used by MPL to save
          the figure. This is a `dict`.

        The right looks like any one of the following::

            - 'identifier'
            - ('identifier', {'tol': 17})
            - ('identifier', {'tol': 17, 'savefig_kwargs': {'dpi': 80}})

    This function is meant to monkey patch ggplot.__eq__
    so that tests can use the `assert` statement.
    """
    # Locale, Agg backend and rcParams must be fixed for reproducibility
    _setup()
    # Unpack the identifier and optional comparison parameters
    if isinstance(right, (tuple, list)):
        name, params = right
        tol = params.get('tol', TOLERANCE)
        _savefig_kwargs = params.get('savefig_kwargs', {})
    else:
        name, tol = right, TOLERANCE
        _savefig_kwargs = {}
    savefig_kwargs = {'dpi': DPI}
    savefig_kwargs.update(_savefig_kwargs)
    # Force the standard test figure size, then render
    gg += test_theme
    fig = gg.draw()
    # Path of the *calling* test module; it determines which
    # baseline_images sub-directory to compare against
    test_file = inspect.stack()[1][1]
    filenames = make_test_image_filenames(name, test_file)
    # savefig ignores the figure face & edge colors
    facecolor = fig.get_facecolor()
    edgecolor = fig.get_edgecolor()
    if facecolor:
        savefig_kwargs['facecolor'] = facecolor
    if edgecolor:
        savefig_kwargs['edgecolor'] = edgecolor
    # Save the figure before testing whether the original image
    # actually exists. This makes creating new tests much easier,
    # as the result image can afterwards just be copied.
    fig.savefig(filenames.result, **savefig_kwargs)
    _teardown()
    if os.path.exists(filenames.baseline):
        # Copy the baseline next to the result for easy side-by-side
        # inspection after a failure
        shutil.copyfile(filenames.baseline, filenames.expected)
    else:
        # Putting the exception in short function makes for
        # short pytest error messages
        raise_no_baseline_image(filenames.baseline)
    err = compare_images(filenames.expected, filenames.result,
                         tol, in_decorator=True)
    gg._err = err  # For the pytest error message
    return False if err else True

# Monkey-patch so that `assert gg == 'identifier'` performs an image
# comparison (see pytest_assertrepr_compare below for the message).
ggplot.__eq__ = ggplot_equals
def draw_test(self):
    """
    Draw the ggplot object, closing any created figure afterwards.

    Parameters
    ----------
    self : ggplot
        ggplot object

    Meant to monkey-patch ``ggplot.draw_test`` so tests can render a
    plot without worrying about matplotlib figure cleanup.
    """
    try:
        fig = self.draw()
    except Exception as err:
        # A failed draw can leave half-built figures behind
        plt.close('all')
        raise err
    if fig:
        plt.close(fig)

ggplot.draw_test = draw_test
def build_test(self):
    """
    Build (but do not render) a deep copy of the ggplot object.

    Parameters
    ----------
    self : ggplot
        ggplot object

    Meant to monkey-patch ``ggplot.build_test``; returns the built
    copy so tests can inspect the intermediate data.
    """
    gg = deepcopy(self)
    gg._build()
    return gg

ggplot.build_test = build_test
def pytest_assertrepr_compare(op, left, right):
    """Pytest hook: friendlier message for failed image comparisons."""
    is_image_compare = (isinstance(left, ggplot)
                        and isinstance(right, (str, tuple))
                        and op == "==")
    if is_image_compare:
        # _err was stashed on the ggplot object by ggplot_equals
        template = ("images not close: {actual:s} vs. {expected:s} "
                    "(RMS {rms:.2f})")
        return [template.format(**left._err)]
def make_test_image_filenames(name, test_file):
    """
    Create filenames for testing

    Parameters
    ----------
    name : str
        An identifier for the specific test. This will make-up
        part of the filenames.
    test_file : str
        Full path of the test file. This will determine the
        directory structure

    Returns
    -------
    out : types.SimpleNamespace
        Object with 3 attributes to store the generated filenames

        - result
        - baseline
        - expected

        `result`, is the filename for the image generated by the test.
        `baseline`, is the filename for the baseline image to which
        the result will be compared.
        `expected`, is the filename to the copy of the baseline that
        will be stored in the same directory as the result image.
        Creating a copy make comparison easier.
    """
    # NOTE(review): substring test, not endswith -- a name like
    # "a.png.x" would not get the suffix appended; confirm intended.
    if '.png' not in name:
        name = name + '.png'
    basedir = os.path.abspath(os.path.dirname(test_file))
    basename = os.path.basename(test_file)
    # One baseline/result sub-directory per test module
    subdir = os.path.splitext(basename)[0]
    baseline_dir = os.path.join(basedir, 'baseline_images', subdir)
    result_dir = os.path.abspath(os.path.join('result_images', subdir))
    # exist_ok=True already tolerates a pre-existing directory, so the
    # previous os.path.exists() pre-check was a redundant TOCTOU-prone
    # test and has been removed.
    os.makedirs(result_dir, exist_ok=True)
    base, ext = os.path.splitext(name)
    expected_name = '{}-{}{}'.format(base, 'expected', ext)
    filenames = types.SimpleNamespace(
        baseline=os.path.join(baseline_dir, name),
        result=os.path.join(result_dir, name),
        expected=os.path.join(result_dir, expected_name))
    return filenames
# This is called from the cleanup decorator
def _setup():
    """Prepare a deterministic matplotlib environment for image tests."""
    # The baseline images are created in this locale, so we should use
    # it during all of the tests.
    try:
        locale.setlocale(locale.LC_ALL, str('en_US.UTF-8'))
    except locale.Error:
        try:
            # Windows spelling of the same locale
            locale.setlocale(locale.LC_ALL, str('English_United States.1252'))
        except locale.Error:
            warnings.warn(
                "Could not set locale to English/United States. "
                "Some date-related tests may fail")
    plt.switch_backend('Agg')  # use Agg backend for these test
    if mpl.get_backend().lower() != "agg":
        msg = ("Using a wrong matplotlib backend ({0}), "
               "which will not produce proper images")
        raise Exception(msg.format(mpl.get_backend()))
    # These settings *must* be hardcoded for running the comparison
    # tests
    mpl.rcdefaults()  # Start with all defaults
    mpl.rcParams['text.hinting'] = 'auto'
    mpl.rcParams['text.antialiased'] = True
    mpl.rcParams['text.hinting_factor'] = 8
    # make sure we don't carry over bad plots from former tests
    msg = ("no of open figs: {} -> find the last test with ' "
           "python tests.py -v' and add a '@cleanup' decorator.")
    assert len(plt.get_fignums()) == 0, msg.format(plt.get_fignums())
def _teardown():
    """Close all figures and restore warning filters after a test."""
    plt.close('all')
    # reset any warning filters set in tests
    warnings.resetwarnings()
|
has2k1/plotnine
|
plotnine/tests/conftest.py
|
Python
|
gpl-2.0
| 7,587
|
# Copyright (c) 2017 University Corporation for Atmospheric Research/Unidata.
# Distributed under the terms of the MIT License.
# SPDX-License-Identifier: MIT
"""Test Wyoming upper air dataset access."""
from datetime import datetime
from numpy.testing import assert_almost_equal
from siphon.simplewebservice.wyoming import WyomingUpperAir
from siphon.testing import get_recorder
# VCR-style recorder that replays network cassettes stored next to
# this test module, so the tests below run without live network access.
recorder = get_recorder(__file__)
@recorder.use_cassette('wyoming_sounding')
def test_wyoming():
    """Test that we are properly parsing data from the wyoming archive."""
    # Request the 1999-05-04 00Z sounding for Norman, OK (OUN) and
    # spot-check row 5 plus the attached unit metadata.
    df = WyomingUpperAir.request_data(datetime(1999, 5, 4, 0), 'OUN')
    assert_almost_equal(df['pressure'][5], 867.9, 2)
    assert_almost_equal(df['height'][5], 1219., 2)
    assert_almost_equal(df['temperature'][5], 17.4, 2)
    assert_almost_equal(df['dewpoint'][5], 14.3, 2)
    assert_almost_equal(df['u_wind'][5], 6.60, 2)
    assert_almost_equal(df['v_wind'][5], 37.42, 2)
    assert_almost_equal(df['speed'][5], 38.0, 1)
    assert_almost_equal(df['direction'][5], 190.0, 1)
    # Units dict accompanies the DataFrame
    assert(df.units['pressure'] == 'hPa')
    assert(df.units['height'] == 'meter')
    assert(df.units['temperature'] == 'degC')
    assert(df.units['dewpoint'] == 'degC')
    assert(df.units['u_wind'] == 'knot')
    assert(df.units['v_wind'] == 'knot')
    assert(df.units['speed'] == 'knot')
    assert(df.units['direction'] == 'degrees')
@recorder.use_cassette('wyoming_high_alt_sounding')
def test_high_alt_wyoming():
    """Test Wyoming data that starts at pressure less than 925 hPa."""
    # Boise (BOI) sits high enough that the sounding starts below the
    # usual 925 hPa level; spot-check row 2.
    df = WyomingUpperAir.request_data(datetime(2010, 12, 9, 12), 'BOI')
    assert_almost_equal(df['pressure'][2], 890.0, 2)
    assert_almost_equal(df['height'][2], 1133., 2)
    assert_almost_equal(df['temperature'][2], 5.4, 2)
    assert_almost_equal(df['dewpoint'][2], 3.9, 2)
    assert_almost_equal(df['u_wind'][2], -0.42, 2)
    assert_almost_equal(df['v_wind'][2], 5.99, 2)
|
dopplershift/siphon
|
siphon/tests/test_wyoming.py
|
Python
|
mit
| 1,925
|
from google.appengine.ext import db
from google.appengine.ext.blobstore import blobstore
import string
import random
import access
import hashlib
import time
import datetime
import exceptions
class User(db.Model):
    """Datastore model for a platform member built from a LinkedIn payload."""
    confirmation_status = db.StringProperty()
    first_name = db.StringProperty()
    last_name = db.StringProperty()
    email = db.StringProperty()
    alias = db.StringProperty()  # generated appspotmail.com mail alias
    picture = db.StringProperty()
    industry = db.StringProperty()
    location = db.StringProperty()
    user_id = db.StringProperty()  # LinkedIn member id
    linkedin_profile = db.StringProperty()
    notify_mail = db.StringProperty()
    summary = db.TextProperty()
    user_profile = db.StringProperty(choices=set(['Administrator','Mentor','Entrepreneur','Job Applicant']))
    created = db.DateTimeProperty(auto_now_add=True)
    declined = db.DateTimeProperty()
    confirmed = db.DateTimeProperty()
    @classmethod
    def create(cls,user_data,user_profile):
        """Build a User from LinkedIn data.

        Returns the *unsaved* entity (caller must put()), or the string
        'User already exists' when the email is already registered.
        """
        email_address = user_data.get('emailAddress')
        results = cls.gql("WHERE email=:1",email_address)
        count = results.count()
        # Mail alias derived from the member's full name
        alias = access.createAlias( user_data.get('firstName') + " " + user_data.get('lastName') ) + "@mestmentorplatform.appspotmail.com"
        if count==0:
            user = User(
                first_name = user_data.get('firstName'),
                last_name = user_data.get('lastName'),
                email = user_data.get('emailAddress'),
                picture = user_data.get('big_pic'),
                industry = user_data.get('industry'),
                # location may be absent; guard before reading its name
                location = user_data.get('location') and user_data.get('location').get('name'),
                user_id = user_data.get('id'),
                summary = user_data.get('summary'),
                notify_mail = user_data.get('emailAddress'),
                linkedin_profile = user_data.get('publicProfileUrl'),
                user_profile = user_profile,
                alias = alias,
                confirmation_status = access.confirmation_status()
            )
            return user
        else:
            return 'User already exists'
    @classmethod
    def delete(cls,user):
        """Delete rows matching this user's id+email.

        Returns True when nothing matched; after an actual delete it
        implicitly returns None -- NOTE(review): confirm callers do
        not rely on the return value.
        """
        result = cls.gql("WHERE user_id=:1 and email=:2",user.user_id, user.email)
        if result.count() == 0:
            return True
        else:
            db.delete(result)
    @classmethod
    def status(cls,user):
        """Mark the single matching user confirmed; False when ambiguous."""
        result = cls.gql("WHERE user_id=:1 and email=:2",user.user_id, user.email)
        if result.count() == 0:
            return True
        elif result.count()==1:
            user = result.get()
            user.confirmation_status = "confirmed"
            user.put()
            return True
        else:
            return False
    @classmethod
    def confirm(cls,new_user_id):
        """Confirm a user by datastore id and stamp the time."""
        new_user = User.get_by_id(int(new_user_id))
        new_user.confirmation_status = "confirmed"
        new_user.confirmed = datetime.datetime.now()
        new_user.put()
        return "confirmed"
    @classmethod
    def decline(cls, user_id):
        """Decline a user by datastore id and stamp the time."""
        declined_user = User.get_by_id(int(user_id))
        declined_user.confirmation_status = "declined"
        declined_user.declined = datetime.datetime.now()
        declined_user.put()
        return "declined"
class Education(db.Model):
    """A single LinkedIn education entry attached to a User."""
    user = db.ReferenceProperty(User, collection_name="educations")
    school_name = db.StringProperty()
    field_of_study = db.StringProperty()
    start_date = db.StringProperty()
    end_date = db.StringProperty()
    activities = db.TextProperty()
    notes = db.TextProperty()
    created = db.DateTimeProperty(auto_now_add=True)

    @classmethod
    def create(cls, user, education):
        """Persist one education record from LinkedIn profile data."""
        record = Education(
            user=user,
            school_name=education.get('schoolName'),
            field_of_study=education.get('fieldOfStudy'),
            start_date=str(education.get('startDate').get('year')),
            end_date=str(education.get('endDate').get('year')),
            activities=education.get('activities'),
            notes=education.get('notes'),
        )
        record.put()

    @classmethod
    def delete(cls, user):
        """Delete all education rows for `user`; True when there are none."""
        matches = cls.gql("WHERE user=:1", user)
        if matches.count() == 0:
            return True
        db.delete(matches)
class Position(db.Model):
    """A single LinkedIn work position attached to a User."""
    user = db.ReferenceProperty(User, collection_name = "positions")
    position_id = db.StringProperty()
    title = db.StringProperty()
    summary = db.TextProperty()
    start_date = db.StringProperty()
    end_date = db.StringProperty()
    is_current = db.BooleanProperty()
    company = db.StringProperty()
    created = db.DateTimeProperty(auto_now_add=True)
    @classmethod
    def create(cls, user, position):
        """Persist one position record from LinkedIn profile data.

        NOTE(review): `position['startDate']` and `position['company']`
        are assumed to always be present; a missing key raises
        TypeError/KeyError here -- confirm against the LinkedIn payload.
        """
        new_position = Position(
            user = user,
            position_id = str(position.get('id')),
            title = position.get('title'),
            summary = position.get('summary'),
            start_date = str(position.get('startDate')['year']),
            # endDate may be absent for a current position, hence the guard
            end_date = position.get('endDate') and str(position.get('endDate')['year']),
            is_current = position.get('isCurrent'),
            company = position.get('company')['name']
        )
        new_position.put()
    @classmethod
    def delete(cls,user):
        """Delete all of `user`'s positions; True when there are none."""
        result = cls.gql("WHERE user=:1",user)
        if result.count() == 0:
            return True
        else:
            db.delete(result)
class Skills(db.Model):
    """A single skill tag from a user's LinkedIn profile."""
    user = db.ReferenceProperty(User, collection_name="skills")
    name = db.StringProperty()
    created = db.DateTimeProperty(auto_now_add=True)

    @classmethod
    def create(cls, user, skill):
        """Persist one skill entry for `user`."""
        Skills(
            user=user,
            name=skill.get('skill').get('name'),
        ).put()

    @classmethod
    def delete(cls, user):
        """Delete all of `user`'s skills; True when there are none."""
        matches = cls.gql("WHERE user=:1", user)
        if matches.count() == 0:
            return True
        db.delete(matches)
class Entrepreneur(db.Model):
    """Company details for users with the Entrepreneur profile."""
    user = db.ReferenceProperty(User, collection_name='entrepreneurprofile')
    company_name = db.StringProperty()
    designation = db.StringProperty()
    website = db.StringProperty()
    created = db.DateTimeProperty(auto_now_add=True)

    @classmethod
    def create(cls, user, company_name, designation, website):
        """Save the entrepreneur profile and mark `user` as confirmed."""
        profile = Entrepreneur(
            user=user,
            company_name=company_name,
            designation=designation,
            website=website,
        )
        profile.put()
        # Creating the profile doubles as confirming the account
        user.confirmation_status = "confirmed"
        user.put()
        return True

    @classmethod
    def delete(cls, user):
        """Delete the user's entrepreneur profile; True when absent."""
        matches = cls.gql("WHERE user=:1", user)
        if matches.count() == 0:
            return True
        db.delete(matches)
class Program(db.Model):
    """Mentorship-program enrollment for a Mentor or Job Applicant."""
    user = db.ReferenceProperty(User, collection_name = 'programs')
    program_type = db.StringProperty(required=True, choices=set(['MEST Strike Force', 'MBA Consultant', 'Senior Advisor', 'Expert in Residence', 'Job Applicant']))
    preferred_email = db.StringProperty()
    mini_bio = db.TextProperty()
    time_zone = db.StringProperty()
    hours = db.IntegerProperty()
    rock_star = db.BooleanProperty()
    ninja = db.BooleanProperty()
    guru = db.BooleanProperty()
    created = db.DateTimeProperty(auto_now_add=True)
    @classmethod
    def create(cls,user,program_data):
        """Build (but do not save) a Program for `user`.

        Returns the unsaved Program, or False when the user already has
        one or has an unsupported profile.
        NOTE(review): `commitment_level` is not a declared property on
        this model, so it will not be persisted to the datastore --
        confirm whether it was meant to be a StringProperty.
        """
        results = cls.gql("WHERE user=:1",user)
        count = results.count()
        if count==0 and user.user_profile=="Mentor":
            program = Program(
                user = user,
                program_type = program_data.get('program_type'),
                commitment_level = program_data.get('commitment_level'),
                preferred_email = program_data.get('email'),
                mini_bio = program_data.get('mini_bio'),
                time_zone = program_data.get('time_zone'),
                hours = program_data.get('hours')
            )
            return program
        elif count==0 and user.user_profile=="Job Applicant":
            program = Program(
                user = user,
                program_type = "Job Applicant",
                mini_bio = program_data.get('mini_bio'),
                preferred_email = program_data.get('email')
            )
            return program
        else:
            return False
    @classmethod
    def delete(cls,user):
        """Delete the user's program rows; True when there are none."""
        result = cls.gql("WHERE user=:1",user)
        if result.count() == 0:
            return True
        else:
            db.delete(result)
class Contribution(db.Model):
    """Hours a mentor contributed to a company, for reporting."""
    user = db.ReferenceProperty(User, collection_name='contributions')
    program = db.ReferenceProperty(Program, collection_name='contributions')
    company = db.StringProperty()
    contributed_hours = db.IntegerProperty()
    description = db.TextProperty()
    created = db.DateTimeProperty(auto_now_add=True)
    @classmethod
    def add_contribution(cls, contribution):
        """Record a contribution unless an identical one already exists.

        Always returns True. NOTE(review): the dedupe query binds the
        raw request value (default "") against the IntegerProperty
        `contributed_hours` -- confirm the caller passes an int here.
        """
        mentor = contribution.get("user")
        result = cls.gql("WHERE user=:1 and company=:2 and contributed_hours=:3 and description=:4",
            contribution.get("user", ""),
            contribution.get("company", ""),
            contribution.get("hours", ""),
            contribution.get("description", "")
        )
        if result.count() == 0:
            new_contribution = Contribution(
                user = contribution.get("user"),
                company = contribution.get("company"),
                # NOTE(review): assumes the mentor has at least one
                # Program -- TODO confirm
                program = mentor.programs[0],
                contributed_hours = int(contribution.get("hours")),
                description = contribution.get("description")
            )
            new_contribution.put()
            return True
        else:
            return True
class Resource(db.Model):
    """An uploaded blob shared by a user, tagged by industry and expertise."""
    user = db.ReferenceProperty(User, collection_name='resources')
    resource_key = blobstore.BlobReferenceProperty()
    title = db.TextProperty()
    description = db.TextProperty()
    rating = db.IntegerProperty()
    created = db.DateTimeProperty(auto_now_add=True)
    @classmethod
    def industry(cls, industries, resource):
        """Tag `resource` with each industry via Sector rows."""
        for industry in industries:
            Sector.createResource(resource, industry)
    @classmethod
    def expertise(cls, experts, resource):
        """Tag `resource` with each expertise via Topic rows."""
        for expertise in experts:
            Topic.createResource(resource, expertise)
    @classmethod
    def create(cls,user_id,resource_key, title, description, industry, expertise):
        """Save a new resource for `user_id`, attach its tags, return it."""
        user = User.get_by_id(int(user_id))
        new_resource = Resource(
            user = user,
            resource_key = resource_key,
            title = title,
            description = description
        )
        new_resource.put()
        cls.expertise(expertise, new_resource)
        cls.industry(industry, new_resource)
        return new_resource
    @classmethod
    def delete(cls,user,resource_key):
        """Delete the resource(s) matching both `user` and `resource_key`.

        BUG FIX: the GQL previously read "WHERE user=:1 and resource_key"
        -- missing the "=:2" comparison -- so the second bound parameter
        was never applied and the query was malformed.
        """
        result = cls.gql("WHERE user=:1 and resource_key=:2",user,resource_key)
        if result.count() == 0:
            return True
        else:
            db.delete(result)
class Topic(db.Model):
    """An expertise tag (criteria/value) linked to a Program or Resource."""
    program = db.ReferenceProperty(Program, collection_name='topics')
    resource = db.ReferenceProperty(Resource, collection_name="topic")
    criteria = db.StringProperty()
    value = db.StringProperty()
    created = db.DateTimeProperty(auto_now_add=True)
    @classmethod
    def create(cls,program,topic):
        """Attach a topic to `program` unless the same pair already exists."""
        results = cls.gql("WHERE program=:1 and criteria=:2 and value=:3",program, topic.get('criteria'), topic.get('value'))
        count = results.count()
        if count==0:
            new_topic = Topic(
                program = program,
                criteria = topic.get('criteria'),
                value = topic.get('value')
            )
            new_topic.put()
    @classmethod
    def createResource(cls, resource, topic):
        """Attach a topic to `resource` unless the same pair already exists."""
        results = cls.gql("WHERE resource=:1 and criteria=:2 and value=:3", resource, topic.get('criteria'), topic.get('value'))
        count = results.count()
        if count==0:
            new_topic = Topic(
                resource = resource,
                criteria = topic.get('criteria'),
                value = topic.get('value')
            )
            new_topic.put()
    @classmethod
    def delete(cls,program):
        """Delete all topics for `program`; True when there are none."""
        result = cls.gql("WHERE program=:1",program)
        if result.count() == 0:
            return True
        else:
            db.delete(result)
    @classmethod
    def remove_mentor_topic(cls, value, criteria, program):
        """Delete one specific (value, criteria) topic; always returns True."""
        result = cls.gql('WHERE program=:1 and value=:2 and criteria=:3', program, value, criteria)
        count = result.count()
        if count > 0:
            db.delete(result)
            return True
        else:
            return True
class Sector(db.Model):
    """An industry tag linked to either a Program or a Resource."""
    program = db.ReferenceProperty(Program, collection_name='sectors')
    resource = db.ReferenceProperty(Resource, collection_name="sector")
    value = db.StringProperty()
    created = db.DateTimeProperty(auto_now_add=True)
    @classmethod
    def create(cls,program,sector):
        """Attach a sector to `program` unless the value already exists."""
        results = cls.gql("WHERE program=:1 and value=:2",program, sector.get('value'))
        count = results.count()
        if count==0:
            new_sector = Sector(
                program = program,
                value = sector.get('value')
            )
            new_sector.put()
    @classmethod
    def createResource(cls, resource, sector):
        """Attach a sector to `resource` unless the value already exists."""
        results = cls.gql("WHERE resource=:1 and value=:2",resource, sector.get('value'))
        count = results.count()
        if count==0:
            new_sector = Sector(
                resource = resource,
                value = sector.get('value')
            )
            new_sector.put()
    @classmethod
    def delete(cls,program):
        """Delete all sectors for `program`; True when there are none."""
        result = cls.gql("WHERE program=:1",program)
        if result.count() == 0:
            return True
        else:
            db.delete(result)
    @classmethod
    def remove_mentor_sector(cls, value, program):
        """Delete one specific sector value; always returns True."""
        result = cls.gql('WHERE program=:1 and value=:2', program, value)
        count = result.count()
        if count > 0:
            db.delete(result)
            return True
        else:
            return True
class Discovery(db.Model):
    """Records how a mentor heard about the program (method/value pair)."""
    program = db.ReferenceProperty(Program, collection_name='discovery')
    method = db.StringProperty()
    value = db.StringProperty()
    created = db.DateTimeProperty(auto_now_add=True)

    @classmethod
    def create(cls, program, discovery):
        """Save the discovery channel unless one already exists for this method."""
        method = discovery.get("method")
        existing = cls.gql("WHERE program=:1 and method=:2", program, method)
        if existing.count() == 0:
            Discovery(
                program=program,
                method=method,
                value=discovery.get("value"),
            ).put()

    @classmethod
    def delete(cls, program):
        """Delete all discovery rows for `program`; True when there are none."""
        matches = cls.gql("WHERE program=:1", program)
        if matches.count() == 0:
            return True
        db.delete(matches)
class Message(db.Model):
    """Internal mail between two platform users.

    Deletion is tracked per mailbox (`sender_deleted` /
    `receiver_deleted`) so one side can remove a message without
    destroying the entity for the other side.
    """
    msg_id = db.StringProperty()  # random 50-char unique identifier
    msg_type = db.StringProperty()
    sender = db.ReferenceProperty(User, collection_name='sent')
    receiver = db.ReferenceProperty(User, collection_name='received')
    receiver_email = db.StringProperty()
    sender_email = db.StringProperty()
    subject = db.StringProperty()
    starred = db.StringProperty()  # stored as "True"/"False" strings
    category = db.StringProperty()
    content = db.TextProperty()
    status = db.StringProperty(choices=set(['read','unread','trash']))
    date_sent = db.StringProperty()
    created = db.DateTimeProperty(auto_now_add=True)
    deleted = db.DateTimeProperty(auto_now_add=False)
    sender_deleted = db.StringProperty()
    receiver_deleted = db.StringProperty()
    @classmethod
    def state_generator(cls,size=50, chars=string.ascii_uppercase + string.digits):
        """Return a random message id that is not already in use.

        BUG FIX: on an id collision this previously called the bare
        name ``state_generator()`` (a NameError inside a classmethod)
        and discarded the result; it now recurses via ``cls`` and
        returns the retried id.
        """
        msg_id = "".join(random.choice(chars) for x in range(size))
        results = cls.gql("WHERE msg_id=:1",msg_id)
        if results.count() == 0:
            return msg_id
        else:
            return cls.state_generator(size, chars)
    @classmethod
    def create(cls,message):
        """Save a new unread message from a dict; False when dict is falsy."""
        if message:
            msg = Message(
                msg_id = Message.state_generator(),
                sender = message.get('sender'),
                sender_email = message.get('sender_email'),
                receiver_email = message.get('receiver_email'),
                receiver = message.get('receiver'),
                subject = message.get('subject'),
                category = message.get('category'),
                content = message.get('content'),
                date_sent = message.get('date'),
                msg_type = message.get('type'),
                status = "unread",
                starred = "False",
                sender_deleted = "False",
                receiver_deleted = "False"
            )
            msg.put()
            return True
        else:
            return False
    @classmethod
    def getMessage(cls,msg_id, read_status):
        """Fetch a message by datastore id and set its read status."""
        msg = Message.get_by_id(int(msg_id))
        msg.status = read_status
        msg.put()
        return msg
    @classmethod
    def displayMessage(cls,msg_id):
        """Fetch a message by datastore id, marking it read."""
        msg = Message.get_by_id(int(msg_id))
        msg.status = "read"
        msg.put()
        return msg
    @classmethod
    def change_status(cls,msg_id,status):
        """Set `status` on the message; False when the id is unknown."""
        # Parenthesized print behaves identically for a single argument
        # on both py2 and py3 (was a py2-only print statement)
        print(msg_id)
        msg = Message.get_by_id(int(msg_id))
        if msg:
            msg.status = status
            msg.put()
            return True
        else:
            return False
    @classmethod
    def star(cls, msg_id):
        """Mark the message starred."""
        msg = Message.get_by_id(int(msg_id))
        msg.starred = "True"
        msg.put()
        return "starred"
    @classmethod
    def unstar(cls, msg_id):
        """Clear the message's star."""
        msg = Message.get_by_id(int(msg_id))
        msg.starred = "False"
        msg.put()
        return "unstarred"
    @classmethod
    def delete(cls,msg_id):
        """Hard-delete every message with this msg_id; always True."""
        result = cls.gql("WHERE msg_id=:1",msg_id)
        if result.count() == 0:
            return True
        else:
            db.delete(result)
            return True
    @classmethod
    def delete_message(cls, msg_id, user):
        """Soft-delete the message from `user`'s side; always True."""
        msg = Message.get_by_id(int(msg_id))
        if msg:
            Message.delete_sent(msg, user)
            Message.delete_received(msg, user)
            return True
        else:
            return True
    @classmethod
    def delete_sent(cls, msg, user):
        """Flag the sender copy deleted when `user` is the sender."""
        if msg.sender.email == user.email:
            msg.sender_deleted = "True"
            msg.put()
            return True
        else:
            return True
    @classmethod
    def delete_received(cls, msg, user):
        """Flag the receiver copy deleted when `user` is the receiver."""
        if msg.receiver.email == user.email:
            msg.receiver_deleted = "True"
            msg.put()
            return True
        else:
            return True
class Favorite(db.Model):
    """Bookmark of a Mentor, Job Applicant or Resource by a user."""
    user = db.ReferenceProperty(User, collection_name ="favorites")
    favorite_type = db.StringProperty(required=True, choices=set(['Mentor', 'Job Applicant', 'Resource']))
    favorite_id = db.StringProperty()  # datastore id of the favorited entity
    # NOTE(review): auto_now_add expects a bool; the string 'True' is
    # merely truthy -- confirm this was intentional.
    created = db.DateTimeProperty(auto_now_add='True')
    @classmethod
    def create(cls, user, favorite_type, favorite_id):
        """Save the favorite unless it already exists."""
        result = cls.gql("WHERE user=:1 and favorite_type=:2 and favorite_id=:3", user, favorite_type, favorite_id)
        count = result.count()
        if count == 0:
            new_favorite = Favorite(
                user = user,
                favorite_type = favorite_type,
                favorite_id = favorite_id
            )
            new_favorite.put()
            return True
        else:
            return "Already favorited"
    @classmethod
    def check(cls, user, item_id):
        """Return UI state (flag + icon path) for whether `item_id` is favorited."""
        result = cls.gql("WHERE user=:1 and favorite_id=:2", user, item_id).count()
        if result == 0:
            msg = {"favorite":"false", "src":"scripts/img/unlike.png"}
            return msg
        else:
            msg = {"favorite":"true", "src":"scripts/img/like.png"}
            return msg
    @classmethod
    def delete(cls, user, favorite_type, favorite_id):
        """Remove the favorite; True whether or not it existed."""
        result = cls.gql("WHERE user=:1 and favorite_type=:2 and favorite_id=:3", user, favorite_type, favorite_id)
        if result.count() == 0:
            return True
        else:
            db.delete(result)
            return True
class Rating(db.Model):
    """A user's rating of a Mentor, Job Applicant or Resource."""
    user = db.ReferenceProperty(User, collection_name ="rating")
    rating_type = db.StringProperty(required=True, choices=set(['Mentor', 'Job Applicant', 'Resource']))
    rating_id = db.StringProperty()  # datastore id of the rated entity
    rating_value = db.StringProperty()  # stored as a string, parsed to float on read
    # NOTE(review): auto_now_add expects a bool; the string 'True' is
    # merely truthy -- confirm this was intentional.
    created = db.DateTimeProperty(auto_now_add='True')
    @classmethod
    def rate(cls, user, rating_type, rating_id,rating_value):
        """Create or update this user's rating; always returns True."""
        result = cls.gql("WHERE user=:1 and rating_type=:2 and rating_id=:3", user, rating_type, rating_id)
        count = result.count()
        if count == 0:
            new_rating = Rating(
                user = user,
                rating_type = rating_type,
                rating_id = rating_id,
                rating_value = rating_value
            )
            new_rating.put()
            return True
        else:
            # Existing rating: overwrite the value
            rating = result.get()
            rating.rating_value = rating_value
            rating.put()
            return True
    @classmethod
    def check(cls, user, item_id):
        """Return whether `user` rated `item_id` and the stored value.

        NOTE(review): returns int 0 when unrated but float when rated --
        confirm consumers tolerate the mixed type.
        """
        result = cls.gql("WHERE user=:1 and rating_id=:2", user, item_id)
        count = result.count()
        if count == 0:
            value = 0
            msg = {"rated":"false", "value":value}
            return msg
        else:
            rating = result.get()
            value = rating.rating_value
            msg = {"rated":"true", "value":float(value)}
            return msg
class Comment(db.Model):
    """A comment left either on a Resource or on a User profile.

    Exactly one of `resource` / `user` is set depending on the target type;
    `entity_id` duplicates the target's id as a string for cheap GQL lookups.
    """
    user = db.ReferenceProperty(User, collection_name="comments_received")
    resource = db.ReferenceProperty(Resource, collection_name='comments_received')
    entity_id = db.StringProperty()
    commentor = db.ReferenceProperty(User, collection_name="comments_made")
    commentor_name = db.StringProperty()
    commentor_id = db.StringProperty()
    content = db.TextProperty()
    created = db.DateTimeProperty(auto_now_add=True)
    @classmethod
    def create(cls, comment):
        """Create a comment from the `comment` dict.

        check() == 0 gates creation, so a commentor can leave at most one
        comment per entity. Returns {"status": bool, "value": new id or None}.
        NOTE: the local name `comment` is rebound from the input dict to the
        new entity inside each branch.
        """
        if (comment.get('type')) == "resource" and (Comment.check(comment) == 0):
        # if (comment.get('type')) == "resource":
            comment = Comment(
                resource = comment.get('entity'),
                entity_id = comment.get('entity_id'),
                commentor = comment.get('commentor'),
                commentor_name = comment.get('commentor').first_name + " " + comment.get('commentor').last_name,
                commentor_id = comment.get('commentor_id'),
                content = comment.get('content')
            )
            comment.put()
            return {"status": True, "value": comment.key().id()}
        elif (comment.get('type')) == "user" and (Comment.check(comment) == 0):
        # elif (comment.get('type')) == "user":
            print "hello========================"
            comment = Comment(
                user = comment.get('entity'),
                entity_id = comment.get('entity_id'),
                commentor = comment.get('commentor'),
                commentor_name = comment.get('commentor').first_name + " " + comment.get('commentor').last_name,
                commentor_id = comment.get('commentor_id'),
                content = comment.get('content')
            )
            comment.put()
            print comment.key().id()
            return {"status": True, "value": comment.key().id()}
        else:
            return {"status": False, "value": None}
    @classmethod
    def edit(cls, comment):
        """Replace the content of the comment whose id is comment["comment_id"].

        Returns {"status": bool, "value": new content or None}; any failure
        (bad id, datastore error) is reported as status False.
        """
        comment_id = comment.get("comment_id")
        try:
            editable_comment = Comment.get_by_id(int(comment_id))
            editable_comment.content = comment.get("content")
            editable_comment.put()
            return {"status": True, "value": editable_comment.content}
        except:
            return {"status": False, "value": None}
    @classmethod
    def delete(cls, comment):
        """Delete the comment whose id is comment["comment_id"].

        Returns {"status": bool, "value": None}; failures are swallowed and
        reported as status False.
        """
        comment_id = comment.get("comment_id")
        print "=============================="
        print comment_id
        print "=============================="
        try:
            item_to_delete = Comment.get_by_id(int(comment_id))
            db.delete(item_to_delete)
            return {"status": True, "value": None}
        except:
            print "yeah"
            return {"status": False, "value": None}
    @classmethod
    def check(cls, comment):
        """Return the number of existing comments by this commentor on this
        entity (any value > 0 blocks creation in create()).

        The commented-out code suggests a per-content duplicate check was once
        intended instead of a per-commentor one.
        """
        print "=============================="
        print comment
        print "=============================="
        result = cls.gql("WHERE entity_id=:1 and commentor_id=:2", comment.get('entity_id'), comment.get('commentor_id'))
        print result.count()
        if result.count() > 0:
            # count = 0
            # for item in result:
            # if comment.get('content') == item.content:
            # count +=1
            return result.count()
        else:
            return 0
class Administrator(db.Model):
    """Admin account linked to a User, with salted SHA-256 password storage."""
    user = db.ReferenceProperty(User, collection_name = "administrator")
    username = db.StringProperty()
    password_hash = db.StringProperty()
    salt = db.StringProperty()
    created = db.DateTimeProperty(auto_now_add = True)
    @staticmethod
    def make_salt():
        """Return a random 5-letter salt (string.letters is Python 2 only)."""
        return ''.join(random.choice(string.letters) for i in range(5))
    @classmethod
    def make_pw_hash(cls, pw, salt):
        """Return the hex SHA-256 digest of password+salt."""
        pw_hash = pw + salt
        return hashlib.sha256(pw_hash).hexdigest()
    @classmethod
    def create(cls, user, password, username):
        """Create an admin for `user`; returns False if one already exists."""
        result = cls.gql("WHERE user=:1", user)
        count = result.count()
        if count == 0:
            salt = Administrator.make_salt()
            password_hash = cls.make_pw_hash(password, salt)
            admin = Administrator(user = user, password_hash = password_hash, username = username, salt = salt)
            admin.put()
            return True
        else:
            return False
    @classmethod
    def change_password(cls, user, old_password, new_password):
        """Change the password after verifying the old one.

        Bug fixes vs. the previous version:
        - removed `count = result.count()`, which referenced an undefined
          name `result` and raised NameError on every call;
        - the model has no `password` property: the old code compared the
          hash against `admin.password` and then stored the NEW password in
          plaintext; it now compares and stores via `password_hash`;
        - returns False (instead of falling through to None) when no admin
          exists for `user` -- both values are falsy for callers.
        """
        admin = cls.gql("WHERE user=:1", user).get()
        if admin:
            old_hash = cls.make_pw_hash(old_password, admin.salt)
            if old_hash == admin.password_hash:
                # Re-hash the new password with the stored salt.
                admin.password_hash = cls.make_pw_hash(new_password, admin.salt)
                admin.put()
                return True
            else:
                return False
        return False
    @classmethod
    def log_in_admin(cls, username, password):
        """Return the admin's entity id on success, False on a bad password,
        or None when the username is unknown (historical behaviour kept)."""
        results = Administrator.all().filter("username =", username).get()
        if results:
            password_hash = cls.make_pw_hash(password, results.salt)
            if password_hash == results.password_hash:
                return results.key().id()
            else:
                return False
    @classmethod
    def delete_admin(cls, user):
        """Delete the single admin record for `user`; False otherwise."""
        result = cls.gql("WHERE user=:1", user)
        count = result.count()
        if count == 1:
            admin_to_be_deleted = result.get()
            admin_to_be_deleted.delete()
            return True
        else:
            return False
class Jobs(db.Model):
    """A job posting created by a user; soft-deleted via job_status."""
    user = db.ReferenceProperty(User, collection_name="jobs")
    job_unique_id = db.StringProperty()
    job_title = db.StringProperty()
    hiring_company = db.StringProperty()
    job_description = db.TextProperty()
    job_status = db.StringProperty(required =True, choices=set(['Available', 'Removed']))
    job_requirements = db.StringProperty()
    job_url = db.StringProperty()
    deadline = db.StringProperty()
    created = db.DateTimeProperty(auto_now_add=True)
    removed_on = db.DateTimeProperty()
    @classmethod
    def state_generator(cls, size=50, chars=string.ascii_uppercase + string.digits):
        """Return a random id of `size` chars that no existing Jobs row uses.

        Bug fixes vs. the previous version:
        - the uniqueness query filtered on `resource_id`, which is not a
          property of this model; it now filters on `job_unique_id`;
        - on a collision the retry called the bare name `state_generator()`
          (a NameError inside a classmethod) and discarded its result; it now
          returns `cls.state_generator(size, chars)`.
        """
        job_unique_id = "".join(random.choice(chars) for x in range(size))
        results = cls.gql("WHERE job_unique_id=:1", job_unique_id)
        if results.count() == 0:
            return job_unique_id
        else:
            return cls.state_generator(size, chars)
    @classmethod
    def create(cls, user_id, job):
        """Create a job posting for the user with id `user_id` from dict `job`."""
        new_job = Jobs(user=User.get_by_id(int(user_id)),
                       job_unique_id=Jobs.state_generator(),
                       job_title=job.get("title"),
                       hiring_company=job.get("company"),
                       job_requirements = job.get("requirements"),
                       job_description=job.get("description"),
                       job_status=job.get("status"),
                       job_url=job.get("url"),
                       deadline=job.get("deadline"),
                       )
        new_job.put()
        return True
    @classmethod
    def edit(cls, user, job):
        """Update an existing posting from dict `job`; returns a status dict."""
        try:
            edited_job = Jobs.get_by_id(int(job.get("job_id")))
            edited_job.job_title = job.get("job_title")
            edited_job.hiring_company = job.get("hiring_company")
            edited_job.job_description = job.get("description")
            edited_job.deadline = job.get("deadline")
            edited_job.job_requirements = job.get("requirements")
            edited_job.put()
            return {"action":"edit","status":True, "job_id": job.get("job_id")}
        except:
            # Bare except kept for backward compatibility: any failure (bad
            # id, datastore error) is reported as a failed edit.
            return {"action":"edit","status":False}
    @classmethod
    def delete(cls, job_id, user):
        """Soft-delete: mark the job Removed (administrators or owner only)."""
        job = Jobs.get_by_id(int(job_id))
        if user.user_profile == "Administrator" or job.user == user:
            job.job_status = "Removed"
            job.removed_on = datetime.datetime.now()
            job.put()
            return {"action":"delete", "status":True}
        else:
            return {"action":"delete", "status":False}
class Subscriber(db.Model):
    """A newsletter subscriber; unsubscribing keeps the row with a status flag."""
    subscriber_name = db.StringProperty()
    subscriber_email = db.StringProperty()
    created = db.DateTimeProperty(auto_now_add=True)
    unsubscribed = db.DateTimeProperty()
    status = db.StringProperty()
    @classmethod
    def subscribe(cls, email, name):
        """Add a subscriber unless the email is already known."""
        already_known = Subscriber.all().filter("subscriber_email =", email).count()
        if already_known == 0:
            Subscriber(
                subscriber_name = name,
                subscriber_email= email,
                status = "subscribed"
            ).put()
            succeeded = True
        else:
            succeeded = False
        return {"action":"subscribe", "status":succeeded}
    @classmethod
    def unsubscribe(cls, email):
        """Flag an active subscriber as unsubscribed, stamping the time."""
        matches = cls.gql("WHERE subscriber_email=:1 and status=:2", email, "subscribed")
        succeeded = matches.count() > 0
        if succeeded:
            subscriber = matches.get()
            subscriber.status = "unsubscribed"
            subscriber.unsubscribed = datetime.datetime.now()
            subscriber.put()
        return {"action":"unsubscribe", "status":succeeded}
    @classmethod
    def delete(cls, email):
        """Hard-delete the subscriber row for `email`, if present."""
        matches = cls.gql("WHERE subscriber_email=:1", email)
        succeeded = matches.count() > 0
        if succeeded:
            db.delete(matches.get())
        return {"action":"delete", "status":succeeded}
|
nicodee/mestmentorplatform
|
models.py
|
Python
|
mit
| 33,885
|
# File generated from python blocks in "doc/quick-start.tex"
# NOTE: this is a doctest transcript -- each ">>>" line is executed and the
# following non-blank lines are compared verbatim against interpreter output,
# so comments are only safe here, before the first example.
# Exercises the HyperDex quick start: create a "phonebook" space, then
# put/get/search by key, by each attribute, and by ranges; finally delete.
>>> import sys
>>> HOST = sys.argv[2]
>>> PORT = int(sys.argv[3])
>>> import hyperdex.admin
>>> a = hyperdex.admin.Admin(HOST, PORT)
>>> a.add_space('''
... space phonebook
... key username
... attributes first, last, int phone
... subspace first, last, phone
... create 8 partitions
... tolerate 2 failures
... ''')
True
>>> import hyperdex.client
>>> c = hyperdex.client.Client(HOST, PORT)
>>> c.put('phonebook', 'jsmith1', {'first': 'John', 'last': 'Smith',
... 'phone': 6075551024})
True
>>> c.get('phonebook', 'jsmith1')
{'first': 'John', 'last': 'Smith', 'phone': 6075551024}
>>> [x for x in c.search('phonebook', {'first': 'John'})]
[{'first': 'John',
 'last': 'Smith',
 'phone': 6075551024,
 'username': 'jsmith1'}]
>>> [x for x in c.search('phonebook', {'last': 'Smith'})]
[{'first': 'John',
 'last': 'Smith',
 'phone': 6075551024,
 'username': 'jsmith1'}]
>>> [x for x in c.search('phonebook', {'phone': 6075551024})]
[{'first': 'John',
 'last': 'Smith',
 'phone': 6075551024,
 'username': 'jsmith1'}]
>>> [x for x in c.search('phonebook',
... {'first': 'John', 'last': 'Smith', 'phone': 6075551024})]
[{'first': 'John',
 'last': 'Smith',
 'phone': 6075551024,
 'username': 'jsmith1'}]
>>> c.put('phonebook', 'jd', {'first': 'John', 'last': 'Doe', 'phone': 6075557878})
True
>>> [x for x in c.search('phonebook',
... {'first': 'John', 'last': 'Smith', 'phone': 6075551024})]
[{'first': 'John',
 'last': 'Smith',
 'phone': 6075551024,
 'username': 'jsmith1'}]
>>> [x for x in c.search('phonebook', {'first': 'John'})]
[{'first': 'John',
 'last': 'Smith',
 'phone': 6075551024,
 'username': 'jsmith1'},
 {'first': 'John',
 'last': 'Doe',
 'phone': 6075557878,
 'username': 'jd'}]
>>> [x for x in c.search('phonebook', {'last': 'Smith'})]
[{'first': 'John',
 'last': 'Smith',
 'phone': 6075551024,
 'username': 'jsmith1'}]
>>> [x for x in c.search('phonebook', {'last': 'Doe'})]
[{'first': 'John',
 'last': 'Doe',
 'phone': 6075557878,
 'username': 'jd'}]
>>> c.delete('phonebook', 'jd')
True
>>> [x for x in c.search('phonebook', {'first': 'John'})]
[{'first': 'John',
 'last': 'Smith',
 'phone': 6075551024,
 'username': 'jsmith1'}]
>>> c.put('phonebook', 'jsmith1', {'phone': 6075552048})
True
>>> c.get('phonebook', 'jsmith1')
{'first': 'John',
 'last': 'Smith',
 'phone': 6075552048}
>>> c.put('phonebook', 'jsmith2',
... {'first': 'John', 'last': 'Smith', 'phone': 5855552048})
True
>>> c.get('phonebook', 'jsmith2')
{'first': 'John',
 'last': 'Smith',
 'phone': 5855552048}
>>> [x for x in c.search('phonebook',
... {'last': 'Smith', 'phone': (6070000000, 6080000000)})]
[{'first': 'John',
 'last': 'Smith',
 'phone': 6075552048,
 'username': 'jsmith1'}]
>>> [x for x in c.search('phonebook',
... {'first': ('Jack', 'Joseph')})]
[{'first': 'John',
 'last': 'Smith',
 'phone': 6075552048,
 'username': 'jsmith1'},
 {'first': 'John',
 'last': 'Smith',
 'phone': 5855552048,
 'username': 'jsmith2'}]
>>> a.rm_space('phonebook')
True
|
hyc/HyperDex
|
test/doc.quick-start.py
|
Python
|
bsd-3-clause
| 3,110
|
from __future__ import unicode_literals
from django.db import models
from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
from viewflow.models import Process, Task
@python_2_unicode_compatible
class Carrier(models.Model):
    """A shipping carrier; the one named DEFAULT is the "normal post" carrier.

    Consistency fix: the sibling Insurance model is wrapped in
    @python_2_unicode_compatible so its __str__ works on Python 2 as well;
    Carrier defines __str__ too but lacked the decorator.
    """
    DEFAULT = 'Default'
    name = models.CharField(max_length=50)
    phone = models.CharField(max_length=15)
    def is_default(self):
        """Return True when this carrier is the default (normal post) one."""
        return self.name == Carrier.DEFAULT
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Insurance(models.Model):
    """An insurance offer: the underwriting company and its flat cost."""
    company_name = models.CharField(max_length=50)
    cost = models.IntegerField()
    def __str__(self):
        label = '{} ${}'.format(self.company_name, self.cost)
        return label
class Shipment(models.Model):
    """All data for one shipment: customer, destination address and carrier
    details. Referenced by ShipmentProcess to drive the viewflow flow."""
    shipment_no = models.CharField(max_length=50)
    carrier = models.ForeignKey(Carrier, null=True)  # null until a carrier is selected
    # customer
    first_name = models.CharField(max_length=150)
    last_name = models.CharField(max_length=150)
    email = models.EmailField()
    # shipment address
    address = models.CharField(max_length=150)
    city = models.CharField(max_length=150)
    state = models.CharField(max_length=150)
    zipcode = models.CharField(max_length=10)
    country = models.CharField(max_length=150)
    phone = models.CharField(max_length=50)
    # shipment data
    need_insurance = models.BooleanField(default=False)
    insurance = models.ForeignKey('Insurance', null=True)  # set only when insured
    carrier_quote = models.IntegerField(default=0)  # NOTE(review): currency/unit unspecified -- confirm
    post_label = models.TextField(blank=True, null=True)
    package_tag = models.CharField(max_length=50)
class ShipmentItem(models.Model):
    """A single line item (name + quantity) belonging to a Shipment."""
    shipment = models.ForeignKey(Shipment)
    name = models.CharField(max_length=250)
    quantity = models.IntegerField(default=1)
class ShipmentProcess(Process):
    """viewflow Process state for the shipment flow; the two predicate methods
    below are used as flow gateway conditions."""
    shipment = models.ForeignKey(Shipment, blank=True, null=True)
    created_by = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True)
    def is_normal_post(self):
        """True if the shipment uses the default carrier, False for another
        carrier, None when no carrier is set or a lookup fails.

        NOTE(review): if self.shipment is None (nullable FK not yet set),
        `self.shipment.carrier` raises AttributeError, which is NOT caught
        below -- confirm the flow always assigns the shipment before this
        gateway runs.
        """
        try:
            if self.shipment.carrier:
                return self.shipment.carrier.is_default()
            else:
                return None
        except (Shipment.DoesNotExist, Carrier.DoesNotExist):
            return None
    def need_extra_insurance(self):
        """Mirror of shipment.need_insurance; None when the FK row is missing."""
        try:
            return self.shipment.need_insurance
        except Shipment.DoesNotExist:
            return None
    class Meta:
        verbose_name_plural = 'Shipment process list'
        permissions = [
            ('can_start_request', 'Can start shipment request'),
            ('can_take_extra_insurance', 'Can take extra insurance'),
            ('can_package_goods', 'Can package goods'),
            ('can_move_package', 'Can move package')
        ]
class ShipmentTask(Task):
    """Proxy over viewflow's Task: no extra fields, just a distinct Python
    type (and admin registration point) for this flow's tasks."""
    class Meta:
        proxy = True
|
codingjoe/viewflow
|
tests/examples/shipment/models.py
|
Python
|
agpl-3.0
| 2,867
|
#! /usr/bin/env python
"""
Module with frame parallactic angle calculations and de-rotation routines for
ADI.
"""
__author__ = 'V. Christiaens @ UChile/ULg, Carlos Alberto Gomez Gonzalez'
__all__ = ['compute_paral_angles',
'compute_derot_angles_pa',
'compute_derot_angles_cd',
'check_pa_vector']
import math
import numpy as np
import os
from ..fits import open_fits
from astropy.coordinates import FK5
from astropy.coordinates import sky_coordinate
from astropy.time import Time
from astropy.units import hourangle, degree
def compute_paral_angles(header, latitude, ra_key, dec_key, lst_key,
                         acqtime_key, date_key='DATE-OBS'):
    """Compute the parallactic angle of one frame from its fits header.

    Assumes an alt-az telescope with the image rotator off, and J2000 FK5
    coordinates in the header. The spherical-trigonometry formula is the one
    from Astronomical Algorithms (Meeus, 1998).

    Parameters
    ----------
    header : dictionary
        Header of current frame.
    latitude : float
        Latitude of the observatory in degrees. The dictionaries in
        vip_hci/conf/param.py can be used like: latitude=LBT['latitude'].
    ra_key, dec_key, lst_key, acqtime_key, date_key : strings
        Keywords where the values are stored in the header.

    Returns
    -------
    pa.value : float
        Parallactic angle in degrees for current header (frame).
    """
    obs_epoch = Time(header[date_key], format='iso', scale='utc')
    # J2000 equatorial coordinates taken from the header
    coord_j2000 = sky_coordinate.SkyCoord(ra=header[ra_key],
                                          dec=header[dec_key],
                                          unit=(hourangle, degree),
                                          frame=FK5, equinox='J2000.0')
    # precess to the observation date
    coord_obs = coord_j2000.transform_to(FK5(equinox=obs_epoch))
    ra_obs = coord_obs.ra
    dec_obs = coord_obs.dec
    # local sidereal time in hours, shifted to mid-exposure
    hh, mm, ss = header[lst_key].split(':')
    lst = float(hh) + float(mm) / 60 + float(ss) / 3600
    half_exp_hours = (header[acqtime_key] * 0.5) / 3600
    half_exp_hours = half_exp_hours * 1.0027  # solar to sidereal time
    # hour angle, converted to radians
    hour_angle = np.deg2rad((lst + half_exp_hours) * 15 - ra_obs.deg)
    lat_rad = np.deg2rad(latitude)
    # PA formula from Astronomical Algorithms
    pa = -np.rad2deg(np.arctan2(-np.sin(hour_angle),
                                np.cos(dec_obs) * np.tan(lat_rad) -
                                np.sin(dec_obs) * np.cos(hour_angle)))
    #if dec_obs.value > latitude: pa = (pa.value + 360) % 360
    return pa.value
def compute_derot_angles_pa(objname_tmp_A, digit_format=3, objname_tmp_B='',
                            inpath='./', writing=False, outpath='./',
                            list_obj=None,
                            PosAng_st_key='HIERARCH ESO ADA POSANG',
                            PosAng_nd_key='HIERARCH ESO ADA POSANG END',
                            verbose=False):
    """
    Function that returns a numpy vector of angles to derotate datacubes so as
    to match North up, East left, based on the mean of the Position Angle at
    the beginning and the end of the exposure.
    => It is twice more precise than function derot_angles_CD (there can be
    >1deg difference in the resulting angle vector returned for fast rotators
    with long exposures!), but it requires:
    1. a header keyword for both the position angle at start and end of exposure
    2. no skewness of the frames
    The output is in appropriate format for the pca algorithm in the sense that:
    1. all angles of the output are in degrees
    2. all angles of the output are positive
    3. there is no jump of more than 180 deg between consecutive values (e.g. no
    jump like [350deg,355deg,0deg,5deg] => replaced by
    [350deg,355deg,360deg,365deg])
    Parameters
    ----------
    objname_tmp_A: string
        Contains the common name of the cubes BEFORE the digits
    digit_format: int, optional
        Number of digits in the name of the cube. The digits are supposed to be
        the only changing part in the name of one cube to another.
    objname_tmp_B: string, optional
        Contains the name of the cubes AFTER the digits
    inpath: string, optional
        Contains the full path of the directory with the data
    writing: bool, optional
        True if you want to write the derotation angles in a txt file.
    outpath: string, optional
        Contains the full path of the directory where you want the txt file to
        be saved.
    list_obj: integer list or 1-D array, optional
        List of the digits corresponding to the cubes to be considered.
        If not provided, the function will consider automatically all the cubes
        with objname_tmp_A+digit+objname_tmp_B+'.fits' name structure in the
        provided "inpath".
    PosAng_st_key, PosAng_nd_key: strings, optional
        Name of the keywords to be looked up in the header, to provide the PA
        from North at start and end of integration.
    verbose: bool, optional
        True if you want more info to be printed.
    Examples
    --------
    If your cubes are: ``/home/foo/out_cube_obj_HK_025_000_sorted.fits``,
    ``/home/foo/out_cube_obj_HK_025_001_sorted.fits``,
    ``/home/foo/out_cube_obj_HK_025_002_sorted.fits``, etc,
    the first arguments should be:
    .. code-block:: python
        objname_tmp_A = 'out_cube_obj_HK_025_'
        digit_format = 3
        objname_tmp_B = '_sorted'
        inpath = '/home/foo/'
    Returns
    -------
    angle_list: 1-D numpy ndarray
        vector of angles corresponding to the angular difference between the
        positive y axis and the North in the image.
        sign convention: positive angles in anti-clockwise direction.
        Opposite values are applied when rotating the image to match North up.
    """
    posang_st = []  # position angle at start of each exposure
    posang_nd = []  # position angle at end of each exposure
    # Build the cube filename for index ii, zero-padded to digit_format digits.
    def _fitsfile(ii):
        return "{}{}{:0{}d}{}.fits".format(inpath, objname_tmp_A, ii,
                                           digit_format, objname_tmp_B)
    # Either scan all possible indices for existing files, or use the given list.
    if list_obj is None:
        list_obj = []
        for ii in range(10**digit_format):
            if os.path.exists(_fitsfile(ii)):
                list_obj.append(ii)
                _, header = open_fits(_fitsfile(ii), verbose=False, header=True)
                posang_st.append(header[PosAng_st_key])
                posang_nd.append(header[PosAng_nd_key])
    else:
        for ii in list_obj:
            _, header = open_fits(_fitsfile(ii), verbose=False, header=True)
            posang_st.append(header[PosAng_st_key])
            posang_nd.append(header[PosAng_nd_key])
    # Write the vector containing parallactic angles
    # (negated mean of the start/end PA; assumes the header values are floats)
    rot = np.zeros(len(list_obj))
    for ii in range(len(list_obj)):
        rot[ii]=-(posang_st[ii]+posang_nd[ii])/2
    # Check and correct to output at the right format
    rot = check_pa_vector(rot,'deg')
    if verbose:
        print("This is the list of angles to be applied: ")
        for ii in range(len(list_obj)):
            print(ii, ' -> ', rot[ii])
    if writing:
        if outpath == '' or outpath is None: outpath=inpath
        f=open(outpath+'Parallactic_angles.txt','w')
        for ii in range(len(list_obj)):
            print(rot[ii], file=f)
        f.close()
    return rot
def compute_derot_angles_cd(objname_tmp_A, digit_format=3, objname_tmp_B='',
                            inpath='./', skew=False, writing=False,
                            outpath='./', list_obj=None, cd11_key='CD1_1',
                            cd12_key='CD1_2', cd21_key='CD2_1',
                            cd22_key='CD2_2', verbose=False):
    """
    Function that returns a numpy vector of angles to derotate datacubes so as
    to match North up, East left, based on the CD matrix information contained
    in the header.
    In case the PosAng keyword is present in the header and there is no skewness
    between x and y axes, favor the use of function compute_derot_angles_PA
    (more precise as it averages for the middle of the exposure).
    The output is in appropriate format for the pca algorithm in the sense that:
    1. all angles of the output are in degrees
    2. all angles of the output are positive
    3. there is no jump of more than 180 deg between consecutive values (e.g. no
    jump like [350deg,355deg,0deg,5deg] => replaced by
    [350deg,355deg,360deg,365deg])
    Parameters
    ----------
    objname_tmp_A: string
        Contains the common name of the cubes BEFORE the digits
    digit_format: int, optional
        Number of digits in the name of the cube. The digits are supposed to be
        the only changing part in the name of one cube to another.
    objname_tmp_B: string, optional
        Contains the name of the cubes AFTER the digits
    inpath: string, optional
        Contains the full path of the directory with the data
    skew: bool, optional
        True if you know there is a different rotation between y- and x- axes.
        The code also detects automatically if there is >1deg skew between y and
        x axes. In case of skewing, 2 vectors of derotation angles are returned:
        one for x and one for y, instead of only one vector.
    writing: bool, optional
        True if you want to write the derotation angles in a txt file.
    outpath: string, opt
        Contains the full path of the directory where you want the txt file to
        be saved.
    list_obj: integer list or 1-D array or None, optional
        List of the digits corresponding to the cubes to be considered.
        If not provided, the function will consider automatically all the cubes
        with objname_tmp_A+digit+objname_tmp_B+'.fits' name structure in the
        provided "inpath".
    cd11_key,cd12_key,cd21_key,cd22_key: strings, optional
        Name of the keywords to be looked up in the header, to provide the:
        - partial of first axis coordinate w.r.t. x (cd11_key)
        - partial of first axis coordinate w.r.t. y (cd12_key)
        - partial of second axis coordinate w.r.t. x (cd21_key)
        - partial of second axis coordinate w.r.t. y (cd22_key)
        Default values are the ones in the headers of ESO or HST fits files.
        For more information, go to:
        http://www.stsci.edu/hst/HST_overview/documents/multidrizzle/ch44.html
    verbose: bool, optional
        True if you want more info to be printed.
    Examples
    --------
    If your cubes are: ``/home/foo/out_cube_obj_HK_025_000_sorted.fits``,
    ``/home/foo/out_cube_obj_HK_025_001_sorted.fits``,
    ``/home/foo/out_cube_obj_HK_025_002_sorted.fits``, etc,
    the first arguments should be:
    .. code:: python
        objname_tmp_A = 'out_cube_obj_HK_025_'
        digit_format = 3
        objname_tmp_B = '_sorted'
        inpath = '/home/foo/'
    Returns
    -------
    angle_list: 1-D numpy ndarray
        vector of angles corresponding to the angular difference between the
        positive y axis and the North in the image.
        sign convention: positive angles in anti-clockwise direction.
        Opposite values are applied when rotating the image to match North up.
        **Note:** if skew is set to True, there are 2 angle_list vectors
        returned; the first to rotate the x-axis and the second for the y-axis.
    """
    # CD matrix coefficients collected per cube
    cd1_1 = []
    cd1_2 = []
    cd2_1 = []
    cd2_2 = []
    # Build the cube filename for index ii, zero-padded to digit_format digits.
    def _fitsfile(ii):
        return "{}{}{:0{}d}{}.fits".format(inpath, objname_tmp_A, ii,
                                           digit_format, objname_tmp_B)
    # Either scan all possible indices for existing files, or use the given list.
    if list_obj is None:
        list_obj = []
        for ii in range(10**digit_format):
            if os.path.exists(_fitsfile(ii)):
                list_obj.append(ii)
                _, header = open_fits(_fitsfile(ii), verbose=False, header=True)
                cd1_1.append(header[cd11_key])
                cd1_2.append(header[cd12_key])
                cd2_1.append(header[cd21_key])
                cd2_2.append(header[cd22_key])
    else:
        for ii in list_obj:
            _, header = open_fits(_fitsfile(ii), verbose=False, header=True)
            cd1_1.append(header[cd11_key])
            cd1_2.append(header[cd12_key])
            cd2_1.append(header[cd21_key])
            cd2_2.append(header[cd22_key])
    # Determine if it's a right- or left-handed coord system from first cube
    det=cd1_1[0]*cd2_2[0]-cd1_2[0]*cd2_1[0]
    if det<0: sgn = -1
    else: sgn = 1
    # Write the vector containing parallactic angles
    rot = np.zeros(len(list_obj))
    rot2 = np.zeros(len(list_obj))
    for ii in range(len(cd1_1)):
        if cd2_1[ii]==0 and cd1_2[ii]==0:
            # diagonal CD matrix: no rotation
            rot[ii]=0
            rot2[ii]=0
        else:
            # x-axis and y-axis rotations, in radians
            rot[ii]=-np.arctan2(sgn*cd1_2[ii],sgn*cd1_1[ii])
            rot2[ii]=-np.arctan2(-cd2_1[ii],cd2_2[ii])
            if rot2[ii] < 0:
                rot2[ii] = 2*math.pi + rot2[ii]
            # NOTE(review): this skewness test (a) compares floors of RADIAN
            # values, so it is much coarser than the "1deg" the message
            # claims, and (b) fires even when skew=True, so the suggested
            # re-run cannot succeed -- confirm whether `if not skew and ...`
            # was intended.
            if np.floor(rot[ii]) != np.floor(rot2[ii]):
                msg = "There is more than 1deg skewness between y and x! "
                msg2 = "Please re-run the function with argument skew=True"
                raise ValueError(msg+msg2)
    # Check and correct to output at the right format
    rot = check_pa_vector(rot,'rad')
    if skew:
        rot2 = check_pa_vector(rot2,'rad')
    if verbose:
        print("This is the list of angles to be applied: ")
        for ii in range(len(cd1_1)):
            print(ii, ' -> ', rot[ii])
            if skew: print('rot2: ', ii, ' -> ', rot2[ii])
    if writing:
        if outpath == '' or outpath is None: outpath=inpath
        f=open(outpath+'Parallactic_angles.txt','w')
        if skew:
            for ii in range(len(cd1_1)):
                print(rot[ii], rot2[ii], file=f)
        else:
            for ii in range(len(cd1_1)):
                print(rot[ii], file=f)
        f.close()
    if skew:
        return rot, rot2
    else:
        return rot
def check_pa_vector(angle_list, unit='deg'):
    """ Checks that an angle vector complies with the format expected by the
    pca-adi algorithm, and corrects it if needed. The right format complies to
    3 criteria:
    1. angles are expressed in degree
    2. the angles are positive
    3. there is no jump of more than 180 deg between consecutive values (e.g.
    no jump like [350deg,355deg,0deg,5deg] => replaced by
    [350deg,355deg,360deg,365deg])
    Parameters
    ----------
    angle_list: 1D-numpy ndarray
        Vector containing the derotation angles
    unit: string, {'deg','rad'}, optional
        The unit type of the input angle list
    Returns
    -------
    angle_list: 1-D numpy ndarray
        Vector containing the derotation angles (after correction to comply
        with the 3 criteria, if needed). The input array is left unmodified.
    Notes
    -----
    This is a vectorized rewrite of the previous per-element Python loops.
    It also fixes a latent bug: with an integer input array and unit='rad',
    the old code assigned degrees back into the integer array element by
    element, silently truncating them; np.rad2deg now returns a float array.
    """
    if unit != 'rad' and unit != 'deg':
        raise ValueError("The input unit should either be 'deg' or 'rad'")
    # work on a copy so the caller's array is never mutated
    angle_list = angle_list.copy()
    if unit == 'rad':
        angle_list = np.rad2deg(angle_list)
    # criterion 2: make all angles positive
    angle_list[angle_list < 0] += 360
    # criterion 3: if any consecutive pair differs by more than 180 deg,
    # shift all angles below 180 deg up by a full turn
    if np.any(np.abs(np.diff(angle_list)) > 180):
        angle_list[angle_list < 180] += 360
    return angle_list
|
vortex-exoplanet/VIP
|
vip_hci/preproc/parangles.py
|
Python
|
mit
| 16,243
|
# -*- coding: utf-8 -*-
from datetime import datetime
import configs.signals as Signals
import application.models as Models
from application.models.order.logistic import gen_uid
import application.services.jobs as Jobs
from configs.enum import LOG_STATS, ORDER_TYPE
from application.utils import groupby, to_utc
class LogisticSpliter(object):
    """Splits an order's entries into several logistics packages.

    Strategies are tried in order by do(): shoes must ship separately from
    other categories, then packages are capped at 200 USD, and finally a
    plain Logistic is created for whatever remains un-split.
    """
    def __init__(self, order=None, entries=None, log=None, debug=False):
        # A given `log` wins over `order`: it carries both the order and the
        # entries. NOTE: if neither is given, order/entries/log stay unset
        # (historical behaviour, kept).
        if order:
            self.order = order
            self.entries = entries or order.entries
            self.log = log
        if log:
            self.order = log.order
            self.entries = entries or log.entries
            self.log = log
        self.debug = debug
    def check_shoes_and_handbags(self):
        """Split shoes away from other categories.

        Returns False when no split is needed (no shoes, a single entry, or
        shoes only); otherwise recurses via do() on each group.
        """
        main_categories = [e.item_snapshot.main_category for e in self.entries]
        if 'shoes' in main_categories and not len(main_categories) == 1:
            # all are shoes
            if len(set(main_categories)) == 1:
                return False
            else:
                grouped_entries = groupby(
                    self.entries,
                    lambda x: x.item_snapshot.main_category == 'shoes'
                )
                return [self.do(self.order, list(e))
                        for k, e in grouped_entries]
        else:
            return False
    def split_with_amount(self):
        """Split entries into packages of at most 200 USD each.

        Returns False when the total is <= 200 USD or no split results;
        otherwise recurses via do() on each package.
        """
        def gen_split_case(entries, packages=None, p_total=0):
            # BUG FIX: the original signature used a mutable default
            # (packages=[]); the generator mutated it via append(), so the
            # leftover package leaked into every subsequent call.
            if packages is None:
                packages = []
            for e in entries:
                if e.amount_usd > 200:
                    # an over-limit entry always ships alone
                    yield [e]
                    continue
                if p_total + e.amount_usd > 200:
                    p_total = e.amount_usd
                    if packages:
                        pkg = packages
                        packages = [e]
                        yield pkg
                    else:
                        yield [e]
                else:
                    packages.append(e)
                    p_total += e.amount_usd
            # Flush the trailing partial package. This replaces the old
            # `entries.index(e) == len(entries) - 1` in-loop test, which was
            # O(n) per iteration and wrong when `entries` held duplicates.
            if packages:
                yield packages
        amount = reduce(lambda x, y: x + y.amount_usd, self.entries, 0)
        if not amount > 200:
            return False
        else:
            # sort descending so over-limit entries are handled first
            ents = sorted(self.entries, key=lambda x: x.amount_usd, reverse=True)
            cases = [i for i in gen_split_case(ents)]
            if len(cases) == 1:
                return False
            return [self.do(self.order, e) for e in cases]
    def create(self):
        """Persist a Logistic for the current entries and attach it to the order."""
        try:
            log = Models.Logistic(detail=self.log.detail)
        except:
            # self.log may be None (no detail to copy over): start fresh.
            log = Models.Logistic(detail=Models.LogisticDetail())
        log.order = self.order
        log.entries = list(self.entries)
        self.order.logistics.append(log)
        if not self.debug:
            log.detail.partner = Models.Partner.objects().first()
            log.detail.partner_tracking_no = gen_uid()
        log.save()
        self.order.save()
        return log
    def do(self, order=None, entries=None):
        """Re-init with the given order/entries, then run the strategies in
        order; the first one returning a truthy result wins."""
        self.__init__(order=order, entries=entries, log=self.log)
        return reduce(lambda x, y: x or y(),
                      [self.check_shoes_and_handbags,
                       self.split_with_amount,
                       self.create], False)
@Signals.logistic_info_updated.connect
def look_for_new_tracking_no(sender, logistic):
    """On logistic-info updates, subscribe any not-yet-subscribed tracking
    number to the kuaidi tracking service via a background job."""
    tracking = logistic.express_tracking
    if not tracking:
        return
    if tracking.is_subscribed:
        print (tracking.number, ' is already subscribed')
    else:
        Jobs.express.kuaidi_request.delay(
            tracking.company, tracking.number
        )
def logistic_provider_dispatcher(order):
    """Create logistics for `order`, auto-split multi-entry ones, then set
    the route for transfer orders and save each logistic.

    NOTE(review): LogisticSpliter.do() can return a Logistic (create path),
    a list (split path) or False; `len(logs)` below therefore assumes the
    multi-entry path always yields something len()-able -- confirm.
    """
    Models.Logistic.create(order)
    for lo in order.logistics:
        if len(lo.entries) > 1:
            logs = LogisticSpliter(log=lo).do()
            # splitted
            if len(logs) > 1:
                # the original logistic was replaced by the split ones
                lo.close(reason="close by auto spliter")
    # pick up the logistics added/closed above
    order.reload()
    for lo in order.logistics:
        if order.order_type == ORDER_TYPE.TRANSFER:
            lo.detail.route = order.order_type
            lo.save()
    return order
|
seasonstar/bibi
|
application/services/logistic.py
|
Python
|
apache-2.0
| 4,141
|
"""This file contains functions that implement the following approaches
to consistency-based belief change in a graph-oriented setting:
1. Global completion: ``eb.global_completion(G)``
2. Simple iteration: ``eb.iterate_simple(G)``
3. Expanding iteration: ``eb.iterate_expanding(G)``
4. Augmenting iteration: ``eb.iterate_augmenting(G)``
5. The ring method: ``eb.iterate_ring(G)``
Each of the approaches has two separate implementations, corresponding to
the *semantic* and *syntactic* characterizations. In addition, there are
two ways of maximizing equivalences used by any approach: *inclusion-based*
or *cardinality-based* maximization.
Each function listed above takes three optional arguments:
1. ``method``, which is a string that is either "semantic" or "syntactic",
representing the method by which to perform the completion; e.g. based
on either the syntactic or semantic characterizations
2. ``opt_type``, which is a string that is either "inclusion" or "cardinality",
representing the type of maximization to be performed over equivalences
3. ``simplify``, which is a Boolean flag specifying whether to simplify the
final formulas at each node.
"""
# Copyright (C) 2016
# Paul Vicol <pvicol@sfu.ca>
# All rights reserved.
# MIT license.
from __future__ import absolute_import
import multiprocessing as mp
import platform
import pkg_resources
from collections import defaultdict
import copy
import tempfile
import networkx as nx
from sympy import *
import equibel as eb
from equibel import asprin
if platform.system() == 'Linux':
if platform.architecture()[0] == '64bit':
from equibel.includes.linux_64 import gringo
#elif platform.architecture()[0] == '32bit':
# from equibel.includes.linux_32 import gringo
elif platform.system() == 'Darwin':
from equibel.includes.mac import gringo
SEMANTIC = 'semantic'
SYNTACTIC = 'syntactic'
CARDINALITY = 'cardinality'
INCLUSION = 'inclusion'
PROJECTION = 'projection'
CONSENSUS = 'consensus'
###################################################################
### ENCODING AND ASPRIN PREFERENCE FILE FOR GLOBAL COMPLETION ###
###################################################################
EQ_GLOBAL = pkg_resources.resource_filename('equibel', 'asp/problem_encodings/global.lp')
PREFERENCE_GLOBAL = pkg_resources.resource_filename('equibel', 'asp/preferences/global_pref.lp')
##############################################
#### FILES FOR CARDINALITY MAXIMIZATION ####
##############################################
GLOBAL_CARDINALITY_MAX = pkg_resources.resource_filename('equibel', 'asp/auxilliary/global_card_max.lp')
EXPANDING_CARDINALITY_MAX = pkg_resources.resource_filename('equibel', 'asp/auxilliary/expanding_card_max.lp')
#####################################################################
### ENCODING AND ASPRIN PREFERENCE FILE FOR EXPANDING ITERATION ###
### (ALSO USED FOR SIMPLE ITERATION AND THE RING METHOD) ###
#####################################################################
EXPANDING_ENCODING = pkg_resources.resource_filename('equibel', 'asp/problem_encodings/expanding.lp')
EXPANDING_PREFERENCE = pkg_resources.resource_filename('equibel', 'asp/preferences/expanding_pref.lp')
######################################################################
### ENCODING AND ASPRIN PREFERENCE FILE FOR AUGMENTING ITERATION ###
######################################################################
AUGMENTING_ENCODING = pkg_resources.resource_filename('equibel', 'asp/problem_encodings/augmenting.lp')
AUGMENTING_PREFERENCE = pkg_resources.resource_filename('equibel', 'asp/preferences/augmenting_pref.lp')
##################################
### AUXILLIARY ASP ENCODINGS ###
##################################
DECONSTRUCT = pkg_resources.resource_filename('equibel', 'asp/auxilliary/deconstruct.lp')
TRANSITIVE = pkg_resources.resource_filename('equibel', 'asp/auxilliary/transitive.lp')
SYMMETRIC = pkg_resources.resource_filename('equibel', 'asp/auxilliary/symmetric.lp')
################################
### NEIGHBORHOOD ENCODINGS ###
################################
WEB = pkg_resources.resource_filename('equibel', 'asp/neighborhoods/web.lp')
USPT = pkg_resources.resource_filename('equibel', 'asp/neighborhoods/uspt.lp')
RING = pkg_resources.resource_filename('equibel', 'asp/neighborhoods/ring.lp')
def expanding_maximal_answer_sets(G, distances, center, radius, atoms=None, neighborhood_type=USPT,
                                  method=SEMANTIC, opt_type=INCLUSION):
    """Computes the maximal answer sets for the *expanding* encoding restricted
    to the neighborhood of ``center`` of the given ``radius``.

    Parameters
    ----------
    G : An EquibelGraph object, representing a graph and associated scenario
    distances : An ASP string of ``dist/3`` facts (see ``create_distance_string``)
    center : The node whose neighborhood is considered
    radius : The neighborhood radius (passed to the encoding as ``#const radius``)
    atoms : An optional alphabet passed through to ``G.to_asp``
    neighborhood_type : ASP file selecting the neighborhood shape (USPT or RING)
    method : Either SEMANTIC or SYNTACTIC; selects the characterization used
    opt_type : Either INCLUSION or CARDINALITY; selects the maximization criterion

    Returns
    -------
    A list of answer sets (each a list of gringo terms), or None if neither
    branch matches (e.g. an unrecognized ``opt_type``).
    """
    if opt_type == INCLUSION:
        if method == SEMANTIC:
            # Inclusion-maximal sets via the asprin preference framework.
            models = asprin.compute_optimal_models([EXPANDING_ENCODING, EXPANDING_PREFERENCE, DECONSTRUCT, neighborhood_type],
                                                   [("base", [], G.to_asp(atoms)),
                                                    ("base", [], distances),
                                                    ("base", [], "#const center={}.".format(center)),
                                                    ("base", [], "#const radius={}.".format(radius))])
            return models
        elif method == SYNTACTIC:
            # Inclusion maximality via clingo's domain-specific heuristics
            # ('domRec' enumerates subset-maximal models of the heuristic atoms).
            ctl = gringo.Control()
            ctl.conf.configuration = 'crafty'
            ctl.conf.solver.heuristic = 'domain'
            ctl.conf.solve.enum_mode = 'domRec'
            ctl.conf.solve.models = 0
            ctl.load(EXPANDING_ENCODING)
            ctl.load(TRANSITIVE)
            ctl.load(SYMMETRIC)
            ctl.load(DECONSTRUCT)
            ctl.load(neighborhood_type)
            ctl.add("base", [], G.to_asp(atoms))
            ctl.add("base", [], distances)
            ctl.add("base", [], "#const center={}.".format(center))
            ctl.add("base", [], "#const radius={}.".format(radius))
            ctl.ground([('base', [])])
            answer_sets = []
            for ans in ctl.solve_iter():
                answer_sets.append(ans.atoms(gringo.Model.SHOWN))
            return answer_sets
    elif opt_type == CARDINALITY:
        # Cardinality maximization uses a #maximize-style encoding ('optN'
        # enumerates all optimal models after the optimum is found).
        ctl = gringo.Control()
        ctl.conf.configuration = 'crafty'
        ctl.conf.solver.heuristic = 'domain'
        ctl.conf.solve.opt_mode = 'optN'
        ctl.conf.solve.models = 0
        ctl.load(EXPANDING_ENCODING)
        ctl.load(SYMMETRIC)
        ctl.load(DECONSTRUCT)
        ctl.load(neighborhood_type)
        ctl.load(EXPANDING_CARDINALITY_MAX)
        if method == SYNTACTIC:
            # The syntactic characterization additionally needs transitivity of eq/4.
            ctl.load(TRANSITIVE)
        ctl.add("base", [], G.to_asp(atoms))
        ctl.add("base", [], distances)
        ctl.add("base", [], "#const center={}.".format(center))
        ctl.add("base", [], "#const radius={}.".format(radius))
        #if method == SEMANTIC:
        #    ctl.add("base", [], "#show tv/4.")
        ctl.ground([('base', [])])
        old_opt_value = None
        answer_sets = []
        for ans in ctl.solve_iter():
            # NOTE(review): debug prints left in the cardinality branch — consider removing.
            print(ans.atoms(gringo.Model.SHOWN))
            print("OPTIMIZATION = {}".format(ans.optimization()))
            current_opt_value = ans.optimization()
            if current_opt_value != old_opt_value:
                # First model at a new optimization value is the optimum witness;
                # skip it and collect the subsequent optimal models.
                old_opt_value = current_opt_value
                continue
            answer_sets.append(ans.atoms(gringo.Model.SHOWN))
        return answer_sets
def augmenting_iteration_maximal_answer_sets(G, distances, center, eccentricity):
    """Runs asprin on the augmenting-iteration encoding and returns the
    optimal (inclusion-maximal) answer sets for node ``center``.

    ``distances`` is an ASP string of ``dist/3`` facts, and ``eccentricity``
    bounds how far the augmenting neighborhood may grow.
    """
    program_files = [AUGMENTING_ENCODING, AUGMENTING_PREFERENCE, DECONSTRUCT]
    program_parts = [
        ("base", [], G.to_asp()),
        ("base", [], distances),
        ("base", [], "#const center={}.".format(center)),
        ("base", [], "#const eccentricity={}.".format(eccentricity)),
    ]
    return asprin.compute_optimal_models(program_files, program_parts)
def maximal_answer_sets(G, method=SEMANTIC, opt_type=INCLUSION):
    """Computes the maximal answer sets of the *global* completion encoding.

    Parameters
    ----------
    G : An EquibelGraph object, representing a graph and associated scenario
    method : Either SEMANTIC or SYNTACTIC; selects the characterization used
    opt_type : Either INCLUSION or CARDINALITY; selects the maximization criterion

    Returns
    -------
    A list of answer sets (each a list of gringo terms), or None if neither
    branch matches (e.g. an unrecognized ``opt_type``).
    """
    if opt_type == INCLUSION:
        if method == SEMANTIC:
            # Inclusion-maximal models via the asprin preference framework.
            answer_sets = asprin.compute_optimal_models([EQ_GLOBAL, PREFERENCE_GLOBAL, DECONSTRUCT],
                                                        [("base", [], G.to_asp()),
                                                         ("base", [], "#show tv/3.")])
            return answer_sets
        elif method == SYNTACTIC:
            # 'domRec' enumeration yields subset-maximal models of the
            # domain-heuristic atoms (here, the eq/3 atoms).
            ctl = gringo.Control()
            ctl.conf.configuration = 'crafty'
            ctl.conf.solver.heuristic = 'domain'
            ctl.conf.solve.enum_mode = 'domRec'
            ctl.conf.solve.models = 0
            ctl.load(EQ_GLOBAL)
            ctl.load(TRANSITIVE)
            ctl.load(SYMMETRIC)
            ctl.load(DECONSTRUCT)
            ctl.add("base", [], G.to_asp())
            ctl.ground([('base', [])])
            answer_sets = []
            for ans in ctl.solve_iter():
                answer_sets.append(ans.atoms(gringo.Model.SHOWN))
            return answer_sets
    elif opt_type == CARDINALITY:
        # 'optN' enumerates all optimal models once the optimum is established.
        ctl = gringo.Control()
        ctl.conf.configuration = 'crafty'
        ctl.conf.solver.heuristic = 'domain'
        ctl.conf.solve.opt_mode = 'optN'
        ctl.conf.solve.models = 0
        ctl.load(EQ_GLOBAL)
        ctl.load(SYMMETRIC)
        ctl.load(DECONSTRUCT)
        ctl.load(GLOBAL_CARDINALITY_MAX)
        if method == SYNTACTIC:
            # The syntactic characterization additionally needs transitivity of eq/3.
            ctl.load(TRANSITIVE)
        ctl.add("base", [], G.to_asp())
        if method == SEMANTIC:
            ctl.add("base", [], "#show tv/3.")
        ctl.ground([('base', [])])
        old_opt_value = None
        answer_sets = []
        for ans in ctl.solve_iter():
            current_opt_value = ans.optimization()
            if current_opt_value != old_opt_value:
                # First model at a new optimization value witnesses the new
                # optimum; skip it and keep the subsequent optimal models.
                old_opt_value = current_opt_value
                continue
            answer_sets.append(ans.atoms(gringo.Model.SHOWN))
        return answer_sets
def revise(K, alpha, simplify=False):
    """Revises a knowledge base $K$ by a formula $\alpha$.
    This function implements the consistency-based revision operator introduced in [?], by:
    1. Constructing a two-node path graph 0 <--> 1;
    2. Associating the formulas in $K$ with node 0, and the formula $\alpha$ with node 1;
    3. Computing the new belief at node 1 by determining what parts of $K$ can be incorporated
       while maintaining consistency with $\alpha$.
    Parameters
    ----------
    K : A formula string or a list of formula strings (taken conjunctively as a knowledge base)
    alpha : A formula string or a list of formula strings (taken conjunctively)
    simplify : A Boolean flag specifying whether to simplify the result of revision.
    Returns
    -------
    R : A single formula representing the resultant knowledge base $K \dot{+} \alpha$, taken
        conjunctively.
    Examples
    --------
    The simplest way to call this function is to provide a single formula for each of ``K``
    and ``alpha``:
    >>> eb.revise('p & q', '~p | ~q')
    And(Or(And(Not(p), q), And(Not(q), p)), Or(Not(p), Not(q)))
    To pretty-print formulas in infix notation with Unicode symbols, we can use:
    >>> eb.pprint(eb.revise('p & q', '~p | ~q'))
    Note that, by default, the formula representing the result of revision is not simplified.
    Setting the optional argument ``simplify=True`` enables this final simplification step:
    >>> eb.revise('p & q', '~p | ~q', simplify=True)
    Or(And(Not(p), q), And(Not(q), p))
    The above revision is equivalent to the following form, using a list of formula strings
    ['p', 'q'] (taken conjunctively) instead of the single formula string 'p & q':
    >>> eb.revise(['p', 'q'], '~p | ~q', simplify=True)
    Or(And(Not(p), q), And(Not(q), p))
    """
    # Encode the revision as a two-node scenario: K at node 0, alpha at node 1.
    G = eb.path_graph(2)
    if isinstance(K, list):
        for item in K:
            G.add_formula(0, item)
    else:
        G.add_formula(0, K)
    if isinstance(alpha, list):
        for item in alpha:
            G.add_formula(1, item)
    else:
        G.add_formula(1, alpha)
    atoms = G.atoms()
    eq_sets = []
    # Enumerate the inclusion-maximal EQ sets using clingo's domain heuristics
    # (same configuration as the syntactic branch of maximal_answer_sets).
    ctl = gringo.Control()
    ctl.conf.configuration = 'crafty'
    ctl.conf.solver.heuristic = 'domain'
    ctl.conf.solve.enum_mode = 'domRec'
    ctl.conf.solve.models = 0
    ctl.load(EQ_GLOBAL)
    ctl.load(TRANSITIVE)
    ctl.load(SYMMETRIC)
    ctl.load(DECONSTRUCT)
    ctl.add("base", [], G.to_asp(atoms))
    ctl.ground([('base', [])])
    answer_sets = ctl.solve_iter()
    for answer_set in answer_sets:
        # Collect the atoms on which nodes 0 and 1 agree in this answer set.
        current_eq_atoms = set()
        for term in answer_set.atoms(gringo.Model.SHOWN):
            if term.name() == 'eq':
                atom_fun, X, Y = term.args()
                atom = symbols(atom_fun.name())
                current_eq_atoms.add(atom)
        eq_sets.append(current_eq_atoms)
    # For each maximal EQ set, translate K over the disagreeing atoms
    # (substituting each with its negation) and take the disjunction.
    disjuncts = set()
    for eq_atoms in eq_sets:
        conj = true
        diff_atoms = atoms - eq_atoms
        original_formula = G.formula_conj(0)
        translated_formula = original_formula.xreplace({x: ~x for x in diff_atoms})
        conj &= translated_formula
        disjuncts.add(conj)
    resultant_formula = G.formula_conj(1) & disjunction(disjuncts)
    if simplify:
        return eb.simplify_logic(resultant_formula)
    else:
        return resultant_formula
def global_completion(G, method=SEMANTIC, opt_type=INCLUSION, simplify=False):
    """Finds the global completion of a graph and associated scenario.
    Parameters
    ----------
    G : An EquibelGraph object, representing a graph and associated scenario
    method : A string that is either "semantic" or "syntactic", representing the
             method by which to perform the completion; e.g. based on either the
             syntactic or semantic characterizations.
    opt_type : A string that is either "inclusion" or "cardinality",
               representing the type of maximization to be performed
               over equivalences.
    simplify : A Boolean flag specifying whether to simplify the final formulas at each node.
    Returns
    -------
    R : A new EquibelGraph object, representing the global completion of ``G``.
    Raises
    ------
    ValueError : If ``method`` is neither "semantic" nor "syntactic".
    """
    if method == SEMANTIC:
        R = global_completion_semantic(G, opt_type=opt_type)
    elif method == SYNTACTIC:
        R = global_completion_syntactic(G, opt_type=opt_type)
    else:
        # Fail fast with a clear message instead of an UnboundLocalError on R below.
        raise ValueError('method must be either "semantic" or "syntactic"; got {!r}'.format(method))
    if simplify:
        simplify_all_formulas(R)
    return R
def global_completion_semantic(G, opt_type=INCLUSION):
    """Finds the global completion of a graph and associated scenario, using the
    **semantic characterization**.
    Parameters
    ----------
    G : An EquibelGraph object, representing a graph and associated scenario
    opt_type : A string that is either "inclusion" or "cardinality", representing
               the type of maximization to be performed over equivalences.
    Returns
    -------
    R : A new EquibelGraph object, representing the global completion of ``G``.
    """
    # node_models maps a node to the set of its models (each a frozenset of true atoms).
    node_models = defaultdict(set)
    # Scratch mapping, reused (cleared) for each maximal answer set.
    node_model_dict = defaultdict(list)
    models = maximal_answer_sets(G, method=SEMANTIC, opt_type=opt_type)
    for model in models:
        node_model_dict.clear()
        for term in model:
            # tv(node, atom, truth_value) encodes the valuation at each node.
            if term.name() == 'tv':
                node, atom_fun, truth_value = term.args()
                atom = symbols(atom_fun.name())
                if truth_value == 1:
                    node_model_dict[node].append(atom)
        for node in G.nodes():
            if node_model_dict[node]:
                node_models[node].add(frozenset(node_model_dict[node]))
            else:
                # node has no models with true atoms
                node_models[node].add(frozenset())
    atoms = G.atoms()
    # Build the result graph: same topology, fresh formulas in DNF from the models.
    R = copy.deepcopy(G)
    R.clear_formulas()
    for node in node_models:
        t = tuple(node_models[node])
        formula = formula_from_models(t, atoms)
        R.add_formula(node, formula)
    return R
def create_eq_dicts(answer_sets):
    """Extracts ``eq/3`` predicates from a set of answer sets, and structures them
    into nested dictionaries so that ``eq_dict[x][y]`` is the set of atoms on
    which nodes ``x`` and ``y`` agree.

    Parameters
    ----------
    answer_sets : An iterable container of answer sets, where each answer set is
                  a list of gringo terms.

    Returns
    -------
    A list with one nested dictionary per answer set.
    """
    eq_dicts = []
    for answer_set in answer_sets:
        eq_dict = defaultdict(lambda: defaultdict(set))
        for term in answer_set:
            if term.name() != 'eq':
                continue
            atom_fun, first_node, second_node = term.args()
            eq_dict[first_node][second_node].add(symbols(atom_fun.name()))
        eq_dicts.append(eq_dict)
    return eq_dicts
def global_completion_syntactic(G, opt_type=INCLUSION):
    """Finds the global completion of a graph and associated scenario, using the
    **syntactic characterization**.
    Parameters
    ----------
    G : An EquibelGraph object, representing a graph and associated scenario
    opt_type : A string that is either "inclusion" or "cardinality", representing
               the type of maximization to be performed over equivalences.
    Returns
    -------
    R : A new EquibelGraph object, representing the global completion of ``G``.
    """
    atoms = G.atoms()
    answer_sets = maximal_answer_sets(G, method=SYNTACTIC, opt_type=opt_type)
    # eq_dicts[k][j][i] is the set of atoms on which nodes j and i agree
    # in the k-th maximal answer set.
    eq_dicts = create_eq_dicts(answer_sets)
    R = copy.deepcopy(G)
    R.clear_formulas()
    disjuncts = defaultdict(set)
    for eq_dict in eq_dicts:
        for j in G:
            # Conjoin, for every other node i, node i's formula translated over
            # the atoms on which i and j disagree (each replaced by its negation).
            conj = true
            for i in G:
                if i != j:
                    eq_atoms = eq_dict[j][i]
                    diff_atoms = atoms - eq_atoms
                    original_formula = G.formula_conj(i)
                    translated_formula = original_formula.xreplace({x: ~x for x in diff_atoms})
                    conj &= translated_formula
            disjuncts[j].add(conj)
    for node in G:
        # New belief at node = old belief AND the disjunction over all maximal EQ sets.
        resultant_formula = G.formula_conj(node) & disjunction(disjuncts[node])
        R.add_formula(node, resultant_formula)
    return R
def simple_semantic(G, center, atoms, opt_type=INCLUSION):
    """Computes the formula for node ``center`` in graph ``G`` that results from
    one iteration of the simple (fixed-radius) approach, using the *semantic characterization*.
    Parameters
    ----------
    G : An EquibelGraph object, representing a graph and an associated scenario
    center : A node in ``G`` for which to compute the result of simple iteration
    atoms : A set of Sympy atomic propositions, representing the *alphabet*.
    opt_type : A string that is either "inclusion" or "cardinality", representing
               the type of maximization to be performed over equivalences.
    Returns
    -------
    A singleton dict ``{center: formula}`` (the interface expected by
    ``serial_iteration`` / ``parallel_iteration``).
    """
    dist_str = create_distance_string(G)
    # NOTE(review): ecc is computed but never used here (radius is fixed at 1) —
    # possibly only kept for its side effect of raising on disconnected graphs; confirm.
    ecc = nx.eccentricity(G, center)
    # Fixed radius 1: only the immediate neighborhood of `center` is considered.
    answer_sets = expanding_maximal_answer_sets(G, dist_str, center, 1, atoms=atoms, neighborhood_type=USPT,
                                                method=SEMANTIC, opt_type=opt_type)
    center_models = set()
    node_model_dict = dict()
    for answer_set in answer_sets:
        # NOTE(review): current_eq_set is assigned but never used in this function.
        current_eq_set = set()
        for term in answer_set:
            # tv(center, node, atom, truth_value): valuation relative to `center`.
            if term.name() == 'tv':
                center_node, node, atom_fun, truth_value = term.args()
                atom = symbols(atom_fun.name())
                if center_node == node:
                    node_model_dict[atom] = truth_value
        true_atoms = frozenset([atom for atom in node_model_dict if node_model_dict[atom] == 1])
        center_models.add(true_atoms)
    formula = formula_from_models(center_models, atoms)
    return {center: formula}
def simple_syntactic(G, center, atoms, opt_type=INCLUSION):
    """Computes the formula for node ``center`` in graph ``G`` that results from
    one iteration of the simple (fixed-radius) approach, using the *syntactic characterization*.
    Parameters
    ----------
    G : An EquibelGraph object, representing a graph and an associated scenario
    center : A node in ``G`` for which to compute the result of simple iteration
    atoms : A set of Sympy atomic propositions, representing the *alphabet*.
    opt_type : A string that is either "inclusion" or "cardinality", representing
               the type of maximization to be performed over equivalences.
    Returns
    -------
    A singleton dict ``{center: formula}`` (the interface expected by
    ``serial_iteration`` / ``parallel_iteration``).
    """
    dist_str = create_distance_string(G)
    # NOTE(review): ecc is computed but never used here (radius is fixed at 1) — confirm intent.
    ecc = nx.eccentricity(G, center)
    eq_sets = []
    # Fixed radius 1: only the immediate neighborhood of `center` is considered.
    answer_sets = expanding_maximal_answer_sets(G, dist_str, center, 1, atoms=atoms, neighborhood_type=USPT,
                                                method=SYNTACTIC, opt_type=opt_type)
    for answer_set in answer_sets:
        # current_eq_set is a mapping like current_eq_set[2] = {p,q,r},
        # current_eq_set[3] = {p,q}, all with respect to a given node (e.g. node 1)
        current_eq_set = defaultdict(set)
        for term in answer_set:
            # eq(center, atom, X, Y): X and Y agree on atom, relative to `center`.
            if term.name() == 'eq':
                center_node, atom_fun, X, Y = term.args()
                atom = symbols(atom_fun.name())
                if center_node == X:
                    current_eq_set[Y].add(atom)
        eq_sets.append(current_eq_set)
    formula = formula_from_eq_sets(G, center, eq_sets, atoms)
    return {center: formula}
def only_one_model_for_each_eq_set(models_for_eq_set_dict):
    """Checks whether each EQ set in ``models_for_eq_set_dict`` is associated with
    *only one* model.

    Parameters
    ----------
    models_for_eq_set_dict : A dictionary where *keys* are EQ sets (represented by any
                             hashable objects, in this case strings) and *values* are
                             sets of models (where each model is a set of atoms).

    Returns
    -------
    True if each EQ set in the dictionary is associated with at most one model;
    False if any EQ set is associated with *more than* one model. An empty
    dictionary yields True (vacuously).
    """
    # Idiomatic form of the original early-return loop.
    return all(len(models) <= 1 for models in models_for_eq_set_dict.values())
def expanding_semantic(G, center, atoms, opt_type=INCLUSION):
    """Computes the formula for node ``center`` in graph ``G`` that results from
    one iteration of the expanding approach, using the *semantic characterization*.

    Note: this function updates the formula of ``center`` in ``G`` *in place*
    after each radius, so the next radius builds on the previous result.

    Parameters
    ----------
    G : An ``EquibelGraph`` object, representing a graph and an associated scenario
    center : A node in ``G`` for which to compute the result of simple iteration
    atoms : A set of Sympy atomic propositions, representing the *alphabet*.
    opt_type : A string that is either "inclusion" or "cardinality", representing
               the type of maximization to be performed over equivalences.
    Returns
    -------
    A singleton dict ``{center: formula}``.
    """
    dist_str = create_distance_string(G)
    ecc = nx.eccentricity(G, center)
    #print("CENTER = {}".format(center))
    #print("Eccentricity = {}".format(ecc))
    # Grow the neighborhood radius from 1 up to the eccentricity of `center`.
    for radius in range(1, ecc+1):
        #print("RADIUS = {}".format(radius))
        #print(G.to_asp(atoms))
        center_models = set()
        models = expanding_maximal_answer_sets(G, dist_str, center, radius, atoms=atoms,
                                               method=SEMANTIC, opt_type=opt_type)
        node_model_dict = dict()
        # Group the models of `center` by the EQ set they arise from, to drive
        # the early-stopping check below.
        models_for_eq_set_dict = defaultdict(set)
        for model in models:
            current_eq_set = set()
            for term in model:
                if term.name() == 'tv':
                    center_node, node, atom_fun, truth_value = term.args()
                    atom = symbols(atom_fun.name())
                    if center_node == node:
                        node_model_dict[atom] = truth_value
                elif term.name() == 'eq':
                    current_eq_set.add(str(term))
            true_atoms = frozenset([atom for atom in node_model_dict if node_model_dict[atom] == 1])
            center_models.add(true_atoms)
            models_for_eq_set_dict[frozenset(current_eq_set)].add(true_atoms)
        # Replace the belief at `center` with the intermediate result for this radius.
        mid_formula = formula_from_models(center_models, atoms)
        G.clear_formulas_from(center)
        G.add_formula(center, mid_formula)
        # Early stopping condition:
        if only_one_model_for_each_eq_set(models_for_eq_set_dict):
            break
    formula = formula_from_models(center_models, atoms)
    return {center: formula}
def expanding_syntactic(G, center, atoms, opt_type=INCLUSION):
    """Computes the formula for node ``center`` in graph ``G`` that results from
    one iteration of the expanding approach, using the *syntactic characterization*.

    Note: this function updates the formula of ``center`` in ``G`` *in place*
    after each radius, so the next radius builds on the previous result.

    Parameters
    ----------
    G : An ``EquibelGraph`` object, representing a graph and an associated scenario
    center : A node in ``G`` for which to compute the result of simple iteration
    atoms : A set of Sympy atomic propositions, representing the *alphabet*.
    opt_type : A string that is either "inclusion" or "cardinality", representing
               the type of maximization to be performed over equivalences.
    Returns
    -------
    A singleton dict ``{center: formula}``.
    """
    dist_str = create_distance_string(G)
    ecc = nx.eccentricity(G, center)
    #print("CENTER = {}".format(center))
    #print("Eccentricity = {}".format(ecc))
    # Grow the neighborhood radius from 1 up to the eccentricity of `center`.
    for radius in range(1, ecc+1):
        eq_sets = []
        #print("RADIUS = {}".format(radius))
        #print(G.to_asp(atoms))
        answer_sets = expanding_maximal_answer_sets(G, dist_str, center, radius, atoms=atoms,
                                                    method=SYNTACTIC, opt_type=opt_type)
        for answer_set in answer_sets:
            # current_eq_set is a mapping like current_eq_set[2] = {p,q,r}, current_eq_set[3] = {p,q}, all for node 1
            current_eq_set = defaultdict(set)
            for term in answer_set:
                if term.name() == 'eq':
                    center_node, atom_fun, X, Y = term.args()
                    atom = symbols(atom_fun.name())
                    if center_node == X:
                        current_eq_set[Y].add(atom)
            eq_sets.append(current_eq_set)
        # Replace the belief at `center` with the intermediate result for this radius.
        mid_formula = formula_from_eq_sets(G, center, eq_sets, atoms)
        G.clear_formulas_from(center)
        G.add_formula(center, mid_formula)
    final_formula = G.formula_conj(center)
    return {center: final_formula}
def formula_from_eq_sets(G, current_node, eq_sets, atoms):
    """Builds the new formula for ``current_node`` from a collection of EQ sets.

    For each EQ set, every other node's formula is translated by negating the
    atoms on which the two nodes disagree; the translations are conjoined.
    The result is ``current_node``'s own formula conjoined with the
    disjunction over all EQ sets.
    """
    disjuncts = set()
    for eq_set in eq_sets:
        combined = true
        for other_node, shared_atoms in eq_set.items():
            # Negate the atoms on which `other_node` and `current_node` disagree.
            flips = {a: ~a for a in atoms - shared_atoms}
            combined &= G.formula_conj(other_node).xreplace(flips)
        disjuncts.add(combined)
    return G.formula_conj(current_node) & disjunction(disjuncts)
def simplify_all_formulas(G):
    """Simplifies the formulas of all nodes in ``G``, modifying ``G`` in place
    (nothing is returned).

    Parameters
    ----------
    G : An ``EquibelGraph`` object.

    Example
    -------
    >>> G = eb.path_graph(2)
    >>> G.add_formula(0, 'p & (p | q | r | s)')
    >>> G.add_formula(1, '(p & q) | (p & ~q)')
    >>> eb.simplify_all_formulas(G)
    >>> G.formulas()
    {0: set([p]), 1: set([p])}
    """
    for node in G:
        reduced = simplify_logic(G.formula_conj(node))
        G.clear_formulas_from(node)
        # A formula that simplifies to True carries no information; leave the
        # node with no formulas rather than storing a tautology.
        if reduced != True:
            G.add_formula(node, reduced)
def iterate_function_fixpoint(G, iteration_function, method=SEMANTIC, opt_type=INCLUSION, simplify=False):
    """Repeatedly applies ``iteration_function`` starting from ``G`` until a
    fixpoint is reached (the produced graph equals its predecessor).

    Parameters
    ----------
    G : An ``EquibelGraph`` object
    iteration_function : The function to be applied iteratively, starting with ``G``
    method : Either "semantic" or "syntactic"; forwarded to ``iteration_function``
    opt_type : Either "inclusion" or "cardinality"; forwarded to ``iteration_function``
    simplify : Whether to simplify formulas; forwarded to ``iteration_function``

    Returns
    -------
    A pair ``(R, num_iterations)``: the fixpoint graph and the number of
    applications performed (including the final, stabilizing one).
    """
    previous = None
    current = G
    steps = 0
    while current != previous:
        steps += 1
        # Snapshot before applying the function so the comparison is against
        # an independent copy, not an aliased (possibly mutated) object.
        previous = copy.deepcopy(current)
        current = iteration_function(current, method=method, opt_type=opt_type, simplify=simplify)
    return current, steps
def iterate_expanding_fixpoint(G, method=SEMANTIC, opt_type=INCLUSION, simplify=False):
    """Finds the fixpoint of *expanding iteration* with respect to the graph ``G``.

    Returns a pair ``(R, num_iterations)`` — see ``iterate_function_fixpoint``."""
    return iterate_function_fixpoint(G, iterate_expanding, method=method, opt_type=opt_type, simplify=simplify)
def iterate_augmenting_fixpoint(G, method=SEMANTIC, opt_type=INCLUSION, simplify=False):
    """Finds the fixpoint of *augmenting iteration* with respect to the graph ``G``.

    Returns a pair ``(R, num_iterations)`` — see ``iterate_function_fixpoint``."""
    return iterate_function_fixpoint(G, iterate_augmenting, method=method, opt_type=opt_type, simplify=simplify)
def iterate_simple_fixpoint(G, method=SEMANTIC, opt_type=INCLUSION, simplify=False):
    """Finds the fixpoint of *simple iteration* with respect to the graph ``G``.

    Returns a pair ``(R, num_iterations)`` — see ``iterate_function_fixpoint``."""
    return iterate_function_fixpoint(G, iterate_simple, method=method, opt_type=opt_type, simplify=simplify)
def iterate_ring_fixpoint(G, method=SEMANTIC, opt_type=INCLUSION, simplify=False):
    """Finds the fixpoint of the *ring method* with respect to the graph ``G``.

    Returns a pair ``(R, num_iterations)`` — see ``iterate_function_fixpoint``."""
    return iterate_function_fixpoint(G, iterate_ring, method=method, opt_type=opt_type, simplify=simplify)
def iterate_expanding(G, method=SEMANTIC, opt_type=INCLUSION, simplify=False):
    """Performs one step of *expanding iteration* on every node of ``G`` in parallel.

    Returns a new ``EquibelGraph``; raises ValueError if ``method`` is neither
    "semantic" nor "syntactic".
    """
    if method == SEMANTIC:
        R = parallel_iteration(G, expanding_semantic, opt_type=opt_type)
    elif method == SYNTACTIC:
        R = parallel_iteration(G, expanding_syntactic, opt_type=opt_type)
    else:
        # Fail fast instead of an UnboundLocalError on R below.
        raise ValueError('method must be either "semantic" or "syntactic"; got {!r}'.format(method))
    if simplify:
        simplify_all_formulas(R)
    return R
def iterate_simple(G, method=SEMANTIC, opt_type=INCLUSION, simplify=False):
    """Performs one step of *simple (fixed-radius) iteration* on every node of ``G`` in parallel.

    Returns a new ``EquibelGraph``; raises ValueError if ``method`` is neither
    "semantic" nor "syntactic".
    """
    if method == SEMANTIC:
        R = parallel_iteration(G, simple_semantic, opt_type=opt_type)
    elif method == SYNTACTIC:
        R = parallel_iteration(G, simple_syntactic, opt_type=opt_type)
    else:
        # Fail fast instead of an UnboundLocalError on R below.
        raise ValueError('method must be either "semantic" or "syntactic"; got {!r}'.format(method))
    if simplify:
        simplify_all_formulas(R)
    return R
def iterate_augmenting(G, method=SEMANTIC, opt_type=INCLUSION, simplify=False):
    """Performs one step of *augmenting iteration* on every node of ``G`` in parallel.

    Returns a new ``EquibelGraph``; raises ValueError if ``method`` is neither
    "semantic" nor "syntactic".
    """
    if method == SEMANTIC:
        R = parallel_iteration(G, augmenting_semantic, opt_type=opt_type)
    elif method == SYNTACTIC:
        R = parallel_iteration(G, augmenting_syntactic, opt_type=opt_type)
    else:
        # Fail fast instead of an UnboundLocalError on R below.
        raise ValueError('method must be either "semantic" or "syntactic"; got {!r}'.format(method))
    if simplify:
        simplify_all_formulas(R)
    return R
def iterate_ring(G, method=SEMANTIC, opt_type=INCLUSION, simplify=False):
    """Performs one step of the *ring method* on every node of ``G`` in parallel.

    Returns a new ``EquibelGraph``; raises ValueError if ``method`` is neither
    "semantic" nor "syntactic".
    """
    if method == SEMANTIC:
        R = parallel_iteration(G, ring_semantic, opt_type=opt_type)
    elif method == SYNTACTIC:
        R = parallel_iteration(G, ring_syntactic, opt_type=opt_type)
    else:
        # Fail fast instead of an UnboundLocalError on R below.
        raise ValueError('method must be either "semantic" or "syntactic"; got {!r}'.format(method))
    if simplify:
        simplify_all_formulas(R)
    return R
def augmenting_semantic(G, center, atoms, opt_type=INCLUSION):
    """Computes the formula for node ``center`` resulting from one step of
    *augmenting iteration*, using the semantic characterization.

    NOTE(review): ``opt_type`` is accepted for interface uniformity but is not
    forwarded to ``augmenting_iteration_maximal_answer_sets`` — confirm this is intended.

    Returns a singleton dict ``{center: formula}``.
    """
    dist_str = create_distance_string(G)
    ecc = nx.eccentricity(G, center)
    center_models = set()
    models = augmenting_iteration_maximal_answer_sets(G, dist_str, center, ecc)
    node_model_dict = dict()
    for model in models:
        for term in model:
            # tv(center, node, atom, truth_value): valuation relative to `center`.
            if term.name() == 'tv':
                center_node, node, atom_fun, truth_value = term.args()
                atom = symbols(atom_fun.name())
                if center_node == node:
                    node_model_dict[atom] = truth_value
        true_atoms = frozenset([atom for atom in node_model_dict if node_model_dict[atom] == 1])
        center_models.add(true_atoms)
    formula = formula_from_models(center_models, atoms)
    return {center: formula}
def augmenting_syntactic(G, center, atoms, opt_type=INCLUSION):
    """Computes the formula for node ``center`` resulting from one step of
    *augmenting iteration*, using the syntactic characterization.

    NOTE(review): ``opt_type`` is accepted for interface uniformity but is not
    forwarded to ``augmenting_iteration_maximal_answer_sets`` — confirm this is intended.

    Returns a singleton dict ``{center: formula}``.
    """
    dist_str = create_distance_string(G)
    ecc = nx.eccentricity(G, center)
    answer_sets = augmenting_iteration_maximal_answer_sets(G, dist_str, center, ecc)
    eq_sets = []
    for answer_set in answer_sets:
        # current_eq_set maps each other node to the atoms it shares with `center`.
        current_eq_set = defaultdict(set)
        for term in answer_set:
            if term.name() == 'eq':
                center_node, atom_fun, X, Y = term.args()
                atom = symbols(atom_fun.name())
                if center_node == X:
                    current_eq_set[Y].add(atom)
        #print("CURRENT EQ SET = {}".format(current_eq_set))
        eq_sets.append(current_eq_set)
    #print("ALL EQ SETS AT RADIUS {} = {}".format(radius, eq_sets))
    formula = formula_from_eq_sets(G, center, eq_sets, atoms)
    return {center: formula}
def ring_semantic(G, center, atoms, opt_type=INCLUSION):
    """Computes the formula for node ``center`` resulting from one step of the
    *ring method*, using the semantic characterization.

    At each radius, only the ring of nodes at exactly that distance is consulted
    (neighborhood_type=RING), and the belief at ``center`` is updated in place
    before the next radius.

    Returns a singleton dict ``{center: formula}``. Unlike the other per-node
    workers, the result here is simplified before being returned.
    """
    dist_str = create_distance_string(G)
    ecc = nx.eccentricity(G, center)
    # NOTE(review): iteration_model_sets is populated but never read — dead code?
    iteration_model_sets = []
    for radius in range(1, ecc+1):
        center_models = set()
        models = expanding_maximal_answer_sets(G, dist_str, center, radius, atoms=atoms, neighborhood_type=RING,
                                               method=SEMANTIC, opt_type=opt_type)
        node_model_dict = dict()
        for model in models:
            for term in model:
                if term.name() == 'tv':
                    center_node, node, atom_fun, truth_value = term.args()
                    atom = symbols(atom_fun.name())
                    if center_node == node:
                        node_model_dict[atom] = truth_value
            true_atoms = frozenset([atom for atom in node_model_dict if node_model_dict[atom] == 1])
            center_models.add(true_atoms)
        iteration_model_sets.append(frozenset(center_models))
        # Replace the belief at `center` with the intermediate result for this radius.
        mid_formula = formula_from_models(center_models, atoms)
        G.clear_formulas_from(center)
        G.add_formula(center, mid_formula)
    formula = formula_from_models(center_models, atoms)
    simplified_formula = simplify_logic(formula)
    return {center: simplified_formula}
def ring_syntactic(G, center, atoms, opt_type=INCLUSION):
    """Computes the formula for node ``center`` resulting from one step of the
    *ring method*, using the syntactic characterization.

    At each radius, only the ring of nodes at exactly that distance is consulted
    (neighborhood_type=RING), and the belief at ``center`` is updated in place
    before the next radius.

    Returns a singleton dict ``{center: formula}``.
    """
    dist_str = create_distance_string(G)
    ecc = nx.eccentricity(G, center)
    #print("CENTER = {}".format(center))
    #print("Eccentricity = {}".format(ecc))
    for radius in range(1, ecc+1):
        eq_sets = []
        #print("RADIUS = {}".format(radius))
        #print(G.to_asp(atoms))
        answer_sets = expanding_maximal_answer_sets(G, dist_str, center, radius, atoms=atoms, neighborhood_type=RING,
                                                    method=SYNTACTIC, opt_type=opt_type)
        for answer_set in answer_sets:
            # current_eq_set is a mapping like current_eq_set[2] = {p,q,r},
            # current_eq_set[3] = {p,q}, all with respect to a given node (e.g. node 1)
            current_eq_set = defaultdict(set)
            for term in answer_set:
                if term.name() == 'eq':
                    center_node, atom_fun, X, Y = term.args()
                    atom = symbols(atom_fun.name())
                    if center_node == X:
                        current_eq_set[Y].add(atom)
            #print("CURRENT EQ SET = {}".format(current_eq_set))
            eq_sets.append(current_eq_set)
        #print("ALL EQ SETS AT RADIUS {} = {}".format(radius, eq_sets))
        # Replace the belief at `center` with the intermediate result for this radius.
        mid_formula = formula_from_eq_sets(G, center, eq_sets, atoms)
        G.clear_formulas_from(center)
        G.add_formula(center, mid_formula)
    final_formula = G.formula_conj(center)
    return {center: final_formula}
def serial_iteration(G, iteration_function, opt_type=INCLUSION):
    """Applies ``iteration_function`` over all nodes in ``G`` one node at a time.

    Parameters
    ----------
    G : An EquibelGraph object
    iteration_function : A function that conforms to the following input/output interface:
                         **Input:** a graph ``G``, a node ``center`` for which to compute
                         the results of the iteration, and a set of ``atoms``.
                         **Output:** a singleton dictionary ``{ center: formula }``
                         mapping ``center`` to the formula resulting from the iteration.

    Returns
    -------
    A new EquibelGraph with the same topology as ``G`` and the freshly
    computed formula at each node.
    """
    atoms = G.atoms()
    combined = dict()
    for node in G:
        # Each worker gets its own deep copy, since the per-node functions may
        # mutate the graph they receive.
        combined.update(iteration_function(copy.deepcopy(G), node, atoms, opt_type=opt_type))
    result = copy.deepcopy(G)
    result.clear_formulas()
    for node, formula in combined.items():
        result.add_formula(node, formula)
    return result
def parallel_iteration(G, iteration_function, opt_type=INCLUSION):
    """Applies ``iteration_function`` over all nodes in ``G`` *in parallel* (using process pools).
    Parameters
    ----------
    G : An EquibelGraph object
    iteration_function : A function that conforms to the following input/output interface:
                         **Input:** Takes a graph ``G``, a node ``center`` for which to compute
                         the results of the iteration, and a set of ``atoms``.
                         **Output:** Returns a singleton dictionary of the form ``{ center: formula }``
                         mapping the node ``center`` to a formula resulting from the iteration.
    Returns
    -------
    A new EquibelGraph with the same topology as ``G`` and the freshly computed
    formula at each node.
    """
    atoms = G.atoms()
    # Using "processes=None" results in as many processes being created as
    # there are cores (real or virtual) on the current machine.
    pool = mp.Pool(processes=None)
    # One async task per node; each worker process gets its own (pickled) copy
    # of G, so in-place mutations by the worker never affect this process's G.
    results = [pool.apply_async(iteration_function, (G, center, atoms), {"opt_type": opt_type}) for center in G.nodes()]
    # .get() blocks until each task finishes (and re-raises worker exceptions).
    output = [p.get() for p in results]
    pool.close()
    pool.join()
    # Combine the singleton {node: formula} dicts into one mapping.
    final_formula_dict = merge_dicts(output)
    R = copy.deepcopy(G)
    R.clear_formulas()
    for node in final_formula_dict:
        R.add_formula(node, final_formula_dict[node])
    return R
def merge_dicts(dictionaries):
    """Merges multiple separate dictionaries into a single dictionary.

    Later dictionaries take precedence over earlier ones for duplicate keys.
    Accepts any iterable of dictionaries; an empty iterable yields ``{}``
    (the previous implementation raised IndexError on empty input).

    Parameters
    ----------
    dictionaries : An iterable container of Python dictionaries.

    Returns
    -------
    merged : A single dictionary that represents the result of merging all the
             dicts in ``dictionaries``.

    Example
    -------
    The primary purpose of this function is to create a single dictionary
    by combining multiple singleton dictionaries, as shown in the following example:
    >>> dicts = [{'a': 1}, {'b': 2}, {'c': 3}]
    >>> eb.merge_dicts(dicts)
    {'a': 1, 'c': 3, 'b': 2}
    """
    merged = {}
    for d in dictionaries:
        merged.update(d)
    return merged
def print_formulas(G):
    """Pretty-prints the conjunction of formulas at each node of ``G``.

    Parameters
    ----------
    G : An ``EquibelGraph`` object
    """
    for node in G.nodes():
        header = "Node {0}:".format(node)
        print(header)
        pprint(G.formula_conj(node))
        print("\n")
def create_distance_string(G):
    """Creates a string containing the ASP encoding of the shortest-path
    distances between all pairs of nodes in ``G``.
    Parameters
    ----------
    G : An EquibelGraph object.
    Returns
    -------
    dist_str : A string containing ``dist/3`` predicates, where ``dist(x,y,d)``
               represents that the distance between nodes ``x`` and ``y`` is ``d``.
    Example
    -------
    >>> G = eb.path_graph(3)
    >>> print(eb.create_distance_string(G))
    dist(0,0,0).
    dist(0,1,1).
    dist(0,2,2).
    dist(1,0,1).
    dist(1,1,0).
    dist(1,2,1).
    dist(2,0,2).
    dist(2,1,1).
    dist(2,2,0).
    """
    # dict(...) makes this work on both networkx 1.x (which returned a dict of
    # dicts) and networkx >= 2.0 (which returns an iterator of (node, dict) pairs).
    lengths = dict(nx.all_pairs_shortest_path_length(G))
    # Build with join instead of repeated string concatenation.
    lines = []
    for node1, dist_dict in lengths.items():
        for node2, dist in dist_dict.items():
            lines.append("dist({0},{1},{2}).\n".format(node1, node2, dist))
    return "".join(lines)
def formula_from_models(models, alphabet):
    """Build a DNF formula whose models are exactly ``models`` over ``alphabet``.

    Parameters
    ----------
    models : An iterable container (set, list, etc.) of *sets of atoms*, where
        an atom is represented by a Sympy symbol
    alphabet : An iterable container of atoms, represented by Sympy symbols

    Example
    -------
    >>> p,q,r,s = [eb.parse_formula(f) for f in "pqrs"]
    >>> alphabet = [p,q,r,s]
    >>> models = [{p,q}]
    >>> eb.formula_from_models(models, alphabet)
    And(Not(r), Not(s), p, q)

    >>> models = [{p,q}, {p}]
    >>> eb.formula_from_models(models, alphabet)
    Or(And(Not(q), Not(r), Not(s), p), And(Not(r), Not(s), p, q))
    """
    clauses = set()
    for model in models:
        # Each model becomes one complete conjunction of literals: an atom
        # appears positively iff it belongs to the model.
        clause = true
        for atom in alphabet:
            clause &= atom if atom in model else ~atom
        clauses.add(clause)
    return disjunction(clauses)
def conjunction(formulas):
    """Compute the conjunction (logical AND) of a set of propositional formulas.

    Parameters
    ----------
    formulas : an iterable container of Sympy formulas

    Example
    -------
    >>> formulas = [eb.parse_formula(s) for s in "pqrst"]
    >>> eb.conjunction(formulas)
    And(p, q, r, s, t)
    """
    combined = And(*formulas)
    return combined
def disjunction(formulas):
    """Compute the disjunction (logical OR) of a set of propositional formulas.

    Parameters
    ----------
    formulas : An iterable container of Sympy formulas

    Example
    -------
    >>> formulas = [eb.parse_formula(s) for s in "pqrst"]
    >>> eb.disjunction(formulas)
    Or(p, q, r, s, t)
    """
    combined = Or(*formulas)
    return combined
|
asteroidhouse/equibel
|
equibel/solver.py
|
Python
|
mit
| 42,474
|
import django
# Django moved conf.urls.defaults -> conf.urls in 1.4; try the modern
# location first and fall back for older releases.
try:
    from django.conf.urls import url, patterns
except ImportError:
    from django.conf.urls.defaults import url, patterns
from actstream import feeds

# URL routes for the test application's custom JSON activity feed.
urlpatterns = [
    url(r'^custom/(?P<verb>[-\w\s]+)/$',
        feeds.CustomJSONActivityFeed.as_view(name='testbar'),
        name='testapp_custom_feed'),
]

# Django < 1.9 still requires urlpatterns to be wrapped with patterns().
if django.VERSION[:2] < (1, 9):
    urlpatterns = patterns('', *urlpatterns)
|
mikek2/actstream
|
actstream/runtests/testapp/urls.py
|
Python
|
bsd-3-clause
| 408
|
#!/usr/bin/env python
import xmlrpclib
import time
import threading
import random
# XML-RPC endpoint of the denyhosts sync server under test.
server = 'http://localhost:9911'
def run(server, count, known_crackers):
    """Connect to the XML-RPC server and bulk-insert ``count`` hosts."""
    proxy = xmlrpclib.ServerProxy(server)
    begin = time.time()
    # Report times are spread over a random window within the past week.
    proxy.debug.test_bulk_insert(count, known_crackers, begin - random.random() * 7 * 24 * 3600)
def run_insert_test(server, num_threads, count, known_crackers):
    """Run ``run`` concurrently from ``num_threads`` threads and time it."""
    begin = time.time()
    workers = [
        threading.Thread(target=run, args=(server, count, known_crackers))
        for _ in xrange(num_threads)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    elapsed = time.time() - begin
    print("Inserting {} hosts {} times took {} seconds".format(count, num_threads, elapsed))
    print("Average time per insert: {} seconds".format((time.time() - begin) / count / num_threads))
# Sweep the number of concurrent client threads and measure insert latency.
s = xmlrpclib.ServerProxy(server)
for num_threads in xrange(20, 59):
    print("Inserting {} hosts {} times, please wait...".format(100, num_threads))
    # Reset server-side state so each run starts from a clean slate.
    s.debug.clear_bulk_cracker_list()
    run_insert_test(server, num_threads, 100, True)
    # Give the server a moment to settle between runs.
    time.sleep(3)
    print("==============================================")

#s = xmlrpclib.ServerProxy(server)
#for i in xrange(100):
#    s.clear_bulk_cracker_list()
#    print("Adding 20*5000=100,000 hosts...")
#    run_insert_test(server, 20, 5000, False)
|
sergey-dryabzhinsky/denyhosts_sync
|
tests/fill_database.py
|
Python
|
agpl-3.0
| 1,675
|
"""
Evaluation metrics for quality of outputs of generative models.
"""
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.dummy import DummyClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import r2_score
import numpy as np
def condidional_similarity(G_true, G_pred, C_true=None, C_pred=None,
                           cross_testing=False):
    """Compares the similarity of two distributions using a set
    of samples from "truth" distribution G_true and "predicted"
    distribution G_pred. Is useful for estimation of quality
    of GAN's and VAE's and the like.

    Implements a classifier two-sample test: a model is trained to tell
    "true" samples from "predicted" ones; the harder that is, the more
    similar the distributions.

    NOTE(review): the function name is misspelled ("condidional"); it is
    kept as-is because callers depend on it.
    NOTE(review): ``cross_testing`` is currently unused.

    Parameters
    ----------
    G_true : array-like of shape = (n_samples, n_outputs)
        Samples from "ground truth" distribution.

    G_pred : array-like of shape = (n_samples, n_outputs)
        Samples from an estimated sampler.

    C_true : array-like of shape = (n_samples, n_features)
        Condition for "ground truth" distribution.

    C_pred : array-like of shape = (n_samples, n_features)
        Condition for estimated sampler.

    cross_testing : bool, optional
        Whether to use cross-validation like approach for testing.

    Returns
    -------
    z : float
        The similarity score for two distributions, calculated
        from the generalization estimate of the model that is
        trained to distinguish between two sets of samples.
        1.0 means indistinguishable, 0.0 means perfectly separable.
    """
    # Candidate discriminators: a dummy baseline, a linear SVM and gradient
    # boosting; GridSearchCV picks the best-performing one.
    pipe = Pipeline([
        ('scaler', StandardScaler()),
        ('model', DummyClassifier())
    ])
    dummy_search = {
        'model__strategy': ["stratified", "most_frequent", "uniform"]
    }
    lin_search = {
        'model': [LinearSVC()],
        'model__penalty': ['l1', 'l2'],
        'model__dual': [False],
        'model__C': 10 ** np.linspace(-10, 10),
        'model__max_iter': [10000],
    }
    gb_search = {
        'model': [GradientBoostingClassifier()],
        'model__learning_rate': [1.0, 0.1, 0.01, 0.001],
        'model__n_estimators': [2 ** i for i in range(11)],
    }
    model = GridSearchCV(
        pipe,
        [dummy_search, lin_search, gb_search],  # svc_search
        n_jobs=-1,
        verbose=0
    )
    # Normalize every provided array to a 2-D float matrix.
    a = [G_true, G_pred, C_true, C_pred]
    for i, v in enumerate(a):
        if v is None:
            continue
        v = np.array(v)
        v = v.astype('float')
        if len(v.shape) == 1:
            v = v[:, np.newaxis]
        a[i] = v
    G_true, G_pred, C_true, C_pred = a
    # Stack true and predicted samples into one design matrix.
    X = np.row_stack([G_true, G_pred])
    X = X.reshape((len(X), -1))
    # add condition to the discriminatory features
    if C_true is not None and C_pred is not None:
        C = np.row_stack([C_true, C_pred])
        C = C.reshape((len(C), -1))
        X = np.column_stack([X, C])
    # Labels: 1 = true sample, 0 = predicted sample.
    y = np.concatenate([
        np.ones(len(G_true)),
        np.zeros(len(G_pred))
    ])
    X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.5, stratify=y)
    score = model.fit(X_train, y_train).score(X_test, y_test)
    # scale the error to be in range from 0.0 to 1.0
    U, C = np.unique(y_test, return_counts=True)
    scale = max(C * 1.0) / sum(C * 1.0)
    score = (1.0 - score)/scale
    score = min(1.0, score)
    return score
def distribution_similarity(X_true, X_pred, cross_testing=False):
    """Compares the similarity of two distributions using a set
    of samples from "truth" distribution X_true and "predicted"
    distribution X_pred. Is useful for estimation of quality
    of GAN's and VAE's and the like.

    Unconditional variant of ``condidional_similarity``: a classifier
    two-sample test whose score is 1.0 for indistinguishable
    distributions and 0.0 for perfectly separable ones.

    NOTE(review): ``cross_testing`` is currently unused.

    Parameters
    ----------
    X_true : array-like of shape = (n_samples, n_outputs)
        Samples from "ground truth" distribution.

    X_pred : array-like of shape = (n_samples, n_outputs)
        Samples from "ground truth" distribution.

    cross_testing : bool, optional
        Whether to use cross-validation like approach for testing.

    Returns
    -------
    z : float
        The similarity score for two distributions, calculated
        from the generalization estimate of the model that is
        trained to distinguish between two sets of samples.
    """
    # Candidate discriminators; GridSearchCV selects the best one.
    pipe = Pipeline([
        ('scaler', StandardScaler()),
        ('model', DummyClassifier())
    ])
    dummy_search = {
        'model__strategy': ["stratified", "most_frequent", "uniform"]
    }
    lin_search = {
        'model': [LinearSVC()],
        'model__penalty': ['l1', 'l2'],
        'model__dual': [False],
        'model__C': 10 ** np.linspace(-10, 10),
        'model__max_iter': [10000],
    }
    gb_search = {
        'model': [GradientBoostingClassifier()],
        'model__learning_rate': [1.0, 0.1, 0.01, 0.001],
        'model__n_estimators': [2 ** i for i in range(11)],
    }
    model = GridSearchCV(
        pipe,
        [dummy_search, lin_search, gb_search],  # svc_search
        n_jobs=-1,
        verbose=0
    )
    # Stack both sample sets; label 1 = true, 0 = predicted.
    X = np.row_stack([X_true, X_pred])
    X = X.reshape((len(X), -1))
    y = np.concatenate([
        np.ones(len(X_true)),
        np.zeros(len(X_pred))
    ])
    X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.5, stratify=y)
    score = model.fit(X_train, y_train).score(X_test, y_test)
    # scale the error to be in range from 0.0 to 1.0
    U, C = np.unique(y_test, return_counts=True)
    scale = max(C * 1.0) / sum(C * 1.0)
    score = (1.0 - score)/scale
    score = min(1.0, score)
    return score
def conditional_wasserstein_distance(C_true, G_true, C_fake, G_fake, clazz):
    # NOTE(review): unimplemented stub — it only binds ``clazz`` to a local
    # and implicitly returns None.  Kept as-is; flag before relying on it.
    model = clazz
if __name__ == "__main__":
    # example usage: similarity should shrink as the offset between the
    # two Gaussian samples grows.
    reference = np.random.randn(512, 2)
    for shift in (0.1, 0.2, 0.4, 0.8, 1.6, 3.2):
        shifted = np.random.randn(512, 2) + shift
        print(distribution_similarity(reference, shifted))
|
iaroslav-ai/noxer
|
noxer/gm/metrics.py
|
Python
|
mit
| 5,869
|
"""Test the iRobot Roomba config flow."""
from unittest.mock import MagicMock, PropertyMock, patch
import pytest
from roombapy import RoombaConnectionError, RoombaInfo
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.dhcp import HOSTNAME, IP_ADDRESS, MAC_ADDRESS
from homeassistant.components.roomba import config_flow
from homeassistant.components.roomba.const import CONF_BLID, CONF_CONTINUOUS, DOMAIN
from homeassistant.const import CONF_DELAY, CONF_HOST, CONF_PASSWORD
from tests.common import MockConfigEntry
# IP address used by every mocked device/discovery below.
MOCK_IP = "1.2.3.4"
# A complete, valid config-entry payload for that device.
VALID_CONFIG = {CONF_HOST: MOCK_IP, CONF_BLID: "BLID", CONF_PASSWORD: "password"}

# DHCP payloads whose IP matches the device that roomba discovery finds.
DHCP_DISCOVERY_DEVICES = [
    {
        IP_ADDRESS: MOCK_IP,
        MAC_ADDRESS: "50:14:79:DD:EE:FF",
        HOSTNAME: "irobot-blid",
    },
    {
        IP_ADDRESS: MOCK_IP,
        MAC_ADDRESS: "80:A5:89:DD:EE:FF",
        HOSTNAME: "roomba-blid",
    },
]

# DHCP payloads whose IPs do NOT match any device roomba discovery finds.
DHCP_DISCOVERY_DEVICES_WITHOUT_MATCHING_IP = [
    {
        IP_ADDRESS: "4.4.4.4",
        MAC_ADDRESS: "50:14:79:DD:EE:FF",
        HOSTNAME: "irobot-blid",
    },
    {
        IP_ADDRESS: "5.5.5.5",
        MAC_ADDRESS: "80:A5:89:DD:EE:FF",
        HOSTNAME: "roomba-blid",
    },
]
@pytest.fixture(autouse=True)
def roomba_no_wake_time():
    """Fixture that prevents sleep.

    Forces the config flow's wake delay to 0 so tests run instantly.
    """
    with patch.object(config_flow, "ROOMBA_WAKE_TIME", 0):
        yield
def _create_mocked_roomba(
roomba_connected=None, master_state=None, connect=None, disconnect=None
):
mocked_roomba = MagicMock()
type(mocked_roomba).roomba_connected = PropertyMock(return_value=roomba_connected)
type(mocked_roomba).master_state = PropertyMock(return_value=master_state)
type(mocked_roomba).connect = MagicMock(side_effect=connect)
type(mocked_roomba).disconnect = MagicMock(side_effect=disconnect)
return mocked_roomba
def _mocked_discovery(*_):
    """Return a discovery mock that always finds one known robot."""
    found = RoombaInfo(
        hostname="irobot-BLID",
        robot_name="robot_name",
        ip=MOCK_IP,
        mac="mac",
        firmware="firmware",
        sku="sku",
        capabilities="capabilities",
    )
    discovery = MagicMock()
    discovery.get_all.return_value = [found]
    discovery.get.return_value = found
    return discovery
def _mocked_no_devices_found_discovery(*_):
roomba_discovery = MagicMock()
roomba_discovery.get_all = MagicMock(return_value=[])
roomba_discovery.get = MagicMock(return_value=None)
return roomba_discovery
def _mocked_failed_discovery(*_):
roomba_discovery = MagicMock()
roomba_discovery.get_all = MagicMock(side_effect=OSError)
roomba_discovery.get = MagicMock(side_effect=OSError)
return roomba_discovery
def _mocked_getpassword(*_):
roomba_password = MagicMock()
roomba_password.get_password = MagicMock(return_value="password")
return roomba_password
def _mocked_failed_getpassword(*_):
roomba_password = MagicMock()
roomba_password.get_password = MagicMock(return_value=None)
return roomba_password
def _mocked_connection_refused_on_getpassword(*_):
roomba_password = MagicMock()
roomba_password.get_password = MagicMock(side_effect=ConnectionRefusedError)
return roomba_password
async def test_form_user_discovery_and_password_fetch(hass):
    """Test we can discovery and fetch the password."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    mocked_roomba = _create_mocked_roomba(
        roomba_connected=True,
        master_state={"state": {"reported": {"name": "myroomba"}}},
    )
    # Step 1: discovery finds the device -> "user" form.
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaDiscovery", _mocked_discovery
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        await hass.async_block_till_done()

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] is None
    assert result["step_id"] == "user"

    # Step 2: choose the discovered host -> "link" step.
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {CONF_HOST: MOCK_IP},
    )
    await hass.async_block_till_done()

    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["errors"] is None
    assert result2["step_id"] == "link"

    # Step 3: password is fetched automatically and the entry is created.
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaFactory.create_roomba",
        return_value=mocked_roomba,
    ), patch(
        "homeassistant.components.roomba.config_flow.RoombaPassword",
        _mocked_getpassword,
    ), patch(
        "homeassistant.components.roomba.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result3 = await hass.config_entries.flow.async_configure(
            result2["flow_id"],
            {},
        )
        await hass.async_block_till_done()

    assert result3["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result3["title"] == "robot_name"
    assert result3["result"].unique_id == "BLID"
    assert result3["data"] == {
        CONF_BLID: "BLID",
        CONF_CONTINUOUS: True,
        CONF_DELAY: 1,
        CONF_HOST: MOCK_IP,
        CONF_PASSWORD: "password",
    }
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_user_discovery_skips_known(hass):
    """Test discovery proceeds to manual if all discovered are already known."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    # Pre-register the only device discovery will find.
    entry = MockConfigEntry(domain=DOMAIN, data=VALID_CONFIG, unique_id="BLID")
    entry.add_to_hass(hass)
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaDiscovery", _mocked_discovery
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        await hass.async_block_till_done()

    # With every discovered device known, the flow jumps to "manual".
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] is None
    assert result["step_id"] == "manual"
async def test_form_user_no_devices_found_discovery_aborts_already_configured(hass):
    """Test if we manually configure an existing host we abort."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    entry = MockConfigEntry(domain=DOMAIN, data=VALID_CONFIG, unique_id="BLID")
    entry.add_to_hass(hass)
    # No devices discovered -> flow falls straight through to "manual".
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaDiscovery",
        _mocked_no_devices_found_discovery,
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        await hass.async_block_till_done()

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] is None
    assert result["step_id"] == "manual"

    # Entering a host that already has an entry aborts the flow.
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {CONF_HOST: MOCK_IP, CONF_BLID: "blid"},
    )
    await hass.async_block_till_done()

    assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result2["reason"] == "already_configured"
async def test_form_user_discovery_manual_and_auto_password_fetch(hass):
    """Test discovery skipped and we can auto fetch the password."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    mocked_roomba = _create_mocked_roomba(
        roomba_connected=True,
        master_state={"state": {"reported": {"name": "myroomba"}}},
    )
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaDiscovery", _mocked_discovery
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        await hass.async_block_till_done()

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] is None
    assert result["step_id"] == "user"

    # Submitting no host skips the discovered device -> "manual" step.
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {CONF_HOST: None},
    )
    await hass.async_block_till_done()

    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["errors"] is None
    assert result2["step_id"] == "manual"

    result3 = await hass.config_entries.flow.async_configure(
        result2["flow_id"],
        {CONF_HOST: MOCK_IP, CONF_BLID: "blid"},
    )
    await hass.async_block_till_done()

    assert result3["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result3["errors"] is None

    # Auto password fetch succeeds -> entry created from fetched credentials.
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaFactory.create_roomba",
        return_value=mocked_roomba,
    ), patch(
        "homeassistant.components.roomba.config_flow.RoombaPassword",
        _mocked_getpassword,
    ), patch(
        "homeassistant.components.roomba.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result4 = await hass.config_entries.flow.async_configure(
            result3["flow_id"],
            {},
        )
        await hass.async_block_till_done()

    assert result4["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result4["title"] == "myroomba"
    assert result4["result"].unique_id == "BLID"
    assert result4["data"] == {
        CONF_BLID: "BLID",
        CONF_CONTINUOUS: True,
        CONF_DELAY: 1,
        CONF_HOST: MOCK_IP,
        CONF_PASSWORD: "password",
    }
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_user_discover_fails_aborts_already_configured(hass):
    """Test if we manually configure an existing host we abort after failed discovery."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    entry = MockConfigEntry(domain=DOMAIN, data=VALID_CONFIG, unique_id="BLID")
    entry.add_to_hass(hass)
    # Discovery raising OSError should degrade gracefully to "manual".
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaDiscovery",
        _mocked_failed_discovery,
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        await hass.async_block_till_done()

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] is None
    assert result["step_id"] == "manual"

    # Entering a host that already has an entry aborts the flow.
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {CONF_HOST: MOCK_IP, CONF_BLID: "blid"},
    )
    await hass.async_block_till_done()

    assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result2["reason"] == "already_configured"
async def test_form_user_discovery_manual_and_auto_password_fetch_but_cannot_connect(
    hass,
):
    """Test discovery skipped and we can auto fetch the password then we fail to connect."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    # connect raises RoombaConnectionError -> final step must abort.
    mocked_roomba = _create_mocked_roomba(
        connect=RoombaConnectionError,
        roomba_connected=True,
        master_state={"state": {"reported": {"name": "myroomba"}}},
    )
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaDiscovery", _mocked_discovery
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        await hass.async_block_till_done()

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] is None
    assert result["step_id"] == "user"

    # Skip the discovered device -> "manual" step.
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {CONF_HOST: None},
    )
    await hass.async_block_till_done()

    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["errors"] is None
    assert result2["step_id"] == "manual"

    result3 = await hass.config_entries.flow.async_configure(
        result2["flow_id"],
        {CONF_HOST: MOCK_IP, CONF_BLID: "blid"},
    )
    await hass.async_block_till_done()

    assert result3["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result3["errors"] is None

    with patch(
        "homeassistant.components.roomba.config_flow.RoombaFactory.create_roomba",
        return_value=mocked_roomba,
    ), patch(
        "homeassistant.components.roomba.config_flow.RoombaPassword",
        _mocked_getpassword,
    ), patch(
        "homeassistant.components.roomba.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result4 = await hass.config_entries.flow.async_configure(
            result3["flow_id"],
            {},
        )
        await hass.async_block_till_done()

    # Connection failure aborts and the entry is never set up.
    assert result4["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result4["reason"] == "cannot_connect"
    assert len(mock_setup_entry.mock_calls) == 0
async def test_form_user_discovery_no_devices_found_and_auto_password_fetch(hass):
    """Test discovery finds no devices and we can auto fetch the password."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    mocked_roomba = _create_mocked_roomba(
        roomba_connected=True,
        master_state={"state": {"reported": {"name": "myroomba"}}},
    )
    # Empty discovery -> flow starts at "manual".
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaDiscovery",
        _mocked_no_devices_found_discovery,
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        await hass.async_block_till_done()

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] is None
    assert result["step_id"] == "manual"

    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {CONF_HOST: MOCK_IP, CONF_BLID: "blid"},
    )
    await hass.async_block_till_done()

    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["errors"] is None

    # Auto password fetch succeeds -> entry created.
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaFactory.create_roomba",
        return_value=mocked_roomba,
    ), patch(
        "homeassistant.components.roomba.config_flow.RoombaPassword",
        _mocked_getpassword,
    ), patch(
        "homeassistant.components.roomba.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result3 = await hass.config_entries.flow.async_configure(
            result2["flow_id"],
            {},
        )
        await hass.async_block_till_done()

    assert result3["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result3["title"] == "myroomba"
    assert result3["result"].unique_id == "BLID"
    assert result3["data"] == {
        CONF_BLID: "BLID",
        CONF_CONTINUOUS: True,
        CONF_DELAY: 1,
        CONF_HOST: MOCK_IP,
        CONF_PASSWORD: "password",
    }
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_user_discovery_no_devices_found_and_password_fetch_fails(hass):
    """Test discovery finds no devices and password fetch fails."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    mocked_roomba = _create_mocked_roomba(
        roomba_connected=True,
        master_state={"state": {"reported": {"name": "myroomba"}}},
    )
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaDiscovery",
        _mocked_no_devices_found_discovery,
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        await hass.async_block_till_done()

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] is None
    assert result["step_id"] == "manual"

    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {CONF_HOST: MOCK_IP, CONF_BLID: "blid"},
    )
    await hass.async_block_till_done()

    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["errors"] is None

    # Automatic password fetch returns None -> flow asks for the password.
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaPassword",
        _mocked_failed_getpassword,
    ):
        result3 = await hass.config_entries.flow.async_configure(
            result2["flow_id"],
            {},
        )
        await hass.async_block_till_done()

    # Supplying the password manually completes the flow.
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaFactory.create_roomba",
        return_value=mocked_roomba,
    ), patch(
        "homeassistant.components.roomba.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result4 = await hass.config_entries.flow.async_configure(
            result3["flow_id"],
            {CONF_PASSWORD: "password"},
        )
        await hass.async_block_till_done()

    assert result4["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result4["title"] == "myroomba"
    assert result4["result"].unique_id == "BLID"
    assert result4["data"] == {
        CONF_BLID: "BLID",
        CONF_CONTINUOUS: True,
        CONF_DELAY: 1,
        CONF_HOST: MOCK_IP,
        CONF_PASSWORD: "password",
    }
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_user_discovery_not_devices_found_and_password_fetch_fails_and_cannot_connect(
    hass,
):
    """Test discovery finds no devices and password fetch fails then we cannot connect."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    # connect raises RoombaConnectionError -> final step shows an error.
    mocked_roomba = _create_mocked_roomba(
        connect=RoombaConnectionError,
        roomba_connected=True,
        master_state={"state": {"reported": {"name": "myroomba"}}},
    )
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaDiscovery",
        _mocked_no_devices_found_discovery,
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        await hass.async_block_till_done()

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] is None
    assert result["step_id"] == "manual"

    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {CONF_HOST: MOCK_IP, CONF_BLID: "blid"},
    )
    await hass.async_block_till_done()

    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["errors"] is None

    # Automatic password fetch fails -> flow asks for the password.
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaPassword",
        _mocked_failed_getpassword,
    ):
        result3 = await hass.config_entries.flow.async_configure(
            result2["flow_id"],
            {},
        )
        await hass.async_block_till_done()

    with patch(
        "homeassistant.components.roomba.config_flow.RoombaFactory.create_roomba",
        return_value=mocked_roomba,
    ), patch(
        "homeassistant.components.roomba.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result4 = await hass.config_entries.flow.async_configure(
            result3["flow_id"],
            {CONF_PASSWORD: "password"},
        )
        await hass.async_block_till_done()

    # Connection failure is reported on the form; entry never set up.
    assert result4["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result4["errors"] == {"base": "cannot_connect"}
    assert len(mock_setup_entry.mock_calls) == 0
async def test_form_user_discovery_and_password_fetch_gets_connection_refused(hass):
    """Test we can discovery and fetch the password manually."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    mocked_roomba = _create_mocked_roomba(
        roomba_connected=True,
        master_state={"state": {"reported": {"name": "myroomba"}}},
    )
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaDiscovery", _mocked_discovery
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        await hass.async_block_till_done()

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] is None
    assert result["step_id"] == "user"

    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {CONF_HOST: MOCK_IP},
    )
    await hass.async_block_till_done()

    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["errors"] is None
    assert result2["step_id"] == "link"

    # Password fetch is refused -> flow falls back to manual password entry.
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaPassword",
        _mocked_connection_refused_on_getpassword,
    ):
        result3 = await hass.config_entries.flow.async_configure(
            result2["flow_id"],
            {},
        )
        await hass.async_block_till_done()

    with patch(
        "homeassistant.components.roomba.config_flow.RoombaFactory.create_roomba",
        return_value=mocked_roomba,
    ), patch(
        "homeassistant.components.roomba.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result4 = await hass.config_entries.flow.async_configure(
            result3["flow_id"],
            {CONF_PASSWORD: "password"},
        )
        await hass.async_block_till_done()

    assert result4["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result4["title"] == "myroomba"
    assert result4["result"].unique_id == "BLID"
    assert result4["data"] == {
        CONF_BLID: "BLID",
        CONF_CONTINUOUS: True,
        CONF_DELAY: 1,
        CONF_HOST: MOCK_IP,
        CONF_PASSWORD: "password",
    }
    assert len(mock_setup_entry.mock_calls) == 1
@pytest.mark.parametrize("discovery_data", DHCP_DISCOVERY_DEVICES)
async def test_dhcp_discovery_and_roomba_discovery_finds(hass, discovery_data):
    """Test we can process the discovery from dhcp and roomba discovery matches the device."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    mocked_roomba = _create_mocked_roomba(
        roomba_connected=True,
        master_state={"state": {"reported": {"name": "myroomba"}}},
    )
    # DHCP IP matches the discovered device -> jump straight to "link".
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaDiscovery", _mocked_discovery
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_DHCP},
            data=discovery_data,
        )
        await hass.async_block_till_done()

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] is None
    assert result["step_id"] == "link"
    assert result["description_placeholders"] == {"name": "robot_name"}

    with patch(
        "homeassistant.components.roomba.config_flow.RoombaFactory.create_roomba",
        return_value=mocked_roomba,
    ), patch(
        "homeassistant.components.roomba.config_flow.RoombaPassword",
        _mocked_getpassword,
    ), patch(
        "homeassistant.components.roomba.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {},
        )
        await hass.async_block_till_done()

    assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result2["title"] == "robot_name"
    assert result2["result"].unique_id == "BLID"
    assert result2["data"] == {
        CONF_BLID: "BLID",
        CONF_CONTINUOUS: True,
        CONF_DELAY: 1,
        CONF_HOST: MOCK_IP,
        CONF_PASSWORD: "password",
    }
    assert len(mock_setup_entry.mock_calls) == 1
@pytest.mark.parametrize("discovery_data", DHCP_DISCOVERY_DEVICES_WITHOUT_MATCHING_IP)
async def test_dhcp_discovery_falls_back_to_manual(hass, discovery_data):
    """Test we can process the discovery from dhcp but roomba discovery cannot find the specific device."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    mocked_roomba = _create_mocked_roomba(
        roomba_connected=True,
        master_state={"state": {"reported": {"name": "myroomba"}}},
    )
    # DHCP IP does not match any discovered device -> "user" step.
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaDiscovery", _mocked_discovery
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_DHCP},
            data=discovery_data,
        )
        await hass.async_block_till_done()

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] is None
    assert result["step_id"] == "user"

    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {},
    )
    await hass.async_block_till_done()

    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["errors"] is None
    assert result2["step_id"] == "manual"

    result3 = await hass.config_entries.flow.async_configure(
        result2["flow_id"],
        {CONF_HOST: MOCK_IP, CONF_BLID: "blid"},
    )
    await hass.async_block_till_done()

    assert result3["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result3["errors"] is None

    with patch(
        "homeassistant.components.roomba.config_flow.RoombaFactory.create_roomba",
        return_value=mocked_roomba,
    ), patch(
        "homeassistant.components.roomba.config_flow.RoombaPassword",
        _mocked_getpassword,
    ), patch(
        "homeassistant.components.roomba.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result4 = await hass.config_entries.flow.async_configure(
            result3["flow_id"],
            {},
        )
        await hass.async_block_till_done()

    assert result4["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result4["title"] == "myroomba"
    assert result4["result"].unique_id == "BLID"
    assert result4["data"] == {
        CONF_BLID: "BLID",
        CONF_CONTINUOUS: True,
        CONF_DELAY: 1,
        CONF_HOST: MOCK_IP,
        CONF_PASSWORD: "password",
    }
    assert len(mock_setup_entry.mock_calls) == 1
@pytest.mark.parametrize("discovery_data", DHCP_DISCOVERY_DEVICES_WITHOUT_MATCHING_IP)
async def test_dhcp_discovery_no_devices_falls_back_to_manual(hass, discovery_data):
    """Test we can process the discovery from dhcp but roomba discovery cannot find any devices."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    mocked_roomba = _create_mocked_roomba(
        roomba_connected=True,
        master_state={"state": {"reported": {"name": "myroomba"}}},
    )
    # Empty roomba discovery -> DHCP flow goes straight to "manual".
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaDiscovery",
        _mocked_no_devices_found_discovery,
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_DHCP},
            data=discovery_data,
        )
        await hass.async_block_till_done()

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] is None
    assert result["step_id"] == "manual"

    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {CONF_HOST: MOCK_IP, CONF_BLID: "blid"},
    )
    await hass.async_block_till_done()

    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["errors"] is None

    with patch(
        "homeassistant.components.roomba.config_flow.RoombaFactory.create_roomba",
        return_value=mocked_roomba,
    ), patch(
        "homeassistant.components.roomba.config_flow.RoombaPassword",
        _mocked_getpassword,
    ), patch(
        "homeassistant.components.roomba.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result3 = await hass.config_entries.flow.async_configure(
            result2["flow_id"],
            {},
        )
        await hass.async_block_till_done()

    assert result3["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result3["title"] == "myroomba"
    assert result3["result"].unique_id == "BLID"
    assert result3["data"] == {
        CONF_BLID: "BLID",
        CONF_CONTINUOUS: True,
        CONF_DELAY: 1,
        CONF_HOST: MOCK_IP,
        CONF_PASSWORD: "password",
    }
    assert len(mock_setup_entry.mock_calls) == 1
async def test_dhcp_discovery_with_ignored(hass):
    """Test ignored entries do not break checking for existing entries."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    # An ignored entry carries no data; comparing the discovery against it
    # must not raise or abort the flow.
    config_entry = MockConfigEntry(
        domain=DOMAIN, data={}, source=config_entries.SOURCE_IGNORE
    )
    config_entry.add_to_hass(hass)
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaDiscovery", _mocked_discovery
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_DHCP},
            data={
                IP_ADDRESS: MOCK_IP,
                MAC_ADDRESS: "AA:BB:CC:DD:EE:FF",
                HOSTNAME: "irobot-blid",
            },
        )
        await hass.async_block_till_done()
    # The ignored entry must not abort the flow; a form is still shown.
    assert result["type"] == "form"
async def test_dhcp_discovery_already_configured_host(hass):
    """Test we abort if the host is already configured."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    # Existing entry matching on host only (no blid / unique id).
    config_entry = MockConfigEntry(domain=DOMAIN, data={CONF_HOST: MOCK_IP})
    config_entry.add_to_hass(hass)
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaDiscovery", _mocked_discovery
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_DHCP},
            data={
                IP_ADDRESS: MOCK_IP,
                MAC_ADDRESS: "AA:BB:CC:DD:EE:FF",
                HOSTNAME: "irobot-blid",
            },
        )
        await hass.async_block_till_done()
    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
async def test_dhcp_discovery_already_configured_blid(hass):
    """Test we abort if the blid is already configured."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    # Existing entry keyed by the BLID unique id rather than the host.
    config_entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_BLID: "BLID"}, unique_id="BLID"
    )
    config_entry.add_to_hass(hass)
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaDiscovery", _mocked_discovery
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_DHCP},
            data={
                IP_ADDRESS: MOCK_IP,
                MAC_ADDRESS: "AA:BB:CC:DD:EE:FF",
                HOSTNAME: "irobot-blid",
            },
        )
        await hass.async_block_till_done()
    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
async def test_dhcp_discovery_not_irobot(hass):
    """Test we abort if the discovered device is not an irobot device."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    config_entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_BLID: "BLID"}, unique_id="BLID"
    )
    config_entry.add_to_hass(hass)
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaDiscovery", _mocked_discovery
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_DHCP},
            data={
                IP_ADDRESS: MOCK_IP,
                MAC_ADDRESS: "AA:BB:CC:DD:EE:FF",
                # Hostname does not start with "irobot-"/"roomba-", so the
                # flow must bail out before contacting the device.
                HOSTNAME: "Notirobot-blid",
            },
        )
        await hass.async_block_till_done()
    assert result["type"] == "abort"
    assert result["reason"] == "not_irobot_device"
async def test_dhcp_discovery_partial_hostname(hass):
    """Test we abort flows when we have a partial hostname."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    # First discovery with a short (truncated) blid starts a flow.
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaDiscovery", _mocked_discovery
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_DHCP},
            data={
                IP_ADDRESS: MOCK_IP,
                MAC_ADDRESS: "AA:BB:CC:DD:EE:FF",
                HOSTNAME: "irobot-blid",
            },
        )
        await hass.async_block_till_done()
    assert result["type"] == "form"
    assert result["step_id"] == "link"
    # A second discovery with a longer hostname for the same device should
    # replace the first in-progress flow.
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaDiscovery", _mocked_discovery
    ):
        result2 = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_DHCP},
            data={
                IP_ADDRESS: MOCK_IP,
                MAC_ADDRESS: "AA:BB:CC:DD:EE:FF",
                HOSTNAME: "irobot-blidthatislonger",
            },
        )
        await hass.async_block_till_done()
    assert result2["type"] == "form"
    assert result2["step_id"] == "link"
    current_flows = hass.config_entries.flow.async_progress()
    assert len(current_flows) == 1
    assert current_flows[0]["flow_id"] == result2["flow_id"]
    # A hostname with an unusably short blid aborts without touching the
    # surviving flow.
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaDiscovery", _mocked_discovery
    ):
        result3 = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_DHCP},
            data={
                IP_ADDRESS: MOCK_IP,
                MAC_ADDRESS: "AA:BB:CC:DD:EE:FF",
                HOSTNAME: "irobot-bl",
            },
        )
        await hass.async_block_till_done()
    assert result3["type"] == "abort"
    assert result3["reason"] == "short_blid"
    current_flows = hass.config_entries.flow.async_progress()
    assert len(current_flows) == 1
    assert current_flows[0]["flow_id"] == result2["flow_id"]
|
kennedyshead/home-assistant
|
tests/components/roomba/test_config_flow.py
|
Python
|
apache-2.0
| 33,845
|
#!/usr/bin/python3
from ..handlers.phraselist import PhraseList;
# Telegram file_ids of pre-uploaded cat GIF animations; one entry is served
# per request by the PhraseList machinery (see CatGifList below).
LIST = [
    "BQADBAADaAYAAocbZAciIWq9C74SAgI",
    "BQADBAAD3TgAAu8cZAcqlrGqOSV22wI",
    "BQADBAADiyAAAlgXZAezKLZ9MT_SrAI",
    "BQADBAADdgMAAuAYZAdM22FCusLMPQI",
    "BQADBAADsB4AAtUYZAd4L46CbUw5TQI",
    "BQADBAADliEAAlgdZAf_QoDpHJTeXwI",
    "BQADBAADwCAAAnUdZAciR8DiCEN5CQI",
    "BQADBAAD3hYAAkwZZAfTw21n9HjSEwI",
    "BQADBAADfxoAAtgYZAeCNTkLIBXZ-QI",
    "BQADBAADwAUAAhQeZAf3dT6N0GF28QI",
    "BQADBAADqSEAAusbZAfoeAjccdNgNgI",
    "BQADBAADQhcAAnEaZAeqYUMfKuaQtgI",
    "BQADBAAD8jcAAhMZZAeuDQABPVHH1ZMC"
]
class CatGifList(PhraseList):
    """PhraseList that serves the cat GIF animations stored in LIST."""

    def __init__(self, userR, cmd):
        # "gif" tells PhraseList to send the LIST entries as animations.
        super().__init__(userR, cmd, LIST, "gif")

    def get_max_cmd_response(self, update):
        """Return (text, kind) sent when the per-user GIF limit is reached.

        The text is a Spanish "there are never too many cats" message
        personalised with the requester's first name.
        """
        text = "Nunca hay demasiados gatos! Pide otro "
        text += update.message.from_user.first_name.split()[0]
        return text, "message"
|
grcanosa/my-telegram-bot
|
src/mybot/data/catgifs.py
|
Python
|
gpl-3.0
| 958
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module metadata: preview status, maintained by the core team.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'core'}
# Module documentation consumed by ansible-doc; keep option docs in sync
# with the argument_spec in main(). Typos fixed: "the the" duplication,
# "number or revisions" -> "number of revisions".
DOCUMENTATION = '''
---
module: git
author:
    - "Ansible Core Team"
    - "Michael DeHaan"
version_added: "0.0.1"
short_description: Deploy software (or files) from git checkouts
description:
    - Manage I(git) checkouts of repositories to deploy files or software.
options:
    repo:
        required: true
        aliases: [ name ]
        description:
            - git, SSH, or HTTP(S) protocol address of the git repository.
    dest:
        required: true
        description:
            - Absolute path of where the repository should be checked out to.
              This parameter is required, unless C(clone) is set to C(no)
              This change was made in version 1.8.3. Prior to this version,
              the C(dest) parameter was always required.
    version:
        required: false
        default: "HEAD"
        description:
            - What version of the repository to check out. This can be
              the literal string C(HEAD), a branch name, a tag name.
              It can also be a I(SHA-1) hash, in which case C(refspec) needs
              to be specified if the given revision is not already available.
    accept_hostkey:
        required: false
        default: "no"
        choices: [ "yes", "no" ]
        version_added: "1.5"
        description:
            - if C(yes), adds the hostkey for the repo url if not already
              added. If ssh_opts contains "-o StrictHostKeyChecking=no",
              this parameter is ignored.
    ssh_opts:
        required: false
        default: None
        version_added: "1.5"
        description:
            - Creates a wrapper script and exports the path as GIT_SSH
              which git then automatically uses to override ssh arguments.
              An example value could be "-o StrictHostKeyChecking=no"
    key_file:
        required: false
        default: None
        version_added: "1.5"
        description:
            - Specify an optional private key file to use for the checkout.
    reference:
        required: false
        default: null
        version_added: "1.4"
        description:
            - Reference repository (see "git clone --reference ...")
    remote:
        required: false
        default: "origin"
        description:
            - Name of the remote.
    refspec:
        required: false
        default: null
        version_added: "1.9"
        description:
            - Add an additional refspec to be fetched.
              If version is set to a I(SHA-1) not reachable from any branch
              or tag, this option may be necessary to specify the ref containing
              the I(SHA-1).
              Uses the same syntax as the 'git fetch' command.
              An example value could be "refs/meta/config".
    force:
        required: false
        default: "no"
        choices: [ "yes", "no" ]
        version_added: "0.7"
        description:
            - If C(yes), any modified files in the working
              repository will be discarded. Prior to 0.7, this was always
              'yes' and could not be disabled. Prior to 1.9, the default was
              `yes`
    depth:
        required: false
        default: null
        version_added: "1.2"
        description:
            - Create a shallow clone with a history truncated to the specified
              number of revisions. The minimum possible value is C(1), otherwise
              ignored. Needs I(git>=1.9.1) to work correctly.
    clone:
        required: false
        default: "yes"
        choices: [ "yes", "no" ]
        version_added: "1.9"
        description:
            - If C(no), do not clone the repository if it does not exist locally
    update:
        required: false
        default: "yes"
        choices: [ "yes", "no" ]
        version_added: "1.2"
        description:
            - If C(no), do not retrieve new revisions from the origin repository
    executable:
        required: false
        default: null
        version_added: "1.4"
        description:
            - Path to git executable to use. If not supplied,
              the normal mechanism for resolving binary paths will be used.
    bare:
        required: false
        default: "no"
        choices: [ "yes", "no" ]
        version_added: "1.4"
        description:
            - if C(yes), repository will be created as a bare repo, otherwise
              it will be a standard repo with a workspace.
    umask:
        required: false
        default: null
        version_added: "2.2"
        description:
            - The umask to set before doing any checkouts, or any other
              repository maintenance.
    recursive:
        required: false
        default: "yes"
        choices: [ "yes", "no" ]
        version_added: "1.6"
        description:
            - if C(no), repository will be cloned without the --recursive
              option, skipping sub-modules.
    track_submodules:
        required: false
        default: "no"
        choices: ["yes", "no"]
        version_added: "1.8"
        description:
            - if C(yes), submodules will track the latest commit on their
              master branch (or other branch specified in .gitmodules). If
              C(no), submodules will be kept at the revision specified by the
              main project. This is equivalent to specifying the --remote flag
              to git submodule update.
    verify_commit:
        required: false
        default: "no"
        choices: ["yes", "no"]
        version_added: "2.0"
        description:
            - if C(yes), when cloning or checking out a C(version) verify the
              signature of a GPG signed commit. This requires C(git) version>=2.1.0
              to be installed. The commit MUST be signed and the public key MUST
              be trusted in the GPG trustdb.
requirements:
    - git>=1.7.1 (the command line tool)
notes:
    - "If the task seems to be hanging, first verify remote host is in C(known_hosts).
      SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
      one solution is to use the option accept_hostkey. Another solution is to
      add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
      the git module, with the following command: ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts."
'''

# Usage examples rendered by ansible-doc.
EXAMPLES = '''
# Example git checkout from Ansible Playbooks
- git:
    repo: 'https://foosball.example.org/path/to/repo.git'
    dest: /srv/checkout
    version: release-0.22

# Example read-write git checkout from github
- git:
    repo: ssh://git@github.com/mylogin/hello.git
    dest: /home/mylogin/hello

# Example just ensuring the repo checkout exists
- git:
    repo: 'https://foosball.example.org/path/to/repo.git'
    dest: /srv/checkout
    update: no

# Example just get information about the repository whether or not it has
# already been cloned locally.
- git:
    repo: 'https://foosball.example.org/path/to/repo.git'
    dest: /srv/checkout
    clone: no
    update: no

# Example checkout a github repo and use refspec to fetch all pull requests
- git:
    repo: https://github.com/ansible/ansible-examples.git
    dest: /src/ansible-examples
    refspec: '+refs/pull/*:refs/heads/*'
'''

# Return-value documentation; typo fixed ("retrived" -> "retrieved").
RETURN = '''
after:
    description: last commit revision of the repository retrieved during the update
    returned: success
    type: string
    sample: 4c020102a9cd6fe908c9a4a326a38f972f63a903
before:
    description: commit revision before the repository was updated, "null" for new repository
    returned: success
    type: string
    sample: 67c04ebe40a003bda0efb34eacfb93b0cafdf628
remote_url_changed:
    description: Contains True or False whether or not the remote URL was changed.
    returned: success
    type: boolean
    sample: True
warnings:
    description: List of warnings if requested features were not available due to a too old git version.
    returned: error
    type: string
    sample: Your git version is too old to fully support the depth argument. Falling back to full checkouts.
'''
import os
import re
import shlex
import stat
import sys
import tempfile
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, get_module_path
from ansible.module_utils.known_hosts import add_git_host_key
from ansible.module_utils.six import b, string_types
from ansible.module_utils._text import to_native
def head_splitter(headfile, remote, module=None, fail_on_error=False):
    '''Extract the branch name that HEAD points at.

    Reads the first line of *headfile* (e.g. .git/refs/remotes/<remote>/HEAD),
    strips the "refs/remotes/<remote>" prefix and returns the trailing branch
    name, or None when the file is missing/unparseable. When *fail_on_error*
    is set and *module* is given, failures call module.fail_json instead.
    See https://github.com/ansible/ansible-modules-core/pull/907
    '''
    res = None
    if os.path.exists(headfile):
        rawdata = None
        try:
            # context manager guarantees the handle is closed
            with open(headfile, 'r') as f:
                rawdata = f.readline()
        except (IOError, OSError):
            if fail_on_error and module:
                module.fail_json(msg="Unable to read %s" % headfile)
        if rawdata:
            try:
                rawdata = rawdata.replace('refs/remotes/%s' % remote, '', 1)
                refparts = rawdata.split(' ')
                newref = refparts[-1]
                nrefparts = newref.split('/', 2)
                res = nrefparts[-1].rstrip('\n')
            except (AttributeError, IndexError):
                if fail_on_error and module:
                    module.fail_json(msg="Unable to split head from '%s'" % rawdata)
    return res
def unfrackgitpath(path):
    """Fully resolve *path*: expand env vars and '~', resolve symlinks and
    normalize the result. Returns None when *path* is None.

    Mirrors the behaviour of ansible.utils.path.
    """
    if path is None:
        return None
    expanded = os.path.expandvars(path)
    expanded = os.path.expanduser(expanded)
    return os.path.normpath(os.path.realpath(expanded))
def get_submodule_update_params(module, git_path, cwd):
    """Discover the option names supported by `git submodule update`.

    Runs `git submodule update --help` and scrapes the usage synopsis
    from stderr, returning the long-option names with the leading '--'
    stripped (e.g. ['quiet', 'init', 'force', ...]).
    """
    # The usage synopsis is printed on stderr; the exit code is irrelevant.
    rc, stdout, stderr = module.run_command("%s submodule update --help" % (git_path), cwd=cwd)
    usage = None
    for candidate in stderr.split('\n'):
        if 'git submodule [--quiet] update ' in candidate:
            usage = candidate
    if not usage:
        return []
    # Strip the [] grouping and | alternation markers before tokenizing.
    for marker, replacement in (('[', ''), (']', ''), ('|', ' ')):
        usage = usage.replace(marker, replacement)
    return [tok.replace('--', '') for tok in shlex.split(usage) if tok.startswith('--')]
def write_ssh_wrapper():
    """Create a temporary executable shell script usable as GIT_SSH.

    The wrapper forwards $GIT_SSH_OPTS and, when $GIT_KEY is set, passes
    the key with -o IdentitiesOnly=yes. Returns the path to the script.
    """
    module_dir = get_module_path()
    try:
        # make sure we have full permission to the module_dir, which
        # may not be the case if we're sudo'ing to a non-root user
        if os.access(module_dir, os.W_OK|os.R_OK|os.X_OK):
            fd, wrapper_path = tempfile.mkstemp(prefix=module_dir + '/')
        else:
            raise OSError
    except (IOError, OSError):
        # fall back to the system default temp directory
        fd, wrapper_path = tempfile.mkstemp()
    fh = os.fdopen(fd, 'w+b')
    template = b("""#!/bin/sh
if [ -z "$GIT_SSH_OPTS" ]; then
BASEOPTS=""
else
BASEOPTS=$GIT_SSH_OPTS
fi
if [ -z "$GIT_KEY" ]; then
ssh $BASEOPTS "$@"
else
ssh -i "$GIT_KEY" -o IdentitiesOnly=yes $BASEOPTS "$@"
fi
""")
    fh.write(template)
    fh.close()
    st = os.stat(wrapper_path)
    # mark the wrapper executable so git can invoke it
    os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC)
    return wrapper_path
def set_git_ssh(ssh_wrapper, key_file, ssh_opts):
    """Export GIT_SSH/GIT_KEY/GIT_SSH_OPTS so git uses our ssh wrapper.

    GIT_SSH always ends up pointing at *ssh_wrapper*; GIT_KEY and
    GIT_SSH_OPTS are cleared first and only re-exported when a truthy
    value was supplied.
    """
    env = os.environ
    # GIT_SSH is unconditionally replaced by the wrapper path.
    env.pop("GIT_SSH", None)
    env["GIT_SSH"] = ssh_wrapper
    for name, value in (("GIT_KEY", key_file), ("GIT_SSH_OPTS", ssh_opts)):
        if env.get(name):
            del env[name]
        if value:
            env[name] = value
def get_version(module, git_path, dest, ref="HEAD"):
    ''' samples the version of the git repo '''
    # `git rev-parse <ref>` prints the full SHA-1 of the ref on stdout
    cmd = "%s rev-parse %s" % (git_path, ref)
    rc, stdout, stderr = module.run_command(cmd, cwd=dest)
    sha = to_native(stdout).rstrip('\n')
    return sha
def get_submodule_versions(git_path, module, dest, version='HEAD'):
    """Return a dict mapping submodule path -> SHA-1 of *version* in it.

    Parses the output of `git submodule foreach git rev-parse <version>`;
    fails the module if the output cannot be paired up.
    """
    cmd = [git_path, 'submodule', 'foreach', git_path, 'rev-parse', version]
    (rc, out, err) = module.run_command(cmd, cwd=dest)
    if rc != 0:
        module.fail_json(msg='Unable to determine hashes of submodules', stdout=out, stderr=err, rc=rc)
    submodules = {}
    subm_name = None
    for line in out.splitlines():
        # `foreach` prints "Entering '<path>'" before each submodule's output
        if line.startswith("Entering '"):
            subm_name = line[10:-1]
        elif len(line.strip()) == 40:
            # a bare 40-character line is the SHA-1 for the current submodule
            if subm_name is None:
                module.fail_json()
            submodules[subm_name] = line.strip()
            subm_name = None
        else:
            module.fail_json(msg='Unable to parse submodule hash line: %s' % line.strip())
    if subm_name is not None:
        module.fail_json(msg='Unable to find hash for submodule: %s' % subm_name)
    return submodules
def clone(git_path, module, repo, dest, remote, depth, version, bare,
          reference, refspec, verify_commit):
    ''' makes a new git repo if it does not already exist '''
    dest_dirname = os.path.dirname(dest)
    try:
        os.makedirs(dest_dirname)
    except:
        # best-effort: the parent directory may already exist
        pass
    cmd = [ git_path, 'clone' ]
    if bare:
        cmd.append('--bare')
    else:
        cmd.extend([ '--origin', remote ])
    if depth:
        # --depth is only meaningful when git can fetch the target directly
        if version == 'HEAD' or refspec:
            cmd.extend([ '--depth', str(depth) ])
        elif is_remote_branch(git_path, module, dest, repo, version) \
        or is_remote_tag(git_path, module, dest, repo, version):
            cmd.extend([ '--depth', str(depth) ])
            cmd.extend(['--branch', version])
        else:
            # only use depth if the remote object is branch or tag (i.e. fetchable)
            module.warn("Ignoring depth argument. "
                        "Shallow clones are only available for "
                        "HEAD, branches, tags or in combination with refspec.")
    if reference:
        cmd.extend([ '--reference', str(reference) ])
    cmd.extend([ repo, dest ])
    module.run_command(cmd, check_rc=True, cwd=dest_dirname)
    if bare:
        # `git clone --bare` ignores --origin, so add the remote manually
        if remote != 'origin':
            module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest)
    if refspec:
        # fetch the extra refspec (e.g. a ref holding an otherwise
        # unreachable SHA-1) after the initial clone
        cmd = [git_path, 'fetch']
        if depth:
            cmd.extend([ '--depth', str(depth) ])
        cmd.extend([remote, refspec])
        module.run_command(cmd, check_rc=True, cwd=dest)
    if verify_commit:
        verify_commit_sign(git_path, module, dest, version)
def has_local_mods(module, git_path, dest, bare):
    """Return True when the working tree at *dest* has uncommitted changes.

    Untracked files ('??' porcelain status lines) are ignored. Bare repos
    have no working tree, so they never report local modifications.
    """
    if bare:
        return False
    rc, stdout, stderr = module.run_command("%s status --porcelain" % (git_path), cwd=dest)
    # Keep every status line except untracked-file entries ("?? path").
    tracked_changes = [line for line in stdout.splitlines()
                       if not re.search('^\\?\\?.*$', line)]
    return len(tracked_changes) > 0
def reset(git_path, module, dest):
    """Hard-reset the checkout at *dest* to HEAD.

    Discards any changes to tracked files in the working tree since the
    last commit. Returns the (rc, stdout, stderr) tuple from git.
    """
    return module.run_command("%s reset --hard HEAD" % (git_path,), check_rc=True, cwd=dest)
def get_diff(module, git_path, dest, repo, remote, depth, bare, before, after):
    ''' Return the difference between 2 versions '''
    if before is None:
        # fresh checkout: nothing to diff against
        return { 'prepared': '>> Newly checked out %s' % after }
    elif before != after:
        # Ensure we have the object we are referring to during git diff !
        git_version_used = git_version(git_path, module)
        fetch(git_path, module, repo, dest, after, remote, depth, bare, '', git_version_used)
        cmd = '%s diff %s %s' % (git_path, before, after)
        (rc, out, err) = module.run_command(cmd, cwd=dest)
        if rc == 0 and out:
            return { 'prepared': out }
        elif rc == 0:
            return { 'prepared': '>> No visual differences between %s and %s' % (before, after) }
        elif err:
            return { 'prepared': '>> Failed to get proper diff between %s and %s:\n>> %s' % (before, after, err) }
        else:
            return { 'prepared': '>> Failed to get proper diff between %s and %s' % (before, after) }
    # before == after: no change, empty diff
    return {}
def get_remote_head(git_path, module, dest, version, remote, bare):
    """Resolve *version* on *remote* to a commit SHA-1 via `git ls-remote`.

    Handles HEAD, branch names and tag names; a value that looks like a
    SHA-1 is returned unchanged because it cannot be queried remotely.
    """
    cloning = False
    cwd = None
    tag = False
    if remote == module.params['repo']:
        # remote is the repo URL itself: we are about to clone, no cwd yet
        cloning = True
    else:
        cwd = dest
    if version == 'HEAD':
        if cloning:
            # cloning the repo, just get the remote's HEAD version
            cmd = '%s ls-remote %s -h HEAD' % (git_path, remote)
        else:
            head_branch = get_head_branch(git_path, module, dest, remote, bare)
            cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, head_branch)
    elif is_remote_branch(git_path, module, dest, remote, version):
        cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
    elif is_remote_tag(git_path, module, dest, remote, version):
        tag = True
        # trailing * also matches the "<tag>^{}" dereferenced form
        cmd = '%s ls-remote %s -t refs/tags/%s*' % (git_path, remote, version)
    else:
        # appears to be a sha1. return as-is since it appears
        # cannot check for a specific sha1 on remote
        return version
    (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=cwd)
    if len(out) < 1:
        module.fail_json(msg="Could not determine remote revision for %s" % version, stdout=out, stderr=err, rc=rc)
    out = to_native(out)
    if tag:
        # Find the dereferenced tag if this is an annotated tag.
        for tag in out.split('\n'):
            if tag.endswith(version + '^{}'):
                out = tag
                break
            elif tag.endswith(version):
                out = tag
    rev = out.split()[0]
    return rev
def is_remote_tag(git_path, module, dest, remote, version):
    """Return True when *version* exists as a tag on *remote*."""
    cmd = '%s ls-remote %s -t refs/tags/%s' % (git_path, remote, version)
    rc, out, err = module.run_command(cmd, check_rc=True, cwd=dest)
    return to_native(version, errors='surrogate_or_strict') in out
def get_branches(git_path, module, dest):
    """Return all local and remote-tracking branch lines for *dest*.

    Each entry is a stripped line from `git branch --no-color -a`; the
    currently checked-out branch keeps its '* ' prefix.
    """
    rc, out, err = module.run_command('%s branch --no-color -a' % (git_path,), cwd=dest)
    if rc != 0:
        module.fail_json(msg="Could not determine branch data - received %s" % out, stdout=out, stderr=err)
    return [line.strip() for line in out.split('\n') if line.strip()]
def get_tags(git_path, module, dest):
    """Return the list of tag names in the repository at *dest*."""
    rc, out, err = module.run_command('%s tag' % (git_path,), cwd=dest)
    if rc != 0:
        module.fail_json(msg="Could not determine tag data - received %s" % out, stdout=out, stderr=err)
    return [line.strip() for line in to_native(out).split('\n') if line.strip()]
def is_remote_branch(git_path, module, dest, remote, version):
    """Return True when *version* exists as a branch head on *remote*."""
    cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
    rc, out, err = module.run_command(cmd, check_rc=True, cwd=dest)
    return to_native(version, errors='surrogate_or_strict') in out
def is_local_branch(git_path, module, dest, branch):
    """Return True when *branch* exists locally, checked out or not."""
    branches = get_branches(git_path, module, dest)
    # The currently checked-out branch is listed with a '* ' prefix.
    return branch in branches or ('* %s' % branch) in branches
def is_not_a_branch(git_path, module, dest):
    """Return True when HEAD is detached (not on any branch)."""
    # Detached HEAD shows up as '* (no branch)' or '* (detached from ...)'.
    return any(
        entry.startswith('* ') and ('no branch' in entry or 'detached from' in entry)
        for entry in get_branches(git_path, module, dest)
    )
def get_head_branch(git_path, module, dest, remote, bare=False):
    '''
    Determine what branch HEAD is associated with.  This is partly
    taken from lib/ansible/utils/__init__.py.  It finds the correct
    path to .git/HEAD and reads from that file the branch that HEAD is
    associated with.  In the case of a detached HEAD, this will look
    up the branch in .git/refs/remotes/<remote>/HEAD.
    '''
    if bare:
        repo_path = dest
    else:
        repo_path = os.path.join(dest, '.git')
    # Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
    if os.path.isfile(repo_path):
        try:
            git_conf = open(repo_path, 'rb')
            for line in git_conf:
                # a submodule's .git file contains "gitdir: <path>"
                config_val = line.split(b(':'), 1)
                if config_val[0].strip() == b('gitdir'):
                    gitdir = to_native(config_val[1].strip(), errors='surrogate_or_strict')
                    break
            else:
                # No repo path found
                return ''
            # There is a possibility the .git file to have an absolute path.
            if os.path.isabs(gitdir):
                repo_path = gitdir
            else:
                repo_path = os.path.join(repo_path.split('.git')[0], gitdir)
        except (IOError, AttributeError):
            # No repo path found
            return ''
    # Read .git/HEAD for the name of the branch.
    # If we're in a detached HEAD state, look up the branch associated with
    # the remote HEAD in .git/refs/remotes/<remote>/HEAD
    headfile = os.path.join(repo_path, "HEAD")
    if is_not_a_branch(git_path, module, dest):
        headfile = os.path.join(repo_path, 'refs', 'remotes', remote, 'HEAD')
    branch = head_splitter(headfile, remote, module=module, fail_on_error=True)
    return branch
def get_remote_url(git_path, module, dest, remote):
    """Return the URL configured for *remote*, or None when it cannot be
    determined (e.g. `ls-remote --get-url` unsupported by this git)."""
    rc, out, err = module.run_command([git_path, 'ls-remote', '--get-url', remote], cwd=dest)
    if rc != 0:
        # Most likely --get-url is not available in this version of git.
        return None
    return to_native(out).rstrip('\n')
def set_remote_url(git_path, module, repo, dest, remote):
    ''' updates repo from remote sources '''
    # Return if remote URL isn't changing.
    remote_url = get_remote_url(git_path, module, dest, remote)
    # compare both raw and fully-resolved forms so path aliases don't
    # trigger a needless set-url
    if remote_url == repo or unfrackgitpath(remote_url) == unfrackgitpath(repo):
        return False
    command = [git_path, 'remote', 'set-url', remote, repo]
    (rc, out, err) = module.run_command(command, cwd=dest)
    if rc != 0:
        label = "set a new url %s for %s" % (repo, remote)
        module.fail_json(msg="Failed to %s: %s %s" % (label, out, err))
    # Return False if remote_url is None to maintain previous behavior
    # for Git versions prior to 1.7.5 that lack required functionality.
    return remote_url is not None
def fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used):
    ''' updates repo from remote sources '''
    set_remote_url(git_path, module, repo, dest, remote)
    commands = []
    fetch_str = 'download remote objects and refs'
    fetch_cmd = [git_path, 'fetch']
    refspecs = []
    if depth:
        # try to find the minimal set of refs we need to fetch to get a
        # successful checkout
        currenthead = get_head_branch(git_path, module, dest, remote)
        if refspec:
            refspecs.append(refspec)
        elif version == 'HEAD':
            refspecs.append(currenthead)
        elif is_remote_branch(git_path, module, dest, repo, version):
            if currenthead != version:
                # this workaround is only needed for older git versions
                # 1.8.3 is broken, 1.9.x works
                # ensure that remote branch is available as both local and remote ref
                refspecs.append('+refs/heads/%s:refs/heads/%s' % (version, version))
                refspecs.append('+refs/heads/%s:refs/remotes/%s/%s' % (version, remote, version))
            else:
                refspecs.append(version)
        elif is_remote_tag(git_path, module, dest, repo, version):
            refspecs.append('+refs/tags/'+version+':refs/tags/'+version)
        if refspecs:
            # if refspecs is empty, i.e. version is neither heads nor tags
            # assume it is a version hash
            # fall back to a full clone, otherwise we might not be able to checkout
            # version
            fetch_cmd.extend(['--depth', str(depth)])
    if not depth or not refspecs:
        # don't try to be minimalistic but do a full clone
        # also do this if depth is given, but version is something that can't be fetched directly
        if bare:
            refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*']
        else:
            # ensure all tags are fetched
            if git_version_used >= LooseVersion('1.9'):
                fetch_cmd.append('--tags')
            else:
                # old git versions have a bug in --tags that prevents updating existing tags
                commands.append((fetch_str, fetch_cmd + [remote]))
                refspecs = ['+refs/tags/*:refs/tags/*']
        if refspec:
            refspecs.append(refspec)
    fetch_cmd.extend([remote])
    commands.append((fetch_str, fetch_cmd + refspecs))
    # run the accumulated fetch commands, failing loudly on the first error
    for (label,command) in commands:
        (rc,out,err) = module.run_command(command, cwd=dest)
        if rc != 0:
            module.fail_json(msg="Failed to %s: %s %s" % (label, out, err), cmd=command)
def submodules_fetch(git_path, module, remote, track_submodules, dest):
    """Fetch submodule updates; return True when any submodule changed.

    Detects brand-new submodules via .gitmodules, registers their host
    keys, then compares submodule revisions before/after a fetch.
    """
    changed = False
    if not os.path.exists(os.path.join(dest, '.gitmodules')):
        # no submodules
        return changed
    gitmodules_file = open(os.path.join(dest, '.gitmodules'), 'r')
    for line in gitmodules_file:
        # Check for new submodules
        if not changed and line.strip().startswith('path'):
            path = line.split('=', 1)[1].strip()
            # Check that dest/path/.git exists
            if not os.path.exists(os.path.join(dest, path, '.git')):
                changed = True
        # add the submodule repo's hostkey
        if line.strip().startswith('url'):
            repo = line.split('=', 1)[1].strip()
            if module.params['ssh_opts'] is not None:
                if "-o StrictHostKeyChecking=no" not in module.params['ssh_opts']:
                    add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])
            else:
                add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])
    # Check for updates to existing modules
    if not changed:
        # Fetch updates
        begin = get_submodule_versions(git_path, module, dest)
        cmd = [git_path, 'submodule', 'foreach', git_path, 'fetch']
        (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
        if rc != 0:
            module.fail_json(msg="Failed to fetch submodules: %s" % out + err)
        if track_submodules:
            # Compare against submodule HEAD
            ### FIXME: determine this from .gitmodules
            version = 'master'
            after = get_submodule_versions(git_path, module, dest, '%s/%s'
                                           % (remote, version))
            if begin != after:
                changed = True
        else:
            # Compare against the superproject's expectation
            cmd = [git_path, 'submodule', 'status']
            (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
            if rc != 0:
                module.fail_json(msg='Failed to retrieve submodule status: %s' % out + err)
            # a non-space first column means the submodule is out of sync
            for line in out.splitlines():
                if line[0] != ' ':
                    changed = True
                    break
    return changed
def submodule_update(git_path, module, dest, track_submodules, force=False):
    ''' init and update any submodules '''
    # get the valid submodule params
    params = get_submodule_update_params(module, git_path, dest)
    # skip submodule commands if .gitmodules is not present
    if not os.path.exists(os.path.join(dest, '.gitmodules')):
        return (0, '', '')
    cmd = [ git_path, 'submodule', 'sync' ]
    (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
    # --remote is only passed when this git supports it AND tracking is on
    if 'remote' in params and track_submodules:
        cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ,'--remote' ]
    else:
        cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ]
    if force:
        cmd.append('--force')
    (rc, out, err) = module.run_command(cmd, cwd=dest)
    if rc != 0:
        module.fail_json(msg="Failed to init/update submodules: %s" % out + err)
    return (rc, out, err)
def set_remote_branch(git_path, module, dest, remote, version, depth):
    """set refs for the remote branch version

    This assumes the branch does not yet exist locally and is therefore also not checked out.
    Can't use git remote set-branches, as it is not available in git 1.7.1 (centos6)
    """
    # fetch the branch into both the local and remote-tracking namespaces
    branchref = "+refs/heads/%s:refs/heads/%s" % (version, version)
    branchref += ' +refs/heads/%s:refs/remotes/%s/%s' % (version, remote, version)
    cmd = "%s fetch --depth=%s %s %s" % (git_path, depth, remote, branchref)
    (rc, out, err) = module.run_command(cmd, cwd=dest)
    if rc != 0:
        module.fail_json(msg="Failed to fetch branch from remote: %s" % version, stdout=out, stderr=err, rc=rc)
def switch_version(git_path, module, dest, remote, version, verify_commit, depth):
    """Check out *version* (HEAD, branch, tag or SHA-1) in *dest*.

    Optionally verifies the GPG signature of the result. Returns the
    (rc, stdout, stderr) of the final git command.
    """
    cmd = ''
    if version == 'HEAD':
        branch = get_head_branch(git_path, module, dest, remote)
        (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch), cwd=dest)
        if rc != 0:
            module.fail_json(msg="Failed to checkout branch %s" % branch,
                             stdout=out, stderr=err, rc=rc)
        cmd = "%s reset --hard %s/%s --" % (git_path, remote, branch)
    else:
        # FIXME check for local_branch first, should have been fetched already
        if is_remote_branch(git_path, module, dest, remote, version):
            if depth and not is_local_branch(git_path, module, dest, version):
                # git clone --depth implies --single-branch, which makes
                # the checkout fail if the version changes
                # fetch the remote branch, to be able to check it out next
                set_remote_branch(git_path, module, dest, remote, version, depth)
            if not is_local_branch(git_path, module, dest, version):
                cmd = "%s checkout --track -b %s %s/%s" % (git_path, version, remote, version)
            else:
                (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, version), cwd=dest)
                if rc != 0:
                    module.fail_json(msg="Failed to checkout branch %s" % version,
                                     stdout=out, stderr=err, rc=rc)
                cmd = "%s reset --hard %s/%s" % (git_path, remote, version)
        else:
            # tag or SHA-1: a plain forced checkout is sufficient
            cmd = "%s checkout --force %s" % (git_path, version)
    (rc, out1, err1) = module.run_command(cmd, cwd=dest)
    if rc != 0:
        if version != 'HEAD':
            module.fail_json(msg="Failed to checkout %s" % (version),
                             stdout=out1, stderr=err1, rc=rc, cmd=cmd)
        else:
            module.fail_json(msg="Failed to checkout branch %s" % (branch),
                             stdout=out1, stderr=err1, rc=rc, cmd=cmd)
    if verify_commit:
        verify_commit_sign(git_path, module, dest, version)
    return (rc, out1, err1)
def verify_commit_sign(git_path, module, dest, version):
    """Verify the GPG signature of `version` (a tag or commit) in `dest`.

    Uses `git verify-tag` when `version` names a tag, `git verify-commit`
    otherwise.  Fails the module if verification fails; returns the
    (rc, stdout, stderr) of the verification command on success.
    """
    subcommand = "verify-tag" if version in get_tags(git_path, module, dest) else "verify-commit"
    (rc, out, err) = module.run_command("%s %s %s" % (git_path, subcommand, version), cwd=dest)
    if rc != 0:
        module.fail_json(msg='Failed to verify GPG signature of commit/tag "%s"' % version, stdout=out, stderr=err, rc=rc)
    return (rc, out, err)
def git_version(git_path, module):
    """return the installed version of git"""
    (rc, out, err) = module.run_command("%s --version" % git_path)
    if rc != 0:
        # Version info is non-essential; let actual git commands fail loudly
        # later instead of aborting here.
        return None
    match = re.search('git version (.*)$', to_native(out))
    if match is None:
        return None
    return LooseVersion(match.groups()[0])
# ===========================================
def main():
    """Entry point of the Ansible git module.

    Depending on the parameters and current state this either reports what
    would change (check mode or clone=no), clones the repository when no
    checkout exists yet, or fetches/updates an existing checkout and switches
    it to the requested version (optionally updating submodules).  Always
    terminates via module.exit_json() / module.fail_json().

    Fixes vs. previous revision:
      * `except:` narrowed to `except ValueError:` for the umask conversion
        (the bare except also swallowed SystemExit/KeyboardInterrupt).
      * `remote_url_changed` is initialized, so the final 'changed'
        computation cannot raise NameError on the clone code path.
      * removed the never-read `need_fetch` variable.
    """
    module = AnsibleModule(
        argument_spec = dict(
            dest=dict(type='path'),
            repo=dict(required=True, aliases=['name']),
            version=dict(default='HEAD'),
            remote=dict(default='origin'),
            refspec=dict(default=None),
            reference=dict(default=None),
            force=dict(default='no', type='bool'),
            depth=dict(default=None, type='int'),
            clone=dict(default='yes', type='bool'),
            update=dict(default='yes', type='bool'),
            verify_commit=dict(default='no', type='bool'),
            accept_hostkey=dict(default='no', type='bool'),
            key_file=dict(default=None, type='path', required=False),
            ssh_opts=dict(default=None, required=False),
            executable=dict(default=None, type='path'),
            bare=dict(default='no', type='bool'),
            recursive=dict(default='yes', type='bool'),
            track_submodules=dict(default='no', type='bool'),
            umask=dict(default=None, type='raw'),
        ),
        supports_check_mode=True
    )

    dest = module.params['dest']
    repo = module.params['repo']
    version = module.params['version']
    remote = module.params['remote']
    refspec = module.params['refspec']
    force = module.params['force']
    depth = module.params['depth']
    update = module.params['update']
    allow_clone = module.params['clone']
    bare = module.params['bare']
    verify_commit = module.params['verify_commit']
    reference = module.params['reference']
    git_path = module.params['executable'] or module.get_bin_path('git', True)
    key_file = module.params['key_file']
    ssh_opts = module.params['ssh_opts']
    umask = module.params['umask']

    result = dict(changed=False, warnings=list())

    # evaluate and set the umask before doing anything else
    if umask is not None:
        if not isinstance(umask, string_types):
            module.fail_json(msg="umask must be defined as a quoted octal integer")
        try:
            umask = int(umask, 8)
        except ValueError:
            # BUGFIX: was a bare `except:`; only a failed octal conversion
            # should be reported here.
            module.fail_json(msg="umask must be an octal integer",
                             details=str(sys.exc_info()[1]))
        os.umask(umask)

    # Certain features such as depth require a file:/// protocol for path based urls
    # so force a protocol here ...
    if repo.startswith('/'):
        repo = 'file://' + repo

    # We screenscrape a huge amount of git commands so use C locale anytime we
    # call run_command()
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    gitconfig = None
    if not dest and allow_clone:
        module.fail_json(msg="the destination directory must be specified unless clone=no")
    elif dest:
        dest = os.path.abspath(dest)
        if bare:
            gitconfig = os.path.join(dest, 'config')
        else:
            gitconfig = os.path.join(dest, '.git', 'config')

    # create a wrapper script and export
    # GIT_SSH=<path> as an environment variable
    # for git to use the wrapper script
    ssh_wrapper = None
    if key_file or ssh_opts:
        ssh_wrapper = write_ssh_wrapper()
        set_git_ssh(ssh_wrapper, key_file, ssh_opts)
        module.add_cleanup_file(path=ssh_wrapper)

    # add the git repo's hostkey
    if module.params['ssh_opts'] is not None:
        if "-o StrictHostKeyChecking=no" not in module.params['ssh_opts']:
            add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])
    else:
        add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])

    git_version_used = git_version(git_path, module)

    if depth is not None and git_version_used < LooseVersion('1.9.1'):
        result['warnings'].append("Your git version is too old to fully support the depth argument. Falling back to full checkouts.")
        depth = None

    recursive = module.params['recursive']
    track_submodules = module.params['track_submodules']

    result.update(before=None)

    local_mods = False
    # BUGFIX: initialized here so the final 'changed' computation below cannot
    # raise NameError on the clone code path (which never assigns it).
    remote_url_changed = False
    if (dest and not os.path.exists(gitconfig)) or (not dest and not allow_clone):
        # if there is no git configuration, do a clone operation unless:
        # * the user requested no clone (they just want info)
        # * we're doing a check mode test
        # In those cases we do an ls-remote
        if module.check_mode or not allow_clone:
            remote_head = get_remote_head(git_path, module, dest, version, repo, bare)
            result.update(changed=True, after=remote_head)
            if module._diff:
                diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
                if diff:
                    result['diff'] = diff
            module.exit_json(**result)
        # there's no git config, so clone
        clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec, verify_commit)
    elif not update:
        # Just return having found a repo already in the dest path
        # this does no checking that the repo is the actual repo
        # requested.
        result['before'] = get_version(module, git_path, dest)
        result.update(after=result['before'])
        module.exit_json(**result)
    else:
        # else do a pull
        local_mods = has_local_mods(module, git_path, dest, bare)
        result['before'] = get_version(module, git_path, dest)
        if local_mods:
            # failure should happen regardless of check mode
            if not force:
                module.fail_json(msg="Local modifications exist in repository (force=no).", **result)
            # if force and in non-check mode, do a reset
            if not module.check_mode:
                reset(git_path, module, dest)
                result.update(changed=True, msg='Local modifications exist.')

        # exit if already at desired sha version
        if module.check_mode:
            remote_url = get_remote_url(git_path, module, dest, remote)
            remote_url_changed = remote_url and remote_url != repo and unfrackgitpath(remote_url) != unfrackgitpath(repo)
        else:
            remote_url_changed = set_remote_url(git_path, module, repo, dest, remote)
        result.update(remote_url_changed=remote_url_changed)

        if module.check_mode:
            remote_head = get_remote_head(git_path, module, dest, version, remote, bare)
            result.update(changed=(result['before'] != remote_head or remote_url_changed), after=remote_head)
            # FIXME: This diff should fail since the new remote_head is not fetched yet?!
            if module._diff:
                diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
                if diff:
                    result['diff'] = diff
            module.exit_json(**result)
        else:
            fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used)

    result['after'] = get_version(module, git_path, dest)

    # switch to version specified regardless of whether
    # we got new revisions from the repository
    if not bare:
        switch_version(git_path, module, dest, remote, version, verify_commit, depth)

    # Deal with submodules
    submodules_updated = False
    if recursive and not bare:
        submodules_updated = submodules_fetch(git_path, module, remote, track_submodules, dest)
        if submodules_updated:
            result.update(submodules_changed=submodules_updated)

        if module.check_mode:
            # NOTE(review): this looks unreachable -- every check-mode path
            # above exits via exit_json() before getting here; confirm before
            # removing.
            result.update(changed=True, after=remote_head)
            module.exit_json(**result)

        # Switch to version specified
        submodule_update(git_path, module, dest, track_submodules, force=force)

    # determine if we changed anything
    if result['before'] != result['after'] or local_mods or submodules_updated or remote_url_changed:
        result.update(changed=True)
        if module._diff:
            diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
            if diff:
                result['diff'] = diff

    # cleanup the wrapper script
    if ssh_wrapper:
        try:
            os.remove(ssh_wrapper)
        except OSError:
            # No need to fail if the file already doesn't exist
            pass

    module.exit_json(**result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
Sodki/ansible
|
lib/ansible/modules/source_control/git.py
|
Python
|
gpl-3.0
| 42,171
|
# Copyright 1999-2009 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import copy
from portage.cache import template
class database(template.database):
    """In-memory, non-persistent metadata cache backend.

    Entries live only for the lifetime of the process, so every write is
    immediately "committed" and values are stored as live Python objects.
    """
    # No separate commit step: mutations take effect immediately.
    autocommits = True
    # Values never touch disk, so eclass data is kept unserialized.
    serialize_eclasses = False

    def __init__(self, *args, **config):
        # A volatile cache never touches the filesystem, so the "gid"
        # permissions option is meaningless here -- drop it before passing
        # the rest of the options along.
        config.pop("gid", None)
        super(database, self).__init__(*args, **config)
        self._data = {}
        # Bind dict methods directly on the instance so iteration, deletion
        # and membership tests delegate straight to the backing dict.
        self.__iter__ = self._data.__iter__
        self._delitem = self._data.__delitem__
        self.__contains__ = self._data.__contains__

    def _setitem(self, name, values):
        # Deep-copy on write so callers cannot mutate cached state later.
        self._data[name] = copy.deepcopy(values)

    def _getitem(self, cpv):
        # Deep-copy on read for the same reason.
        return copy.deepcopy(self._data[cpv])
|
fastinetserver/portage-idfetch
|
pym/portage/cache/volatile.py
|
Python
|
gpl-2.0
| 644
|
#!/usr/bin/env python
'''
Builds synthetic feature file that includes all core, provider, and application
features, so that we can pre-stage all bundles required to run ONOS off-line.
'''
import os
import xml.etree.ElementTree as ET
# Namespaced XML tag of a <feature> element in a Karaf features file.
FEATURE_TAG = '{http://karaf.apache.org/xmlns/features/v1.2.0}feature'
# Output document path, relative to this script's directory.
STAGED_REPOS = 'target/staged-repos.xml'

if 'ONOS_ROOT' in os.environ:
    ONOS_ROOT = os.environ['ONOS_ROOT']
else:
    # fallback to working directory if ONOS_ROOT is not set
    ONOS_ROOT = os.getcwd()
def findFeatureFiles(path=ONOS_ROOT):
    """Yield paths of generated features.xml files found under `path`.

    Only files inside 'target' directories are yielded; 'target' directories
    with no sibling pom.xml are pruned from the walk, and files under a
    'classes' directory (maven-plugin copies) are skipped.
    """
    for dirpath, dirnames, filenames in os.walk(path):
        # Prune target dirs that have no pom.xml next to them.
        if 'pom.xml' not in filenames and 'target' in dirnames:
            dirnames.remove('target')
        if '/target' not in dirpath:
            continue
        if '/classes/' in dirpath:
            # filter out features.xml for maven-plugin
            continue
        for name in filenames:
            if name.endswith('features.xml'):
                yield os.path.join(dirpath, name)
def featuresFromFile(file):
    """Return the list of feature names declared in the features file `file`."""
    root = ET.parse(file).getroot()
    return [node.attrib['name'] for node in root.findall(FEATURE_TAG)]
# Script body: build one synthetic feature aggregating every discovered
# feature, write it (plus repository references) to STAGED_REPOS.
# NOTE: this file is Python 2 (print statement below).
if __name__ == '__main__':
    outputTree = ET.Element('features')
    # Single uber-feature that depends on every feature found in the tree.
    uberFeature = ET.Element('feature', attrib={'name' : 'onos-uber-synthetic'})
    for file in findFeatureFiles():
        features = featuresFromFile(file)
        if len(features) > 0:
            # Reference the source features file as a repository entry.
            ET.SubElement(outputTree, 'repository').text = 'file:%s' % file
            for feature in features:
                ET.SubElement(uberFeature, 'feature').text = feature
    outputTree.append(uberFeature)
    outputFile = os.path.join(os.path.dirname(os.path.realpath(__file__)), STAGED_REPOS)
    outputDir = os.path.dirname(outputFile)
    if not os.path.exists(outputDir):
        os.mkdir(outputDir)
    ET.ElementTree(outputTree).write(outputFile)
    import sys
    if '-d' in sys.argv:
        # -------- TODO for debug only --------
        def indent(elem, level=0):
            #function borrowed from: http://effbot.org/zone/element-lib.htm#prettyprint
            i = "\n" + level*" "
            if len(elem):
                if not elem.text or not elem.text.strip():
                    elem.text = i + " "
                if not elem.tail or not elem.tail.strip():
                    elem.tail = i
                for elem in elem:
                    indent(elem, level+1)
                if not elem.tail or not elem.tail.strip():
                    elem.tail = i
            else:
                if level and (not elem.tail or not elem.tail.strip()):
                    elem.tail = i
        print 'Writing to file:', outputFile
        indent(outputTree)
        ET.dump(outputTree)
|
planoAccess/clonedONOS
|
tools/package/karaf-assembly/onos-assembly.py
|
Python
|
apache-2.0
| 2,879
|
# -*- coding: utf-8 -*-
import pytest
from tests.utils import Rule, CorrectedCommand
from thefuck import corrector, const
from thefuck.system import Path
from thefuck.types import Command
from thefuck.corrector import get_corrected_commands, organize_commands
class TestGetRules(object):
    """Tests for corrector.get_rules() rule discovery and filtering."""

    @pytest.fixture
    def glob(self, mocker):
        """Patch Path.glob; the returned callable sets the next glob() result."""
        results = {}
        mocker.patch('thefuck.system.Path.glob',
                     new_callable=lambda: lambda *_: results.pop('value', []))
        return lambda value: results.update({'value': value})

    @pytest.fixture(autouse=True)
    def load_source(self, monkeypatch):
        # Stub module loading: each rule file becomes a Rule named after the
        # first argument (the file stem).
        monkeypatch.setattr('thefuck.types.load_source',
                            lambda x, _: Rule(x))

    def _compare_names(self, rules, names):
        # Order-insensitive comparison of loaded rule names.
        assert {r.name for r in rules} == set(names)

    @pytest.mark.parametrize('paths, conf_rules, exclude_rules, loaded_rules', [
        (['git.py', 'bash.py'], const.DEFAULT_RULES, [], ['git', 'bash']),
        (['git.py', 'bash.py'], ['git'], [], ['git']),
        (['git.py', 'bash.py'], const.DEFAULT_RULES, ['git'], ['bash']),
        (['git.py', 'bash.py'], ['git'], ['git'], [])])
    def test_get_rules(self, glob, settings, paths, conf_rules, exclude_rules,
                       loaded_rules):
        glob([Path(path) for path in paths])
        settings.update(rules=conf_rules,
                        priority={},
                        exclude_rules=exclude_rules)
        rules = corrector.get_rules()
        self._compare_names(rules, loaded_rules)
def test_get_corrected_commands(mocker):
    """Matching rules produce corrections, ordered by rule priority."""
    command = Command('test', 'test')
    stub_rules = [
        Rule(match=lambda _: False),
        Rule(match=lambda _: True,
             get_new_command=lambda c: c.script + '!', priority=100),
        Rule(match=lambda _: True,
             get_new_command=lambda c: [c.script + '@', c.script + ';'],
             priority=60),
    ]
    mocker.patch('thefuck.corrector.get_rules', return_value=stub_rules)
    corrected_scripts = [c.script for c in get_corrected_commands(command)]
    assert corrected_scripts == ['test!', 'test@', 'test;']
def test_organize_commands():
    """Ensures that the function removes duplicates and sorts commands."""
    raw_commands = iter([
        CorrectedCommand('ls'),
        CorrectedCommand('ls -la', priority=9000),
        CorrectedCommand('ls -lh', priority=100),
        CorrectedCommand(u'echo café', priority=200),
        CorrectedCommand('ls -lh', priority=9999),  # duplicate script, dropped
    ])
    expected = [
        CorrectedCommand('ls'),
        CorrectedCommand('ls -lh', priority=100),
        CorrectedCommand(u'echo café', priority=200),
        CorrectedCommand('ls -la', priority=9000),
    ]
    assert list(organize_commands(raw_commands)) == expected
|
SimenB/thefuck
|
tests/test_corrector.py
|
Python
|
mit
| 2,735
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Miscellaneous stuff for coverage.py."""
import errno
import hashlib
import inspect
import locale
import os
import sys
import types
from coverage import env
from coverage.backward import string_class, to_bytes, unicode_class
ISOLATED_MODULES = {}
def isolate_module(mod):
    """Copy a module so that we are isolated from aggressive mocking.

    If a test suite mocks os.path.exists (for example), and then we need to use
    it during the test, everything will get tangled up if we use their mock.
    Making a copy of the module when we import it will isolate coverage.py from
    those complications.
    """
    try:
        return ISOLATED_MODULES[mod]
    except KeyError:
        pass
    clone = types.ModuleType(mod.__name__)
    # Register before copying so recursive references terminate.
    ISOLATED_MODULES[mod] = clone
    for attr_name in dir(mod):
        attr_value = getattr(mod, attr_name)
        if isinstance(attr_value, types.ModuleType):
            attr_value = isolate_module(attr_value)
        setattr(clone, attr_name, attr_value)
    return clone
# Replace `os` with a mocking-proof copy for the rest of this module.
os = isolate_module(os)

# Use PyContracts for assertion testing on parameters and returns, but only if
# we are running our own test suite.
if env.TESTING:
    from contracts import contract              # pylint: disable=unused-import
    from contracts import new_contract
    try:
        # Define contract words that PyContract doesn't have.
        new_contract('bytes', lambda v: isinstance(v, bytes))
        if env.PY3:
            new_contract('unicode', lambda v: isinstance(v, unicode_class))
    except ValueError:
        # During meta-coverage, this module is imported twice, and PyContracts
        # doesn't like redefining contracts. It's OK.
        pass
else:                                           # pragma: not covered
    # We aren't using real PyContracts, so just define a no-op decorator as a
    # stunt double.
    def contract(**unused):
        """Dummy no-op implementation of `contract`."""
        return lambda func: func
def nice_pair(pair):
    """Make a nice string representation of a pair of numbers.

    If the numbers are equal, just return the number, otherwise return the pair
    with a dash between them, indicating the range.
    """
    lo, hi = pair
    return "%d" % lo if lo == hi else "%d-%d" % (lo, hi)
def format_lines(statements, lines):
    """Nicely format a list of line numbers.

    Format a list of line numbers for printing by coalescing groups of lines as
    long as the lines represent consecutive statements.  This will coalesce
    even if there are gaps between statements.

    For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and
    `lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14".
    """
    pairs = []
    i = 0
    j = 0
    start = None
    statements = sorted(statements)
    lines = sorted(lines)
    # Two-pointer merge: advance through statements, consuming matching
    # entries from `lines`, and collapse consecutive matches into runs.
    while i < len(statements) and j < len(lines):
        if statements[i] == lines[j]:
            # This statement is in `lines`: open a new run or extend it.
            if start is None:
                start = lines[j]
            end = lines[j]
            j += 1
        elif start:
            # A statement not present in `lines` ends the current run.
            pairs.append((start, end))
            start = None
        i += 1
    if start:
        # Close the run still open when the input was exhausted.
        pairs.append((start, end))
    ret = ', '.join(map(nice_pair, pairs))
    return ret
def expensive(fn):
    """A decorator to indicate that a method shouldn't be called more than once.

    Normally, this does nothing.  During testing, this raises an exception if
    called more than once.
    """
    if not env.TESTING:
        return fn
    guard_attr = "_once_" + fn.__name__

    def _wrapped(self):
        """Inner function that checks the cache."""
        if hasattr(self, guard_attr):
            raise Exception("Shouldn't have called %s more than once" % fn.__name__)
        setattr(self, guard_attr, True)
        return fn(self)
    return _wrapped
def bool_or_none(b):
    """Return bool(b), but preserve None."""
    return None if b is None else bool(b)
def join_regex(regexes):
    """Combine a list of regexes into one that matches any of them."""
    wrapped = ["(?:%s)" % rx for rx in regexes]
    return "|".join(wrapped)
def file_be_gone(path):
    """Remove a file, and don't get annoyed if it doesn't exist."""
    try:
        os.remove(path)
    except OSError as e:
        if e.errno == errno.ENOENT:
            # Already gone: that's the goal, so stay quiet.
            return
        raise
def output_encoding(outfile=None):
    """Determine the encoding to use for output written to `outfile` or stdout."""
    stream = sys.stdout if outfile is None else outfile
    # Prefer the stream's own encoding, then the real stdout's, then the
    # locale default.
    for candidate in (getattr(stream, "encoding", None),
                      getattr(sys.__stdout__, "encoding", None)):
        if candidate:
            return candidate
    return locale.getpreferredencoding()
class Hasher(object):
    """Hashes Python data into md5."""

    def __init__(self):
        self.md5 = hashlib.md5()

    def update(self, v):
        """Add `v` to the hash, recursively if needed."""
        # Mix in the type name first so that, e.g., the string "1" and the
        # int 1 produce different digests.
        self.md5.update(to_bytes(str(type(v))))
        if isinstance(v, string_class):
            self.md5.update(to_bytes(v))
        elif isinstance(v, bytes):
            self.md5.update(v)
        elif v is None:
            # The type name mixed in above already distinguishes None.
            pass
        elif isinstance(v, (int, float)):
            self.md5.update(to_bytes(str(v)))
        elif isinstance(v, (tuple, list)):
            for e in v:
                self.update(e)
        elif isinstance(v, dict):
            keys = v.keys()
            # Sort keys so logically-equal dicts hash identically.
            for k in sorted(keys):
                self.update(k)
                self.update(v[k])
        else:
            # Arbitrary objects: hash their non-dunder, non-callable
            # attributes by name and value.
            for k in dir(v):
                if k.startswith('__'):
                    continue
                a = getattr(v, k)
                if inspect.isroutine(a):
                    continue
                self.update(k)
                self.update(a)

    def hexdigest(self):
        """Retrieve the hex digest of the hash."""
        return self.md5.hexdigest()
def _needs_to_implement(that, func_name):
"""Helper to raise NotImplementedError in interface stubs."""
if hasattr(that, "_coverage_plugin_name"):
thing = "Plugin"
name = that._coverage_plugin_name
else:
thing = "Class"
klass = that.__class__
name = "{klass.__module__}.{klass.__name__}".format(klass=klass)
raise NotImplementedError(
"{thing} {name!r} needs to implement {func_name}()".format(
thing=thing, name=name, func_name=func_name
)
)
# Exception hierarchy for coverage.py, all rooted at CoverageException so
# callers can catch one base class.
class CoverageException(Exception):
    """An exception specific to coverage.py."""
    pass


class NoSource(CoverageException):
    """We couldn't find the source for a module."""
    pass


class NoCode(NoSource):
    """We couldn't find any code at all."""
    pass


class NotPython(CoverageException):
    """A source file turned out not to be parsable Python."""
    pass


class ExceptionDuringRun(CoverageException):
    """An exception happened while running customer code.

    Construct it with three arguments, the values from `sys.exc_info`.
    """
    pass
|
thundernet8/WRGameVideos-API
|
venv/lib/python2.7/site-packages/coverage/misc.py
|
Python
|
gpl-2.0
| 7,159
|
import asyncio
import builtins
from .base import NotFoundError, ParametersError
class _MethodCall:
    """Proxy that turns attribute access into a dotted RPC method name.

    Each attribute access returns a *new* proxy with the name segment
    appended, so ``client.call.ns.method(...)`` invokes the remote method
    "ns.method".  Calling the proxy sends the request and returns a Task
    wrapping the response future (with an optional timeout).
    """
    __slots__ = ("_proto", "_timeout", "_names")

    def __init__(self, proto, timeout=None, names=()):
        self._proto = proto
        self._timeout = timeout
        self._names = names

    def __getattr__(self, name):
        # Immutable-style chaining: build a fresh proxy rather than
        # mutating this one.
        return self.__class__(self._proto, self._timeout, self._names + (name,))

    def __call__(self, *args, **kwargs):
        if not self._names:
            raise ValueError("RPC method name is empty")
        fut = self._proto.call(".".join(self._names), args, kwargs)
        loop = self._proto.loop
        # NOTE(review): the explicit `loop` keyword arguments were deprecated
        # in Python 3.8 and removed in 3.10 -- confirm the supported Python
        # range before running this on modern interpreters.
        return asyncio.Task(
            asyncio.wait_for(fut, timeout=self._timeout, loop=loop), loop=loop
        )
def _fill_error_table():
    """Build the dotted-name -> exception-class table used to translate
    remote errors back into local exception types."""
    # Fill error table with standard exceptions
    table = {}
    for prefix, namespace in (("builtins.", builtins), ("asyncio.", asyncio)):
        for attr in dir(namespace):
            obj = getattr(namespace, attr)
            if isinstance(obj, type) and issubclass(obj, Exception):
                table[prefix + attr] = obj
    table["aiozmq.rpc.base.NotFoundError"] = NotFoundError
    table["aiozmq.rpc.base.ParametersError"] = ParametersError
    return table
|
aio-libs/aiozmq
|
aiozmq/rpc/util.py
|
Python
|
bsd-2-clause
| 1,374
|
#pylint: skip-file
"""dnstwister web app.
This is the pattern from http://flask.pocoo.org/docs/0.11/patterns/packages/
which generates circular imports hence the comment at the top to just ignore
this file.
"""
import flask
import logging
# The single Flask application object for the package.
app = flask.Flask(__name__)

# Logging
app.logger.setLevel(logging.INFO)

# Blueprints
import dnstwister.api
# NOTE: because this module *is* the `dnstwister` package, importing the
# submodule above also binds the bare name `api` in this namespace.
app.register_blueprint(api.app, url_prefix='/api')

# Import modules using dnstwister.app
# (placed after `app` is created, per the Flask "packages" pattern, to
# avoid circular-import failures)
import dnstwister.tools.template
import dnstwister.views.www.analyse
import dnstwister.views.www.index
import dnstwister.views.www.search

# Filters
app.jinja_env.filters['domain_renderer'] = tools.template.domain_renderer
app.jinja_env.filters['domain_encoder'] = tools.template.domain_encoder
|
thisismyrobot/dnstwister
|
dnstwister/__init__.py
|
Python
|
unlicense
| 763
|
# Package metadata.
__version__ = '1.0'
__author__ = 'Outernet Inc <apps@outernet.is>'

# Re-export the query-builder API at package level.
from .builder import *
|
Outernet-Project/sqlize-pg
|
sqlize_pg/__init__.py
|
Python
|
gpl-3.0
| 91
|
from __future__ import unicode_literals
import logging
import swapper
from django.conf import settings
from django.db import models
from accelerator_abstract.models.accelerator_model import AcceleratorModel
logger = logging.getLogger(__file__)

# User-facing message for a judge who opted out of a round.
NOT_AVAILABLE_MSG = ("You have indicated that you are not available for "
                     "this round.")
class BaseJudgeRoundCommitment(AcceleratorModel):
    """Abstract model recording a judge's commitment to a judging round."""
    # The user acting as judge.
    judge = models.ForeignKey(settings.AUTH_USER_MODEL,
                              on_delete=models.CASCADE)
    # Swappable JudgingRound model from the same accelerator app.
    judging_round = models.ForeignKey(
        swapper.get_model_name(AcceleratorModel.Meta.app_label,
                               "JudgingRound"),
        on_delete=models.CASCADE)
    # True when the judge has committed to participate in the round.
    commitment_state = models.BooleanField(default=True)
    # NOTE(review): exact semantics of capacity vs. current_quota are not
    # visible here -- confirm against the concrete model / callers.
    capacity = models.IntegerField(default=0)
    current_quota = models.IntegerField(blank=True, null=True)

    class Meta(AcceleratorModel.Meta):
        db_table = 'accelerator_judgeroundcommitment'
        abstract = True
        verbose_name_plural = ("Judge commitment to participate in a "
                               "Judging Round")
        # At most one commitment per judge per judging round.
        unique_together = ('judge', 'judging_round')

    def __str__(self):
        return "%s commited to %s" % (self.judge, self.judging_round)
|
masschallenge/django-accelerator
|
accelerator_abstract/models/base_judge_round_commitment.py
|
Python
|
mit
| 1,258
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import _, api, exceptions, fields, models, modules
from odoo.addons.base.models.res_users import is_selection_groups
class Users(models.Model):
    """ Update of res.users class
        - add a preference about sending emails about notifications
        - make a new user follow itself
        - add a welcome message
        - add suggestion preference
        - if adding groups to a user, check mail.channels linked to this user
          group, and the user. This is done by overriding the write method.
    """
    _name = 'res.users'
    _inherit = ['res.users']
    _description = 'Users'

    # Per-user policy for delivering Chatter notifications.
    notification_type = fields.Selection([
        ('email', 'Handle by Emails'),
        ('inbox', 'Handle in Odoo')],
        'Notification', required=True, default='email',
        help="Policy on how to handle Chatter notifications:\n"
             "- Handle by Emails: notifications are sent to your email address\n"
             "- Handle in Odoo: notifications appear in your Odoo Inbox")
    # channel-specific: moderation
    is_moderator = fields.Boolean(string='Is moderator', compute='_compute_is_moderator')
    moderation_counter = fields.Integer(string='Moderation count', compute='_compute_moderation_counter')
    moderation_channel_ids = fields.Many2many(
        'mail.channel', 'mail_channel_moderator_rel',
        string='Moderated channels')

    @api.depends('moderation_channel_ids.moderation', 'moderation_channel_ids.moderator_ids')
    def _compute_is_moderator(self):
        """Flag users that moderate at least one channel with moderation on."""
        moderated = self.env['mail.channel'].search([
            ('id', 'in', self.mapped('moderation_channel_ids').ids),
            ('moderation', '=', True),
            ('moderator_ids', 'in', self.ids)
        ])
        user_ids = moderated.mapped('moderator_ids')
        for user in self:
            user.is_moderator = user in user_ids

    def _compute_moderation_counter(self):
        """Count messages pending moderation per moderator (raw SQL)."""
        # NOTE(review): tuple(self.ids) is empty for an empty recordset,
        # producing invalid "IN ()" SQL -- confirm this compute is never
        # triggered on an empty recordset.
        self._cr.execute("""
            SELECT channel_moderator.res_users_id, COUNT(msg.id)
            FROM "mail_channel_moderator_rel" AS channel_moderator
            JOIN "mail_message" AS msg
            ON channel_moderator.mail_channel_id = msg.res_id
            AND channel_moderator.res_users_id IN %s
            AND msg.model = 'mail.channel'
            AND msg.moderation_status = 'pending_moderation'
            GROUP BY channel_moderator.res_users_id""", [tuple(self.ids)])
        result = dict(self._cr.fetchall())
        for user in self:
            user.moderation_counter = result.get(user.id, 0)

    def __init__(self, pool, cr):
        """ Override of __init__ to add access rights on notification_email_send
            fields. Access rights are disabled by default, but allowed on some
            specific fields defined in self.SELF_{READ/WRITE}ABLE_FIELDS.
        """
        init_res = super(Users, self).__init__(pool, cr)
        # duplicate list to avoid modifying the original reference
        type(self).SELF_WRITEABLE_FIELDS = list(self.SELF_WRITEABLE_FIELDS)
        type(self).SELF_WRITEABLE_FIELDS.extend(['notification_type'])
        # duplicate list to avoid modifying the original reference
        type(self).SELF_READABLE_FIELDS = list(self.SELF_READABLE_FIELDS)
        type(self).SELF_READABLE_FIELDS.extend(['notification_type'])
        return init_res

    @api.model_create_multi
    def create(self, vals_list):
        """Require a login and auto-subscribe new users to their groups' channels."""
        for values in vals_list:
            if not values.get('login', False):
                action = self.env.ref('base.action_res_users')
                msg = _("You cannot create a new user from here.\n To create new user please go to configuration panel.")
                raise exceptions.RedirectWarning(msg, action.id, _('Go to the configuration panel'))
        users = super(Users, self).create(vals_list)
        # Auto-subscribe to channels
        self.env['mail.channel'].search([('group_ids', 'in', users.groups_id.ids)])._subscribe_users()
        return users

    def write(self, vals):
        """On group changes, subscribe users to matching channels; on
        deactivation, drop private-channel subscriptions."""
        write_res = super(Users, self).write(vals)
        if 'active' in vals and not vals['active']:
            self._unsubscribe_from_channels()
        # Selection-group pseudo-fields (sel_groups_*) carry a single group id.
        sel_groups = [vals[k] for k in vals if is_selection_groups(k) and vals[k]]
        if vals.get('groups_id'):
            # form: {'group_ids': [(3, 10), (3, 3), (4, 10), (4, 3)]} or {'group_ids': [(6, 0, [ids]}
            user_group_ids = [command[1] for command in vals['groups_id'] if command[0] == 4]
            user_group_ids += [id for command in vals['groups_id'] if command[0] == 6 for id in command[2]]
            self.env['mail.channel'].search([('group_ids', 'in', user_group_ids)])._subscribe_users()
        elif sel_groups:
            self.env['mail.channel'].search([('group_ids', 'in', sel_groups)])._subscribe_users()
        return write_res

    def unlink(self):
        # Drop private-channel subscriptions before deleting the user.
        self._unsubscribe_from_channels()
        return super().unlink()

    def _unsubscribe_from_channels(self):
        """ This method un-subscribes users from private mail channels. Main purpose of this
            method is to prevent sending internal communication to archived / deleted users.
            We do not un-subscribes users from public channels because in most common cases,
            public channels are mailing list (e-mail based) and so users should always receive
            updates from public channels until they manually un-subscribe themselves.
        """
        self.mapped('partner_id.channel_ids').filtered(lambda c: c.public != 'public').write({
            'channel_partner_ids': [(3, pid) for pid in self.mapped('partner_id').ids]
        })

    @api.model
    def systray_get_activities(self):
        """Summarize the current user's activities (today/overdue/planned) per model."""
        query = """SELECT m.id, count(*), act.res_model as model,
                    CASE
                        WHEN %(today)s::date - act.date_deadline::date = 0 Then 'today'
                        WHEN %(today)s::date - act.date_deadline::date > 0 Then 'overdue'
                        WHEN %(today)s::date - act.date_deadline::date < 0 Then 'planned'
                    END AS states
                FROM mail_activity AS act
                JOIN ir_model AS m ON act.res_model_id = m.id
                WHERE user_id = %(user_id)s
                GROUP BY m.id, states, act.res_model;
                """
        self.env.cr.execute(query, {
            'today': fields.Date.context_today(self),
            'user_id': self.env.uid,
        })
        activity_data = self.env.cr.dictfetchall()
        model_ids = [a['id'] for a in activity_data]
        model_names = {n[0]: n[1] for n in self.env['ir.model'].browse(model_ids).name_get()}
        user_activities = {}
        for activity in activity_data:
            if not user_activities.get(activity['model']):
                module = self.env[activity['model']]._original_module
                icon = module and modules.module.get_module_icon(module)
                user_activities[activity['model']] = {
                    'name': model_names[activity['id']],
                    'model': activity['model'],
                    'type': 'activity',
                    'icon': icon,
                    'total_count': 0, 'today_count': 0, 'overdue_count': 0, 'planned_count': 0,
                }
            user_activities[activity['model']]['%s_count' % activity['states']] += activity['count']
            if activity['states'] in ('today', 'overdue'):
                # Only today/overdue contribute to the badge total.
                user_activities[activity['model']]['total_count'] += activity['count']
            user_activities[activity['model']]['actions'] = [{
                'icon': 'fa-clock-o',
                'name': 'Summary',
            }]
        return list(user_activities.values())
class res_groups_mail_channel(models.Model):
    """ Update of res.groups class
        - if adding users from a group, check mail.channels linked to this user
          group and subscribe them. This is done by overriding the write method.
    """
    _name = 'res.groups'
    _inherit = 'res.groups'
    _description = 'Access Groups'

    def write(self, vals, context=None):
        """Subscribe the groups' users to bound channels when membership changes.

        `context` is kept (unused) for backward compatibility with callers.
        """
        write_res = super(res_groups_mail_channel, self).write(vals)
        if vals.get('users'):
            # Membership changed: re-subscribe users of these groups to every
            # channel bound to them.  Only the *presence* of a 'users' command
            # matters -- the subscription helper recomputes memberships from
            # the database.  (Removed a dead computation of the added user ids
            # that was never used.)
            self.env['mail.channel'].search([('group_ids', 'in', self._ids)])._subscribe_users()
        return write_res
|
ygol/odoo
|
addons/mail/models/res_users.py
|
Python
|
agpl-3.0
| 8,616
|
# encoding: utf-8
# Copyright (c) 2001-2021, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import
# All object kinds a shape filter may target.
ENUM_SHAPE_SCOPE = ('admin', 'street', 'addr', 'poi', 'stop')
# Default scope: everything except 'stop'.
DEFAULT_SHAPE_SCOPE = ('admin', 'street', 'addr', 'poi')
# Identifiers of the supported external realtime services.
ENUM_EXTERNAL_SERVICE = ('free_floatings', 'vehicle_occupancies', 'realtime_proxies', 'vehicle_positions')
|
CanalTP/navitia
|
source/navitiacommon/navitiacommon/constants.py
|
Python
|
agpl-3.0
| 1,530
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
# pylint: disable=line-too-long
r"""Run training loop.
"""
# pylint: enable=line-too-long
import os
import random
import time
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
from tf_agents.policies import random_tf_policy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.specs.tensor_spec import TensorSpec
import tqdm
from representation_batch_rl.batch_rl import asac
from representation_batch_rl.batch_rl import awr
from representation_batch_rl.batch_rl import ddpg
from representation_batch_rl.batch_rl import evaluation
from representation_batch_rl.batch_rl import pcl
from representation_batch_rl.batch_rl import sac
from representation_batch_rl.batch_rl import sac_v1
from representation_batch_rl.batch_rl.image_utils import image_aug
from representation_batch_rl.twin_sac import utils
FLAGS = flags.FLAGS
# --- Environment / reproducibility ---
flags.DEFINE_string('env_name', 'pixels-dm-cartpole-swingup',
                    'Environment for training/evaluation.')
flags.DEFINE_integer('seed', 42, 'Fixed random seed for training.')
# --- Optimization hyper-parameters ---
flags.DEFINE_float('actor_lr', 3e-4, 'Actor learning rate.')
flags.DEFINE_float('alpha_lr', 3e-4, 'Temperature learning rate.')
flags.DEFINE_float('critic_lr', 3e-4, 'Critic learning rate.')
flags.DEFINE_integer('deployment_batch_size', 1, 'Batch size.')
flags.DEFINE_integer('sample_batch_size', 256, 'Batch size.')
flags.DEFINE_float('discount', 0.99, 'Discount used for returns.')
flags.DEFINE_float('tau', 0.005,
                   'Soft update coefficient for the target network.')
# --- Replay buffer / training schedule ---
flags.DEFINE_integer('max_timesteps', 200_000, 'Max timesteps to train.')
flags.DEFINE_integer('max_length_replay_buffer', 100_000,
                     'Max replay buffer size (image observations use 100k).')
flags.DEFINE_integer('num_random_actions', 10_000,
                     'Fill replay buffer with N random actions.')
flags.DEFINE_integer('start_training_timesteps', 10_000,
                     'Start training when replay buffer contains N timesteps.')
# --- Logging / evaluation ---
flags.DEFINE_string('save_dir', '/tmp/save/', 'Directory to save results to.')
flags.DEFINE_integer('log_interval', 1_000, 'Log every N timesteps.')
flags.DEFINE_integer('eval_interval', 10_000, 'Evaluate every N timesteps.')
flags.DEFINE_integer('action_repeat', 8,
                     '(optional) action repeat used when instantiating env.')
flags.DEFINE_integer('frame_stack', 0,
                     '(optional) frame stack used when instantiating env.')
# Algorithm choice; main() dispatches on *substrings* of this value
# (e.g. 'crossnorm_sac' takes the 'sac' branch with cross_norm=True).
flags.DEFINE_enum('algo_name', 'sac', [
    'ddpg',
    'crossnorm_ddpg',
    'sac',
    'pc_sac',
    'pcl',
    'crossnorm_sac',
    'crr',
    'awr',
    'sac_v1',
    'asac',
], 'Algorithm.')
flags.DEFINE_boolean('eager', False, 'Execute functions eagerly.')
def main(_):
  """Online RL training loop: collect experience, train, and evaluate.

  Seeds all RNGs, builds train/eval environments, instantiates the agent
  selected by --algo_name, then alternates environment interaction with
  gradient updates, logging to TensorBoard and saving weights at every
  evaluation interval.
  """
  if FLAGS.eager:
    tf.config.experimental_run_functions_eagerly(FLAGS.eager)
  tf.random.set_seed(FLAGS.seed)
  np.random.seed(FLAGS.seed)
  random.seed(FLAGS.seed)
  # NOTE(review): this local is captured *before* the per-domain
  # set_default() calls below, so those defaults change FLAGS.action_repeat
  # but not this value (envs below are built with the original) — confirm
  # this is intended.
  action_repeat = FLAGS.action_repeat
  _, _, domain_name, _ = FLAGS.env_name.split('-')
  if domain_name in ['cartpole']:
    FLAGS.set_default('action_repeat', 8)
  elif domain_name in ['reacher', 'cheetah', 'ball_in_cup', 'hopper']:
    FLAGS.set_default('action_repeat', 4)
  elif domain_name in ['finger', 'walker']:
    FLAGS.set_default('action_repeat', 2)
  # Keep the raw-frame budget comparable across action-repeat settings.
  FLAGS.set_default('max_timesteps', FLAGS.max_timesteps // FLAGS.action_repeat)
  env = utils.load_env(
      FLAGS.env_name, FLAGS.seed, action_repeat, FLAGS.frame_stack)
  eval_env = utils.load_env(
      FLAGS.env_name, FLAGS.seed, action_repeat, FLAGS.frame_stack)
  # Rank-3 observations are treated as images (presumably H, W, C).
  is_image_obs = (isinstance(env.observation_spec(), TensorSpec) and
                  len(env.observation_spec().shape) == 3)
  spec = (
      env.observation_spec(),
      env.action_spec(),
      env.reward_spec(),
      env.reward_spec(),  # discount spec (same scalar shape as reward)
      env.observation_spec()  # next observation spec
  )
  replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
      spec, batch_size=1, max_length=FLAGS.max_length_replay_buffer)
  @tf.function
  def add_to_replay(state, action, reward, discount, next_states):
    # Stores one (s, a, r, d, s') transition; batch_size=1 buffer.
    replay_buffer.add_batch((state, action, reward, discount, next_states))
  # NOTE(review): FLAGS.xm_parameters is not registered in this file's
  # DEFINE_* calls — presumably defined in a shared module; verify.
  hparam_str = utils.make_hparam_string(
      FLAGS.xm_parameters, seed=FLAGS.seed, env_name=FLAGS.env_name,
      algo_name=FLAGS.algo_name)
  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.save_dir, 'tb', hparam_str))
  results_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.save_dir, 'results', hparam_str))
  # Substring dispatch: order matters ('sac_v1' and 'asac' must be
  # checked before the plain 'sac' branch).
  if 'ddpg' in FLAGS.algo_name:
    model = ddpg.DDPG(
        env.observation_spec(),
        env.action_spec(),
        cross_norm='crossnorm' in FLAGS.algo_name)
  elif 'crr' in FLAGS.algo_name:
    model = awr.AWR(
        env.observation_spec(),
        env.action_spec(), f='bin_max')
  elif 'awr' in FLAGS.algo_name:
    model = awr.AWR(
        env.observation_spec(),
        env.action_spec(), f='exp_mean')
  elif 'sac_v1' in FLAGS.algo_name:
    model = sac_v1.SAC(
        env.observation_spec(),
        env.action_spec(),
        target_entropy=-env.action_spec().shape[0])
  elif 'asac' in FLAGS.algo_name:
    model = asac.ASAC(
        env.observation_spec(),
        env.action_spec(),
        target_entropy=-env.action_spec().shape[0])
  elif 'sac' in FLAGS.algo_name:
    model = sac.SAC(
        env.observation_spec(),
        env.action_spec(),
        target_entropy=-env.action_spec().shape[0],
        cross_norm='crossnorm' in FLAGS.algo_name,
        pcl_actor_update='pc' in FLAGS.algo_name)
  elif 'pcl' in FLAGS.algo_name:
    model = pcl.PCL(
        env.observation_spec(),
        env.action_spec(),
        target_entropy=-env.action_spec().shape[0])
  initial_collect_policy = random_tf_policy.RandomTFPolicy(
      env.time_step_spec(), env.action_spec())
  dataset = replay_buffer.as_dataset(
      num_parallel_calls=tf.data.AUTOTUNE,
      sample_batch_size=FLAGS.sample_batch_size)
  if is_image_obs:
    # Augment images as in DRQ.
    dataset = dataset.map(image_aug,
                          num_parallel_calls=tf.data.AUTOTUNE,
                          deterministic=False).prefetch(3)
  else:
    dataset = dataset.prefetch(3)
  def repack(*data):
    # Keep only the experience tuple, dropping replay-buffer sample info.
    return data[0]
  dataset = dataset.map(repack)
  replay_buffer_iter = iter(dataset)
  previous_time = time.time()
  timestep = env.reset()
  episode_return = 0
  episode_timesteps = 0
  # Logged step counters are expressed in raw environment frames.
  step_mult = 1 if action_repeat < 1 else action_repeat
  for i in tqdm.tqdm(range(FLAGS.max_timesteps)):
    if i % FLAGS.deployment_batch_size == 0:
      for _ in range(FLAGS.deployment_batch_size):
        if timestep.is_last():
          if episode_timesteps > 0:
            current_time = time.time()
            with summary_writer.as_default():
              tf.summary.scalar(
                  'train/returns',
                  episode_return,
                  step=(i + 1) * step_mult)
              tf.summary.scalar(
                  'train/FPS',
                  episode_timesteps / (current_time - previous_time),
                  step=(i + 1) * step_mult)
          timestep = env.reset()
          episode_return = 0
          episode_timesteps = 0
          previous_time = time.time()
        if (replay_buffer.num_frames() < FLAGS.num_random_actions or
            replay_buffer.num_frames() < FLAGS.deployment_batch_size):
          # Use policy only after the first deployment.
          policy_step = initial_collect_policy.action(timestep)
          action = policy_step.action
        else:
          action = model.actor(timestep.observation, sample=True)
        next_timestep = env.step(action)
        add_to_replay(timestep.observation, action, next_timestep.reward,
                      next_timestep.discount, next_timestep.observation)
        episode_return += next_timestep.reward[0]
        episode_timesteps += 1
        timestep = next_timestep
    if i + 1 >= FLAGS.start_training_timesteps:
      with summary_writer.as_default():
        info_dict = model.update_step(replay_buffer_iter)
    if (i + 1) % FLAGS.log_interval == 0:
      # NOTE(review): info_dict is unbound if log_interval fires before
      # start_training_timesteps; holds only when start <= log interval.
      with summary_writer.as_default():
        for k, v in info_dict.items():
          tf.summary.scalar(f'training/{k}', v, step=(i + 1) * step_mult)
    if (i + 1) % FLAGS.eval_interval == 0:
      logging.info('Performing policy eval.')
      average_returns, evaluation_timesteps = evaluation.evaluate(
          eval_env, model)
      with results_writer.as_default():
        tf.summary.scalar(
            'evaluation/returns', average_returns, step=(i + 1) * step_mult)
        tf.summary.scalar(
            'evaluation/length', evaluation_timesteps, step=(i+1) * step_mult)
      logging.info('Eval at %d: ave returns=%f, ave episode length=%f',
                   (i + 1) * step_mult, average_returns, evaluation_timesteps)
    if (i + 1) % FLAGS.eval_interval == 0:
      model.save_weights(
          os.path.join(FLAGS.save_dir, 'results',
                       FLAGS.env_name + '__' + str(i + 1)))
if __name__ == '__main__':
  app.run(main)
|
google-research/google-research
|
representation_batch_rl/batch_rl/train_eval_online.py
|
Python
|
apache-2.0
| 9,735
|
""" Pyjamas UI BuilderWidget: takes a PyJsGlade builder spec
and creates a widget.
Copyright (C) 2010 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
Create a BuilderWidget as follows:
from pyjamas.builder.Builder import Builder
from pyjamas.ui.BuilderWidget import BuilderWidget
either:
class ApplicationEventReceivingClassWhatever:
def onSomeRandomClickThing(self, sender):
print "some random widget was clicked, it was this one:", sender
app = ApplicationEventReceivingClassWhatever()
b = Builder()
xml = "<?xml .... ?><pyjsglade> .... </pyjsglade>"
bw = BuilderWidget(Builder=b,
EventReceiver=app,
BuilderText=xml_file,
InstanceName="WidgetListedInXmlFile")
or:
app = ApplicationEventReceivingClassWhatever()
xml = "<?xml .... ?><pyjsglade> .... </pyjsglade>"
b = Builder(xml)
bw = BuilderWidget(Builder=b,
EventReceiver=app,
InstanceName="WidgetListedInXmlFile")
or:
class BuilderWidgetWithIntegratedEventHandling(BuilderWidget):
def onSomeRandomClickThing(self, sender):
print "some random widget was clicked, it was this one:", sender
b = Builder(xml)
bw = BuilderWidgetWithIntegratedEventHandling(Builder=b,
InstanceName="WidgetListedInXmlFile")
or:
b = Builder()
bw = BuilderWidgetWithIntegratedEventHandling(Builder=b,
BuilderText=xml_file,
InstanceName="WidgetListedInXmlFile")
"""
from pyjamas.ui.Composite import Composite
class BuilderWidget(Composite):
    """Composite widget created from a PyJsGlade builder specification.

    The wrapped widget is built lazily: each setter records its value and
    then attempts creation, which only proceeds once a Builder, an
    instance name and an event receiver are all available.
    """

    def __init__(self, **kwargs):
        self.b = None
        self.text = None
        self.instance_name = None
        self.event_receiver = None
        Composite.__init__(self, **kwargs)

    def setBuilderText(self, text):
        """Record the raw builder XML, then try to build the widget."""
        self.text = text
        self.autoCreateInstance()

    def setBuilder(self, builder):
        """Record the Builder object, then try to build the widget."""
        self.b = builder
        self.autoCreateInstance()

    def setEventReceiver(self, event_receiver):
        """Set the instance receiving the events named in the builder.

        Passing in None (or any falsy value) makes this widget itself the
        event receiver.
        """
        self.event_receiver = event_receiver or self
        self.autoCreateInstance()

    def setInstanceName(self, instance_name):
        """Record the widget name to look up in the XML, then try to build."""
        self.instance_name = instance_name
        self.autoCreateInstance()

    def autoCreateInstance(self):
        """Create the widget instance once all prerequisites are set.

        Invoked from every setter because **kwargs goes through
        pyjamas.ui.Applier and the order of the setXXX calls (kwargs is a
        dictionary) cannot be guaranteed.
        """
        if self.b and self.text:
            self.b.setText(self.text)
        ready = self.b and self.instance_name and self.event_receiver
        if not ready or not self.b.builder_text:
            return
        created = self.b.createInstance(self.instance_name,
                                        self.event_receiver)
        self.initWidget(created)
|
minghuascode/pyj
|
library/pyjamas/ui/BuilderWidget.py
|
Python
|
apache-2.0
| 3,231
|
"""
This script makes a dataset of two million approximately whitened patches,
extracted at random uniformly from a downsampled version of the STL-10
unlabeled and train dataset.
It assumes that you have already run make_downsampled_stl10.py, which
downsamples the STL-10 images to 1/3 of their original resolution.
This script is intended to reproduce the preprocessing used by Adam Coates
et. al. in their work from the first half of 2011.
"""
from __future__ import print_function
from pylearn2.utils import serial
from pylearn2.datasets import preprocessing
from pylearn2.utils import string_utils as string
import numpy as np
import textwrap
def main():
    """Build a 2M-patch whitened 8x8 STL-10 dataset and save it to disk.

    Loads the pre-downsampled (32x32) STL-10 unlabeled+train sets
    (produced by make_downsampled_stl10.py), extracts two million random
    8x8 patches, contrast-normalizes and ZCA-whitens them, and serializes
    both the data and the fitted preprocessing pipeline under
    ${PYLEARN2_DATA_PATH}/stl10/stl10_patches_8x8.
    """
    data_dir = string.preprocess('${PYLEARN2_DATA_PATH}/stl10')
    print('Loading STL10-10 unlabeled and train datasets...')
    downsampled_dir = data_dir + '/stl10_32x32'
    data = serial.load(downsampled_dir + '/unlabeled.pkl')
    supplement = serial.load(downsampled_dir + '/train.pkl')
    print('Concatenating datasets...')
    data.set_design_matrix(np.concatenate((data.X, supplement.X), axis=0))
    # The train copy is no longer needed once concatenated.
    del supplement
    print("Preparing output directory...")
    patch_dir = data_dir + '/stl10_patches_8x8'
    serial.mkdir(patch_dir)
    # NOTE(review): the README text below says "6x6" patches while
    # patch_shape further down is (8, 8) — likely copied from the 6x6
    # script; the text should be verified/corrected upstream.
    README = open(patch_dir + '/README', 'w')
    README.write(textwrap.dedent("""
    The .pkl files in this directory may be opened in python using
    cPickle, pickle, or pylearn2.serial.load.
    data.pkl contains a pylearn2 Dataset object defining an unlabeled
    dataset of 2 million 6x6 approximately whitened, contrast-normalized
    patches drawn uniformly at random from a downsampled (to 32x32)
    version of the STL-10 train and unlabeled datasets.
    preprocessor.pkl contains a pylearn2 Pipeline object that was used
    to extract the patches and approximately whiten / contrast normalize
    them. This object is necessary when extracting features for
    supervised learning or test set classification, because the
    extracted features must be computed using inputs that have been
    whitened with the ZCA matrix learned and stored by this Pipeline.
    They were created with the pylearn2 script make_stl10_patches.py.
    All other files in this directory, including this README, were
    created by the same script and are necessary for the other files
    to function correctly.
    """))
    README.close()
    print("Preprocessing the data...")
    # Pipeline: random 8x8 patch extraction -> GCN -> ZCA whitening.
    pipeline = preprocessing.Pipeline()
    pipeline.items.append(preprocessing.ExtractPatches(patch_shape=(8, 8),
                          num_patches=2*1000*1000))
    pipeline.items.append(
        preprocessing.GlobalContrastNormalization(sqrt_bias=10., use_std=True))
    pipeline.items.append(preprocessing.ZCA())
    data.apply_preprocessor(preprocessor=pipeline, can_fit=True)
    # Store the design matrix in a separate .npy referenced by data.pkl.
    data.use_design_loc(patch_dir + '/data.npy')
    serial.save(patch_dir + '/data.pkl', data)
    serial.save(patch_dir + '/preprocessor.pkl', pipeline)
if __name__ == "__main__":
    main()
|
TNick/pylearn2
|
pylearn2/scripts/datasets/make_stl10_patches_8x8.py
|
Python
|
bsd-3-clause
| 2,978
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorflow_addons.optimizers import AveragedOptimizerWrapper
from tensorflow_addons.utils import types
from typing import Union
from typeguard import typechecked
@tf.keras.utils.register_keras_serializable(package="Addons")
class MovingAverage(AveragedOptimizerWrapper):
    """Optimizer that computes a moving average of the variables.
    Empirically it has been found that using the moving average of the trained
    parameters of a deep network is better than using its trained parameters
    directly. This optimizer allows you to compute this moving average and swap
    the variables at save time so that any code outside of the training loop
    will use by default the average values instead of the original ones.
    Example of usage:
    ```python
    opt = tf.keras.optimizers.SGD(learning_rate)
    opt = tfa.optimizers.MovingAverage(opt)
    ```
    """
    @typechecked
    def __init__(
        self,
        optimizer: types.Optimizer,
        average_decay: types.FloatTensorLike = 0.99,
        num_updates: Union[None, int, tf.Variable] = None,
        start_step: int = 0,
        dynamic_decay: bool = False,
        name: str = "MovingAverage",
        **kwargs,
    ):
        r"""Construct a new MovingAverage optimizer.
        Args:
            optimizer: str or `tf.keras.optimizers.Optimizer` that will be
                used to compute and apply gradients.
            average_decay: float. Decay to use to maintain the moving averages
                of trained variables.
            num_updates: Optional count of the number of updates applied to
                variables.
            start_step: int. What step to start the moving average.
            dynamic_decay: bool. Whether to change the decay based on the number
                of optimizer updates. Decay will start at 0.1 and gradually
                increase up to `average_decay` after each optimizer update.
            name: Optional name for the operations created when applying
                gradients. Defaults to "MovingAverage".
            **kwargs: keyword arguments. Allowed to be {`clipnorm`,
                `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by
                norm; `clipvalue` is clip gradients by value, `decay` is
                included for backward compatibility to allow time inverse
                decay of learning rate. `lr` is included for backward
                compatibility, recommended to use `learning_rate` instead.
        """
        super().__init__(optimizer, name, **kwargs)
        self._num_updates = num_updates
        if self._num_updates is not None:
            if isinstance(self._num_updates, tf.Variable):
                tf.debugging.assert_integer(
                    self._num_updates,
                    (
                        'type of argument "num_updates" must be '
                        "int; got {} instead".format(self._num_updates.dtype)
                    ),
                )
            num_updates = tf.cast(self._num_updates, tf.float32, name="num_updates")
            # Cap the decay at (1 + n) / (10 + n) so early averages are not
            # dominated by the (uninformative) initial variable values.
            average_decay = tf.minimum(
                average_decay, (1.0 + num_updates) / (10.0 + num_updates)
            )
        self._set_hyper("average_decay", average_decay)
        self._start_step = start_step
        self._dynamic_decay = dynamic_decay
    @tf.function
    def _get_decay(self, step: tf.Tensor):
        # Decay schedule: 0 before start_step; optionally ramping
        # (1 + t) / (10 + t) up to average_decay when dynamic_decay is set.
        average_decay = self._get_hyper("average_decay", tf.dtypes.float32)
        step = tf.cast(step, tf.float32)
        if step < self._start_step:
            return tf.constant(0.0, tf.float32)
        elif self._dynamic_decay:
            step_count = step - self._start_step
            return tf.minimum(average_decay, (1.0 + step_count) / (10.0 + step_count))
        else:
            return average_decay
    def _prepare_local(self, var_device, var_dtype, apply_state):
        # Cache the current decay once per (device, dtype) so per-variable
        # updates do not recompute it.
        super()._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["tfa_ma_decay"] = self._get_decay(
            self._optimizer.iterations
        )
    def average_op(self, var, average_var, local_apply_state):
        # average_var <- average_var - (1 - decay) * (average_var - var)
        return tf.keras.backend.moving_average_update(
            average_var, var, local_apply_state["tfa_ma_decay"]
        )
    def get_config(self):
        """Return the serialization config used by Keras (de)serialization."""
        config = {
            "average_decay": self._serialize_hyperparameter("average_decay"),
            "num_updates": self._num_updates,
            "start_step": self._start_step,
            "dynamic_decay": self._dynamic_decay,
        }
        base_config = super().get_config()
        return {**base_config, **config}
    def _create_slots(self, var_list):
        # One "average" slot per variable, seeded with the variable's
        # current value so the average starts at the model weights.
        self._optimizer._create_slots(var_list=var_list)
        for var in var_list:
            self.add_slot(var, "average", var.read_value())
        self._average_weights = [self.get_slot(var, "average") for var in var_list]
        self._model_weights = var_list
    def shadow_copy(self, model_weights):
        """Creates shadow variables for the given model weights."""
        # Unlike _create_slots, shadow slots start at zero.
        for var in model_weights:
            self.add_slot(var, "average", initializer="zeros")
        self._average_weights = [self.get_slot(var, "average") for var in model_weights]
        self._model_weights = model_weights
    @property
    def has_shadow_copy(self):
        """Whether this optimizer has created shadow variables."""
        return self._model_weights is not None
    def swap_weights(self):
        """Swap the average and moving weights.
        This is a convenience method to allow one to evaluate the averaged weights
        at test time. Loads the weights stored in `self._average_weights` into the model,
        keeping a copy of the original model weights. Swapping twice will return
        the original weights.
        """
        if tf.distribute.in_cross_replica_context():
            strategy = tf.distribute.get_strategy()
            return strategy.run(self._swap_weights, args=())
        else:
            raise ValueError(
                "Swapping weights must occur under a " "tf.distribute.Strategy"
            )
    @tf.function
    def _swap_weights(self):
        # In-place swap via add/sub triples (no temporary buffers),
        # mirrored to every device of the strategy.
        def fn_0(a, b):
            return a.assign_add(b, use_locking=self._use_locking)
        def fn_1(b, a):
            return b.assign(a - b, use_locking=self._use_locking)
        def fn_2(a, b):
            return a.assign_sub(b, use_locking=self._use_locking)
        def swap(strategy, a, b):
            """Swap `a` and `b` and mirror to all devices."""
            for a_element, b_element in zip(a, b):
                strategy.extended.update(
                    a_element, fn_0, args=(b_element,)
                )  # a = a + b
                strategy.extended.update(
                    b_element, fn_1, args=(a_element,)
                )  # b = a - b
                strategy.extended.update(
                    a_element, fn_2, args=(b_element,)
                )  # a = a - b
        ctx = tf.distribute.get_replica_context()
        return ctx.merge_call(swap, args=(self._average_weights, self._model_weights))
|
tensorflow/addons
|
tensorflow_addons/optimizers/moving_average.py
|
Python
|
apache-2.0
| 7,824
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/ui/events
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""
def GetPreferredTryMasters(project, change):
  """Return the default try masters/bots for changes under ui/events.

  Both arguments are part of the depot_tools presubmit API and are
  unused here; the bot mapping is static.
  """
  default_tests = set(['defaulttests'])
  linux_bots = {
      'linux_chromium_chromeos_rel_ng': default_tests,
      'linux_chromium_chromeos_asan_rel_ng': default_tests,
  }
  win_bots = {
      'win_chromium_compile_dbg_ng': default_tests,
  }
  return {
      'tryserver.chromium.linux': linux_bots,
      'tryserver.chromium.win': win_bots,
  }
|
js0701/chromium-crosswalk
|
ui/events/PRESUBMIT.py
|
Python
|
bsd-3-clause
| 684
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MirroredStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import sys
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import strategy_test_lib
from tensorflow.python.distribute import values
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras.engine import training as keras_training
from tensorflow.python.keras.layers import core as keras_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.training import server_lib
# True when running as the GPU variant of this test target (the binary
# name contains "test_gpu").
GPU_TEST = "test_gpu" in sys.argv[0]
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
            strategy_combinations.mirrored_strategy_with_two_gpus,
        ],
        mode=["graph", "eager"]))
class MirroredTwoDeviceDistributionTest(
    strategy_test_lib.DistributionTestBase,
    strategy_test_lib.TwoDeviceDistributionTestBase,
    parameterized.TestCase):
  """MirroredStrategy tests on exactly two devices (CPU+GPU or two GPUs).

  Most methods delegate to shared helpers in strategy_test_lib; each is
  parameterized over the two-device distributions and graph/eager modes.
  """
  def testMinimizeLoss(self, distribution):
    if context.executing_eagerly():
      self._test_minimize_loss_eager(distribution)
    else:
      self._test_minimize_loss_graph(distribution)
  def testReplicaId(self, distribution):
    self._test_replica_id(distribution)
  def testNumReplicasInSync(self, distribution):
    self.assertEqual(2, distribution.num_replicas_in_sync)
  def testCallAndMergeExceptions(self, distribution):
    self._test_call_and_merge_exceptions(distribution)
  def testRunRegroupError(self, distribution):
    def run_fn():
      replica_id = int(self.evaluate(_replica_id()))
      # Generates a list with different lengths on different devices.
      # Will fail in _regroup() (if more than one device).
      return list(range(replica_id))
    with distribution.scope(), self.assertRaises(AssertionError):
      distribution.extended.call_for_each_replica(run_fn)
  def testReduceToCpu(self, distribution):
    # Per-replica values are the replica ids, so SUM == 0 + 1 + ... + n-1.
    with distribution.scope():
      result = distribution.extended.call_for_each_replica(_replica_id)
    reduced = distribution.reduce(reduce_util.ReduceOp.SUM, result, axis=None)
    expected = sum(range(distribution.num_replicas_in_sync))
    self.assertEqual(expected, self.evaluate(reduced))
  def reduce_axis_helper(self, distribution, replica_squared_fn):
    # Shared driver: replica x returns a length-(x+1) list of value x, so
    # SUM over axis 0 is sum(x * (x + 1)) and MEAN divides by sum(x + 1).
    with distribution.scope():
      num_replicas = distribution.num_replicas_in_sync
      result = distribution.extended.call_for_each_replica(replica_squared_fn)
      # sum
      reduced = distribution.reduce(reduce_util.ReduceOp.SUM, result, axis=0)
      expected = sum(x * (x + 1) for x in range(num_replicas))
      self.assertNear(expected, self.evaluate(reduced), 0.00001)
      # mean
      reduced = distribution.reduce(reduce_util.ReduceOp.MEAN, result, axis=0)
      expected /= sum(x + 1 for x in range(num_replicas))
      self.assertNear(expected, self.evaluate(reduced), 0.00001)
  def testReduceAxisToCpu(self, distribution):
    for dtype in (dtypes.float32, dtypes.int32):
      def replica_squared_fn(dtype=dtype):
        # Lists with different lengths on different replicas.
        replica_id = _replica_id_as_int()
        return math_ops.cast([replica_id] * (replica_id + 1), dtype)
      self.reduce_axis_helper(distribution, replica_squared_fn)
  def set_v2_tensorshape(self, v2):
    # Toggle the global TensorShape v1/v2 behavior for the test below.
    if v2:
      tensor_shape.enable_v2_tensorshape()
    else:
      tensor_shape.disable_v2_tensorshape()
  def testReduceAxisToCpuUnknownShape(self, distribution):
    original_v2 = tensor_shape._TENSORSHAPE_V2_OVERRIDE  # pylint: disable=protected-access
    try:
      for v2 in (False, True):
        self.set_v2_tensorshape(v2)
        for dtype in (dtypes.float32, dtypes.int32):
          for shape in ((None,), None):  # Test both unknown size and rank.
            def replica_squared_fn(dtype=dtype, shape=shape):
              # Lists with different lengths on different replicas.
              replica_id = _replica_id_as_int()
              tensor = math_ops.cast([replica_id] * (replica_id + 1), dtype)
              # Erase shape information
              return array_ops.placeholder_with_default(tensor, shape=shape)
            self.reduce_axis_helper(distribution, replica_squared_fn)
    finally:
      # Restore the global override regardless of test outcome.
      self.set_v2_tensorshape(original_v2)
  def testReplicateDataset(self, distribution):
    dataset_fn = lambda: dataset_ops.Dataset.range(10)
    # Two replicas -> each step consumes two consecutive elements.
    expected_values = [[i, i+1] for i in range(0, 10, 2)]
    input_fn = self._input_fn_to_test_input_context(
        dataset_fn,
        expected_num_replicas_in_sync=2,
        expected_num_input_pipelines=1,
        expected_input_pipeline_id=0)
    self._test_input_fn_iterable(distribution, input_fn, expected_values)
  def testMakeInputFnIteratorWithDataset(self, distribution):
    dataset_fn = lambda: dataset_ops.Dataset.range(10)
    expected_values = [[i, i+1] for i in range(0, 10, 2)]
    input_fn = self._input_fn_to_test_input_context(
        dataset_fn,
        expected_num_replicas_in_sync=2,
        expected_num_input_pipelines=1,
        expected_input_pipeline_id=0)
    iterator = distribution.make_input_fn_iterator(input_fn)
    self._test_input_fn_iterator(iterator, distribution.extended.worker_devices,
                                 expected_values)
  def testMakeInputFnIteratorWithCallable(self, distribution):
    def fn():
      # Interleaving two copies of range(10) gives each replica the same
      # element per step, hence the [i, i] expected pairs below.
      dataset = dataset_ops.Dataset.range(2).interleave(
          (lambda _: dataset_ops.Dataset.range(10)), cycle_length=2)
      it = dataset_ops.make_one_shot_iterator(dataset)
      return it.get_next
    expected_values = [[i, i] for i in range(0, 10)]
    input_fn = self._input_fn_to_test_input_context(
        fn,
        expected_num_replicas_in_sync=2,
        expected_num_input_pipelines=1,
        expected_input_pipeline_id=0)
    iterator = distribution.make_input_fn_iterator(input_fn)
    self._test_input_fn_iterator(iterator, distribution.extended.worker_devices,
                                 expected_values, test_reinitialize=False,
                                 ignore_order=True)
  def testNumpyDataset(self, distribution):
    self._test_numpy_dataset(distribution)
  def testGlobalStepUpdate(self, distribution):
    self._test_global_step_update(distribution)
  def testRun(self, distribution):
    self._test_run(distribution)
  def testAllReduceSum(self, distribution):
    self._test_all_reduce_sum(distribution)
  def testAllReduceSumGradients(self, distribution):
    self._test_all_reduce_sum_gradients(distribution)
  def testAllReduceSumGradientTape(self, distribution):
    self._test_all_reduce_sum_gradient_tape(distribution)
  def testAllReduceMean(self, distribution):
    self._test_all_reduce_mean(distribution)
  def testAllReduceMeanGradients(self, distribution):
    self._test_all_reduce_mean_gradients(distribution)
  def testAllReduceMeanGradientTape(self, distribution):
    self._test_all_reduce_mean_gradient_tape(distribution)
  def testSummaryForReplicaZeroOnly(self, distribution):
    self._test_summary_for_replica_zero_only(distribution)
  def testTrainableVariables(self, distribution):
    self._test_trainable_variable(distribution)
def one_device_combinations():
  """Combinations running MirroredStrategy on a single CPU or GPU device."""
  return combinations.combine(
      distribution=[
          strategy_combinations.mirrored_strategy_with_one_cpu,
          strategy_combinations.mirrored_strategy_with_one_gpu,
      ],
      mode=["graph", "eager"])
@combinations.generate(one_device_combinations())
class MirroredOneDeviceDistributionTest(
    strategy_test_lib.DistributionTestBase,
    strategy_test_lib.OneDeviceDistributionTestBase,
    parameterized.TestCase):
  """Single-device MirroredStrategy variants of the shared distribution tests.

  Each method delegates to a helper in strategy_test_lib, parameterized
  over the one-CPU and one-GPU mirrored strategies in graph/eager modes.
  """
  def testMinimizeLoss(self, distribution):
    if context.executing_eagerly():
      self._test_minimize_loss_eager(distribution)
    else:
      self._test_minimize_loss_graph(distribution)
  def testReplicaId(self, distribution):
    self._test_replica_id(distribution)
  def testCallAndMergeExceptions(self, distribution):
    self._test_call_and_merge_exceptions(distribution)
  def testRun(self, distribution):
    self._test_run(distribution)
  def testAllReduceSum(self, distribution):
    self._test_all_reduce_sum(distribution)
  def testAllReduceSumGradients(self, distribution):
    self._test_all_reduce_sum_gradients(distribution)
  def testAllReduceSumGradientTape(self, distribution):
    self._test_all_reduce_sum_gradient_tape(distribution)
  def testAllReduceMean(self, distribution):
    self._test_all_reduce_mean(distribution)
  def testAllReduceMeanGradients(self, distribution):
    self._test_all_reduce_mean_gradients(distribution)
  def testAllReduceMeanGradientTape(self, distribution):
    self._test_all_reduce_mean_gradient_tape(distribution)
class MirroredStrategyVariableCreatorStackTest(
    test.TestCase, parameterized.TestCase):
  """Checks that variable creator scopes are per-replica-thread, not shared."""
  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
          ],
          mode=["graph"]))
  def testCreatorStacksAreThreadLocal(self, distribution):
    def model_fn():
      # Each replica thread installs its own creator that tags the
      # variable name with that replica's id.
      replica_id_str = str(self.evaluate(_replica_id()))
      def thread_creator_fn(next_creator, *args, **kwargs):
        return next_creator(*args, **kwargs) + ":thread_" + replica_id_str
      with variable_scope.variable_creator_scope(thread_creator_fn):
        # Create a variable in this scope.
        v = variable_scope.variable(1.0)
        # This will pause the current thread, and execute the other thread.
        ds_context.get_replica_context().merge_call(lambda _: _)
      return v
    def main_thread_creator(next_creator, *args, **kwargs):
      # We are not using the underlying next_creator for test purposes.
      del next_creator, args, kwargs
      return "main_thread"
    with context.graph_mode(), \
        distribution.scope(), \
        variable_scope.variable_creator_scope(main_thread_creator):
      result = distribution.extended.call_for_each_replica(model_fn)
      result = distribution.experimental_local_results(result)
      # The outer (main-thread) creator composes with each replica
      # thread's creator independently.
      expected = ("main_thread:thread_0", "main_thread:thread_1")
      self.assertEqual(expected, result)
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph", "eager"]))
class MirroredStrategyCallForEachReplicaTest(test.TestCase):
  """Tests `extended.call_for_each_replica`, especially with `tf.function`.

  Covers tracing behavior (once per replica, cached across calls), nesting
  inside another `tf.function`, and interaction with `merge_call`.
  """

  def testExecutingEagerlyOutsideFunction(self, distribution):
    """Verify we preserve the value of executing_eagerly_outside_functions()."""

    def model_fn():
      return ops.executing_eagerly_outside_functions()

    originally = ops.executing_eagerly_outside_functions()
    with distribution.scope():
      in_scope = ops.executing_eagerly_outside_functions()
      in_model_fn = distribution.extended.call_for_each_replica(model_fn)
      unwrapped = distribution.experimental_local_results(in_model_fn)
      self.assertEqual(in_scope, unwrapped[0])
      self.assertEqual(in_scope, originally)

    # Verify this all again, but this time in a FuncGraph.
    with func_graph.FuncGraph("fg").as_default(), distribution.scope():
      in_scope = ops.executing_eagerly_outside_functions()
      in_model_fn = distribution.extended.call_for_each_replica(model_fn)
      unwrapped = distribution.experimental_local_results(in_model_fn)
      self.assertEqual(in_scope, unwrapped[0])
      self.assertEqual(in_scope, originally)

  def testFunctionInCallForEachReplica(self, distribution):
    """A `tf.function` passed to call_for_each_replica traces once per replica."""
    traces = []

    @def_function.function
    def model_fn():
      traces.append(1)
      return ds_context.get_replica_context().replica_id_in_sync_group

    with distribution.scope():
      result = distribution.extended.call_for_each_replica(model_fn)
      self.assertEqual((0, 1), self.evaluate(result.values))
      self.assertLen(traces, distribution.num_replicas_in_sync)

  def testFunctionInCallForEachReplicaInsideAnotherFunction(self, distribution):
    """Same tracing guarantee when the replica call is inside a tf.function."""
    traces = []

    @def_function.function
    def model_fn():
      traces.append(1)
      return ds_context.get_replica_context().replica_id_in_sync_group

    @def_function.function
    def step():
      return distribution.extended.call_for_each_replica(model_fn)

    with distribution.scope():
      result = step()
      self.assertEqual((0, 1), self.evaluate(result.values))
      self.assertLen(traces, distribution.num_replicas_in_sync)

  def testNestedFunctionInCallForEachReplicaWithMergeCall(self, distribution):
    """`merge_call` inside a while_loop body (a nested graph) must raise."""

    def merge_fn(_):
      pass

    @def_function.function
    def model_fn():

      def body_fn(i):
        ds_context.get_replica_context().merge_call(merge_fn)
        return i + 1

      return control_flow_ops.while_loop_v2(lambda i: i < 2, body_fn, [0])

    with distribution.scope():
      # Use assertRaisesRegex: assertRaisesRegexp is a deprecated Py3 alias.
      with self.assertRaisesRegex(
          RuntimeError, "`merge_call` called while defining a new graph."):
        distribution.extended.call_for_each_replica(model_fn)

  def testFunctionInCallForEachReplicaWithMergeCall(self, distribution):
    """`merge_call` directly in the tf.function body is allowed."""

    def merge_fn(_):
      pass

    @def_function.function
    def model_fn():
      ds_context.get_replica_context().merge_call(merge_fn)
      return 0.

    with distribution.scope():
      self.assertEqual(
          self.evaluate(distribution.extended.call_for_each_replica(model_fn)),
          0.)

  def testFunctionInCallForEachReplicaCached(self, distribution):
    """Repeated calls must reuse the per-device traces, not retrace."""
    traces = []

    @def_function.function
    def model_fn():
      traces.append(None)

    self.assertEmpty(traces)
    for i in range(10):
      distribution.extended.call_for_each_replica(model_fn)

      if i == 0:
        num_devices = len(traces)
        self.assertGreater(num_devices, 0)
      else:
        # model_fn should not have been re-evaluated so the length should
        # remain the same.
        self.assertLen(traces, num_devices)
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph"]))
class MirroredStrategyNameScopeTest(test.TestCase):
  """Verifies replica-1 ops/variables get a "replica_1/" name prefix."""
  # NOTE(priyag): Names and name scopes are ignored in eager, hence we are not
  # testing this in eager mode.

  def testNameScope(self, distribution):
    """Constants created per replica inside an outer explicit name scope."""

    def model_fn():
      with ops.name_scope("foo"):
        a = constant_op.constant(1.0, name="a")
        ds_context.get_replica_context().merge_call(lambda _: _)
        b = constant_op.constant(1.0, name="b")
      return a, b

    with context.graph_mode(), distribution.scope():
      with ops.name_scope("main"):
        result = distribution.extended.call_for_each_replica(model_fn)
        self.assertEqual(2, len(result))
        for v, name in zip(result, ["a", "b"]):
          self.assertIsInstance(v, values.DistributedValues)
          v0, v1 = distribution.experimental_local_results(v)
          self.assertEqual("main/foo/" + name + ":0", v0.name)
          self.assertEqual("main/replica_1/foo/" + name + ":0", v1.name)

  def testWithDefaultName(self, distribution):
    """Default (None) name scopes behave like explicit ones across replicas."""

    def model_fn():
      with ops.name_scope(None, "foo"):
        a = constant_op.constant(1.0, name="a")
        ds_context.get_replica_context().merge_call(lambda _: _)
        b = constant_op.constant(2.0, name="b")
      return a, b

    with context.graph_mode(), distribution.scope():
      result = distribution.extended.call_for_each_replica(model_fn)
      self.assertEqual(2, len(result))
      for v, name in zip(result, ["a", "b"]):
        self.assertIsInstance(v, values.DistributedValues)
        v0, v1 = distribution.experimental_local_results(v)
        self.assertEqual("foo/" + name + ":0", v0.name)
        self.assertEqual("replica_1/" + "foo/" + name + ":0", v1.name)

  # variable_scope.variable() respects name scopes when creating
  # variables. On the other hand variable_scope.get_variable() ignores name
  # scopes but respects variable scope when creating variables. We test both
  # methods of creating variables to make sure that we have the same
  # variable names in both cases.
  def testNameScopeWithVariable(self, distribution):
    """variable_scope.variable() picks up surrounding name scopes."""

    def in_cross_replica(_):
      c = variable_scope.variable(1.0, name="c")
      return c

    def model_fn():
      b = variable_scope.variable(1.0, name="b")
      with ops.name_scope("foo"):
        c = ds_context.get_replica_context().merge_call(in_cross_replica)
      return b, c

    with context.graph_mode(), distribution.scope():
      with ops.name_scope("main"):
        a = variable_scope.variable(1.0, name="a")
        result = distribution.extended.call_for_each_replica(model_fn)
      result_b = result[0]
      result_c = result[1]
      self.assertIsInstance(result_b, values.DistributedValues)
      self.assertIsInstance(result_c, values.DistributedValues)
      a0, a1 = distribution.experimental_local_results(a)
      b0, b1 = distribution.experimental_local_results(result_b)
      c0, c1 = distribution.experimental_local_results(result_c)
      self.assertEqual("main/a:0", a0.name)
      self.assertEqual("main/a/replica_1:0", a1.name)
      self.assertEqual("main/b:0", b0.name)
      self.assertEqual("main/b/replica_1:0", b1.name)
      self.assertEqual("main/foo/c:0", c0.name)
      self.assertEqual("main/foo/c/replica_1:0", c1.name)

  def testNameScopeWithGetVariable(self, distribution):
    """get_variable() ignores name scopes entirely."""

    def in_cross_replica(_):
      c = variable_scope.get_variable("c", [1])
      return c

    def model_fn():
      b = variable_scope.get_variable("b", [1])
      with ops.name_scope("foo"):
        c = ds_context.get_replica_context().merge_call(in_cross_replica)
      return b, c

    with context.graph_mode(), distribution.scope():
      with ops.name_scope("main"):
        a = variable_scope.get_variable("a", [1])
        result = distribution.extended.call_for_each_replica(model_fn)
      result_b = result[0]
      result_c = result[1]
      self.assertIsInstance(result_b, values.DistributedValues)
      self.assertIsInstance(result_c, values.DistributedValues)
      a0, a1 = distribution.experimental_local_results(a)
      b0, b1 = distribution.experimental_local_results(result_b)
      c0, c1 = distribution.experimental_local_results(result_c)
      self.assertEqual("a:0", a0.name)
      self.assertEqual("a/replica_1:0", a1.name)
      self.assertEqual("b:0", b0.name)
      self.assertEqual("b/replica_1:0", b1.name)
      self.assertEqual("c:0", c0.name)
      self.assertEqual("c/replica_1:0", c1.name)

  def testVariableScopeWithGetVariable(self, distribution):
    """get_variable() does respect variable scopes, matching testNameScopeWithVariable names."""

    def in_cross_replica(_):
      c = variable_scope.get_variable("c", [1])
      return c

    def model_fn():
      b = variable_scope.get_variable("b", [1])
      with variable_scope.variable_scope("foo"):
        c = ds_context.get_replica_context().merge_call(in_cross_replica)
      return b, c

    with context.graph_mode(), distribution.scope():
      with variable_scope.variable_scope("main"):
        a = variable_scope.get_variable("a", [1])
        result = distribution.extended.call_for_each_replica(model_fn)
      result_b = result[0]
      result_c = result[1]
      self.assertIsInstance(result_b, values.DistributedValues)
      self.assertIsInstance(result_c, values.DistributedValues)
      a0, a1 = distribution.experimental_local_results(a)
      b0, b1 = distribution.experimental_local_results(result_b)
      c0, c1 = distribution.experimental_local_results(result_c)
      self.assertEqual("main/a:0", a0.name)
      self.assertEqual("main/a/replica_1:0", a1.name)
      self.assertEqual("main/b:0", b0.name)
      self.assertEqual("main/b/replica_1:0", b1.name)
      self.assertEqual("main/foo/c:0", c0.name)
      self.assertEqual("main/foo/c/replica_1:0", c1.name)
@combinations.generate(
    combinations.combine(
        distribution=[
            combinations.NamedDistribution(
                "Mirrored3Devices",
                # pylint: disable=g-long-lambda
                lambda: mirrored_strategy.MirroredStrategy(
                    ["/device:GPU:0", "/device:GPU:1", "/device:CPU:0"]),
                required_gpus=2)
        ],
        mode=["graph", "eager"]))
class MirroredThreeDeviceDistributionTest(
    strategy_test_lib.DistributionTestBase,
    parameterized.TestCase):
  """Sanity check for MirroredStrategy spanning three devices (2 GPU + CPU)."""

  def testThreeDevices(self, distribution):
    """One variable created per replica collapses to a single MirroredVariable."""

    def model_fn():
      v = variable_scope.variable(1.0, name="foo")
      ds_context.get_replica_context().merge_call(lambda _: _)
      return v

    with distribution.scope():
      result = distribution.extended.call_for_each_replica(model_fn)
      self.assertIsInstance(result, values.MirroredVariable)
      self.assertEqual("foo:0", result.name)
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph", "eager"]))
class MirroredVariableUpdateTest(test.TestCase):
  # The following tests check assign, assign_add and assign_sub on Mirrored
  # variables in replica and cross replica context.

  def testAssignMirroredVarReplicaContextWithoutAggregationType(self,
                                                                distribution):
    """Assigning in replica context without an aggregation type must fail."""

    def var_fn():
      v = variable_scope.variable(1.0, name="foo")
      return v

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())

      def model_fn():
        return mirrored_var.assign(5.0)

      # Use assertRaisesRegex: assertRaisesRegexp is a deprecated Py3 alias.
      with self.assertRaisesRegex(
          ValueError, "You must specify an aggregation method to update a "
          "MirroredVariable in Replica Context. You can do so by"):
        self.evaluate(distribution.experimental_local_results(
            distribution.extended.call_for_each_replica(model_fn)))

  def testAssignMirroredVarReplicaContextWithSum(self, distribution):
    """A non-distributed value cannot be SUM-reduced on assignment."""

    def var_fn():
      v = variable_scope.variable(
          1.0, name="foo", aggregation=variable_scope.VariableAggregation.SUM)
      return v

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())

      def model_fn():
        return mirrored_var.assign(5.0)

      # Use assertRaisesRegex: assertRaisesRegexp is a deprecated Py3 alias.
      with self.assertRaisesRegex(
          ValueError, "A non-DistributedValues value 5.0 cannot be reduced "
          "with the given reduce op ReduceOp.SUM."):
        self.evaluate(distribution.experimental_local_results(
            distribution.extended.call_for_each_replica(model_fn)))

  def testAssignMirroredVarCrossDeviceContext(self, distribution):
    """Plain assign in cross-replica context needs no aggregation type."""

    def var_fn():
      return variable_scope.variable(1.0, name="foo")

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))
      mirrored_var_result = self.evaluate(mirrored_var.assign(6.0))
      self.assertEqual(6.0, mirrored_var_result)

  def testAssignMirroredVarReplicaContext(self, distribution):
    """Per-replica assigns with MEAN aggregation average across replicas."""

    def var_fn():
      return variable_scope.variable(
          1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))

      def model_fn():
        value = math_ops.cast(
            ds_context.get_replica_context().replica_id_in_sync_group,
            mirrored_var.dtype)
        return mirrored_var.assign(value)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      # Replica ids are 0 and 1, so the MEAN-aggregated value is 0.5.
      self.assertEqual(0.5, self.evaluate(mirrored_var))

  def testAssignMirroredVarReplicaContextWithSingleValue(self, distribution):
    """Assigning the same scalar on every replica keeps that scalar."""

    def var_fn():
      return variable_scope.variable(
          1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))

      def model_fn():
        return mirrored_var.assign(5.0)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      self.assertEqual(5.0, self.evaluate(mirrored_var))

  def testAssignAddMirroredVarCrossDeviceContext(self, distribution):
    """assign_add in cross-replica context, with and without read_value."""

    def var_fn():
      return variable_scope.variable(1.0, name="foo")

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))

      # read_value == True
      mirrored_var_result = self.evaluate(
          mirrored_var.assign_add(6.0, read_value=True))
      self.assertEqual(7.0, mirrored_var_result)
      self.assertEqual(7.0, self.evaluate(mirrored_var.get("/device:CPU:0")))
      self.assertEqual(7.0, self.evaluate(mirrored_var.get("/device:GPU:0")))

      # read_value == False
      self.evaluate(mirrored_var.assign_add(2.0, read_value=False))
      self.assertEqual(9.0, self.evaluate(mirrored_var.get("/device:CPU:0")))
      self.assertEqual(9.0, self.evaluate(mirrored_var.get("/device:GPU:0")))

  def testAssignAddMirroredVarReplicaContext(self, distribution):
    """Per-replica assign_add with MEAN aggregation adds the mean delta."""

    def var_fn():
      return variable_scope.variable(
          1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))

      def model_fn():
        value = math_ops.cast(
            ds_context.get_replica_context().replica_id_in_sync_group,
            mirrored_var.dtype)
        return mirrored_var.assign_add(value)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      # 1.0 + mean(0, 1) == 1.5.
      self.assertEqual(1.5, self.evaluate(mirrored_var))

  def testAssignAddMirroredVarReplicaContextWithSingleValue(self, distribution):
    """Adding the same scalar on every replica adds it exactly once."""

    def var_fn():
      return variable_scope.variable(
          1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))

      def model_fn():
        return mirrored_var.assign_add(5.0)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      self.assertEqual(6.0, self.evaluate(mirrored_var))

  def testAssignSubMirroredVarCrossDeviceContext(self, distribution):
    """assign_sub in cross-replica context updates every per-device copy."""

    def var_fn():
      return variable_scope.variable(5.0, name="foo")

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(5.0, self.evaluate(mirrored_var))
      mirrored_var_result = self.evaluate(mirrored_var.assign_sub(2.0))
      self.assertEqual(3.0, mirrored_var_result)
      self.assertEqual(3.0, self.evaluate(mirrored_var.get("/device:GPU:0")))
      self.assertEqual(3.0, self.evaluate(mirrored_var.get("/device:CPU:0")))

  def testAssignSubMirroredVarReplicaContext(self, distribution):
    """Per-replica assign_sub with MEAN aggregation subtracts the mean."""

    def var_fn():
      return variable_scope.variable(
          5.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(5.0, self.evaluate(mirrored_var))

      def model_fn():
        value = math_ops.cast(
            ds_context.get_replica_context().replica_id_in_sync_group,
            mirrored_var.dtype)
        return mirrored_var.assign_sub(value)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      # 5.0 - mean(0, 1) == 4.5.
      self.assertEqual(4.5, self.evaluate(mirrored_var))

  def testAssignSubMirroredVarReplicaContextWithSingleValue(self, distribution):
    """Subtracting the same scalar on every replica subtracts it once."""

    def var_fn():
      return variable_scope.variable(
          5.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(5.0, self.evaluate(mirrored_var))

      def model_fn():
        return mirrored_var.assign_sub(1.0)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      self.assertEqual(4.0, self.evaluate(mirrored_var))
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph", "eager"]))
class MirroredAndSyncOnReadVariableInitializerTest(test.TestCase):
  """Initializer-op behavior for mirrored and sync-on-read variables."""

  def testAssignMirroredVarInitializer(self, distribution):
    """Running the initializer flips is_initialized() for a MirroredVariable."""
    # This test is not eager compatible since in eager variables are initialized
    # upon construction instead of once the initialization op is run.
    with context.graph_mode():

      def var_fn():
        v = variable_scope.variable(1.0, name="foo")
        return v

      with distribution.scope():
        mirrored_var = distribution.extended.call_for_each_replica(var_fn)
        self.assertIsInstance(mirrored_var, values.MirroredVariable)
        self.assertFalse(self.evaluate(mirrored_var.is_initialized()))
        self.evaluate(mirrored_var.initializer)
        self.assertTrue(self.evaluate(mirrored_var.is_initialized()))

  def testAssignReplicaLocalVarInitializer(self, distribution):
    """Same check for a SyncOnReadVariable (ON_READ + SUM)."""
    # This test is not eager compatible since in eager variables are initialized
    # upon construction instead of once the initialization op is run.
    with context.graph_mode():

      def model_fn():
        v_sum = variable_scope.variable(
            1.0,
            synchronization=variable_scope.VariableSynchronization.ON_READ,
            aggregation=variable_scope.VariableAggregation.SUM)
        self.assertIsInstance(v_sum, values.SyncOnReadVariable)
        return v_sum

      with distribution.scope():
        sync_on_read_var = distribution.extended.call_for_each_replica(
            model_fn)
        self.assertIsInstance(sync_on_read_var, values.SyncOnReadVariable)
        self.assertFalse(self.evaluate(sync_on_read_var.is_initialized()))
        self.evaluate(sync_on_read_var.initializer)
        self.assertTrue(self.evaluate(sync_on_read_var.is_initialized()))
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph", "eager"]))
class SyncOnReadVariableAssignTest(test.TestCase):
  """Assignment semantics for sync-on-read variables under SUM and MEAN."""

  def testAssignReplicaLocalVarSumAggregation(self, distribution):
    """SUM aggregation: reads sum per-replica values; assigns split evenly."""

    def model_fn():
      v_sum = variable_scope.variable(
          1.0,
          synchronization=variable_scope.VariableSynchronization.ON_READ,
          aggregation=variable_scope.VariableAggregation.SUM)
      return v_sum

    with distribution.scope():
      sync_on_read_var = distribution.extended.call_for_each_replica(model_fn)
      self.assertIsInstance(sync_on_read_var, values.SyncOnReadVariable)
      self.evaluate(variables.global_variables_initializer())
      # Each replica has a value of 1.0 assigned to it in replica context.
      # When we read the value using `read_var` we should see the SUM of each of
      # values on each of the replicas.
      self.assertEqual(2.0, self.evaluate(
          distribution.extended.read_var(sync_on_read_var)))
      # Assigning 6.0 in cross replica context will assign a value of
      # 6.0/num_replicas to each replica.
      tlv_ops = sync_on_read_var.assign(6.0)
      self.evaluate(tlv_ops)
      # On reading the sync on read var we should get the assigned value back.
      # The value on all the replicas are added before being returned by
      # `read_var`.
      self.assertEqual(6.0, self.evaluate(
          distribution.extended.read_var(sync_on_read_var)))

  def testAssignReplicaLocalVarMeanAggregation(self, distribution):
    """MEAN aggregation: reads average per-replica values; assign is direct."""

    def model_fn():
      v_sum = variable_scope.variable(
          1.0,
          synchronization=variable_scope.VariableSynchronization.ON_READ,
          aggregation=variable_scope.VariableAggregation.MEAN)
      return v_sum

    with distribution.scope():
      sync_on_read_var = distribution.extended.call_for_each_replica(model_fn)
      self.assertIsInstance(sync_on_read_var, values.SyncOnReadVariable)
      self.evaluate(variables.global_variables_initializer())
      # Each replica has a value of 1.0 assigned to it in replica context.
      # When we read the value using `read_var` we should see the MEAN of values
      # on all replicas which is the value assigned in replica context.
      self.assertEqual(1.0, self.evaluate(
          distribution.extended.read_var(sync_on_read_var)))
      tlv_ops = sync_on_read_var.assign(6.0)
      self.evaluate(tlv_ops)
      # On reading the sync on read var we should get the MEAN of all values
      # which is equal to the value assigned.
      self.assertEqual(6.0, self.evaluate(
          distribution.extended.read_var(sync_on_read_var)))
class MockModel(object):
  """Tiny callable holding one or two dummy variables.

  Calling the model multiplies the first variable by `factor` and, when a
  second variable is present, adds it to the product.
  """

  def __init__(self, two_variables=False):
    self.variables = [variable_scope.variable(1.25, name="dummy_var1")]
    if two_variables:
      self.variables.append(variable_scope.variable(2.0, name="dummy_var2"))

  def __call__(self, factor=2):
    product = factor * self.variables[0]
    # At most one extra variable exists; add it when present.
    for extra_var in self.variables[1:]:
      product += extra_var
    return product
class MiniModel(keras_training.Model):
  """Minimal model for mnist.

  Useful for testing and debugging on slow TPU simulators.
  """

  def __init__(self):
    super(MiniModel, self).__init__(name="")
    self.fc = keras_core.Dense(
        1,
        name="fc",
        kernel_initializer="ones",
        bias_initializer="ones")

  def call(self, inputs, training=True):
    # The incoming `inputs` are ignored; the dense layer always receives a
    # fixed all-ones batch of shape [1, 10].
    del inputs
    return self.fc(array_ops.ones([1, 10]))
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph", "eager"]))
class MirroredStrategyDefunTest(test.TestCase):
  """Tests defun-decorated computations under MirroredStrategy."""

  def _call_and_check(self, distribution, model_fn, inputs, expected_result,
                      defuns, two_variables=False):
    """Runs `model_fn` per replica and checks results and defun traces.

    Args:
      distribution: the strategy under test.
      model_fn: callable invoked per replica with (mock_model, *inputs).
      inputs: extra positional args forwarded to `model_fn`.
      expected_result: per-replica expected value(s), compared with
        assertAllClose.
      defuns: defuns whose per-device concrete functions are retrieved.
      two_variables: whether the MockModel should hold two variables.
    """
    cpu_dev = device_util.canonicalize("CPU:0")
    gpu_dev = device_util.canonicalize("GPU:0")
    devices = [cpu_dev, gpu_dev]

    with distribution.scope():
      mock_model = MockModel(two_variables)
      self.evaluate(variables.global_variables_initializer())

      result = distribution.extended.call_for_each_replica(
          model_fn, args=[mock_model] + inputs)
      for r in range(len(devices)):
        device_result = values.select_replica(r, result)
        device_expected_result = values.select_replica(r, expected_result)
        self.assertAllClose(device_expected_result,
                            self.evaluate(device_result))

      for defun in defuns:
        # `Function`s are specialized to the current device stack, so
        # call_for_each has one trace per device. To check that the expected set
        # of variables was accessed on each trace, we first retrieve each
        # device-specific graph function.
        per_replica_graph_functions = (
            distribution.extended.call_for_each_replica(
                defun.get_concrete_function, args=[mock_model] + inputs))
        for device in devices:
          graph_function = per_replica_graph_functions.get(device=device)
          # TODO(b/129555712): re-enable an assertion here that the two sets of
          # variables are the same.
          # self.assertEqual(set(graph_function.graph.variables),
          #                  set(mock_model.variables))
          del graph_function

  def testVariableInDefun(self, distribution):
    """A defun reading a model variable works per replica."""

    @function.defun
    def times_two(mock_model):
      return mock_model()

    def model_fn(mock_model):
      return times_two(mock_model)

    self._call_and_check(distribution, model_fn, [], 2.5, [times_two])

  def testVariableInNestedDefun(self, distribution):
    """A defun calling another defun that reads a variable works."""

    @function.defun
    def times_two(mock_model):
      return mock_model()

    @function.defun
    def two_x_plus_one(mock_model):
      return times_two(mock_model) + 1

    def model_fn(mock_model):
      return two_x_plus_one(mock_model)

    self._call_and_check(distribution, model_fn, [], 3.5,
                         [times_two, two_x_plus_one])

  def testTwoVariablesInNestedDefun(self, distribution):
    """Nested defuns reading two variables: 2*1.25 + 2.0 + 1 == 5.5."""

    @function.defun
    def fn1(mock_model):
      return mock_model()

    @function.defun
    def fn2(mock_model):
      return fn1(mock_model) + 1

    def model_fn(mock_model):
      return fn2(mock_model)

    self._call_and_check(distribution, model_fn, [], 5.5, [fn1, fn2],
                         two_variables=True)

  def testGradientTapeOverNestedDefuns(self, distribution):
    """Gradients w.r.t. both variables flow through nested defuns."""

    @function.defun
    def fn1(mock_model):
      return mock_model()

    @function.defun
    def fn2(mock_model):
      return fn1(mock_model) + 1

    def model_fn(mock_model):
      with backprop.GradientTape(persistent=True) as gtape:
        result = fn2(mock_model)
      grads = gtape.gradient(result,
                             [v.get() for v in mock_model.variables])
      return grads

    self._call_and_check(distribution, model_fn, [], [2.0, 1.0], [fn1, fn2],
                         two_variables=True)

  def testPassPerReplica(self, distribution):
    """Per-replica inputs can be passed into a defun."""

    @function.defun
    def fn1(mock_model, factor):
      return mock_model(factor)

    device_map = values.ReplicaDeviceMap(("/device:CPU:0", "/device:GPU:0"))
    factors = values.PerReplica(device_map, (5.0, 3.0))
    expected_result = values.PerReplica(device_map, (5.0 * 1.25, 3.0 * 1.25))
    self._call_and_check(distribution, fn1, [factors], expected_result, [fn1])

  def testTrain(self, distribution):
    """One gradient-descent step through a defun-wrapped Keras model."""
    with distribution.scope():
      mock_model = MiniModel()
      mock_model.call = function.defun(mock_model.call)

      def loss_fn(ctx):
        del ctx
        return mock_model(array_ops.ones([1, 10]))

      gradients_fn = backprop.implicit_grad(loss_fn)
      gradients_fn = optimizer_lib.get_filtered_grad_fn(gradients_fn)
      grads_and_vars = distribution.extended.call_for_each_replica(
          gradients_fn, args=(None,))

      optimizer = gradient_descent.GradientDescentOptimizer(0.25)
      update_ops = optimizer._distributed_apply(distribution, grads_and_vars)  # pylint: disable=protected-access

      if not context.executing_eagerly():
        self.evaluate(variables.global_variables_initializer())
        self.evaluate(update_ops)

      updated_var_values = self.evaluate(mock_model.variables)
      # All variables start at 1.0 and get two updates of 0.25.
      self.assertAllEqual(0.5 * np.ones([10, 1]), updated_var_values[0])
      self.assertAllEqual([0.5], updated_var_values[1])
@combinations.generate(
    combinations.combine(
        distribution=[
            combinations.NamedDistribution(
                "Mirrored",
                # pylint: disable=g-long-lambda
                lambda: mirrored_strategy.MirroredStrategy(
                    devices=mirrored_strategy.all_local_devices(),
                    cross_device_ops=cross_device_ops_lib.MultiWorkerAllReduce([
                        "/job:worker/task:0", "/job:worker/task:1"
                    ], context.num_gpus())),
                required_gpus=1)
        ],
        mode=["graph"]))
class MultiWorkerMirroredStrategyTest(
    multi_worker_test_base.MultiWorkerTestBase,
    strategy_test_lib.DistributionTestBase):
  """MirroredStrategy spanning two in-process workers (graph mode only)."""

  def _configure_distribution_strategy(self, distribution):
    """Attaches a two-worker cluster spec to the strategy under test."""
    cluster_spec = server_lib.ClusterSpec({
        "worker": ["/job:worker/task:0", "/job:worker/task:1"]
    })
    distribution.configure(cluster_spec=cluster_spec)

  def test_num_replicas_in_sync(self, distribution):
    self._configure_distribution_strategy(distribution)
    # We calculate the total number of gpus across the workers(2) specified in
    # the cluster spec.
    self.assertEqual(context.num_gpus() * 2, distribution.num_replicas_in_sync)

  def testMinimizeLossGraph(self, distribution):
    self._configure_distribution_strategy(distribution)
    self._test_minimize_loss_graph(distribution, learning_rate=0.05)

  def testDeviceScope(self, distribution):
    """Test the device scope of multi-worker MirroredStrategy."""
    self._configure_distribution_strategy(distribution)
    with distribution.scope():
      a = constant_op.constant(1.)
      with ops.device("/cpu:0"):
        b = constant_op.constant(1.)
      self.assertEqual(a.device, "/job:worker/task:0")
      self.assertEqual(b.device, "/job:worker/task:0/device:CPU:0")

  def testMakeInputFnIteratorWithDataset(self, distribution):
    """Input-fn iterator over a Dataset is sharded across both workers."""
    self._configure_distribution_strategy(distribution)
    dataset_fn = lambda: dataset_ops.Dataset.range(100)
    num_gpus = context.num_gpus()
    num_workers = 2

    expected_values = [[i+j for j in range(num_gpus)] * num_workers
                       for i in range(0, 100, num_gpus)]

    with context.graph_mode(), self.cached_session() as sess:
      # `expected_input_pipeline_id` is None because the input_fn will be called
      # multiple times, each with a different input_pipeline_id.
      input_fn = self._input_fn_to_test_input_context(
          dataset_fn,
          expected_num_replicas_in_sync=num_workers*num_gpus,
          expected_num_input_pipelines=num_workers,
          expected_input_pipeline_id=None)
      iterator = distribution.make_input_fn_iterator(input_fn)
      self._test_input_fn_iterator(
          iterator, distribution.extended.worker_devices, expected_values, sess)

  def testMakeInputFnIteratorWithCallable(self, distribution):
    """Same as above, but the input_fn returns a get_next callable."""
    self._configure_distribution_strategy(distribution)

    def fn():
      dataset = dataset_ops.Dataset.range(100)
      it = dataset_ops.make_one_shot_iterator(dataset)
      return it.get_next

    num_gpus = context.num_gpus()
    num_workers = 2

    expected_values = []
    for i in range(0, 100, num_gpus):
      expected_values.append([i+j for j in range(num_gpus)] * num_workers)

    with context.graph_mode(), self.cached_session() as sess:
      # `expected_input_pipeline_id` is None because the input_fn will be called
      # multiple times, each with a different input_pipeline_id.
      input_fn = self._input_fn_to_test_input_context(
          fn,
          expected_num_replicas_in_sync=num_workers*num_gpus,
          expected_num_input_pipelines=num_workers,
          expected_input_pipeline_id=None)
      iterator = distribution.make_input_fn_iterator(input_fn)
      self._test_input_fn_iterator(
          iterator, distribution.extended.worker_devices, expected_values, sess,
          test_reinitialize=False, ignore_order=True)

  def testUpdateConfigProto(self, distribution):
    """update_config_proto must turn on isolate_session_state."""
    distribution.configure(cluster_spec={"worker": ["fake1", "fake2"]})

    config_proto = config_pb2.ConfigProto()
    new_config = distribution.update_config_proto(config_proto)

    # Verify isolate_session_state
    self.assertTrue(new_config.isolate_session_state)
@combinations.generate(
    combinations.combine(
        distribution=[
            combinations.NamedDistribution(
                "Mirrored",
                # pylint: disable=g-long-lambda
                lambda: mirrored_strategy.MirroredStrategy(
                    devices=["/job:worker/task:0/gpu:{}".format(
                        i) for i in range(context.num_gpus())]),
                required_gpus=1)
        ],
        mode=["graph"]))
class RemoteSingleWorkerMirroredStrategyGraph(
    multi_worker_test_base.SingleWorkerTestBaseGraph,
    strategy_test_lib.RemoteSingleWorkerMirroredStrategyBase):
  """Graph-mode MirroredStrategy on a single remote worker.

  Each test simply delegates to the shared implementation in
  RemoteSingleWorkerMirroredStrategyBase.
  """

  def _get_num_gpus(self):
    # Number of GPUs visible to the worker, used by the base-class tests.
    return context.num_gpus()

  def testNumReplicasInSync(self, distribution):
    self._testNumReplicasInSync(distribution)

  def testMinimizeLoss(self, distribution):
    self._testMinimizeLoss(distribution)

  def testDeviceScope(self, distribution):
    self._testDeviceScope(distribution)

  def testMakeInputFnIteratorWithDataset(self, distribution):
    self._testMakeInputFnIteratorWithDataset(distribution)

  def testMakeInputFnIteratorWithCallable(self, distribution):
    self._testMakeInputFnIteratorWithCallable(distribution)
class MultiWorkerMirroredStrategyTestWithChief(
    multi_worker_test_base.MultiWorkerTestBase,
    strategy_test_lib.DistributionTestBase):
  """MirroredStrategy on a cluster with two workers plus a chief."""

  @classmethod
  def setUpClass(cls):
    """Create a local cluster with 2 workers and 1 chief."""
    cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
        num_workers=2, num_ps=0, has_chief=True)
    cls._default_target = "grpc://" + cls._cluster_spec["chief"][0]

  def _make_cross_device_ops(self):
    # All-reduce spanning the chief and both workers.
    return cross_device_ops_lib.MultiWorkerAllReduce(
        ["/job:chief/task:0", "/job:worker/task:0", "/job:worker/task:1"],
        context.num_gpus())

  def testMinimizeLossGraph(self):
    """Loss minimization with default (no) device list."""
    with context.graph_mode():
      strategy = mirrored_strategy.MirroredStrategy(
          cross_device_ops=self._make_cross_device_ops())
      strategy.configure(cluster_spec=self._cluster_spec)
      self._test_minimize_loss_graph(strategy, learning_rate=0.05)

  def testMinimizeLossGraphMirroredStrategy(self):
    """Loss minimization with an explicit local-device list."""
    with context.graph_mode():
      strategy = mirrored_strategy.MirroredStrategy(
          mirrored_strategy.all_local_devices(),
          cross_device_ops=self._make_cross_device_ops())
      strategy.configure(cluster_spec=self._cluster_spec)
      self._test_minimize_loss_graph(strategy, learning_rate=0.05)

  def testMinimizeLossGraphMirroredStrategyWithOneNode(self):
    """A chief-only TF_CONFIG should infer the right cross-device ops."""
    with context.graph_mode():
      cluster_spec = {}
      cluster_spec["chief"] = self._cluster_spec["chief"]
      tf_config = {"cluster": cluster_spec}
      with test.mock.patch.dict("os.environ",
                                {"TF_CONFIG": json.dumps(tf_config)}):
        strategy = mirrored_strategy.MirroredStrategy()
        if context.num_gpus() > 0:
          self.assertIsInstance(strategy.extended._inferred_cross_device_ops,
                                cross_device_ops_lib.NcclAllReduce)
        else:
          self.assertIsInstance(strategy.extended._inferred_cross_device_ops,
                                cross_device_ops_lib.ReductionToOneDevice)
      # skipTest raises, so the minimize-loss line below is intentionally
      # unreachable until the referenced bug is fixed.
      self.skipTest("b/130551176, run the following once fixed.")
      self._test_minimize_loss_graph(strategy, learning_rate=0.05)

  def testInitializeFromTFConfig(self):
    """num_replicas_in_sync counts all three tasks from TF_CONFIG."""
    with context.graph_mode():
      tf_config = {"cluster": self._cluster_spec}
      with test.mock.patch.dict("os.environ",
                                {"TF_CONFIG": json.dumps(tf_config)}):
        strategy = mirrored_strategy.MirroredStrategy(
            cross_device_ops=self._make_cross_device_ops())
        # 3 tasks (chief + 2 workers), each contributing num_gpus (or one
        # CPU) replicas.
        self.assertEqual(
            max(context.num_gpus(), 1) * 3, strategy.num_replicas_in_sync)

  def testSummaryForReplicaZeroOnly(self):
    with context.graph_mode():
      strategy = mirrored_strategy.MirroredStrategy(
          mirrored_strategy.all_local_devices(),
          cross_device_ops=self._make_cross_device_ops())
      strategy.configure(cluster_spec=self._cluster_spec)
      self._test_summary_for_replica_zero_only(strategy)
class MirroredVariableStopGradientTest(test.TestCase, parameterized.TestCase):
  """Checks that a mirrored variable works as a `stop_gradients` target."""

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_one_cpu,
              strategy_combinations.mirrored_strategy_with_one_gpu,
          ],
          mode=["graph"]))
  def testMirroredVariableAsStopGradient(self, distribution):
    with distribution.scope():
      inp = constant_op.constant(1.0)
      x = variables.Variable(1.0)
      y = inp*x
      # With x stopped, the gradient of y w.r.t. x must be None.
      grads = gradients.gradients(x, y, stop_gradients=x)
      self.assertIsNone(grads[0])
class FunctionTest(test.TestCase):
  """Tests interactions between `tf.function` and MirroredStrategy."""

  def testBackwardFuctionDevicePlacement(self):
    # NOTE(review): "Fuction" typo is preserved — renaming the method would
    # change the test's public identifier.
    if context.num_gpus() < 1:
      self.skipTest("At least one GPU is required.")
    devices = [device_util.resolve("/device:GPU:0"),
               device_util.resolve("/device:CPU:0")]

    ms = mirrored_strategy.MirroredStrategy(devices)
    with ms.scope():
      w = variable_scope.variable([1.5], name="w")
      b = variable_scope.variable([0.5], name="b")

    @def_function.function
    def forward(x, w, b):
      return x * w + b

    x = constant_op.constant([1.0], name="x_useless")
    # Trace once against the primary variable components.
    concrete_forward = forward.get_concrete_function(x, w.primary, b.primary)

    with ms.scope():

      def replica_fn():
        with backprop.GradientTape() as t:
          x = constant_op.constant([1.0], name="x")
          loss = concrete_forward(x, w.get(), b.get()) - [1.0]
        return t.gradient(loss, [w, b])

      def step_fn():
        return ms.experimental_run_v2(replica_fn)

      # Capture run metadata so the partition graphs can be inspected.
      context.enable_run_metadata()
      g1, g2 = step_fn()
      run_metadata = context.export_run_metadata()
      context.disable_run_metadata()
      self.assertEqual(self.evaluate(g1.primary), 1.0)
      self.assertEqual(self.evaluate(g2.primary), 1.0)

      # Verify that this node runs on both devices.
      node_name = "gradients_mul_grad_mul_1_x"
      devices_for_this_node = set()
      for partition_graph in run_metadata.partition_graphs:
        for node in partition_graph.node:
          if node.name == node_name:
            devices_for_this_node.add(node.device)
      self.assertSetEqual(devices_for_this_node, set(devices))
def _replica_id():
  """Return the current replica id in the sync group, always as a tensor."""
  rid = ds_context.get_replica_context().replica_id_in_sync_group
  if isinstance(rid, ops.Tensor):
    return rid
  return constant_op.constant(rid)
def _replica_id_as_int():
  """Return the current replica id in the sync group as a Python value."""
  rid = ds_context.get_replica_context().replica_id_in_sync_group
  if isinstance(rid, ops.Tensor):
    return tensor_util.constant_value(rid)
  return rid
# Standard TensorFlow test-runner entry point.
if __name__ == "__main__":
  test.main()
|
ppwwyyxx/tensorflow
|
tensorflow/python/distribute/mirrored_strategy_test.py
|
Python
|
apache-2.0
| 53,908
|
# (c) 2013, Jayson Vantuyl <jayson@aggressive.ly>
# (c) 2012-17 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
name: sequence
author: Jayson Vantuyl (!UNKNOWN) <jayson@aggressive.ly>
version_added: "1.0"
short_description: generate a list based on a number sequence
description:
- generates a sequence of items. You can specify a start value, an end value, an optional "stride" value that specifies the number of steps
to increment the sequence, and an optional printf-style format string.
- 'Arguments can be specified as key=value pair strings or as a shortcut form of the arguments string is also accepted: [start-]end[/stride][:format].'
- 'Numerical values can be specified in decimal, hexadecimal (0x3f8) or octal (0600).'
- Starting at version 1.9.2, negative strides are allowed.
- Generated items are strings. Use Jinja2 filters to convert items to preferred type, e.g. ``{{ 1 + item|int }}``.
- See also Jinja2 ``range`` filter as an alternative.
options:
start:
description: number at which to start the sequence
default: 0
type: integer
end:
description: number at which to end the sequence, dont use this with count
type: integer
default: 0
count:
description: number of elements in the sequence, this is not to be used with end
type: integer
default: 0
stride:
description: increments between sequence numbers, the default is 1 unless the end is less than the start, then it is -1.
type: integer
format:
description: return a string with the generated number formatted in
"""
EXAMPLES = """
- name: create some test users
user:
name: "{{ item }}"
state: present
groups: "evens"
with_sequence: start=0 end=32 format=testuser%02x
- name: create a series of directories with even numbers for some reason
file:
dest: "/var/stuff/{{ item }}"
state: directory
with_sequence: start=4 end=16 stride=2
- name: a simpler way to use the sequence plugin create 4 groups
group:
name: "group{{ item }}"
state: present
with_sequence: count=4
- name: the final countdown
debug:
msg: "{{item}} seconds to detonation"
with_sequence: start=10 end=0 stride=-1
- name: Use of variable
debug:
msg: "{{ item }}"
with_sequence: start=1 end="{{ end_at }}"
vars:
- end_at: 10
"""
RETURN = """
_list:
description:
- A list containing generated sequence of items
type: list
elements: str
"""
from re import compile as re_compile, IGNORECASE
from ansible.errors import AnsibleError
from ansible.parsing.splitter import parse_kv
from ansible.plugins.lookup import LookupBase
# shortcut format
# Matches the compact "[start-]end[/stride][:format]" argument form.
# NUM accepts decimal, hex (0x...) and octal (0...) literals.
NUM = "(0?x?[0-9a-f]+)"
SHORTCUT = re_compile(
    "^(" +          # Group 0
    NUM +           # Group 1: Start
    "-)?" +
    NUM +           # Group 2: End
    "(/" +          # Group 3
    NUM +           # Group 4: Stride
    ")?" +
    "(:(.+))?$",    # Group 5, Group 6: Format String
    IGNORECASE
)
class LookupModule(LookupBase):
    """
    sequence lookup module

    Used to generate some sequence of items. Takes arguments in two forms.

    The simple / shortcut form is:

        [start-]end[/stride][:format]

    As indicated by the brackets: start, stride, and format string are all
    optional. The format string is in the style of printf. This can be used
    to pad with zeros, format in hexadecimal, etc. All of the numerical values
    can be specified in octal (i.e. 0664) or hexadecimal (i.e. 0x3f8).
    Negative numbers are not supported.

    Some examples:

        5 -> ["1","2","3","4","5"]
        5-8 -> ["5", "6", "7", "8"]
        2-10/2 -> ["2", "4", "6", "8", "10"]
        4:host%02d -> ["host01","host02","host03","host04"]

    The standard Ansible key-value form is accepted as well. For example:

        start=5 end=11 stride=2 format=0x%02x -> ["0x05","0x07","0x09","0x0a"]

    This format takes an alternate form of "end" called "count", which counts
    some number from the starting value. For example:

        count=5 -> ["1", "2", "3", "4", "5"]
        start=0x0f00 count=4 format=%04x -> ["0f00", "0f01", "0f02", "0f03"]
        start=0 count=5 stride=2 -> ["0", "2", "4", "6", "8"]
        start=1 count=5 stride=2 -> ["1", "3", "5", "7", "9"]

    The count option is mostly useful for avoiding off-by-one errors and errors
    calculating the number of entries in a sequence when a stride is specified.
    """

    def reset(self):
        """set sensible defaults"""
        self.start = 1
        self.count = None
        self.end = None
        self.stride = 1
        self.format = "%d"

    def parse_kv_args(self, args):
        """parse key-value style arguments"""
        for arg in ["start", "end", "count", "stride"]:
            try:
                arg_raw = args.pop(arg, None)
                if arg_raw is None:
                    continue
                # base 0 lets int() accept decimal, hex (0x..) and octal (0..)
                arg_cooked = int(arg_raw, 0)
                setattr(self, arg, arg_cooked)
            except ValueError:
                raise AnsibleError(
                    "can't parse %s=%s as integer"
                    % (arg, arg_raw)
                )
        if 'format' in args:
            self.format = args.pop("format")
        if args:
            # anything left in args was not a recognized option
            raise AnsibleError(
                "unrecognized arguments to with_sequence: %s"
                % list(args.keys())
            )

    def parse_simple_args(self, term):
        """parse the shortcut forms, return True/False"""
        match = SHORTCUT.match(term)
        if not match:
            return False

        # Groups (see SHORTCUT): 0 = "start-", 1 = start, 2 = end,
        # 3 = "/stride", 4 = stride, 5 = ":format", 6 = format.
        _, start, end, _, stride, _, format = match.groups()

        if start is not None:
            try:
                start = int(start, 0)
            except ValueError:
                raise AnsibleError("can't parse start=%s as integer" % start)

        if end is not None:
            try:
                end = int(end, 0)
            except ValueError:
                raise AnsibleError("can't parse end=%s as integer" % end)

        if stride is not None:
            try:
                stride = int(stride, 0)
            except ValueError:
                raise AnsibleError("can't parse stride=%s as integer" % stride)

        # only override the defaults set by reset() for groups that matched
        if start is not None:
            self.start = start
        if end is not None:
            self.end = end
        if stride is not None:
            self.stride = stride
        if format is not None:
            self.format = format

        return True

    def sanity_check(self):
        """validate the parsed options and fold ``count`` into ``end``"""
        if self.count is None and self.end is None:
            raise AnsibleError("must specify count or end in with_sequence")
        elif self.count is not None and self.end is not None:
            raise AnsibleError("can't specify both count and end in with_sequence")
        elif self.count is not None:
            # convert count to end
            if self.count != 0:
                self.end = self.start + self.count * self.stride - 1
            else:
                # count=0 means an empty sequence; stride 0 makes run() skip
                # generation entirely.
                self.start = 0
                self.end = 0
                self.stride = 0
            del self.count
        if self.stride > 0 and self.end < self.start:
            raise AnsibleError("to count backwards make stride negative")
        if self.stride < 0 and self.end > self.start:
            raise AnsibleError("to count forward don't make stride negative")
        # exactly one %-conversion must be present for the per-item format
        if self.format.count('%') != 1:
            raise AnsibleError("bad formatting string: %s" % self.format)

    def generate_sequence(self):
        """yield each item of the configured sequence, formatted as a string"""
        if self.stride >= 0:
            adjust = 1
        else:
            adjust = -1
        # adjust makes the range inclusive of self.end in either direction
        numbers = range(self.start, self.end + adjust, self.stride)

        for i in numbers:
            try:
                formatted = self.format % i
                yield formatted
            except (ValueError, TypeError):
                raise AnsibleError(
                    "problem formatting %r with %r" % (i, self.format)
                )

    def run(self, terms, variables, **kwargs):
        """lookup entry point: expand every term into its generated sequence"""
        results = []

        for term in terms:
            try:
                self.reset()  # clear out things for this iteration
                try:
                    # shortcut form first, otherwise key=value parsing
                    if not self.parse_simple_args(term):
                        self.parse_kv_args(parse_kv(term))
                except AnsibleError:
                    raise
                except Exception as e:
                    raise AnsibleError("unknown error parsing with_sequence arguments: %r. Error was: %s" % (term, e))

                self.sanity_check()
                # stride 0 (count=0) means an intentionally empty sequence
                if self.stride != 0:
                    results.extend(self.generate_sequence())
            except AnsibleError:
                raise
            except Exception as e:
                raise AnsibleError(
                    "unknown error generating sequence: %s" % e
                )

        return results
|
nitzmahone/ansible
|
lib/ansible/plugins/lookup/sequence.py
|
Python
|
gpl-3.0
| 9,093
|
#
# Copyright 2003-2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert Gettext PO localization files to Comma-Separated Value (.csv) files.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/csv2po.html
for examples and usage instructions.
"""
from translate.storage import csvl10n, po
class po2csv:
    """Converts the units of a PO store into CSV units."""

    @staticmethod
    def convertcomments(inputunit):
        # Join all source-code locations into one space-separated string.
        return " ".join(inputunit.getlocations())

    def convertunit(self, inputunit):
        """Convert one PO unit to a CSV unit; header and blank units yield None."""
        csvunit = csvl10n.csvunit()
        if inputunit.isheader():
            return None
            # csvunit.location = "location"
            # csvunit.source = "source"
            # csvunit.target = "target"
        elif inputunit.isblank():
            return None
        else:
            csvunit.location = self.convertcomments(inputunit)
            csvunit.source = inputunit.source
            csvunit.target = inputunit.target
        return csvunit

    def convertplurals(self, inputunit):
        """Convert PO plural units

        We only convert the first plural form. So languages with multiple
        plurals are not handled. For single plural languages we simply
        skip this plural extraction.
        """
        if len(inputunit.target.strings) == 1:  # No plural forms
            return None
        csvunit = csvl10n.csvunit()
        csvunit.location = self.convertcomments(inputunit)
        # index 1 is the first plural form (index 0 is the singular)
        csvunit.source = inputunit.source.strings[1]
        csvunit.target = inputunit.target.strings[1]
        return csvunit

    def convertstore(self, inputstore, columnorder=None):
        """Convert a whole PO store to a CSV store with the given column order."""
        if columnorder is None:
            columnorder = ["location", "source", "target"]
        outputstore = csvl10n.csvfile(fieldnames=columnorder)
        for inputunit in inputstore.units:
            outputunit = self.convertunit(inputunit)
            if outputunit is not None:
                outputstore.addunit(outputunit)
            # plural units produce a second row for the first plural form
            if inputunit.hasplural():
                outputunit = self.convertplurals(inputunit)
                if outputunit is not None:
                    outputstore.addunit(outputunit)
        return outputstore
def convertcsv(inputfile, outputfile, templatefile, columnorder=None):
    """Read a PO store from inputfile, convert it and serialize CSV to outputfile.

    templatefile is unused, but the converter framework requires the
    parameter.  Returns 1 on success, 0 when the input store is empty.
    """
    source_store = po.pofile(inputfile)
    if source_store.isempty():
        return 0
    csv_store = po2csv().convertstore(source_store, columnorder)
    csv_store.serialize(outputfile)
    return 1
def columnorder_callback(option, opt, value, parser):
    """optparse callback: split the comma-separated column list and store it."""
    columns = value.split(",")
    setattr(parser.values, option.dest, columns)
def main(argv=None):
    """Command-line entry point: register the po->csv converter and run it."""
    from translate.convert import convert

    formats = {"po": ("csv", convertcsv)}
    parser = convert.ConvertOptionParser(formats, description=__doc__)
    parser.add_option(
        "",
        "--columnorder",
        dest="columnorder",
        action="callback",
        callback=columnorder_callback,
        type="str",
        default=None,
        help="specify the order and position of columns (location,source,target)",
    )
    # pass the parsed columnorder option through to convertcsv
    parser.passthrough.append("columnorder")
    parser.run(argv)


if __name__ == "__main__":
    main()
|
translate/translate
|
translate/convert/po2csv.py
|
Python
|
gpl-2.0
| 3,950
|
# Copyright 2020 Alfredo de la Fuente - AvanzOSC
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
def pre_init_hook(cr):
    """Pre-install hook: backfill partner_id on stock moves via raw SQL."""
    stored_partner_in_stock_move(cr)
def stored_partner_in_stock_move(cr):
    """Copy each picking's partner onto its stock moves that lack one.

    Runs a single UPDATE instead of an ORM loop; only moves that have a
    picking and no partner yet are touched.
    """
    cr.execute("""
        UPDATE stock_move
        set partner_id = (SELECT stock_picking.partner_id
                          FROM stock_picking
                          WHERE stock_picking.id = stock_move.picking_id)
        WHERE partner_id is null
        AND picking_id is not null
        """)
|
oihane/odoo-addons
|
stock_move_with_partner/init_hook.py
|
Python
|
agpl-3.0
| 536
|
#!/usr/bin/env python
# testing.py
#
# TODO
# have buttons for cvc selection rather than the scale.
# Will do when rest is OK.
# Libraries needed.
from psychopy import visual, event, core, misc, data, gui, sound, log
import datetime, string, sys, os, time
# Participant needs to press y to continue or q to go. Can easily add other letters this
# way for other things.
def ready_cont():
    """Block until the participant presses 'y' (continue) or 'q' (quit)."""
    stim_win.flip()
    waiting = True
    while waiting:
        for key in event.waitKeys():
            if key == 'y':
                waiting = False
            if key == 'q':
                core.quit()
# cvc rates
cvc_slow_rate = 1.0
# A cvc every 395ms with Warker and others (2008)
cvc_faster_rate = 0.395
# interval between each sequence
stim_interval = 1.0
between_tests_interval = 2.0
# Metronome function - This plays the metronome; the timing can also be altered here.
# The timing required needs to be passed to metronome function.
# music.play()
music = sound.Sound(900,secs=0.01)
def metronome(met_time):
    """Play four metronome beats, waiting met_time seconds after each."""
    for _ in range(4):
        music.play()
        core.wait(met_time)
# The metronome alone so the participant can become familiar with
# the speed (no stimuli).
def metronome_alone():
    """Demonstrate the metronome: one slow bar, then three fast bars."""
    stim_win.flip()
    core.wait(stim_interval)
    metronome(cvc_slow_rate)
    for _ in range(3):
        metronome(cvc_faster_rate)
# Instruction variables
welcome_message = """Welcome to the testing session.This will be the same as before, but this time there will be twice as many trials.
Please read the entire row four times out loud in time to the metronome, and try to say one syllable per beat. Try and read the items as fluently as possible. Do not worry if you make mistakes, just keep in time with the beat.
Press y now to hear the speed of the metronome."""
metronome_alone_message = """Playing the metronome..."""
sample_welcome = """The following will be a practice session to familiarize you with the sequences (press y to continue)"""
sample_goodbye = """The practice has ended, please press y when you are ready for the main session """
firstHalf_message = """You are half way through, please say "Half way" and press y when you are ready to continue."""
choose_and_rate = """You will now be presented with some pairs of items. You will have already seen one of these items before, and the other one will be new. From each pair please choose which one you think you have seen before.
Then use the mouse to indicate how confident you are you have seen the item before, using a scale of 1 to 5, with one being not at all confident (guess) and 5 being certain. Click on the item to select your answer, then click accept
Press y key to start."""
generalization_message = """You will now be presented with pairs of items, neither of which you have seen before. Please choose which one out of the pair you find most similar to items from the sequences you have seen before.
Press y key to start."""
welcome_recog = """Please indicate how confident you are that you have seen the item before using a scale of 1 to 5, 1 meaning not at all (just a guess) and 5 meaning certain"""
thank_you = """The testing session is complete.
Please inform a researcher you have finished.
Thank you."""
# Stimuli variables
sample_stim = ['fam gang han kas',
'sim nif gik hing',
'ging nim hik sif']
# First set of 48
real_stim_1 =['nam fas gak hang',
'kig hing nif sim',
'hag fas nak mang',
'sik mig hing nif',
'fam gan has kang',
'sif nik mig hing',
'fang has mag kan',
'hin gif sing kim',
'fang mak gas han',
'kif sing min hig',
'gang fan has mak',
'sin gik hing mif',
'has nam gang fak',
'kif hing sin gim',
'kas fam gan hang',
'kif ning hig sim',
'ham fas kang gan',
'ming sin hig kif',
'fang nam hak gas',
'king sig mif hin',
'has kan mag fang',
'king gif sim hin',
'man hang fas kag',
'sim hing kif gin',
'gang nas fak ham',
'sif him ning kig',
'fak nang has mag',
'him ning sif kig',
'man kas hag fang',
'gin kif sing him',
'gang mas fan hak',
'sif nik hing mig',
'gan ham fas kang',
'nik hif ging sim',
'kas fan gang ham',
'hif sik ning mig',
'gas kang fan ham',
'hif gim ning sik',
'fang gas nam hak',
'hing kif gin sim',
'hak gam fang nas',
'gin mif sing hik',
'fas mang hak gan',
'ming sin hik gif',
'fang man hag kas',
'sig nim king hif',
'gam has nak fang',
'hif ging sin mik']
# For the second set of 48
real_stim_2 = ['gak ham nang fas',
'kim ning sig hif',
'fang nas gam hak',
'gin him sing kif',
'gang kan ham fas',
'ning mik hif sig',
'fas han kang mag',
'king hif sig nim',
'hak fang man gas',
'nif sing hik mig',
'han gang kam fas',
'sim gik ning hif',
'hak fang nam gas',
'kif sig hin ming',
'kas ham fan gang',
'sing him gin kif',
'fang man gas hak',
'kin sing mig hif',
'hak fam gas nang',
'ning hig kif sim',
'fang gan has kam',
'gik nim sif hing',
'fan has gam kang',
'hing nif sim kig',
'mas fang kag han',
'sing hif mik gin',
'fas nang gam hak',
'him ning gik sif',
'nag fang mas hak',
'kif nim sig hing',
'fam hag kas nang',
'mig ning hif sik',
'fak has nang mag',
'ning sig kif him',
'has kam fan gang',
'hif sing min gik',
'has gan kam fang',
'hig nif sim king',
'gak nas fam hang',
'nim sif ging hik',
'nag kas ham fang',
'hing gif kin sim',
'gak ham nang fas',
'hing nik sig mif',
'fan gang ham kas',
'sif king gin him',
'fas nang hag kam',
'ming gin sif hik']
# recognition variables.
# These are two arrays with both foils and legal cvc's. So recog_mix_left will appear on the left
# and recog_mix_right will appear on the right.
recog_mix_left = ['fim',
'fam',
'sik',
'gif',
'haf',
'maf',
'sam',
'has',
'fan',
'saf',
'fak',
'sig',
'mif',
'sim',
'sing',
'gaf',
'kis',
'fig',
'sin',
'nif']
recog_mix_right = ['hif',
'nis',
'naf',
'gis',
'mas',
'fang',
'gas',
'mis',
'fing',
'kif',
'fin',
'fik',
'his',
'sak',
'sang',
'fas',
'nas',
'kas',
'kaf',
'san']
recog_correct = ['has',
'gif',
'sik',
'hif',
'fak',
'fam',
'fang',
'gas',
'kif',
'mas',
'fan',
'fas',
'mif',
'sing',
'sig',
'sim',
'sin',
'nif',
'nas',
'kas',]
# Generalization variables
gen_left = ['mif',
'fiz',
'sat',
'tis',
'fal',
'das',
'tif',
'mis',
'fat',
'mas',
'zis',
'sim',
'sam',
'saz',
'zif',
'fam',
'taf',
'laf',
'zaf',
'sal']
gen_right = ['fit',
'sit',
'sil',
'fad',
'fim',
'fid',
'fil',
'tas',
'sad',
'lis',
'sid',
'maf',
'faz',
'zas',
'dis',
'daf',
'las',
'dif',
'lif',
'siz']
gen_correct = ['tif',
'sit',
'sim',
'fad',
'sil',
'mif',
'das',
'fat',
'sid',
'fam',
'zas',
'mas',
'zif',
'siz',
'tas',
'faz',
'fal',
'lif',
'las',
'dif']
# Compare answer to correct (legal) cvc's and report whether the cvc they
# chose was legal.
def check_answer(cvc_response, correct_array):
    """Return 'correct' if the chosen CVC appears in the legal-item list,
    otherwise 'incorrect'."""
    return 'correct' if cvc_response in correct_array else 'incorrect'
# User data - Experimenter should write in the participant's identification code so
# results can be distinguished.
myDlg = gui.Dlg(title="Participant details")
myDlg.addField('Numeric ID:')
myDlg.show() #you have to call show() for a Dlg (it gets done implicitly by a DlgFromDict)
if myDlg.OK:
sub_id = myDlg.data #this will be a list of data returned from each field added in order
else:
print 'user cancelled, exit.'
core.quit()
# Create output file name. This also gets the response timings.
current_time = datetime.datetime.now() # Retrieve the current time
formatted_time = current_time.strftime("%d-%m-%y_%H-%M-%S") # Format it nicely
sub_id=''.join(sub_id) # converting list to string :/
output_filename = "%s_%s" % (sub_id, formatted_time) # Add participant name and datetime together
# Creating output file.
print "Creating output file"
textfile = open(output_filename +".csv", "w")
# Setting up the monitor and presenting the welcome message.
stim_win = visual.Window(monitor = "testMonitor", units ='norm', fullscr=True)
message = visual.TextStim(stim_win, text = welcome_message, font = "Arial")
message.setAutoDraw(True)
ready_cont()
stim_win.flip()
core.wait(stim_interval)
# The metronome so participant's know what it's like.
# Hmm allow participant to repeat? - Not really fair if
# some participants' run it more than others and pronounce
# cvc's better due to familiarity with the beat.
stim_win.flip()
message.setText(metronome_alone_message)
metronome_alone()
stim_win.flip()
core.wait(between_tests_interval)
# Welcome the participant.
message.setText(sample_welcome)
ready_cont()
stim_win.flip()
core.wait(between_tests_interval)
# The sample loop
for i in range(len(sample_stim)):
message.setText(sample_stim[i])
stim_win.flip()
core.wait(stim_interval)
metronome(cvc_slow_rate)
metronome(cvc_faster_rate)
metronome(cvc_faster_rate)
metronome(cvc_faster_rate)
core.wait(stim_interval)
# Ask participant if they are ready to continue
message.setText(sample_goodbye)
ready_cont()
stim_win.flip()
core.wait(stim_interval)
# The first (48) real stimuli loop
for i in range(len(real_stim_1)):
message.setText(real_stim_1[i])
stim_win.flip()
core.wait(stim_interval)
metronome(cvc_slow_rate)
metronome(cvc_faster_rate)
metronome(cvc_faster_rate)
metronome(cvc_faster_rate)
core.wait(stim_interval)
# Ask participant if they want a break at this point?
# Break needed to compare the two and so we can discern between the
# first 48 and last 48 easily.
message.setText(firstHalf_message)
ready_cont()
stim_win.flip()
core.wait(between_tests_interval)
# The second (48) real stimuli loop
for i in range(len(real_stim_2)):
message.setText(real_stim_2[i])
stim_win.flip()
core.wait(stim_interval)
metronome(cvc_slow_rate)
metronome(cvc_faster_rate)
metronome(cvc_faster_rate)
metronome(cvc_faster_rate)
core.wait(stim_interval)
# The generalisation function - Which have already been seen before TODO Pass the idtime variable here.
def recognition(cvc1, cvc2):
    """Two-alternative forced-choice recognition trial plus confidence rating.

    Shows cvc1 and cvc2 on a rating scale, records which one the
    participant picks (timed), marks it against recog_correct, then asks
    for a 1-5 confidence rating (also timed).  Appends one space-separated
    result row to the global output file.
    """
    stim_win.clearBuffer()
    stim_win.flip()
    # create a window before creating your rating scale.
    RatingScale = visual.RatingScale(stim_win, choices=[cvc1, cvc2], acceptText='accept?', markerStyle='circle', markerColor='DarkGreen')
    # cvc to be rated
    question = "Which item do you find most recognisable?"
    myItem = visual.TextStim(stim_win, text=question, units='norm')
    event.clearEvents()
    # Start participant response timing.
    start_cvc = time.time()
    while RatingScale.noResponse:  # show & update until a response has been made
        myItem.draw()
        RatingScale.draw()
        stim_win.flip()
    # End response timing
    elapsed_cvc = (time.time() - start_cvc)
    rating = RatingScale.getRating()  # get the value indicated
    chosen_cvc = rating
    # mark the response against the previously-seen answer key
    answer = check_answer(rating, recog_correct)
    ## Now the confidence
    RatingScale = visual.RatingScale(stim_win, choices=['1', '2', '3', '4', '5'], acceptText='accept?', markerStyle='circle', markerColor='DarkGreen')
    RatingScale.scaleDescription.setText("""1 = not at all (guess), 5 = Certain.""")
    question = """How confident are you that you have seen """ + rating + " before.\n"
    myItem = visual.TextStim(stim_win, text=question, units='norm')
    event.clearEvents()
    # Response timing for rating.
    start_rating = time.time()
    while RatingScale.noResponse:  # show & update until a response has been made
        myItem.draw()
        RatingScale.draw()
        stim_win.flip()
    elapsed_rating = (time.time() - start_rating)
    rating = RatingScale.getRating()  # get the value indicated by the subject, 'None' if skipped
    # NOTE(review): if getRating() can really return None here, the string
    # concatenation below would raise -- confirm the scale cannot be skipped.
    complete_answer = output_filename + " " + chosen_cvc + " " + rating + " " + answer + " " + str(elapsed_cvc) + " " + str(elapsed_rating) + "\n"
    print complete_answer
    textfile.write(complete_answer)
# Second generalization test - the sets with new consonants.
def generalize(cvc1, cvc2):
    """Generalization trial: choose which unseen item resembles the trained set.

    Same structure as recognition() but with different prompts and the
    gen_correct answer key.  NOTE(review): near-duplicate of recognition();
    a shared helper parameterized by prompt and answer key would remove the
    duplication.
    """
    stim_win.clearBuffer()
    stim_win.flip()
    # create a window before creating your rating scale.
    RatingScale = visual.RatingScale(stim_win, choices=[cvc1, cvc2], acceptText='accept?', markerStyle='circle', markerColor='DarkGreen')
    # cvc to be rated
    question = "Which one do you think is most similar to the items you have seen before?"
    myItem = visual.TextStim(stim_win, text=question, units='norm')
    event.clearEvents()
    # Start participant response timing.
    start_cvc = time.time()
    while RatingScale.noResponse:  # show & update until a response has been made
        myItem.draw()
        RatingScale.draw()
        stim_win.flip()
    # Getting response for cvc choice
    elapsed_cvc = (time.time() - start_cvc)
    rating = RatingScale.getRating()  # get the value indicated
    chosen_cvc = rating
    # mark the response against the generalization answer key
    answer = check_answer(rating, gen_correct)
    ## Now the confidence
    RatingScale = visual.RatingScale(stim_win, choices=['1', '2', '3', '4', '5'], acceptText='accept?', showValue=False, markerStyle='circle', markerColor='DarkGreen')
    RatingScale.scaleDescription.setText("""1 = not at all (guess), 5 = Certain.""")
    question = "How confident are you that " + rating + " was more similar to the items you have seen before?\n"
    myItem = visual.TextStim(stim_win, text=question, units='norm')
    # start timing rating
    start_rating = time.time()
    event.clearEvents()
    while RatingScale.noResponse:  # show & update until a response has been made
        myItem.draw()
        RatingScale.draw()
        stim_win.flip()
    # Get rating time
    elapsed_rating = (time.time() - start_rating)
    rating = RatingScale.getRating()  # get the value indicated by the subject, 'None' if skipped
    complete_answer = output_filename + " " + chosen_cvc + " " + rating + " " + answer + " " + str(elapsed_cvc) + " " + str(elapsed_rating) + "\n"
    print complete_answer
    textfile.write(complete_answer)
# Printing out the table headers
header = "id cvc rating mark cvctime ratingtime\n"
textfile.write(header)
print header
# instructions for choosing and rating.
message.setText(choose_and_rate)
ready_cont()
stim_win.flip()
message.setAutoDraw(False)
# Recognition loop
for i in range(len(recog_mix_left)): # recog_mix_left is being relied on to go through the list, so make sure it's equal to the right.
recognition(recog_mix_left[i], recog_mix_right[i])
# Have a clear row to seperate the two tests.
header = "\n"
textfile.write(header)
print header
stim_win.flip()
message.setText(generalization_message)
message.draw()
ready_cont()
stim_win.flip()
# Generalization loop
for i in range(len(gen_left)): # recog_mix_left is being relied on to go through the list, so make sure it's equal to the right.
generalize(gen_left[i], gen_right[i])
# Saying goodbye
core.wait(between_tests_interval)
message.setText(thank_you)
message.draw()
ready_cont()
stim_win.flip()
# Cleanup
closingtext = "closing " + output_filename + '.csv'
print closingtext
textfile.close
print 'Successful exit'
# Done!
stim_win.close()
core.quit()
|
vivithemage/constraints
|
testing/fas-sif/testing-fas-sif-3.py
|
Python
|
gpl-2.0
| 15,257
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add the 'color' and 'color_increment' columns to Label."""
        # Adding field 'Label.color'
        db.add_column('lizard_waterbalance_label', 'color', self.gf('lizard_map.models.ColorField')(default='000000', max_length=8), keep_default=False)

        # Adding field 'Label.color_increment'
        db.add_column('lizard_waterbalance_label', 'color_increment', self.gf('lizard_map.models.ColorField')(default='000000', max_length=8), keep_default=False)
def backwards(self, orm):
# Deleting field 'Label.color'
db.delete_column('lizard_waterbalance_label', 'color')
# Deleting field 'Label.color_increment'
db.delete_column('lizard_waterbalance_label', 'color_increment')
models = {
'lizard_waterbalance.bucket': {
'Meta': {'object_name': 'Bucket'},
'crop_evaporation_factor': ('django.db.models.fields.FloatField', [], {}),
'drainage_fraction': ('django.db.models.fields.FloatField', [], {}),
'equi_water_level': ('django.db.models.fields.FloatField', [], {}),
'external_discharge': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indraft_fraction': ('django.db.models.fields.FloatField', [], {}),
'init_water_level': ('django.db.models.fields.FloatField', [], {}),
'max_water_level': ('django.db.models.fields.FloatField', [], {}),
'min_crop_evaporation_factor': ('django.db.models.fields.FloatField', [], {}),
'min_water_level': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'open_water': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'buckets'", 'to': "orm['lizard_waterbalance.OpenWater']"}),
'porosity': ('django.db.models.fields.FloatField', [], {}),
'results': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bucket_results'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['lizard_waterbalance.WaterbalanceTimeserie']"}),
'seepage': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bucket_seepage'", 'null': 'True', 'to': "orm['lizard_waterbalance.WaterbalanceTimeserie']"}),
'surface': ('django.db.models.fields.IntegerField', [], {}),
'surface_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'upper_drainage_fraction': ('django.db.models.fields.FloatField', [], {}),
'upper_equi_water_level': ('django.db.models.fields.FloatField', [], {}),
'upper_indraft_fraction': ('django.db.models.fields.FloatField', [], {}),
'upper_init_water_level': ('django.db.models.fields.FloatField', [], {}),
'upper_max_water_level': ('django.db.models.fields.FloatField', [], {}),
'upper_min_water_level': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'upper_porosity': ('django.db.models.fields.FloatField', [], {})
},
'lizard_waterbalance.concentration': {
'Meta': {'unique_together': "(('configuration', 'label'),)", 'object_name': 'Concentration'},
'cl_concentration': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'configuration': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'config_concentrations'", 'to': "orm['lizard_waterbalance.WaterbalanceConf']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'label_concentrations'", 'to': "orm['lizard_waterbalance.Label']"}),
'n_incremental': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'n_lower_concentration': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'p_incremental': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'p_lower_concentration': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'so4_incremental': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'so4_lower_concentration': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'stof_increment': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'stof_lower_concentration': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
'lizard_waterbalance.label': {
'Meta': {'ordering': "('order', 'name')", 'object_name': 'Label'},
'color': ('lizard_map.models.ColorField', [], {'max_length': '8'}),
'color_increment': ('lizard_map.models.ColorField', [], {'max_length': '8'}),
'flow_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_waterbalance.Label']", 'null': 'True', 'blank': 'True'}),
'program_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'lizard_waterbalance.openwater': {
'Meta': {'object_name': 'OpenWater'},
'bottom_height': ('django.db.models.fields.FloatField', [], {}),
'evaporation': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'configuration_evaporation'", 'to': "orm['lizard_waterbalance.WaterbalanceTimeserie']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'infiltration': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'open_water_infiltration'", 'to': "orm['lizard_waterbalance.WaterbalanceTimeserie']"}),
'init_water_level': ('django.db.models.fields.FloatField', [], {}),
'maximum_level': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'open_water_max_level'", 'null': 'True', 'to': "orm['lizard_waterbalance.WaterbalanceTimeserie']"}),
'minimum_level': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'open_water_min_level'", 'null': 'True', 'to': "orm['lizard_waterbalance.WaterbalanceTimeserie']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'precipitation': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'configuration_precipitation'", 'to': "orm['lizard_waterbalance.WaterbalanceTimeserie']"}),
'seepage': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'open_water_seepage'", 'to': "orm['lizard_waterbalance.WaterbalanceTimeserie']"}),
'surface': ('django.db.models.fields.IntegerField', [], {}),
'target_level': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'open_water_targetlevel'", 'null': 'True', 'to': "orm['lizard_waterbalance.WaterbalanceTimeserie']"})
},
'lizard_waterbalance.parameter': {
'Meta': {'object_name': 'Parameter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'parameter': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'sourcetype': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'lizard_waterbalance.pumpingstation': {
'Meta': {'unique_together': "(('open_water', 'label'),)", 'object_name': 'PumpingStation'},
'computed_level_control': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'into': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'label': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pumping_stations'", 'to': "orm['lizard_waterbalance.Label']"}),
'max_discharge': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'open_water': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pumping_stations'", 'to': "orm['lizard_waterbalance.OpenWater']"}),
'percentage': ('django.db.models.fields.FloatField', [], {}),
'results': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'pumping_station_result'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['lizard_waterbalance.WaterbalanceTimeserie']"})
},
'lizard_waterbalance.pumpline': {
'Meta': {'object_name': 'PumpLine'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'pumping_station': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pump_lines'", 'to': "orm['lizard_waterbalance.PumpingStation']"}),
'timeserie': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pump_line_timeserie'", 'to': "orm['lizard_waterbalance.WaterbalanceTimeserie']"})
},
'lizard_waterbalance.sobekbucket': {
'Meta': {'object_name': 'SobekBucket'},
'drainage_indraft': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sobekbucket_drainage_indraft'", 'null': 'True', 'to': "orm['lizard_waterbalance.WaterbalanceTimeserie']"}),
'flow_off': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sobekbucket_flow_off'", 'null': 'True', 'to': "orm['lizard_waterbalance.WaterbalanceTimeserie']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'open_water': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sobekbuckets'", 'to': "orm['lizard_waterbalance.OpenWater']"}),
'surface_type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'lizard_waterbalance.timeseries': {
'Meta': {'object_name': 'Timeseries'},
'default_value': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'stick_to_last_value': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'})
},
'lizard_waterbalance.timeseriesevent': {
'Meta': {'ordering': "['time']", 'object_name': 'TimeseriesEvent'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'timeseries': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'timeseries_events'", 'to': "orm['lizard_waterbalance.Timeseries']"}),
'value': ('django.db.models.fields.FloatField', [], {})
},
'lizard_waterbalance.timeseriesfews': {
'Meta': {'object_name': 'TimeseriesFews'},
'default_value': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'blank': 'True'}),
'fkey': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lkey': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'pkey': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'stick_to_last_value': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'})
},
'lizard_waterbalance.waterbalancearea': {
'Meta': {'ordering': "('name',)", 'object_name': 'WaterbalanceArea'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'lizard_waterbalance.waterbalanceconf': {
'Meta': {'ordering': "('waterbalance_area__name', 'waterbalance_scenario__order')", 'unique_together': "(('waterbalance_area', 'waterbalance_scenario'),)", 'object_name': 'WaterbalanceConf'},
'calculation_end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'calculation_start_date': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labels': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'configuration_results'", 'to': "orm['lizard_waterbalance.Label']", 'through': "orm['lizard_waterbalance.Concentration']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'open_water': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_waterbalance.OpenWater']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'references': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'configuration_references'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['lizard_waterbalance.WaterbalanceTimeserie']"}),
'results': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'configuration_results'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['lizard_waterbalance.WaterbalanceTimeserie']"}),
'waterbalance_area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_waterbalance.WaterbalanceArea']"}),
'waterbalance_scenario': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_waterbalance.WaterbalanceScenario']"})
},
'lizard_waterbalance.waterbalancescenario': {
'Meta': {'ordering': "('order',)", 'object_name': 'WaterbalanceScenario'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'lizard_waterbalance.waterbalancetimeserie': {
'Meta': {'unique_together': "(('name', 'parameter', 'configuration', 'timestep'),)", 'object_name': 'WaterbalanceTimeserie'},
'configuration': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_waterbalance.WaterbalanceConf']", 'null': 'True', 'blank': 'True'}),
'fews_timeseries': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['lizard_waterbalance.TimeseriesFews']"}),
'hint_datetime_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'hint_datetime_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'local_timeseries': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['lizard_waterbalance.Timeseries']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_waterbalance.Parameter']"}),
'timestep': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'use_fews': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
}
}
complete_apps = ['lizard_waterbalance']
|
lizardsystem/lizard-waterbalance
|
lizard_waterbalance/migrations/0003_auto__add_field_label_color__add_field_label_color_increment.py
|
Python
|
gpl-3.0
| 18,021
|
###############################################################################
# Name: info.py #
# Purpose: Global project information #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2008 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""Editra Project information module"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: info.py 71271 2012-04-24 02:26:44Z CJP $"
__revision__ = "$Revision: 71271 $"
#-----------------------------------------------------------------------------#
# Project metadata constants used throughout Editra (about dialog,
# version checks, and user-facing links).
AUTHOR = 'Cody Precord'
VERSION = '0.7.01'
PROG_NAME = 'Editra'
HOME_PAGE = "http://editra.org"
I18N_PAGE = "https://translations.launchpad.net/editra/trunk/+pots/editra"
CONTACT_MAIL = "staff@editra.org"
|
iut-ibk/P8-WSC-GUI
|
3dparty/Editra/src/info.py
|
Python
|
gpl-2.0
| 1,035
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
i_evapo_mh.py
-------------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'March 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
def checkParameterValuesBeforeExecuting(alg, parameters, context):
    """Validate the i.evapo.mh parameter combination before running.

    Exactly one of the original-Hargreaves flag (-h) and the precipitation
    raster may be supplied.  Returns (True, None) when the combination is
    valid, otherwise (False, translated_error_message).
    """
    original_hargreaves = alg.parameterAsBoolean(parameters, '-h', context)
    precipitation_layer = alg.parameterAsLayer(parameters, 'precipitation', context)
    if original_hargreaves and precipitation_layer:
        return False, alg.tr('You can\'t use original Hargreaves flag and precipitation parameter together!')
    if not original_hargreaves and not precipitation_layer:
        return False, alg.tr('If you don\'t use original Hargreaves flag, you must set the precipitation raster parameter!')
    return True, None
|
ghtmtt/QGIS
|
python/plugins/processing/algs/grass7/ext/i_evapo_mh.py
|
Python
|
gpl-2.0
| 1,610
|
#!/usr/bin/env python3
import sys, os
import pickle
from pprint import pprint
from subprocess import Popen, PIPE
import string
def answer_me(question):
    """Ask *question* on stdout and block until the user types yes or no.

    Returns True for "yes" and False for "no" (case-insensitive); any
    other input re-prompts.
    """
    print(question + " [yes/no]")
    while True:
        reply = input('-->').lower()
        if reply == "yes":
            return True
        if reply == "no":
            return False
        print("Please enter yes or no.")
def eprint(*args, **kwargs):
    """print() to standard error instead of standard output."""
    print(*args, file=sys.stderr, **kwargs)
def epprint(*args, **kwargs):
    """pprint() to standard error instead of standard output."""
    pprint(*args, stream=sys.stderr, **kwargs)
def save_obj_text(obj, name):
    """Pickle *obj* to file *name* using protocol 0 (ASCII-compatible)."""
    with open(name, 'wb') as handle:
        pickle.dump(obj, handle, 0)
def save_obj_bin(obj, name):
    """Pickle *obj* to file *name* using the highest binary protocol."""
    with open(name, 'wb') as handle:
        pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)
def load_obj_bin(name):
    """Unpickle and return the object stored in file *name*."""
    with open(name, 'rb') as handle:
        return pickle.load(handle)
def load_obj_text(name):
    """Unpickle and return the object stored in file *name*.

    Identical to load_obj_bin: pickle auto-detects the protocol, so
    text-protocol (0) files are read the same way.
    """
    with open(name, 'rb') as handle:
        return pickle.load(handle)
# taken from: https://gist.github.com/seanh/93666
def format_filename(s):
    """Sanitize *s* into a safe filename.

    Whitelist approach: only ASCII letters, digits and ``-_.() `` survive;
    spaces are then replaced with underscores.  Note: this can still
    produce invalid names such as '', '.' or '..', so callers typically
    prepend a timestamp and append an extension.
    """
    allowed = set("-_.() " + string.ascii_letters + string.digits)
    cleaned = ''.join(ch for ch in s if ch in allowed)
    return cleaned.replace(' ', '_')
# Some useful functions for manipulating the Khan content tree
def print_children_titles(content_tree):
    """Print a "title|slug" table for the direct children of *content_tree*.

    Does nothing when the node has no 'children' key.
    """
    if 'children' not in content_tree:
        return
    print("------------------------")
    print("Topic title | Topic slug")
    print("------------------------")
    for topic in content_tree['children']:
        print("%s|%s" % (topic['title'], topic['slug']))
def print_dict_without_children(dictionary):
    """Print every key/value pair of *dictionary* except the 'children' subtree."""
    for key, value in dictionary.items():
        if key != 'children':
            print(key, value)
def read_unique_data_from_one_column(fname):
    """Read *fname* and return the set of its unique single-column values.

    Every line must contain exactly one whitespace-delimited token;
    otherwise an error is printed and the process exits with status 1.
    """
    out = set()
    with open(fname, 'r') as f:
        for line in f:
            l = line.split()
            if len(l) != 1:
                # BUG FIX: the original error messages referenced the
                # undefined name `listed_content_file`, so bad input
                # raised NameError instead of printing the diagnostic.
                print("ERROR during reading file ", fname)
                print("line: ", line)
                sys.exit(1)
            # Note: str.split() never yields empty/whitespace tokens, so the
            # original "empty line" re-check was unreachable and is dropped.
            out.add(l[0])
    return out
# We reuse this for EMA and Bakalari linking.
# The KA API also returns unlisted content, so we need to filter it out "manually".
def read_listed_content_slugs():
    """Return the set of listed (publicly indexable) content slugs.

    Reads indexable_slugs.txt located next to this module.
    """
    here = os.path.dirname(os.path.realpath(__file__))
    return read_unique_data_from_one_column(here + '/indexable_slugs.txt')
def read_listed_topic_slugs():
    """Return the set of listed (publicly indexable) topic slugs.

    Reads indexable_topic_slugs.txt located next to this module.
    """
    here = os.path.dirname(os.path.realpath(__file__))
    return read_unique_data_from_one_column(here + '/indexable_topic_slugs.txt')
def download_yt_subtitles(lang, sub_format, ytid, dirname = "subs"):
    """Download the *lang* subtitle track of YouTube video *ytid* via youtube-dl.

    The subtitle file is renamed to <dirname>/<ytid>.<lang>.<sub_format>
    and its text is returned.  youtube-dl's stdout/stderr are appended to
    youtubedl.out / youtubedl.err in the working directory.  Exits the
    process (status 1) when youtube-dl reports no matching subtitles.
    """
    video_url = 'https://www.youtube.com/watch?v=%s' % ytid
    yt_download_cmd = 'youtube-dl --sub-lang %s --sub-format %s --write-sub \
    --skip-download --youtube-skip-dash-manifest %s' % (lang, sub_format, video_url)
    FNAME_OUT = "youtubedl.out"
    FNAME_ERR = "youtubedl.err"
    # shell=True: the command string is interpreted by the shell.
    # NOTE(review): lang/sub_format/ytid are interpolated unescaped --
    # fine for trusted callers, a shell-injection risk otherwise.
    p = Popen(yt_download_cmd, shell=True, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    with open(FNAME_OUT, 'a') as f:
        f.write(out.decode('UTF-8'))
    if err:
        with open(FNAME_ERR, 'a') as f:
            f.write(err.decode('UTF-8'))
    # youtubedl automatically download subs to file named after video title
    # Here we extract the name of the file and rename it to something more sane
    fname = out.decode('UTF-8').split('Writing video subtitles to: ')
    if len(fname) < 2:
        # Marker line absent from youtube-dl output => no subtitles written.
        print("ERROR: Requested subtitles were not found on YouTube.")
        print("Look into file %s for more details" % FNAME_ERR)
        sys.exit(1)
        #return None
    if not os.path.isdir(dirname):
        os.mkdir(dirname)
    fname = fname[1].rstrip();
    fname_target = "%s/%s.%s.%s" % (dirname, ytid, lang, sub_format)
    os.rename(fname, fname_target)
    print('Subtitles downloaded to file %s' % fname_target)
    with open(fname_target, 'r') as content_file:
        subs = content_file.read()
    return subs
|
danielhollas/AmaraUpload
|
utils.py
|
Python
|
mit
| 4,769
|
#!/usr/bin/env python
#
# molecular.py
# Copyright (c) 2001, Chris Gonnerman
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of the author nor the names of any contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""molecular.py -- molecular (ngenoid) name generator
This module knows how to generate "random" names for RPG characters.
It uses the same method as the "ngen" name generator by Kimmo Kulovesi,
and in fact it can use the same name files. molecular.py knows how
to merge multiple tables also, which can be handy...
If run as a command-line program, use the following options:
-r namefile -- read the given name file and add to the
current name table.
nnn -- generate nnn (a number) names and print
on standard output.
To generate names from a name file:
python molecular.py -r file 10
As a module (to be imported) you get the following classes and functions:
NameFile (class) -- a file wrapper with a disabled close() method,
used internally and probably not useful otherwise.
nameopen (function) -- opens a file; takes filename and mode options,
searches the default name file directory if not
found in current directory, handles "-" filenames,
and uses NameFile to disable closing of sys.stdin/
sys.stdout.
Molecule (class) -- the meat of the matter. A Molecule instance has
the following methods:
.load(file) -- loads a name file,
which may be a file-like
object with a .readline()
method or a filename as a
string.
.name() -- generate one name and
return it.
"""
__version__ = "1.0"
import string, re, sys, random
# Default search directory for .nam name files, and the [section] names a
# name file may define (see Molecule.load / nameopen).
NAMEDIR = "/usr/local/share/molecular"
NAMESECTIONS = [ "inf", "prefix", "first", "mid", "final", "notes", "end" ]
class NameFile:
    """Proxy around a file object whose close() is a no-op.

    Lets generic code call close() on a name file unconditionally without
    ever closing sys.stdin/sys.stdout.  All other file operations are
    delegated to the wrapped object.  (Legacy Python 2 code: uses apply()
    and __cmp__.)
    """

    # Attributes transparently read from / written to the wrapped file.
    # Name-mangled to _NameFile__file_attributes inside the methods below.
    __file_attributes = ('closed','mode','name','softspace')

    def __init__(self, file):
        # Stored via __setattr__, which routes non-file attributes
        # (like 'fd' itself) into this instance's __dict__.
        self.fd = file

    def close(self):
        # Intentionally does nothing -- that is the whole point of the class.
        pass

    def flush(self):
        return self.fd.flush()

    def isatty(self):
        return self.fd.isatty()

    def fileno(self):
        return self.fd.fileno()

    def read(self, *args):
        return apply(self.fd.read, args)

    def readline(self, *args):
        return apply(self.fd.readline, args)

    def readlines(self, *args):
        return apply(self.fd.readlines, args)

    def seek(self, *args):
        return apply(self.fd.seek, args)

    def tell(self):
        return self.fd.tell()

    def write(self, str):
        return self.fd.write(str)

    def writelines(self, list):
        return self.fd.writelines(list)

    def __repr__(self):
        return repr(self.fd)

    def __getattr__(self, name):
        # Forward the known file attributes to the wrapped file; anything
        # else must already live in this instance's __dict__.
        if name in self.__file_attributes:
            return getattr(self.fd, name)
        else:
            return self.__dict__[name]

    def __setattr__(self, name, value):
        # Mirror of __getattr__: file attributes go to the wrapped file,
        # everything else is stored locally (avoids infinite recursion).
        if name in self.__file_attributes:
            setattr(self.fd, name, value)
        else:
            self.__dict__[name] = value

    def __cmp__(self, file):
        """I'm not sure what the correct behavior is, and therefore
        this implementation is just a guess."""
        # Compares the wrapped file against either a raw file or another
        # NameFile's wrapped file.
        if type(file) == type(self.fd):
            return cmp(self.fd, file)
        else:
            return cmp(self.fd, file.fd)
class NameReader:
    """Stateful line reader: the last line read is kept in self.line.

    An empty string ('') in self.line signals end-of-file, so callers can
    loop with ``while rdr.next():``.  The .line attribute is read directly
    by Molecule.load(), so it is part of the public interface.
    """

    def __init__(self, file):
        self.file = file
        self.line = ""  # most recently read line; "" before any read / at EOF

    def next(self):
        # Read one raw line (newline included); remember and return it.
        self.line = self.file.readline()
        return self.line

    def close(self):
        return self.file.close()
def safeopen(filename, mode):
    """open() variant that returns None instead of raising IOError."""
    try:
        fp = open(filename, mode)
    except IOError:
        fp = None
    return fp
def nameopen(filename, mode):
    """Open a name file, searching several locations.

    '-' maps to stdin/stdout (wrapped in NameFile so close() is harmless).
    Otherwise tries: filename, filename + '.nam', then (read mode only)
    the same two under NAMEDIR; the very last attempt uses plain open()
    so a failure finally raises IOError.
    """
    if filename == "-":
        if "r" in mode:
            return NameFile(sys.stdin)
        else:
            return NameFile(sys.stdout)
    fp = safeopen(filename, mode)
    if fp is None:
        fp = safeopen(filename + ".nam", mode)
    if "r" in mode and fp is None:
        fp = safeopen(NAMEDIR + "/" + filename, mode)
    # last call is open() instead of safeopen() to finally raise
    # the exception if we just can't find the file.
    # NOTE(review): source indentation was lost; this final fallback is
    # reconstructed at function level (applies to write mode too) -- if the
    # original nested it under the read-mode branch, adjust accordingly.
    if fp is None:
        fp = open(NAMEDIR + "/" + filename + ".nam", mode)
    return fp
class Molecule:
    """ngen-style random name generator.

    A name table maps section names ([prefix], [first], [mid], [final],
    ...) to lists of fragments; name() glues one random fragment from each
    of the four generative sections.  (Legacy Python 2 code: uses the
    string module and dict.has_key.)
    """

    def __init__(self):
        # Section name -> list of fragments.  The "" section collects any
        # lines that appear before the first [section] header.
        self.nametbl = {}
        for i in NAMESECTIONS:
            self.nametbl[i] = []
        self.nametbl[""] = []
        self.cursection = self.nametbl[""]

    def load(self, fp):
        """Load a name file; *fp* may be a filename string or a file-like
        object (the latter is wrapped so the close() below is harmless).
        Repeated calls merge multiple tables into the same instance."""
        if type(fp) is type(""):
            fp = nameopen(fp, "r")
        else:
            fp = NameFile(fp)
        rdr = NameReader(fp)
        while rdr.next():
            line = rdr.line[:-1]  # strip the trailing newline
            if len(line) > 0 and line[0] == '[' and line[-1] == ']':
                # [section] header: switch the current section, creating
                # it on first sight.
                line = string.strip(line)[1:-1]
                if not self.nametbl.has_key(line):
                    self.nametbl[line] = []
                self.cursection = self.nametbl[line]
            else:
                self.cursection.append(line)
        fp.close()

    def name(self):
        """Generate and return one random name (prefix+first+mid+final)."""
        n = []
        def choose(type):
            # Append a random fragment from the section, if it has any.
            if len(self.nametbl[type]) > 0:
                n.append(random.choice(self.nametbl[type]))
        choose("prefix")
        choose("first")
        choose("mid")
        choose("final")
        return string.join(n, "")
if __name__ == "__main__":
    # Command-line driver: `-r FILE` loads a name file (repeatable, tables
    # merge); a bare number N prints N generated names.  (Python 2 code.)
    if len(sys.argv) <= 1:
        sys.stderr.write( \
            "Usage: molecular.py [ -r file ] [ nn ]\n")
        sys.exit(0)
    name = Molecule()
    i = 1
    while i < len(sys.argv):
        arg = sys.argv[i]
        if arg == "-r":
            i += 1
            name.load(sys.argv[i])
        else:
            n = int(sys.argv[i])
            lst = []  # NOTE(review): unused; kept as-is
            # NOTE(review): this inner loop reuses `i`, clobbering the
            # outer argv index -- works only for typical invocations.
            for i in range(n):
                print name.name()
        # NOTE(review): source indentation was lost; this increment is
        # reconstructed at while-body level so -r consumes two argv slots.
        i += 1
|
canvasnetworks/canvas
|
deploy/ec2/molecular.py
|
Python
|
bsd-3-clause
| 7,577
|
#!/usr/bin/env python
from hal.main import main
if __name__ == '__main__':
    # Thin launcher script: all logic lives in hal.main.main().
    main()
|
smarkets/hal
|
main.py
|
Python
|
mit
| 88
|
# -*- coding: utf-8 -*-
from django.conf.urls import url, patterns, include
from rest_framework.routers import DefaultRouter
from coffeecups.views import TakeView, ThrowView, CupPolicyView
# DRF router: exposes the cup-tracking viewsets under /takes/, /throws/
# and /policies/ with the default list/detail route set.
router = DefaultRouter()
router.register(r'takes', TakeView)
router.register(r'throws', ThrowView)
router.register(r'policies', CupPolicyView)

# Mount every router-generated URL at the application root.
urlpatterns = patterns(
    '',
    url(r'^', include(router.urls)),
)
|
J1bz/ecoloscore
|
coffeecups/urls.py
|
Python
|
bsd-3-clause
| 406
|
# File: SchedulerServer.py ; This file is part of Twister.
# version: 3.003
# Copyright (C) 2012-2014 , Luxoft
# Authors:
# Cristi Constantin <crconstantin@luxoft.com>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import thread
import threading
import subprocess
import urlparse
import socket
socket.setdefaulttimeout(3)
import xmlrpclib
import logging
import json
import time
import calendar
from datetime import datetime
from ConfigParser import SafeConfigParser
import cherrypy
from cherrypy import _cptools
"""
Scheduler Server
****************
It's used to schedule the start of Central Engine weekly, daily, or one-time.
"""
# # #
def userHome(user):
    """
    Find the home folder for the given user.

    Uses os.path.expanduser('~user') instead of spawning `echo ~user`
    through a shell: same result (unknown users yield the literal
    '~user', just like the shell), no subprocess overhead, and no shell
    injection risk from a crafted *user* value.
    """
    return os.path.expanduser('~' + user)
def _fix_date(date_time):
'''
Receives a date string and returns a Date-Time object and the type of task.
'''
# If DT has both date and time, or a weekday and time
if ' ' in date_time:
part1 = date_time.split()[0]
# If the first part is the Short name of a week day
if part1 in ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']:
try:
dt = time.strptime(date_time, '%a %H:%M:%S')
proj_type = 'weekly'
except:
log.error('Invalid Weekday-time format: `{0}` !'.format(date_time))
return False, ''
# If the first part is the Long name of a week day
elif part1 in ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']:
try:
dt = time.strptime(date_time, '%A %H:%M:%S')
proj_type = 'weekly'
except:
log.error('Invalid Weekday-time format: `{0}` !'.format(date_time))
return False, ''
else:
try:
dt = time.strptime(date_time, '%Y-%m-%d %H:%M:%S')
proj_type = 'one-time'
except:
log.error('Invalid Date-time format: `{0}` !'.format(date_time))
return False, ''
# If DT has only time, and Not date
else:
try:
dt = time.strptime(date_time, '%H:%M:%S')
proj_type = 'daily'
except:
log.error('Invalid Time-only format: `{0}` !'.format(date_time))
return False, ''
return dt, proj_type
# # #
class SchedulerServer(_cptools.XMLRPCController):
    """
    XML-RPC controller holding the scheduled tasks.

    Tasks live in `self.tasks`, a dict mapping the creation timestamp (the
    task key) to a task dict with the fields: user, description,
    project-file, date-time, force, time-limit and proj-type. The dict is
    persisted as JSON in `schedule.json` next to this script.
    """

    def __init__(self):
        global __config__
        log.debug('Initializing Server on http://{sched_ip}:{sched_port}/ ...'.format(**__config__))
        self.acc_lock = thread.allocate_lock()  # Task change lock
        self.tasks = {}
        self._load(v=True)
    #
    def _schedule_path(self):
        # Path of the JSON file shared by _load() and _save().
        global __dir__
        if __dir__:
            return __dir__ + os.sep + 'schedule.json'
        else:
            return 'schedule.json'
    #
    def _load(self, v=False):
        """
        Load tasks from disk. A missing or unreadable schedule file is not
        fatal: the server simply keeps the tasks it already has.
        """
        try:
            f = open(self._schedule_path(), 'r')
            self.tasks = json.load(f)
            f.close()
            if v:
                log.debug('Tasks loaded successfully.')
        except Exception:
            if v:
                log.debug('There are no tasks to load.')
    #
    def _save(self):
        """Persist the current tasks to disk as JSON."""
        f = open(self._schedule_path(), 'w')
        json.dump(self.tasks, f, indent=4)
        f.close()
    #
    @cherrypy.expose
    def rest(self):
        """Tiny HTML view listing every scheduled task."""
        html = '<html>\n<title>Scheduler Server REST</title>\n<body>\n<br>\n{0}\n</body>\n</html>'
        # Fixed: List() returns FLAT task dicts with the task key stored
        # under 'key'; the old `k.keys()[0]` / `**k.values()[0]` assumed a
        # {key: task} mapping and crashed at render time.
        strings = [
            '<b>~ Task {0}:</b> {proj-type} task for user <b>{user}</b> ~<br> '
            'File `{project-file}`, activation date '
            '`{date-time}`, force `{force}`, time limit `{time-limit}`;<br>\n'
            ''.format(k['key'], **k) for k in self.List()
        ]
        return html.format('<br>'.join(strings))

    @cherrypy.expose
    def List(self, user=None):
        """
        Return all available tasks, optionally filtered by user. Each
        returned dict is a copy of the task with its key added under 'key'.
        """
        with self.acc_lock:
            self._load()
            result = []
            for k, v in self.tasks.iteritems():
                if user and v['user'] != user:
                    continue
                d = dict(v)
                d['key'] = k
                result.append(d)
            return result

    @cherrypy.expose
    def Add(self, user, args):
        """
        Create a New task.
        A valid task must have:
        - description
        - date/ day/ time
        - project file
        - force?
        - limit time to x

        Returns the new task key on success, an `*ERROR* ...` string for
        invalid input, or False for an unparsable date-time.
        """
        # Example tasks:
        # {'description':'', 'project-file':'C:/autoexec.bat', 'date-time':'12:12:12', 'force':'0', 'time-limit':'0'}
        # {'description':'', 'project-file':'C:/autoexec.bat', 'date-time':'Wednesday 12:12:12', 'force':'0', 'time-limit':'0'}
        # {'description':'', 'project-file':'C:/autoexec.bat', 'date-time':'2012-12-12 12:12:12', 'force':'1', 'time-limit':'0'}
        # If argument is a string
        # NOTE(review): parse_qs returns {key: [values]} lists, so string
        # callers would fail the isfile() check below — confirm callers
        # always pass plain dicts.
        if type(args) == type(str()):
            task = urlparse.parse_qs(args)
        # If argument is a valid dict
        elif type(args) == type(dict()):
            task = args
        else:
            msg = 'Add task: Invalid type of argument for add task: `{0}` !'.format(type(args))
            log.error(msg)
            return '*ERROR* ' + msg
        descrip = task.get('description')
        proj_file = task.get('project-file')
        proj_dt = task.get('date-time')
        proj_force = task.get('force')
        time_limit = task.get('time-limit')
        if not os.path.isfile(proj_file):
            msg = 'Add task: Invalid file path `{0}` !'.format(proj_file)
            log.error(msg)
            return '*ERROR* ' + msg
        dt, proj_type = _fix_date(proj_dt)
        if not dt: return False
        # Duplicate dates?
        if proj_dt in [v['date-time'] for v in self.tasks.values()]:
            msg = 'Add task: Duplicate date-time: `{0}` !'.format(proj_dt)
            log.error(msg)
            return '*ERROR* ' + msg
        # If force is not valid, reset it. By default, force is enabled.
        if proj_force != '0':
            proj_force = '1'
        try:
            time_limit = int(time_limit)
        except (TypeError, ValueError):
            log.error('Add task: Invalid Time-limit number: `{0}` ! Will default to ZERO.'.format(time_limit))
            time_limit = 0
        if time_limit < 0:
            time_limit = 0
        # This can only be executed by 1 thread at a time,
        # so there will never be 2 threads that create tasks at the same time
        with self.acc_lock:
            created_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
            task_fixed = {
                'user'        : user,
                'description' : descrip,
                'project-file': proj_file,
                'date-time'   : proj_dt,
                'force'       : proj_force,
                'time-limit'  : time_limit,
                'proj-type'   : proj_type
            }
            self.tasks[created_time] = task_fixed
            log.debug('Created {proj-type} task for user {user} :: File `{project-file}`, activation date '
                '`{date-time}`, force `{force}`, time limit `{time-limit}`.\n'.format(**task_fixed))
            self._save()
        return created_time

    @cherrypy.expose
    def Change(self, key, args):
        """
        Update a task. Must supply its key.
        Only the fields present in `args` are changed.
        """
        if not key in self.tasks:
            msg = 'Change task: Invalid task key `{0}` !'.format(key)
            log.error(msg)
            return '*ERROR* ' + msg
        # If argument is a string
        if type(args) == type(str()):
            task = urlparse.parse_qs(args)
        # If argument is a valid dict
        elif type(args) == type(dict()):
            task = args
        else:
            msg = 'Change task: Invalid type of argument for add task: `{0}` !'.format(type(args))
            log.error(msg)
            return '*ERROR* ' + msg
        descrip = task.get('description')
        proj_file = task.get('project-file')
        proj_dt = task.get('date-time')
        proj_force = task.get('force')
        time_limit = task.get('time-limit')
        # If user wants to change project path
        if proj_file:
            if not os.path.isfile(proj_file):
                msg = 'Change task: Invalid file path `{0}` !'.format(proj_file)
                log.error(msg)
                return '*ERROR* ' + msg
        # If user wants to change Date-Time
        if proj_dt:
            dt, proj_type = _fix_date(proj_dt)
            if not dt: return False
        # If user wants to change Force
        if proj_force:
            if proj_force != '0':
                proj_force = '1'
        # If user wants to change time limit
        if time_limit:
            try:
                time_limit = int(time_limit)
            except (TypeError, ValueError):
                log.error('Change task: Invalid Time-limit number: `{0}` ! Will default to ZERO.'.format(time_limit))
                time_limit = 0
            if time_limit < 0:
                time_limit = 0
        # Preparing updated task
        task_fixed = {}
        if descrip:
            if descrip != self.tasks[key]['description']:
                task_fixed['description'] = descrip
        if proj_file:
            task_fixed['project-file'] = proj_file
        if proj_dt:
            task_fixed['date-time'] = proj_dt
            task_fixed['proj-type'] = proj_type
        if proj_force:
            if proj_force != self.tasks[key]['force']:
                task_fixed['force'] = proj_force
        if time_limit is not None:
            if time_limit != self.tasks[key]['time-limit']:
                task_fixed['time-limit'] = time_limit
        # This can only be executed by 1 thread at a time,
        # so there will never be 2 threads that create tasks at the same time
        with self.acc_lock:
            self.tasks[key].update(task_fixed)
            log.debug('Updated task {0} :: File `{project-file}`, activation date `{date-time}`,'
                ' type `{proj-type}`, force `{force}`, time limit `{time-limit}`.\n'.format(key, **self.tasks[key]))
            self._save()
        return True

    @cherrypy.expose
    def Update(self, key, args):
        """
        Update a task. Must supply its key. Alias for Change().
        """
        # Fixed: previously forwarded an undefined `user` variable, which
        # made every Update() call raise NameError.
        return self.Change(key, args)

    @cherrypy.expose
    def Remove(self, user, key):
        """
        Delete a task. Must supply its key.
        """
        if not key in self.tasks:
            msg = 'Remove task: Invalid task key `{0}` !'.format(key)
            log.error(msg)
            return '*ERROR* ' + msg
        with self.acc_lock:
            log.debug('Removing task key `{0}` !'.format(key))
            del self.tasks[key]
            self._save()
        return True

    @cherrypy.expose
    def Delete(self, user, key):
        """
        Delete a task. Must supply its key. Alias for Remove().
        """
        return self.Remove(user, key)
# # #
class threadCheckTasks(threading.Thread):
    '''
    Background thread that periodically checks all scheduled tasks and, when
    a task's activation time arrives, starts the project on Central Engine
    through an XML-RPC connection for the task's user.
    '''
    def __init__(self):
        self.errMsg = True   # suppress repeated "CE is down" log messages
        self.conns = {}      # cached Central Engine proxies, one per user
        threading.Thread.__init__(self)

    def getConnection(self, user):
        '''
        Shortcut function to get or reuse a Central Engine connection.
        Returns None when the Central Engine cannot be reached.
        '''
        proxy = self.conns.get(user)
        # Try to reuse the old connection
        if isinstance(proxy, xmlrpclib.ServerProxy):
            try:
                proxy.echo('ping')
                return proxy
            except Exception:
                log.debug('Disconnected from the Central Engine. Will reconnect...')
                proxy = None
        else:
            log.debug('Connect to the Central Engine...')
            proxy = None
        proxy = xmlrpclib.ServerProxy('http://{u}:EP@{ce_ip}:{ce_port}/'.format(u=user, **__config__))
        try:
            # Try to ping the Central Engine!
            proxy.echo('ping')
            if not self.errMsg:
                log.debug('Successfully connected to Central Engine. Tasks are now enabled.')
                self.errMsg = True
        except Exception:
            if self.errMsg:
                log.debug('Central Engine is down, cannot run Tasks! Trying to reconnect...')
                self.errMsg = False
            proxy = None
        self.conns[user] = proxy
        return proxy

    def run(self):
        '''
        Check the time to see if CE must be started.
        If force, if CE is running, start it again with the new config.
        Else, don't start CE.
        If time limit, the status of CE must be recorded;
        if CE is stopped, the time limit check must be aborted,
        because it means the job is no longer running !
        '''
        time.sleep(0.1)
        global root, programExit
        while not programExit:
            Tasks = root.List()
            # Current time rendered in the three comparable formats
            date_time = time.strptime(time.strftime('%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S')
            pure_time = time.strptime(time.strftime('%H:%M:%S'), '%H:%M:%S')
            week_time = time.strptime(time.strftime('%a %H:%M:%S'), '%a %H:%M:%S')
            # Cycle all known tasks
            for glob_task in Tasks:
                task_id = glob_task.get('key')
                if not task_id:
                    log.error('Badly formed task! No key detected! Debug data: `{0}`.'.format(glob_task))
                    continue
                task = dict(root.tasks[task_id]) # Make a copy
                user = task.get('user')
                proj_file = str(task.get('project-file'))
                proj_dt = task.get('date-time')
                proj_force = str(task.get('force'))
                time_limit = task.get('time-limit')
                if not user:
                    log.error('Fatal error in task `{0}`! No user defined!'.format(task_id))
                    continue
                proxy = self.getConnection(user)
                # No connection for this user
                if not isinstance(proxy, xmlrpclib.ServerProxy):
                    time.sleep(2)
                    continue
                proj_dt, proj_type = _fix_date(proj_dt)
                if not proj_dt: continue
                task.update({'proj-type': proj_type})
                # If task is 1 time and the time is not right, continue
                if proj_type == 'one-time' and proj_dt != date_time:
                    continue
                # If the task is daily and the time is not right, continue
                elif proj_type == 'daily' and proj_dt != pure_time:
                    continue
                # If the task is weekly and time is not right, continue
                elif proj_type == 'weekly' and proj_dt != week_time:
                    continue
                log.debug('Starting {proj-type} task for user {user} :: File `{project-file}`, '
                    'activation date `{date-time}`, force `{force}`, time limit `{time-limit}`...'.format(**task))
                # If Force is disabled and Central Engine is already running, skip.
                # Fixed: the condition was inverted (`proj_force != '0'`), which
                # skipped exactly when force was ENABLED and force-stopped when
                # it was disabled — the opposite of the documented behavior.
                if proj_force == '0' and proxy.get_user_variable(user, 'status') == 'running':
                    log.debug('Central Engine is already running! The task will not force!')
                    continue
                else:
                    # Kill all processes for this user
                    proxy.set_exec_status_all(user, 0, 'Force stop from Scheduler!')
                    time.sleep(1)
                    # Start Central Engine !
                    proxy.set_exec_status_all(user, 2, '{}/twister/config/fwmconfig.xml,{}'.format(userHome(user), proj_file))
                    log.debug('Project file `{project-file}`, started for user `{user}`.'.format(**task))
            # Wait before next cycle
            time.sleep(1)
        log.debug('Closing Tasks thread...')
# # #
def load_config():
    """
    Load the scheduler configuration from `config.ini` next to this script.

    On first run the file does not exist, so it is created with the default
    values as a template. Returns a dict with the keys: ce_ip, ce_port,
    sched_ip, sched_port.
    """
    global __dir__
    cfg_path = __dir__ + '/config.ini'
    cfg_dict = {'ce_ip': '127.0.0.1', 'ce_port': '8000',
                'sched_ip': '0.0.0.0', 'sched_port': '88'}
    cfg = SafeConfigParser({'ALL': '0.0.0.0'})
    cfg.read(cfg_path)
    if not os.path.isfile(cfg_path):
        # First run: write the defaults so the user has a template to edit.
        cfg.add_section('CONFIG')
        for k, v in cfg_dict.iteritems():
            cfg.set('CONFIG', k, v)
        # Fixed: the file handle was previously never closed
        # (`cfg.write(open(path, 'w'))`), leaking the descriptor.
        with open(cfg_path, 'w') as f:
            cfg.write(f)
    else:
        cfg_dict.update( dict(cfg.items('CONFIG')) )
    return cfg_dict
def close():
    """Cherrypy 'exit' hook: signal the task-checker thread to stop."""
    global programExit
    log.debug('\nClosing Scheduler...')
    # threadCheckTasks.run() polls this flag once per second and exits.
    programExit = True
# # #
if __name__ == '__main__':
    # Resolve the directory this script lives in (used for config/schedule files)
    __dir__ = os.path.split(__file__)[0]
    if not __dir__: __dir__ = os.getcwd()
    __config__ = load_config()
    # Shared shutdown flag polled by threadCheckTasks
    programExit = False
    LOGS_PATH = __dir__ + '/Logs/'
    if not os.path.exists(LOGS_PATH):
        os.makedirs(LOGS_PATH)
    # Config cherrypy logging: reuse cherrypy's error log as the global `log`
    cherrypy.log.access_log.propagate = False
    cherrypy.log.error_log.setLevel(logging.DEBUG)
    log = cherrypy.log.error_log
    # Config python logging: one timestamped file per run, plus console output
    dateTag = datetime.now().strftime("%Y-%b-%d %H-%M-%S")
    LOG_FILE = LOGS_PATH + 'Log %s.txt' % dateTag
    logging.basicConfig(level=logging.NOTSET, format='%(asctime)s %(levelname)-8s %(message)s',
        datefmt='%y-%m-%d %H:%M:%S', filename=LOG_FILE, filemode='w')
    console = logging.StreamHandler()
    console.setLevel(logging.NOTSET)
    log.addHandler(console)
    # Root path: the XML-RPC controller that owns the tasks
    root = SchedulerServer()
    # Config for the cherrypy HTTP server (host/port come from config.ini)
    conf = {
        'global': {
            'server.socket_host': str(__config__['sched_ip']),
            'server.socket_port': int(__config__['sched_port']),
            'engine.autoreload.on': False,
            'log.screen': False
        }
    }
    # Start ! The checker thread runs alongside the blocking quickstart() call.
    threadCheckTasks().start()
    cherrypy.engine.subscribe('exit', close)
    cherrypy.quickstart(root, '/', config=conf)
    programExit = True
#
|
ctgriffiths/twister
|
services/Scheduler/SchedulerServer.py
|
Python
|
apache-2.0
| 19,189
|
import sys
from rpython.rlib.rarithmetic import intmask, r_uint, LONG_BIT
from rpython.rlib.objectmodel import we_are_translated
from rpython.rlib import rmmap
from rpython.rlib.debug import debug_start, debug_print, debug_stop
from rpython.rlib.debug import have_debug_prints
from rpython.rtyper.lltypesystem import lltype, rffi
class AsmMemoryManager(object):
    """Manager for the executable memory that holds JIT-generated machine
    code.

    Memory is obtained from the OS in large mmap'ed chunks
    (_allocate_large_block) and carved into smaller pieces on demand.
    Free pieces are tracked by three structures that must stay in sync:
    'free_blocks' maps {start: stop}, 'free_blocks_end' maps {stop: start}
    (so adjacent free blocks can be merged from either side), and
    'blocks_by_size' buckets the free starts by approximate size for fast
    lookup.
    """
    LARGE_ALLOC_SIZE = 1024 * 1024 # 1MB
    MIN_FRAGMENT = 64              # leftovers smaller than this are not tracked
    NUM_INDICES = 32 # good for all sizes between 64 bytes and ~490 KB
    # (untranslated tests only) list of (raw_data, size) mmap allocations,
    # kept so _delete() can return them to the OS
    _allocated = None
    def __init__(self, large_alloc_size = LARGE_ALLOC_SIZE,
                 min_fragment = MIN_FRAGMENT,
                 num_indices = NUM_INDICES):
        self.total_memory_allocated = r_uint(0)
        self.total_mallocs = r_uint(0)
        self.large_alloc_size = large_alloc_size
        self.min_fragment = min_fragment
        self.num_indices = num_indices
        self.free_blocks = {} # map {start: stop}
        self.free_blocks_end = {} # map {stop: start}
        self.blocks_by_size = [[] for i in range(self.num_indices)]
    def get_stats(self):
        """Returns stats for rlib.jit.jit_hooks.stats_asmmemmgr_*()."""
        return (self.total_memory_allocated, self.total_mallocs)
    def malloc(self, minsize, maxsize):
        """Allocate executable memory, between minsize and maxsize bytes,
        and return a pair (start, stop). Does not perform any rounding
        of minsize and maxsize.
        """
        result = self._allocate_block(minsize)
        (start, stop) = result
        # Give back the tail beyond 'maxsize', but only if it is big
        # enough to be worth tracking as a free fragment.
        smaller_stop = start + maxsize
        if smaller_stop + self.min_fragment <= stop:
            self._add_free_block(smaller_stop, stop)
            stop = smaller_stop
            result = (start, stop)
        self.total_mallocs += r_uint(stop - start)
        return result # pair (start, stop)
    def free(self, start, stop):
        """Free a block (start, stop) returned by a previous malloc()."""
        # NOTE(review): this guard looks like an interpreter-teardown
        # safeguard (module globals may be set to None during shutdown)
        # — confirm before removing.
        if r_uint is not None:
            self.total_mallocs -= r_uint(stop - start)
        self._add_free_block(start, stop)
    def open_malloc(self, minsize):
        """Allocate at least minsize bytes. Returns (start, stop)."""
        result = self._allocate_block(minsize)
        (start, stop) = result
        self.total_mallocs += r_uint(stop - start)
        return result
    def open_free(self, middle, stop):
        """Used for freeing the end of an open-allocated block of memory."""
        if stop - middle >= self.min_fragment:
            self.total_mallocs -= r_uint(stop - middle)
            self._add_free_block(middle, stop)
            return True
        else:
            return False # too small to record
    def _allocate_large_block(self, minsize):
        # mmap a new large chunk and add it, whole, to the free lists.
        # Compute 'size' from 'minsize': it must be rounded up to
        # 'large_alloc_size'. Additionally, we use the following line
        # to limit how many mmap() requests the OS will see in total:
        minsize = max(minsize, intmask(self.total_memory_allocated >> 4))
        size = minsize + self.large_alloc_size - 1
        size = (size // self.large_alloc_size) * self.large_alloc_size
        data = rmmap.alloc(size)
        if not we_are_translated():
            if self._allocated is None:
                self._allocated = []
            self._allocated.append((data, size))
            if sys.maxint > 2147483647:
                # Hack to make sure that mcs are not within 32-bits of one
                # another for testing purposes
                rmmap.hint.pos += 0x80000000 - size
        self.total_memory_allocated += r_uint(size)
        data = rffi.cast(lltype.Signed, data)
        return self._add_free_block(data, data + size)
    def _get_index(self, length):
        # Map a block length to a bucket index; bucket boundaries grow
        # geometrically (each step multiplies the size by roughly 4/3).
        i = 0
        while length > self.min_fragment:
            length = (length * 3) >> 2
            i += 1
            if i == self.num_indices - 1:
                break
        return i
    def _add_free_block(self, start, stop):
        # Insert (start, stop) into the free lists, coalescing with any
        # adjacent free blocks. Returns the (possibly merged) start.
        # Merge with the block on the left
        if start in self.free_blocks_end:
            left_start = self.free_blocks_end[start]
            self._del_free_block(left_start, start)
            start = left_start
        # Merge with the block on the right
        if stop in self.free_blocks:
            right_stop = self.free_blocks[stop]
            self._del_free_block(stop, right_stop)
            stop = right_stop
        # Add it to the dicts
        self.free_blocks[start] = stop
        self.free_blocks_end[stop] = start
        i = self._get_index(stop - start)
        self.blocks_by_size[i].append(start)
        return start
    def _del_free_block(self, start, stop):
        # Remove (start, stop) from all three free-list structures.
        del self.free_blocks[start]
        del self.free_blocks_end[stop]
        i = self._get_index(stop - start)
        self.blocks_by_size[i].remove(start)
    def _allocate_block(self, length):
        # Find (or mmap) a free block of at least 'length' bytes, remove
        # it from the free lists, and return it as (start, stop).
        # First look in the group of index i0 if there is a block that is
        # big enough. Following an idea found in the Linux malloc.c, we
        # prefer the oldest entries rather than the newest one, to let
        # them have enough time to coalesce into bigger blocks. It makes
        # a big difference on the purely random test (30% of total usage).
        i0 = self._get_index(length)
        bbs = self.blocks_by_size[i0]
        for j in range(len(bbs)):
            start = bbs[j]
            stop = self.free_blocks[start]
            if start + length <= stop:
                del bbs[j]
                break # found a block big enough
        else:
            # Then look in the larger groups
            i = i0 + 1
            while i < self.num_indices:
                if len(self.blocks_by_size[i]) > 0:
                    # any block found in a larger group is big enough
                    start = self.blocks_by_size[i].pop(0)
                    stop = self.free_blocks[start]
                    break
                i += 1
            else:
                # Exhausted the memory. Allocate the resulting block.
                start = self._allocate_large_block(length)
                stop = self.free_blocks[start]
                i = self._get_index(stop - start)
                assert self.blocks_by_size[i][-1] == start
                self.blocks_by_size[i].pop()
        #
        del self.free_blocks[start]
        del self.free_blocks_end[stop]
        return (start, stop)
    def _delete(self):
        "NOT_RPYTHON"
        # Test-only teardown: return every mmap'ed chunk to the OS.
        if self._allocated:
            for data, size in self._allocated:
                rmmap.free(data, size)
        self._allocated = None
class MachineDataBlockWrapper(object):
    """Sequential bump allocator layered on top of an AsmMemoryManager.

    Hands out aligned chunks of raw memory from one open block at a time;
    finished blocks (with their unused tail returned to the manager when
    large enough) are recorded as (start, stop) pairs in 'allblocks'.
    """

    def __init__(self, asmmemmgr, allblocks):
        self.asmmemmgr = asmmemmgr
        self.allblocks = allblocks
        self.rawstart = 0
        self.rawposition = 0
        self.rawstop = 0

    def done(self):
        """Close the currently open block, if any: give the unused tail
        back to the memory manager and record the block's final extent."""
        if self.rawstart == 0:
            return  # no block is open
        if self.asmmemmgr.open_free(self.rawposition, self.rawstop):
            # the tail was returned, so the block now ends at rawposition
            self.rawstop = self.rawposition
        self.allblocks.append((self.rawstart, self.rawstop))
        self.rawstart = 0
        self.rawposition = 0
        self.rawstop = 0

    def _allocate_next_block(self, minsize):
        # Flush the current block, then open a fresh one of >= 'minsize'.
        self.done()
        self.rawstart, self.rawstop = self.asmmemmgr.open_malloc(minsize)
        self.rawposition = self.rawstart

    def malloc_aligned(self, size, alignment):
        """Reserve 'size' bytes aligned to 'alignment' (a power of two)
        and return their starting address."""
        aligned = (self.rawposition + alignment - 1) & (-alignment)
        if aligned + size > self.rawstop:
            # not enough room left: open a new block and re-align
            self._allocate_next_block(size + alignment - 1)
            aligned = (self.rawposition + alignment - 1) & (-alignment)
        assert aligned + size <= self.rawstop
        self.rawposition = aligned + size
        return aligned
class BlockBuilderMixin(object):
    _mixin_ = True
    # A base class to generate assembler. It is equivalent to just a list
    # of chars, but it is potentially more efficient for that usage.
    # It works by allocating the assembler SUBBLOCK_SIZE bytes at a time.
    # Ideally, this number should be a power of two that fits the GC's most
    # compact allocation scheme (which is so far 35 * WORD for minimark.py).
    WORD = LONG_BIT // 8
    SUBBLOCK_SIZE = 32 * WORD
    # Singly-linked list of GC structs: 'prev' points at the previously
    # filled subblock, 'data' holds SUBBLOCK_SIZE raw chars.
    SUBBLOCK_PTR = lltype.Ptr(lltype.GcForwardReference())
    SUBBLOCK = lltype.GcStruct('SUBBLOCK',
                               ('prev', SUBBLOCK_PTR),
                               ('data', lltype.FixedSizeArray(lltype.Char, SUBBLOCK_SIZE)))
    SUBBLOCK_PTR.TO.become(SUBBLOCK)
    ALIGN_MATERIALIZE = 16
    # list of (relative_position, mark) pairs, lazily created
    gcroot_markers = None
    def __init__(self, translated=None):
        # When translated (or explicitly requested), use the subblock-based
        # implementation; otherwise swap in a plain python list, which is
        # much faster for tests.
        if translated is None:
            translated = we_are_translated()
        if translated:
            self.init_block_builder()
        else:
            self._become_a_plain_block_builder()
        self.rawstart = 0
    def init_block_builder(self):
        """Reset to an empty builder with one fresh subblock."""
        self._cursubblock = lltype.nullptr(self.SUBBLOCK)
        self._baserelpos = -self.SUBBLOCK_SIZE
        self._make_new_subblock()
    def _make_new_subblock(self):
        # Prepend a fresh subblock; _baserelpos is the relative position of
        # the current subblock's first byte.
        nextsubblock = lltype.malloc(self.SUBBLOCK)
        nextsubblock.prev = self._cursubblock
        self._cursubblock = nextsubblock
        self._cursubindex = 0
        self._baserelpos += self.SUBBLOCK_SIZE
    _make_new_subblock._dont_inline_ = True
    def writechar(self, char):
        """Append one byte of machine code."""
        index = self._cursubindex
        if index == self.SUBBLOCK_SIZE:
            self._make_new_subblock()
            index = 0
        self._cursubblock.data[index] = char
        self._cursubindex = index + 1
    def absolute_addr(self):
        # Raw address where this block was materialized (0 before that).
        return self.rawstart
    def overwrite(self, index, char):
        """Overwrite the already-written byte at relative position 'index'."""
        assert 0 <= index < self.get_relative_pos(break_basic_block=False)
        block = self._cursubblock
        index -= self._baserelpos
        # walk back through the subblock list until 'index' lands in 'block'
        while index < 0:
            block = block.prev
            index += self.SUBBLOCK_SIZE
        block.data[index] = char
    def overwrite32(self, index, val):
        """Overwrite 4 bytes at 'index' with 'val', least-significant
        byte first (little-endian)."""
        self.overwrite(index, chr(val & 0xff))
        self.overwrite(index + 1, chr((val >> 8) & 0xff))
        self.overwrite(index + 2, chr((val >> 16) & 0xff))
        self.overwrite(index + 3, chr((val >> 24) & 0xff))
    def get_relative_pos(self, break_basic_block=True):
        """Number of bytes written so far."""
        # 'break_basic_block' is only used in x86
        return self._baserelpos + self._cursubindex
    def copy_to_raw_memory(self, addr):
        # indirection for _become_a_plain_block_builder() and for subclasses
        self._copy_to_raw_memory(addr)
    def _copy_to_raw_memory(self, addr):
        # Copy the subblocks out to raw memory, walking the linked list
        # from the newest (current) subblock back to the oldest.
        block = self._cursubblock
        blocksize = self._cursubindex
        targetindex = self._baserelpos
        while targetindex >= 0:
            dst = rffi.cast(rffi.CCHARP, addr + targetindex)
            for j in range(blocksize):
                dst[j] = block.data[j]
            block = block.prev
            blocksize = self.SUBBLOCK_SIZE
            targetindex -= self.SUBBLOCK_SIZE
        assert not block
    def copy_core_dump(self, addr, offset=0, count=-1):
        """Return the code at 'addr' as a hex string, two digits per byte."""
        HEX = '0123456789ABCDEF'
        dump = []
        src = rffi.cast(rffi.CCHARP, addr)
        end = self.get_relative_pos(break_basic_block=False)
        if count != -1:
            end = offset + count
        for p in range(offset, end):
            o = ord(src[p])
            dump.append(HEX[o >> 4])
            dump.append(HEX[o & 15])
        return ''.join(dump)
    def _dump(self, addr, logname, backend=None):
        """Emit a CODE_DUMP record for the generated code to the debug log."""
        debug_start(logname)
        if have_debug_prints():
            #
            if backend is not None:
                debug_print('BACKEND', backend)
            #
            from rpython.jit.backend.hlinfo import highleveljitinfo
            if highleveljitinfo.sys_executable:
                debug_print('SYS_EXECUTABLE', highleveljitinfo.sys_executable)
            else:
                debug_print('SYS_EXECUTABLE', '??')
            #
            dump = self.copy_core_dump(addr)
            debug_print('CODE_DUMP',
                        '@%x' % addr,
                        '+0 ', # backwards compatibility
                        dump)
            #
        debug_stop(logname)
    def materialize(self, cpu, allblocks, gcrootmap=None):
        """Allocate executable memory, copy the generated code there
        (aligned to ALIGN_MATERIALIZE), register any gcroot markers, and
        return the raw start address."""
        size = self.get_relative_pos()
        align = self.ALIGN_MATERIALIZE
        size += align - 1
        malloced = cpu.asmmemmgr.malloc(size, size)
        allblocks.append(malloced)
        rawstart = malloced[0]
        rawstart = (rawstart + align - 1) & (-align)
        self.rawstart = rawstart
        self.copy_to_raw_memory(rawstart)
        if self.gcroot_markers is not None:
            assert gcrootmap is not None
            for pos, mark in self.gcroot_markers:
                gcrootmap.register_asm_addr(rawstart + pos, mark)
        return rawstart
    def _become_a_plain_block_builder(self):
        # hack purely for speed of tests: monkey-patch this instance's
        # methods to operate on a plain python list instead of subblocks
        self._data = _data = []
        self.writechar = _data.append
        self.overwrite = _data.__setitem__
        def get_relative_pos(break_basic_block=True):
            return len(_data)
        self.get_relative_pos = get_relative_pos
        def plain_copy_to_raw_memory(addr):
            dst = rffi.cast(rffi.CCHARP, addr)
            for i, c in enumerate(_data):
                dst[i] = c
        self._copy_to_raw_memory = plain_copy_to_raw_memory
    def insert_gcroot_marker(self, mark):
        """Record (current_position, mark) for materialize() to register."""
        if self.gcroot_markers is None:
            self.gcroot_markers = []
        self.gcroot_markers.append(
            (self.get_relative_pos(break_basic_block=False), mark))
|
oblique-labs/pyVM
|
rpython/jit/backend/llsupport/asmmemmgr.py
|
Python
|
mit
| 13,596
|
# Copyright 2015-2017, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Information available about Tor from `its manual
<https://www.torproject.org/docs/tor-manual.html.en>`_. This provides three
methods of getting this information...
* :func:`~stem.manual.Manual.from_cache` provides manual content bundled with
Stem. This is the fastest and most reliable method but only as up-to-date as
Stem's release.
* :func:`~stem.manual.Manual.from_man` reads Tor's local man page for
information about it.
* :func:`~stem.manual.Manual.from_remote` fetches the latest manual information
remotely. This is the slowest and least reliable method but provides the most
recent information about Tor.
Manual information includes arguments, signals, and probably most usefully the
torrc configuration options. For example, say we want a little script that told
us what our torrc options do...
.. literalinclude:: /_static/example/manual_config_options.py
:language: python
|
.. image:: /_static/manual_output.png
|
**Module Overview:**
::
is_important - Indicates if a configuration option is of particularly common importance.
download_man_page - Downloads tor's latest man page.
Manual - Information about Tor available from its manual.
| |- from_cache - Provides manual information cached with Stem.
| |- from_man - Retrieves manual information from its man page.
| +- from_remote - Retrieves manual information remotely from tor's latest manual.
|
+- save - writes the manual contents to a given location
.. versionadded:: 1.5.0
"""
import os
import shutil
import sys
import tempfile
import stem.prereq
import stem.util.conf
import stem.util.enum
import stem.util.log
import stem.util.system
from stem.util import _hash_attr
try:
# added in python 2.7
from collections import OrderedDict
except ImportError:
from stem.util.ordereddict import OrderedDict
try:
# added in python 3.2
from functools import lru_cache
except ImportError:
from stem.util.lru_cache import lru_cache
try:
# account for urllib's change between python 2.x and 3.x
import urllib.request as urllib
except ImportError:
import urllib2 as urllib
# Categories a config option can belong to, mirroring the manual's sections.
Category = stem.util.enum.Enum('GENERAL', 'CLIENT', 'RELAY', 'DIRECTORY', 'AUTHORITY', 'HIDDEN_SERVICE', 'TESTING', 'UNKNOWN')

# Location of tor's latest man page source, used by download_man_page().
GITWEB_MANUAL_URL = 'https://gitweb.torproject.org/tor.git/plain/doc/tor.1.txt'

# Manual information bundled with stem, read by Manual.from_cache().
CACHE_PATH = os.path.join(os.path.dirname(__file__), 'cached_tor_manual.cfg')

# Mapping of manual section headers to our Category enumeration.
CATEGORY_SECTIONS = {
  'GENERAL OPTIONS': Category.GENERAL,
  'CLIENT OPTIONS': Category.CLIENT,
  'SERVER OPTIONS': Category.RELAY,
  'DIRECTORY SERVER OPTIONS': Category.DIRECTORY,
  'DIRECTORY AUTHORITY SERVER OPTIONS': Category.AUTHORITY,
  'HIDDEN SERVICE OPTIONS': Category.HIDDEN_SERVICE,
  'TESTING NETWORK OPTIONS': Category.TESTING,
}
class ConfigOption(object):
  """
  Tor configuration attribute found in its torrc.

  :var str name: name of the configuration option
  :var stem.manual.Category category: category the config option was listed
    under, this is Category.UNKNOWN if we didn't recognize the category
  :var str usage: arguments accepted by the option
  :var str summary: brief description of what the option does
  :var str description: longer manual description with details
  """

  def __init__(self, name, category = Category.UNKNOWN, usage = '', summary = '', description = ''):
    self.name = name
    self.category = category
    self.usage = usage
    self.summary = summary
    self.description = description

  def __hash__(self):
    # hash covers every attribute, so __eq__ can compare via hashes
    return _hash_attr(self, 'name', 'category', 'usage', 'summary', 'description')

  def __eq__(self, other):
    if not isinstance(other, ConfigOption):
      return False

    return hash(self) == hash(other)

  def __ne__(self, other):
    return not self == other
@lru_cache()
def _config(lowercase = True):
  """
  Provides a dictionary of our settings.cfg contents. This has a couple of
  categories...

    * manual.important (list) - configuration options considered to be important
    * manual.summary.* (str) - summary descriptions of config options

  :param bool lowercase: uses lowercase keys if **True** to allow for case
    insensitive lookups
  """

  config = stem.util.conf.Config()
  config_path = os.path.join(os.path.dirname(__file__), 'settings.cfg')

  try:
    config.load(config_path)

    result = {}

    for key in config.keys():
      if key.startswith('manual.summary.'):
        result[key.lower() if lowercase else key] = config.get_value(key)

    important = []

    for name in config.get_value('manual.important', [], multiple = True):
      important.append(name.lower() if lowercase else name)

    result['manual.important'] = important
    return result
  except Exception as exc:
    stem.util.log.warn("BUG: stem failed to load its internal manual information from '%s': %s" % (config_path, exc))
    return {}
def _manual_differences(previous_manual, new_manual):
"""
Provides a description of how two manuals differ.
"""
lines = []
for attr in ('name', 'synopsis', 'description', 'commandline_options', 'signals', 'files', 'config_options'):
previous_attr = getattr(previous_manual, attr)
new_attr = getattr(new_manual, attr)
if previous_attr != new_attr:
lines.append("* Manual's %s attribute changed\n" % attr)
if attr in ('name', 'synopsis', 'description'):
lines.append(' Previously...\n\n%s\n' % previous_attr)
lines.append(' Updating to...\n\n%s' % new_attr)
elif attr == 'config_options':
for config_name, config_attr in new_attr.items():
previous = previous_attr.get(config_name)
if previous is None:
lines.append(' adding new config option => %s' % config_name)
elif config_attr != previous:
for attr in ('name', 'category', 'usage', 'summary', 'description'):
if getattr(config_attr, attr) != getattr(previous, attr):
lines.append(' modified %s (%s) => %s' % (config_name, attr, getattr(config_attr, attr)))
for config_name in set(previous_attr.keys()).difference(new_attr.keys()):
lines.append(' removing config option => %s' % config_name)
else:
added_items = set(new_attr.items()).difference(previous_attr.items())
removed_items = set(previous_attr.items()).difference(new_attr.items())
for added_item in added_items:
lines.append(' adding %s => %s' % added_item)
for removed_item in removed_items:
lines.append(' removing %s => %s' % removed_item)
lines.append('\n')
return '\n'.join(lines)
def is_important(option):
  """
  Checks if a configuration option is one of particularly common importance.

  :param str option: tor configuration option to check

  :returns: **bool** that's **True** if this is an important option and
    **False** otherwise
  """

  important_options = _config()['manual.important']
  return option.lower() in important_options
def download_man_page(path = None, file_handle = None, url = GITWEB_MANUAL_URL, timeout = 20):
  """
  Downloads tor's latest man page from `gitweb.torproject.org
  <https://gitweb.torproject.org/tor.git/plain/doc/tor.1.txt>`_. This method is
  both slow and unreliable - please see the warnings on
  :func:`~stem.manual.Manual.from_remote`.

  :param str path: path to save tor's man page to
  :param file file_handle: file handler to save tor's man page to
  :param str url: url to download tor's asciidoc manual from
  :param int timeout: seconds to wait before timing out the request

  :raises: **IOError** if unable to retrieve the manual
  """

  if not path and not file_handle:
    raise ValueError("Either the path or file_handle we're saving to must be provided")
  elif not stem.util.system.is_available('a2x'):
    raise IOError('We require a2x from asciidoc to provide a man page')

  # work in a throwaway directory so partial downloads never leak out
  dirpath = tempfile.mkdtemp()
  asciidoc_path = os.path.join(dirpath, 'tor.1.txt')
  manual_path = os.path.join(dirpath, 'tor.1')

  try:
    try:
      with open(asciidoc_path, 'wb') as asciidoc_file:
        request = urllib.urlopen(url, timeout = timeout)
        shutil.copyfileobj(request, asciidoc_file)
    except Exception as exc:
      # narrowed from a bare 'except:' + sys.exc_info(), matching the
      # 'except Exception as exc' style used elsewhere in this module and
      # no longer swallowing KeyboardInterrupt/SystemExit
      raise IOError("Unable to download tor's manual from %s to %s: %s" % (url, asciidoc_path, exc))

    try:
      # a2x (from asciidoc) renders the downloaded source into a man page
      stem.util.system.call('a2x -f manpage %s' % asciidoc_path)

      if not os.path.exists(manual_path):
        raise OSError('no man page was generated')
    except stem.util.system.CallError as exc:
      raise IOError("Unable to run '%s': %s" % (exc.command, exc.stderr))

    if path:
      try:
        path_dir = os.path.dirname(path)

        if not os.path.exists(path_dir):
          os.makedirs(path_dir)

        shutil.copyfile(manual_path, path)
      except OSError as exc:
        raise IOError(exc)

    if file_handle:
      with open(manual_path, 'rb') as manual_file:
        shutil.copyfileobj(manual_file, file_handle)
        file_handle.flush()
  finally:
    # the scratch directory is always removed, even on failure
    shutil.rmtree(dirpath)
class Manual(object):
  """
  Parsed tor man page. Tor makes no guarantees about its man page format so
  this may not always be compatible. If not you can use the cached manual
  information stored with Stem.

  This does not include every bit of information from the tor manual. For
  instance, I've excluded the 'THE CONFIGURATION FILE FORMAT' section. If
  there's a part you'd find useful then `file an issue
  <https://trac.torproject.org/projects/tor/wiki/doc/stem/bugs>`_ and we can
  add it.

  :var str name: brief description of the tor command
  :var str synopsis: brief tor command usage
  :var str description: general description of what tor does
  :var dict commandline_options: mapping of commandline arguments to their description
  :var dict signals: mapping of signals tor accepts to their description
  :var dict files: mapping of file paths to their description
  :var dict config_options: :class:`~stem.manual.ConfigOption` tuples for tor configuration options
  :var str man_commit: latest tor commit editing the man page when this
    information was cached
  :var str stem_commit: stem commit to cache this manual information
  """

  def __init__(self, name, synopsis, description, commandline_options, signals, files, config_options):
    self.name = name
    self.synopsis = synopsis
    self.description = description
    self.commandline_options = commandline_options
    self.signals = signals
    self.files = files
    self.config_options = config_options

    # commit attributes are only populated when this manual was loaded from a
    # cache (see from_cache)
    self.man_commit = None
    self.stem_commit = None

  @staticmethod
  def from_cache(path = None):
    """
    Provides manual information cached with Stem. Unlike
    :func:`~stem.manual.Manual.from_man` and
    :func:`~stem.manual.Manual.from_remote` this doesn't have any system
    requirements, and is faster too. Only drawback is that this manual
    content is only as up to date as the Stem release we're using.

    :param str path: cached manual content to read, if not provided this uses
      the bundled manual information

    :returns: :class:`~stem.manual.Manual` with our bundled manual information

    :raises: **IOError** if a **path** was provided and we were unable to read it
    """

    conf = stem.util.conf.Config()
    conf.load(path if path else CACHE_PATH, commenting = False)
    config_options = OrderedDict()

    # each config option's attributes are stored under several
    # 'config_options.<name>.*' keys; gather them on first sight of the name

    for key in conf.keys():
      if key.startswith('config_options.'):
        key = key.split('.')[1]

        if key not in config_options:
          config_options[key] = ConfigOption(
            conf.get('config_options.%s.name' % key, ''),
            conf.get('config_options.%s.category' % key, ''),
            conf.get('config_options.%s.usage' % key, ''),
            conf.get('config_options.%s.summary' % key, ''),
            conf.get('config_options.%s.description' % key, '')
          )

    manual = Manual(
      conf.get('name', ''),
      conf.get('synopsis', ''),
      conf.get('description', ''),
      conf.get('commandline_options', {}),
      conf.get('signals', {}),
      conf.get('files', {}),
      config_options,
    )

    # commits are optional metadata written by save()
    manual.man_commit = conf.get('man_commit', None)
    manual.stem_commit = conf.get('stem_commit', None)

    return manual

  @staticmethod
  def from_man(man_path = 'tor'):
    """
    Reads and parses a given man page.

    On OSX the man command doesn't have an '--encoding' argument so its results
    may not quite match other platforms. For instance, it normalizes long
    dashes into '--'.

    :param str man_path: path argument for 'man', for example you might want
      '/path/to/tor/doc/tor.1' to read from tor's git repository

    :returns: :class:`~stem.manual.Manual` for the system's man page

    :raises: **IOError** if unable to retrieve the manual
    """

    # MANWIDTH is set huge so each option stays on a single line
    man_cmd = 'man %s -P cat %s' % ('' if stem.util.system.is_mac() else '--encoding=ascii', man_path)

    try:
      man_output = stem.util.system.call(man_cmd, env = {'MANWIDTH': '10000000'})
    except OSError as exc:
      raise IOError("Unable to run '%s': %s" % (man_cmd, exc))

    categories, config_options = _get_categories(man_output), OrderedDict()

    # parse the well-known option sections first so they get their proper
    # category, then sweep up any remaining '* OPTIONS' sections as UNKNOWN

    for category_header, category_enum in CATEGORY_SECTIONS.items():
      _add_config_options(config_options, category_enum, categories.get(category_header, []))

    for category in categories:
      if category.endswith(' OPTIONS') and category not in CATEGORY_SECTIONS and category != 'COMMAND-LINE OPTIONS':
        _add_config_options(config_options, Category.UNKNOWN, categories.get(category, []))

    return Manual(
      _join_lines(categories.get('NAME', [])),
      _join_lines(categories.get('SYNOPSIS', [])),
      _join_lines(categories.get('DESCRIPTION', [])),
      _get_indented_descriptions(categories.get('COMMAND-LINE OPTIONS', [])),
      _get_indented_descriptions(categories.get('SIGNALS', [])),
      _get_indented_descriptions(categories.get('FILES', [])),
      config_options,
    )

  @staticmethod
  def from_remote(timeout = 60):
    """
    Reads and parses the latest tor man page `from gitweb.torproject.org
    <https://gitweb.torproject.org/tor.git/plain/doc/tor.1.txt>`_. Note that
    while convenient, this reliance on GitWeb means you should alway call with
    a fallback, such as...

    ::

      try:
        manual = stem.manual.from_remote()
      except IOError:
        manual = stem.manual.from_cache()

    In addition to our GitWeb dependency this requires 'a2x' which is part of
    `asciidoc <http://asciidoc.org/INSTALL.html>`_ and... isn't quick.
    Personally this takes ~7.41s, breaking down for me as follows...

      * 1.67s to download tor.1.txt
      * 5.57s to convert the asciidoc to a man page
      * 0.17s for stem to read and parse the manual

    :param int timeout: seconds to wait before timing out the request

    :returns: latest :class:`~stem.manual.Manual` available for tor

    :raises: **IOError** if unable to retrieve the manual
    """

    # download into a temp file, then parse it like a local man page

    with tempfile.NamedTemporaryFile() as tmp:
      download_man_page(file_handle = tmp, timeout = timeout)
      return Manual.from_man(tmp.name)

  def save(self, path):
    """
    Persists the manual content to a given location.

    :param str path: path to save our manual content to

    :raises: **IOError** if unsuccessful
    """

    conf = stem.util.conf.Config()
    conf.set('name', self.name)
    conf.set('synopsis', self.synopsis)
    conf.set('description', self.description)

    if self.man_commit:
      conf.set('man_commit', self.man_commit)

    if self.stem_commit:
      conf.set('stem_commit', self.stem_commit)

    # multi-value entries use 'key => value' lines, one per mapping entry

    for k, v in self.commandline_options.items():
      conf.set('commandline_options', '%s => %s' % (k, v), overwrite = False)

    for k, v in self.signals.items():
      conf.set('signals', '%s => %s' % (k, v), overwrite = False)

    for k, v in self.files.items():
      conf.set('files', '%s => %s' % (k, v), overwrite = False)

    for k, v in self.config_options.items():
      conf.set('config_options.%s.category' % k, v.category)
      conf.set('config_options.%s.name' % k, v.name)
      conf.set('config_options.%s.usage' % k, v.usage)
      conf.set('config_options.%s.summary' % k, v.summary)
      conf.set('config_options.%s.description' % k, v.description)

    conf.save(path)

  def __hash__(self):
    # hash over all content attributes (man_commit/stem_commit deliberately
    # excluded, so equality ignores cache provenance)
    return _hash_attr(self, 'name', 'synopsis', 'description', 'commandline_options', 'signals', 'files', 'config_options')

  def __eq__(self, other):
    return hash(self) == hash(other) if isinstance(other, Manual) else False

  def __ne__(self, other):
    return not self == other
def _get_categories(content):
  """
  Maps the man page's section titles to the lines within each section.

  The man page is headers followed by an indented section. First pass gets
  the mapping of category titles to their lines.

  :param list content: lines of 'man' output

  :returns: **dict** of section title => list of lines within that section
  """

  # skip header and footer lines

  if content and 'TOR(1)' in content[0]:
    content = content[1:]

  if content and content[-1].startswith('Tor'):
    content = content[:-1]

  categories = {}
  category, lines = None, []

  for line in content:
    # replace non-ascii characters
    #
    # \u2019 - smart single quote
    # \u2014 - extra long dash
    # \xb7 - centered dot

    char_for = chr if stem.prereq.is_python_3() else unichr
    line = line.replace(char_for(0x2019), "'").replace(char_for(0x2014), '-').replace(char_for(0xb7), '*')

    if line and not line.startswith(' '):
      # non-indented line is a new section header; flush the previous section
      if category:
        # BUG FIX: guard against an empty section (two adjacent headers)
        # which previously raised an IndexError on lines[-1]
        if lines and lines[-1] == '':
          lines = lines[:-1]  # sections end with an extra empty line

        categories[category] = lines

      category, lines = line.strip(), []
    else:
      if line.startswith(' '):
        line = line[7:]  # contents of a section have a seven space indentation

      lines.append(line)

  if category:
    categories[category] = lines

  return categories
def _get_indented_descriptions(lines):
  """
  Parses the commandline argument and signal sections. These are options
  followed by an indented description. For example...

  ::

    -f FILE
        Specify a new configuration file to contain further Tor configuration
        options OR pass - to make Tor read its configuration from standard
        input. (Default: /usr/local/etc/tor/torrc, or $HOME/.torrc if that file
        is not found)

  Paragraphs that don't belong to any option are dropped, as are options
  without any description.

  :param list lines: lines of the section to parse

  :returns: **dict** mapping each option to its joined description
  """

  entries, current = OrderedDict(), None

  for line in lines:
    indented = line.startswith(' ')

    if line and not indented:
      # unindented line starts a new option entry
      current = line
      entries[current] = []
    elif current and indented:
      entries[current].append(line[4:])

  # drop entries without a description, joining the rest into single strings

  return dict((option, ' '.join(parts)) for option, parts in entries.items() if parts)
def _add_config_options(config_options, category, lines):
  """
  Parses a section of tor configuration options. These have usage information,
  followed by an indented description. For instance...

  ::

    ConnLimit NUM
        The minimum number of file descriptors that must be available to the
        Tor process before it will start. ...

  Parsed :class:`~stem.manual.ConfigOption` entries are added to the given
  *config_options* dictionary in place.

  :param dict config_options: mapping to populate
  :param category: category enum these options belong to
  :param list lines: lines of the section to parse
  """

  pending, usage, description = None, None, []

  def flush():
    # record the option we've been accumulating, if any
    if pending:
      summary = _config().get('manual.summary.%s' % pending.lower(), '')
      config_options[pending] = ConfigOption(pending, category, usage, summary, _join_lines(description).strip())

  if lines and lines[0].startswith('The following options'):
    lines = lines[lines.index(''):]  # drop the initial description

  for line in lines:
    if line and not line.startswith(' '):
      # unindented line starts a new option; flush the previous one
      flush()

      pending, _, remainder = line.partition(' ')
      usage = remainder
      description = []
    else:
      description.append(line[4:] if line.startswith(' ') else line)

  flush()
def _join_lines(lines):
  """
  Joins lines with newlines, collapsing runs of empty lines into a single
  blank line and stripping surrounding whitespace from the result.

  :param list lines: lines to join

  :returns: **str** of the combined lines
  """

  pieces = []

  for entry in lines:
    if entry:
      pieces.append(entry + '\n')
    elif pieces and pieces[-1] != '\n':
      # empty line becomes a paragraph break, but never two in a row
      pieces.append('\n')

  return ''.join(pieces).strip()
|
tparks5/tor-stem
|
stem/manual.py
|
Python
|
lgpl-3.0
| 20,582
|
import numpy as np
from copy import deepcopy
# note the assumption that location of the nucleosome is the first element in
# the corresponding vector
class layerOfNodes:
    """One layer of nodes (e.g. nucleosome detections from a single sample).

    By convention the location of the nucleosome is the first element of
    each node vector.
    """

    # NOTE(review): class-level mutable attribute; __init__ rebinds it per
    # instance, so instances don't actually share this list
    nodes = []

    def __init__(self, listNodes=None):
        '''(listNodes=None)
        Initialize node layer with nodes from the listNodes
        '''
        self.nodes = []
        if listNodes is None:
            # self.nodes = []
            pass
        else:
            for node in listNodes:
                self.nodes.append(node)

    def __del__(self):
        # best-effort cleanup with diagnostics; Python 2 print statements
        print 'deleting layer'
        try:
            self.nodes = []
        except:
            print 'deleting layer failed'
            print len(self.nodes)

    def GetNodes(self, id=None):
        """Return all nodes, or the single node at index `id` if given."""
        if id is None:
            return self.nodes
        else:
            try:
                return self.nodes[id]
            except:
                print 'smth went wrong while getNodes(id)'

    def Location(self, listIDs=None):
        '''(listIDs)
        Returns a list of locations for the nodes in the listIDs.
        If listIDs is not provided all of the locations are returned
        '''
        if listIDs is None:
            output = []
            try:
                for x in self.nodes:
                    # assuming location is the first element in the node vector
                    output.append(x[0])
            except:
                print "smth is wrong with the list of nodes in the layer"
            return output
        else:
            output = []
            try:
                for x in listIDs:
                    # assuming location is the first element in the node vector
                    output.append(self.nodes[x][0])
            except:
                print "smth went wrong with layerOfNodes location"
            return output

    def Vicinity(self, loc, size):
        '''(loc, size)
        returns list of IDs for nodes in the layer that are trapped in vicinity
        (loc, size)
        '''
        # linear scan; IDs are positions within self.nodes
        res = []
        idx = -1
        for node in self.nodes:
            idx += 1
            if abs(node[0] - loc) < size:
                res.append(idx)
        return res

    def Correct(self):
        '''
        Returns whether layerOfNodes is correct
        '''
        # a layer is "correct" when it is non-empty and every node has a
        # non-negative location
        try:
            ndim = len(self.nodes)
        except:
            return False
        if ndim > 0:
            for i in xrange(ndim):
                # assuming index of node location is first element in the
                # vector
                if self.nodes[i][0] < 0:
                    return False
            else:
                return True
        else:
            return False
class hyperGraph:
    """Hypergraph used for multi-layer tracking.

    Each hyperedge is a list with one node ID per layer (-1 marks a missing
    node in that layer). Edges carry a cost and an average location.
    """

    # number of layers in the graph
    nLayers = 0
    # list of nodes
    nodes = []
    # list of layers
    layers = []
    # list of hyperedges (simple list of node IDs from list nodes)
    edges = []
    # list of corresponding hyperedge costs
    costs = []
    # list of corresponding hyperedge locations
    edgeLocs = []
    # vicinity Radius
    okil = 100
    # penalty for missing node
    missPenalty = 50

    def Clean(self):
        '''
        cleans the graph. This is patch code that is not suppose to be here
        Don't do code like this
        '''
        self.nLayers = 0
        self.nodes = []
        self.layers = []
        self.edges = []
        self.costs = []
        self.edgeLocs = []

    def __init__(self, layer=None):
        '''(listNodes=None)
        Initialize hyperGraph with nodes from layer
        '''
        self.nLayers = 0
        self.nodes = []
        self.layers = []
        self.edges = []
        self.costs = []
        self.edgeLocs = []
        if layer is None:
            pass
        else:
            # seed the graph: every node of the first layer becomes a
            # singleton hyperedge with zero cost
            self.layers.append(layer)
            ind = -1
            for node in layer.GetNodes():
                ind += 1
                self.edges.append([ind])
                # assuming location is the first element in the vector
                if hasattr(node,"__len__"):
                    tmp = node[0]
                else:
                    tmp = node
                self.edgeLocs.append(tmp)
                self.costs.append(0)
            self.nLayers = self.nLayers + 1
            # self.nodes = np.array(nodes) #break functionality @addLayer()

    def __del__(self):
        # best-effort teardown with diagnostics (Python 2 prints)
        print 'deleting graph'
        try:
            self.nodes = []
        except:
            print 'deleting graph.nodes failed'
        try:
            for layer in self.layers:
                del layer
            self.layers = []
        except:
            print 'deleting graph.layers failed'
        try:
            self.edges = []
        except:
            print 'deleting graph.edges failed'
        try:
            self.costs = []
        except:
            print 'deleting graph.costs failed'
        try:
            self.edgeLocs = []
        except:
            print 'deleting graph.edgeLocs failed'
        self.Clean()

    def EdgeNode(self, edgeID, edgePos):
        '''(int ID, int POS)
        returns node attributes for the ID hyperedge's node at position POS
        '''
        try:
            nodeID = self.edges[edgeID][edgePos]
        except IndexError:
            print "node index out of bounds for current edge"
            nodeID = -1
        if nodeID >= 0:
            # NOTE(review): self.layers is a list, so this attribute lookup
            # looks broken (perhaps self.layers[edgePos].GetNodes(nodeID)
            # was intended) -- confirm before relying on this method
            return self.layers.getNodes(nodeID)
        else:
            return -1

    def AddLayer(self, layerOfNodes):
        '''(layerOfNodes)
        adds a new layer of nodes to a hypergraph

        Note: the parameter shadows the layerOfNodes class name.
        New edge set = (old edges extended with a dummy node)
                     + (singleton edges for each new node)
                     + (old edges extended with each nearby new node).
        '''
        if layerOfNodes.Correct():
            # add new layer of nodes to the list
            self.layers.append(layerOfNodes)
            # prolong each hyperedge with dummy node
            edgesToDull = deepcopy(self.edges)
            for x in edgesToDull:
                x.append(-1)
            # create new edges for every node in the new layer
            dullToLayer = []
            count = 0
            for n in layerOfNodes.GetNodes():
                tmpList = []
                for i in xrange(self.nLayers):
                    tmpList += list([-1])
                dullToLayer.append(tmpList + [count])
                # assuming location is the first elem in node vector
                self.edgeLocs.append(n[0])
                self.costs.append(0)
                count += 1
            # create new edges for extension of existing edges
            newEdges = []
            ind = -1
            for t in self.edges:
                ind += 1
                candidateNodes = layerOfNodes.Vicinity(
                    self.edgeLocs[ind], self.okil)
                if len(candidateNodes) > 0:
                    for nodeID in candidateNodes:
                        newEdge = t + [nodeID]
                        newEdges.append(newEdge)
                        newEdgeLocation = self.EdgeAveLoc(newEdge)
                        self.edgeLocs.append(newEdgeLocation)
                        self.costs.append(0)
            self.edges = edgesToDull + dullToLayer + newEdges
            self.nLayers += 1
        else:
            print 'smth is wrong with the layer to add'

    def EdgeAveLoc(self, edge):
        '''(edge)
        returns an average hyper edge location
        '''
        # average over present (>= 0) node IDs only; -1000 marks an edge
        # with no real nodes
        cumLoc = 0
        cumCount = 0
        curLayer = -1
        for edgeID in edge:
            curLayer += 1
            if edgeID >= 0:
                try:
                    loc = self.layers[curLayer].Location([edgeID])
                    increment = 1
                except:
                    print 'smth wrong with extracting edge locations', curLayer, edgeID
                    loc = 0
                    increment = 0
                cumLoc += loc[0]
                cumCount += increment
        if cumCount > 0:
            return cumLoc / cumCount
        else:
            return -1000

    def EdgeLoc(self, edge):
        '''(edge)
        returns a vector of hyper edge locations (NB!!! order doesn't matter)
        '''
        cumLoc = []
        cumCount = 0
        curLayer = -1
        for edgeID in edge:
            curLayer += 1
            if edgeID >= 0:
                try:
                    loc = self.layers[curLayer].Location([edgeID])
                    increment = 1
                except:
                    print 'smth wrong with extracting edge locations', curLayer, edgeID
                    loc = 0
                    increment = 0
                cumLoc.append(loc[0])
                cumCount += increment
        if cumCount > 0:
            return cumLoc
        else:
            return None

    def EdgeCostComputation(self):
        '''
        Updates the cost for every hyper edge in the graph

        Cost = sum of |node location - edge location| plus a fixed penalty
        per missing layer.
        '''
        try:
            count = -1
            for edge in self.edges:
                count += 1
                loc = self.edgeLocs[count]
                locations = self.EdgeLoc(edge)
                cumCost = 0
                for x in locations:
                    cumCost += abs(x - loc)
                self.costs[count] = cumCost + (
                    self.nLayers - len(locations)) * self.missPenalty
        except:
            print 'smth went wrong with updating edge costs'

    def NodeToEdgeList(self, layerID, nodeID):
        '''(layerID, nodeID)
        given the node report all edgeIDs that contain that node
        '''
        edgeList = []
        countID = -1
        for edge in self.edges:
            countID += 1
            if edge[layerID] == nodeID:
                edgeList.append(countID)
        return edgeList

    def GetTrackStat(self, listOfEdgeIDs):
        '''(listOfEdgeIDs)
        given the list of hyperedge IDs return a full table of nucleosome stats

        Missing nodes contribute a [-1, -1] placeholder row segment.
        '''
        table = []
        for edgeID in listOfEdgeIDs:
            currentEdge = self.edges[edgeID]
            curLayer = -1
            trackAttr = np.array([])
            for nodeID in currentEdge:
                curLayer += 1
                if nodeID >= 0:
                    nodeAttr = self.layers[curLayer].nodes[nodeID]
                else:
                    nodeAttr = np.array([-1, -1])
                    # TODO make this more intelligently
                trackAttr = np.concatenate((trackAttr, nodeAttr))
            table.append(trackAttr)
        return np.array(table)
def CPLEXprint(graph, fileName):
numVars = len(graph.edges)
numLayers = graph.nLayers
tmpSum = 0
for layer in graph.layers:
tmpSum += len(layer.Location())
numNodes = tmpSum
with open(fileName, 'w') as fout:
# code for printing objective function goes here
print >> fout, 'Minimize'
objectString = ''
for i in xrange(numVars):
objectString += ' +' + str(graph.costs[i]) + ' x' + str(i)
print >> fout, 'object:', objectString
# code for printing constraints goes here
print >> fout, 'Subject To'
countLayer = -1
for layer in graph.layers:
countLayer += 1
countNode = -1
for node in layer.GetNodes():
countNode += 1
boundaryString = 'l' + str(
countLayer) + 'n' + str(countNode) + ':'
coveringEdges = graph.NodeToEdgeList(countLayer, countNode)
if len(coveringEdges) > 0:
for ind in coveringEdges:
boundaryString += ' + x' + str(ind)
boundaryString += ' = 1'
print >> fout, boundaryString
else:
print >> 'l' + str(countLayer) + 'n' + str(
countNode) + ' has no covering edges'
# code for printint variable bountadires goes here
print >> fout, 'Bounds'
for i in xrange(numVars):
print >> fout, '0 <= x' + str(i) + ' <= 1'
def main():
    """Placeholder entry point; returns a zero (success) status code."""
    return 0
# script entry guard; the call to main() is currently disabled
if __name__ == '__main__':
    # main()
    pass
|
ucrbioinfo/ThIEF
|
TrackGenerator/TrNM.py
|
Python
|
mit
| 12,080
|
from flask import Blueprint

# blueprint for the user package; templates resolve from its local
# 'templates' directory
user = Blueprint('user', __name__, template_folder='templates')

# imported after the blueprint exists so views can register routes on it
# (deliberate late import to avoid a circular dependency)
from . import views
|
cburmeister/flask-bones
|
app/user/__init__.py
|
Python
|
mit
| 114
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest import TestCase, mock
from airflow.providers.google.marketing_platform.hooks.display_video import GoogleDisplayVideo360Hook
from tests.providers.google.cloud.utils.base_gcp_mock import mock_base_gcp_hook_default_project_id
API_VERSION = "v1"
GCP_CONN_ID = "google_cloud_default"
class TestGoogleDisplayVideo360Hook(TestCase):
    """Unit tests for GoogleDisplayVideo360Hook, mocking the GCP base hook
    and the googleapiclient service so no network calls are made."""

    def setUp(self):
        # patch the base hook __init__ so no real GCP project is required
        with mock.patch(
            "airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.__init__",
            new=mock_base_gcp_hook_default_project_id,
        ):
            self.hook = GoogleDisplayVideo360Hook(gcp_conn_id=GCP_CONN_ID)

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook._authorize"
    )
    @mock.patch("airflow.providers.google.marketing_platform.hooks."
                "display_video.build")
    def test_gen_conn(self, mock_build, mock_authorize):
        # get_conn should build the doubleclickbidmanager service exactly once
        result = self.hook.get_conn()
        mock_build.assert_called_once_with(
            "doubleclickbidmanager",
            API_VERSION,
            http=mock_authorize.return_value,
            cache_discovery=False,
        )
        self.assertEqual(mock_build.return_value, result)

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn"
    )
    def test_create_query(self, get_conn_mock):
        # create_query should forward the body and return the API response
        body = {"body": "test"}
        return_value = "TEST"
        get_conn_mock.return_value.queries.return_value.createquery.return_value.execute.return_value = (
            return_value
        )
        result = self.hook.create_query(query=body)
        get_conn_mock.return_value.queries.return_value.createquery.assert_called_once_with(
            body=body
        )
        self.assertEqual(return_value, result)

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn"
    )
    def test_delete_query(self, get_conn_mock):
        # delete_query should call the API with the query id (no return value checked)
        query_id = "QUERY_ID"
        return_value = "TEST"
        get_conn_mock.return_value.queries.return_value.deletequery.return_value.execute.return_value = (
            return_value
        )
        self.hook.delete_query(query_id=query_id)
        get_conn_mock.return_value.queries.return_value.deletequery.assert_called_once_with(
            queryId=query_id
        )

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn"
    )
    def test_get_query(self, get_conn_mock):
        # get_query should pass the id and return the API response
        query_id = "QUERY_ID"
        return_value = "TEST"
        get_conn_mock.return_value.queries.return_value.getquery.return_value.execute.return_value = (
            return_value
        )
        result = self.hook.get_query(query_id=query_id)
        get_conn_mock.return_value.queries.return_value.getquery.assert_called_once_with(
            queryId=query_id
        )
        self.assertEqual(return_value, result)

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn"
    )
    def test_list_queries(self, get_conn_mock):
        # list_queries should unwrap the 'queries' key from the response
        queries = ["test"]
        return_value = {"queries": queries}
        get_conn_mock.return_value.queries.return_value.listqueries.return_value.execute.return_value = (
            return_value
        )
        result = self.hook.list_queries()
        get_conn_mock.return_value.queries.return_value.listqueries.assert_called_once_with()
        self.assertEqual(queries, result)

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn"
    )
    def test_run_query(self, get_conn_mock):
        # run_query should pass both the id and the params body
        query_id = "QUERY_ID"
        params = {"params": "test"}
        self.hook.run_query(query_id=query_id, params=params)
        get_conn_mock.return_value.queries.return_value.runquery.assert_called_once_with(
            queryId=query_id, body=params
        )
|
spektom/incubator-airflow
|
tests/providers/google/marketing_platform/hooks/test_display_video.py
|
Python
|
apache-2.0
| 4,876
|
#!/opt/local/bin/python2.7
import os
#import numpy
import math
from numpy import *
from sys import *
import sys
import getopt
### --- Arguments --- ###
# Command line interface: -i <input>, -o <output>, -h for usage.
program = "XYZ-to-ForceField.py"
ifile = ''
ofile = ''
### Read command line args
try:
    myopts, args = getopt.getopt(sys.argv[1:],"i:o:h")
except getopt.GetoptError:
    # unknown flag or missing argument: show usage and exit with status 2
    print program + " -i <inputfile.map> -o <outputfile.map>"
    sys.exit(2)
###############################
# o == option
# a == argument passed to the o
###############################
for o, a in myopts:
    if o == '-i':
        ifile = a
    elif o == '-o':
        ofile = a
    elif o == '-h':
        print program + " -i <inputfile.map> -o <outputfile.map>"
        sys.exit(0)
    else:
        print("Usage: %s -i inputfile.map" % sys.argv[0])
        sys.exit(0)
class Atom(object):
    """Mutable record describing one atom parsed from an XYZ file.

    Attributes: element number ``e``, Cartesian coordinates ``x``/``y``/``z``,
    neighbor bookkeeping (``neighbors``, ``neighborsdist``, ``nearest``),
    and classification fields (``hybridization``, ``charge``, ``int``,
    ``string``, ``atomtype``).
    """

    def __init__(self):
        # numeric attributes all start at zero
        for attr in ('e', 'x', 'y', 'z', 'nearest', 'charge', 'int'):
            setattr(self, attr, 0)
        # per-instance adjacency bookkeeping (fresh lists each time)
        self.neighbors = []
        self.neighborsdist = []
        # textual attributes start empty
        for attr in ('hybridization', 'string', 'atomtype'):
            setattr(self, attr, '')
### --- Functions to get and give element numbers and names --- ###
elementList = ["h","he","li","be","b","c","n","o","f","ne","na","mg","al","si","p","s","cl","ar","k","ca","sc","ti","v","cr","mn","fe","co","ni","cu","zn","ga","ge","as","se","br","kr","rb","sr","y","zr","nb","mo","tc","ru","rh","pd","ag","cd","in","sn","sb","te","i","xe","cs","ba","la","ce","pr","nd","pm","sm","eu","gd","tb","dy","ho","er","tm","yb","lu","hf","ta","w","re","os","ir","pt","au","hg","tl","pb","bi","po","at","rn","fr","ra","ac","th","pa","u","np","pu","am","cm","bk","cf","es","fm","md","no","lr","rf","db","sg","bh","hs","mt","ds","rg","cn","uut","fl","uup","lv","uus","uuo"]
elementNames = ["H","He","Li","Be","B","C","N","O","F","Ne","Na","Mg","Al","Si","P","S","Cl","Ar","K","Ca","Sc","Ti","V","Cr","Mn","Fe","Co","Ni","Cu","Zn","Ga","Ge","As","Se","Br","Kr","Rb","Sr","Y","Zr","Nb","Mo","Tc","Ru","Rh","Pd","Ag","Cd","In","Sn","Sb","Te","I","Xe","Cs","Ba","La","Ce","Pr","Nd","Pm","Sm","Eu","Gd","Tb","Dy","Ho","Er","Tm","Yb","Lu","Hf","Ta","W","Re","Os","Ir","Pt","Au","Hg","Tl","Pb","Bi","Po","At","Rn","Fr","Ra","Ac","Th","Pa","U","Np","Pu","Am","Cm","Bk","Cf","Es","Fm","Md","No","Lr","Rf","Db","Sg","Bh","Hs","Mt","Ds","Rg","Cn","Uut","Fl","Uup","Lv","Uus","Uuo"]
elementLarge = ["na","mg","al","si","p","s","cl","ar","k","ca","sc","ti","v","cr","mn","fe","co","ni","cu","zn","ga","ge","as","se","br","kr","rb","sr","y","zr","nb","mo","tc","ru","rh","pd","ag","cd","in","sn","sb","te","i","xe","cs","ba","la","ce","pr","nd","pm","sm","eu","gd","tb","dy","ho","er","tm","yb","lu","hf","ta","w","re","os","ir","pt","au","hg","tl","pb","bi","po","at","rn","fr","ra","ac","th","pa","u","np","pu","am","cm","bk","cf","es","fm","md","no","lr","rf","db","sg","bh","hs","mt","ds","rg","cn","uut","fl","uup","lv","uus","uuo"]
def getElementNum(at1):
    """Return the 1-based atomic number for element symbol ``at1``.

    Lookup is case-insensitive; raises ValueError for unknown symbols.
    """
    return elementList.index(at1.lower()) + 1
def getElementName(at1):
    """Return the capitalized element symbol for 1-based atomic number ``at1``."""
    return elementNames[at1 - 1]
### --- Distance function --- ###
def getdistance(at1, at2, lol):
    """Euclidean distance between atoms ``at1`` and ``at2`` of ``lol``.

    ``lol`` holds either Atom instances or [element, x, y, z] rows whose
    coordinates may be strings.
    """
    if isinstance(lol[at1], Atom):
        p1 = array([lol[at1].x, lol[at1].y, lol[at1].z])
        p2 = array([lol[at2].x, lol[at2].y, lol[at2].z])
    else:
        p1 = array([float(v) for v in lol[at1][1:4]])
        p2 = array([float(v) for v in lol[at2][1:4]])
    return linalg.norm(p2 - p1)
### --- Angle function --- ###
def getangle(at1, at2, at3, lol):
    """Angle (degrees) at vertex ``at2`` formed by atoms at1-at2-at3."""
    if isinstance(lol[at1], Atom):
        p1 = array([lol[at1].x, lol[at1].y, lol[at1].z])
        p2 = array([lol[at2].x, lol[at2].y, lol[at2].z])
        p3 = array([lol[at3].x, lol[at3].y, lol[at3].z])
    else:
        p1 = array([float(v) for v in lol[at1][1:4]])
        p2 = array([float(v) for v in lol[at2][1:4]])
        p3 = array([float(v) for v in lol[at3][1:4]])
    # angle between the two bond vectors emanating from the vertex
    v1 = p1 - p2
    v2 = p3 - p2
    cosang = dot(v1, v2) / (linalg.norm(v1) * linalg.norm(v2))
    return degrees(arccos(cosang))
### --- Dihedral angle function --- ###
def getdihedral(at1, at2, at3, at4, lol):
    """Signed dihedral angle (degrees) for atoms at1-at2-at3-at4."""
    if isinstance(lol[at1], Atom):
        p1, p2, p3, p4 = [array([lol[a].x, lol[a].y, lol[a].z])
                          for a in (at1, at2, at3, at4)]
    else:
        p1, p2, p3, p4 = [array([float(v) for v in lol[a][1:4]])
                          for a in (at1, at2, at3, at4)]
    # normals of the two planes defined by (1,2,3) and (2,3,4)
    normal1 = cross(p2 - p1, p3 - p2)
    normal2 = cross(p2 - p3, p4 - p3)
    angle = arccos(-dot(normal1, normal2) /
                   (linalg.norm(normal1) * linalg.norm(normal2)))
    # sign depends on which side of the first plane atom 4 lies
    if dot(normal1, p4 - p3) > 0:
        return degrees(angle)
    else:
        return -degrees(angle)
### --- get the distance, angle and dihedrals for a Z-matrix --- ###
def getzmat(i):
    """Build one Z-matrix row for atom i (0-based over the atom list).

    Reads the module-level ``ifilelol`` (XYZ file contents, atoms starting
    at index 2). Row layout: element, then reference-atom/value pairs for
    distance, angle and dihedral as applicable, six -1 placeholders, and a
    trailing newline marker.
    """
    line = []
    # ifilelol indices are offset by 2 (count + comment lines precede atoms)
    line.append(getElementName(ifilelol[i+2].e))
    if i > 0:
        # distance to the previous atom
        line.append(i)
        dist = getdistance(i+1, i+2, ifilelol)
        line.append(dist)
    if i > 1:
        # angle with the two previous atoms
        line.append(i-1)
        angle = getangle(i, i+1, i+2, ifilelol)
        line.append(angle)
    if i > 2:
        # dihedral with the three previous atoms
        line.append(i-2)
        dihedral = getdihedral(i-1, i, i+1, i+2, ifilelol)
        line.append(dihedral)
    # placeholder columns (presumably force-field fields filled in later --
    # TODO confirm against the rest of the script)
    line.append(-1)
    line.append(-1)
    line.append(-1)
    line.append(-1)
    line.append(-1)
    line.append(-1)
    line.append("\n")
    return line
### --- Get the XYZ coordinates from distance, angle and dihedral data --- ###
def getXYZfromZMAT(lol,at4):
    """Convert one Z-matrix row ``lol`` into Cartesian coordinates.

    ``at4`` is the 0-based index of the atom being placed. Relies on the
    module-level ``xyzLOL`` table of already-placed atoms (defined later in
    the script -- assumed rows are [element, x, y, z]; confirm at call site).
    Returns [element, x, y, z].
    """
    ### Set up the variables to be used for the function
    dist = float(lol[2])
    angle = float(lol[4])
    dihedral = float(lol[6])
    angleRad = radians(angle) # * math.pi / 180
    dihedralRad = radians(dihedral) # * math.pi / 180
    # reference atom indices from the Z-matrix row (converted to 0-based)
    at1 = int(lol[5])-1
    at2 = int(lol[3])-1
    at3 = int(lol[1])-1
    x = 0
    y = 0
    z = 0
    line = []
    ### Start to place the atoms in their locations
    if at4 == 0:
        # first atom sits at the origin
        x = 0.00000
        y = 0.00000
        z = 0.00000
    elif at4 == 1:
        # second atom along the x axis at the bond distance
        x = dist
        y = 0.00000
        z = 0.00000
    elif at4 == 2:
        # third atom placed in the xz plane using the bond angle
        a = xyzLOL[at3][1]
        b = dist
        x = a + (dist * cos(math.pi - angleRad))
        y = 0.00000
        z = -dist * sin(math.pi - angleRad)
    elif at4 >= 3:
        ####The at4 x,y,z coordinates from spherical coord
        Sx = dist * sin(angleRad) * cos(dihedralRad)
        Sy = -dist * sin(angleRad) * sin(dihedralRad) #For some reason I need to have a negative here to get the correct sign in the output..... weird
        Sz = dist * cos(angleRad)
        at4L = [Sx, Sy, Sz]
        ###Finding the angle theta
        #Make the list of lists for the three point (z-axis, origin, and translated atom 2) needed for an angle calculation
        Z32 = [[0, 0, 0, 1], [0, 0, 0, 0], [0, xyzLOL[at2][1] - xyzLOL[at3][1], xyzLOL[at2][2] - xyzLOL[at3][2], xyzLOL[at2][3] - xyzLOL[at3][3]]]
        #Get theta using the getangle function
        theta = radians(getangle(0, 1, 2, Z32))
        ###Rodrigues' rotation formula
        #Create the vectprs needed to calculate k
        vector3 = array([xyzLOL[at3][1], xyzLOL[at3][2], xyzLOL[at3][3]])
        vector2 = array([xyzLOL[at2][1], xyzLOL[at2][2], xyzLOL[at2][3]])
        vector0 = array([0, 0, 1])
        #Calculate k for the Rodrigues rotation formula
        k = cross((vector2-vector3), vector0)/linalg.norm(cross((vector2-vector3), vector0))
        #Generate an array for translated 1
        T1 = [(xyzLOL[at1][1]-xyzLOL[at3][1]), (xyzLOL[at1][2]-xyzLOL[at3][2]), (xyzLOL[at1][3]-xyzLOL[at3][3])]
        #Calculate the Rodrigues rotation matrix
        RR23T1 = dot(T1, cos(theta)) + dot(cross(k,T1), sin(theta)) + dot(dot(k,(dot(k,T1))), (1-cos(theta)))
        #Make the list of lists for the four points (x-axis, z-axis, origin, and rotated translated 1) needed for a dihedral calculation
        XZ31 = [[0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0], [0, RR23T1[0], RR23T1[1], RR23T1[2]]]
        #Get phi using the getdihedral function
        phi = radians(getdihedral(0,1,2,3,XZ31))
        ###Rotation matrix
        #Create the array for the rotation matrix including dihedral phi
        RM = array([[cos(phi), sin(phi), 0], [-sin(phi), cos(phi), 0], [0, 0, 1]])
        #Calculate the dot product of the rotation matrix and the coordinates for 4 (from spherical)
        RM4 = dot(RM, at4L)
        #Calculate the rotated coordinates of the rotated coordinates of atom 4
        RRN23RM4 = dot(RM4, cos(-theta)) + dot(cross(k,RM4), sin(-theta)) + dot(dot(k,(dot(k,RM4))), (1-cos(-theta)))
        #Final coordinates that are rotated, rotated and translated
        x = RRN23RM4[0] + xyzLOL[at3][1]
        y = RRN23RM4[1] + xyzLOL[at3][2]
        z = RRN23RM4[2] + xyzLOL[at3][3]
    #Putting everything into a list to send back
    line.append(lol[0])
    line.append(x)
    line.append(y)
    line.append(z)
    return line
### --- Open parent file --- ###
f = open(ifile)
ifileList = f.readlines()
f.close()
### --- Create some variables needed for parsing the input file --- ###
# ifilelength is used to gauge what line we are on and also allows us to determine the number of lines in the file
# ifilelol is a list of lists (lol) that will hold all the data of the input file
# a_list is a temp list that will be appended to the ifilelol
################################
ifileLength = len(ifileList)
#iFileAtomNum = ifileLength - 2
ifilelol = [0] * ifileLength
a_list = []
#### --- Input/parse the input file into a list of lists called ifilelol --- ###
# XYZ format: line 0 = atom count, line 1 = comment, lines 2+ = atoms
for i in range(ifileLength):
    if i == 0:
        ifilelol[i] = int(ifileList[i].rstrip())
        iFileAtomNum = int(ifileList[i].rstrip())
    elif i == 1:
        ifilelol[i] = str(ifileList[i].rstrip())
    elif i >= 2:
        # atom line: element symbol plus three Cartesian coordinates
        line = ifileList[i].rstrip().split()
        ifilelol[i] = Atom()
        ifilelol[i].e = getElementNum(line[0])
        ifilelol[i].x = float(line[1])
        ifilelol[i].y = float(line[2])
        ifilelol[i].z = float(line[3])
### --- Get bonds --- ###
covBonds = []
covHBonds = []
covTMBonds = []
nearestNeighbor = []
neighborStart = [0, 1000000]
### --- Generate bond lists --- ###
# Pairwise distance scan; bonds are assigned by distance cutoffs:
#   <= 2.25 A for heavy-atom pairs (at least one not in elementLarge),
#   <= 3 A for pairs of large/TM elements,
#   <= 1.3 A when either atom is hydrogen.
# Neighbor IDs are stored 1-based on each Atom.
for i in range(0,int(iFileAtomNum)):
    nearestNeighbor.append(list(neighborStart))
    for j in range(0,int(iFileAtomNum)):
        if i != j:
            distij = getdistance(i+2, j+2, ifilelol)
            if j > i:
                # j > i so each pair is only recorded once
                if distij <= 2.25 and ifilelol[i+2].e != 1 and ifilelol[j+2].e != 1 and ((getElementName(ifilelol[i+2].e).lower() not in elementLarge) or (getElementName(ifilelol[j+2].e).lower() not in elementLarge)):
                    # distList = [i+1, j+1, distij]
                    ifilelol[i+2].neighbors.append(j+1)
                    ifilelol[j+2].neighbors.append(i+1)
                    # covBonds.append(distList)
                    #print str(i+2) + "\t" + str(j+2) + "\t" + str(distij)
                elif distij <= 3 and ((getElementName(ifilelol[i+2].e).lower() in elementLarge) and (getElementName(ifilelol[j+2].e).lower() in elementLarge)):
                    # distList = [i+1, j+1, distij]
                    ifilelol[i+2].neighbors.append(j+1)
                    ifilelol[j+2].neighbors.append(i+1)
                    # covTMBonds.append(distList)
                elif distij <= 1.3 and (ifilelol[i+2].e == 1 or ifilelol[j+2].e == 1):
                    # distList = [i+1, j+1, distij]
                    ifilelol[i+2].neighbors.append(j+1)
                    ifilelol[j+2].neighbors.append(i+1)
                    # covHBonds.append(distList)
            if distij < nearestNeighbor[i][1]:
                # NOTE(review): nearestNeighbor[i][1] is never updated, so
                # this tracks against the initial sentinel only -- confirm
                # whether updating the distance here was intended
                # nearestNeighbor[i][0] = j + 1
                # nearestNeighbor[i][1] = distij
                ifilelol[i+2].nearest = j+1
### --- Remove hydrogen bonds from bond list --- ###
#for i in range(0,len(covHBonds)):
#    if (covHBonds[i][0] != nearestNeighbor[covHBonds[i][0]][0]) and (covHBonds[i][1] != nearestNeighbor[covHBonds[i][0]][0]):
#        del covHBonds[i]
#print "Covalent bonds to Hydrogen:"
#print covHBonds
#print "Covalent bonds between \"heavy\" atoms."
#print covBonds
#print "Covalent bonds between TM atoms."
#print covTMBonds
#print "Each Atoms nearest neighbor."
#print nearestNeighbor
### --- Reorder neighbor list to be in numerical order --- ###
# bubble lighter elements to the front of each neighbor list
for i in range(2,ifileLength):
    for j in range(len(ifilelol[i].neighbors)):
        for k in range(len(ifilelol[i].neighbors)):
            if ifilelol[ifilelol[i].neighbors[k]+1].e < ifilelol[ifilelol[i].neighbors[j]+1].e:
                ifilelol[i].neighbors.insert(0, ifilelol[i].neighbors.pop(k))
### --- Determine if the atom is in a ring --- ### NOT DONE!!!!
def isinring(at1, lol):
    # Unfinished/experimental ring detection (see the NOT DONE marker above).
    # As written it only gathers the neighbor lists of at1's neighbors and
    # prints them; the while-loop executes exactly one iteration and the
    # function always returns None.
    neighborList = []
    for i in range(len(lol[at1].neighbors)):
        #print lol[lol[at1].neighbors[i]+1].neighbors
        neighborList.append(lol[lol[at1].neighbors[i]+1].neighbors)
    #print neighborList
    ### --- Search for rings --- ###
    ringSearchDone = "no"
    while ringSearchDone == "no":
        for j in range(len(neighborList)):
            for i in range(len(neighborList[j])):
                print lol[neighborList[j][i]+1].neighbors
            #neighborList.append(lol[lol[neighborList[j][i]+1].neighbors[i]+1].neighbors)
        print neighborList
        # Forced to "yes" so the loop terminates after a single pass.
        ringSearchDone = "yes"
    return
### --- Output strings to discern certain types of atoms based on their neighbors --- ###
# Builds a SMILES-like per-atom environment string: the atom's element,
# followed by " | "-separated neighbor entries, each annotated with its own
# neighbors in (possibly nested) parentheses. The joined string is then
# split back on '|' into a list per atom.
ffsmilelol = []
for i in range(2,ifileLength):
    line = [str(getElementName(ifilelol[i].e))]
    ###line = [getElementName(ifilelol[i].e), ifilelol[i].x, ifilelol[i].y, ifilelol[i].z, ifilelol[i].neighbors, ifilelol[i].neighborsdist, ifilelol[i].nearest, ifilelol[i].charge]
    for atom in ifilelol[i].neighbors:
        line.append(" | ")
        line.append(str(getElementName(ifilelol[atom+1].e)))
        for subatoms in ifilelol[atom+1].neighbors:
            # Skip the back-reference to the neighbor we came from.
            if subatoms + 1 != atom:
                line.append("(")
                line.append(str(getElementName(ifilelol[subatoms+1].e)))
                if len(ifilelol[subatoms+1].neighbors) > 1:
                    line.append("(")
                    for subsubatoms in ifilelol[subatoms+1].neighbors:
                        if subsubatoms + 1 != subatoms:
                            line.append(str(getElementName(ifilelol[subsubatoms+1].e)))
                    line.append(")")
                line.append(")")
    ffsmilelol.append(''.join(str(e) for e in line).split('|'))
#    print ''.join(str(e) for e in line)
    line[:] = []
print ffsmilelol
#isinring(2, ifilelol)
#### --- Output file in Z-matrix format --- ###
#f = open(ofile, 'w+')
#f.write(str(iFileAtomNum) + "\n")
#f.write(ifilelol[1] + "\n")
#for i in range(0,int(iFileAtomNum)):
# linetemp = [x for x in zmatlol[i] if x != -1]
# line = '\t'.join(str(e) for e in linetemp) #, e != -1)
# f.write(line)
#f.close()
#C 0.0 0.0 0.0
#H 1.08667477625 0.0 0.0
#C -0.710304820313 0.0 -1.19862882797
#H -0.190303313054 0.00285187546684 -2.15257116105
#C -2.10196298123 0.00218056792464 -1.16393829937
#H -2.63732817294 0.0171920874033 -2.10597724946
#C -2.81964532451 -0.00437264458075 0.0480714706799
#P -4.6702532026 -0.0170742413099 0.00664801286375
#Pd -5.54865169312 0.128925626685 2.17205850687
######################################################################
### END OF SCRIPT
######################################################################
|
PHYC/Assign_Forcefield
|
XYZ-to-ForceField.py
|
Python
|
mit
| 14,964
|
# pyc2py - The smart python decompiler.
# Copyright (C) 2012 Centre National de la Recherche Scientifique
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Developper: Etienne Duble
# Contact me at: etienne _dot_ duble _at_ imag _dot_ fr
from inspection.data import get_data
from inspection.functions import show_function
from tools import print_doc_string, print_code_line
import inspect
def show_class(class_name, cls, known_globals):
    """Print a decompiled source representation of *cls* (Python 2).

    Emits the ``class name(bases):`` header, the class docstring, the
    class-level data attributes, then each method defined directly on the
    class via show_function().
    """
    base_names = []
    for base in cls.__bases__:
        # str(base) looks like "<class 'pkg.mod.Name'>"; extract the short name.
        baseclass_fullname = str(base).split("'")[1]
        baseclass_name = baseclass_fullname.split('.')[-1]
        base_names.append(baseclass_name)
    print 'class %s(%s):' % (class_name, ', '.join(base_names))
    print_doc_string(1, cls)
    # retrieve class attributes
    for name, value in get_data(cls):
        print_code_line(1, name + ' = ' + repr(value))
    # retrieve methods
    for class_attr in inspect.classify_class_attrs(cls):
        #print '#', class_attr
        name, kind, owner_cls, attr = class_attr
        # a method may be inherited from a parent class
        # we want to only print methods defined in this
        # object.
        # also we only print methods.
        if owner_cls == cls and kind != 'data':
            show_function(name, attr, known_globals, indent=1)
            print
# print class_name, name
# if class_name.startswith('rs232') and name == 'reset':
    print
|
YelaSeamless/pyc2py
|
inspection/classes.py
|
Python
|
gpl-3.0
| 1,904
|
"""Leetcode 48. Rotate Image
Medium
URL: https://leetcode.com/problems/rotate-image/
You are given an n x n 2D matrix representing an image.
Rotate the image by 90 degrees (clockwise).
Note:
You have to rotate the image in-place, which means you have to modify the
input 2D matrix directly. DO NOT allocate another 2D matrix and do the rotation.
Example 1:
Given input matrix =
[
[1,2,3],
[4,5,6],
[7,8,9]
],
rotate the input matrix in-place such that it becomes:
[
[7,4,1],
[8,5,2],
[9,6,3]
]
Example 2:
Given input matrix =
[
[ 5, 1, 9,11],
[ 2, 4, 8,10],
[13, 3, 6, 7],
[15,14,12,16]
],
rotate the input matrix in-place such that it becomes:
[
[15,13, 2, 5],
[14, 3, 4, 1],
[12, 6, 8, 9],
[16, 7,10,11]
]
"""
class SolutionReverseSwapAlongDiagonal(object):
    def rotate(self, matrix):
        """
        :type matrix: List[List[int]]
        :rtype: None Do not return anything, modify matrix in-place instead.

        Rotate the n x n matrix 90 degrees clockwise in place: transpose
        the matrix along its main diagonal, then reverse every row.

        Time complexity: O(n^2).
        Space complexity: O(1), since in-place.
        """
        size = len(matrix)
        # Transpose in place: mirror the strict upper triangle into the
        # lower triangle.
        for row in range(size):
            for col in range(row + 1, size):
                matrix[row][col], matrix[col][row] = (
                    matrix[col][row], matrix[row][col])
        # Reversing each row completes the clockwise quarter turn.
        for row in matrix:
            row.reverse()
def main():
    # Smoke test: rotate the two example matrices from the problem statement
    # and print the results (Python 2 print statements).
    matrix = [
        [1,2,3],
        [4,5,6],
        [7,8,9]
    ]
    SolutionReverseSwapAlongDiagonal().rotate(matrix)
    print matrix
    matrix = [
        [ 5, 1, 9,11],
        [ 2, 4, 8,10],
        [13, 3, 6, 7],
        [15,14,12,16]
    ]
    SolutionReverseSwapAlongDiagonal().rotate(matrix)
    print matrix
if __name__ == '__main__':
main()
|
bowen0701/algorithms_data_structures
|
lc0048_rotate_image.py
|
Python
|
bsd-2-clause
| 1,807
|
from csrv.model import actions
from csrv.model import events
from csrv.model import modifiers
from csrv.model.cards import card_info
from csrv.model.cards import event
class Card01036RunAction(actions.PlayEventAction):
  """Play action for Card01036: run R&D and access 1 extra card."""
  def __init__(self, game, player, card=None):
    actions.PlayEventAction.__init__(self, game, player, card=card)
  def resolve(self, response=None, ignore_clicks=False, ignore_all_costs=False):
    """Resolve the event, then set up and begin the R&D run."""
    actions.PlayEventAction.resolve(
        self,
        response=response,
        ignore_clicks=ignore_clicks,
        ignore_all_costs=ignore_all_costs)
    # The access modifier must be registered before the run starts; it
    # expires automatically when the run ends (until=events.RunEnds).
    modifiers.NumRndCardsToAccess(
        self.game, 2,
        server=self.game.corp.rnd,
        until=events.RunEnds)
    new_run = self.game.new_run(self.game.corp.rnd)
    new_run.begin()
class Card01036(event.Event):
  """Shaper run event (Core set #36): run R&D, accessing 2 cards total."""
  # Card metadata consumed by the game engine / card database.
  NAME = u"Card01036"
  SET = card_info.CORE
  NUMBER = 36
  SIDE = card_info.RUNNER
  FACTION = card_info.SHAPER
  INFLUENCE = 2
  UNIQUE = False
  KEYWORDS = set([
      card_info.RUN,
  ])
  COST = 2
  IMAGE_SRC = '01036.png'
  def build_actions(self):
    """Wire up the custom play action that performs the R&D run."""
    event.Event.build_actions(self)
    self._play_event_action = Card01036RunAction(
        self.game, self.player, card=self)
|
mrroach/CentralServer
|
csrv/model/cards/runner/card01036.py
|
Python
|
apache-2.0
| 1,190
|
from sys import exit
from ccquiz.logic import Quiz
def intro():
    """Prompt for a quiz type and run quiz rounds until the user quits.

    Valid quiz types are 'capitals', 'countries' and 'random'; entering 'q'
    quits, and 'h' during a round reveals the answer.

    Raises:
        ValueError: if the entered quiz type is not a valid choice.
    """
    kind = input("What kind of quiz u want? Capitals, Countries, or Random? \n Q to quit, h for a hint (the answer)").lower().strip()
    # Bug fix: the original tested `kind not in 'capitals countries random q'`,
    # a *substring* check that accepted junk like 's', 'rand om'-fragments, or
    # the empty string. Test membership in the tuple of whole words instead.
    if kind not in ('capitals', 'countries', 'random', 'q'):
        raise ValueError("not a valid quiz type")

    def structure():
        # Start one quiz round for the chosen kind.
        quiz = Quiz(kind)
        debug = False
        if debug:
            print('ans: ' + quiz.ans)
        if quiz.kind == 'q':
            exit()
        if quiz.kind == 'capitals':
            question = "What is the capital of {}?"
        if quiz.kind == 'countries':
            question = "{} is the capital of what country?"

        def loop():
            # Ask until the guess is correct; then start a fresh round.
            guess = input(question.format(quiz.hint)).lower().strip()
            if guess == 'q':
                exit()
            if guess == 'h':
                print(quiz.ans)
                # Bug fix: return here so the stale 'h' input is not also
                # graded as a wrong answer once the recursive call unwinds
                # (the original fell through to the is_correct check).
                return loop()
            if quiz.is_correct(guess):
                print("Yes")
                structure()
            else:
                print("Try again")
                loop()
        loop()
    structure()
intro()
|
elanorigby/EndlessCapitalsQuiz
|
ccquiz/cmdlineapp.py
|
Python
|
apache-2.0
| 1,217
|
import unittest
import numpy as np
import pyoptima as opt
class SimulatedAnnealingTest(unittest.TestCase):
    """Integration test for pyoptima's SimulatedAnnealing optimizer."""
    def test_with_parabola(self):
        """ Test with a simple parabolic function with 2 variables """
        def neighbour_func(params):
            # NOTE(review): `new_params = params` aliases rather than copies,
            # so the caller's dict is mutated in place and returned -- this
            # appears intentional (the optimizer presumably treats the return
            # value as the new state), but confirm against pyoptima's API.
            new_params = params
            params['x0'] += np.random.uniform(-1., 1.)
            params['x1'] += np.random.uniform(-1., 1.)
            return new_params
        hyper_params = {
            # Logarithmic cooling schedule; i+2 avoids log(1) == 0 at i == 0.
            'temperature_func': lambda t, i: t/np.log(i+2),
            'neighbour_func': neighbour_func,
            'initial_temp': 1000000.0
        }
        params = {}
        params["x0"] = np.random.uniform(-10., 10.)
        params["x1"] = np.random.uniform(-10., 10.)
        s = opt.SimulatedAnnealing(params, hyper_params)
        s.optimize(opt.parabola, 100000)
        bst_solution = s.get_best_parameters()
        # The parabola's minimum is at the origin; accept 2 decimal places.
        self.assertAlmostEqual(bst_solution['x0'], 0, 2)
        self.assertAlmostEqual(bst_solution['x1'], 0, 2)
if __name__ == "__main__":
unittest.main()
|
samueljackson92/metaopt
|
python_tests/simulated_annealing_test.py
|
Python
|
mit
| 1,038
|
from __future__ import print_function
import pyuv
import signal
import sys
import os
import optparse
def fsevent_callback(fsevent_handle, filename, events, error):
    """Print a human-readable line for each filesystem event (or error)."""
    if error is not None:
        txt = 'error %s: %s' % (error, pyuv.errno.strerror(error))
    else:
        # `events` is a bitmask; both rename and change may be set at once.
        evts = []
        if events & pyuv.fs.UV_RENAME:
            evts.append('rename')
        if events & pyuv.fs.UV_CHANGE:
            evts.append('change')
        txt = 'events: %s' % ', '.join(evts)
    print('file: %s, %s' % (filename, txt))
def sig_cb(handle, signum):
    """SIGINT handler: close the signal handle so the loop can exit."""
    handle.close()
def main(path):
    """Watch `path` for filesystem events until interrupted with Ctrl-C."""
    loop = pyuv.Loop.default_loop()
    try:
        fsevents = pyuv.fs.FSEvent(loop)
        fsevents.start(path, 0, fsevent_callback)
        # Unref the watcher so it alone does not keep the loop alive; the
        # signal handle below is what keeps the loop running.
        fsevents.ref = False
    except pyuv.error.FSEventError as e:
        print('error: %s' % e)
        sys.exit(2)
    signal_h = pyuv.Signal(loop)
    signal_h.start(sig_cb, signal.SIGINT)
    print('Watching path %s' % os.path.abspath(path))
    loop.run()
if __name__ == '__main__':
    # CLI entry point: watch the given path (defaults to the current dir).
    parser = optparse.OptionParser()
    parser.add_option('-p', '--path', help='a path to watch', default='.')
    opts, args = parser.parse_args()
    main(opts.path)
|
fivejjs/pyuv
|
examples/filesystem_watch.py
|
Python
|
mit
| 1,191
|
from math import log10
from src.resolve import BasicResolver
class SimpleResolver(BasicResolver):
    """Resolver that matches detections to catalog sources 1:1 by mutual
    nearest-neighbor in position+flux (de Ruiter) space."""
    def load_sources(self, group_id):
        """Fetch running-catalog rows associated with the given group head.

        Returns rows of (runcatid, wm_ra, wm_ra_err, wm_decl, wm_decl_err).
        """
        cursor = self.conn.get_cursor("""
select runcatid, wm_ra, wm_ra_err, wm_decl, wm_decl_err
  from runningcatalog r
 where exists (select 1 from temp_associations ta
                where ta.runcat_id = r.runcatid
                  and ta.group_head_id = %s)""" % (group_id))
        sources = cursor.fetchall()
        cursor.close()
        return sources
    def get_flux_distance(self, i1, i2):
        """
        Get de Ruiter distance in the coordinates + flux space.
        """
        # Indices 1 and 3 are the values (ra, decl), 2 and 4 their errors;
        # the 1e-12 seed avoids an exactly-zero distance.
        dist = 1e-12
        for i in range(1, 4, 2):
            dist = dist + (i1[i] - i2[i])*(i1[i] - i2[i]) / \
                (i1[i+1] * i1[i+1] + i2[i+1] * i2[i+1])
        return dist
    def resolve(self, detections, sources):
        """Attempt a full 1:1 matching of detections to sources.

        Returns (True, [[detection_id, source_id], ...]) only when counts are
        equal and every pairing is a mutual, well-isolated nearest neighbor;
        otherwise (False, []).
        """
        solution = []
        is_ok = True
        if (len(detections) == len(sources)):
            # minimum/second: best and second-best distances seen so far;
            # isolation: ratio best/second (small == unambiguous);
            # index: partner that produced the current best distance.
            source_minimum = [1e20]*len(sources)
            source_second = [1e20]*len(sources)
            source_isolation = [0.0]*len(sources)
            source_index = [0]*len(sources)
            detect_minimum = [1e20]*len(sources)
            detect_second = [1e20]*len(sources)
            detect_isolation = [0.0]*len(sources)
            detect_index = [0]*len(sources)
            for i, detect in enumerate(detections):
                for j, source in enumerate(sources):
                    dist = self.get_flux_distance(detect, source)
                    # NOTE(review): isolation is only refreshed when a new
                    # minimum is found, so a later improvement of the *second*
                    # best alone does not update it -- confirm this is the
                    # intended ambiguity measure.
                    if dist < detect_minimum[i]:
                        detect_second[i] = detect_minimum[i]
                        detect_minimum[i] = dist
                        detect_isolation[i] = dist/detect_second[i]
                        detect_index[i] = j
                    if dist < source_minimum[j]:
                        source_second[j] = source_minimum[j]
                        source_minimum[j] = dist
                        source_isolation[j] = dist/source_second[j]
                        source_index[j] = i
            for i in range(len(detections)):
                # Accept only unambiguous (isolation < 0.02) mutual matches;
                # any failure aborts the whole solution.
                if detect_isolation[i] < 0.02 and \
                   source_index[detect_index[i]] == i:
                    solution.append([detections[i][0],
                                     sources[detect_index[i]][0]])
                else:
                    return False, []
        else:
            return False, []
        return is_ok, solution
|
kernsuite-debian/lofar
|
CEP/GSM/bremen/src/resolveSimple.py
|
Python
|
gpl-3.0
| 2,541
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from unittest.mock import Mock, patch
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.urls import re_path
import pytest
from bedrock.base.urlresolvers import Prefixer, find_supported, reverse, split_path
@pytest.mark.parametrize(
    "path, result",
    [
        # Basic
        ("en-US/some/action", ("en-US", "some/action")),
        # First slash doesn't matter
        ("/en-US/some/action", ("en-US", "some/action")),
        # Nor does capitalization
        ("En-uS/some/action", ("en-US", "some/action")),
        # Unsupported languages return a blank language
        ("unsupported/some/action", ("", "unsupported/some/action")),
    ],
)
def test_split_path(path, result):
    """split_path should return (canonical_locale, remainder_of_path)."""
    res = split_path(path)
    assert res == result
# Test urlpatterns
# Minimal URLconf used by TestReverse via override_settings(ROOT_URLCONF=...).
urlpatterns = [re_path(r"^test/$", lambda r: None, name="test.view")]
class FakePrefixer:
    """Minimal Prefixer stand-in exposing only the `fix` callable used by reverse()."""
    def __init__(self, fix):
        self.fix = fix
@patch("bedrock.base.urlresolvers.get_url_prefix")
@override_settings(ROOT_URLCONF="bedrock.base.tests.test_urlresolvers")
class TestReverse(TestCase):
    """Tests for the locale-aware reverse() wrapper."""
    def test_unicode_url(self, get_url_prefix):
        # If the prefixer returns a unicode URL it should be escaped and cast
        # as a str object.
        get_url_prefix.return_value = FakePrefixer(lambda p: f"/Françoi{p}")
        result = reverse("test.view")
        # Ensure that UTF-8 characters are escaped properly.
        self.assertEqual(result, "/Fran%C3%A7oi/test/")
        self.assertEqual(type(result), str)
class TestPrefixer(TestCase):
    """Tests for Prefixer's language-negotiation logic."""
    def setUp(self):
        self.factory = RequestFactory()
    @override_settings(LANGUAGE_CODE="en-US")
    def test_get_language_default_language_code(self):
        """
        Should return default set by settings.LANGUAGE_CODE if no 'lang'
        url parameter and no Accept-Language header
        """
        request = self.factory.get("/")
        self.assertFalse("lang" in request.GET)
        self.assertFalse(request.headers.get("Accept-Language"))
        prefixer = Prefixer(request)
        assert prefixer.get_language() == "en-US"
    def test_get_language_returns_best(self):
        """
        Should pass Accept-Language header value to get_best_language
        and return result
        """
        request = self.factory.get("/")
        request.META["HTTP_ACCEPT_LANGUAGE"] = "de, es"
        prefixer = Prefixer(request)
        prefixer.get_best_language = Mock(return_value="de")
        assert prefixer.get_language() == "de"
        prefixer.get_best_language.assert_called_once_with("de, es")
    @override_settings(LANGUAGE_CODE="en-US")
    def test_get_language_no_best(self):
        """
        Should return default set by settings.LANGUAGE_CODE if
        get_best_language return value is None
        """
        request = self.factory.get("/")
        request.META["HTTP_ACCEPT_LANGUAGE"] = "de, es"
        prefixer = Prefixer(request)
        prefixer.get_best_language = Mock(return_value=None)
        assert prefixer.get_language() == "en-US"
        prefixer.get_best_language.assert_called_once_with("de, es")
    @override_settings(LANGUAGE_URL_MAP={"en-us": "en-US", "de": "de"})
    def test_get_best_language_exact_match(self):
        """
        Should return exact match if it is in settings.LANGUAGE_URL_MAP
        """
        request = self.factory.get("/")
        prefixer = Prefixer(request)
        assert prefixer.get_best_language("de, es") == "de"
    @override_settings(LANGUAGE_URL_MAP={"en-gb": "en-GB", "en-us": "en-US", "es-ar": "es-AR"}, CANONICAL_LOCALES={"es": "es-ES", "en": "en-US"})
    def test_get_best_language_prefix_match(self):
        """
        Should return a language with a matching prefix from
        settings.LANGUAGE_URL_MAP + settings.CANONICAL_LOCALES if it exists but
        no exact match does
        """
        request = self.factory.get("/")
        prefixer = Prefixer(request)
        assert prefixer.get_best_language("en") == "en-US"
        assert prefixer.get_best_language("en-CA") == "en-US"
        assert prefixer.get_best_language("en-GB") == "en-GB"
        assert prefixer.get_best_language("en-US") == "en-US"
        assert prefixer.get_best_language("es") == "es-ES"
        assert prefixer.get_best_language("es-AR") == "es-AR"
        assert prefixer.get_best_language("es-CL") == "es-ES"
        assert prefixer.get_best_language("es-MX") == "es-ES"
    @override_settings(LANGUAGE_URL_MAP={"en-us": "en-US"})
    def test_get_best_language_no_match(self):
        """
        Should return None if there is no exact match or matching
        prefix
        """
        request = self.factory.get("/")
        prefixer = Prefixer(request)
        assert prefixer.get_best_language("de") is None
    @override_settings(LANGUAGE_URL_MAP={"en-ar": "en-AR", "en-gb": "en-GB", "en-us": "en-US"}, CANONICAL_LOCALES={"en": "en-US"})
    def test_prefixer_with_non_supported_locale(self):
        """
        Should set prefixer.locale to a supported locale that respects
        CANONICAL_LOCALES when given a URL with a non-supported locale.
        """
        request = self.factory.get("/en-CA/")
        prefixer = Prefixer(request)
        assert prefixer.locale == "en-US"
@override_settings(LANGUAGE_URL_MAP={"es-ar": "es-AR", "en-gb": "en-GB", "es-us": "es-US"}, CANONICAL_LOCALES={"es": "es-ES", "en": "en-US"})
class TestFindSupported(TestCase):
    """Tests for find_supported(): mapping a requested locale to a supported one."""
    def test_find_supported(self):
        # Exact matches, prefix matches, and CANONICAL_LOCALES fallbacks.
        assert find_supported("en-CA") == "en-US"
        assert find_supported("en-US") == "en-US"
        assert find_supported("en-GB") == "en-GB"
        assert find_supported("en") == "en-US"
        assert find_supported("es-MX") == "es-ES"
        assert find_supported("es-AR") == "es-AR"
        assert find_supported("es") == "es-ES"
    def test_find_supported_none(self):
        """
        Should return None if it can't find any supported locale.
        """
        assert find_supported("de") is None
        assert find_supported("fr") is None
        assert find_supported("dude") is None
|
mozilla/bedrock
|
bedrock/base/tests/test_urlresolvers.py
|
Python
|
mpl-2.0
| 6,341
|
import logging
import re
import sys
import emoji
import markdown2
from slackviewer.user import User
# Workaround for ASCII encoding error in Python 2.7
# See https://github.com/hfaran/slack-export-viewer/issues/81
# (no-op on Python 3, where str is already unicode)
if sys.version_info[0] == 2:
    reload(sys)
    sys.setdefaultencoding('utf8')
class SlackFormatter(object):
    "This formats messages and provides access to workspace-wide data (user and channel metadata)."
    # Class-level constants for precompilation of frequently-reused regular expressions
    # URL detection relies on http://stackoverflow.com/a/1547940/1798683
    _LINK_PAT = re.compile(r"<(https|http|mailto):[A-Za-z0-9_\.\-\/\?\,\=\#\:\@]+\|[^>]+>")
    _MENTION_PAT = re.compile(r"<((?:#C|@[UB])\w+)(?:\|([A-Za-z0-9.-_]+))?>")
    _HASHTAG_PAT = re.compile(r"(^| )#[A-Za-z][\w\.\-\_]+( |$)")
    def __init__(self, USER_DATA, CHANNEL_DATA):
        # Mappings of id -> User / channel metadata for the whole workspace.
        self.__USER_DATA = USER_DATA
        self.__CHANNEL_DATA = CHANNEL_DATA
    def find_user(self, message):
        """Return the User for a message, lazily registering unknown bots."""
        if message.get("subtype", "").startswith("bot_") and "bot_id" in message and message["bot_id"] not in self.__USER_DATA:
            bot_id = message["bot_id"]
            logging.debug("bot addition for %s", bot_id)
            # Bots can advertise themselves via "bot_link" ("<url|name>") or
            # a plain "username"; fall back to an anonymous bot entry.
            if "bot_link" in message:
                (bot_url, bot_name) = message["bot_link"].strip("<>").split("|", 1)
            elif "username" in message:
                bot_name = message["username"]
                bot_url = None
            else:
                bot_name = None
                bot_url = None
            self.__USER_DATA[bot_id] = User({
                "user": bot_id,
                "real_name": bot_name,
                "bot_url": bot_url,
                "is_bot": True,
                "is_app_user": True
            })
        user_id = message.get("user") or message.get("bot_id")
        if user_id in self.__USER_DATA:
            return self.__USER_DATA.get(user_id)
        # Implicitly returns None when the user cannot be resolved.
        logging.error("unable to find user in %s", message)
    def render_text(self, message, process_markdown=True):
        """Convert raw Slack message markup into HTML for display."""
        message = message.replace("<!channel>", "@channel")
        message = message.replace("<!channel|@channel>", "@channel")
        message = message.replace("<!here>", "@here")
        message = message.replace("<!here|@here>", "@here")
        message = message.replace("<!everyone>", "@everyone")
        message = message.replace("<!everyone|@everyone>", "@everyone")
        # Handle mentions of users, channels and bots (e.g "<@U0BM1CGQY|calvinchanubc> has joined the channel")
        message = self._MENTION_PAT.sub(self._sub_annotated_mention, message)
        # Handle links
        message = self._LINK_PAT.sub(self._sub_hyperlink, message)
        # Handle hashtags (that are meant to be hashtags and not headings)
        message = self._HASHTAG_PAT.sub(self._sub_hashtag, message)
        # Introduce unicode emoji
        message = self.slack_to_accepted_emoji(message)
        message = emoji.emojize(message, use_aliases=True)
        if process_markdown:
            # Handle bold (convert * * to ** **)
            message = re.sub(r'\*', "**", message)
            message = markdown2.markdown(
                message,
                extras=[
                    "cuddled-lists",
                    # This gives us <pre> and <code> tags for ```-fenced blocks
                    "fenced-code-blocks",
                    "pyshell"
                ]
            ).strip()
            # Special handling cases for lists
            message = message.replace("\n\n<ul>", "<ul>")
            message = message.replace("\n<li>", "<li>")
        return message
    def slack_to_accepted_emoji(self, message):
        """Convert some Slack emoji shortcodes to more universal versions"""
        # Convert -'s to _'s except for the 1st char (preserve things like :-1:)
        # For example, Slack's ":woman-shrugging:" is converted to ":woman_shrugging:"
        message = re.sub(
            r":([^ <>/:])([^ <>/:]+):",
            lambda x: ":{}{}:".format(x.group(1), x.group(2).replace("-", "_")),
            message
        )
        # https://github.com/Ranks/emojione/issues/114
        message = message.replace(":simple_smile:", ":slightly_smiling_face:")
        return message
    def _sub_annotated_mention(self, matchobj):
        # Regex callback: expand "<#C...|name>" / "<@U...|name>" mentions.
        ref_id = matchobj.group(1)[1:]  # drop #/@ from the start, we don't care
        annotation = matchobj.group(2)
        if ref_id.startswith('C'):
            # Channel mention: bold "#channel-name".
            mention_format = "<b>#{}</b>"
            if not annotation:
                channel = self.__CHANNEL_DATA.get(ref_id)
                annotation = channel["name"] if channel else ref_id
        else:
            # User or bot mention: "@display-name".
            mention_format = "@{}"
            if not annotation:
                user = self.__USER_DATA.get(ref_id)
                annotation = user.display_name if user else ref_id
        return mention_format.format(annotation)
    def _sub_hyperlink(self, matchobj):
        # Regex callback: "<url|title>" (or bare "<url>") -> an <a> element.
        compound = matchobj.group(0)[1:-1]
        if len(compound.split("|")) == 2:
            url, title = compound.split("|")
        else:
            url, title = compound, compound
        result = "<a href=\"{url}\">{title}</a>".format(url=url, title=title)
        return result
    def _sub_hashtag(self, matchobj):
        # Regex callback: bold a "#hashtag" while preserving surrounding spaces.
        text = matchobj.group(0)
        starting_space = " " if text[0] == " " else ""
        ending_space = " " if text[-1] == " " else ""
        return "{}<b>{}</b>{}".format(
            starting_space,
            text.strip(),
            ending_space
        )
|
hfaran/slack-export-viewer
|
slackviewer/formatter.py
|
Python
|
mit
| 5,541
|
# coding=utf-8
# Copyright 2022 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Break sentence ops."""
import abc
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util import deprecation
from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader
gen_sentence_breaking_ops = load_library.load_op_library(resource_loader.get_path_to_datafile('_sentence_breaking_ops.so'))
class SentenceBreaker(object):
  """An abstract base class for sentence breaker implementations."""
  # Subclasses must override break_sentences; this base defines the contract
  # only (note: plain object base with abc decorators, not abc.ABC, so
  # instantiation is not blocked at class-creation time).
  @abc.abstractmethod
  def break_sentences(self, input):  # pylint: disable=redefined-builtin
    """Splits `input` into sentences.
    Args:
      input: A string `Tensor` of shape [batch] with a batch of documents.
    Returns:
      A string `RaggedTensor` of shape [batch, (num_sentences)] with each input
      broken up into its constituent sentences.
    """
    raise NotImplementedError()
class SentenceBreakerWithOffsets(SentenceBreaker):
  """An abstract base class for sentence breakers that support offsets."""
  @abc.abstractmethod
  def break_sentences_with_offsets(self, input):  # pylint: disable=redefined-builtin
    """Splits `input` into sentences and returns the starting & ending offsets.
    Args:
      input: A string `Tensor` of shape [batch] with a batch of documents.
    Returns:
      A tuple of (sentences, begin_offset, end_offset) where:
      sentences: A string `RaggedTensor` of shape [batch, (num_sentences)] with
        each input broken up into its constituent sentences.
      begin_offset: A int64 `RaggedTensor` of shape [batch, (num_sentences)]
        where each entry is the inclusive beginning byte offset of a sentence.
      end_offset: A int64 `RaggedTensor` of shape [batch, (num_sentences)]
        where each entry is the exclusive ending byte offset of a sentence.
    """
    raise NotImplementedError()
@deprecation.deprecated(None,
                        "Deprecated, use 'StateBasedSentenceBreaker' instead.")
def sentence_fragments(token_word,
                       token_starts,
                       token_ends,
                       token_properties,
                       input_encoding='UTF-8',
                       errors='replace',
                       replacement_char=0xFFFD,
                       replace_control_characters=False):
  """Find the sentence fragments in a given text.
  A sentence fragment is a potential next sentence determined using
  deterministic heuristics based on punctuation, capitalization, and similar
  text attributes.
  NOTE: This op is deprecated. Use `StateBasedSentenceBreaker` instead.
  Args:
    token_word: A Tensor (w/ rank=2) or a RaggedTensor (w/ ragged_rank=1)
      containing the token strings.
    token_starts: A Tensor (w/ rank=2) or a RaggedTensor (w/ ragged_rank=1)
      containing offsets where the token starts.
    token_ends: A Tensor (w/ rank=2) or a RaggedTensor (w/ ragged_rank=1)
      containing offsets where the token ends.
    token_properties: A Tensor (w/ rank=2) or a RaggedTensor (w/ ragged_rank=1)
      containing a bitmask.
      The values of the bitmask are:
      *   0x01 (ILL_FORMED) - Text is ill-formed: typically applies to all
          tokens of a paragraph that is too short or lacks terminal punctuation.
      *   0x02 (HEADING)
      *   0x04 (BOLD)
      *   0x10 (UNDERLINED)
      *   0x20 (LIST)
      *   0x40 (TITLE)
      *   0x80 (EMOTICON)
      *   0x100 (ACRONYM) - Token was identified as an acronym. Period-,
          hyphen-, and space-separated acronyms: "U.S.", "U-S", and  "U S".
      *   0x200 (HYPERLINK) - Indicates that the token (or part of the token) is
          covered by at least one hyperlink.
    input_encoding: String name for the unicode encoding that should be used to
      decode each string.
    errors: Specifies the response when an input string can't be converted
      using the indicated encoding. One of:
      * `'strict'`: Raise an exception for any illegal substrings.
      * `'replace'`: Replace illegal substrings with `replacement_char`.
      * `'ignore'`: Skip illegal substrings.
    replacement_char: The replacement codepoint to be used in place of invalid
      substrings in `input` when `errors='replace'`; and in place of C0 control
      characters in `input` when `replace_control_characters=True`.
    replace_control_characters: Whether to replace the C0 control characters
      `(U+0000 - U+001F)` with the `replacement_char`.
  Returns:
    A RaggedTensor of `fragment_start`, `fragment_end`, `fragment_properties`
    and `terminal_punc_token`.
    `fragment_properties` is an int32 bitmask whose values may contain:
    *  1 = fragment ends with terminal punctuation
    *  2 = fragment ends with multiple terminal punctuations (e.g.
           "She said what?!")
    *  3 = Has close parenthesis (e.g. "Mushrooms (they're fungi).")
    *  4 = Has sentential close parenthesis (e.g. "(Mushrooms are fungi!)")
    `terminal_punc_token` is a RaggedTensor containing the index of terminal
    punctuation token immediately following the last word in the fragment
    -- or index of the last word itself, if it's an acronym (since acronyms
    include the terminal punctuation). index of the terminal punctuation
    token.
  """  # pylint: disable=pointless-string-statement
  # Normalize all dense-Tensor inputs to RaggedTensor so a single row_lengths
  # vector can describe the batch for the C++ op.
  if not isinstance(token_starts, ragged_tensor.RaggedTensor):
    token_starts = ragged_tensor.RaggedTensor.from_tensor(token_starts)
  if not isinstance(token_ends, ragged_tensor.RaggedTensor):
    token_ends = ragged_tensor.RaggedTensor.from_tensor(token_ends)
  if not isinstance(token_word, ragged_tensor.RaggedTensor):
    token_word = ragged_tensor.RaggedTensor.from_tensor(token_word)
  if not isinstance(token_properties, ragged_tensor.RaggedTensor):
    token_properties = ragged_tensor.RaggedTensor.from_tensor(token_properties)
  # The custom op works on flat values plus explicit row lengths; it returns
  # flat outputs together with the row lengths of the fragment dimension.
  fragment = gen_sentence_breaking_ops.sentence_fragments(
      errors=errors,
      replacement_char=replacement_char,
      replace_control_characters=replace_control_characters,
      input_encoding=input_encoding,
      row_lengths=token_starts.row_lengths(),
      token_start=token_starts.flat_values,
      token_end=token_ends.flat_values,
      token_word=token_word.flat_values,
      token_properties=token_properties.flat_values)
  start, end, properties, terminal_punc_token, row_lengths = fragment
  # Re-wrap each flat output with the returned row lengths.
  return tuple(
      ragged_tensor.RaggedTensor.from_row_lengths(value, row_lengths)
      for value in [start, end, properties, terminal_punc_token])
|
tensorflow/text
|
tensorflow_text/python/ops/sentence_breaking_ops.py
|
Python
|
apache-2.0
| 7,117
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import datetime
import mock
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.data import buildrequests
from buildbot.data import resultspec
from buildbot.test import fakedb
from buildbot.test.fake import fakemaster
from buildbot.test.reactor import TestReactorMixin
from buildbot.test.util import endpoint
from buildbot.test.util import interfaces
from buildbot.util import UTC
from buildbot.util import epoch2datetime
class TestBuildRequestEndpoint(endpoint.EndpointMixin, unittest.TestCase):
    """Tests for the single-buildrequest data API endpoint."""
    endpointClass = buildrequests.BuildRequestEndpoint
    resourceTypeClass = buildrequests.BuildRequest
    # Fixed timestamps (tz-aware datetime + matching epoch seconds) used to
    # seed the fake DB and to assert on the endpoint's datetime conversion.
    CLAIMED_AT = datetime.datetime(1978, 6, 15, 12, 31, 15, tzinfo=UTC)
    CLAIMED_AT_EPOCH = 266761875
    SUBMITTED_AT = datetime.datetime(1979, 6, 15, 12, 31, 15, tzinfo=UTC)
    SUBMITTED_AT_EPOCH = 298297875
    COMPLETE_AT = datetime.datetime(1980, 6, 15, 12, 31, 15, tzinfo=UTC)
    COMPLETE_AT_EPOCH = 329920275
    def setUp(self):
        self.setUpEndpoint()
        # One buildset (8822) with two properties and one request (44).
        self.db.insertTestData([
            fakedb.Builder(id=77, name='bbb'),
            fakedb.Master(id=fakedb.FakeBuildRequestsComponent.MASTER_ID),
            fakedb.Worker(id=13, name='wrk'),
            fakedb.Buildset(id=8822),
            fakedb.BuildRequest(id=44, buildsetid=8822, builderid=77,
                                priority=7, submitted_at=self.SUBMITTED_AT_EPOCH,
                                waited_for=1),
            fakedb.BuildsetProperty(buildsetid=8822, property_name='prop1',
                                    property_value='["one", "fake1"]'),
            fakedb.BuildsetProperty(buildsetid=8822, property_name='prop2',
                                    property_value='["two", "fake2"]'),
        ])
    def tearDown(self):
        self.tearDownEndpoint()
    @defer.inlineCallbacks
    def testGetExisting(self):
        # Claim and complete the request first so all fields are populated.
        self.db.buildrequests.claimBuildRequests(
            [44], claimed_at=self.CLAIMED_AT)
        self.db.buildrequests.completeBuildRequests(
            [44], 75, complete_at=self.COMPLETE_AT)
        buildrequest = yield self.callGet(('buildrequests', 44))
        self.validateData(buildrequest)
        # check data formatting:
        self.assertEqual(buildrequest['buildrequestid'], 44)
        self.assertEqual(buildrequest['complete'], True)
        self.assertEqual(buildrequest['builderid'], 77)
        self.assertEqual(buildrequest['waited_for'], True)
        self.assertEqual(buildrequest['claimed_at'], self.CLAIMED_AT)
        self.assertEqual(buildrequest['results'], 75)
        self.assertEqual(buildrequest['claimed_by_masterid'],
                         fakedb.FakeBuildRequestsComponent.MASTER_ID)
        self.assertEqual(buildrequest['claimed'], True)
        self.assertEqual(buildrequest['submitted_at'], self.SUBMITTED_AT)
        self.assertEqual(buildrequest['complete_at'], self.COMPLETE_AT)
        self.assertEqual(buildrequest['buildsetid'], 8822)
        self.assertEqual(buildrequest['priority'], 7)
        # Properties are only returned when explicitly requested (below).
        self.assertEqual(buildrequest['properties'], None)
    @defer.inlineCallbacks
    def testGetMissing(self):
        buildrequest = yield self.callGet(('buildrequests', 9999))
        self.assertEqual(buildrequest, None)
    @defer.inlineCallbacks
    def testGetProperty(self):
        # Requesting a single named property returns just that property.
        prop = resultspec.Property(b'property', 'eq', 'prop1')
        buildrequest = yield self.callGet(('buildrequests', 44),
                                          resultSpec=resultspec.ResultSpec(properties=[prop]))
        self.assertEqual(buildrequest['buildrequestid'], 44)
        self.assertEqual(buildrequest['properties'], {'prop1': ('one', 'fake1')})
    @defer.inlineCallbacks
    def testGetProperties(self):
        # The '*' wildcard returns every buildset property.
        prop = resultspec.Property(b'property', 'eq', '*')
        buildrequest = yield self.callGet(('buildrequests', 44),
                                          resultSpec=resultspec.ResultSpec(properties=[prop]))
        self.assertEqual(buildrequest['buildrequestid'], 44)
        self.assertEqual(buildrequest['properties'],
                         {'prop1': ('one', 'fake1'), 'prop2': ('two', 'fake2')})
class TestBuildRequestsEndpoint(endpoint.EndpointMixin, unittest.TestCase):
    """Tests for the buildrequest collection endpoints
    (``/buildrequests`` and ``/builders/:builderid/buildrequests``)."""
    endpointClass = buildrequests.BuildRequestsEndpoint
    resourceTypeClass = buildrequests.BuildRequest
    # Fixed timezone-aware timestamps; *_EPOCH values are the same instants
    # as Unix epoch seconds (the representation used by the fake db).
    CLAIMED_AT = datetime.datetime(1978, 6, 15, 12, 31, 15, tzinfo=UTC)
    CLAIMED_AT_EPOCH = 266761875
    SUBMITTED_AT = datetime.datetime(1979, 6, 15, 12, 31, 15, tzinfo=UTC)
    SUBMITTED_AT_EPOCH = 298297875
    COMPLETE_AT = datetime.datetime(1980, 6, 15, 12, 31, 15, tzinfo=UTC)
    COMPLETE_AT_EPOCH = 329920275
    def setUp(self):
        # Three builders: 77 has requests 44+45, 78 has request 46,
        # 79 has none (used by the "no build request" tests).
        self.setUpEndpoint()
        self.db.insertTestData([
            fakedb.Builder(id=77, name='bbb'),
            fakedb.Builder(id=78, name='ccc'),
            fakedb.Builder(id=79, name='ddd'),
            fakedb.Master(id=fakedb.FakeBuildRequestsComponent.MASTER_ID),
            fakedb.Worker(id=13, name='wrk'),
            fakedb.Buildset(id=8822),
            fakedb.BuildRequest(id=44, buildsetid=8822, builderid=77,
                                priority=7, submitted_at=self.SUBMITTED_AT_EPOCH,
                                waited_for=1),
            fakedb.BuildRequest(id=45, buildsetid=8822, builderid=77),
            fakedb.BuildRequest(id=46, buildsetid=8822, builderid=78),
        ])
    def tearDown(self):
        self.tearDownEndpoint()
    @defer.inlineCallbacks
    def testGetAll(self):
        buildrequests = yield self.callGet(('buildrequests',))
        for br in buildrequests:
            self.validateData(br)
        self.assertEqual(sorted([br['buildrequestid'] for br in buildrequests]),
                         [44, 45, 46])
    @defer.inlineCallbacks
    def testGetNoBuildRequest(self):
        # builder 79 exists but has no requests
        buildrequests = yield self.callGet(('builders', 79, 'buildrequests'))
        self.assertEqual(buildrequests, [])
    @defer.inlineCallbacks
    def testGetBuilderid(self):
        # filtering by builder in the path returns only that builder's requests
        buildrequests = yield self.callGet(('builders', 78, 'buildrequests'))
        for br in buildrequests:
            self.validateData(br)
        self.assertEqual(
            sorted([br['buildrequestid'] for br in buildrequests]), [46])
    @defer.inlineCallbacks
    def testGetUnknownBuilderid(self):
        buildrequests = yield self.callGet(('builders', 79, 'buildrequests'))
        self.assertEqual(buildrequests, [])
    @defer.inlineCallbacks
    def testGetProperties(self):
        # the '*' property wildcard pulls in all buildset properties
        self.master.db.insertTestData([
            fakedb.BuildsetProperty(buildsetid=8822, property_name='prop1',
                                    property_value='["one", "fake1"]'),
            fakedb.BuildsetProperty(buildsetid=8822, property_name='prop2',
                                    property_value='["two", "fake2"]'),
        ])
        prop = resultspec.Property(b'property', 'eq', '*')
        buildrequests = yield self.callGet(('builders', 78, 'buildrequests'),
                                           resultSpec=resultspec.ResultSpec(properties=[prop]))
        self.assertEqual(len(buildrequests), 1)
        self.assertEqual(buildrequests[0]['buildrequestid'], 46)
        self.assertEqual(buildrequests[0]['properties'],
                         {'prop1': ('one', 'fake1'), 'prop2': ('two', 'fake2')})
    @defer.inlineCallbacks
    def testGetNoFilters(self):
        # with no filters, the db layer is called with all-None criteria and
        # an untouched (empty) result spec
        getBuildRequestsMock = mock.Mock(return_value={})
        self.patch(
            self.master.db.buildrequests, 'getBuildRequests', getBuildRequestsMock)
        yield self.callGet(('buildrequests',))
        getBuildRequestsMock.assert_called_with(
            builderid=None,
            bsid=None,
            complete=None,
            claimed=None,
            resultSpec=resultspec.ResultSpec())
    @defer.inlineCallbacks
    def testGetFilters(self):
        # complete/claimed/buildsetid filters are translated to db-layer
        # keyword arguments and *removed* from the result spec; unrelated
        # filters (branch, repository) are passed through in the spec.
        getBuildRequestsMock = mock.Mock(return_value={})
        self.patch(
            self.master.db.buildrequests, 'getBuildRequests', getBuildRequestsMock)
        f1 = resultspec.Filter('complete', 'eq', [False])
        f2 = resultspec.Filter('claimed', 'eq', [True])
        f3 = resultspec.Filter('buildsetid', 'eq', [55])
        f4 = resultspec.Filter('branch', 'eq', ['mybranch'])
        f5 = resultspec.Filter('repository', 'eq', ['myrepo'])
        yield self.callGet(
            ('buildrequests',),
            resultSpec=resultspec.ResultSpec(filters=[f1, f2, f3, f4, f5]))
        getBuildRequestsMock.assert_called_with(
            builderid=None,
            bsid=55,
            complete=False,
            claimed=True,
            resultSpec=resultspec.ResultSpec(filters=[f4, f5]))
    @defer.inlineCallbacks
    def testGetClaimedByMasterIdFilters(self):
        # claimed_by_masterid collapses into the db-layer 'claimed' argument
        getBuildRequestsMock = mock.Mock(return_value={})
        self.patch(
            self.master.db.buildrequests, 'getBuildRequests', getBuildRequestsMock)
        f1 = resultspec.Filter('claimed', 'eq', [True])
        f2 = resultspec.Filter('claimed_by_masterid', 'eq',
                               [fakedb.FakeBuildRequestsComponent.MASTER_ID])
        yield self.callGet(
            ('buildrequests',),
            resultSpec=resultspec.ResultSpec(filters=[f1, f2]))
        getBuildRequestsMock.assert_called_with(
            builderid=None,
            bsid=None,
            complete=None,
            claimed=fakedb.FakeBuildRequestsComponent.MASTER_ID,
            resultSpec=resultspec.ResultSpec(filters=[f1]))
    @defer.inlineCallbacks
    def testGetSortedLimit(self):
        # ordering and limit are honored; completed request 44 gets
        # results=1 while the others keep the default -1
        yield self.master.db.buildrequests.completeBuildRequests([44], 1)
        res = yield self.callGet(
            ('buildrequests',),
            resultSpec=resultspec.ResultSpec(order=['results'], limit=2))
        self.assertEqual(len(res), 2)
        self.assertEqual(res[0]['results'], -1)
        res = yield self.callGet(
            ('buildrequests',),
            resultSpec=resultspec.ResultSpec(order=['-results'], limit=2))
        self.assertEqual(len(res), 2)
        self.assertEqual(res[0]['results'], 1)
class TestBuildRequest(interfaces.InterfaceTests, TestReactorMixin,
                       unittest.TestCase):
    """Tests for the BuildRequest resource-type update methods
    (claim / unclaim / complete / rebuild) and the messages they produce."""
    CLAIMED_AT = datetime.datetime(1978, 6, 15, 12, 31, 15, tzinfo=UTC)
    COMPLETE_AT = datetime.datetime(1980, 6, 15, 12, 31, 15, tzinfo=UTC)
    class dBLayerException(Exception):
        # stand-in for an unexpected, non-buildbot exception raised by the
        # db layer
        pass
    def setUp(self):
        self.setup_test_reactor()
        self.master = fakemaster.make_master(self, wantMq=True, wantDb=True,
                                             wantData=True)
        self.rtype = buildrequests.BuildRequest(self.master)
    @defer.inlineCallbacks
    def doTestCallthrough(self, dbMethodName, dbMockedMethod, method,
                          methodargs=None, methodkwargs=None,
                          expectedRes=None, expectedException=None,
                          expectedDbApiCalled=True):
        """Patch the db method `dbMethodName` with `dbMockedMethod`, invoke
        `method(*methodargs, **methodkwargs)` and verify either the returned
        value (`expectedRes`) or that `expectedException` was raised; when
        `expectedDbApiCalled` is set, also verify the db mock was called with
        the same arguments."""
        self.patch(self.master.db.buildrequests, dbMethodName, dbMockedMethod)
        if expectedException is not None:
            try:
                yield method(*methodargs, **methodkwargs)
            except expectedException:
                pass
            except Exception as e:
                self.fail(f'{expectedException} exception should be raised, but got {repr(e)}')
            else:
                self.fail(f'{expectedException} exception should be raised')
        else:
            res = yield method(*methodargs, **methodkwargs)
            self.assertEqual(res, expectedRes)
        if expectedDbApiCalled:
            dbMockedMethod.assert_called_with(*methodargs, **methodkwargs)
    def testSignatureClaimBuildRequests(self):
        # the fake data update method must share the real method's signature
        @self.assertArgSpecMatches(
            self.master.data.updates.claimBuildRequests,  # fake
            self.rtype.claimBuildRequests)  # real
        def claimBuildRequests(self, brids, claimed_at=None):
            pass
    @defer.inlineCallbacks
    def testFakeDataClaimBuildRequests(self):
        self.master.db.insertTestData([
            fakedb.BuildRequest(id=44, buildsetid=8822),
            fakedb.BuildRequest(id=55, buildsetid=8822),
        ])
        res = yield self.master.data.updates.claimBuildRequests(
            [44, 55],
            claimed_at=self.CLAIMED_AT)
        self.assertTrue(res)
    @defer.inlineCallbacks
    def testFakeDataClaimBuildRequestsNoneArgs(self):
        # claiming an empty brid list succeeds trivially
        res = yield self.master.data.updates.claimBuildRequests([])
        self.assertTrue(res)
    @defer.inlineCallbacks
    def testClaimBuildRequests(self):
        # a successful claim produces 'claimed' messages on all three routes
        self.master.db.insertTestData([
            fakedb.Builder(id=123),
            fakedb.BuildRequest(id=44, buildsetid=8822, builderid=123),
            fakedb.BuildRequest(id=55, buildsetid=8822, builderid=123),
        ])
        claimBuildRequestsMock = mock.Mock(return_value=defer.succeed(None))
        yield self.doTestCallthrough('claimBuildRequests', claimBuildRequestsMock,
                                     self.rtype.claimBuildRequests,
                                     methodargs=[[44]],
                                     methodkwargs=dict(claimed_at=self.CLAIMED_AT),
                                     expectedRes=True,
                                     expectedException=None)
        msg = {
            'buildrequestid': 44,
            'complete_at': None,
            'complete': False,
            'builderid': 123,
            'waited_for': False,
            'claimed_at': None,
            'results': -1,
            'priority': 0,
            'submitted_at': datetime.datetime(1970, 5, 23, 21, 21, 18, tzinfo=UTC),
            'claimed': False,
            'claimed_by_masterid': None,
            'buildsetid': 8822,
            'properties': None,
        }
        self.assertEqual(sorted(self.master.mq.productions), sorted([
            (('buildrequests', '44', 'claimed'), msg),
            (('builders', '123', 'buildrequests', '44', 'claimed'), msg),
            (('buildsets', '8822', 'builders', '123',
              'buildrequests', '44', 'claimed'), msg),
        ]))
    @defer.inlineCallbacks
    def testClaimBuildRequestsNoBrids(self):
        # empty brid list: no db call, no messages, result is still True
        claimBuildRequestsMock = mock.Mock(return_value=defer.succeed(None))
        yield self.doTestCallthrough('claimBuildRequests', claimBuildRequestsMock,
                                     self.rtype.claimBuildRequests,
                                     methodargs=[[]],
                                     methodkwargs={},
                                     expectedRes=True,
                                     expectedException=None,
                                     expectedDbApiCalled=False)
        self.assertEqual(self.master.mq.productions, [])
    @defer.inlineCallbacks
    def testClaimBuildRequestsAlreadyClaimed(self):
        # AlreadyClaimedError is swallowed: the call returns False and no
        # messages are emitted
        claimBuildRequestsMock = mock.Mock(
            side_effect=buildrequests.AlreadyClaimedError('oups ! buildrequest already claimed'))
        yield self.doTestCallthrough('claimBuildRequests', claimBuildRequestsMock,
                                     self.rtype.claimBuildRequests,
                                     methodargs=[[44]],
                                     methodkwargs=dict(claimed_at=self.CLAIMED_AT),
                                     expectedRes=False,
                                     expectedException=None)
        self.assertEqual(self.master.mq.productions, [])
    @defer.inlineCallbacks
    def testClaimBuildRequestsUnknownException(self):
        # any other exception propagates to the caller
        claimBuildRequestsMock = mock.Mock(
            side_effect=self.dBLayerException('oups ! unknown error'))
        yield self.doTestCallthrough('claimBuildRequests', claimBuildRequestsMock,
                                     self.rtype.claimBuildRequests,
                                     methodargs=[[44]],
                                     methodkwargs=dict(claimed_at=self.CLAIMED_AT),
                                     expectedRes=None,
                                     expectedException=self.dBLayerException)
        self.assertEqual(self.master.mq.productions, [])
    def testSignatureUnclaimBuildRequests(self):
        @self.assertArgSpecMatches(
            self.master.data.updates.unclaimBuildRequests,  # fake
            self.rtype.unclaimBuildRequests)  # real
        def unclaimBuildRequests(self, brids):
            pass
    @defer.inlineCallbacks
    def testFakeDataUnclaimBuildRequests(self):
        res = yield self.master.data.updates.unclaimBuildRequests([44, 55])
        self.assertEqual(res, None)
    @defer.inlineCallbacks
    def testFakeDataUnclaimBuildRequestsNoneArgs(self):
        res = yield self.master.data.updates.unclaimBuildRequests([])
        self.assertEqual(res, None)
    @defer.inlineCallbacks
    def testUnclaimBuildRequests(self):
        # a successful unclaim produces 'unclaimed' messages on all routes
        self.master.db.insertTestData([
            fakedb.Builder(id=123),
            fakedb.BuildRequest(id=44, buildsetid=8822, builderid=123),
        ])
        unclaimBuildRequestsMock = mock.Mock(return_value=defer.succeed(None))
        yield self.doTestCallthrough('unclaimBuildRequests',
                                     unclaimBuildRequestsMock,
                                     self.rtype.unclaimBuildRequests,
                                     methodargs=[[44]],
                                     methodkwargs={},
                                     expectedRes=None,
                                     expectedException=None)
        msg = {
            'buildrequestid': 44,
            'complete_at': None,
            'complete': False,
            'builderid': 123,
            'waited_for': False,
            'claimed_at': None,
            'results': -1,
            'priority': 0,
            'submitted_at': datetime.datetime(1970, 5, 23, 21, 21, 18, tzinfo=UTC),
            'claimed': False,
            'claimed_by_masterid': None,
            'buildsetid': 8822,
            'properties': None,
        }
        self.assertEqual(sorted(self.master.mq.productions), sorted([
            (('buildrequests', '44', 'unclaimed'), msg),
            (('builders', '123', 'buildrequests', '44', 'unclaimed'), msg),
            (('buildsets', '8822', 'builders', '123',
              'buildrequests', '44', 'unclaimed'), msg),
        ]))
    @defer.inlineCallbacks
    def testUnclaimBuildRequestsNoBrids(self):
        # empty brid list skips the db call entirely
        unclaimBuildRequestsMock = mock.Mock(return_value=defer.succeed(None))
        yield self.doTestCallthrough('unclaimBuildRequests',
                                     unclaimBuildRequestsMock,
                                     self.rtype.unclaimBuildRequests,
                                     methodargs=[[]],
                                     methodkwargs={},
                                     expectedRes=None,
                                     expectedException=None,
                                     expectedDbApiCalled=False)
    def testSignatureCompleteBuildRequests(self):
        @self.assertArgSpecMatches(
            self.master.data.updates.completeBuildRequests,  # fake
            self.rtype.completeBuildRequests)  # real
        def completeBuildRequests(self, brids, results, complete_at=None):
            pass
    @defer.inlineCallbacks
    def testFakeDataCompleteBuildRequests(self):
        res = yield self.master.data.updates.completeBuildRequests(
            [44, 55],
            12,
            complete_at=self.COMPLETE_AT)
        self.assertTrue(res)
    @defer.inlineCallbacks
    def testFakeDataCompleteBuildRequestsNoneArgs(self):
        res = yield self.master.data.updates.completeBuildRequests([], 0)
        self.assertTrue(res)
    @defer.inlineCallbacks
    def testCompleteBuildRequests(self):
        completeBuildRequestsMock = mock.Mock(return_value=defer.succeed(None))
        yield self.doTestCallthrough('completeBuildRequests',
                                     completeBuildRequestsMock,
                                     self.rtype.completeBuildRequests,
                                     methodargs=[[46], 12],
                                     methodkwargs=dict(complete_at=self.COMPLETE_AT),
                                     expectedRes=True,
                                     expectedException=None)
    @defer.inlineCallbacks
    def testCompleteBuildRequestsNoBrids(self):
        completeBuildRequestsMock = mock.Mock(return_value=defer.succeed(None))
        yield self.doTestCallthrough('completeBuildRequests',
                                     completeBuildRequestsMock,
                                     self.rtype.completeBuildRequests,
                                     methodargs=[[], 0],
                                     methodkwargs={},
                                     expectedRes=True,
                                     expectedException=None,
                                     expectedDbApiCalled=False)
    @defer.inlineCallbacks
    def testCompleteBuildRequestsNotClaimed(self):
        # NotClaimedError is swallowed and reported as a False result
        completeBuildRequestsMock = mock.Mock(
            side_effect=buildrequests.NotClaimedError('oups ! buildrequest not claimed'))
        yield self.doTestCallthrough('completeBuildRequests',
                                     completeBuildRequestsMock,
                                     self.rtype.completeBuildRequests,
                                     methodargs=[[46], 12],
                                     methodkwargs=dict(complete_at=self.COMPLETE_AT),
                                     expectedRes=False,
                                     expectedException=None)
    @defer.inlineCallbacks
    def testCompleteBuildRequestsUnknownException(self):
        completeBuildRequestsMock = mock.Mock(
            side_effect=self.dBLayerException('oups ! unknown error'))
        yield self.doTestCallthrough('completeBuildRequests',
                                     completeBuildRequestsMock,
                                     self.rtype.completeBuildRequests,
                                     methodargs=[[46], 12],
                                     methodkwargs=dict(complete_at=self.COMPLETE_AT),
                                     expectedRes=None,
                                     expectedException=self.dBLayerException)
    @defer.inlineCallbacks
    def testRebuildBuildrequest(self):
        # rebuilding creates a new buildset (reason='rebuild') with the same
        # sourcestamps and properties, and a fresh unclaimed build request
        self.master.db.insertTestData([
            fakedb.Builder(id=77, name='builder'),
            fakedb.Master(id=88),
            fakedb.Worker(id=13, name='wrk'),
            fakedb.Buildset(id=8822),
            fakedb.SourceStamp(id=234),
            fakedb.BuildsetSourceStamp(buildsetid=8822, sourcestampid=234),
            fakedb.BuildRequest(id=82, buildsetid=8822, builderid=77),
            fakedb.BuildsetProperty(buildsetid=8822, property_name='prop1',
                                    property_value='["one", "fake1"]'),
            fakedb.BuildsetProperty(buildsetid=8822, property_name='prop2',
                                    property_value='["two", "fake2"]'),
        ])
        buildrequest = yield self.master.data.get(('buildrequests', 82))
        new_bsid, brid_dict = yield self.rtype.rebuildBuildrequest(buildrequest)
        self.assertEqual(list(brid_dict.keys()), [77])
        buildrequest = yield self.master.data.get(('buildrequests', brid_dict[77]))
        # submitted_at is the time of the test, so better not depend on it
        self.assertEqual(buildrequest, {'buildrequestid': 1001, 'complete': False,
                                        'waited_for': False, 'claimed_at': None, 'results': -1,
                                        'claimed': False, 'buildsetid': 200, 'complete_at': None,
                                        'submitted_at': epoch2datetime(0),
                                        'builderid': 77, 'claimed_by_masterid': None, 'priority': 0,
                                        'properties': None})
        buildset = yield self.master.data.get(('buildsets', new_bsid))
        oldbuildset = yield self.master.data.get(('buildsets', 8822))
        # assert same sourcestamp
        self.assertEqual(buildset['sourcestamps'], oldbuildset['sourcestamps'])
        buildset['sourcestamps'] = None
        self.assertEqual(buildset, {'bsid': 200, 'complete_at': None, 'submitted_at': 0,
                                    'sourcestamps': None, 'parent_buildid': None,
                                    'results': -1, 'parent_relationship': None,
                                    'reason': 'rebuild',
                                    'external_idstring': 'extid',
                                    'complete': False})
        properties = yield self.master.data.get(('buildsets', new_bsid, 'properties'))
        self.assertEqual(
            properties, {'prop1': ('one', 'fake1'), 'prop2': ('two', 'fake2')})
|
pmisik/buildbot
|
master/buildbot/test/unit/data/test_buildrequests.py
|
Python
|
gpl-2.0
| 25,460
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from . import Node
from . import Edge
from collections import deque
class Graph:
    """
    Main class which represents a graph of :class:`Node` objects connected
    by :class:`Edge` objects.

    :param name: name of the graph
    """

    def __init__(self, name=""):
        """Create an empty graph."""
        self.name = name
        self._nodes = []
        self._edges = []
        self._root = None
        self.directed = True
        # visit counter used by _DFS_prefix to stamp each node's 'depth'
        self.i = 0

    def DFS_prefix(self, root=None):
        """
        Depth-first search.

        .. seealso::
           `Wikipedia DFS descritpion <http://en.wikipedia.org/wiki/Depth-first_search>`_

        :param root: first to start the search
        :return: list of nodes
        """
        if not root:
            root = self._root
        return self._DFS_prefix(root)

    def _DFS_prefix(self, n, parent=None):
        """Recursive helper: prefix-order traversal starting at node `n`."""
        nodes = [n]
        n['depth'] = self.i
        for c in n.children():
            nodes += self._DFS_prefix(c, n)
            self.i += 1
        return nodes

    def BFS(self, root=None):
        """
        Breadth-first search.

        .. seealso::
           `Wikipedia BFS descritpion <http://en.wikipedia.org/wiki/Breadth-first_search>`_

        :param root: first to start the search
        :return: list of nodes
        """
        if not root:
            root = self.root()
        queue = deque()
        queue.append(root)
        nodes = []
        while len(queue) > 0:
            x = queue.popleft()
            nodes.append(x)
            for child in x.children():
                queue.append(child)
        return nodes

    def get_depth(self, node):
        """Return the number of parent hops from `node` up to the root."""
        depth = 0
        while node.parent() and node != self.root():
            node = node.parent()[0]
            depth += 1
        return depth

    def nodes(self):
        """Return the list of nodes."""
        return self._nodes

    def edges(self):
        """Return the list of edges."""
        return self._edges

    def children(self, node):
        """Return the children of `node`."""
        return node.children()

    def add_node(self, label="", id=None):
        """Create a new node with the given label, add it and return it."""
        n = Node(id)
        n['label'] = label
        self._nodes.append(n)
        return n

    def add_edge(self, n1, n2, directed=False):
        """
        Add an edge between two existing nodes of this graph.

        :raises ValueError: if either node does not belong to this graph.
        """
        # BUGFIX: the original raised the undefined name `Test`, which would
        # surface as a NameError at runtime; raise a descriptive ValueError.
        if n1 not in self._nodes:
            raise ValueError("node %s does not belong to this graph" % n1)
        if n2 not in self._nodes:
            raise ValueError("node %s does not belong to this graph" % n2)
        e = Edge(n1, n2, directed)
        self._edges.append(e)
        return e

    def add_edge_by_id(self, id1, id2):
        """Add an edge between the nodes whose ids are `id1` and `id2`."""
        try:
            n1 = next(n for n in self._nodes if n.id == id1)
        except StopIteration:
            raise ValueError('Graph has no node with ID {}'.format(id1))
        try:
            n2 = next(n for n in self._nodes if n.id == id2)
        except StopIteration:
            raise ValueError('Graph has no node with ID {}'.format(id2))
        return self.add_edge(n1, n2)

    def add_edge_by_label(self, label1, label2):
        """Add an edge between the nodes labelled `label1` and `label2`.

        Returns the new edge, or None when either label is not found.
        """
        n1 = None
        n2 = None
        for n in self._nodes:
            if n['label'] == label1:
                n1 = n
            if n['label'] == label2:
                n2 = n
        if n1 and n2:
            return self.add_edge(n1, n2)
        else:
            return

    def set_root(self, node):
        """Set the root node of the graph."""
        self._root = node

    def root(self):
        """Return the root node of the graph."""
        return self._root

    def set_root_by_attribute(self, value, attribute='label'):
        """Set (and return) as root the first node whose `attribute` is in `value`."""
        for n in self.nodes():
            if n[attribute] in value:
                self.set_root(n)
                return n

    def get_attributs(self):
        """Collect the attribute objects of all nodes and edges (first
        occurrence of each attribute name wins)."""
        attr = []
        attr_obj = []
        for n in self.nodes():
            for a in n.attr:
                if a not in attr:
                    attr.append(a)
                    attr_obj.append(n.attr[a])
        for e in self.edges():
            for a in e.attr:
                if a not in attr:
                    attr.append(a)
                    attr_obj.append(e.attr[a])
        return attr_obj

    def show(self, show_label=False):
        """Draw the graph with matplotlib/networkx (imported lazily)."""
        import matplotlib.pyplot as plt
        import networkx as nx

        G = nx.Graph()
        for n in self._nodes:
            if show_label:
                n_label = n['label']
            else:
                n_label = n.id
            G.add_node(n_label)
        for e in self._edges:
            if show_label:
                n1_label = e.node1['label']
                n2_label = e.node2['label']
            else:
                n1_label = e.node1.id
                n2_label = e.node2.id
            G.add_edge(n1_label, n2_label)
        nx.draw(G)
        if show_label:
            nx.draw_networkx_labels(G, pos=nx.spring_layout(G))
        plt.show()
class NoDupesGraph(Graph):
    '''Add nodes without worrying if it is a duplicate.
    Add edges without worrying if nodes exist '''
    def __init__(self,*args,**kwargs):
        Graph.__init__(self,*args,**kwargs)
        # override: nodes are stored in a dict keyed by label instead of a
        # list, so duplicate labels map to the same Node
        self._nodes = {}
    def nodes(self):
        # return the Node objects (dict values), matching Graph.nodes()
        return self._nodes.values()
    def add_node(self,label):
        '''Return a node with label. Create node if label is new'''
        try:
            n = self._nodes[label]
        except KeyError:
            n = Node()
            n['label'] = label
            self._nodes[label]=n
        return n
    def add_edge(self, n1_label, n2_label,directed=False):
        """
        Get or create edges using get_or_create_node

        NOTE(review): unlike Graph.add_edge, the arguments here are *labels*,
        not Node objects; the nodes are created on demand.
        """
        n1 = self.add_node(n1_label)
        n2 = self.add_node(n2_label)
        e = Edge(n1, n2, directed)
        self._edges.append(e)
        return e
    def flush_empty_nodes(self):
        '''not implemented'''
        pass
    def condense_edges(self):
        '''if a node connects to only two edges, combine those
        edges and delete the node.
        not implemented
        '''
        pass
|
hadim/pygraphml
|
pygraphml/graph.py
|
Python
|
bsd-3-clause
| 6,171
|
def __bootstrap__():
    # Auto-generated loader shim: locate the bundled compiled extension
    # (tslib.cp35-win32.pyd) inside the package and load it in place of this
    # pure-Python module, then delete the shim names.
    global __bootstrap__, __loader__, __file__
    import sys, pkg_resources, imp
    __file__ = pkg_resources.resource_filename(__name__, 'tslib.cp35-win32.pyd')
    __loader__ = None; del __bootstrap__, __loader__
    imp.load_dynamic(__name__,__file__)
__bootstrap__()
|
stevenzhang18/Indeed-Flask
|
lib/pandas/tslib.py
|
Python
|
apache-2.0
| 293
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: make several optional Doctor contact/profile
    fields nullable (and switch email to an EmailField)."""
    dependencies = [
        ('drchronoAPI', '0003_doctor'),
    ]
    operations = [
        migrations.AlterField(
            model_name='doctor',
            name='cell_phone',
            field=models.CharField(max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='doctor',
            name='email',
            field=models.EmailField(max_length=254, null=True),
        ),
        migrations.AlterField(
            model_name='doctor',
            name='home_phone',
            field=models.CharField(max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='doctor',
            name='job_title',
            field=models.CharField(max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='doctor',
            name='office_phone',
            field=models.CharField(max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='doctor',
            name='suffix',
            field=models.CharField(max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='doctor',
            name='website',
            field=models.CharField(max_length=255, null=True),
        ),
    ]
|
TimothyBest/Happy_Birthday_drchrono
|
happy_birthday_drchrono/drchronoAPI/migrations/0004_auto_20150820_1823.py
|
Python
|
mit
| 1,414
|
def len_(env, expr):
    """Native len(): given a ("string", s) value, return ("number", len(s)).

    `env` is accepted for the native-function calling convention but unused.
    """
    kind, value = expr[0], expr[1]
    if kind == "string":
        return ("number", len(value))
    raise Exception("len() can only be called for a string.")
|
andybalaam/cell
|
pycell/prologue/native/len_.py
|
Python
|
mit
| 152
|
"""
fg.py: fg stands for FunctionGraph
Contains the FunctionGraph class and exception
types that it can raise
"""
from __future__ import print_function
import sys
import time
import traceback
import theano
from theano.gof import graph
from theano.gof import utils
from theano.gof import toolbox
from theano import config
from theano.compat import OrderedDict
from six import iteritems, itervalues
from six.moves import StringIO
from theano.misc.ordered_set import OrderedSet
NullType = None  # placeholder; __import_r__ lazily imports .null_type.NullType to avoid a circular import
class CachedConstantError(Exception):
    """An exception thrown when we put in a FunctionGraph a Constant
    that is cached. This should not happen as the user can reuse this
    cached constant in other FunctionGraph.

    Raised by FunctionGraph.__setup_r__; clone the graph first to avoid it.
    """
    pass
class InconsistencyError(Exception):
    """
    This exception should be thrown by listeners to FunctionGraph when the
    graph's state is invalid.
    """
    pass
class MissingInputError(Exception):
    """
    A symbolic input needed to compute the outputs is missing.

    Raised during graph import when a variable has no owner, is not a
    Constant, and was not declared among the FunctionGraph inputs.
    """
    pass
class FunctionGraph(utils.object2):
""" WRITEME
A FunctionGraph represents a subgraph bound by a set of input variables and a
set of output variables, ie a subgraph that specifies a theano function.
The inputs list should contain all the inputs
on which the outputs depend. Variables of type Constant are
not counted as inputs.
The FunctionGraph supports the replace operation which allows to replace a
variable in the subgraph by another, e.g. replace (x + x).out by (2
* x).out. This is the basis for optimization in theano.
This class is also reponsible for verifying that a graph is valid
(ie, all the dtypes and broadcast patterns are compatible with the
way the the Variables are used) and for annotating the Variables with
a .clients field that specifies which Apply nodes use the variable.
The .clients field combined with the .owner field and the Apply nodes'
.inputs field allows the graph to be traversed in both directions.
It can also be extended with new features using
FunctionGraph.attach_feature(<toolbox.Feature instance>).
See toolbox.Feature for event types and documentation.
Extra features allow the FunctionGraph to verify new properties of
a graph as it is optimized.
# TODO: are there other things features can do to the fgraph?
Historically, the FunctionGraph was called an Env. Keep this in mind
while reading out-of-date documentation, e-mail support threads, etc.
"""
def __init__(self, inputs, outputs, features=None, clone=True):
"""
Create an FunctionGraph which operates on the subgraph bound by the inputs and
outputs sets.
This class keeps a pointer to the inputs and outputs, and also modifies
them.
#TODO: document what variables are[not] set in the FunctionGraph when a feature
is added via the constructor. How constructed is the FunctionGraph?
Note: the intermediate nodes between 'inputs' and 'outputs' are not explicitely
passed.
:param inputs: inputs nodes of the graph, usually declared by the user
:param outputs: outputs nodes of the graph.
:param clone: If true, we will clone the graph. This is
useful to remove the constant cache problem.
"""
if clone:
inputs, outputs = graph.clone(inputs, outputs)
self.execute_callbacks_time = 0
self.execute_callbacks_times = {}
if features is None:
features = []
# XXX: Unless I'm missing something (but there's no documentation,
# so I probably am) this should be a set.
self._features = []
# All apply nodes in the subgraph defined by inputs and
# outputs are cached in this field
self.apply_nodes = set()
# Ditto for variable nodes.
# It must contain all fgraph.inputs and all apply_nodes
# outputs even if they aren't used in the graph.
self.variables = set()
self.inputs = list(inputs)
self.outputs = outputs
for f in features:
self.attach_feature(f)
self.attach_feature(toolbox.ReplaceValidate())
for input in self.inputs:
if input.owner is not None:
raise ValueError("One of the provided inputs is the output of"
"an already existing node. "
"If that is okay, either discard that "
"input's owner or use graph.clone.")
self.__setup_r__(input)
self.variables.add(input)
for output in outputs:
self.__import_r__(output, reason="init")
for i, output in enumerate(outputs):
output.clients.append(('output', i))
self.node_locks = {}
self.variable_locks = {}
self.profile = None
# Setup a Variable #
def __setup_r__(self, r):
# sets up r so it belongs to this fgraph
if getattr(r, 'cached', False):
raise CachedConstantError(
"You manually constructed a FunctionGraph, but you passed it a"
" graph that has a cached constant. This should not happen."
" Clone the graph before building the FunctionGraph.")
if (hasattr(r, 'fgraph') and
r.fgraph is not None and
r.fgraph is not self):
raise Exception("%s is already owned by another fgraph" % r)
r.fgraph = self
r.clients = []
# self.execute_callbacks('on_setup_variable', r)
def __setup_node__(self, node):
# sets up node so it belongs to this fgraph
if hasattr(node, 'fgraph') and node.fgraph is not self:
raise Exception("%s is already owned by another fgraph" % node)
if (hasattr(node.op, 'view_map') and
not all(isinstance(view, (list, tuple))
for view in itervalues(node.op.view_map))):
raise Exception("Op '%s' have a bad view map '%s',"
" the values must be tuples or lists." % (
str(node.op), str(node.op.view_map)))
if (hasattr(node.op, 'destroy_map') and
not all(isinstance(destroy, (list, tuple))
for destroy in itervalues(node.op.destroy_map))):
raise Exception("Op '%s' have a bad destroy map '%s',"
" the values must be tuples or lists." % (
str(node.op), str(node.op.destroy_map)))
node.fgraph = self
node.deps = {}
# self.execute_callbacks('on_setup_node', node)
def disown(self):
""" WRITEME
Cleans up all of this FunctionGraph's nodes and variables so they are not
associated with this FunctionGraph anymore.
The FunctionGraph should not be used anymore after disown is called.
This may not clean everything this FunctionGraph's features set in the
nodes and variables. If there are no features, this should set
them back to what they were originally.
"""
for apply_node in self.apply_nodes:
del apply_node.fgraph
del apply_node.deps
for variable in self.variables:
del variable.fgraph
del variable.clients
self.apply_nodes = set()
self.variables = set()
self.inputs = None
self.outputs = None
# clients #
def clients(self, r):
"""
Set of all the (node, i) pairs such that node.inputs[i] is r.
Tell differently, a list of (node,i) such that each node have
r as input at index i.
"""
return r.clients
def __add_clients__(self, r, new_clients):
""" WRITEME
r -> variable
new_clients -> list of (node, i) pairs such that node.inputs[i] is r.
Updates the list of clients of r with new_clients.
"""
if set(r.clients).intersection(set(new_clients)):
print('ERROR: clients intersect!', file=sys.stderr)
print(' RCLIENTS of', r, [(n, i, type(n), id(n))
for n, i in r.clients], file=sys.stderr)
print(' NCLIENTS of', r, [(n, i, type(n), id(n))
for n, i in new_clients], file=sys.stderr)
assert not set(r.clients).intersection(set(new_clients))
r.clients += new_clients
def __remove_clients__(self, r, clients_to_remove,
prune=True, reason=None):
""" WRITEME
r -> variable
clients_to_remove -> list of (op, i) pairs such that node.inputs[i] is not r anymore.
Removes all from the clients list of r.
"""
for entry in clients_to_remove:
r.clients.remove(entry)
assert entry not in r.clients # an op,i pair should be unique
if not r.clients:
if prune:
self.__prune_r__(r, reason)
return False
return True
return False
# import #
def __import_r__(self, variable, reason):
global NullType
if NullType is None:
from .null_type import NullType
# Imports the owners of the variables
if variable.owner and variable.owner not in self.apply_nodes:
self.__import__(variable.owner, reason=reason)
if (variable.owner is None and
not isinstance(variable, graph.Constant) and
variable not in self.inputs):
if isinstance(variable.type, NullType):
raise TypeError("Computation graph contains a NaN. " +
variable.type.why_null)
raise MissingInputError("Undeclared input", variable)
if not getattr(variable, 'fgraph', None) is self:
self.__setup_r__(variable)
self.variables.add(variable)
    def __import__(self, apply_node, check=True, reason=None):
        """Add `apply_node` and all of its not-yet-imported ancestors to the graph.

        check -> when True, validate ownership and declared inputs of every
            new node BEFORE mutating the graph, raising Exception or
            MissingInputError (with a verbose variable chain when
            config.exception_verbosity == 'high').
        Fires the 'on_import' callback for each newly added node.
        """
        # We import the nodes in topological order. We only are interested
        # in new nodes, so we use all variables we know of as if they were the input set.
        # (the functions in the graph module only use the input set to
        # know where to stop going down)
        new_nodes = graph.io_toposort(self.variables, apply_node.outputs)
        if check:
            for node in new_nodes:
                if hasattr(node, 'fgraph') and node.fgraph is not self:
                    raise Exception("%s is already owned by another fgraph" % node)
                for r in node.inputs:
                    if hasattr(r, 'fgraph') and r.fgraph is not self:
                        raise Exception("%s is already owned by another fgraph" % r)
                    if (r.owner is None and
                            not isinstance(r, graph.Constant) and
                            r not in self.inputs):
                        # Verbose error message
                        # Show a complete chain of variables from the missing input to an output
                        if config.exception_verbosity == 'high':
                            def find_path_to(output_var, input_var):
                                """ Returns a list of each variable on a (not necessarily unique)
                                path from input_var to output_var, where each variable in the
                                list has the preceding variable as one of its inputs.
                                Returns None if no path exists"""
                                # If output and input are the same we have a singleton path
                                if output_var is input_var:
                                    return [output_var]
                                # If output has no inputs then there is no path
                                owner = output_var.owner
                                if owner is None:
                                    return None
                                # If input_var is an input to the output node, there is a
                                # simple two element path
                                inputs = owner.inputs
                                if input_var in inputs:
                                    return [input_var, output_var]
                                # Otherwise we must recurse by searching for a path to one
                                # of our inputs, then appending the output to that path
                                for ipt in inputs:
                                    path = find_path_to(ipt, input_var)
                                    if path is not None:
                                        path.append(output_var)
                                        return path
                                # Since none of the above methods returned a path, there is none
                                return None
                            # Try different outputs until we find one that has a path to the missing input
                            for output in self.outputs:
                                path = find_path_to(output, r)
                                if path is not None:
                                    break
                            # if there is no path then r isn't really a graph input so we shouldn't be running error
                            # handler code in the first place
                            assert path is not None
                            tr = getattr(r.tag, 'trace', [])
                            detailed_err_msg = ""
                            if len(tr) > 0:
                                detailed_err_msg += "\nBacktrace when the variable is created:\n"
                                # Print separate message for each element in
                                # the list of backtraces
                                sio = StringIO()
                                for subtr in tr:
                                    traceback.print_list(subtr, sio)
                                detailed_err_msg += str(sio.getvalue())
                            raise MissingInputError(
                                'A variable that is an input to the graph was '
                                'neither provided as an input to the function '
                                'nor given a value. A chain of variables '
                                'leading from this input to an output is %s. '
                                'This chain may not be unique' % str(path) +
                                detailed_err_msg)
                        # Standard error message
                        raise MissingInputError((
                            "An input of the graph, used to compute %s, "
                            "was not provided and not given a value."
                            "Use the Theano flag exception_verbosity='high',"
                            "for more information on this error."
                            % str(node)),
                            r)
        # Second pass: actually register the new nodes and their variables.
        for node in new_nodes:
            assert node not in self.apply_nodes
            self.__setup_node__(node)
            self.apply_nodes.add(node)
            for output in node.outputs:
                self.__setup_r__(output)
                self.variables.add(output)
            for i, input in enumerate(node.inputs):
                if input not in self.variables:
                    self.__setup_r__(input)
                    self.variables.add(input)
                self.__add_clients__(input, [(node, i)])
            assert node.fgraph is self
            self.execute_callbacks('on_import', node, reason)
# prune #
    def __prune_r__(self, variable, reason=None):
        """Should be called for a variable that isn't used anymore
        (len(var.clients) == 0).

        This does not mean we will remove it from fgraph.variables: if the
        owner stays in the fgraph because other outputs are still used,
        the variable will stay in fgraph.variables.
        """
        # Prunes the owners of the variables.
        if variable.owner:
            self.__prune__(variable.owner, reason)
        # variable should not have any clients.
        # assert not variable.clients
        # variable should be in self.variables
        # Why this assert fail? Making it True could cause opt speed up
        # I think this is caused as we remove var in self.variables in
        # another place.
        # assert variable in self.variables
        if variable in self.variables:
            # If the owner have other outputs still used,
            # then we must keep that variable in the graph.
            if not variable.owner or not any(
                    [var for var in variable.owner.outputs
                     if var.clients]):
                self.variables.remove(variable)
                # This allow to quickly know if a var is still in the fgraph
                # or not.
                del variable.fgraph
    def __prune__(self, apply_node, reason=None):
        """Always called on the owner of a variable pruned from the graph.

        This does not mean we will remove the node from the graph: if other
        outputs are still used, we will keep the node in the graph.
        """
        # If apply_node's outputs have no clients, removes it from the graph
        # and recursively tries to prune its inputs. If at least one
        # of the op's outputs is an output to the graph or has a client
        # then __prune__ is a no-op.
        for output in apply_node.outputs:
            # Cannot prune an op which is an output or used somewhere
            if output.clients or output in self.outputs:
                return
        self.apply_nodes.remove(apply_node)
        self.variables.difference_update(apply_node.outputs)
        self.execute_callbacks('on_prune', apply_node, reason)
        # Cascade: dropping this node releases its inputs, which may in
        # turn become prunable (handled inside __remove_clients__).
        for i, input in enumerate(apply_node.inputs):
            self.__remove_clients__(input, [(apply_node, i)], reason=reason)
        # self.__prune_r__(apply_node.inputs)
# change input #
    def change_input(self, node, i, new_r, reason=None):
        """Change node.inputs[i] to new_r.

        `node` may also be the string 'output', in which case
        self.outputs[i] is replaced instead of an Apply node input.
        new_r.type == old_r.type must be True, where old_r is the
        current value of node.inputs[i] which we want to replace.

        For each feature that has a 'on_change_input' method, calls:
        feature.on_change_input(function_graph, node, i, old_r, new_r, reason)
        """
        # TODO: ERROR HANDLING FOR LISTENERS (should it complete the change or revert it?)
        if node == 'output':
            r = self.outputs[i]
            if not r.type == new_r.type:
                raise TypeError("The type of the replacement must be the"
                                " same as the type of the original Variable.",
                                r, new_r)
            self.outputs[i] = new_r
        else:
            if node.fgraph is not self:
                raise Exception("Cannot operate on %s because it does not"
                                " belong to this FunctionGraph" % node)
            r = node.inputs[i]
            if not r.type == new_r.type:
                raise TypeError("The type of the replacement must be the"
                                " same as the type of the original Variable.",
                                r, new_r)
            node.inputs[i] = new_r
        if r is new_r:
            # Replacing a variable by itself: nothing to do.
            return
        self.__import_r__(new_r, reason=reason)
        self.__add_clients__(new_r, [(node, i)])
        # prune=False defers pruning of the old variable until after the
        # 'on_change_input' callbacks have seen the change.
        prune = self.__remove_clients__(r, [(node, i)], False)
        # Precondition: the substitution is semantically valid
        # However it may introduce cycles to the graph, in which case the
        # transaction will be reverted later.
        self.execute_callbacks('on_change_input', node, i,
                               r, new_r, reason=reason)
        if prune:
            self.__prune_r__(r, reason=reason)
# replace #
    def replace(self, r, new_r, reason=None, verbose=None):
        """Replace variable `r` with `new_r` in the whole graph.

        This is the main interface to manipulate the subgraph in FunctionGraph.
        For every node that uses r as input, makes it use new_r instead.
        Type mismatches are bridged through r.type.convert_variable when
        possible; otherwise TypeError is raised.
        """
        if verbose is None:
            verbose = config.optimizer_verbose
        if verbose:
            print(reason, r, new_r)
        if hasattr(r, 'fgraph') and r.fgraph is not self:
            raise Exception("Cannot replace %s because it does not belong "
                            "to this FunctionGraph" % r, str(reason))
        if r.type != new_r.type:
            new_r2 = r.type.convert_variable(new_r)
            # We still make sure that the type converts correctly
            if new_r2 is None or new_r2.type != r.type:
                raise TypeError("The type of the replacement must be "
                                "compatible with the type of the original "
                                "Variable.", r, new_r, r.type, new_r.type,
                                str(reason))
            new_r = new_r2
        if r not in self.variables:
            # this variable isn't in the graph... don't raise an
            # exception here, just return silently because it makes it
            # easier to implement some optimizations for
            # multiple-output ops
            return
        if theano.config.compute_test_value != 'off':
            try:
                tval = theano.gof.op.get_test_value(r)
                new_tval = theano.gof.op.get_test_value(new_r)
            except AttributeError:
                # One of the two variables has no test value: nothing to compare.
                pass
            else:
                tval_shape = getattr(tval, 'shape', None)
                new_tval_shape = getattr(new_tval, 'shape', None)
                if tval_shape != new_tval_shape:
                    raise AssertionError(
                        "The replacement variable has a test value with "
                        "a shape different from the original variable's "
                        "test value. Original: %s, new: %s"
                        % (tval_shape, new_tval_shape),
                        r, new_r, str(reason))
        for node, i in list(r.clients):  # copy the client list for iteration
            assert (node == 'output' and self.outputs[i] is r) or (node.inputs[i] is r)
            self.change_input(node, i, new_r, reason=reason)
        # sometimes the following is triggered.  If you understand why, please explain to James.
        # He's curious... -JB20090331
        # if len(r.clients) != 0:
        #    print >> sys.stderr, "WARNING: CLIENTS LEFT AFTER REPLACE", r, r.clients
def replace_all(self, pairs, reason=None):
"""WRITEME"""
for r, new_r in pairs:
self.replace(r, new_r, reason=reason)
    def attach_feature(self, feature):
        """
        Adds a gof.toolbox.Feature to this function_graph
        and triggers its on_attach callback.

        A feature that is already attached (identical object), or whose
        on_attach raises toolbox.AlreadyThere, is silently skipped.
        """
        # Filter out literally identical features
        if feature in self._features:
            return # the feature is already present
        # Filter out functionally identical features.
        # Features may use their on_attach method to raise
        # toolbox.AlreadyThere if they detect that some
        # installed feature does the same thing already
        attach = getattr(feature, 'on_attach', None)
        if attach is not None:
            try:
                attach(self)
            except toolbox.AlreadyThere:
                return
        # Start (or keep) the per-feature callback-time counter.
        self.execute_callbacks_times.setdefault(feature, 0)
        # it would be nice if we could require a specific class instead of
        # a "workalike" so we could do actual error checking
        # if not isinstance(feature, toolbox.Feature):
        #    raise TypeError("Expected gof.toolbox.Feature instance, got "+\
        #            str(type(feature)))
        # Add the feature
        self._features.append(feature)
    def remove_feature(self, feature):
        """Remove `feature` from the graph; no-op if it was never attached.

        Calls feature.on_detach(function_graph) if an on_detach method
        is defined.
        """
        try:
            # ValueError just means the feature was not attached; treat
            # removal of an unknown feature as a no-op.
            self._features.remove(feature)
        except ValueError:
            return
        detach = getattr(feature, 'on_detach', None)
        if detach is not None:
            detach(self)
# callback utils #
    def execute_callbacks(self, name, *args, **kwargs):
        """Call ``getattr(feature, name)(self, *args, **kwargs)`` for each
        attached feature that has a method called `name`.

        Per-feature wall-clock time is accumulated in
        ``execute_callbacks_times`` and the grand total in
        ``execute_callbacks_time``.
        """
        t0 = time.time()
        for feature in self._features:
            try:
                fn = getattr(feature, name)
            except AttributeError:
                # this is safe because there is no work done inside the
                # try; the AttributeError really must come from feature.${name}
                # not existing
                continue
            tf0 = time.time()
            fn(self, *args, **kwargs)
            self.execute_callbacks_times[feature] += time.time() - tf0
        self.execute_callbacks_time += time.time() - t0
def collect_callbacks(self, name, *args):
"""WRITEME
Returns a dictionary d such that:
d[feature] == getattr(feature, name)(*args)
For each feature which has a method called after name.
"""
d = {}
for feature in self._features:
try:
fn = getattr(feature, name)
except AttributeError:
continue
d[feature] = fn(*args)
return d
# misc #
def toposort(self):
"""WRITEME
Returns an ordering of the graph's Apply nodes such that:
- All the nodes of the inputs of a node are before that node.
- Satisfies the orderings provided by each feature that has
an 'orderings' method.
If a feature has an 'orderings' method, it will be called with
this FunctionGraph as sole argument. It should return a dictionary of
{node: predecessors} where predecessors is a list of nodes
that should be computed before the key node.
"""
if len(self.apply_nodes) < 2:
# optimization
# when there are 0 or 1 nodes, no sorting is necessary
# This special case happens a lot because the OpWiseCLinker
# produces 1-element graphs.
return list(self.apply_nodes)
fg = self
ords = self.orderings()
order = graph.io_toposort(fg.inputs, fg.outputs, ords)
return order
    def orderings(self):
        """
        Return dict d s.t. d[node] is a list of nodes that must be evaluated
        before node itself can be evaluated.

        This is used primarily by the destroy_handler feature to ensure that
        all clients of any destroyed inputs have already computed their
        outputs.

        :note: This only calls the orderings() fct on all features. It does not
               take care of computing dependencies by itself.
        :note: Ordered containers are enforced so that toposort stays
               deterministic across runs.
        """
        ords = OrderedDict()
        assert isinstance(self._features, list)
        for feature in self._features:
            if hasattr(feature, 'orderings'):
                orderings = feature.orderings(self)
                if not isinstance(orderings, OrderedDict):
                    raise TypeError("Non-deterministic return value from " +
                                    str(feature.orderings) +
                                    ". Nondeterministic object is " +
                                    str(orderings))
                for node, prereqs in iteritems(orderings):
                    if not isinstance(prereqs, (list, OrderedSet)):
                        raise TypeError(
                            "prereqs must be a type with a "
                            "deterministic iteration order, or toposort "
                            " will be non-deterministic.")
                    ords.setdefault(node, []).extend(prereqs)
        # eliminate duplicate prereqs
        for (node, prereqs) in iteritems(ords):
            ords[node] = list(OrderedSet(prereqs))
        return ords
    def check_integrity(self):
        """Run an exhaustive consistency check of the cached graph state.

        Verifies the apply_nodes/variables caches, the fgraph back-pointers
        and all clients lists; raises Exception on the first inconsistency.
        Call this for a diagnosis if things go awry.
        """
        nodes = graph.ops(self.inputs, self.outputs)
        if self.apply_nodes != nodes:
            missing = nodes.difference(self.apply_nodes)
            excess = self.apply_nodes.difference(nodes)
            raise Exception(
                "The nodes are inappropriately cached. missing, in excess: ",
                missing, excess)
        for node in nodes:
            if node.fgraph is not self:
                raise Exception("Node should belong to the FunctionGraph.",
                                node)
            for i, variable in enumerate(node.inputs):
                if variable.fgraph is not self:
                    raise Exception(
                        "Input of node should belong to the FunctionGraph.",
                        variable, (node, i))
                if (node, i) not in variable.clients:
                    raise Exception("Inconsistent clients list.",
                                    (node, i), variable.clients)
        variables = set(graph.variables(self.inputs, self.outputs))
        if set(self.variables) != variables:
            missing = variables.difference(self.variables)
            excess = self.variables.difference(variables)
            raise Exception(
                "The variables are inappropriately cached. missing, in excess: ",
                missing, excess)
        for variable in variables:
            if (variable.owner is None and
                    variable not in self.inputs and
                    not isinstance(variable, graph.Constant)):
                raise Exception("Undeclared input.", variable)
            if variable.fgraph is not self:
                raise Exception("Variable should belong to the FunctionGraph.",
                                variable)
            for node, i in variable.clients:
                if node == 'output':
                    # 'output' clients point into self.outputs rather than
                    # into an Apply node's inputs.
                    if self.outputs[i] is not variable:
                        raise Exception("Inconsistent clients list.",
                                        variable, self.outputs[i])
                    continue
                if node not in nodes:
                    raise Exception("Client not in FunctionGraph.",
                                    variable, (node, i))
                if node.inputs[i] is not variable:
                    raise Exception("Inconsistent clients list.",
                                    variable, node.inputs[i])
    def __str__(self):
        # Render as "[<output expressions>]" via graph.as_string.
        return "[%s]" % ", ".join(graph.as_string(self.inputs, self.outputs))
    def __repr__(self):
        # repr mirrors str for FunctionGraph.
        return self.__str__()
# clone #
    def clone(self, check_integrity=True):
        """Return a copy of this graph (see clone_get_equiv for the mapping)."""
        return self.clone_get_equiv(check_integrity)[0]
    def clone_get_equiv(self, check_integrity=True):
        """Clone the graph and return ``(clone, equiv)``.

        `equiv` maps each original variable to its copy in the clone.
        Attached features are re-attached to the clone as well.
        """
        equiv = graph.clone_get_equiv(self.inputs, self.outputs)
        if check_integrity:
            self.check_integrity()
        e = FunctionGraph([equiv[i] for i in self.inputs],
                          [equiv[o] for o in self.outputs])
        if check_integrity:
            e.check_integrity()
        for feature in self._features:
            e.attach_feature(feature)
        return e, equiv
    def __getstate__(self):
        """This is needed as some features introduce instancemethods and
        those are not picklable.
        """
        d = self.__dict__.copy()
        for feature in self._features:
            # Features list, via pickle_rm_attr, attributes they added
            # that must be dropped before pickling.
            for attr in getattr(feature, "pickle_rm_attr", []):
                del d[attr]
        # The class Updater take fct as parameter and they are lambda function, so unpicklable.
        # execute_callbacks_times have reference to optimizer, and they can't
        # be pickled as the decorators with parameters aren't pickable.
        if "execute_callbacks_times" in d:
            del d["execute_callbacks_times"]
        return d
    def __setstate__(self, dct):
        """Restore state and let features rebuild what __getstate__ dropped."""
        self.__dict__.update(dct)
        for feature in self._features:
            if hasattr(feature, "unpickle"):
                feature.unpickle(self)
|
nke001/attention-lvcsr
|
libs/Theano/theano/gof/fg.py
|
Python
|
mit
| 32,569
|
#! /usr/bin/python
########################################################################
# Backend to dynamicly create and destroy virtual machines
# Copyright (C) 2015 Carl J Smith
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
########################################################################
import shutil
import os
import sys
########################################################################
def makeDir(remoteDir):
	import os
	'''Create `remoteDir`, creating every missing parent directory on the way
	(like `mkdir -p`).

	Beware of spelling mistakes: this mindlessly creates exactly the
	directories you typed.  Levels that already exist are reported and
	skipped.

	BUG FIX: the original built the full path and called os.mkdir() once,
	outside the component loop, so nested paths with missing parents
	raised OSError despite what the docstring promised.  Each component
	is now created in turn.
	'''
	path = ''
	for component in remoteDir.split('/'):
		path += (component + '/')
		if os.path.exists(path):
			# Same informational message as before, per existing level.
			print(path + ' : Already exists!, Moving on...')
		else:
			os.mkdir(path)
########################################################################
def loadFile(fileName):
	'''Read `fileName` and return its text with comment lines stripped.

	Lines whose first character is '#' are skipped.  Returns the remaining
	text (possibly the empty string) on success, or False when the file
	cannot be opened.

	Fixes over the original: catch only IOError instead of a bare except,
	always close the handle (even if reading fails mid-way), and drop the
	dead `fileText == None` / unreachable `return False` code.
	'''
	sys.stdout.write('Loading :' + fileName)
	try:
		fileObject = open(fileName, 'r')
	except IOError:
		sys.stdout.write('Failed to load : ' + fileName + '\n')
		return False
	try:
		fileText = ''
		lineCount = 0
		for line in fileObject:
			if line[:1] != '#':
				fileText += line
			sys.stdout.write('Loading line ' + str(lineCount) + '...\r')
			lineCount += 1
	finally:
		# Release the handle even if iteration raised.
		fileObject.close()
	sys.stdout.write('Finished Loading :' + fileName + '\r')
	sys.stdout.write(' ' * 40 + '\r')
	return fileText
########################################################################
def writeFile(fileName, contentToWrite):
	'''Write `contentToWrite` to `fileName`.

	Returns False when the target directory does not exist or the write
	fails; returns None on success (matching the original interface).

	Fixes over the original: use os.path.dirname instead of a manual
	os.sep split (which broke '/'-separated paths on Windows and, for a
	bare file name, produced '' so os.path.exists('') was False and the
	file was never written to the current directory); catch IOError
	instead of a bare except; close the handle even on a failed write.
	'''
	directory = os.path.dirname(fileName)
	# Empty directory component means "current working directory",
	# which always exists.
	if directory and not os.path.exists(directory):
		print('Failed to write file, path: ' + directory + ' does not exist!')
		return False
	try:
		fileObject = open(fileName, 'w')
		try:
			fileObject.write(contentToWrite)
		finally:
			fileObject.close()
	except IOError:
		print('Failed to write file: ' + fileName)
		return False
	print('Wrote file: ' + fileName)
########################################################################
def currentDirectory():
	'''Return the absolute directory containing this script, with a
	trailing path separator (e.g. "/opt/tvd/").

	Replaces the original hand-rolled split/rejoin loop with
	os.path.dirname, which is equivalent for absolute paths and safer.
	'''
	return os.path.dirname(os.path.abspath(__file__)) + os.path.sep
########################################################################
########################################################################
# Command-line entry point: parse the arguments, then perform the
# requested action (connect / server-setup / server-connect).
########################################################################
runType = 'default'; # used for when system arguments are not used
# split the arguments by - signs to pull arguments more correctly
# this allows you to split that result by spaces for arguments with multuple entries
inputs = ' '.join(sys.argv).replace('--','-').split('-')
for arg in inputs:
	# split the arguments by spaces
	arguments = arg.split(' ')
	# grab main argument into its own variable
	mainArgument = arguments[0]
	# cut off the first argument for reading subsequent arguments
	arguments = arguments[1:]
	if (mainArgument in ['h','help']):
		# print the help file
		# BUG FIX: the helper defined above is loadFile(), not openFile()
		print(loadFile('help.txt'))
		exit()
	elif (mainArgument in ['c','connect']):
		# set the address to the first given address in arguments
		# address needs to be username@location like ssh
		destAddress= arguments[0]
		# set the runtype to connect
		runType = 'connect'
	elif (mainArgument in ['s','server-setup']):
		destAddress= arguments[0]
		runType='serverSetup'
	elif (mainArgument in ['S','server-connect']):
		destAddress= arguments[0]
		runType='serverConnect'
####################################################################
# deliver the payload after reading all arguments to the program
####################################################################
if runType=='connect':
	# create the mac address based string for name of virtual machine
	machineName=os.popen('ifconfig eth0 | sed "s/eth0.*Link.*.HWaddr //g" | sed "s/ $^inet.*//g" | sed "/^$/d" | sed "s/:/_/g"').read().split(' ')[0]
	# delete previous instance of virtual machine, if one does
	# not exist then this does nothing
	if '--debug-run' in sys.argv:
		print('ssh -t '+destAddress+' "virsh undefine '+machineName+' --remove-all-storage --wipe-storage"')
	else:
		os.system('ssh -t '+destAddress+' "virsh undefine '+machineName+' --remove-all-storage --wipe-storage"')
	# connect to a remote virt-manager instance and create
	# a new instance of the virtual machine
	if '--debug-run' in sys.argv:
		#print('ssh -t '+destAddress+' "virt-clone --replace -o baseImage --name '+machineName+' --file /usr/share/diskimages/'+machineName+'.qcow2;"')
		print('ssh -t '+destAddress+' "virt-clone -o baseImage --name '+machineName+' --file /var/lib/libvirt/images/'+machineName+'.qcow2;"')
	else:
		#os.system('ssh -t '+destAddress+' "virt-clone -o baseImage --name '+machineName+' --file /usr/share/diskimages/'+machineName+'.qcow2;"')
		os.system('ssh -t '+destAddress+' "virt-clone -o baseImage --name '+machineName+' --file /var/lib/libvirt/images/'+machineName+'.qcow2;"')
	# launch virt-viewer to remotely connect to newly created machine
	#print('virt-viewer -frk --connect qemu+ssh://'+destAddress+'/ '+machineName)
	#os.system('virt-viewer -frk --connect qemu+ssh://'+destAddress+'/ '+machineName)
	# start the virtual machine
	if '--debug-run' in sys.argv:
		#print('ssh -t '+destAddress+' "aa-complain /usr/sbin/libvirtd"')
		print('ssh -t '+destAddress+' "virsh start '+machineName+'"')
		#print('ssh -t '+destAddress+' "aa-enforce /usr/sbin/libvirtd"')
	else:
		#os.system('ssh -t '+destAddress+' "aa-complain /usr/sbin/libvirtd"')
		os.system('ssh -t '+destAddress+' "virsh start '+machineName+'"')
		#os.system('ssh -t '+destAddress+' "aa-enforce /usr/sbin/libvirtd"')
	# run virt-viewer though x11 forwarding
	if '--debug-run' in sys.argv:
		print('ssh '+destAddress+' -t -X virt-viewer -frk '+machineName)
	else:
		os.system('ssh '+destAddress+' -t -X virt-viewer -frk '+machineName)
	# -r = reconnect, -k = kiosk mode, -f = fullscreen
elif runType=='serverConnect':
	# BUG FIX: os.path.exists() does not expand '~', so the original check
	# was always False and ssh-keygen ran on every invocation.
	if os.path.exists(os.path.expanduser('~/.ssh/id_rsa')):
		print('SSH Key exists! Skipping key generation.')
	else:
		# create rsa key for client
		os.system('ssh-keygen -N "" -f ~/.ssh/id_rsa')
		# copy the key to the server
		os.system('ssh-copy-id '+destAddress)
elif runType=='serverSetup':
	os.system('ssh -t '+destAddress+' "sudo apt-get install virt-manager --assume-yes"')
	os.system('ssh -t '+destAddress+' "sudo apt-get install virt-viewer --assume-yes"')
exit()
|
dude56987/TVD
|
tvd.py
|
Python
|
gpl-3.0
| 7,141
|
# Copyright 2009-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DEPRECATED - Different managers to handle when cursors are killed after
they are closed.
New cursor managers should be defined as subclasses of CursorManager and can be
installed on a connection by calling
`pymongo.connection.Connection.set_cursor_manager`.
.. versionchanged:: 2.1+
Deprecated.
"""
import weakref
class CursorManager(object):
    """The default cursor manager.

    This manager will kill cursors one at a time as they are closed.
    """
    def __init__(self, connection):
        """Instantiate the manager.

        :Parameters:
          - `connection`: a Mongo Connection
        """
        # Weak reference so the manager does not keep the connection alive.
        self.__connection = weakref.ref(connection)
    def close(self, cursor_id):
        """Close a cursor by killing it immediately.

        Raises TypeError if cursor_id is not an instance of (int, long).

        :Parameters:
          - `cursor_id`: cursor id to close
        """
        if not isinstance(cursor_id, (int, long)):
            raise TypeError("cursor_id must be an instance of (int, long)")
        # NOTE(review): assumes the connection is still alive; the weakref
        # returns None after it is garbage collected -- confirm callers.
        self.__connection().kill_cursors([cursor_id])
class BatchCursorManager(CursorManager):
    """A cursor manager that kills cursors in batches.
    """
    def __init__(self, connection):
        """Instantiate the manager.

        :Parameters:
          - `connection`: a Mongo Connection
        """
        # Cursor ids queued for a batched kill.
        self.__dying_cursors = []
        # Flush once the queue grows past this many ids.
        self.__max_dying_cursors = 20
        # Weak reference so the manager does not keep the connection alive.
        self.__connection = weakref.ref(connection)
        CursorManager.__init__(self, connection)
    def __del__(self):
        """Cleanup - be sure to kill any outstanding cursors.

        FIX: dereference the weakref once and guard against it having died
        (it returns None once the connection is garbage collected, which
        previously raised TypeError inside this finalizer); also skip the
        round-trip entirely when no cursors are pending.
        """
        connection = self.__connection()
        if connection is not None and self.__dying_cursors:
            connection.kill_cursors(self.__dying_cursors)
    def close(self, cursor_id):
        """Close a cursor by killing it in a batch.

        Raises TypeError if cursor_id is not an instance of (int, long).

        :Parameters:
          - `cursor_id`: cursor id to close
        """
        if not isinstance(cursor_id, (int, long)):
            raise TypeError("cursor_id must be an instance of (int, long)")
        self.__dying_cursors.append(cursor_id)
        if len(self.__dying_cursors) > self.__max_dying_cursors:
            self.__connection().kill_cursors(self.__dying_cursors)
            self.__dying_cursors = []
|
cortext/crawtextV2
|
~/venvs/crawler/lib/python2.7/site-packages/pymongo/cursor_manager.py
|
Python
|
mit
| 2,846
|
import time
import gevent
import hashlib
import six
from six.moves import xrange
from . import events
from .exception import StopLocust
from .log import console_logger
STATS_NAME_WIDTH = 60
class RequestStatsAdditionError(Exception):
    """Raised when request stats cannot be combined.

    NOTE(review): inferred from the name; no raise site is visible in this
    chunk of the module.
    """
    pass
class RequestStats(object):
    """Registry of per-(name, method) request statistics entries."""

    def __init__(self):
        self.entries = {}
        self.errors = {}
        self.num_requests = 0
        self.num_failures = 0
        self.max_requests = None
        self.last_request_timestamp = None
        self.start_time = None

    def get(self, name, method):
        """
        Retrieve the StatsEntry for (name, method), creating it on first use.
        """
        key = (name, method)
        entry = self.entries.get(key)
        if not entry:
            entry = StatsEntry(self, name, method)
            self.entries[key] = entry
        return entry

    def aggregated_stats(self, name="Total", full_request_history=False):
        """
        Return a single StatsEntry aggregating every entry in `entries`.
        """
        total = StatsEntry(self, name, method=None)
        for stats_entry in six.itervalues(self.entries):
            total.extend(stats_entry, full_request_history=full_request_history)
        return total

    def reset_all(self):
        """
        Zero the global counters, restart the clock and reset every entry.
        """
        self.start_time = time.time()
        self.num_requests = 0
        self.num_failures = 0
        for stats_entry in six.itervalues(self.entries):
            stats_entry.reset()

    def clear_all(self):
        """
        Drop all entries and errors and forget all counters and timestamps.
        """
        self.entries = {}
        self.errors = {}
        self.num_requests = 0
        self.num_failures = 0
        self.max_requests = None
        self.last_request_timestamp = None
        self.start_time = None
class StatsEntry(object):
    """
    Represents a single stats entry (name and method).

    The class-level attributes below document per-instance state; all of
    them are (re)initialized by reset().
    """
    name = None
    """ Name (URL) of this stats entry """
    method = None
    """ Method (GET, POST, PUT, etc.) """
    num_requests = None
    """ The number of requests made """
    num_failures = None
    """ Number of failed request """
    total_response_time = None
    """ Total sum of the response times """
    min_response_time = None
    """ Minimum response time """
    max_response_time = None
    """ Maximum response time """
    num_reqs_per_sec = None
    """ A {second => request_count} dict that holds the number of requests made per second """
    response_times = None
    """
    A {response_time => count} dict that holds the response time distribution of all
    the requests.

    The keys (the response time in ms) are rounded to store 1, 2, ... 9, 10, 20. .. 90,
    100, 200 .. 900, 1000, 2000 ... 9000, in order to save memory.

    This dict is used to calculate the median and percentile response times.
    """
    total_content_length = None
    """ The sum of the content length of all the requests for this entry """
    start_time = None
    """ Time of the first request for this entry """
    last_request_timestamp = None
    """ Time of the last request for this entry """
    def __init__(self, stats, name, method):
        # Parent RequestStats registry (None for detached entries, e.g.
        # those rebuilt by unserialize()).
        self.stats = stats
        self.name = name
        self.method = method
        # Start from a clean slate of counters.
        self.reset()
    def reset(self):
        """Zero all counters, distributions and timestamps for this entry."""
        self.start_time = time.time()
        self.num_requests = 0
        self.num_failures = 0
        self.total_response_time = 0
        self.response_times = {}
        self.min_response_time = None
        self.max_response_time = 0
        self.last_request_timestamp = int(time.time())
        self.num_reqs_per_sec = {}
        self.total_content_length = 0
    def log(self, response_time, content_length):
        """Record one request: its response time (ms) and body size (bytes).

        Also bumps the global counter on the parent RequestStats.
        """
        self.stats.num_requests += 1
        self.num_requests += 1
        self._log_time_of_request()
        self._log_response_time(response_time)
        # increase total content-length
        self.total_content_length += content_length
    def _log_time_of_request(self):
        # Bump the per-second request counter and remember the time of the
        # most recent request, both locally and on the parent RequestStats.
        t = int(time.time())
        self.num_reqs_per_sec[t] = self.num_reqs_per_sec.setdefault(t, 0) + 1
        self.last_request_timestamp = t
        self.stats.last_request_timestamp = t
    def _log_response_time(self, response_time):
        """Fold `response_time` into min/max/total and the rounded
        response-time distribution."""
        self.total_response_time += response_time
        if self.min_response_time is None:
            self.min_response_time = response_time
        self.min_response_time = min(self.min_response_time, response_time)
        self.max_response_time = max(self.max_response_time, response_time)
        # to avoid to much data that has to be transfered to the master node when
        # running in distributed mode, we save the response time rounded in a dict
        # so that 147 becomes 150, 3432 becomes 3400 and 58760 becomes 59000
        if response_time < 100:
            rounded_response_time = response_time
        elif response_time < 1000:
            rounded_response_time = int(round(response_time, -1))
        elif response_time < 10000:
            rounded_response_time = int(round(response_time, -2))
        else:
            rounded_response_time = int(round(response_time, -3))
        # increase request count for the rounded key in response time dict
        self.response_times.setdefault(rounded_response_time, 0)
        self.response_times[rounded_response_time] += 1
    def log_error(self, error):
        """Record one failed request and aggregate it by (method, name, error).

        StatsError is defined elsewhere in this module.
        """
        self.num_failures += 1
        self.stats.num_failures += 1
        key = StatsError.create_key(self.method, self.name, error)
        entry = self.stats.errors.get(key)
        if not entry:
            entry = StatsError(self.method, self.name, error)
            self.stats.errors[key] = entry
        entry.occured()
    @property
    def fail_ratio(self):
        # Fraction of failures.  The denominator is num_requests +
        # num_failures -- presumably num_requests counts only logged
        # (successful) requests; TODO confirm against the callers of
        # log()/log_error().
        try:
            return float(self.num_failures) / (self.num_requests + self.num_failures)
        except ZeroDivisionError:
            if self.num_failures > 0:
                return 1.0
            else:
                return 0.0
    @property
    def avg_response_time(self):
        # Mean response time in ms; 0 when no requests were recorded.
        try:
            return float(self.total_response_time) / self.num_requests
        except ZeroDivisionError:
            return 0
    @property
    def median_response_time(self):
        # Median computed from the rounded distribution; 0 when empty.
        # median_from_dict is a module-level helper defined elsewhere in
        # this file.
        if not self.response_times:
            return 0
        return median_from_dict(self.num_requests, self.response_times)
    @property
    def current_rps(self):
        # Requests/s averaged over the window
        # [last_request_timestamp - 12, last_request_timestamp - 2).
        # The newest 2 seconds are excluded -- presumably because they may
        # still be filling up; TODO confirm.  avg() is a module-level
        # helper defined elsewhere in this file.
        if self.stats.last_request_timestamp is None:
            return 0
        slice_start_time = max(self.stats.last_request_timestamp - 12, int(self.stats.start_time or 0))
        reqs = [self.num_reqs_per_sec.get(t, 0) for t in range(slice_start_time, self.stats.last_request_timestamp-2)]
        return avg(reqs)
    @property
    def total_rps(self):
        # Requests/s over the whole run; 0.0 before any request was seen.
        # The max(..., 1) guards against a zero-length elapsed time.
        if not self.stats.last_request_timestamp or not self.stats.start_time:
            return 0.0
        return self.num_requests / max(self.stats.last_request_timestamp - self.stats.start_time, 1)
    @property
    def avg_content_length(self):
        # Mean response body size; 0 when no requests were recorded.
        try:
            return self.total_content_length / self.num_requests
        except ZeroDivisionError:
            return 0
    def extend(self, other, full_request_history=False):
        """
        Extend the data from the current StatsEntry with the stats from another
        StatsEntry instance.

        If full_request_history is False, we'll only care to add the data from
        the last 20 seconds of other's stats. The reason for this argument is that
        extend can be used to generate an aggregate of multiple different StatsEntry
        instances on the fly, in order to get the *total* current RPS, average
        response time, etc.
        """
        self.last_request_timestamp = max(self.last_request_timestamp, other.last_request_timestamp)
        self.start_time = min(self.start_time, other.start_time)
        self.num_requests = self.num_requests + other.num_requests
        self.num_failures = self.num_failures + other.num_failures
        self.total_response_time = self.total_response_time + other.total_response_time
        self.max_response_time = max(self.max_response_time, other.max_response_time)
        # None/0 minimums mean "no data yet", hence the fallback chain.
        self.min_response_time = min(self.min_response_time or 0, other.min_response_time or 0) or other.min_response_time
        self.total_content_length = self.total_content_length + other.total_content_length
        if full_request_history:
            for key in other.response_times:
                self.response_times[key] = self.response_times.get(key, 0) + other.response_times[key]
            for key in other.num_reqs_per_sec:
                self.num_reqs_per_sec[key] = self.num_reqs_per_sec.get(key, 0) + other.num_reqs_per_sec[key]
        else:
            # still add the number of reqs per seconds the last 20 seconds
            for i in xrange(other.last_request_timestamp-20, other.last_request_timestamp+1):
                if i in other.num_reqs_per_sec:
                    self.num_reqs_per_sec[i] = self.num_reqs_per_sec.get(i, 0) + other.num_reqs_per_sec[i]
def serialize(self):
    """Return a plain-dict snapshot of this entry, suitable for message passing."""
    fields = (
        "name",
        "method",
        "last_request_timestamp",
        "start_time",
        "num_requests",
        "num_failures",
        "total_response_time",
        "max_response_time",
        "min_response_time",
        "total_content_length",
        "response_times",
        "num_reqs_per_sec",
    )
    return dict((field, getattr(self, field)) for field in fields)
@classmethod
def unserialize(cls, data):
    """Rebuild a StatsEntry from a dict produced by serialize()."""
    entry = cls(None, data["name"], data["method"])
    restored_fields = (
        "last_request_timestamp",
        "start_time",
        "num_requests",
        "num_failures",
        "total_response_time",
        "max_response_time",
        "min_response_time",
        "total_content_length",
        "response_times",
        "num_reqs_per_sec",
    )
    for field in restored_fields:
        setattr(entry, field, data[field])
    return entry
def get_stripped_report(self):
    """
    Return the serialized version of this StatsEntry, and then clear the current stats.
    """
    # Serialize first: reset() wipes the very counters the report is built from.
    report = self.serialize()
    self.reset()
    return report
def __str__(self):
    """One formatted console row: name, counts, fail %, avg/min/max/median, rps."""
    try:
        # The percentage denominator sums num_requests and num_failures,
        # i.e. failures are counted on top of the request count here.
        fail_percent = (self.num_failures/float(self.num_requests + self.num_failures))*100
    except ZeroDivisionError:
        fail_percent = 0
    # Column widths must stay in sync with the header printed by print_stats().
    return (" %-" + str(STATS_NAME_WIDTH) + "s %7d %12s %7d %7d %7d | %7d %7.2f") % (
        self.method + " " + self.name,
        self.num_requests,
        "%d(%.2f%%)" % (self.num_failures, fail_percent),
        self.avg_response_time,
        self.min_response_time or 0,
        self.max_response_time,
        self.median_response_time or 0,
        self.current_rps or 0
    )
def get_response_time_percentile(self, percent):
    """
    Get the response time that a certain number of percent of the requests
    finished within.

    Percent specified in range: 0.0 - 1.0

    Returns 0 when no response times have been recorded, so callers that
    format the result with %d never receive None.
    """
    # Number of requests that are allowed to be slower than the returned value.
    num_of_request = int(self.num_requests * percent)
    processed_count = 0
    # Walk the histogram from the slowest bucket downwards until the remaining
    # (faster) requests fit within the allowed count. Iterating the dict
    # directly is equivalent to the previous six.iterkeys() call.
    for response_time in sorted(self.response_times, reverse=True):
        processed_count += self.response_times[response_time]
        if (self.num_requests - processed_count) <= num_of_request:
            return response_time
    # No response times recorded (e.g. right after a reset): previously this
    # fell through and returned None, which crashed %d-style formatting.
    return 0
def percentile(self, tpl=" %-" + str(STATS_NAME_WIDTH) + "s %8d %6d %6d %6d %6d %6d %6d %6d %6d %6d"):
    """Render one formatted line of response-time percentiles (50%..99% + max).

    NOTE: the default tpl is evaluated once at import time from
    STATS_NAME_WIDTH. Raises ValueError when no requests were recorded.
    """
    if not self.num_requests:
        raise ValueError("Can't calculate percentile on url with no successful requests")
    return tpl % (
        str(self.method) + " " + self.name,
        self.num_requests,
        self.get_response_time_percentile(0.5),
        self.get_response_time_percentile(0.66),
        self.get_response_time_percentile(0.75),
        self.get_response_time_percentile(0.80),
        self.get_response_time_percentile(0.90),
        self.get_response_time_percentile(0.95),
        self.get_response_time_percentile(0.98),
        self.get_response_time_percentile(0.99),
        self.max_response_time
    )
class StatsError(object):
    """Aggregated record of one distinct (method, name, error) failure."""

    def __init__(self, method, name, error, occurences=0):
        self.method = method
        self.name = name
        self.error = error
        self.occurences = occurences

    @classmethod
    def parse_error(cls, error):
        """Return repr(error) with any ``object at 0x<addr>`` address masked."""
        text = repr(error)
        marker = "object at 0x"
        marker_pos = text.find(marker)
        if marker_pos < 0:
            return text
        # Keep the "0x" prefix; mask only the address digits that follow it.
        addr_start = marker_pos + len(marker) - 2
        addr_end = text.find(">", addr_start)
        if addr_end < 0:
            return text
        return text.replace(text[addr_start:addr_end], "0x....")

    @classmethod
    def create_key(cls, method, name, error):
        """Stable hex digest identifying this (method, name, error) combination."""
        raw = "%s.%s.%r" % (method, name, StatsError.parse_error(error))
        return hashlib.md5(raw.encode('utf-8')).hexdigest()

    def occured(self):
        """Record one more occurrence of this error."""
        self.occurences += 1

    def to_name(self):
        """Human-readable label combining method, name and the error's repr."""
        return "%s %s: %r" % (self.method,
                              self.name, repr(self.error))

    def to_dict(self):
        """Plain-dict form used when shipping errors between nodes."""
        return {
            "method": self.method,
            "name": self.name,
            "error": StatsError.parse_error(self.error),
            "occurences": self.occurences,
        }

    @classmethod
    def from_dict(cls, data):
        """Inverse of to_dict()."""
        return cls(data["method"], data["name"], data["error"], data["occurences"])
def avg(values):
    """Arithmetic mean of *values* as a float; returns 0.0 for an empty sequence."""
    count = len(values)
    if not count:
        return 0.0
    return sum(values, 0.0) / count
def median_from_dict(total, count):
    """
    Return the median response time from a {response_time: count} histogram.

    total is the number of requests made
    count is a dict {response_time: count}
    """
    # Index (counting from the fastest bucket) of the middle request. Floor
    # division keeps this an int on Python 3 as well; the original `/`
    # produced a float there and only yielded the same answer by accident.
    pos = (total - 1) // 2
    # Iterating the dict directly replaces the equivalent six.iterkeys() call.
    for k in sorted(count):
        if pos < count[k]:
            return k
        pos -= count[k]
# Module-level singleton that the event handlers below record into.
global_stats = RequestStats()
"""
A global instance for holding the statistics. Should be removed eventually.
"""
def on_request_success(request_type, name, response_time, response_length):
    """Event handler: record a successful request in the global stats."""
    cap = global_stats.max_requests
    if cap is not None and global_stats.num_requests + global_stats.num_failures >= cap:
        raise StopLocust("Maximum number of requests reached")
    global_stats.get(name, request_type).log(response_time, response_length)
def on_request_failure(request_type, name, response_time, exception):
    """Event handler: record a failed request in the global stats."""
    cap = global_stats.max_requests
    if cap is not None and global_stats.num_requests + global_stats.num_failures >= cap:
        raise StopLocust("Maximum number of requests reached")
    global_stats.get(name, request_type).log_error(exception)
def on_report_to_master(client_id, data):
    """Event handler: attach this node's stats and errors to an outgoing report."""
    entries = global_stats.entries
    # Skip entries with no traffic at all; get_stripped_report() also resets
    # each reported entry.
    data["stats"] = [
        entries[key].get_stripped_report()
        for key in six.iterkeys(entries)
        if entries[key].num_requests or entries[key].num_failures
    ]
    data["errors"] = dict((k, e.to_dict()) for k, e in six.iteritems(global_stats.errors))
    global_stats.errors = {}
def on_slave_report(client_id, data):
    """Event handler: merge a slave node's report into the global stats."""
    for stats_data in data["stats"]:
        entry = StatsEntry.unserialize(stats_data)
        request_key = (entry.name, entry.method)
        if request_key not in global_stats.entries:
            global_stats.entries[request_key] = StatsEntry(global_stats, entry.name, entry.method)
        global_stats.entries[request_key].extend(entry, full_request_history=True)
        global_stats.last_request_timestamp = max(global_stats.last_request_timestamp or 0, entry.last_request_timestamp)
    for error_key, error in six.iteritems(data["errors"]):
        if error_key in global_stats.errors:
            global_stats.errors[error_key].occurences += error["occurences"]
        else:
            global_stats.errors[error_key] = StatsError.from_dict(error)
# Wire the handlers above into Locust's global event hooks.
events.request_success += on_request_success
events.request_failure += on_request_failure
events.report_to_master += on_report_to_master
events.slave_report += on_slave_report
def print_stats(stats):
    """Log a formatted table of all request stats plus a totals row."""
    name_col = str(STATS_NAME_WIDTH)
    divider = "-" * (80 + STATS_NAME_WIDTH)
    console_logger.info((" %-" + name_col + "s %7s %12s %7s %7s %7s | %7s %7s") % ('Name', '# reqs', '# fails', 'Avg', 'Min', 'Max', 'Median', 'req/s'))
    console_logger.info(divider)
    total_rps = 0
    total_reqs = 0
    total_failures = 0
    for key in sorted(six.iterkeys(stats)):
        entry = stats[key]
        total_rps += entry.current_rps
        total_reqs += entry.num_requests
        total_failures += entry.num_failures
        console_logger.info(entry)
    console_logger.info(divider)
    if total_reqs:
        fail_percent = total_failures / float(total_reqs) * 100
    else:
        fail_percent = 0
    console_logger.info((" %-" + name_col + "s %7d %12s %42.2f") % ('Total', total_reqs, "%d(%.2f%%)" % (total_failures, fail_percent), total_rps))
    console_logger.info("")
def print_percentile_stats(stats):
    """Log the response-time percentile table for every entry plus the total."""
    divider = "-" * (80 + STATS_NAME_WIDTH)
    console_logger.info("Percentage of the requests completed within given times")
    console_logger.info((" %-" + str(STATS_NAME_WIDTH) + "s %8s %6s %6s %6s %6s %6s %6s %6s %6s %6s") % ('Name', '# reqs', '50%', '66%', '75%', '80%', '90%', '95%', '98%', '99%', '100%'))
    console_logger.info(divider)
    for key in sorted(six.iterkeys(stats)):
        entry = stats[key]
        # Entries with no recorded response times cannot produce percentiles.
        if entry.response_times:
            console_logger.info(entry.percentile())
    console_logger.info(divider)
    total_stats = global_stats.aggregated_stats()
    if total_stats.response_times:
        console_logger.info(total_stats.percentile())
    console_logger.info("")
def print_error_report():
    """Log one line per distinct error seen so far; a no-op when there are none."""
    if not global_stats.errors:
        return
    divider = "-" * (80 + STATS_NAME_WIDTH)
    console_logger.info("Error report")
    console_logger.info(" %-18s %-100s" % ("# occurences", "Error"))
    console_logger.info(divider)
    for error in six.itervalues(global_stats.errors):
        console_logger.info(" %-18i %-100s" % (error.occurences, error.to_name()))
    console_logger.info(divider)
    console_logger.info("")
def stats_printer():
    # Greenlet target: log the aggregated stats table every 2 seconds, forever.
    # Imported inside the function, presumably to avoid a circular import with
    # the runners module -- confirm.
    from .runners import locust_runner
    while True:
        print_stats(locust_runner.request_stats)
        gevent.sleep(2)
|
cgoldberg/locust
|
locust/stats.py
|
Python
|
mit
| 18,833
|
# flake8: noqa
from . import account, email_events, geoname, markdown, oauth, pwa, resource, sms_events
|
hasgeek/funnel
|
funnel/views/api/__init__.py
|
Python
|
agpl-3.0
| 105
|
# coding=utf-8
# Copyright 2022 The init2winit Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for trainer.py.
"""
import os
import shutil
import tempfile
from absl.testing import absltest
from init2winit import hyperparameters
from init2winit import utils
from init2winit.dataset_lib import datasets
from init2winit.hessian import hessian_eval
from init2winit.hessian import run_lanczos
from init2winit.init_lib import initializers
from init2winit.model_lib import models
from init2winit.trainer_lib import trainer
import jax.random
import numpy as np
import tensorflow.compat.v1 as tf # importing this is needed for tfds mocking.
import tensorflow_datasets as tfds
class RunLanczosTest(absltest.TestCase):
  """Tests run_lanczos.py."""

  def setUp(self):
    super(RunLanczosTest, self).setUp()
    # Fresh scratch directory per test; removed again in tearDown.
    self.test_dir = tempfile.mkdtemp()

  def tearDown(self):
    shutil.rmtree(self.test_dir)
    super(RunLanczosTest, self).tearDown()

  def test_run_lanczos(self):
    """Test training for two epochs on MNIST with a small model."""
    rng = jax.random.PRNGKey(0)

    # Set the numpy seed to make the fake data deterministc. mocking.mock_data
    # ultimately calls numpy.random.
    np.random.seed(0)

    model_name = 'fully_connected'
    loss_name = 'cross_entropy'
    metrics_name = 'classification_metrics'
    initializer_name = 'noop'
    dataset_name = 'mnist'
    model_cls = models.get_model(model_name)
    initializer = initializers.get_initializer(initializer_name)
    dataset_builder = datasets.get_dataset(dataset_name)
    # Tiny splits keep the end-to-end run fast enough for a unit test.
    hparam_overrides = {
        'lr_hparams': {
            'base_lr': 0.1,
            'schedule': 'cosine'
        },
        'batch_size': 8,
        'train_size': 160,
        'valid_size': 96,
        'test_size': 80,
    }
    hps = hyperparameters.build_hparams(
        model_name,
        initializer_name,
        dataset_name,
        hparam_file=None,
        hparam_overrides=hparam_overrides)
    model = model_cls(hps, datasets.get_dataset_meta_data(dataset_name),
                      loss_name, metrics_name)

    eval_batch_size = 16
    num_examples = 256

    def as_dataset(self, *args, **kwargs):
      del args
      del kwargs

      # pylint: disable=g-long-lambda,g-complex-comprehension
      return tf.data.Dataset.from_generator(
          lambda: ({
              'image': np.ones(shape=(28, 28, 1), dtype=np.uint8),
              'label': 9,
          } for i in range(num_examples)),
          output_types=self.info.features.dtype,
          output_shapes=self.info.features.shape,
      )

    # This will override the tfds.load(mnist) call to return 100 fake samples.
    with tfds.testing.mock_data(
        as_dataset_fn=as_dataset, num_examples=num_examples):
      dataset = dataset_builder(
          shuffle_rng=jax.random.PRNGKey(0),
          batch_size=hps.batch_size,
          eval_batch_size=eval_batch_size,
          hps=hps)

      num_train_steps = 41
      eval_num_batches = 5
      eval_every = 10
      # The Lanczos evaluation below must visit exactly these saved steps.
      checkpoint_steps = [10, 30, 40]

      metrics_logger, init_logger = None, None
      _ = list(
          trainer.train(
              train_dir=self.test_dir,
              model=model,
              dataset_builder=lambda *unused_args, **unused_kwargs: dataset,
              initializer=initializer,
              num_train_steps=num_train_steps,
              hps=hps,
              rng=rng,
              eval_batch_size=eval_batch_size,
              eval_num_batches=eval_num_batches,
              eval_train_num_batches=eval_num_batches,
              eval_frequency=eval_every,
              checkpoint_steps=checkpoint_steps,
              metrics_logger=metrics_logger,
              init_logger=init_logger))

      checkpoint_dir = os.path.join(self.test_dir, 'checkpoints')
      rng = jax.random.PRNGKey(0)

      # Run the Hessian eigen-spectrum evaluation over the saved checkpoints.
      run_lanczos.eval_checkpoints(
          checkpoint_dir,
          hps,
          rng,
          eval_num_batches,
          model_cls=model_cls,
          dataset_builder=lambda *unused_args, **unused_kwargs: dataset,
          dataset_meta_data=datasets.get_dataset_meta_data(dataset_name),
          hessian_eval_config=hessian_eval.DEFAULT_EVAL_CONFIG,
      )

      # Load the saved file.
      hessian_dir = os.path.join(checkpoint_dir, 'hessian')
      pytree_list = utils.load_pytrees(hessian_dir)

      # Test that the logged steps are correct.
      saved_steps = [row['step'] for row in pytree_list]
      self.assertEqual(saved_steps, checkpoint_steps)

  def test_hessian_callback(self):
    """Test training for two epochs on MNIST with a small model."""
    rng = jax.random.PRNGKey(0)

    # Set the numpy seed to make the fake data deterministc. mocking.mock_data
    # ultimately calls numpy.random.
    np.random.seed(0)

    model_name = 'fully_connected'
    loss_name = 'cross_entropy'
    metrics_name = 'classification_metrics'
    initializer_name = 'noop'
    dataset_name = 'mnist'
    model_cls = models.get_model(model_name)
    initializer = initializers.get_initializer(initializer_name)
    dataset_builder = datasets.get_dataset(dataset_name)
    hparam_overrides = {
        'lr_hparams': {
            'base_lr': 0.1,
            'schedule': 'cosine'
        },
        'batch_size': 8,
        'train_size': 160,
        'valid_size': 96,
        'test_size': 80,
    }
    # Unlike test_run_lanczos, run the Hessian eval as an in-training callback
    # and save its output under a non-default name.
    callback_configs = [hessian_eval.DEFAULT_EVAL_CONFIG.copy()]
    hessian_save_name = 'hessian2'
    callback_configs[0]['callback_name'] = 'hessian'
    callback_configs[0]['name'] = hessian_save_name
    hps = hyperparameters.build_hparams(
        model_name,
        initializer_name,
        dataset_name,
        hparam_file=None,
        hparam_overrides=hparam_overrides)
    model = model_cls(hps, datasets.get_dataset_meta_data(dataset_name),
                      loss_name, metrics_name)

    eval_batch_size = 16
    num_examples = 256

    def as_dataset(self, *args, **kwargs):
      del args
      del kwargs

      # pylint: disable=g-long-lambda,g-complex-comprehension
      return tf.data.Dataset.from_generator(
          lambda: ({
              'image': np.ones(shape=(28, 28, 1), dtype=np.uint8),
              'label': 9,
          } for i in range(num_examples)),
          output_types=self.info.features.dtype,
          output_shapes=self.info.features.shape,
      )

    # This will override the tfds.load(mnist) call to return 100 fake samples.
    with tfds.testing.mock_data(
        as_dataset_fn=as_dataset, num_examples=num_examples):
      dataset = dataset_builder(
          shuffle_rng=jax.random.PRNGKey(0),
          batch_size=hps.batch_size,
          eval_batch_size=eval_batch_size,
          hps=hps)

      num_train_steps = 41
      eval_num_batches = 5
      eval_every = 10
      checkpoint_steps = [10, 20, 30, 40]

      metrics_logger, init_logger = None, None
      _ = list(
          trainer.train(
              train_dir=self.test_dir,
              model=model,
              dataset_builder=lambda *unused_args, **unused_kwargs: dataset,
              initializer=initializer,
              num_train_steps=num_train_steps,
              hps=hps,
              rng=rng,
              eval_batch_size=eval_batch_size,
              eval_num_batches=eval_num_batches,
              eval_train_num_batches=eval_num_batches,
              eval_frequency=eval_every,
              checkpoint_steps=checkpoint_steps,
              metrics_logger=metrics_logger,
              callback_configs=callback_configs,
              init_logger=init_logger))

      checkpoint_dir = os.path.join(self.test_dir, 'checkpoints')

      # Load the saved file.
      hessian_dir = os.path.join(checkpoint_dir, hessian_save_name)
      pytree_list = utils.load_pytrees(hessian_dir)

      # Test that the logged steps are correct.
      saved_steps = [int(row['step']) for row in pytree_list]
      self.assertEqual(saved_steps, checkpoint_steps)

      # Check the dict keys.
      expected_keys = [
          'step', 'tridiag_hess', 'max_eig_hess', 'tridiag_hess_grad_overlap'
      ]
      self.assertEqual(set(pytree_list[0].keys()), set(expected_keys))
# Allow running this test file directly.
if __name__ == '__main__':
  absltest.main()
|
google/init2winit
|
hessian/test_run_lanczos.py
|
Python
|
apache-2.0
| 8,567
|
from numpy.testing import assert_allclose, assert_equal
import numpy as np
import unittest
from qspectra import operator_tools
class TestVibrationOperators(unittest.TestCase):
    """Checks the vibrational ladder operators in a 3-level truncation."""

    def test(self):
        lowering = operator_tools.vib_annihilate(3)
        raising = operator_tools.vib_create(3)
        sqrt2 = np.sqrt(2)
        assert_allclose(lowering,
                        [[0, 1, 0], [0, 0, sqrt2], [0, 0, 0]])
        assert_allclose(raising,
                        [[0, 0, 0], [1, 0, 0], [0, sqrt2, 0]])
        # The product a^dagger.a is the number operator diag(0, 1, 2).
        assert_allclose(raising.dot(lowering),
                        [[0, 0, 0], [0, 1, 0], [0, 0, 2]])
def test_unit_vec():
    # Standard basis vector e_0 in a 3-dimensional space.
    # NOTE(review): this is a module-level function, so pytest collects it but
    # unittest's own runner does not -- confirm the intended test runner.
    assert_allclose(operator_tools.unit_vec(0, 3), [1, 0, 0])
class TestBasisTransform(unittest.TestCase):
    def setUp(self):
        # 45-degree rotation: an orthogonal 2x2 change-of-basis matrix.
        self.U = np.array([[1, 1], [-1, 1]]) / np.sqrt(2)

    def test_basis_transform_operator(self):
        # Identity transforms (full- or subsystem-sized) leave X unchanged.
        X = np.random.randn(4, 4)
        for U in [np.eye(4), np.eye(2)]:
            X_prime = operator_tools.basis_transform_operator(X, U)
            assert_allclose(X, X_prime)
        # Incompatible or non-square transform matrices must be rejected.
        with self.assertRaises(ValueError):
            operator_tools.basis_transform_operator(X, np.eye(3))
        with self.assertRaises(ValueError):
            operator_tools.basis_transform_operator(X, np.ones((1, 2)))
        X = np.eye(2)
        actual = operator_tools.basis_transform_operator(X, self.U)
        assert_allclose(actual, X)
        # Projector onto (1, -1)/sqrt(2) becomes diag(1, 0) in the rotated basis.
        X = np.array([[1, -1], [-1, 1]]) / 2.0
        actual = operator_tools.basis_transform_operator(X, self.U)
        expected = np.array([[1, 0], [0, 0]])
        assert_allclose(actual, expected)

    def test_basis_transform_vector(self):
        # Identity transforms are no-ops regardless of the vector's rank.
        for rho in [np.random.randn(4),
                    np.random.randn(3, 4),
                    np.random.randn(1, 2, 3, 4)]:
            for U in [np.eye(4), np.eye(2)]:
                rho_prime = operator_tools.basis_transform_vector(rho, U)
                assert_allclose(rho, rho_prime)
        # A non-square transform must be rejected.
        rho = np.random.randn(3)
        U = np.ones((3, 2))
        with self.assertRaises(ValueError):
            operator_tools.basis_transform_vector(rho, U)
        rho = [1, 0]
        actual = operator_tools.basis_transform_vector(rho, self.U)
        expected = np.sqrt([0.5, 0.5])
        assert_allclose(actual, expected)
        rho = [0.5, -0.5, -0.5, 0.5]
        actual = operator_tools.basis_transform_vector(rho, self.U)
        expected = np.array([1, 0, 0, 0])
        assert_allclose(actual, expected)
class TestExtendedStates(unittest.TestCase):
    """Tests for operators extended over ground/excited/double-excited states."""

    def setUp(self):
        self.M = np.array([[1., 2 - 2j], [2 + 2j, 3]])

    def test_all_states(self):
        # assertEqual replaces the deprecated assertEquals alias (identical
        # behavior; the alias is removed in newer Python versions).
        self.assertEqual(operator_tools.all_states(1), [[], [0]])
        self.assertEqual(operator_tools.all_states(2), [[], [0], [1], [0, 1]])
        self.assertEqual(operator_tools.all_states(2, 'ge'), [[], [0], [1]])

    def test_operator_1_to_2(self):
        assert_allclose(operator_tools.operator_1_to_2(self.M), [[4]])
        assert_allclose(operator_tools.operator_1_to_2(np.diag([1, 10, 100])),
                        np.diag([11, 101, 110]))

    def test_operator_extend(self):
        assert_allclose(operator_tools.operator_extend(self.M, 'e'), self.M)
        assert_allclose(operator_tools.operator_extend(self.M, 'g'), [[0]])
        assert_allclose(operator_tools.operator_extend(self.M, 'f'), [[4]])
        assert_allclose(operator_tools.operator_extend(self.M),
                        [[0, 0, 0, 0],
                         [0, 1, 2 - 2j, 0],
                         [0, 2 + 2j, 3, 0],
                         [0, 0, 0, 4]])

    def test_transition_operator(self):
        assert_allclose(operator_tools.transition_operator(0, 2, 'ge'),
                        [[0, 1, 0], [1, 0, 0], [0, 0, 0]])
        assert_allclose(operator_tools.transition_operator(0, 2),
                        [[0, 1, 0, 0],
                         [1, 0, 0, 0],
                         [0, 0, 0, 1],
                         [0, 0, 1, 0]])
        assert_allclose(operator_tools.transition_operator(0, 2, 'gef', ''),
                        np.zeros((4, 4)))
        minus = operator_tools.transition_operator(0, 2, 'gef', '-')
        assert_allclose(minus,
                        [[0, 1, 0, 0],
                         [0, 0, 0, 0],
                         [0, 0, 0, 1],
                         [0, 0, 0, 0]])
        # The '-' operator lowers states; its conjugate transpose raises them.
        assert_allclose(minus.dot([0, 0, 0, 0]), [0, 0, 0, 0])
        assert_allclose(minus.dot([0, 1, 0, 0]), [1, 0, 0, 0])
        assert_allclose(minus.dot([0, 0, 0, 1]), [0, 0, 1, 0])
        assert_allclose(minus.conj().T.dot([1, 0, 0, 0]), [0, 1, 0, 0])
        assert_allclose(operator_tools.transition_operator(0, 2, 'gef', '+'),
                        [[0, 0, 0, 0],
                         [1, 0, 0, 0],
                         [0, 0, 0, 0],
                         [0, 0, 1, 0]])
class TestSubspaces(unittest.TestCase):
    """Tests for excitation counting and Liouville/Hilbert subspace helpers."""

    def test_n_excitations(self):
        assert_equal(operator_tools.n_excitations(1), [1, 1, 0])
        assert_equal(operator_tools.n_excitations(2), [1, 2, 1])
        assert_equal(operator_tools.n_excitations(3), [1, 3, 3])
        assert_equal(operator_tools.n_excitations(3, 2), [2, 6, 6])

    def test_extract_subspace(self):
        # NOTE(review): assertItemsEqual only exists on Python 2; the Python 3
        # equivalent is assertCountEqual. Left unchanged pending a py3 port.
        self.assertItemsEqual(operator_tools.extract_subspace('gg,eg->gg'),
                              'ge')
        self.assertItemsEqual(operator_tools.extract_subspace('gg,ee,ff'),
                              'gef')

    def test_full_liouville_subspace(self):
        self.assertEqual(operator_tools.full_liouville_subspace('gg'), 'gg')
        self.assertEqual(operator_tools.full_liouville_subspace('ee'), 'ee')
        self.assertEqual(operator_tools.full_liouville_subspace('ge,ee'),
                         'gg,ge,eg,ee')
        self.assertEqual(operator_tools.full_liouville_subspace('ge,ee,ff'),
                         'gg,ge,gf,eg,ee,ef,fg,fe,ff')

    def test_hilbert_space_index(self):
        # assertEqual replaces the deprecated assertEquals alias (identical
        # behavior; the alias is removed in newer Python versions).
        self.assertEqual(operator_tools.hilbert_subspace_index('g', 'gef', 3),
                         slice(0, 1))
        self.assertEqual(operator_tools.hilbert_subspace_index('e', 'gef', 3),
                         slice(1, 4))
        self.assertEqual(operator_tools.hilbert_subspace_index('f', 'gef', 3),
                         slice(4, 7))
        self.assertEqual(operator_tools.hilbert_subspace_index('f', 'ef', 3),
                         slice(3, 6))
|
shoyer/qspectra
|
tests/test_operator_tools.py
|
Python
|
bsd-2-clause
| 6,434
|
#!/usr/bin/env python
"""
Convert a bit score to an e-value.
Example usage:
$ bit-score-to-evalue.py --dbSize 168142520 --dbSequenceCount 5660 \
--queryLength 111 --lengthAdjustment 26 --bitScore 37.3537
0.0813077725194
"""
from __future__ import print_function
import argparse
from dark.blast.score import bitScoreToEValue
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Convert a bit score to an e-value.')

    # (flag, type, help) specs for the five required numeric arguments.
    arg_specs = (
        ('--bitScore', float, 'The bit score to convert'),
        ('--dbSize', int,
         'The total number of bases in the sequence database.'),
        ('--dbSequenceCount', int,
         'The number of sequences in the database.'),
        ('--queryLength', int, 'The length of the query sequence.'),
        ('--lengthAdjustment', int, 'The length adjustment.'),
    )
    for flag, argType, helpText in arg_specs:
        parser.add_argument(flag, type=argType, required=True, help=helpText)

    args = parser.parse_args()
    print(bitScoreToEValue(args.bitScore, args.dbSize, args.dbSequenceCount,
                           args.queryLength, args.lengthAdjustment))
|
acorg/dark-matter
|
bin/bit-score-to-e-value.py
|
Python
|
mit
| 1,273
|
# coding: utf-8
# Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
# Maintainer: joaander / All Developers are free to add commands for new
# features
from hoomd.md import _md
import hoomd
from hoomd.integrate import _integrator
class FIRE(_integrator):
    R""" Energy Minimizer (FIRE).

    Args:
        dt (float): This is the maximum step size the minimizer is permitted to use. Consider the stability of the system when setting. (in time units)
        Nmin (int): Number of steps energy change is negative before allowing :math:`\alpha` and :math:`\delta t` to adapt.
        finc (float): Factor to increase :math:`\delta t` by
        fdec (float): Factor to decrease :math:`\delta t` by
        alpha_start (float): Initial (and maximum) :math:`\alpha`
        falpha (float): Factor to decrease :math:`\alpha t` by
        ftol (float): force convergence criteria (in units of force over mass)
        wtol (float): angular momentum convergence criteria (in units of angular momentum)
        Etol (float): energy convergence criteria (in energy units)
        min_steps (int): A minimum number of attempts before convergence criteria are considered
        aniso (bool): Whether to integrate rotational degrees of freedom (bool), default None (autodetect).
            Added in version 2.2

    .. versionadded:: 2.1
    .. versionchanged:: 2.2

    :py:class:`mode_minimize_fire` uses the Fast Inertial Relaxation Engine (FIRE) algorithm to minimize the energy
    for a group of particles while keeping all other particles fixed. This method is published in
    `Bitzek, et. al., PRL, 2006 <http://dx.doi.org/10.1103/PhysRevLett.97.170201>`_.

    At each time step, :math:`\delta t`, the algorithm uses the NVE Integrator to generate a x, v, and F, and then adjusts
    v according to

    .. math::

        \vec{v} = (1-\alpha)\vec{v} + \alpha \hat{F}|\vec{v}|

    where :math:`\alpha` and :math:`\delta t` are dynamically adaptive quantities. While a current search has been
    lowering the energy of system for more than
    :math:`N_{min}` steps, :math:`\alpha` is decreased by :math:`\alpha \rightarrow \alpha f_{alpha}` and
    :math:`\delta t` is increased by :math:`\delta t \rightarrow max(\delta t \cdot f_{inc}, \delta t_{max})`.
    If the energy of the system increases (or stays the same), the velocity of the particles is set to 0,
    :math:`\alpha \rightarrow \alpha_{start}` and
    :math:`\delta t \rightarrow \delta t \cdot f_{dec}`. Convergence is determined by both the force per particle and
    the change in energy per particle dropping below *ftol* and *Etol*, respectively or

    .. math::

        \frac{\sum |F|}{N*\sqrt{N_{dof}}} <ftol \;\; and \;\ \Delta \frac{\sum |E|}{N} < Etol

    where N is the number of particles the minimization is acting over (i.e. the group size)
    Either of the two criterion can be effectively turned off by setting the tolerance to a large number.

    If the minimization is acted over a subset of all the particles in the system, the "other" particles will be kept
    frozen but will still interact with the particles being moved.

    Examples::

        fire=integrate.mode_minimize_fire(dt=0.05, ftol=1e-2, Etol=1e-7)
        nve=integrate.nve(group=group.all())
        while not(fire.has_converged()):
           run(100)

    Examples::

        fire=integrate.mode_minimize_fire(dt=0.05, ftol=1e-2, Etol=1e-7)
        nph=integrate.nph(group=group.all(),P=0.0,gamma=.5)
        while not(fire.has_converged()):
           run(100)

    Note:
        The algorithm requires a base integrator to update the particle position and velocities.
        Usually this will be either NVE (to minimize energy) or NPH (to minimize energy and relax the box).
        The quantity minimized is in any case the energy (not the enthalpy or any other quantity).

    Note:
        As a default setting, the algorithm will start with a :math:`\delta t = \frac{1}{10} \delta t_{max}` and
        attempts at least 10 search steps. In practice, it was found that this prevents the simulation from making too
        aggressive a first step, but also from quitting before having found a good search direction. The minimum number of
        attempts can be set by the user.
    """

    def __init__(self,
                 dt,
                 Nmin=5,
                 finc=1.1,
                 fdec=0.5,
                 alpha_start=0.1,
                 falpha=0.99,
                 ftol=1e-1,
                 wtol=1e-1,
                 Etol=1e-5,
                 min_steps=10,
                 aniso=None):

        # initialize base class
        _integrator.__init__(self)

        # initialize the reflected c++ class
        if not hoomd.context.current.device.cpp_exec_conf.isCUDAEnabled():
            self.cpp_integrator = _md.FIREEnergyMinimizer(
                hoomd.context.current.system_definition, dt)
        else:
            self.cpp_integrator = _md.FIREEnergyMinimizerGPU(
                hoomd.context.current.system_definition, dt)

        self.supports_methods = True

        hoomd.context.current.system.setIntegrator(self.cpp_integrator)

        self.aniso = aniso

        if aniso is not None:
            self.set_params(aniso=aniso)

        # change the set parameters if not None
        self.dt = dt
        self.metadata_fields = ['dt', 'aniso']

        self.cpp_integrator.setNmin(Nmin)
        self.Nmin = Nmin
        self.metadata_fields.append('Nmin')

        self.cpp_integrator.setFinc(finc)
        self.finc = finc
        self.metadata_fields.append('finc')

        self.cpp_integrator.setFdec(fdec)
        self.fdec = fdec
        self.metadata_fields.append('fdec')

        self.cpp_integrator.setAlphaStart(alpha_start)
        self.alpha_start = alpha_start
        self.metadata_fields.append('alpha_start')

        # Bug fix: the following five appends previously passed the parameter
        # *values* instead of the attribute-name strings, which broke metadata
        # export for these fields (compare 'Nmin'/'finc'/'fdec'/'alpha_start'
        # above, which follow the correct convention).
        self.cpp_integrator.setFalpha(falpha)
        self.falpha = falpha
        self.metadata_fields.append('falpha')

        self.cpp_integrator.setFtol(ftol)
        self.ftol = ftol
        self.metadata_fields.append('ftol')

        self.cpp_integrator.setWtol(wtol)
        self.wtol = wtol
        self.metadata_fields.append('wtol')

        self.cpp_integrator.setEtol(Etol)
        self.Etol = Etol
        self.metadata_fields.append('Etol')

        self.cpp_integrator.setMinSteps(min_steps)
        self.min_steps = min_steps
        self.metadata_fields.append('min_steps')

    ## \internal
    # \brief Cached set of anisotropic mode enums for ease of access
    _aniso_modes = {
        None: _md.IntegratorAnisotropicMode.Automatic,
        True: _md.IntegratorAnisotropicMode.Anisotropic,
        False: _md.IntegratorAnisotropicMode.Isotropic
    }

    def get_energy(self):
        R""" Returns the energy after the last iteration of the minimizer
        """
        self.check_initialization()
        return self.cpp_integrator.getEnergy()

    def set_params(self, aniso=None):
        R""" Changes parameters of an existing integration mode.

        Args:
            aniso (bool): Anisotropic integration mode (bool), default None (autodetect).

        Examples::

            integrator_mode.set_params(aniso=False)
        """
        self.check_initialization()

        if aniso is not None:
            if aniso in self._aniso_modes:
                anisoMode = self._aniso_modes[aniso]
            else:
                hoomd.context.current.device.cpp_msg.error(
                    "integrate.mode_standard: unknown anisotropic mode {}.\n"
                    .format(aniso))
                raise RuntimeError(
                    "Error setting anisotropic integration mode.")
            self.aniso = aniso
            self.cpp_integrator.setAnisotropicMode(anisoMode)

    def has_converged(self):
        R""" Test if the energy minimizer has converged.

        Returns:
            True when the minimizer has converged. Otherwise, return False.
        """
        self.check_initialization()
        return self.cpp_integrator.hasConverged()

    def reset(self):
        R""" Reset the minimizer to its initial state.
        """
        self.check_initialization()
        return self.cpp_integrator.reset()
|
joaander/hoomd-blue
|
hoomd/minimize/fire.py
|
Python
|
bsd-3-clause
| 8,315
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.layers import convolutional
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# Returns true iff the two initializers produce the same tensor to
# within a tiny tolerance.
def identicaltest(tc, init1, init2, shape=None):
  """Tests if two initializations are identical to within tiny tolerances.

  Args:
    tc: An instance of TensorFlowTestCase.
    init1: An Initializer that generates a tensor of a given shape
    init2: An Initializer that generates a tensor of a given shape
    shape: Shape of the tensor to initialize or `None` to use a vector of length
      100.
  Returns:
    True or False as determined by test.
  """
  if shape is None:
    shape = [100]
  outputs = []
  # Evaluate each initializer in its own fresh graph/session so that neither
  # evaluation can influence the other.
  for init in (init1, init2):
    with tc.test_session(graph=ops.Graph()):
      outputs.append(init(shape).eval())
  return np.allclose(outputs[0], outputs[1], rtol=1e-15, atol=1e-15)
def duplicated_initializer(tc, init, graph_seed, shape=None):
  """Tests duplicated random initializer within the same graph.

  This test generates two random kernels from the same initializer to the same
  graph, and checks if the results are close enough. Even given the same global
  seed, two different instances of random kernels should generate different
  results.

  Args:
    tc: An instance of TensorFlowTestCase.
    init: An Initializer that generates a tensor of a given shape
    graph_seed: A graph-level seed to use.
    shape: Shape of the tensor to initialize or `None` to use a vector of length
      100.
  Returns:
    True or False as determined by test.
  """
  if shape is None:
    shape = [100]
  with tc.test_session(graph=ops.Graph()):
    random_seed.set_random_seed(graph_seed)
    # Two evaluations in the same seeded graph; presumably callers assert
    # this returns False (distinct draws) -- confirm against the call sites.
    t1 = init(shape).eval()
    t2 = init(shape).eval()
    return np.allclose(t1, t2, rtol=1e-15, atol=1e-15)
def _init_sampler(tc, init, num):
  """Returns a func to generate a random tensor of shape [num].

  Args:
    tc: An instance of TensorFlowTestCase.
    init: An Initializer that generates a tensor of a given shape
    num: Size of 1D tensor to create.
  Returns:
    Function to generate a random tensor.
  """

  def sample():
    # Each call evaluates the initializer inside a fresh GPU-enabled session.
    with tc.test_session(use_gpu=True):
      return init([num]).eval()

  return sample
class ConstantInitializersTest(test.TestCase):
  """Tests for the zeros/ones/constant-valued initializers."""

  def testZerosInitializer(self):
    with self.test_session(use_gpu=True):
      shape = [2, 3]
      x = variable_scope.get_variable(
          "x", shape=shape, initializer=init_ops.zeros_initializer())
      x.initializer.run()
      self.assertAllEqual(x.eval(), np.zeros(shape))

  def testOnesInitializer(self):
    with self.test_session(use_gpu=True):
      shape = [2, 3]
      x = variable_scope.get_variable(
          "x", shape=shape, initializer=init_ops.ones_initializer())
      x.initializer.run()
      self.assertAllEqual(x.eval(), np.ones(shape))

  def testConstantZeroInitializer(self):
    with self.test_session(use_gpu=True):
      shape = [2, 3]
      x = variable_scope.get_variable(
          "x", shape=shape, initializer=init_ops.constant_initializer(0.0))
      x.initializer.run()
      self.assertAllEqual(x.eval(), np.zeros(shape))

  def testConstantOneInitializer(self):
    with self.test_session(use_gpu=True):
      shape = [2, 3]
      x = variable_scope.get_variable(
          "x", shape=shape, initializer=init_ops.constant_initializer(1.0))
      x.initializer.run()
      self.assertAllEqual(x.eval(), np.ones(shape))

  def testConstantIntInitializer(self):
    with self.test_session(use_gpu=True):
      shape = [2, 3]
      x = variable_scope.get_variable(
          "x",
          shape=shape,
          dtype=dtypes.int32,
          initializer=init_ops.constant_initializer(7))
      x.initializer.run()
      self.assertEqual(x.dtype.base_dtype, dtypes.int32)
      self.assertAllEqual(x.eval(), 7 * np.ones(shape, dtype=np.int32))

  def testConstantTupleInitializer(self):
    with self.test_session(use_gpu=True):
      shape = [3]
      x = variable_scope.get_variable(
          "x",
          shape=shape,
          dtype=dtypes.int32,
          initializer=init_ops.constant_initializer((10, 20, 30)))
      x.initializer.run()
      self.assertEqual(x.dtype.base_dtype, dtypes.int32)
      self.assertAllEqual(x.eval(), [10, 20, 30])

  def _testNDimConstantInitializer(self, name, value, shape, expected):
    """Checks element-for-element initialization from sequence `value`."""
    with self.test_session(use_gpu=True):
      init = init_ops.constant_initializer(value, dtype=dtypes.int32)
      x = variable_scope.get_variable(name, shape=shape, initializer=init)
      x.initializer.run()
      actual = array_ops.reshape(x, [-1]).eval()
      self.assertEqual(len(actual), len(expected))
      for a, e in zip(actual, expected):
        self.assertEqual(a, e)

  def testNDimConstantInitializer(self):
    value = [0, 1, 2, 3, 4, 5]
    shape = [2, 3]
    expected = list(value)
    self._testNDimConstantInitializer("list", value, shape, expected)
    self._testNDimConstantInitializer("ndarray",
                                      np.asarray(value), shape, expected)
    self._testNDimConstantInitializer("2D-ndarray",
                                      np.asarray(value).reshape(tuple(shape)),
                                      shape, expected)

  def _testNDimConstantInitializerLessValues(self, name, value, shape,
                                             expected):
    """Checks fill behavior when `value` has fewer elements than `shape`."""
    with self.test_session(use_gpu=True):
      init = init_ops.constant_initializer(value, dtype=dtypes.int32)
      x = variable_scope.get_variable(name, shape=shape, initializer=init)
      x.initializer.run()
      actual = array_ops.reshape(x, [-1]).eval()
      self.assertGreater(len(actual), len(expected))
      # Elements past the end of `expected` must repeat its last value.
      # (Was `for i in xrange(len(actual))`: xrange is Python-2-only and is
      # not imported here; enumerate is equivalent and portable.)
      for i, a in enumerate(actual):
        e = expected[i] if i < len(expected) else expected[-1]
        self.assertEqual(a, e)

  def testNDimConstantInitializerLessValues(self):
    value = [0, 1, 2, 3, 4, 5]
    shape = [2, 4]
    expected = list(value)
    self._testNDimConstantInitializerLessValues("list", value, shape, expected)
    self._testNDimConstantInitializerLessValues("ndarray",
                                                np.asarray(value), shape,
                                                expected)
    self._testNDimConstantInitializerLessValues(
        "2D-ndarray", np.asarray(value).reshape(tuple([2, 3])), shape, expected)

  def _testNDimConstantInitializerMoreValues(self, value, shape):
    """Checks that too many values for `shape` raises at variable creation."""
    ops.reset_default_graph()
    with self.test_session(use_gpu=True):
      init = init_ops.constant_initializer(value, dtype=dtypes.int32)
      self.assertRaises(
          ValueError,
          variable_scope.get_variable,
          "x",
          shape=shape,
          initializer=init)

  def testNDimConstantInitializerMoreValues(self):
    value = [0, 1, 2, 3, 4, 5, 6, 7]
    shape = [2, 3]
    self._testNDimConstantInitializerMoreValues(value, shape)
    self._testNDimConstantInitializerMoreValues(np.asarray(value), shape)
    self._testNDimConstantInitializerMoreValues(
        np.asarray(value).reshape(tuple([2, 4])), shape)

  def testInvalidValueTypeForConstantInitializerCausesTypeError(self):
    # Tensors and Variables are rejected as initial values; only plain
    # Python/numpy values are accepted.
    c = constant_op.constant([1.0, 2.0, 3.0])
    with self.assertRaisesRegexp(
        TypeError, r"Invalid type for initial value: .*Tensor.*"):
      init_ops.constant_initializer(c, dtype=dtypes.float32)
    v = variables.Variable([3.0, 2.0, 1.0])
    with self.assertRaisesRegexp(
        TypeError, r"Invalid type for initial value: .*Variable.*"):
      init_ops.constant_initializer(v, dtype=dtypes.float32)
class RandomNormalInitializationTest(test.TestCase):
  """Tests for `init_ops.random_normal_initializer`."""

  def _make(self, seed, dtype):
    # One-line factory so each test only varies the seed.
    return init_ops.random_normal_initializer(0.0, 1.0, seed=seed, dtype=dtype)

  def testInitializerIdentical(self):
    for dtype in (dtypes.float32, dtypes.float64):
      self.assertTrue(
          identicaltest(self, self._make(1, dtype), self._make(1, dtype)))

  def testInitializerDifferent(self):
    for dtype in (dtypes.float32, dtypes.float64):
      self.assertFalse(
          identicaltest(self, self._make(1, dtype), self._make(2, dtype)))

  def testDuplicatedInitializer(self):
    init = init_ops.random_normal_initializer(0.0, 1.0)
    self.assertFalse(duplicated_initializer(self, init, 1))

  def testInvalidDataType(self):
    with self.assertRaises(ValueError):
      init_ops.random_normal_initializer(0.0, 1.0, dtype=dtypes.string)
class TruncatedNormalInitializationTest(test.TestCase):
  """Tests for `init_ops.truncated_normal_initializer`."""

  def _make(self, seed, dtype):
    # One-line factory so each test only varies the seed.
    return init_ops.truncated_normal_initializer(0.0, 1.0, seed=seed,
                                                 dtype=dtype)

  def testInitializerIdentical(self):
    for dtype in (dtypes.float32, dtypes.float64):
      self.assertTrue(
          identicaltest(self, self._make(1, dtype), self._make(1, dtype)))

  def testInitializerDifferent(self):
    for dtype in (dtypes.float32, dtypes.float64):
      self.assertFalse(
          identicaltest(self, self._make(1, dtype), self._make(2, dtype)))

  def testDuplicatedInitializer(self):
    init = init_ops.truncated_normal_initializer(0.0, 1.0)
    self.assertFalse(duplicated_initializer(self, init, 1))

  def testInvalidDataType(self):
    with self.assertRaises(ValueError):
      init_ops.truncated_normal_initializer(0.0, 1.0, dtype=dtypes.string)
class RandomUniformInitializationTest(test.TestCase):
  """Tests for `init_ops.random_uniform_initializer`."""

  def _make(self, seed, dtype):
    # One-line factory over range [0, 7) so each test only varies the seed.
    return init_ops.random_uniform_initializer(0, 7, seed=seed, dtype=dtype)

  def testInitializerIdentical(self):
    for dtype in (dtypes.float32, dtypes.float64, dtypes.int64):
      self.assertTrue(
          identicaltest(self, self._make(1, dtype), self._make(1, dtype)))

  def testInitializerDifferent(self):
    for dtype in (dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64):
      self.assertFalse(
          identicaltest(self, self._make(1, dtype), self._make(2, dtype)))

  def testDuplicatedInitializer(self):
    init = init_ops.random_uniform_initializer(0.0, 1.0)
    self.assertFalse(duplicated_initializer(self, init, 1))
class UniformUnitScalingInitializationTest(test.TestCase):
  """Tests for `init_ops.uniform_unit_scaling_initializer`."""

  def testInitializerIdentical(self):
    # Same seed (and same factor) must reproduce the same tensor.
    for dtype in (dtypes.float32, dtypes.float64):
      init1 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
      init2 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
      self.assertTrue(identicaltest(self, init1, init2))
      init3 = init_ops.uniform_unit_scaling_initializer(
          1.5, seed=1, dtype=dtype)
      init4 = init_ops.uniform_unit_scaling_initializer(
          1.5, seed=1, dtype=dtype)
      self.assertTrue(identicaltest(self, init3, init4))

  def testInitializerDifferent(self):
    # Differing seeds or differing scale factors must change the draw.
    for dtype in (dtypes.float32, dtypes.float64):
      seeded1 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
      seeded2 = init_ops.uniform_unit_scaling_initializer(seed=2, dtype=dtype)
      scaled = init_ops.uniform_unit_scaling_initializer(
          1.5, seed=1, dtype=dtype)
      self.assertFalse(identicaltest(self, seeded1, seeded2))
      self.assertFalse(identicaltest(self, seeded1, scaled))
      self.assertFalse(identicaltest(self, seeded2, scaled))

  def testZeroSize(self):
    # A zero-element shape must initialize cleanly to an empty tensor.
    shape = [0, 2]
    with self.cached_session():
      x = variable_scope.get_variable(
          "x",
          shape=shape,
          initializer=init_ops.uniform_unit_scaling_initializer())
      variables.global_variables_initializer().run()
      self.assertAllEqual(shape, x.eval().shape)

  def testDuplicatedInitializer(self):
    init = init_ops.uniform_unit_scaling_initializer()
    self.assertFalse(duplicated_initializer(self, init, 1))

  def testInvalidDataType(self):
    with self.assertRaises(ValueError):
      init_ops.uniform_unit_scaling_initializer(dtype=dtypes.string)
class VarianceScalingInitializationTest(test.TestCase):
  """Tests for `init_ops.variance_scaling_initializer`.

  Each test draws a large [100, 100] kernel and checks that the sample
  mean is ~0 and the sample variance is ~`1. / shape[0]`; the mock patches
  additionally verify which underlying random op the initializer used.
  """
  def testTruncatedNormalDistribution(self):
    shape = [100, 100]
    expect_mean = 0.
    expect_var = 1. / shape[0]
    init = init_ops.variance_scaling_initializer(
        distribution='truncated_normal')
    # Wrap (not replace) random_ops.truncated_normal so the draw still
    # happens while recording that this op was the one dispatched to.
    with self.test_session(use_gpu=True), \
      test.mock.patch.object(
          random_ops, 'truncated_normal', wraps=random_ops.truncated_normal) \
        as mock_truncated_normal:
      x = init(shape).eval()
      self.assertTrue(mock_truncated_normal.called)
    self.assertNear(np.mean(x), expect_mean, err=1e-2)
    self.assertNear(np.var(x), expect_var, err=1e-2)
  def testNormalDistribution(self):
    shape = [100, 100]
    expect_mean = 0.
    expect_var = 1. / shape[0]
    init = init_ops.variance_scaling_initializer(distribution='normal')
    # The 'normal' distribution is expected to dispatch to truncated_normal
    # as well -- the mock assertion below pins that behavior.
    with self.test_session(use_gpu=True), \
      test.mock.patch.object(
          random_ops, 'truncated_normal', wraps=random_ops.truncated_normal) \
        as mock_truncated_normal:
      x = init(shape).eval()
      self.assertTrue(mock_truncated_normal.called)
    self.assertNear(np.mean(x), expect_mean, err=1e-2)
    self.assertNear(np.var(x), expect_var, err=1e-2)
  def testUntruncatedNormalDistribution(self):
    shape = [100, 100]
    expect_mean = 0.
    expect_var = 1. / shape[0]
    init = init_ops.variance_scaling_initializer(
        distribution='untruncated_normal')
    # 'untruncated_normal' must use the plain random_normal op instead.
    with self.test_session(use_gpu=True), \
      test.mock.patch.object(
          random_ops, 'random_normal', wraps=random_ops.random_normal) \
        as mock_random_normal:
      x = init(shape).eval()
      self.assertTrue(mock_random_normal.called)
    self.assertNear(np.mean(x), expect_mean, err=1e-2)
    self.assertNear(np.var(x), expect_var, err=1e-2)
  def testUniformDistribution(self):
    shape = [100, 100]
    expect_mean = 0.
    expect_var = 1. / shape[0]
    init = init_ops.variance_scaling_initializer(distribution='uniform')
    with self.test_session(use_gpu=True):
      x = init(shape).eval()
    self.assertNear(np.mean(x), expect_mean, err=1e-2)
    self.assertNear(np.var(x), expect_var, err=1e-2)
# TODO(vrv): move to sequence_ops_test?
class RangeTest(test.TestCase):
  """Tests for `math_ops.range`."""

  def _Range(self, start, limit, delta):
    # Build the op, verify its static shape against numpy's arange, and
    # return the evaluated values.
    with self.test_session(use_gpu=True):
      result = math_ops.range(start, limit, delta, name="range")
      self.assertEqual([len(np.arange(start, limit, delta))],
                       result.get_shape())
      return result.eval()

  def testBasic(self):
    for args, expected in [
        ((0, 5, 1), [0, 1, 2, 3, 4]),
        ((0, 5, 2), [0, 2, 4]),
        ((0, 6, 2), [0, 2, 4]),
        ((13, 32, 7), [13, 20, 27]),
        ((100, 500, 100), [100, 200, 300, 400]),
    ]:
      self.assertTrue(
          np.array_equal(self._Range(*args), np.array(expected)))
    self.assertEqual(math_ops.range(0, 5, 1).dtype, dtypes.int32)

  def testLimitOnly(self):
    with self.test_session(use_gpu=True):
      self.assertAllEqual(np.arange(5), math_ops.range(5).eval())

  def testEmpty(self):
    for start in (0, 5):
      self.assertTrue(np.array_equal(self._Range(start, start, 1), []))

  def testNonInteger(self):
    for args, expected in [
        ((0, 2, 0.5), [0, 0.5, 1, 1.5]),
        ((0, 5, 2.5), [0, 2.5]),
        ((0, 3, 0.9), [0, 0.9, 1.8, 2.7]),
        ((100., 500., 100.), [100, 200, 300, 400]),
    ]:
      self.assertTrue(np.allclose(self._Range(*args), np.array(expected)))
    self.assertEqual(math_ops.range(0., 5., 1.).dtype, dtypes.float32)

  def testNegativeDelta(self):
    self.assertTrue(
        np.array_equal(self._Range(5, -1, -1), np.array([5, 4, 3, 2, 1, 0])))
    self.assertTrue(
        np.allclose(self._Range(2.5, 0, -0.5), np.array([2.5, 2, 1.5, 1,
                                                         0.5])))
    self.assertTrue(
        np.array_equal(self._Range(-5, -10, -3), np.array([-5, -8])))

  def testDType(self):
    zero_int32 = math_ops.cast(0, dtypes.int32)
    zero_int64 = math_ops.cast(0, dtypes.int64)
    zero_float32 = math_ops.cast(0, dtypes.float32)
    zero_float64 = math_ops.cast(0, dtypes.float64)
    # Inferred result dtype for same-typed and mixed-typed endpoints.
    for (start, limit), want in [
        ((zero_int32, 0), dtypes.int32),
        ((zero_int64, 0), dtypes.int64),
        ((zero_float32, 0), dtypes.float32),
        ((zero_float64, 0), dtypes.float64),
        ((zero_int32, zero_int64), dtypes.int64),
        ((zero_int64, zero_float32), dtypes.float32),
        ((zero_float32, zero_float64), dtypes.float64),
        ((zero_float64, zero_int32), dtypes.float64),
    ]:
      self.assertEqual(math_ops.range(start, limit, 1).dtype, want)
    # An explicit dtype argument overrides inference.
    for explicit in (dtypes.int32, dtypes.int64, dtypes.float32,
                     dtypes.float64):
      self.assertEqual(
          math_ops.range(0, 0, 1, dtype=explicit).dtype, explicit)
# TODO(vrv): move to sequence_ops_test?
class LinSpaceTest(test.TestCase):
  """Tests for `math_ops.linspace` on CPU and (when available) GPU."""
  def _gpu_modes(self):
    # Exercise both CPU and forced-GPU placement when a GPU is present.
    if test.is_gpu_available():
      return [False, True]
    else:
      return [False]
  def _LinSpace(self, start, stop, num):
    # NOTE(touts): Needs to pass a graph to get a new session each time.
    with ops.Graph().as_default() as graph:
      # `self.force_gpu` is assigned by each test method's loop over
      # `self._gpu_modes()` (see `for self.force_gpu in ...` below).
      with self.test_session(graph=graph, force_gpu=self.force_gpu):
        tf_ans = math_ops.linspace(start, stop, num, name="linspace")
        self.assertEqual([num], tf_ans.get_shape())
        return tf_ans.eval()
  def testPositive(self):
    for self.force_gpu in self._gpu_modes():
      self.assertArrayNear(self._LinSpace(1., 5., 1), np.array([1.]), 1e-5)
      self.assertArrayNear(self._LinSpace(1., 5., 2), np.array([1., 5.]), 1e-5)
      self.assertArrayNear(
          self._LinSpace(1., 5., 3), np.array([1., 3., 5.]), 1e-5)
      self.assertArrayNear(
          self._LinSpace(1., 5., 4), np.array([1., 7. / 3., 11. / 3., 5.]),
          1e-5)
  def testNegative(self):
    for self.force_gpu in self._gpu_modes():
      self.assertArrayNear(self._LinSpace(-1., -5., 1), np.array([-1.]), 1e-5)
      self.assertArrayNear(
          self._LinSpace(-1., -5., 2), np.array([-1., -5.]), 1e-5)
      self.assertArrayNear(
          self._LinSpace(-1., -5., 3), np.array([-1., -3., -5.]), 1e-5)
      self.assertArrayNear(
          self._LinSpace(-1., -5., 4),
          np.array([-1., -7. / 3., -11. / 3., -5.]), 1e-5)
  def testNegativeToPositive(self):
    for self.force_gpu in self._gpu_modes():
      self.assertArrayNear(self._LinSpace(-1., 5., 1), np.array([-1.]), 1e-5)
      self.assertArrayNear(
          self._LinSpace(-1., 5., 2), np.array([-1., 5.]), 1e-5)
      self.assertArrayNear(
          self._LinSpace(-1., 5., 3), np.array([-1., 2., 5.]), 1e-5)
      self.assertArrayNear(
          self._LinSpace(-1., 5., 4), np.array([-1., 1., 3., 5.]), 1e-5)
  def testPoint(self):
    # start == stop: every element equals the single point.
    for self.force_gpu in self._gpu_modes():
      self.assertArrayNear(self._LinSpace(5., 5., 1), np.array([5.]), 1e-5)
      self.assertArrayNear(self._LinSpace(5., 5., 2), np.array([5.] * 2), 1e-5)
      self.assertArrayNear(self._LinSpace(5., 5., 3), np.array([5.] * 3), 1e-5)
      self.assertArrayNear(self._LinSpace(5., 5., 4), np.array([5.] * 4), 1e-5)
class DeviceTest(test.TestCase):
  """Checks device placement recorded on variables and their initializers."""

  def testNoDevice(self):
    with ops.Graph().as_default():
      v = variables.Variable([[1.0, 1.0]])
      # Without an enclosing device scope, nothing is pinned.
      self.assertDeviceEqual(None, v.device)
      self.assertDeviceEqual(None, v.initializer.device)

  def testDevice(self):
    with ops.Graph().as_default():
      with ops.device("/job:ps"):
        v = variables.Variable([[1.0, 1.0]])
      # Both the variable and its initializer inherit the device scope.
      self.assertDeviceEqual("/job:ps", v.device)
      self.assertDeviceEqual("/job:ps", v.initializer.device)
class OrthogonalInitializerTest(test.TestCase):
  """Tests for `init_ops.orthogonal_initializer`."""

  def testInitializerIdentical(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
      init2 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
      self.assertTrue(identicaltest(self, init1, init2, (10, 10)))

  def testInitializerDifferent(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
      init2 = init_ops.orthogonal_initializer(seed=2, dtype=dtype)
      self.assertFalse(identicaltest(self, init1, init2, (10, 10)))

  def testDuplicatedInitializer(self):
    init = init_ops.orthogonal_initializer()
    self.assertFalse(duplicated_initializer(self, init, 1, (10, 10)))

  def testInvalidDataType(self):
    self.assertRaises(
        ValueError, init_ops.orthogonal_initializer, dtype=dtypes.string)

  def testInvalidShape(self):
    # A rank-1 shape is rejected by the initializer.
    init1 = init_ops.orthogonal_initializer()
    with self.test_session(graph=ops.Graph(), use_gpu=True):
      self.assertRaises(ValueError, init1, shape=[5])

  def testGain(self):
    shape = (10, 10)
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
      init2 = init_ops.orthogonal_initializer(gain=3.14, seed=1, dtype=dtype)
      with self.test_session(graph=ops.Graph(), use_gpu=True):
        t1 = init1(shape).eval()
        t2 = init2(shape).eval()
      # Bug fix: the original `return np.allclose(...)` never asserted
      # anything (unittest discards a test method's return value) and
      # exited the loop after the first dtype. Assert for every dtype.
      self.assertAllClose(t1, t2 / 3.14, rtol=1e-15, atol=1e-15)

  def testShapesValues(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      for shape in [(10, 10), (10, 9, 8), (100, 5, 5), (50, 40), (40, 50)]:
        init = init_ops.orthogonal_initializer(dtype=dtype)
        tol = 1e-5 if dtype == dtypes.float32 else 1e-12
        with self.test_session(graph=ops.Graph(), use_gpu=True):
          # Check the shape
          t = init(shape).eval()
          self.assertAllEqual(shape, t.shape)
          # Check orthogonality by computing the inner product
          t = t.reshape((np.prod(t.shape[:-1]), t.shape[-1]))
          if t.shape[0] > t.shape[1]:
            # Tall matrix: columns are orthonormal (t^T t == I).
            self.assertAllClose(
                np.dot(t.T, t), np.eye(t.shape[1]), rtol=tol, atol=tol)
          else:
            # Wide matrix: rows are orthonormal (t t^T == I).
            self.assertAllClose(
                np.dot(t, t.T), np.eye(t.shape[0]), rtol=tol, atol=tol)
class ConvolutionDeltaOrthogonalInitializerTest(test.TestCase):
  """Tests for `init_ops.convolutional_delta_orthogonal`."""

  def testInitializerIdentical(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype)
      init2 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype)
      self.assertTrue(identicaltest(self, init1, init2, (3, 3, 10, 10)))

  def testInitializerDifferent(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype)
      init2 = init_ops.convolutional_delta_orthogonal(seed=2, dtype=dtype)
      self.assertFalse(identicaltest(self, init1, init2, (3, 3, 10, 10)))

  def testDuplicatedInitializer(self):
    init = init_ops.convolutional_delta_orthogonal()
    self.assertFalse(duplicated_initializer(self, init, 1, (3, 3, 10, 10)))

  def testInvalidDataType(self):
    self.assertRaises(
        ValueError, init_ops.convolutional_delta_orthogonal,
        dtype=dtypes.string)

  def testInvalidShape(self):
    # Shape [3, 3, 6, 5] is rejected by the initializer.
    init1 = init_ops.convolutional_delta_orthogonal()
    with self.test_session(graph=ops.Graph(), use_gpu=True):
      self.assertRaises(ValueError, init1, shape=[3, 3, 6, 5])

  def testGain(self):
    shape = (3, 3, 10, 10)
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype)
      init2 = init_ops.convolutional_delta_orthogonal(gain=3.14,
                                                      seed=1, dtype=dtype)
      with self.test_session(graph=ops.Graph(), use_gpu=True):
        t1 = init1(shape).eval()
        t2 = init2(shape).eval()
      # Bug fix: the original `return np.allclose(...)` never asserted
      # anything (unittest discards a test method's return value) and
      # skipped the float64 pass. Assert for every dtype.
      self.assertAllClose(t1, t2 / 3.14, rtol=1e-15, atol=1e-15)

  def testShapesValues(self):
    gain = 3.14
    for dtype in [dtypes.float32]:
      for kernel_size in [[3], [8], [3, 5], [2, 4], [3, 3, 3], [2, 2, 2]]:
        tol = 1e-2
        # Check orthogonality by computing ratio between
        # the 2-norms of the inputs and outputs.
        if len(kernel_size) == 1:
          shape = [4, 32, 64]
          convolution = convolutional.conv1d
        elif len(kernel_size) == 2:
          convolution = convolutional.conv2d
          shape = [4, 32, 32, 64]
        else:
          shape = [4, 16, 16, 16, 64]
          convolution = convolutional.conv3d
        inputs = random_ops.random_normal(shape, dtype=dtype)
        inputs_2norm = linalg_ops.norm(inputs)
        outputs = convolution(
            inputs, padding="same", filters=128,
            kernel_size=kernel_size, use_bias=False,
            kernel_initializer=init_ops.convolutional_delta_orthogonal(
                gain=gain))
        outputs_shape = shape[0:-1] + [128]
        outputs_2norm = linalg_ops.norm(outputs)
        ratio = outputs_2norm / inputs_2norm
        my_ops = variables.global_variables_initializer()
        with self.test_session(use_gpu=True) as sess:
          sess.run(my_ops)
          # Check the shape of the outputs
          t = outputs.eval()
          self.assertAllEqual(t.shape, outputs_shape)
          # Check isometry of the delta-orthogonal kernel.
          self.assertAllClose(sess.run(ratio), np.sqrt(gain),
                              rtol=tol, atol=tol)

  def testNonuniformity(self):
    value = 0
    abs_value = 0
    shape = [3, 3, 10, 10]
    count = 70
    tol = 1e-5
    with self.test_session(use_gpu=True):
      for i in range(count):
        x = variable_scope.get_variable("{}".format(i), shape=shape,
                                        initializer=
                                        init_ops.convolutional_delta_orthogonal)
        x.initializer.run()
        # Determinant of the center tap of each drawn kernel.
        y = x.eval()[1, 1, :, :]
        determinant = np.linalg.det(y)
        value += determinant
        abs_value += np.abs(determinant)
      # Check there is some variation in the signs of the determinants
      self.assertLess(value, count - tol)
      self.assertLess(-count + tol, value)
      # Check all determinants have absolute value 1
      # Compute the sum of the absolute values of 'count' determinants
      self.assertAllClose(abs_value, count, rtol=tol, atol=tol)
class ConvolutionOrthogonal1dInitializerTest(test.TestCase):
  """Tests for `init_ops.convolutional_orthogonal_1d`."""

  def testInitializerIdentical(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.convolutional_orthogonal_1d(seed=1, dtype=dtype)
      init2 = init_ops.convolutional_orthogonal_1d(seed=1, dtype=dtype)
      self.assertTrue(identicaltest(self, init1, init2, (3, 10, 10)))

  def testInitializerDifferent(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.convolutional_orthogonal_1d(seed=1, dtype=dtype)
      init2 = init_ops.convolutional_orthogonal_1d(seed=2, dtype=dtype)
      self.assertFalse(identicaltest(self, init1, init2, (3, 10, 10)))

  def testDuplicatedInitializer(self):
    init = init_ops.convolutional_orthogonal_1d()
    self.assertFalse(duplicated_initializer(self, init, 1, (3, 10, 10)))

  def testInvalidDataType(self):
    self.assertRaises(
        ValueError, init_ops.convolutional_orthogonal_1d,
        dtype=dtypes.string)

  def testInvalidShape(self):
    # Shape [3, 6, 5] is rejected by the initializer.
    init1 = init_ops.convolutional_orthogonal_1d()
    with self.test_session(graph=ops.Graph(), use_gpu=True):
      self.assertRaises(ValueError, init1, shape=[3, 6, 5])

  def testGain(self):
    shape = (3, 10, 10)
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.convolutional_orthogonal_1d(seed=1, dtype=dtype)
      init2 = init_ops.convolutional_orthogonal_1d(gain=3.14,
                                                   seed=1, dtype=dtype)
      with self.test_session(graph=ops.Graph(), use_gpu=True):
        t1 = init1(shape).eval()
        t2 = init2(shape).eval()
      # Bug fix: the original `return np.allclose(...)` never asserted
      # anything (unittest discards a test method's return value) and
      # skipped the float64 pass. Assert for every dtype.
      self.assertAllClose(t1, t2 / 3.14, rtol=1e-15, atol=1e-15)

  def testNonuniformity(self):
    value = 0
    abs_value = 0
    shape = [3, 10, 10]
    count = 70
    tol = 1e-5
    with self.test_session(use_gpu=True):
      for i in range(count):
        x = variable_scope.get_variable("{}".format(i), shape=shape,
                                        initializer=
                                        init_ops.convolutional_orthogonal_1d)
        x.initializer.run()
        y = np.sum(x.eval(), axis=0)
        determinant = np.linalg.det(y)
        value += determinant
        abs_value += np.abs(determinant)
      # Check there is some variation in the signs of the determinants.
      self.assertLess(value, count - tol)
      self.assertLess(-count + tol, value)
      # Check all determinants have absolute value 1
      # Compute the sum of the absolute values of 'count' determinants
      self.assertAllClose(abs_value, count, rtol=tol, atol=tol)

  def testShapesValues(self):
    def circular_pad(input_, width, kernel_size):
      """Pad input_ for computing (circular) convolution.

      Args:
        input_: the input tensor
        width: the width of the tensor.
        kernel_size: the kernel size of the filter.

      Returns:
        a tensor whose width is (width + kernel_size - 1).
      """
      beginning = kernel_size // 2
      end = kernel_size - 1 - beginning
      tmp_up = array_ops.slice(input_, [0, width - beginning, 0],
                               [-1, beginning, -1])
      tmp_down = array_ops.slice(input_, [0, 0, 0], [-1, end, -1])
      tmp = array_ops.concat([tmp_up, input_, tmp_down], 1)
      return tmp

    cout = 64
    shape = [10, 20, 32]
    outputs_shape = shape[0:-1] + [cout]
    dtype = dtypes.float32
    tol = 1e-3
    gain = 3.14
    # Check orthogonality/isometry by computing the ratio between
    # the 2-norms of the inputs and outputs.
    for kernel_size in [[1], [2], [3], [4], [5], [6]]:
      convolution = convolutional.conv1d
      inputs = random_ops.random_normal(shape, dtype=dtype)
      inputs_2norm = linalg_ops.norm(inputs)
      input_with_circular_pad = circular_pad(inputs, shape[1], kernel_size[0])
      outputs = convolution(
          input_with_circular_pad, padding="valid", filters=cout,
          kernel_size=kernel_size[0], use_bias=False,
          kernel_initializer=init_ops.convolutional_orthogonal_1d(gain=gain))
      outputs_2norm = linalg_ops.norm(outputs)
      ratio = outputs_2norm / inputs_2norm
      my_ops = variables.global_variables_initializer()
      with self.test_session(use_gpu=True) as sess:
        sess.run(my_ops)
        # Check the shape of the outputs
        t = outputs.eval()
        self.assertAllEqual(t.shape, outputs_shape)
        # Check isometry of the orthogonal kernel.
        self.assertAllClose(sess.run(ratio), np.sqrt(gain), rtol=tol, atol=tol)
class ConvolutionOrthogonal2dInitializerTest(test.TestCase):
  """Tests for `init_ops.convolutional_orthogonal_2d`."""

  def testInitializerIdentical(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.convolutional_orthogonal_2d(seed=1, dtype=dtype)
      init2 = init_ops.convolutional_orthogonal_2d(seed=1, dtype=dtype)
      self.assertTrue(identicaltest(self, init1, init2, (3, 3, 10, 10)))

  def testInitializerDifferent(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.convolutional_orthogonal_2d(seed=1, dtype=dtype)
      init2 = init_ops.convolutional_orthogonal_2d(seed=2, dtype=dtype)
      self.assertFalse(identicaltest(self, init1, init2, (3, 3, 10, 10)))

  def testDuplicatedInitializer(self):
    init = init_ops.convolutional_orthogonal_2d()
    self.assertFalse(duplicated_initializer(self, init, 1, (3, 3, 10, 10)))

  def testInvalidDataType(self):
    self.assertRaises(
        ValueError, init_ops.convolutional_orthogonal_2d,
        dtype=dtypes.string)

  def testInvalidShape(self):
    # Shape [3, 3, 6, 5] is rejected by the initializer.
    init1 = init_ops.convolutional_orthogonal_2d()
    with self.test_session(graph=ops.Graph(), use_gpu=True):
      self.assertRaises(ValueError, init1, shape=[3, 3, 6, 5])

  def testGain(self):
    shape = (3, 3, 10, 10)
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.convolutional_orthogonal_2d(seed=1, dtype=dtype)
      init2 = init_ops.convolutional_orthogonal_2d(gain=3.14,
                                                   seed=1, dtype=dtype)
      with self.test_session(graph=ops.Graph(), use_gpu=True):
        t1 = init1(shape).eval()
        t2 = init2(shape).eval()
      # Bug fix: the original `return np.allclose(...)` never asserted
      # anything (unittest discards a test method's return value) and
      # skipped the float64 pass. Assert for every dtype.
      self.assertAllClose(t1, t2 / 3.14, rtol=1e-15, atol=1e-15)

  def testShapesValues(self):
    def circular_pad(input_, width, kernel_size):
      """Pad input_ for computing (circular) convolution.

      Args:
        input_: the input tensor
        width: the width of the tensor.
        kernel_size: the kernel size of the filter.

      Returns:
        a tensor whose width is (width + kernel_size - 1).
      """
      beginning = kernel_size // 2
      end = kernel_size - 1 - beginning
      tmp_up = array_ops.slice(input_, [0, width - beginning, 0, 0],
                               [-1, beginning, width, -1])
      tmp_down = array_ops.slice(input_, [0, 0, 0, 0], [-1, end, width, -1])
      tmp = array_ops.concat([tmp_up, input_, tmp_down], 1)
      new_width = width + kernel_size - 1
      tmp_left = array_ops.slice(tmp, [0, 0, width - beginning, 0],
                                 [-1, new_width, beginning, -1])
      tmp_right = array_ops.slice(tmp, [0, 0, 0, 0], [-1, new_width, end, -1])
      final = array_ops.concat([tmp_left, tmp, tmp_right], 2)
      return final

    cout = 45
    shape = [64, 28, 28, 32]
    outputs_shape = shape[0:-1] + [cout]
    dtype = dtypes.float32
    tol = 1e-3
    gain = 3.14
    # Check orthogonality/isometry by computing the ratio between
    # the 2-norms of the inputs and outputs.
    for kernel_size in [[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]:
      convolution = convolutional.conv2d
      inputs = random_ops.random_normal(shape, dtype=dtype)
      inputs_2norm = linalg_ops.norm(inputs)
      input_with_circular_pad = circular_pad(inputs, shape[1], kernel_size[0])
      outputs = convolution(
          input_with_circular_pad, padding="valid", filters=cout,
          kernel_size=kernel_size, use_bias=False,
          kernel_initializer=init_ops.convolutional_orthogonal_2d(gain=gain))
      outputs_2norm = linalg_ops.norm(outputs)
      ratio = outputs_2norm / inputs_2norm
      my_ops = variables.global_variables_initializer()
      with self.test_session(use_gpu=True) as sess:
        sess.run(my_ops)
        # Check the shape of the outputs
        t = outputs.eval()
        self.assertAllEqual(t.shape, outputs_shape)
        # Check isometry of the orthogonal kernel.
        self.assertAllClose(sess.run(ratio), np.sqrt(gain), rtol=tol, atol=tol)
class ConvolutionOrthogonal3dInitializerTest(test.TestCase):
  """Tests for `init_ops.convolutional_orthogonal_3d`."""

  def testInitializerIdentical(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.convolutional_orthogonal_3d(seed=1, dtype=dtype)
      init2 = init_ops.convolutional_orthogonal_3d(seed=1, dtype=dtype)
      self.assertTrue(identicaltest(self, init1, init2, (3, 3, 3, 10, 10)))

  def testInitializerDifferent(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.convolutional_orthogonal_3d(seed=1, dtype=dtype)
      init2 = init_ops.convolutional_orthogonal_3d(seed=2, dtype=dtype)
      self.assertFalse(identicaltest(self, init1, init2, (3, 3, 3, 10, 10)))

  def testDuplicatedInitializer(self):
    init = init_ops.convolutional_orthogonal_3d()
    self.assertFalse(duplicated_initializer(self, init, 1, (3, 3, 3, 10, 10)))

  def testInvalidDataType(self):
    self.assertRaises(
        ValueError, init_ops.convolutional_orthogonal_3d,
        dtype=dtypes.string)

  def testInvalidShape(self):
    # Shape [3, 3, 3, 6, 5] is rejected by the initializer.
    init1 = init_ops.convolutional_orthogonal_3d()
    with self.test_session(graph=ops.Graph(), use_gpu=True):
      self.assertRaises(ValueError, init1, shape=[3, 3, 3, 6, 5])

  def testGain(self):
    shape = (3, 3, 3, 10, 10)
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.convolutional_orthogonal_3d(seed=1, dtype=dtype)
      init2 = init_ops.convolutional_orthogonal_3d(gain=3.14,
                                                   seed=1, dtype=dtype)
      with self.test_session(graph=ops.Graph(), use_gpu=True):
        t1 = init1(shape).eval()
        t2 = init2(shape).eval()
      # Bug fix: the original `return np.allclose(...)` never asserted
      # anything (unittest discards a test method's return value) and
      # skipped the float64 pass. Assert for every dtype.
      self.assertAllClose(t1, t2 / 3.14, rtol=1e-15, atol=1e-15)

  def testNonuniformity(self):
    value = 0
    abs_value = 0
    shape = [3, 3, 3, 5, 5]
    count = 20
    tol = 1e-5
    with self.test_session(use_gpu=True):
      for i in range(count):
        x = variable_scope.get_variable("{}".format(i), shape=shape,
                                        initializer=
                                        init_ops.convolutional_orthogonal_3d)
        x.initializer.run()
        y = np.sum(x.eval(), axis=(0, 1, 2))
        determinant = np.linalg.det(y)
        value += determinant
        abs_value += np.abs(determinant)
      # Check there is some variation in the signs of the determinants
      self.assertLess(value, count - tol)
      self.assertLess(-count + tol, value)
      # Check all determinants have absolute value 1
      # Compute the sum of the absolute values of 'count' determinants
      self.assertAllClose(abs_value, count, rtol=tol, atol=tol)

  def testShapesValues(self):
    def circular_pad(input_, width, kernel_size):
      """Padding input_ for computing circular convolution.

      Args:
        input_: the input tensor
        width: the width of the tensor.
        kernel_size: the kernel size of the filter.

      Returns:
        a tensor whose width is (width + kernel_size - 1).
      """
      beginning = kernel_size // 2
      end = kernel_size - 1 - beginning
      tmp_up = array_ops.slice(input_, [0, width - beginning, 0, 0, 0],
                               [-1, beginning, -1, -1, -1])
      tmp_down = array_ops.slice(input_, [0, 0, 0, 0, 0],
                                 [-1, end, -1, -1, -1])
      tmp = array_ops.concat([tmp_up, input_, tmp_down], 1)
      tmp_left = array_ops.slice(tmp, [0, 0, width - beginning, 0, 0],
                                 [-1, -1, beginning, -1, -1])
      tmp_right = array_ops.slice(tmp, [0, 0, 0, 0, 0],
                                  [-1, -1, end, -1, -1])
      tmp = array_ops.concat([tmp_left, tmp, tmp_right], 2)
      tmp_front = array_ops.slice(tmp, [0, 0, 0, width - beginning, 0],
                                  [-1, -1, -1, beginning, -1])
      tmp_back = array_ops.slice(tmp, [0, 0, 0, 0, 0], [-1, -1, -1, end, -1])
      return array_ops.concat([tmp_front, tmp, tmp_back], 3)

    cout = 32
    shape = [1, 7, 7, 7, 16]
    outputs_shape = shape[0:-1] + [cout]
    dtype = dtypes.float32
    tol = 1e-3
    gain = 3.14
    # Check orthogonality/isometry by computing the ratio between
    # the 2-norms of the inputs and outputs.
    for kernel_size in [[1, 1, 1], [2, 2, 2], [3, 3, 3]]:
      convolution = convolutional.conv3d
      inputs = random_ops.random_normal(shape, dtype=dtype)
      inputs_2norm = linalg_ops.norm(inputs)
      input_with_circular_pad = circular_pad(inputs, shape[1], kernel_size[0])
      outputs = convolution(
          input_with_circular_pad, padding="valid", filters=cout,
          kernel_size=kernel_size[0], use_bias=False,
          kernel_initializer=init_ops.convolutional_orthogonal_3d(gain=gain))
      outputs_2norm = linalg_ops.norm(outputs)
      ratio = outputs_2norm / inputs_2norm
      my_ops = variables.global_variables_initializer()
      with self.test_session(use_gpu=True) as sess:
        sess.run(my_ops)
        # Check the shape of the outputs
        t = outputs.eval()
        self.assertAllEqual(t.shape, outputs_shape)
        # Check isometry of the orthogonal kernel.
        self.assertAllClose(sess.run(ratio), np.sqrt(gain), rtol=tol, atol=tol)
class IdentityInitializerTest(test.TestCase):
  """Tests for init_ops.identity_initializer (identity-matrix init)."""
  def testInvalidDataType(self):
    # orthogonal_initializer only supports floating point dtypes.
    self.assertRaises(
        ValueError, init_ops.orthogonal_initializer, dtype=dtypes.string)
  def testInvalidShape(self):
    # identity_initializer requires a rank-2 shape; rank 3, 1 and 0 must fail.
    init = init_ops.identity_initializer()
    with self.test_session(graph=ops.Graph(), use_gpu=True):
      self.assertRaises(ValueError, init, shape=[5, 7, 7])
      self.assertRaises(ValueError, init, shape=[5])
      self.assertRaises(ValueError, init, shape=[])
  def testNonSquare(self):
    # Non-square shapes yield a rectangular identity (np.eye semantics).
    init = init_ops.identity_initializer()
    shape = (10, 5)
    with self.test_session(graph=ops.Graph(), use_gpu=True):
      self.assertAllClose(init(shape).eval(), np.eye(*shape))
  def testGain(self):
    # `gain` scales the identity; default gain is 1.0.
    shape = (10, 10)
    for dtype in [dtypes.float32, dtypes.float64]:
      init_default = init_ops.identity_initializer(dtype=dtype)
      init_custom = init_ops.identity_initializer(gain=0.9, dtype=dtype)
      with self.test_session(graph=ops.Graph(), use_gpu=True):
        self.assertAllClose(init_default(shape).eval(), np.eye(*shape))
      with self.test_session(graph=ops.Graph(), use_gpu=True):
        self.assertAllClose(init_custom(shape).eval(), np.eye(*shape) * 0.9)
  def testPartitions(self):
    # The initializer must compose correctly with variable partitioning:
    # concatenating the partitions reproduces the full identity.
    shape = (10, 10)
    init = init_ops.identity_initializer()
    partitioner = partitioned_variables.variable_axis_size_partitioner(1)
    with self.test_session(graph=ops.Graph(), use_gpu=True):
      with variable_scope.variable_scope(
          "foo", partitioner=partitioner, initializer=init):
        v = array_ops.identity(variable_scope.get_variable("bar", shape=shape))
        variables.global_variables_initializer().run()
        self.assertAllClose(v.eval(), np.eye(*shape))
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  test.main()
|
xodus7/tensorflow
|
tensorflow/python/kernel_tests/init_ops_test.py
|
Python
|
apache-2.0
| 44,023
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import json
import uuid
from lxml import etree
import webob
from cinder import context
from cinder import test
from cinder.tests.api import fakes
from cinder import volume
def fake_volume_get(*args, **kwargs):
    """Stub for volume.API.get: return one canned volume record.

    All positional/keyword arguments are accepted and ignored so the stub
    can replace the real API method regardless of call signature.
    """
    record = {}
    record['id'] = 'fake'
    record['host'] = 'host001'
    record['status'] = 'available'
    record['size'] = 5
    record['availability_zone'] = 'somewhere'
    record['created_at'] = datetime.datetime.now()
    record['attach_status'] = None
    record['display_name'] = 'anothervolume'
    record['display_description'] = 'Just another volume!'
    record['volume_type_id'] = None
    record['snapshot_id'] = None
    record['project_id'] = 'fake'
    return record
def fake_volume_get_all(*args, **kwargs):
    """Stub for volume.API.get_all: a one-element list of the fake volume."""
    single = fake_volume_get()
    return [single]
def app():
    """Build a minimal WSGI app routing /v2 to the fake API router.

    No auth middleware is installed, so the request environ key
    'cinder.context' set by the tests passes straight through.
    """
    url_map = fakes.urlmap.URLMap()
    url_map['/v2'] = fakes.router.APIRouter()
    return url_map
class VolumeHostAttributeTest(test.TestCase):
    """Verify os-vol-host-attr:host is exposed only to admin contexts."""
    def setUp(self):
        super(VolumeHostAttributeTest, self).setUp()
        # Stub the volume API so no database or backend is required.
        self.stubs.Set(volume.API, 'get', fake_volume_get)
        self.stubs.Set(volume.API, 'get_all', fake_volume_get_all)
        self.UUID = uuid.uuid4()
    def test_get_volume_allowed(self):
        """Admin context: single-volume GET carries the host attribute."""
        ctx = context.RequestContext('admin', 'fake', True)
        req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
        req.method = 'GET'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = json.loads(res.body)['volume']
        self.assertEqual(vol['os-vol-host-attr:host'], 'host001')
    def test_get_volume_unallowed(self):
        """Non-admin context: the host attribute must be absent."""
        ctx = context.RequestContext('non-admin', 'fake', False)
        req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
        req.method = 'GET'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = json.loads(res.body)['volume']
        self.assertFalse('os-vol-host-attr:host' in vol)
    def test_list_detail_volumes_allowed(self):
        """Admin context: detail listing carries the host attribute."""
        ctx = context.RequestContext('admin', 'fake', True)
        req = webob.Request.blank('/v2/fake/volumes/detail')
        req.method = 'GET'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = json.loads(res.body)['volumes']
        self.assertEqual(vol[0]['os-vol-host-attr:host'], 'host001')
    def test_list_detail_volumes_unallowed(self):
        """Non-admin context: detail listing omits the host attribute."""
        ctx = context.RequestContext('non-admin', 'fake', False)
        req = webob.Request.blank('/v2/fake/volumes/detail')
        req.method = 'GET'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = json.loads(res.body)['volumes']
        self.assertFalse('os-vol-host-attr:host' in vol[0])
    def test_list_simple_volumes_no_host(self):
        """Even for admins, the brief (non-detail) listing omits the host."""
        ctx = context.RequestContext('admin', 'fake', True)
        req = webob.Request.blank('/v2/fake/volumes')
        req.method = 'GET'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = json.loads(res.body)['volumes']
        self.assertFalse('os-vol-host-attr:host' in vol[0])
    def test_get_volume_xml(self):
        """XML rendering: host appears as a namespaced XML attribute."""
        ctx = context.RequestContext('admin', 'fake', True)
        req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
        req.method = 'GET'
        req.accept = 'application/xml'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = etree.XML(res.body)
        host_key = ('{http://docs.openstack.org/volume/ext/'
                    'volume_host_attribute/api/v1}host')
        self.assertEqual(vol.get(host_key), 'host001')
    def test_list_volumes_detail_xml(self):
        """XML detail listing: first volume carries the namespaced host."""
        ctx = context.RequestContext('admin', 'fake', True)
        req = webob.Request.blank('/v2/fake/volumes/detail')
        req.method = 'GET'
        req.accept = 'application/xml'
        req.environ['cinder.context'] = ctx
        res = req.get_response(app())
        vol = list(etree.XML(res.body))[0]
        host_key = ('{http://docs.openstack.org/volume/ext/'
                    'volume_host_attribute/api/v1}host')
        self.assertEqual(vol.get(host_key), 'host001')
|
tomasdubec/openstack-cinder
|
cinder/tests/api/contrib/test_volume_host_attribute.py
|
Python
|
apache-2.0
| 4,834
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import apps
from django.core.exceptions import ObjectDoesNotExist
from django.forms import widgets
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils.safestring import mark_safe
from cmsplugin_cascade.fields import PartialFormField
from cmsplugin_cascade.plugin_base import CascadePluginBase
from cmsplugin_cascade.utils import resolve_dependencies
from .forms import LinkForm
class LinkPluginBase(CascadePluginBase):
    """Common base for cascade plugins that render an ``<a>`` element."""
    text_enabled = True
    allow_children = False
    parent_classes = []
    require_parent = False
    # Extra glossary-backed form fields shared by all link plugins:
    # link target window and the anchor's title attribute.
    glossary_fields = (
        PartialFormField('target',
            widgets.RadioSelect(choices=(('', _("Same Window")), ('_blank', _("New Window")),
                ('_parent', _("Parent Window")), ('_top', _("Topmost Frame")),)),
            initial='',
            label=_("Link Target"),
            help_text=_("Open Link in other target.")
        ),
        PartialFormField('title',
            widgets.TextInput(),
            label=_("Title"),
            help_text=_("Link's Title")
        ),
    )
    html_tag_attributes = {'title': 'title', 'target': 'target'}
    # map field from glossary to these form fields
    glossary_field_map = {'link': ('link_type', 'cms_page', 'section', 'ext_url', 'mail_to',)}
    @classmethod
    def get_link(cls, obj):
        """Resolve the href for plugin instance ``obj`` from its glossary.

        External URLs and mailto links are formatted directly; otherwise
        a ``model``/``pk`` pair is resolved through the ORM (the looked-up
        instance is cached on ``obj._link_model``).  Returns None when the
        link cannot be resolved.
        """
        link = obj.glossary.get('link', {})
        linktype = link.get('type')
        if linktype == 'exturl':
            return '{url}'.format(**link)
        if linktype == 'email':
            return 'mailto:{email}'.format(**link)
        # otherwise try to resolve by model
        if 'model' in link and 'pk' in link:
            if not hasattr(obj, '_link_model'):
                Model = apps.get_model(*link['model'].split('.'))
                try:
                    obj._link_model = Model.objects.get(pk=link['pk'])
                except Model.DoesNotExist:
                    obj._link_model = None
            if obj._link_model:
                href = obj._link_model.get_absolute_url()
                if 'section' in link:
                    # Append a fragment identifier for an in-page section,
                    # ignoring missing/unresolvable section ids.
                    try:
                        element_ids = obj._link_model.cascadepage.glossary['element_ids']
                        href = '{}#{}'.format(href, element_ids[link['section']])
                    except (KeyError, ObjectDoesNotExist):
                        pass
                return href
    def get_ring_bases(self):
        # Register this class in the client-side (ring.js) inheritance chain.
        bases = super(LinkPluginBase, self).get_ring_bases()
        bases.append('LinkPluginBase')
        return bases
    def get_form(self, request, obj=None, **kwargs):
        # Use the shared LinkForm unless the caller supplied another form.
        kwargs.setdefault('form', LinkForm.get_form_class())
        return super(LinkPluginBase, self).get_form(request, obj, **kwargs)
class DefaultLinkPluginBase(LinkPluginBase):
    """
    The default `LinkPluginBase` class. It is injected by the class creator in link.config
    """
    fields = (('link_type', 'cms_page', 'section', 'ext_url', 'mail_to',), 'glossary',)
    class Media:
        # Admin-side JavaScript driving the link-type form widgets.
        js = resolve_dependencies('cascade/js/admin/defaultlinkplugin.js')
@python_2_unicode_compatible
class LinkElementMixin(object):
    """
    Mixin turning a CascadeElement into a proxy model for rendering the
    ``<a>`` element.  A link placed inside the Text Editor Plugin is
    rendered through ``str(instance)`` rather than ``instance.content``,
    so both entry points are provided.
    """
    def __str__(self):
        # Text-plugin rendering path delegates to the `content` property.
        return self.content

    @property
    def link(self):
        # The concrete plugin class knows how to resolve the href.
        return self.plugin_class.get_link(self)

    @property
    def content(self):
        text = self.glossary.get('link_content', '')
        return mark_safe(text)
|
rfleschenberg/djangocms-cascade
|
cmsplugin_cascade/link/plugin_base.py
|
Python
|
mit
| 3,748
|
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.apps import apps
from django.db.models import Q
from taiga.base.filters import PermissionBasedFilterBackend
class ContactsFilterBackend(PermissionBasedFilterBackend):
    """Restrict a user queryset to the contacts the requester may see."""
    permission = "view_project"
    def filter_queryset(self, request, queryset, view):
        """Return active users belonging to projects visible to the caller.

        Superusers see every active user; authenticated users see members
        of projects where they hold the `view_project` permission or are
        owner (excluding themselves); anonymous callers only see members
        of projects granting the permission anonymously.
        """
        qs = queryset.filter(is_active=True)
        # Authenticated
        if request.user.is_authenticated():
            # if super user we don't need to filter anything
            if not request.user.is_superuser:
                Membership = apps.get_model('projects', 'Membership')
                memberships_qs = Membership.objects.filter(user=request.user)
                # Keep memberships that grant the permission, or ownership.
                memberships_qs = memberships_qs.filter(Q(role__permissions__contains=[self.permission]) |
                                                       Q(is_owner=True))
                projects_list = [membership.project_id for membership in memberships_qs]
                qs = qs.filter(memberships__project_id__in=projects_list)
                # The requesting user is never their own contact.
                qs = qs.exclude(id=request.user.id)
        # Anonymous
        else:
            qs = qs.filter(memberships__project__anon_permissions__contains=[self.permission])
        # distinct(): the membership join can duplicate user rows.
        return qs.distinct()
|
rajiteh/taiga-back
|
taiga/users/filters.py
|
Python
|
agpl-3.0
| 2,013
|
#coding:utf-8
class md5_str(object):
def __init__(self,value):
|
51reboot/actual_09_homework
|
11/zhouyang/cmdb/user/md5_str.py
|
Python
|
mit
| 67
|
"""Client interfaces for local and backend (binary) data storage.
Example
-------
>>> import pybackend.storage as S
>>> store = S.Storage(name='blah-blah-5678', project='my-project-3',
backend=S.LOCAL, local_dir="tmp")
>>> key = "my_song"
>>> store.upload(b"never gonna give you up...", key)
>>> print(store.download(key))
b"hello darkness my old friend"
"""
from google.cloud import storage
import io
import logging
import os
import warnings
from . import GCLOUD, LOCAL
logger = logging.getLogger(__name__)
def _makedirs(dpath):
if not os.path.exists(dpath):
os.makedirs(dpath)
return dpath
class LocalData(object):
    """Base class for on-disk objects addressed by a name under a root."""

    def __init__(self, name, root):
        self.name = name
        self.root = root

    @property
    def path(self):
        """Location of this object on disk (root joined with name)."""
        return os.path.join(self.root, self.name)
class LocalBlob(LocalData):
    """File-backed analogue of a google-cloud-storage blob."""

    def upload_from_string(self, bstream, content_type):
        """Write data as a bytestring.

        Parameters
        ----------
        bstream : bytes
            Bytestring to write to this blob's file.
        content_type : str
            Ignored; present only to mirror gcloud.storage's interface.
        """
        with open(self.path, 'wb') as fhandle:
            fhandle.write(bstream)

    def download_as_string(self):
        """Read this blob's file back.

        Returns
        -------
        bstream : bytes
            Bytestring contents of the file.
        """
        with open(self.path, 'rb') as fhandle:
            return fhandle.read()
class LocalBucket(LocalData):
    """File-system analogue of a gcloud storage bucket (one directory).

    ``__init__`` is inherited unchanged from ``LocalData``; the previous
    override only forwarded to ``super()`` and was dead weight.
    """

    def blob(self, name):
        """Return a blob for writing, creating the bucket dir on demand."""
        return LocalBlob(name, root=_makedirs(self.path))

    def get_blob(self, name):
        """Return a blob for reading; the bucket dir must already exist."""
        return LocalBlob(name, root=self.path)
class LocalClient(object):
    """Disk-backed stand-in for ``google.cloud.storage.Client``."""

    def __init__(self, project, root_dir):
        """Create a local storage client.

        Parameters
        ----------
        project : str
            Unique identifier for the owner of the client.
        root_dir : str
            Directory on disk for writing binary data; created if absent.
        """
        self.project = project
        self.root_dir = _makedirs(root_dir)

    def get_bucket(self, name):
        """Returns a bucket for the given name."""
        return LocalBucket(name=name, root=self.root_dir)
# Backend identifier -> client factory.  Both factories take `project`;
# LocalClient additionally requires `root_dir` (see Storage._client_kwargs).
BACKENDS = {
    GCLOUD: storage.Client,
    LOCAL: LocalClient
}
class Storage(object):
    """Thin facade over a storage client, either local or gcloud."""

    def __init__(self, name, project, backend=GCLOUD,
                 local_dir=None):
        """Create a storage object.

        Parameters
        ----------
        name : str
            Unique name for the bucket to use, persistent across instances.
        project : str
            Unique identifier for the owner of this storage object.
        backend : str, default='gcloud'
            Backend storage platform to use, one of ['local', 'gcloud'].
        local_dir : str, default=None
            Local directory on disk; required (and only used) when
            ``backend='local'``.
        """
        if local_dir is None and backend == LOCAL:
            raise ValueError(
                "`local_dir` must be given if backend is '{}'".format(LOCAL))
        self.name = name
        self.project = project
        self._backend = backend
        self._client_kwargs = dict(project=project)
        if backend == LOCAL:
            local_root = os.path.abspath(os.path.expanduser(local_dir))
            self._client_kwargs.update(root_dir=local_root)

    @property
    def client(self):
        """A freshly constructed client for the configured backend."""
        factory = BACKENDS[self._backend]
        return factory(**self._client_kwargs)

    def put(self, key, fdata):
        """Store ``fdata`` (bytes) under ``key`` in this bucket.

        Parameters
        ----------
        key : str
            Key for writing the file data.
        fdata : str
            File's bytestream.
        """
        logger.debug("Uploading {} bytes to {}.".format(len(fdata), key))
        target = self.client.get_bucket(self.name).blob(key)
        target.upload_from_string(fdata, content_type="application/octet-stream")

    def get(self, key):
        """Retrieve the bytestring previously stored under ``key``.

        Returns
        -------
        data : bytes
            Binary data.
        """
        source = self.client.get_bucket(self.name).get_blob(key)
        return source.download_as_string()
|
cosmir/open-mic
|
backend_server/pybackend/storage.py
|
Python
|
mit
| 4,555
|
#!/usr/bin/env python
#
# asn2wrs.py
# ASN.1 to Wireshark dissector compiler
# Copyright 2004 Tomas Kukosa
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, provided that the above
# copyright notice(s) and this permission notice appear in all copies of
# the Software and that both the above copyright notice(s) and this
# permission notice appear in supporting documentation.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
# OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# HOLDERS INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL
# INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING
# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Except as contained in this notice, the name of a copyright holder
# shall not be used in advertising or otherwise to promote the sale, use
# or other dealings in this Software without prior written authorization
# of the copyright holder.
"""ASN.1 to Wireshark dissector compiler"""
#
# Compiler from ASN.1 specification to the Wireshark dissector
#
# Based on ASN.1 to Python compiler from Aaron S. Lav's PyZ3950 package licensed under the X Consortium license
# http://www.pobox.com/~asl2/software/PyZ3950/
# (ASN.1 to Python compiler functionality is broken but not removed, it could be revived if necessary)
#
# It requires Dave Beazley's PLY parsing package licensed under the LGPL (tested with version 2.3)
# http://www.dabeaz.com/ply/
#
#
# ITU-T Recommendation X.680 (07/2002),
# Information technology - Abstract Syntax Notation One (ASN.1): Specification of basic notation
#
# ITU-T Recommendation X.681 (07/2002),
# Information technology - Abstract Syntax Notation One (ASN.1): Information object specification
#
# ITU-T Recommendation X.682 (07/2002),
# Information technology - Abstract Syntax Notation One (ASN.1): Constraint specification
#
# ITU-T Recommendation X.683 (07/2002),
# Information technology - Abstract Syntax Notation One (ASN.1): Parameterization of ASN.1 specifications
#
# ITU-T Recommendation X.880 (07/1994),
# Information technology - Remote Operations: Concepts, model and notation
#
import warnings
import re
import sys
import os
import os.path
import time
import getopt
import traceback
import lex
import yacc
from functools import partial
if sys.version_info[0] < 3:
from string import maketrans
# OID name -> number conversion table
oid_names = {
'/itu-t' : 0,
'/itu' : 0,
'/ccitt' : 0,
'/itu-r' : 0,
'0/recommendation' : 0,
'0.0/a' : 1,
'0.0/b' : 2,
'0.0/c' : 3,
'0.0/d' : 4,
'0.0/e' : 5,
'0.0/f' : 6,
'0.0/g' : 7,
'0.0/h' : 8,
'0.0/i' : 9,
'0.0/j' : 10,
'0.0/k' : 11,
'0.0/l' : 12,
'0.0/m' : 13,
'0.0/n' : 14,
'0.0/o' : 15,
'0.0/p' : 16,
'0.0/q' : 17,
'0.0/r' : 18,
'0.0/s' : 19,
'0.0/t' : 20,
'0.0/tseries' : 20,
'0.0/u' : 21,
'0.0/v' : 22,
'0.0/w' : 23,
'0.0/x' : 24,
'0.0/y' : 25,
'0.0/z' : 26,
'0/question' : 1,
'0/administration' : 2,
'0/network-operator' : 3,
'0/identified-organization' : 4,
'0/r-recommendation' : 5,
'0/data' : 9,
'/iso' : 1,
'1/standard' : 0,
'1/registration-authority' : 1,
'1/member-body' : 2,
'1/identified-organization' : 3,
'/joint-iso-itu-t' : 2,
'/joint-iso-ccitt' : 2,
'2/presentation' : 0,
'2/asn1' : 1,
'2/association-control' : 2,
'2/reliable-transfer' : 3,
'2/remote-operations' : 4,
'2/ds' : 5,
'2/directory' : 5,
'2/mhs' : 6,
'2/mhs-motis' : 6,
'2/ccr' : 7,
'2/oda' : 8,
'2/ms' : 9,
'2/osi-management' : 9,
'2/transaction-processing' : 10,
'2/dor' : 11,
'2/distinguished-object-reference' : 11,
'2/reference-data-transfe' : 12,
'2/network-layer' : 13,
'2/network-layer-management' : 13,
'2/transport-layer' : 14,
'2/transport-layer-management' : 14,
'2/datalink-layer' : 15,
'2/datalink-layer-managemen' : 15,
'2/datalink-layer-management-information' : 15,
'2/country' : 16,
'2/registration-procedures' : 17,
'2/registration-procedure' : 17,
'2/physical-layer' : 18,
'2/physical-layer-management' : 18,
'2/mheg' : 19,
'2/genericULS' : 20,
'2/generic-upper-layers-security' : 20,
'2/guls' : 20,
'2/transport-layer-security-protocol' : 21,
'2/network-layer-security-protocol' : 22,
'2/international-organizations' : 23,
'2/internationalRA' : 23,
'2/sios' : 24,
'2/uuid' : 25,
'2/odp' : 26,
'2/upu' : 40,
}
ITEM_FIELD_NAME = '_item'
UNTAG_TYPE_NAME = '_untag'
def asn2c(id):
    """Mangle an ASN.1 identifier into a valid C identifier.

    The characters '-', '.' and '&' are each replaced by '_'.
    """
    return re.sub(r'[-.&]', '_', id)
# Module-level lexer/parser state shared by the rule functions below.
input_file = None   # name of the ASN.1 file currently being processed
g_conform = None    # conformance-file handler (set elsewhere)
lexer = None        # PLY lexer instance (set elsewhere)
in_oid = False      # presumably toggled while lexing OID bodies -- set elsewhere
class LexError(Exception):
    """Raised when the lexer meets a character it cannot tokenize."""

    def __init__(self, tok, filename=None):
        self.tok = tok
        self.filename = filename
        offending = self.tok.value[0]
        self.msg = "Unexpected character %r" % (offending,)
        Exception.__init__(self, self.msg)

    def __repr__(self):
        location = "%s:%d" % (self.filename, self.tok.lineno)
        return "%s: %s" % (location, self.msg)
    __str__ = __repr__
class ParseError(Exception):
    """Raised when the grammar cannot accept the next token."""

    def __init__(self, tok, filename=None):
        self.tok = tok
        self.filename = filename
        self.msg = "Unexpected token %s(%r)" % (tok.type, tok.value)
        Exception.__init__(self, self.msg)

    def __repr__(self):
        location = "%s:%d" % (self.filename, self.tok.lineno)
        return "%s: %s" % (location, self.msg)
    __str__ = __repr__
class DuplicateError(Exception):
    """Raised when the same identifier is registered twice."""

    def __init__(self, type, ident):
        self.type = type
        self.ident = ident
        self.msg = "Duplicate %s for %s" % (type, ident)
        Exception.__init__(self, self.msg)

    def __repr__(self):
        return self.msg
    __str__ = __repr__
class CompError(Exception):
    """Generic compiler error carrying a preformatted message."""

    def __init__(self, msg):
        self.msg = msg
        Exception.__init__(self, msg)

    def __repr__(self):
        return self.msg
    __str__ = __repr__
# Extra PLY lexer state: 'braceignore' (exclusive) is used to skip over
# brace-balanced bodies without tokenizing their contents.
states = (
  ('braceignore','exclusive'),
)
# Operator precedence for ASN.1 constraint set arithmetic.
precedence = (
  ('left', 'UNION', 'BAR'),
  ('left', 'INTERSECTION', 'CIRCUMFLEX'),
)
# 11 ASN.1 lexical items
static_tokens = {
r'::=' : 'ASSIGNMENT', # 11.16 Assignment lexical item
r'\.\.' : 'RANGE', # 11.17 Range separator
r'\.\.\.' : 'ELLIPSIS', # 11.18 Ellipsis
r'\[\[' : 'LVERBRACK', # 11.19 Left version brackets
r'\]\]' : 'RVERBRACK', # 11.20 Right version brackets
# 11.26 Single character lexical items
r'\{' : 'LBRACE',
r'\}' : 'RBRACE',
r'<' : 'LT',
#r'>' : 'GT',
r',' : 'COMMA',
r'\.' : 'DOT',
r'\(' : 'LPAREN',
r'\)' : 'RPAREN',
r'\[' : 'LBRACK',
r'\]' : 'RBRACK',
r'-' : 'MINUS',
r':' : 'COLON',
#r'=' : 'EQ',
#r'"' : 'QUOTATION',
#r"'" : 'APOSTROPHE',
r';' : 'SEMICOLON',
r'@' : 'AT',
r'\!' : 'EXCLAMATION',
r'\^' : 'CIRCUMFLEX',
r'\&' : 'AMPERSAND',
r'\|' : 'BAR'
}
# 11.27 Reserved words
# all keys in reserved_words must start w/ upper case
reserved_words = {
'ABSENT' : 'ABSENT',
'ABSTRACT-SYNTAX' : 'ABSTRACT_SYNTAX',
'ALL' : 'ALL',
'APPLICATION' : 'APPLICATION',
'AUTOMATIC' : 'AUTOMATIC',
'BEGIN' : 'BEGIN',
'BIT' : 'BIT',
'BOOLEAN' : 'BOOLEAN',
'BY' : 'BY',
'CHARACTER' : 'CHARACTER',
'CHOICE' : 'CHOICE',
'CLASS' : 'CLASS',
'COMPONENT' : 'COMPONENT',
'COMPONENTS' : 'COMPONENTS',
'CONSTRAINED' : 'CONSTRAINED',
'CONTAINING' : 'CONTAINING',
'DEFAULT' : 'DEFAULT',
'DEFINITIONS' : 'DEFINITIONS',
'EMBEDDED' : 'EMBEDDED',
# 'ENCODED' : 'ENCODED',
'END' : 'END',
'ENUMERATED' : 'ENUMERATED',
# 'EXCEPT' : 'EXCEPT',
'EXPLICIT' : 'EXPLICIT',
'EXPORTS' : 'EXPORTS',
# 'EXTENSIBILITY' : 'EXTENSIBILITY',
'EXTERNAL' : 'EXTERNAL',
'FALSE' : 'FALSE',
'FROM' : 'FROM',
'GeneralizedTime' : 'GeneralizedTime',
'IDENTIFIER' : 'IDENTIFIER',
'IMPLICIT' : 'IMPLICIT',
# 'IMPLIED' : 'IMPLIED',
'IMPORTS' : 'IMPORTS',
'INCLUDES' : 'INCLUDES',
'INSTANCE' : 'INSTANCE',
'INTEGER' : 'INTEGER',
'INTERSECTION' : 'INTERSECTION',
'MAX' : 'MAX',
'MIN' : 'MIN',
'MINUS-INFINITY' : 'MINUS_INFINITY',
'NULL' : 'NULL',
'OBJECT' : 'OBJECT',
'ObjectDescriptor' : 'ObjectDescriptor',
'OCTET' : 'OCTET',
'OF' : 'OF',
'OPTIONAL' : 'OPTIONAL',
'PATTERN' : 'PATTERN',
'PDV' : 'PDV',
'PLUS-INFINITY' : 'PLUS_INFINITY',
'PRESENT' : 'PRESENT',
'PRIVATE' : 'PRIVATE',
'REAL' : 'REAL',
'RELATIVE-OID' : 'RELATIVE_OID',
'SEQUENCE' : 'SEQUENCE',
'SET' : 'SET',
'SIZE' : 'SIZE',
'STRING' : 'STRING',
'SYNTAX' : 'SYNTAX',
'TAGS' : 'TAGS',
'TRUE' : 'TRUE',
'TYPE-IDENTIFIER' : 'TYPE_IDENTIFIER',
'UNION' : 'UNION',
'UNIQUE' : 'UNIQUE',
'UNIVERSAL' : 'UNIVERSAL',
'UTCTime' : 'UTCTime',
'WITH' : 'WITH',
# X.208 obsolete but still used
'ANY' : 'ANY',
'DEFINED' : 'DEFINED',
}
for k in list(static_tokens.keys()):
if static_tokens [k] == None:
static_tokens [k] = k
StringTypes = ['Numeric', 'Printable', 'IA5', 'BMP', 'Universal', 'UTF8',
'Teletex', 'T61', 'Videotex', 'Graphic', 'ISO646', 'Visible',
'General']
for s in StringTypes:
reserved_words[s + 'String'] = s + 'String'
tokens = list(static_tokens.values()) \
+ list(reserved_words.values()) \
+ ['BSTRING', 'HSTRING', 'QSTRING',
'UCASE_IDENT', 'LCASE_IDENT', 'LCASE_IDENT_ASSIGNED', 'CLASS_IDENT',
'REAL_NUMBER', 'NUMBER', 'PYQUOTE']
cur_mod = __import__ (__name__) # XXX blech!
for (k, v) in list(static_tokens.items ()):
cur_mod.__dict__['t_' + v] = k
# NOTE: these are PLY lexer rules -- each function's docstring IS the
# token's regular expression.  Do not edit those strings as documentation.
# 11.10 Binary strings
def t_BSTRING (t):
    r"'[01]*'B"
    return t
# 11.12 Hexadecimal strings
def t_HSTRING (t):
    r"'[0-9A-Fa-f]*'H"
    return t
def t_QSTRING (t):
    r'"([^"]|"")*"'
    return t
def t_UCASE_IDENT (t):
    r"[A-Z](-[a-zA-Z0-9]|[a-zA-Z0-9])*" # can't end w/ '-'
    # Reclassify as class ident / class syntax / reserved word when matched.
    if (is_class_ident(t.value)): t.type = 'CLASS_IDENT'
    if (is_class_syntax(t.value)): t.type = t.value
    t.type = reserved_words.get(t.value, t.type)
    return t
# Identifiers that have a value assigned (filled in elsewhere).
lcase_ident_assigned = {}
def t_LCASE_IDENT (t):
    r"[a-z](-[a-zA-Z0-9]|[a-zA-Z0-9])*" # can't end w/ '-'
    # Outside OID context, a known assigned identifier gets its own token type.
    if (not in_oid and (t.value in lcase_ident_assigned)): t.type = 'LCASE_IDENT_ASSIGNED'
    return t
# 11.9 Real numbers
def t_REAL_NUMBER (t):
    r"[0-9]+\.[0-9]*(?!\.)"
    return t
# 11.8 Numbers
def t_NUMBER (t):
    r"0|([1-9][0-9]*)"
    return t
# 11.6 Comments
pyquote_str = 'PYQUOTE'
def t_COMMENT(t):
    r"--(-[^\-\n]|[^\-\n])*(--|\n|-\n|$|-$)"
    if (t.value.find("\n") >= 0) : t.lexer.lineno += 1
    # A comment starting with PYQUOTE is passed through as a PYQUOTE token.
    if t.value[2:2+len (pyquote_str)] == pyquote_str:
        t.value = t.value[2+len(pyquote_str):]
        t.value = t.value.lstrip ()
        t.type = pyquote_str
        return t
    return None
t_ignore = " \t\r"
def t_NEWLINE(t):
    r'\n+'
    t.lexer.lineno += t.value.count("\n")
def t_error(t):
    global input_file
    raise LexError(t, input_file)
# state 'braceignore': track brace depth, only surfacing the closing RBRACE.
def t_braceignore_lbrace(t):
    r'\{'
    t.lexer.level +=1
def t_braceignore_rbrace(t):
    r'\}'
    t.lexer.level -=1
    # If closing brace, return token
    if t.lexer.level == 0:
        t.type = 'RBRACE'
        return t
def t_braceignore_QSTRING (t):
    r'"([^"]|"")*"'
    t.lexer.lineno += t.value.count("\n")
def t_braceignore_COMMENT(t):
    r"--(-[^\-\n]|[^\-\n])*(--|\n|-\n|$|-$)"
    if (t.value.find("\n") >= 0) : t.lexer.lineno += 1
def t_braceignore_nonspace(t):
    r'[^\s\{\}\"-]+|-(?!-)'
t_braceignore_ignore = " \t\r"
def t_braceignore_NEWLINE(t):
    r'\n+'
    t.lexer.lineno += t.value.count("\n")
def t_braceignore_error(t):
    t.lexer.skip(1)
class Ctx:
    """Accumulates assignments and dependencies while emitting Python output.

    Legacy context from the PyZ3950 ASN.1-to-Python compiler path.
    """
    def __init__ (self, defined_dict, indent = 0):
        self.tags_def = 'EXPLICIT' # default = explicit
        self.indent_lev = 0
        self.assignments = {}      # ident -> emitted value text
        self.dependencies = {}     # ident -> idents it depends on
        self.pyquotes = []
        self.defined_dict = defined_dict  # cross-module duplicate tracking
        self.name_ctr = 0
    def spaces (self):
        # Current indentation prefix: four spaces per level.
        return " " * (4 * self.indent_lev)
    def indent (self):
        self.indent_lev += 1
    def outdent (self):
        self.indent_lev -= 1
        assert (self.indent_lev >= 0)
    def register_assignment (self, ident, val, dependencies):
        # Record one 'ident = val' assignment and its dependencies;
        # duplicates within or across modules are fatal.
        if ident in self.assignments:
            raise DuplicateError("assignment", ident)
        if ident in self.defined_dict:
            raise Exception("cross-module duplicates for %s" % ident)
        self.defined_dict [ident] = 1
        self.assignments[ident] = val
        self.dependencies [ident] = dependencies
        return ""
    # return "#%s depends on %s" % (ident, str (dependencies))
    def register_pyquote (self, val):
        self.pyquotes.append (val)
        return ""
    def output_assignments (self):
        # Emit assignments in dependency order.  When no further progress
        # is possible, the remaining entries form a cycle: report it in a
        # comment line and dump them unordered.
        already_output = {}
        text_list = []
        assign_keys = list(self.assignments.keys())
        to_output_count = len (assign_keys)
        while True:
            any_output = 0
            for (ident, val) in list(self.assignments.items ()):
                if ident in already_output:
                    continue
                ok = 1
                for d in self.dependencies [ident]:
                    if ((d not in already_output) and
                        (d in assign_keys)):
                        ok = 0
                if ok:
                    text_list.append ("%s=%s" % (ident,
                                                self.assignments [ident]))
                    already_output [ident] = 1
                    any_output = 1
                    to_output_count -= 1
                    assert (to_output_count >= 0)
            if not any_output:
                if to_output_count == 0:
                    break
                # OK, we detected a cycle
                cycle_list = []
                for ident in list(self.assignments.keys ()):
                    if ident not in already_output:
                        depend_list = [d for d in self.dependencies[ident] if d in assign_keys]
                        cycle_list.append ("%s(%s)" % (ident, ",".join (depend_list)))
                text_list.append ("# Cycle XXX " + ",".join (cycle_list))
                for (ident, val) in list(self.assignments.items ()):
                    if ident not in already_output:
                        text_list.append ("%s=%s" % (ident, self.assignments [ident]))
                break
        return "\n".join (text_list)
    def output_pyquotes (self):
        return "\n".join (self.pyquotes)
    def make_new_name (self):
        # Generate a unique synthetic identifier.
        self.name_ctr += 1
        return "_compiler_generated_name_%d" % (self.name_ctr,)
#--- Flags for EXPORT, USER_DEFINED, NO_EMIT, MAKE_ENUM -------------------------------
EF_TYPE = 0x0001
EF_VALS = 0x0002
EF_ENUM = 0x0004
EF_WS_DLL = 0x0010 # exported from shared library
EF_EXTERN = 0x0020
EF_NO_PROT = 0x0040
EF_NO_TYPE = 0x0080
EF_UCASE = 0x0100
EF_TABLE = 0x0400
EF_DEFINE = 0x0800
EF_MODULE = 0x1000
#--- common dependency computation ---
# Input : list of items
# dictionary with lists of dependency
#
#
# Output : list of two outputs:
# [0] list of items in dependency
# [1] list of cycle dependency cycles
def dependency_compute(items, dependency, map_fn = lambda t: t, ignore_fn = lambda t: False):
    """Order *items* so that every entry follows its dependencies.

    Iterative depth-first walk over the dependency graph.

    Parameters: *items* is the list to order; *dependency* maps an item to
    the list of items it depends on; *map_fn* translates an item to its
    emitted form; *ignore_fn* marks dependencies that must not be followed.

    Returns a pair (ordered, cycles): *ordered* holds the mapped items in
    dependency order, *cycles* lists every dependency cycle found, each as
    the chain of items closing the loop.
    """
    ordered = []
    cycles = []
    emitted = {}  # mapped items already appended to `ordered`
    for start in items:
        if map_fn(start) in emitted:
            continue
        stack = [start]
        # pending[i]: dependencies of i not yet explored (i is on the stack)
        pending = {start: dependency.get(start, [])[:]}
        while stack:
            top = stack[-1]
            if pending[top]:  # still has unexplored dependencies
                dep = pending[top].pop(0)
                if map_fn(dep) in emitted or ignore_fn(dep):
                    continue
                if dep in pending:
                    # `dep` is already on the stack: we closed a cycle.
                    chain = stack[:]
                    chain.reverse()
                    chain = [dep] + chain[0:chain.index(dep) + 1]
                    chain.reverse()
                    cycles.append(chain)
                    continue
                stack.append(dep)
                pending[dep] = dependency.get(dep, [])[:]
            else:
                # All dependencies of `top` handled: emit it.
                del pending[top]
                mapped = map_fn(stack.pop())
                if mapped in emitted:
                    continue
                ordered.append(mapped)
                emitted[mapped] = True
    return (ordered, cycles)
# Given a filename, return a relative path from epan/dissectors
def rel_dissector_path(filename):
    """Rewrite *filename* relative to the epan/dissectors directory.

    Leading path components are stripped until the path starts at the
    'asn1' directory (or only three components remain), then '../../' is
    prefixed.  The result is always joined with forward slashes.
    """
    parts = os.path.abspath(filename).split(os.sep)
    while len(parts) > 3 and parts[0] != 'asn1':
        parts.pop(0)
    return '/'.join(['..', '..'] + parts)
#--- EthCtx -------------------------------------------------------------------
class EthCtx:
    def __init__(self, conform, output, indent = 0):
        """Central code-generation context for the Wireshark backend."""
        self.conform = conform
        self.output = output
        # Give both collaborators a back-pointer to this context.
        self.conform.ectx = self
        self.output.ectx = self
        # Encoding defaults: unaligned PER.
        self.encoding = 'per'
        self.aligned = False
        self.default_oid_variant = ''
        self.default_opentype_variant = ''
        self.default_containing_variant = '_pdu_new'
        self.default_embedded_pdv_cb = None
        self.default_external_type_cb = None
        self.remove_prefix = None
        self.srcdir = None
        self.emitted_pdu = {}
        self.module = {}
        self.module_ord = []
        # Registries aggregated across every processed module.
        self.all_type_attr = {}
        self.all_tags = {}
        self.all_vals = {}
    def encp(self): # encoding protocol
        encp = self.encoding
        return encp
    # Encoding
    def Per(self): return self.encoding == 'per'
    def Ber(self): return self.encoding == 'ber'
    def Aligned(self): return self.aligned
    def Unaligned(self): return not self.aligned
    # Tags are needed when explicitly requested or when encoding is BER.
    def NeedTags(self): return self.tag_opt or self.Ber()
    def NAPI(self): return False # disable planned features
    def Module(self): # current module name
        return self.modules[-1][0]
    def groups(self):
        # True when output should be grouped by protocol.
        return self.group_by_prot or (self.conform.last_group > 0)
    def dbg(self, d):
        # True when debug category `d` appears in the dbgopt string.
        if (self.dbgopt.find(d) >= 0):
            return True
        else:
            return False
def value_max(self, a, b):
if (a == 'MAX') or (b == 'MAX'): return 'MAX';
if a == 'MIN': return b;
if b == 'MIN': return a;
try:
if (int(a) > int(b)):
return a
else:
return b
except (ValueError, TypeError):
pass
return "MAX((%s),(%s))" % (a, b)
def value_min(self, a, b):
if (a == 'MIN') or (b == 'MIN'): return 'MIN';
if a == 'MAX': return b;
if b == 'MAX': return a;
try:
if (int(a) < int(b)):
return a
else:
return b
except (ValueError, TypeError):
pass
return "MIN((%s),(%s))" % (a, b)
    def value_get_eth(self, val):
        # Value objects render themselves; plain identifiers map through
        # the value registry to their eth name, or pass through untouched.
        if isinstance(val, Value):
            return val.to_str(self)
        ethname = val
        if val in self.value:
            ethname = self.value[val]['ethname']
        return ethname
    def value_get_val(self, nm):
        # Resolve identifier `nm` to its concrete value.  Falls back to the
        # C-mangled name (and warns) when the value is unknown or an
        # imported value cannot be found.
        val = asn2c(nm)
        if nm in self.value:
            if self.value[nm]['import']:
                # Imported value: look it up in the exporting module.
                v = self.get_val_from_all(nm, self.value[nm]['import'])
                if v is None:
                    msg = 'Need value of imported value identifier %s from %s (%s)' % (nm, self.value[nm]['import'], self.value[nm]['proto'])
                    warnings.warn_explicit(msg, UserWarning, '', 0)
                else:
                    val = v
            else:
                val = self.value[nm]['value']
                if isinstance (val, Value):
                    val = val.to_str(self)
        else:
            msg = 'Need value of unknown value identifier %s' % (nm)
            warnings.warn_explicit(msg, UserWarning, '', 0)
        return val
    def eth_get_type_attr(self, type):
        """Collect the effective hf attributes (TYPE/DISPLAY/STRINGS/BITMASK)
        for *type* by following Type_Ref/TaggedType chains to the base type
        and layering attribute overrides from base to leaf.
        """
        #print "eth_get_type_attr(%s)" % (type)
        types = [type]
        # walk the reference chain, collecting every intermediate type name
        while (not self.type[type]['import']):
            val = self.type[type]['val']
            #print val
            ttype = type
            # strip tags; the untagged inner type lives under '/_untag'
            while (val.type == 'TaggedType'):
                val = val.val
                ttype += '/' + UNTAG_TYPE_NAME
            if (val.type != 'Type_Ref'):
                if (type != ttype):
                    types.append(ttype)
                break
            type = val.val
            types.append(type)
        attr = {}
        #print "  ", types
        # apply attributes base-first so leaf overrides win
        while len(types):
            t = types.pop()
            if (self.type[t]['import']):
                attr.update(self.type[t]['attr'])
                attr.update(self.eth_get_type_attr_from_all(t, self.type[t]['import']))
            elif (self.type[t]['val'].type == 'SelectionType'):
                val = self.type[t]['val']
                (ftype, display) = val.eth_ftype(self)
                attr.update({ 'TYPE' : ftype, 'DISPLAY' : display,
                              'STRINGS' : val.eth_strings(), 'BITMASK' : '0' });
            else:
                attr.update(self.type[t]['attr'])
                attr.update(self.eth_type[self.type[t]['ethname']]['attr'])
        #print "  ", attr
        return attr
def eth_get_type_attr_from_all(self, type, module):
attr = {}
if module in self.all_type_attr and type in self.all_type_attr[module]:
attr = self.all_type_attr[module][type]
return attr
def get_ttag_from_all(self, type, module):
ttag = None
if module in self.all_tags and type in self.all_tags[module]:
ttag = self.all_tags[module][type]
return ttag
def get_val_from_all(self, nm, module):
val = None
if module in self.all_vals and nm in self.all_vals[module]:
val = self.all_vals[module][nm]
return val
def get_obj_repr(self, ident, flds=[], not_flds=[]):
def set_type_fn(cls, field, fnfield):
obj[fnfield + '_fn'] = 'NULL'
obj[fnfield + '_pdu'] = 'NULL'
if field in val and isinstance(val[field], Type_Ref):
p = val[field].eth_type_default_pars(self, '')
obj[fnfield + '_fn'] = p['TYPE_REF_FN']
obj[fnfield + '_fn'] = obj[fnfield + '_fn'] % p # one iteration
if (self.conform.check_item('PDU', cls + '.' + field)):
obj[fnfield + '_pdu'] = 'dissect_' + self.field[val[field].val]['ethname']
return
# end of get_type_fn()
obj = { '_name' : ident, '_ident' : asn2c(ident)}
obj['_class'] = self.oassign[ident].cls
obj['_module'] = self.oassign[ident].module
val = self.oassign[ident].val
for f in flds:
if f not in val:
return None
for f in not_flds:
if f in val:
return None
for f in list(val.keys()):
if isinstance(val[f], Node):
obj[f] = val[f].fld_obj_repr(self)
else:
obj[f] = str(val[f])
if (obj['_class'] == 'TYPE-IDENTIFIER') or (obj['_class'] == 'ABSTRACT-SYNTAX'):
set_type_fn(obj['_class'], '&Type', '_type')
if (obj['_class'] == 'OPERATION'):
set_type_fn(obj['_class'], '&ArgumentType', '_argument')
set_type_fn(obj['_class'], '&ResultType', '_result')
if (obj['_class'] == 'ERROR'):
set_type_fn(obj['_class'], '&ParameterType', '_parameter')
return obj
#--- eth_reg_module -----------------------------------------------------------
def eth_reg_module(self, module):
#print "eth_reg_module(module='%s')" % (module)
name = module.get_name()
self.modules.append([name, module.get_proto(self)])
if name in self.module:
raise DuplicateError("module", name)
self.module[name] = []
self.module_ord.append(name)
    #--- eth_module_dep_add ------------------------------------------------------------
    def eth_module_dep_add(self, module, dep):
        """Record that *module* imports from (depends on) module *dep*."""
        self.module[module].append(dep)
#--- eth_exports ------------------------------------------------------------
def eth_exports(self, exports):
self.exports_all = False
if ((len(exports) == 1) and (exports[0] == 'ALL')):
self.exports_all = True
return
for e in (exports):
if isinstance(e, Type_Ref):
self.exports.append(e.val)
elif isinstance(e, Class_Ref):
self.cexports.append(e.val)
else:
self.vexports.append(e)
#--- eth_reg_assign ---------------------------------------------------------
def eth_reg_assign(self, ident, val, virt=False):
#print "eth_reg_assign(ident='%s')" % (ident)
if ident in self.assign:
raise DuplicateError("assignment", ident)
self.assign[ident] = { 'val' : val , 'virt' : virt }
self.assign_ord.append(ident)
if (self.exports_all):
self.exports.append(ident)
#--- eth_reg_vassign --------------------------------------------------------
def eth_reg_vassign(self, vassign):
ident = vassign.ident
#print "eth_reg_vassign(ident='%s')" % (ident)
if ident in self.vassign:
raise DuplicateError("value assignment", ident)
self.vassign[ident] = vassign
self.vassign_ord.append(ident)
if (self.exports_all):
self.vexports.append(ident)
#--- eth_reg_oassign --------------------------------------------------------
def eth_reg_oassign(self, oassign):
ident = oassign.ident
#print "eth_reg_oassign(ident='%s')" % (ident)
if ident in self.oassign:
if self.oassign[ident] == oassign:
return # OK - already defined
else:
raise DuplicateError("information object assignment", ident)
self.oassign[ident] = oassign
self.oassign_ord.append(ident)
self.oassign_cls.setdefault(oassign.cls, []).append(ident)
    #--- eth_import_type --------------------------------------------------------
    def eth_import_type(self, ident, mod, proto):
        """Register type *ident* as imported from module *mod* (protocol *proto*).

        Re-registration matching an existing local or imported entry is a
        no-op; a conflicting one raises DuplicateError.  Conformance-file
        attribute/tag overrides with a module-qualified key ($mod$ident)
        take precedence over plain ones.
        """
        #print "eth_import_type(ident='%s', mod='%s', prot='%s')" % (ident, mod, proto)
        if ident in self.type:
            #print "already defined '%s' import=%s, module=%s" % (ident, str(self.type[ident]['import']), self.type[ident].get('module', '-'))
            if not self.type[ident]['import'] and (self.type[ident]['module'] == mod) :
                return # OK - already defined
            elif self.type[ident]['import'] and (self.type[ident]['import'] == mod) :
                return # OK - already imported
            else:
                raise DuplicateError("type", ident)
        self.type[ident] = {'import'  : mod, 'proto'   : proto,
                            'ethname' : '' }
        # placeholder attributes; real ones may come from the source module
        self.type[ident]['attr'] = { 'TYPE' : 'FT_NONE', 'DISPLAY' : 'BASE_NONE',
                                     'STRINGS' : 'NULL', 'BITMASK' : '0' }
        mident = "$%s$%s" % (mod, ident)
        if (self.conform.check_item('TYPE_ATTR', mident)):
            self.type[ident]['attr'].update(self.conform.use_item('TYPE_ATTR', mident))
        else:
            self.type[ident]['attr'].update(self.conform.use_item('TYPE_ATTR', ident))
        if (self.conform.check_item('IMPORT_TAG', mident)):
            self.conform.copy_item('IMPORT_TAG', ident, mident)
        self.type_imp.append(ident)
#--- dummy_import_type --------------------------------------------------------
def dummy_import_type(self, ident):
# dummy imported
if ident in self.type:
raise Exception("Try to dummy import for existing type :%s" % ident)
ethtype = asn2c(ident)
self.type[ident] = {'import' : 'xxx', 'proto' : 'xxx',
'ethname' : ethtype }
self.type[ident]['attr'] = { 'TYPE' : 'FT_NONE', 'DISPLAY' : 'BASE_NONE',
'STRINGS' : 'NULL', 'BITMASK' : '0' }
self.eth_type[ethtype] = { 'import' : 'xxx', 'proto' : 'xxx' , 'attr' : {}, 'ref' : []}
print("Dummy imported: %s (%s)" % (ident, ethtype))
return ethtype
    #--- eth_import_class --------------------------------------------------------
    def eth_import_class(self, ident, mod, proto):
        """Register object class *ident* as imported from module *mod*.

        Re-registration matching an existing local or imported entry is a
        no-op; a conflicting one raises DuplicateError.
        """
        #print "eth_import_class(ident='%s', mod='%s', prot='%s')" % (ident, mod, proto)
        if ident in self.objectclass:
            #print "already defined import=%s, module=%s" % (str(self.objectclass[ident]['import']), self.objectclass[ident]['module'])
            if not self.objectclass[ident]['import'] and (self.objectclass[ident]['module'] == mod) :
                return # OK - already defined
            elif self.objectclass[ident]['import'] and (self.objectclass[ident]['import'] == mod) :
                return # OK - already imported
            else:
                raise DuplicateError("object class", ident)
        self.objectclass[ident] = {'import'  : mod, 'proto'   : proto,
                            'ethname' : '' }
        self.objectclass_imp.append(ident)
    #--- eth_import_value -------------------------------------------------------
    def eth_import_value(self, ident, mod, proto):
        """Register value *ident* as imported from module *mod*.

        Re-registration matching an existing local or imported entry is a
        no-op; a conflicting one raises DuplicateError.
        """
        #print "eth_import_value(ident='%s', mod='%s', prot='%s')" % (ident, mod, proto)
        if ident in self.value:
            #print "already defined import=%s, module=%s" % (str(self.value[ident]['import']), self.value[ident]['module'])
            if not self.value[ident]['import'] and (self.value[ident]['module'] == mod) :
                return # OK - already defined
            elif self.value[ident]['import'] and (self.value[ident]['import'] == mod) :
                return # OK - already imported
            else:
                raise DuplicateError("value", ident)
        self.value[ident] = {'import'  : mod, 'proto'   : proto,
                             'ethname' : ''}
        self.value_imp.append(ident)
#--- eth_sel_req ------------------------------------------------------------
def eth_sel_req(self, typ, sel):
key = typ + '.' + sel
if key not in self.sel_req:
self.sel_req[key] = { 'typ' : typ , 'sel' : sel}
self.sel_req_ord.append(key)
return key
    #--- eth_comp_req ------------------------------------------------------------
    def eth_comp_req(self, type):
        """Mark *type* as needing COMPONENTS OF expansion in eth_prepare()."""
        self.comp_req_ord.append(type)
#--- eth_dep_add ------------------------------------------------------------
def eth_dep_add(self, type, dep):
if type not in self.type_dep:
self.type_dep[type] = []
self.type_dep[type].append(dep)
    #--- eth_reg_type -----------------------------------------------------------
    def eth_reg_type(self, ident, val):
        """Register type assignment *ident* ::= *val* in the current module.

        A previously imported entry from the same module is replaced; any
        other duplicate raises DuplicateError.  Conformance-file directives
        (EXPORTS, MAKE_ENUM, TYPE_RENAME, TYPE_ATTR, PDU, ...) are applied
        here, and a PDU field is registered when requested.
        """
        #print "eth_reg_type(ident='%s', type='%s')" % (ident, val.type)
        if ident in self.type:
            if self.type[ident]['import'] and (self.type[ident]['import'] == self.Module()) :
                # replace imported type
                del self.type[ident]
                self.type_imp.remove(ident)
            else:
                raise DuplicateError("type", ident)
        val.ident = ident
        self.type[ident] = { 'val' : val, 'import' : None }
        self.type[ident]['module'] = self.Module()
        self.type[ident]['proto'] = self.proto
        # inner (path-like) idents get a generated name; top-level ones keep theirs
        if len(ident.split('/')) > 1:
            self.type[ident]['tname'] = val.eth_tname()
        else:
            self.type[ident]['tname'] = asn2c(ident)
        self.type[ident]['export'] = self.conform.use_item('EXPORTS', ident)
        self.type[ident]['enum'] = self.conform.use_item('MAKE_ENUM', ident)
        self.type[ident]['vals_ext'] = self.conform.use_item('USE_VALS_EXT', ident)
        self.type[ident]['user_def'] = self.conform.use_item('USER_DEFINED', ident)
        self.type[ident]['no_emit'] = self.conform.use_item('NO_EMIT', ident)
        self.type[ident]['tname'] = self.conform.use_item('TYPE_RENAME', ident, val_dflt=self.type[ident]['tname'])
        self.type[ident]['ethname'] = ''
        # reference-like types get their attributes resolved later
        if (val.type == 'Type_Ref') or (val.type == 'TaggedType') or (val.type == 'SelectionType') :
            self.type[ident]['attr'] = {}
        else:
            (ftype, display) = val.eth_ftype(self)
            self.type[ident]['attr'] = { 'TYPE' : ftype, 'DISPLAY' : display,
                                         'STRINGS' : val.eth_strings(), 'BITMASK' : '0' }
        self.type[ident]['attr'].update(self.conform.use_item('TYPE_ATTR', ident))
        self.type_ord.append(ident)
        # PDU
        if (self.conform.check_item('PDU', ident)):
            self.eth_reg_field(ident, ident, impl=val.HasImplicitTag(self), pdu=self.conform.use_item('PDU', ident))
    #--- eth_reg_objectclass ----------------------------------------------------
    def eth_reg_objectclass(self, ident, val):
        """Register object class assignment *ident* in the current module.

        Replaces a previous import from the same module, ignores a duplicate
        CLASS1 ::= CLASS2 alias, and raises DuplicateError otherwise.
        """
        #print "eth_reg_objectclass(ident='%s')" % (ident)
        if ident in self.objectclass:
            if self.objectclass[ident]['import'] and (self.objectclass[ident]['import'] == self.Module()) :
                # replace imported object class
                del self.objectclass[ident]
                self.objectclass_imp.remove(ident)
            elif isinstance(self.objectclass[ident]['val'], Class_Ref) and \
                 isinstance(val, Class_Ref) and \
                 (self.objectclass[ident]['val'].val == val.val):
                pass # ignore duplicated CLASS1 ::= CLASS2
            else:
                raise DuplicateError("object class", ident)
        self.objectclass[ident] = { 'import' : None, 'module' : self.Module(), 'proto' : self.proto }
        self.objectclass[ident]['val'] = val
        self.objectclass[ident]['export'] = self.conform.use_item('EXPORTS', ident)
        self.objectclass_ord.append(ident)
    #--- eth_reg_value ----------------------------------------------------------
    def eth_reg_value(self, ident, type, value, ethname=None):
        """Register value assignment *ident* of *type* with *value*.

        Replaces a previous import from the same module; a duplicate with an
        explicit *ethname* only updates the C-side name, otherwise raises
        DuplicateError.
        """
        #print "eth_reg_value(ident='%s')" % (ident)
        if ident in self.value:
            if self.value[ident]['import'] and (self.value[ident]['import'] == self.Module()) :
                # replace imported value
                del self.value[ident]
                self.value_imp.remove(ident)
            elif ethname:
                self.value[ident]['ethname'] = ethname
                return
            else:
                raise DuplicateError("value", ident)
        self.value[ident] = { 'import' : None, 'module' : self.Module(), 'proto' : self.proto,
                              'type' : type, 'value' : value,
                              'no_emit' : False }
        self.value[ident]['export'] = self.conform.use_item('EXPORTS', ident)
        self.value[ident]['ethname'] = ''
        if (ethname): self.value[ident]['ethname'] = ethname
        self.value_ord.append(ident)
    #--- eth_reg_field ----------------------------------------------------------
    def eth_reg_field(self, ident, type, idx='', parent=None, impl=False, pdu=None):
        """Register field *ident* of type *type*.

        Derives the hf NAME/ABBREV attributes (with special handling for the
        synthetic Sequence/Set-of item field), applies conformance FIELD_ATTR
        overrides, routes the field to the PDU or plain field list, and
        records the parent->type dependency.
        """
        #print "eth_reg_field(ident='%s', type='%s')" % (ident, type)
        if ident in self.field:
            if pdu and (type == self.field[ident]['type']):
                pass # OK already created PDU
            else:
                raise DuplicateError("field", ident)
        self.field[ident] = {'type' : type, 'idx' : idx, 'impl' : impl, 'pdu' : pdu,
                             'modified' : '', 'attr' : {} }
        name = ident.split('/')[-1]
        if self.remove_prefix and name.startswith(self.remove_prefix):
            name = name[len(self.remove_prefix):]
        if len(ident.split('/')) > 1 and name == ITEM_FIELD_NAME:  # Sequence/Set of type
            # the item field is named after its container or its type
            if len(self.field[ident]['type'].split('/')) > 1:
                self.field[ident]['attr']['NAME'] = '"%s item"' % ident.split('/')[-2]
                self.field[ident]['attr']['ABBREV'] = asn2c(ident.split('/')[-2] + name)
            else:
                self.field[ident]['attr']['NAME'] = '"%s"' % self.field[ident]['type']
                self.field[ident]['attr']['ABBREV'] = asn2c(self.field[ident]['type'])
        else:
            self.field[ident]['attr']['NAME'] = '"%s"' % name
            self.field[ident]['attr']['ABBREV'] = asn2c(name)
        if self.conform.check_item('FIELD_ATTR', ident):
            # 'modified' marks the field so it is not merged with same-named ones
            self.field[ident]['modified'] = '#' + str(id(self))
            self.field[ident]['attr'].update(self.conform.use_item('FIELD_ATTR', ident))
        if (pdu):
            self.field[ident]['pdu']['export'] = (self.conform.use_item('EXPORTS', ident + '_PDU') != 0)
            self.pdu_ord.append(ident)
        else:
            self.field_ord.append(ident)
        if parent:
            self.eth_dep_add(parent, type)
def eth_dummy_eag_field_required(self):
if (not self.dummy_eag_field):
self.dummy_eag_field = 'dummy_eag_field'
    #--- eth_clean --------------------------------------------------------------
    def eth_clean(self):
        """Reset all per-run registration tables to their empty state.

        Called before (re)processing input so that ASN.1 tables, module
        lists and all derived eth_* output tables start fresh.
        """
        self.proto = self.proto_opt;
        #--- ASN.1 tables ----------------
        self.assign = {}
        self.assign_ord = []
        self.field = {}
        self.pdu_ord = []
        self.field_ord = []
        self.type = {}
        self.type_ord = []
        self.type_imp = []
        self.type_dep = {}
        self.sel_req = {}
        self.sel_req_ord = []
        self.comp_req_ord = []
        self.vassign = {}
        self.vassign_ord = []
        self.value = {}
        self.value_ord = []
        self.value_imp = []
        self.objectclass = {}
        self.objectclass_ord = []
        self.objectclass_imp = []
        self.oassign = {}
        self.oassign_ord = []
        self.oassign_cls = {}
        #--- Modules ------------
        self.modules = []
        self.exports_all = False
        self.exports = []
        self.cexports = []
        self.vexports = []
        #--- types -------------------
        self.eth_type = {}
        self.eth_type_ord = []
        self.eth_export_ord = []
        self.eth_type_dupl = {}
        self.named_bit = []
        #--- value dependencies -------------------
        self.value_dep = {}
        #--- values -------------------
        self.eth_value = {}
        self.eth_value_ord = []
        #--- fields -------------------------
        self.eth_hf = {}
        self.eth_hf_ord = []
        self.eth_hfpdu_ord = []
        self.eth_hf_dupl = {}
        self.dummy_eag_field = None
        #--- type dependencies -------------------
        self.eth_type_ord1 = []
        self.eth_dep_cycle = []
        self.dep_cycle_eth_type = {}
        #--- value dependencies and export -------------------
        self.eth_value_ord1 = []
        self.eth_vexport_ord = []
    #--- eth_prepare ------------------------------------------------------------
    def eth_prepare(self):
        """Resolve all registered ASN.1 items into the output (eth_*) tables.

        Runs after parsing: creates required PDUs, folds value assignments
        into INTEGER types, expands COMPONENTS OF and SelectionType
        requests, builds the eth_type/eth_value/eth_hf tables (handling
        name collisions with numeric suffixes), computes type dependency
        order, and collects cross-module exports of tags, attributes and
        values.
        """
        self.eproto = asn2c(self.proto)
        #--- dummy types/fields for PDU registration ---
        nm = 'NULL'
        if (self.conform.check_item('PDU', nm)):
            self.eth_reg_type('_dummy/'+nm, NullType())
            self.eth_reg_field(nm, '_dummy/'+nm, pdu=self.conform.use_item('PDU', nm))
        #--- required PDUs ----------------------------
        for t in self.type_ord:
            pdu = self.type[t]['val'].eth_need_pdu(self)
            if not pdu: continue
            f = pdu['type']
            pdu['reg'] = None
            pdu['hidden'] = False
            pdu['need_decl'] = True
            if f not in self.field:
                self.eth_reg_field(f, f, pdu=pdu)
        #--- values -> named values -------------------
        t_for_update = {}
        for v in self.value_ord:
            if (self.value[v]['type'].type == 'Type_Ref') or self.conform.check_item('ASSIGN_VALUE_TO_TYPE', v):
                if self.conform.check_item('ASSIGN_VALUE_TO_TYPE', v):
                    tnm = self.conform.use_item('ASSIGN_VALUE_TO_TYPE', v)
                else:
                    tnm = self.value[v]['type'].val
                if tnm in self.type \
                   and not self.type[tnm]['import'] \
                   and (self.type[tnm]['val'].type == 'IntegerType'):
                    self.type[tnm]['val'].add_named_value(v, self.value[v]['value'])
                    self.value[v]['no_emit'] = True
                    t_for_update[tnm] = True
        for t in list(t_for_update.keys()):
            self.type[t]['attr']['STRINGS'] = self.type[t]['val'].eth_strings()
            self.type[t]['attr'].update(self.conform.use_item('TYPE_ATTR', t))
        #--- required components of ---------------------------
        #print "self.comp_req_ord = ", self.comp_req_ord
        for t in self.comp_req_ord:
            self.type[t]['val'].eth_reg_sub(t, self, components_available=True)
        #--- required selection types ---------------------------
        #print "self.sel_req_ord = ", self.sel_req_ord
        for t in self.sel_req_ord:
            tt = self.sel_req[t]['typ']
            if tt not in self.type:
                self.dummy_import_type(t)
            elif self.type[tt]['import']:
                self.eth_import_type(t, self.type[tt]['import'], self.type[tt]['proto'])
            else:
                self.type[tt]['val'].sel_req(t, self.sel_req[t]['sel'], self)
        #--- types -------------------
        for t in self.type_imp: # imported types
            nm = asn2c(t)
            self.eth_type[nm] = { 'import' : self.type[t]['import'],
                                  'proto' : asn2c(self.type[t]['proto']),
                                  'attr' : {}, 'ref' : []}
            self.eth_type[nm]['attr'].update(self.conform.use_item('ETYPE_ATTR', nm))
            self.type[t]['ethname'] = nm
        for t in self.type_ord: # dummy import for missing type reference
            tp = self.type[t]['val']
            #print "X : %s %s " % (t, tp.type)
            if isinstance(tp, TaggedType):
                #print "%s : %s " % (tp.type, t)
                tp = tp.val
            if isinstance(tp, Type_Ref):
                #print "%s : %s ::= %s " % (tp.type, t, tp.val)
                if tp.val not in self.type:
                    self.dummy_import_type(tp.val)
        # assign C names to locally defined types, resolving collisions
        # with _NN suffixes recorded in eth_type_dupl
        for t in self.type_ord:
            nm = self.type[t]['tname']
            if ((nm.find('#') >= 0) or
                ((len(t.split('/'))>1) and
                 (self.conform.get_fn_presence(t) or self.conform.check_item('FN_PARS', t) or
                  self.conform.get_fn_presence('/'.join((t,ITEM_FIELD_NAME))) or self.conform.check_item('FN_PARS', '/'.join((t,ITEM_FIELD_NAME)))) and
                 not self.conform.check_item('TYPE_RENAME', t))):
                if len(t.split('/')) == 2 and t.split('/')[1] == ITEM_FIELD_NAME:  # Sequence of type at the 1st level
                    nm = t.split('/')[0] + t.split('/')[1]
                elif t.split('/')[-1] == ITEM_FIELD_NAME:  # Sequence/Set of type at next levels
                    nm = 'T_' + self.conform.use_item('FIELD_RENAME', '/'.join(t.split('/')[0:-1]), val_dflt=t.split('/')[-2]) + t.split('/')[-1]
                elif t.split('/')[-1] == UNTAG_TYPE_NAME:  # Untagged type
                    nm = self.type['/'.join(t.split('/')[0:-1])]['ethname'] + '_U'
                else:
                    nm = 'T_' + self.conform.use_item('FIELD_RENAME', t, val_dflt=t.split('/')[-1])
                nm = asn2c(nm)
                if nm in self.eth_type:
                    if nm in self.eth_type_dupl:
                        self.eth_type_dupl[nm].append(t)
                    else:
                        self.eth_type_dupl[nm] = [self.eth_type[nm]['ref'][0], t]
                    nm += '_%02d' % (len(self.eth_type_dupl[nm])-1)
            if nm in self.eth_type:
                self.eth_type[nm]['ref'].append(t)
            else:
                self.eth_type_ord.append(nm)
                self.eth_type[nm] = { 'import' : None, 'proto' : self.eproto, 'export' : 0, 'enum' : 0, 'vals_ext' : 0,
                                      'user_def' : EF_TYPE|EF_VALS, 'no_emit' : EF_TYPE|EF_VALS,
                                      'val' : self.type[t]['val'],
                                      'attr' : {}, 'ref' : [t]}
            self.type[t]['ethname'] = nm
            if (not self.eth_type[nm]['export'] and self.type[t]['export']):  # new export
                self.eth_export_ord.append(nm)
            self.eth_type[nm]['export'] |= self.type[t]['export']
            self.eth_type[nm]['enum'] |= self.type[t]['enum']
            self.eth_type[nm]['vals_ext'] |= self.type[t]['vals_ext']
            self.eth_type[nm]['user_def'] &= self.type[t]['user_def']
            self.eth_type[nm]['no_emit'] &= self.type[t]['no_emit']
            if self.type[t]['attr'].get('STRINGS') == '$$':
                use_ext = self.type[t]['vals_ext']
                if (use_ext):
                    self.eth_type[nm]['attr']['STRINGS'] = '&%s_ext' % (self.eth_vals_nm(nm))
                else:
                    self.eth_type[nm]['attr']['STRINGS'] = 'VALS(%s)' % (self.eth_vals_nm(nm))
            self.eth_type[nm]['attr'].update(self.conform.use_item('ETYPE_ATTR', nm))
        for t in self.eth_type_ord:
            bits = self.eth_type[t]['val'].eth_named_bits()
            if (bits):
                for (val, id) in bits:
                    self.named_bit.append({'name' : id, 'val' : val,
                                           'ethname' : 'hf_%s_%s_%s' % (self.eproto, t, asn2c(id)),
                                           'ftype'   : 'FT_BOOLEAN', 'display' : '8',
                                           'strings' : 'NULL',
                                           'bitmask' : '0x'+('80','40','20','10','08','04','02','01')[val%8]})
            if self.eth_type[t]['val'].eth_need_tree():
                self.eth_type[t]['tree'] = "ett_%s_%s" % (self.eth_type[t]['proto'], t)
            else:
                self.eth_type[t]['tree'] = None
        #--- register values from enums ------------
        for t in self.eth_type_ord:
            if (self.eth_type[t]['val'].eth_has_enum(t, self)):
                self.eth_type[t]['val'].reg_enum_vals(t, self)
        #--- value dependencies -------------------
        for v in self.value_ord:
            if isinstance (self.value[v]['value'], Value):
                dep = self.value[v]['value'].get_dep()
            else:
                dep = self.value[v]['value']
            if dep and dep in self.value:
                self.value_dep.setdefault(v, []).append(dep)
        #--- exports all necessary values
        for v in self.value_ord:
            if not self.value[v]['export']: continue
            deparr = self.value_dep.get(v, [])
            while deparr:
                d = deparr.pop()
                if not self.value[d]['import']:
                    if not self.value[d]['export']:
                        self.value[d]['export'] = EF_TYPE
                        deparr.extend(self.value_dep.get(d, []))
        #--- values -------------------
        for v in self.value_imp:
            nm = asn2c(v)
            self.eth_value[nm] = { 'import' : self.value[v]['import'],
                                   'proto' : asn2c(self.value[v]['proto']),
                                   'ref' : []}
            self.value[v]['ethname'] = nm
        for v in self.value_ord:
            if (self.value[v]['ethname']):
                continue
            if (self.value[v]['no_emit']):
                continue
            nm = asn2c(v)
            self.eth_value[nm] = { 'import' : None,
                                   'proto' : asn2c(self.value[v]['proto']),
                                   'export' : self.value[v]['export'], 'ref' : [v] }
            self.eth_value[nm]['value'] = self.value[v]['value']
            self.eth_value_ord.append(nm)
            self.value[v]['ethname'] = nm
        #--- fields -------------------------
        # assign hf names; fields of the same name and type share one hf
        # entry, others get _NN suffixes tracked in eth_hf_dupl
        for f in (self.pdu_ord + self.field_ord):
            if len(f.split('/')) > 1 and f.split('/')[-1] == ITEM_FIELD_NAME:  # Sequence/Set of type
                nm = self.conform.use_item('FIELD_RENAME', '/'.join(f.split('/')[0:-1]), val_dflt=f.split('/')[-2]) + f.split('/')[-1]
            else:
                nm = f.split('/')[-1]
            nm = self.conform.use_item('FIELD_RENAME', f, val_dflt=nm)
            nm = asn2c(nm)
            if (self.field[f]['pdu']):
                nm += '_PDU'
                if (not self.merge_modules or self.field[f]['pdu']['export']):
                    nm = self.eproto + '_' + nm
            t = self.field[f]['type']
            if t in self.type:
                ethtype = self.type[t]['ethname']
            else:  # undefined type
                ethtype = self.dummy_import_type(t)
            ethtypemod = ethtype + self.field[f]['modified']
            if nm in self.eth_hf:
                if nm in self.eth_hf_dupl:
                    if ethtypemod in self.eth_hf_dupl[nm]:
                        nm = self.eth_hf_dupl[nm][ethtypemod]
                        self.eth_hf[nm]['ref'].append(f)
                        self.field[f]['ethname'] = nm
                        continue
                    else:
                        nmx = nm + ('_%02d' % (len(self.eth_hf_dupl[nm])))
                        self.eth_hf_dupl[nm][ethtype] = nmx
                        nm = nmx
                else:
                    if (self.eth_hf[nm]['ethtype']+self.eth_hf[nm]['modified']) == ethtypemod:
                        self.eth_hf[nm]['ref'].append(f)
                        self.field[f]['ethname'] = nm
                        continue
                    else:
                        nmx = nm + '_01'
                        self.eth_hf_dupl[nm] = {self.eth_hf[nm]['ethtype']+self.eth_hf[nm]['modified'] : nm, \
                                                ethtypemod : nmx}
                        nm = nmx
            if (self.field[f]['pdu']):
                self.eth_hfpdu_ord.append(nm)
            else:
                self.eth_hf_ord.append(nm)
            fullname = 'hf_%s_%s' % (self.eproto, nm)
            attr = self.eth_get_type_attr(self.field[f]['type']).copy()
            attr.update(self.field[f]['attr'])
            if (self.NAPI() and 'NAME' in attr):
                attr['NAME'] += self.field[f]['idx']
            attr.update(self.conform.use_item('EFIELD_ATTR', nm))
            use_vals_ext = self.eth_type[ethtype].get('vals_ext')
            if (use_vals_ext):
                attr['DISPLAY'] += '|BASE_EXT_STRING'
            self.eth_hf[nm] = {'fullname' : fullname, 'pdu' : self.field[f]['pdu'],
                               'ethtype' : ethtype, 'modified' : self.field[f]['modified'],
                               'attr' : attr.copy(),
                               'ref' : [f]}
            self.field[f]['ethname'] = nm
        if (self.dummy_eag_field):
            self.dummy_eag_field = 'hf_%s_%s' % (self.eproto, self.dummy_eag_field)
        #--- type dependencies -------------------
        (self.eth_type_ord1, self.eth_dep_cycle) = dependency_compute(self.type_ord, self.type_dep, map_fn = lambda t: self.type[t]['ethname'], ignore_fn = lambda t: self.type[t]['import'])
        i = 0
        while i < len(self.eth_dep_cycle):
            t = self.type[self.eth_dep_cycle[i][0]]['ethname']
            self.dep_cycle_eth_type.setdefault(t, []).append(i)
            i += 1
        #--- value dependencies and export -------------------
        for v in self.eth_value_ord:
            if self.eth_value[v]['export']:
                self.eth_vexport_ord.append(v)
            else:
                self.eth_value_ord1.append(v)
        #--- export tags, values, ... ---
        for t in self.exports:
            if t not in self.type:
                continue
            if self.type[t]['import']:
                continue
            m = self.type[t]['module']
            if not self.Per():
                if m not in self.all_tags:
                    self.all_tags[m] = {}
                self.all_tags[m][t] = self.type[t]['val'].GetTTag(self)
            if m not in self.all_type_attr:
                self.all_type_attr[m] = {}
            self.all_type_attr[m][t] = self.eth_get_type_attr(t).copy()
        for v in self.vexports:
            if v not in self.value:
                continue
            if self.value[v]['import']:
                continue
            m = self.value[v]['module']
            if m not in self.all_vals:
                self.all_vals[m] = {}
            vv = self.value[v]['value']
            if isinstance (vv, Value):
                vv = vv.to_str(self)
            self.all_vals[m][v] = vv
#--- eth_vals_nm ------------------------------------------------------------
def eth_vals_nm(self, tname):
out = ""
if (not self.eth_type[tname]['export'] & EF_NO_PROT):
out += "%s_" % (self.eproto)
out += "%s_vals" % (tname)
return out
    #--- eth_vals ---------------------------------------------------------------
    def eth_vals(self, tname, vals):
        """Emit the C value_string table for *tname* from (value, name) pairs.

        Sorted numerically when an extended (binary-search) value_string_ext
        is requested; enum item names replace raw numbers when the type also
        generates an enum.
        """
        out = ""
        has_enum = self.eth_type[tname]['enum'] & EF_ENUM
        use_ext = self.eth_type[tname]['vals_ext']
        if (use_ext):
            vals.sort(key=lambda vals_entry: int(vals_entry[0]))
        if (not self.eth_type[tname]['export'] & EF_VALS):
            out += 'static '
        if (self.eth_type[tname]['export'] & EF_VALS) and (self.eth_type[tname]['export'] & EF_TABLE):
            out += 'static '
        out += "const value_string %s[] = {\n" % (self.eth_vals_nm(tname))
        for (val, id) in vals:
            if (has_enum):
                vval = self.eth_enum_item(tname, id)
            else:
                vval = val
            out += '  { %3s, "%s" },\n' % (vval, id)
        out += "  { 0, NULL }\n};\n"
        if (use_ext):
            out += "\nstatic value_string_ext %s_ext = VALUE_STRING_EXT_INIT(%s);\n" % (self.eth_vals_nm(tname), self.eth_vals_nm(tname))
        return out
    #--- eth_enum_prefix ------------------------------------------------------------
    def eth_enum_prefix(self, tname, type=False):
        """Build the name prefix for enum items/typedefs generated for *tname*.

        Protocol and type-name parts are included or omitted according to
        the EF_NO_PROT/EF_NO_TYPE flags; EF_UCASE upper-cases the result.
        """
        out = ""
        if (self.eth_type[tname]['export'] & EF_ENUM):
            no_prot = self.eth_type[tname]['export'] & EF_NO_PROT
        else:
            no_prot = self.eth_type[tname]['enum'] & EF_NO_PROT
        if (not no_prot):
            out += self.eproto
        if ((not self.eth_type[tname]['enum'] & EF_NO_TYPE) or type):
            if (out): out += '_'
            out += tname
        if (self.eth_type[tname]['enum'] & EF_UCASE):
            out = out.upper()
        if (out): out += '_'
        return out
#--- eth_enum_nm ------------------------------------------------------------
def eth_enum_nm(self, tname):
out = self.eth_enum_prefix(tname, type=True)
out += "enum"
return out
#--- eth_enum_item ---------------------------------------------------------------
def eth_enum_item(self, tname, ident):
out = self.eth_enum_prefix(tname)
out += asn2c(ident)
if (self.eth_type[tname]['enum'] & EF_UCASE):
out = out.upper()
return out
    #--- eth_enum ---------------------------------------------------------------
    def eth_enum(self, tname, vals):
        """Emit enumerated values for *tname*: either #define lines (EF_DEFINE)
        or a C typedef enum, from (value, name) pairs.
        """
        out = ""
        if (self.eth_type[tname]['enum'] & EF_DEFINE):
            out += "/* enumerated values for %s */\n" % (tname)
            for (val, id) in vals:
                out += '#define %-12s %3s\n' % (self.eth_enum_item(tname, id), val)
        else:
            out += "typedef enum _%s {\n" % (self.eth_enum_nm(tname))
            first_line = 1
            for (val, id) in vals:
                if (first_line == 1):
                    first_line = 0
                else:
                    out += ",\n"
                out += '  %-12s = %3s' % (self.eth_enum_item(tname, id), val)
            out += "\n} %s;\n" % (self.eth_enum_nm(tname))
        return out
#--- eth_bits ---------------------------------------------------------------
def eth_bits(self, tname, bits):
out = ""
out += "static const "
out += "asn_namedbit %(TABLE)s[] = {\n"
for (val, id) in bits:
out += ' { %2d, &hf_%s_%s_%s, -1, -1, "%s", NULL },\n' % (val, self.eproto, tname, asn2c(id), id)
out += " { 0, NULL, 0, 0, NULL, NULL }\n};\n"
return out
#--- eth_type_fn_h ----------------------------------------------------------
def eth_type_fn_h(self, tname):
out = ""
if (not self.eth_type[tname]['export'] & EF_TYPE):
out += 'static '
out += "int "
if (self.Ber()):
out += "dissect_%s_%s(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_)" % (self.eth_type[tname]['proto'], tname)
elif (self.Per()):
out += "dissect_%s_%s(tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_)" % (self.eth_type[tname]['proto'], tname)
out += ";\n"
return out
#--- eth_fn_call ------------------------------------------------------------
def eth_fn_call(self, fname, ret=None, indent=2, par=None):
out = indent * ' '
if (ret):
if (ret == 'return'):
out += 'return '
else:
out += ret + ' = '
out += fname + '('
ind = len(out)
for i in range(len(par)):
if (i>0): out += ind * ' '
out += ', '.join(par[i])
if (i<(len(par)-1)): out += ',\n'
out += ');\n'
return out
    #--- eth_type_fn_hdr --------------------------------------------------------
    def eth_type_fn_hdr(self, tname):
        """Return the opening of the dissector function for *tname* (signature
        plus an optional user FN_HDR snippet from the conformance file).
        """
        out = '\n'
        if (not self.eth_type[tname]['export'] & EF_TYPE):
            out += 'static '
        out += "int\n"
        if (self.Ber()):
            out += "dissect_%s_%s(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {\n" % (self.eth_type[tname]['proto'], tname)
        elif (self.Per()):
            out += "dissect_%s_%s(tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {\n" % (self.eth_type[tname]['proto'], tname)
        #if self.conform.get_fn_presence(tname):
        #  out += self.conform.get_fn_text(tname, 'FN_HDR')
        #el
        if self.conform.get_fn_presence(self.eth_type[tname]['ref'][0]):
            out += self.conform.get_fn_text(self.eth_type[tname]['ref'][0], 'FN_HDR')
        return out
#--- eth_type_fn_ftr --------------------------------------------------------
def eth_type_fn_ftr(self, tname):
out = '\n'
#if self.conform.get_fn_presence(tname):
# out += self.conform.get_fn_text(tname, 'FN_FTR')
#el
if self.conform.get_fn_presence(self.eth_type[tname]['ref'][0]):
out += self.conform.get_fn_text(self.eth_type[tname]['ref'][0], 'FN_FTR')
out += " return offset;\n"
out += "}\n"
return out
#--- eth_type_fn_body -------------------------------------------------------
def eth_type_fn_body(self, tname, body, pars=None):
out = body
#if self.conform.get_fn_body_presence(tname):
# out = self.conform.get_fn_text(tname, 'FN_BODY')
#el
if self.conform.get_fn_body_presence(self.eth_type[tname]['ref'][0]):
out = self.conform.get_fn_text(self.eth_type[tname]['ref'][0], 'FN_BODY')
if pars:
try:
out = out % pars
except (TypeError):
pass
return out
#--- eth_out_pdu_decl ----------------------------------------------------------
def eth_out_pdu_decl(self, f):
t = self.eth_hf[f]['ethtype']
is_new = self.eth_hf[f]['pdu']['new']
out = ''
if (not self.eth_hf[f]['pdu']['export']):
out += 'static '
if (is_new):
out += 'int '
out += 'dissect_'+f+'(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, void *data _U_);\n'
else:
out += 'void '
out += 'dissect_'+f+'(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_);\n'
return out
    #--- eth_output_hf ----------------------------------------------------------
    def eth_output_hf (self):
        """Write the 'hf' output file: one static int declaration per header
        field, named-bit fields, and the dummy extension-group field.
        Skipped entirely when there is nothing to declare.
        """
        if not len(self.eth_hf_ord) and not len(self.eth_hfpdu_ord) and not len(self.named_bit): return
        fx = self.output.file_open('hf')
        for f in (self.eth_hfpdu_ord + self.eth_hf_ord):
            fx.write("%-50s/* %s */\n" % ("static int %s = -1;  " % (self.eth_hf[f]['fullname']), self.eth_hf[f]['ethtype']))
        if (self.named_bit):
            fx.write('/* named bits */\n')
        for nb in self.named_bit:
            fx.write("static int %s = -1;\n" % (nb['ethname']))
        if (self.dummy_eag_field):
            fx.write("static int %s = -1; /* never registered */\n" % (self.dummy_eag_field))
        self.output.file_close(fx)
    #--- eth_output_hf_arr ------------------------------------------------------
    def eth_output_hf_arr (self):
        """Write the 'hfarr' output file: hf_register_info array entries for
        every header field and named bit, deriving a blurb from the type
        name unless it adds no information over the field name.
        """
        if not len(self.eth_hf_ord) and not len(self.eth_hfpdu_ord) and not len(self.named_bit): return
        fx = self.output.file_open('hfarr')
        for f in (self.eth_hfpdu_ord + self.eth_hf_ord):
            t = self.eth_hf[f]['ethtype']
            if self.remove_prefix and t.startswith(self.remove_prefix):
                t = t[len(self.remove_prefix):]
            name=self.eth_hf[f]['attr']['NAME']
            try:  # Python < 3
                trantab = maketrans("- ", "__")
            except:
                trantab = str.maketrans("- ", "__")
            name = name.translate(trantab)
            namelower = name.lower()
            tquoted_lower = '"' + t.lower() + '"'
            # Try to avoid giving blurbs that give no more info than the name
            if tquoted_lower == namelower or \
               t == "NULL" or \
               tquoted_lower.replace("t_", "") == namelower:
                blurb = 'NULL'
            else:
                blurb = '"%s"' % (t)
            attr = self.eth_hf[f]['attr'].copy()
            # FT_NONE abbrevs get an '_element' suffix to stay filterable
            if attr['TYPE'] == 'FT_NONE':
                attr['ABBREV'] = '"%s.%s_element"' % (self.proto, attr['ABBREV'])
            else:
                attr['ABBREV'] = '"%s.%s"' % (self.proto, attr['ABBREV'])
            if 'BLURB' not in attr:
                attr['BLURB'] = blurb
            fx.write('    { &%s,\n' % (self.eth_hf[f]['fullname']))
            fx.write('      { %(NAME)s, %(ABBREV)s,\n' % attr)
            fx.write('        %(TYPE)s, %(DISPLAY)s, %(STRINGS)s, %(BITMASK)s,\n' % attr)
            fx.write('        %(BLURB)s, HFILL }},\n' % attr)
        for nb in self.named_bit:
            fx.write('    { &%s,\n' % (nb['ethname']))
            fx.write('      { "%s", "%s.%s",\n' % (nb['name'], self.proto, nb['name']))
            fx.write('        %s, %s, %s, %s,\n' % (nb['ftype'], nb['display'], nb['strings'], nb['bitmask']))
            fx.write('        NULL, HFILL }},\n')
        self.output.file_close(fx)
#--- eth_output_ett ---------------------------------------------------------
def eth_output_ett (self):
fx = self.output.file_open('ett')
fempty = True
#fx.write("static gint ett_%s = -1;\n" % (self.eproto))
for t in self.eth_type_ord:
if self.eth_type[t]['tree']:
fx.write("static gint %s = -1;\n" % (self.eth_type[t]['tree']))
fempty = False
self.output.file_close(fx, discard=fempty)
#--- eth_output_ett_arr -----------------------------------------------------
def eth_output_ett_arr(self):
fx = self.output.file_open('ettarr')
fempty = True
#fx.write(" &ett_%s,\n" % (self.eproto))
for t in self.eth_type_ord:
if self.eth_type[t]['tree']:
fx.write(" &%s,\n" % (self.eth_type[t]['tree']))
fempty = False
self.output.file_close(fx, discard=fempty)
#--- eth_output_export ------------------------------------------------------
    def eth_output_export(self):
        """Write the 'exp.h' export header.

        Emits, for every exported type (driven by the per-type EF_* export
        flag bits): enum definitions, value_string declarations or full
        tables, extern dissector-function prototypes, and exported PDU
        dissector prototypes.
        """
        fx = self.output.file_open('exp', ext='h')
        for t in self.eth_export_ord:  # vals
            if (self.eth_type[t]['export'] & EF_ENUM) and self.eth_type[t]['val'].eth_has_enum(t, self):
                fx.write(self.eth_type[t]['val'].eth_type_enum(t, self))
            if (self.eth_type[t]['export'] & EF_VALS) and self.eth_type[t]['val'].eth_has_vals():
                if not self.eth_type[t]['export'] & EF_TABLE:
                    # declaration only; the table body stays in the dissector
                    if self.eth_type[t]['export'] & EF_WS_DLL:
                        fx.write("WS_DLL_PUBLIC ")
                    else:
                        fx.write("extern ")
                    fx.write("const value_string %s[];\n" % (self.eth_vals_nm(t)))
                else:
                    # EF_TABLE: the full value_string table goes into the header
                    fx.write(self.eth_type[t]['val'].eth_type_vals(t, self))
        for t in self.eth_export_ord:  # functions
            if (self.eth_type[t]['export'] & EF_TYPE):
                if self.eth_type[t]['export'] & EF_EXTERN:
                    if self.eth_type[t]['export'] & EF_WS_DLL:
                        fx.write("WS_DLL_PUBLIC ")
                    else:
                        fx.write("extern ")
                fx.write(self.eth_type_fn_h(t))
        for f in self.eth_hfpdu_ord:  # PDUs
            if (self.eth_hf[f]['pdu'] and self.eth_hf[f]['pdu']['export']):
                fx.write(self.eth_out_pdu_decl(f))
        self.output.file_close(fx)
#--- eth_output_expcnf ------------------------------------------------------
    def eth_output_expcnf(self):
        """Write the 'exp.cnf' exported-conformance file.

        The file can be imported by other asn2wrs runs and contains:
        the #.MODULE mapping, #.CLASS blocks for exported object classes,
        #.IMPORT_TAG entries (BER only) and #.TYPE_ATTR attributes for
        every exported type.
        """
        fx = self.output.file_open('exp', ext='cnf')
        fx.write('#.MODULE\n')
        # column width = longest module name, for aligned output
        maxw = 0
        for (m, p) in self.modules:
            if (len(m) > maxw): maxw = len(m)
        for (m, p) in self.modules:
            fx.write("%-*s  %s\n" % (maxw, m, p))
        fx.write('#.END\n\n')
        for cls in self.objectclass_ord:
            if self.objectclass[cls]['export']:
                cnm = cls
                if self.objectclass[cls]['export'] & EF_MODULE:
                    # qualify the class name with its module
                    cnm = "$%s$%s" % (self.objectclass[cls]['module'], cnm)
                fx.write('#.CLASS %s\n' % (cnm))
                maxw = 2
                for fld in self.objectclass[cls]['val'].fields:
                    w = len(fld.fld_repr()[0])
                    if (w > maxw): maxw = w
                for fld in self.objectclass[cls]['val'].fields:
                    repr = fld.fld_repr()
                    fx.write('%-*s  %s\n' % (maxw, repr[0], ' '.join(repr[1:])))
                fx.write('#.END\n\n')
        if self.Ber():
            fx.write('#.IMPORT_TAG\n')
            for t in self.eth_export_ord:  # tags
                if (self.eth_type[t]['export'] & EF_TYPE):
                    fx.write('%-24s ' % self.eth_type[t]['ref'][0])
                    fx.write('%s %s\n' % self.eth_type[t]['val'].GetTag(self))
            fx.write('#.END\n\n')
        fx.write('#.TYPE_ATTR\n')
        for t in self.eth_export_ord:  # attributes
            if (self.eth_type[t]['export'] & EF_TYPE):
                tnm = self.eth_type[t]['ref'][0]
                if self.eth_type[t]['export'] & EF_MODULE:
                    tnm = "$%s$%s" % (self.type[tnm]['module'], tnm)
                fx.write('%-24s ' % tnm)
                attr = self.eth_get_type_attr(self.eth_type[t]['ref'][0]).copy()
                fx.write('TYPE = %(TYPE)-9s  DISPLAY = %(DISPLAY)-9s  STRINGS = %(STRINGS)s  BITMASK = %(BITMASK)s\n' % attr)
        fx.write('#.END\n\n')
        self.output.file_close(fx, keep_anyway=True)
#--- eth_output_val ------------------------------------------------------
    def eth_output_val(self):
        """Write the 'val.h' output file.

        Emits a #define per value assignment and, for each non-imported
        type with an enum that is not already exported via EF_ENUM, the
        enum definition itself.
        """
        fx = self.output.file_open('val', ext='h')
        for v in self.eth_value_ord1:
            vv = self.eth_value[v]['value']
            # Value objects render themselves; plain values are used verbatim
            if isinstance (vv, Value):
                vv = vv.to_str(self)
            fx.write("#define %-30s %s\n" % (v, vv))
        for t in self.eth_type_ord1:
            if self.eth_type[t]['import']:
                continue
            if self.eth_type[t]['val'].eth_has_enum(t, self) and not (self.eth_type[t]['export'] & EF_ENUM):
                fx.write(self.eth_type[t]['val'].eth_type_enum(t, self))
        self.output.file_close(fx)
#--- eth_output_valexp ------------------------------------------------------
def eth_output_valexp(self):
if (not len(self.eth_vexport_ord)): return
fx = self.output.file_open('valexp', ext='h')
for v in self.eth_vexport_ord:
vv = self.eth_value[v]['value']
if isinstance (vv, Value):
vv = vv.to_str(self)
fx.write("#define %-30s %s\n" % (v, vv))
self.output.file_close(fx)
#--- eth_output_types -------------------------------------------------------
    def eth_output_types(self):
        """Write the 'fn' output file: all generated dissector functions.

        Emits, in order: forward declarations for PDUs that need them,
        prototypes for types involved in cyclic dependencies, the
        value_string tables and dissector functions for every non-imported
        type, and finally the PDU wrapper dissectors.
        """
        def out_pdu(f):
            # Build the C source of one PDU wrapper dissector for field *f*.
            t = self.eth_hf[f]['ethtype']
            is_new = self.eth_hf[f]['pdu']['new']
            impl = 'FALSE'
            out = ''
            if (not self.eth_hf[f]['pdu']['export']):
                out += 'static '
            if (is_new):
                out += 'int '
                out += 'dissect_'+f+'(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, void *data _U_) {\n'
            else:
                out += 'void '
                out += 'dissect_'+f+'(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_) {\n'
            if (is_new):
                out += '  int offset = 0;\n'
                off_par = 'offset'
                ret_par = 'offset'
            else:
                off_par = '0'
                ret_par = None
            if (self.Per()):
                if (self.Aligned()):
                    aligned = 'TRUE'
                else:
                    aligned = 'FALSE'
                out += "  asn1_ctx_t asn1_ctx;\n"
                out += self.eth_fn_call('asn1_ctx_init', par=(('&asn1_ctx', 'ASN1_ENC_PER', aligned, 'pinfo'),))
            if (self.Ber()):
                out += "  asn1_ctx_t asn1_ctx;\n"
                out += self.eth_fn_call('asn1_ctx_init', par=(('&asn1_ctx', 'ASN1_ENC_BER', 'TRUE', 'pinfo'),))
                par=((impl, 'tvb', off_par,'&asn1_ctx', 'tree', self.eth_hf[f]['fullname']),)
            elif (self.Per()):
                par=(('tvb', off_par, '&asn1_ctx', 'tree', self.eth_hf[f]['fullname']),)
            else:
                par=((),)
            out += self.eth_fn_call('dissect_%s_%s' % (self.eth_type[t]['proto'], t), ret=ret_par, par=par)
            if (self.Per() and is_new):
                # round the PER bit offset up to a whole number of octets
                out += '  offset += 7; offset >>= 3;\n'
            if (is_new):
                out += '  return offset;\n'
            out += '}\n'
            return out
        #end out_pdu()
        fx = self.output.file_open('fn')
        pos = fx.tell()
        if (len(self.eth_hfpdu_ord)):
            first_decl = True
            for f in self.eth_hfpdu_ord:
                if (self.eth_hf[f]['pdu'] and self.eth_hf[f]['pdu']['need_decl']):
                    if first_decl:
                        fx.write('/*--- PDUs declarations ---*/\n')
                        first_decl = False
                    fx.write(self.eth_out_pdu_decl(f))
            if not first_decl:
                fx.write('\n')
        if self.eth_dep_cycle:
            # one prototype per cycle-entry type, each preceded by comments
            # listing the cycles that type participates in
            fx.write('/*--- Cyclic dependencies ---*/\n\n')
            i = 0
            while i < len(self.eth_dep_cycle):
                t = self.type[self.eth_dep_cycle[i][0]]['ethname']
                if self.dep_cycle_eth_type[t][0] != i: i += 1; continue
                fx.write(''.join(['/* %s */\n' % ' -> '.join(self.eth_dep_cycle[i]) for i in self.dep_cycle_eth_type[t]]))
                fx.write(self.eth_type_fn_h(t))
                fx.write('\n')
                i += 1
            fx.write('\n')
        for t in self.eth_type_ord1:
            if self.eth_type[t]['import']:
                continue
            if self.eth_type[t]['val'].eth_has_vals():
                if self.eth_type[t]['no_emit'] & EF_VALS:
                    pass
                elif self.eth_type[t]['user_def'] & EF_VALS:
                    fx.write("extern const value_string %s[];\n" % (self.eth_vals_nm(t)))
                elif (self.eth_type[t]['export'] & EF_VALS) and (self.eth_type[t]['export'] & EF_TABLE):
                    pass
                else:
                    fx.write(self.eth_type[t]['val'].eth_type_vals(t, self))
            if self.eth_type[t]['no_emit'] & EF_TYPE:
                pass
            elif self.eth_type[t]['user_def'] & EF_TYPE:
                fx.write(self.eth_type_fn_h(t))
            else:
                fx.write(self.eth_type[t]['val'].eth_type_fn(self.eth_type[t]['proto'], t, self))
            fx.write('\n')
        if (len(self.eth_hfpdu_ord)):
            fx.write('/*--- PDUs ---*/\n\n')
            for f in self.eth_hfpdu_ord:
                if (self.eth_hf[f]['pdu']):
                    if (f in self.emitted_pdu):
                        fx.write("  /* %s already emitted */\n" % (f))
                    else:
                        fx.write(out_pdu(f))
                        self.emitted_pdu[f] = True
            fx.write('\n')
        # discard the file when nothing was written past the opening position
        fempty = pos == fx.tell()
        self.output.file_close(fx, discard=fempty)
#--- eth_output_dis_hnd -----------------------------------------------------
def eth_output_dis_hnd(self):
fx = self.output.file_open('dis-hnd')
fempty = True
for f in self.eth_hfpdu_ord:
pdu = self.eth_hf[f]['pdu']
if (pdu and pdu['reg'] and not pdu['hidden']):
dis = self.proto
if (pdu['reg'] != '.'):
dis += '.' + pdu['reg']
fx.write('static dissector_handle_t %s_handle;\n' % (asn2c(dis)))
fempty = False
fx.write('\n')
self.output.file_close(fx, discard=fempty)
#--- eth_output_dis_reg -----------------------------------------------------
    def eth_output_dis_reg(self):
        """Write the 'dis-reg' output file.

        Emits a register_dissector() call for every PDU with a registration
        name and, for non-hidden PDUs, a find_dissector() lookup storing the
        handle declared by eth_output_dis_hnd().
        """
        fx = self.output.file_open('dis-reg')
        fempty = True
        for f in self.eth_hfpdu_ord:
            pdu = self.eth_hf[f]['pdu']
            if (pdu and pdu['reg']):
                new_prefix = ''
                if (pdu['new']): new_prefix = 'new_'
                dis = self.proto
                # '.' means "register under the protocol name itself"
                if (pdu['reg'] != '.'): dis += '.' + pdu['reg']
                fx.write('  %sregister_dissector("%s", dissect_%s, proto_%s);\n' % (new_prefix, dis, f, self.eproto))
                if (not pdu['hidden']):
                    fx.write('  %s_handle = find_dissector("%s");\n' % (asn2c(dis), dis))
                fempty = False
        fx.write('\n')
        self.output.file_close(fx, discard=fempty)
#--- eth_output_dis_tab -----------------------------------------------------
    def eth_output_dis_tab(self):
        """Write the 'dis-tab' output file.

        For each #.REGISTER directive, emits either a dissector_add_uint/
        dissector_add_string call (NUM/STR registrations) or a
        register_ber_oid_dissector/register_per_oid_dissector call
        (BER/PER registrations).
        """
        fx = self.output.file_open('dis-tab')
        fempty = True
        for k in self.conform.get_order('REGISTER'):
            reg = self.conform.use_item('REGISTER', k)
            if reg['pdu'] not in self.field: continue
            f = self.field[reg['pdu']]['ethname']
            pdu = self.eth_hf[f]['pdu']
            new_prefix = ''
            if (pdu['new']): new_prefix = 'new_'
            if (reg['rtype'] in ('NUM', 'STR')):
                rstr = ''
                if (reg['rtype'] == 'STR'):
                    rstr = 'string'
                else:
                    rstr = 'uint'
                if (pdu['reg']):
                    dis = self.proto
                    if (pdu['reg'] != '.'): dis += '.' + pdu['reg']
                    # non-hidden PDUs have a pre-looked-up handle variable
                    if (not pdu['hidden']):
                        hnd = '%s_handle' % (asn2c(dis))
                    else:
                        hnd = 'find_dissector("%s")' % (dis)
                else:
                    hnd = '%screate_dissector_handle(dissect_%s, proto_%s)' % (new_prefix, f, self.eproto)
                rport = self.value_get_eth(reg['rport'])
                fx.write('  dissector_add_%s("%s", %s, %s);\n' % (rstr, reg['rtable'], rport, hnd))
            elif (reg['rtype'] in ('BER', 'PER')):
                roid = self.value_get_eth(reg['roid'])
                fx.write('  %sregister_%s_oid_dissector(%s, dissect_%s, proto_%s, %s);\n' % (new_prefix, reg['rtype'].lower(), roid, f, self.eproto, reg['roidname']))
            fempty = False
        fx.write('\n')
        self.output.file_close(fx, discard=fempty)
#--- eth_output_syn_reg -----------------------------------------------------
    def eth_output_syn_reg(self):
        """Write the 'syn-reg' output file.

        Emits one register_ber_syntax_dissector() call per #.SYNTAX
        directive whose PDU is known.
        """
        fx = self.output.file_open('syn-reg')
        fempty = True
        first_decl = True
        for k in self.conform.get_order('SYNTAX'):
            reg = self.conform.use_item('SYNTAX', k)
            if reg['pdu'] not in self.field: continue
            f = self.field[reg['pdu']]['ethname']
            pdu = self.eth_hf[f]['pdu']
            new_prefix = ''
            if (pdu['new']): new_prefix = 'new_'
            if first_decl:
                fx.write('  /*--- Syntax registrations ---*/\n')
                first_decl = False
            fx.write('  %sregister_ber_syntax_dissector(%s, proto_%s, dissect_%s_PDU);\n' % (new_prefix, k, self.eproto, reg['pdu']));
            fempty=False
        self.output.file_close(fx, discard=fempty)
#--- eth_output_tables -----------------------------------------------------
def eth_output_tables(self):
for num in list(self.conform.report.keys()):
fx = self.output.file_open('table' + num)
for rep in self.conform.report[num]:
self.eth_output_table(fx, rep)
self.output.file_close(fx)
#--- eth_output_table -----------------------------------------------------
def eth_output_table(self, fx, rep):
def cmp_fn(a, b, cmp_flds, objs):
if not cmp_flds: return 0
obja = objs[a]
objb = objs[b]
res = 0
for f in cmp_flds:
if f[0] == '#':
f = f[1:]
res = int(obja[f]) - int(objb[f])
else:
res = cmp(obja[f], objb[f])
if res: break
return res
if rep['type'] == 'HDR':
fx.write('\n')
if rep['var']:
var = rep['var']
var_list = var.split('.', 1)
cls = var_list[0]
del var_list[0]
flds = []
not_flds = []
sort_flds = []
for f in var_list:
if f[0] == '!':
not_flds.append(f[1:])
continue
if f[0] == '#':
flds.append(f[1:])
sort_flds.append(f)
continue
if f[0] == '@':
flds.append(f[1:])
sort_flds.append(f[1:])
continue
flds.append(f)
objs = {}
objs_ord = []
if (cls in self.oassign_cls):
for ident in self.oassign_cls[cls]:
obj = self.get_obj_repr(ident, flds, not_flds)
if not obj:
continue
obj['_LOOP'] = var
obj['_DICT'] = str(obj)
objs[ident] = obj
objs_ord.append(ident)
if (sort_flds):
objs_ord.sort(cmp=partial(cmp_fn, cmp_flds=sort_flds, objs=objs))
for ident in objs_ord:
obj = objs[ident]
try:
text = rep['text'] % obj
except (KeyError):
raise sys.exc_info()[0]("%s:%s invalid key %s for information object %s of %s" % (rep['fn'], rep['lineno'], sys.exc_info()[1], ident, var))
fx.write(text)
else:
fx.write("/* Unknown or empty loop list %s */\n" % (var))
else:
fx.write(rep['text'])
if rep['type'] == 'FTR':
fx.write('\n')
#--- dupl_report -----------------------------------------------------
def dupl_report(self):
# types
tmplist = sorted(self.eth_type_dupl.keys())
for t in tmplist:
msg = "The same type names for different types. Explicit type renaming is recommended.\n"
msg += t + "\n"
for tt in self.eth_type_dupl[t]:
msg += " %-20s %s\n" % (self.type[tt]['ethname'], tt)
warnings.warn_explicit(msg, UserWarning, '', 0)
# fields
tmplist = list(self.eth_hf_dupl.keys())
tmplist.sort()
for f in tmplist:
msg = "The same field names for different types. Explicit field renaming is recommended.\n"
msg += f + "\n"
for tt in list(self.eth_hf_dupl[f].keys()):
msg += " %-20s %-20s " % (self.eth_hf_dupl[f][tt], tt)
msg += ", ".join(self.eth_hf[self.eth_hf_dupl[f][tt]]['ref'])
msg += "\n"
warnings.warn_explicit(msg, UserWarning, '', 0)
#--- eth_do_output ------------------------------------------------------------
    def eth_do_output(self):
        """Produce all output files (and optional debug dumps).

        When the corresponding debug flags are set, first prints tables of
        assignments ('a') and of types/values/fields ('t'); then reports
        duplicate names and writes every per-category output file, plus the
        exported conformance file when requested.
        """
        if self.dbg('a'):
            print("\n# Assignments")
            for a in self.assign_ord:
                v = ' '
                if (self.assign[a]['virt']): v = '*'
                print(v, a)
            print("\n# Value assignments")
            for a in self.vassign_ord:
                print(' ', a)
            print("\n# Information object assignments")
            for a in self.oassign_ord:
                print(" %-12s (%s)" % (a, self.oassign[a].cls))
        if self.dbg('t'):
            print("\n# Imported Types")
            print("%-40s %-24s %-24s" % ("ASN.1 name", "Module", "Protocol"))
            print("-" * 100)
            for t in self.type_imp:
                print("%-40s %-24s %-24s" % (t, self.type[t]['import'], self.type[t]['proto']))
            print("\n# Imported Values")
            print("%-40s %-24s %-24s" % ("ASN.1 name", "Module", "Protocol"))
            print("-" * 100)
            for t in self.value_imp:
                print("%-40s %-24s %-24s" % (t, self.value[t]['import'], self.value[t]['proto']))
            print("\n# Imported Object Classes")
            print("%-40s %-24s %-24s" % ("ASN.1 name", "Module", "Protocol"))
            print("-" * 100)
            for t in self.objectclass_imp:
                print("%-40s %-24s %-24s" % (t, self.objectclass[t]['import'], self.objectclass[t]['proto']))
            print("\n# Exported Types")
            print("%-31s %s" % ("Wireshark type", "Export Flag"))
            print("-" * 100)
            for t in self.eth_export_ord:
                print("%-31s 0x%02X" % (t, self.eth_type[t]['export']))
            print("\n# Exported Values")
            print("%-40s %s" % ("Wireshark name", "Value"))
            print("-" * 100)
            for v in self.eth_vexport_ord:
                vv = self.eth_value[v]['value']
                if isinstance (vv, Value):
                    vv = vv.to_str(self)
                print("%-40s %s" % (v, vv))
            print("\n# ASN.1 Object Classes")
            print("%-40s %-24s %-24s" % ("ASN.1 name", "Module", "Protocol"))
            print("-" * 100)
            for t in self.objectclass_ord:
                print("%-40s " % (t))
            print("\n# ASN.1 Types")
            print("%-49s %-24s %-24s" % ("ASN.1 unique name", "'tname'", "Wireshark type"))
            print("-" * 100)
            for t in self.type_ord:
                print("%-49s %-24s %-24s" % (t, self.type[t]['tname'], self.type[t]['ethname']))
            print("\n# Wireshark Types")
            print("Wireshark type                   References (ASN.1 types)")
            print("-" * 100)
            for t in self.eth_type_ord:
                sys.stdout.write("%-31s %d" % (t, len(self.eth_type[t]['ref'])))
                print(', '.join(self.eth_type[t]['ref']))
            print("\n# ASN.1 Values")
            print("%-40s %-18s %-20s %s" % ("ASN.1 unique name", "Type", "Value", "Wireshark value"))
            print("-" * 100)
            for v in self.value_ord:
                vv = self.value[v]['value']
                if isinstance (vv, Value):
                    vv = vv.to_str(self)
                print("%-40s %-18s %-20s %s" % (v, self.value[v]['type'].eth_tname(), vv, self.value[v]['ethname']))
            #print "\n# Wireshark Values"
            #print "%-40s %s" % ("Wireshark name", "Value")
            #print "-" * 100
            #for v in self.eth_value_ord:
            #  vv = self.eth_value[v]['value']
            #  if isinstance (vv, Value):
            #    vv = vv.to_str(self)
            #  print "%-40s %s" % (v, vv)
            print("\n# ASN.1 Fields")
            print("ASN.1 unique name                        Wireshark name        ASN.1 type")
            print("-" * 100)
            for f in (self.pdu_ord + self.field_ord):
                print("%-40s %-20s %s" % (f, self.field[f]['ethname'], self.field[f]['type']))
            print("\n# Wireshark Fields")
            print("Wireshark name                Wireshark type        References (ASN.1 fields)")
            print("-" * 100)
            for f in (self.eth_hfpdu_ord + self.eth_hf_ord):
                sys.stdout.write("%-30s %-20s %s" % (f, self.eth_hf[f]['ethtype'], len(self.eth_hf[f]['ref'])))
                print(', '.join(self.eth_hf[f]['ref']))
            #print "\n# Order after dependencies"
            #print '\n'.join(self.eth_type_ord1)
            print("\n# Cyclic dependencies")
            for c in self.eth_dep_cycle:
                print(' -> '.join(c))
        self.dupl_report()
        # output base name: explicit -o option, else protocol name
        self.output.outnm = self.outnm_opt
        if (not self.output.outnm):
            self.output.outnm = self.proto
            self.output.outnm = self.output.outnm.replace('.', '-')
        if not self.justexpcnf:
            self.eth_output_hf()
            self.eth_output_ett()
            self.eth_output_types()
            self.eth_output_hf_arr()
            self.eth_output_ett_arr()
            self.eth_output_export()
            self.eth_output_val()
            self.eth_output_valexp()
            self.eth_output_dis_hnd()
            self.eth_output_dis_reg()
            self.eth_output_dis_tab()
            self.eth_output_syn_reg()
            self.eth_output_tables()
        if self.expcnf:
            self.eth_output_expcnf()
def dbg_modules(self):
def print_mod(m):
sys.stdout.write("%-30s " % (m))
dep = self.module[m][:]
for i in range(len(dep)):
if dep[i] not in self.module:
dep[i] = '*' + dep[i]
print(', '.join(dep))
# end of print_mod()
(mod_ord, mod_cyc) = dependency_compute(self.module_ord, self.module, ignore_fn = lambda t: t not in self.module)
print("\n# ASN.1 Moudules")
print("Module name Dependency")
print("-" * 100)
new_ord = False
for m in (self.module_ord):
print_mod(m)
new_ord = new_ord or (self.module_ord.index(m) != mod_ord.index(m))
if new_ord:
print("\n# ASN.1 Moudules - in dependency order")
print("Module name Dependency")
print("-" * 100)
for m in (mod_ord):
print_mod(m)
if mod_cyc:
print("\nCyclic dependencies:")
for i in (list(range(len(mod_cyc)))):
print("%02d: %s" % (i + 1, str(mod_cyc[i])))
#--- EthCnf -------------------------------------------------------------------
class EthCnf:
    def __init__(self):
        """Initialize the conformance-file state.

        tblcfg describes, per directive table: the name of the value
        attribute stored per entry (val_nm), the default returned when an
        entry is missing (val_dflt), and whether duplicates / unused
        entries are reported (chk_dup / chk_use).  table holds the entries,
        order their insertion order, fn collected FN_HDR/FN_FTR/FN_BODY
        texts, and report the #.TABLE report definitions.
        """
        self.ectx = None
        self.tblcfg = {}
        self.table = {}
        self.order = {}
        self.fn = {}
        self.report = {}
        self.suppress_line = False
        self.include_path = []
        #                                   Value name             Default value       Duplicity check   Usage check
        self.tblcfg['EXPORTS'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['MAKE_ENUM'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['USE_VALS_EXT'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['PDU'] = { 'val_nm' : 'attr', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['SYNTAX'] = { 'val_nm' : 'attr', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['REGISTER'] = { 'val_nm' : 'attr', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['USER_DEFINED'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['NO_EMIT'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['MODULE'] = { 'val_nm' : 'proto', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : False }
        self.tblcfg['OMIT_ASSIGNMENT'] = { 'val_nm' : 'omit', 'val_dflt' : False, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['NO_OMIT_ASSGN'] = { 'val_nm' : 'omit', 'val_dflt' : True, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['VIRTUAL_ASSGN'] = { 'val_nm' : 'name', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['SET_TYPE'] = { 'val_nm' : 'type', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['TYPE_RENAME'] = { 'val_nm' : 'eth_name', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['FIELD_RENAME'] = { 'val_nm' : 'eth_name', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['IMPORT_TAG'] = { 'val_nm' : 'ttag', 'val_dflt' : (), 'chk_dup' : True, 'chk_use' : False }
        self.tblcfg['FN_PARS'] = { 'val_nm' : 'pars', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['TYPE_ATTR'] = { 'val_nm' : 'attr', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : False }
        self.tblcfg['ETYPE_ATTR'] = { 'val_nm' : 'attr', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : False }
        self.tblcfg['FIELD_ATTR'] = { 'val_nm' : 'attr', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['EFIELD_ATTR'] = { 'val_nm' : 'attr', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : True }
        self.tblcfg['ASSIGNED_ID'] = { 'val_nm' : 'ids', 'val_dflt' : {}, 'chk_dup' : False,'chk_use' : False }
        self.tblcfg['ASSIGN_VALUE_TO_TYPE'] = { 'val_nm' : 'name', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True }
        for k in list(self.tblcfg.keys()) :
            self.table[k] = {}
            self.order[k] = []
def add_item(self, table, key, fn, lineno, **kw):
if self.tblcfg[table]['chk_dup'] and key in self.table[table]:
warnings.warn_explicit("Duplicated %s for %s. Previous one is at %s:%d" %
(table, key, self.table[table][key]['fn'], self.table[table][key]['lineno']),
UserWarning, fn, lineno)
return
self.table[table][key] = {'fn' : fn, 'lineno' : lineno, 'used' : False}
self.table[table][key].update(kw)
self.order[table].append(key)
def update_item(self, table, key, fn, lineno, **kw):
if key not in self.table[table]:
self.table[table][key] = {'fn' : fn, 'lineno' : lineno, 'used' : False}
self.order[table].append(key)
self.table[table][key][self.tblcfg[table]['val_nm']] = {}
self.table[table][key][self.tblcfg[table]['val_nm']].update(kw[self.tblcfg[table]['val_nm']])
    def get_order(self, table):
        """Return the keys of *table* in the order they were added."""
        return self.order[table]
    def check_item(self, table, key):
        """Return True when *key* has an entry in *table*."""
        return key in self.table[table]
def copy_item(self, table, dst_key, src_key):
if (src_key in self.table[table]):
self.table[table][dst_key] = self.table[table][src_key]
    def check_item_value(self, table, key, **kw):
        """Return True when *key* exists in *table* and carries its value
        attribute (the table's configured val_nm, overridable via kw['val_nm'])."""
        return key in self.table[table] and kw.get('val_nm', self.tblcfg[table]['val_nm']) in self.table[table][key]
def use_item(self, table, key, **kw):
vdflt = kw.get('val_dflt', self.tblcfg[table]['val_dflt'])
if key not in self.table[table]: return vdflt
vname = kw.get('val_nm', self.tblcfg[table]['val_nm'])
#print "use_item() - set used for %s %s" % (table, key)
self.table[table][key]['used'] = True
return self.table[table][key].get(vname, vdflt)
def omit_assignment(self, type, ident, module):
if self.ectx.conform.use_item('OMIT_ASSIGNMENT', ident):
return True
if self.ectx.conform.use_item('OMIT_ASSIGNMENT', '*') or \
self.ectx.conform.use_item('OMIT_ASSIGNMENT', '*'+type) or \
self.ectx.conform.use_item('OMIT_ASSIGNMENT', '*/'+module) or \
self.ectx.conform.use_item('OMIT_ASSIGNMENT', '*'+type+'/'+module):
return self.ectx.conform.use_item('NO_OMIT_ASSGN', ident)
return False
def add_fn_line(self, name, ctx, line, fn, lineno):
if name not in self.fn:
self.fn[name] = {'FN_HDR' : None, 'FN_FTR' : None, 'FN_BODY' : None}
if (self.fn[name][ctx]):
self.fn[name][ctx]['text'] += line
else:
self.fn[name][ctx] = {'text' : line, 'used' : False,
'fn' : fn, 'lineno' : lineno}
    def get_fn_presence(self, name):
        """Return True when any FN_HDR/FN_FTR/FN_BODY text was collected for *name*."""
        #print "get_fn_presence('%s'):%s" % (name, str(self.fn.has_key(name)))
        #if self.fn.has_key(name): print self.fn[name]
        return name in self.fn
    def get_fn_body_presence(self, name):
        """Return truthy when a FN_BODY override was collected for *name*."""
        return name in self.fn and self.fn[name]['FN_BODY']
def get_fn_text(self, name, ctx):
if (name not in self.fn):
return '';
if (not self.fn[name][ctx]):
return '';
self.fn[name][ctx]['used'] = True
out = self.fn[name][ctx]['text']
if (not self.suppress_line):
out = '#line %u "%s"\n%s\n' % (self.fn[name][ctx]['lineno'], rel_dissector_path(self.fn[name][ctx]['fn']), out);
return out
def add_pdu(self, par, is_new, fn, lineno):
#print "add_pdu(par=%s, %s, %d)" % (str(par), fn, lineno)
(reg, hidden) = (None, False)
if (len(par) > 1): reg = par[1]
if (reg and reg[0]=='@'): (reg, hidden) = (reg[1:], True)
attr = {'new' : is_new, 'reg' : reg, 'hidden' : hidden, 'need_decl' : False, 'export' : False}
self.add_item('PDU', par[0], attr=attr, fn=fn, lineno=lineno)
return
def add_syntax(self, par, fn, lineno):
#print "add_syntax(par=%s, %s, %d)" % (str(par), fn, lineno)
if( (len(par) >=2)):
name = par[1]
else:
name = '"'+par[0]+'"'
attr = { 'pdu' : par[0] }
self.add_item('SYNTAX', name, attr=attr, fn=fn, lineno=lineno)
return
    def add_register(self, pdu, par, fn, lineno):
        """Register a #.REGISTER directive for *pdu*.

        par[0] selects the registration type: N/NUM or S/STR (dissector
        table + port/string value) and B/BER or P/PER (OID, with optional
        OID name).  The parameter count is validated against the per-type
        minimum/maximum before the entry is stored.
        """
        #print "add_register(pdu=%s, par=%s, %s, %d)" % (pdu, str(par), fn, lineno)
        if (par[0] in ('N', 'NUM')):   rtype = 'NUM'; (pmin, pmax) = (2, 2)
        elif (par[0] in ('S', 'STR')): rtype = 'STR'; (pmin, pmax) = (2, 2)
        elif (par[0] in ('B', 'BER')): rtype = 'BER'; (pmin, pmax) = (1, 2)
        elif (par[0] in ('P', 'PER')): rtype = 'PER'; (pmin, pmax) = (1, 2)
        else: warnings.warn_explicit("Unknown registration type '%s'" % (par[2]), UserWarning, fn, lineno); return
        if ((len(par)-1) < pmin):
            warnings.warn_explicit("Too few parameters for %s registration type. At least %d parameters are required" % (rtype, pmin), UserWarning, fn, lineno)
            return
        if ((len(par)-1) > pmax):
            warnings.warn_explicit("Too many parameters for %s registration type. Only %d parameters are allowed" % (rtype, pmax), UserWarning, fn, lineno)
        attr = {'pdu' : pdu, 'rtype' : rtype}
        if (rtype in ('NUM', 'STR')):
            attr['rtable'] = par[1]
            attr['rport'] = par[2]
            rkey = '/'.join([rtype, attr['rtable'], attr['rport']])
        elif (rtype in ('BER', 'PER')):
            attr['roid'] = par[1]
            attr['roidname'] = '""'
            if (len(par)>=3):
                attr['roidname'] = par[2]
            elif attr['roid'][0] != '"':
                # symbolic OID: use its own name as the display name
                attr['roidname'] = '"' + attr['roid'] + '"'
            rkey = '/'.join([rtype, attr['roid']])
        self.add_item('REGISTER', rkey, attr=attr, fn=fn, lineno=lineno)
def check_par(self, par, pmin, pmax, fn, lineno):
for i in range(len(par)):
if par[i] == '-':
par[i] = None
continue
if par[i][0] == '#':
par[i:] = []
break
if len(par) < pmin:
warnings.warn_explicit("Too few parameters. At least %d parameters are required" % (pmin), UserWarning, fn, lineno)
return None
if (pmax >= 0) and (len(par) > pmax):
warnings.warn_explicit("Too many parameters. Only %d parameters are allowed" % (pmax), UserWarning, fn, lineno)
return par[0:pmax]
return par
def read(self, fn):
def get_par(line, pmin, pmax, fn, lineno):
par = line.split(None, pmax)
par = self.check_par(par, pmin, pmax, fn, lineno)
return par
def get_par_nm(line, pmin, pmax, fn, lineno):
if pmax:
par = line.split(None, pmax)
else:
par = [line,]
for i in range(len(par)):
if par[i][0] == '#':
par[i:] = []
break
if len(par) < pmin:
warnings.warn_explicit("Too few parameters. At least %d parameters are required" % (pmin), UserWarning, fn, lineno)
return None
if len(par) > pmax:
nmpar = par[pmax]
else:
nmpar = ''
nmpars = {}
nmpar_first = re.compile(r'^\s*(?P<attr>[_A-Z][_A-Z0-9]*)\s*=\s*')
nmpar_next = re.compile(r'\s+(?P<attr>[_A-Z][_A-Z0-9]*)\s*=\s*')
nmpar_end = re.compile(r'\s*$')
result = nmpar_first.search(nmpar)
pos = 0
while result:
k = result.group('attr')
pos = result.end()
result = nmpar_next.search(nmpar, pos)
p1 = pos
if result:
p2 = result.start()
else:
p2 = nmpar_end.search(nmpar, pos).start()
v = nmpar[p1:p2]
nmpars[k] = v
if len(par) > pmax:
par[pmax] = nmpars
return par
f = open(fn, "r")
lineno = 0
is_import = False
directive = re.compile(r'^\s*#\.(?P<name>[A-Z_][A-Z_0-9]*)(\s+|$)')
cdirective = re.compile(r'^\s*##')
report = re.compile(r'^TABLE(?P<num>\d*)_(?P<type>HDR|BODY|FTR)$')
comment = re.compile(r'^\s*#[^.#]')
empty = re.compile(r'^\s*$')
ctx = None
name = ''
default_flags = 0x00
stack = []
while True:
if not f.closed:
line = f.readline()
lineno += 1
else:
line = None
if not line:
if not f.closed:
f.close()
if stack:
frec = stack.pop()
fn, f, lineno, is_import = frec['fn'], frec['f'], frec['lineno'], frec['is_import']
continue
else:
break
if comment.search(line): continue
result = directive.search(line)
if result: # directive
rep_result = report.search(result.group('name'))
if result.group('name') == 'END_OF_CNF':
f.close()
elif result.group('name') == 'OPT':
ctx = result.group('name')
par = get_par(line[result.end():], 0, -1, fn=fn, lineno=lineno)
if not par: continue
self.set_opt(par[0], par[1:], fn, lineno)
ctx = None
elif result.group('name') in ('PDU', 'PDU_NEW', 'REGISTER', 'REGISTER_NEW',
'MODULE', 'MODULE_IMPORT',
'OMIT_ASSIGNMENT', 'NO_OMIT_ASSGN',
'VIRTUAL_ASSGN', 'SET_TYPE', 'ASSIGN_VALUE_TO_TYPE',
'TYPE_RENAME', 'FIELD_RENAME', 'TF_RENAME', 'IMPORT_TAG',
'TYPE_ATTR', 'ETYPE_ATTR', 'FIELD_ATTR', 'EFIELD_ATTR',
'SYNTAX', 'SYNTAX_NEW'):
ctx = result.group('name')
elif result.group('name') in ('OMIT_ALL_ASSIGNMENTS', 'OMIT_ASSIGNMENTS_EXCEPT',
'OMIT_ALL_TYPE_ASSIGNMENTS', 'OMIT_TYPE_ASSIGNMENTS_EXCEPT',
'OMIT_ALL_VALUE_ASSIGNMENTS', 'OMIT_VALUE_ASSIGNMENTS_EXCEPT'):
ctx = result.group('name')
key = '*'
if ctx in ('OMIT_ALL_TYPE_ASSIGNMENTS', 'OMIT_TYPE_ASSIGNMENTS_EXCEPT'):
key += 'T'
if ctx in ('OMIT_ALL_VALUE_ASSIGNMENTS', 'OMIT_VALUE_ASSIGNMENTS_EXCEPT'):
key += 'V'
par = get_par(line[result.end():], 0, 1, fn=fn, lineno=lineno)
if par:
key += '/' + par[0]
self.add_item('OMIT_ASSIGNMENT', key, omit=True, fn=fn, lineno=lineno)
if ctx in ('OMIT_ASSIGNMENTS_EXCEPT', 'OMIT_TYPE_ASSIGNMENTS_EXCEPT', 'OMIT_VALUE_ASSIGNMENTS_EXCEPT'):
ctx = 'NO_OMIT_ASSGN'
else:
ctx = None
elif result.group('name') in ('EXPORTS', 'MODULE_EXPORTS', 'USER_DEFINED', 'NO_EMIT'):
ctx = result.group('name')
default_flags = EF_TYPE|EF_VALS
if ctx == 'MODULE_EXPORTS':
ctx = 'EXPORTS'
default_flags |= EF_MODULE
if ctx == 'EXPORTS':
par = get_par(line[result.end():], 0, 5, fn=fn, lineno=lineno)
else:
par = get_par(line[result.end():], 0, 1, fn=fn, lineno=lineno)
if not par: continue
p = 1
if (par[0] == 'WITH_VALS'): default_flags |= EF_TYPE|EF_VALS
elif (par[0] == 'WITHOUT_VALS'): default_flags |= EF_TYPE; default_flags &= ~EF_TYPE
elif (par[0] == 'ONLY_VALS'): default_flags &= ~EF_TYPE; default_flags |= EF_VALS
elif (ctx == 'EXPORTS'): p = 0
else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[0]), UserWarning, fn, lineno)
for i in range(p, len(par)):
if (par[i] == 'ONLY_ENUM'): default_flags &= ~(EF_TYPE|EF_VALS); default_flags |= EF_ENUM
elif (par[i] == 'WITH_ENUM'): default_flags |= EF_ENUM
elif (par[i] == 'VALS_WITH_TABLE'): default_flags |= EF_TABLE
elif (par[i] == 'WS_DLL'): default_flags |= EF_WS_DLL
elif (par[i] == 'EXTERN'): default_flags |= EF_EXTERN
elif (par[i] == 'NO_PROT_PREFIX'): default_flags |= EF_NO_PROT
else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[i]), UserWarning, fn, lineno)
elif result.group('name') in ('MAKE_ENUM', 'MAKE_DEFINES'):
ctx = result.group('name')
default_flags = EF_ENUM
if ctx == 'MAKE_ENUM': default_flags |= EF_NO_PROT|EF_NO_TYPE
if ctx == 'MAKE_DEFINES': default_flags |= EF_DEFINE|EF_UCASE|EF_NO_TYPE
par = get_par(line[result.end():], 0, 3, fn=fn, lineno=lineno)
for i in range(0, len(par)):
if (par[i] == 'NO_PROT_PREFIX'): default_flags |= EF_NO_PROT
elif (par[i] == 'PROT_PREFIX'): default_flags &= ~ EF_NO_PROT
elif (par[i] == 'NO_TYPE_PREFIX'): default_flags |= EF_NO_TYPE
elif (par[i] == 'TYPE_PREFIX'): default_flags &= ~ EF_NO_TYPE
elif (par[i] == 'UPPER_CASE'): default_flags |= EF_UCASE
elif (par[i] == 'NO_UPPER_CASE'): default_flags &= ~EF_UCASE
else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[i]), UserWarning, fn, lineno)
elif result.group('name') == 'USE_VALS_EXT':
ctx = result.group('name')
default_flags = 0xFF
elif result.group('name') == 'FN_HDR':
minp = 1
if (ctx in ('FN_PARS',)) and name: minp = 0
par = get_par(line[result.end():], minp, 1, fn=fn, lineno=lineno)
if (not par) and (minp > 0): continue
ctx = result.group('name')
if par: name = par[0]
elif result.group('name') == 'FN_FTR':
minp = 1
if (ctx in ('FN_PARS','FN_HDR')) and name: minp = 0
par = get_par(line[result.end():], minp, 1, fn=fn, lineno=lineno)
if (not par) and (minp > 0): continue
ctx = result.group('name')
if par: name = par[0]
elif result.group('name') == 'FN_BODY':
par = get_par_nm(line[result.end():], 1, 1, fn=fn, lineno=lineno)
if not par: continue
ctx = result.group('name')
name = par[0]
if len(par) > 1:
self.add_item('FN_PARS', name, pars=par[1], fn=fn, lineno=lineno)
elif result.group('name') == 'FN_PARS':
par = get_par_nm(line[result.end():], 0, 1, fn=fn, lineno=lineno)
ctx = result.group('name')
if not par:
name = None
elif len(par) == 1:
name = par[0]
self.add_item(ctx, name, pars={}, fn=fn, lineno=lineno)
elif len(par) > 1:
self.add_item(ctx, par[0], pars=par[1], fn=fn, lineno=lineno)
ctx = None
elif result.group('name') == 'CLASS':
par = get_par(line[result.end():], 1, 1, fn=fn, lineno=lineno)
if not par: continue
ctx = result.group('name')
name = par[0]
add_class_ident(name)
if not name.split('$')[-1].isupper():
warnings.warn_explicit("No lower-case letters shall be included in information object class name (%s)" % (name),
UserWarning, fn, lineno)
elif result.group('name') == 'ASSIGNED_OBJECT_IDENTIFIER':
par = get_par(line[result.end():], 1, 1, fn=fn, lineno=lineno)
if not par: continue
self.update_item('ASSIGNED_ID', 'OBJECT_IDENTIFIER', ids={par[0] : par[0]}, fn=fn, lineno=lineno)
elif rep_result: # Reports
num = rep_result.group('num')
type = rep_result.group('type')
if type == 'BODY':
par = get_par(line[result.end():], 1, 1, fn=fn, lineno=lineno)
if not par: continue
else:
par = get_par(line[result.end():], 0, 0, fn=fn, lineno=lineno)
rep = { 'type' : type, 'var' : None, 'text' : '', 'fn' : fn, 'lineno' : lineno }
if len(par) > 0:
rep['var'] = par[0]
self.report.setdefault(num, []).append(rep)
ctx = 'TABLE'
name = num
elif result.group('name') in ('INCLUDE', 'IMPORT') :
is_imp = result.group('name') == 'IMPORT'
par = get_par(line[result.end():], 1, 1, fn=fn, lineno=lineno)
if not par:
warnings.warn_explicit("%s requires parameter" % (result.group('name'),), UserWarning, fn, lineno)
continue
fname = par[0]
#print "Try include: %s" % (fname)
if (not os.path.exists(fname)):
fname = os.path.join(os.path.split(fn)[0], par[0])
#print "Try include: %s" % (fname)
i = 0
while not os.path.exists(fname) and (i < len(self.include_path)):
fname = os.path.join(self.include_path[i], par[0])
#print "Try include: %s" % (fname)
i += 1
if (not os.path.exists(fname)):
if is_imp:
continue # just ignore
else:
fname = par[0] # report error
fnew = open(fname, "r")
stack.append({'fn' : fn, 'f' : f, 'lineno' : lineno, 'is_import' : is_import})
fn, f, lineno, is_import = par[0], fnew, 0, is_imp
elif result.group('name') == 'END':
ctx = None
else:
warnings.warn_explicit("Unknown directive '%s'" % (result.group('name')), UserWarning, fn, lineno)
continue
if not ctx:
if not empty.match(line):
warnings.warn_explicit("Non-empty line in empty context", UserWarning, fn, lineno)
elif ctx == 'OPT':
if empty.match(line): continue
par = get_par(line, 1, -1, fn=fn, lineno=lineno)
if not par: continue
self.set_opt(par[0], par[1:], fn, lineno)
elif ctx in ('EXPORTS', 'USER_DEFINED', 'NO_EMIT'):
if empty.match(line): continue
if ctx == 'EXPORTS':
par = get_par(line, 1, 6, fn=fn, lineno=lineno)
else:
par = get_par(line, 1, 2, fn=fn, lineno=lineno)
if not par: continue
flags = default_flags
p = 2
if (len(par)>=2):
if (par[1] == 'WITH_VALS'): flags |= EF_TYPE|EF_VALS
elif (par[1] == 'WITHOUT_VALS'): flags |= EF_TYPE; flags &= ~EF_TYPE
elif (par[1] == 'ONLY_VALS'): flags &= ~EF_TYPE; flags |= EF_VALS
elif (ctx == 'EXPORTS'): p = 1
else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[1]), UserWarning, fn, lineno)
for i in range(p, len(par)):
if (par[i] == 'ONLY_ENUM'): flags &= ~(EF_TYPE|EF_VALS); flags |= EF_ENUM
elif (par[i] == 'WITH_ENUM'): flags |= EF_ENUM
elif (par[i] == 'VALS_WITH_TABLE'): flags |= EF_TABLE
elif (par[i] == 'WS_DLL'): flags |= EF_WS_DLL
elif (par[i] == 'EXTERN'): flags |= EF_EXTERN
elif (par[i] == 'NO_PROT_PREFIX'): flags |= EF_NO_PROT
else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[i]), UserWarning, fn, lineno)
self.add_item(ctx, par[0], flag=flags, fn=fn, lineno=lineno)
elif ctx in ('MAKE_ENUM', 'MAKE_DEFINES'):
if empty.match(line): continue
par = get_par(line, 1, 4, fn=fn, lineno=lineno)
if not par: continue
flags = default_flags
for i in range(1, len(par)):
if (par[i] == 'NO_PROT_PREFIX'): flags |= EF_NO_PROT
elif (par[i] == 'PROT_PREFIX'): flags &= ~ EF_NO_PROT
elif (par[i] == 'NO_TYPE_PREFIX'): flags |= EF_NO_TYPE
elif (par[i] == 'TYPE_PREFIX'): flags &= ~ EF_NO_TYPE
elif (par[i] == 'UPPER_CASE'): flags |= EF_UCASE
elif (par[i] == 'NO_UPPER_CASE'): flags &= ~EF_UCASE
else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[i]), UserWarning, fn, lineno)
self.add_item('MAKE_ENUM', par[0], flag=flags, fn=fn, lineno=lineno)
elif ctx == 'USE_VALS_EXT':
if empty.match(line): continue
par = get_par(line, 1, 1, fn=fn, lineno=lineno)
if not par: continue
flags = default_flags
self.add_item('USE_VALS_EXT', par[0], flag=flags, fn=fn, lineno=lineno)
elif ctx in ('PDU', 'PDU_NEW'):
if empty.match(line): continue
par = get_par(line, 1, 5, fn=fn, lineno=lineno)
if not par: continue
is_new = False
if (ctx == 'PDU_NEW'): is_new = True
self.add_pdu(par[0:2], is_new, fn, lineno)
if (len(par)>=3):
self.add_register(par[0], par[2:5], fn, lineno)
elif ctx in ('SYNTAX', 'SYNTAX_NEW'):
if empty.match(line): continue
par = get_par(line, 1, 2, fn=fn, lineno=lineno)
if not par: continue
if not self.check_item('PDU', par[0]):
is_new = False
if (ctx == 'SYNTAX_NEW'): is_new = True
self.add_pdu(par[0:1], is_new, fn, lineno)
self.add_syntax(par, fn, lineno)
elif ctx in ('REGISTER', 'REGISTER_NEW'):
if empty.match(line): continue
par = get_par(line, 3, 4, fn=fn, lineno=lineno)
if not par: continue
if not self.check_item('PDU', par[0]):
is_new = False
if (ctx == 'REGISTER_NEW'): is_new = True
self.add_pdu(par[0:1], is_new, fn, lineno)
self.add_register(par[0], par[1:4], fn, lineno)
elif ctx in ('MODULE', 'MODULE_IMPORT'):
if empty.match(line): continue
par = get_par(line, 2, 2, fn=fn, lineno=lineno)
if not par: continue
self.add_item('MODULE', par[0], proto=par[1], fn=fn, lineno=lineno)
elif ctx == 'IMPORT_TAG':
if empty.match(line): continue
par = get_par(line, 3, 3, fn=fn, lineno=lineno)
if not par: continue
self.add_item(ctx, par[0], ttag=(par[1], par[2]), fn=fn, lineno=lineno)
elif ctx == 'OMIT_ASSIGNMENT':
if empty.match(line): continue
par = get_par(line, 1, 1, fn=fn, lineno=lineno)
if not par: continue
self.add_item(ctx, par[0], omit=True, fn=fn, lineno=lineno)
elif ctx == 'NO_OMIT_ASSGN':
if empty.match(line): continue
par = get_par(line, 1, 1, fn=fn, lineno=lineno)
if not par: continue
self.add_item(ctx, par[0], omit=False, fn=fn, lineno=lineno)
elif ctx == 'VIRTUAL_ASSGN':
if empty.match(line): continue
par = get_par(line, 2, -1, fn=fn, lineno=lineno)
if not par: continue
if (len(par[1].split('/')) > 1) and not self.check_item('SET_TYPE', par[1]):
self.add_item('SET_TYPE', par[1], type=par[0], fn=fn, lineno=lineno)
self.add_item('VIRTUAL_ASSGN', par[1], name=par[0], fn=fn, lineno=lineno)
for nm in par[2:]:
self.add_item('SET_TYPE', nm, type=par[0], fn=fn, lineno=lineno)
if not par[0][0].isupper():
warnings.warn_explicit("Virtual assignment should have uppercase name (%s)" % (par[0]),
UserWarning, fn, lineno)
elif ctx == 'SET_TYPE':
if empty.match(line): continue
par = get_par(line, 2, 2, fn=fn, lineno=lineno)
if not par: continue
if not self.check_item('VIRTUAL_ASSGN', par[0]):
self.add_item('SET_TYPE', par[0], type=par[1], fn=fn, lineno=lineno)
if not par[1][0].isupper():
warnings.warn_explicit("Set type should have uppercase name (%s)" % (par[1]),
UserWarning, fn, lineno)
elif ctx == 'ASSIGN_VALUE_TO_TYPE':
if empty.match(line): continue
par = get_par(line, 2, 2, fn=fn, lineno=lineno)
if not par: continue
self.add_item(ctx, par[0], name=par[1], fn=fn, lineno=lineno)
elif ctx == 'TYPE_RENAME':
if empty.match(line): continue
par = get_par(line, 2, 2, fn=fn, lineno=lineno)
if not par: continue
self.add_item('TYPE_RENAME', par[0], eth_name=par[1], fn=fn, lineno=lineno)
if not par[1][0].isupper():
warnings.warn_explicit("Type should be renamed to uppercase name (%s)" % (par[1]),
UserWarning, fn, lineno)
elif ctx == 'FIELD_RENAME':
if empty.match(line): continue
par = get_par(line, 2, 2, fn=fn, lineno=lineno)
if not par: continue
self.add_item('FIELD_RENAME', par[0], eth_name=par[1], fn=fn, lineno=lineno)
if not par[1][0].islower():
warnings.warn_explicit("Field should be renamed to lowercase name (%s)" % (par[1]),
UserWarning, fn, lineno)
elif ctx == 'TF_RENAME':
if empty.match(line): continue
par = get_par(line, 2, 2, fn=fn, lineno=lineno)
if not par: continue
tmpu = par[1][0].upper() + par[1][1:]
tmpl = par[1][0].lower() + par[1][1:]
self.add_item('TYPE_RENAME', par[0], eth_name=tmpu, fn=fn, lineno=lineno)
if not tmpu[0].isupper():
warnings.warn_explicit("Type should be renamed to uppercase name (%s)" % (par[1]),
UserWarning, fn, lineno)
self.add_item('FIELD_RENAME', par[0], eth_name=tmpl, fn=fn, lineno=lineno)
if not tmpl[0].islower():
warnings.warn_explicit("Field should be renamed to lowercase name (%s)" % (par[1]),
UserWarning, fn, lineno)
elif ctx in ('TYPE_ATTR', 'ETYPE_ATTR', 'FIELD_ATTR', 'EFIELD_ATTR'):
if empty.match(line): continue
par = get_par_nm(line, 1, 1, fn=fn, lineno=lineno)
if not par: continue
self.add_item(ctx, par[0], attr=par[1], fn=fn, lineno=lineno)
elif ctx == 'FN_PARS':
if empty.match(line): continue
if name:
par = get_par_nm(line, 0, 0, fn=fn, lineno=lineno)
else:
par = get_par_nm(line, 1, 1, fn=fn, lineno=lineno)
if not par: continue
if name:
self.update_item(ctx, name, pars=par[0], fn=fn, lineno=lineno)
else:
self.add_item(ctx, par[0], pars=par[1], fn=fn, lineno=lineno)
elif ctx in ('FN_HDR', 'FN_FTR', 'FN_BODY'):
result = cdirective.search(line)
if result: # directive
line = '#' + line[result.end():]
self.add_fn_line(name, ctx, line, fn=fn, lineno=lineno)
elif ctx == 'CLASS':
if empty.match(line): continue
par = get_par(line, 1, 3, fn=fn, lineno=lineno)
if not par: continue
if not set_type_to_class(name, par[0], par[1:]):
warnings.warn_explicit("Could not set type of class member %s.&%s to %s" % (name, par[0], par[1]),
UserWarning, fn, lineno)
elif ctx == 'TABLE':
self.report[name][-1]['text'] += line
def set_opt(self, opt, par, fn, lineno):
#print "set_opt: %s, %s" % (opt, par)
if opt in ("-I",):
par = self.check_par(par, 1, 1, fn, lineno)
if not par: return
self.include_path.append(par[0])
elif opt in ("-b", "BER", "CER", "DER"):
par = self.check_par(par, 0, 0, fn, lineno)
self.ectx.encoding = 'ber'
elif opt in ("PER",):
par = self.check_par(par, 0, 0, fn, lineno)
self.ectx.encoding = 'per'
elif opt in ("-p", "PROTO"):
par = self.check_par(par, 1, 1, fn, lineno)
if not par: return
self.ectx.proto_opt = par[0]
self.ectx.merge_modules = True
elif opt in ("ALIGNED",):
par = self.check_par(par, 0, 0, fn, lineno)
self.ectx.aligned = True
elif opt in ("-u", "UNALIGNED"):
par = self.check_par(par, 0, 0, fn, lineno)
self.ectx.aligned = False
elif opt in ("-d",):
par = self.check_par(par, 1, 1, fn, lineno)
if not par: return
self.ectx.dbgopt = par[0]
elif opt in ("-e",):
par = self.check_par(par, 0, 0, fn, lineno)
self.ectx.expcnf = True
elif opt in ("-S",):
par = self.check_par(par, 0, 0, fn, lineno)
self.ectx.merge_modules = True
elif opt in ("GROUP_BY_PROT",):
par = self.check_par(par, 0, 0, fn, lineno)
self.ectx.group_by_prot = True
elif opt in ("-o",):
par = self.check_par(par, 1, 1, fn, lineno)
if not par: return
self.ectx.outnm_opt = par[0]
elif opt in ("-O",):
par = self.check_par(par, 1, 1, fn, lineno)
if not par: return
self.ectx.output.outdir = par[0]
elif opt in ("-s",):
par = self.check_par(par, 1, 1, fn, lineno)
if not par: return
self.ectx.output.single_file = par[0]
elif opt in ("-k",):
par = self.check_par(par, 0, 0, fn, lineno)
self.ectx.output.keep = True
elif opt in ("-L",):
par = self.check_par(par, 0, 0, fn, lineno)
self.suppress_line = True
elif opt in ("EMBEDDED_PDV_CB",):
par = self.check_par(par, 1, 1, fn, lineno)
if not par: return
self.ectx.default_embedded_pdv_cb = par[0]
elif opt in ("EXTERNAL_TYPE_CB",):
par = self.check_par(par, 1, 1, fn, lineno)
if not par: return
self.ectx.default_external_type_cb = par[0]
elif opt in ("-r",):
par = self.check_par(par, 1, 1, fn, lineno)
if not par: return
self.ectx.remove_prefix = par[0]
else:
warnings.warn_explicit("Unknown option %s" % (opt),
UserWarning, fn, lineno)
def dbg_print(self):
print("\n# Conformance values")
print("%-15s %-4s %-15s %-20s %s" % ("File", "Line", "Table", "Key", "Value"))
print("-" * 100)
tbls = sorted(self.table.keys())
for t in tbls:
keys = sorted(self.table[t].keys())
for k in keys:
print("%-15s %4s %-15s %-20s %s" % (
self.table[t][k]['fn'], self.table[t][k]['lineno'], t, k, str(self.table[t][k][self.tblcfg[t]['val_nm']])))
def unused_report(self):
tbls = sorted(self.table.keys())
for t in tbls:
if not self.tblcfg[t]['chk_use']: continue
keys = sorted(self.table[t].keys())
for k in keys:
if not self.table[t][k]['used']:
warnings.warn_explicit("Unused %s for %s" % (t, k),
UserWarning, self.table[t][k]['fn'], self.table[t][k]['lineno'])
fnms = list(self.fn.keys())
fnms.sort()
for f in fnms:
keys = sorted(self.fn[f].keys())
for k in keys:
if not self.fn[f][k]: continue
if not self.fn[f][k]['used']:
warnings.warn_explicit("Unused %s for %s" % (k, f),
UserWarning, self.fn[f][k]['fn'], self.fn[f][k]['lineno'])
#--- EthOut -------------------------------------------------------------------
class EthOut:
    """Manager for the output files produced by the compiler.

    Keeps a registry of every file created during a run so that repeated
    opens of the same file append instead of truncating, and so that
    make_single_file() can merge the generated fragments into one
    packet-<proto>.c/.h and remove the intermediates (unless kept).
    """
    def __init__(self):
        self.ectx = None               # EthCtx back-reference, set by the owner
        self.outnm = None              # base name used by output_fname()
        self.outdir = '.'              # output directory (-O option)
        self.single_file = None        # single-file base name (-s option)
        self.created_files = {}        # abs. path -> "keep anyway" flag
        self.created_files_ord = []    # creation order of the paths above
        self.keep = False              # keep intermediate files (-k option)
    def outcomment(self, ln, comment=None):
        # Format 'ln' as a comment line: with a custom leader (e.g. '#' for
        # .cnf output) or as a fixed-width C comment by default.
        if comment:
            return '%s %s\n' % (comment, ln)
        else:
            return '/* %-74s */\n' % (ln)
    def created_file_add(self, name, keep_anyway):
        """Record a created file; a True keep_anyway flag is sticky."""
        name = os.path.normcase(os.path.abspath(name))
        if name not in self.created_files:
            self.created_files_ord.append(name)
            self.created_files[name] = keep_anyway
        else:
            self.created_files[name] = self.created_files[name] or keep_anyway
    def created_file_exists(self, name):
        """Return True when 'name' was already created during this run."""
        name = os.path.normcase(os.path.abspath(name))
        return name in self.created_files
    #--- output_fname -------------------------------------------------------
    def output_fname(self, ftype, ext='c'):
        """Build an output file name: packet-<outnm>[-<ftype>].<ext>.

        .cnf files get no 'packet-' prefix.
        """
        fn = ''
        if not ext in ('cnf',):
            fn += 'packet-'
        fn += self.outnm
        if (ftype):
            fn += '-' + ftype
        fn += '.' + ext
        return fn
    #--- file_open -------------------------------------------------------
    def file_open(self, ftype, ext='c'):
        """Open (append if already created) an output file and emit headers."""
        fn = self.output_fname(ftype, ext=ext)
        if self.created_file_exists(fn):
            fx = open(fn, 'a')
        else:
            fx = open(fn, 'w')
        comment = None
        if ext in ('cnf',):
            comment = '#'
            fx.write(self.fhdr(fn, comment = comment))
        else:
            if (not self.single_file and not self.created_file_exists(fn)):
                fx.write(self.fhdr(fn))
        if not self.ectx.merge_modules:
            # Per-module banner so merged output is attributable to its module.
            fx.write('\n')
            mstr = "--- "
            if self.ectx.groups():
                mstr += "Module"
                if (len(self.ectx.modules) > 1):
                    mstr += "s"
                for (m, p) in self.ectx.modules:
                    mstr += " %s" % (m)
            else:
                mstr += "Module %s" % (self.ectx.Module())
            mstr += " --- --- ---"
            fx.write(self.outcomment(mstr, comment))
            fx.write('\n')
        return fx
    #--- file_close -------------------------------------------------------
    def file_close(self, fx, discard=False, keep_anyway=False):
        """Close an output file; 'discard' removes it unless already registered."""
        fx.close()
        if discard and not self.created_file_exists(fx.name):
            os.unlink(fx.name)
        else:
            self.created_file_add(fx.name, keep_anyway)
    #--- fhdr -------------------------------------------------------
    def fhdr(self, fn, comment=None):
        """Standard generated-file header: warning banner plus command line."""
        out = ''
        out += self.outcomment('Do not modify this file. Changes will be overwritten.', comment)
        out += self.outcomment('Generated automatically by the ASN.1 to Wireshark dissector compiler', comment)
        out += self.outcomment(os.path.basename(fn), comment)
        out += self.outcomment(' '.join(sys.argv), comment)
        out += '\n'
        # Make Windows path separator look like Unix path separator
        out = out.replace('\\', '/')
        # Change absolute paths and relative paths generated outside
        # source directory to paths relative to asn1/<proto> subdir.
        out = re.sub(r'(\s)[./]\S*(/tools/|/epan/)', r'\1../..\2', out)
        out = re.sub(r'(\s)[./]\S*/asn1/\S*?([\s/])', r'\1.\2', out)
        return out
    #--- dbg_print -------------------------------------------------------
    def dbg_print(self):
        """List the created output files (debug aid)."""
        print("\n# Output files")
        print("\n".join(self.created_files_ord))
        print("\n")
    #--- make_single_file -------------------------------------------------------
    def make_single_file(self):
        """Merge the per-part outputs into one .c (and .h) file, then clean up."""
        if (not self.single_file): return
        in_nm = self.single_file + '.c'
        out_nm = os.path.join(self.outdir, self.output_fname(''))
        self.do_include(out_nm, in_nm)
        in_nm = self.single_file + '.h'
        if (os.path.exists(in_nm)):
            out_nm = os.path.join(self.outdir, self.output_fname('', ext='h'))
            self.do_include(out_nm, in_nm)
        if (not self.keep):
            # Remove intermediates unless individually flagged "keep anyway".
            for fn in self.created_files_ord:
                if not self.created_files[fn]:
                    os.unlink(fn)
    #--- do_include -------------------------------------------------------
    def do_include(self, out_nm, in_nm):
        """Copy in_nm to out_nm, inlining #include lines that refer to files
        generated during this run; #line markers preserve debuggability."""
        def check_file(fn, fnlist):
            # Accept only files that this run created itself.
            fnfull = os.path.normcase(os.path.abspath(fn))
            if (fnfull in fnlist and os.path.exists(fnfull)):
                return os.path.normpath(fn)
            return None
        fin = open(in_nm, "r")
        fout = open(out_nm, "w")
        fout.write(self.fhdr(out_nm))
        fout.write('/* Input file: ' + os.path.basename(in_nm) +' */\n')
        fout.write('\n')
        fout.write('#line %u "%s"\n' % (1, rel_dissector_path(in_nm)))
        include = re.compile(r'^\s*#\s*include\s+[<"](?P<fname>[^>"]+)[>"]', re.IGNORECASE)
        cont_linenum = 0;
        while (True):
            cont_linenum = cont_linenum + 1;
            line = fin.readline()
            if (line == ''): break
            ifile = None
            result = include.search(line)
            #if (result): print os.path.normcase(os.path.abspath(result.group('fname')))
            if (result):
                # Resolve the include relative to the input file, then the
                # output directory, then the current directory.
                ifile = check_file(os.path.join(os.path.split(in_nm)[0], result.group('fname')), self.created_files)
                if (not ifile):
                    ifile = check_file(os.path.join(self.outdir, result.group('fname')), self.created_files)
                if (not ifile):
                    ifile = check_file(result.group('fname'), self.created_files)
            if (ifile):
                fout.write('\n')
                fout.write('/*--- Included file: ' + ifile + ' ---*/\n')
                fout.write('#line %u "%s"\n' % (1, rel_dissector_path(ifile)))
                finc = open(ifile, "r")
                fout.write(finc.read())
                fout.write('\n')
                fout.write('/*--- End of included file: ' + ifile + ' ---*/\n')
                fout.write('#line %u "%s"\n' % (cont_linenum+1, rel_dissector_path(in_nm)) )
                finc.close()
            else:
                fout.write(line)
        fout.close()
        fin.close()
#--- Node ---------------------------------------------------------------------
class Node:
    """Generic AST node: holds a 'type' string plus arbitrary keyword
    attributes, and renders itself as an indented tree for debugging."""
    def __init__(self, *args, **kw):
        if not args:
            self.type = self.__class__.__name__
        else:
            assert (len(args) == 1)
            self.type = args[0]
        self.__dict__.update(kw)
    def str_child(self, key, child, depth):
        """Render one attribute (possibly a nested Node or list) at 'depth'."""
        pad = "  " * depth
        prefix = pad + key + ": "
        if key == 'type':  # already rendered by str_depth
            return ""
        if isinstance(child, Node):  # ugh
            return prefix + "\n" + child.str_depth(depth + 1)
        if isinstance(child, list):
            rendered = []
            for item in child:
                if isinstance(item, Node):
                    rendered.append(item.str_depth(depth + 1))
                else:
                    rendered.append(pad + " " + str(item) + "\n")
            return prefix + "[\n" + ''.join(rendered) + pad + "]\n"
        return prefix + str(child) + "\n"
    def str_depth(self, depth):  # ugh
        """Render this node and all attributes, indented to 'depth'."""
        pad = "  " * depth
        parts = [pad + self.type]
        parts.append("".join(self.str_child(key, val, depth + 1)
                             for key, val in list(self.__dict__.items())))
        return "\n".join(parts)
    def __repr__(self):
        return "\n" + self.str_depth(0)
    def to_python(self, ctx):
        return self.str_depth(ctx.indent_lev)
    def eth_reg(self, ident, ectx):
        # Default: nothing to register; subclasses override.
        pass
    def fld_obj_repr(self, ectx):
        return "/* TO DO %s */" % (str(self))
#--- ValueAssignment -------------------------------------------------------------
class ValueAssignment (Node):
    """ASN.1 value assignment (identifier Type ::= value)."""
    def __init__(self, *args, **kw):
        Node.__init__(self, *args, **kw)
    def eth_reg(self, ident, ectx):
        # Honour #.OMIT_ASSIGNMENT for this value before registering it.
        if ectx.conform.omit_assignment('V', self.ident, ectx.Module()):
            return
        ectx.eth_reg_vassign(self)
        ectx.eth_reg_value(self.ident, self.typ, self.val)
#--- ObjectAssignment -------------------------------------------------------------
class ObjectAssignment (Node):
    """ASN.1 information object assignment (identifier CLASS ::= { ... })."""
    def __init__(self, *args, **kw):
        Node.__init__(self, *args, **kw)
    def __eq__(self, other):
        """Two object assignments are equal when their class matches and all
        field values compare equal (Node fields via fld_obj_eq, others via
        their string form)."""
        if self.cls != other.cls:
            return False
        if len(self.val) != len(other.val):
            return False
        for field in list(self.val.keys()):
            if field not in other.val:
                return False
            mine, theirs = self.val[field], other.val[field]
            if isinstance(mine, Node) and isinstance(theirs, Node):
                if not mine.fld_obj_eq(theirs):
                    return False
            elif str(mine) != str(theirs):
                return False
        return True
    def eth_reg(self, ident, ectx):
        def make_virtual_type(cls, field, prefix):
            # Replace a non-reference field type with a named virtual type so
            # it can be registered/dissected like a normal type assignment.
            if isinstance(self.val, str):
                return
            if field in self.val and not isinstance(self.val[field], Type_Ref):
                vnm = prefix + '-' + self.ident
                virtual_tr = Type_Ref(val=vnm)
                real_type = self.val[field]
                self.val[field] = virtual_tr
                ectx.eth_reg_assign(vnm, real_type, virt=True)
                ectx.eth_reg_type(vnm, real_type)
                real_type.eth_reg_sub(vnm, ectx)
            if field in self.val and ectx.conform.check_item('PDU', cls + '.' + field):
                ectx.eth_reg_field(self.val[field].val, self.val[field].val,
                                   impl=self.val[field].HasImplicitTag(ectx),
                                   pdu=ectx.conform.use_item('PDU', cls + '.' + field))
            return
        # end of make_virtual_type()
        if ectx.conform.omit_assignment('V', self.ident, ectx.Module()):
            return  # Assignment to omit
        self.module = ectx.Module()
        ectx.eth_reg_oassign(self)
        if self.cls in ('TYPE-IDENTIFIER', 'ABSTRACT-SYNTAX'):
            make_virtual_type(self.cls, '&Type', 'TYPE')
        elif self.cls == 'OPERATION':
            make_virtual_type(self.cls, '&ArgumentType', 'ARG')
            make_virtual_type(self.cls, '&ResultType', 'RES')
        elif self.cls == 'ERROR':
            make_virtual_type(self.cls, '&ParameterType', 'PAR')
#--- Type ---------------------------------------------------------------------
class Type (Node):
    """Base class for all parsed ASN.1 types.

    Carries the attributes common to every type (name, constraint, tag list)
    and drives registration and code generation; concrete types override the
    eth_* hooks.  Fix in this revision: eth_type_default_body() now accepts
    'tname' (eth_type_fn() calls it with two arguments, so the base fallback
    previously raised TypeError instead of printing its diagnostic).
    """
    def __init__(self, *args, **kw):
        self.name = None        # field/assignment name, set via SetName()
        self.constr = None      # accumulated Constraint, see AddConstraint()
        self.tags = []          # tag list; AddTag() prepends (outermost first)
        self.named_list = None
        Node.__init__(self, *args, **kw)
    def IsNamed(self):
        """True when this type has been given a name."""
        return self.name is not None
    def HasConstraint(self):
        """True when at least one constraint has been attached."""
        return self.constr is not None
    def HasSizeConstraint(self):
        return self.HasConstraint() and self.constr.IsSize()
    def HasValueConstraint(self):
        return self.HasConstraint() and self.constr.IsValue()
    def HasPermAlph(self):
        """True when constrained by a permitted alphabet."""
        return self.HasConstraint() and self.constr.IsPermAlph()
    def HasContentsConstraint(self):
        return self.HasConstraint() and self.constr.IsContents()
    def HasOwnTag(self):
        return len(self.tags) > 0
    def HasImplicitTag(self, ectx):
        return (self.HasOwnTag() and self.tags[0].IsImplicit(ectx))
    def IndetermTag(self, ectx):
        # Overridden by types whose effective tag cannot be determined.
        return False
    def AddTag(self, tag):
        # Prepend: the outermost tag ends up first in the list.
        self.tags[0:0] = [tag]
    def GetTag(self, ectx):
        """Return (class, number) of the effective tag for this type."""
        #print "GetTag(%s)\n" % self.name;
        if (self.HasOwnTag()):
            return self.tags[0].GetTag(ectx)
        else:
            return self.GetTTag(ectx)
    def GetTTag(self, ectx):
        """Tag of the underlying universal type; must be overridden."""
        print("#Unhandled GetTTag() in %s" % (self.type))
        print(self.str_depth(1))
        return ('BER_CLASS_unknown', 'TAG_unknown')
    def SetName(self, name):
        self.name = name
    def AddConstraint(self, constr):
        # Multiple constraints are combined into an intersection.
        if not self.HasConstraint():
            self.constr = constr
        else:
            self.constr = Constraint(type = 'Intersection', subtype = [self.constr, constr])
    def eth_tname(self):
        """Synthesized name for anonymous types (unique per instance)."""
        return '#' + self.type + '_' + str(id(self))
    def eth_ftype(self, ectx):
        """(Wireshark field type, display base); overridden by subclasses."""
        return ('FT_NONE', 'BASE_NONE')
    def eth_strings(self):
        """Name of the value_string table for this type ('NULL' when none)."""
        return 'NULL'
    def eth_omit_field(self):
        return False
    def eth_need_tree(self):
        return False
    def eth_has_vals(self):
        return False
    def eth_has_enum(self, tname, ectx):
        return self.eth_has_vals() and (ectx.eth_type[tname]['enum'] & EF_ENUM)
    def eth_need_pdu(self, ectx):
        return None
    def eth_named_bits(self):
        return None
    def eth_reg_sub(self, ident, ectx):
        # Default: no subtypes to register; overridden by constructed types.
        pass
    def get_components(self, ectx):
        print("#Unhandled get_components() in %s" % (self.type))
        print(self.str_depth(1))
        return []
    def sel_req(self, sel, ectx):
        # Selection ("type < Choice") only makes sense for CHOICE types.
        print("#Selection '%s' required for non-CHOICE type %s" % (sel, self.type))
        print(self.str_depth(1))
    def fld_obj_eq(self, other):
        return isinstance(other, Type) and (self.eth_tname() == other.eth_tname())
    def eth_reg(self, ident, ectx, tstrip=0, tagflag=False, selflag=False, idx='', parent=None):
        """Register this type (and recursively its components) with EthCtx.

        ident   -- hierarchical identifier of the enclosing field ('' / None
                   for a top-level assignment)
        tstrip  -- number of already-processed tags to skip
        tagflag -- True when called from within TaggedType expansion
        selflag -- True when registering a CHOICE selection
        """
        #print "eth_reg(): %s, ident=%s, tstrip=%d, tagflag=%s, selflag=%s, parent=%s" %(self.type, ident, tstrip, str(tagflag), str(selflag), str(parent))
        #print " ", self
        if (ectx.NeedTags() and (len(self.tags) > tstrip)):
            # Wrap the type in one TaggedType per remaining tag and re-register.
            tagged_type = self
            for i in range(len(self.tags)-1, tstrip-1, -1):
                tagged_type = TaggedType(val=tagged_type, tstrip=i)
                tagged_type.AddTag(self.tags[i])
            if not tagflag:  # 1st tagged level
                if self.IsNamed() and not selflag:
                    tagged_type.SetName(self.name)
            tagged_type.eth_reg(ident, ectx, tstrip=1, tagflag=tagflag, idx=idx, parent=parent)
            return
        nm = ''
        if ident and self.IsNamed() and not tagflag and not selflag:
            nm = ident + '/' + self.name
        elif ident:
            nm = ident
        elif self.IsNamed():
            nm = self.name
        if not ident and ectx.conform.omit_assignment('T', nm, ectx.Module()): return  # Assignment to omit
        if not ident:  # Assignment
            ectx.eth_reg_assign(nm, self)
            if self.type == 'Type_Ref' and not self.tr_need_own_fn(ectx):
                ectx.eth_reg_type(nm, self)
        virtual_tr = Type_Ref(val=ectx.conform.use_item('SET_TYPE', nm))
        if (self.type == 'Type_Ref') or ectx.conform.check_item('SET_TYPE', nm):
            if ident and (ectx.conform.check_item('TYPE_RENAME', nm) or ectx.conform.get_fn_presence(nm) or selflag):
                if ectx.conform.check_item('SET_TYPE', nm):
                    ectx.eth_reg_type(nm, virtual_tr)  # dummy Type Reference
                else:
                    ectx.eth_reg_type(nm, self)  # new type
                trnm = nm
            elif ectx.conform.check_item('SET_TYPE', nm):
                trnm = ectx.conform.use_item('SET_TYPE', nm)
            elif (self.type == 'Type_Ref') and self.tr_need_own_fn(ectx):
                ectx.eth_reg_type(nm, self)  # need own function, e.g. for constraints
                trnm = nm
            else:
                trnm = self.val
        else:
            ectx.eth_reg_type(nm, self)
            trnm = nm
        if ectx.conform.check_item('VIRTUAL_ASSGN', nm):
            vnm = ectx.conform.use_item('VIRTUAL_ASSGN', nm)
            ectx.eth_reg_assign(vnm, self, virt=True)
            ectx.eth_reg_type(vnm, self)
            self.eth_reg_sub(vnm, ectx)
        if parent and (ectx.type[parent]['val'].type == 'TaggedType'):
            ectx.type[parent]['val'].eth_set_val_name(parent, trnm, ectx)
        if ident and not tagflag and not self.eth_omit_field():
            ectx.eth_reg_field(nm, trnm, idx=idx, parent=parent, impl=self.HasImplicitTag(ectx))
        if ectx.conform.check_item('SET_TYPE', nm):
            virtual_tr.eth_reg_sub(nm, ectx)
        else:
            self.eth_reg_sub(nm, ectx)
    def eth_get_size_constr(self, ectx):
        """Return (min, max, extensible) of the SIZE constraint as C tokens.

        Unbounded ends map to 'NO_BOUND'; extensibility to 'TRUE'/'FALSE'.
        """
        (minv, maxv, ext) = ('MIN', 'MAX', False)
        if self.HasSizeConstraint():
            if self.constr.IsSize():
                (minv, maxv, ext) = self.constr.GetSize(ectx)
            if (self.constr.type == 'Intersection'):
                if self.constr.subtype[0].IsSize():
                    (minv, maxv, ext) = self.constr.subtype[0].GetSize(ectx)
                elif self.constr.subtype[1].IsSize():
                    (minv, maxv, ext) = self.constr.subtype[1].GetSize(ectx)
        if minv == 'MIN': minv = 'NO_BOUND'
        if maxv == 'MAX': maxv = 'NO_BOUND'
        if (ext): ext = 'TRUE'
        else: ext = 'FALSE'
        return (minv, maxv, ext)
    def eth_get_value_constr(self, ectx):
        """Return (min, max, extensible) of the value range as C tokens.

        Adds 'U' suffixes to plain integers and wraps out-of-32-bit-range
        values in the appropriate GLib 64-bit constant macros.
        """
        (minv, maxv, ext) = ('MIN', 'MAX', False)
        if self.HasValueConstraint():
            (minv, maxv, ext) = self.constr.GetValue(ectx)
            if minv == 'MIN': minv = 'NO_BOUND'
            if maxv == 'MAX': maxv = 'NO_BOUND'
            if str(minv).isdigit():
                minv += 'U'
            elif (str(minv)[0] == "-") and str(minv)[1:].isdigit():
                if (int(minv) == -(2**31)):
                    minv = "G_MININT32"
                elif (int(minv) < -(2**31)):
                    minv = "G_GINT64_CONSTANT(%s)" % (str(minv))
            if str(maxv).isdigit():
                if (int(maxv) >= 2**32):
                    maxv = "G_GUINT64_CONSTANT(%s)" % (str(maxv))
                else:
                    maxv += 'U'
        if (ext): ext = 'TRUE'
        else: ext = 'FALSE'
        return (minv, maxv, ext)
    def eth_get_alphabet_constr(self, ectx):
        """Return (alphabet, alphabet_length) C tokens for a permitted-alphabet
        constraint; a simple quoted literal gets a compile-time length."""
        (alph, alphlen) = ('NULL', '0')
        if self.HasPermAlph():
            alph = self.constr.GetPermAlph(ectx)
            if not alph:
                alph = 'NULL'
            if (alph != 'NULL'):
                if (((alph[0] + alph[-1]) == '""') and (not alph.count('"', 1, -1))):
                    alphlen = str(len(alph) - 2)
                else:
                    alphlen = 'strlen(%s)' % (alph)
        return (alph, alphlen)
    def eth_type_vals(self, tname, ectx):
        """Emit the value_string table; overridden by enumerated-like types."""
        if self.eth_has_vals():
            print("#Unhandled eth_type_vals('%s') in %s" % (tname, self.type))
            print(self.str_depth(1))
        return ''
    def eth_type_enum(self, tname, ectx):
        """Emit the C enum; overridden by enumerated-like types."""
        if self.eth_has_enum(tname, ectx):
            print("#Unhandled eth_type_enum('%s') in %s" % (tname, self.type))
            print(self.str_depth(1))
        return ''
    def eth_type_default_table(self, ectx, tname):
        """Per-type static table (e.g. sequence/choice member lists)."""
        return ''
    def eth_type_default_body(self, ectx, tname=None):
        """Default dissection-function body; overridden by subclasses (which
        take (ectx, tname)).  'tname' was missing from this base signature,
        so the fallback raised TypeError when eth_type_fn() called it with
        two arguments; it is accepted (and unused) here instead."""
        print("#Unhandled eth_type_default_body() in %s" % (self.type))
        print(self.str_depth(1))
        return ''
    def eth_type_default_pars(self, ectx, tname):
        """Default substitution parameters for the generated function body."""
        pars = {
          'TNAME' : tname,
          'ER' : ectx.encp(),
          'FN_VARIANT' : '',
          'TREE' : 'tree',
          'TVB' : 'tvb',
          'OFFSET' : 'offset',
          'ACTX' : 'actx',
          'HF_INDEX' : 'hf_index',
          'VAL_PTR' : 'NULL',
          'IMPLICIT_TAG' : 'implicit_tag',
        }
        if (ectx.eth_type[tname]['tree']):
            pars['ETT_INDEX'] = ectx.eth_type[tname]['tree']
        if (ectx.merge_modules):
            pars['PROTOP'] = ''
        else:
            pars['PROTOP'] = ectx.eth_type[tname]['proto'] + '_'
        return pars
    def eth_type_fn(self, proto, tname, ectx):
        """Generate the complete dissection function for type 'tname'."""
        body = self.eth_type_default_body(ectx, tname)
        pars = self.eth_type_default_pars(ectx, tname)
        if ectx.conform.check_item('FN_PARS', tname):
            pars.update(ectx.conform.use_item('FN_PARS', tname))
        elif ectx.conform.check_item('FN_PARS', ectx.eth_type[tname]['ref'][0]):
            pars.update(ectx.conform.use_item('FN_PARS', ectx.eth_type[tname]['ref'][0]))
        pars['DEFAULT_BODY'] = body
        # Parameters may reference each other via %(NAME)s; iterate a few
        # times so nested substitutions are fully expanded.
        for i in range(4):
            for k in list(pars.keys()):
                try:
                    pars[k] = pars[k] % pars
                except (ValueError,TypeError):
                    raise sys.exc_info()[0]("%s\n%s" % (str(pars), sys.exc_info()[1]))
        out = '\n'
        out += self.eth_type_default_table(ectx, tname) % pars
        out += ectx.eth_type_fn_hdr(tname)
        out += ectx.eth_type_fn_body(tname, body, pars=pars)
        out += ectx.eth_type_fn_ftr(tname)
        return out
#--- Value --------------------------------------------------------------------
class Value (Node):
    """Base class for parsed ASN.1 values."""
    def __init__(self,*args, **kw) :
        self.name = None
        Node.__init__ (self,*args, **kw)
    def SetName(self, name) :
        self.name = name
    def to_str(self, ectx):
        # Default rendering: the raw value converted to a string.
        return str(self.val)
    def get_dep(self):
        # Name of a value this one depends on (None when self-contained).
        return None
    def fld_obj_repr(self, ectx):
        return self.to_str(ectx)
#--- Value_Ref -----------------------------------------------------------------
class Value_Ref (Value):
    """Reference to a value assigned elsewhere."""
    def to_str(self, ectx):
        # Render as the mangled C identifier of the referenced value.
        return asn2c(self.val)
#--- ObjectClass ---------------------------------------------------------------------
class ObjectClass (Node):
    """ASN.1 information object class."""
    def __init__(self,*args, **kw) :
        self.name = None
        Node.__init__ (self,*args, **kw)
    def SetName(self, name):
        self.name = name
        # Register the name so the parser recognizes it as a class identifier.
        add_class_ident(self.name)
    def eth_reg(self, ident, ectx):
        if ectx.conform.omit_assignment('C', self.name, ectx.Module()): return # Assignment to omit
        ectx.eth_reg_objectclass(self.name, self)
#--- Class_Ref -----------------------------------------------------------------
class Class_Ref (ObjectClass):
    """Reference to an information object class defined elsewhere."""
    pass
#--- ObjectClassDefn ---------------------------------------------------------------------
class ObjectClassDefn (ObjectClass):
    """Full definition of an information object class (with its field list)."""
    def reg_types(self):
        # Record each field's declared type so object definitions can be
        # resolved against the class later.
        for fld in self.fields:
            repr = fld.fld_repr()
            set_type_to_class(self.name, repr[0], repr[1:])
#--- Tag ---------------------------------------------------------------
class Tag (Node):
    """Explicit/implicit ASN.1 tag attached to a type."""
    def to_python(self, ctx):
        return 'asn1.TYPE(%s,%s)' % (mk_tag_str(ctx, self.tag.cls,
                                                self.tag_typ,
                                                self.tag.num),
                                     self.typ.to_python(ctx))
    def IsImplicit(self, ectx):
        # Implicit either explicitly, or via the module default whenever the
        # module's tagging default is not EXPLICIT.
        if self.mode == 'IMPLICIT':
            return True
        return (self.mode == 'default') and (ectx.tag_def != 'EXPLICIT')
    def GetTag(self, ectx):
        """Return (BER class constant, tag number)."""
        ber_class = {'UNIVERSAL':   'BER_CLASS_UNI',
                     'APPLICATION': 'BER_CLASS_APP',
                     'CONTEXT':     'BER_CLASS_CON',
                     'PRIVATE':     'BER_CLASS_PRI'}
        return (ber_class.get(self.cls, ''), self.num)
    def eth_tname(self):
        """Short synthesized name: class letter + tag number."""
        letter = {'UNIVERSAL': 'U', 'APPLICATION': 'A',
                  'CONTEXT': 'C', 'PRIVATE': 'P'}
        return letter.get(self.cls, '') + str(self.num)
#--- Constraint ---------------------------------------------------------------
constr_cnt = 0  # NOTE(review): module-level constraint counter; not referenced in this chunk
class Constraint (Node):
    """An ASN.1 constraint node.

    ``self.type`` names the constraint kind ('Size', 'SingleValue',
    'ValueRange', 'From', 'Contents', 'Intersection', 'Union', ...);
    ``self.subtype`` carries the kind-specific payload: a value, a nested
    Constraint, or a two-element list of nested Constraints.
    """
    def to_python (self, ctx):
        # py_asn1 output ignores constraints entirely.
        print("Ignoring constraint:", self.type)
        return self.subtype.typ.to_python (ctx)
    def __str__ (self):
        return "Constraint: type=%s, subtype=%s" % (self.type, self.subtype)
    def eth_tname(self):
        # Anonymous constraints get a unique name based on object identity.
        return '#' + self.type + '_' + str(id(self))
    def IsSize(self):
        """True for a SIZE constraint, or an intersection containing one.

        Fix: the expression previously ended with a dangling backslash
        line continuation that only parsed because a blank line happened
        to follow; the fragile continuation has been removed.
        """
        return (self.type == 'Size' and self.subtype.IsValue()) \
               or (self.type == 'Intersection' and (self.subtype[0].IsSize() or self.subtype[1].IsSize()))
    def GetSize(self, ectx):
        """Return (min, max, extensible) bounds for a SIZE constraint."""
        (minv, maxv, ext) = ('MIN', 'MAX', False)
        if self.IsSize():
            if self.type == 'Size':
                (minv, maxv, ext) = self.subtype.GetValue(ectx)
            elif self.type == 'Intersection':
                # take the bounds from whichever branch carries the SIZE
                if self.subtype[0].IsSize() and not self.subtype[1].IsSize():
                    (minv, maxv, ext) = self.subtype[0].GetSize(ectx)
                elif not self.subtype[0].IsSize() and self.subtype[1].IsSize():
                    (minv, maxv, ext) = self.subtype[1].GetSize(ectx)
        return (minv, maxv, ext)
    def IsValue(self):
        """True when this constraint restricts values (single value or
        range), possibly through an intersection or union."""
        return self.type == 'SingleValue' \
               or self.type == 'ValueRange' \
               or (self.type == 'Intersection' and (self.subtype[0].IsValue() or self.subtype[1].IsValue())) \
               or (self.type == 'Union' and (self.subtype[0].IsValue() and self.subtype[1].IsValue()))
    def GetValue(self, ectx):
        """Return (min, max, extensible) for a value constraint."""
        (minv, maxv, ext) = ('MIN', 'MAX', False)
        if self.IsValue():
            if self.type == 'SingleValue':
                minv = ectx.value_get_eth(self.subtype)
                maxv = ectx.value_get_eth(self.subtype)
                ext = hasattr(self, 'ext') and self.ext
            elif self.type == 'ValueRange':
                minv = ectx.value_get_eth(self.subtype[0])
                maxv = ectx.value_get_eth(self.subtype[1])
                ext = hasattr(self, 'ext') and self.ext
            elif self.type == 'Intersection':
                # intersection narrows the range (max of mins, min of maxs)
                if self.subtype[0].IsValue() and not self.subtype[1].IsValue():
                    (minv, maxv, ext) = self.subtype[0].GetValue(ectx)
                elif not self.subtype[0].IsValue() and self.subtype[1].IsValue():
                    (minv, maxv, ext) = self.subtype[1].GetValue(ectx)
                elif self.subtype[0].IsValue() and self.subtype[1].IsValue():
                    v0 = self.subtype[0].GetValue(ectx)
                    v1 = self.subtype[1].GetValue(ectx)
                    (minv, maxv, ext) = (ectx.value_max(v0[0],v1[0]), ectx.value_min(v0[1],v1[1]), v0[2] and v1[2])
            elif self.type == 'Union':
                # union widens the range (min of mins, max of maxs)
                if self.subtype[0].IsValue() and self.subtype[1].IsValue():
                    v0 = self.subtype[0].GetValue(ectx)
                    v1 = self.subtype[1].GetValue(ectx)
                    (minv, maxv, ext) = (ectx.value_min(v0[0],v1[0]), ectx.value_max(v0[1],v1[1]), v0[2] or v1[2])
        return (minv, maxv, ext)
    def IsAlphabet(self):
        """True when this constraint describes a character alphabet."""
        return self.type == 'SingleValue' \
               or self.type == 'ValueRange' \
               or (self.type == 'Intersection' and (self.subtype[0].IsAlphabet() or self.subtype[1].IsAlphabet())) \
               or (self.type == 'Union' and (self.subtype[0].IsAlphabet() and self.subtype[1].IsAlphabet()))
    def GetAlphabet(self, ectx):
        """Return the constrained alphabet as a C string literal, or None."""
        alph = None
        if self.IsAlphabet():
            if self.type == 'SingleValue':
                alph = ectx.value_get_eth(self.subtype)
            elif self.type == 'ValueRange':
                # expand "a".."z"-style single-character ranges
                if ((len(self.subtype[0]) == 3) and ((self.subtype[0][0] + self.subtype[0][-1]) == '""') \
                    and (len(self.subtype[1]) == 3) and ((self.subtype[1][0] + self.subtype[1][-1]) == '""')):
                    alph = '"'
                    for c in range(ord(self.subtype[0][1]), ord(self.subtype[1][1]) + 1):
                        alph += chr(c)
                    alph += '"'
            elif self.type == 'Union':
                if self.subtype[0].IsAlphabet() and self.subtype[1].IsAlphabet():
                    a0 = self.subtype[0].GetAlphabet(ectx)
                    a1 = self.subtype[1].GetAlphabet(ectx)
                    if (((a0[0] + a0[-1]) == '""') and not a0.count('"', 1, -1) \
                        and ((a1[0] + a1[-1]) == '""') and not a1.count('"', 1, -1)):
                        # both are plain quoted strings: merge their contents
                        alph = '"' + a0[1:-1] + a1[1:-1] + '"'
                    else:
                        alph = a0 + ' ' + a1
        return alph
    def IsPermAlph(self):
        """True for a permitted-alphabet (FROM ...) constraint.

        Fix: dangling backslash continuation removed (see IsSize).
        """
        return self.type == 'From' and self.subtype.IsAlphabet() \
               or (self.type == 'Intersection' and (self.subtype[0].IsPermAlph() or self.subtype[1].IsPermAlph()))
    def GetPermAlph(self, ectx):
        """Return the permitted alphabet string, or None."""
        alph = None
        if self.IsPermAlph():
            if self.type == 'From':
                alph = self.subtype.GetAlphabet(ectx)
            elif self.type == 'Intersection':
                if self.subtype[0].IsPermAlph() and not self.subtype[1].IsPermAlph():
                    alph = self.subtype[0].GetPermAlph(ectx)
                elif not self.subtype[0].IsPermAlph() and self.subtype[1].IsPermAlph():
                    alph = self.subtype[1].GetPermAlph(ectx)
        return alph
    def IsContents(self):
        """True for a contents (CONTAINING ...) constraint.

        Fix: dangling backslash continuation removed (see IsSize).
        """
        return self.type == 'Contents' \
               or (self.type == 'Intersection' and (self.subtype[0].IsContents() or self.subtype[1].IsContents()))
    def GetContents(self, ectx):
        """Return the name of the contained type, or None."""
        contents = None
        if self.IsContents():
            if self.type == 'Contents':
                if self.subtype.type == 'Type_Ref':
                    contents = self.subtype.val
            elif self.type == 'Intersection':
                if self.subtype[0].IsContents() and not self.subtype[1].IsContents():
                    contents = self.subtype[0].GetContents(ectx)
                elif not self.subtype[0].IsContents() and self.subtype[1].IsContents():
                    contents = self.subtype[1].GetContents(ectx)
        return contents
    def IsNegativ(self):
        """True when the constrained value (or the range start) is negative."""
        def is_neg(sval):
            return isinstance(sval, str) and (sval[0] == '-')
        if self.type == 'SingleValue':
            return is_neg(self.subtype)
        elif self.type == 'ValueRange':
            if self.subtype[0] == 'MIN': return True
            return is_neg(self.subtype[0])
        return False
    def eth_constrname(self):
        """Build a short identifier describing this constraint, used as a
        suffix of generated type names (e.g. SIZE_1_32)."""
        def int2str(val):
            if isinstance(val, Value_Ref):
                return asn2c(val.val)
            try:
                if (int(val) < 0):
                    return 'M' + str(-int(val))  # 'M' marks a negative bound
                else:
                    return str(int(val))
            except (ValueError, TypeError):
                return asn2c(str(val))
        ext = ''
        if hasattr(self, 'ext') and self.ext:
            ext = '_'
        if self.type == 'SingleValue':
            return int2str(self.subtype) + ext
        elif self.type == 'ValueRange':
            return int2str(self.subtype[0]) + '_' + int2str(self.subtype[1]) + ext
        elif self.type == 'Size':
            return 'SIZE_' + self.subtype.eth_constrname() + ext
        else:
            if (not hasattr(self, 'constr_num')):
                # anonymous constraint: hand out a sequential number
                global constr_cnt
                constr_cnt += 1
                self.constr_num = constr_cnt
            return 'CONSTR%03d%s' % (self.constr_num, ext)
class Module (Node):
    """An ASN.1 module definition (name, tag default and body)."""
    def to_python (self, ctx):
        # Propagate the module's default tagging mode into the context
        # before emitting the body.
        ctx.tag_def = self.tag_def.dfl_tag
        return """#%s
%s""" % (self.ident, self.body.to_python (ctx))
    def get_name(self):
        # Module name as written in the ASN.1 source.
        return self.ident.val
    def get_proto(self, ectx):
        """Return the protocol name for this module: an explicitly set
        ectx.proto wins, otherwise the conformance file may remap the
        module name (defaulting to the module name itself)."""
        if (ectx.proto):
            prot = ectx.proto
        else:
            prot = ectx.conform.use_item('MODULE', self.get_name(), val_dflt=self.get_name())
        return prot
    def to_eth(self, ectx):
        """Register this module with ectx and process its body."""
        ectx.tags_def = 'EXPLICIT' # default = explicit
        ectx.proto = self.get_proto(ectx)
        ectx.tag_def = self.tag_def.dfl_tag
        ectx.eth_reg_module(self)
        self.body.to_eth(ectx)
class Module_Body (Node):
    """The body of a module: exports, imports and the assignment list."""
    def to_python (self, ctx):
        # XXX handle exports, imports.
        l = [x.to_python (ctx) for x in self.assign_list]
        l = [a for a in l if a != '']
        return "\n".join (l)
    def to_eth(self, ectx):
        """Register exports, imported symbols and all assignments with ectx."""
        # Exports
        ectx.eth_exports(self.exports)
        # Imports: record per-module dependencies and import each symbol
        # according to its kind (type / value / class).
        for i in self.imports:
            mod = i.module.val
            proto = ectx.conform.use_item('MODULE', mod, val_dflt=mod)
            ectx.eth_module_dep_add(ectx.Module(), mod)
            for s in i.symbol_list:
                if isinstance(s, Type_Ref):
                    ectx.eth_import_type(s.val, mod, proto)
                elif isinstance(s, Value_Ref):
                    ectx.eth_import_value(s.val, mod, proto)
                elif isinstance(s, Class_Ref):
                    ectx.eth_import_class(s.val, mod, proto)
                else:
                    msg = 'Unknown kind of imported symbol %s from %s' % (str(s), mod)
                    warnings.warn_explicit(msg, UserWarning, '', 0)
        # AssignmentList
        for a in self.assign_list:
            a.eth_reg('', ectx)
class Default_Tags (Node):
    """AST node for a module's "... TAGS" default clause.

    The tag default is consumed by Module (via tag_def.dfl_tag); this node
    itself is never rendered.
    """
    def to_python (self, ctx):
        # Reaching this is a programming error: the node must not be
        # emitted directly.
        assert 0
# XXX should just calculate dependencies as we go along.
def calc_dependencies (node, dict, trace = 0):
    """Recursively walk *node* and record every Type_Ref name it mentions.

    Results are stored as keys of *dict* (mapping name -> 1).  Note the
    parameter shadows the ``dict`` builtin; the name is kept for backward
    compatibility with existing callers.  *trace* enables debug printing.
    """
    # Leaf values (ints, strings, ...) have no attribute dict to walk.
    if not hasattr (node, '__dict__'):
        if trace: print("#returning, node=", node)
        return
    if isinstance (node, Type_Ref):
        dict [node.val] = 1
        if trace: print("#Setting", node.val)
        return
    for (a, val) in list(node.__dict__.items ()):
        if trace: print("# Testing node ", node, "attr", a, " val", val)
        if a[0] == '_':
            # private attributes are not part of the AST proper
            continue
        elif isinstance (val, Node):
            calc_dependencies (val, dict, trace)
        elif isinstance (val, list):  # was: isinstance (val, type ([]))
            for v in val:
                calc_dependencies (v, dict, trace)
class Type_Assign (Node):
    """A type assignment: Name ::= Type."""
    def __init__ (self, *args, **kw):
        Node.__init__ (self, *args, **kw)
        if isinstance (self.val, Tag): # XXX replace with generalized get_typ_ignoring_tag (no-op for Node, override in Tag)
            to_test = self.val.typ
        else:
            to_test = self.val
        if isinstance (to_test, SequenceType):
            # let the SEQUENCE know the name it was assigned to
            to_test.sequence_name = self.name.name
    def to_python (self, ctx):
        # Collect the Type_Refs this type depends on so the context can
        # order assignments correctly.
        dep_dict = {}
        calc_dependencies (self.val, dep_dict, 0)
        depend_list = list(dep_dict.keys ())
        return ctx.register_assignment (self.name.name,
                                        self.val.to_python (ctx),
                                        depend_list)
class PyQuote (Node):
    """Verbatim Python text quoted in the input, passed straight through."""
    def to_python (self, ctx):
        # The context collects quoted snippets for inclusion in the output.
        quoted_text = self.val
        return ctx.register_pyquote (quoted_text)
#--- Type_Ref -----------------------------------------------------------------
class Type_Ref (Type):
    """A reference to a named type defined elsewhere (typereference)."""
    def to_python (self, ctx):
        return self.val
    def eth_reg_sub(self, ident, ectx):
        # A type reference adds a dependency on the referenced type.
        ectx.eth_dep_add(ident, self.val)
    def eth_tname(self):
        # Type name; a size constraint becomes part of the name.
        if self.HasSizeConstraint():
            return asn2c(self.val) + '_' + self.constr.eth_constrname()
        else:
            return asn2c(self.val)
    def tr_need_own_fn(self, ectx):
        # PER needs a wrapper function when a size constraint is attached.
        return ectx.Per() and self.HasSizeConstraint()
    def fld_obj_repr(self, ectx):
        return self.val
    def get_components(self, ectx):
        """Expand COMPONENTS OF: only possible for locally defined types."""
        if self.val not in ectx.type or ectx.type[self.val]['import']:
            msg = "Can not get COMPONENTS OF %s which is imported type" % (self.val)
            warnings.warn_explicit(msg, UserWarning, '', 0)
            return []
        else:
            return ectx.type[self.val]['val'].get_components(ectx)
    def GetTTag(self, ectx):
        """Top tag of the referenced type; imported types fall back to
        the conformance file or a -1/*imported*/ placeholder (cached in
        ectx.type[...]['ttag'])."""
        #print "GetTTag(%s)\n" % self.val;
        if (ectx.type[self.val]['import']):
            if 'ttag' not in ectx.type[self.val]:
                ttag = ectx.get_ttag_from_all(self.val, ectx.type[self.val]['import'])
                if not ttag and not ectx.conform.check_item('IMPORT_TAG', self.val):
                    msg = 'Missing tag information for imported type %s from %s (%s)' % (self.val, ectx.type[self.val]['import'], ectx.type[self.val]['proto'])
                    warnings.warn_explicit(msg, UserWarning, '', 0)
                    ttag = ('-1/*imported*/', '-1/*imported*/')
                ectx.type[self.val]['ttag'] = ectx.conform.use_item('IMPORT_TAG', self.val, val_dflt=ttag)
            return ectx.type[self.val]['ttag']
        else:
            return ectx.type[self.val]['val'].GetTag(ectx)
    def IndetermTag(self, ectx):
        # Imported types are assumed to have a determinable tag.
        if (ectx.type[self.val]['import']):
            return False
        else:
            return ectx.type[self.val]['val'].IndetermTag(ectx)
    def eth_type_default_pars(self, ectx, tname):
        """Template parameters: dissector function of the referenced type,
        plus size bounds when a size constraint is attached."""
        if tname:
            pars = Type.eth_type_default_pars(self, ectx, tname)
        else:
            pars = {}
        t = ectx.type[self.val]['ethname']
        pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
        pars['TYPE_REF_TNAME'] = t
        pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
        if self.HasSizeConstraint():
            (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
        return pars
    def eth_type_default_body(self, ectx, tname):
        # Emit a call to the referenced type's dissector; PER additionally
        # wraps it when a size constraint must be enforced.
        if (ectx.Ber()):
            body = ectx.eth_fn_call('%(TYPE_REF_FN)s', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
        elif (ectx.Per()):
            if self.HasSizeConstraint():
                body = ectx.eth_fn_call('dissect_%(ER)s_size_constrained_type', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),
                                             ('"%(TYPE_REF_TNAME)s"', '%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s',),))
            else:
                body = ectx.eth_fn_call('%(TYPE_REF_FN)s', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
#--- SelectionType ------------------------------------------------------------
class SelectionType (Type):
    """A selection type (alternative < ChoiceType): uses one named
    alternative of a CHOICE as a type of its own."""
    def to_python (self, ctx):
        return self.val
    def sel_of_typeref(self):
        # Selection is only resolvable when made from a named (referenced)
        # CHOICE type.
        return self.typ.type == 'Type_Ref'
    def eth_reg_sub(self, ident, ectx):
        if not self.sel_of_typeref():
            self.seltype = ''
            return
        # ask the context to register the selected alternative and record
        # the dependency on it
        self.seltype = ectx.eth_sel_req(self.typ.val, self.sel)
        ectx.eth_dep_add(ident, self.seltype)
    def eth_ftype(self, ectx):
        (ftype, display) = ('FT_NONE', 'BASE_NONE')
        if self.sel_of_typeref() and not ectx.type[self.seltype]['import']:
            (ftype, display) = ectx.type[self.typ.val]['val'].eth_ftype_sel(self.sel, ectx)
        return (ftype, display)
    def GetTTag(self, ectx):
        """Top tag of the selected alternative; imported selections fall
        back to the conformance file or a placeholder."""
        #print "GetTTag(%s)\n" % self.seltype;
        if (ectx.type[self.seltype]['import']):
            if 'ttag' not in ectx.type[self.seltype]:
                if not ectx.conform.check_item('IMPORT_TAG', self.seltype):
                    msg = 'Missing tag information for imported type %s from %s (%s)' % (self.seltype, ectx.type[self.seltype]['import'], ectx.type[self.seltype]['proto'])
                    warnings.warn_explicit(msg, UserWarning, '', 0)
                ectx.type[self.seltype]['ttag'] = ectx.conform.use_item('IMPORT_TAG', self.seltype, val_dflt=('-1 /*imported*/', '-1 /*imported*/'))
            return ectx.type[self.seltype]['ttag']
        else:
            return ectx.type[self.typ.val]['val'].GetTTagSel(self.sel, ectx)
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        if self.sel_of_typeref():
            t = ectx.type[self.seltype]['ethname']
            pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
            pars['TYPE_REF_TNAME'] = t
            pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
        return pars
    def eth_type_default_body(self, ectx, tname):
        # Delegate to the selected alternative's dissector.
        if not self.sel_of_typeref():
            body = '#error Can not decode %s' % (tname)
        elif (ectx.Ber()):
            body = ectx.eth_fn_call('%(TYPE_REF_FN)s', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
        elif (ectx.Per()):
            body = ectx.eth_fn_call('%(TYPE_REF_FN)s', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
#--- TaggedType -----------------------------------------------------------------
class TaggedType (Type):
    """A type with an explicit/implicit tag applied (e.g. [0] IMPLICIT T)."""
    def eth_tname(self):
        # Name: the remaining (unstripped) tags concatenated with the
        # inner type's name.
        tn = ''
        for i in range(self.tstrip, len(self.val.tags)):
            tn += self.val.tags[i].eth_tname()
            tn += '_'
        tn += self.val.eth_tname()
        return tn
    def eth_set_val_name(self, ident, val_name, ectx):
        #print "TaggedType::eth_set_val_name(): ident=%s, val_name=%s" % (ident, val_name)
        self.val_name = val_name
        ectx.eth_dep_add(ident, self.val_name)
    def eth_reg_sub(self, ident, ectx):
        # Register the untagged inner type under a synthetic sub-name.
        self.val_name = ident + '/' + UNTAG_TYPE_NAME
        self.val.eth_reg(self.val_name, ectx, tstrip=self.tstrip+1, tagflag=True, parent=ident)
    def GetTTag(self, ectx):
        #print "GetTTag(%s)\n" % self.seltype;
        return self.GetTag(ectx)
    def eth_ftype(self, ectx):
        return self.val.eth_ftype(ectx)
    def eth_type_default_pars(self, ectx, tname):
        """Template parameters: inner type's dissector plus the tag class,
        number and implicit/explicit flag."""
        pars = Type.eth_type_default_pars(self, ectx, tname)
        t = ectx.type[self.val_name]['ethname']
        pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
        pars['TYPE_REF_TNAME'] = t
        pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
        (pars['TAG_CLS'], pars['TAG_TAG']) = self.GetTag(ectx)
        if self.HasImplicitTag(ectx):
            pars['TAG_IMPL'] = 'TRUE'
        else:
            pars['TAG_IMPL'] = 'FALSE'
        return pars
    def eth_type_default_body(self, ectx, tname):
        # Only BER needs an explicit tagged-type wrapper call.
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_tagged_type', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                         ('%(HF_INDEX)s', '%(TAG_CLS)s', '%(TAG_TAG)s', '%(TAG_IMPL)s', '%(TYPE_REF_FN)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
#--- SqType -----------------------------------------------------------
class SqType (Type):
    """Base for SEQUENCE/SET (OF) types: formats one row of the component
    table passed to the BER/PER sequence dissector helpers."""
    def out_item(self, f, val, optional, ext, ectx):
        """Return one C initializer line for component *val* (field key *f*).
        *optional* marks OPTIONAL components, *ext* is the PER extension
        flag string."""
        if (val.eth_omit_field()):
            # component omitted from the tree (e.g. extension addition
            # group): use the shared dummy field
            t = ectx.type[val.ident]['ethname']
            fullname = ectx.dummy_eag_field
        else:
            ef = ectx.field[f]['ethname']
            t = ectx.eth_hf[ef]['ethtype']
            fullname = ectx.eth_hf[ef]['fullname']
        if (ectx.Ber()):
            #print "optional=%s, e.val.HasOwnTag()=%s, e.val.IndetermTag()=%s" % (str(e.optional), str(e.val.HasOwnTag()), str(e.val.IndetermTag(ectx)))
            #print val.str_depth(1)
            # build the BER_FLAGS_* combination for this component
            opt = ''
            if (optional):
                opt = 'BER_FLAGS_OPTIONAL'
            if (not val.HasOwnTag()):
                if (opt): opt += '|'
                opt += 'BER_FLAGS_NOOWNTAG'
            elif (val.HasImplicitTag(ectx)):
                if (opt): opt += '|'
                opt += 'BER_FLAGS_IMPLTAG'
            if (val.IndetermTag(ectx)):
                if (opt): opt += '|'
                opt += 'BER_FLAGS_NOTCHKTAG'
            if (not opt): opt = '0'
        else:
            if optional:
                opt = 'ASN1_OPTIONAL'
            else:
                opt = 'ASN1_NOT_OPTIONAL'
        if (ectx.Ber()):
            (tc, tn) = val.GetTag(ectx)
            out = ' { %-24s, %-13s, %s, %s, dissect_%s_%s },\n' \
                  % ('&'+fullname, tc, tn, opt, ectx.eth_type[t]['proto'], t)
        elif (ectx.Per()):
            out = ' { %-24s, %-23s, %-17s, dissect_%s_%s },\n' \
                  % ('&'+fullname, ext, opt, ectx.eth_type[t]['proto'], t)
        else:
            out = ''
        return out
#--- SeqType -----------------------------------------------------------
class SeqType (SqType):
    """Common behaviour for SEQUENCE/SET-like constructed types: component
    bookkeeping, COMPONENTS OF expansion, automatic tagging and generation
    of the component table."""
    def all_components(self):
        # Root components + extension additions + second root list.
        lst = self.elt_list[:]
        if hasattr(self, 'ext_list'):
            lst.extend(self.ext_list)
        if hasattr(self, 'elt_list2'):
            lst.extend(self.elt_list2)
        return lst
    def need_components(self):
        # True while any component is still an unexpanded COMPONENTS OF.
        lst = self.all_components()
        for e in (lst):
            if e.type == 'components_of':
                return True
        return False
    def expand_components(self, ectx):
        # Replace each COMPONENTS OF entry (in each of the three lists)
        # with the referenced type's components; loop until none remain,
        # since expansion may introduce further COMPONENTS OF entries.
        while self.need_components():
            for i in range(len(self.elt_list)):
                if self.elt_list[i].type == 'components_of':
                    comp = self.elt_list[i].typ.get_components(ectx)
                    self.elt_list[i:i+1] = comp
                    break
            if hasattr(self, 'ext_list'):
                for i in range(len(self.ext_list)):
                    if self.ext_list[i].type == 'components_of':
                        comp = self.ext_list[i].typ.get_components(ectx)
                        self.ext_list[i:i+1] = comp
                        break
            if hasattr(self, 'elt_list2'):
                for i in range(len(self.elt_list2)):
                    if self.elt_list2[i].type == 'components_of':
                        comp = self.elt_list2[i].typ.get_components(ectx)
                        self.elt_list2[i:i+1] = comp
                        break
    def get_components(self, ectx):
        # COMPONENTS OF takes only root components (not the extension list).
        lst = self.elt_list[:]
        if hasattr(self, 'elt_list2'):
            lst.extend(self.elt_list2)
        return lst
    def eth_reg_sub(self, ident, ectx, components_available=False):
        """Register all components with the context, handling automatic
        tagging, COMPONENTS OF expansion and PER extension addition
        groups."""
        # check if autotag is required
        autotag = False
        if (ectx.NeedTags() and (ectx.tag_def == 'AUTOMATIC')):
            autotag = True
            lst = self.all_components()
            # NOTE(review): lst (all components incl. ext_list/elt_list2)
            # is computed but the scan below covers only elt_list — confirm
            # whether tags on extension components should also disable
            # automatic tagging.
            for e in (self.elt_list):
                if e.val.HasOwnTag(): autotag = False; break;
        # expand COMPONENTS OF
        if self.need_components():
            if components_available:
                self.expand_components(ectx)
            else:
                # referenced types not registered yet: ask to be retried
                ectx.eth_comp_req(ident)
                return
        # extension addition groups
        if hasattr(self, 'ext_list'):
            if (ectx.Per()): # add names
                eag_num = 1
                for e in (self.ext_list):
                    if isinstance(e.val, ExtensionAdditionGroup):
                        e.val.parent_ident = ident
                        e.val.parent_tname = ectx.type[ident]['tname']
                        if (e.val.ver):
                            e.val.SetName("eag_v%s" % (e.val.ver))
                        else:
                            e.val.SetName("eag_%d" % (eag_num))
                            eag_num += 1;
            else: # expand
                new_ext_list = []
                for e in (self.ext_list):
                    if isinstance(e.val, ExtensionAdditionGroup):
                        new_ext_list.extend(e.val.elt_list)
                    else:
                        new_ext_list.append(e)
                self.ext_list = new_ext_list
        # do autotag
        if autotag:
            atag = 0
            for e in (self.elt_list):
                e.val.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT'))
                atag += 1
        if autotag and hasattr(self, 'elt_list2'):
            for e in (self.elt_list2):
                e.val.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT'))
                atag += 1
        if autotag and hasattr(self, 'ext_list'):
            for e in (self.ext_list):
                e.val.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT'))
                atag += 1
        # register components
        for e in (self.elt_list):
            e.val.eth_reg(ident, ectx, tstrip=1, parent=ident)
        if hasattr(self, 'ext_list'):
            for e in (self.ext_list):
                e.val.eth_reg(ident, ectx, tstrip=1, parent=ident)
        if hasattr(self, 'elt_list2'):
            for e in (self.elt_list2):
                e.val.eth_reg(ident, ectx, tstrip=1, parent=ident)
    def eth_type_default_table(self, ectx, tname):
        """Emit the static ber_/per_sequence_t component table."""
        #print "eth_type_default_table(tname='%s')" % (tname)
        fname = ectx.eth_type[tname]['ref'][0]
        table = "static const %(ER)s_sequence_t %(TABLE)s[] = {\n"
        if hasattr(self, 'ext_list'):
            ext = 'ASN1_EXTENSION_ROOT'
        else:
            ext = 'ASN1_NO_EXTENSIONS'
        # an extensible but completely empty sequence must still carry the
        # extension flag on its terminator row (PER)
        empty_ext_flag = '0'
        if (len(self.elt_list)==0) and hasattr(self, 'ext_list') and (len(self.ext_list)==0) and (not hasattr(self, 'elt_list2') or (len(self.elt_list2)==0)):
            empty_ext_flag = ext
        for e in (self.elt_list):
            f = fname + '/' + e.val.name
            table += self.out_item(f, e.val, e.optional, ext, ectx)
        if hasattr(self, 'ext_list'):
            for e in (self.ext_list):
                f = fname + '/' + e.val.name
                table += self.out_item(f, e.val, e.optional, 'ASN1_NOT_EXTENSION_ROOT', ectx)
        if hasattr(self, 'elt_list2'):
            for e in (self.elt_list2):
                f = fname + '/' + e.val.name
                table += self.out_item(f, e.val, e.optional, ext, ectx)
        if (ectx.Ber()):
            table += " { NULL, 0, 0, 0, NULL }\n};\n"
        else:
            table += " { NULL, %s, 0, NULL }\n};\n" % (empty_ext_flag)
        return table
#--- SeqOfType -----------------------------------------------------------
class SeqOfType (SqType):
    """Base for SEQUENCE OF / SET OF: builds the one-row component table."""
    def eth_type_default_table(self, ectx, tname):
        #print "eth_type_default_table(tname='%s')" % (tname)
        fname = ectx.eth_type[tname]['ref'][0]
        if self.val.IsNamed ():
            f = fname + '/' + self.val.name
        else:
            # anonymous item component gets the conventional item name
            f = fname + '/' + ITEM_FIELD_NAME
        table = "static const %(ER)s_sequence_t %(TABLE)s[1] = {\n"
        table += self.out_item(f, self.val, False, 'ASN1_NO_EXTENSIONS', ectx)
        table += "};\n"
        return table
#--- SequenceOfType -----------------------------------------------------------
class SequenceOfType (SeqOfType):
    """SEQUENCE OF type: naming, registration and dissector generation."""
    def to_python (self, ctx):
        # name, tag (None for no tag, EXPLICIT() for explicit), typ)
        # or '' + (1,) for optional
        sizestr = ''
        if self.size_constr is not None:  # was '!= None'
            print("#Ignoring size constraint:", self.size_constr.subtype)
        return "%sasn1.SEQUENCE_OF (%s%s)" % (ctx.spaces (),
                                              self.val.to_python (ctx),
                                              sizestr)
    def eth_reg_sub(self, ident, ectx):
        # Register the item component (named, or the conventional item name).
        itmnm = ident
        if not self.val.IsNamed ():
            itmnm += '/' + ITEM_FIELD_NAME
        self.val.eth_reg(itmnm, ectx, tstrip=1, idx='[##]', parent=ident)
    def eth_tname(self):
        """Name: SEQUENCE_OF_<item> (size constraint woven in if present);
        anything else gets a unique anonymous name."""
        if self.val.type != 'Type_Ref':
            return '#' + self.type + '_' + str(id(self))
        if not self.HasConstraint():
            return "SEQUENCE_OF_" + self.val.eth_tname()
        elif self.constr.IsSize():
            return 'SEQUENCE_' + self.constr.eth_constrname() + '_OF_' + self.val.eth_tname()
        else:
            return '#' + self.type + '_' + str(id(self))
    def eth_ftype(self, ectx):
        # Represented by the number of repetitions.
        return ('FT_UINT32', 'BASE_DEC')
    def eth_need_tree(self):
        return True
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_SEQUENCE')
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
        pars['TABLE'] = '%(PROTOP)s%(TNAME)s_sequence_of'
        return pars
    def eth_type_default_body(self, ectx, tname):
        """Emit the dissector body: (constrained) sequence-of for BER/PER."""
        if (ectx.Ber()):
            if (ectx.constraints_check and self.HasSizeConstraint()):
                body = ectx.eth_fn_call('dissect_%(ER)s_constrained_sequence_of', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_sequence_of', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
        elif (ectx.Per() and not self.HasConstraint()):
            body = ectx.eth_fn_call('dissect_%(ER)s_sequence_of', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(ETT_INDEX)s', '%(TABLE)s',),))
        elif (ectx.Per() and self.constr.type == 'Size'):
            body = ectx.eth_fn_call('dissect_%(ER)s_constrained_sequence_of', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(ETT_INDEX)s', '%(TABLE)s',),
                                         ('%(MIN_VAL)s', '%(MAX_VAL)s','%(EXT)s'),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
#--- SetOfType ----------------------------------------------------------------
class SetOfType (SeqOfType):
    """SET OF type: mirrors SequenceOfType with the SET helpers."""
    def eth_reg_sub(self, ident, ectx):
        # Register the item component (named, or the conventional item name).
        itmnm = ident
        if not self.val.IsNamed ():
            itmnm += '/' + ITEM_FIELD_NAME
        self.val.eth_reg(itmnm, ectx, tstrip=1, idx='(##)', parent=ident)
    def eth_tname(self):
        # SET_OF_<item> (size constraint woven in if present); anything
        # else gets a unique anonymous name.
        if self.val.type != 'Type_Ref':
            return '#' + self.type + '_' + str(id(self))
        if not self.HasConstraint():
            return "SET_OF_" + self.val.eth_tname()
        elif self.constr.IsSize():
            return 'SET_' + self.constr.eth_constrname() + '_OF_' + self.val.eth_tname()
        else:
            return '#' + self.type + '_' + str(id(self))
    def eth_ftype(self, ectx):
        # Represented by the number of repetitions.
        return ('FT_UINT32', 'BASE_DEC')
    def eth_need_tree(self):
        return True
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_SET')
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
        pars['TABLE'] = '%(PROTOP)s%(TNAME)s_set_of'
        return pars
    def eth_type_default_body(self, ectx, tname):
        # Emit the dissector body: (constrained) set-of for BER/PER.
        if (ectx.Ber()):
            if (ectx.constraints_check and self.HasSizeConstraint()):
                body = ectx.eth_fn_call('dissect_%(ER)s_constrained_set_of', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_set_of', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
        elif (ectx.Per() and not self.HasConstraint()):
            body = ectx.eth_fn_call('dissect_%(ER)s_set_of', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(ETT_INDEX)s', '%(TABLE)s',),))
        elif (ectx.Per() and self.constr.type == 'Size'):
            body = ectx.eth_fn_call('dissect_%(ER)s_constrained_set_of', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(ETT_INDEX)s', '%(TABLE)s',),
                                         ('%(MIN_VAL)s', '%(MAX_VAL)s','%(EXT)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
def mk_tag_str (ctx, cls, typ, num):
    """Render an asn1.<TYP>(num, cls=asn1.<CLS>_FLAG) tag constructor for
    py_asn1 output.

    *cls* is the tag class name (e.g. 'CONTEXT'), *typ* the tagging mode
    ('IMPLICIT', 'EXPLICIT' or 'default'), *num* the tag number (string
    or int).  'default' is resolved through ctx.tags_def.
    """
    # XXX should do conversion to int earlier!
    val = int (num)
    typ = typ.upper()
    if typ == 'DEFAULT':
        # fall back to the module's DEFAULT ... TAGS setting
        typ = ctx.tags_def
    return 'asn1.%s(%d,cls=asn1.%s_FLAG)' % (typ, val, cls)  # XXX still needed?
#--- SequenceType -------------------------------------------------------------
class SequenceType (SeqType):
    """SEQUENCE type: py_asn1 output plus Wireshark table/dissector output."""
    def to_python (self, ctx):
        # name, tag (None for no tag, EXPLICIT() for explicit), typ)
        # or '' + (1,) for optional
        # XXX should also collect names for SEQUENCE inside SEQUENCE or
        # CHOICE or SEQUENCE_OF (where should the SEQUENCE_OF name come
        # from? for others, element or arm name would be fine)
        seq_name = getattr (self, 'sequence_name', None)
        if seq_name is None:  # was '== None'
            seq_name = 'None'
        else:
            seq_name = "'" + seq_name + "'"
        if 'ext_list' in self.__dict__:
            return "%sasn1.SEQUENCE ([%s], ext=[%s], seq_name = %s)" % (ctx.spaces (),
                                     self.elts_to_py (self.elt_list, ctx),
                                     self.elts_to_py (self.ext_list, ctx), seq_name)
        else:
            return "%sasn1.SEQUENCE ([%s]), seq_name = %s" % (ctx.spaces (),
                                     self.elts_to_py (self.elt_list, ctx), seq_name)
    def elts_to_py (self, list, ctx):
        """Render the component list as py_asn1 tuples
        ('name', tag, type, optional-flag)."""
        # we have elt_type, val= named_type, maybe default=, optional=
        # named_type node: either ident = or typ =
        # need to dismember these in order to generate Python output syntax.
        ctx.indent ()
        def elt_to_py (e):
            assert (e.type == 'elt_type')
            nt = e.val
            optflag = e.optional
            #assert (not hasattr (e, 'default')) # XXX add support for DEFAULT!
            assert (nt.type == 'named_type')
            tagstr = 'None'
            identstr = nt.ident
            if hasattr (nt.typ, 'type') and nt.typ.type == 'tag': # ugh
                tagstr = mk_tag_str (ctx,nt.typ.tag.cls,
                                     nt.typ.tag.tag_typ,nt.typ.tag.num)
                nt = nt.typ
            return "('%s',%s,%s,%d)" % (identstr, tagstr,
                                        nt.typ.to_python (ctx), optflag)
        indentstr = ",\n" + ctx.spaces ()
        rv = indentstr.join ([elt_to_py (e) for e in list])
        ctx.outdent ()
        return rv
    def eth_need_tree(self):
        return True
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_SEQUENCE')
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['TABLE'] = '%(PROTOP)s%(TNAME)s_sequence'
        return pars
    def eth_type_default_body(self, ectx, tname):
        # Emit the BER/PER sequence dissector call.
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_sequence', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                         ('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
        elif (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_sequence', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(ETT_INDEX)s', '%(TABLE)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
#--- ExtensionAdditionGroup ---------------------------------------------------
class ExtensionAdditionGroup (SeqType):
    """[[ ... ]] extension addition group inside a SEQUENCE (PER)."""
    def __init__(self,*args, **kw) :
        # parent_* are filled in by SeqType.eth_reg_sub when PER names the
        # group after its enclosing type.
        self.parent_ident = None
        self.parent_tname = None
        SeqType.__init__ (self,*args, **kw)
    def eth_omit_field(self):
        # The group itself gets no hf field; it is dissected through the
        # shared dummy extension-addition-group field.
        return True
    def eth_tname(self):
        if (self.parent_tname and self.IsNamed()):
            return self.parent_tname + "_" + self.name
        else:
            return SeqType.eth_tname(self)
    def eth_reg_sub(self, ident, ectx):
        ectx.eth_dummy_eag_field_required()
        ectx.eth_dep_add(self.parent_ident, ident)
        SeqType.eth_reg_sub(self, ident, ectx)
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['TABLE'] = '%(PROTOP)s%(TNAME)s_sequence'
        return pars
    def eth_type_default_body(self, ectx, tname):
        # Only meaningful for PER.
        if (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_sequence_eag', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(TABLE)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
#--- SetType ------------------------------------------------------------------
class SetType (SeqType):
    """SET type: same machinery as SEQUENCE but using the SET helpers."""
    def eth_need_tree(self):
        return True
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_SET')
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['TABLE'] = '%(PROTOP)s%(TNAME)s_set'
        return pars
    def eth_type_default_body(self, ectx, tname):
        # Emit the BER/PER set dissector call.
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_set', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                         ('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
        elif (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_set', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(ETT_INDEX)s', '%(TABLE)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
#--- ChoiceType ---------------------------------------------------------------
class ChoiceType (Type):
    def to_python (self, ctx):
        """Emit the py_asn1 rendering of this CHOICE (root alternatives
        plus an ext= list when extensible)."""
        # name, tag (None for no tag, EXPLICIT() for explicit), typ)
        # or '' + (1,) for optional
        if 'ext_list' in self.__dict__:
            return "%sasn1.CHOICE ([%s], ext=[%s])" % (ctx.spaces (),
                                   self.elts_to_py (self.elt_list, ctx),
                                   self.elts_to_py (self.ext_list, ctx))
        else:
            return "%sasn1.CHOICE ([%s])" % (ctx.spaces (), self.elts_to_py (self.elt_list, ctx))
    def elts_to_py (self, list, ctx):
        """Render CHOICE alternatives as py_asn1 ('name', tag, type) tuples."""
        ctx.indent ()
        def elt_to_py (nt):
            assert (nt.type == 'named_type')
            tagstr = 'None'
            if hasattr (nt, 'ident'):
                identstr = nt.ident
            else:
                # unnamed alternative: synthesize a usable identifier
                if hasattr (nt.typ, 'val'):
                    identstr = nt.typ.val # XXX, making up name
                elif hasattr (nt.typ, 'name'):
                    identstr = nt.typ.name
                else:
                    identstr = ctx.make_new_name ()
            if hasattr (nt.typ, 'type') and nt.typ.type == 'tag': # ugh
                tagstr = mk_tag_str (ctx,nt.typ.tag.cls,
                                     nt.typ.tag.tag_typ,nt.typ.tag.num)
                nt = nt.typ
            return "('%s',%s,%s)" % (identstr, tagstr,
                                     nt.typ.to_python (ctx))
        indentstr = ",\n" + ctx.spaces ()
        rv = indentstr.join ([elt_to_py (e) for e in list])
        ctx.outdent ()
        return rv
    def eth_reg_sub(self, ident, ectx):
        """Register all alternatives; apply automatic context tags when the
        module uses AUTOMATIC TAGS and no alternative has its own tag."""
        #print "eth_reg_sub(ident='%s')" % (ident)
        # check if autotag is required
        autotag = False
        if (ectx.NeedTags() and (ectx.tag_def == 'AUTOMATIC')):
            autotag = True
            for e in (self.elt_list):
                if e.HasOwnTag(): autotag = False; break;
            if autotag and hasattr(self, 'ext_list'):
                for e in (self.ext_list):
                    if e.HasOwnTag(): autotag = False; break;
        # do autotag
        if autotag:
            atag = 0
            for e in (self.elt_list):
                e.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT'))
                atag += 1
        if autotag and hasattr(self, 'ext_list'):
            for e in (self.ext_list):
                e.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT'))
                atag += 1
        # register the alternatives; exported alternatives are also made
        # selectable on their own
        for e in (self.elt_list):
            e.eth_reg(ident, ectx, tstrip=1, parent=ident)
            if ectx.conform.check_item('EXPORTS', ident + '.' + e.name):
                ectx.eth_sel_req(ident, e.name)
        if hasattr(self, 'ext_list'):
            for e in (self.ext_list):
                e.eth_reg(ident, ectx, tstrip=1, parent=ident)
                if ectx.conform.check_item('EXPORTS', ident + '.' + e.name):
                    ectx.eth_sel_req(ident, e.name)
def sel_item(self, ident, sel, ectx):
lst = self.elt_list[:]
if hasattr(self, 'ext_list'):
lst.extend(self.ext_list)
ee = None
for e in (self.elt_list):
if e.IsNamed() and (e.name == sel):
ee = e
break
if not ee:
print("#CHOICE %s does not contain item %s" % (ident, sel))
return ee
    def sel_req(self, ident, sel, ectx):
        """Register only the selected alternative (SelectionType support)."""
        #print "sel_req(ident='%s', sel=%s)\n%s" % (ident, sel, str(self))
        ee = self.sel_item(ident, sel, ectx)
        if ee:
            ee.eth_reg(ident, ectx, tstrip=0, selflag=True)
    def eth_ftype(self, ectx):
        # A CHOICE is represented by the index of the chosen alternative.
        return ('FT_UINT32', 'BASE_DEC')
    def eth_ftype_sel(self, sel, ectx):
        # Field type of one selected alternative (FT_NONE when unknown).
        ee = self.sel_item('', sel, ectx)
        if ee:
            return ee.eth_ftype(ectx)
        else:
            return ('FT_NONE', 'BASE_NONE')
    def eth_strings(self):
        # '$$' makes the generator substitute the generated value_string.
        return '$$'
    def eth_need_tree(self):
        return True
    def eth_has_vals(self):
        return True
    def GetTTag(self, ectx):
        # An untagged CHOICE has no single top tag; report wildcards.
        lst = self.elt_list
        cls = 'BER_CLASS_ANY/*choice*/'
        #if hasattr(self, 'ext_list'):
        # lst.extend(self.ext_list)
        #if (len(lst) > 0):
        # cls = lst[0].GetTag(ectx)[0]
        #for e in (lst):
        # if (e.GetTag(ectx)[0] != cls):
        # cls = '-1/*choice*/'
        return (cls, '-1/*choice*/')
    def GetTTagSel(self, sel, ectx):
        # Top tag of one selected alternative.
        ee = self.sel_item('', sel, ectx)
        if ee:
            return ee.GetTag(ectx)
        else:
            return ('BER_CLASS_ANY/*unknown selection*/', '-1/*unknown selection*/')
    def IndetermTag(self, ectx):
        #print "Choice IndetermTag()=%s" % (str(not self.HasOwnTag()))
        # Without its own tag a CHOICE's tag cannot be checked up front.
        return not self.HasOwnTag()
    def detect_tagval(self, ectx):
        """Decide whether per-alternative tag numbers can serve as the
        CHOICE's values: all alternatives must share one non-UNIVERSAL
        tag class (and, for PER, carry their own tags)."""
        tagval = False
        lst = self.elt_list[:]
        if hasattr(self, 'ext_list'):
            lst.extend(self.ext_list)
        if (len(lst) > 0) and (not ectx.Per() or lst[0].HasOwnTag()):
            t = lst[0].GetTag(ectx)[0]
            tagval = True
        else:
            t = ''
            tagval = False
        if (t == 'BER_CLASS_UNI'):
            tagval = False
        for e in (lst):
            if not ectx.Per() or e.HasOwnTag():
                tt = e.GetTag(ectx)[0]
            else:
                tt = ''
                tagval = False
            if (tt != t):
                tagval = False
        return tagval
    def get_vals(self, ectx):
        """Build (value, name) pairs for each alternative: the tag number
        when tags discriminate, otherwise a running index."""
        tagval = self.detect_tagval(ectx)
        vals = []
        cnt = 0
        for e in (self.elt_list):
            if (tagval): val = e.GetTag(ectx)[1]
            else: val = str(cnt)
            vals.append((val, e.name))
            cnt += 1
        if hasattr(self, 'ext_list'):
            # The index keeps counting across the extension additions.
            for e in (self.ext_list):
                if (tagval): val = e.GetTag(ectx)[1]
                else: val = str(cnt)
                vals.append((val, e.name))
                cnt += 1
        return vals
def eth_type_vals(self, tname, ectx):
out = '\n'
vals = self.get_vals(ectx)
out += ectx.eth_vals(tname, vals)
return out
    def reg_enum_vals(self, tname, ectx):
        # Register every alternative as a named value so it can appear as
        # a generated enum item.
        vals = self.get_vals(ectx)
        for (val, id) in vals:
            ectx.eth_reg_value(id, self, val, ethname=ectx.eth_enum_item(tname, id))
def eth_type_enum(self, tname, ectx):
out = '\n'
vals = self.get_vals(ectx)
out += ectx.eth_enum(tname, vals)
return out
    def eth_type_default_pars(self, ectx, tname):
        # Extend the base substitution dict with the generated choice
        # table's name.
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['TABLE'] = '%(PROTOP)s%(TNAME)s_choice'
        return pars
    def eth_type_default_table(self, ectx, tname):
        """Emit the C ber_choice_t / per_choice_t array for this CHOICE."""
        def out_item(val, e, ext, ectx):
            # One table row for alternative `e`, discriminated by `val`.
            has_enum = ectx.eth_type[tname]['enum'] & EF_ENUM
            if (has_enum):
                vval = ectx.eth_enum_item(tname, e.name)
            else:
                vval = val
            f = fname + '/' + e.name
            ef = ectx.field[f]['ethname']
            t = ectx.eth_hf[ef]['ethtype']
            if (ectx.Ber()):
                # Tag-handling flags for the BER dissector.
                opt = ''
                if (not e.HasOwnTag()):
                    opt = 'BER_FLAGS_NOOWNTAG'
                elif (e.HasImplicitTag(ectx)):
                    if (opt): opt += '|'
                    opt += 'BER_FLAGS_IMPLTAG'
                if (not opt): opt = '0'
            if (ectx.Ber()):
                (tc, tn) = e.GetTag(ectx)
                out = '    { %3s, %-24s, %-13s, %s, %s, dissect_%s_%s },\n' \
                      % (vval, '&'+ectx.eth_hf[ef]['fullname'], tc, tn, opt, ectx.eth_type[t]['proto'], t)
            elif (ectx.Per()):
                out = '    { %3s, %-24s, %-23s, dissect_%s_%s },\n' \
                      % (vval, '&'+ectx.eth_hf[ef]['fullname'], ext, ectx.eth_type[t]['proto'], t)
            else:
                out = ''
            return out
        # end out_item()
        #print "eth_type_default_table(tname='%s')" % (tname)
        fname = ectx.eth_type[tname]['ref'][0]
        tagval = self.detect_tagval(ectx)
        table = "static const %(ER)s_choice_t %(TABLE)s[] = {\n"
        cnt = 0
        if hasattr(self, 'ext_list'):
            ext = 'ASN1_EXTENSION_ROOT'
        else:
            ext = 'ASN1_NO_EXTENSIONS'
        empty_ext_flag = '0'
        if (len(self.elt_list)==0) and hasattr(self, 'ext_list') and (len(self.ext_list)==0):
            # Fully empty extensible CHOICE: the terminator row itself
            # carries the extension flag.
            empty_ext_flag = ext
        for e in (self.elt_list):
            if (tagval): val = e.GetTag(ectx)[1]
            else: val = str(cnt)
            table += out_item(val, e, ext, ectx)
            cnt += 1
        if hasattr(self, 'ext_list'):
            for e in (self.ext_list):
                if (tagval): val = e.GetTag(ectx)[1]
                else: val = str(cnt)
                table += out_item(val, e, 'ASN1_NOT_EXTENSION_ROOT', ectx)
                cnt += 1
        if (ectx.Ber()):
            table += "  { 0, NULL, 0, 0, 0, NULL }\n};\n"
        else:
            table += "  { 0, NULL, %s, NULL }\n};\n" % (empty_ext_flag)
        return table
    def eth_type_default_body(self, ectx, tname):
        # Dissector call for the CHOICE itself; the alternatives table is
        # emitted separately by eth_type_default_table().
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_choice', ret='offset',
                                    par=(('%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                         ('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s'),
                                         ('%(VAL_PTR)s',),))
        elif (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_choice', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(ETT_INDEX)s', '%(TABLE)s',),
                                         ('%(VAL_PTR)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
#--- ChoiceValue ----------------------------------------------------
class ChoiceValue (Value):
    """A value of a CHOICE type: a selected alternative plus its value."""
    def to_str(self, ectx):
        return self.val.to_str(ectx)
    def fld_obj_eq(self, other):
        # Equal when both the selected alternative and its value match.
        return isinstance(other, ChoiceValue) and (self.choice == other.choice) and (str(self.val.val) == str(other.val.val))
#--- EnumeratedType -----------------------------------------------------------
class EnumeratedType (Type):
    """ASN.1 ENUMERATED type: named values, optionally with extension
    additions, numbered automatically where not explicit."""
    def to_python (self, ctx):
        def strify_one (named_num):
            return "%s=%s" % (named_num.ident, named_num.val)
        return "asn1.ENUM(%s)" % ",".join (map (strify_one, self.val))
    def eth_ftype(self, ectx):
        return ('FT_UINT32', 'BASE_DEC')
    def eth_strings(self):
        # '$$' requests generation of a value_string table.
        return '$$'
    def eth_has_vals(self):
        return True
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_ENUMERATED')
    def get_vals_etc(self, ectx):
        """Return (vals, root_num, ext_num, map_table): the (value, name)
        pairs, the root/extension counts, and a PER index->value map
        (None when enumeration indexes already equal the values)."""
        vals = []
        lastv = 0
        used = {}
        maxv = 0
        root_num = 0
        ext_num = 0
        map_table = []
        # First pass reserves explicitly numbered items so automatic
        # numbering never collides with them.
        for e in (self.val):
            if e.type == 'NamedNumber':
                used[int(e.val)] = True
        for e in (self.val):
            if e.type == 'NamedNumber':
                val = int(e.val)
            else:
                while lastv in used:
                    lastv += 1
                val = lastv
                used[val] = True
            vals.append((val, e.ident))
            map_table.append(val)
            root_num += 1
            if val > maxv:
                maxv = val
        if self.ext is not None:
            # Same two-pass numbering scheme for the extension additions.
            for e in (self.ext):
                if e.type == 'NamedNumber':
                    used[int(e.val)] = True
            for e in (self.ext):
                if e.type == 'NamedNumber':
                    val = int(e.val)
                else:
                    while lastv in used:
                        lastv += 1
                    val = lastv
                    used[val] = True
                vals.append((val, e.ident))
                map_table.append(val)
                ext_num += 1
                if val > maxv:
                    maxv = val
        # The map is only needed when some value differs from its index.
        need_map = False
        for i in range(len(map_table)):
            need_map = need_map or (map_table[i] != i)
        if (not need_map):
            map_table = None
        return (vals, root_num, ext_num, map_table)
    def eth_type_vals(self, tname, ectx):
        out = '\n'
        vals = self.get_vals_etc(ectx)[0]
        out += ectx.eth_vals(tname, vals)
        return out
    def reg_enum_vals(self, tname, ectx):
        vals = self.get_vals_etc(ectx)[0]
        for (val, id) in vals:
            ectx.eth_reg_value(id, self, val, ethname=ectx.eth_enum_item(tname, id))
    def eth_type_enum(self, tname, ectx):
        out = '\n'
        vals = self.get_vals_etc(ectx)[0]
        out += ectx.eth_enum(tname, vals)
        return out
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        (root_num, ext_num, map_table) = self.get_vals_etc(ectx)[1:]
        if (self.ext != None):
            ext = 'TRUE'
        else:
            ext = 'FALSE'
        pars['ROOT_NUM'] = str(root_num)
        pars['EXT'] = ext
        pars['EXT_NUM'] = str(ext_num)
        if (map_table):
            pars['TABLE'] = '%(PROTOP)s%(TNAME)s_value_map'
        else:
            pars['TABLE'] = 'NULL'
        return pars
    def eth_type_default_table(self, ectx, tname):
        # The value map is only emitted for PER and only when indexes
        # differ from the enumerated values.
        if (not ectx.Per()): return ''
        map_table = self.get_vals_etc(ectx)[3]
        if (map_table == None): return ''
        table = "static guint32 %(TABLE)s[%(ROOT_NUM)s+%(EXT_NUM)s] = {"
        table += ", ".join([str(v) for v in map_table])
        table += "};\n"
        return table
    def eth_type_default_body(self, ectx, tname):
        # BER decodes ENUMERATED with the INTEGER machinery; PER uses the
        # dedicated enumerated dissector with root/extension counts.
        if (ectx.Ber()):
            if (ectx.constraints_check and self.HasValueConstraint()):
                body = ectx.eth_fn_call('dissect_%(ER)s_constrained_integer', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_integer', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),
                                             ('%(VAL_PTR)s',),))
        elif (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_enumerated', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(ROOT_NUM)s', '%(VAL_PTR)s', '%(EXT)s', '%(EXT_NUM)s', '%(TABLE)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
#--- EmbeddedPDVType -----------------------------------------------------------
class EmbeddedPDVType (Type):
    """ASN.1 EMBEDDED PDV type."""
    def eth_tname(self):
        return 'EMBEDDED_PDV'
    def eth_ftype(self, ectx):
        return ('FT_NONE', 'BASE_NONE')
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_EMBEDDED_PDV')
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        # Optional user-supplied callback that dissects the embedded data.
        if ectx.default_embedded_pdv_cb:
            pars['TYPE_REF_FN'] = ectx.default_embedded_pdv_cb
        else:
            pars['TYPE_REF_FN'] = 'NULL'
        return pars
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_EmbeddedPDV_Type', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
        elif (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_embedded_pdv', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
#--- ExternalType -----------------------------------------------------------
class ExternalType (Type):
    """ASN.1 EXTERNAL type."""
    def eth_tname(self):
        return 'EXTERNAL'
    def eth_ftype(self, ectx):
        return ('FT_NONE', 'BASE_NONE')
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_EXTERNAL')
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        # Optional user-supplied callback that dissects the carried type.
        if ectx.default_external_type_cb:
            pars['TYPE_REF_FN'] = ectx.default_external_type_cb
        else:
            pars['TYPE_REF_FN'] = 'NULL'
        return pars
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_external_type', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
        elif (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_external_type', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
#--- OpenType -----------------------------------------------------------
class OpenType (Type):
    """ASN.1 open type (information-object field); may be constrained to
    a single concrete type via a type constraint."""
    def to_python (self, ctx):
        return "asn1.ANY"
    def single_type(self):
        # Return the single allowed type's name when the constraint pins
        # the open type down to exactly one type reference, else None.
        if (self.HasConstraint() and
            self.constr.type == 'Type' and
            self.constr.subtype.type == 'Type_Ref'):
            return self.constr.subtype.val
        return None
    def eth_reg_sub(self, ident, ectx):
        t = self.single_type()
        if t:
            ectx.eth_dep_add(ident, t)
    def eth_tname(self):
        t = self.single_type()
        if t:
            return 'OpenType_' + t
        else:
            return Type.eth_tname(self)
    def eth_ftype(self, ectx):
        return ('FT_NONE', 'BASE_NONE')
    def GetTTag(self, ectx):
        return ('BER_CLASS_ANY', '0')
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['FN_VARIANT'] = ectx.default_opentype_variant
        t = self.single_type()
        if t:
            t = ectx.type[t]['ethname']
            pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
            pars['TYPE_REF_TNAME'] = t
            pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
        else:
            pars['TYPE_REF_FN'] = 'NULL'
        return pars
    def eth_type_default_body(self, ectx, tname):
        # Only PER has open-type support here; BER falls through to #error.
        if (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_open_type%(FN_VARIANT)s', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
#--- InstanceOfType -----------------------------------------------------------
class InstanceOfType (Type):
    """ASN.1 INSTANCE OF type (dissected like EXTERNAL under BER)."""
    def eth_tname(self):
        return 'INSTANCE_OF'
    def eth_ftype(self, ectx):
        return ('FT_NONE', 'BASE_NONE')
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_EXTERNAL')
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        if ectx.default_external_type_cb:
            pars['TYPE_REF_FN'] = ectx.default_external_type_cb
        else:
            pars['TYPE_REF_FN'] = 'NULL'
        return pars
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_external_type', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
        elif (ectx.Per()):
            # PER decoding of INSTANCE OF is not implemented.
            body = '#error Can not decode %s' % (tname)
        else:
            body = '#error Can not decode %s' % (tname)
        return body
#--- AnyType -----------------------------------------------------------
class AnyType (Type):
    """Obsolete ASN.1 ANY type; no generated dissector can decode it."""
    def to_python (self, ctx):
        return "asn1.ANY"
    def eth_ftype(self, ectx):
        return ('FT_NONE', 'BASE_NONE')
    def GetTTag(self, ectx):
        return ('BER_CLASS_ANY', '0')
    def eth_type_default_body(self, ectx, tname):
        body = '#error Can not decode %s' % (tname)
        return body
class Literal (Node):
    """A literal token; rendered verbatim in Python output."""
    def to_python (self, ctx):
        return self.val
#--- NullType -----------------------------------------------------------------
class NullType (Type):
    """ASN.1 NULL type."""
    def to_python (self, ctx):
        return 'asn1.NULL'
    def eth_tname(self):
        return 'NULL'
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_NULL')
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_null', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),))
        elif (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_null', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
#--- NullValue ----------------------------------------------------
class NullValue (Value):
    """The ASN.1 NULL value; always rendered as the C literal NULL."""
    def to_str(self, ectx):
        return 'NULL'
#--- RealType -----------------------------------------------------------------
class RealType (Type):
    """ASN.1 REAL type, displayed as a double."""
    def to_python (self, ctx):
        return 'asn1.REAL'
    def eth_tname(self):
        return 'REAL'
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_REAL')
    def eth_ftype(self, ectx):
        return ('FT_DOUBLE', 'BASE_NONE')
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_real', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),
                                         ('%(VAL_PTR)s',),))
        elif (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_real', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
#--- BooleanType --------------------------------------------------------------
class BooleanType (Type):
    """ASN.1 BOOLEAN type."""
    def to_python (self, ctx):
        return 'asn1.BOOLEAN'
    def eth_tname(self):
        return 'BOOLEAN'
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_BOOLEAN')
    def eth_ftype(self, ectx):
        return ('FT_BOOLEAN', 'BASE_NONE')
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_boolean', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s', '%(VAL_PTR)s'),))
        elif (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_boolean', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
#--- OctetStringType ----------------------------------------------------------
class OctetStringType (Type):
    """ASN.1 OCTET STRING type, with optional size and CONTAINING
    (contents) constraints."""
    def to_python (self, ctx):
        return 'asn1.OCTSTRING'
    def eth_tname(self):
        if not self.HasConstraint():
            return 'OCTET_STRING'
        elif self.constr.type == 'Size':
            return 'OCTET_STRING' + '_' + self.constr.eth_constrname()
        else:
            # '#' marks a name that cannot be used directly; id() keeps it unique.
            return '#' + self.type + '_' + str(id(self))
    def eth_ftype(self, ectx):
        return ('FT_BYTES', 'BASE_NONE')
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_OCTETSTRING')
    def eth_need_pdu(self, ectx):
        # A CONTAINING constraint dissected via the PDU variant needs a
        # registered PDU for the contained type.
        pdu = None
        if self.HasContentsConstraint():
            t = self.constr.GetContents(ectx)
            if t and (ectx.default_containing_variant in ('_pdu', '_pdu_new')):
                pdu = { 'type' : t,
                        'new' : ectx.default_containing_variant == '_pdu_new' }
        return pdu
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
        if self.HasContentsConstraint():
            pars['FN_VARIANT'] = ectx.default_containing_variant
            t = self.constr.GetContents(ectx)
            if t:
                if pars['FN_VARIANT'] in ('_pdu', '_pdu_new'):
                    # PDU variants dissect by field name, not type name.
                    t = ectx.field[t]['ethname']
                    pars['TYPE_REF_PROTO'] = ''
                    pars['TYPE_REF_TNAME'] = t
                    pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_TNAME)s'
                else:
                    t = ectx.type[t]['ethname']
                    pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
                    pars['TYPE_REF_TNAME'] = t
                    pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
            else:
                pars['TYPE_REF_FN'] = 'NULL'
        return pars
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            if (ectx.constraints_check and self.HasSizeConstraint()):
                body = ectx.eth_fn_call('dissect_%(ER)s_constrained_octet_string', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_octet_string', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),
                                             ('%(VAL_PTR)s',),))
        elif (ectx.Per()):
            if self.HasContentsConstraint():
                body = ectx.eth_fn_call('dissect_%(ER)s_octet_string_containing%(FN_VARIANT)s', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(TYPE_REF_FN)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_octet_string', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(VAL_PTR)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
#--- CharacterStringType ------------------------------------------------------
class CharacterStringType (Type):
    """Common base for ASN.1 character-string types."""
    def eth_tname(self):
        if not self.HasConstraint():
            return self.eth_tsname()
        elif self.constr.type == 'Size':
            return self.eth_tsname() + '_' + self.constr.eth_constrname()
        else:
            # '#' marks a name that cannot be used directly; id() keeps it unique.
            return '#' + self.type + '_' + str(id(self))
    def eth_ftype(self, ectx):
        return ('FT_STRING', 'BASE_NONE')
class RestrictedCharacterStringType (CharacterStringType):
    """Base for restricted character-string types (IA5String, UTF8String,
    ...).  Subclasses supply eth_tsname()."""
    def to_python (self, ctx):
        return 'asn1.' + self.eth_tsname()
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_' + self.eth_tsname())
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
        (pars['STRING_TYPE'], pars['STRING_TAG']) = (self.eth_tsname(), self.GetTTag(ectx)[1])
        (pars['ALPHABET'], pars['ALPHABET_LEN']) = self.eth_get_alphabet_constr(ectx)
        return pars
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            if (ectx.constraints_check and self.HasSizeConstraint()):
                body = ectx.eth_fn_call('dissect_%(ER)s_constrained_restricted_string', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(STRING_TAG)s'),
                                             ('%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_restricted_string', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(STRING_TAG)s'),
                                             ('%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),
                                             ('%(VAL_PTR)s',),))
        elif (ectx.Per() and self.HasPermAlph()):
            # Permitted-alphabet constraint: pass the alphabet to PER.
            body = ectx.eth_fn_call('dissect_%(ER)s_restricted_character_string', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(ALPHABET)s', '%(ALPHABET_LEN)s'),
                                         ('%(VAL_PTR)s',),))
        elif (ectx.Per()):
            if (self.eth_tsname() == 'GeneralString'):
                body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
            elif (self.eth_tsname() in ('GeneralizedTime', 'UTCTime')):
                # Both time types are encoded as VisibleString under PER.
                # (Previously two byte-identical elif branches.)
                body = ectx.eth_fn_call('dissect_%(ER)s_VisibleString', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
class BMPStringType (RestrictedCharacterStringType):
    """ASN.1 BMPString."""
    def eth_tsname(self):
        name = 'BMPString'
        return name
class GeneralStringType (RestrictedCharacterStringType):
    """ASN.1 GeneralString."""
    def eth_tsname(self):
        name = 'GeneralString'
        return name
class GraphicStringType (RestrictedCharacterStringType):
    """ASN.1 GraphicString."""
    def eth_tsname(self):
        name = 'GraphicString'
        return name
class IA5StringType (RestrictedCharacterStringType):
    """ASN.1 IA5String."""
    def eth_tsname(self):
        name = 'IA5String'
        return name
class NumericStringType (RestrictedCharacterStringType):
    """ASN.1 NumericString."""
    def eth_tsname(self):
        name = 'NumericString'
        return name
class PrintableStringType (RestrictedCharacterStringType):
    """ASN.1 PrintableString."""
    def eth_tsname(self):
        name = 'PrintableString'
        return name
class TeletexStringType (RestrictedCharacterStringType):
    """ASN.1 TeletexString."""
    def eth_tsname(self):
        name = 'TeletexString'
        return name
class T61StringType (RestrictedCharacterStringType):
    """ASN.1 T61String (alias of TeletexString, hence its tag)."""
    def eth_tsname(self):
        name = 'T61String'
        return name
    def GetTTag(self, ectx):
        tag = ('BER_CLASS_UNI', 'BER_UNI_TAG_TeletexString')
        return tag
class UniversalStringType (RestrictedCharacterStringType):
    """ASN.1 UniversalString."""
    def eth_tsname(self):
        name = 'UniversalString'
        return name
class UTF8StringType (RestrictedCharacterStringType):
    """ASN.1 UTF8String."""
    def eth_tsname(self):
        name = 'UTF8String'
        return name
class VideotexStringType (RestrictedCharacterStringType):
    """ASN.1 VideotexString."""
    def eth_tsname(self):
        name = 'VideotexString'
        return name
class VisibleStringType (RestrictedCharacterStringType):
    """ASN.1 VisibleString."""
    def eth_tsname(self):
        name = 'VisibleString'
        return name
class ISO646StringType (RestrictedCharacterStringType):
    """ASN.1 ISO646String (alias of VisibleString, hence its tag)."""
    def eth_tsname(self):
        name = 'ISO646String'
        return name
    def GetTTag(self, ectx):
        tag = ('BER_CLASS_UNI', 'BER_UNI_TAG_VisibleString')
        return tag
class UnrestrictedCharacterStringType (CharacterStringType):
    """ASN.1 unrestricted CHARACTER STRING type."""
    def to_python(self, ctx):
        return 'asn1.UnrestrictedCharacterString'
    def eth_tsname(self):
        name = 'CHARACTER_STRING'
        return name
#--- UsefulType ---------------------------------------------------------------
class GeneralizedTime (RestrictedCharacterStringType):
    """ASN.1 useful type GeneralizedTime."""
    def eth_tsname(self):
        return 'GeneralizedTime'
    def eth_type_default_body(self, ectx, tname):
        # BER has a dedicated GeneralizedTime dissector; everything else
        # falls back to the generic restricted-string handling.
        if not ectx.Ber():
            return RestrictedCharacterStringType.eth_type_default_body(self, ectx, tname)
        return ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset',
                                par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),))
class UTCTime (RestrictedCharacterStringType):
    """ASN.1 useful type UTCTime."""
    def eth_tsname(self):
        return 'UTCTime'
    def eth_type_default_body(self, ectx, tname):
        # BER has a dedicated UTCTime dissector; everything else falls
        # back to the generic restricted-string handling.
        if not ectx.Ber():
            return RestrictedCharacterStringType.eth_type_default_body(self, ectx, tname)
        return ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset',
                                par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),))
class ObjectDescriptor (RestrictedCharacterStringType):
    """ASN.1 useful type ObjectDescriptor."""
    def eth_tsname(self):
        return 'ObjectDescriptor'
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            # BER: generic restricted-string handling is sufficient.
            body = RestrictedCharacterStringType.eth_type_default_body(self, ectx, tname)
        elif (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_object_descriptor', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
#--- ObjectIdentifierType -----------------------------------------------------
class ObjectIdentifierType (Type):
    """ASN.1 OBJECT IDENTIFIER type."""
    def to_python (self, ctx):
        return 'asn1.OBJECT_IDENTIFIER'
    def eth_tname(self):
        return 'OBJECT_IDENTIFIER'
    def eth_ftype(self, ectx):
        return ('FT_OID', 'BASE_NONE')
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_OID')
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        # FN_VARIANT selects e.g. the _str variant of the OID dissector.
        pars['FN_VARIANT'] = ectx.default_oid_variant
        return pars
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_object_identifier%(FN_VARIANT)s', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
        elif (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_object_identifier%(FN_VARIANT)s', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
#--- ObjectIdentifierValue ----------------------------------------------------
class ObjectIdentifierValue (Value):
    """An OBJECT IDENTIFIER value: a list of components rendered as a
    dotted C string literal."""
    def get_num(self, path, val):
        # Resolve a well-known arc name via the module-level oid_names
        # table; falls back to the name itself when unknown.
        return str(oid_names.get(path + '/' + val, val))
    def to_str(self, ectx):
        out = ''
        path = ''
        first = True
        sep = ''
        for v in self.comp_list:
            if isinstance(v, Node) and (v.type == 'name_and_number'):
                vstr = v.number
            elif v.isdigit():
                vstr = v
            else:
                vstr = self.get_num(path, v)
            # Non-numeric later components are value references.
            if not first and not vstr.isdigit():
                vstr = ectx.value_get_val(vstr)
            if first:
                # First component decides whether the literal starts with
                # a quote or with a referenced value's C name.
                if vstr.isdigit():
                    out += '"' + vstr
                else:
                    out += ectx.value_get_eth(vstr) + '"'
            else:
                out += sep + vstr
            path += sep + vstr
            first = False
            sep = '.'
        out += '"'
        return out
    def get_dep(self):
        # Return the name of a referenced value this OID depends on, or
        # None when the first component is numeric/well-known.
        v = self.comp_list[0]
        if isinstance(v, Node) and (v.type == 'name_and_number'):
            return None
        elif v.isdigit():
            return None
        else:
            vstr = self.get_num('', v)
            if vstr.isdigit():
                return None
            else:
                return vstr
class NamedNumber(Node):
    """A single name(value) pair from a named-number list."""
    def to_python(self, ctx):
        # Render as a ('name', value) tuple literal for the asn1 API.
        return "('{0}',{1})".format(self.ident, self.val)
class NamedNumListBase(Node):
    """Base for types carrying a named-number list (Python output only)."""
    def to_python (self, ctx):
        return "asn1.%s_class ([%s])" % (self.asn1_typ,",".join (
            [x.to_python (ctx) for x in self.named_list]))
#--- RelativeOIDType ----------------------------------------------------------
class RelativeOIDType (Type):
    """ASN.1 RELATIVE-OID type."""
    def eth_tname(self):
        return 'RELATIVE_OID'
    def eth_ftype(self, ectx):
        return ('FT_REL_OID', 'BASE_NONE')
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_RELATIVE_OID')
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        # Shares the OID dissector variant selection.
        pars['FN_VARIANT'] = ectx.default_oid_variant
        return pars
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_relative_oid%(FN_VARIANT)s', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
        elif (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_relative_oid%(FN_VARIANT)s', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
#--- IntegerType --------------------------------------------------------------
class IntegerType (Type):
    """ASN.1 INTEGER type, optionally with named values and value
    constraints."""
    def to_python (self, ctx):
        return "asn1.INTEGER_class ([%s])" % (",".join (
            [x.to_python (ctx) for x in self.named_list]))
    def add_named_value(self, ident, val):
        # Attach one named value, creating the list on first use.
        e = NamedNumber(ident = ident, val = val)
        if not self.named_list:
            self.named_list = []
        self.named_list.append(e)
    def eth_tname(self):
        if self.named_list:
            return Type.eth_tname(self)
        if not self.HasConstraint():
            return 'INTEGER'
        elif self.constr.type == 'SingleValue' or self.constr.type == 'ValueRange':
            return 'INTEGER' + '_' + self.constr.eth_constrname()
        else:
            return 'INTEGER' + '_' + self.constr.eth_tname()
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_INTEGER')
    def eth_ftype(self, ectx):
        # Unsigned display only when a constraint proves non-negativity.
        if self.HasConstraint():
            if not self.constr.IsNegativ():
                return ('FT_UINT32', 'BASE_DEC')
        return ('FT_INT32', 'BASE_DEC')
    def eth_strings(self):
        if (self.named_list):
            return '$$'
        else:
            return 'NULL'
    def eth_has_vals(self):
        if (self.named_list):
            return True
        else:
            return False
    def get_vals(self, ectx):
        vals = []
        for e in (self.named_list):
            vals.append((int(e.val), e.ident))
        return vals
    def eth_type_vals(self, tname, ectx):
        if not self.eth_has_vals(): return ''
        out = '\n'
        vals = self.get_vals(ectx)
        out += ectx.eth_vals(tname, vals)
        return out
    def reg_enum_vals(self, tname, ectx):
        vals = self.get_vals(ectx)
        for (val, id) in vals:
            ectx.eth_reg_value(id, self, val, ethname=ectx.eth_enum_item(tname, id))
    def eth_type_enum(self, tname, ectx):
        if not self.eth_has_enum(tname, ectx): return ''
        out = '\n'
        vals = self.get_vals(ectx)
        out += ectx.eth_enum(tname, vals)
        return out
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        if self.HasValueConstraint():
            (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_value_constr(ectx)
        return pars
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            if (ectx.constraints_check and self.HasValueConstraint()):
                body = ectx.eth_fn_call('dissect_%(ER)s_constrained_integer%(FN_VARIANT)s', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_integer%(FN_VARIANT)s', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),
                                             ('%(VAL_PTR)s',),))
        elif (ectx.Per() and not self.HasValueConstraint()):
            body = ectx.eth_fn_call('dissect_%(ER)s_integer%(FN_VARIANT)s', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s'),))
        elif (ectx.Per() and self.HasValueConstraint()):
            body = ectx.eth_fn_call('dissect_%(ER)s_constrained_integer%(FN_VARIANT)s', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(VAL_PTR)s', '%(EXT)s'),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
#--- BitStringType ------------------------------------------------------------
class BitStringType (Type):
    """ASN.1 BIT STRING type, with optional named bits and size or
    CONTAINING constraints."""
    def to_python (self, ctx):
        return "asn1.BITSTRING_class ([%s])" % (",".join (
            [x.to_python (ctx) for x in self.named_list]))
    def eth_tname(self):
        if self.named_list:
            return Type.eth_tname(self)
        elif not self.HasConstraint():
            return 'BIT_STRING'
        elif self.constr.IsSize():
            return 'BIT_STRING' + '_' + self.constr.eth_constrname()
        else:
            # '#' marks a name that cannot be used directly; id() keeps it unique.
            return '#' + self.type + '_' + str(id(self))
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_BITSTRING')
    def eth_ftype(self, ectx):
        return ('FT_BYTES', 'BASE_NONE')
    def eth_need_tree(self):
        # A subtree is only needed to display the named bits.
        return self.named_list
    def eth_need_pdu(self, ectx):
        # A CONTAINING constraint dissected via the PDU variant needs a
        # registered PDU for the contained type.
        pdu = None
        if self.HasContentsConstraint():
            t = self.constr.GetContents(ectx)
            if t and (ectx.default_containing_variant in ('_pdu', '_pdu_new')):
                pdu = { 'type' : t,
                        'new' : ectx.default_containing_variant == '_pdu_new' }
        return pdu
    def eth_named_bits(self):
        bits = []
        if (self.named_list):
            for e in (self.named_list):
                bits.append((int(e.val), e.ident))
        return bits
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['LEN_PTR'] = 'NULL'
        (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
        if 'ETT_INDEX' not in pars:
            pars['ETT_INDEX'] = '-1'
        pars['TABLE'] = 'NULL'
        if self.eth_named_bits():
            pars['TABLE'] = '%(PROTOP)s%(TNAME)s_bits'
        if self.HasContentsConstraint():
            pars['FN_VARIANT'] = ectx.default_containing_variant
            t = self.constr.GetContents(ectx)
            if t:
                if pars['FN_VARIANT'] in ('_pdu', '_pdu_new'):
                    # PDU variants dissect by field name, not type name.
                    t = ectx.field[t]['ethname']
                    pars['TYPE_REF_PROTO'] = ''
                    pars['TYPE_REF_TNAME'] = t
                    pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_TNAME)s'
                else:
                    t = ectx.type[t]['ethname']
                    pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
                    pars['TYPE_REF_TNAME'] = t
                    pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
            else:
                pars['TYPE_REF_FN'] = 'NULL'
        return pars
    def eth_type_default_table(self, ectx, tname):
        #print "eth_type_default_table(tname='%s')" % (tname)
        # Named-bits table is only emitted for BER.
        table = ''
        bits = self.eth_named_bits()
        if (bits and ectx.Ber()):
            table = ectx.eth_bits(tname, bits)
        return table
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            if (ectx.constraints_check and self.HasSizeConstraint()):
                body = ectx.eth_fn_call('dissect_%(ER)s_constrained_bitstring', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),
                                             ('%(VAL_PTR)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_bitstring', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),
                                             ('%(VAL_PTR)s',),))
        elif (ectx.Per()):
            if self.HasContentsConstraint():
                body = ectx.eth_fn_call('dissect_%(ER)s_bit_string_containing%(FN_VARIANT)s', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(TYPE_REF_FN)s'),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_bit_string', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(VAL_PTR)s', '%(LEN_PTR)s'),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
#--- BStringValue ------------------------------------------------------------
# Maps a 4-bit binary nibble (as text) to its hexadecimal digit; used by
# BStringValue.to_str to turn ASN.1 bstrings into 0x... literals.
bstring_tab = {
  '0000' : '0',
  '0001' : '1',
  '0010' : '2',
  '0011' : '3',
  '0100' : '4',
  '0101' : '5',
  '0110' : '6',
  '0111' : '7',
  '1000' : '8',
  '1001' : '9',
  '1010' : 'A',
  '1011' : 'B',
  '1100' : 'C',
  '1101' : 'D',
  '1110' : 'E',
  '1111' : 'F',
}
class BStringValue (Value):
    """An ASN.1 binary-string value, e.g. '0101'B, rendered as 0x... ."""
    def to_str(self, ectx):
        # Strip the leading quote and the trailing quote + B suffix.
        v = self.val[1:-2]
        # Pad with zero bits up to a whole number of octets.
        if len(v) % 8:
            v += '0' * (8 - len(v) % 8)
        vv = '0x'
        for i in (list(range(0, len(v), 4))):
            vv += bstring_tab[v[i:i+4]]
        return vv
#--- HStringValue ------------------------------------------------------------
class HStringValue (Value):
def to_str(self, ectx):
vv = '0x'
vv += self.val[1:-2]
return vv
def __int__(self):
return int(self.val[1:-2], 16)
#--- FieldSpec ----------------------------------------------------------------
class FieldSpec (Node):
def __init__(self,*args, **kw) :
self.name = None
Node.__init__ (self,*args, **kw)
def SetName(self, name):
self.name = name
def get_repr(self):
return ['#UNSUPPORTED_' + self.type]
def fld_repr(self):
repr = [self.name]
repr.extend(self.get_repr())
return repr
class TypeFieldSpec (FieldSpec):
  # Type field of an object class: no extra representation beyond the name.
  def get_repr(self):
    return []
class FixedTypeValueFieldSpec (FieldSpec):
  """Fixed-type value field of an object class."""
  def get_repr(self):
    # Report either the referenced type's name or the raw node type.
    # Local renamed so it no longer shadows the builtin repr().
    if isinstance(self.typ, Type_Ref):
      rep = ['TypeReference', self.typ.val]
    else:
      rep = [self.typ.type]
    return rep
class VariableTypeValueFieldSpec (FieldSpec):
  # Variable-type value field: representation not implemented; the '_'
  # prefix marks it as a placeholder.
  def get_repr(self):
    return ['_' + self.type]
class FixedTypeValueSetFieldSpec (FieldSpec):
  # Fixed-type value set field: representation not implemented (placeholder).
  def get_repr(self):
    return ['_' + self.type]
class ObjectFieldSpec (FieldSpec):
  # Object field: represented by a reference to its governing class.
  def get_repr(self):
    return ['ClassReference', self.cls.val]
class ObjectSetFieldSpec (FieldSpec):
  # Object set field: represented by a reference to its governing class.
  def get_repr(self):
    return ['ClassReference', self.cls.val]
#==============================================================================
# PLY grammar actions: each function's docstring IS the grammar production
# and t[0] receives the semantic value of the left-hand side.
def p_module_list_1 (t):
  'module_list : module_list ModuleDefinition'
  t[0] = t[1] + [t[2]]
def p_module_list_2 (t):
  'module_list : ModuleDefinition'
  t[0] = [t[1]]
#--- ITU-T Recommendation X.680 -----------------------------------------------
# 11 ASN.1 lexical items --------------------------------------------------------
# 11.2 Type references
def p_type_ref (t):
  'type_ref : UCASE_IDENT'
  # Upper-case identifiers denote type references.
  t[0] = Type_Ref(val=t[1])
# 11.3 Identifiers
def p_identifier (t):
  'identifier : LCASE_IDENT'
  t[0] = t[1]
# 11.4 Value references
# (kept disabled: a dedicated valuereference rule causes a
# reduce/reduce conflict; plain 'identifier' is used instead)
#def p_valuereference (t):
#  'valuereference : LCASE_IDENT'
#  t[0] = Value_Ref(val=t[1])
# 11.5 Module references
def p_modulereference (t):
  'modulereference : UCASE_IDENT'
  t[0] = t[1]
# 12 Module definition --------------------------------------------------------
# 12.1
def p_ModuleDefinition (t):
  'ModuleDefinition : ModuleIdentifier DEFINITIONS TagDefault ASSIGNMENT ModuleBegin BEGIN ModuleBody END'
  t[0] = Module (ident = t[1], tag_def = t[3], body = t[7])
def p_ModuleBegin (t):
  'ModuleBegin : '
  # Embedded action: t[-4] indexes back on the parser stack to the
  # ModuleIdentifier symbol of the enclosing ModuleDefinition production,
  # so the X.880 bootstrap runs before the module body is parsed.
  if t[-4].val == 'Remote-Operations-Information-Objects':
    x880_module_begin()
def p_TagDefault_1 (t):
  '''TagDefault : EXPLICIT TAGS
                | IMPLICIT TAGS
                | AUTOMATIC TAGS '''
  t[0] = Default_Tags (dfl_tag = t[1])
def p_TagDefault_2 (t):
  'TagDefault : '
  # 12.2 The "TagDefault" is taken as EXPLICIT TAGS if it is "empty".
  t[0] = Default_Tags (dfl_tag = 'EXPLICIT')
def p_ModuleIdentifier_1 (t):
'ModuleIdentifier : modulereference DefinitiveIdentifier' # name, oid
t [0] = Node('module_ident', val = t[1], ident = t[2])
def p_ModuleIdentifier_2 (t):
'ModuleIdentifier : modulereference' # name, oid
t [0] = Node('module_ident', val = t[1], ident = None)
def p_DefinitiveIdentifier (t):
'DefinitiveIdentifier : ObjectIdentifierValue'
t[0] = t[1]
#def p_module_ref (t):
# 'module_ref : UCASE_IDENT'
# t[0] = t[1]
def p_ModuleBody_1 (t):
'ModuleBody : Exports Imports AssignmentList'
t[0] = Module_Body (exports = t[1], imports = t[2], assign_list = t[3])
def p_ModuleBody_2 (t):
'ModuleBody : '
t[0] = Node ('module_body', exports = [], imports = [], assign_list = [])
def p_Exports_1 (t):
'Exports : EXPORTS syms_exported SEMICOLON'
t[0] = t[2]
def p_Exports_2 (t):
'Exports : EXPORTS ALL SEMICOLON'
t[0] = [ 'ALL' ]
def p_Exports_3 (t):
'Exports : '
t[0] = [ 'ALL' ]
def p_syms_exported_1 (t):
'syms_exported : exp_sym_list'
t[0] = t[1]
def p_syms_exported_2 (t):
'syms_exported : '
t[0] = []
def p_exp_sym_list_1 (t):
'exp_sym_list : Symbol'
t[0] = [t[1]]
def p_exp_sym_list_2 (t):
'exp_sym_list : exp_sym_list COMMA Symbol'
t[0] = t[1] + [t[3]]
def p_Imports_1 (t):
  'Imports : importsbegin IMPORTS SymbolsImported SEMICOLON'
  t[0] = t[3]
  # Once the IMPORTS clause has been parsed, clear the table of assigned
  # lower-case identifiers again (it was seeded in p_importsbegin).
  global lcase_ident_assigned
  lcase_ident_assigned = {}
def p_importsbegin (t):
  'importsbegin : '
  # Embedded action executed before IMPORTS is parsed: seed the table of
  # assigned identifiers from g_conform's ASSIGNED_ID/OBJECT_IDENTIFIER
  # entries so LCASE_IDENT_ASSIGNED tokens can be recognized.
  global lcase_ident_assigned
  global g_conform
  lcase_ident_assigned = {}
  lcase_ident_assigned.update(g_conform.use_item('ASSIGNED_ID', 'OBJECT_IDENTIFIER'))
def p_Imports_2 (t):
  'Imports : '
  # No IMPORTS clause present.
  t[0] = []
def p_SymbolsImported_1(t):
'SymbolsImported : '
t[0] = []
def p_SymbolsImported_2 (t):
'SymbolsImported : SymbolsFromModuleList'
t[0] = t[1]
def p_SymbolsFromModuleList_1 (t):
'SymbolsFromModuleList : SymbolsFromModuleList SymbolsFromModule'
t[0] = t[1] + [t[2]]
def p_SymbolsFromModuleList_2 (t):
'SymbolsFromModuleList : SymbolsFromModule'
t[0] = [t[1]]
def p_SymbolsFromModule (t):
  'SymbolsFromModule : SymbolList FROM GlobalModuleReference'
  t[0] = Node ('SymbolList', symbol_list = t[1], module = t[3])
  # Side effects: record which value references were imported (keyed by
  # name, mapping to the source module) and post-process the symbol list.
  for s in (t[0].symbol_list):
    if (isinstance(s, Value_Ref)): lcase_ident_assigned[s.val] = t[3]
  import_symbols_from_module(t[0].module, t[0].symbol_list)
def import_symbols_from_module(module, symbol_list):
  """Post-process a list of imported symbols, in place.

  Type references that actually name information object classes are
  rewritten into Class_Ref nodes.  Symbols imported from the X.880 ROS
  module are additionally registered via x880_import().
  """
  if module.val == 'Remote-Operations-Information-Objects':
    for i, s in enumerate(symbol_list):
      if isinstance(s, (Type_Ref, Class_Ref)):
        x880_import(s.val)
      if isinstance(s, Type_Ref) and is_class_ident(s.val):
        symbol_list[i] = Class_Ref (val = s.val)
    return
  for i, s in enumerate(symbol_list):
    # Class identities may be registered under a module-qualified key
    # ("$module$name"); import the class definition when found.
    if isinstance(s, Type_Ref) and is_class_ident("$%s$%s" % (module.val, s.val)):
      import_class_from_module(module.val, s.val)
    if isinstance(s, Type_Ref) and is_class_ident(s.val):
      symbol_list[i] = Class_Ref (val = s.val)
def p_GlobalModuleReference (t):
'GlobalModuleReference : modulereference AssignedIdentifier'
t [0] = Node('module_ident', val = t[1], ident = t[2])
def p_AssignedIdentifier_1 (t):
'AssignedIdentifier : ObjectIdentifierValue'
t[0] = t[1]
def p_AssignedIdentifier_2 (t):
'AssignedIdentifier : LCASE_IDENT_ASSIGNED'
t[0] = t[1]
def p_AssignedIdentifier_3 (t):
'AssignedIdentifier : '
pass
def p_SymbolList_1 (t):
'SymbolList : Symbol'
t[0] = [t[1]]
def p_SymbolList_2 (t):
'SymbolList : SymbolList COMMA Symbol'
t[0] = t[1] + [t[3]]
def p_Symbol (t):
'''Symbol : Reference
| ParameterizedReference'''
t[0] = t[1]
def p_Reference_1 (t):
'''Reference : type_ref
| objectclassreference '''
t[0] = t[1]
def p_Reference_2 (t):
  '''Reference : LCASE_IDENT_ASSIGNED
               | identifier ''' # instead of valuereference which causes reduce/reduce conflict
  # Lower-case names are treated as value references.
  t[0] = Value_Ref(val=t[1])
def p_AssignmentList_1 (t):
'AssignmentList : AssignmentList Assignment'
t[0] = t[1] + [t[2]]
def p_AssignmentList_2 (t):
'AssignmentList : Assignment SEMICOLON'
t[0] = [t[1]]
def p_AssignmentList_3 (t):
'AssignmentList : Assignment'
t[0] = [t[1]]
def p_Assignment (t):
'''Assignment : TypeAssignment
| ValueAssignment
| ValueSetTypeAssignment
| ObjectClassAssignment
| ObjectAssignment
| ObjectSetAssignment
| ParameterizedAssignment
| pyquote '''
t[0] = t[1]
# 13 Referencing type and value definitions -----------------------------------
# 13.1
def p_DefinedType (t):
'''DefinedType : ExternalTypeReference
| type_ref
| ParameterizedType'''
t[0] = t[1]
def p_DefinedValue_1(t):
'''DefinedValue : ExternalValueReference'''
t[0] = t[1]
def p_DefinedValue_2(t):
  '''DefinedValue : identifier ''' # instead of valuereference which causes reduce/reduce conflict
  t[0] = Value_Ref(val=t[1])
# 13.6
def p_ExternalTypeReference (t):
'ExternalTypeReference : modulereference DOT type_ref'
t[0] = Node ('ExternalTypeReference', module = t[1], typ = t[3])
def p_ExternalValueReference (t):
'ExternalValueReference : modulereference DOT identifier'
t[0] = Node ('ExternalValueReference', module = t[1], ident = t[3])
# 15 Assigning types and values -----------------------------------------------
# 15.1
def p_TypeAssignment (t):
'TypeAssignment : UCASE_IDENT ASSIGNMENT Type'
t[0] = t[3]
t[0].SetName(t[1])
# 15.2
def p_ValueAssignment (t):
'ValueAssignment : LCASE_IDENT ValueType ASSIGNMENT Value'
t[0] = ValueAssignment(ident = t[1], typ = t[2], val = t[4])
# only "simple" types are supported to simplify grammer
def p_ValueType (t):
'''ValueType : type_ref
| BooleanType
| IntegerType
| ObjectIdentifierType
| OctetStringType
| RealType '''
t[0] = t[1]
# 15.6
def p_ValueSetTypeAssignment (t):
'ValueSetTypeAssignment : UCASE_IDENT ValueType ASSIGNMENT ValueSet'
t[0] = Node('ValueSetTypeAssignment', name=t[1], typ=t[2], val=t[4])
# 15.7
def p_ValueSet (t):
'ValueSet : lbraceignore rbraceignore'
t[0] = None
# 16 Definition of types and values -------------------------------------------
# 16.1
def p_Type (t):
'''Type : BuiltinType
| ReferencedType
| ConstrainedType'''
t[0] = t[1]
# 16.2
def p_BuiltinType (t):
'''BuiltinType : AnyType
| BitStringType
| BooleanType
| CharacterStringType
| ChoiceType
| EmbeddedPDVType
| EnumeratedType
| ExternalType
| InstanceOfType
| IntegerType
| NullType
| ObjectClassFieldType
| ObjectIdentifierType
| OctetStringType
| RealType
| RelativeOIDType
| SequenceType
| SequenceOfType
| SetType
| SetOfType
| TaggedType'''
t[0] = t[1]
# 16.3
def p_ReferencedType (t):
'''ReferencedType : DefinedType
| UsefulType
| SelectionType'''
t[0] = t[1]
# 16.5
def p_NamedType (t):
'NamedType : identifier Type'
t[0] = t[2]
t[0].SetName (t[1])
# 16.7
def p_Value (t):
'''Value : BuiltinValue
| ReferencedValue
| ObjectClassFieldValue'''
t[0] = t[1]
# 16.9
def p_BuiltinValue (t):
'''BuiltinValue : BooleanValue
| ChoiceValue
| IntegerValue
| ObjectIdentifierValue
| RealValue
| SequenceValue
| hex_string
| binary_string
| char_string''' # XXX we don't support {data} here
t[0] = t[1]
# 16.11
def p_ReferencedValue (t):
'''ReferencedValue : DefinedValue
| ValueFromObject'''
t[0] = t[1]
# 16.13
#def p_NamedValue (t):
# 'NamedValue : identifier Value'
# t[0] = Node ('NamedValue', ident = t[1], value = t[2])
# 17 Notation for the boolean type --------------------------------------------
# 17.1
def p_BooleanType (t):
'BooleanType : BOOLEAN'
t[0] = BooleanType ()
# 17.2
def p_BooleanValue (t):
'''BooleanValue : TRUE
| FALSE'''
t[0] = t[1]
# 18 Notation for the integer type --------------------------------------------
# 18.1
def p_IntegerType_1 (t):
'IntegerType : INTEGER'
t[0] = IntegerType (named_list = None)
def p_IntegerType_2 (t):
'IntegerType : INTEGER LBRACE NamedNumberList RBRACE'
t[0] = IntegerType(named_list = t[3])
def p_NamedNumberList_1 (t):
'NamedNumberList : NamedNumber'
t[0] = [t[1]]
def p_NamedNumberList_2 (t):
'NamedNumberList : NamedNumberList COMMA NamedNumber'
t[0] = t[1] + [t[3]]
def p_NamedNumber (t):
'''NamedNumber : identifier LPAREN SignedNumber RPAREN
| identifier LPAREN DefinedValue RPAREN'''
t[0] = NamedNumber(ident = t[1], val = t[3])
def p_SignedNumber_1 (t):
  'SignedNumber : NUMBER'
  # A bare NUMBER token passes through unchanged (numbers stay strings).
  t[0] = t[1]
def p_SignedNumber_2 (t):
  'SignedNumber : MINUS NUMBER'
  # Negative literal: glue the sign onto the numeric string.
  t[0] = '-%s' % t[2]
# 18.9
def p_IntegerValue (t):
'IntegerValue : SignedNumber'
t[0] = t [1]
# 19 Notation for the enumerated type -----------------------------------------
# 19.1
def p_EnumeratedType (t):
'EnumeratedType : ENUMERATED LBRACE Enumerations RBRACE'
t[0] = EnumeratedType (val = t[3]['val'], ext = t[3]['ext'])
def p_Enumerations_1 (t):
'Enumerations : Enumeration'
t[0] = { 'val' : t[1], 'ext' : None }
def p_Enumerations_2 (t):
'Enumerations : Enumeration COMMA ELLIPSIS ExceptionSpec'
t[0] = { 'val' : t[1], 'ext' : [] }
def p_Enumerations_3 (t):
'Enumerations : Enumeration COMMA ELLIPSIS ExceptionSpec COMMA Enumeration'
t[0] = { 'val' : t[1], 'ext' : t[6] }
def p_Enumeration_1 (t):
'Enumeration : EnumerationItem'
t[0] = [t[1]]
def p_Enumeration_2 (t):
'Enumeration : Enumeration COMMA EnumerationItem'
t[0] = t[1] + [t[3]]
def p_EnumerationItem (t):
'''EnumerationItem : Identifier
| NamedNumber'''
t[0] = t[1]
def p_Identifier (t):
'Identifier : identifier'
t[0] = Node ('Identifier', ident = t[1])
# 20 Notation for the real type -----------------------------------------------
# 20.1
def p_RealType (t):
'RealType : REAL'
t[0] = RealType ()
# 20.6
def p_RealValue (t):
'''RealValue : REAL_NUMBER
| SpecialRealValue'''
t[0] = t [1]
def p_SpecialRealValue (t):
'''SpecialRealValue : PLUS_INFINITY
| MINUS_INFINITY'''
t[0] = t[1]
# 21 Notation for the bitstring type ------------------------------------------
# 21.1
def p_BitStringType_1 (t):
'BitStringType : BIT STRING'
t[0] = BitStringType (named_list = None)
def p_BitStringType_2 (t):
'BitStringType : BIT STRING LBRACE NamedBitList RBRACE'
t[0] = BitStringType (named_list = t[4])
def p_NamedBitList_1 (t):
'NamedBitList : NamedBit'
t[0] = [t[1]]
def p_NamedBitList_2 (t):
'NamedBitList : NamedBitList COMMA NamedBit'
t[0] = t[1] + [t[3]]
def p_NamedBit (t):
'''NamedBit : identifier LPAREN NUMBER RPAREN
| identifier LPAREN DefinedValue RPAREN'''
t[0] = NamedNumber (ident = t[1], val = t[3])
# 22 Notation for the octetstring type ----------------------------------------
# 22.1
def p_OctetStringType (t):
'OctetStringType : OCTET STRING'
t[0] = OctetStringType ()
# 23 Notation for the null type -----------------------------------------------
# 23.1
def p_NullType (t):
'NullType : NULL'
t[0] = NullType ()
# 23.3
def p_NullValue (t):
'NullValue : NULL'
t[0] = NullValue ()
# 24 Notation for sequence types ----------------------------------------------
# 24.1
def p_SequenceType_1 (t):
'SequenceType : SEQUENCE LBRACE RBRACE'
t[0] = SequenceType (elt_list = [])
def p_SequenceType_2 (t):
'SequenceType : SEQUENCE LBRACE ComponentTypeLists RBRACE'
t[0] = SequenceType (elt_list = t[3]['elt_list'])
if 'ext_list' in t[3]:
t[0].ext_list = t[3]['ext_list']
if 'elt_list2' in t[3]:
t[0].elt_list2 = t[3]['elt_list2']
def p_ExtensionAndException_1 (t):
'ExtensionAndException : ELLIPSIS'
t[0] = []
def p_OptionalExtensionMarker_1 (t):
'OptionalExtensionMarker : COMMA ELLIPSIS'
t[0] = True
def p_OptionalExtensionMarker_2 (t):
'OptionalExtensionMarker : '
t[0] = False
# ComponentTypeLists actions build a dict with up to three keys:
#   'elt_list'  - root components before the extension marker,
#   'ext_list'  - extension additions,
#   'elt_list2' - root components appearing after the extension block.
def p_ComponentTypeLists_1 (t):
  'ComponentTypeLists : ComponentTypeList'
  t[0] = {'elt_list' : t[1]}
def p_ComponentTypeLists_2 (t):
  'ComponentTypeLists : ComponentTypeList COMMA ExtensionAndException OptionalExtensionMarker'
  t[0] = {'elt_list' : t[1], 'ext_list' : []}
def p_ComponentTypeLists_3 (t):
  'ComponentTypeLists : ComponentTypeList COMMA ExtensionAndException ExtensionAdditionList OptionalExtensionMarker'
  t[0] = {'elt_list' : t[1], 'ext_list' : t[4]}
def p_ComponentTypeLists_4 (t):
  'ComponentTypeLists : ComponentTypeList COMMA ExtensionAndException ExtensionEndMarker COMMA ComponentTypeList'
  t[0] = {'elt_list' : t[1], 'ext_list' : [], 'elt_list2' : t[6]}
def p_ComponentTypeLists_5 (t):
  'ComponentTypeLists : ComponentTypeList COMMA ExtensionAndException ExtensionAdditionList ExtensionEndMarker COMMA ComponentTypeList'
  t[0] = {'elt_list' : t[1], 'ext_list' : t[4], 'elt_list2' : t[7]}
def p_ComponentTypeLists_6 (t):
  'ComponentTypeLists : ExtensionAndException OptionalExtensionMarker'
  t[0] = {'elt_list' : [], 'ext_list' : []}
def p_ComponentTypeLists_7 (t):
  'ComponentTypeLists : ExtensionAndException ExtensionAdditionList OptionalExtensionMarker'
  t[0] = {'elt_list' : [], 'ext_list' : t[2]}
def p_ExtensionEndMarker (t):
'ExtensionEndMarker : COMMA ELLIPSIS'
pass
def p_ExtensionAdditionList_1 (t):
'ExtensionAdditionList : COMMA ExtensionAddition'
t[0] = [t[2]]
def p_ExtensionAdditionList_2 (t):
'ExtensionAdditionList : ExtensionAdditionList COMMA ExtensionAddition'
t[0] = t[1] + [t[3]]
def p_ExtensionAddition_1 (t):
'ExtensionAddition : ExtensionAdditionGroup'
t[0] = Node ('elt_type', val = t[1], optional = 0)
def p_ExtensionAddition_2 (t):
'ExtensionAddition : ComponentType'
t[0] = t[1]
def p_ExtensionAdditionGroup (t):
'ExtensionAdditionGroup : LVERBRACK VersionNumber ComponentTypeList RVERBRACK'
t[0] = ExtensionAdditionGroup (ver = t[2], elt_list = t[3])
def p_VersionNumber_1 (t):
'VersionNumber : '
def p_VersionNumber_2 (t):
'VersionNumber : NUMBER COLON'
t[0] = t[1]
def p_ComponentTypeList_1 (t):
'ComponentTypeList : ComponentType'
t[0] = [t[1]]
def p_ComponentTypeList_2 (t):
'ComponentTypeList : ComponentTypeList COMMA ComponentType'
t[0] = t[1] + [t[3]]
def p_ComponentType_1 (t):
'ComponentType : NamedType'
t[0] = Node ('elt_type', val = t[1], optional = 0)
def p_ComponentType_2 (t):
'ComponentType : NamedType OPTIONAL'
t[0] = Node ('elt_type', val = t[1], optional = 1)
def p_ComponentType_3 (t):
'ComponentType : NamedType DEFAULT DefaultValue'
t[0] = Node ('elt_type', val = t[1], optional = 1, default = t[3])
def p_ComponentType_4 (t):
'ComponentType : COMPONENTS OF Type'
t[0] = Node ('components_of', typ = t[3])
def p_DefaultValue_1 (t):
'''DefaultValue : ReferencedValue
| BooleanValue
| ChoiceValue
| IntegerValue
| RealValue
| hex_string
| binary_string
| char_string
| ObjectClassFieldValue'''
t[0] = t[1]
def p_DefaultValue_2 (t):
'DefaultValue : lbraceignore rbraceignore'
t[0] = ''
# 24.17
def p_SequenceValue_1 (t):
'SequenceValue : LBRACE RBRACE'
t[0] = []
#def p_SequenceValue_2 (t):
# 'SequenceValue : LBRACE ComponentValueList RBRACE'
# t[0] = t[2]
#def p_ComponentValueList_1 (t):
# 'ComponentValueList : NamedValue'
# t[0] = [t[1]]
#def p_ComponentValueList_2 (t):
# 'ComponentValueList : ComponentValueList COMMA NamedValue'
# t[0] = t[1] + [t[3]]
# 25 Notation for sequence-of types -------------------------------------------
# 25.1
def p_SequenceOfType (t):
'''SequenceOfType : SEQUENCE OF Type
| SEQUENCE OF NamedType'''
t[0] = SequenceOfType (val = t[3], size_constr = None)
# 26 Notation for set types ---------------------------------------------------
# 26.1
def p_SetType_1 (t):
'SetType : SET LBRACE RBRACE'
t[0] = SetType (elt_list = [])
def p_SetType_2 (t):
'SetType : SET LBRACE ComponentTypeLists RBRACE'
t[0] = SetType (elt_list = t[3]['elt_list'])
if 'ext_list' in t[3]:
t[0].ext_list = t[3]['ext_list']
if 'elt_list2' in t[3]:
t[0].elt_list2 = t[3]['elt_list2']
# 27 Notation for set-of types ------------------------------------------------
# 27.1
def p_SetOfType (t):
'''SetOfType : SET OF Type
| SET OF NamedType'''
t[0] = SetOfType (val = t[3])
# 28 Notation for choice types ------------------------------------------------
# 28.1
def p_ChoiceType (t):
'ChoiceType : CHOICE LBRACE AlternativeTypeLists RBRACE'
if 'ext_list' in t[3]:
t[0] = ChoiceType (elt_list = t[3]['elt_list'], ext_list = t[3]['ext_list'])
else:
t[0] = ChoiceType (elt_list = t[3]['elt_list'])
def p_AlternativeTypeLists_1 (t):
'AlternativeTypeLists : AlternativeTypeList'
t[0] = {'elt_list' : t[1]}
def p_AlternativeTypeLists_2 (t):
'AlternativeTypeLists : AlternativeTypeList COMMA ExtensionAndException ExtensionAdditionAlternatives OptionalExtensionMarker'
t[0] = {'elt_list' : t[1], 'ext_list' : t[4]}
def p_ExtensionAdditionAlternatives_1 (t):
'ExtensionAdditionAlternatives : ExtensionAdditionAlternativesList'
t[0] = t[1]
def p_ExtensionAdditionAlternatives_2 (t):
'ExtensionAdditionAlternatives : '
t[0] = []
def p_ExtensionAdditionAlternativesList_1 (t):
'ExtensionAdditionAlternativesList : COMMA ExtensionAdditionAlternative'
t[0] = t[2]
def p_ExtensionAdditionAlternativesList_2 (t):
'ExtensionAdditionAlternativesList : ExtensionAdditionAlternativesList COMMA ExtensionAdditionAlternative'
t[0] = t[1] + t[3]
def p_ExtensionAdditionAlternative_1 (t):
'ExtensionAdditionAlternative : NamedType'
t[0] = [t[1]]
def p_ExtensionAdditionAlternative_2 (t):
'ExtensionAdditionAlternative : ExtensionAdditionAlternativesGroup'
t[0] = t[1]
def p_ExtensionAdditionAlternativesGroup (t):
'ExtensionAdditionAlternativesGroup : LVERBRACK VersionNumber AlternativeTypeList RVERBRACK'
t[0] = t[3]
def p_AlternativeTypeList_1 (t):
'AlternativeTypeList : NamedType'
t[0] = [t[1]]
def p_AlternativeTypeList_2 (t):
'AlternativeTypeList : AlternativeTypeList COMMA NamedType'
t[0] = t[1] + [t[3]]
# 28.10
def p_ChoiceValue_1 (t):
  '''ChoiceValue : identifier COLON Value
                 | identifier COLON NullValue '''
  val = t[3]
  # Wrap plain results so ChoiceValue always holds a Value node.
  if not isinstance(val, Value):
    val = Value(val=val)
  t[0] = ChoiceValue (choice = t[1], val = val)
# 29 Notation for selection types
# 29.1
def p_SelectionType (t): #
'SelectionType : identifier LT Type'
t[0] = SelectionType (typ = t[3], sel = t[1])
# 30 Notation for tagged types ------------------------------------------------
# 30.1
def p_TaggedType_1 (t):
'TaggedType : Tag Type'
t[1].mode = 'default'
t[0] = t[2]
t[0].AddTag(t[1])
def p_TaggedType_2 (t):
'''TaggedType : Tag IMPLICIT Type
| Tag EXPLICIT Type'''
t[1].mode = t[2]
t[0] = t[3]
t[0].AddTag(t[1])
def p_Tag (t):
'Tag : LBRACK Class ClassNumber RBRACK'
t[0] = Tag(cls = t[2], num = t[3])
def p_ClassNumber_1 (t):
'ClassNumber : number'
t[0] = t[1]
def p_ClassNumber_2 (t):
'ClassNumber : DefinedValue'
t[0] = t[1]
def p_Class_1 (t):
'''Class : UNIVERSAL
| APPLICATION
| PRIVATE'''
t[0] = t[1]
def p_Class_2 (t):
'Class :'
t[0] = 'CONTEXT'
# 31 Notation for the object identifier type ----------------------------------
# 31.1
def p_ObjectIdentifierType (t):
'ObjectIdentifierType : OBJECT IDENTIFIER'
t[0] = ObjectIdentifierType()
# 31.3
def p_ObjectIdentifierValue (t):
'ObjectIdentifierValue : LBRACE oid_comp_list RBRACE'
t[0] = ObjectIdentifierValue (comp_list=t[2])
def p_oid_comp_list_1 (t):
'oid_comp_list : oid_comp_list ObjIdComponents'
t[0] = t[1] + [t[2]]
def p_oid_comp_list_2 (t):
'oid_comp_list : ObjIdComponents'
t[0] = [t[1]]
def p_ObjIdComponents (t):
'''ObjIdComponents : NameForm
| NumberForm
| NameAndNumberForm'''
t[0] = t[1]
def p_NameForm (t):
'''NameForm : LCASE_IDENT
| LCASE_IDENT_ASSIGNED'''
t [0] = t[1]
def p_NumberForm (t):
'''NumberForm : NUMBER'''
# | DefinedValue'''
t [0] = t[1]
def p_NameAndNumberForm (t):
'''NameAndNumberForm : LCASE_IDENT_ASSIGNED LPAREN NumberForm RPAREN
| LCASE_IDENT LPAREN NumberForm RPAREN'''
t[0] = Node('name_and_number', ident = t[1], number = t[3])
# 32 Notation for the relative object identifier type -------------------------
# 32.1
def p_RelativeOIDType (t):
'RelativeOIDType : RELATIVE_OID'
t[0] = RelativeOIDType()
# 33 Notation for the embedded-pdv type ---------------------------------------
# 33.1
def p_EmbeddedPDVType (t):
'EmbeddedPDVType : EMBEDDED PDV'
t[0] = EmbeddedPDVType()
# 34 Notation for the external type -------------------------------------------
# 34.1
def p_ExternalType (t):
'ExternalType : EXTERNAL'
t[0] = ExternalType()
# 36 Notation for character string types --------------------------------------
# 36.1
def p_CharacterStringType (t):
'''CharacterStringType : RestrictedCharacterStringType
| UnrestrictedCharacterStringType'''
t[0] = t[1]
# 37 Definition of restricted character string types --------------------------
def p_RestrictedCharacterStringType_1 (t):
'RestrictedCharacterStringType : BMPString'
t[0] = BMPStringType ()
def p_RestrictedCharacterStringType_2 (t):
'RestrictedCharacterStringType : GeneralString'
t[0] = GeneralStringType ()
def p_RestrictedCharacterStringType_3 (t):
'RestrictedCharacterStringType : GraphicString'
t[0] = GraphicStringType ()
def p_RestrictedCharacterStringType_4 (t):
'RestrictedCharacterStringType : IA5String'
t[0] = IA5StringType ()
def p_RestrictedCharacterStringType_5 (t):
'RestrictedCharacterStringType : ISO646String'
t[0] = ISO646StringType ()
def p_RestrictedCharacterStringType_6 (t):
'RestrictedCharacterStringType : NumericString'
t[0] = NumericStringType ()
def p_RestrictedCharacterStringType_7 (t):
'RestrictedCharacterStringType : PrintableString'
t[0] = PrintableStringType ()
def p_RestrictedCharacterStringType_8 (t):
'RestrictedCharacterStringType : TeletexString'
t[0] = TeletexStringType ()
def p_RestrictedCharacterStringType_9 (t):
'RestrictedCharacterStringType : T61String'
t[0] = T61StringType ()
def p_RestrictedCharacterStringType_10 (t):
'RestrictedCharacterStringType : UniversalString'
t[0] = UniversalStringType ()
def p_RestrictedCharacterStringType_11 (t):
'RestrictedCharacterStringType : UTF8String'
t[0] = UTF8StringType ()
def p_RestrictedCharacterStringType_12 (t):
'RestrictedCharacterStringType : VideotexString'
t[0] = VideotexStringType ()
def p_RestrictedCharacterStringType_13 (t):
'RestrictedCharacterStringType : VisibleString'
t[0] = VisibleStringType ()
# 40 Definition of unrestricted character string types ------------------------
# 40.1
def p_UnrestrictedCharacterStringType (t):
'UnrestrictedCharacterStringType : CHARACTER STRING'
t[0] = UnrestrictedCharacterStringType ()
# 41 Notation for types defined in clauses 42 to 44 ---------------------------
# 42 Generalized time ---------------------------------------------------------
def p_UsefulType_1 (t):
'UsefulType : GeneralizedTime'
t[0] = GeneralizedTime()
# 43 Universal time -----------------------------------------------------------
def p_UsefulType_2 (t):
'UsefulType : UTCTime'
t[0] = UTCTime()
# 44 The object descriptor type -----------------------------------------------
def p_UsefulType_3 (t):
'UsefulType : ObjectDescriptor'
t[0] = ObjectDescriptor()
# 45 Constrained types --------------------------------------------------------
# 45.1
def p_ConstrainedType_1 (t):
'ConstrainedType : Type Constraint'
t[0] = t[1]
t[0].AddConstraint(t[2])
def p_ConstrainedType_2 (t):
'ConstrainedType : TypeWithConstraint'
t[0] = t[1]
# 45.5
def p_TypeWithConstraint_1 (t):
'''TypeWithConstraint : SET Constraint OF Type
| SET SizeConstraint OF Type'''
t[0] = SetOfType (val = t[4], constr = t[2])
def p_TypeWithConstraint_2 (t):
'''TypeWithConstraint : SEQUENCE Constraint OF Type
| SEQUENCE SizeConstraint OF Type'''
t[0] = SequenceOfType (val = t[4], constr = t[2])
def p_TypeWithConstraint_3 (t):
'''TypeWithConstraint : SET Constraint OF NamedType
| SET SizeConstraint OF NamedType'''
t[0] = SetOfType (val = t[4], constr = t[2])
def p_TypeWithConstraint_4 (t):
'''TypeWithConstraint : SEQUENCE Constraint OF NamedType
| SEQUENCE SizeConstraint OF NamedType'''
t[0] = SequenceOfType (val = t[4], constr = t[2])
# 45.6
# 45.7
def p_Constraint (t):
'Constraint : LPAREN ConstraintSpec ExceptionSpec RPAREN'
t[0] = t[2]
def p_ConstraintSpec (t):
'''ConstraintSpec : ElementSetSpecs
| GeneralConstraint'''
t[0] = t[1]
# 46 Element set specification ------------------------------------------------
# 46.1
def p_ElementSetSpecs_1 (t):
'ElementSetSpecs : RootElementSetSpec'
t[0] = t[1]
def p_ElementSetSpecs_2 (t):
'ElementSetSpecs : RootElementSetSpec COMMA ELLIPSIS'
t[0] = t[1]
t[0].ext = True
def p_ElementSetSpecs_3 (t):
'ElementSetSpecs : RootElementSetSpec COMMA ELLIPSIS COMMA AdditionalElementSetSpec'
t[0] = t[1]
t[0].ext = True
def p_RootElementSetSpec (t):
'RootElementSetSpec : ElementSetSpec'
t[0] = t[1]
def p_AdditionalElementSetSpec (t):
'AdditionalElementSetSpec : ElementSetSpec'
t[0] = t[1]
def p_ElementSetSpec (t):
'ElementSetSpec : Unions'
t[0] = t[1]
def p_Unions_1 (t):
'Unions : Intersections'
t[0] = t[1]
def p_Unions_2 (t):
'Unions : UElems UnionMark Intersections'
t[0] = Constraint(type = 'Union', subtype = [t[1], t[3]])
def p_UElems (t):
'UElems : Unions'
t[0] = t[1]
def p_Intersections_1 (t):
'Intersections : IntersectionElements'
t[0] = t[1]
def p_Intersections_2 (t):
'Intersections : IElems IntersectionMark IntersectionElements'
t[0] = Constraint(type = 'Intersection', subtype = [t[1], t[3]])
def p_IElems (t):
'IElems : Intersections'
t[0] = t[1]
def p_IntersectionElements (t):
'IntersectionElements : Elements'
t[0] = t[1]
def p_UnionMark (t):
'''UnionMark : BAR
| UNION'''
def p_IntersectionMark (t):
'''IntersectionMark : CIRCUMFLEX
| INTERSECTION'''
# 46.5
def p_Elements_1 (t):
'Elements : SubtypeElements'
t[0] = t[1]
def p_Elements_2 (t):
'Elements : LPAREN ElementSetSpec RPAREN'
t[0] = t[2]
# 47 Subtype elements ---------------------------------------------------------
# 47.1 General
def p_SubtypeElements (t):
'''SubtypeElements : SingleValue
| ContainedSubtype
| ValueRange
| PermittedAlphabet
| SizeConstraint
| TypeConstraint
| InnerTypeConstraints
| PatternConstraint'''
t[0] = t[1]
# 47.2 Single value
# 47.2.1
def p_SingleValue (t):
'SingleValue : Value'
t[0] = Constraint(type = 'SingleValue', subtype = t[1])
# 47.3 Contained subtype
# 47.3.1
def p_ContainedSubtype (t):
'ContainedSubtype : Includes Type'
t[0] = Constraint(type = 'ContainedSubtype', subtype = t[2])
def p_Includes (t):
'''Includes : INCLUDES
| '''
# 47.4 Value range
# 47.4.1
def p_ValueRange (t):
'ValueRange : LowerEndpoint RANGE UpperEndpoint'
t[0] = Constraint(type = 'ValueRange', subtype = [t[1], t[3]])
# 47.4.3
def p_LowerEndpoint_1 (t):
'LowerEndpoint : LowerEndValue'
t[0] = t[1]
def p_LowerEndpoint_2 (t):
'LowerEndpoint : LowerEndValue LT'
t[0] = t[1] # but not inclusive range
def p_UpperEndpoint_1 (t):
'UpperEndpoint : UpperEndValue'
t[0] = t[1]
def p_UpperEndpoint_2 (t):
'UpperEndpoint : LT UpperEndValue'
t[0] = t[1] # but not inclusive range
# 47.4.4
def p_LowerEndValue (t):
'''LowerEndValue : Value
| MIN'''
t[0] = t[1] # XXX
def p_UpperEndValue (t):
'''UpperEndValue : Value
| MAX'''
t[0] = t[1]
# 47.5 Size constraint
# 47.5.1
def p_SizeConstraint (t):
'SizeConstraint : SIZE Constraint'
t[0] = Constraint (type = 'Size', subtype = t[2])
# 47.6 Type constraint
# 47.6.1
def p_TypeConstraint (t):
'TypeConstraint : Type'
t[0] = Constraint (type = 'Type', subtype = t[1])
# 47.7 Permitted alphabet
# 47.7.1
def p_PermittedAlphabet (t):
'PermittedAlphabet : FROM Constraint'
t[0] = Constraint (type = 'From', subtype = t[2])
# 47.8 Inner subtyping
# 47.8.1
def p_InnerTypeConstraints (t):
'''InnerTypeConstraints : WITH COMPONENT SingleTypeConstraint
| WITH COMPONENTS MultipleTypeConstraints'''
pass # ignore PER invisible constraint
# 47.8.3
def p_SingleTypeConstraint (t):
'SingleTypeConstraint : Constraint'
t[0] = t[1]
# 47.8.4
def p_MultipleTypeConstraints (t):
'''MultipleTypeConstraints : FullSpecification
| PartialSpecification'''
t[0] = t[1]
def p_FullSpecification (t):
'FullSpecification : LBRACE TypeConstraints RBRACE'
t[0] = t[2]
def p_PartialSpecification (t):
'PartialSpecification : LBRACE ELLIPSIS COMMA TypeConstraints RBRACE'
t[0] = t[4]
def p_TypeConstraints_1 (t):
'TypeConstraints : named_constraint'
t [0] = [t[1]]
def p_TypeConstraints_2 (t):
'TypeConstraints : TypeConstraints COMMA named_constraint'
t[0] = t[1] + [t[3]]
def p_named_constraint_1 (t):
  'named_constraint : identifier constraint'
  # Assign to t[0]: PLY ignores return values from rule actions, so the
  # previous bare 'return' left the production's value as None.  Downstream
  # consumers (InnerTypeConstraints) currently discard it either way.
  t[0] = Node ('named_constraint', ident = t[1], constr = t[2])
def p_named_constraint_2 (t):
  'named_constraint : constraint'
  t[0] = Node ('named_constraint', constr = t[1])
def p_constraint (t):
'constraint : value_constraint presence_constraint'
t[0] = Node ('constraint', value = t[1], presence = t[2])
def p_value_constraint_1 (t):
'value_constraint : Constraint'
t[0] = t[1]
def p_value_constraint_2 (t):
'value_constraint : '
pass
def p_presence_constraint_1 (t):
'''presence_constraint : PRESENT
| ABSENT
| OPTIONAL'''
t[0] = t[1]
def p_presence_constraint_2 (t):
'''presence_constraint : '''
pass
# 47.9 Pattern constraint
# 47.9.1
def p_PatternConstraint (t):
'PatternConstraint : PATTERN Value'
t[0] = Constraint (type = 'Pattern', subtype = t[2])
# 49 The exception identifier
# 49.4
def p_ExceptionSpec_1 (t):
'ExceptionSpec : EXCLAMATION ExceptionIdentification'
pass
def p_ExceptionSpec_2 (t):
'ExceptionSpec : '
pass
def p_ExceptionIdentification (t):
'''ExceptionIdentification : SignedNumber
| DefinedValue
| Type COLON Value '''
pass
# /*-----------------------------------------------------------------------*/
# /* Value Notation Productions */
# /*-----------------------------------------------------------------------*/
def p_binary_string (t):
'binary_string : BSTRING'
t[0] = BStringValue(val = t[1])
def p_hex_string (t):
'hex_string : HSTRING'
t[0] = HStringValue(val = t[1])
def p_char_string (t):
'char_string : QSTRING'
t[0] = t[1]
def p_number (t):
'number : NUMBER'
t[0] = t[1]
#--- ITU-T Recommendation X.208 -----------------------------------------------
# 27 Notation for the any type ------------------------------------------------
# 27.1
def p_AnyType (t):
'''AnyType : ANY
| ANY DEFINED BY identifier'''
t[0] = AnyType()
#--- ITU-T Recommendation X.681 -----------------------------------------------
# 7 ASN.1 lexical items -------------------------------------------------------
# 7.1 Information object class references
def p_objectclassreference (t):
    'objectclassreference : CLASS_IDENT'
    t[0] = Class_Ref(val=t[1])

# 7.2 Information object references
def p_objectreference (t):
    'objectreference : LCASE_IDENT'
    t[0] = t[1]

# 7.3 Information object set references
#def p_objectsetreference (t):
#  'objectsetreference : UCASE_IDENT'
#  t[0] = t[1]

# 7.4 Type field references
# ucasefieldreference
# 7.5 Value field references
# lcasefieldreference
# 7.6 Value set field references
# ucasefieldreference
# 7.7 Object field references
# lcasefieldreference
# 7.8 Object set field references
# ucasefieldreference

def p_ucasefieldreference (t):
    'ucasefieldreference : AMPERSAND UCASE_IDENT'
    # a field reference keeps its leading '&' (e.g. '&Type')
    t[0] = '&' + t[2]

def p_lcasefieldreference (t):
    'lcasefieldreference : AMPERSAND LCASE_IDENT'
    t[0] = '&' + t[2]

# 8 Referencing definitions
# 8.1
def p_DefinedObjectClass (t):
    '''DefinedObjectClass : objectclassreference
                          | UsefulObjectClassReference'''
    t[0] = t[1]
    # remember the current class so the brace handler can select the
    # matching WITH SYNTAX keyword table (see p_braceobjectbegin)
    global obj_class
    obj_class = t[0].val

def p_DefinedObject (t):
    '''DefinedObject : objectreference'''
    t[0] = t[1]

# 8.4
def p_UsefulObjectClassReference (t):
    '''UsefulObjectClassReference : TYPE_IDENTIFIER
                                  | ABSTRACT_SYNTAX'''
    t[0] = Class_Ref(val=t[1])
# 9 Information object class definition and assignment
# 9.1
def p_ObjectClassAssignment (t):
    '''ObjectClassAssignment : CLASS_IDENT ASSIGNMENT ObjectClass
                             | UCASE_IDENT ASSIGNMENT ObjectClass'''
    t[0] = t[3]
    t[0].SetName(t[1])
    if isinstance(t[0], ObjectClassDefn):
        t[0].reg_types()

# 9.2
def p_ObjectClass (t):
    '''ObjectClass : DefinedObjectClass
                   | ObjectClassDefn
                   | ParameterizedObjectClass '''
    t[0] = t[1]

# 9.3
def p_ObjectClassDefn (t):
    '''ObjectClassDefn : CLASS LBRACE FieldSpecs RBRACE
                       | CLASS LBRACE FieldSpecs RBRACE WithSyntaxSpec'''
    t[0] = ObjectClassDefn(fields = t[3])

def p_FieldSpecs_1 (t):
    'FieldSpecs : FieldSpec'
    t[0] = [t[1]]

def p_FieldSpecs_2 (t):
    'FieldSpecs : FieldSpecs COMMA FieldSpec'
    t[0] = t[1] + [t[3]]

def p_WithSyntaxSpec (t):
    'WithSyntaxSpec : WITH SYNTAX lbraceignore rbraceignore'
    # the syntax definition block itself is skipped by the lexer
    t[0] = None

# 9.4
def p_FieldSpec (t):
    '''FieldSpec : TypeFieldSpec
                 | FixedTypeValueFieldSpec
                 | VariableTypeValueFieldSpec
                 | FixedTypeValueSetFieldSpec
                 | ObjectFieldSpec
                 | ObjectSetFieldSpec '''
    t[0] = t[1]

# 9.5
def p_TypeFieldSpec (t):
    '''TypeFieldSpec : ucasefieldreference
                     | ucasefieldreference TypeOptionalitySpec '''
    t[0] = TypeFieldSpec()
    t[0].SetName(t[1])

def p_TypeOptionalitySpec_1 (t):
    'TypeOptionalitySpec ::= OPTIONAL'
    pass

def p_TypeOptionalitySpec_2 (t):
    'TypeOptionalitySpec ::= DEFAULT Type'
    pass

# 9.6
def p_FixedTypeValueFieldSpec (t):
    '''FixedTypeValueFieldSpec : lcasefieldreference Type
                               | lcasefieldreference Type UNIQUE
                               | lcasefieldreference Type ValueOptionalitySpec
                               | lcasefieldreference Type UNIQUE ValueOptionalitySpec '''
    t[0] = FixedTypeValueFieldSpec(typ = t[2])
    t[0].SetName(t[1])

def p_ValueOptionalitySpec_1 (t):
    'ValueOptionalitySpec ::= OPTIONAL'
    pass

def p_ValueOptionalitySpec_2 (t):
    'ValueOptionalitySpec ::= DEFAULT Value'
    pass

# 9.8
def p_VariableTypeValueFieldSpec (t):
    '''VariableTypeValueFieldSpec : lcasefieldreference FieldName
                                  | lcasefieldreference FieldName ValueOptionalitySpec '''
    t[0] = VariableTypeValueFieldSpec()
    t[0].SetName(t[1])

# 9.9
def p_FixedTypeValueSetFieldSpec (t):
    '''FixedTypeValueSetFieldSpec : ucasefieldreference Type
                                  | ucasefieldreference Type ValueSetOptionalitySpec '''
    t[0] = FixedTypeValueSetFieldSpec()
    t[0].SetName(t[1])

def p_ValueSetOptionalitySpec_1 (t):
    'ValueSetOptionalitySpec ::= OPTIONAL'
    pass

def p_ValueSetOptionalitySpec_2 (t):
    'ValueSetOptionalitySpec ::= DEFAULT ValueSet'
    pass

# 9.11
def p_ObjectFieldSpec (t):
    '''ObjectFieldSpec : lcasefieldreference DefinedObjectClass
                       | lcasefieldreference DefinedObjectClass ObjectOptionalitySpec '''
    t[0] = ObjectFieldSpec(cls=t[2])
    t[0].SetName(t[1])
    # the DefinedObjectClass above set obj_class; reset it here
    global obj_class
    obj_class = None

def p_ObjectOptionalitySpec_1 (t):
    'ObjectOptionalitySpec ::= OPTIONAL'
    pass

def p_ObjectOptionalitySpec_2 (t):
    'ObjectOptionalitySpec ::= DEFAULT Object'
    pass

# 9.12
def p_ObjectSetFieldSpec (t):
    '''ObjectSetFieldSpec : ucasefieldreference DefinedObjectClass
                          | ucasefieldreference DefinedObjectClass ObjectSetOptionalitySpec '''
    t[0] = ObjectSetFieldSpec(cls=t[2])
    t[0].SetName(t[1])

def p_ObjectSetOptionalitySpec_1 (t):
    'ObjectSetOptionalitySpec ::= OPTIONAL'
    pass

def p_ObjectSetOptionalitySpec_2 (t):
    'ObjectSetOptionalitySpec ::= DEFAULT ObjectSet'
    pass

# 9.13
def p_PrimitiveFieldName (t):
    '''PrimitiveFieldName : ucasefieldreference
                          | lcasefieldreference '''
    t[0] = t[1]

# 9.13
def p_FieldName_1 (t):
    'FieldName : PrimitiveFieldName'
    t[0] = t[1]

def p_FieldName_2 (t):
    'FieldName : FieldName DOT PrimitiveFieldName'
    # dotted chain of field references, e.g. '&a.&b'
    t[0] = t[1] + '.' + t[3]

# 11 Information object definition and assignment
# 11.1
def p_ObjectAssignment (t):
    'ObjectAssignment : objectreference DefinedObjectClass ASSIGNMENT Object'
    t[0] = ObjectAssignment (ident = t[1], cls=t[2].val, val=t[4])
    global obj_class
    obj_class = None

# 11.3
def p_Object (t):
    '''Object : DefinedObject
              | ObjectDefn
              | ParameterizedObject'''
    t[0] = t[1]

# 11.4
def p_ObjectDefn (t):
    'ObjectDefn : lbraceobject bodyobject rbraceobject'
    t[0] = t[2]

# {...} block of object definition
def p_lbraceobject(t):
    'lbraceobject : braceobjectbegin LBRACE'
    t[0] = t[1]

def p_braceobjectbegin(t):
    'braceobjectbegin : '
    # empty production fires before LBRACE is shifted: if the current class
    # has an enabled WITH SYNTAX keyword table, parse the body normally;
    # otherwise put the lexer into 'braceignore' state and skip the block
    global lexer
    global obj_class
    if set_class_syntax(obj_class):
        state = 'INITIAL'
    else:
        lexer.level = 1
        state = 'braceignore'
    lexer.push_state(state)

def p_rbraceobject(t):
    'rbraceobject : braceobjectend RBRACE'
    t[0] = t[2]

def p_braceobjectend(t):
    'braceobjectend : '
    global lexer
    lexer.pop_state()
    set_class_syntax(None)
def p_bodyobject_1 (t):
    'bodyobject : '
    # empty object body -> empty field dictionary
    t[0] = { }

def p_bodyobject_2 (t):
    'bodyobject : cls_syntax_list'
    t[0] = t[1]

def p_cls_syntax_list_1 (t):
    'cls_syntax_list : cls_syntax_list cls_syntax'
    # merge successive field dictionaries into one
    t[0] = t[1]
    t[0].update(t[2])

def p_cls_syntax_list_2 (t):
    'cls_syntax_list : cls_syntax'
    t[0] = t[1]

# X.681
def p_cls_syntax_1 (t):
    'cls_syntax : Type IDENTIFIED BY Value'
    # ' ' is the pseudo-keyword for the leading Type field in the
    # WITH SYNTAX tables (see x681_syntaxes)
    t[0] = { get_class_fieled(' ') : t[1], get_class_fieled(' '.join((t[2], t[3]))) : t[4] }

def p_cls_syntax_2 (t):
    'cls_syntax : HAS PROPERTY Value'
    t[0] = { get_class_fieled(' '.join(t[1:-1])) : t[-1:][0] }

# X.880
def p_cls_syntax_3 (t):
    '''cls_syntax : ERRORS ObjectSet
                  | LINKED ObjectSet
                  | RETURN RESULT BooleanValue
                  | SYNCHRONOUS BooleanValue
                  | INVOKE PRIORITY Value
                  | RESULT_PRIORITY Value
                  | PRIORITY Value
                  | ALWAYS RESPONDS BooleanValue
                  | IDEMPOTENT BooleanValue '''
    # keyword phrase = all tokens but the last; the last token is the value
    t[0] = { get_class_fieled(' '.join(t[1:-1])) : t[-1:][0] }

def p_cls_syntax_4 (t):
    '''cls_syntax : ARGUMENT Type
                  | RESULT Type
                  | PARAMETER Type '''
    t[0] = { get_class_fieled(t[1]) : t[2] }

def p_cls_syntax_5 (t):
    'cls_syntax : CODE Value'
    fld = get_class_fieled(t[1]);
    t[0] = { fld : t[2] }
    if isinstance(t[2], ChoiceValue):
        # additionally record the value under 'field.choice' so it can be
        # looked up by the chosen alternative directly
        fldt = fld + '.' + t[2].choice
        t[0][fldt] = t[2]

def p_cls_syntax_6 (t):
    '''cls_syntax : ARGUMENT Type OPTIONAL BooleanValue
                  | RESULT Type OPTIONAL BooleanValue
                  | PARAMETER Type OPTIONAL BooleanValue '''
    t[0] = { get_class_fieled(t[1]) : t[2], get_class_fieled(' '.join((t[1], t[3]))) : t[4] }

# 12 Information object set definition and assignment
# 12.1
def p_ObjectSetAssignment (t):
    'ObjectSetAssignment : UCASE_IDENT CLASS_IDENT ASSIGNMENT ObjectSet'
    t[0] = Node('ObjectSetAssignment', name=t[1], cls=t[2], val=t[4])

# 12.3
def p_ObjectSet (t):
    'ObjectSet : lbraceignore rbraceignore'
    # object set contents are not modelled; the {...} block is skipped
    t[0] = None

# 14 Notation for the object class field type ---------------------------------
# 14.1
def p_ObjectClassFieldType (t):
    'ObjectClassFieldType : DefinedObjectClass DOT FieldName'
    t[0] = get_type_from_class(t[1], t[3])

# 14.6
def p_ObjectClassFieldValue (t):
    '''ObjectClassFieldValue : OpenTypeFieldVal'''
    t[0] = t[1]

def p_OpenTypeFieldVal (t):
    '''OpenTypeFieldVal : Type COLON Value
                        | NullType COLON NullValue'''
    # only the value part is kept; the governing type is dropped
    t[0] = t[3]

# 15 Information from objects -------------------------------------------------
# 15.1
def p_ValueFromObject (t):
    'ValueFromObject : LCASE_IDENT DOT FieldName'
    t[0] = t[1] + '.' + t[3]

# Annex C - The instance-of type ----------------------------------------------
# C.2
def p_InstanceOfType (t):
    'InstanceOfType : INSTANCE OF DefinedObjectClass'
    t[0] = InstanceOfType()
# --- tables ---
# 'CLASS.&field' -> factory producing the ASN.1 type of that field
useful_object_class_types = {
    # Annex A
    'TYPE-IDENTIFIER.&id' : lambda : ObjectIdentifierType(),
    'TYPE-IDENTIFIER.&Type' : lambda : OpenType(),
    # Annex B
    'ABSTRACT-SYNTAX.&id' : lambda : ObjectIdentifierType(),
    'ABSTRACT-SYNTAX.&Type' : lambda : OpenType(),
    'ABSTRACT-SYNTAX.&property' : lambda : BitStringType(),
}

# registries filled by set_type_to_class()
object_class_types = { }      # 'CLASS.&field' -> type factory
object_class_typerefs = { }   # 'CLASS.&field' -> type-reference name
object_class_classrefs = { }  # 'CLASS.&field' -> referenced class name

# dummy types
class _VariableTypeValueFieldSpec (AnyType):
    pass

class _FixedTypeValueSetFieldSpec (AnyType):
    pass

# type name -> factory, used when registering class field types
class_types_creator = {
    'BooleanType' : lambda : BooleanType(),
    'IntegerType' : lambda : IntegerType(),
    'ObjectIdentifierType' : lambda : ObjectIdentifierType(),
    'OpenType' : lambda : OpenType(),
    # dummy types
    '_VariableTypeValueFieldSpec' : lambda : _VariableTypeValueFieldSpec(),
    '_FixedTypeValueSetFieldSpec' : lambda : _FixedTypeValueSetFieldSpec(),
}

# names of known information object classes
class_names = { }

# X.681 WITH SYNTAX keyword tables: keyword (or multi-word phrase) ->
# either the '&field' it sets, or itself for intermediate keywords
x681_syntaxes = {
    'TYPE-IDENTIFIER' : {
        ' ' : '&Type',
        'IDENTIFIED' : 'IDENTIFIED',
        #'BY' : 'BY',
        'IDENTIFIED BY' : '&id',
    },
    'ABSTRACT-SYNTAX' : {
        ' ' : '&Type',
        'IDENTIFIED' : 'IDENTIFIED',
        #'BY' : 'BY',
        'IDENTIFIED BY' : '&id',
        'HAS' : 'HAS',
        'PROPERTY' : 'PROPERTY',
        'HAS PROPERTY' : '&property',
    },
}

class_syntaxes_enabled = {
    'TYPE-IDENTIFIER' : True,
    'ABSTRACT-SYNTAX' : True,
}

class_syntaxes = {
    'TYPE-IDENTIFIER' : x681_syntaxes['TYPE-IDENTIFIER'],
    'ABSTRACT-SYNTAX' : x681_syntaxes['ABSTRACT-SYNTAX'],
}

# name of the WITH SYNTAX table currently in effect (None = none active)
class_current_syntax = None
def get_syntax_tokens(syntaxes):
    # Collect every single-word keyword of the given WITH SYNTAX tables and
    # turn it into a lexer token name ('-' is not legal in a token name, so
    # it becomes '_').  Multi-word phrases such as 'IDENTIFIED BY' are
    # lookup keys only and never become lexer tokens.
    collected = {}
    for syntax_name in syntaxes:
        for keyword in syntaxes[syntax_name]:
            if ' ' not in keyword:
                collected[keyword] = keyword.replace('-', '_')
    return list(collected.values())
# extend the lexer token list with the X.681 syntax keywords
tokens = tokens + get_syntax_tokens(x681_syntaxes)
def set_class_syntax(syntax):
    # Activate the WITH SYNTAX keyword table for 'syntax' when one is
    # registered and enabled; otherwise deactivate syntax handling.
    # Returns True iff a table was activated.
    global class_current_syntax
    #print "set_class_syntax", syntax, class_current_syntax
    enabled = class_syntaxes_enabled.get(syntax, False)
    class_current_syntax = syntax if enabled else None
    return enabled
def is_class_syntax(name):
    # True if 'name' is a keyword of the currently active WITH SYNTAX table
    global class_syntaxes
    global class_current_syntax
    #print "is_class_syntax", name, class_current_syntax
    if not class_current_syntax:
        return False
    return name in class_syntaxes[class_current_syntax]

def get_class_fieled(name):
    # Map a syntax keyword/phrase to its '&field' name; None when no
    # syntax table is active.  ('fieled' is a historical typo kept for
    # caller compatibility.)
    if not class_current_syntax:
        return None
    return class_syntaxes[class_current_syntax][name]

def is_class_ident(name):
    # True if 'name' names a known information object class
    return name in class_names

def add_class_ident(name):
    # register 'name' as a known information object class
    #print "add_class_ident", name
    class_names[name] = name
def get_type_from_class(cls, fld):
    # Resolve the ASN.1 type of field 'fld' (possibly dotted, '&a.&b')
    # of class 'cls', following class-reference fields recursively.
    # Unknown fields fall back to AnyType.
    flds = fld.split('.')
    if (isinstance(cls, Class_Ref)):
        key = cls.val + '.' + flds[0]
    else:
        key = cls + '.' + flds[0]
    if key in object_class_classrefs:
        # field is itself a class reference: recurse with the rest of the path
        return get_type_from_class(object_class_classrefs[key], '.'.join(flds[1:]))
    if key in object_class_typerefs:
        return Type_Ref(val=object_class_typerefs[key])
    creator = lambda : AnyType()
    creator = useful_object_class_types.get(key, creator)
    creator = object_class_types.get(key, creator)
    return creator()

def set_type_to_class(cls, fld, pars):
    # Register the type of field 'cls.fld'.  pars is [typename] or
    # [typename, reference]; an empty list defaults to 'OpenType'.
    # Returns True on success; raises CompError on a conflicting
    # redefinition of the same field.
    #print "set_type_to_class", cls, fld, pars
    key = cls + '.' + fld
    typename = 'OpenType'
    if (len(pars) > 0):
        typename = pars[0]
    else:
        pars.append(typename)
    typeref = None
    if (len(pars) > 1):
        if (isinstance(pars[1], Class_Ref)):
            pars[1] = pars[1].val
        typeref = pars[1]
    # detect a previous, different registration of the same key
    msg = None
    if key in object_class_types:
        msg = object_class_types[key]().type
    if key in object_class_typerefs:
        msg = "TypeReference " + object_class_typerefs[key]
    if key in object_class_classrefs:
        msg = "ClassReference " + object_class_classrefs[key]
    if msg == ' '.join(pars):
        msg = None  # identical redefinition is harmless
    if msg:
        msg0 = "Can not define CLASS field %s as '%s'\n" % (key, ' '.join(pars))
        msg1 = "Already defined as '%s'" % (msg)
        raise CompError(msg0 + msg1)
    if (typename == 'ClassReference'):
        if not typeref: return False
        object_class_classrefs[key] = typeref
        return True
    if (typename == 'TypeReference'):
        if not typeref: return False
        object_class_typerefs[key] = typeref
        return True
    creator = class_types_creator.get(typename)
    if creator:
        object_class_types[key] = creator
        return True
    else:
        return False

def import_class_from_module(mod, cls):
    # Re-register class 'cls' imported from module 'mod' under its plain
    # name; imported entries are stored under a '$mod$cls' prefix.
    add_class_ident(cls)
    mcls = "$%s$%s" % (mod, cls)
    for k in list(object_class_classrefs.keys()):
        kk = k.split('.', 1)
        if kk[0] == mcls:
            # NOTE(review): 'cls + "." + kk[0]' re-uses the class part of the
            # key; the field part kk[1] looks like the intended suffix here
            # and in the two loops below -- confirm against callers.
            object_class_classrefs[cls + '.' + kk[0]] = object_class_classrefs[k]
    for k in list(object_class_typerefs.keys()):
        kk = k.split('.', 1)
        if kk[0] == mcls:
            object_class_typerefs[cls + '.' + kk[0]] = object_class_typerefs[k]
    for k in list(object_class_types.keys()):
        kk = k.split('.', 1)
        if kk[0] == mcls:
            object_class_types[cls + '.' + kk[0]] = object_class_types[k]
#--- ITU-T Recommendation X.682 -----------------------------------------------
# 8 General constraint specification ------------------------------------------
# 8.1
def p_GeneralConstraint (t):
    '''GeneralConstraint : UserDefinedConstraint
                         | TableConstraint
                         | ContentsConstraint'''
    t[0] = t[1]

# 9 User-defined constraints --------------------------------------------------
# 9.1
def p_UserDefinedConstraint (t):
    'UserDefinedConstraint : CONSTRAINED BY LBRACE UserDefinedConstraintParameterList RBRACE'
    t[0] = Constraint(type = 'UserDefined', subtype = t[4])

def p_UserDefinedConstraintParameterList_1 (t):
    'UserDefinedConstraintParameterList : '
    t[0] = []

def p_UserDefinedConstraintParameterList_2 (t):
    'UserDefinedConstraintParameterList : UserDefinedConstraintParameter'
    t[0] = [t[1]]

def p_UserDefinedConstraintParameterList_3 (t):
    'UserDefinedConstraintParameterList : UserDefinedConstraintParameterList COMMA UserDefinedConstraintParameter'
    t[0] = t[1] + [t[3]]

# 9.3
def p_UserDefinedConstraintParameter (t):
    'UserDefinedConstraintParameter : Type'
    t[0] = t[1]

# 10 Table constraints, including component relation constraints --------------
# 10.3
def p_TableConstraint (t):
    '''TableConstraint : SimpleTableConstraint
                       | ComponentRelationConstraint'''
    t[0] = Constraint(type = 'Table', subtype = t[1])

def p_SimpleTableConstraint (t):
    'SimpleTableConstraint : LBRACE UCASE_IDENT RBRACE'
    t[0] = t[2]

# 10.7
def p_ComponentRelationConstraint (t):
    'ComponentRelationConstraint : LBRACE UCASE_IDENT RBRACE LBRACE AtNotations RBRACE'
    # e.g. '{ObjectSet}{@field}' -> object set name + stringified @-notations
    t[0] = t[2] + str(t[5])

def p_AtNotations_1 (t):
    'AtNotations : AtNotation'
    t[0] = [t[1]]

def p_AtNotations_2 (t):
    'AtNotations : AtNotations COMMA AtNotation'
    t[0] = t[1] + [t[3]]

def p_AtNotation_1 (t):
    'AtNotation : AT ComponentIdList'
    t[0] = '@' + t[2]

def p_AtNotation_2 (t):
    'AtNotation : AT DOT Level ComponentIdList'
    t[0] = '@.' + t[3] + t[4]

def p_Level_1 (t):
    'Level : DOT Level'
    t[0] = '.' + t[2]

def p_Level_2 (t):
    'Level : '
    t[0] = ''

def p_ComponentIdList_1 (t):
    'ComponentIdList : LCASE_IDENT'
    t[0] = t[1]

def p_ComponentIdList_2 (t):
    'ComponentIdList : ComponentIdList DOT LCASE_IDENT'
    t[0] = t[1] + '.' + t[3]

# 11 Contents constraints -----------------------------------------------------
# 11.1
def p_ContentsConstraint (t):
    'ContentsConstraint : CONTAINING type_ref'
    t[0] = Constraint(type = 'Contents', subtype = t[2])
#--- ITU-T Recommendation X.683 -----------------------------------------------
# 8 Parameterized assignments -------------------------------------------------
# 8.1
def p_ParameterizedAssignment (t):
    '''ParameterizedAssignment : ParameterizedTypeAssignment
                               | ParameterizedObjectClassAssignment
                               | ParameterizedObjectAssignment
                               | ParameterizedObjectSetAssignment'''
    t[0] = t[1]

# 8.2
def p_ParameterizedTypeAssignment (t):
    'ParameterizedTypeAssignment : UCASE_IDENT ParameterList ASSIGNMENT Type'
    t[0] = t[4]
    t[0].SetName(t[1]) # t[0].SetName(t[1] + 'xxx')

def p_ParameterizedObjectClassAssignment (t):
    '''ParameterizedObjectClassAssignment : CLASS_IDENT ParameterList ASSIGNMENT ObjectClass
                                          | UCASE_IDENT ParameterList ASSIGNMENT ObjectClass'''
    t[0] = t[4]
    t[0].SetName(t[1])
    if isinstance(t[0], ObjectClassDefn):
        t[0].reg_types()

def p_ParameterizedObjectAssignment (t):
    'ParameterizedObjectAssignment : objectreference ParameterList DefinedObjectClass ASSIGNMENT Object'
    t[0] = ObjectAssignment (ident = t[1], cls=t[3].val, val=t[5])
    global obj_class
    obj_class = None

def p_ParameterizedObjectSetAssignment (t):
    'ParameterizedObjectSetAssignment : UCASE_IDENT ParameterList DefinedObjectClass ASSIGNMENT ObjectSet'
    t[0] = Node('ObjectSetAssignment', name=t[1], cls=t[3].val, val=t[5])

# 8.3
def p_ParameterList (t):
    'ParameterList : lbraceignore rbraceignore'
    # formal parameter lists are skipped (brace-ignored); the commented-out
    # productions below show the full grammar

#def p_ParameterList (t):
#  'ParameterList : LBRACE Parameters RBRACE'
#  t[0] = t[2]
#def p_Parameters_1 (t):
#  'Parameters : Parameter'
#  t[0] = [t[1]]
#def p_Parameters_2 (t):
#  'Parameters : Parameters COMMA Parameter'
#  t[0] = t[1] + [t[3]]
#def p_Parameter_1 (t):
#  'Parameter : Type COLON Reference'
#  t[0] = [t[1], t[3]]
#def p_Parameter_2 (t):
#  'Parameter : Reference'
#  t[0] = t[1]

# 9 Referencing parameterized definitions -------------------------------------
# 9.1
def p_ParameterizedReference (t):
    'ParameterizedReference : Reference LBRACE RBRACE'
    t[0] = t[1]
    #t[0].val += 'xxx'

# 9.2
def p_ParameterizedType (t):
    'ParameterizedType : type_ref ActualParameterList'
    t[0] = t[1]
    #t[0].val += 'xxx'

def p_ParameterizedObjectClass (t):
    'ParameterizedObjectClass : DefinedObjectClass ActualParameterList'
    t[0] = t[1]
    #t[0].val += 'xxx'

def p_ParameterizedObject (t):
    'ParameterizedObject : DefinedObject ActualParameterList'
    t[0] = t[1]
    #t[0].val += 'xxx'

# 9.5
def p_ActualParameterList (t):
    'ActualParameterList : lbraceignore rbraceignore'
    # actual parameter lists are likewise skipped

#def p_ActualParameterList (t):
#  'ActualParameterList : LBRACE ActualParameters RBRACE'
#  t[0] = t[2]
#def p_ActualParameters_1 (t):
#  'ActualParameters : ActualParameter'
#  t[0] = [t[1]]
#def p_ActualParameters_2 (t):
#  'ActualParameters : ActualParameters COMMA ActualParameter'
#  t[0] = t[1] + [t[3]]
#def p_ActualParameter (t):
#  '''ActualParameter : Type
#                     | Value'''
#  t[0] = t[1]
#--- ITU-T Recommendation X.880 -----------------------------------------------
# Field-type table for the X.880 ROS information object classes; each entry is
# the 'pars' list fed to set_type_to_class() by x880_import().
x880_classes = {
    'OPERATION' : {
        '&ArgumentType' : [],
        '&argumentTypeOptional' : [ 'BooleanType' ],
        '&returnResult' : [ 'BooleanType' ],
        '&ResultType' : [],
        '&resultTypeOptional' : [ 'BooleanType' ],
        '&Errors' : [ 'ClassReference', 'ERROR' ],
        '&Linked' : [ 'ClassReference', 'OPERATION' ],
        '&synchronous' : [ 'BooleanType' ],
        '&idempotent' : [ 'BooleanType' ],
        '&alwaysReturns' : [ 'BooleanType' ],
        '&InvokePriority' : [ '_FixedTypeValueSetFieldSpec' ],
        '&ResultPriority' : [ '_FixedTypeValueSetFieldSpec' ],
        '&operationCode' : [ 'TypeReference', 'Code' ],
    },
    'ERROR' : {
        '&ParameterType' : [],
        # Fix: this key (and its twin in x880_syntaxes below) had been
        # mangled to '¶meterTypeOptional' -- '&para' swallowed by an
        # HTML-entity pass; restored to the intended field name.
        '&parameterTypeOptional' : [ 'BooleanType' ],
        '&ErrorPriority' : [ '_FixedTypeValueSetFieldSpec' ],
        '&errorCode' : [ 'TypeReference', 'Code' ],
    },
    'OPERATION-PACKAGE' : {
        '&Both' : [ 'ClassReference', 'OPERATION' ],
        '&Consumer' : [ 'ClassReference', 'OPERATION' ],
        '&Supplier' : [ 'ClassReference', 'OPERATION' ],
        '&id' : [ 'ObjectIdentifierType' ],
    },
    'CONNECTION-PACKAGE' : {
        '&bind' : [ 'ClassReference', 'OPERATION' ],
        '&unbind' : [ 'ClassReference', 'OPERATION' ],
        '&responderCanUnbind' : [ 'BooleanType' ],
        '&unbindCanFail' : [ 'BooleanType' ],
        '&id' : [ 'ObjectIdentifierType' ],
    },
    'CONTRACT' : {
        '&connection' : [ 'ClassReference', 'CONNECTION-PACKAGE' ],
        '&OperationsOf' : [ 'ClassReference', 'OPERATION-PACKAGE' ],
        '&InitiatorConsumerOf' : [ 'ClassReference', 'OPERATION-PACKAGE' ],
        '&InitiatorSupplierOf' : [ 'ClassReference', 'OPERATION-PACKAGE' ],
        '&id' : [ 'ObjectIdentifierType' ],
    },
    'ROS-OBJECT-CLASS' : {
        '&Is' : [ 'ClassReference', 'ROS-OBJECT-CLASS' ],
        '&Initiates' : [ 'ClassReference', 'CONTRACT' ],
        '&Responds' : [ 'ClassReference', 'CONTRACT' ],
        '&InitiatesAndResponds' : [ 'ClassReference', 'CONTRACT' ],
        '&id' : [ 'ObjectIdentifierType' ],
    },
}

# X.880 WITH SYNTAX keyword tables (same shape as x681_syntaxes)
x880_syntaxes = {
    'OPERATION' : {
        'ARGUMENT' : '&ArgumentType',
        'ARGUMENT OPTIONAL' : '&argumentTypeOptional',
        'RESULT' : '&ResultType',
        'RESULT OPTIONAL' : '&resultTypeOptional',
        'RETURN' : 'RETURN',
        'RETURN RESULT' : '&returnResult',
        'ERRORS' : '&Errors',
        'LINKED' : '&Linked',
        'SYNCHRONOUS' : '&synchronous',
        'IDEMPOTENT' : '&idempotent',
        'ALWAYS' : 'ALWAYS',
        'RESPONDS' : 'RESPONDS',
        'ALWAYS RESPONDS' : '&alwaysReturns',
        'INVOKE' : 'INVOKE',
        'PRIORITY' : 'PRIORITY',
        'INVOKE PRIORITY' : '&InvokePriority',
        'RESULT-PRIORITY': '&ResultPriority',
        'CODE' : '&operationCode',
    },
    'ERROR' : {
        'PARAMETER' : '&ParameterType',
        # Fix: same mojibake repair as in x880_classes['ERROR'] above.
        'PARAMETER OPTIONAL' : '&parameterTypeOptional',
        'PRIORITY' : '&ErrorPriority',
        'CODE' : '&errorCode',
    },
#  'OPERATION-PACKAGE' : {
#  },
#  'CONNECTION-PACKAGE' : {
#  },
#  'CONTRACT' : {
#  },
#  'ROS-OBJECT-CLASS' : {
#  },
}
def x880_module_begin():
    # register all X.880 class names at module start
    #print "x880_module_begin()"
    for name in list(x880_classes.keys()):
        add_class_ident(name)

def x880_import(name):
    # Activate the X.880 WITH SYNTAX table for 'name' (if any) and register
    # the field types of the class.
    if name in x880_syntaxes:
        class_syntaxes_enabled[name] = True
        class_syntaxes[name] = x880_syntaxes[name]
    if name in x880_classes:
        add_class_ident(name)
        for f in (list(x880_classes[name].keys())):
            set_type_to_class(name, f, x880_classes[name][f])

# extend the lexer token list with the X.880 syntax keywords
tokens = tokens + get_syntax_tokens(x880_syntaxes)
# {...} OID value
#def p_lbrace_oid(t):
# 'lbrace_oid : brace_oid_begin LBRACE'
# t[0] = t[1]
#def p_brace_oid_begin(t):
# 'brace_oid_begin : '
# global in_oid
# in_oid = True
#def p_rbrace_oid(t):
# 'rbrace_oid : brace_oid_end RBRACE'
# t[0] = t[2]
#def p_brace_oid_end(t):
# 'brace_oid_end : '
# global in_oid
# in_oid = False
# {...} block to be ignored
def p_lbraceignore(t):
    'lbraceignore : braceignorebegin LBRACE'
    t[0] = t[1]

def p_braceignorebegin(t):
    'braceignorebegin : '
    # empty production fires before LBRACE is shifted: switch the lexer into
    # 'braceignore' state so the whole {...} block is consumed and discarded
    global lexer
    lexer.level = 1
    lexer.push_state('braceignore')

def p_rbraceignore(t):
    'rbraceignore : braceignoreend RBRACE'
    t[0] = t[2]

def p_braceignoreend(t):
    'braceignoreend : '
    global lexer
    lexer.pop_state()

def p_error(t):
    global input_file
    raise ParseError(t, input_file)

def p_pyquote (t):
    '''pyquote : PYQUOTE'''
    t[0] = PyQuote (val = t[1])

def testlex (s):
    # debug helper: print the token stream produced for input string s
    lexer.input (s)
    while True:
        token = lexer.token ()
        if not token:
            break
        print(token)

def do_module (ast, defined_dict):
    # PyZ3950 backend: emit Python source for one parsed ASN.1 module
    assert (ast.type == 'Module')
    ctx = Ctx (defined_dict)
    print(ast.to_python (ctx))
    print(ctx.output_assignments ())
    print(ctx.output_pyquotes ())

def eth_do_module (ast, ectx):
    # Wireshark backend entry point for one parsed ASN.1 module
    assert (ast.type == 'Module')
    if ectx.dbg('s'): print(ast.str_depth(0))
    ast.to_eth(ectx)
def testyacc(s, fn, defined_dict):
    # PyZ3950 mode: parse the ASN.1 text s (from file fn) and emit a
    # generated Python module header followed by the per-module output
    ast = yacc.parse(s, debug=0)
    time_str = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
    print("""#!/usr/bin/env python
# Auto-generated from %s at %s
from PyZ3950 import asn1""" % (fn, time_str))
    for module in ast:
        # NOTE(review): eth_do_module expects an EthCtx as its second
        # argument; passing defined_dict here looks like it was meant to be
        # do_module -- confirm before relying on this code path.
        eth_do_module (module, defined_dict)

# Wireshark compiler
def eth_usage():
    # print the asn2wrs command-line help text
    print("""
asn2wrs [-h|?] [-d dbg] [-b] [-p proto] [-c cnf_file] [-e] input_file(s) ...
  -h|?          : Usage
  -b            : BER (default is PER)
  -u            : Unaligned (default is aligned)
  -p proto      : Protocol name (implies -S). Default is module-name
                  from input_file (renamed by #.MODULE if present)
  -o name       : Output files name core (default is <proto>)
  -O dir        : Output directory for dissector
  -c cnf_file   : Conformance file
  -I path       : Path for conformance file includes
  -e            : Create conformance file for exported types
  -E            : Just create conformance file for exported types
  -S            : Single output for multiple modules
  -s template   : Single file output (template is input file
                  without .c/.h extension)
  -k            : Keep intermediate files though single file output is used
  -L            : Suppress #line directive from .cnf file
  -D dir        : Directory for input_file(s) (default: '.')
  -C            : Add check for SIZE constraints
  -r prefix     : Remove the prefix from type names

  input_file(s) : Input ASN.1 file(s)

  -d dbg        : Debug output, dbg = [l][y][p][s][a][t][c][m][o]
                  l - lex
                  y - yacc
                  p - parsing
                  s - internal ASN.1 structure
                  a - list of assignments
                  t - tables
                  c - conformance values
                  m - list of compiled modules with dependency
                  o - list of output files
""")
def eth_main():
    # Command-line driver: parse options, run the lexer/parser over the
    # input ASN.1 file(s) and emit the Wireshark dissector output.
    global input_file
    global g_conform
    global lexer
    print("ASN.1 to Wireshark dissector compiler");
    try:
        opts, args = getopt.getopt(sys.argv[1:], "h?d:D:buXp:FTo:O:c:I:eESs:kLCr:");
    except getopt.GetoptError:
        eth_usage(); sys.exit(2)
    if len(args) < 1:
        eth_usage(); sys.exit(2)
    conform = EthCnf()
    conf_to_read = None
    output = EthOut()
    ectx = EthCtx(conform, output)
    # defaults before option processing
    ectx.encoding = 'per'
    ectx.proto_opt = None
    ectx.fld_opt = {}
    ectx.tag_opt = False
    ectx.outnm_opt = None
    ectx.aligned = True
    ectx.dbgopt = ''
    ectx.new = True
    ectx.expcnf = False
    ectx.justexpcnf = False
    ectx.merge_modules = False
    ectx.group_by_prot = False
    ectx.conform.last_group = 0
    ectx.conform.suppress_line = False;
    ectx.output.outnm = None
    ectx.output.single_file = None
    ectx.constraints_check = False;
    # first pass: options that must be known before the conformance file
    # is read (-c itself, include path, directories, ...)
    for o, a in opts:
        if o in ("-h", "-?"):
            eth_usage(); sys.exit(2)
        if o in ("-c",):
            conf_to_read = a
        if o in ("-I",):
            ectx.conform.include_path.append(a)
        if o in ("-E",):
            ectx.expcnf = True
            ectx.justexpcnf = True
        if o in ("-D",):
            ectx.srcdir = a
        if o in ("-C",):
            ectx.constraints_check = True
        if o in ("-X",):
            warnings.warn("Command line option -X is obsolete and can be removed")
        if o in ("-T",):
            warnings.warn("Command line option -T is obsolete and can be removed")
    if conf_to_read:
        ectx.conform.read(conf_to_read)
    # second pass: remaining options go through the conformance machinery
    for o, a in opts:
        if o in ("-h", "-?", "-c", "-I", "-E", "-D", "-C", "-X", "-T"):
            pass # already processed
        else:
            par = []
            if a: par.append(a)
            ectx.conform.set_opt(o, par, "commandline", 0)
    (ld, yd, pd) = (0, 0, 0);
    if ectx.dbg('l'): ld = 1
    if ectx.dbg('y'): yd = 1
    if ectx.dbg('p'): pd = 2
    lexer = lex.lex(debug=ld)
    yacc.yacc(method='LALR', debug=yd)
    g_conform = ectx.conform
    ast = []
    for fn in args:
        input_file = fn
        lexer.lineno = 1
        if (ectx.srcdir): fn = ectx.srcdir + '/' + fn
        f = open (fn, "r")
        ast.extend(yacc.parse(f.read(), lexer=lexer, debug=pd))
        f.close ()
    ectx.eth_clean()
    if (ectx.merge_modules): # common output for all module
        ectx.eth_clean()
        for module in ast:
            eth_do_module(module, ectx)
        ectx.eth_prepare()
        ectx.eth_do_output()
    elif (ectx.groups()): # group by protocols/group
        groups = []
        pr2gr = {}
        if (ectx.group_by_prot): # group by protocols
            for module in ast:
                prot = module.get_proto(ectx)
                if prot not in pr2gr:
                    pr2gr[prot] = len(groups)
                    groups.append([])
                groups[pr2gr[prot]].append(module)
        else: # group by groups
            pass
        for gm in (groups):
            ectx.eth_clean()
            for module in gm:
                eth_do_module(module, ectx)
            ectx.eth_prepare()
            ectx.eth_do_output()
    else: # output for each module
        for module in ast:
            ectx.eth_clean()
            eth_do_module(module, ectx)
            ectx.eth_prepare()
            ectx.eth_do_output()
    if ectx.dbg('m'):
        ectx.dbg_modules()
    if ectx.dbg('c'):
        ectx.conform.dbg_print()
    if not ectx.justexpcnf:
        ectx.conform.unused_report()
    if ectx.dbg('o'):
        ectx.output.dbg_print()
    ectx.output.make_single_file()
# Python compiler
def main():
    # PyZ3950 standalone mode: read queries interactively, or compile the
    # ASN.1 files named on the command line.
    testfn = testyacc
    if len (sys.argv) == 1:
        # interactive: read until an empty line
        while True:
            s = input ('Query: ')
            if len (s) == 0:
                break
            testfn (s, 'console', {})
    else:
        defined_dict = {}
        for fn in sys.argv [1:]:
            f = open (fn, "r")
            testfn (f.read (), fn, defined_dict)
            f.close ()
            lexer.lineno = 1

#--- BODY ---------------------------------------------------------------------
if __name__ == '__main__':
    # invoked as 'asn2wrs'/'asn2eth' -> Wireshark compiler; otherwise the
    # original PyZ3950 behaviour
    if (os.path.splitext(os.path.basename(sys.argv[0]))[0].lower() in ('asn2wrs', 'asn2eth')):
        eth_main()
    else:
        main()
#------------------------------------------------------------------------------
#
# Editor modelines - http://www.wireshark.org/tools/modelines.html
#
# c-basic-offset: 4; tab-width: 8; indent-tabs-mode: nil
# vi: set shiftwidth=4 tabstop=8 expandtab:
# :indentSize=4:tabSize=8:noTabs=true:
|
weizhenwei/wireshark
|
tools/asn2wrs.py
|
Python
|
gpl-2.0
| 307,726
|
# Deployment credentials placeholder: fill these in per environment and keep
# the populated file out of version control.

# DATABASE
db_name = ''
db_user = ''
db_host = ''
db_password = ''  # removed stray trailing semicolon

# WEBFACTION
wf_login = ''
wf_password = ''

# SMTP
smtp_server = ''
smtp_login = ''
smtp_password = ''
|
jmorel/Naphtaline
|
passwords.py
|
Python
|
mit
| 175
|
import numpy as np
import datetime
from datetime import timedelta
import sys
import os
import pickle
# Based on QA Band - https://landsat.usgs.gov/collectionqualityband
QA_BAND_NUM = 7  # 1-based index of the pixel-QA band in each input scene
landsat_5_clear_pix_vals = [672, 676, 680, 684]
#landsat_8_clear_pix_vals = [2720, 2724, 2728, 2732]
LANDSAT_CLEAR_PIX_VALS = landsat_5_clear_pix_vals #+ landsat_8_clear_pix_vals

class Landsat_Image_Synthesis():
    """ESRI raster function: synthesize a cloud-free Landsat 5 image.

    For each pixel, scenes acquired in the user-selected month are filtered
    to QA-"clear" observations, then averaged per band.  Pixels with no
    clear observation are set to -1.
    """

    def __init__(self):
        self.name = 'Landsat 5 Scene Synthesis'
        self.description = 'This function takes as input a spatial and temporal '\
                           'mosaic dataset of Landsat 5 TM images, selects images ' \
                           'for user defined month, filters out cloudy '\
                           'pixels from each image in the stack, then '\
                           'averages the values along a spatial element to '\
                           'create a synthetic Landsat 5 TM image for the '\
                           'user define month.'
        self.times = []            # per-raster key metadata ('acquisitiondate')
        self.predict_month = None  # 1..12, set in updateRasterInfo()

    def getParameterInfo(self):
        # declared inputs for the raster-function framework
        return [
            {
                'name': 'rasters',
                'dataType': 'rasters',
                'value': None,
                'required': True,
                'displayName': 'Rasters',
                'description': 'The collection of overlapping rasters to aggregate.',
            },
            {
                'name': 'predict_month',
                'dataType': 'string',
                'value': 'Jun',
                'required': True,
                'domain': ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'),
                'displayName': 'Month to Predict',
                'description': 'Jan, Feb, Mar, Apr, May, Jun, Jul, Aug, Sep, Oct, Nov, Dec'
            }
        ]

    def getConfiguration(self, **scalars):
        return {
            'inheritProperties': 4 | 8,      # inherit everything but pixel type (1) and NoData (2)
            'invalidateProperties': 2 | 4,   # invalidate histogram/statistics: pixel values change
            'inputMask': True,               # need raster masks in updatePixels()
            'resampling': False,             # process at native resolution
            'keyMetadata': ['AcquisitionDate']
        }

    def updateRasterInfo(self, **kwargs):
        # Configure the 6-band float output and capture per-raster
        # acquisition dates plus the month to synthesize.
        self.outBandCount = 6
        kwargs['output_info']['pixelType'] = 'f4'
        kwargs['output_info']['histogram'] = ()
        kwargs['output_info']['statistics'] = ()
        kwargs['output_info']['bandCount'] = self.outBandCount
        self.times = kwargs['rasters_keyMetadata']
        month_dict = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4,
                      'May': 5, 'Jun': 6, 'Jul': 7, 'Aug': 8,
                      'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}
        self.predict_month = int(month_dict[kwargs['predict_month']])
        return kwargs

    def updateKeyMetadata(self, names, bandIndex, **keyMetadata):
        return keyMetadata

    def updatePixels(self, tlc, shape, props, **pixelBlocks):
        """Average QA-clear pixels of scenes from the requested month.

        Fixes vs. previous revision:
        * the month filter now honors self.predict_month (it was
          hard-coded to June, silently ignoring the user's parameter);
        * the per-band inner loop no longer recomputes the identical
          QA-based clear_indices once per band.
        """
        pix_time = [j['acquisitiondate'] for j in self.times]
        pix_array = np.asarray(pixelBlocks['rasters_pixels'])
        num_squares_x = pix_array.shape[2]
        num_squares_y = pix_array.shape[3]

        # acquisition dates are fractional days since 1900-01-01
        epoch = datetime.datetime(1900, 1, 1)
        idx_list = [idx for idx, t in enumerate(pix_time)
                    if (epoch + timedelta(days=t)).month == self.predict_month]
        pix_array_within = pix_array[idx_list, :, :, :]

        output_pixels = np.zeros((self.outBandCount, num_squares_x, num_squares_y))
        QA_BAND_IND = QA_BAND_NUM - 1
        for num_x in range(num_squares_x):
            for num_y in range(num_squares_y):
                qa = pix_array_within[:, QA_BAND_IND, num_x, num_y]
                clear_indices = [i for i in range(len(qa))
                                 if qa[i] in LANDSAT_CLEAR_PIX_VALS]
                if clear_indices:
                    for band in range(self.outBandCount):
                        output_pixels[band, num_x, num_y] = np.mean(
                            pix_array_within[clear_indices, band, num_x, num_y])
                else:
                    # no clear observation for this pixel
                    output_pixels[:, num_x, num_y] = -1

        mask = np.ones((self.outBandCount, num_squares_x, num_squares_y))
        pixelBlocks['output_mask'] = mask.astype('u1', copy=False)
        pixelBlocks['output_pixels'] = output_pixels.astype(props['pixelType'], copy=False)
        return pixelBlocks
|
Esri/raster-functions
|
functions/Landsat_Image_Synthesis.py
|
Python
|
apache-2.0
| 6,422
|
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
import os
from osc_lib.i18n import _
import yaml
from tripleoclient import command
from tripleoclient import utils
from tripleoclient.workflows import baremetal
class ConfigureBIOS(command.Command):
    """Apply BIOS configuration on given nodes"""

    log = logging.getLogger(__name__ + ".ConfigureBIOS")

    def get_parser(self, prog_name):
        """Build the argument parser: target nodes plus the BIOS settings."""
        parser = super(ConfigureBIOS, self).get_parser(prog_name)
        # Either explicit node UUIDs or --all-manageable, never both.
        group = parser.add_mutually_exclusive_group(required=True)
        group.add_argument('node_uuids',
                           nargs="*",
                           metavar="<node_uuid>",
                           default=[],
                           help=_('Baremetal Node UUIDs for the node(s) to '
                                  'configure BIOS'))
        group.add_argument("--all-manageable",
                           action='store_true',
                           help=_("Configure BIOS for all nodes currently in "
                                  "'manageable' state"))
        parser.add_argument('--configuration', metavar='<configuration>',
                            dest='configuration',
                            help=_('BIOS configuration (YAML/JSON string or '
                                   'file name).'))
        return parser

    def take_action(self, parsed_args):
        """Parse/validate the configuration and apply it via the workflow.

        :raises ValueError: if --configuration is missing or has no
            "settings" key
        :raises RuntimeError: if the configuration string cannot be parsed
            as YAML
        :raises TypeError: if the configuration or its settings list has the
            wrong shape
        """
        self.log.debug("take_action({args})".format(args=parsed_args))
        # Fail early with a clear message instead of letting
        # os.path.exists(None) raise an opaque TypeError when the optional
        # --configuration flag was not supplied.
        if not parsed_args.configuration:
            raise ValueError(
                _('The --configuration option is required for this command'))
        if os.path.exists(parsed_args.configuration):
            # A file name: load YAML (or JSON, a YAML subset) from disk.
            with open(parsed_args.configuration, 'r') as fp:
                configuration = yaml.safe_load(fp.read())
        else:
            # Otherwise treat the argument itself as a YAML/JSON document.
            try:
                configuration = yaml.safe_load(parsed_args.configuration)
            except yaml.YAMLError as exc:
                raise RuntimeError(
                    _('Configuration is not an existing file and cannot be '
                      'parsed as YAML: %s') % exc)
        # Basic sanity check, we defer the full check to Ironic
        try:
            settings = configuration['settings']
        except KeyError:
            raise ValueError(
                _('Configuration must contain key "settings"'))
        except TypeError:
            raise TypeError(
                _('Configuration must be an object, got %r instead')
                % configuration)
        if (not isinstance(settings, list) or
                not all(isinstance(item, dict) for item in settings)):
            raise TypeError(
                _('BIOS settings list is expected to be a list of '
                  'objects, got %r instead') % settings)
        clients = self.app.client_manager
        if parsed_args.node_uuids:
            baremetal.apply_bios_configuration(
                node_uuids=parsed_args.node_uuids,
                configuration=configuration,
                verbosity=utils.playbook_verbosity(self=self)
            )
        else:
            baremetal.apply_bios_configuration_on_manageable_nodes(
                clients,
                configuration=configuration,
                verbosity=utils.playbook_verbosity(self=self)
            )
class ResetBIOS(command.Command):
    """Reset BIOS configuration to factory default"""

    log = logging.getLogger(__name__ + ".ResetBIOS")

    def get_parser(self, prog_name):
        """Build the argument parser: explicit UUIDs or --all-manageable."""
        parser = super(ResetBIOS, self).get_parser(prog_name)
        # The two targeting modes are mutually exclusive and one is required.
        targets = parser.add_mutually_exclusive_group(required=True)
        targets.add_argument(
            'node_uuids', nargs="*", metavar="<node_uuid>", default=[],
            help=_('Baremetal Node UUIDs for the node(s) to reset BIOS'))
        targets.add_argument(
            "--all-manageable", action='store_true',
            help=_("Reset BIOS on all nodes currently in 'manageable' state"))
        return parser

    def take_action(self, parsed_args):
        """Dispatch the BIOS reset to the baremetal workflow."""
        self.log.debug("take_action({args})".format(args=parsed_args))
        clients = self.app.client_manager
        verbosity = utils.playbook_verbosity(self=self)
        if parsed_args.node_uuids:
            # Explicit node list takes precedence.
            baremetal.reset_bios_configuration(
                node_uuids=parsed_args.node_uuids,
                verbosity=verbosity)
        else:
            # --all-manageable: operate on every node in 'manageable' state.
            baremetal.reset_bios_configuration_on_manageable_nodes(
                clients=clients,
                verbosity=verbosity)
|
openstack/python-tripleoclient
|
tripleoclient/v1/overcloud_bios.py
|
Python
|
apache-2.0
| 5,109
|
import unittest
from unittest import mock
from pathlib import Path
from tempfile import TemporaryDirectory
from contextlib import contextmanager
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.tasks.bulkdata.tests.utils import _make_task
import yaml
import pytest
from sqlalchemy import create_engine
from cumulusci.tasks.bulkdata.generate_and_load_data_from_yaml import (
GenerateAndLoadDataFromYaml,
)
from snowfakery import data_generator_runtime, data_generator
sample_yaml = Path(__file__).parent / "snowfakery/gen_npsp_standard_objects.yml"
simple_yaml = Path(__file__).parent / "snowfakery/include_parent.yml"
from cumulusci.tasks.bulkdata.generate_from_yaml import GenerateDataFromYaml
vanilla_mapping_file = Path(__file__).parent / "../tests/mapping_vanilla_sf.yml"
@contextmanager
def temporary_file_path(filename):
    """Yield a Path for *filename* inside a throwaway temporary directory.

    The directory (and anything created at the yielded path) is removed
    when the context exits.
    """
    with TemporaryDirectory() as dirname:
        yield Path(dirname) / filename
@contextmanager
def temp_sqlite_database_url():
    """Yield a sqlite:/// URL pointing at a scratch database file."""
    with temporary_file_path("test.db") as db_path:
        yield "sqlite:///" + str(db_path)
class TestGenerateFromDataTask(unittest.TestCase):
    """End-to-end tests for the Snowfakery-backed data-generation tasks."""
    def assertRowsCreated(self, database_url):
        """Assert at least one Account row exists in the DB; return all rows."""
        engine = create_engine(database_url)
        connection = engine.connect()
        accounts = connection.execute("select * from Account")
        accounts = list(accounts)
        # Column 1 must be non-empty — presumably the Name column;
        # NOTE(review): confirm against the mapping file.
        assert accounts and accounts[0] and accounts[0][1]
        return accounts
    def test_no_options(self):
        """Task construction must fail when required options are missing."""
        with self.assertRaises(Exception):
            _make_task(GenerateDataFromYaml, {})
    def test_simple(self):
        """Happy path: generate one Account into a scratch sqlite DB."""
        with temp_sqlite_database_url() as database_url:
            task = _make_task(
                GenerateDataFromYaml,
                {
                    "options": {
                        "generator_yaml": sample_yaml,
                        "num_records": 1,
                        "database_url": database_url,
                        "num_records_tablename": "Account",
                    }
                },
            )
            task()
            self.assertRowsCreated(database_url)
    def test_inaccessible_generator_yaml(self):
        """A nonexistent recipe path raises TaskOptionsError."""
        with self.assertRaises(TaskOptionsError):
            task = _make_task(
                GenerateDataFromYaml,
                {
                    "options": {
                        # sample_yaml is a file, so appending "junk" yields
                        # a path that cannot exist.
                        "generator_yaml": sample_yaml / "junk",
                        "num_records": 10,
                        "num_records_tablename": "Account",
                    }
                },
            )
            task()
    def test_vars(self):
        """Recipe vars not declared by the recipe produce a UserWarning."""
        with temp_sqlite_database_url() as database_url:
            with self.assertWarns(UserWarning):
                task = _make_task(
                    GenerateDataFromYaml,
                    {
                        "options": {
                            "generator_yaml": sample_yaml,
                            "vars": "xyzzy:foo",
                            "database_url": database_url,
                        }
                    },
                )
                task()
                self.assertRowsCreated(database_url)
    def test_generate_mapping_file(self):
        """The task can emit a usable mapping file alongside the data."""
        with temporary_file_path("mapping.yml") as temp_mapping:
            with temp_sqlite_database_url() as database_url:
                task = _make_task(
                    GenerateDataFromYaml,
                    {
                        "options": {
                            "generator_yaml": sample_yaml,
                            "database_url": database_url,
                            "generate_mapping_file": temp_mapping,
                        }
                    },
                )
                task()
            # Read back after the DB context closes; the mapping file must
            # describe fields for the Insert Account step.
            mapping = yaml.safe_load(open(temp_mapping))
            assert mapping["Insert Account"]["fields"]
    def test_use_mapping_file(self):
        """A caller-supplied mapping file is honored."""
        assert vanilla_mapping_file.exists()
        with temp_sqlite_database_url() as database_url:
            task = _make_task(
                GenerateDataFromYaml,
                {
                    "options": {
                        "generator_yaml": sample_yaml,
                        "database_url": database_url,
                        "mapping": vanilla_mapping_file,
                    }
                },
            )
            task()
            self.assertRowsCreated(database_url)
    def test_num_records(self):
        """num_records controls exactly how many rows are generated."""
        with temp_sqlite_database_url() as database_url:
            task = _make_task(
                GenerateDataFromYaml,
                {
                    "options": {
                        "generator_yaml": simple_yaml,
                        "num_records": 11,
                        "database_url": database_url,
                        "num_records_tablename": "Account",
                    }
                },
            )
            task()
            assert len(self.assertRowsCreated(database_url)) == 11
    @mock.patch(
        "cumulusci.tasks.bulkdata.generate_and_load_data_from_yaml.GenerateAndLoadDataFromYaml._dataload"
    )
    def test_simple_generate_and_load(self, _dataload):
        """Without batching, generate-and-load performs a single data load."""
        task = _make_task(
            GenerateAndLoadDataFromYaml,
            {
                "options": {
                    "generator_yaml": simple_yaml,
                    "num_records": 11,
                    "num_records_tablename": "Account",
                }
            },
        )
        task()
        assert len(_dataload.mock_calls) == 1
    @mock.patch("cumulusci.tasks.bulkdata.generate_from_yaml.generate")
    def test_exception_handled_cleanly(self, generate):
        """Exceptions raised during generation propagate to the caller."""
        generate.side_effect = AssertionError("Foo")
        with pytest.raises(AssertionError) as e:
            task = _make_task(
                GenerateAndLoadDataFromYaml,
                {
                    "options": {
                        "generator_yaml": simple_yaml,
                        "num_records": 11,
                        "num_records_tablename": "Account",
                    }
                },
            )
            task()
            # NOTE(review): this assert is inside the pytest.raises block,
            # after the raising call — it appears unreachable; it likely
            # belongs outside the with block. Confirm intent.
            assert "Foo" in str(e.value)
        assert len(generate.mock_calls) == 1
    @mock.patch(
        "cumulusci.tasks.bulkdata.generate_and_load_data_from_yaml.GenerateAndLoadDataFromYaml._dataload"
    )
    def test_batching(self, _dataload):
        """14 records with batch_size 6 produce 3 loads (6 + 6 + 2)."""
        with temp_sqlite_database_url() as database_url:
            task = _make_task(
                GenerateAndLoadDataFromYaml,
                {
                    "options": {
                        "generator_yaml": simple_yaml,
                        "num_records": 14,
                        "batch_size": 6,
                        "database_url": database_url,
                        "num_records_tablename": "Account",
                        "data_generation_task": "cumulusci.tasks.bulkdata.generate_from_yaml.GenerateDataFromYaml",
                        "reset_oids": False,
                    }
                },
            )
            task()
            assert len(_dataload.mock_calls) == 3
            task = None # clean up db?
            engine = create_engine(database_url)
            connection = engine.connect()
            records = list(connection.execute("select * from Account"))
            connection.close()
            # Only the final partial batch remains in the working database.
            assert len(records) == 14 % 6 # leftovers
    def test_mismatched_options(self):
        """num_records without num_records_tablename is rejected."""
        with self.assertRaises(TaskOptionsError) as e:
            task = _make_task(
                GenerateDataFromYaml,
                {"options": {"generator_yaml": sample_yaml, "num_records": 10}},
            )
            task()
        assert "without num_records_tablename" in str(e.exception)
    def generate_continuation_data(self, fileobj):
        """Write continuation YAML simulating prior work: one registered
        Account nickname and 5 already-consumed Account ids."""
        g = data_generator_runtime.Globals()
        o = data_generator_runtime.ObjectRow(
            "Account", {"Name": "Johnston incorporated", "id": 5}
        )
        g.register_object(o, "The Company")
        for i in range(0, 5):
            # burn through 5 imaginary accounts
            g.id_manager.generate_id("Account")
        data_generator.save_continuation_yaml(g, fileobj)
    def test_with_continuation_file(self):
        """Generation resumes after the ids recorded in a continuation file."""
        with temp_sqlite_database_url() as database_url:
            with temporary_file_path("cont.yml") as continuation_file_path:
                with open(continuation_file_path, "w") as continuation_file:
                    self.generate_continuation_data(continuation_file)
                task = _make_task(
                    GenerateDataFromYaml,
                    {
                        "options": {
                            "generator_yaml": sample_yaml,
                            "database_url": database_url,
                            "mapping": vanilla_mapping_file,
                            "continuation_file": continuation_file_path,
                        }
                    },
                )
                task()
                rows = self.assertRowsCreated(database_url)
                # Ids 1-5 were consumed by the continuation data, so the
                # first new row gets id 6.
                assert dict(rows[0])["id"] == 6
    def test_with_nonexistent_continuation_file(self):
        """A missing continuation file is reported as a TaskOptionsError."""
        with self.assertRaises(TaskOptionsError) as e:
            with temp_sqlite_database_url() as database_url:
                task = _make_task(
                    GenerateDataFromYaml,
                    {
                        "options": {
                            "generator_yaml": sample_yaml,
                            "database_url": database_url,
                            "mapping": vanilla_mapping_file,
                            "continuation_file": "/tmp/foobar/baz/jazz/continuation.yml",
                        }
                    },
                )
                task()
                # NOTE(review): unreachable if task construction raises —
                # presumably kept as a guard; confirm intent.
                rows = self.assertRowsCreated(database_url)
                assert dict(rows[0])["id"] == 6
        assert "jazz" in str(e.exception)
        assert "does not exist" in str(e.exception)
    def test_generate_continuation_file(self):
        """The task can write a continuation file for a later resumed run."""
        with temporary_file_path("cont.yml") as temp_continuation_file:
            with temp_sqlite_database_url() as database_url:
                task = _make_task(
                    GenerateDataFromYaml,
                    {
                        "options": {
                            "generator_yaml": sample_yaml,
                            "database_url": database_url,
                            "generate_continuation_file": temp_continuation_file,
                        }
                    },
                )
                task()
            mapping = yaml.safe_load(open(temp_continuation_file))
            assert mapping # internals of this file are not important to MetaCI
|
SalesforceFoundation/CumulusCI
|
cumulusci/tasks/bulkdata/tests/test_generate_from_snowfakery_task.py
|
Python
|
bsd-3-clause
| 10,700
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-08 04:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the confirmation app.

    Deletes the separate EmailChangeConfirmation model, clears
    Confirmation's Meta options, and redefines several Confirmation fields.
    """
    dependencies = [
        ('confirmation', '0003_emailchangeconfirmation'),
    ]
    operations = [
        # The dedicated email-change confirmation model is removed entirely.
        migrations.DeleteModel(
            name='EmailChangeConfirmation',
        ),
        # Reset Confirmation's Meta options to Django defaults.
        migrations.AlterModelOptions(
            name='confirmation',
            options={},
        ),
        # New discriminator column; default=1 only backfills existing rows
        # and is not kept in the schema (preserve_default=False).
        migrations.AddField(
            model_name='confirmation',
            name='type',
            field=models.PositiveSmallIntegerField(default=1),
            preserve_default=False,
        ),
        # Field redefinitions keep the column types but drop any previous
        # field options — the prior definitions are not visible here; see
        # the earlier confirmation migrations for what changed.
        migrations.AlterField(
            model_name='confirmation',
            name='confirmation_key',
            field=models.CharField(max_length=40),
        ),
        migrations.AlterField(
            model_name='confirmation',
            name='date_sent',
            field=models.DateTimeField(),
        ),
    ]
|
vabs22/zulip
|
confirmation/migrations/0004_remove_confirmationmanager.py
|
Python
|
apache-2.0
| 1,026
|
# Copyright (c) 2020 Cloudification GmbH. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import rbac_address_scope
from neutron_lib.api import extensions
class Rbac_address_scope(extensions.APIExtensionDescriptor):
    """Extension class supporting role-based access control (RBAC) for
    address scopes."""

    # All descriptor metadata (name, alias, description, timestamps) comes
    # from the neutron-lib API definition referenced here.
    api_definition = rbac_address_scope
|
mahak/neutron
|
neutron/extensions/rbac_address_scope.py
|
Python
|
apache-2.0
| 898
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.