"""
Created on Tue Jun 12 13:45:31 2018
@author: huyn
"""
|
"""
A test script for the `indextable` module
"""
from random import randrange
import pytest
from HamiltonianPy.indextable import IndexTable
class TestIndexTable:
def test_init(self):
match0 = r"unhashable type"
match1 = r"The .* has different type from the previous ones"
match2 = r"The .* object already exists"
with pytest.raises(TypeError, match=match0):
IndexTable([[0, 1], [2, 3]])
with pytest.raises(TypeError, match=match1):
IndexTable([(0, 1), "ab"])
with pytest.raises(ValueError, match=match2):
IndexTable([(0, 1), (2, 3), (0, 1)])
def test_object_type(self):
table = IndexTable((x, y) for x in range(4) for y in range(4))
assert table.object_type is tuple
def test_str_and_iteration(self):
separator = "*" * 80
table = IndexTable((x, y) for x in range(2) for y in range(2))
print(table)
print(separator)
for index in table.indices():
print(index)
print(separator)
for item in table.objects():
print(item)
print(separator)
for index, item in table:
print(index, item)
print(separator)
def test_length(self):
num0 = 4
num1 = 7
table = IndexTable((x, y) for x in range(num0) for y in range(num1))
assert len(table) == num0 * num1
def test_query_index(self):
num0 = 7
num1 = 3
table = IndexTable((x, y) for x in range(num0) for y in range(num1))
for i in range(5):
key = (randrange(num0), randrange(num1))
assert table(key) == key[0] * num1 + key[1]
def test_query_object(self):
num0 = 7
num1 = 3
table = IndexTable((x, y) for x in range(num0) for y in range(num1))
for i in range(5):
index = randrange(num0 * num1)
assert table.query_object(index) == divmod(index, num1)
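# A minimal usage sketch inferred from the tests above: IndexTable assigns
# consecutive integer indices to a sequence of hashable objects and supports
# lookups in both directions.
#
#     table = IndexTable((x, y) for x in range(2) for y in range(3))
#     table((1, 2))            # -> 5, object to index
#     table.query_object(5)    # -> (1, 2), index to object
#     len(table)               # -> 6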
|
__author__ = "Laura Martinez Sanchez"
__license__ = "GPL"
__version__ = "1.0"
__email__ = "lmartisa@gmail.com"
from osgeo import gdal, gdalnumeric, ogr, osr
import numpy as np
from PIL import Image, ImageDraw
from collections import defaultdict
import pickle
import time
from texture_common import *
def world2Pixel(geoMatrix, x, y):
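    """
    Convert map coordinates (x, y) to (pixel, line) indices using a GDAL
    geotransform tuple: (ulX, pixel width, row rotation, ulY, column rotation,
    pixel height). E.g. with geoMatrix = (100.0, 30.0, 0, 500.0, 0, -30.0),
    world2Pixel(geoMatrix, 160.0, 440.0) == (2, 2).
    """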
ulX = geoMatrix[0]
ulY = geoMatrix[3]
xDist = geoMatrix[1]
yDist = geoMatrix[5]
rtnX = geoMatrix[2]
rtnY = geoMatrix[4]
pixel = int((x - ulX) / xDist)
line = int((y - ulY) / yDist)
return (pixel, line)
def imageToArray(i):
'''
    Converts a Python Imaging Library (PIL) image to a gdalnumeric (NumPy) array.
'''
a = gdalnumeric.fromstring(i.tobytes(), 'b')
a.shape = i.im.size[1], i.im.size[0]
return a
def ReadClipArray(lrY, ulY, lrX, ulX, img):
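    """Read only the (ulX..lrX, ulY..lrY) window of every band of img into a
    (bands, rows, cols) array."""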
clip = np.empty((img.RasterCount, lrY - ulY, lrX - ulX))
    # Read only the pixels needed to do the clip
for band in range(img.RasterCount):
band += 1
imgaux = img.GetRasterBand(band).ReadAsArray(ulX, ulY, lrX - ulX, lrY - ulY)
clip[band - 1] = imgaux
return clip
def ObtainPixelsfromShape(field, rasterPath, shapePath, INX, *args):
# field='zona'
# open dataset, also load as a gdal image to get geotransform
    # INX can be False. If True, additional texture layers are passed via *args.
print "Starting clip...."
start = time.time()
if args:
texture_train_Path = args[0]
print texture_train_Path
img, textArrayShp = createTextureArray(texture_train_Path, rasterPath)
else:
#print"Indexes = False"
img = gdal.Open(rasterPath)
geoTrans = img.GetGeoTransform()
geoTransaux = img.GetGeoTransform()
proj = img.GetProjection()
#open shapefile
driver = ogr.GetDriverByName("ESRI Shapefile")
dataSource = driver.Open(shapePath, 0)
layer = dataSource.GetLayer()
clipdic = defaultdict(list)
count = 0
    # Convert the layer extent to image pixel coordinates; we read only the pixels needed
for feature in layer:
minX, maxX, minY, maxY = feature.GetGeometryRef().GetEnvelope()
geoTrans = img.GetGeoTransform()
ulX, ulY = world2Pixel(geoTrans, minX, maxY)
lrX, lrY = world2Pixel(geoTrans, maxX, minY)
#print ulX,lrX,ulY,lrY
# Calculate the pixel size of the new image
pxWidth = int(lrX - ulX)
pxHeight = int(lrY - ulY)
clip = ReadClipArray(lrY, ulY, lrX, ulX, img)
#EDIT: create pixel offset to pass to new image Projection info
xoffset = ulX
yoffset = ulY
#print "Xoffset, Yoffset = ( %d, %d )" % ( xoffset, yoffset )
# Create a new geomatrix for the image
geoTrans = list(geoTrans)
geoTrans[0] = minX
geoTrans[3] = maxY
# Map points to pixels for drawing the boundary on a blank 8-bit, black and white, mask image.
points = []
pixels = []
geom = feature.GetGeometryRef()
pts = geom.GetGeometryRef(0)
        for p in range(pts.GetPointCount()):
            points.append((pts.GetX(p), pts.GetY(p)))
        for p in points:
            pixels.append(world2Pixel(geoTrans, p[0], p[1]))
rasterPoly = Image.new("L", (pxWidth, pxHeight), 1)
rasterize = ImageDraw.Draw(rasterPoly)
rasterize.polygon(pixels, 0)
mask = imageToArray(rasterPoly)
        # Show the clips of the features
# plt.imshow(mask)
# plt.show()
# Clip the image using the mask into a dict
temp = gdalnumeric.choose(mask, (clip, np.nan))
        # # Show the clips of the image
# plt.imshow(temp[4])
# plt.show()
temp = np.concatenate(temp.T)
        temp = temp[~np.isnan(temp[:, 0])]  # drop masked (NaN) pixels outside the polygon
#print temp.shape
clipdic[str(feature.GetField(field))].append(temp)
count += temp.shape[0]
end = time.time()
print "Time clipshape:"
print (end - start)
print "count", count
return clipdic, count
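# Hypothetical call (paths and field name are placeholders): returns a dict
# mapping each value of `field` to per-feature arrays of band values for the
# pixels inside that feature, plus the total pixel count:
#
#     clipdic, count = ObtainPixelsfromShape('zona', 'image.tif', 'train.shp', False)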
|
import sys
import subprocess
import urllib.request
import copy
def main(argv):
cookie = urllib.request.HTTPCookieProcessor()
oc = copy.deepcopy(cookie)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
"""
Playlist Generation
"""
from os import path
from random import choice
import string
import pafy
from .. import content, g, playlists, screen, util, listview
from ..playlist import Playlist
from . import command, search, album_search
@command(r'mkp\s*(.{1,100})')
def generate_playlist(sourcefile):
"""Generate a playlist from video titles in sourcefile"""
    # If the --description/-d flag is present, delegate to the description generator
if "--description" in sourcefile or "-d" in sourcefile:
description_generator(sourcefile)
return
expanded_sourcefile = path.expanduser(sourcefile)
if not check_sourcefile(expanded_sourcefile):
g.message = util.F('mkp empty') % expanded_sourcefile
else:
queries = read_sourcefile(expanded_sourcefile)
g.message = util.F('mkp parsed') % (len(queries), sourcefile)
if queries:
create_playlist(queries)
g.message = util.F('pl help')
g.content = content.playlists_display()
def read_sourcefile(filename):
"""Read each line as a query from filename"""
with open(filename) as srcfl:
queries = list()
for item in srcfl.readlines():
clean_item = str(item).strip()
if not clean_item:
continue
queries.append(clean_item)
return queries
def check_sourcefile(filename):
"""Check if filename exists and has a non-zero size"""
return path.isfile(filename) and path.getsize(filename) > 0
def create_playlist(queries, title=None):
"""Add a new playlist
Create playlist with a random name, get the first
match for each title in queries and append it to the playlist
"""
    if title is not None:
        plname = title.replace(" ", "-")
    else:
        plname = random_plname()
if not g.userpl.get(plname):
g.userpl[plname] = Playlist(plname)
for query in queries:
g.message = util.F('mkp finding') % query
screen.update()
qresult = find_best_match(query)
if qresult:
g.userpl[plname].songs.append(qresult)
if g.userpl[plname]:
playlists.save()
def find_best_match(query):
"""Find the best(first)"""
# This assumes that the first match is the best one
qs = search.generate_search_qs(query)
wdata = pafy.call_gdata('search', qs)
results = search.get_tracks_from_json(wdata)
if results:
res, score = album_search._best_song_match(
results, query, 0.1, 1.0, 0.0)
return res
def random_plname():
"""Generates a random alphanumeric string of 6 characters"""
n_chars = 6
return ''.join(choice(string.ascii_lowercase + string.digits)
for _ in range(n_chars))
def description_generator(text):
""" Fetches a videos description and parses it for
<artist> - <track> combinations
"""
if not isinstance(g.model, Playlist):
g.message = util.F("mkp desc unknown")
return
# Use only the first result, for now
num = text.replace("--description", "")
num = num.replace("-d", "")
num = util.number_string_to_list(num)[0]
query = {}
query['id'] = g.model[num].ytid
query['part'] = 'snippet'
query['maxResults'] = '1'
data = pafy.call_gdata('videos', query)['items'][0]['snippet']
title = "mkp %s" % data['title']
data = util.fetch_songs(data['description'], data['title'])
columns = [
{"name": "idx", "size": 3, "heading": "Num"},
{"name": "artist", "size": 30, "heading": "Artist"},
{"name": "title", "size": "remaining", "heading": "Title"},
]
def run_m(idx):
""" Create playlist based on the
results selected
"""
create_playlist(idx, title)
if data:
data = [listview.ListSongtitle(x) for x in data]
g.content = listview.ListView(columns, data, run_m)
g.message = util.F("mkp desc which data")
else:
g.message = util.F("mkp no valid")
return
|
import sys, os
os.environ["EPICS_CA_ADDR_LIST"] = "192.168.82.10"
os.environ["EPICS_CA_MAX_ARRAY_BYTES"] = "100000000"
import velaINJMagnetControl as VIMC
a = VIMC.velaINJMagnetController(True, False)
a.switchONpsu('SOL')
print(a.getRI('SOL'))
a.switchONpsu('SOL')
print(a.isON('SOL'))
|
from openstates.utils import LXMLMixin
import datetime as dt
from pupa.scrape import Scraper, Event
from .utils import get_short_codes
from requests import HTTPError
import pytz
URL = "http://www.capitol.hawaii.gov/upcominghearings.aspx"
class HIEventScraper(Scraper, LXMLMixin):
def get_related_bills(self, href):
ret = []
try:
page = self.lxmlize(href)
except HTTPError:
return ret
bills = page.xpath(".//a[contains(@href, 'Bills')]")
for bill in bills:
try:
row = next(bill.iterancestors(tag='tr'))
except StopIteration:
continue
tds = row.xpath("./td")
descr = tds[1].text_content()
for i in ['\r\n', '\xa0']:
descr = descr.replace(i, '')
ret.append({"bill_id": bill.text_content(),
"type": "consideration",
"descr": descr})
return ret
def scrape(self):
tz = pytz.timezone("US/Eastern")
get_short_codes(self)
page = self.lxmlize(URL)
table = page.xpath(
"//table[@id='ctl00_ContentPlaceHolderCol1_GridView1']")[0]
for event in table.xpath(".//tr")[1:]:
tds = event.xpath("./td")
committee = tds[0].text_content().strip()
descr = [x.text_content() for x in tds[1].xpath(".//span")]
if len(descr) != 1:
raise Exception
descr = descr[0].replace('.', '').strip()
when = tds[2].text_content().strip()
where = tds[3].text_content().strip()
notice = tds[4].xpath(".//a")[0]
notice_href = notice.attrib['href']
notice_name = notice.text
when = dt.datetime.strptime(when, "%m/%d/%Y %I:%M %p")
when = pytz.utc.localize(when)
event = Event(name=descr, start_time=when, classification='committee-meeting',
description=descr, location_name=where, timezone=tz.zone)
if "/" in committee:
committees = committee.split("/")
else:
committees = [committee]
for committee in committees:
if "INFO" not in committee:
committee = self.short_ids.get("committee", {"chamber": "unknown",
"name": committee})
else:
committee = {
"chamber": "joint",
"name": committee,
}
event.add_committee(committee['name'], note='host')
event.add_source(URL)
event.add_document(notice_name,
notice_href,
media_type='text/html')
for bill in self.get_related_bills(notice_href):
a = event.add_agenda_item(description=bill['descr'])
a.add_bill(
bill['bill_id'],
note=bill['type']
)
yield event
|
from oauth2 import Consumer, Client, Token
from httplib2 import ProxyInfo
from httplib2.socks import PROXY_TYPE_HTTP
from django.conf import settings
class Authentication(object):
def __init__(self, consumer_key, consumer_secret, token_key, token_secret):
consumer = Consumer(key=consumer_key, secret=consumer_secret)
token = Token(key=token_key, secret=token_secret)
proxy_info = None
if hasattr(settings, 'PROXY_HOST') and \
hasattr(settings, 'PROXY_PORT'):
proxy_info = ProxyInfo(
proxy_type=PROXY_TYPE_HTTP,
proxy_host=settings.PROXY_HOST,
proxy_port=settings.PROXY_PORT)
self.client = Client(
consumer=consumer,
token=token,
proxy_info=proxy_info)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
|
def classeq(x, y):
    return x.__class__ == y.__class__
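# e.g. classeq(1, 2) is True, classeq(1, "a") is False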
class Element(object): pass
|
import json
import time
class TaskQueueInputError(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class TaskQueueSystemError(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class TaskQueueEmptyError(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
def epoch():
return int(time.time())
def obj2jsonstring(obj):
    return json.dumps(obj)
def generate_uuid(name, schedule, db_id):
return '%s|%d|%d' % (name, schedule, db_id)
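# e.g. generate_uuid('backup', 1622000000, 42) -> 'backup|1622000000|42'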
|
"""
Conversion pack for October 2021 release
"""
CONVERSIONS = {
# Renamed items
"Quafe Zero": "Quafe Zero Classic",
"Exigent Sentry Drone Navigation Mutaplasmid": "Exigent Sentry Drone Precision Mutaplasmid",
}
|
ROOT = '/.well-known/acme-challenge'
ENDPOINT = '/k9s7WeOPg3HdSjwlAqEVRxnezsGGe-CFOwPfOcU3VgU'
RESPONSE = 'k9s7WeOPg3HdSjwlAqEVRxnezsGGe-CFOwPfOcU3VgU.QBkCfzPq0mKXIJSktgl4_b7psKazh3MSZ8juWnZbJbg'
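# http-01 challenge layout: the token is served at ROOT + ENDPOINT, and the
# response body is "<token>.<base64url-encoded JWK thumbprint of the account key>".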
|
import contextlib
import logging
import os
import stat
from unittest import mock
import fixtures
import http.server
import progressbar
import threading
import testscenarios
import testtools
import snapcraft
from snapcraft.internal import common, elf, steps
from snapcraft.internal.project_loader import grammar_processing
from tests import fake_servers, fixture_setup
from tests.file_utils import get_snapcraft_path
class ContainsList(list):
def __eq__(self, other):
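        # equal when every element of self is contained in the matching element of other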
        return all(ours in theirs for ours, theirs in zip(self, other))
class MockOptions:
def __init__(
self,
source=None,
source_type=None,
source_branch=None,
source_tag=None,
source_subdir=None,
source_depth=None,
source_commit=None,
source_checksum=None,
disable_parallel=False,
):
self.source = source
self.source_type = source_type
self.source_depth = source_depth
self.source_branch = source_branch
self.source_commit = source_commit
self.source_tag = source_tag
self.source_subdir = source_subdir
self.disable_parallel = disable_parallel
class IsExecutable:
"""Match if a file path is executable."""
def __str__(self):
return "IsExecutable()"
def match(self, file_path):
if not os.stat(file_path).st_mode & stat.S_IEXEC:
return testtools.matchers.Mismatch(
"Expected {!r} to be executable, but it was not".format(file_path)
)
return None
class LinkExists:
"""Match if a file path is a symlink."""
def __init__(self, expected_target=None):
self._expected_target = expected_target
def __str__(self):
return "LinkExists()"
def match(self, file_path):
if not os.path.exists(file_path):
return testtools.matchers.Mismatch(
"Expected {!r} to be a symlink, but it doesn't exist".format(file_path)
)
if not os.path.islink(file_path):
return testtools.matchers.Mismatch(
"Expected {!r} to be a symlink, but it was not".format(file_path)
)
target = os.readlink(file_path)
if target != self._expected_target:
return testtools.matchers.Mismatch(
"Expected {!r} to be a symlink pointing to {!r}, but it was "
"pointing to {!r}".format(file_path, self._expected_target, target)
)
return None
class TestCase(testscenarios.WithScenarios, testtools.TestCase):
def setUp(self):
super().setUp()
temp_cwd_fixture = fixture_setup.TempCWD()
self.useFixture(temp_cwd_fixture)
self.path = temp_cwd_fixture.path
# Use a separate path for XDG dirs, or changes there may be detected as
# source changes.
self.xdg_path = self.useFixture(fixtures.TempDir()).path
self.useFixture(fixture_setup.TempXDG(self.xdg_path))
self.fake_terminal = fixture_setup.FakeTerminal()
self.useFixture(self.fake_terminal)
self.useFixture(fixture_setup.SilentSnapProgress())
# Some tests will directly or indirectly change the plugindir, which
# is a module variable. Make sure that it is returned to the original
# value when a test ends.
self.addCleanup(common.set_plugindir, common.get_plugindir())
self.addCleanup(common.set_schemadir, common.get_schemadir())
self.addCleanup(common.set_librariesdir, common.get_librariesdir())
self.addCleanup(common.set_extensionsdir, common.get_extensionsdir())
self.addCleanup(common.reset_env)
common.set_schemadir(os.path.join(get_snapcraft_path(), "schema"))
self.fake_logger = fixtures.FakeLogger(level=logging.ERROR)
self.useFixture(self.fake_logger)
patcher = mock.patch("multiprocessing.cpu_count")
self.cpu_count = patcher.start()
self.cpu_count.return_value = 2
self.addCleanup(patcher.stop)
# We do not want the paths to affect every test we have.
patcher = mock.patch(
"snapcraft.file_utils.get_tool_path", side_effect=lambda x: x
)
patcher.start()
self.addCleanup(patcher.stop)
patcher = mock.patch(
"snapcraft.internal.indicators.ProgressBar", new=SilentProgressBar
)
patcher.start()
self.addCleanup(patcher.stop)
# These are what we expect by default
self.snap_dir = os.path.join(os.getcwd(), "snap")
self.prime_dir = os.path.join(os.getcwd(), "prime")
self.stage_dir = os.path.join(os.getcwd(), "stage")
self.parts_dir = os.path.join(os.getcwd(), "parts")
self.local_plugins_dir = os.path.join(self.snap_dir, "plugins")
# Avoid installing patchelf in the tests
self.useFixture(fixtures.EnvironmentVariable("SNAPCRAFT_NO_PATCHELF", "1"))
# Disable Sentry reporting for tests, otherwise they'll hang waiting
# for input
self.useFixture(
fixtures.EnvironmentVariable("SNAPCRAFT_ENABLE_ERROR_REPORTING", "false")
)
# Don't let the managed host variable leak into tests
self.useFixture(fixtures.EnvironmentVariable("SNAPCRAFT_MANAGED_HOST"))
machine = os.environ.get("SNAPCRAFT_TEST_MOCK_MACHINE", None)
self.base_environment = fixture_setup.FakeBaseEnvironment(machine=machine)
self.useFixture(self.base_environment)
# Make sure "SNAPCRAFT_ENABLE_DEVELOPER_DEBUG" is reset between tests
self.useFixture(
fixtures.EnvironmentVariable("SNAPCRAFT_ENABLE_DEVELOPER_DEBUG")
)
self.useFixture(fixture_setup.FakeSnapcraftctl())
def make_snapcraft_yaml(self, content, encoding="utf-8"):
with contextlib.suppress(FileExistsError):
os.mkdir("snap")
snapcraft_yaml = os.path.join("snap", "snapcraft.yaml")
with open(snapcraft_yaml, "w", encoding=encoding) as fp:
fp.write(content)
return snapcraft_yaml
def verify_state(self, part_name, state_dir, expected_step_name):
self.assertTrue(
os.path.isdir(state_dir),
"Expected state directory for {}".format(part_name),
)
# Expect every step up to and including the specified one to be run
step = steps.get_step_by_name(expected_step_name)
for step in step.previous_steps() + [step]:
self.assertTrue(
os.path.exists(os.path.join(state_dir, step.name)),
"Expected {!r} to be run for {}".format(step.name, part_name),
)
def load_part(
self,
part_name,
plugin_name=None,
part_properties=None,
project_options=None,
stage_packages_repo=None,
base="core",
confinement="strict",
snap_type="app",
):
if not plugin_name:
plugin_name = "nil"
properties = {"plugin": plugin_name}
if part_properties:
properties.update(part_properties)
if not project_options:
project_options = snapcraft.ProjectOptions()
validator = snapcraft.internal.project_loader.Validator()
schema = validator.part_schema
definitions_schema = validator.definitions_schema
plugin = snapcraft.internal.pluginhandler.load_plugin(
part_name=part_name,
plugin_name=plugin_name,
properties=properties,
project_options=project_options,
part_schema=schema,
definitions_schema=definitions_schema,
)
if not stage_packages_repo:
stage_packages_repo = mock.Mock()
grammar_processor = grammar_processing.PartGrammarProcessor(
plugin=plugin,
properties=properties,
project=project_options,
repo=stage_packages_repo,
)
return snapcraft.internal.pluginhandler.PluginHandler(
plugin=plugin,
part_properties=properties,
project_options=project_options,
part_schema=schema,
definitions_schema=definitions_schema,
grammar_processor=grammar_processor,
stage_packages_repo=stage_packages_repo,
snap_base_path="/snap/fake-name/current",
base=base,
confinement=confinement,
snap_type=snap_type,
soname_cache=elf.SonameCache(),
)
class TestWithFakeRemoteParts(TestCase):
def setUp(self):
super().setUp()
self.useFixture(fixture_setup.FakeParts())
class FakeFileHTTPServerBasedTestCase(TestCase):
def setUp(self):
super().setUp()
self.useFixture(fixtures.EnvironmentVariable("no_proxy", "localhost,127.0.0.1"))
self.server = http.server.HTTPServer(
("127.0.0.1", 0), fake_servers.FakeFileHTTPRequestHandler
)
server_thread = threading.Thread(target=self.server.serve_forever)
self.addCleanup(server_thread.join)
self.addCleanup(self.server.server_close)
self.addCleanup(self.server.shutdown)
server_thread.start()
class SilentProgressBar(progressbar.ProgressBar):
"""A progress bar causing no spurious output during tests."""
def start(self):
pass
def update(self, value=None):
pass
def finish(self):
pass
|
"""
Copyright (c) 2015, Philipp Klaus. All rights reserved.
License: GPLv3
"""
from distutils.core import setup
setup(name='netio230a',
version = '1.1.9',
description = 'Python package to control the Koukaam NETIO-230A',
long_description = 'Python software to access the Koukaam NETIO-230A and NETIO-230B: power distribution units / controllable power outlets with Ethernet interface',
author = 'Philipp Klaus',
author_email = 'philipp.l.klaus@web.de',
url = 'https://github.com/pklaus/netio230a',
license = 'GPL3+',
packages = ['netio230a'],
scripts = ['scripts/netio230a_cli', 'scripts/netio230a_discovery', 'scripts/netio230a_fakeserver'],
zip_safe = True,
platforms = 'any',
keywords = 'Netio230A Koukaam PDU',
classifiers = [
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
'License :: OSI Approved :: GPL License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
]
)
|
import actor
class Oxygen(actor.Actor):
extra_keys = ['capacity', 'pipe_length', 'is_initial']
def __init__(self, *args, **kwargs):
super(Oxygen, self).__init__(*args, **kwargs)
self.capacity = kwargs.get('capacity', 3000)
self.contained = self.capacity
self.pipe_length = kwargs.get('pipe_length', 5000)
self.is_initial = kwargs.get('is_initial', False)
def tick(self):
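        # an empty tank notifies the world so listeners can react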
if self.contained == 0:
self.world.dispatch_event("on_suffocate")
|
"""
Tests for L{twisted.internet.stdio}.
@var properEnv: A copy of L{os.environ} which has L{bytes} keys/values on POSIX
platforms and native L{str} keys/values on Windows.
"""
from __future__ import absolute_import, division
import os
import sys
import itertools
from twisted.trial import unittest
from twisted.python import filepath, log
from twisted.python.reflect import requireModule
from twisted.python.runtime import platform
from twisted.python.compat import xrange, intToBytes, bytesEnviron
from twisted.internet import error, defer, protocol, stdio, reactor
from twisted.test.test_tcp import ConnectionLostNotifyingProtocol
UNIQUE_LAST_WRITE_STRING = b'xyz123abc Twisted is great!'
skipWindowsNopywin32 = None
if platform.isWindows():
if requireModule('win32process') is None:
skipWindowsNopywin32 = ("On windows, spawnProcess is not available "
"in the absence of win32process.")
properEnv = dict(os.environ)
properEnv["PYTHONPATH"] = os.pathsep.join(sys.path)
else:
properEnv = bytesEnviron()
properEnv[b"PYTHONPATH"] = os.pathsep.join(sys.path).encode(
sys.getfilesystemencoding())
class StandardIOTestProcessProtocol(protocol.ProcessProtocol):
"""
Test helper for collecting output from a child process and notifying
something when it exits.
@ivar onConnection: A L{defer.Deferred} which will be called back with
C{None} when the connection to the child process is established.
@ivar onCompletion: A L{defer.Deferred} which will be errbacked with the
failure associated with the child process exiting when it exits.
@ivar onDataReceived: A L{defer.Deferred} which will be called back with
this instance whenever C{childDataReceived} is called, or C{None} to
suppress these callbacks.
@ivar data: A C{dict} mapping file descriptors to strings containing all
bytes received from the child process on each file descriptor.
"""
onDataReceived = None
def __init__(self):
self.onConnection = defer.Deferred()
self.onCompletion = defer.Deferred()
self.data = {}
def connectionMade(self):
self.onConnection.callback(None)
def childDataReceived(self, name, bytes):
"""
Record all bytes received from the child process in the C{data}
dictionary. Fire C{onDataReceived} if it is not C{None}.
"""
self.data[name] = self.data.get(name, b'') + bytes
if self.onDataReceived is not None:
d, self.onDataReceived = self.onDataReceived, None
d.callback(self)
def processEnded(self, reason):
self.onCompletion.callback(reason)
class StandardInputOutputTests(unittest.TestCase):
skip = skipWindowsNopywin32
def _spawnProcess(self, proto, sibling, *args, **kw):
"""
Launch a child Python process and communicate with it using the
given ProcessProtocol.
@param proto: A L{ProcessProtocol} instance which will be connected
to the child process.
@param sibling: The basename of a file containing the Python program
to run in the child process.
@param *args: strings which will be passed to the child process on
the command line as C{argv[2:]}.
@param **kw: additional arguments to pass to L{reactor.spawnProcess}.
@return: The L{IProcessTransport} provider for the spawned process.
"""
args = [sys.executable,
b"-m", b"twisted.test." + sibling,
reactor.__class__.__module__] + list(args)
return reactor.spawnProcess(
proto,
sys.executable,
args,
env=properEnv,
**kw)
def _requireFailure(self, d, callback):
def cb(result):
self.fail("Process terminated with non-Failure: %r" % (result,))
def eb(err):
return callback(err)
return d.addCallbacks(cb, eb)
def test_loseConnection(self):
"""
Verify that a protocol connected to L{StandardIO} can disconnect
itself using C{transport.loseConnection}.
"""
errorLogFile = self.mktemp()
log.msg("Child process logging to " + errorLogFile)
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, b'stdio_test_loseconn', errorLogFile)
def processEnded(reason):
# Copy the child's log to ours so it's more visible.
with open(errorLogFile, 'r') as f:
for line in f:
log.msg("Child logged: " + line.rstrip())
self.failIfIn(1, p.data)
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_readConnectionLost(self):
"""
When stdin is closed and the protocol connected to it implements
L{IHalfCloseableProtocol}, the protocol's C{readConnectionLost} method
is called.
"""
errorLogFile = self.mktemp()
log.msg("Child process logging to " + errorLogFile)
p = StandardIOTestProcessProtocol()
p.onDataReceived = defer.Deferred()
def cbBytes(ignored):
d = p.onCompletion
p.transport.closeStdin()
return d
p.onDataReceived.addCallback(cbBytes)
def processEnded(reason):
reason.trap(error.ProcessDone)
d = self._requireFailure(p.onDataReceived, processEnded)
self._spawnProcess(
p, b'stdio_test_halfclose', errorLogFile)
return d
def test_lastWriteReceived(self):
"""
Verify that a write made directly to stdout using L{os.write}
after StandardIO has finished is reliably received by the
process reading that stdout.
"""
p = StandardIOTestProcessProtocol()
# Note: the OS X bug which prompted the addition of this test
# is an apparent race condition involving non-blocking PTYs.
# Delaying the parent process significantly increases the
# likelihood of the race going the wrong way. If you need to
# fiddle with this code at all, uncommenting the next line
# will likely make your life much easier. It is commented out
# because it makes the test quite slow.
# p.onConnection.addCallback(lambda ign: __import__('time').sleep(5))
try:
self._spawnProcess(
p, b'stdio_test_lastwrite', UNIQUE_LAST_WRITE_STRING,
usePTY=True)
except ValueError as e:
# Some platforms don't work with usePTY=True
raise unittest.SkipTest(str(e))
def processEnded(reason):
"""
Asserts that the parent received the bytes written by the child
immediately after the child starts.
"""
self.assertTrue(
p.data[1].endswith(UNIQUE_LAST_WRITE_STRING),
"Received %r from child, did not find expected bytes." % (
p.data,))
reason.trap(error.ProcessDone)
return self._requireFailure(p.onCompletion, processEnded)
def test_hostAndPeer(self):
"""
Verify that the transport of a protocol connected to L{StandardIO}
has C{getHost} and C{getPeer} methods.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, b'stdio_test_hostpeer')
def processEnded(reason):
host, peer = p.data[1].splitlines()
self.assertTrue(host)
self.assertTrue(peer)
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_write(self):
"""
Verify that the C{write} method of the transport of a protocol
connected to L{StandardIO} sends bytes to standard out.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, b'stdio_test_write')
def processEnded(reason):
self.assertEqual(p.data[1], b'ok!')
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_writeSequence(self):
"""
Verify that the C{writeSequence} method of the transport of a
protocol connected to L{StandardIO} sends bytes to standard out.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, b'stdio_test_writeseq')
def processEnded(reason):
self.assertEqual(p.data[1], b'ok!')
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def _junkPath(self):
junkPath = self.mktemp()
with open(junkPath, 'wb') as junkFile:
for i in xrange(1024):
junkFile.write(intToBytes(i) + b'\n')
return junkPath
def test_producer(self):
"""
Verify that the transport of a protocol connected to L{StandardIO}
is a working L{IProducer} provider.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
written = []
toWrite = list(range(100))
def connectionMade(ign):
if toWrite:
written.append(intToBytes(toWrite.pop()) + b"\n")
proc.write(written[-1])
reactor.callLater(0.01, connectionMade, None)
proc = self._spawnProcess(p, b'stdio_test_producer')
p.onConnection.addCallback(connectionMade)
def processEnded(reason):
self.assertEqual(p.data[1], b''.join(written))
self.assertFalse(
toWrite,
"Connection lost with %d writes left to go." % (len(toWrite),))
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_consumer(self):
"""
Verify that the transport of a protocol connected to L{StandardIO}
is a working L{IConsumer} provider.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
junkPath = self._junkPath()
self._spawnProcess(p, b'stdio_test_consumer', junkPath)
def processEnded(reason):
with open(junkPath, 'rb') as f:
self.assertEqual(p.data[1], f.read())
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_normalFileStandardOut(self):
"""
If L{StandardIO} is created with a file descriptor which refers to a
normal file (ie, a file from the filesystem), L{StandardIO.write}
writes bytes to that file. In particular, it does not immediately
consider the file closed or call its protocol's C{connectionLost}
method.
"""
onConnLost = defer.Deferred()
proto = ConnectionLostNotifyingProtocol(onConnLost)
path = filepath.FilePath(self.mktemp())
self.normal = normal = path.open('wb')
self.addCleanup(normal.close)
kwargs = dict(stdout=normal.fileno())
if not platform.isWindows():
# Make a fake stdin so that StandardIO doesn't mess with the *real*
# stdin.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
kwargs['stdin'] = r
connection = stdio.StandardIO(proto, **kwargs)
# The reactor needs to spin a bit before it might have incorrectly
# decided stdout is closed. Use this counter to keep track of how
# much we've let it spin. If it closes before we expected, this
# counter will have a value that's too small and we'll know.
howMany = 5
count = itertools.count()
def spin():
for value in count:
if value == howMany:
connection.loseConnection()
return
connection.write(intToBytes(value))
break
reactor.callLater(0, spin)
reactor.callLater(0, spin)
# Once the connection is lost, make sure the counter is at the
# appropriate value.
def cbLost(reason):
self.assertEqual(next(count), howMany + 1)
self.assertEqual(
path.getContent(),
b''.join(map(intToBytes, range(howMany))))
onConnLost.addCallback(cbLost)
return onConnLost
if platform.isWindows():
test_normalFileStandardOut.skip = (
"StandardIO does not accept stdout as an argument to Windows. "
"Testing redirection to a file is therefore harder.")
|
"""Contains expectations."""
import inquisition
FISHY = inquisition.SPANISH
FISHY = FISHY.replace('surprise', 'haddock')
print FISHY
|
import csv
from datetime import datetime
from django.conf import settings
from django.core.management import BaseCommand
from bustimes.utils import download_if_changed
from ...models import Licence, Registration, Variation
def parse_date(date_string):
if date_string:
return datetime.strptime(date_string, '%d/%m/%y').date()
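# e.g. parse_date('25/12/20') -> datetime.date(2020, 12, 25)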
def download_if_modified(path):
url = f"https://content.mgmt.dvsacloud.uk/olcs.prod.dvsa.aws/data-gov-uk-export/{path}"
return download_if_changed(settings.DATA_DIR / path, url)
class Command(BaseCommand):
@staticmethod
def add_arguments(parser):
parser.add_argument('regions', nargs='?', type=str, default="FBCMKGDH")
def get_rows(self, path):
with open(settings.DATA_DIR / path) as open_file:
yield from csv.DictReader(open_file)
def handle(self, regions, **kwargs):
for region in regions:
modified_1, last_modified_1 = download_if_modified(f"Bus_RegisteredOnly_{region}.csv")
modified_2, last_modified_2 = download_if_modified(f"Bus_Variation_{region}.csv")
if modified_1 or modified_2:
print(region, last_modified_1, last_modified_2)
self.handle_region(region)
def handle_region(self, region):
lics = Licence.objects.filter(traffic_area=region)
lics = lics.in_bulk(field_name="licence_number")
lics_to_update = set()
lics_to_create = []
regs = Registration.objects.filter(licence__traffic_area=region)
regs = regs.in_bulk(field_name="registration_number")
regs_to_update = set()
regs_to_create = []
variations = Variation.objects.filter(registration__licence__traffic_area=region)
variations = variations.select_related('registration').all()
variations_dict = {}
for variation in variations:
reg_no = variation.registration.registration_number
if reg_no in variations_dict:
variations_dict[reg_no][variation.variation_number] = variation
else:
variations_dict[reg_no] = {
variation.variation_number: variation
}
# vars_to_update = set()
vars_to_create = []
# previous_line = None
# cardinals = set()
for line in self.get_rows(f"Bus_Variation_{region}.csv"):
reg_no = line["Reg_No"]
var_no = int(line["Variation Number"])
lic_no = line["Lic_No"]
if lic_no in lics:
licence = lics[lic_no]
if licence.id and licence not in lics_to_update:
licence.trading_name = ''
lics_to_update.add(licence)
else:
licence = Licence(licence_number=lic_no)
lics_to_create.append(licence)
lics[lic_no] = licence
licence.name = line['Op_Name']
# a licence can have multiple trading names
if line['trading_name'] not in licence.trading_name:
if licence.trading_name:
licence.trading_name = f"{licence.trading_name}\n{line['trading_name']}"
else:
licence.trading_name = line['trading_name']
if licence.address != line['Address']:
if licence.address:
print(licence.address, line['Address'])
licence.address = line['Address']
if licence.traffic_area:
assert licence.traffic_area == line['Current Traffic Area']
else:
licence.traffic_area = line['Current Traffic Area']
licence.discs = line['Discs in Possession'] or 0
licence.authorised_discs = line['AUTHDISCS'] or 0
licence.description = line['Description']
licence.granted_date = parse_date(line['Granted_Date'])
licence.expiry_date = parse_date(line['Exp_Date'])
if len(reg_no) > 20:
# PK0000098/PK0000098/364
parts = reg_no.split('/')
assert parts[0] == parts[1]
reg_no = f'{parts[1]}/{parts[2]}'
if reg_no in regs:
registration = regs[reg_no]
if registration.id and registration not in regs_to_update:
regs_to_update.add(registration)
else:
registration = Registration(
registration_number=reg_no,
registered=False
)
regs_to_create.append(registration)
regs[reg_no] = registration
registration.licence = licence
status = line['Registration Status']
registration.registration_status = status
if var_no == 0 and status == 'New':
registration.registered = True
elif status == 'Registered':
registration.registered = True
elif status == 'Cancelled' or status == 'Admin Cancelled' or status == 'Cancellation':
registration.registered = False
registration.start_point = line['start_point']
registration.finish_point = line['finish_point']
registration.via = line['via']
registration.subsidies_description = line['Subsidies_Description']
registration.subsidies_details = line['Subsidies_Details']
registration.traffic_area_office_covered_by_area = line['TAO Covered BY Area']
# a registration can have multiple numbers
if registration.service_number:
if line['Service Number'] not in registration.service_number:
registration.service_number = f"{registration.service_number}\n{line['Service Number']}"
else:
registration.service_number = line['Service Number']
# a registration can have multiple types
if registration.service_type_description:
if line['Service_Type_Description'] not in registration.service_type_description:
registration.service_type_description += f"\n{line['Service_Type_Description']}"
else:
registration.service_type_description = line['Service_Type_Description']
if registration.authority_description:
if line['Auth_Description'] not in registration.authority_description:
registration.authority_description += f"\n{line['Auth_Description']}"
if len(registration.authority_description) > 255:
# some National Express coach services cover many authorities
# print(reg_no)
registration.authority_description = registration.authority_description[:255]
else:
registration.authority_description = line['Auth_Description']
# if previous_line:
# if previous_line["Reg_No"] == reg_no:
# if int(previous_line["Variation Number"]) == var_no:
# for key in line:
# prev = previous_line[key]
# value = line[key]
# if prev != value:
# if key not in (
# 'Auth_Description', 'TAO Covered BY Area',
# 'trading_name', 'Pub_Text', 'Registration Status', 'end_date', 'received_date'
# 'effective_date', 'short_notice', 'Service_Type_Description'
# ):
# print(reg_no)
# print(f"'{key}': '{prev}', '{value}'")
# cardinals.add(key)
# # print(line)
variation = Variation(registration=registration, variation_number=var_no)
if reg_no in variations_dict:
if var_no in variations_dict[reg_no]:
continue # ?
else:
variations_dict[reg_no][var_no] = variation
else:
variations_dict[reg_no] = {var_no: variation}
variation.effective_date = parse_date(line['effective_date'])
variation.date_received = parse_date(line['received_date'])
variation.end_date = parse_date(line['end_date'])
variation.service_type_other_details = line['Service_Type_Other_Details']
variation.registration_status = line['Registration Status']
variation.publication_text = line['Pub_Text']
variation.short_notice = line['Short Notice']
assert not variation.id
if not variation.id:
vars_to_create.append(variation)
# previous_line = line
# previous_line = None
# cardinals = set()
# use this file to work out if a registration has not been cancelled/expired
for line in self.get_rows(f"Bus_RegisteredOnly_{region}.csv"):
reg_no = line["Reg_No"]
reg = regs[reg_no]
if reg.registration_status != line["Registration Status"]:
reg.registration_status = line["Registration Status"]
reg.registered = True
# if previous_line and previous_line["Reg_No"] == reg_no:
# for key in line:
# prev = previous_line[key]
# value = line[key]
# if prev != value:
# cardinals.add(key)
# if key == 'TAO Covered BY Area':
# print(prev, value)
# previous_line = line
# print(cardinals)
Licence.objects.bulk_update(
lics_to_update,
["name", "trading_name", "traffic_area", "discs", "authorised_discs",
"description", "granted_date", "expiry_date", "address"]
)
Licence.objects.bulk_create(lics_to_create)
for registration in regs_to_create:
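            # re-assigning refreshes registration.licence_id now that
            # bulk_create has given newly created licences their primary keys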
registration.licence = registration.licence
Registration.objects.bulk_update(
regs_to_update,
["start_point", "finish_point", "via",
"subsidies_description", "subsidies_details",
"traffic_area_office_covered_by_area",
"service_number", "service_type_description",
"registration_status", "authority_description",
"registered"],
batch_size=1000
)
Registration.objects.bulk_create(regs_to_create)
Variation.objects.bulk_create(vars_to_create)
# Variation.objects.bulk_update(
# vars_to_update,
# ['date_received', 'end_date', 'service_type_other_details', 'registration_status', 'publication_text',
# 'short_notice']
# )
|
from unittest import TestCase
from MyCapytain.resources.collections.cts import XmlCtsTextInventoryMetadata, XmlCtsTextgroupMetadata, XmlCtsWorkMetadata, XmlCtsEditionMetadata, XmlCtsTranslationMetadata
from MyCapytain.resources.prototypes.cts.inventory import CtsTextgroupMetadata
with open("tests/testing_data/examples/getcapabilities.seneca.xml") as f:
SENECA = f.read()
class TestCollectionCtsInheritance(TestCase):
def test_types(self):
TI = XmlCtsTextInventoryMetadata.parse(resource=SENECA)
self.assertCountEqual(
[type(descendant) for descendant in TI.descendants],
[XmlCtsTextgroupMetadata] + [XmlCtsWorkMetadata] * 10 + [XmlCtsEditionMetadata] * 10,
"Descendant should be correctly parsed into correct types"
)
self.assertCountEqual(
[type(descendant) for descendant in TI.readableDescendants],
[XmlCtsWorkMetadata] * 0 + [XmlCtsEditionMetadata] * 10,
"Descendant should be correctly parsed into correct types and filtered when readable"
)
def test_title(self):
TI = XmlCtsTextInventoryMetadata.parse(resource=SENECA)
self.assertCountEqual(
[str(descendant.get_label()) for descendant in TI.descendants],
["Seneca, Lucius Annaeus", "de Ira", "de Vita Beata", "de consolatione ad Helviam", "de Constantia",
"de Tranquilitate Animi", "de Brevitate Vitae", "de consolatione ad Polybium",
"de consolatione ad Marciam", "de Providentia", "de Otio Sapientis", "de Ira, Moral essays Vol 2",
"de Vita Beata, Moral essays Vol 2", "de consolatione ad Helviam, Moral essays Vol 2",
"de Constantia, Moral essays Vol 2", "de Tranquilitate Animi, Moral essays Vol 2",
"de Brevitate Vitae, Moral essays Vol 2", "de consolatione ad Polybium, Moral essays Vol 2",
"de consolatione ad Marciam, Moral essays Vol 2", "de Providentia, Moral essays Vol 2",
"de Otio Sapientis, Moral essays Vol 2"],
"Title should be computed correctly : default should be set"
)
def test_new_object(self):
""" When creating an object with same urn, we should retrieve the same metadata"""
TI = XmlCtsTextInventoryMetadata.parse(resource=SENECA)
a = TI["urn:cts:latinLit:stoa0255.stoa012.perseus-lat2"].metadata
b = (CtsTextgroupMetadata("urn:cts:latinLit:stoa0255")).metadata
|
from urllib.parse import urlparse
import subprocess
import logging
import boto3
import airflow.exceptions
import airflow.hooks.base_hook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
import utils.helpers as helpers
class PostgresToS3Transfer(BaseOperator):
    '''Dumps a Postgres database to an S3 key
:param postgres_conn_id: Postgres Connection's ID.
:type postgres_conn_id: str
:param tables: List of tables to export (optional, default exports all
tables).
:type tables: list of str
:param s3_conn_id: S3 Connection's ID. It needs a JSON in the `extra` field
with `aws_access_key_id` and `aws_secret_access_key`
:type s3_conn_id: str
:param s3_url: S3 url (e.g. `s3://my_bucket/my_key.zip`) (templated)
:type s3_url: str
'''
template_fields = ('s3_url',)
@apply_defaults
def __init__(self, postgres_conn_id, s3_conn_id, s3_url, tables=None, *args, **kwargs):
super(PostgresToS3Transfer, self).__init__(*args, **kwargs)
self.postgres_conn_id = postgres_conn_id
self.tables = tables
self.s3_conn_id = s3_conn_id
self.s3_url = s3_url
def execute(self, context):
s3 = self._load_s3_connection(self.s3_conn_id)
s3_bucket, s3_key = self._parse_s3_url(self.s3_url)
command = [
'pg_dump',
'-Fc',
]
if self.tables:
tables_params = ['--table={}'.format(table) for table in self.tables]
command.extend(tables_params)
logging.info('Dumping database "%s" into "%s"', self.postgres_conn_id, self.s3_url)
logging.info('Command: %s <POSTGRES_URI>', ' '.join(command))
command.append(helpers.get_postgres_uri(self.postgres_conn_id))
with subprocess.Popen(command, stdout=subprocess.PIPE).stdout as dump_file:
s3.Bucket(s3_bucket) \
.upload_fileobj(dump_file, s3_key)
@staticmethod
def _parse_s3_url(s3_url):
parsed_url = urlparse(s3_url)
if not parsed_url.netloc:
raise airflow.exceptions.AirflowException('Please provide a bucket_name')
else:
bucket_name = parsed_url.netloc
key = parsed_url.path.strip('/')
return (bucket_name, key)
def _load_s3_connection(self, conn_id):
'''
Parses the S3 connection and returns a Boto3 resource.
        This should be implemented using the S3Hook, but it currently uses
boto (not boto3) which doesn't allow streaming.
:return: Boto3 resource
:rtype: boto3.resources.factory.s3.ServiceResource
'''
conn = airflow.hooks.base_hook.BaseHook.get_connection(conn_id)
extra_dejson = conn.extra_dejson
key_id = extra_dejson['aws_access_key_id']
access_key = extra_dejson['aws_secret_access_key']
s3 = boto3.resource(
's3',
aws_access_key_id=key_id,
aws_secret_access_key=access_key
)
return s3
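# Hypothetical task definition (connection IDs and URL are placeholders):
#
#     dump = PostgresToS3Transfer(
#         task_id='dump_db',
#         postgres_conn_id='my_postgres',
#         s3_conn_id='my_s3',
#         s3_url='s3://my-bucket/dumps/db.dump',
#         dag=dag,
#     )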
|
from weboob.capabilities.recipe import ICapRecipe, Recipe
from weboob.tools.backend import BaseBackend
from .browser import SevenFiftyGramsBrowser
import unicodedata
def strip_accents(s):
return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')
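# e.g. strip_accents(u'crème brûlée') -> u'creme brulee'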
__all__ = ['SevenFiftyGramsBackend']
class SevenFiftyGramsBackend(BaseBackend, ICapRecipe):
NAME = '750g'
MAINTAINER = u'Julien Veyssier'
EMAIL = 'julien.veyssier@aiur.fr'
VERSION = '0.h'
DESCRIPTION = u'750g French recipe website'
LICENSE = 'AGPLv3+'
BROWSER = SevenFiftyGramsBrowser
def get_recipe(self, id):
return self.browser.get_recipe(id)
def iter_recipes(self, pattern):
return self.browser.iter_recipes(strip_accents(unicode(pattern)).encode('utf-8'))
def fill_recipe(self, recipe, fields):
if 'nb_person' in fields or 'instructions' in fields:
rec = self.get_recipe(recipe.id)
recipe.picture_url = rec.picture_url
recipe.instructions = rec.instructions
recipe.ingredients = rec.ingredients
recipe.comments = rec.comments
recipe.author = rec.author
recipe.nb_person = rec.nb_person
recipe.cooking_time = rec.cooking_time
recipe.preparation_time = rec.preparation_time
return recipe
OBJECTS = {
Recipe: fill_recipe,
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('website', '0007_auto_20170422_1622'),
]
operations = [
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(max_length=256),
),
]
|
{
'name': 'Order BOM explode report',
'version': '0.1',
'category': 'Report',
'description': '''
Manage report for order product
''',
'author': 'Micronaet S.r.l. - Nicola Riolini',
'website': 'http://www.micronaet.it',
'license': 'AGPL-3',
'depends': [
'base',
'product',
'sale',
'purchase',
'mrp',
'report_aeroo',
'order_bom',
'bom_category',
'inventory_field', # for inventory field
'bom_order_utility', # Utility for filter
'bom_dynamic_structured', # for filter type category
'textilene_status', # TODO remove when moved company parameters
'production_accounting_external',
'production_forecast_order', # for forecast check
'no_parcels_count', # exclude no parcels product
'product_last_supplier', # last purchase supplier data (for filter)
],
'init_xml': [],
'demo': [],
'data': [
#'security/xml_groups.xml',
#'security/ir.model.access.csv',
'bom_explode_view.xml',
'report/explode_report.xml',
'wizard/report_component_status.xml',
#'scheduler.xml',
],
'active': False,
'installable': True,
'auto_install': False,
}
|
from django.apps import AppConfig
class PhotosAppConfig(AppConfig):
name = 'livinglots_usercontent.photos'
def ready(self):
try:
from actstream import registry
from . import signals
registry.register(self.get_model('Photo'))
except ImportError:
# django-activity-stream is not installed and that's okay
pass
|
{
"name" : "GII",
"version" : "1.0",
"depends" : ['sale','product'],
"author" : "Novasoft Consultancy Services Pvt. Ltd.",
'category' : 'Generic Modules/Others',
"description": """ GII - Management Module
""",
'website': 'http://www.novasoftindia.com',
'data': ['giisa.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
'application': True,
}
|
import sys
import codecs
input_file = sys.stdin # input = file containing the student answer.
oz_file = codecs.open("/task/task.oz", "r", "utf8") # Open the "correction framework file".
new_file = codecs.open("new_file.oz", "w","utf8") # Open the final file.
for line in oz_file:
    # "@@q1@@" is the arbitrary marker used to say "insert the student answer here".
    if "@@q1@@" in line:
        for input_line in input_file:
            if '\0' in input_line:
                input_line = input_line.strip('\0')
            new_file.write(input_line)  # Copy each line from the student answer to the final file.
    else:
        new_file.write(line)  # Copy each line from the "correction framework file" to the final file.
oz_file.close()
new_file.close()
|
import numpy as np
class PriceHistoryPack(object):
def __init__(self, input_seq_len, num_features, target_seq_len):
super(PriceHistoryPack, self).__init__()
self.sku_ids = []
self.XX = np.empty((0, input_seq_len, num_features))
self.YY = np.empty((0, target_seq_len))
self.sequence_lens = []
self.seq_mask = np.empty((0, input_seq_len))
def update(self, sku_id, inputs, targets, input_seq_len):
self.sku_ids.append(sku_id)
inputs_len = len(inputs)
self.sequence_lens.append(inputs_len)
# build current mask with zeros and ones
cur_mask = np.zeros(input_seq_len)
cur_mask[:inputs_len] = 1 # only the valid firsts should have the value of one
xx_padded = np.pad(inputs, ((0, input_seq_len - inputs_len), (0, 0)), mode='constant', constant_values=0.)
# here targets do NOT need to be padded because we do not have a sequence to sequence model
# yy_padded = np.pad(targets, (0, series_max_len - len(targets)), mode='constant', constant_values=0.)
assert len(xx_padded) == input_seq_len
self.XX = np.vstack((self.XX, xx_padded[np.newaxis]))
self.YY = np.vstack((self.YY, targets[np.newaxis]))
self.seq_mask = np.vstack((self.seq_mask, cur_mask[np.newaxis]))
def get_data(self, fraction=None, random_state=None):
# from sklearn.model_selection import train_test_split
skuIds, xx, yy, seqLens, seqMask = np.array(self.sku_ids), self.XX, self.YY, np.array(
self.sequence_lens), self.seq_mask
if fraction is None:
return skuIds, xx, yy, seqLens, seqMask
else:
random_state = np.random if random_state is None else random_state
cur_len = len(skuIds)
assert cur_len == len(xx) and cur_len == len(yy) and cur_len == len(seqLens) and cur_len == len(seqMask)
random_inds = random_state.choice(cur_len, int(cur_len * fraction))
return skuIds[random_inds], xx[random_inds], yy[random_inds], seqLens[random_inds], seqMask[random_inds]
def save(self, filepath, fraction=None, random_state=None):
if fraction is None:
np.savez(filepath, sku_ids=self.sku_ids, inputs=self.XX, targets=self.YY,
sequence_lengths=self.sequence_lens,
sequence_masks=self.seq_mask)
else:
skuIds, xx, yy, seqLens, seqMask = self.get_data(fraction=fraction, random_state=random_state)
np.savez(filepath, sku_ids=skuIds, inputs=xx, targets=yy, sequence_lengths=seqLens, sequence_masks=seqMask)
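# A minimal usage sketch under assumed shapes: each call to update() appends
# one (padded) input sequence, its target vector, a length and a 0/1 mask.
#
#     pack = PriceHistoryPack(input_seq_len=60, num_features=5, target_seq_len=30)
#     pack.update(sku_id=123, inputs=np.random.rand(42, 5),
#                 targets=np.random.rand(30), input_seq_len=60)
#     sku_ids, xx, yy, seq_lens, seq_mask = pack.get_data()
#     pack.save('pack.npz', fraction=0.5)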
|
"""
Unit tests covering the program listing and detail pages.
"""
import json
import re
from urlparse import urljoin
from uuid import uuid4
import mock
from bs4 import BeautifulSoup
from django.conf import settings
from django.urls import reverse, reverse_lazy
from django.test import override_settings
from lms.envs.test import CREDENTIALS_PUBLIC_SERVICE_URL
from openedx.core.djangoapps.catalog.tests.factories import CourseFactory, CourseRunFactory, ProgramFactory
from openedx.core.djangoapps.catalog.tests.mixins import CatalogIntegrationMixin
from openedx.core.djangoapps.credentials import STUDENT_RECORDS_FLAG
from openedx.core.djangoapps.programs.tests.mixins import ProgramsApiConfigMixin
from openedx.core.djangoapps.waffle_utils.testutils import override_waffle_flag
from openedx.core.djangolib.testing.utils import skip_unless_lms
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory as ModuleStoreCourseFactory
PROGRAMS_UTILS_MODULE = 'openedx.core.djangoapps.programs.utils'
@skip_unless_lms
@override_settings(MKTG_URLS={'ROOT': 'https://www.example.com'})
@mock.patch(PROGRAMS_UTILS_MODULE + '.get_programs')
class TestProgramListing(ProgramsApiConfigMixin, SharedModuleStoreTestCase):
"""Unit tests for the program listing page."""
shard = 4
maxDiff = None
password = 'test'
url = reverse_lazy('program_listing_view')
@classmethod
def setUpClass(cls):
super(TestProgramListing, cls).setUpClass()
cls.course = ModuleStoreCourseFactory()
course_run = CourseRunFactory(key=unicode(cls.course.id)) # pylint: disable=no-member
course = CourseFactory(course_runs=[course_run])
cls.first_program = ProgramFactory(courses=[course])
cls.second_program = ProgramFactory(courses=[course])
cls.data = sorted([cls.first_program, cls.second_program], key=cls.program_sort_key)
def setUp(self):
super(TestProgramListing, self).setUp()
self.user = UserFactory()
self.client.login(username=self.user.username, password=self.password)
@classmethod
def program_sort_key(cls, program):
"""
Helper function used to sort dictionaries representing programs.
"""
return program['title']
def load_serialized_data(self, response, key):
"""
Extract and deserialize serialized data from the response.
"""
pattern = re.compile(r'{key}: (?P<data>\[.*\])'.format(key=key))
match = pattern.search(response.content)
serialized = match.group('data')
return json.loads(serialized)
def assert_dict_contains_subset(self, superset, subset):
"""
Verify that the dict superset contains the dict subset.
Works like assertDictContainsSubset, deprecated since Python 3.2.
See: https://docs.python.org/2.7/library/unittest.html#unittest.TestCase.assertDictContainsSubset.
"""
superset_keys = set(superset.keys())
subset_keys = set(subset.keys())
intersection = {key: superset[key] for key in superset_keys & subset_keys}
self.assertEqual(subset, intersection)
def test_login_required(self, mock_get_programs):
"""
Verify that login is required to access the page.
"""
self.create_programs_config()
mock_get_programs.return_value = self.data
self.client.logout()
response = self.client.get(self.url)
self.assertRedirects(
response,
'{}?next={}'.format(reverse('signin_user'), self.url)
)
self.client.login(username=self.user.username, password=self.password)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def test_404_if_disabled(self, _mock_get_programs):
"""
Verify that the page 404s if disabled.
"""
self.create_programs_config(enabled=False)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 404)
def test_empty_state(self, mock_get_programs):
"""
Verify that the response contains no programs data when no programs are engaged.
"""
self.create_programs_config()
mock_get_programs.return_value = self.data
response = self.client.get(self.url)
self.assertContains(response, 'programsData: []')
def test_programs_listed(self, mock_get_programs):
"""
Verify that the response contains accurate programs data when programs are engaged.
"""
self.create_programs_config()
mock_get_programs.return_value = self.data
CourseEnrollmentFactory(user=self.user, course_id=self.course.id) # pylint: disable=no-member
response = self.client.get(self.url)
actual = self.load_serialized_data(response, 'programsData')
actual = sorted(actual, key=self.program_sort_key)
for index, actual_program in enumerate(actual):
expected_program = self.data[index]
self.assert_dict_contains_subset(actual_program, expected_program)
def test_program_discovery(self, mock_get_programs):
"""
Verify that a link to a programs marketing page appears in the response.
"""
self.create_programs_config(marketing_path='bar')
mock_get_programs.return_value = self.data
marketing_root = urljoin(settings.MKTG_URLS.get('ROOT'), 'bar').rstrip('/')
response = self.client.get(self.url)
self.assertContains(response, marketing_root)
def test_links_to_detail_pages(self, mock_get_programs):
"""
Verify that links to detail pages are present.
"""
self.create_programs_config()
mock_get_programs.return_value = self.data
CourseEnrollmentFactory(user=self.user, course_id=self.course.id) # pylint: disable=no-member
response = self.client.get(self.url)
actual = self.load_serialized_data(response, 'programsData')
actual = sorted(actual, key=self.program_sort_key)
for index, actual_program in enumerate(actual):
expected_program = self.data[index]
expected_url = reverse('program_details_view', kwargs={'program_uuid': expected_program['uuid']})
self.assertEqual(actual_program['detail_url'], expected_url)
@skip_unless_lms
@mock.patch(PROGRAMS_UTILS_MODULE + '.get_programs')
@override_waffle_flag(STUDENT_RECORDS_FLAG, active=True)
class TestProgramDetails(ProgramsApiConfigMixin, CatalogIntegrationMixin, SharedModuleStoreTestCase):
"""Unit tests for the program details page."""
shard = 4
program_uuid = str(uuid4())
password = 'test'
url = reverse_lazy('program_details_view', kwargs={'program_uuid': program_uuid})
@classmethod
def setUpClass(cls):
super(TestProgramDetails, cls).setUpClass()
modulestore_course = ModuleStoreCourseFactory()
course_run = CourseRunFactory(key=unicode(modulestore_course.id))
course = CourseFactory(course_runs=[course_run])
cls.data = ProgramFactory(uuid=cls.program_uuid, courses=[course])
def setUp(self):
super(TestProgramDetails, self).setUp()
self.user = UserFactory()
self.client.login(username=self.user.username, password=self.password)
def assert_program_data_present(self, response):
"""Verify that program data is present."""
self.assertContains(response, 'programData')
self.assertContains(response, 'urls')
self.assertContains(response,
'"program_record_url": "{}/records/programs/'.format(CREDENTIALS_PUBLIC_SERVICE_URL))
self.assertContains(response, 'program_listing_url')
self.assertContains(response, self.data['title'])
self.assert_programs_tab_present(response)
def assert_programs_tab_present(self, response):
"""Verify that the programs tab is present in the nav."""
soup = BeautifulSoup(response.content, 'html.parser')
self.assertTrue(
any(soup.find_all('a', class_='tab-nav-link', href=reverse('program_listing_view')))
)
def test_login_required(self, mock_get_programs):
"""
Verify that login is required to access the page.
"""
self.create_programs_config()
catalog_integration = self.create_catalog_integration()
UserFactory(username=catalog_integration.service_username)
mock_get_programs.return_value = self.data
self.client.logout()
response = self.client.get(self.url)
self.assertRedirects(
response,
'{}?next={}'.format(reverse('signin_user'), self.url)
)
self.client.login(username=self.user.username, password=self.password)
with mock.patch('lms.djangoapps.learner_dashboard.programs.get_certificates') as certs:
certs.return_value = [{'type': 'program', 'url': '/'}]
response = self.client.get(self.url)
self.assert_program_data_present(response)
def test_404_if_disabled(self, _mock_get_programs):
"""
Verify that the page 404s if disabled.
"""
self.create_programs_config(enabled=False)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 404)
def test_404_if_no_data(self, mock_get_programs):
"""Verify that the page 404s if no program data is found."""
self.create_programs_config()
mock_get_programs.return_value = None
response = self.client.get(self.url)
self.assertEqual(response.status_code, 404)
|
from django.http import HttpResponse
from openpyxl import Workbook
from openpyxl.writer.excel import save_virtual_workbook
from openpyxl.styles import Color, Style, PatternFill, Font, colors
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from base import models as mdl
from base.models.enums import exam_enrollment_justification_type
HEADER = ['academic_year', 'session_title', 'learning_unit', 'program', 'registration_number', 'lastname', 'firstname',
'email', 'numbered_score', 'justification', 'end_date']
JUSTIFICATION_ALIASES = {
    exam_enrollment_justification_type.ABSENCE_JUSTIFIED: "M",
    exam_enrollment_justification_type.ABSENCE_UNJUSTIFIED: "S",
    exam_enrollment_justification_type.CHEATING: "T",
}
def export_xls(exam_enrollments):
workbook = Workbook()
worksheet = workbook.active
worksheet.append([str(exam_enrollments[0].learning_unit_enrollment.learning_unit_year)])
    worksheet.append(['Session: %s' % exam_enrollments[0].session_exam.number_session])
    worksheet.append([''])
    __display_creation_date_with_message_about_state(worksheet, row_number=4)
    __display_warning_about_students_deliberated(worksheet, row_number=5)
    worksheet.append([''])
    __display_legends(worksheet)
    worksheet.append([''])
__columns_resizing(worksheet)
header_translate_list = [str(_(elem)) for elem in HEADER]
worksheet.append(header_translate_list)
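    # The header just appended lands on row 11; row_number is incremented right
    # after each data append so it always points at the row being coloured.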
row_number = 11
for exam_enroll in exam_enrollments:
student = exam_enroll.learning_unit_enrollment.student
offer = exam_enroll.learning_unit_enrollment.offer
person = mdl.person.find_by_id(student.person.id)
end_date = __get_session_exam_deadline(exam_enroll)
score = None
if exam_enroll.score_final is not None:
if exam_enroll.session_exam.learning_unit_year.decimal_scores:
score = "{0:.2f}".format(exam_enroll.score_final)
else:
score = "{0:.0f}".format(exam_enroll.score_final)
justification = JUSTIFICATION_ALIASES.get(exam_enroll.justification_final, "")
worksheet.append([str(exam_enroll.learning_unit_enrollment.learning_unit_year.academic_year),
str(exam_enroll.session_exam.number_session),
exam_enroll.session_exam.learning_unit_year.acronym,
offer.acronym,
student.registration_id,
person.last_name,
person.first_name,
person.email,
score,
str(justification),
end_date])
row_number += 1
__coloring_non_editable(worksheet, row_number, score, exam_enroll.justification_final)
lst_exam_enrollments = list(exam_enrollments)
number_session = lst_exam_enrollments[0].session_exam.number_session
learn_unit_acronym = lst_exam_enrollments[0].session_exam.learning_unit_year.acronym
academic_year = lst_exam_enrollments[0].learning_unit_enrollment.learning_unit_year.academic_year
filename = "session_%s_%s_%s.xlsx" % (str(academic_year.year), str(number_session), learn_unit_acronym)
response = HttpResponse(save_virtual_workbook(workbook), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
def __columns_resizing(ws):
    """
    Definition of the column sizes
    """
    col_academic_year = ws.column_dimensions['A']
    col_academic_year.width = 18
    col_learning_unit = ws.column_dimensions['C']
    col_learning_unit.width = 18
    col_registration_number = ws.column_dimensions['E']
    col_registration_number.width = 18
    col_last_name = ws.column_dimensions['F']
    col_last_name.width = 25
    col_first_name = ws.column_dimensions['G']
    col_first_name.width = 25
    col_email = ws.column_dimensions['H']
    col_email.width = 30
    col_score = ws.column_dimensions['I']
    col_score.width = 15
    col_justification = ws.column_dimensions['J']
    col_justification.width = 15
    col_end_date = ws.column_dimensions['K']
    col_end_date.width = 15
def __coloring_non_editable(ws, row_number, score, justification):
"""
Coloring of the non-editable columns
"""
pattern_fill_grey = PatternFill(patternType='solid', fgColor=Color('C1C1C1'))
style_no_modification = Style(fill=pattern_fill_grey)
column_number = 1
while column_number < 12:
if column_number < 9 or column_number > 10:
ws.cell(row=row_number, column=column_number).style = style_no_modification
else:
if not(score is None and justification is None):
ws.cell(row=row_number, column=9).style = style_no_modification
ws.cell(row=row_number, column=10).style = style_no_modification
column_number += 1
def __display_creation_date_with_message_about_state(ws, row_number):
date_format = str(_('date_format'))
printing_date = timezone.now()
printing_date = printing_date.strftime(date_format)
ws.cell(row=row_number, column=1).value = str('%s' % (_('warn_user_data_can_change') % printing_date))
ws.cell(row=row_number, column=1).font = Font(color=colors.RED)
def __display_warning_about_students_deliberated(ws, row_number):
ws.cell(row=row_number, column=1).value = str(_('students_deliberated_are_not_shown'))
ws.cell(row=row_number, column=1).font = Font(color=colors.RED)
def __display_legends(ws):
ws.append([
str(_('justification')),
str(_('justification_values_accepted') % mdl.exam_enrollment.justification_label_authorized())
])
ws.append([
str(''),
str(_('justification_other_values') % justification_other_values())
])
ws.append([
str(_('numbered_score')),
str(_('score_legend') % "0 - 20")
])
def justification_other_values():
return "%s, %s" % (_('unjustified_absence_export_legend'),
_('justified_absence_export_legend'))
def __get_session_exam_deadline(exam_enroll):
date_format = str(_('date_format'))
deadline = None
session_exam_deadline = mdl.exam_enrollment.get_session_exam_deadline(exam_enroll)
if session_exam_deadline:
deadline = session_exam_deadline.deadline_tutor_computed if session_exam_deadline.deadline_tutor_computed else\
session_exam_deadline.deadline
return deadline.strftime(date_format) if deadline else "-"
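# A minimal usage sketch (hedged; the view and query helper below are
# hypothetical, not part of this module). export_xls() already returns a
# complete HttpResponse, so a Django view only needs to fetch the
# enrollments and delegate:
#
# def scores_sheet(request, session_exam_id):
#     exam_enrollments = find_exam_enrollments(session_exam_id)  # hypothetical helper
#     return export_xls(list(exam_enrollments))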
|
from odoo.osv import expression
from odoo.addons.sale_timesheet.models.account import AccountAnalyticLine
def _timesheet_get_portal_domain(self):
""" WE revert this functionality of odoo. We want to show details of ordered quantities also
Only the timesheets with a product invoiced on delivered quantity are concerned.
since in ordered quantity, the timesheet quantity is not invoiced,
thus there is no meaning of showing invoice with ordered quantity.
"""
domain = super(AccountAnalyticLine, self)._timesheet_get_portal_domain()
return expression.AND(
[domain, [('timesheet_invoice_type', 'in', ['billable_time', 'non_billable', 'billable_fixed'])]])
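# Rebinding the function on the imported class applies the override in place,
# without declaring a new model: a deliberate monkey patch.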
AccountAnalyticLine._timesheet_get_portal_domain = _timesheet_get_portal_domain
|
class Backstab:
pass
|
import logging
import random
import time
import uuid
from openerp import SUPERUSER_ID
import simplejson
from openerp import api
from openerp import tools
from openerp.osv import fields, osv
from openerp.osv import expression
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval
import openerp
_logger = logging.getLogger(__name__)
FULL_ACCESS = ('perm_read', 'perm_write', 'perm_create', 'perm_unlink')
READ_WRITE_ACCESS = ('perm_read', 'perm_write')
READ_ONLY_ACCESS = ('perm_read',)
UID_ROOT = 1
DOMAIN_ALL = [(1, '=', 1)]
RANDOM_PASS_CHARACTERS = 'aaaabcdeeeefghjkmnpqrstuvwxyzAAAABCDEEEEFGHJKLMNPQRSTUVWXYZ23456789'
def generate_random_pass():
return ''.join(random.sample(RANDOM_PASS_CHARACTERS,10))
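# Note: random.sample() draws 10 distinct positions (not distinct characters)
# from the string above, so the repeated letters both weight the common
# characters and still allow duplicates in the result. On Python 3, a hedged
# and stronger alternative would be the `secrets` module:
# ''.join(secrets.choice(RANDOM_PASS_CHARACTERS) for _ in range(10))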
class share_wizard(osv.TransientModel):
_name = 'share.wizard'
_description = 'Share Wizard'
def _assert(self, condition, error_message, context=None):
"""Raise a user error with the given message if condition is not met.
The error_message should have been translated with _().
"""
if not condition:
raise osv.except_osv(_('Sharing access cannot be created.'), error_message)
def has_group(self, cr, uid, module, group_xml_id, context=None):
"""Returns True if current user is a member of the group identified by the module, group_xml_id pair."""
# if the group was deleted or does not exist, we say NO (better safe than sorry)
try:
model, group_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, module, group_xml_id)
except ValueError:
return False
return group_id in self.pool.get('res.users').read(cr, uid, [uid], ['groups_id'], context=context)[0]['groups_id']
def has_share(self, cr, uid, unused_param, context=None):
return self.has_group(cr, uid, module='base', group_xml_id='group_no_one', context=context)
def _user_type_selection(self, cr, uid, context=None):
"""Selection values may be easily overridden/extended via inheritance"""
return [('embedded', _('Direct link or embed code')), ('emails',_('Emails')), ]
"""Override of create() to auto-compute the action name"""
def create(self, cr, uid, values, context=None):
if 'action_id' in values and not 'name' in values:
action = self.pool.get('ir.actions.actions').browse(cr, uid, values['action_id'], context=context)
values['name'] = action.name
return super(share_wizard,self).create(cr, uid, values, context=context)
@api.cr_uid_ids_context
def share_url_template(self, cr, uid, _ids, context=None):
        # NOTE: takes _ids as a parameter to allow usage through browse_record objects
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url', default='', context=context)
if base_url:
base_url += '/login?db=%(dbname)s&login=%(login)s&key=%(password)s'
extra = context and context.get('share_url_template_extra_arguments')
if extra:
base_url += '&' + '&'.join('%s=%%(%s)s' % (x,x) for x in extra)
hash_ = context and context.get('share_url_template_hash_arguments')
if hash_:
base_url += '#' + '&'.join('%s=%%(%s)s' % (x,x) for x in hash_)
return base_url
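    # Illustrative expansion (hypothetical values): with
    # share_url_template_extra_arguments=['foo'] and
    # share_url_template_hash_arguments=['action'] in the context, the template
    # becomes
    # .../login?db=%(dbname)s&login=%(login)s&key=%(password)s&foo=%(foo)s#action=%(action)s
    # and is later filled in with the %-operator (see _share_root_url below).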
def _share_root_url(self, cr, uid, ids, _fieldname, _args, context=None):
result = dict.fromkeys(ids, '')
data = dict(dbname=cr.dbname, login='', password='')
for this in self.browse(cr, uid, ids, context=context):
result[this.id] = this.share_url_template() % data
return result
def _generate_embedded_code(self, wizard, options=None):
cr, uid, context = wizard.env.args
if options is None:
options = {}
js_options = {}
title = options['title'] if 'title' in options else wizard.embed_option_title
search = (options['search'] if 'search' in options else wizard.embed_option_search) if wizard.access_mode != 'readonly' else False
if not title:
js_options['display_title'] = False
if search:
js_options['search_view'] = True
js_options_str = (', ' + simplejson.dumps(js_options)) if js_options else ''
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url', default=None, context=context)
user = wizard.result_line_ids[0]
return """
<script type="text/javascript" src="%(base_url)s/web/webclient/js"></script>
<script type="text/javascript">
new openerp.init(%(init)s).web.embed(%(server)s, %(dbname)s, %(login)s, %(password)s,%(action)d%(options)s);
</script> """ % {
'init': simplejson.dumps(openerp.conf.server_wide_modules),
'base_url': base_url or '',
'server': simplejson.dumps(base_url),
'dbname': simplejson.dumps(cr.dbname),
'login': simplejson.dumps(user.login),
'password': simplejson.dumps(user.password),
'action': user.user_id.action_id.id,
'options': js_options_str,
}
def _embed_code(self, cr, uid, ids, _fn, _args, context=None):
result = dict.fromkeys(ids, '')
for this in self.browse(cr, uid, ids, context=context):
result[this.id] = self._generate_embedded_code(this)
return result
def _embed_url(self, cr, uid, ids, _fn, _args, context=None):
if context is None:
context = {}
result = dict.fromkeys(ids, '')
for this in self.browse(cr, uid, ids, context=context):
if this.result_line_ids:
ctx = dict(context, share_url_template_hash_arguments=['action'])
user = this.result_line_ids[0]
data = dict(dbname=cr.dbname, login=user.login, password=user.password, action=this.action_id.id)
result[this.id] = this.share_url_template(context=ctx) % data
return result
_columns = {
'action_id': fields.many2one('ir.actions.act_window', 'Action to share', required=True,
help="The action that opens the screen containing the data you wish to share."),
'view_type': fields.char('Current View Type', required=True),
'domain': fields.char('Domain', help="Optional domain for further data filtering"),
'user_type': fields.selection(lambda s, *a, **k: s._user_type_selection(*a, **k),'Sharing method', required=True,
help="Select the type of user(s) you would like to share data with."),
'new_users': fields.text("Emails"),
'email_1': fields.char('New user email', size=64),
'email_2': fields.char('New user email', size=64),
'email_3': fields.char('New user email', size=64),
'invite': fields.boolean('Invite users to OpenSocial record'),
'access_mode': fields.selection([('readonly','Can view'),('readwrite','Can edit')],'Access Mode', required=True,
help="Access rights to be granted on the shared documents."),
'result_line_ids': fields.one2many('share.wizard.result.line', 'share_wizard_id', 'Summary', readonly=True),
'share_root_url': fields.function(_share_root_url, string='Share Access URL', type='char', readonly=True,
help='Main access page for users that are granted shared access'),
'name': fields.char('Share Title', required=True, help="Title for the share (displayed to users as menu and shortcut name)"),
'record_name': fields.char('Record name', help="Name of the shared record, if sharing a precise record"),
'message': fields.text("Personal Message", help="An optional personal message, to be included in the email notification."),
'embed_code': fields.function(_embed_code, type='text', string='Code',
help="Embed this code in your documents to provide a link to the "\
"shared document."),
'embed_option_title': fields.boolean('Display title'),
'embed_option_search': fields.boolean('Display search view'),
'embed_url': fields.function(_embed_url, string='Share URL', size=512, type='char', readonly=True),
}
_defaults = {
'view_type': 'page',
'user_type' : 'embedded',
'invite': False,
'domain': lambda self, cr, uid, context, *a: context.get('domain', '[]'),
'action_id': lambda self, cr, uid, context, *a: context.get('action_id'),
'access_mode': 'readwrite',
'embed_option_title': True,
'embed_option_search': True,
}
def has_email(self, cr, uid, context=None):
return bool(self.pool.get('res.users').browse(cr, uid, uid, context=context).email)
def go_step_1(self, cr, uid, ids, context=None):
wizard_data = self.browse(cr,uid,ids,context)[0]
if wizard_data.user_type == 'emails' and not self.has_email(cr, uid, context=context):
raise osv.except_osv(_('No email address configured'),
_('You must configure your email address in the user preferences before using the Share button.'))
model, res_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'share', 'action_share_wizard_step1')
action = self.pool[model].read(cr, uid, [res_id], context=context)[0]
action['res_id'] = ids[0]
action.pop('context', '')
return action
def _create_share_group(self, cr, uid, wizard_data, context=None):
group_obj = self.pool.get('res.groups')
share_group_name = '%s: %s (%d-%s)' %('Shared', wizard_data.name, uid, time.time())
# create share group without putting admin in it
return group_obj.create(cr, UID_ROOT, {'name': share_group_name, 'share': True}, {'noadmin': True})
def _create_new_share_users(self, cr, uid, wizard_data, group_id, context=None):
"""Create one new res.users record for each email address provided in
wizard_data.new_users, ignoring already existing users.
Populates wizard_data.result_line_ids with one new line for
each user (existing or not). New users will also have a value
for the password field, so they can receive it by email.
Returns the ids of the created users, and the ids of the
ignored, existing ones."""
context = dict(context or {})
user_obj = self.pool.get('res.users')
current_user = user_obj.browse(cr, UID_ROOT, uid, context=context)
# modify context to disable shortcuts when creating share users
context['noshortcut'] = True
context['no_reset_password'] = True
created_ids = []
existing_ids = []
if wizard_data.user_type == 'emails':
# get new user list from email data
new_users = (wizard_data.new_users or '').split('\n')
new_users += [wizard_data.email_1 or '', wizard_data.email_2 or '', wizard_data.email_3 or '']
for new_user in new_users:
# Ignore blank lines
new_user = new_user.strip()
if not new_user: continue
# Ignore the user if it already exists.
if not wizard_data.invite:
existing = user_obj.search(cr, UID_ROOT, [('login', '=', new_user)])
else:
existing = user_obj.search(cr, UID_ROOT, [('email', '=', new_user)])
existing_ids.extend(existing)
if existing:
new_line = { 'user_id': existing[0],
'newly_created': False}
wizard_data.write({'result_line_ids': [(0,0,new_line)]})
continue
new_pass = generate_random_pass()
user_id = user_obj.create(cr, UID_ROOT, {
'login': new_user,
'password': new_pass,
'name': new_user,
'email': new_user,
'groups_id': [(6,0,[group_id])],
'company_id': current_user.company_id.id,
'company_ids': [(6, 0, [current_user.company_id.id])],
}, context)
new_line = { 'user_id': user_id,
'password': new_pass,
'newly_created': True}
wizard_data.write({'result_line_ids': [(0,0,new_line)]})
created_ids.append(user_id)
elif wizard_data.user_type == 'embedded':
new_login = 'embedded-%s' % (uuid.uuid4().hex,)
new_pass = generate_random_pass()
user_id = user_obj.create(cr, UID_ROOT, {
'login': new_login,
'password': new_pass,
'name': new_login,
'groups_id': [(6,0,[group_id])],
'company_id': current_user.company_id.id,
'company_ids': [(6, 0, [current_user.company_id.id])],
}, context)
new_line = { 'user_id': user_id,
'password': new_pass,
'newly_created': True}
wizard_data.write({'result_line_ids': [(0,0,new_line)]})
created_ids.append(user_id)
return created_ids, existing_ids
def _create_action(self, cr, uid, values, context=None):
if context is None:
context = {}
new_context = context.copy()
for key in context:
if key.startswith('default_'):
del new_context[key]
action_id = self.pool.get('ir.actions.act_window').create(cr, UID_ROOT, values, new_context)
return action_id
def _cleanup_action_context(self, context_str, user_id):
"""Returns a dict representing the context_str evaluated (safe_eval) as
a dict where items that are not useful for shared actions
have been removed. If the evaluation of context_str as a
dict fails, context_str is returned unaltered.
:param user_id: the integer uid to be passed as 'uid' in the
evaluation context
"""
result = False
if context_str:
try:
context = safe_eval(context_str, tools.UnquoteEvalContext(), nocopy=True)
result = dict(context)
for key in context:
# Remove all context keys that seem to toggle default
# filters based on the current user, as it makes no sense
# for shared users, who would not see any data by default.
if key and key.startswith('search_default_') and 'user_id' in key:
result.pop(key)
except Exception:
# Note: must catch all exceptions, as UnquoteEvalContext may cause many
# different exceptions, as it shadows builtins.
_logger.debug("Failed to cleanup action context as it does not parse server-side", exc_info=True)
result = context_str
return result
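    # Example (illustrative): a context string like
    # "{'search_default_user_id': uid, 'group_by': 'state'}" comes back as
    # {'group_by': 'state'}: the user-specific default filter is dropped so
    # share users are not left with an empty list view by default.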
def _shared_action_def(self, cr, uid, wizard_data, context=None):
copied_action = wizard_data.action_id
if wizard_data.access_mode == 'readonly':
view_mode = wizard_data.view_type
view_id = copied_action.view_id.id if copied_action.view_id.type == wizard_data.view_type else False
else:
view_mode = copied_action.view_mode
view_id = copied_action.view_id.id
action_def = {
'name': wizard_data.name,
'domain': copied_action.domain,
'context': self._cleanup_action_context(wizard_data.action_id.context, uid),
'res_model': copied_action.res_model,
'view_mode': view_mode,
'view_type': copied_action.view_type,
'search_view_id': copied_action.search_view_id.id if wizard_data.access_mode != 'readonly' else False,
'view_id': view_id,
'auto_search': True,
}
if copied_action.view_ids:
action_def['view_ids'] = [(0,0,{'sequence': x.sequence,
'view_mode': x.view_mode,
'view_id': x.view_id.id })
for x in copied_action.view_ids
if (wizard_data.access_mode != 'readonly' or x.view_mode == wizard_data.view_type)
]
return action_def
def _setup_action_and_shortcut(self, cr, uid, wizard_data, user_ids, make_home, context=None):
"""Create a shortcut to reach the shared data, as well as the corresponding action, for
each user in ``user_ids``, and assign it as their home action if ``make_home`` is True.
Meant to be overridden for special cases.
"""
        values = self._shared_action_def(cr, uid, wizard_data, context=context)
user_obj = self.pool.get('res.users')
for user_id in user_ids:
action_id = self._create_action(cr, user_id, values)
if make_home:
# We do this only for new share users, as existing ones already have their initial home
# action. Resetting to the default menu does not work well as the menu is rather empty
# and does not contain the shortcuts in most cases.
user_obj.write(cr, UID_ROOT, [user_id], {'action_id': action_id})
def _get_recursive_relations(self, cr, uid, model, ttypes, relation_fields=None, suffix=None, context=None):
"""Returns list of tuples representing recursive relationships of type ``ttypes`` starting from
model with ID ``model_id``.
:param model: browsable model to start loading relationships from
:param ttypes: list of relationship types to follow (e.g: ['one2many','many2many'])
:param relation_fields: list of previously followed relationship tuples - to avoid duplicates
during recursion
:param suffix: optional suffix to append to the field path to reach the main object
"""
if relation_fields is None:
relation_fields = []
local_rel_fields = []
models = [x[1].model for x in relation_fields]
model_obj = self.pool.get('ir.model')
model_osv = self.pool[model.model]
for field in model_osv._fields.itervalues():
ftype = field.type
relation_field = None
if ftype in ttypes and field.comodel_name not in models:
relation_model_id = model_obj.search(cr, UID_ROOT, [('model','=',field.comodel_name)])[0]
relation_model_browse = model_obj.browse(cr, UID_ROOT, relation_model_id, context=context)
relation_osv = self.pool[field.comodel_name]
#skip virtual one2many fields (related, ...) as there is no reverse relationship
if ftype == 'one2many' and field.inverse_name:
# don't record reverse path if it's not a real m2o (that happens, but rarely)
dest_fields = relation_osv._fields
reverse_rel = field.inverse_name
if reverse_rel in dest_fields and dest_fields[reverse_rel].type == 'many2one':
relation_field = ('%s.%s'%(reverse_rel, suffix)) if suffix else reverse_rel
local_rel_fields.append((relation_field, relation_model_browse))
for parent in relation_osv._inherits:
if parent not in models:
parent_model = self.pool[parent]
parent_fields = parent_model._fields
parent_model_browse = model_obj.browse(cr, UID_ROOT,
model_obj.search(cr, UID_ROOT, [('model','=',parent)]))[0]
if relation_field and field.inverse_name in parent_fields:
# inverse relationship is available in the parent
local_rel_fields.append((relation_field, parent_model_browse))
else:
# TODO: can we setup a proper rule to restrict inherited models
# in case the parent does not contain the reverse m2o?
local_rel_fields.append((None, parent_model_browse))
if relation_model_id != model.id and ftype in ['one2many', 'many2many']:
local_rel_fields += self._get_recursive_relations(cr, uid, relation_model_browse,
[ftype], relation_fields + local_rel_fields, suffix=relation_field, context=context)
return local_rel_fields
def _get_relationship_classes(self, cr, uid, model, context=None):
"""Computes the *relationship classes* reachable from the given
model. The 4 relationship classes are:
- [obj0]: the given model itself (and its parents via _inherits, if any)
- [obj1]: obj0 and all other models recursively accessible from
obj0 via one2many relationships
- [obj2]: obj0 and all other models recursively accessible from
obj0 via one2many and many2many relationships
- [obj3]: all models recursively accessible from obj1 via many2one
relationships
Each class is returned as a list of pairs [(field,model_browse)], where
``model`` is the browse_record of a reachable ir.model, and ``field`` is
the dot-notation reverse relationship path coming from that model to obj0,
or None if there is no reverse path.
:return: ([obj0], [obj1], [obj2], [obj3])
"""
# obj0 class and its parents
obj0 = [(None, model)]
model_obj = self.pool[model.model]
ir_model_obj = self.pool.get('ir.model')
for parent in model_obj._inherits:
parent_model_browse = ir_model_obj.browse(cr, UID_ROOT,
ir_model_obj.search(cr, UID_ROOT, [('model','=',parent)]))[0]
obj0 += [(None, parent_model_browse)]
obj1 = self._get_recursive_relations(cr, uid, model, ['one2many'], relation_fields=obj0, context=context)
obj2 = self._get_recursive_relations(cr, uid, model, ['one2many', 'many2many'], relation_fields=obj0, context=context)
obj3 = self._get_recursive_relations(cr, uid, model, ['many2one'], relation_fields=obj0, context=context)
for dummy, model in obj1:
obj3 += self._get_recursive_relations(cr, uid, model, ['many2one'], relation_fields=obj0, context=context)
return obj0, obj1, obj2, obj3
def _get_access_map_for_groups_and_models(self, cr, uid, group_ids, model_ids, context=None):
model_access_obj = self.pool.get('ir.model.access')
user_right_ids = model_access_obj.search(cr, uid,
[('group_id', 'in', group_ids), ('model_id', 'in', model_ids)],
context=context)
user_access_matrix = {}
if user_right_ids:
for access_right in model_access_obj.browse(cr, uid, user_right_ids, context=context):
access_line = user_access_matrix.setdefault(access_right.model_id.model, set())
for perm in FULL_ACCESS:
if getattr(access_right, perm, 0):
access_line.add(perm)
return user_access_matrix
def _add_access_rights_for_share_group(self, cr, uid, group_id, mode, fields_relations, context=None):
"""Adds access rights to group_id on object models referenced in ``fields_relations``,
intersecting with access rights of current user to avoid granting too much rights
"""
model_access_obj = self.pool.get('ir.model.access')
user_obj = self.pool.get('res.users')
target_model_ids = [x[1].id for x in fields_relations]
perms_to_add = (mode == 'readonly') and READ_ONLY_ACCESS or READ_WRITE_ACCESS
current_user = user_obj.browse(cr, uid, uid, context=context)
current_user_access_map = self._get_access_map_for_groups_and_models(cr, uid,
[x.id for x in current_user.groups_id], target_model_ids, context=context)
group_access_map = self._get_access_map_for_groups_and_models(cr, uid,
[group_id], target_model_ids, context=context)
_logger.debug("Current user access matrix: %r", current_user_access_map)
_logger.debug("New group current access matrix: %r", group_access_map)
# Create required rights if allowed by current user rights and not
# already granted
for dummy, model in fields_relations:
            # mail.message is transversal: it should not directly receive the access rights
if model.model in ['mail.message']: continue
values = {
'name': _('Copied access for sharing'),
'group_id': group_id,
'model_id': model.id,
}
current_user_access_line = current_user_access_map.get(model.model,set())
existing_group_access_line = group_access_map.get(model.model,set())
need_creation = False
for perm in perms_to_add:
if perm in current_user_access_line \
and perm not in existing_group_access_line:
values.update({perm:True})
group_access_map.setdefault(model.model, set()).add(perm)
need_creation = True
if need_creation:
model_access_obj.create(cr, UID_ROOT, values)
_logger.debug("Creating access right for model %s with values: %r", model.model, values)
def _link_or_copy_current_user_rules(self, cr, current_user, group_id, fields_relations, context=None):
rule_obj = self.pool.get('ir.rule')
rules_done = set()
for group in current_user.groups_id:
for dummy, model in fields_relations:
for rule in group.rule_groups:
if rule.id in rules_done:
continue
rules_done.add(rule.id)
if rule.model_id.id == model.id:
if 'user.' in rule.domain_force:
# Above pattern means there is likely a condition
# specific to current user, so we must copy the rule using
# the evaluated version of the domain.
# And it's better to copy one time too much than too few
rule_obj.copy(cr, UID_ROOT, rule.id, default={
'name': '%s %s' %(rule.name, _('(Copy for sharing)')),
'groups': [(6,0,[group_id])],
'domain_force': rule.domain, # evaluated version!
})
_logger.debug("Copying rule %s (%s) on model %s with domain: %s", rule.name, rule.id, model.model, rule.domain_force)
else:
# otherwise we can simply link the rule to keep it dynamic
rule_obj.write(cr, SUPERUSER_ID, [rule.id], {
'groups': [(4,group_id)]
})
_logger.debug("Linking rule %s (%s) on model %s with domain: %s", rule.name, rule.id, model.model, rule.domain_force)
def _check_personal_rule_or_duplicate(self, cr, group_id, rule, context=None):
"""Verifies that the given rule only belongs to the given group_id, otherwise
duplicate it for the current group, and unlink the previous one.
The duplicated rule has the original domain copied verbatim, without
any evaluation.
Returns the final rule to use (browse_record), either the original one if it
only belongs to this group, or the copy."""
if len(rule.groups) == 1:
return rule
# duplicate it first:
rule_obj = self.pool.get('ir.rule')
new_id = rule_obj.copy(cr, UID_ROOT, rule.id,
default={
'name': '%s %s' %(rule.name, _('(Duplicated for modified sharing permissions)')),
'groups': [(6,0,[group_id])],
'domain_force': rule.domain_force, # non evaluated!
})
_logger.debug("Duplicating rule %s (%s) (domain: %s) for modified access ", rule.name, rule.id, rule.domain_force)
# then disconnect from group_id:
rule.write({'groups':[(3,group_id)]}) # disconnects, does not delete!
return rule_obj.browse(cr, UID_ROOT, new_id, context=context)
def _create_or_combine_sharing_rule(self, cr, current_user, wizard_data, group_id, model_id, domain, restrict=False, rule_name=None, context=None):
"""Add a new ir.rule entry for model_id and domain on the target group_id.
        If ``restrict`` is True, instead of adding a rule, the domain is
        AND-combined with all existing rules in the group, to implement
        an additional restriction (as of 6.1, multiple rules in the same group are
        OR'ed by default, so a restriction must alter all existing rules).
This is necessary because the personal rules of the user that is sharing
are first copied to the new share group. Afterwards the filters used for
sharing are applied as an additional layer of rules, which are likely to
apply to the same model. The default rule algorithm would OR them (as of 6.1),
        which would result in a combined set of permissions that could be larger
than those of the user that is sharing! Hence we must forcefully AND the
rules at this stage.
One possibly undesirable effect can appear when sharing with a
pre-existing group, in which case altering pre-existing rules would not
be desired. This is addressed in the portal module.
"""
if rule_name is None:
rule_name = _('Sharing filter created by user %s (%s) for group %s') % \
(current_user.name, current_user.login, group_id)
rule_obj = self.pool.get('ir.rule')
rule_ids = rule_obj.search(cr, UID_ROOT, [('groups', 'in', group_id), ('model_id', '=', model_id)])
if rule_ids:
for rule in rule_obj.browse(cr, UID_ROOT, rule_ids, context=context):
if rule.domain_force == domain:
# don't create it twice!
if restrict:
continue
else:
_logger.debug("Ignoring sharing rule on model %s with domain: %s the same rule exists already", model_id, domain)
return
if restrict:
# restricting existing rules is done by adding the clause
# with an AND, but we can't alter the rule if it belongs to
# other groups, so we duplicate if needed
rule = self._check_personal_rule_or_duplicate(cr, group_id, rule, context=context)
eval_ctx = rule_obj._eval_context_for_combinations()
org_domain = expression.normalize_domain(eval(rule.domain_force, eval_ctx))
new_clause = expression.normalize_domain(eval(domain, eval_ctx))
combined_domain = expression.AND([new_clause, org_domain])
rule.write({'domain_force': combined_domain, 'name': rule.name + _('(Modified)')})
_logger.debug("Combining sharing rule %s on model %s with domain: %s", rule.id, model_id, domain)
if not rule_ids or not restrict:
# Adding the new rule in the group is ok for normal cases, because rules
# in the same group and for the same model will be combined with OR
# (as of v6.1), so the desired effect is achieved.
rule_obj.create(cr, UID_ROOT, {
'name': rule_name,
'model_id': model_id,
'domain_force': domain,
'groups': [(4,group_id)]
})
_logger.debug("Created sharing rule on model %s with domain: %s", model_id, domain)
def _create_indirect_sharing_rules(self, cr, current_user, wizard_data, group_id, fields_relations, context=None):
rule_name = _('Indirect sharing filter created by user %s (%s) for group %s') % \
(current_user.name, current_user.login, group_id)
try:
domain = safe_eval(wizard_data.domain)
if domain:
for rel_field, model in fields_relations:
                    # mail.message is transversal: it should not directly receive the access rights
if model.model in ['mail.message']: continue
related_domain = []
if not rel_field: continue
for element in domain:
if expression.is_leaf(element):
left, operator, right = element
left = '%s.%s'%(rel_field, left)
element = left, operator, right
related_domain.append(element)
self._create_or_combine_sharing_rule(cr, current_user, wizard_data,
group_id, model_id=model.id, domain=str(related_domain),
rule_name=rule_name, restrict=True, context=context)
except Exception:
_logger.exception('Failed to create share access')
raise osv.except_osv(_('Sharing access cannot be created.'),
_('Sorry, the current screen and filter you are trying to share are not supported at the moment.\nYou may want to try a simpler filter.'))
def _check_preconditions(self, cr, uid, wizard_data, context=None):
self._assert(wizard_data.action_id and wizard_data.access_mode,
_('Action and Access Mode are required to create a shared access.'),
context=context)
self._assert(self.has_share(cr, uid, wizard_data, context=context),
_('You must be a member of the Technical group to use the share wizard.'),
context=context)
if wizard_data.user_type == 'emails':
self._assert((wizard_data.new_users or wizard_data.email_1 or wizard_data.email_2 or wizard_data.email_3),
_('Please indicate the emails of the persons to share with, one per line.'),
context=context)
def _create_share_users_group(self, cr, uid, wizard_data, context=None):
"""Creates the appropriate share group and share users, and populates
result_line_ids of wizard_data with one line for each user.
:return: a tuple composed of the new group id (to which the shared access should be granted),
the ids of the new share users that have been created and the ids of the existing share users
"""
group_id = self._create_share_group(cr, uid, wizard_data, context=context)
# First create any missing user, based on the email addresses provided
new_ids, existing_ids = self._create_new_share_users(cr, uid, wizard_data, group_id, context=context)
# Finally, setup the new action and shortcut for the users.
if existing_ids:
# existing users still need to join the new group
self.pool.get('res.users').write(cr, UID_ROOT, existing_ids, {
'groups_id': [(4,group_id)],
})
# existing user don't need their home action replaced, only a new shortcut
self._setup_action_and_shortcut(cr, uid, wizard_data, existing_ids, make_home=False, context=context)
if new_ids:
# new users need a new shortcut AND a home action
self._setup_action_and_shortcut(cr, uid, wizard_data, new_ids, make_home=True, context=context)
return group_id, new_ids, existing_ids
def go_step_2(self, cr, uid, ids, context=None):
wizard_data = self.browse(cr, uid, ids[0], context=context)
self._check_preconditions(cr, uid, wizard_data, context=context)
# Create shared group and users
group_id, new_ids, existing_ids = self._create_share_users_group(cr, uid, wizard_data, context=context)
current_user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
model_obj = self.pool.get('ir.model')
model_id = model_obj.search(cr, uid, [('model','=', wizard_data.action_id.res_model)])[0]
model = model_obj.browse(cr, uid, model_id, context=context)
# ACCESS RIGHTS
# We have several classes of objects that should receive different access rights:
# Let:
# - [obj0] be the target model itself (and its parents via _inherits, if any)
# - [obj1] be the target model and all other models recursively accessible from
# obj0 via one2many relationships
# - [obj2] be the target model and all other models recursively accessible from
# obj0 via one2many and many2many relationships
# - [obj3] be all models recursively accessible from obj1 via many2one relationships
# (currently not used)
obj0, obj1, obj2, obj3 = self._get_relationship_classes(cr, uid, model, context=context)
mode = wizard_data.access_mode
# Add access to [obj0] and [obj1] according to chosen mode
self._add_access_rights_for_share_group(cr, uid, group_id, mode, obj0, context=context)
self._add_access_rights_for_share_group(cr, uid, group_id, mode, obj1, context=context)
# Add read-only access (always) to [obj2]
self._add_access_rights_for_share_group(cr, uid, group_id, 'readonly', obj2, context=context)
# IR.RULES
# A. On [obj0], [obj1], [obj2]: add all rules from all groups of
# the user that is sharing
# Warning: rules must be copied instead of linked if they contain a reference
# to uid or if the rule is shared with other groups (and it must be replaced correctly)
# B. On [obj0]: 1 rule with domain of shared action
# C. For each model in [obj1]: 1 rule in the form:
# many2one_rel.domain_of_obj0
# where many2one_rel is the many2one used in the definition of the
# one2many, and domain_of_obj0 is the sharing domain
# For example if [obj0] is project.project with a domain of
# ['id', 'in', [1,2]]
# then we will have project.task in [obj1] and we need to create this
# ir.rule on project.task:
# ['project_id.id', 'in', [1,2]]
# A.
all_relations = obj0 + obj1 + obj2
self._link_or_copy_current_user_rules(cr, current_user, group_id, all_relations, context=context)
# B.
main_domain = wizard_data.domain if wizard_data.domain != '[]' else str(DOMAIN_ALL)
self._create_or_combine_sharing_rule(cr, current_user, wizard_data,
group_id, model_id=model.id, domain=main_domain,
restrict=True, context=context)
# C.
self._create_indirect_sharing_rules(cr, current_user, wizard_data, group_id, obj1, context=context)
# refresh wizard_data
wizard_data = self.browse(cr, uid, ids[0], context=context)
# EMAILS AND NOTIFICATIONS
# A. Not invite: as before
# -> send emails to destination users
# B. Invite (OpenSocial)
# -> subscribe all users (existing and new) to the record
# -> send a notification with a summary to the current record
# -> send a notification to all users; users allowing to receive
# emails in preferences will receive it
# new users by default receive all notifications by email
# A.
if not wizard_data.invite:
self.send_emails(cr, uid, wizard_data, context=context)
# B.
else:
# Invite (OpenSocial): automatically subscribe users to the record
res_id = 0
for cond in safe_eval(main_domain):
if cond[0] == 'id':
res_id = cond[2]
# Record id not found: issue
if res_id <= 0:
raise osv.except_osv(_('Record id not found'), _('The share engine has not been able to fetch a record_id for your invitation.'))
self.pool[model.model].message_subscribe(cr, uid, [res_id], new_ids + existing_ids, context=context)
# self.send_invite_email(cr, uid, wizard_data, context=context)
# self.send_invite_note(cr, uid, model.model, res_id, wizard_data, context=context)
# CLOSE
# A. Not invite: as before
# B. Invite: skip summary screen, get back to the record
# A.
if not wizard_data.invite:
dummy, step2_form_view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'share', 'share_step2_form')
return {
'name': _('Shared access created!'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'share.wizard',
'view_id': False,
'res_id': ids[0],
'views': [(step2_form_view_id, 'form'), (False, 'tree'), (False, 'calendar'), (False, 'graph')],
'type': 'ir.actions.act_window',
'target': 'new'
}
# B.
else:
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': model.model,
'view_id': False,
'res_id': res_id,
'views': [(False, 'form'), (False, 'tree'), (False, 'calendar'), (False, 'graph')],
'type': 'ir.actions.act_window',
}
    def send_invite_note(self, cr, uid, model_name, res_id, wizard_data, context=None):
        subject = _('Invitation')
        body = 'has been <b>shared</b> with'
        for tmp_idx, result_line in enumerate(wizard_data.result_line_ids):
            body += ' @%s' % (result_line.user_id.login)
            if tmp_idx < len(wizard_data.result_line_ids) - 2:
                body += ','
            elif tmp_idx == len(wizard_data.result_line_ids) - 2:
                body += ' and'
        body += '.'
        return self.pool[model_name].message_post(cr, uid, [res_id], body=body, subject=subject, context=context)
def send_invite_email(self, cr, uid, wizard_data, context=None):
# TDE Note: not updated because will disappear
message_obj = self.pool.get('mail.message')
notification_obj = self.pool.get('mail.notification')
user = self.pool.get('res.users').browse(cr, UID_ROOT, uid)
if not user.email:
raise osv.except_osv(_('Email Required'), _('The current user must have an email address configured in User Preferences to be able to send outgoing emails.'))
# TODO: also send an HTML version of this mail
for result_line in wizard_data.result_line_ids:
email_to = result_line.user_id.email
if not email_to:
continue
subject = _('Invitation to collaborate about %s') % (wizard_data.record_name)
body = _("Hello,\n\n")
body += _("I have shared %s (%s) with you!\n\n") % (wizard_data.record_name, wizard_data.name)
if wizard_data.message:
body += "%s\n\n" % (wizard_data.message)
if result_line.newly_created:
body += _("The documents are not attached, you can view them online directly on my Odoo server at:\n %s\n\n") % (result_line.share_url)
body += _("These are your credentials to access this protected area:\n")
body += "%s: %s" % (_("Username"), result_line.user_id.login) + "\n"
body += "%s: %s" % (_("Password"), result_line.password) + "\n"
body += "%s: %s" % (_("Database"), cr.dbname) + "\n"
body += _("The documents have been automatically added to your subscriptions.\n\n")
body += '%s\n\n' % ((user.signature or ''))
body += "--\n"
body += _("Odoo is a powerful and user-friendly suite of Business Applications (CRM, Sales, HR, etc.)\n"
"It is open source and can be found on http://www.openerp.com.")
msg_id = message_obj.schedule_with_attach(cr, uid, user.email, [email_to], subject, body, model='', context=context)
notification_obj.create(cr, uid, {'user_id': result_line.user_id.id, 'message_id': msg_id}, context=context)
def send_emails(self, cr, uid, wizard_data, context=None):
_logger.info('Sending share notifications by email...')
mail_mail = self.pool.get('mail.mail')
user = self.pool.get('res.users').browse(cr, UID_ROOT, uid)
if not user.email:
raise osv.except_osv(_('Email Required'), _('The current user must have an email address configured in User Preferences to be able to send outgoing emails.'))
# TODO: also send an HTML version of this mail
mail_ids = []
for result_line in wizard_data.result_line_ids:
email_to = result_line.user_id.email
if not email_to:
continue
subject = wizard_data.name
body = _("Hello,\n\n")
body += _("I've shared %s with you!\n\n") % wizard_data.name
body += _("The documents are not attached, you can view them online directly on my Odoo server at:\n %s\n\n") % (result_line.share_url)
if wizard_data.message:
body += '%s\n\n' % (wizard_data.message)
if result_line.newly_created:
body += _("These are your credentials to access this protected area:\n")
body += "%s: %s\n" % (_("Username"), result_line.user_id.login)
body += "%s: %s\n" % (_("Password"), result_line.password)
body += "%s: %s\n" % (_("Database"), cr.dbname)
else:
body += _("The documents have been automatically added to your current Odoo documents.\n")
body += _("You may use your current login (%s) and password to view them.\n") % result_line.user_id.login
body += "\n\n%s\n\n" % ( (user.signature or '') )
body += "--\n"
body += _("Odoo is a powerful and user-friendly suite of Business Applications (CRM, Sales, HR, etc.)\n"
"It is open source and can be found on http://www.openerp.com.")
mail_ids.append(mail_mail.create(cr, uid, {
'email_from': user.email,
'email_to': email_to,
'subject': subject,
'body_html': '<pre>%s</pre>' % body}, context=context))
# force direct delivery, as users expect instant notification
mail_mail.send(cr, uid, mail_ids, context=context)
_logger.info('%d share notification(s) sent.', len(mail_ids))
def onchange_embed_options(self, cr, uid, ids, opt_title, opt_search, context=None):
wizard = self.browse(cr, uid, ids[0], context)
options = dict(title=opt_title, search=opt_search)
return {'value': {'embed_code': self._generate_embedded_code(wizard, options)}}
class share_result_line(osv.osv_memory):
_name = 'share.wizard.result.line'
_rec_name = 'user_id'
def _share_url(self, cr, uid, ids, _fieldname, _args, context=None):
result = dict.fromkeys(ids, '')
for this in self.browse(cr, uid, ids, context=context):
data = dict(dbname=cr.dbname, login=this.login, password=this.password)
if this.share_wizard_id and this.share_wizard_id.action_id:
data['action_id'] = this.share_wizard_id.action_id.id
this = this.with_context(share_url_template_hash_arguments=['action_id'])
result[this.id] = this.share_wizard_id.share_url_template() % data
return result
_columns = {
'user_id': fields.many2one('res.users', required=True, readonly=True),
'login': fields.related('user_id', 'login', string='Login', type='char', size=64, required=True, readonly=True),
'password': fields.char('Password', size=64, readonly=True),
'share_url': fields.function(_share_url, string='Share URL', type='char', size=512),
'share_wizard_id': fields.many2one('share.wizard', 'Share Wizard', required=True, ondelete='cascade'),
'newly_created': fields.boolean('Newly created', readonly=True),
}
_defaults = {
'newly_created': True,
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('patients', '0026_clinicianother_user'),
]
operations = [
migrations.AddField(
model_name='clinicianother',
name='use_other',
            field=models.BooleanField(default=False),
migrations.AlterField(
model_name='clinicianother',
name='user',
field=models.ForeignKey(
blank=True,
null=True,
on_delete=models.CASCADE,
to=settings.AUTH_USER_MODEL),
),
]
|
from . import slide_channel_technology_category
from . import slide_channel_technology
from . import slide_channel
|
from django.db import transaction
from django.db import connection
@transaction.atomic
def bulk_update_userstory_custom_attribute_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update custom_attributes_userstorycustomattribute set "order" = $1
where custom_attributes_userstorycustomattribute.id = $2 and
custom_attributes_userstorycustomattribute.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
@transaction.atomic
def bulk_update_task_custom_attribute_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update custom_attributes_taskcustomattribute set "order" = $1
where custom_attributes_taskcustomattribute.id = $2 and
custom_attributes_taskcustomattribute.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
@transaction.atomic
def bulk_update_issue_custom_attribute_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update custom_attributes_issuecustomattribute set "order" = $1
where custom_attributes_issuecustomattribute.id = $2 and
custom_attributes_issuecustomattribute.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
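# The three functions above differ only in the table they target. A minimal
# consolidation sketch, assuming the same hard-coded table names (the helper
# name is hypothetical, not part of the original module):
@transaction.atomic
def _bulk_update_custom_attribute_order(project, data, table):
    cursor = connection.cursor()
    # PREPARE once, EXECUTE per row, then DEALLOCATE: one parse/plan for the
    # whole batch instead of one per UPDATE statement.
    sql = """
    prepare bulk_update_order as update {table} set "order" = $1
    where {table}.id = $2 and
    {table}.project_id = $3;
    """.format(table=table)
    cursor.execute(sql)
    for id, order in data:
        cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
                       (order, id, project.id))
    cursor.execute("DEALLOCATE bulk_update_order")
    cursor.close()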
|
from odoo import models
from odoo.tests.common import SavepointCase
class BaseKanbanAbstractTester(models.TransientModel):
_name = 'base.kanban.abstract.tester'
_inherit = 'base.kanban.abstract'
class TestBaseKanbanAbstract(SavepointCase):
@classmethod
def _init_test_model(cls, model_cls):
""" It builds a model from model_cls in order to test abstract models.
Note that this does not actually create a table in the database, so
there may be some unidentified edge cases.
Args:
model_cls (openerp.models.BaseModel): Class of model to initialize
Returns:
model_cls: Instance
"""
registry = cls.env.registry
cr = cls.env.cr
inst = model_cls._build_model(registry, cr)
model = cls.env[model_cls._name].with_context(todo=[])
model._prepare_setup()
model._setup_base(partial=False)
model._setup_fields(partial=False)
model._setup_complete()
model._auto_init()
model.init()
model._auto_end()
cls.test_model_record = cls.env['ir.model'].search([
('name', '=', model._name),
])
return inst
@classmethod
def setUpClass(cls):
super(TestBaseKanbanAbstract, cls).setUpClass()
cls.env.registry.enter_test_mode()
cls._init_test_model(BaseKanbanAbstractTester)
cls.test_model = cls.env[BaseKanbanAbstractTester._name]
@classmethod
def tearDownClass(cls):
cls.env.registry.leave_test_mode()
super(TestBaseKanbanAbstract, cls).tearDownClass()
def setUp(self):
super(TestBaseKanbanAbstract, self).setUp()
test_stage_1 = self.env['base.kanban.stage'].create({
'name': 'Test Stage 1',
'res_model_id': self.test_model_record.id,
})
test_stage_2 = self.env['base.kanban.stage'].create({
'name': 'Test Stage 2',
'res_model_id': self.test_model_record.id,
'fold': True,
})
self.id_1 = test_stage_1.id
self.id_2 = test_stage_2.id
def test_read_group_stage_ids(self):
"""It should return the correct recordset. """
self.assertEqual(
self.test_model._read_group_stage_ids(
self.env['base.kanban.stage'], [], 'id',
),
self.env['base.kanban.stage'].search([], order='id'),
)
def test_default_stage_id(self):
""" It should return an empty RecordSet """
self.assertEqual(
self.env['base.kanban.abstract']._default_stage_id(),
self.env['base.kanban.stage']
)
|
"""
Tests for users API
"""
import datetime
import ddt
import pytz
from django.conf import settings
from django.template import defaultfilters
from django.test import RequestFactory, override_settings
from django.utils import timezone
from milestones.tests.utils import MilestonesTestCaseMixin
from mock import patch
from lms.djangoapps.certificates.api import generate_user_certificates
from lms.djangoapps.certificates.models import CertificateStatuses
from lms.djangoapps.certificates.tests.factories import GeneratedCertificateFactory
from course_modes.models import CourseMode
from courseware.access_response import MilestoneAccessError, StartDateError, VisibilityError
from lms.djangoapps.grades.tests.utils import mock_passing_grade
from mobile_api.testutils import (
MobileAPITestCase,
MobileAuthTestMixin,
MobileAuthUserTestMixin,
MobileCourseAccessTestMixin
)
from openedx.core.lib.courses import course_image_url
from openedx.core.lib.tests import attr
from student.models import CourseEnrollment
from util.milestones_helpers import set_prerequisite_courses
from util.testing import UrlResetMixin
from xmodule.course_module import DEFAULT_START_DATE
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from .. import errors
from .serializers import CourseEnrollmentSerializer
@attr(shard=9)
class TestUserDetailApi(MobileAPITestCase, MobileAuthUserTestMixin):
"""
Tests for /api/mobile/v0.5/users/<user_name>...
"""
REVERSE_INFO = {'name': 'user-detail', 'params': ['username']}
def test_success(self):
self.login()
response = self.api_response()
self.assertEqual(response.data['username'], self.user.username)
self.assertEqual(response.data['email'], self.user.email)
@attr(shard=9)
class TestUserInfoApi(MobileAPITestCase, MobileAuthTestMixin):
"""
Tests for /api/mobile/v0.5/my_user_info
"""
def reverse_url(self, reverse_args=None, **kwargs):
return '/api/mobile/v0.5/my_user_info'
def test_success(self):
"""Verify the endpoint redirects to the user detail endpoint"""
self.login()
response = self.api_response(expected_response_code=302)
self.assertIn(self.username, response['location'])
@attr(shard=9)
@ddt.ddt
@override_settings(MKTG_URLS={'ROOT': 'dummy-root'})
class TestUserEnrollmentApi(UrlResetMixin, MobileAPITestCase, MobileAuthUserTestMixin,
MobileCourseAccessTestMixin, MilestonesTestCaseMixin):
"""
Tests for /api/mobile/v0.5/users/<user_name>/course_enrollments/
"""
REVERSE_INFO = {'name': 'courseenrollment-detail', 'params': ['username']}
ALLOW_ACCESS_TO_UNRELEASED_COURSE = True
ALLOW_ACCESS_TO_MILESTONE_COURSE = True
ALLOW_ACCESS_TO_NON_VISIBLE_COURSE = True
NEXT_WEEK = datetime.datetime.now(pytz.UTC) + datetime.timedelta(days=7)
LAST_WEEK = datetime.datetime.now(pytz.UTC) - datetime.timedelta(days=7)
ADVERTISED_START = "Spring 2016"
ENABLED_SIGNALS = ['course_published']
DATES = {
'next_week': NEXT_WEEK,
'last_week': LAST_WEEK,
'default_start_date': DEFAULT_START_DATE,
}
@patch.dict(settings.FEATURES, {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(TestUserEnrollmentApi, self).setUp()
def verify_success(self, response):
"""
Verifies user course enrollment response for success
"""
super(TestUserEnrollmentApi, self).verify_success(response)
courses = response.data
self.assertEqual(len(courses), 1)
found_course = courses[0]['course']
self.assertIn('courses/{}/about'.format(self.course.id), found_course['course_about'])
self.assertIn('course_info/{}/updates'.format(self.course.id), found_course['course_updates'])
self.assertIn('course_info/{}/handouts'.format(self.course.id), found_course['course_handouts'])
self.assertIn('video_outlines/courses/{}'.format(self.course.id), found_course['video_outline'])
self.assertEqual(found_course['id'], unicode(self.course.id))
self.assertEqual(courses[0]['mode'], CourseMode.DEFAULT_MODE_SLUG)
self.assertEqual(courses[0]['course']['subscription_id'], self.course.clean_id(padding_char='_'))
expected_course_image_url = course_image_url(self.course)
self.assertIsNotNone(expected_course_image_url)
self.assertIn(expected_course_image_url, found_course['course_image'])
self.assertIn(expected_course_image_url, found_course['media']['course_image']['uri'])
def verify_failure(self, response, error_type=None):
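        """
        Verify that enrollments in inaccessible courses are omitted:
        the response succeeds but lists no courses.
        """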
self.assertEqual(response.status_code, 200)
courses = response.data
self.assertEqual(len(courses), 0)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_sort_order(self):
self.login()
num_courses = 3
courses = []
for course_index in range(num_courses):
courses.append(CourseFactory.create(mobile_available=True))
self.enroll(courses[course_index].id)
# verify courses are returned in the order of enrollment, with most recently enrolled first.
response = self.api_response()
for course_index in range(num_courses):
self.assertEqual(
response.data[course_index]['course']['id'],
unicode(courses[num_courses - course_index - 1].id)
)
@patch.dict(settings.FEATURES, {
'ENABLE_PREREQUISITE_COURSES': True,
'DISABLE_START_DATES': False,
'ENABLE_MKTG_SITE': True,
})
def test_courseware_access(self):
self.login()
course_with_prereq = CourseFactory.create(start=self.LAST_WEEK, mobile_available=True)
prerequisite_course = CourseFactory.create()
set_prerequisite_courses(course_with_prereq.id, [unicode(prerequisite_course.id)])
# Create list of courses with various expected courseware_access responses and corresponding expected codes
courses = [
course_with_prereq,
CourseFactory.create(start=self.NEXT_WEEK, mobile_available=True),
CourseFactory.create(visible_to_staff_only=True, mobile_available=True),
CourseFactory.create(start=self.LAST_WEEK, mobile_available=True, visible_to_staff_only=False),
]
expected_error_codes = [
MilestoneAccessError().error_code, # 'unfulfilled_milestones'
StartDateError(self.NEXT_WEEK).error_code, # 'course_not_started'
VisibilityError().error_code, # 'not_visible_to_user'
None,
]
# Enroll in all the courses
for course in courses:
self.enroll(course.id)
        # Verify each course reports the expected error code; the last enrolled course comes first in the response.
response = self.api_response()
for course_index in range(len(courses)):
result = response.data[course_index]['course']['courseware_access']
self.assertEqual(result['error_code'], expected_error_codes[::-1][course_index])
if result['error_code'] is not None:
self.assertFalse(result['has_access'])
@ddt.data(
('next_week', ADVERTISED_START, ADVERTISED_START, "string"),
('next_week', None, defaultfilters.date(NEXT_WEEK, "DATE_FORMAT"), "timestamp"),
('next_week', '', defaultfilters.date(NEXT_WEEK, "DATE_FORMAT"), "timestamp"),
('default_start_date', ADVERTISED_START, ADVERTISED_START, "string"),
('default_start_date', '', None, "empty"),
('default_start_date', None, None, "empty"),
)
@ddt.unpack
@patch.dict(settings.FEATURES, {'DISABLE_START_DATES': False, 'ENABLE_MKTG_SITE': True})
def test_start_type_and_display(self, start, advertised_start, expected_display, expected_type):
"""
Tests that the correct start_type and start_display are returned in the
case the course has not started
"""
self.login()
course = CourseFactory.create(start=self.DATES[start], advertised_start=advertised_start, mobile_available=True)
self.enroll(course.id)
response = self.api_response()
self.assertEqual(response.data[0]['course']['start_type'], expected_type)
self.assertEqual(response.data[0]['course']['start_display'], expected_display)
@patch.dict(settings.FEATURES, {"ENABLE_DISCUSSION_SERVICE": True, 'ENABLE_MKTG_SITE': True})
def test_discussion_url(self):
self.login_and_enroll()
response = self.api_response()
response_discussion_url = response.data[0]['course']['discussion_url']
self.assertIn('/api/discussion/v1/courses/{}'.format(self.course.id), response_discussion_url)
def test_org_query(self):
self.login()
# Create list of courses with various organizations
courses = [
CourseFactory.create(org='edX', mobile_available=True),
CourseFactory.create(org='edX', mobile_available=True),
CourseFactory.create(org='edX', mobile_available=True, visible_to_staff_only=True),
CourseFactory.create(org='Proversity.org', mobile_available=True),
CourseFactory.create(org='MITx', mobile_available=True),
CourseFactory.create(org='HarvardX', mobile_available=True),
]
# Enroll in all the courses
for course in courses:
self.enroll(course.id)
response = self.api_response(data={'org': 'edX'})
# Test for 3 expected courses
self.assertEqual(len(response.data), 3)
# Verify only edX courses are returned
for entry in response.data:
self.assertEqual(entry['course']['org'], 'edX')
@attr(shard=9)
@override_settings(MKTG_URLS={'ROOT': 'dummy-root'})
class TestUserEnrollmentCertificates(UrlResetMixin, MobileAPITestCase, MilestonesTestCaseMixin):
"""
Tests for /api/mobile/v0.5/users/<user_name>/course_enrollments/
"""
REVERSE_INFO = {'name': 'courseenrollment-detail', 'params': ['username']}
ENABLED_SIGNALS = ['course_published']
def verify_pdf_certificate(self):
"""
Verifies the correct URL is returned in the response
for PDF certificates.
"""
self.login_and_enroll()
certificate_url = "http://test_certificate_url"
GeneratedCertificateFactory.create(
user=self.user,
course_id=self.course.id,
status=CertificateStatuses.downloadable,
mode='verified',
download_url=certificate_url,
)
response = self.api_response()
certificate_data = response.data[0]['certificate']
        self.assertEqual(certificate_data['url'], certificate_url)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_no_certificate(self):
self.login_and_enroll()
response = self.api_response()
certificate_data = response.data[0]['certificate']
self.assertDictEqual(certificate_data, {})
@patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': False, 'ENABLE_MKTG_SITE': True})
def test_pdf_certificate_with_html_cert_disabled(self):
"""
        Tests PDF certificates with CERTIFICATES_HTML_VIEW set to False.
"""
self.verify_pdf_certificate()
@patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': True, 'ENABLE_MKTG_SITE': True})
def test_pdf_certificate_with_html_cert_enabled(self):
"""
Tests PDF certificates with CERTIFICATES_HTML_VIEW set to True.
"""
self.verify_pdf_certificate()
@patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': True, 'ENABLE_MKTG_SITE': True})
def test_web_certificate(self):
CourseMode.objects.create(
course_id=self.course.id,
mode_display_name="Honor",
mode_slug=CourseMode.HONOR,
)
self.login_and_enroll()
self.course.cert_html_view_enabled = True
self.store.update_item(self.course, self.user.id)
with mock_passing_grade():
generate_user_certificates(self.user, self.course.id)
response = self.api_response()
certificate_data = response.data[0]['certificate']
self.assertRegexpMatches(
certificate_data['url'],
r'http.*/certificates/user/{user_id}/course/{course_id}'.format(
user_id=self.user.id,
course_id=self.course.id,
)
)
@attr(shard=9)
class CourseStatusAPITestCase(MobileAPITestCase):
"""
Base test class for /api/mobile/v0.5/users/<user_name>/course_status_info/{course_id}
"""
REVERSE_INFO = {'name': 'user-course-status', 'params': ['username', 'course_id']}
def setUp(self):
"""
Creates a basic course structure for our course
"""
super(CourseStatusAPITestCase, self).setUp()
self.section = ItemFactory.create(
parent=self.course,
category='chapter',
)
self.sub_section = ItemFactory.create(
parent=self.section,
category='sequential',
)
self.unit = ItemFactory.create(
parent=self.sub_section,
category='vertical',
)
self.other_sub_section = ItemFactory.create(
parent=self.section,
category='sequential',
)
self.other_unit = ItemFactory.create(
parent=self.other_sub_section,
category='vertical',
)
@attr(shard=9)
class TestCourseStatusGET(CourseStatusAPITestCase, MobileAuthUserTestMixin,
MobileCourseAccessTestMixin, MilestonesTestCaseMixin):
"""
Tests for GET of /api/mobile/v0.5/users/<user_name>/course_status_info/{course_id}
"""
def test_success(self):
self.login_and_enroll()
response = self.api_response()
self.assertEqual(
response.data["last_visited_module_id"],
unicode(self.sub_section.location)
)
self.assertEqual(
response.data["last_visited_module_path"],
[unicode(module.location) for module in [self.sub_section, self.section, self.course]]
)
@attr(shard=9)
class TestCourseStatusPATCH(CourseStatusAPITestCase, MobileAuthUserTestMixin,
MobileCourseAccessTestMixin, MilestonesTestCaseMixin):
"""
Tests for PATCH of /api/mobile/v0.5/users/<user_name>/course_status_info/{course_id}
"""
def url_method(self, url, **kwargs):
# override implementation to use PATCH method.
return self.client.patch(url, data=kwargs.get('data', None))
def test_success(self):
self.login_and_enroll()
response = self.api_response(data={"last_visited_module_id": unicode(self.other_unit.location)})
self.assertEqual(
response.data["last_visited_module_id"],
unicode(self.other_sub_section.location)
)
def test_invalid_module(self):
self.login_and_enroll()
response = self.api_response(data={"last_visited_module_id": "abc"}, expected_response_code=400)
self.assertEqual(
response.data,
errors.ERROR_INVALID_MODULE_ID
)
def test_nonexistent_module(self):
self.login_and_enroll()
non_existent_key = self.course.id.make_usage_key('video', 'non-existent')
response = self.api_response(data={"last_visited_module_id": non_existent_key}, expected_response_code=400)
self.assertEqual(
response.data,
errors.ERROR_INVALID_MODULE_ID
)
def test_no_timezone(self):
self.login_and_enroll()
past_date = datetime.datetime.now()
response = self.api_response(
data={
"last_visited_module_id": unicode(self.other_unit.location),
"modification_date": past_date.isoformat()
},
expected_response_code=400
)
self.assertEqual(
response.data,
errors.ERROR_INVALID_MODIFICATION_DATE
)
def _date_sync(self, date, initial_unit, update_unit, expected_subsection):
"""
        Helper for test cases that use a modification date to decide
        whether to update the course status
"""
self.login_and_enroll()
# save something so we have an initial date
self.api_response(data={"last_visited_module_id": unicode(initial_unit.location)})
# now actually update it
response = self.api_response(
data={
"last_visited_module_id": unicode(update_unit.location),
"modification_date": date.isoformat()
}
)
self.assertEqual(
response.data["last_visited_module_id"],
unicode(expected_subsection.location)
)
def test_old_date(self):
self.login_and_enroll()
date = timezone.now() + datetime.timedelta(days=-100)
self._date_sync(date, self.unit, self.other_unit, self.sub_section)
def test_new_date(self):
self.login_and_enroll()
date = timezone.now() + datetime.timedelta(days=100)
self._date_sync(date, self.unit, self.other_unit, self.other_sub_section)
def test_no_initial_date(self):
self.login_and_enroll()
response = self.api_response(
data={
"last_visited_module_id": unicode(self.other_unit.location),
"modification_date": timezone.now().isoformat()
}
)
self.assertEqual(
response.data["last_visited_module_id"],
unicode(self.other_sub_section.location)
)
def test_invalid_date(self):
self.login_and_enroll()
response = self.api_response(data={"modification_date": "abc"}, expected_response_code=400)
self.assertEqual(
response.data,
errors.ERROR_INVALID_MODIFICATION_DATE
)
@attr(shard=9)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
@override_settings(MKTG_URLS={'ROOT': 'dummy-root'})
class TestCourseEnrollmentSerializer(MobileAPITestCase, MilestonesTestCaseMixin):
"""
Test the course enrollment serializer
"""
ENABLED_SIGNALS = ['course_published']
def setUp(self):
super(TestCourseEnrollmentSerializer, self).setUp()
self.login_and_enroll()
self.request = RequestFactory().get('/')
self.request.user = self.user
def test_success(self):
serialized = CourseEnrollmentSerializer(
CourseEnrollment.enrollments_for_user(self.user)[0],
context={'request': self.request},
).data
self.assertEqual(serialized['course']['name'], self.course.display_name)
self.assertEqual(serialized['course']['number'], self.course.id.course)
self.assertEqual(serialized['course']['org'], self.course.id.org)
# Assert utm parameters
expected_utm_parameters = {
'twitter': 'utm_campaign=social-sharing-db&utm_medium=social&utm_source=twitter',
'facebook': 'utm_campaign=social-sharing-db&utm_medium=social&utm_source=facebook'
}
self.assertEqual(serialized['course']['course_sharing_utm_parameters'], expected_utm_parameters)
def test_with_display_overrides(self):
self.course.display_coursenumber = "overridden_number"
self.course.display_organization = "overridden_org"
self.store.update_item(self.course, self.user.id)
serialized = CourseEnrollmentSerializer(
CourseEnrollment.enrollments_for_user(self.user)[0],
context={'request': self.request},
).data
self.assertEqual(serialized['course']['number'], self.course.display_coursenumber)
self.assertEqual(serialized['course']['org'], self.course.display_organization)
|
from .MaterialModifier import *
class MagicOre(MaterialModifier):
pass
|
from odoo import models, fields, api, _
from odoo.addons.http_routing.models.ir_http import slug
class EventEvent(models.Model):
_inherit = "event.event"
community_menu = fields.Boolean(
"Community Menu", compute="_compute_community_menu",
readonly=False, store=True,
help="Display community tab on website")
community_menu_ids = fields.One2many(
"website.event.menu", "event_id", string="Event Community Menus",
domain=[("menu_type", "=", "community")])
@api.depends("event_type_id", "website_menu", "community_menu")
def _compute_community_menu(self):
""" At type onchange: synchronize. At website_menu update: synchronize. """
for event in self:
if event.event_type_id and event.event_type_id != event._origin.event_type_id:
event.community_menu = event.event_type_id.community_menu
            elif event.website_menu and (event.website_menu != event._origin.website_menu or not event.community_menu):
event.community_menu = True
elif not event.website_menu:
event.community_menu = False
# ------------------------------------------------------------
# WEBSITE MENU MANAGEMENT
# ------------------------------------------------------------
# OVERRIDES: ADD SEQUENCE
def _get_menu_update_fields(self):
update_fields = super(EventEvent, self)._get_menu_update_fields()
update_fields += ['community_menu']
return update_fields
def _update_website_menus(self, menus_update_by_field=None):
super(EventEvent, self)._update_website_menus(menus_update_by_field=menus_update_by_field)
for event in self:
if event.menu_id and (not menus_update_by_field or event in menus_update_by_field.get('community_menu')):
event._update_website_menu_entry('community_menu', 'community_menu_ids', '_get_community_menu_entries')
def _get_menu_type_field_matching(self):
res = super(EventEvent, self)._get_menu_type_field_matching()
res['community'] = 'community_menu'
return res
def _get_community_menu_entries(self):
self.ensure_one()
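        # Menu entry tuple: (name, url, xml_id, sequence, menu_type)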
return [(_('Community'), '/event/%s/community' % slug(self), False, 80, 'community')]
def _get_track_menu_entries(self):
""" Remove agenda as this is now managed separately """
self.ensure_one()
return [
(_('Talks'), '/event/%s/track' % slug(self), False, 10, 'track'),
(_('Agenda'), '/event/%s/agenda' % slug(self), False, 70, 'track')
]
def _get_track_proposal_menu_entries(self):
""" See website_event_track._get_track_menu_entries() """
self.ensure_one()
return [(_('Talk Proposals'), '/event/%s/track_proposal' % slug(self), False, 15, 'track_proposal')]
|
{
'name': "Absence Management",
    'summary': """Create time-based absence notifications""",
'author': 'Onestein',
'website': 'http://www.onestein.eu',
'images': ['static/description/main_screenshot.png'],
'category': 'Human Resources',
'version': '10.0.1.0.0',
'license': 'AGPL-3',
'depends': [
'hr_holidays',
],
'data': [
'security/ir.model.access.csv',
'views/hr_holidays_status.xml',
'views/hr_holidays.xml',
'data/hr_absenteeism_cron.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
'application': False,
}
|
{
"name": "Account Invoice Payment Retention",
"version": "14.0.1.0.1",
"category": "Accounting & Finance",
"author": "Ecosoft, Odoo Community Association (OCA)",
"license": "AGPL-3",
"website": "https://github.com/OCA/account-invoicing",
"depends": ["account"],
"data": [
"security/security.xml",
"views/res_config_settings_views.xml",
"views/account_move_views.xml",
"wizard/account_payment_register_views.xml",
],
"maintainer": ["kittiu"],
"installable": True,
"development_status": "Alpha",
}
|
import unittest
import doctest
import sys
import time
import timeside.core
class _TextTestResult(unittest.TestResult):
"""A test result class that can print formatted text results to a stream.
Used by TextTestRunner.
"""
separator1 = '=' * 70
separator2 = '-' * 70
def __init__(self, stream, descriptions, verbosity):
unittest.TestResult.__init__(self)
self.stream = stream
self.showAll = verbosity > 1
self.dots = verbosity == 1
self.descriptions = descriptions
self.currentTestCase = None
def getDescription(self, test):
if self.descriptions:
return test.shortDescription() or str(test)
else:
return str(test)
def startTest(self, test):
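        # In verbose mode, print a [TestClassName] header whenever a new test class begins.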
unittest.TestResult.startTest(self, test)
if self.showAll:
if self.currentTestCase != test.__class__:
self.currentTestCase = test.__class__
self.stream.writeln()
self.stream.writeln("[%s]" % self.currentTestCase.__name__)
self.stream.write(" " + self.getDescription(test))
self.stream.write(" ... ")
def addSuccess(self, test):
unittest.TestResult.addSuccess(self, test)
if self.showAll:
self.stream.writeln("ok")
elif self.dots:
self.stream.write('.')
def addError(self, test, err):
unittest.TestResult.addError(self, test, err)
if self.showAll:
self.stream.writeln("ERROR")
elif self.dots:
self.stream.write('E')
def addFailure(self, test, err):
unittest.TestResult.addFailure(self, test, err)
if self.showAll:
self.stream.writeln("FAIL")
elif self.dots:
self.stream.write('F')
def addSkip(self, test, reason):
unittest.TestResult.addSkip(self, test, reason)
if self.showAll:
self.stream.writeln("SKIP : " + reason)
elif self.dots:
self.stream.write('S')
def printErrors(self):
if self.dots or self.showAll:
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors):
for test, err in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: [%s] --> %s "
% (flavour,
test.__class__.__name__,
self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % err)
class _WritelnDecorator:
"""Used to decorate file-like objects with a handy 'writeln' method"""
def __init__(self, stream):
self.stream = stream
def __getattr__(self, attr):
return getattr(self.stream, attr)
def writeln(self, arg=None):
if arg:
self.write(arg)
self.write('\n') # text-mode streams translate to \r\n if needed
class TestRunner:
"""A test runner class that displays results in textual form.
It prints out the names of tests as they are run, errors as they
occur, and a summary of the results at the end of the test run.
"""
def __init__(self, stream=sys.stderr, descriptions=1, verbosity=2):
self.stream = _WritelnDecorator(stream)
self.descriptions = descriptions
self.verbosity = verbosity
def _makeResult(self):
return _TextTestResult(self.stream, self.descriptions, self.verbosity)
def run(self, test):
"Run the given test case or test suite."
result = self._makeResult()
startTime = time.time()
test(result)
stopTime = time.time()
timeTaken = stopTime - startTime
result.printErrors()
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
if not result.wasSuccessful():
self.stream.write("FAILED (")
failed, errored = map(len, (result.failures, result.errors))
if failed:
self.stream.write("failures=%d" % failed)
if errored:
if failed:
self.stream.write(", ")
self.stream.write("errors=%d" % errored)
self.stream.writeln(")")
else:
self.stream.writeln("OK")
return result
def run_test_module(test_modules_list=None, test_prefix=None):
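    # Collect doctests and unit tests from the given modules (plus __main__)
    # into a single suite and run it with the TestRunner above.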
suite = unittest.TestSuite()
finder = doctest.DocTestFinder(exclude_empty=False) # finder for doctest
if test_prefix:
unittest.TestLoader.testMethodPrefix = test_prefix
if not test_modules_list:
test_modules_list = []
elif not isinstance(test_modules_list, list):
test_modules_list = [test_modules_list]
test_modules_list.append('__main__')
for test in test_modules_list:
# Doctest
suite.addTest(doctest.DocTestSuite(test, test_finder=finder))
# unittest
suite.addTest(unittest.loader.TestLoader().loadTestsFromModule(test))
TestRunner().run(suite)
|
from flask import Flask
from redis import Redis
from rq import Queue
import rq_dashboard
from flask_mongoengine import MongoEngine
from validator.config import Configuration
app = Flask(__name__)
app.config.from_object(Configuration)
db = MongoEngine(app)
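# Default Redis connection backing the RQ 'high' priority queue (30 min job timeout).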
redis_conn = Redis()
queue = Queue('high', connection=redis_conn, default_timeout=1800)
from validator.routes import app_routes
app.register_blueprint(app_routes)
app.config.from_object(rq_dashboard.default_settings)
app.register_blueprint(rq_dashboard.blueprint, url_prefix="/rq")
|
from __future__ import unicode_literals
from django.db import models, migrations
import autoslug.fields
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='ProjectPodcast',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=512, verbose_name='Title')),
('slug', autoslug.fields.AutoSlugField(editable=True, populate_from=b'title', unique_with=(b'show',), verbose_name='Slug')),
('description', models.TextField(verbose_name='Description', blank=True)),
('pub_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Publication date')),
('image', models.ImageField(upload_to=b'shows', verbose_name='Image', blank=True)),
],
options={
'verbose_name': 'Project podcast',
'verbose_name_plural': 'Project podcasts',
},
),
migrations.CreateModel(
name='ProjectProducer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=128, verbose_name='Name')),
('slug', autoslug.fields.AutoSlugField(populate_from=b'name', editable=True, unique=True, verbose_name='Slug')),
],
options={
'verbose_name': 'Project producer',
'verbose_name_plural': 'Project producers',
},
),
migrations.CreateModel(
name='ProjectShow',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=256, verbose_name='Name')),
('slug', autoslug.fields.AutoSlugField(editable=True, populate_from=b'name', unique_with=(b'category',), verbose_name='Slug')),
('description', models.TextField(verbose_name='Description', blank=True)),
('featured', models.BooleanField(default=False, verbose_name='Featured')),
('image', models.ImageField(upload_to=b'shows', verbose_name='Image', blank=True)),
('producer', models.ForeignKey(verbose_name='Producer', to='projects.ProjectProducer')),
],
options={
'verbose_name': 'Project show',
'verbose_name_plural': 'Project shows',
},
),
migrations.AddField(
model_name='projectpodcast',
name='show',
field=models.ForeignKey(verbose_name='Show', to='projects.ProjectShow'),
),
]
|
from core.managers.containers import *
from core.managers.imports import *
from core.managers.analysis_manager import AnalysisManager
from core.managers.annotation_manager import AnnotationManager
from core.managers.file_manager import FileManager
from core.managers.filter_manager import FilterEngine
from core.managers.job_manager import JobManager
from core.managers.pipeline_manager import PipelineManager
from core.managers.project_manager import ProjectManager
from core.managers.sample_manager import SampleManager
from core.managers.user_manager import UserManager
from core.managers.search_manager import SearchManager
from core.managers.event_manager import EventManager
from core.managers.subject_manager import SubjectManager
from core.managers.admin_manager import AdminManager
from core.managers.phenotype_manager import PhenotypeManager
from core.managers.panel_manager import PanelManager
|
from __future__ import absolute_import, division, print_function
import json
import os
import sys
from six import PY3
from twisted.internet.selectreactor import SelectReactor
from twisted.internet.task import LoopingCall
from crossbar.controller import cli
from .test_cli import CLITestBase
DEBUG = False
def make_lc(self, reactor, func):
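    # Build a LoopingCall that invokes func(lc, reactor) every 0.1s on the
    # given reactor; with DEBUG set it also echoes newly captured stdout/stderr.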
if DEBUG:
self.stdout_length = 0
self.stderr_length = 0
def _(lc, reactor):
if DEBUG:
stdout = self.stdout.getvalue()
stderr = self.stderr.getvalue()
if self.stdout.getvalue()[self.stdout_length:]:
print(self.stdout.getvalue()[self.stdout_length:],
file=sys.__stdout__)
if self.stderr.getvalue()[self.stderr_length:]:
print(self.stderr.getvalue()[self.stderr_length:],
file=sys.__stderr__)
self.stdout_length = len(stdout)
self.stderr_length = len(stderr)
return func(lc, reactor)
lc = LoopingCall(_)
lc.a = (lc, reactor)
lc.clock = reactor
lc.start(0.1)
return lc
class ContainerRunningTests(CLITestBase):
def setUp(self):
CLITestBase.setUp(self)
# Set up the configuration directories
self.cbdir = os.path.abspath(self.mktemp())
os.mkdir(self.cbdir)
self.config = os.path.abspath(os.path.join(self.cbdir, "config.json"))
self.code_location = os.path.abspath(self.mktemp())
os.mkdir(self.code_location)
def _start_run(self, config, app, stdout_expected, stderr_expected,
end_on):
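        # Write the node config and component source to disk, run "crossbar start"
        # under a fresh reactor until end_on stops it, then assert that the
        # expected strings appear in the captured stdout/stderr.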
with open(self.config, "wb") as f:
f.write(json.dumps(config, ensure_ascii=False).encode('utf8'))
with open(self.code_location + "/myapp.py", "w") as f:
f.write(app)
reactor = SelectReactor()
make_lc(self, reactor, end_on)
# In case it hard-locks
reactor.callLater(self._subprocess_timeout, reactor.stop)
cli.run("crossbar",
["start",
"--cbdir={}".format(self.cbdir),
"--logformat=syslogd"],
reactor=reactor)
out = self.stdout.getvalue()
err = self.stderr.getvalue()
for i in stdout_expected:
if i not in out:
self.fail(u"Error: '{}' not in:\n{}".format(i, out))
for i in stderr_expected:
if i not in err:
self.fail(u"Error: '{}' not in:\n{}".format(i, err))
def test_start_run(self):
"""
A basic start, that enters the reactor.
"""
expected_stdout = [
"Entering reactor event loop", "Loaded the component!"
]
expected_stderr = []
def _check(lc, reactor):
if "Loaded the component!" in self.stdout.getvalue():
lc.stop()
try:
reactor.stop()
except:
pass
config = {
"controller": {
},
"workers": [
{
"type": "router",
"options": {
"pythonpath": ["."]
},
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"directory": ".",
"type": "static"
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """#!/usr/bin/env python
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp.exception import ApplicationError
class MySession(ApplicationSession):
log = Logger()
def onJoin(self, details):
self.log.info("Loaded the component!")
"""
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_start_run_guest(self):
"""
A basic start of a guest.
"""
expected_stdout = [
"Entering reactor event loop", "Loaded the component!"
]
expected_stderr = []
def _check(lc, reactor):
if "Loaded the component!" in self.stdout.getvalue():
lc.stop()
try:
reactor.stop()
except:
pass
config = {
"controller": {
},
"workers": [
{
"type": "router",
"options": {
"pythonpath": ["."]
},
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"directory": ".",
"type": "static"
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "guest",
"executable": sys.executable,
"arguments": [os.path.join(self.code_location, "myapp.py")]
}
]
}
myapp = """#!/usr/bin/env python
print("Loaded the component!")
"""
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_start_utf8_logging(self):
"""
Logging things that are UTF8 but not Unicode should work fine.
"""
expected_stdout = [
"Entering reactor event loop", u"\u2603"
]
expected_stderr = []
def _check(lc, reactor):
if u"\u2603" in self.stdout.getvalue():
lc.stop()
try:
reactor.stop()
except:
pass
config = {
"controller": {
},
"workers": [
{
"type": "router",
"options": {
"pythonpath": ["."]
},
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"directory": ".",
"type": "static"
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """#!/usr/bin/env python
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp.exception import ApplicationError
class MySession(ApplicationSession):
log = Logger()
def onJoin(self, details):
self.log.info(u"\\u2603")
"""
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_run_exception_utf8(self):
"""
Raising an ApplicationError with Unicode will raise that error through
to the caller.
"""
config = {
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """from __future__ import absolute_import, print_function
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp.exception import ApplicationError
from twisted.internet.defer import inlineCallbacks
class MySession(ApplicationSession):
log = Logger()
@inlineCallbacks
def onJoin(self, details):
def _err():
raise ApplicationError(u"com.example.error.form_error", u"\\u2603")
e = yield self.register(_err, u'com.example.err')
try:
yield self.call(u'com.example.err')
except ApplicationError as e:
assert e.args[0] == u"\\u2603"
print("Caught error:", e)
except:
print('other err:', e)
self.log.info("Loaded the component")
"""
if PY3:
expected_stdout = ["Loaded the component", "\u2603", "Caught error:"]
else:
expected_stdout = ["Loaded the component", "\\u2603", "Caught error:"]
expected_stderr = []
def _check(lc, reactor):
if "Loaded the component" in self.stdout.getvalue():
lc.stop()
try:
reactor.stop()
except:
pass
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_failure1(self):
config = {
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
class MySession(ApplicationSession):
log = Logger()
def __init__(self, config):
self.log.info("MySession.__init__()")
ApplicationSession.__init__(self, config)
def onJoin(self, details):
self.log.info("MySession.onJoin()")
"""
expected_stdout = []
expected_stderr = ["No module named"]
def _check(_1, _2):
pass
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_failure2(self):
config = {
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession2",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
class MySession(ApplicationSession):
log = Logger()
def __init__(self, config):
self.log.info("MySession.__init__()")
ApplicationSession.__init__(self, config)
def onJoin(self, details):
self.log.info("MySession.onJoin()")
"""
def _check(_1, _2):
pass
expected_stdout = []
if sys.version_info >= (3, 5):
expected_stderr = ["module 'myapp' has no attribute 'MySession2'"]
else:
expected_stderr = ["'module' object has no attribute 'MySession2'"]
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_failure3(self):
config = {
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
class MySession(ApplicationSession):
log = Logger()
def __init__(self, config):
a = 1 / 0
self.log.info("MySession.__init__()")
ApplicationSession.__init__(self, config)
def onJoin(self, details):
self.log.info("MySession.onJoin()")
"""
def _check(_1, _2):
pass
expected_stdout = []
expected_stderr = ["Component instantiation failed"]
if PY3:
expected_stderr.append("division by zero")
else:
expected_stderr.append("integer division")
expected_stderr.append("by zero")
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_failure4(self):
config = {
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
class MySession(ApplicationSession):
log = Logger()
def __init__(self, config):
self.log.info("MySession.__init__()")
ApplicationSession.__init__(self, config)
def onJoin(self, details):
self.log.info("MySession.onJoin()")
a = 1 / 0 # trigger exception
"""
def _check(_1, _2):
pass
expected_stdout = []
expected_stderr = ["Fatal error in component", "While firing onJoin"]
if PY3:
expected_stderr.append("division by zero")
else:
expected_stderr.append("integer division")
expected_stderr.append("by zero")
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_failure5(self):
config = {
"controller": {
},
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
class MySession(ApplicationSession):
log = Logger()
def __init__(self, config):
self.log.info("MySession.__init__()")
ApplicationSession.__init__(self, config)
def onJoin(self, details):
self.log.info("MySession.onJoin()")
self.leave()
def onLeave(self, details):
self.log.info("Session ended: {details}", details=details)
self.disconnect()
"""
def _check(_1, _2):
pass
expected_stdout = []
expected_stderr = [
"Component 'component1' failed to start; shutting down node."
]
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_failure6(self):
config = {
"controller": {
},
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """
from twisted.logger import Logger
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.util import sleep
class MySession(ApplicationSession):
log = Logger()
def __init__(self, config):
self.log.info("MySession.__init__()")
ApplicationSession.__init__(self, config)
@inlineCallbacks
def onJoin(self, details):
self.log.info("MySession.onJoin()")
self.log.info("Sleeping a couple of secs and then shutting down ..")
yield sleep(2)
self.leave()
def onLeave(self, details):
self.log.info("Session ended: {details}", details=details)
self.disconnect()
"""
def _check(_1, _2):
pass
expected_stdout = [
"Session ended: CloseDetails",
"Sleeping a couple of secs and then shutting down",
"Container is hosting no more components: shutting down"
]
expected_stderr = []
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_failure7(self):
config = {
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8090
},
"url": "ws://127.0.0.1:8090/ws"
}
}
]
}
]
}
myapp = """
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
class MySession(ApplicationSession):
log = Logger()
def __init__(self, config):
self.log.info("MySession.__init__()")
ApplicationSession.__init__(self, config)
def onJoin(self, details):
self.log.info("MySession.onJoin()")
self.leave()
"""
def _check(_1, _2):
pass
expected_stdout = []
expected_stderr = [
("Could not connect container component to router - transport "
"establishment failed")
]
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
class InitTests(CLITestBase):
def test_hello(self):
def _check(lc, reactor):
if "published to 'oncounter'" in self.stdout.getvalue():
lc.stop()
try:
reactor.stop()
except:
pass
appdir = self.mktemp()
cbdir = os.path.join(appdir, ".crossbar")
reactor = SelectReactor()
cli.run("crossbar",
["init",
"--appdir={}".format(appdir),
"--template=hello:python"],
reactor=reactor)
self.assertIn("Application template initialized",
self.stdout.getvalue())
reactor = SelectReactor()
make_lc(self, reactor, _check)
# In case it hard-locks
reactor.callLater(self._subprocess_timeout, reactor.stop)
cli.run("crossbar",
["start",
"--cbdir={}".format(cbdir.path),
"--logformat=syslogd"],
reactor=reactor)
stdout_expected = ["published to 'oncounter'"]
for i in stdout_expected:
self.assertIn(i, self.stdout.getvalue())
if not os.environ.get("CB_FULLTESTS"):
del ContainerRunningTests
del InitTests
|
from odoo import fields, models
SORTING_CRITERIA = [
("name", "By name"),
("product_id.name", "By product name"),
("product_id.default_code", "By product reference"),
("date_planned", "By date planned"),
("price_unit", "By price"),
("product_qty", "By quantity"),
]
SORTING_DIRECTION = [
("asc", "Ascending"),
("desc", "Descending"),
]
class ResCompany(models.Model):
_inherit = "res.company"
default_po_line_order = fields.Selection(
selection=SORTING_CRITERIA,
string="Line Order",
help="Select a sorting criteria for purchase order lines.",
)
default_po_line_direction = fields.Selection(
selection=SORTING_DIRECTION,
string="Sort Direction",
help="Select a sorting direction for purchase order lines.",
)
|
import os
import gobject
import json
import shutil
import tarfile
from glue.paths import INSTALLER
from data.skarphed.Skarphed import AbstractInstaller, AbstractDestroyer
from glue.lng import _
from glue.paths import COREFILES
import logging
TARGETNAME = "Debian 7 / nginx"
EXTRA_PARAMS = {
'nginx.domain':(_('Domain'),_('example.org or leave empty')),
'nginx.subdomain':(_('Subdomain'),_('sub.example.org or leave empty')),
'nginx.port':(_('Port'),_('80'))
}
class Installer(AbstractInstaller):
def execute_installation(self):
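        # Build the deployment bundle locally (nginx/uwsgi/skarphed configs plus
        # the web and lib trees), upload it over SFTP and run install.sh on the
        # target server; progress is reported via self.status and gobject.idle_add.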
os.mkdir(self.BUILDPATH)
p = os.path.dirname(os.path.realpath(__file__))
nginx_template = open(os.path.join(p,"nginx.conf"),"r").read()
nginx_domain = ""
domainlineterm = ""
if self.data['nginx.port'] == "":
self.data['nginx.port'] = "80"
if self.data['nginx.domain'] != "":
nginx_domain = "server_name "+self.data['nginx.domain']
self.domain = self.data['nginx.domain']
domainlineterm = ";"
nginx_subdomain = ""
if self.data['nginx.subdomain'] != "":
nginx_subdomain = "alias "+self.data['nginx.subdomain']
domainlineterm = ";"
nginxconf = nginx_template%{'port':self.data['nginx.port'],
'domain':nginx_domain,
'subdomain':nginx_subdomain,
'domainlineterm':domainlineterm}
nginxconfresult = open(os.path.join(self.BUILDPATH,"nginx.conf"),"w")
nginxconfresult.write(nginxconf)
nginxconfresult.close()
self.status = 10
gobject.idle_add(self.updated)
scv_config = {}
for key,val in self.data.items():
if key.startswith("core.") or key.startswith("db."):
if key == "db.name":
scv_config[key] = val+".fdb"
continue
scv_config[key] = val
scv_config_defaults = {
"core.session_duration":2,
"core.session_extend":1,
"core.cookielaw":1,
"core.debug":True
}
scv_config.update(scv_config_defaults)
jenc = json.JSONEncoder()
config_json = open(os.path.join(self.BUILDPATH,"config.json"),"w")
config_json.write(jenc.encode(scv_config))
config_json.close()
shutil.copyfile(os.path.join(p,"skarphed.conf"), os.path.join(self.BUILDPATH,"skarphed.conf"))
shutil.copyfile(os.path.join(p,"install.sh"), os.path.join(self.BUILDPATH,"install.sh"))
shutil.copyfile(os.path.join(p,"uwsgi.conf"), os.path.join(self.BUILDPATH,"uwsgi.conf"))
self.status = 30
gobject.idle_add(self.updated)
shutil.copytree(os.path.join(COREFILES,"web"), os.path.join(self.BUILDPATH, "web"))
shutil.copytree(os.path.join(COREFILES,"lib"), os.path.join(self.BUILDPATH,"lib"))
tar = tarfile.open(os.path.join(self.BUILDPATH,"scv_install.tar.gz"),"w:gz")
tar.add(os.path.join(self.BUILDPATH,"nginx.conf"))
tar.add(os.path.join(self.BUILDPATH,"uwsgi.conf"))
tar.add(os.path.join(self.BUILDPATH,"config.json"))
tar.add(os.path.join(self.BUILDPATH,"skarphed.conf"))
tar.add(os.path.join(self.BUILDPATH,"install.sh"))
tar.add(os.path.join(self.BUILDPATH,"web"))
tar.add(os.path.join(self.BUILDPATH,"lib"))
tar.close()
self.status = 45
gobject.idle_add(self.updated)
con = self.server.getSSH()
con_stdin, con_stdout, con_stderr = con.exec_command("mkdir /tmp/scvinst"+str(self.installationId))
self.status = 50
gobject.idle_add(self.updated)
con = self.server.getSSH()
ftp = con.open_sftp()
ftp.put(os.path.join(self.BUILDPATH,"scv_install.tar.gz"),"/tmp/scvinst"+str(self.installationId)+"/scv_install.tar.gz")
ftp.close()
self.status = 65
gobject.idle_add(self.updated)
con = self.server.getSSH()
con_stdin, con_stdout, con_stderr = con.exec_command("cd /tmp/scvinst"+str(self.installationId)+"; tar xvfz scv_install.tar.gz -C / ; chmod 755 install.sh ; ./install.sh ")
output = con_stdout.read()
logging.debug("SSH-outputlength: %d"%len(output))
logging.debug(output)
shutil.rmtree(self.BUILDPATH)
self.status = 100
gobject.idle_add(self.updated)
gobject.idle_add(self.addInstanceToServer)
class Destroyer(AbstractDestroyer):
def execute_destruction(self):
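        # Upload teardown.sh over SFTP and execute it for this instance id,
        # reporting progress via self.status.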
p = os.path.dirname(os.path.realpath(__file__))
server = self.instance.getServer()
self.status = 10
gobject.idle_add(self.updated)
con = server.getSSH()
ftp = con.open_sftp()
ftp.put(os.path.join(p,"teardown.sh"),"/tmp/teardown.sh")
ftp.close()
self.status = 30
gobject.idle_add(self.updated)
con = server.getSSH()
con_stdin, con_stdout, con_stderr = con.exec_command("cd /tmp/ ; chmod 755 teardown.sh ; ./teardown.sh %d "%self.instanceid)
logging.debug(con_stdout.read())
self.status = 100
        gobject.idle_add(self.updated)
gobject.idle_add(self.removeInstanceFromServer)
|
from documents.models import Document
from categories.models import Category
import shutil
def move_doc(doc_id, cat_id):
doc = Document.objects.get(pk=int(doc_id))
old_cat = doc.refer_category
new_cat = Category.objects.get(pk=int(cat_id))
for p in doc.pages.all():
cmd = "mv " + p.get_absolute_path() + " " + new_cat.get_absolute_path() + "/"
os.system(cmd)
doc.refer_category = new_cat
doc.save()
old_cat.documents.remove(doc)
new_cat.documents.add(doc)
|
from pyramid.view import view_config
import logging
import pysite.plugins.models
import pysite.resmgr
L = logging.getLogger('PySite')
@view_config(
name='',
context=pysite.plugins.models.Node,
renderer='pysite:plugins/templates/index.mako',
permission='admin'
)
def index(context, request):
return dict()
|
from odoo import _, fields, models
class ResPartner(models.Model):
_inherit = "res.partner"
_allowed_inactive_link_models = ["res.partner"]
_inactive_cascade = True
sta_mandate_ids = fields.One2many(
comodel_name="sta.mandate",
inverse_name="partner_id",
string="State Mandates",
domain=[("active", "=", True)],
context={"force_recompute": True},
)
sta_mandate_inactive_ids = fields.One2many(
comodel_name="sta.mandate",
inverse_name="partner_id",
string="State Mandates (Inactive)",
domain=[("active", "=", False)],
)
int_mandate_ids = fields.One2many(
comodel_name="int.mandate",
inverse_name="partner_id",
string="Internal Mandates",
domain=[("active", "=", True)],
context={"force_recompute": True},
)
int_mandate_inactive_ids = fields.One2many(
comodel_name="int.mandate",
inverse_name="partner_id",
string="Internal Mandates (Inactive)",
domain=[("active", "=", False)],
)
ext_mandate_ids = fields.One2many(
comodel_name="ext.mandate",
inverse_name="partner_id",
string="External Mandates",
domain=[("active", "=", True)],
context={"force_recompute": True},
)
ext_mandate_inactive_ids = fields.One2many(
comodel_name="ext.mandate",
inverse_name="partner_id",
string="External Mandates (Inactive)",
domain=[("active", "=", False)],
)
ext_mandate_count = fields.Integer(
string="External Mandates Nbr", compute="_compute_mandate_assembly_count"
)
ext_assembly_count = fields.Integer(
string="External Assemblies", compute="_compute_mandate_assembly_count"
)
def get_mandate_action(self):
"""
        Return an action for ext.mandate whose domain contains the
        tuples selecting the concerned mandates
"""
self.ensure_one()
res_ids = self._get_assemblies()._get_mandates().ids
domain = [("id", "in", res_ids)]
# get model's action to update its domain
action = self.env["ir.actions.act_window"]._for_xml_id(
"mozaik_mandate.ext_mandate_action"
)
action["domain"] = domain
return action
def _get_assemblies(self):
"""
return the assemblies of the current partner
"""
self.ensure_one()
assembly_model = "ext.assembly"
if self.is_assembly:
field = "partner_id"
else:
field = "ref_partner_id"
domain = [(field, "=", self.id)]
assembly_obj = self.env[assembly_model]
assemblies = assembly_obj.search(domain)
return assemblies
def _compute_mandate_assembly_count(self):
"""
        Count the number of assemblies linked to the current partner
        and the number of mandates linked to those assemblies
"""
for partner in self:
assemblies = partner._get_assemblies()
partner.ext_assembly_count = len(assemblies)
partner.ext_mandate_count = len(assemblies._get_mandates())
def add_mandate_action(self):
self.ensure_one()
return {
"type": "ir.actions.act_window",
"name": _("Add a new mandate"),
"res_model": self._context.get("mandate_model"),
"context": {"default_partner_id": self.id},
"view_mode": "form",
"target": "new",
}
|
from default import Test, with_context
from factories import reset_all_pk_sequences
class TestAPI(Test):
endpoints = ['app', 'task', 'taskrun', 'user']
|
from odoo import models, fields, api
class AccountGroup(models.Model):
_inherit = 'account.group'
length_account = fields.Integer(
string='Length account', compute='_compute_length_account',
store=True)
without_headquarter = fields.Boolean(
string='Without headquarter in invoices and accounting entries',
default=True)
@api.depends('code_prefix_start')
def _compute_length_account(self):
for group in self:
group.length_account = len(group.code_prefix_start)
def _find_account_group_headquarter(self):
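        # Walk up the account group hierarchy; the topmost group decides
        # whether the headquarter is omitted.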
found = False
group = self
while not found:
if not group.parent_id:
found = True
without_headquarter_control = group.without_headquarter
else:
cond = [('id', '=', group.parent_id.id)]
group = self.env['account.group'].search(cond, limit=1)
return without_headquarter_control
|
from openerp import fields, models, api
from openerp.addons.event_track_assistant._common import\
_convert_to_utc_date, _convert_to_local_date, _convert_time_to_float
date2string = fields.Date.to_string
datetime2string = fields.Datetime.to_string
str2datetime = fields.Datetime.from_string
class WizEventAppendAssistant(models.TransientModel):
_inherit = 'wiz.event.append.assistant'
type_hour = fields.Many2one(
comodel_name='hr.type.hour', string='Type hour')
start_time = fields.Float(string='Start time', default=0.0)
end_time = fields.Float(string='End time', default=0.0)
@api.model
def default_get(self, var_fields):
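        # Derive default start/end times from the wizard's min/max dates,
        # converted using the user's timezone.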
tz = self.env.user.tz
res = super(WizEventAppendAssistant, self).default_get(var_fields)
res.update({
'start_time': _convert_time_to_float(
_convert_to_utc_date(res.get('min_from_date'), tz=tz), tz=tz),
'end_time': _convert_time_to_float(
_convert_to_utc_date(res.get('max_to_date'), tz=tz), tz=tz),
})
return res
@api.multi
@api.onchange('from_date', 'start_time', 'to_date', 'end_time', 'partner')
def onchange_dates_and_partner(self):
self.ensure_one()
res = super(WizEventAppendAssistant, self).onchange_dates_and_partner()
return res
def revert_dates(self):
tz = self.env.user.tz
super(WizEventAppendAssistant, self).revert_dates()
self.start_time = _convert_time_to_float(_convert_to_utc_date(
self.min_from_date, tz=tz), tz=tz)
self.end_time = _convert_time_to_float(_convert_to_utc_date(
self.max_to_date, tz=tz), tz=tz)
def _update_registration_start_date(self, registration):
super(WizEventAppendAssistant, self)._update_registration_start_date(
registration)
reg_date_start = str2datetime(registration.date_start)
if self.start_time:
wiz_from_date = _convert_to_utc_date(
self.from_date, time=self.start_time, tz=self.env.user.tz)
if wiz_from_date != reg_date_start:
registration.date_start = wiz_from_date
def _update_registration_date_end(self, registration):
super(WizEventAppendAssistant, self)._update_registration_date_end(
registration)
reg_date_end = str2datetime(registration.date_end)
if self.end_time:
wiz_to_date = _convert_to_utc_date(
self.to_date, time=self.end_time, tz=self.env.user.tz)
if wiz_to_date != reg_date_end:
registration.date_end = wiz_to_date
def _prepare_registration_data(self, event):
vals = super(WizEventAppendAssistant,
self)._prepare_registration_data(event)
date_start = _convert_to_local_date(self.from_date).date()
date_start = _convert_to_utc_date(
date_start, time=self.start_time, tz=self.env.user.tz)
date_end = _convert_to_local_date(self.to_date).date()
date_end = _convert_to_utc_date(
date_end, time=self.end_time, tz=self.env.user.tz)
vals.update({
'date_start': event.date_begin
if datetime2string(date_start) < event.date_begin else date_start,
'date_end': event.date_end
if datetime2string(date_end) > event.date_end else date_end,
})
return vals
def _calc_dates_for_search_track(self, from_date, to_date):
super(WizEventAppendAssistant,
self)._calc_dates_for_search_track(from_date, to_date)
from_date = self._prepare_date_for_control(
from_date, time=self.start_time or 0.0)
to_date = self._prepare_date_for_control(
to_date, time=self.end_time or 24.0)
return from_date, to_date
|
from models import *
from django.contrib.auth.hashers import check_password
from django.contrib.auth.models import User as django_User
from datetime import datetime
from django import forms
from django.contrib.gis.geos import Point
class LoginForm(forms.ModelForm):
class Meta:
model = User
widgets = {
'mail': forms.EmailInput(attrs={'aria-invalid': 'true', 'pattern': 'email', 'required': 'required'}),
}
exclude = ['name', 'firstname', 'sex', 'city', 'zipCode', 'phone', 'idHomeAddress', 'idWorkAddress']
class EmailAuthBackend(object):
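    # Authentication backend matching users by e-mail address instead of username.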
def authenticate(self,username=None, password=None):
try:
user = django_User.objects.get(email=username)
if user and check_password(password, user.password):
return user
except django_User.DoesNotExist:
return None
def authenticate2(self,username=None, password=None):
try:
user = Provider.objects.filter(idUser__mail__contains=username).first()
if user and (check_password(password, user.password)):
return user
except User.DoesNotExist:
return None
def auth_email(self, username=None):
try:
user = Provider.objects.filter(idUser__mail__contains=username).first()
if user:
return user
except User.DoesNotExist:
return None
def auth_email2(self, username=None):
try:
user = django_User.objects.get(email=username)
if user:
return user
except User.DoesNotExist:
return None
class ContactForm(forms.Form):
firstname = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'required': 'required'}))
lastname = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'required': 'required'}))
phone = forms.CharField(widget=forms.TextInput(
attrs={'maxlength': '10', 'aria-invalid': 'true', 'pattern': 'phone', 'required': 'required'}))
sender = forms.EmailField(widget=forms.EmailInput(attrs={'aria-invalid': 'false', 'pattern': 'email'}), required=False)
subjectCHOICES = (('Demandeur','Je cherche un trajet'),('Offreur','Je souhaite proposer un trajet'),
('Infos','Informations diverses'),('Autre','Autre'))
subject = forms.ChoiceField(choices=subjectCHOICES)
goalOfApplicationCHOICES = [('', '')] + list(MenusSettings.objects.filter(type="goalOfApplication").values_list('string', 'string'))
goalOfApplication = forms.ChoiceField(widget=forms.Select(attrs={'required':'required'}), choices=goalOfApplicationCHOICES, required=False)
yearOfBirthCHOICES = (tuple((str(n), str(n)) for n in range(1900, datetime.now().year - 15))+(('',''),))[::-1]
yearOfBirth = forms.ChoiceField(widget=forms.Select(attrs={'required':'required'}), choices=yearOfBirthCHOICES, required=False)
message = forms.CharField(widget=forms.Textarea(attrs={'required': 'required'}))
def __init__(self, *args, **kwargs):
super(ContactForm, self).__init__(*args, **kwargs)
self.fields['goalOfApplication'].choices = get_menus_settings('goalOfApplication')
def get_menus_settings(type, required=True):
if required:
return [('', '')] + list(MenusSettings.objects.filter(type=type).values_list('string', 'string'))
else:
return list(MenusSettings.objects.filter(type=type).values_list('string', 'string'))
class UserRegisterForm(forms.ModelForm):
class Meta:
model = User
widgets = {
'name': forms.TextInput(attrs={'required': 'required'}),
'firstname': forms.TextInput(attrs={'required': 'required'}),
'sex': forms.RadioSelect(attrs={'required': 'required'}),
'city': forms.TextInput(attrs={'required': 'required'}),
'zipCode': forms.TextInput(attrs={'maxlength': '5', 'aria-invalid': 'true', 'pattern': 'zipCode',
'required': 'required'}),
'mail': forms.EmailInput(attrs={'aria-invalid': 'true', 'pattern': 'email', 'required': 'required'}),
'phone': forms.TextInput(attrs={'maxlength': '10', 'aria-invalid': 'true',
'pattern': 'phone', 'required': 'required'}),
}
exclude = ['idHomeAddress', 'idWorkAddress']
class ProviderRegisterForm(forms.ModelForm):
class Meta:
model = Provider
howKnowledgeCHOICES = get_menus_settings('howKnowledge')
widgets = {
'password': forms.PasswordInput(attrs={'id': 'password', 'required': 'required'}),
'company': forms.TextInput(attrs={'list':'datalistCompany', 'autocomplete':'off'}),
'howKnowledge': forms.Select(attrs={'required':'required'}, choices=howKnowledgeCHOICES)
}
exclude = ['idUser', 'is_active', 'last_login']
def __init__(self, *args, **kwargs):
super(ProviderRegisterForm, self).__init__(*args, **kwargs)
self.fields['howKnowledge'].choices = get_menus_settings('howKnowledge')
class ProviderForm2(forms.ModelForm):
class Meta:
model = Provider
howKnowledgeCHOICES = [('','')] + list(MenusSettings.objects.filter(type="howKnowledge").values_list('string', 'string'))
widgets = {
'company': forms.TextInput(attrs={'list': 'datalistCompany', 'autocomplete': 'off'}),
'howKnowledge': forms.Select(attrs={'required': 'required'}, choices=howKnowledgeCHOICES)
}
exclude = ['idUser', 'is_active', 'last_login', 'password']
def __init__(self, *args, **kwargs):
super(ProviderForm2, self).__init__(*args, **kwargs)
self.fields['howKnowledge'].choices = get_menus_settings('howKnowledge')
class AddressRegisterForm(forms.ModelForm):
latlng = forms.CharField(widget=forms.HiddenInput(), required=False,)
cityHide = forms.CharField(widget=forms.HiddenInput(), required=False,)
zipCodeHide = forms.CharField(widget=forms.HiddenInput(), required=False,)
class Meta:
model = Address
widgets = {
'street':forms.TextInput(attrs={'class': 'field', 'placeholder': 'Indiquez un lieu',
'autocomplete': 'on', 'required': 'required'}),
}
exclude = ['idAddress', 'point', 'city', 'zipCode']
def clean(self):
cleaned_data = super(AddressRegisterForm, self).clean()
coord = cleaned_data['latlng'].replace('(', '')
city = cleaned_data['cityHide']
zipcode = cleaned_data['zipCodeHide']
if city == "":
city = "undefined"
if zipcode == "undefined" or zipcode == "":
zipcode = 0
if coord == "" or coord == "undefined":
raise forms.ValidationError("Bad address")
coord = coord.replace(')', '')
coordTab = coord.split(',')
cleaned_data['point'] = 'POINT(%f %f)' % (float(coordTab[0]), float(coordTab[1]))
cleaned_data['city'] = city
cleaned_data['zipCode'] = zipcode
return cleaned_data
class AddressRegisterFormWork(forms.ModelForm):
latlng = forms.CharField(widget=forms.HiddenInput(), required=False,)
cityHide = forms.CharField(widget=forms.HiddenInput(), required=False,)
zipCodeHide = forms.CharField(widget=forms.HiddenInput(), required=False,)
class Meta:
model = Address
widgets = {
'street': forms.TextInput(attrs={'class': 'field', 'placeholder': 'Indiquez un lieu', 'autocomplete': 'on',
'required': 'required'}),
}
exclude = ['idAddress', 'point', 'city', 'zipCode']
def clean(self):
cleaned_data = super(AddressRegisterFormWork, self).clean()
coord = cleaned_data['latlng'].replace('(', '')
city = cleaned_data['cityHide']
zipcode = cleaned_data['zipCodeHide']
if city == "":
city = "undefined"
if zipcode == "undefined" or zipcode == "":
zipcode = 0
if coord == "" or coord == "undefined":
raise forms.ValidationError("Bad address")
coord = coord.replace(')', '')
coordtab = coord.split(',')
cleaned_data['point'] = 'POINT(%f %f)' % (float(coordtab[0]), float(coordtab[1]))
cleaned_data['city'] = city
        cleaned_data['zipCode'] = zipcode
return cleaned_data
class PathDepartureRegisterForm(forms.ModelForm):
class Meta:
model = Path
widgets = {
'type': forms.HiddenInput(),
'day': forms.HiddenInput(),
'weekNumber': forms.HiddenInput(),
'schedule': forms.TimeInput(attrs={'class': 'time', 'data-format': 'HH:mm', 'data-template': 'HH : mm',
'value': '08:00'}),
}
exclude = ['idPath', 'idProvider', 'departure', 'arrival', 'startingWeek']
class PathArrivalRegisterForm(forms.ModelForm):
class Meta:
model = Path
widgets = {
'type': forms.HiddenInput(),
'day': forms.HiddenInput(),
'weekNumber': forms.HiddenInput(),
'schedule': forms.TimeInput(attrs={'class': 'time', 'data-format': 'HH:mm', 'data-template': 'HH : mm',
'value':'18:00'}),
}
exclude = ['idPath', 'idProvider', 'departure', 'arrival', 'startingWeek']
class TestUserRegisterForm(forms.ModelForm):
class Meta:
model = User
widgets = {
'name': forms.TextInput(attrs={'required': 'required'}),
'firstname': forms.TextInput(attrs={'required': 'required'}),
'city': forms.TextInput(attrs={'required': 'required'}),
'zipCode': forms.TextInput(attrs={'maxlength': '5', 'aria-invalid': 'true', 'pattern': 'zipCode', 'required': 'required'}),
'mail': forms.EmailInput(attrs={'aria-invalid': 'true', 'pattern': 'email', 'required': 'required'}),
'phone': forms.TextInput(attrs={'maxlength': '10', 'aria-invalid': 'true', 'pattern': 'phone', 'required': 'required'}),
}
exclude = ['idHomeAddress', 'idWorkAddress', 'sex']
class newMdpForm(forms.Form):
oldmdp = forms.CharField(widget=forms.PasswordInput(), label='Ancien mot de passe', required=True)
newmdp1 = forms.CharField(widget=forms.PasswordInput(), label='Nouveau mot de passe', required=True)
|
from abc import ABCMeta, abstractmethod
import numbers
import copy
import random
import numpy as np
from nupic.data.fieldmeta import FieldMetaType
import nupic.math.roc_utils as roc
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.frameworks.opf.opfutils import InferenceType
from nupic.utils import MovingAverage
from collections import deque
from operator import itemgetter
from safe_interpreter import SafeInterpreter
from io import BytesIO, StringIO
from functools import partial
class MetricSpec(object):
""" This class represents a single Metrics specification in the TaskControl
block
"""
_LABEL_SEPARATOR = ":"
def __init__(self, metric, inferenceElement, field=None, params=None):
"""
metric: A metric type name that identifies which metrics module is
to be constructed by the metrics factory method
opf.metrics.getModule(); e.g., "rmse"
inferenceElement: Some inference types (such as classification), can output
more than one type of inference (i.e. the predicted class
AND the predicted next step). This field specifies which
of these inferences to compute the metrics on
field: Field name on which this metric is to be collected
params: Custom parameters dict for the metrics module's constructor
"""
self.metric = metric
self.inferenceElement = inferenceElement
self.field = field
self.params = params
return
def __repr__(self):
return "{0!s}(metric={1!r}, inferenceElement={2!r}, field={3!r}, params={4!r})".format(self.__class__.__name__,
self.metric,
self.inferenceElement,
self.field,
self.params)
def getLabel(self, inferenceType=None):
""" Helper method that generates a unique label
for a MetricSpec / InferenceType pair. The label is formatted
as follows:
<predictionKind>:<metric type>:(paramName=value)*:field=<fieldname>
For example:
classification:aae:paramA=10.2:paramB=20:window=100:field=pounds
"""
result = []
if inferenceType is not None:
result.append(InferenceType.getLabel(inferenceType))
result.append(self.inferenceElement)
result.append(self.metric)
params = self.params
if params is not None:
      sortedParams = sorted(params.keys())
      for param in sortedParams:
        # Don't include the customFuncSource - it is too long and unwieldy
if param in ('customFuncSource', 'customFuncDef', 'customExpr'):
continue
value = params[param]
if isinstance(value, str):
result.extend(["{0!s}='{1!s}'".format(param, value)])
else:
result.extend(["{0!s}={1!s}".format(param, value)])
if self.field:
result.append("field={0!s}".format((self.field)) )
return self._LABEL_SEPARATOR.join(result)
@classmethod
def getInferenceTypeFromLabel(cls, label):
""" Extracts the PredicitonKind (temporal vs. nontemporal) from the given
metric label
Parameters:
-----------------------------------------------------------------------
label: A label (string) for a metric spec generated by getMetricLabel
(above)
Returns: An InferenceType value
"""
    infType, _, _ = label.partition(cls._LABEL_SEPARATOR)
if not InferenceType.validate(infType):
return None
return infType
def getModule(metricSpec):
"""
factory method to return an appropriate MetricsIface-based module
args:
metricSpec - an instance of MetricSpec.
                 metricSpec.metric must be one of the metric names handled
                 below, for example:
                   rmse (root-mean-square error)
                   aae (average absolute error)
                   acc (accuracy, for enumerated types)
return:
an appropriate Metric module
"""
metricName = metricSpec.metric
if metricName == 'rmse':
return MetricRMSE(metricSpec)
  elif metricName == 'nrmse':
return MetricNRMSE(metricSpec)
elif metricName == 'aae':
return MetricAAE(metricSpec)
elif metricName == 'acc':
return MetricAccuracy(metricSpec)
elif metricName == 'avg_err':
return MetricAveError(metricSpec)
elif metricName == 'trivial':
return MetricTrivial(metricSpec)
elif metricName == 'two_gram':
return MetricTwoGram(metricSpec)
elif metricName == 'moving_mean':
return MetricMovingMean(metricSpec)
elif metricName == 'moving_mode':
return MetricMovingMode(metricSpec)
elif metricName == 'neg_auc':
return MetricNegAUC(metricSpec)
elif metricName == 'custom_error_metric':
return CustomErrorMetric(metricSpec)
elif metricName == 'multiStep':
return MetricMultiStep(metricSpec)
elif metricName == 'multiStepProbability':
return MetricMultiStepProbability(metricSpec)
elif metricName == 'ms_aae':
return MetricMultiStepAAE(metricSpec)
elif metricName == 'ms_avg_err':
return MetricMultiStepAveError(metricSpec)
elif metricName == 'passThruPrediction':
return MetricPassThruPrediction(metricSpec)
elif metricName == 'altMAPE':
return MetricAltMAPE(metricSpec)
elif metricName == 'MAPE':
return MetricMAPE(metricSpec)
elif metricName == 'multi':
return MetricMulti(metricSpec)
elif metricName == 'negativeLogLikelihood':
return MetricNegativeLogLikelihood(metricSpec)
else:
raise Exception("Unsupported metric type: {0!s}".format(metricName))
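# Example usage (a minimal sketch; 'prediction' and 'consumption' are
# placeholder inference-element and field names):
#
#   spec = MetricSpec(metric='aae', inferenceElement='prediction',
#                     field='consumption', params={'window': 200})
#   metric = getModule(spec)
#   metric.addInstance(groundTruth=10.0, prediction=12.5)
#   print metric.getMetric()   # {'value': 2.5, 'stats': {'steps': 1}}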
class _MovingMode(object):
""" Helper class for computing windowed moving
mode of arbitrary values """
def __init__(self, windowSize = None):
"""
Parameters:
-----------------------------------------------------------------------
      windowSize: The number of values that are used to compute the
                  moving mode
"""
self._windowSize = windowSize
self._countDict = dict()
self._history = deque([])
def __call__(self, value):
if len(self._countDict) == 0:
pred = ""
else:
pred = max(self._countDict.items(), key = itemgetter(1))[0]
# Update count dict and history buffer
self._history.appendleft(value)
    if value not in self._countDict:
self._countDict[value] = 0
self._countDict[value] += 1
if len(self._history) > self._windowSize:
removeElem = self._history.pop()
self._countDict[removeElem] -= 1
assert(self._countDict[removeElem] > -1)
return pred
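# Example (a minimal sketch): the returned mode reflects only the values
# seen in *previous* calls, so the current value shows up one call later:
#
#   mode = _MovingMode(windowSize=3)
#   mode('a')   # -> "" (no history yet)
#   mode('b')   # -> 'a'
#   mode('b')   # -> 'a' or 'b' (both have count 1 before this call)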
def _isNumber(value):
return isinstance(value, (numbers.Number, np.number))
class MetricsIface(object):
"""
A Metrics module compares a prediction Y to corresponding ground truth X and returns a single
measure representing the "goodness" of the prediction. It is up to the implementation to
determine how this comparison is made.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, metricSpec):
"""
instantiate a MetricsIface-based module.
args:
metricSpec is an instance of MetricSpec
"""
@abstractmethod
def addInstance(self, groundTruth, prediction, record = None, result = None):
""" add one instance consisting of ground truth and a prediction.
Parameters:
-----------------------------------------------------------------------
groundTruth:
The actual measured value at the current timestep
prediction:
The value predicted by the network at the current timestep
    record:
      The raw input record for the current timestep (optional; some metrics
      use it to look up field values)
    result:
      A ModelResult object (see opfutils.py)
return:
The average error as computed over the metric's window size
"""
@abstractmethod
def getMetric(self):
"""
return:
{value : <current measurement>, "stats" : {<stat> : <value> ...}}
metric name is defined by the MetricIface implementation. stats is expected to contain further
information relevant to the given metric, for example the number of timesteps represented in
the current measurement. all stats are implementation defined, and "stats" can be None
"""
class AggregateMetric(MetricsIface):
"""
Partial implementation of Metrics Interface for metrics that
accumulate an error and compute an aggregate score, potentially
over some window of previous data. This is a convenience class that
can serve as the base class for a wide variety of metrics
"""
  __metaclass__ = ABCMeta
#FIXME @abstractmethod - this should be marked abstract method and required to be implemented
def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result):
"""
Updates the accumulated error given the prediction and the
ground truth.
groundTruth: Actual value that is observed for the current timestep
prediction: Value predicted by the network for the given timestep
accumulatedError: The total accumulated score from the previous
predictions (possibly over some finite window)
historyBuffer: A buffer of the last <self.window> ground truth values
that have been observed.
If historyBuffer = None, it means that no history is being kept.
result: An ModelResult class (see opfutils.py), used for advanced
metric calculation (e.g., MetricNegativeLogLikelihood)
retval:
The new accumulated error. That is:
self.accumulatedError = self.accumulate(groundTruth, predictions, accumulatedError)
historyBuffer should also be updated in this method.
self.spec.params["window"] indicates the maximum size of the window
"""
#FIXME @abstractmethod - this should be marked abstract method and required to be implemented
def aggregate(self, accumulatedError, historyBuffer, steps):
"""
Updates the final aggregated score error given the prediction and the
ground truth.
accumulatedError: The total accumulated score from the previous
predictions (possibly over some finite window)
historyBuffer: A buffer of the last <self.window> ground truth values
that have been observed.
If historyBuffer = None, it means that no history is being kept.
steps: The total number of (groundTruth, prediction) pairs that have
been passed to the metric. This does not include pairs where
the groundTruth = SENTINEL_VALUE_FOR_MISSING_DATA
retval:
The new aggregate (final) error measure.
"""
def __init__(self, metricSpec):
""" Initialize this metric
If the params contains the key 'errorMetric', then that is the name of
another metric to which we will pass a modified groundTruth and prediction
to from our addInstance() method. For example, we may compute a moving mean
on the groundTruth and then pass that to the AbsoluteAveError metric
"""
# Init default member variables
self.id = None
self.verbosity = 0
self.window = -1
self.history = None
self.accumulatedError = 0
self.aggregateError = None
self.steps = 0
self.spec = metricSpec
self.disabled = False
# Number of steps ahead we are trying to predict. This is a list of
    # the prediction steps we are processing
self._predictionSteps = [0]
# Where we store the ground truth history
self._groundTruthHistory = deque([])
# The instances of another metric to which we will pass a possibly modified
# groundTruth and prediction to from addInstance(). There is one instance
# for each step present in self._predictionSteps
self._subErrorMetrics = None
# The maximum number of records to process. After this many records have
# been processed, the metric value never changes. This can be used
# as the optimization metric for swarming, while having another metric without
# the maxRecords limit to get an idea as to how well a production model
# would do on the remaining data
self._maxRecords = None
# Parse the metric's parameters
if metricSpec is not None and metricSpec.params is not None:
self.id = metricSpec.params.get('id', None)
self._predictionSteps = metricSpec.params.get('steps', [0])
# Make sure _predictionSteps is a list
if not hasattr(self._predictionSteps, '__iter__'):
self._predictionSteps = [self._predictionSteps]
self.verbosity = metricSpec.params.get('verbosity', 0)
self._maxRecords = metricSpec.params.get('maxRecords', None)
# Get the metric window size
if 'window' in metricSpec.params:
assert metricSpec.params['window'] >= 1
self.history = deque([])
self.window = metricSpec.params['window']
# Get the name of the sub-metric to chain to from addInstance()
if 'errorMetric' in metricSpec.params:
self._subErrorMetrics = []
for step in self._predictionSteps:
subSpec = copy.deepcopy(metricSpec)
# Do all ground truth shifting before we pass onto the sub-metric
subSpec.params.pop('steps', None)
subSpec.params.pop('errorMetric')
subSpec.metric = metricSpec.params['errorMetric']
self._subErrorMetrics.append(getModule(subSpec))
def _getShiftedGroundTruth(self, groundTruth):
""" Utility function that saves the passed in groundTruth into a local
history buffer, and returns the groundTruth from self._predictionSteps ago,
where self._predictionSteps is defined by the 'steps' parameter.
This can be called from the beginning of a derived class's addInstance()
before it passes groundTruth and prediction onto accumulate().
"""
# Save this ground truth into our input history
self._groundTruthHistory.append(groundTruth)
# This is only supported when _predictionSteps has one item in it
assert (len(self._predictionSteps) == 1)
# Return the one from N steps ago
if len(self._groundTruthHistory) > self._predictionSteps[0]:
return self._groundTruthHistory.popleft()
else:
if hasattr(groundTruth, '__iter__'):
return [None] * len(groundTruth)
else:
return None
def addInstance(self, groundTruth, prediction, record = None, result = None):
# This base class does not support time shifting the ground truth or a
# subErrorMetric.
assert (len(self._predictionSteps) == 1)
assert self._predictionSteps[0] == 0
assert self._subErrorMetrics is None
# If missing data,
if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA or prediction is None:
return self.aggregateError
if self.verbosity > 0:
print "groundTruth:\n{0!s}\nPredictions:\n{1!s}\n{2!s}\n".format(groundTruth,
prediction, self.getMetric())
# Ignore if we've reached maxRecords
if self._maxRecords is not None and self.steps >= self._maxRecords:
return self.aggregateError
    # This base-class implementation does not chain into a sub-metric
    # (see the asserts above)
    # Accumulate the error
self.accumulatedError = self.accumulate(groundTruth, prediction,
self.accumulatedError, self.history, result)
self.steps += 1
return self._compute()
def getMetric(self):
return {'value': self.aggregateError, "stats" : {"steps" : self.steps}}
def _compute(self):
self.aggregateError = self.aggregate(self.accumulatedError, self.history,
self.steps)
return self.aggregateError
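# A minimal illustrative sketch (MetricMaxErr is hypothetical, not part of
# this module): a concrete metric only needs accumulate() and aggregate();
# missing-data handling and step counting come from this base class:
#
#   class MetricMaxErr(AggregateMetric):
#     def accumulate(self, groundTruth, prediction, accumulatedError,
#                    historyBuffer, result=None):
#       return max(accumulatedError, abs(groundTruth - prediction))
#     def aggregate(self, accumulatedError, historyBuffer, steps):
#       return accumulatedError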
class MetricNegativeLogLikelihood(AggregateMetric):
"""
computes negative log-likelihood. Likelihood is the predicted probability of
the true data from a model. It is more powerful than metrics that only considers
the single best prediction (e.g. MSE) as it considers the entire probability
distribution predicted by a model.
It is more appropriate to use likelihood as the error metric when multiple
predictions are possible.
"""
def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result):
bucketll = result.inferences['multiStepBucketLikelihoods']
bucketIdxTruth = result.classifierInput.bucketIndex
if bucketIdxTruth is not None:
# a manually set minimum prediction probability so that the log(LL) doesn't blow up
minProb = 0.00001
negLL = 0
for step in bucketll.keys():
outOfBucketProb = 1 - sum(bucketll[step].values())
if bucketIdxTruth in bucketll[step].keys():
prob = bucketll[step][bucketIdxTruth]
else:
prob = outOfBucketProb
if prob < minProb:
prob = minProb
negLL -= np.log(prob)
accumulatedError += negLL
if historyBuffer is not None:
historyBuffer.append(negLL)
if len(historyBuffer) > self.spec.params["window"]:
accumulatedError -= historyBuffer.popleft()
return accumulatedError
def aggregate(self, accumulatedError, historyBuffer, steps):
n = steps
if historyBuffer is not None:
n = len(historyBuffer)
return accumulatedError / float(n)
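# Worked example (hand-computed): if one step predicts bucket likelihoods
# {0: 0.7, 1: 0.2} and the true bucket index is 1, negLL = -log(0.2) ~= 1.61;
# if the true bucket is outside the predicted set, the out-of-bucket
# probability 1 - (0.7 + 0.2) = 0.1 is used instead: -log(0.1) ~= 2.30.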
class MetricRMSE(AggregateMetric):
"""
computes root-mean-square error
"""
def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result = None):
error = (groundTruth - prediction)**2
accumulatedError += error
if historyBuffer is not None:
historyBuffer.append(error)
if len(historyBuffer) > self.spec.params["window"] :
accumulatedError -= historyBuffer.popleft()
return accumulatedError
def aggregate(self, accumulatedError, historyBuffer, steps):
n = steps
if historyBuffer is not None:
n = len(historyBuffer)
return np.sqrt(accumulatedError / float(n))
class MetricNRMSE(MetricRMSE):
"""computes normalized root-mean-square error"""
def __init__(self, *args, **kwargs):
super(MetricNRMSE, self).__init__(*args, **kwargs)
self.groundTruths = []
def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result = None):
self.groundTruths.append(groundTruth)
return super(MetricNRMSE, self).accumulate(groundTruth,
prediction,
accumulatedError,
historyBuffer,
result)
def aggregate(self, accumulatedError, historyBuffer, steps):
rmse = super(MetricNRMSE, self).aggregate(accumulatedError,
historyBuffer,
steps)
denominator = np.std(self.groundTruths)
return rmse / denominator if denominator > 0 else float("inf")
class MetricAAE(AggregateMetric):
"""
computes average absolute error
"""
def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result = None):
error = abs(groundTruth - prediction)
accumulatedError += error
if historyBuffer is not None:
historyBuffer.append(error)
if len(historyBuffer) > self.spec.params["window"] :
accumulatedError -= historyBuffer.popleft()
return accumulatedError
def aggregate(self, accumulatedError, historyBuffer, steps):
n = steps
if historyBuffer is not None:
n = len(historyBuffer)
return accumulatedError/ float(n)
class MetricAltMAPE(AggregateMetric):
"""
computes the "Alternative" Mean Absolute Percent Error.
A generic MAPE computes the percent error for each sample, and then gets
an average. This can suffer from samples where the actual value is very small
or zero - this one sample can drastically alter the mean.
This metric on the other hand first computes the average of the actual values
and the averages of the errors before dividing. This washes out the effects of
a small number of samples with very small actual values.
"""
def __init__(self, metricSpec):
super(MetricAltMAPE, self).__init__(metricSpec)
self._accumulatedGroundTruth = 0
self._accumulatedError = 0
def addInstance(self, groundTruth, prediction, record = None, result = None):
# If missing data,
if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA or prediction is None:
return self.aggregateError
# Compute absolute error
error = abs(groundTruth - prediction)
if self.verbosity > 0:
print "MetricAltMAPE:\n groundTruth: %s\n Prediction: " \
"%s\n Error: %s" % (groundTruth, prediction, error)
# Update the accumulated groundTruth and aggregate error
if self.history is not None:
self.history.append((groundTruth, error))
if len(self.history) > self.spec.params["window"] :
(oldGT, oldErr) = self.history.popleft()
self._accumulatedGroundTruth -= oldGT
self._accumulatedError -= oldErr
self._accumulatedGroundTruth += abs(groundTruth)
self._accumulatedError += error
# Compute aggregate pct error
if self._accumulatedGroundTruth > 0:
self.aggregateError = 100.0 * self._accumulatedError / \
self._accumulatedGroundTruth
else:
self.aggregateError = 0
if self.verbosity >= 1:
print " accumGT:", self._accumulatedGroundTruth
print " accumError:", self._accumulatedError
print " aggregateError:", self.aggregateError
self.steps += 1
return self.aggregateError
class MetricMAPE(AggregateMetric):
"""
computes the "Classic" Mean Absolute Percent Error.
This computes the percent error for each sample, and then gets
an average. Note that this can suffer from samples where the actual value is
very small or zero - this one sample can drastically alter the mean. To
avoid this potential issue, use 'altMAPE' instead.
This metric is provided mainly as a convenience when comparing results against
other investigations that have also used MAPE.
"""
def __init__(self, metricSpec):
super(MetricMAPE, self).__init__(metricSpec)
    self._accumulatedPctError = 0
    self._pctErrorCount = 0  # samples folded into the accumulator (no-window case)
def addInstance(self, groundTruth, prediction, record = None, result = None):
# If missing data,
if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA or prediction is None:
return self.aggregateError
# Compute absolute error
if groundTruth != 0:
pctError = float(abs(groundTruth - prediction))/groundTruth
else:
# Ignore this sample
if self.verbosity > 0:
print "Ignoring sample with groundTruth of 0"
self.steps += 1
return self.aggregateError
if self.verbosity > 0:
print "MetricMAPE:\n groundTruth: %s\n Prediction: " \
"%s\n Error: %s" % (groundTruth, prediction, pctError)
    # Update the accumulated pct error
    if self.history is not None:
      self.history.append(pctError)
      if len(self.history) > self.spec.params["window"]:
        oldPctErr = self.history.popleft()
        self._accumulatedPctError -= oldPctErr
    else:
      self._pctErrorCount += 1
    self._accumulatedPctError += pctError
    # Compute aggregate pct error over the window (or over all accumulated
    # samples when no window is configured)
    n = len(self.history) if self.history is not None else self._pctErrorCount
    self.aggregateError = 100.0 * self._accumulatedPctError / n
if self.verbosity >= 1:
print " accumPctError:", self._accumulatedPctError
print " aggregateError:", self.aggregateError
self.steps += 1
return self.aggregateError
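# Worked example contrasting the two flavors: for ground truths [100, 1] and
# predictions [110, 2], classic MAPE averages the per-sample percent errors
# (10% and 100%) to 55%, while altMAPE divides the summed absolute error
# (10 + 1 = 11) by the summed ground truth (101), giving ~10.9% - the tiny
# sample no longer dominates.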
class MetricPassThruPrediction(MetricsIface):
"""
This is not a metric, but rather a facility for passing the predictions
generated by a baseline metric through to the prediction output cache produced
by a model.
For example, if you wanted to see the predictions generated for the TwoGram
metric, you would specify 'PassThruPredictions' as the 'errorMetric' parameter.
This metric class simply takes the prediction and outputs that as the
aggregateMetric value.
"""
def __init__(self, metricSpec):
self.spec = metricSpec
self.window = metricSpec.params.get("window", 1)
self.avg = MovingAverage(self.window)
self.value = None
def addInstance(self, groundTruth, prediction, record = None, result = None):
"""Compute and store metric value"""
self.value = self.avg(prediction)
def getMetric(self):
"""Return the metric value """
return {"value": self.value}
#def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer):
# # Simply return the prediction as the accumulated error
# return prediction
#
#def aggregate(self, accumulatedError, historyBuffer, steps):
# # Simply return the prediction as the aggregateError
# return accumulatedError
class MetricMovingMean(AggregateMetric):
"""
computes error metric based on moving mean prediction
"""
def __init__(self, metricSpec):
# This metric assumes a default 'steps' of 1
if not 'steps' in metricSpec.params:
metricSpec.params['steps'] = 1
super(MetricMovingMean, self).__init__(metricSpec)
# Only supports 1 item in _predictionSteps
assert (len(self._predictionSteps) == 1)
self.mean_window = 10
if metricSpec.params.has_key('mean_window'):
assert metricSpec.params['mean_window'] >= 1
self.mean_window = metricSpec.params['mean_window']
# Construct moving average instance
self._movingAverage = MovingAverage(self.mean_window)
def getMetric(self):
return self._subErrorMetrics[0].getMetric()
def addInstance(self, groundTruth, prediction, record = None, result = None):
# If missing data,
if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA:
return self._subErrorMetrics[0].aggregateError
if self.verbosity > 0:
print "groundTruth:\n{0!s}\nPredictions:\n{1!s}\n{2!s}\n".format(groundTruth, prediction, self.getMetric())
# Use ground truth from 'steps' steps ago as our most recent ground truth
lastGT = self._getShiftedGroundTruth(groundTruth)
if lastGT is None:
return self._subErrorMetrics[0].aggregateError
mean = self._movingAverage(lastGT)
return self._subErrorMetrics[0].addInstance(groundTruth, mean, record)
def evalCustomErrorMetric(expr, prediction, groundTruth, tools):
sandbox = SafeInterpreter(writer=StringIO())
if isinstance(prediction, dict):
sandbox.symtable['prediction'] = tools.mostLikely(prediction)
sandbox.symtable['EXP'] = tools.expValue(prediction)
sandbox.symtable['probabilityDistribution'] = prediction
else:
sandbox.symtable['prediction'] = prediction
sandbox.symtable['groundTruth'] = groundTruth
sandbox.symtable['tools'] = tools
error = sandbox(expr)
return error
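# Example (a minimal sketch): a custom expression sees 'prediction',
# 'groundTruth' and 'tools' in its symbol table, so an absolute-error metric
# could be configured with:
#
#   params = {'customExpr': 'abs(prediction - groundTruth)'}
#
# For dict (probability distribution) predictions, 'prediction' is bound to
# the most likely value and 'EXP' to the expected value.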
class CustomErrorMetric(MetricsIface):
"""
Custom Error Metric class that handles user defined error metrics
"""
class CircularBuffer():
"""
implementation of a fixed size constant random access circular buffer
"""
def __init__(self,length):
#Create an array to back the buffer
#If the length<0 create a zero length array
self.data = [None for i in range(max(length,0))]
self.elements = 0
self.index = 0
self.dataLength = length
def getItem(self,n):
#Get item from n steps back
if n >= self.elements or (n >= self.dataLength and not self.dataLength < 0):
assert False,"Trying to access data not in the stored window"
return None
if self.dataLength>=0:
getInd = (self.index-n-1)%min(self.elements,self.dataLength)
else:
getInd = (self.index-n-1)%self.elements
return self.data[getInd]
    def pushToEnd(self,obj):
      ret = None
      # If storing everything, simply append to the backing list
      if self.dataLength < 0:
        self.data.append(obj)
        self.index += 1
        self.elements += 1
        return None
      if self.elements == self.dataLength:
        # buffer full: remember the element about to be overwritten
        ret = self.data[self.index % self.dataLength]
      else:
        # buffer still filling: just bump the element counter
        self.elements += 1
      self.data[self.index % self.dataLength] = obj
      self.index += 1
      return ret
def __len__(self):
return self.elements
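  # Example (a minimal sketch): getItem(n) reads n steps back from the most
  # recently pushed element, and pushToEnd() returns the element it evicted
  # (or None while the buffer is still filling):
  #
  #   buf = CustomErrorMetric.CircularBuffer(2)
  #   buf.pushToEnd('a')   # -> None
  #   buf.pushToEnd('b')   # -> None
  #   buf.pushToEnd('c')   # -> 'a' (evicted)
  #   buf.getItem(0)       # -> 'c'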
def __init__(self,metricSpec):
self.metricSpec = metricSpec
self.steps = 0
self.error = 0
self.averageError = None
self.errorMatrix = None
self.evalError = self.evalAbsErr
    self.errorWindow = 1
    self.storeWindow = -1
    self.userDataStore = dict()
    if "errorWindow" in metricSpec.params:
      self.errorWindow = metricSpec.params["errorWindow"]
      assert self.errorWindow != 0, "Window size cannot be zero"
    if "storeWindow" in metricSpec.params:
      self.storeWindow = metricSpec.params["storeWindow"]
      assert self.storeWindow != 0, "Window size cannot be zero"
    self.errorStore = self.CircularBuffer(self.errorWindow)
    self.recordStore = self.CircularBuffer(self.storeWindow)
if "customExpr" in metricSpec.params:
assert not "customFuncDef" in metricSpec.params
assert not "customFuncSource" in metricSpec.params
self.evalError = partial(evalCustomErrorMetric, metricSpec.params["customExpr"])
elif "customFuncSource" in metricSpec.params:
assert not "customFuncDef" in metricSpec.params
assert not "customExpr" in metricSpec.params
exec(metricSpec.params["customFuncSource"])
#pull out defined function from locals
self.evalError = locals()["getError"]
elif "customFuncDef" in metricSpec.params:
assert not "customFuncSource" in metricSpec.params
assert not "customExpr" in metricSpec.params
self.evalError = metricSpec.params["customFuncDef"]
def getPrediction(self,n):
#Get prediction from n steps ago
return self.recordStore.getItem(n)["prediction"]
def getFieldValue(self,n,field):
#Get field value from record n steps ago
record = self.recordStore.getItem(n)["record"]
value = record[field]
return value
def getGroundTruth(self,n):
#Get the groundTruth from n steps ago
return self.recordStore.getItem(n)["groundTruth"]
def getBufferLen(self):
return len(self.recordStore)
def storeData(self,name,obj):
#Store custom user data
self.userDataStore[name] = obj
def getData(self,name):
#Retrieve user data
if name in self.userDataStore:
return self.userDataStore[name]
return None
def mostLikely(self, pred):
""" Helper function to return a scalar value representing the most
likely outcome given a probability distribution
"""
if len(pred) == 1:
return pred.keys()[0]
mostLikelyOutcome = None
maxProbability = 0
for prediction, probability in pred.items():
if probability > maxProbability:
mostLikelyOutcome = prediction
maxProbability = probability
return mostLikelyOutcome
def expValue(self, pred):
""" Helper function to return a scalar value representing the expected
value of a probability distribution
"""
if len(pred) == 1:
return pred.keys()[0]
return sum([x*p for x,p in pred.items()])
def evalAbsErr(self,pred,ground):
return abs(pred-ground)
def getMetric(self):
return {'value': self.averageError, "stats" : {"steps" : self.steps}}
def addInstance(self, groundTruth, prediction, record = None, result = None):
#If missing data,
if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA or prediction is None:
return self.averageError
self.recordStore.pushToEnd({"groundTruth":groundTruth,
"prediction":prediction,"record":record})
if isinstance(prediction, dict):
assert not any(True for p in prediction if p is None), \
"Invalid prediction of `None` in call to {0!s}.addInstance()".format( \
self.__class__.__name__)
error = self.evalError(prediction,groundTruth,self)
popped = self.errorStore.pushToEnd({"error":error})
    if popped is not None:
      #Subtract error that dropped out of the buffer
      self.error -= popped["error"]
    self.error += error
self.averageError = float(self.error)/self.errorStore.elements
self.steps+=1
return self.averageError
class MetricMovingMode(AggregateMetric):
"""
computes error metric based on moving mode prediction
"""
def __init__(self, metricSpec):
super(MetricMovingMode, self).__init__(metricSpec)
self.mode_window = 100
if metricSpec.params.has_key('mode_window'):
assert metricSpec.params['mode_window'] >= 1
self.mode_window = metricSpec.params['mode_window']
# Only supports one stepsize
assert len(self._predictionSteps) == 1
# Construct moving average instance
self._movingMode = _MovingMode(self.mode_window)
def getMetric(self):
return self._subErrorMetrics[0].getMetric()
def addInstance(self, groundTruth, prediction, record = None, result = None):
# If missing data,
if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA:
return self._subErrorMetrics[0].aggregateError
if self.verbosity > 0:
print "groundTruth:\n{0!s}\nPredictions:\n{1!s}\n{2!s}\n".format(groundTruth, prediction,
self.getMetric())
# Use ground truth from 'steps' steps ago as our most recent ground truth
lastGT = self._getShiftedGroundTruth(groundTruth)
if lastGT is None:
return self._subErrorMetrics[0].aggregateError
mode = self._movingMode(lastGT)
result = self._subErrorMetrics[0].addInstance(groundTruth, mode, record)
return result
class MetricTrivial(AggregateMetric):
"""
computes a metric against the ground truth N steps ago. The metric to
compute is designated by the 'errorMetric' entry in the metric params.
"""
def __init__(self, metricSpec):
# This metric assumes a default 'steps' of 1
if not 'steps' in metricSpec.params:
metricSpec.params['steps'] = 1
super(MetricTrivial, self).__init__(metricSpec)
# Only supports one stepsize
assert len(self._predictionSteps) == 1
# Must have a suberror metric
assert self._subErrorMetrics is not None, "This metric requires that you" \
+ " specify the name of another base metric via the 'errorMetric' " \
+ " parameter."
def getMetric(self):
return self._subErrorMetrics[0].getMetric()
def addInstance(self, groundTruth, prediction, record = None, result = None):
# Use ground truth from 'steps' steps ago as our "prediction"
prediction = self._getShiftedGroundTruth(groundTruth)
if self.verbosity > 0:
print "groundTruth:\n{0!s}\nPredictions:\n{1!s}\n{2!s}\n".format(groundTruth,
prediction, self.getMetric())
# If missing data,
if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA:
return self._subErrorMetrics[0].aggregateError
# Our "prediction" is simply what happened 'steps' steps ago
return self._subErrorMetrics[0].addInstance(groundTruth, prediction, record)
class MetricTwoGram(AggregateMetric):
"""
  computes an error metric based on two-grams. The groundTruth passed into
this metric is the encoded output of the field (an array of 1's and 0's).
"""
def __init__(self, metricSpec):
# This metric assumes a default 'steps' of 1
if not 'steps' in metricSpec.params:
metricSpec.params['steps'] = 1
super(MetricTwoGram, self).__init__(metricSpec)
# Only supports 1 stepsize
assert len(self._predictionSteps) == 1
# Must supply the predictionField
assert(metricSpec.params.has_key('predictionField'))
self.predictionField = metricSpec.params['predictionField']
self.twoGramDict = dict()
def getMetric(self):
return self._subErrorMetrics[0].getMetric()
def addInstance(self, groundTruth, prediction, record = None, result = None):
# If missing data return previous error (assuming one gram will always
# receive an instance of ndarray)
    if not groundTruth.any():
return self._subErrorMetrics[0].aggregateError
# Get actual ground Truth value from record. For this metric, the
# "groundTruth" parameter is the encoder output and we use actualGroundTruth
# to hold the input to the encoder (either a scalar or a category string).
#
# We will use 'groundTruthKey' (the stringified encoded value of
    # groundTruth) as the key for our two-gram dict and the 'actualGroundTruth'
# as the values in our dict, which are used to compute our prediction.
actualGroundTruth = record[self.predictionField]
# convert binary array to a string
groundTruthKey = str(groundTruth)
# Get the ground truth key from N steps ago, that is what we will base
# our prediction on. Note that our "prediction" is the prediction for the
# current time step, to be compared to actualGroundTruth
prevGTKey = self._getShiftedGroundTruth(groundTruthKey)
# -------------------------------------------------------------------------
# Get the prediction based on the previously known ground truth
# If no previous, just default to "" or 0, depending on the groundTruth
# data type.
if prevGTKey is None:
if isinstance(actualGroundTruth,str):
pred = ""
else:
pred = 0
# If the previous was never seen before, create a new dict for it.
elif not prevGTKey in self.twoGramDict:
if isinstance(actualGroundTruth,str):
pred = ""
else:
pred = 0
# Create a new dict for it
self.twoGramDict[prevGTKey] = {actualGroundTruth:1}
# If it was seen before, compute the prediction from the past history
else:
# Find most often occurring 1-gram
if isinstance(actualGroundTruth,str):
# Get the most frequent category that followed the previous timestep
twoGramMax = max(self.twoGramDict[prevGTKey].items(), key=itemgetter(1))
pred = twoGramMax[0]
else:
# Get average of all possible values that followed the previous
# timestep
pred = sum(self.twoGramDict[prevGTKey].iterkeys())
pred /= len(self.twoGramDict[prevGTKey])
# Add current ground truth to dict
if actualGroundTruth in self.twoGramDict[prevGTKey]:
self.twoGramDict[prevGTKey][actualGroundTruth] += 1
else:
self.twoGramDict[prevGTKey][actualGroundTruth] = 1
if self.verbosity > 0:
print "\nencoding:{0!s}\nactual:{1!s}\nprevEncoding:{2!s}\nprediction:{3!s}\nmetric:{4!s}".format(groundTruth, actualGroundTruth, prevGTKey, pred, self.getMetric())
return self._subErrorMetrics[0].addInstance(actualGroundTruth, pred, record)
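# Example (a minimal sketch): with 'steps' of 1, after the encoded sequence
# A, B, A, B the dict maps str(enc(A)) -> {B: 2} and str(enc(B)) -> {A: 1},
# so the next time A's encoding is seen the prediction is B.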
class MetricAccuracy(AggregateMetric):
"""
computes simple accuracy for an enumerated type. all inputs are treated as
discrete members of a set, therefore for example 0.5 is only a correct
response if the ground truth is exactly 0.5. Inputs can be strings, integers,
or reals
"""
def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result = None):
# This is really an accuracy measure rather than an "error" measure
error = 1.0 if groundTruth == prediction else 0.0
accumulatedError += error
if historyBuffer is not None:
historyBuffer.append(error)
if len(historyBuffer) > self.spec.params["window"] :
accumulatedError -= historyBuffer.popleft()
return accumulatedError
def aggregate(self, accumulatedError, historyBuffer, steps):
n = steps
if historyBuffer is not None:
n = len(historyBuffer)
return accumulatedError/ float(n)
class MetricAveError(AggregateMetric):
"""Simply the inverse of the Accuracy metric
More consistent with scalar metrics because
they all report an error to be minimized"""
def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result = None):
error = 1.0 if groundTruth != prediction else 0.0
accumulatedError += error
if historyBuffer is not None:
historyBuffer.append(error)
if len(historyBuffer) > self.spec.params["window"] :
accumulatedError -= historyBuffer.popleft()
return accumulatedError
def aggregate(self, accumulatedError, historyBuffer, steps):
n = steps
if historyBuffer is not None:
n = len(historyBuffer)
return accumulatedError/ float(n)
class MetricNegAUC(AggregateMetric):
""" Computes -1 * AUC (Area Under the Curve) of the ROC (Receiver Operator
Characteristics) curve. We compute -1 * AUC because metrics are optimized
to be LOWER when running hypersearch.
For this, we assuming that category 1 is the "positive" category and
we are generating an ROC curve with the TPR (True Positive Rate) of
category 1 on the y-axis and the FPR (False Positive Rate) on the x-axis.
"""
def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result = None):
""" Accumulate history of groundTruth and "prediction" values.
For this metric, groundTruth is the actual category and "prediction" is a
dict containing one top-level item with a key of 0 (meaning this is the
    0-step classification) and a value which is another dict, which contains the
probability for each category as output from the classifier. For example,
this is what "prediction" would be if the classifier said that category 0
had a 0.6 probability and category 1 had a 0.4 probability: {0:0.6, 1: 0.4}
"""
# We disable it within aggregate() if we find that the classifier classes
# are not compatible with AUC calculations.
if self.disabled:
return 0
# Just store the groundTruth, probability into our history buffer. We will
# wait until aggregate gets called to actually compute AUC.
if historyBuffer is not None:
historyBuffer.append((groundTruth, prediction[0]))
if len(historyBuffer) > self.spec.params["window"] :
historyBuffer.popleft()
# accumulatedError not used in this metric
return 0
def aggregate(self, accumulatedError, historyBuffer, steps):
# If disabled, do nothing.
if self.disabled:
return 0.0
if historyBuffer is not None:
n = len(historyBuffer)
else:
return 0.0
# For performance reasons, only re-compute this every 'computeEvery' steps
frequency = self.spec.params.get('computeEvery', 1)
if ((steps+1) % frequency) != 0:
return self.aggregateError
# Compute the ROC curve and the area underneath it
actuals = [gt for (gt, probs) in historyBuffer]
classes = np.unique(actuals)
# We can only compute ROC when we have at least 1 sample of each category
if len(classes) < 2:
return -1 * 0.5
# Print warning the first time this metric is asked to be computed on a
# problem with more than 2 classes
if sorted(classes) != [0,1]:
print "WARNING: AUC only implemented for binary classifications where " \
"the categories are category 0 and 1. In this network, the " \
"categories are: %s" % (classes)
print "WARNING: Computation of this metric is disabled for the remainder of " \
"this experiment."
self.disabled = True
return 0.0
# Compute the ROC and AUC. Note that because we are online, there's a
# chance that some of the earlier classification probabilities don't
# have the True class (category 1) yet because it hasn't been seen yet.
# Therefore, we use probs.get() with a default value of 0.
scores = [probs.get(1, 0) for (gt, probs) in historyBuffer]
(fpr, tpr, thresholds) = roc.ROCCurve(actuals, scores)
auc = roc.AreaUnderCurve(fpr, tpr)
# Debug?
if False:
print
print "AUC metric debug info ({0:d} steps):".format((steps))
print " actuals:", actuals
print " probabilities:", ["{0:.2f}".format(x) for x in scores]
print " fpr:", fpr
print " tpr:", tpr
print " thresholds:", thresholds
print " AUC:", auc
return -1 * auc
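# Worked example: with history [(1, {1: 0.9}), (0, {1: 0.2})] the scores for
# category 1 are [0.9, 0.2]; the positive sample outranks the negative one,
# so the ROC curve is perfect, AUC = 1.0, and the metric returns -1.0.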
class MetricMultiStep(AggregateMetric):
"""
This is an "uber" metric which is used to apply one of the other basic
metrics to a specific step in a multi-step prediction.
The specParams are expected to contain:
'errorMetric': name of basic metric to apply
'steps': compare prediction['steps'] to the current
ground truth.
Note that the metrics manager has already performed the time shifting
for us - it passes us the prediction element from 'steps' steps ago
and asks us to compare that to the current ground truth.
When multiple steps of prediction are requested, we average the results of
the underlying metric for each step.
"""
def __init__(self, metricSpec):
super(MetricMultiStep, self).__init__(metricSpec)
assert self._subErrorMetrics is not None
def getMetric(self):
return {'value': self.aggregateError, "stats" : {"steps" : self.steps}}
def addInstance(self, groundTruth, prediction, record = None, result = None):
# If missing data,
if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA:
return self.aggregateError
# Get the prediction for this time step
aggErrSum = 0
try:
for step, subErrorMetric in \
zip(self._predictionSteps, self._subErrorMetrics):
stepPrediction = prediction[step]
# Unless this is a custom_error_metric, when we have a dict of
# probabilities, get the most probable one. For custom error metrics,
# we pass the probabilities in so that it can decide how best to deal with
# them.
if isinstance(stepPrediction, dict) \
and not isinstance(subErrorMetric, CustomErrorMetric):
predictions = [(prob,value) for (value, prob) in \
stepPrediction.iteritems()]
predictions.sort()
stepPrediction = predictions[-1][1]
# Get sum of the errors
aggErr = subErrorMetric.addInstance(groundTruth, stepPrediction, record, result)
if self.verbosity >= 2:
print "MetricMultiStep {0!s}: aggErr for stepSize {1:d}: {2!s}".format(self._predictionSteps, step, aggErr)
aggErrSum += aggErr
    except Exception:
      # Tolerate records where a requested prediction step is missing
      # (e.g. at the start of a run before all steps are available)
      pass
# Return average aggregate error across all step sizes
self.aggregateError = aggErrSum / len(self._subErrorMetrics)
if self.verbosity >= 2:
print "MetricMultiStep {0!s}: aggErrAvg: {1!s}".format(self._predictionSteps,
self.aggregateError)
self.steps += 1
if self.verbosity >= 1:
print "\nMetricMultiStep %s: \n groundTruth: %s\n Predictions: %s" \
"\n Metric: %s" % (self._predictionSteps, groundTruth, prediction,
self.getMetric())
return self.aggregateError
class MetricMultiStepProbability(AggregateMetric):
"""
This is an "uber" metric which is used to apply one of the other basic
metrics to a specific step in a multi-step prediction.
The specParams are expected to contain:
'errorMetric': name of basic metric to apply
'steps': compare prediction['steps'] to the current
ground truth.
Note that the metrics manager has already performed the time shifting
for us - it passes us the prediction element from 'steps' steps ago
and asks us to compare that to the current ground truth.
"""
def __init__(self, metricSpec):
# Default window should be 1
if not 'window' in metricSpec.params:
metricSpec.params['window'] = 1
super(MetricMultiStepProbability, self).__init__(metricSpec)
# Must have a suberror metric
assert self._subErrorMetrics is not None, "This metric requires that you" \
+ " specify the name of another base metric via the 'errorMetric' " \
+ " parameter."
# Force all subErrorMetric windows to 1. This is necessary because by
# default they each do their own history averaging assuming that their
    # addInstance() gets called once per iteration. But, in this metric
# we actually call into each subErrorMetric multiple times per iteration
for subErrorMetric in self._subErrorMetrics:
subErrorMetric.window = 1
subErrorMetric.spec.params['window'] = 1
self._movingAverage = MovingAverage(self.window)
def getMetric(self):
return {'value': self.aggregateError, "stats" :
{"steps" : self.steps}}
def addInstance(self, groundTruth, prediction, record = None, result = None):
# If missing data,
if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA:
return self.aggregateError
if self.verbosity >= 1:
print "\nMetricMultiStepProbability %s: \n groundTruth: %s\n " \
"Predictions: %s" % (self._predictionSteps, groundTruth,
prediction)
# Get the aggregateErrors for all requested step sizes and average them
aggErrSum = 0
for step, subErrorMetric in \
zip(self._predictionSteps, self._subErrorMetrics):
stepPrediction = prediction[step]
# If it's a dict of probabilities, get the expected value
error = 0
if isinstance(stepPrediction, dict):
expectedValue = 0
# For every possible prediction multiply its error by its probability
for (pred, prob) in stepPrediction.iteritems():
error += subErrorMetric.addInstance(groundTruth, pred, record) \
* prob
else:
error += subErrorMetric.addInstance(groundTruth, stepPrediction,
record)
if self.verbosity >= 2:
print ("MetricMultiStepProbability {0!s}: aggErr for stepSize {1:d}: {2!s}".format(self._predictionSteps, step, error))
aggErrSum += error
# Return aggregate error
avgAggErr = aggErrSum / len(self._subErrorMetrics)
self.aggregateError = self._movingAverage(avgAggErr)
if self.verbosity >= 2:
print ("MetricMultiStepProbability %s: aggErr over all steps, this "
"iteration (%d): %s" % (self._predictionSteps, self.steps, avgAggErr))
print ("MetricMultiStepProbability {0!s}: aggErr moving avg: {1!s}".format(self._predictionSteps, self.aggregateError))
self.steps += 1
if self.verbosity >= 1:
print "MetricMultiStepProbability {0!s}: \n Error: {1!s}\n Metric: {2!s}".format(self._predictionSteps, avgAggErr, self.getMetric())
return self.aggregateError
class MetricMulti(MetricsIface):
"""Multi metric can combine multiple other (sub)metrics and
weight them to provide combined score."""
  # Note: unlike the other metrics, MetricMulti cannot be constructed from a
  # metricSpec string; build it directly with MetricMulti(weights, metrics).
  def __init__(self, weights, metrics, window=None):
"""MetricMulti
@param weights - [list of floats] used as weights
@param metrics - [list of submetrics]
@param window - (opt) window size for moving average, or None when disabled
"""
if (weights is None or not isinstance(weights, list) or
not len(weights) > 0 or
not isinstance(weights[0], float)):
raise ValueError("MetricMulti requires 'weights' parameter as a [list of floats]")
self.weights = weights
if (metrics is None or not isinstance(metrics, list) or
not len(metrics) > 0 or
not isinstance(metrics[0], MetricsIface)):
raise ValueError("MetricMulti requires 'metrics' parameter as a [list of Metrics]")
    self.metrics = metrics
    self.verbosity = 0  # read by addInstance(); default to quiet
if window is not None:
self.movingAvg = MovingAverage(windowSize=window)
else:
self.movingAvg = None
def addInstance(self, groundTruth, prediction, record = None, result = None):
err = 0.0
subResults = [m.addInstance(groundTruth, prediction, record) for m in self.metrics]
for i in xrange(len(self.weights)):
if subResults[i] is not None:
err += subResults[i]*self.weights[i]
else: # submetric returned None, propagate
self.err = None
return None
      if self.verbosity > 2:
        print "IN=",groundTruth," pred=",prediction,": w=",self.weights[i]," metric=",self.metrics[i]," value=",subResults[i]," err=",err
if self.movingAvg is not None:
err=self.movingAvg(err)
self.err = err
return err
def __repr__(self):
return "MetricMulti(weights={0!s}, metrics={1!s})".format(self.weights, self.metrics)
def getMetric(self):
return {'value': self.err, "stats" : {"weights" : self.weights}}
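# Example (a minimal sketch; the inference element and field names are
# placeholders):
#
#   rmse = getModule(MetricSpec('rmse', 'prediction', 'consumption'))
#   aae = getModule(MetricSpec('aae', 'prediction', 'consumption'))
#   multi = MetricMulti(weights=[0.6, 0.4], metrics=[rmse, aae], window=100)
#   multi.addInstance(10.0, 12.0)   # weighted sum of the submetric errors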
|
from . import print_calendar_report
|
import datetime
from django.db.models import Q
from django.http import HttpResponse, HttpResponseServerError, Http404, HttpResponseNotFound, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.template import RequestContext
from django.core import serializers
from django.contrib.auth.decorators import login_required
from django.core.exceptions import MultipleObjectsReturned
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.forms.models import formset_factory, modelformset_factory, inlineformset_factory, BaseModelFormSet
from django.forms import ValidationError
import json as simplejson
from django.utils.datastructures import SortedDict
from django.contrib.auth.forms import UserCreationForm
from django.conf import settings
from django_rea.valueaccounting.models import *
from django_rea.board.forms import *
from django_rea.valueaccounting.views import get_agent
def default_context_agent():
return EconomicAgent.objects.get(id=3) #todo: BIG hack alert!!!!
def dhen_board(request, context_agent_id=None):
#import pdb; pdb.set_trace()
agent = get_agent(request)
pattern = ProcessPattern.objects.get(name="Herbs")
selected_resource_type = None
#filter_form = FilterForm(pattern=pattern, data=request.POST or None,)
if context_agent_id:
context_agent = EconomicAgent.objects.get(id=context_agent_id)
else:
context_agent = default_context_agent()
seller = EconomicAgent.objects.get(id=4) #todo: even worse hack!!
rec_extype = ExchangeType.objects.get(name="Purchase to Drying Site")
e_date = datetime.date.today()
init = {"start_date": e_date }
available_extype = ExchangeType.objects.get(name="Make Available")
available_form = AvailableForm(initial=init, exchange_type=available_extype, context_agent=context_agent, prefix="AVL")
init = {"event_date": e_date, "paid": "later", }
receive_form = ReceiveForm(initial=init, exchange_type=rec_extype, context_agent=context_agent, prefix="REC")
et = EventType.objects.get(name="Resource Production")
farm_stage = None
#harvester_stage = ExchangeType.objects.get(name="Farm to Harvester")
dryer_stage = ExchangeType.objects.get(name="Harvester to Drying Site")
seller_stage = ExchangeType.objects.get(name="Drying Site to Seller")
rts = pattern.get_resource_types(event_type=et)
for rt in rts:
init = {"event_date": e_date,}
rt.farm_commits = rt.commits_for_exchange_stage(stage=farm_stage)
for com in rt.farm_commits:
if com.start_date > e_date:
com.future = True
prefix = com.form_prefix()
qty_help = " ".join([com.unit_of_quantity.abbrev, ", up to 2 decimal places"])
com.transfer_form = ExchangeFlowForm(initial=init, qty_help=qty_help, assoc_type_identifier="DryingSite", context_agent=context_agent, prefix=prefix)
com.zero_form = ZeroOutForm(prefix=prefix)
com.lot_form = NewResourceForm(prefix=prefix)
com.multiple_formset = create_exchange_formset(context_agent=context_agent, assoc_type_identifier="Harvester", prefix=prefix)
rt.dryer_resources = rt.onhand_for_exchange_stage(stage=dryer_stage)
init = {"event_date": e_date, "paid": "later"}
for res in rt.dryer_resources:
prefix = res.form_prefix()
qty_help = " ".join([res.unit_of_quantity().abbrev, ", up to 2 decimal places"])
res.transfer_form = TransferFlowForm(initial=init, qty_help=qty_help, assoc_type_identifier="Seller", context_agent=context_agent, prefix=prefix)
rt.seller_resources = rt.onhand_for_exchange_stage(stage=seller_stage)
if rt.seller_resources:
init_rt = {"event_date": e_date,}
rt.combine_form = CombineResourcesForm(prefix = rt.form_prefix(), initial=init_rt, resource_type=rt, stage=seller_stage)
return render_to_response("board/dhen_board.html", {
"agent": agent,
"context_agent": context_agent,
"seller": seller,
"available_form": available_form,
"receive_form": receive_form,
#"filter_form": filter_form,
"resource_types": rts,
"available_extype": available_extype,
}, context_instance=RequestContext(request))
@login_required
def add_available(request, context_agent_id):
if request.method == "POST":
#import pdb; pdb.set_trace()
context_agent = EconomicAgent.objects.get(id=context_agent_id)
form = AvailableForm(data=request.POST, prefix="AVL")
if form.is_valid():
commit = form.save(commit=False)
commit.event_type = EventType.objects.get(name="Give")
commit.to_agent = context_agent
commit.context_agent = context_agent
commit.due_date = commit.start_date
commit.commitment_date = commit.start_date
commit.unit_of_quantity = commit.resource_type.unit
commit.exchange_stage = None
commit.created_by = request.user
commit.save()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
@login_required
def receive_directly(request, context_agent_id):
if request.method == "POST":
#import pdb; pdb.set_trace()
context_agent = EconomicAgent.objects.get(id=context_agent_id)
stage = ExchangeType.objects.get(name="Harvester to Drying Site")
exchange_type = ExchangeType.objects.get(name="Purchase to Drying Site") #todo: odd to have stage different....
form = ReceiveForm(data=request.POST, prefix="REC")
if form.is_valid():
data = form.cleaned_data
event_date = data["event_date"]
identifier = data["identifier"]
from_agent = data["from_agent"]
to_agent = data["to_agent"]
resource_type = data["resource_type"]
quantity = data["quantity"]
description = data["description"]
paid = data["paid"]
value = data["value"]
unit_of_value = data["unit_of_value"]
receive_et = EventType.objects.get(name="Receive")
give_et = EventType.objects.get(name="Give")
pay_rt = EconomicResourceType.objects.filter(unit__unit_type="value")[0]
exchange = Exchange(
name="Purchase " + resource_type.name + " from " + from_agent.nick,
use_case=UseCase.objects.get(identifier="supply_xfer"),
start_date=event_date,
context_agent=context_agent,
exchange_type=exchange_type,
created_by=request.user,
)
exchange.save()
resource = EconomicResource(
identifier=identifier,
resource_type=resource_type,
quantity=quantity,
exchange_stage=stage,
notes=description,
created_by=request.user
)
resource.save()
transfer_type = exchange_type.transfer_types_non_reciprocal()[0]
xfer_name = transfer_type.name + " of " + resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = exchange,
context_agent = context_agent,
transfer_date = event_date,
notes = description,
created_by = request.user
)
xfer.save()
event = EconomicEvent(
event_type = receive_et,
event_date = event_date,
resource = resource,
resource_type = resource_type,
transfer = xfer,
exchange_stage=stage,
from_agent = from_agent,
to_agent = to_agent,
context_agent = context_agent,
quantity = quantity,
unit_of_quantity = resource_type.unit,
value = value,
unit_of_value = unit_of_value,
description=description,
created_by = request.user,
)
event.save()
if paid == "paid":
if value > 0:
transfer_type = exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + resource_type.name
pay_xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = exchange,
context_agent = context_agent,
transfer_date = event_date,
notes = description,
created_by = request.user
)
pay_xfer.save()
pay_event = EconomicEvent(
event_type = give_et,
event_date = event_date,
resource_type = pay_rt,
transfer = pay_xfer,
exchange_stage=stage,
from_agent = event.to_agent,
to_agent = event.from_agent,
context_agent = context_agent,
quantity = value,
unit_of_quantity = unit_of_value,
value = value,
unit_of_value = unit_of_value,
created_by = request.user,
)
pay_event.save()
elif paid == "later":
if value > 0:
transfer_type = exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + resource_type.name
pay_xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = exchange,
context_agent = context_agent,
transfer_date = event_date,
notes = description,
created_by = request.user
)
pay_xfer.save()
commit = Commitment(
commitment_date=event_date,
event_type=give_et,
transfer=pay_xfer,
exchange_stage=stage,
due_date=event_date,
from_agent=event.to_agent,
to_agent=event.from_agent,
context_agent=context_agent,
resource_type=pay_rt,
quantity=value,
unit_of_quantity=unit_of_value,
value=value,
unit_of_value=unit_of_value,
created_by=request.user,
)
commit.save()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
def create_exchange_formset(context_agent, assoc_type_identifier, prefix, data=None):
ExchangeFormSet = formset_factory(MultipleExchangeEventForm, extra=10)
#init = {"paid": "paid"}
formset = ExchangeFormSet(data=data, prefix=prefix)
to_agents = context_agent.all_has_associates_by_type(assoc_type_identifier=assoc_type_identifier)
for form in formset:
#id = int(form["facet_id"].value())
form.fields["to_agent"].queryset = to_agents
form.fields["paid_stage_1"].initial = "never"
form.fields["paid_stage_2"].initial = "later"
return formset
def get_next_stage(exchange_type=None):
if not exchange_type:
next_stage = ExchangeType.objects.get(name="Farm to Harvester")
elif exchange_type.name == "Farm to Harvester":
next_stage = ExchangeType.objects.get(name="Harvester to Drying Site")
elif exchange_type.name == "Harvester to Drying Site":
next_stage = ExchangeType.objects.get(name="Drying Site to Seller")
else:
next_stage = None
return next_stage
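# A sketch of the resulting stage chain (names taken from the lookups above):
#   None -> "Farm to Harvester" -> "Harvester to Drying Site"
#        -> "Drying Site to Seller" -> None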
@login_required
def purchase_resource(request, context_agent_id, commitment_id): #this is the farm > harvester > drying site, confusing name
if request.method == "POST":
#import pdb; pdb.set_trace()
commitment = get_object_or_404(Commitment, id=commitment_id)
context_agent = EconomicAgent.objects.get(id=context_agent_id)
stage = None
next_stage = get_next_stage(stage)
next_next_stage = get_next_stage(next_stage)
prefix = commitment.form_prefix()
form = ExchangeFlowForm(prefix=prefix, data=request.POST)
lot_form = NewResourceForm(prefix=prefix, data=request.POST)
zero_form = ZeroOutForm(prefix=prefix, data=request.POST)
if zero_form.is_valid():
#import pdb; pdb.set_trace()
zero_data = zero_form.cleaned_data
zero_out = zero_data["zero_out"]
if zero_out:
commitment.finished = True
commitment.save()
if form.is_valid() and lot_form.is_valid():
data = form.cleaned_data
event_date = data["event_date"]
to_agent = data["to_agent"]
unit_of_value = data["unit_of_value"]
notes = data["notes"]
lot_data = lot_form.cleaned_data
identifier = lot_data["identifier"]
purch_use_case = UseCase.objects.get(identifier="supply_xfer")
purch_exchange_type = ExchangeType.objects.get(name="Farm to Harvester")
xfer_use_case = UseCase.objects.get(identifier="intrnl_xfer")
xfer_exchange_type = ExchangeType.objects.get(name="Harvester to Drying Site")
proc_use_case = UseCase.objects.get(identifier="rand")
proc_pattern = None
proc_patterns = [puc.pattern for puc in proc_use_case.patterns.all()]
if proc_patterns:
proc_pattern = proc_patterns[0]
give_et = EventType.objects.get(name="Give")
receive_et = EventType.objects.get(name="Receive")
consume_et = EventType.objects.get(name="Resource Consumption")
produce_et = EventType.objects.get(name="Resource Production")
pay_rt = EconomicResourceType.objects.filter(unit__unit_type="value")[0]
formset = create_exchange_formset(prefix=prefix, data=request.POST, context_agent=context_agent, assoc_type_identifier="Harvester")
quantity = 0
ces = []
#import pdb; pdb.set_trace()
for form_ee in formset.forms:
if form_ee.is_valid():
data_ee = form_ee.cleaned_data
breakout_to_agent = data_ee["to_agent"]
if breakout_to_agent:
breakout_quantity = data_ee["quantity"]
quantity += breakout_quantity
value_stage_1 = data_ee["value_stage_1"]
paid_stage_1 = data_ee["paid_stage_1"]
value_stage_2 = data_ee["value_stage_2"]
paid_stage_2 = data_ee["paid_stage_2"]
exchange = Exchange(
name="Transfer " + commitment.resource_type.name + " from farm",
use_case=purch_use_case,
exchange_type=purch_exchange_type,
start_date=event_date,
context_agent=context_agent,
created_by=request.user,
)
exchange.save()
resource = EconomicResource(
identifier=commitment.resource_type.name + " from farm",
resource_type=commitment.resource_type,
quantity=0,
exchange_stage=next_next_stage,
created_by=request.user
)
resource.save()
transfer_type = purch_exchange_type.transfer_types_non_reciprocal()[0]
xfer_name = transfer_type.name + " of " + commitment.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
receipt_event = EconomicEvent(
event_type = receive_et,
event_date = event_date,
resource = resource,
resource_type = resource.resource_type,
exchange_stage=next_stage,
transfer=xfer,
commitment=commitment,
from_agent = commitment.from_agent,
to_agent = breakout_to_agent,
context_agent = context_agent,
quantity = breakout_quantity,
unit_of_quantity = resource.resource_type.unit,
value = value_stage_1,
unit_of_value = unit_of_value,
created_by = request.user,
)
receipt_event.save()
if paid_stage_1 == "paid":
if value_stage_1 > 0:
transfer_type = purch_exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + commitment.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
pay_event_1 = EconomicEvent(
event_type = give_et,
event_date = event_date,
resource_type = pay_rt,
exchange_stage=next_stage,
transfer=xfer,
from_agent = receipt_event.to_agent,
to_agent = receipt_event.from_agent,
context_agent = context_agent,
quantity = value_stage_1,
unit_of_quantity = unit_of_value,
value = value_stage_1,
unit_of_value = unit_of_value,
created_by = request.user,
)
pay_event_1.save()
elif paid_stage_1 == "later":
if value_stage_1 > 0:
transfer_type = purch_exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + commitment.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
commit_1 = Commitment(
commitment_date=event_date,
event_type=give_et,
exchange_stage=next_stage,
transfer=xfer,
due_date=event_date,
from_agent=receipt_event.to_agent,
to_agent=receipt_event.from_agent,
context_agent=context_agent,
resource_type=pay_rt,
quantity=value_stage_1,
unit_of_quantity=unit_of_value,
value=value_stage_1,
unit_of_value=unit_of_value,
created_by=request.user,
)
commit_1.save()
xfer_exchange = Exchange(
name="Transfer " + commitment.resource_type.name,
use_case=xfer_use_case,
start_date=event_date,
context_agent=context_agent,
exchange_type=xfer_exchange_type,
created_by=request.user,
)
xfer_exchange.save()
transfer_type = xfer_exchange_type.transfer_types_non_reciprocal()[0]
xfer_name = transfer_type.name + " of " + commitment.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = xfer_exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
xfer_event = EconomicEvent(
event_type = give_et,
event_date = event_date,
resource = resource,
resource_type = resource.resource_type,
exchange_stage=next_next_stage,
transfer=xfer,
from_agent = breakout_to_agent,
to_agent = to_agent,
context_agent = context_agent,
quantity = breakout_quantity,
unit_of_quantity = resource.resource_type.unit,
value = value_stage_2,
unit_of_value = unit_of_value,
created_by = request.user,
)
xfer_event.save()
xfer_event_receive = EconomicEvent(
event_type = receive_et,
event_date = event_date,
resource = resource,
resource_type = resource.resource_type,
exchange_stage=next_next_stage,
transfer=xfer,
from_agent = breakout_to_agent,
to_agent = to_agent,
context_agent = context_agent,
quantity = breakout_quantity,
unit_of_quantity = resource.resource_type.unit,
value = value_stage_2,
unit_of_value = unit_of_value,
created_by = request.user,
)
xfer_event_receive.save()
if paid_stage_2 == "paid":
if value_stage_2 > 0:
transfer_type = xfer_exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + commitment.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = xfer_exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
pay_event_2 = EconomicEvent(
event_type = give_et,
event_date = event_date,
resource_type = pay_rt,
transfer = xfer,
exchange_stage=next_next_stage,
from_agent = xfer_event.to_agent,
to_agent = xfer_event.from_agent,
context_agent = context_agent,
quantity = value_stage_2,
unit_of_quantity = unit_of_value,
value = value_stage_2,
unit_of_value = unit_of_value,
created_by = request.user,
)
pay_event_2.save()
pay_event_2_receive = EconomicEvent(
event_type = receive_et,
event_date = event_date,
resource_type = pay_rt,
transfer = xfer,
exchange_stage=next_next_stage,
from_agent = xfer_event.to_agent,
to_agent = xfer_event.from_agent,
context_agent = context_agent,
quantity = value_stage_2,
unit_of_quantity = unit_of_value,
value = value_stage_2,
unit_of_value = unit_of_value,
created_by = request.user,
)
pay_event_2_receive.save()
elif paid_stage_2 == "later":
if value_stage_2 > 0:
transfer_type = xfer_exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + commitment.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = xfer_exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
commit_2 = Commitment(
commitment_date=event_date,
event_type=give_et,
transfer=xfer,
exchange_stage=next_next_stage,
due_date=event_date,
from_agent=xfer_event.to_agent,
to_agent=xfer_event.from_agent,
context_agent=context_agent,
resource_type=pay_rt,
quantity=value_stage_2,
unit_of_quantity=unit_of_value,
value=value_stage_2,
unit_of_value=unit_of_value,
created_by=request.user,
)
commit_2.save()
consume_event = EconomicEvent(
event_type = consume_et,
event_date = event_date,
resource = resource,
resource_type = resource.resource_type,
exchange_stage=next_next_stage,
from_agent = to_agent,
to_agent = to_agent,
context_agent = context_agent,
quantity = breakout_quantity,
unit_of_quantity = resource.resource_type.unit,
created_by = request.user,
)
consume_event.save()
ces.append(consume_event)
process = Process(
name="Combined harvested: new lot",
process_pattern=proc_pattern,
end_date=event_date,
start_date=event_date,
started=event_date,
context_agent=context_agent,
finished=True,
process_type=ProcessType.objects.get(name="Into Drying Room"),
created_by=request.user,
)
process.save()
for ce in ces:
ce.process = process
ce.save()
prod_resource = EconomicResource(
identifier=identifier,
resource_type=commitment.resource_type,
quantity=quantity,
exchange_stage=next_next_stage,
notes=notes,
created_by=request.user
)
prod_resource.save()
prod_event = EconomicEvent(
event_type = produce_et,
event_date = event_date,
resource = prod_resource,
resource_type = prod_resource.resource_type,
exchange_stage=next_next_stage,
process = process,
from_agent = to_agent,
to_agent = to_agent,
context_agent = context_agent,
quantity = quantity,
unit_of_quantity = prod_resource.resource_type.unit,
description=notes,
created_by = request.user,
)
prod_event.save()
#todo: put skip stage here!
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
@login_required
def transfer_resource(request, context_agent_id, resource_id): #this is drying site to seller
if request.method == "POST":
#import pdb; pdb.set_trace()
resource = get_object_or_404(EconomicResource, id=resource_id)
context_agent = EconomicAgent.objects.get(id=context_agent_id)
stage = ExchangeType.objects.get(name="Harvester to Drying Site")
next_stage = get_next_stage(stage)
prefix = resource.form_prefix()
form = TransferFlowForm(prefix=prefix, data=request.POST)
if form.is_valid():
data = form.cleaned_data
event_date = data["event_date"]
to_agent = data["to_agent"]
quantity = data["quantity"]
value = data["value"]
if not value:
value = 0
unit_of_value = data["unit_of_value"]
paid = data["paid"]
notes = data["notes"]
xfer_use_case = UseCase.objects.get(identifier="intrnl_xfer")
exchange_type = next_stage
give_et = EventType.objects.get(name="Give")
receive_et = EventType.objects.get(name="Receive")
pay_rt = EconomicResourceType.objects.filter(unit__unit_type="value")[0]
#import pdb; pdb.set_trace()
xfer_exchange = Exchange(
name="Transfer " + resource.resource_type.name,
use_case=xfer_use_case,
start_date=event_date,
context_agent=context_agent,
exchange_type=exchange_type,
created_by=request.user,
)
xfer_exchange.save()
transfer_type = exchange_type.transfer_types_non_reciprocal()[0]
xfer_name = transfer_type.name + " of " + resource.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = xfer_exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
xfer_give_event = EconomicEvent(
event_type = give_et,
event_date = event_date,
resource = resource,
resource_type = resource.resource_type,
transfer=xfer,
exchange_stage=next_stage,
from_agent = resource.owner_based_on_exchange(),
to_agent = to_agent,
context_agent = context_agent,
quantity = quantity,
unit_of_quantity = resource.resource_type.unit,
value = value,
unit_of_value = unit_of_value,
created_by = request.user,
)
xfer_give_event.save()
xfer_rec_event = EconomicEvent(
event_type = receive_et,
event_date = event_date,
resource = resource,
resource_type = resource.resource_type,
transfer=xfer,
exchange_stage=next_stage,
from_agent = resource.owner_based_on_exchange(),
to_agent = to_agent,
context_agent = context_agent,
quantity = quantity,
unit_of_quantity = resource.resource_type.unit,
value = value,
unit_of_value = unit_of_value,
created_by = request.user,
)
xfer_rec_event.save()
resource.exchange_stage = next_stage
resource.quantity = quantity
if resource.notes:
resource.notes = resource.notes + " ------- " + notes
else:
resource.notes = notes
resource.save()
if paid == "paid":
if value > 0:
transfer_type = exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + resource.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = xfer_exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
pay_event = EconomicEvent(
event_type = give_et,
event_date = event_date,
resource_type = pay_rt,
transfer=xfer,
exchange_stage=next_stage,
from_agent = xfer_give_event.to_agent,
to_agent = xfer_give_event.from_agent,
context_agent = context_agent,
quantity = value,
unit_of_quantity = unit_of_value,
value = value,
unit_of_value = unit_of_value,
created_by = request.user,
)
pay_event.save()
pay_rec_event = EconomicEvent(
event_type = receive_et,
event_date = event_date,
resource_type = pay_rt,
transfer=xfer,
exchange_stage=next_stage,
from_agent = xfer_give_event.to_agent,
to_agent = xfer_give_event.from_agent,
context_agent = context_agent,
quantity = value,
unit_of_quantity = unit_of_value,
value = value,
unit_of_value = unit_of_value,
created_by = request.user,
)
pay_rec_event.save()
elif paid == "later":
if value > 0:
transfer_type = exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + resource.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = xfer_exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
commit = Commitment(
commitment_date=event_date,
event_type=give_et,
transfer=xfer,
exchange_stage=next_stage,
due_date=event_date,
from_agent=xfer_give_event.to_agent,
to_agent=xfer_give_event.from_agent,
context_agent=context_agent,
resource_type=pay_rt,
quantity=value,
unit_of_quantity=unit_of_value,
value=value,
unit_of_value=unit_of_value,
created_by=request.user,
)
commit.save()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
def combine_resources(request, context_agent_id, resource_type_id):
if request.method == "POST":
#import pdb; pdb.set_trace()
resource_type = get_object_or_404(EconomicResourceType, id=resource_type_id)
context_agent = EconomicAgent.objects.get(id=context_agent_id)
stage = ExchangeType.objects.get(name="Drying Site to Seller") #actually the stage here should be the process stage, and the rest should handle that
prefix = resource_type.form_prefix()
form = CombineResourcesForm(prefix=prefix, data=request.POST)
if form.is_valid():
data = form.cleaned_data
event_date = data["event_date"]
resources = data["resources"]
identifier = data["identifier"]
notes = data["notes"]
proc_use_case = UseCase.objects.get(identifier="rand")
proc_pattern = None
proc_patterns = [puc.pattern for puc in proc_use_case.patterns.all()]
if proc_patterns:
proc_pattern = proc_patterns[0]
consume_et = EventType.objects.get(name="Resource Consumption")
produce_et = EventType.objects.get(name="Resource Production")
if resources:
process = Process(
name="Combined: new lot",
process_pattern=proc_pattern,
end_date=event_date,
start_date=event_date,
started=event_date,
context_agent=context_agent,
finished=True,
process_type=ProcessType.objects.get(name="Combine Lots"),
created_by=request.user,
)
process.save()
qty = 0
for res in resources:
consume_event = EconomicEvent(
event_type = consume_et,
event_date = event_date,
resource = res,
resource_type = res.resource_type,
process=process,
exchange_stage=stage,
from_agent = res.owner_based_on_exchange(),
to_agent = res.owner_based_on_exchange(),
context_agent = context_agent,
quantity = res.quantity,
unit_of_quantity = res.resource_type.unit,
created_by = request.user,
)
consume_event.save()
qty += res.quantity
res.quantity = 0
res.save()
prod_resource = EconomicResource(
identifier=identifier,
resource_type=resource_type,
quantity=qty,
exchange_stage=stage,
notes=notes,
created_by=request.user
)
prod_resource.save()
prod_event = EconomicEvent(
event_type = produce_et,
event_date = event_date,
resource = prod_resource,
resource_type = prod_resource.resource_type,
exchange_stage=stage,
process = process,
from_agent = res.owner_based_on_exchange(),
to_agent = res.owner_based_on_exchange(),
context_agent = context_agent,
quantity = qty,
unit_of_quantity = prod_resource.resource_type.unit,
description=notes,
created_by = request.user,
)
prod_event.save()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
@login_required
def change_available(request, commitment_id):
commitment = get_object_or_404(Commitment, pk=commitment_id)
context_agent_id = commitment.context_agent.id
if request.method == "POST":
prefix = commitment.form_prefix()
form = CommitmentForm(instance=commitment, data=request.POST, prefix=prefix)
if form.is_valid():
data = form.cleaned_data
form.save()
commitment.unit_of_quantity = commitment.resource_type.unit
commitment.save()
zero_form = ZeroOutForm(prefix=prefix, data=request.POST)
if zero_form.is_valid():
zero_data = zero_form.cleaned_data
zero_out = zero_data["zero_out"]
if zero_out:
commitment.finished = True
commitment.save()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
@login_required
def delete_farm_commitment(request, commitment_id):
commitment = get_object_or_404(Commitment, pk=commitment_id)
context_agent_id = commitment.context_agent.id
if commitment.is_deletable():
commitment.delete()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
@login_required
def undo_col2(request, resource_id):
resource = get_object_or_404(EconomicResource, pk=resource_id)
context_agent_id = default_context_agent().id
#import pdb; pdb.set_trace()
flows = resource.incoming_value_flows()
for item in flows:
if item.class_label() == "Economic Event":
if item.commitment:
commit = item.commitment
commit.finished = False
commit.save()
item.delete()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
@login_required
def undo_col3(request, resource_id):
resource = get_object_or_404(EconomicResource, pk=resource_id)
context_agent_id = default_context_agent().id
#import pdb; pdb.set_trace()
flows = resource.incoming_value_flows()
#todo: I'm not sure how to delete the right rows without going too far back in the chain......
#for item in flows:
# if item.class_label() == "Economic Event":
# item.delete()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
|
from __future__ import unicode_literals
import ckeditor.fields
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('meinberlin_budgeting', '0007_update-strings'),
]
operations = [
migrations.AlterField(
model_name='proposal',
name='description',
field=ckeditor.fields.RichTextField(verbose_name='Description'),
),
migrations.AlterField(
model_name='proposal',
name='name',
field=models.CharField(max_length=120, verbose_name='Name'),
),
]
|
import string
import random
import json
from collections import defaultdict
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from catmaid.fields import Double3D
from catmaid.models import Log, NeuronSearch, CELL_BODY_CHOICES, \
SORT_ORDERS_DICT, Relation, Class, ClassInstance, \
ClassInstanceClassInstance
def _create_relation(user, project_id, relation_id, instance_a_id, instance_b_id):
relation = ClassInstanceClassInstance()
relation.user = user
relation.project_id = project_id
relation.relation_id = relation_id
relation.class_instance_a_id = instance_a_id
relation.class_instance_b_id = instance_b_id
relation.save()
return relation
def insert_into_log(project_id, user_id, op_type, location=None, freetext=None):
""" Inserts a new entry into the log table. If the location parameter is
passed, it is expected to be an iteratable (list, tuple).
"""
# valid operation types
operation_type_array = [
"rename_root",
"create_neuron",
"rename_neuron",
"remove_neuron",
"move_neuron",
"create_group",
"rename_group",
"remove_group",
"move_group",
"create_skeleton",
"rename_skeleton",
"remove_skeleton",
"move_skeleton",
"split_skeleton",
"join_skeleton",
"reroot_skeleton",
"change_confidence"
]
if op_type not in operation_type_array:
return {'error': 'Operation type {0} not valid'.format(op_type)}
new_log = Log()
new_log.user_id = user_id
new_log.project_id = project_id
new_log.operation_type = op_type
if location is not None:
new_log.location = Double3D(*location)
if freetext is not None:
new_log.freetext = freetext
new_log.save()
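# Example usage (hypothetical project/user ids and location):
#   insert_into_log(7, 42, "create_neuron",
#                   location=(10.5, 20.0, 5.0), freetext="created via API")
# returns None on success, or {'error': ...} for an unknown operation type.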
def my_render_to_response(req, *args, **kwargs):
kwargs['context_instance'] = RequestContext(req)
return render_to_response(*args, **kwargs)
def json_error_response(message):
"""
When an operation fails we should return a JSON dictionary
with the key 'error' set to an error message. This is a
helper method to return such a structure:
"""
return HttpResponse(json.dumps({'error': message}),
content_type='text/json')
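# For example, json_error_response('no such neuron') returns an HttpResponse
# whose body is '{"error": "no such neuron"}'.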
def order_neurons(neurons, order_by=None):
column, reverse = 'name', False
if order_by and (order_by in SORT_ORDERS_DICT):
column, reverse, _ = SORT_ORDERS_DICT[order_by]
if column == 'name':
neurons.sort(key=lambda x: x.name)
elif column == 'gal4':
neurons.sort(key=lambda x: x.cached_sorted_lines_str)
elif column == 'cell_body':
neurons.sort(key=lambda x: x.cached_cell_body)
else:
raise Exception("Unknown column (%s) in order_neurons" % (column,))
if reverse:
neurons.reverse()
return neurons
def get_form_and_neurons(request, project_id, kwargs):
# If we've been passed parameters in a REST-style GET request,
# create a form from them. Otherwise, if it's a POST request,
# create the form from the POST parameters. Otherwise, it's a
# plain request, so create the default search form.
rest_keys = ('search', 'cell_body_location', 'order_by')
if any((x in kwargs) for x in rest_keys):
kw_search = kwargs.get('search', None) or ""
kw_cell_body_choice = kwargs.get('cell_body_location', None) or "a"
kw_order_by = kwargs.get('order_by', None) or 'name'
search_form = NeuronSearch({'search': kw_search,
'cell_body_location': kw_cell_body_choice,
'order_by': kw_order_by})
elif request.method == 'POST':
search_form = NeuronSearch(request.POST)
else:
search_form = NeuronSearch({'search': '',
'cell_body_location': 'a',
'order_by': 'name'})
if search_form.is_valid():
search = search_form.cleaned_data['search']
cell_body_location = search_form.cleaned_data['cell_body_location']
order_by = search_form.cleaned_data['order_by']
else:
search = ''
cell_body_location = 'a'
order_by = 'name'
cell_body_choices_dict = dict(CELL_BODY_CHOICES)
all_neurons = ClassInstance.objects.filter(
project__id=project_id,
class_column__class_name='neuron',
name__icontains=search).exclude(name='orphaned pre').exclude(name='orphaned post')
if cell_body_location != 'a':
location = cell_body_choices_dict[cell_body_location]
all_neurons = all_neurons.filter(
project__id=project_id,
cici_via_a__relation__relation_name='has_cell_body',
cici_via_a__class_instance_b__name=location)
cici_qs = ClassInstanceClassInstance.objects.filter(
project__id=project_id,
relation__relation_name='has_cell_body',
class_instance_a__class_column__class_name='neuron',
class_instance_b__class_column__class_name='cell_body_location')
neuron_id_to_cell_body_location = dict(
(x.class_instance_a.id, x.class_instance_b.name) for x in cici_qs)
neuron_id_to_driver_lines = defaultdict(list)
for cici in ClassInstanceClassInstance.objects.filter(
project__id=project_id,
relation__relation_name='expresses_in',
class_instance_a__class_column__class_name='driver_line',
class_instance_b__class_column__class_name='neuron'):
neuron_id_to_driver_lines[cici.class_instance_b.id].append(cici.class_instance_a)
all_neurons = list(all_neurons)
for n in all_neurons:
n.cached_sorted_lines = sorted(
neuron_id_to_driver_lines[n.id], key=lambda x: x.name)
n.cached_sorted_lines_str = ", ".join(x.name for x in n.cached_sorted_lines)
n.cached_cell_body = neuron_id_to_cell_body_location.get(n.id, 'Unknown')
all_neurons = order_neurons(all_neurons, order_by)
return (all_neurons, search_form)
def makeJSON_legacy_list(objects):
'''
The PHP function makeJSON, when operating on a list of rows as
results, will output a JSON list of key-values, with keys being
integers from 0 and upwards. We return a dict with the same
structure so that it looks the same when used with json.dumps.
'''
return dict(enumerate(objects))
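# For example, makeJSON_legacy_list(['a', 'b']) returns {0: 'a', 1: 'b'},
# which json.dumps renders as {"0": "a", "1": "b"}, matching the PHP output.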
def cursor_fetch_dictionary(cursor):
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
def get_relation_to_id_map(project_id):
return {rname: ID for rname, ID in Relation.objects.filter(project=project_id).values_list("relation_name", "id")}
def get_class_to_id_map(project_id):
return {cname: ID for cname, ID in Class.objects.filter(project=project_id).values_list("class_name", "id")}
def urljoin(a, b):
""" Joins to URL parts a and b while making sure this
exactly one slash inbetween.
"""
if a[-1] != '/':
a = a + '/'
if b[0] == '/':
b = b[1:]
return a + b
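# For example, urljoin('http://host/a', '/b') and urljoin('http://host/a/', 'b')
# both return 'http://host/a/b'.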
def id_generator(size=6, chars=string.ascii_lowercase + string.digits):
""" Creates a random string of the specified length.
"""
return ''.join(random.choice(chars) for x in range(size))
|
from openerp import models, fields, api
import openerp.addons.decimal_precision as dp
class ProductSupplierInfo(models.Model):
_inherit = 'product.supplierinfo'
discount = fields.Float(
string='Discount (%)', digits_compute=dp.get_precision('Discount'))
@api.onchange('name')
@api.multi
def onchange_name(self):
for supplierinfo in self.filtered('name'):
supplierinfo.discount =\
supplierinfo.name.default_supplierinfo_discount
|
import sys
sys.path.append('../../lib/python')
sys.path.append('./normalizer/')
import random
import time
import datetime  # used by minutes_colon_seconds() and the normalizer self-checks
import socket    # socket.error is caught when connecting in run_once()
import subprocess
import cPickle
import os.path
import imp
import re
import traceback
from voltdbclient import *
from optparse import OptionParser
from Query import VoltQueryClient
from SQLCoverageReport import generate_summary
from SQLGenerator import SQLGenerator
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
from subprocess import call # invoke unix/linux cmds
from XMLUtils import prettify # To create a human readable xml file
class Config:
def __init__(self, filename):
fd = open(filename, "r")
self.__content = fd.read()
fd.close()
self.__config = eval(self.__content.strip())
def get_configs(self):
return self.__config.keys()
def get_config(self, config_name):
return self.__config[config_name]
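# A config file's contents eval to a dict of test suites; a minimal sketch
# (hypothetical filenames) looks like:
# {
#     "basic": {"schema": "schema.py",
#               "ddl": "DDL.sql",
#               "template": "basic.sql",
#               "normalizer": "normalizer.py"},
# }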
def minutes_colon_seconds(seconds):
return re.sub("^0:", "", str(datetime.timedelta(0, round(seconds))), 1)
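# For example, minutes_colon_seconds(62) returns "01:02" and
# minutes_colon_seconds(3723) returns "1:02:03".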
def print_seconds(seconds=0, message_end="", message_begin="Total time: ",
include_current_time=False):
""" Prints, and returns, a message containing the specified number of
seconds, first in a minutes:seconds format (e.g. "01:02", or "1:43:48"),
then just the exact number of seconds in parentheses, e.g.,
"1:02 (61.9 seconds)", preceded by the 'message_begin' and followed by
'message_end'. Optionally, if 'include_current_time' is True, the current
time (in seconds since January 1, 1970) is also printed, in brackets, e.g.,
"1:02 (61.9 seconds) [at 1408645826.68], ", which is useful for debugging
purposes.
"""
time_msg = minutes_colon_seconds(seconds) + " ({0:.6f} seconds)".format(seconds)
if (include_current_time):
time_msg += " [at " + str(time.time()) + "]"
message = message_begin + time_msg + ", " + message_end
print message
return message
def print_elapsed_seconds(message_end="", prev_time=-1,
message_begin="Elapsed time: "):
"""Computes, returns and prints the difference (in seconds) between the
current system time and a previous time, which is either the specified
'prev_time' or, if that is negative (or unspecified), the previous time
at which this function was called. The printed message is preceded by
'message_begin' and followed by 'message_end'; the elapsed time is printed
in a minutes:seconds format, with the exact number of seconds in parentheses,
e.g., 61.9 seconds would be printed as "01:02 (61.9 seconds), ".
"""
now = time.time()
global save_prev_time
if (prev_time < 0):
prev_time = save_prev_time
save_prev_time = now
diff_time = now - prev_time
print_seconds(diff_time, message_end, message_begin)
return diff_time
def run_once(name, command, statements_path, results_path,
submit_verbosely, testConfigKit, precision):
print "Running \"run_once\":"
print " name: %s" % (name)
print " command: %s" % (command)
print " statements_path: %s" % (statements_path)
print " results_path: %s" % (results_path)
if precision:
print " precision: %s" % (precision)
sys.stdout.flush()
host = defaultHost
port = defaultPort
if(name == "jni"):
akey = "hostname"
if akey in testConfigKit:
host = testConfigKit["hostname"]
port = testConfigKit["hostport"]
global normalize
if(host == defaultHost):
server = subprocess.Popen(command + " backend=" + name, shell=True)
client = None
clientException = None
for i in xrange(30):
try:
client = VoltQueryClient(host, port)
client.set_quiet(True)
client.set_timeout(5.0) # 5 seconds
break
except socket.error as e:
clientException = e
time.sleep(1)
if client is None:
print >> sys.stderr, "Unable to connect/create client: there may be a problem with the VoltDB server or its ports:"
print >> sys.stderr, "name:", str(name)
print >> sys.stderr, "host:", str(host)
print >> sys.stderr, "port:", str(port)
print >> sys.stderr, "client (socket.error) exception:", str(clientException)
sys.stderr.flush()
return -1
if(host != defaultHost):
# Flush database
client.onecmd("updatecatalog " + testConfigKit["testCatalog"] + " " + testConfigKit["deploymentFile"])
statements_file = open(statements_path, "rb")
results_file = open(results_path, "wb")
while True:
try:
statement = cPickle.load(statements_file)
except EOFError:
break
try:
if submit_verbosely:
print "Submitting to backend " + name + " adhoc " + statement["SQL"]
client.onecmd("adhoc " + statement["SQL"])
except:
print >> sys.stderr, "Error occurred while executing '%s': %s" % \
(statement["SQL"], sys.exc_info()[1])
if(host == defaultHost):
# Should kill the server now
killer = subprocess.Popen("kill -9 %d" % (server.pid), shell=True)
killer.communicate()
if killer.returncode != 0:
print >> sys.stderr, \
"Failed to kill the server process %d" % (server.pid)
break
table = None
if client.response is None:
print >> sys.stderr, "No error, but an unexpected null client response (server crash?) from executing statement '%s'" % \
(statement["SQL"],)
if(host == defaultHost):
killer = subprocess.Popen("kill -9 %d" % (server.pid), shell=True)
killer.communicate()
if killer.returncode != 0:
print >> sys.stderr, \
"Failed to kill the server process %d" % (server.pid)
break
if client.response.tables:
### print "DEBUG: got table(s) from ", statement["SQL"] ,"."
if precision:
table = normalize(client.response.tables[0], statement["SQL"], precision)
else:
table = normalize(client.response.tables[0], statement["SQL"])
if len(client.response.tables) > 1:
print "WARNING: ignoring extra table(s) from result of query ?", statement["SQL"] , "?"
# else:
# print "WARNING: returned no table(s) from ?", statement["SQL"] ,"?"
cPickle.dump({"Status": client.response.status,
"Info": client.response.statusString,
"Result": table,
"Exception": str(client.response.exception)},
results_file)
results_file.close()
statements_file.close()
if(host == defaultHost):
client.onecmd("shutdown")
server.communicate()
else:
client.onecmd("disconnect")
sys.stdout.flush()
sys.stderr.flush()
if(host == defaultHost):
return server.returncode
else:
return 0
def get_max_mismatches(comparison_database, suite_name):
"""Returns the maximum number of acceptable mismatches, i.e., the number of
'known' failures for VoltDB to match the results of the comparison database
(HSQL or PostgreSQL), which is normally zero; however, there are sometimes
a few exceptions, e.g., for queries that are not supported by PostgreSQL.
"""
max_mismatches = 0
# Kludge to not fail for known issues, when running against PostgreSQL
# (or the PostGIS extension of PostgreSQL)
if comparison_database.startswith('Post'):
# Known failures in the basic-joins test suite, and in the basic-index-joins,
# and basic-compoundex-joins "extended" test suites (see ENG-10775)
if (suite_name == 'basic-joins' or suite_name == 'basic-index-joins' or
suite_name == 'basic-compoundex-joins'):
max_mismatches = 5280
# Known failures, related to the ones above, in the basic-int-joins test
# suite (see ENG-10775, ENG-11401)
elif suite_name == 'basic-int-joins':
max_mismatches = 600
# Known failures in the joined-matview-* test suites ...
# Failures in joined-matview-default-full due to ENG-11086
elif suite_name == 'joined-matview-default-full':
max_mismatches = 3387
# Failures in joined-matview-int due to ENG-11086
elif suite_name == 'joined-matview-int':
max_mismatches = 46440
return max_mismatches
def get_config_path(basedir, config_key, config_value):
"""Returns the correct path to a specific (ddl, normalizer, schema, or
template) file, given its config 'key' and 'value'. The 'key' will be one
of 'ddl', 'normalizer', 'schema', or 'template', the last of which is the
more complicated case, requiring us to check the various subdirectories.
"""
for subdir in os.walk(os.path.join(basedir, config_key)):
filename = os.path.join(subdir[0], config_value)
if os.path.isfile(filename):
return os.path.abspath(filename)
# If you cannot find the file, leave the value unchanged
return config_value
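# For example, get_config_path(basedir, 'template', 'basic.sql') (hypothetical
# filename) walks basedir/template/ and its subdirectories and returns the
# absolute path of the first basic.sql found, or 'basic.sql' unchanged if none.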
def run_config(suite_name, config, basedir, output_dir, random_seed,
report_invalid, report_all, generate_only, subversion_generation,
submit_verbosely, ascii_only, args, testConfigKit):
# Store the current, initial system time (in seconds since January 1, 1970)
time0 = time.time()
precision = 0
within_minutes = 0
for key in config.iterkeys():
if key == "precision":
precision = int(config["precision"])
elif key == "within-minutes":
within_minutes = int(config["within-minutes"])
elif not os.path.isabs(config[key]):
config[key] = get_config_path(basedir, key, config[key])
print "in run_config key = '%s', config[key] = '%s'" % (key, str(config[key]))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
global comparison_database
comparison_database_lower = comparison_database.lower()
statements_path = os.path.abspath(os.path.join(output_dir, "statements.data"))
cmpdb_path = os.path.abspath(os.path.join(output_dir, comparison_database_lower + ".data"))
jni_path = os.path.abspath(os.path.join(output_dir, "jni.data"))
modified_sql_path = None
debug_transform_sql_arg = ''
global debug_transform_sql
if debug_transform_sql:
if comparison_database == 'PostgreSQL' or comparison_database == 'PostGIS':
modified_sql_path = os.path.abspath(os.path.join(output_dir, 'postgresql_transform.out'))
debug_transform_sql_arg = ' -Dsqlcoverage.transform.sql.file='+modified_sql_path
template = config["template"]
global normalize
if "normalizer" in config:
normalize = imp.load_source("normalizer", config["normalizer"]).normalize
# print "DEBUG: using normalizer ", config["normalizer"], " for ", template
self_check_safecmp = imp.load_source("normalizer", config["normalizer"]).safecmp
theNow = datetime.datetime.now()
if self_check_safecmp([theNow], [theNow]) != 0:
print >> sys.stderr, "safe_cmp fails [datetime] selfcheck"
exit(2)
if self_check_safecmp([None], [None]) != 0:
print >> sys.stderr, "safe_cmp fails [None] selfcheck"
exit(2)
if self_check_safecmp([theNow], [None]) <= 0:
print >> sys.stderr, "safe_cmp fails [datetime], [None] selfcheck"
exit(2)
theLater = datetime.datetime.now()
if self_check_safecmp([None, theNow], [None, theLater]) >= 0:
print >> sys.stderr, "safe_cmp fails [None, datetime] selfcheck"
exit(2)
else:
normalize = lambda x, y: x
# print "DEBUG: using no normalizer for ", template
command = " ".join(args[2:])
command += " schema=" + os.path.basename(config['ddl'])
if debug_transform_sql:
command = command.replace(" -server ", debug_transform_sql_arg+" -server ")
random_state = random.getstate()
if "template-jni" in config:
template = config["template-jni"]
generator = SQLGenerator(config["schema"], template, subversion_generation, ascii_only)
counter = 0
statements_file = open(statements_path, "wb")
for i in generator.generate(submit_verbosely):
cPickle.dump({"id": counter, "SQL": i}, statements_file)
counter += 1
statements_file.close()
min_statements_per_pattern = generator.min_statements_per_pattern()
max_statements_per_pattern = generator.max_statements_per_pattern()
num_inserts = generator.num_insert_statements()
num_patterns = generator.num_patterns()
num_unresolved = generator.num_unresolved_statements()
if generate_only or submit_verbosely:
print "Generated %d statements." % counter
if generate_only:
# Claim success without running servers.
return {"keyStats" : None, "mis" : 0}
# Print the elapsed time, with a message
global total_gensql_time
gensql_time = print_elapsed_seconds("for generating statements (" + suite_name + ")", time0)
total_gensql_time += gensql_time
num_crashes = 0
failed = False
try:
if run_once("jni", command, statements_path, jni_path,
submit_verbosely, testConfigKit, precision) != 0:
print >> sys.stderr, "Test with the JNI (VoltDB) backend had errors (crash?)."
failed = True
except:
print >> sys.stderr, "JNI (VoltDB) backend crashed!!!"
traceback.print_exc()
failed = True
if (failed):
print >> sys.stderr, " jni_path: %s" % (jni_path)
sys.stderr.flush()
num_crashes += 1
#exit(1)
# Print the elapsed time, with a message
global total_voltdb_time
voltdb_time = print_elapsed_seconds("for running VoltDB (JNI) statements (" + suite_name + ")")
total_voltdb_time += voltdb_time
random.seed(random_seed)
random.setstate(random_state)
failed = False
try:
if run_once(comparison_database_lower, command, statements_path, cmpdb_path,
submit_verbosely, testConfigKit, precision) != 0:
print >> sys.stderr, "Test with the " + comparison_database + " backend had errors (crash?)."
failed = True
except:
print >> sys.stderr, comparison_database + " backend crashed!!"
traceback.print_exc()
failed = True
if (failed):
print >> sys.stderr, " cmpdb_path: %s" % (cmpdb_path)
sys.stderr.flush()
num_crashes += 1
#exit(1)
# Print the elapsed time, with a message
global total_cmpdb_time
cmpdb_time = print_elapsed_seconds("for running " + comparison_database + " statements (" + suite_name + ")")
total_cmpdb_time += cmpdb_time
someStats = (get_numerical_html_table_element(min_statements_per_pattern, strong_warn_below=1) +
get_numerical_html_table_element(max_statements_per_pattern, strong_warn_below=1, warn_above=100000) +
get_numerical_html_table_element(num_inserts, warn_below=4, strong_warn_below=1, warn_above=1000) +
get_numerical_html_table_element(num_patterns, warn_below=4, strong_warn_below=1, warn_above=10000) +
get_numerical_html_table_element(num_unresolved, error_above=0) +
get_time_html_table_element(gensql_time) +
get_time_html_table_element(voltdb_time) +
get_time_html_table_element(cmpdb_time) )
extraStats = get_numerical_html_table_element(num_crashes, error_above=0) + someStats
max_mismatches = get_max_mismatches(comparison_database, suite_name)
global compare_results
try:
compare_results = imp.load_source("normalizer", config["normalizer"]).compare_results
success = compare_results(suite_name, random_seed, statements_path, cmpdb_path,
jni_path, output_dir, report_invalid, report_all, extraStats,
comparison_database, modified_sql_path, max_mismatches, within_minutes)
except:
print >> sys.stderr, "Compare (VoltDB & " + comparison_database + ") results crashed!"
traceback.print_exc()
print >> sys.stderr, " jni_path: %s" % (jni_path)
print >> sys.stderr, " cmpdb_path: %s" % (cmpdb_path)
sys.stderr.flush()
num_crashes += 1
gray_zero_html_table_element = get_numerical_html_table_element(0, use_gray=True)
errorStats = (gray_zero_html_table_element + gray_zero_html_table_element +
gray_zero_html_table_element + gray_zero_html_table_element +
gray_zero_html_table_element + gray_zero_html_table_element +
gray_zero_html_table_element + gray_zero_html_table_element +
gray_zero_html_table_element +
get_numerical_html_table_element(num_crashes, error_above=0) + someStats + '</tr>' )
success = {"keyStats": errorStats, "mis": -1}
# Print & save the elapsed time and total time, with a message
global total_compar_time
compar_time = print_elapsed_seconds("for comparing DB results (" + suite_name + ")")
total_compar_time += compar_time
suite_secs = print_elapsed_seconds("for run_config of '" + suite_name + "'", time0, "Sub-tot time: ")
sys.stdout.flush()
# Accumulate the total number of Valid, Invalid, Mismatched & Total statements
global total_statements
def next_keyStats_column_value():
prefix = "<td"
suffix = "</td>"
global keyStats_start_index
start_index = 0
end_index = 0
next_col_val = "0"
try:
start_index = success["keyStats"].index(prefix, keyStats_start_index) + len(prefix)
start_index = success["keyStats"].index('>', start_index) + 1
end_index = success["keyStats"].index(suffix, start_index)
next_col_val = success["keyStats"][start_index: end_index]
keyStats_start_index = end_index + len(suffix)
except:
print "Caught exception:\n", sys.exc_info()[0]
print "success[keyStats]:\n", success["keyStats"]
print "keyStats_start_index:", keyStats_start_index
print "start_index :", start_index
print "end_index :", end_index
print "next_col_val:", next_col_val
return next_col_val
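# success["keyStats"] is an HTML row fragment of <td> cells produced by
# compare_results, e.g. (hypothetical values):
#   <td align=right>90</td><td align=right>90%</td><td align=right>10</td>...
# next_keyStats_column_value() scans it left to right, returning each cell's
# text in turn ("90", "90%", "10", ...).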
global valid_statements
global invalid_statements
global mismatched_statements
global keyStats_start_index
global total_volt_npes
global total_cmp_npes
global total_num_crashes
global total_num_inserts
global total_num_patterns
global total_num_unresolved
global min_all_statements_per_pattern
global max_all_statements_per_pattern
keyStats_start_index = 0
valid_statements += int(next_keyStats_column_value())
next_keyStats_column_value() # ignore Valid %
invalid_statements += int(next_keyStats_column_value())
next_keyStats_column_value() # ignore Invalid %
total_statements += int(next_keyStats_column_value())
mismatched_statements += int(next_keyStats_column_value())
next_keyStats_column_value() # ignore Mismatched %
total_volt_npes += int(next_keyStats_column_value())
total_cmp_npes += int(next_keyStats_column_value())
total_num_crashes += num_crashes
total_num_inserts += num_inserts
total_num_patterns += num_patterns
total_num_unresolved += num_unresolved
min_all_statements_per_pattern = min(min_all_statements_per_pattern, min_statements_per_pattern)
max_all_statements_per_pattern = max(max_all_statements_per_pattern, max_statements_per_pattern)
finalStats = (get_time_html_table_element(compar_time) +
get_time_html_table_element(suite_secs) )
success["keyStats"] = success["keyStats"].replace('</tr>', finalStats + '</tr>')
return success
def get_html_table_element_color(value, error_below, strong_warn_below, warn_below,
error_above, strong_warn_above, warn_above, use_gray):
color = ''
if (use_gray):
color = ' bgcolor=#D3D3D3' # gray
elif (value < error_below or value > error_above):
color = ' bgcolor=#FF0000' # red
elif (value < strong_warn_below or value > strong_warn_above):
color = ' bgcolor=#FFA500' # orange
elif (value < warn_below or value > warn_above):
color = ' bgcolor=#FFFF00' # yellow
return color
def get_numerical_html_table_element(value, error_below=-1, strong_warn_below=0, warn_below=0,
error_above=1000000000, strong_warn_above=1000000, warn_above=100000, # 1 billion, 1 million, 100,000
use_gray=False):
return ('<td align=right%s>%d</td>' %
(get_html_table_element_color(value, error_below, strong_warn_below, warn_below,
error_above, strong_warn_above, warn_above, use_gray),
value) )
def get_time_html_table_element(seconds, error_below=0, strong_warn_below=0, warn_below=0,
error_above=28800, strong_warn_above=3600, warn_above=600, # 8 hours, 1 hour, 10 minutes
use_gray=False):
return ('<td align=right%s>%s</td>' %
(get_html_table_element_color(seconds, error_below, strong_warn_below, warn_below,
error_above, strong_warn_above, warn_above, use_gray),
minutes_colon_seconds(seconds)) )
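# For example, get_numerical_html_table_element(5, warn_above=3) returns
# '<td align=right bgcolor=#FFFF00>5</td>' (yellow, since 5 > warn_above).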
def get_voltcompiler(basedir):
key = "voltdb"
(head, tail) = basedir.split(key)
voltcompiler = head + key + "/bin/voltcompiler"
if(os.access(voltcompiler, os.X_OK)):
return voltcompiler
else:
return None
def get_hostinfo(options):
if options.hostname == None:
hostname = defaultHost
else:
hostname = options.hostname
if options.hostport == None:
hostport = defaultPort
else:
if(options.hostport.isdigit()):
hostport = int(options.hostport)
else:
print "Invalid value for port number: #%s#" % options.hostport
usage()
sys.exit(3)
return (hostname, hostport)
def create_catalogFile(voltcompiler, projectFile, catalogFilename):
catalogFile = "/tmp/" + catalogFilename + ".jar"
cmd = voltcompiler + " /tmp " + projectFile + " " + catalogFile
call(cmd, shell=True)
if not os.path.exists(catalogFile):
catalogFile = None
return catalogFile
def create_projectFile(ddl, projFilename):
proj = Element('project')
db = SubElement(proj, 'database')
schemas = SubElement(db, 'schemas')
schema = SubElement(schemas, 'schema', {'path':ddl})
thisProjectFile = "/tmp/" + projFilename + "4projectFile.xml"
fo = open(thisProjectFile, "wb")
fo.write(prettify(proj))
fo.close()
if not os.path.exists(thisProjectFile):
thisProjectFile = None
return thisProjectFile
def create_deploymentFile(options):
kfactor = options.kfactor
sitesperhost = options.sitescount
hostcount = options.hostcount
deployment = Element('deployment')
cluster = SubElement(deployment, 'cluster',
{'kfactor':kfactor, 'sitesperhost':sitesperhost, 'hostcount':hostcount})
httpd = SubElement(deployment, 'httpd', {'port':"8080"})
jsonapi = SubElement(httpd, 'jsonapi', {'enabled':"true"})
deploymentFile = "/tmp/deploymentFile.xml"
fo = open(deploymentFile, "wb")
fo.write(prettify(deployment))
fo.close()
if not os.path.exists(deploymentFile):
deploymentFile = None
return deploymentFile
def create_testConfigKits(options, basedir):
testConfigKits = {}
voltcompiler = get_voltcompiler(basedir)
if voltcompiler == None:
print >> sys.stderr, "Cannot find the executable voltcompiler!"
sys.exit(3)
else:
testConfigKits["voltcompiler"] = voltcompiler
deploymentFile = create_deploymentFile(options)
if deploymentFile == None:
print >> sys.stderr, "Cannot find the deployment xml file!"
sys.exit(3)
else:
testConfigKits["deploymentFile"] = deploymentFile
(hostname, hostport) = get_hostinfo(options)
testConfigKits["hostname"] = hostname
testConfigKits["hostport"] = hostport
return testConfigKits
def usage():
print sys.argv[0], "config output_dir command"
print """
config\t\tThe configuration file containing the filenames of the schema,
\t\tthe template, and the normalizer.
output_dir\tThe output directory for the HTML reports.
command\t\tThe command to launch the server.
The schema is merely a Python dictionary which describes the names of the tables
and the column names and types in those tables. The following is an example of a
schema description,
\t{
\t "T": {
\t "columns": (("DESC", FastSerializer.VOLTTYPE_STRING),
\t ("ID", FastSerializer.VOLTTYPE_INTEGER),
\t ("NUM", FastSerializer.VOLTTYPE_INTEGER)),
\t "partitions": (),
\t "indexes": ("ID")
\t }
\t }
This dictionary describes a table called "T" with three columns "DESC", "ID",
and "NUM".
The template is a .sql file containing SQL statements to run in the test. The
SQL statements are templates with place holders which will be substituted with
real values when the test is run. An example looks like this,
\tSELECT _variable FROM _table WHERE _variable _cmp _variable LIMIT _value[byte];
A possible SQL statement generated from this template based on the table
description above would be
\tSELECT ID FROM T WHERE ID < NUM LIMIT 3;
The following place holders are supported,
\t_variable[type]\tWill be replaced with a column name of the given type,
\t\t\ttype can be int,byte,int16,int32,int64,float,string,
\t\t\tdate. int is a superset of byte,int16,int32,int64.
\t\t\tType can be omitted.
\t_table\t\tWill be replaced with a table name
\t_value[type]\tWill be replaced with a random value of the given type,
\t\t\ttype can be id,byte,int16,int32,int64,float,string,date.
\t\t\tid is a unique integer type, incremented by 1 each time.
\t\t\tYou can also specify an integer within a range,
\t\t\te.g. _value[int:0,100]
\t_cmp\t\tWill be replaced with a comparison operator
\t_math\t\tWill be replaced with an arithmetic operator
\t_agg\t\tWill be replaced with an aggregation operator
\t_maybe\t\tWill be replaced with NOT or simply removed
\t_distinct\t\tWill be replaced with DISTINCT or simply removed
\t_like\t\tWill be replaced with LIKE or NOT LIKE
\t_set\t\tWill be replaced with a set operator
\t_logic\t\tWill be replaced with a logic operator
\t_sortorder\tWill be replaced with ASC, DESC, or 'blank' (implicitly ascending)
"""
if __name__ == "__main__":
#print the whole command line, maybe useful for debugging
#print " ".join(sys.argv)
# Print the current, initial system time
time0 = time.time()
print "Initial time: " + str(time0) + ", at start (in seconds since January 1, 1970)"
save_prev_time = time0
total_gensql_time = 0.0
total_voltdb_time = 0.0
total_cmpdb_time = 0.0
total_compar_time = 0.0
keyStats_start_index = 0
valid_statements = 0
invalid_statements = 0
mismatched_statements = 0
total_statements = 0
total_volt_npes = 0
total_cmp_npes = 0
total_num_crashes = 0
total_num_inserts = 0
total_num_patterns = 0
total_num_unresolved = 0
max_all_statements_per_pattern = 0
min_all_statements_per_pattern = sys.maxint
parser = OptionParser()
parser.add_option("-l", "--leader", dest="hostname",
help="the hostname of the leader")
parser.add_option("-n", "--number", dest="hostcount",
help="the number of total hosts used in this test")
parser.add_option("-k", "--kfactor", dest="kfactor",
help="the number of kfactor used in this test")
parser.add_option("-t", "--sitescount", dest="sitescount",
help="the number of partitions used in this test")
parser.add_option("-p", "--port", dest="hostport",
help="the port number of the leader")
parser.add_option("-s", "--seed", dest="seed",
help="seed for random number generator")
parser.add_option("-c", "--config", dest="config", default=None,
help="the name of the config to run")
parser.add_option("-S", "--subversion_generation", dest="subversion_generation",
action="store_true", default=None,
help="enable generation of additional subquery forms for select statements")
parser.add_option("-a", "--ascii-only", action="store_true",
dest="ascii_only", default=False,
help="include only ASCII values in randomly generated string constants")
parser.add_option("-i", "--report-invalid", action="store_true",
dest="report_invalid", default=False,
help="report invalid SQL statements, not just mismatches")
parser.add_option("-r", "--report-all", action="store_true",
dest="report_all", default=False,
help="report all attempted SQL statements, not just mismatches")
parser.add_option("-g", "--generate-only", action="store_true",
dest="generate_only", default=False,
help="only generate and report SQL statements, do not start any database servers")
parser.add_option("-P", "--postgresql", action="store_true",
dest="postgresql", default=False,
help="compare VoltDB results to PostgreSQL, rather than HSqlDB")
parser.add_option("-G", "--postgis", action="store_true",
dest="postgis", default=False,
help="compare VoltDB results to PostgreSQL, with the PostGIS extension")
(options, args) = parser.parse_args()
if options.seed == None:
seed = random.randint(0, 2 ** 63)
print "Random seed: %d" % seed
else:
seed = int(options.seed)
print "Using supplied seed: " + str(seed)
random.seed(seed)
if len(args) < 3:
usage()
sys.exit(3)
config_filename = args[0]
output_dir = args[1]
# Parent directory of the 'config' directory (i.e., this would
# normally be the 'sqlcoverage' directory)
basedir = os.path.dirname(os.path.dirname(config_filename))
config_list = Config(config_filename)
configs_to_run = []
if options.config != None:
if options.config not in config_list.get_configs():
print >> sys.stderr, \
"Selected config %s not present in config file %s" % (options.config, config_filename)
sys.exit(3)
else:
configs_to_run.append(options.config)
else:
configs_to_run = config_list.get_configs()
comparison_database = "HSqlDB" # default value
debug_transform_sql = False
if options.postgresql:
comparison_database = 'PostgreSQL'
debug_transform_sql = True
if options.postgis:
comparison_database = 'PostGIS'
debug_transform_sql = True
testConfigKits = {}
defaultHost = "localhost"
defaultPort = 21212
if(options.hostname != None and options.hostname != defaultHost):
# To set a dictionary with following 4 keys:
# testConfigKits["voltcompiler"]
# testConfigKits["deploymentFile"]
# testConfigKits["hostname"]
# testConfigKits["hostport"]
testConfigKits = create_testConfigKits(options, basedir)
success = True
statistics = {}
for config_name in configs_to_run:
print >> sys.stderr, "\nSQLCOVERAGE: STARTING ON CONFIG: %s\n" % config_name
report_dir = output_dir + '/' + config_name
config = config_list.get_config(config_name)
if(options.hostname != None and options.hostname != defaultHost):
testDDL = basedir + "/" + config['ddl']
testProjectFile = create_projectFile(testDDL, 'test')
testCatalog = create_catalogFile(testConfigKits['voltcompiler'], testProjectFile, 'test')
# To add one more key
testConfigKits["testCatalog"] = testCatalog
result = run_config(config_name, config, basedir, report_dir, seed,
options.report_invalid, options.report_all,
options.generate_only, options.subversion_generation,
options.report_all, options.ascii_only, args, testConfigKits)
statistics[config_name] = result["keyStats"]
statistics["seed"] = seed
# The maximum number of acceptable mismatches is normally zero, except
# for certain rare cases involving known errors in PostgreSQL
if result["mis"] > get_max_mismatches(comparison_database, config_name):
success = False
# Write the summary
time1 = time.time()
if total_statements > 0:
valid_percent = '{0:.2f}'.format(100.00 * valid_statements / total_statements)
invalid_percent = '{0:.2f}'.format(100.00 * invalid_statements / total_statements)
mismatched_percent = '{0:.2f}'.format(100.00 * mismatched_statements / total_statements)
else:
valid_percent = '0.00'
invalid_percent = '0.00'
mismatched_percent = '0.00'
statistics["totals"] = "\n<td align=right>" + str(valid_statements) + "</td>" + \
"\n<td align=right>" + valid_percent + "%</td>" + \
"\n<td align=right>" + str(invalid_statements) + "</td>" + \
"\n<td align=right>" + invalid_percent + "%</td>" + \
"\n<td align=right>" + str(total_statements) + "</td>" + \
"\n<td align=right>" + str(mismatched_statements) + "</td>" + \
"\n<td align=right>" + mismatched_percent + "%</td>" + \
"\n<td align=right>" + str(total_volt_npes) + "</td>" + \
"\n<td align=right>" + str(total_cmp_npes) + "</td>" + \
"\n<td align=right>" + str(total_num_crashes) + "</td>" + \
"\n<td align=right>" + str(min_all_statements_per_pattern) + "</td>" + \
"\n<td align=right>" + str(max_all_statements_per_pattern) + "</td>" + \
"\n<td align=right>" + str(total_num_inserts) + "</td>" + \
"\n<td align=right>" + str(total_num_patterns) + "</td>" + \
"\n<td align=right>" + str(total_num_unresolved) + "</td>" + \
"\n<td align=right>" + minutes_colon_seconds(total_gensql_time) + "</td>" + \
"\n<td align=right>" + minutes_colon_seconds(total_voltdb_time) + "</td>" + \
"\n<td align=right>" + minutes_colon_seconds(total_cmpdb_time) + "</td>" + \
"\n<td align=right>" + minutes_colon_seconds(total_compar_time) + "</td>" + \
"\n<td align=right>" + minutes_colon_seconds(time1-time0) + "</td></tr>\n"
generate_summary(output_dir, statistics, comparison_database)
# Print the total time, for each type of activity
print_seconds(total_gensql_time, "for generating ALL SQL statements")
print_seconds(total_voltdb_time, "for running ALL VoltDB (JNI) statements")
print_seconds(total_cmpdb_time, "for running ALL " + comparison_database + " statements")
print_seconds(total_compar_time, "for comparing ALL DB results")
print_elapsed_seconds("for generating the output report", time1, "Total time: ")
print_elapsed_seconds("for the entire run", time0, "Total time: ")
if total_num_unresolved > 0:
success = False
print "Total number of invalid statements with unresolved symbols: %d" % total_num_unresolved
if total_cmp_npes > 0:
print "Total number of " + comparison_database + " NullPointerExceptions (NPEs): %d" % total_cmp_npes
if total_volt_npes > 0:
success = False
print "Total number of VoltDB NullPointerExceptions (NPEs): %d" % total_volt_npes
if mismatched_statements > 0:
print "Total number of mismatched statements (i.e., test failures): %d" % mismatched_statements
if total_num_crashes > 0:
print "Total number of (VoltDB, " + comparison_database + ", or compare results) crashes: %d" % total_num_crashes
success = False
if not success:
sys.stdout.flush()
sys.stderr.flush()
print >> sys.stderr, "SQL coverage has errors."
exit(1)
|
import logging
from openfisca_core.tools import assert_near
from openfisca_country_template import CountryTaxBenefitSystem
from openfisca_survey_manager.tests.test_scenario import (
create_randomly_initialized_survey_scenario
)
log = logging.getLogger(__name__)
tax_benefit_system = CountryTaxBenefitSystem()
def test_compute_marginal_tax_rate():
survey_scenario = create_randomly_initialized_survey_scenario(use_marginal_tax_rate = True)
assert survey_scenario._modified_simulation is not None
assert_near(
survey_scenario.compute_marginal_tax_rate(target_variable = 'income_tax', period = 2017),
(1 - .15),
relative_error_margin = 1e-6,
)
survey_scenario.compute_marginal_tax_rate(target_variable = 'disposable_income', period = 2017)
if __name__ == "__main__":
import sys
log = logging.getLogger(__name__)
logging.basicConfig(level = logging.DEBUG, stream = sys.stdout)
test_compute_marginal_tax_rate()
|
import setuptools
__author__ = 'Damjan Georgievski'
__version__ = '2.0'
__email__ = 'gdamjan@gmail.com'
setuptools.setup(
name = 'convertor',
version = __version__,
author = __author__,
author_email = __email__,
description = 'converts ODF files from a YUSCII font-encoding to proper UTF-8 ODF',
license = 'AGPL 3.0',
url = 'http://github.com/gdamjan/convertor',
packages = ['convertor'],
package_data = {},
keywords = "ODF",
include_package_data = True,
classifiers = [
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3.6'
],
test_suite = '',
zip_safe = False,
entry_points = {
'console_scripts':
['convertor=convertor.__main__:main']
},
install_requires = ['lxml'],
extras_require = {
"web": "Werkzeug"
}
)
|
from django.core.management.base import BaseCommand, CommandError
from quotes_app.tasks import rank_all
import logging
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Runs reranking algorithms on the Quotes.'
def handle(self, *args, **options):
logger.info('Running {0} management task.'.format(__name__))
rank_all()
|
from collections import defaultdict
from django.core.files.storage import DefaultStorage
from django.core.management.base import BaseCommand, CommandError
from candidates.csv_helpers import list_to_csv, memberships_dicts_for_csv
from elections.models import Election
def safely_write(output_filename, memberships_list):
"""
Use Django's storage backend to write the CSV file to the MEDIA_ROOT.
If using S3 (via Django Storages) the file is atomically written when the
file is closed (when the context manager closes).
That is, the file can be opened and written to but nothing changes at
the public S3 URL until the object is closed. Meaning it's not possible to
have a half written file.
If not using S3, there will be a short time where the file is empty
during write.
"""
csv = list_to_csv(memberships_list)
file_store = DefaultStorage()
with file_store.open(output_filename, "wb") as out_file:
out_file.write(csv.encode("utf-8"))
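# A minimal usage sketch for safely_write (the filename and row contents here
# are hypothetical, and a configured Django storage backend is assumed):
#   safely_write("candidates-parl.2024-01-01.csv",
#                [{"name": "Example Candidate", "party": "Example Party"}])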
class Command(BaseCommand):
help = "Output CSV files for all elections"
def add_arguments(self, parser):
parser.add_argument(
"--site-base-url",
help="The base URL of the site (for full image URLs)",
)
parser.add_argument(
"--election",
metavar="ELECTION-SLUG",
help="Only output CSV for the election with this slug",
)
def slug_to_file_name(self, slug):
return "{}-{}.csv".format(self.output_prefix, slug)
def handle(self, **options):
if options["election"]:
try:
election = Election.objects.get(slug=options["election"])
election_slug = election.slug
except Election.DoesNotExist:
message = "Couldn't find an election with slug {election_slug}"
raise CommandError(
message.format(election_slug=options["election"])
)
else:
election_slug = None
self.options = options
self.output_prefix = "candidates"
membership_by_election, elected_by_election = memberships_dicts_for_csv(
election_slug
)
# Write a file per election, optionally adding candidates
# We still want a file to exist if there are no candidates yet,
        # as the files are linked to as soon as the election is created
election_qs = Election.objects.all()
if election_slug:
election_qs = election_qs.filter(slug=election_slug)
for election in election_qs:
safely_write(
self.slug_to_file_name(election.slug),
membership_by_election.get(election.slug, []),
)
# Make a CSV file per election date
slugs_by_date = defaultdict(list)
for slug in membership_by_election.keys():
slugs_by_date[slug.split(".")[-1]].append(slug)
for date, slugs in slugs_by_date.items():
memberships_for_date = []
for slug in slugs:
memberships_for_date += membership_by_election[slug]
safely_write(self.slug_to_file_name(date), memberships_for_date)
# If we're not outputting a single election, output all elections
if not election_slug:
sorted_elections = sorted(
membership_by_election.keys(),
key=lambda key: key.split(".")[-1],
)
all_memberships = []
all_elected = []
for slug in sorted_elections:
all_memberships += membership_by_election[slug]
all_elected += elected_by_election[slug]
safely_write(self.slug_to_file_name("all"), all_memberships)
safely_write(self.slug_to_file_name("elected-all"), all_elected)
|
from __future__ import unicode_literals
import webnotes
from webnotes.utils import add_days, cstr, getdate
from webnotes.model.doc import addchild
from webnotes.model.bean import getlist
from webnotes import msgprint, _
from stock.utils import get_valid_serial_nos
from utilities.transaction_base import TransactionBase, delete_events
class DocType(TransactionBase):
	def __init__(self, doc, doclist=None):
		self.doc = doc
		self.doclist = doclist or []
def get_item_details(self, item_code):
item = webnotes.conn.sql("""select item_name, description from `tabItem`
where name = %s""", (item_code), as_dict=1)
ret = {
'item_name': item and item[0]['item_name'] or '',
'description' : item and item[0]['description'] or ''
}
return ret
def generate_schedule(self):
self.doclist = self.doc.clear_table(self.doclist, 'maintenance_schedule_detail')
count = 0
webnotes.conn.sql("delete from `tabMaintenance Schedule Detail` where parent='%s'" %(self.doc.name))
for d in getlist(self.doclist, 'item_maintenance_detail'):
self.validate_maintenance_detail()
			s_list = self.create_schedule_list(d.start_date, d.end_date, d.no_of_visits)
for i in range(d.no_of_visits):
child = addchild(self.doc, 'maintenance_schedule_detail',
'Maintenance Schedule Detail', self.doclist)
child.item_code = d.item_code
child.item_name = d.item_name
child.scheduled_date = s_list[i].strftime('%Y-%m-%d')
if d.serial_no:
child.serial_no = d.serial_no
child.idx = count
count = count+1
child.incharge_name = d.incharge_name
child.save(1)
self.on_update()
def on_submit(self):
if not getlist(self.doclist, 'maintenance_schedule_detail'):
msgprint("Please click on 'Generate Schedule' to get schedule")
raise Exception
self.check_serial_no_added()
self.validate_schedule()
		email_map = {}
for d in getlist(self.doclist, 'item_maintenance_detail'):
if d.serial_no:
serial_nos = get_valid_serial_nos(d.serial_no)
self.validate_serial_no(serial_nos, d.start_date)
self.update_amc_date(serial_nos, d.end_date)
if d.incharge_name not in email_map:
email_map[d.incharge_name] = webnotes.bean("Sales Person",
d.incharge_name).run_method("get_email_id")
			scheduled_date = webnotes.conn.sql("""select scheduled_date
				from `tabMaintenance Schedule Detail` where incharge_name=%s
				and item_code=%s and parent=%s""",
				(d.incharge_name, d.item_code, self.doc.name), as_dict=1)
for key in scheduled_date:
if email_map[d.incharge_name]:
description = "Reference: %s, Item Code: %s and Customer: %s" % \
(self.doc.name, d.item_code, self.doc.customer)
webnotes.bean({
"doctype": "Event",
"owner": email_map[d.incharge_name] or self.doc.owner,
"subject": description,
"description": description,
"starts_on": key["scheduled_date"] + " 10:00:00",
"event_type": "Private",
"ref_type": self.doc.doctype,
"ref_name": self.doc.name
}).insert()
webnotes.conn.set(self.doc, 'status', 'Submitted')
#get schedule dates
#----------------------
	def create_schedule_list(self, start_date, end_date, no_of_visit):
		schedule_list = []
		start_date1 = start_date
		date_diff = (getdate(end_date) - getdate(start_date)).days
		add_by = date_diff / no_of_visit
		while getdate(start_date1) < getdate(end_date):
			start_date1 = add_days(start_date1, add_by)
			if len(schedule_list) < no_of_visit:
				schedule_list.append(getdate(start_date1))
		return schedule_list
#validate date range and periodicity selected
#-------------------------------------------------
def validate_period(self, arg):
arg1 = eval(arg)
		if getdate(arg1['start_date']) >= getdate(arg1['end_date']):
			msgprint("Start date should be less than end date")
			raise Exception
		period = (getdate(arg1['end_date']) - getdate(arg1['start_date'])).days + 1
		if arg1['periodicity'] in ('Yearly', 'Half Yearly', 'Quarterly') and period < 365:
			msgprint(cstr(arg1['periodicity']) + " periodicity can only be set for a period of at least 1 year")
			raise Exception
		elif arg1['periodicity'] == 'Monthly' and period < 30:
			msgprint("Monthly periodicity can only be set for a period of at least 1 month")
			raise Exception
		elif arg1['periodicity'] == 'Weekly' and period < 7:
			msgprint("Weekly periodicity can only be set for a period of at least 1 week")
			raise Exception
def get_no_of_visits(self, arg):
arg1 = eval(arg)
self.validate_period(arg)
period = (getdate(arg1['end_date'])-getdate(arg1['start_date'])).days+1
		count = 0
if arg1['periodicity'] == 'Weekly':
count = period/7
elif arg1['periodicity'] == 'Monthly':
count = period/30
elif arg1['periodicity'] == 'Quarterly':
count = period/91
elif arg1['periodicity'] == 'Half Yearly':
count = period/182
elif arg1['periodicity'] == 'Yearly':
count = period/365
ret = {'no_of_visits':count}
return ret
def validate_maintenance_detail(self):
if not getlist(self.doclist, 'item_maintenance_detail'):
msgprint("Please enter Maintaince Details first")
raise Exception
for d in getlist(self.doclist, 'item_maintenance_detail'):
if not d.item_code:
msgprint("Please select item code")
raise Exception
elif not d.start_date or not d.end_date:
msgprint("Please select Start Date and End Date for item "+d.item_code)
raise Exception
elif not d.no_of_visits:
msgprint("Please mention no of visits required")
raise Exception
elif not d.incharge_name:
msgprint("Please select Incharge Person's name")
raise Exception
if getdate(d.start_date) >= getdate(d.end_date):
msgprint("Start date should be less than end date for item "+d.item_code)
raise Exception
def validate_sales_order(self):
for d in getlist(self.doclist, 'item_maintenance_detail'):
if d.prevdoc_docname:
chk = webnotes.conn.sql("select t1.name from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2 where t2.parent=t1.name and t2.prevdoc_docname=%s and t1.docstatus=1", d.prevdoc_docname)
if chk:
msgprint("Maintenance Schedule against "+d.prevdoc_docname+" already exist")
raise Exception
def validate(self):
self.validate_maintenance_detail()
self.validate_sales_order()
def on_update(self):
webnotes.conn.set(self.doc, 'status', 'Draft')
def update_amc_date(self, serial_nos, amc_expiry_date=None):
for serial_no in serial_nos:
serial_no_bean = webnotes.bean("Serial No", serial_no)
serial_no_bean.doc.amc_expiry_date = amc_expiry_date
serial_no_bean.save()
def validate_serial_no(self, serial_nos, amc_start_date):
for serial_no in serial_nos:
sr_details = webnotes.conn.get_value("Serial No", serial_no,
["warranty_expiry_date", "amc_expiry_date", "status", "delivery_date"], as_dict=1)
if sr_details.warranty_expiry_date and sr_details.warranty_expiry_date>=amc_start_date:
webnotes.throw("""Serial No: %s is already under warranty upto %s.
Please check AMC Start Date.""" % (serial_no, sr_details.warranty_expiry_date))
if sr_details.amc_expiry_date and sr_details.amc_expiry_date >= amc_start_date:
webnotes.throw("""Serial No: %s is already under AMC upto %s.
Please check AMC Start Date.""" % (serial_no, sr_details.amc_expiry_date))
if sr_details.status=="Delivered" and sr_details.delivery_date and \
sr_details.delivery_date >= amc_start_date:
webnotes.throw(_("Maintenance start date can not be before \
delivery date for serial no: ") + serial_no)
def validate_schedule(self):
		item_lst1 = []
		item_lst2 = []
for d in getlist(self.doclist, 'item_maintenance_detail'):
if d.item_code not in item_lst1:
item_lst1.append(d.item_code)
for m in getlist(self.doclist, 'maintenance_schedule_detail'):
if m.item_code not in item_lst2:
item_lst2.append(m.item_code)
if len(item_lst1) != len(item_lst2):
msgprint("Maintenance Schedule is not generated for all the items. Please click on 'Generate Schedule'")
raise Exception
else:
for x in item_lst1:
if x not in item_lst2:
msgprint("Maintenance Schedule is not generated for item "+x+". Please click on 'Generate Schedule'")
raise Exception
#check if serial no present in item maintenance table
#-----------------------------------------------------------
def check_serial_no_added(self):
		serial_present = []
for d in getlist(self.doclist, 'item_maintenance_detail'):
if d.serial_no:
serial_present.append(d.item_code)
for m in getlist(self.doclist, 'maintenance_schedule_detail'):
if serial_present:
if m.item_code in serial_present and not m.serial_no:
msgprint("Please click on 'Generate Schedule' to fetch serial no added for item "+m.item_code)
raise Exception
def on_cancel(self):
for d in getlist(self.doclist, 'item_maintenance_detail'):
if d.serial_no:
serial_nos = get_valid_serial_nos(d.serial_no)
self.update_amc_date(serial_nos)
webnotes.conn.set(self.doc, 'status', 'Cancelled')
delete_events(self.doc.doctype, self.doc.name)
def on_trash(self):
delete_events(self.doc.doctype, self.doc.name)
@webnotes.whitelist()
def make_maintenance_visit(source_name, target_doclist=None):
from webnotes.model.mapper import get_mapped_doclist
def update_status(source, target, parent):
target.maintenance_type = "Scheduled"
doclist = get_mapped_doclist("Maintenance Schedule", source_name, {
"Maintenance Schedule": {
"doctype": "Maintenance Visit",
"field_map": {
"name": "maintenance_schedule"
},
"validation": {
"docstatus": ["=", 1]
},
"postprocess": update_status
},
"Maintenance Schedule Item": {
"doctype": "Maintenance Visit Purpose",
"field_map": {
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype",
"incharge_name": "service_person"
}
}
}, target_doclist)
return [d.fields for d in doclist]
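# A standalone sketch (hypothetical helper, no webnotes dependency; expects
# datetime.date inputs) of the visit-date spacing used by create_schedule_list
# above: the start/end span is divided by the number of visits and each visit
# lands one step after the previous one.
def _sketch_schedule_dates(start, end, no_of_visits):
	from datetime import timedelta
	step = (end - start).days / no_of_visits
	dates, current = [], start
	while current < end and len(dates) < no_of_visits:
		current = current + timedelta(days=step)
		dates.append(current)
	return dates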
|
class Year(object):
def __init__(self, year):
self.year = year
def is_leap_year(self):
        return (self._by_4() and not self._by_100()) or self._by_400()
def _by_4(self):
return self.year % 4 == 0
def _by_100(self):
return self.year % 100 == 0
def _by_400(self):
return self.year % 400 == 0
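# A minimal usage sketch for the Year class above; the expected results follow
# directly from the Gregorian leap-year rules encoded in is_leap_year().
if __name__ == "__main__":
    assert Year(1996).is_leap_year()       # divisible by 4 but not by 100
    assert not Year(1900).is_leap_year()   # divisible by 100 but not by 400
    assert Year(2000).is_leap_year()       # divisible by 400
    assert not Year(1997).is_leap_year()   # not divisible by 4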
|
from openerp import fields, models, api
import re
class res_partner(models.Model):
_inherit = 'res.partner'
#def _get_default_tp_type(self):
# return self.env.ref('l10n_cl_invoice.res_IVARI').id
    # TODO: move the default values to a new module,
    # for example "l10n_cl_res_partner_defaults"
#def _get_default_doc_type(self):
# return self.env.ref('l10n_cl_invoice.dt_RUT').id
responsability_id = fields.Many2one(
'sii.responsability', 'Sale/Purchase Doc Type')
    # we leave the default pending, to be installed from another module,
    # because it causes problems on fresh installations
# 'sii.responsability', 'Responsability', default = _get_default_tp_type)
document_type_id = fields.Many2one(
'sii.document_type', 'ID Type')
# 'sii.document_type', 'Document type', default = _get_default_doc_type)
document_number = fields.Char('Document number', size=64)
start_date = fields.Date('Start-up Date')
tp_sii_code = fields.Char('Tax Payer SII Code', compute='_get_tp_sii_code',
readonly=True)
@api.multi
@api.onchange('responsability_id')
def _get_tp_sii_code(self):
for record in self:
            record.tp_sii_code = str(record.responsability_id.tp_sii_code)
@api.onchange('document_number', 'document_type_id')
def onchange_document(self):
mod_obj = self.env['ir.model.data']
if self.document_number and ((
'sii.document_type',
self.document_type_id.id) == mod_obj.get_object_reference(
'l10n_cl_invoice', 'dt_RUT') or ('sii.document_type',
self.document_type_id.id) == mod_obj.get_object_reference(
'l10n_cl_invoice', 'dt_RUN')):
document_number = (
re.sub('[^1234567890Kk]', '', str(
self.document_number))).zfill(9).upper()
self.vat = 'CL%s' % document_number
self.document_number = '%s.%s.%s-%s' % (
document_number[0:2], document_number[2:5],
document_number[5:8], document_number[-1])
elif self.document_number and (
'sii.document_type',
self.document_type_id.id) == mod_obj.get_object_reference(
'l10n_cl_invoice', 'dt_Sigd'):
self.document_number = ''
|
""""""
from __future__ import annotations
from flask import Flask
from .criterion import TagCriterion
from .extension import TagsExtension
__all__ = ["TagsExtension", "TagCriterion"]
def register_plugin(app: Flask):
TagsExtension(app)
|
import os
import sys
from nxdrive.logging_config import get_logger
from nxdrive.utils import safe_long_path
from tests.common_unit_test import UnitTestCase
if sys.platform == 'win32':
import win32api
log = get_logger(__name__)
FOLDER_A = 'A' * 90
FOLDER_B = 'B' * 90
FOLDER_C = 'C' * 90
FOLDER_D = 'D' * 50
class TestLongPath(UnitTestCase):
def setUp(self):
UnitTestCase.setUp(self)
self.local_1 = self.local_client_1
self.remote_1 = self.remote_document_client_1
log.info("Create a folder AAAA... (90 chars) in server")
self.folder_a = self.remote_1.make_folder("/", FOLDER_A)
self.folder_b = self.remote_1.make_folder(self.folder_a, FOLDER_B)
self.folder_c = self.remote_1.make_folder(self.folder_b, FOLDER_C)
self.remote_1.make_file(self.folder_c, "File1.txt", "Sample Content")
def tearDown(self):
log.info("Delete the folder AAA... in server")
self.remote_1.delete(self.folder_a, use_trash=False)
UnitTestCase.tearDown(self)
def test_long_path(self):
self.engine_1.start()
self.wait_sync(wait_for_async=True)
parent_path = os.path.join(self.local_1.abspath('/'),
FOLDER_A, FOLDER_B, FOLDER_C, FOLDER_D)
log.info("Creating folder with path: %s", parent_path)
if sys.platform == 'win32' and not os.path.exists(parent_path):
log.debug('Add \\\\?\\ prefix to path %r', parent_path)
parent_path = safe_long_path(parent_path)
os.makedirs(parent_path)
if sys.platform == 'win32':
log.info("Convert path of FOLDER_D\File2.txt to short path format")
parent_path = win32api.GetShortPathName(parent_path)
new_file = os.path.join(parent_path, "File2.txt")
log.info("Creating file with path: %s", new_file)
with open(new_file, "w") as f:
f.write("Hello world")
self.wait_sync(wait_for_async=True, timeout=45, fail_if_timeout=False)
remote_children_of_c = self.remote_1.get_children_info(self.folder_c)
children_names = [item.name for item in remote_children_of_c]
log.warn("Verify if FOLDER_D is uploaded to server")
self.assertIn(FOLDER_D, children_names)
folder_d = [item.uid for item in remote_children_of_c if item.name == FOLDER_D][0]
remote_children_of_d = self.remote_1.get_children_info(folder_d)
children_names = [item.name for item in remote_children_of_d]
log.warn("Verify if FOLDER_D\File2.txt is uploaded to server")
self.assertIn('File2.txt', children_names)
def test_setup_on_long_path(self):
""" NXDRIVE 689: Fix error when adding a new account when installation
path is greater than 245 characters.
"""
self.engine_1.stop()
self.engine_1.reinit()
# On Mac, avoid permission denied error
self.engine_1.get_local_client().clean_xattr_root()
test_folder_len = 245 - len(str(self.local_nxdrive_folder_1))
test_folder = 'A' * test_folder_len
self.local_nxdrive_folder_1 = os.path.join(self.local_nxdrive_folder_1,
test_folder)
self.assertTrue(len(self.local_nxdrive_folder_1) > 245)
self.manager_1.unbind_all()
self.engine_1 = self.manager_1.bind_server(
self.local_nxdrive_folder_1, self.nuxeo_url, self.user_2,
self.password_2, start_engine=False)
self.engine_1.start()
self.engine_1.stop()
|
from __future__ import absolute_import
from fife import fifechan
from fife.extensions.pychan.attrs import IntAttr, FloatAttr
from .widget import Widget
class Slider(Widget):
""" A slider widget
Use a callback to read out the slider value every time the marker
is moved.
New Attributes
==============
	- orientation: 1 = horizontal, 0 = vertical
- scale_start: float: default 0.0
- scale_end: float: default 1.0
- step_length: float: default scale_end/10
- marker_length: int: default 10
FIXME:
- update docstrings
"""
HORIZONTAL = fifechan.Slider.Horizontal
VERTICAL = fifechan.Slider.Vertical
ATTRIBUTES = Widget.ATTRIBUTES + [ IntAttr('orientation'),
FloatAttr('scale_start'),
FloatAttr('scale_end'),
FloatAttr('step_length'),
IntAttr('marker_length')
]
DEFAULT_HEXPAND = True
DEFAULT_VEXPAND = False
DEFAULT_SIZE = 10,10
DEFAULT_MIN_SIZE = 10,10
DEFAULT_SCALE_START = 0.0
DEFAULT_SCALE_END = 1.0
DEFAULT_STEP_LENGTH = 0.1
DEFAULT_MARKER_LENGTH = 10
DEFAULT_ORIENTATION = HORIZONTAL
def __init__(self,
parent = None,
name = None,
size = None,
min_size = None,
max_size = None,
fixed_size = None,
margins = None,
padding = None,
helptext = None,
position = None,
style = None,
hexpand = None,
vexpand = None,
font = None,
base_color = None,
background_color = None,
foreground_color = None,
selection_color = None,
border_color = None,
outline_color = None,
border_size = None,
outline_size = None,
position_technique = None,
is_focusable = None,
comment = None,
scale_start = None,
scale_end = None,
step_length = None,
marker_length = None,
orientation = None):
self.real_widget = fifechan.Slider(scale_start or self.DEFAULT_SCALE_START, scale_end or self.DEFAULT_SCALE_END)
self.orientation = self.DEFAULT_ORIENTATION
self.step_length = self.DEFAULT_STEP_LENGTH
self.marker_length = self.DEFAULT_MARKER_LENGTH
super(Slider, self).__init__(parent=parent,
name=name,
size=size,
min_size=min_size,
max_size=max_size,
fixed_size=fixed_size,
margins=margins,
padding=padding,
helptext=helptext,
position=position,
style=style,
hexpand=hexpand,
vexpand=vexpand,
font=font,
base_color=base_color,
background_color=background_color,
foreground_color=foreground_color,
selection_color=selection_color,
border_color=border_color,
outline_color=outline_color,
border_size=border_size,
outline_size=outline_size,
position_technique=position_technique,
is_focusable=is_focusable,
comment=comment)
if orientation is not None: self.orientation = orientation
if scale_start is not None: self.scale_start = scale_start
if scale_end is not None: self.scale_end = scale_end
if step_length is not None: self.step_length = step_length
if marker_length is not None: self.marker_length = marker_length
self.accepts_data = True
self._realSetData = self._setValue
self._realGetData = self._getValue
def clone(self, prefix):
sliderClone = Slider(None,
self._createNameWithPrefix(prefix),
self.size,
self.min_size,
self.max_size,
self.fixed_size,
self.margins,
self.padding,
self.helptext,
self.position,
self.style,
self.hexpand,
self.vexpand,
self.font,
self.base_color,
self.background_color,
self.foreground_color,
self.selection_color,
self.border_color,
self.outline_color,
self.border_size,
self.outline_size,
self.position_technique,
self.is_focusable,
self.comment,
self.scale_start,
self.scale_end,
self.step_length,
self.marker_length,
self.orientation)
return sliderClone
def _setScale(self, start, end):
"""setScale(self, double scaleStart, double scaleEnd)"""
if type(start) != float:
raise RuntimeError("Slider expects float for start scale")
if type(end) != float:
raise RuntimeError("Slider expects float for end scale")
self.real_widget.setScale(start, end)
def _getScaleStart(self):
"""getScaleStart(self) -> double"""
return self.real_widget.getScaleStart()
def _setScaleStart(self, start):
"""setScaleStart(self, double scaleStart)"""
if type(start) != float:
raise RuntimeError("Slider expects float for start scale")
self.real_widget.setScaleStart(start)
scale_start = property(_getScaleStart, _setScaleStart)
def _getScaleEnd(self):
"""getScaleEnd(self) -> double"""
return self.real_widget.getScaleEnd()
def _setScaleEnd(self, end):
"""setScaleEnd(self, double scaleEnd)"""
if type(end) != float:
raise RuntimeError("Slider expects float for end scale")
self.real_widget.setScaleEnd(end)
scale_end = property(_getScaleEnd, _setScaleEnd)
def _getValue(self):
"""getValue(self) -> double"""
return self.real_widget.getValue()
def _setValue(self, value):
"""setValue(self, double value)"""
if type(value) != float:
raise RuntimeError("Slider only accepts float values")
self.real_widget.setValue(value)
value = property(_getValue, _setValue)
def _setMarkerLength(self, length):
"""setMarkerLength(self, int length)"""
if type(length) != int:
raise RuntimeError("Slider only accepts int for Marker length")
self.real_widget.setMarkerLength(length)
def _getMarkerLength(self):
"""getMarkerLength(self) -> int"""
return self.real_widget.getMarkerLength()
marker_length = property(_getMarkerLength, _setMarkerLength)
def _setOrientation(self, orientation):
"""setOrientation(self, Orientation orientation)"""
self.real_widget.setOrientation(orientation)
def _getOrientation(self):
"""getOrientation(self) -> int"""
return self.real_widget.getOrientation()
orientation = property(_getOrientation, _setOrientation)
def _setStepLength(self, length):
"""setStepLength(self, double length)"""
if type(length) != float:
raise RuntimeError("Slider only accepts floats for step length")
self.real_widget.setStepLength(length)
def _getStepLength(self):
"""getStepLength(self) -> double"""
return self.real_widget.getStepLength()
step_length = property(_getStepLength, _setStepLength)
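# A minimal usage sketch (shown as comments because it needs a live FIFE/pychan
# GUI; 'on_volume_change' is a hypothetical application callback):
#   slider = Slider(name="volume", scale_start=0.0, scale_end=1.0,
#                   step_length=0.05, marker_length=10)
#   slider.capture(on_volume_change)  # pychan callback, fired as marker moves
#   current = slider.value            # read the marker position at any time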
|
"""Document management blueprint."""
from __future__ import annotations
from flask import g
from abilian.i18n import _l
from abilian.sbe.apps.communities.blueprint import Blueprint
from abilian.sbe.apps.communities.security import is_manager
from abilian.sbe.apps.documents.actions import register_actions
from abilian.web.action import Endpoint
from abilian.web.nav import BreadcrumbItem
__all__ = ["blueprint"]
blueprint = Blueprint(
"documents", __name__, url_prefix="/docs", template_folder="../templates"
)
route = blueprint.route
blueprint.record_once(register_actions)
@blueprint.url_value_preprocessor
def init_document_values(endpoint: str, values: dict[str, int]):
g.current_tab = "documents"
g.is_manager = is_manager()
g.breadcrumb.append(
BreadcrumbItem(
label=_l("Documents"),
url=Endpoint("documents.index", community_id=g.community.slug),
)
)
|
import os.path
import re
import shutil
import llnl.util.tty as tty
import llnl.util.lang
import spack.compiler
import spack.compilers.clang
import spack.util.executable
import spack.version
class AppleClang(spack.compilers.clang.Clang):
openmp_flag = "-Xpreprocessor -fopenmp"
@classmethod
@llnl.util.lang.memoized
def extract_version_from_output(cls, output):
ver = 'unknown'
match = re.search(
# Apple's LLVM compiler has its own versions, so suffix them.
r'^Apple (?:LLVM|clang) version ([^ )]+)',
output,
# Multi-line, since 'Apple clang' may not be on the first line
# in particular, when run as gcc, it seems to output
# "Configured with: --prefix=..." as the first line
re.M,
)
if match:
ver = match.group(match.lastindex)
return ver
@property
def cxx11_flag(self):
# Adapted from CMake's AppleClang-CXX rules
# Spack's AppleClang detection only valid from Xcode >= 4.6
if self.version < spack.version.ver('4.0.0'):
raise spack.compiler.UnsupportedCompilerFlag(
self, "the C++11 standard", "cxx11_flag", "Xcode < 4.0.0"
)
return "-std=c++11"
@property
def cxx14_flag(self):
# Adapted from CMake's rules for AppleClang
if self.version < spack.version.ver('5.1.0'):
raise spack.compiler.UnsupportedCompilerFlag(
self, "the C++14 standard", "cxx14_flag", "Xcode < 5.1.0"
)
elif self.version < spack.version.ver('6.1.0'):
return "-std=c++1y"
return "-std=c++14"
@property
def cxx17_flag(self):
# Adapted from CMake's rules for AppleClang
if self.version < spack.version.ver('6.1.0'):
raise spack.compiler.UnsupportedCompilerFlag(
self, "the C++17 standard", "cxx17_flag", "Xcode < 6.1.0"
)
return "-std=c++1z"
def setup_custom_environment(self, pkg, env):
"""Set the DEVELOPER_DIR environment for the Xcode toolchain.
On macOS, not all buildsystems support querying CC and CXX for the
compilers to use and instead query the Xcode toolchain for what
compiler to run. This side-steps the spack wrappers. In order to inject
spack into this setup, we need to copy (a subset of) Xcode.app and
replace the compiler executables with symlinks to the spack wrapper.
Currently, the stage is used to store the Xcode.app copies. We then set
the 'DEVELOPER_DIR' environment variables to cause the xcrun and
related tools to use this Xcode.app.
"""
super(AppleClang, self).setup_custom_environment(pkg, env)
if not pkg.use_xcode:
# if we do it for all packages, we get into big troubles with MPI:
# filter_compilers(self) will use mockup XCode compilers on macOS
# with Clang. Those point to Spack's compiler wrappers and
# consequently render MPI non-functional outside of Spack.
return
# Use special XCode versions of compiler wrappers when using XCode
# Overwrites build_environment's setting of SPACK_CC and SPACK_CXX
xcrun = spack.util.executable.Executable('xcrun')
xcode_clang = xcrun('-f', 'clang', output=str).strip()
xcode_clangpp = xcrun('-f', 'clang++', output=str).strip()
env.set('SPACK_CC', xcode_clang, force=True)
env.set('SPACK_CXX', xcode_clangpp, force=True)
xcode_select = spack.util.executable.Executable('xcode-select')
# Get the path of the active developer directory
real_root = xcode_select('--print-path', output=str).strip()
# The path name can be used to determine whether the full Xcode suite
# or just the command-line tools are installed
if real_root.endswith('Developer'):
# The full Xcode suite is installed
pass
else:
if real_root.endswith('CommandLineTools'):
# Only the command-line tools are installed
msg = 'It appears that you have the Xcode command-line tools '
msg += 'but not the full Xcode suite installed.\n'
else:
# Xcode is not installed
msg = 'It appears that you do not have Xcode installed.\n'
msg += 'In order to use Spack to build the requested application, '
msg += 'you need the full Xcode suite. It can be installed '
msg += 'through the App Store. Make sure you launch the '
msg += 'application and accept the license agreement.\n'
raise OSError(msg)
real_root = os.path.dirname(os.path.dirname(real_root))
developer_root = os.path.join(spack.stage.get_stage_root(),
'xcode-select',
self.name,
str(self.version))
xcode_link = os.path.join(developer_root, 'Xcode.app')
if not os.path.exists(developer_root):
tty.warn('Copying Xcode from %s to %s in order to add spack '
'wrappers to it. Please do not interrupt.'
% (real_root, developer_root))
# We need to make a new Xcode.app instance, but with symlinks to
# the spack wrappers for the compilers it ships. This is necessary
# because some projects insist on just asking xcrun and related
# tools where the compiler runs. These tools are very hard to trick
# as they do realpath and end up ignoring the symlinks in a
# "softer" tree of nothing but symlinks in the right places.
shutil.copytree(
real_root, developer_root, symlinks=True,
ignore=shutil.ignore_patterns(
'AppleTV*.platform', 'Watch*.platform', 'iPhone*.platform',
'Documentation', 'swift*'
))
real_dirs = [
'Toolchains/XcodeDefault.xctoolchain/usr/bin',
'usr/bin',
]
bins = ['c++', 'c89', 'c99', 'cc', 'clang', 'clang++', 'cpp']
for real_dir in real_dirs:
dev_dir = os.path.join(developer_root,
'Contents',
'Developer',
real_dir)
for fname in os.listdir(dev_dir):
if fname in bins:
os.unlink(os.path.join(dev_dir, fname))
os.symlink(
os.path.join(spack.paths.build_env_path, 'cc'),
os.path.join(dev_dir, fname))
os.symlink(developer_root, xcode_link)
env.set('DEVELOPER_DIR', xcode_link)
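# Resulting layout, sketched with illustrative paths: the Xcode copy lives
# under the stage root, its compiler binaries are symlinks to Spack's 'cc'
# wrapper, and DEVELOPER_DIR points into the copy:
#   <stage_root>/xcode-select/<compiler>/<version>/
#       Xcode.app -> <that same directory>   # used as DEVELOPER_DIR
#       Contents/Developer/usr/bin/clang -> <spack>/build_env_path/cc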
|
"""Functions to import builds/defconfigs."""
try:
import simplejson as json
except ImportError:
import json
try:
from os import walk
except ImportError:
from scandir import walk
import bson
import datetime
import io
import os
import pymongo.errors
import re
import redis
import types
import models
import models.build as mbuild
import models.job as mjob
import utils
import utils.database.redisdb as redisdb
import utils.db
import utils.errors
ERR_ADD = utils.errors.add_error
ERR_UPDATE = utils.errors.update_errors
KERNEL_VERSION_MATCH = re.compile(r"^(?P<version>\d+\.{1}\d+(?:\.{1}\d+)?)")
KERNEL_RC_VERSION_MATCH = re.compile(
r"^(?P<version>\d+\.{1}\d+(?:\.{1}\d+)?-{1}rc\d*)")
def _search_prev_build_doc(build_doc, database):
"""Search for a similar defconfig document in the database.
Search for an already imported defconfig/build document in the database
and return its object ID and creation date. This is done to make sure
we do not create double documents when re-importing the same data or
updating it.
:param build_doc: The new defconfig document.
:param database: The db connection.
:return The previous doc ID and its creation date, or None.
"""
doc_id = None
c_date = None
if build_doc and database:
spec = {
models.ARCHITECTURE_KEY: build_doc.arch,
models.DEFCONFIG_FULL_KEY: build_doc.defconfig_full,
models.DEFCONFIG_KEY: build_doc.defconfig,
models.GIT_BRANCH_KEY: build_doc.git_branch,
models.JOB_KEY: build_doc.job,
models.KERNEL_KEY: build_doc.kernel,
models.BUILD_ENVIRONMENT_KEY: build_doc.build_environment
}
collection = database[models.BUILD_COLLECTION]
prev_doc_count = collection.count_documents(spec, limit=2)
if prev_doc_count > 0:
if prev_doc_count == 1:
prev_doc = utils.db.find_one2(collection, spec)
doc_id = prev_doc.get(models.ID_KEY)
c_date = prev_doc.get(models.CREATED_KEY)
else:
utils.LOG.warn(
"Found multiple defconfig docs matching: {}".format(spec))
utils.LOG.error(
"Cannot keep old document ID, don't know which one to "
"use!")
return doc_id, c_date
class BuildError(Exception):
def __init__(self, code, *args, **kwargs):
self.code = code
self.from_exc = kwargs.pop('from_exc', None)
super(BuildError, self).__init__(*args, **kwargs)
def _update_job_doc(job_doc, job_id, status, build_doc, database):
"""Update the JobDocument with values from a BuildDocument.
:param job_doc: The job document to update.
:type job_doc: JobDocument
:param status: The job status value.
:type status: string
:param build_doc: A BuildDocument object.
:type build_doc: BuildDocument
"""
to_update = False
ret_val = 201
if (job_id and job_doc.id != job_id):
job_doc.id = job_id
to_update = True
if job_doc.status != status:
job_doc.status = status
to_update = True
no_git = all([
not job_doc.git_url,
not job_doc.git_commit,
not job_doc.git_describe,
not job_doc.git_describe_v
])
no_compiler = all([
not job_doc.compiler,
not job_doc.compiler_version,
not job_doc.compiler_version_ext,
not job_doc.compiler_version_full,
not job_doc.cross_compile
])
if (build_doc and no_git and no_compiler):
# Kind of a hack:
# We want to store some metadata at the job document level as well,
# like git tree, git commit...
# Since, at the moment, we do not have the metadata file at the job
# level we need to pick one from the build documents, and extract some
# values.
if isinstance(build_doc, mbuild.BuildDocument):
if (build_doc.job == job_doc.job and
build_doc.kernel == job_doc.kernel):
job_doc.git_commit = build_doc.git_commit
job_doc.git_describe = build_doc.git_describe
job_doc.git_describe_v = build_doc.git_describe_v
job_doc.kernel_version = build_doc.kernel_version
job_doc.git_url = build_doc.git_url
job_doc.compiler = build_doc.compiler
job_doc.compiler_version = build_doc.compiler_version
job_doc.compiler_version_ext = build_doc.compiler_version_ext
job_doc.compiler_version_full = build_doc.compiler_version_full
job_doc.cross_compile = build_doc.cross_compile
to_update = True
if to_update:
ret_val, _ = utils.db.save(database, job_doc)
return ret_val
def _get_or_create_job(meta, database, db_options):
"""Get or create a job in the database.
:param job: The name of the job.
:type job: str
:param kernel: The name of the kernel.
:type kernel: str
:param database: The mongodb database connection.
:param db_options: The database connection options.
:type db_options: dict
:return a 3-tuple: return value, job document and job ID.
"""
ret_val = 201
job_doc = None
job_id = None
rev = meta["bmeta"]["revision"]
tree, descr, branch = (rev[key] for key in ["tree", "describe", "branch"])
redis_conn = redisdb.get_db_connection(db_options)
# We might be importing builds in parallel through multi-processes. Keep a
# lock here when looking for a job or we might end up with multiple job
# creations.
# ToDo: rename Job as Revision since that's what it really is
lock_key = "build-import-{}-{}-{}".format(tree, descr, branch)
with redis.lock.Lock(redis_conn, lock_key, timeout=5):
p_doc = utils.db.find_one2(
database[models.JOB_COLLECTION],
{
models.JOB_KEY: tree,
models.KERNEL_KEY: descr,
models.GIT_BRANCH_KEY: branch,
})
if p_doc:
job_doc = mjob.JobDocument.from_json(p_doc)
job_id = job_doc.id
else:
job_doc = mjob.JobDocument(tree, descr, branch)
job_doc.status = models.BUILD_STATUS
job_doc.created_on = datetime.datetime.now(tz=bson.tz_util.utc)
ret_val, job_id = utils.db.save(database, job_doc)
job_doc.id = job_id
return ret_val, job_doc, job_id
def _get_build(meta, database):
"""Make a BuildDocument object and return it"""
bmeta, steps, artifacts = (meta[key] for key in [
"bmeta", "steps", "artifacts"
])
env, kernel, rev, build = (bmeta[key] for key in [
"environment", "kernel", "revision", "build"
])
doc = mbuild.BuildDocument(
rev["tree"],
rev["describe"],
kernel["defconfig"],
rev["branch"],
env["name"],
defconfig_full=kernel["defconfig_full"]
)
# Required fields
doc.arch = env["arch"]
doc.git_commit = rev["commit"]
doc.git_describe = rev["describe"]
doc.status = build["status"]
doc.git_url = rev["url"]
doc.file_server_resource = kernel["publish_path"]
doc.compiler_version_full = env["compiler_version_full"]
doc.compiler_version_ext = doc.compiler_version_full # ToDo: deprecate
# Optional fields
uname = env.get("platform", {}).get("uname")
if uname and len(uname) == 6 and not uname[5]:
uname[5] = steps[0]['cpus'].keys()[0]
doc.build_platform = uname or []
doc.build_time = build.get("duration")
doc.compiler = env.get("compiler")
doc.compiler_version = env.get("compiler_version")
doc.cross_compile = env.get("cross_compile")
doc.git_describe_v = rev.get("describe_verbose")
doc.text_offset = kernel.get("text_offset")
doc.vmlinux_bss_size = kernel.get("vmlinux_bss_size")
doc.vmlinux_data_size = kernel.get("vmlinux_data_size")
doc.vmlinux_file_size = kernel.get("vmlinux_file_size")
doc.vmlinux_text_size = kernel.get("vmlinux_text_size")
# Artifacts fields
def _find_artifacts(artifacts, step, key=None, artifact_type=None):
data = artifacts.get(step)
found = list()
if data:
for entry in data:
if key and entry.get("key") != key or \
artifact_type and entry.get("type") != artifact_type:
continue
found.append(entry)
return found
kernel_config = _find_artifacts(artifacts, 'config', 'config')
doc.kernel_config = kernel_config[0]['path'] if kernel_config else None
doc.kconfig_fragments = [
entry['path'] for entry in
_find_artifacts(artifacts, 'config', 'fragment')
]
kernel_images = _find_artifacts(artifacts, 'kernel', 'image')
doc.kernel_image = kernel_images[0]['path'] if kernel_images else None
system_map = _find_artifacts(artifacts, 'kernel', 'system_map')
doc.system_map = system_map[0]['path'] if system_map else None
modules = _find_artifacts(artifacts, 'modules', artifact_type='tarball')
doc.modules = modules[0]['path'] if modules else None
dtbs = _find_artifacts(artifacts, 'dtbs', artifact_type='directory')
doc.dtb_dir = 'dtbs' if dtbs else None
doc.dtb_dir_data = dtbs[0]['contents'] if dtbs else []
# Build log
log_artifacts = [
_find_artifacts(artifacts, step, 'log')
for step in ['kernel', 'modules']
]
doc.kernel_build_logs = [log[0]['path'] for log in log_artifacts if log]
doc.build_log = 'logs'
doc.errors = 0
doc.warnings = 0
# Constant fields
# FIXME: set in bmeta.json
doc.version = "1.1"
doc.build_type = "kernel"
# Unused fields
# FIXME: delete or make use of them if they're justified
doc.file_server_url = None
doc.kernel_image_size = None
doc.modules_size = None
doc.modules_dir = None
doc.kernel_version = None
return doc
def import_single_build(meta, db_options, base_path=utils.BASE_PATH):
"""Import a single build from the file system.
:param json_obj: The json object containing the necessary data.
:type json_obj: dictionary
:param db_options: The database connection options.
:type db_options: dictionary
:param base_path: The base path on the file system where to look for.
:type base_path: string
:return The build id, job id and errors
"""
build_id = None
job_id = None
database = utils.db.get_db_connection(db_options)
ret_val, job_doc, job_id = _get_or_create_job(meta, database, db_options)
if ret_val != 201:
return None, None, {500: ["Failed to create job document"]}
build_doc = _get_build(meta, database)
build_doc.job_id = job_doc.id
doc_id, c_date = _search_prev_build_doc(build_doc, database)
build_doc.id = doc_id
build_doc.created_on = c_date or datetime.datetime.now(tz=bson.tz_util.utc)
ret_val = _update_job_doc(
job_doc, job_id, job_doc.status, build_doc, database)
if ret_val != 201:
return None, None, {500: ["Failed to update job document"]}
ret_val, build_id = utils.db.save(database, build_doc)
if ret_val != 201:
return None, None, {500: ["Failed to save build document"]}
return build_id, job_id, {}
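# A usage sketch (hypothetical 'meta' dict and db options; 'meta' must carry
# the "bmeta", "steps" and "artifacts" sections consumed by _get_build above):
#   build_id, job_id, errors = import_single_build(meta, {"host": "localhost"})
#   if errors:
#       utils.LOG.error("Build import failed: {}".format(errors))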
|
import unittest
from pyscsi.pyscsi import scsi_enum_inquiry as INQUIRY
from pyscsi.pyscsi.scsi_cdb_inquiry import Inquiry
from pyscsi.pyscsi.scsi_enum_command import sbc
from pyscsi.utils.converter import scsi_int_to_ba
from .mock_device import MockDevice, MockSCSI
class MockInquiryStandard(MockDevice):
def execute(self, cmd):
cmd.datain[0] = 0x25 # QUAL:1 TYPE:5
cmd.datain[1] = 0x80 # RMB:1
cmd.datain[2] = 0x07 # VERSION:7
cmd.datain[3] = 0x23 # NORMACA:1 HISUP:0 RDF:3
cmd.datain[4] = 0x40 # ADDITIONAL LENGTH:64
cmd.datain[5] = 0xb9 # SCCS:1 ACC:0 TGPS:3 3PC:1 PROTECT:1
cmd.datain[6] = 0x71 # ENCSERV:1 VS:1 MULTIP:1 ADDR16:1
cmd.datain[7] = 0x33 # WBUS16:1 SYNC:1 CMDQUE:1 VS2:1
# t10 vendor id
cmd.datain[8:16] = bytearray(ord(c) for c in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])
# product id
cmd.datain[16:32] = bytearray(ord(c) for c in ['i', 'i', 'i', 'i', 'i', 'i', 'i', 'i',
'j', 'j', 'j', 'j', 'j', 'j', 'j', 'j'])
# product revision level
cmd.datain[32:36] = bytearray(ord(c) for c in ['r', 'e', 'v', 'n'])
cmd.datain[56] = 0x09 # CLOCKING:2 QAS:0 IUS:1
class MockLBP(MockDevice):
def execute(self, cmd):
cmd.datain[0] = 0x00 # QUAL:0 TYPE:0
cmd.datain[1] = 0xb2 # logical block provisioning
cmd.datain[2] = 0x00 #
cmd.datain[3] = 0x04 # page length == 4
cmd.datain[4] = 0x12 # threshold exponent
cmd.datain[5] = 0xe7 # LBPU:1 LBPWS:1 LBPWS10:1 LBPRZ:1 ANC_SUP:1 DP:1
cmd.datain[6] = 0x02 # Provisioning Type:2
cmd.datain[7] = 0x00 #
class MockUSN(MockDevice):
def execute(self, cmd):
cmd.datain[0] = 0x00 # QUAL:0 TYPE:0
cmd.datain[1] = 0x80 # unit serial number
cmd.datain[2] = 0x00 #
cmd.datain[3] = 0x04 # page length == 4
cmd.datain[4:8] = "ABCD".encode()
class MockDevId(MockDevice):
def execute(self, cmd):
cmd.datain[0] = 0x00 # QUAL:0 TYPE:0
cmd.datain[1] = 0x83 # device identifier
cmd.datain[2] = 0x00
cmd.datain[3] = 0x00
pos = 4
# Designation Descriptor: T10_VENDOR_ID
t10 = bytearray(8)
t10[0] = ord('T')
t10[1] = ord('e')
t10[2] = ord('s')
t10[3] = ord('t')
t10[4] = ord(' ')
t10[5] = ord('T')
t10[6] = ord('1')
t10[7] = ord('0')
dd = bytearray(4)
dd += t10
dd[0] = 0x52 # iSCSI, ASCII
dd[1] = 0xa1 # AssociatedWithTargetDevice, T10_VENDOR_ID
dd[3] = len(t10)
cmd.datain[pos:pos + len(dd)] = dd
pos += len(dd)
# Designation Descriptor: EUI-64, 8 byte version
eui = bytearray(8)
# IEEE company identifier
eui[0] = 0x11
eui[1] = 0x22
eui[2] = 0x33
# vendor specific
eui[3] = ord('a')
eui[4] = ord('b')
eui[5] = ord('c')
eui[6] = ord('d')
eui[7] = ord('e')
dd = bytearray(4)
dd += eui
dd[0] = 0x01 # BINARY
dd[1] = 0x22 # AssociatedWithTargetDevice, EUI-64
        dd[2:4] = scsi_int_to_ba(len(eui), 2)
cmd.datain[pos:pos + len(dd)] = dd
pos += len(dd)
cmd.datain[2:4] = scsi_int_to_ba(pos - 4, 2)
class MockReferrals(MockDevice):
def execute(self, cmd):
cmd.datain[0] = 0x00 # QUAL:0 TYPE:0
cmd.datain[1] = 0xb3 # referrals
cmd.datain[2] = 0x00 #
cmd.datain[3] = 0x0c # page length: 12
cmd.datain[11] = 23
cmd.datain[15] = 37
class MockExtendedInquiry(MockDevice):
def execute(self, cmd):
cmd.datain[0] = 0x00 # QUAL:0 TYPE:0
cmd.datain[1] = 0x86 # extended inquiry
cmd.datain[2] = 0x00 #
cmd.datain[3] = 0x3c # page length: 60
cmd.datain[4] = 0x57 # activate microcode:1 spt:2 grd_chk:1
# app_chk:1 ref_chk:1
cmd.datain[5] = 0x33 # uask_sup:1 group_sup:1 prior_sup:0 headsup:0
# ordsup:1 simpsup:1
cmd.datain[6] = 0x05 # wu_sup:0 crd_sup:1 nv_sup:0 v_sup:1
cmd.datain[7] = 0x11 # p_i_i_sup:1 luiclr:1
cmd.datain[8] = 0x11 # r_sup:1 cbcs:1
cmd.datain[9] = 0x03 # multi...:3
cmd.datain[11] = 0x0f # extended...:15
cmd.datain[12] = 0xe0 # poa_sup:1 hra_sup:1 vsa_sup:1
cmd.datain[13] = 0x05 # maximum...:5
class UnmarshallInquiryTest(unittest.TestCase):
def test_main(self):
with MockSCSI(MockInquiryStandard(sbc)) as s:
cmd = s.inquiry()
i = cmd.result
self.assertEqual(i['peripheral_qualifier'], 1)
self.assertEqual(i['peripheral_device_type'], 5)
self.assertEqual(i['rmb'], 1)
self.assertEqual(i['version'], 7)
self.assertEqual(i['normaca'], 1)
self.assertEqual(i['hisup'], 0)
self.assertEqual(i['response_data_format'], 3)
self.assertEqual(i['additional_length'], 64)
self.assertEqual(i['sccs'], 1)
self.assertEqual(i['acc'], 0)
self.assertEqual(i['tpgs'], 3)
self.assertEqual(i['3pc'], 1)
self.assertEqual(i['protect'], 1)
self.assertEqual(i['encserv'], 1)
self.assertEqual(i['vs'], 1)
self.assertEqual(i['multip'], 1)
self.assertEqual(i['addr16'], 1)
self.assertEqual(i['wbus16'], 1)
self.assertEqual(i['sync'], 1)
self.assertEqual(i['cmdque'], 1)
self.assertEqual(i['vs2'], 1)
self.assertEqual(i['clocking'], 2)
self.assertEqual(i['qas'], 0)
self.assertEqual(i['ius'], 1)
self.assertEqual(i['t10_vendor_identification'].decode("utf-8"), 'abcdefgh')
self.assertEqual(i['product_identification'].decode("utf-8"), 'iiiiiiiijjjjjjjj')
self.assertEqual(i['product_revision_level'].decode("utf-8"), 'revn')
d = Inquiry.unmarshall_datain(Inquiry.marshall_datain(i))
self.assertEqual(d, i)
with MockSCSI(MockLBP(sbc)) as s:
cmd = s.inquiry(evpd=1, page_code=INQUIRY.VPD.LOGICAL_BLOCK_PROVISIONING)
i = cmd.result
self.assertEqual(i['peripheral_qualifier'], 0)
            self.assertEqual(i['peripheral_device_type'], 0)
self.assertEqual(i['threshold_exponent'], 0x12)
self.assertEqual(i['lbpu'], 1)
self.assertEqual(i['lpbws'], 1)
self.assertEqual(i['lbpws10'], 1)
self.assertEqual(i['lbprz'], 1)
self.assertEqual(i['anc_sup'], 1)
self.assertEqual(i['dp'], 1)
self.assertEqual(i['provisioning_type'], INQUIRY.PROVISIONING_TYPE.THIN_PROVISIONED)
d = Inquiry.unmarshall_datain(Inquiry.marshall_datain(i), evpd=1)
self.assertEqual(d, i)
with MockSCSI(MockUSN(sbc)) as s:
cmd = s.inquiry(evpd=1, page_code=INQUIRY.VPD.UNIT_SERIAL_NUMBER)
i = cmd.result
self.assertEqual(i['peripheral_qualifier'], 0)
            self.assertEqual(i['peripheral_device_type'], 0)
self.assertEqual(i['unit_serial_number'].decode("utf-8"), "ABCD")
d = Inquiry.unmarshall_datain(Inquiry.marshall_datain(i), evpd=1)
self.assertEqual(d, i)
with MockSCSI(MockReferrals(sbc)) as s:
cmd = s.inquiry(evpd=1, page_code=INQUIRY.VPD.REFERRALS)
i = cmd.result
self.assertEqual(i['peripheral_qualifier'], 0)
            self.assertEqual(i['peripheral_device_type'], 0)
self.assertEqual(i['user_data_segment_size'], 23)
self.assertEqual(i['user_data_segment_multiplier'], 37)
d = Inquiry.unmarshall_datain(Inquiry.marshall_datain(i), evpd=1)
self.assertEqual(d, i)
with MockSCSI(MockExtendedInquiry(sbc)) as s:
cmd = s.inquiry(evpd=1, page_code=INQUIRY.VPD.EXTENDED_INQUIRY_DATA)
i = cmd.result
self.assertEqual(i['peripheral_qualifier'], 0)
            self.assertEqual(i['peripheral_device_type'], 0)
self.assertEqual(i['activate_microcode'], 1)
self.assertEqual(i['spt'], 2)
self.assertEqual(i['grd_chk'], 1)
self.assertEqual(i['app_chk'], 1)
self.assertEqual(i['ref_chk'], 1)
self.assertEqual(i['uask_sup'], 1)
self.assertEqual(i['group_sup'], 1)
self.assertEqual(i['prior_sup'], 0)
self.assertEqual(i['headsup'], 0)
self.assertEqual(i['ordsup'], 1)
self.assertEqual(i['simpsup'], 1)
self.assertEqual(i['wu_sup'], 0)
self.assertEqual(i['crd_sup'], 1)
self.assertEqual(i['nv_sup'], 0)
self.assertEqual(i['v_sup'], 1)
self.assertEqual(i['p_i_i_sup'], 1)
self.assertEqual(i['luiclr'], 1)
self.assertEqual(i['r_sup'], 1)
self.assertEqual(i['cbcs'], 1)
self.assertEqual(i['multi_it_nexus_microcode_download'], 3)
self.assertEqual(i['extended_self_test_completion_minutes'], 15)
self.assertEqual(i['poa_sup'], 1)
self.assertEqual(i['hra_sup'], 1)
self.assertEqual(i['vsa_sup'], 1)
self.assertEqual(i['maximum_supported_sense_data_length'], 5)
d = Inquiry.unmarshall_datain(Inquiry.marshall_datain(i), evpd=1)
self.assertEqual(d, i)
s.device = MockDevId(sbc)
cmd = s.inquiry(evpd=1, page_code=INQUIRY.VPD.DEVICE_IDENTIFICATION)
i = cmd.result
self.assertEqual(i['peripheral_qualifier'], 0)
            self.assertEqual(i['peripheral_device_type'], 0)
dd = i['designator_descriptors']
self.assertEqual(len(dd), 2)
# T10 designation descriptor
self.assertEqual(dd[0]['association'], 2)
self.assertEqual(dd[0]['code_set'], 2)
self.assertEqual(dd[0]['designator_length'], 8)
self.assertEqual(dd[0]['designator_type'], 1)
self.assertEqual(dd[0]['piv'], 1)
self.assertEqual(dd[0]['protocol_identifier'], 5)
self.assertEqual(dd[0]['designator']['t10_vendor_id'].decode("utf-8"), 'Test T10')
self.assertEqual(dd[0]['designator']['vendor_specific_id'].decode("utf-8"), '')
# EUI-64 designation descriptor
self.assertEqual(dd[1]['association'], 2)
self.assertEqual(dd[1]['code_set'], 1)
self.assertEqual(dd[1]['designator_length'], 8)
self.assertEqual(dd[1]['designator_type'], 2)
self.assertEqual(dd[1]['piv'], 0)
self.assertFalse(hasattr(dd[1], 'protocol_identifier'))
self.assertEqual(dd[1]['designator']['ieee_company_id'], 0x112233)
self.assertEqual(dd[1]['designator']['vendor_specific_extension_id'].decode("utf-8"), 'abcde')
d = Inquiry.unmarshall_datain(Inquiry.marshall_datain(i), evpd=1)
self.assertEqual(d, i)
|
"""
irc/server.py
Copyright © 2009 Ferry Boender
Copyright © 2012 Jason R. Coombs
This server has basic support for:
* Connecting
* Channels
* Nicknames
* Public/private messages
It is MISSING support for notably:
* Server linking
* Modes (user and channel)
* Proper error reporting
* Basically everything else
It is mostly useful as a testing tool or perhaps for building something like a
private proxy on. Do NOT use it in any kind of production code or anything that
will ever be connected to by the public.
"""
from __future__ import print_function, absolute_import
import argparse
import logging
import socket
import select
import re
from . import client
from . import _py2_compat
from . import logging as log_util
from . import events
from . import buffer
SRV_WELCOME = "Welcome to %s v%s, the ugliest IRC server in the world." % (
__name__, client.VERSION)
log = logging.getLogger(__name__)
class IRCError(Exception):
"""
Exception thrown by IRC command handlers to notify client of a server/client error.
"""
def __init__(self, code, value):
self.code = code
self.value = value
def __str__(self):
return repr(self.value)
@classmethod
def from_name(cls, name, value):
return cls(events.codes[name], value)
class IRCChannel(object):
"""
Object representing an IRC channel.
"""
def __init__(self, name, topic='No topic'):
self.name = name
self.topic_by = 'Unknown'
self.topic = topic
self.clients = set()
class IRCClient(_py2_compat.socketserver.BaseRequestHandler):
"""
IRC client connect and command handling. Client connection is handled by
the `handle` method which sets up a two-way communication with the client.
It then handles commands sent by the client by dispatching them to the
handle_ methods.
"""
class Disconnect(BaseException): pass
def __init__(self, request, client_address, server):
self.user = None
self.host = client_address # Client's hostname / ip.
self.realname = None # Client's real name
self.nick = None # Client's currently registered nickname
self.send_queue = [] # Messages to send to client (strings)
self.channels = {} # Channels the client is in
_py2_compat.socketserver.BaseRequestHandler.__init__(self, request,
client_address, server)
def handle(self):
log.info('Client connected: %s', self.client_ident())
self.buffer = buffer.LineBuffer()
try:
while True:
self._handle_one()
except self.Disconnect:
self.request.close()
def _handle_one(self):
"""
Handle one read/write cycle.
"""
ready_to_read, ready_to_write, in_error = select.select(
[self.request], [self.request], [self.request], 0.1)
if in_error:
raise self.Disconnect()
# Write any commands to the client
while self.send_queue and ready_to_write:
msg = self.send_queue.pop(0)
self._send(msg)
# See if the client has any commands for us.
if ready_to_read:
self._handle_incoming()
def _handle_incoming(self):
try:
data = self.request.recv(1024)
except Exception:
raise self.Disconnect()
if not data:
raise self.Disconnect()
self.buffer.feed(data)
for line in self.buffer:
self._handle_line(line)
def _handle_line(self, line):
try:
log.debug('from %s: %s' % (self.client_ident(), line))
command, sep, params = line.partition(' ')
handler = getattr(self, 'handle_%s' % command.lower(), None)
if not handler:
log.info('No handler for command: %s. '
'Full line: %s' % (command, line))
raise IRCError.from_name('unknowncommand',
'%s :Unknown command' % command)
response = handler(params)
except AttributeError as e:
log.error(_py2_compat.str(e))
raise
except IRCError as e:
response = ':%s %s %s' % (self.server.servername, e.code, e.value)
log.error(response)
except Exception as e:
response = ':%s ERROR %r' % (self.server.servername, e)
log.error(response)
raise
if response:
self._send(response)
def _send(self, msg):
log.debug('to %s: %s', self.client_ident(), msg)
self.request.send(msg + '\r\n')
def handle_nick(self, params):
"""
Handle the initial setting of the user's nickname and nick changes.
"""
nick = params
# Valid nickname?
        if re.search(r'[^a-zA-Z0-9\-\[\]\'`^{}_]', nick):
raise IRCError.from_name('erroneusnickname', ':%s' % nick)
if self.server.clients.get(nick, None) == self:
# Already registered to user
return
if nick in self.server.clients:
# Someone else is using the nick
raise IRCError.from_name('nicknameinuse', 'NICK :%s' % (nick))
if not self.nick:
# New connection and nick is available; register and send welcome
# and MOTD.
self.nick = nick
self.server.clients[nick] = self
response = ':%s %s %s :%s' % (self.server.servername,
events.codes['welcome'], self.nick, SRV_WELCOME)
self.send_queue.append(response)
response = ':%s 376 %s :End of MOTD command.' % (
self.server.servername, self.nick)
self.send_queue.append(response)
return
# Nick is available. Change the nick.
message = ':%s NICK :%s' % (self.client_ident(), nick)
self.server.clients.pop(self.nick)
self.nick = nick
self.server.clients[self.nick] = self
# Send a notification of the nick change to all the clients in the
# channels the client is in.
for channel in self.channels.values():
self._send_to_others(message, channel)
# Send a notification of the nick change to the client itself
return message
def handle_user(self, params):
"""
Handle the USER command which identifies the user to the server.
"""
params = params.split(' ', 3)
if len(params) != 4:
raise IRCError.from_name('needmoreparams',
'USER :Not enough parameters')
user, mode, unused, realname = params
self.user = user
self.mode = mode
self.realname = realname
return ''
def handle_ping(self, params):
"""
Handle client PING requests to keep the connection alive.
"""
response = ':%s PONG :%s' % (self.server.servername, self.server.servername)
return response
def handle_join(self, params):
"""
Handle the JOINing of a user to a channel. Valid channel names start
with a # and consist of a-z, A-Z, 0-9 and/or '_'.
"""
channel_names = params.split(' ', 1)[0] # Ignore keys
for channel_name in channel_names.split(','):
r_channel_name = channel_name.strip()
# Valid channel name?
if not re.match('^#([a-zA-Z0-9_])+$', r_channel_name):
raise IRCError.from_name('nosuchchannel',
'%s :No such channel' % r_channel_name)
# Add user to the channel (create new channel if not exists)
channel = self.server.channels.setdefault(r_channel_name, IRCChannel(r_channel_name))
channel.clients.add(self)
# Add channel to user's channel list
self.channels[channel.name] = channel
# Send the topic
response_join = ':%s TOPIC %s :%s' % (channel.topic_by, channel.name, channel.topic)
self.send_queue.append(response_join)
# Send join message to everybody in the channel, including yourself and
# send user list of the channel back to the user.
response_join = ':%s JOIN :%s' % (self.client_ident(), r_channel_name)
for client in channel.clients:
client.send_queue.append(response_join)
nicks = [client.nick for client in channel.clients]
response_userlist = ':%s 353 %s = %s :%s' % (self.server.servername, self.nick, channel.name, ' '.join(nicks))
self.send_queue.append(response_userlist)
response = ':%s 366 %s %s :End of /NAMES list' % (self.server.servername, self.nick, channel.name)
self.send_queue.append(response)
def handle_privmsg(self, params):
"""
Handle sending a private message to a user or channel.
"""
target, sep, msg = params.partition(' ')
if not msg:
raise IRCError.from_name('needmoreparams',
'PRIVMSG :Not enough parameters')
message = ':%s PRIVMSG %s %s' % (self.client_ident(), target, msg)
if target.startswith('#') or target.startswith('$'):
# Message to channel. Check if the channel exists.
channel = self.server.channels.get(target)
if not channel:
raise IRCError.from_name('nosuchnick', 'PRIVMSG :%s' % target)
            if channel.name not in self.channels:
# The user isn't in the channel.
raise IRCError.from_name('cannotsendtochan',
'%s :Cannot send to channel' % channel.name)
self._send_to_others(message, channel)
else:
# Message to user
client = self.server.clients.get(target, None)
if not client:
raise IRCError.from_name('nosuchnick', 'PRIVMSG :%s' % target)
client.send_queue.append(message)
def _send_to_others(self, message, channel):
"""
Send the message to all clients in the specified channel except for
self.
"""
other_clients = [client for client in channel.clients
if not client == self]
for client in other_clients:
client.send_queue.append(message)
def handle_topic(self, params):
"""
Handle a topic command.
"""
channel_name, sep, topic = params.partition(' ')
channel = self.server.channels.get(channel_name)
if not channel:
raise IRCError.from_name('nosuchnick', 'PRIVMSG :%s' % channel_name)
        if channel.name not in self.channels:
# The user isn't in the channel.
raise IRCError.from_name('cannotsendtochan',
'%s :Cannot send to channel' % channel.name)
if topic:
channel.topic = topic.lstrip(':')
channel.topic_by = self.nick
message = ':%s TOPIC %s :%s' % (self.client_ident(), channel_name,
channel.topic)
return message
def handle_part(self, params):
"""
Handle a client parting from channel(s).
"""
for pchannel in params.split(','):
if pchannel.strip() in self.server.channels:
# Send message to all clients in all channels user is in, and
# remove the user from the channels.
channel = self.server.channels.get(pchannel.strip())
response = ':%s PART :%s' % (self.client_ident(), pchannel)
if channel:
for client in channel.clients:
client.send_queue.append(response)
channel.clients.remove(self)
self.channels.pop(pchannel)
else:
response = ':%s 403 %s :%s' % (self.server.servername, pchannel, pchannel)
self.send_queue.append(response)
def handle_quit(self, params):
"""
Handle the client breaking off the connection with a QUIT command.
"""
response = ':%s QUIT :%s' % (self.client_ident(), params.lstrip(':'))
# Send quit message to all clients in all channels user is in, and
# remove the user from the channels.
for channel in self.channels.values():
for client in channel.clients:
client.send_queue.append(response)
channel.clients.remove(self)
def handle_dump(self, params):
"""
Dump internal server information for debugging purposes.
"""
print("Clients:", self.server.clients)
for client in self.server.clients.values():
print(" ", client)
for channel in client.channels.values():
print(" ", channel.name)
print("Channels:", self.server.channels)
for channel in self.server.channels.values():
print(" ", channel.name, channel)
for client in channel.clients:
print(" ", client.nick, client)
def client_ident(self):
"""
Return the client identifier as included in many command replies.
"""
return client.NickMask.from_params(self.nick, self.user,
self.server.servername)
def finish(self):
"""
The client conection is finished. Do some cleanup to ensure that the
client doesn't linger around in any channel or the client list, in case
the client didn't properly close the connection with PART and QUIT.
"""
log.info('Client disconnected: %s', self.client_ident())
response = ':%s QUIT :EOF from client' % self.client_ident()
for channel in self.channels.values():
if self in channel.clients:
# Client is gone without properly QUITing or PARTing this
# channel.
for client in channel.clients:
client.send_queue.append(response)
channel.clients.remove(self)
self.server.clients.pop(self.nick)
log.info('Connection finished: %s', self.client_ident())
def __repr__(self):
"""
Return a user-readable description of the client
"""
return '<%s %s!%s@%s (%s)>' % (
self.__class__.__name__,
self.nick,
self.user,
self.host[0],
self.realname,
)
class IRCServer(_py2_compat.socketserver.ThreadingMixIn,
_py2_compat.socketserver.TCPServer):
daemon_threads = True
allow_reuse_address = True
channels = {}
"Existing channels (IRCChannel instances) by channel name"
clients = {}
"Connected clients (IRCClient instances) by nick name"
def __init__(self, *args, **kwargs):
self.servername = 'localhost'
self.channels = {}
self.clients = {}
_py2_compat.socketserver.TCPServer.__init__(self, *args, **kwargs)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--address", dest="listen_address",
default='127.0.0.1', help="IP on which to listen")
parser.add_argument("-p", "--port", dest="listen_port", default=6667,
type=int, help="Port on which to listen")
log_util.add_arguments(parser)
return parser.parse_args()
def main():
options = get_args()
log_util.setup(options)
log.info("Starting irc.server")
#
# Start server
#
try:
bind_address = options.listen_address, options.listen_port
ircserver = IRCServer(bind_address, IRCClient)
log.info('Listening on {listen_address}:{listen_port}'.format(
**vars(options)))
ircserver.serve_forever()
except socket.error as e:
log.error(repr(e))
raise SystemExit(-2)
if __name__ == "__main__":
main()
|
import os.path
from celery.task import task
from celery.task.sets import subtask
from django.core.files import File
import logging
import subprocess
import string
import shutil
import pycurl
import tempfile
import re
import sys
class Storage:
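    """Accumulate response header lines fed in by pycurl's HEADERFUNCTION callback."""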
def __init__(self):
self.contents = []
def store(self, buf):
        self.contents.append(buf)
def __str__(self):
return ", ".join(self.contents)
@task
def copyfromurl(inputs,outputs,options={},callbacks=[]):
url = options["url"]
logging.info(url)
tfile = tempfile.NamedTemporaryFile('wb',delete=False)
retrieved_headers = Storage()
f = open(tfile.name,'w')
c = pycurl.Curl()
c.setopt(c.URL, str(url))
c.setopt(pycurl.FOLLOWLOCATION, 1)
c.setopt(c.WRITEFUNCTION, f.write)
c.setopt(c.HEADERFUNCTION, retrieved_headers.store)
c.perform()
status = c.getinfo(c.HTTP_CODE)
c.close()
f.close()
logging.debug(retrieved_headers)
filename = "Imported File"
for header in retrieved_headers.contents:
if header.lower().startswith("content-disposition"):
filename = re.match(".*filename=(?P<filename>.*)", header).group('filename')
    if status >= 400:
        logging.warn("Copy From URL %s returned error status code '%s'" % (url, status))
        return { "message" : "Copy from url failed with error code '%s'" % status }
else:
mfileid = inputs[0]
from dataservice.models import MFile
mfile = MFile.objects.get(id=mfileid)
filename = mfile.service.get_unique_name(filename)
mfile.update_mfile(filename, file=File(open(tfile.name, 'r')))
mfile.save()
for callback in callbacks:
subtask(callback).delay()
return { "message" : "Copy from url was successful"}
@task
def render_blender(inputs,outputs,options={},callbacks=[]):
padding = 4
frame = options["frame"]
if options.has_key("fname"):
fname = options["format"]
else:
fname="image"
if options.has_key("format"):
format = options["format"]
else:
format="PNG"
mfileid = inputs[0]
from dataservice.models import MFile
mf = MFile.objects.get(id=mfileid)
inputfile = mf.file.path
outputfile = outputs[0]
logging.info("Processing render job %s frame: %s " % (inputfile,frame))
if not os.path.exists(inputfile):
logging.info("Scene %s does not exist" % inputfile)
return False
[outputdir,ffff]= os.path.split(outputfile)
hashes = "#" * padding
outputformat = "%s/%s.%s" % (outputdir,fname,hashes)
    ss = string.zfill(str(frame), padding)
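    # Blender CLI flags: -b = run in background (no UI), -x 1 = add the file
    # extension, -o = output pattern ('#' padding encodes the frame number),
    # -F = output format, -s/-e = start/end frame, -a = render the frame range.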
args = ["blender","-b",inputfile,"-x","1","-o",outputformat,"-F",format.upper(),"-s",ss,"-e",ss,"-a"]
logging.info(args)
n = str(frame).zfill(padding)
resultfile = os.path.join(outputdir,"%s.%s.%s"%(fname,n,format.lower()))
ret = subprocess.call(args)
if resultfile != outputfile:
logging.debug("result file %s is not outputfile %s ... Moving" % (resultfile, outputfile))
shutil.move(resultfile, outputfile)
for callback in callbacks:
subtask(callback).delay()
return ret
|
"""
setup.py file for augeas
"""
import os
prefix = os.environ.get("prefix", "/usr")
from distutils.core import setup
setup (name = 'python-augeas',
version = '0.3.0',
author = "Harald Hoyer",
author_email = "augeas-devel@redhat.com",
description = """Python bindings for Augeas""",
py_modules = [ "augeas" ],
url = "http://augeas.net/",
)
|
import sys
import gdb
import os
import os.path
pythondir = '/Users/igrokhotkov/projects/esp8266/esptools/crosstool-NG/builds/xtensa-lx106-elf/share/gcc-4.8.2/python'
libdir = '/Users/igrokhotkov/projects/esp8266/esptools/crosstool-NG/builds/xtensa-lx106-elf/xtensa-lx106-elf/lib'
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
|
from configparser import RawConfigParser
import os
import re
from subprocess import call, check_call, Popen, PIPE, STDOUT
import sys
from errors import Errors
from general import (
exists_and_is_directory, shellquote, print_stderr
)
from githelpers import has_objects_and_refs
class OptionFrom:
'''enum-like values to indicate the source of different options, used in
directory_to_backup_from, git_directory_from and branch_from'''
COMMAND_LINE = 1
CONFIGURATION_FILE = 2
DEFAULT_VALUE = 3
string_versions = { COMMAND_LINE : "command line",
CONFIGURATION_FILE : "configuration file",
DEFAULT_VALUE : "default value" }
class GibSetup:
def __init__(self, command_line_options):
self.configuration_file = '.gib.conf'
self.directory_to_backup = None
self.directory_to_backup_from = None
self.git_directory = None
self.git_directory_from = None
self.branch = None
self.branch_from = None
if command_line_options.directory:
self.directory_to_backup = command_line_options.directory
self.directory_to_backup_from = OptionFrom.COMMAND_LINE
else:
if 'HOME' not in os.environ:
# Then we can't use HOME as default directory:
print_stderr("The HOME environment variable was not set")
sys.exit(Errors.STRANGE_ENVIRONMENT)
self.directory_to_backup = os.environ['HOME']
self.directory_to_backup_from = OptionFrom.DEFAULT_VALUE
# We need to make sure that this is an absolute path before
# changing directory:
self.directory_to_backup = os.path.abspath(self.directory_to_backup)
if not exists_and_is_directory(self.directory_to_backup):
sys.exit(Errors.DIRECTORY_TO_BACKUP_MISSING)
# Now we know the directory that we're backing up, try to load the
# config file:
configuration = RawConfigParser()
configuration.read(os.path.join(self.directory_to_backup,
self.configuration_file))
# Now set the git directory:
if command_line_options.git_directory:
self.git_directory = command_line_options.git_directory
self.git_directory_from = OptionFrom.COMMAND_LINE
elif configuration.has_option('repository','git_directory'):
self.git_directory = configuration.get(
'repository','git_directory'
)
self.git_directory_from = OptionFrom.CONFIGURATION_FILE
else:
self.git_directory = os.path.join(self.directory_to_backup,'.git')
self.git_directory_from = OptionFrom.DEFAULT_VALUE
if not os.path.isabs(self.git_directory):
print_stderr("The git directory must be an absolute path.")
sys.exit(Errors.GIT_DIRECTORY_RELATIVE)
# And finally the branch:
if command_line_options.branch:
self.branch = command_line_options.branch
self.branch_from = OptionFrom.COMMAND_LINE
elif configuration.has_option('repository','branch'):
self.branch = configuration.get('repository','branch')
self.branch_from = OptionFrom.CONFIGURATION_FILE
else:
self.branch = 'master'
self.branch_from = OptionFrom.DEFAULT_VALUE
# Check that the git_directory ends in '.git':
        if not re.search(r'\.git/*$', self.git_directory):
message = "The git directory ({}) did not end in '.git'"
print_stderr(message.format(self.git_directory))
sys.exit(Errors.BAD_GIT_DIRECTORY)
# Also check that it actually exists:
if not os.path.exists(self.git_directory):
message = "The git directory '{}' does not exist."
print_stderr(message.format(self.git_directory))
sys.exit(Errors.GIT_DIRECTORY_MISSING)
def get_directory_to_backup(self):
return self.directory_to_backup
def get_git_directory(self):
return self.git_directory
def get_file_list_directory(self):
return os.path.join(
self.get_git_directory(),
'file-lists'
)
def get_branch(self):
return self.branch
def print_settings(self):
print_stderr('''Settings for backup:
backing up the directory {} (set from the {})
... to the branch "{}" (set from the {})
... in the git repository {} (set from the {})'''.format(
self.directory_to_backup,
OptionFrom.string_versions[self.directory_to_backup_from],
self.branch,
OptionFrom.string_versions[self.branch_from],
self.git_directory,
OptionFrom.string_versions[self.git_directory_from]),
)
def get_invocation(self):
'''Return an invocation that would run the script with options
that will set directory_to_backup, git_directory and branch as on
this invocation. After init has been called, we can just specify
the directory to backup, since the configuration file .gib.conf in
that directory will store the git_directory and the branch. If
the directory to backup is just the current user's home directory,
then that doesn't need to be specified either.'''
invocation = sys.argv[0]
if self.directory_to_backup != os.environ['HOME']:
invocation += " " + "--directory="
invocation += shellquote(self.directory_to_backup)
return invocation
def git(self,rest_of_command):
        '''Create a list (suitable for passing to subprocess.call or
subprocess.check_call) which runs a git command with the correct
git directory and work tree'''
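        # For example (illustrative paths):
        #   git(["status"]) -> ["git", "--git-dir=/home/u/.git",
        #                       "--work-tree=/home/u", "status"]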
return [ "git",
"--git-dir="+self.git_directory,
"--work-tree="+self.directory_to_backup ] + rest_of_command
def git_for_shell(self):
'''Returns a string with shell-safe invocation of git which can be used
in calls that are subject to shell interpretation.'''
command = "git --git-dir="+shellquote(self.git_directory)
command += " --work-tree="+shellquote(self.directory_to_backup)
return command
def git_initialized(self):
'''Returns True if it seems as if the git directory has already
        been initialized, and returns False otherwise'''
return has_objects_and_refs(self.git_directory)
def abort_if_not_initialized(self):
'''Check that the git repository exists and exit otherwise'''
if not self.git_initialized():
message = "You don't seem to have initialized {} for backup."
print_stderr(message.format(self.directory_to_backup))
message = "Please use '{} init' to initialize it"
print_stderr(message.format(self.get_invocation()))
sys.exit(Errors.REPOSITORY_NOT_INITIALIZED)
def check_ref(self,ref):
'''Returns True if a ref can be resolved to a commit and False
otherwise.'''
return 0 == call(
self.git(["rev-parse","--verify",ref]),
stdout=open('/dev/null','w'),
stderr=STDOUT
)
def check_tree(self,tree):
'''Returns True if 'tree' can be understood as a tree, e.g. with
"git ls-tree" or false otherwise'''
with open('/dev/null','w') as null:
return 0 == call(
self.git(["ls-tree",tree]),
stdout=null,
stderr=STDOUT
)
def set_HEAD_to(self,ref):
'''Update head to point to a particular branch, without touching
the index or the working tree'''
check_call(
self.git(["symbolic-ref","HEAD","refs/heads/{}".format(ref)])
)
def currently_on_correct_branch(self):
'''Return True if HEAD currently points to 'self.branch', and
return False otherwise.'''
p = Popen(self.git(["symbolic-ref","HEAD"]),stdout=PIPE)
c = p.communicate()
if 0 != p.returncode:
print_stderr("Finding what HEAD points to failed")
sys.exit(Errors.FINDING_HEAD)
result = c[0].decode().strip()
if self.branch == result:
return True
elif ("refs/heads/"+self.branch) == result:
return True
else:
return False
def switch_to_correct_branch(self):
self.set_HEAD_to(self.branch)
self.abort_unless_HEAD_exists()
# Also reset the index to match HEAD. Otherwise things go
# horribly wrong when switching from backing up one computer to
# another, since the index is still that from the first one.
msg = "Now working on a new branch, so resetting the index to match..."
print_stderr(msg)
check_call(self.git(["read-tree","HEAD"]))
def config_value(self,key):
'''Retrieve the git config value for "key", or return
None if it is not defined'''
p = Popen(self.git(["config",key]),stdout=PIPE)
c = p.communicate()
if 0 == p.returncode:
# Then check that the option is right:
return c[0].decode().strip()
else:
return None
def set_config_value(self,key,value):
check_call(self.git(["config",key,value]))
def unset_config_value(self,key):
call(self.git(["config","--unset",key]))
def abort_unless_particular_config(self,key,required_value):
'''Unless the git config has "required_value" set for "key", exit.'''
current_value = self.config_value(key)
if current_value:
if current_value != required_value:
message = "The current value for {} is {}, should be: {}"
print_stderr(message.format(
key,
current_value,
required_value
))
sys.exit(Errors.GIT_CONFIG_ERROR)
else:
message = "The {} config option was not set, setting to {}"
print_stderr(message.format(key,required_value))
self.set_config_value(key,required_value)
def abort_unless_no_auto_gc(self):
'''Exit unless git config has gc.auto set to "0"'''
self.abort_unless_particular_config("gc.auto","0")
def abort_unless_HEAD_exists(self):
if not self.check_ref("HEAD"):
message = '''The branch you are trying to back up to does not exist.
(Perhaps you haven't run "{} init")'''
print_stderr(message.format(self.get_invocation()))
sys.exit(Errors.NO_SUCH_BRANCH)
|
from __future__ import print_function
import sys
import os
import time
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from Xlib.display import Display
from Xlib.ext import xfixes
def main(argv):
    if len(argv) != 2:
        sys.exit('usage: {0} SELECTION\n\n'
                 'SELECTION is typically PRIMARY, SECONDARY or CLIPBOARD.\n'
                 .format(argv[0]))
    display = Display()
    sel_name = argv[1]
    sel_atom = display.get_atom(sel_name)
if not display.has_extension('XFIXES'):
if display.query_extension('XFIXES') is None:
print('XFIXES extension not supported', file=sys.stderr)
return 1
xfixes_version = display.xfixes_query_version()
print('Found XFIXES version %s.%s' % (
xfixes_version.major_version,
xfixes_version.minor_version,
), file=sys.stderr)
screen = display.screen()
mask = xfixes.XFixesSetSelectionOwnerNotifyMask | \
xfixes.XFixesSelectionWindowDestroyNotifyMask | \
xfixes.XFixesSelectionClientCloseNotifyMask
display.xfixes_select_selection_input(screen.root, sel_atom, mask)
while True:
e = display.next_event()
print(e)
if (e.type, e.sub_code) == display.extension_event.SetSelectionOwnerNotify:
print('SetSelectionOwner: owner=0x{0:08x}'.format(e.owner.id))
elif (e.type, e.sub_code) == display.extension_event.SelectionWindowDestroyNotify:
print('SelectionWindowDestroy: owner=0x{0:08x}'.format(e.owner.id))
elif (e.type, e.sub_code) == display.extension_event.SelectionClientCloseNotify:
print('SelectionClientClose: owner=0x{0:08x}'.format(e.owner.id))
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
from sys import argv
import logging
from MonkeyScraper import MonkeyScraper
LOG_FILENAME = 'MonkeyScraper.log'
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)
def main(username, password, survey_url):
"""
Creates a MonkeyScraper, logs in, and scrapes the survey at the provided url
:param username: str: surveymonkey username
:param password: str: surveymonkey password
:param survey_url: str: the "analyze" page url for your survey
:return:
"""
    # Equivalent manual sequence, kept for reference:
    # scraper = MonkeyScraper()
    # scraper.init()
    # scraper.log_in(username=username, password=password)
    # scraper.scrape(survey_url)
    # scraper.log_out()
    # scraper.close()
with MonkeyScraper(username=username, password=password) as scraper:
survey = scraper.scrape(survey_url)
if __name__ == '__main__':
main(*argv[1:])
|
"""Test suite for language_check."""
from __future__ import unicode_literals
import unittest
import warnings
from collections import namedtuple
import language_check
class TestLanguageTool(unittest.TestCase):
CheckTest = namedtuple('CheckTest', ('text', 'matches'))
Match = namedtuple('Match', ('fromy', 'fromx', 'ruleId'))
check_tests = {
'en': [
CheckTest(
('Paste your own text here... or check this text too see '
'a few of the problems that that LanguageTool can detect. '
'Did you notice that their is no spelcheckin included?'),
[
Match(0, 47, 'TOO_TO'),
Match(0, 132, 'THEIR_IS'),
]
),
],
'fr': [
CheckTest(
('Se texte est un exemple pour pour vous montrer '
'le fonctionnement de LanguageTool. '
'notez que LanguageTool ne comporte pas '
'de correcteur orthographique.'),
[
Match(0, 0, 'SE_CE'),
Match(0, 3, 'TE_NV'),
Match(0, 24, 'FRENCH_WORD_REPEAT_RULE'),
Match(0, 82, 'UPPERCASE_SENTENCE_START'),
]
),
CheckTest(
'je me rappelle de tout sans aucun soucis!',
[
Match(0, 0, 'UPPERCASE_SENTENCE_START'),
Match(0, 6, 'RAPPELER_DE'),
Match(0, 28, 'ACCORD_NOMBRE'),
Match(0, 34, 'FRENCH_WHITESPACE'),
]
),
],
}
correct_tests = {
'en-US': {
'that would of been to impressive.':
'That would have been too impressive.',
},
'fr': {
'il monte en haut si il veut.':
'Il monte s’il veut.',
},
}
def test_check(self):
lang_check = language_check.LanguageTool()
for language, tests in self.check_tests.items():
try:
lang_check.language = language
except ValueError:
version = language_check.get_version()
warnings.warn(
'LanguageTool {} doesn’t support language {!r}'
.format(version, language)
)
for text, expected_matches in tests:
matches = lang_check.check(text)
for expected_match in expected_matches:
for match in matches:
if (
(match.fromy, match.fromx, match.ruleId) ==
(expected_match.fromy, expected_match.fromx,
expected_match.ruleId)
):
break
else:
raise IndexError(
'can’t find {!r}'.format(expected_match))
def test_correct(self):
lang_check = language_check.LanguageTool()
for language, tests in self.correct_tests.items():
try:
lang_check.language = language
except ValueError:
version = language_check.get_version()
warnings.warn(
'LanguageTool {} doesn’t support language {!r}'
.format(version, language)
)
for text, result in tests.items():
self.assertEqual(lang_check.correct(text), result)
def test_languages(self):
self.assertIn('en', language_check.get_languages())
def test_version(self):
self.assertTrue(language_check.get_version())
def test_get_build_date(self):
self.assertTrue(language_check.get_build_date())
def test_get_directory(self):
path = language_check.get_directory()
language_check.set_directory(path)
self.assertEqual(path, language_check.get_directory())
def test_disable_spellcheck(self):
sentence_with_misspelling = 'This is baad.'
lang_check = language_check.LanguageTool()
self.assertTrue(lang_check.check(sentence_with_misspelling))
lang_check.disable_spellchecking()
self.assertFalse(lang_check.check(sentence_with_misspelling))
lang_check.enable_spellchecking()
self.assertTrue(lang_check.check(sentence_with_misspelling))
def test_README_with_unicode(self):
tool = language_check.LanguageTool('en-US')
text = ('A sentence with a error in the '
'Hitchhiker’s Guide tot he Galaxy')
matches = tool.check(text)
self.assertEqual(len(matches), 2)
self.assertEqual((matches[0].fromy, matches[0].fromx),
(0, 16))
self.assertEqual((matches[0].ruleId, matches[0].replacements),
('EN_A_VS_AN', ['an']))
self.assertEqual((matches[1].fromy, matches[1].fromx),
(0, 50))
self.assertEqual((matches[1].ruleId, matches[1].replacements),
('TOT_HE', ['to the']))
corrected = language_check.correct(text, matches)
self.assertEqual(corrected, 'A sentence with an error in the '
'Hitchhiker’s Guide to the Galaxy')
if __name__ == '__main__':
unittest.main()
|
import fnmatch
import glob
import itertools
import os
import re
import subprocess
import sys
import fileutil
version_number_re = r'([0-9]+(?:\.[0-9]+){2,3})'
incomplete_version_number_re = r'^[0-9]+(?:\.[0-9]+){2}$'
version_line_re = r'^### v{0}.*$'.format(version_number_re)
def get_topmost_version_line(changelog_file):
with open(changelog_file, 'r', encoding=fileutil.get_file_encoding(changelog_file, 'utf-8')) as fp:
return next(filter(lambda line: re.match(version_line_re, line), fp.readlines()))
def get_version_number(version_line):
match_res = re.match(version_line_re, version_line)
assert match_res, 'Invalid version line'
if match_res:
return match_res.groups()[0]
def canonicalize_version_number(version_number):
    assert re.match(r'^{0}$'.format(version_number_re), version_number), 'Invalid version number format (neither x.x.x nor x.x.x.x)'
if re.match(incomplete_version_number_re, version_number):
version_number += '.0'
return version_number
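# For example: canonicalize_version_number('1.2.3') returns '1.2.3.0', while a
# four-part version such as '1.2.3.4' is returned unchanged.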
def perror(*args, **kwargs):
sys.stderr.write(*args, **kwargs)
sys.exit(1)
def quote_path(path):
if path.startswith('"') and path.endswith('"'):
return path
return '"{0}"'.format(path)
def is_dll_or_exe(file):
assert os.path.isfile(file)
return fnmatch.fnmatch(file, '*.dll') or fnmatch.fnmatch(file, '*.exe')
def _get_full_path(candidate_path, file_name):
if candidate_path is None:
candidate_path = ''
if os.path.isfile(candidate_path):
return candidate_path
elif os.path.isdir(candidate_path):
return os.path.join(candidate_path, file_name)
else:
return os.path.join(os.path.dirname(sys.argv[0]), file_name)
def _iterate_module_files_legacy(module_path):
assert os.path.isdir(module_path)
yield from filter(is_dll_or_exe,
map(lambda item: os.path.join(module_path, item),
os.listdir(module_path)))
def _iterate_module_files_new(module_path):
assert os.path.isdir(module_path)
yield from filter(is_dll_or_exe,
filter(os.path.isfile,
map(lambda item_name: os.path.join(module_path, item_name),
map(lambda item: item.name,
os.scandir(module_path)))))
if sys.version_info >= (3, 5):
iterate_module_files_v1 = _iterate_module_files_new
run_subprocess = subprocess.run
else:
iterate_module_files_v1 = _iterate_module_files_legacy
run_subprocess = subprocess.call
_module_patterns = '*.dll', '*.exe'
def iterate_module_files_v2(module_path):
assert os.path.isdir(module_path)
for pattern in _module_patterns:
pattern = os.path.join(module_path, pattern)
yield from glob.iglob(pattern)
def iterate_module_files_v3(module_path):
assert os.path.isdir(module_path)
yield from itertools.chain.from_iterable(
glob.iglob(pattern) for pattern in map(lambda pattern: os.path.join(module_path, pattern), _module_patterns))
def main():
"""
Usage:
SetPEVersion.py (--module-path=<PATH>) [--changelog=FILE] [--stampver=FILE] [--debug]
SetPEVersion.py -h | --help
SetPEVersion.py -v | --version
Options:
-c FILE --changelog=FILE Specify the full path of "Changelog.txt"
-s FILE --stampver=FILE Specify the full path of "StampVer.exe"
-m PATH --module-path=PATH Specify a single module file(DLL or EXE) or a directory that contains module files
-d --debug Show more messages for debug purpose
-h --help Show this help message
-v --version Show version message
"""
import docopt
import pprint
args = docopt.docopt(main.__doc__, version='SetPEVersion v0.1.0')
changelog = _get_full_path(args['--changelog'], 'Changelog.txt')
stampver = _get_full_path(args['--stampver'], 'StampVer.exe')
if not os.path.isfile(changelog):
perror('Changelog file not found at "{0}".'.format(changelog))
if not os.path.isfile(stampver):
        perror('StampVer.exe not found at "{0}".'.format(stampver))
modules = []
if args['--module-path']:
if os.path.isfile(args['--module-path']):
modules.append(args['--module-path'])
elif os.path.isdir(args['--module-path']):
modules.extend(iterate_module_files_v3(args['--module-path']))
else:
perror('Invalid module path "{0}": Neither an existing file nor an existing directory.'.format(args['--module-path']))
else:
perror('"--module-path" option is required.')
# Get the topmost line which contains a valid version number from Changelog.txt
topmost_version_line = get_topmost_version_line(changelog)
version_number = canonicalize_version_number(get_version_number(topmost_version_line))
if args['--debug']:
print('-' * 79)
print(args)
print(changelog)
print(stampver)
print(version_number)
pprint.pprint(modules)
print('-' * 79)
for module in modules:
        # The straightforward form below does not work for `StampVer.exe`:
        #cmd_args = (stampver, '-k', '-f"{0}"'.format(version_number), '-p"{0}"'.format(version_number), module)
        #subprocess.run(cmd_args)
        #
        # so the arguments are quoted manually and joined into a single string.
cmd_args = ' '.join((quote_path(stampver), '-k', '-f"{0}"'.format(version_number), '-p"{0}"'.format(version_number), quote_path(module)))
run_subprocess(cmd_args)
if __name__ == '__main__':
main()
|
import sys
import os
default_variant = 'PySide'
env_api = os.environ.get('QT_API', 'pyqt')
if '--pyside' in sys.argv:
variant = 'PySide'
elif '--pyqt4' in sys.argv:
variant = 'PyQt4'
elif env_api == 'pyside':
variant = 'PySide'
elif env_api == 'pyqt':
variant = 'PyQt4'
else:
variant = default_variant
if variant == 'PySide':
from PySide import QtGui, QtCore
# This will be passed on to new versions of matplotlib
os.environ['QT_API'] = 'pyside'
def QtLoadUI(uifile):
from PySide import QtUiTools
loader = QtUiTools.QUiLoader()
uif = QtCore.QFile(uifile)
uif.open(QtCore.QFile.ReadOnly)
result = loader.load(uif)
uif.close()
return result
elif variant == 'PyQt4':
import sip
api2_classes = [
        'QDate', 'QDateTime', 'QString', 'QTextStream',
'QTime', 'QUrl', 'QVariant',
]
for cl in api2_classes:
sip.setapi(cl, 2)
from PyQt4 import QtGui, QtCore
QtCore.Signal = QtCore.pyqtSignal
QtCore.QString = str
os.environ['QT_API'] = 'pyqt'
def QtLoadUI(uifile):
from PyQt4 import uic
return uic.loadUi(uifile)
else:
raise ImportError("Python Variant not specified")
__all__ = ['QtGui', 'QtCore', 'QtLoadUI', 'variant']
|
import _metagam3d
from _metagam3d import AxisAlignment, AlignmentType
from metagam3d.channels import blocking
from metagam3d.scripts import m3d_expr
from concurrence import Tasklet
class LoadError(Exception):
pass
class Object(_metagam3d.Object):
def __init__(self, objid):
_metagam3d.Object.__init__(self, objid)
self._params = {}
def param(self, paramid):
"Get parameter object for given parameter id"
try:
return self._params[paramid]
except KeyError:
pass
param = ObjectParam(self, paramid)
self._params[paramid] = param
return param
def load(self, filename, flags=0):
"Load and return new subobject from file"
objid = _metagam3d._loadObject(filename, self.id, flags)
if objid is None:
raise LoadError("Error loading %s" % filename)
return Object(objid)
def createText(self, axisAlignment=AxisAlignment.XY_PLANE, alignment=AlignmentType.CENTER_CENTER):
"Create text object"
return Object(_metagam3d._createText(self.id, axisAlignment, alignment))
def getParam(self, paramid, t):
return self.param(paramid).getValue(t)
def setParam(self, paramid, val):
if type(val) is not _metagam3d.DynamicValue:
if type(val) is not _metagam3d.Variant:
val = _metagam3d.Variant(val)
val = _metagam3d.DynamicValue(val)
self.param(paramid).setValue(val)
def setParam3(self, paramid, x, y, z):
self.setParam(paramid, _metagam3d.Vec3d(x, y, z))
def setParamExpr(self, paramid, expr, till=None):
self.param(paramid).setValue(m3d_expr(expr, till))
def assignMaterial(self, geodeName, ambient=0, diffuse=0, specular=0, emission=0, shininess=0):
_metagam3d._assignMaterial(self.id, geodeName, ambient, diffuse, specular, emission, shininess)
def createConsole(self, cols=80, rows=25, fontSize=1.0):
return Console(_metagam3d._createConsole(self.id, cols, rows, fontSize))
def createLine(self):
return Object(_metagam3d._createLine(self.id))
def destroyAfter(self, t):
Tasklet.new(self._destroyAfter)(t)
def _destroyAfter(self, t):
Tasklet.sleep(t)
self.destroy()
class Console(Object):
def println(self, elements):
line = _metagam3d.ConsoleLine()
for el in elements:
line.add(_metagam3d.ConsoleLineElement(el[0], el[1]))
_metagam3d._printConsole(self.id, line)
class ObjectParam(_metagam3d.ObjectParam):
def __init__(self, obj, paramid):
_metagam3d.ObjectParam.__init__(self, obj.id, paramid)
self._obj = obj
@property
def obj(self):
return self._obj
def load(filename, flags=0):
"Load root level object from file"
objid = _metagam3d._loadObject(filename, 0, flags)
if objid is None:
raise LoadError("Error loading %s" % filename)
return Object(objid)
def createText(axisAlignment=AxisAlignment.XY_PLANE, alignment=AlignmentType.CENTER_CENTER):
"Create text object"
return Object(_metagam3d._createText(0, axisAlignment, alignment))
def createConsole(cols=80, rows=25, fontSize=1.0):
return Console(_metagam3d._createConsole(0, cols, rows, fontSize))
def createLine():
return Object(_metagam3d._createLine(0))
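# Minimal usage sketch (hypothetical scene file and parameter id; assumes the
# host application has initialized the engine runtime):
#   root = load("scene.osg")
#   label = root.createText()
#   label.setParam3(SOME_POSITION_PARAM, 0.0, 1.0, 0.0)  # hypothetical param id
#   label.destroyAfter(5.0)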
|
from ufl import (Coefficient, TestFunction, TrialFunction, VectorElement, dot,
dx, grad, triangle)
element = VectorElement("Lagrange", triangle, 1)
u = TrialFunction(element)
v = TestFunction(element)
w = Coefficient(element)
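# Convection-type form: a(u, v) = integral of (w . grad(u)) . v dx, where w is
# a prescribed vector coefficient on the same element as u and v.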
a = dot(dot(w, grad(u)), v) * dx
|
import sys, signal, logging, time, subprocess, RPi.GPIO as GPIO
FLOATSW_HIGH_WL = 26 # high water level float switch
WATER_VALVE = 10 # GPIO port for the Water Electo valve, High by default after boot
VALVE_CHGSTATE_TIMER = 25 # Electro valve needs roughly 20 seconds to switch from open to close and vice versa
logger = None
def Setup():
global logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler('/var/log/rodi.log')
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(message)s',"%Y-%m-%d %H:%M:%S")
handler.setFormatter(formatter)
logger.addHandler(handler)
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(WATER_VALVE, GPIO.OUT)
GPIO.setup(FLOATSW_HIGH_WL, GPIO.IN, pull_up_down=GPIO.PUD_UP) #, initial = GPIO.HIGH)
if not sys.stdout.isatty():
sys.stderr = open('/var/log/rodi_stderr.log', 'a')
sys.stdout = open('/var/log/rodi_stdout.log', 'a')
def Alert(message):
global logger
logger.info(message) # log the event
print(message)
logger.handlers[0].flush()
def Close_valve():
GPIO.output(WATER_VALVE, False)
Alert("Closing the RO/DI valve")
def Open_valve():
if GPIO.input(WATER_VALVE) == True:
Alert("RO/DI Valve already opened")
sys.exit(5)
else:
Alert("Opening the RO/DI valve")
GPIO.output(WATER_VALVE, True)
time.sleep(VALVE_CHGSTATE_TIMER)
def Refilling():
if GPIO.input(WATER_VALVE) == True:
return True
else:
return False
class GracefulKiller:
kill_now = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self,signum, frame):
self.kill_now = True
if not len(sys.argv) > 1:
print("You must provide one numerical argument to this function (duration in seconds). Exiting.")
sys.exit(1)
if sys.argv[1] != "close" and sys.argv[1] != "stop" and not sys.argv[1].isdigit():
print("Value is neither 'close', 'stop' or a refill duration expressed in seconds")
sys.exit(1)
i = 0
killer = GracefulKiller()
Setup()
if sys.argv[1] == "close" or sys.argv[1] == "stop":
Close_valve()
if str.count(subprocess.check_output(["ps", "aux"]), "rodi") > 1:
Alert("Warning, we were called while another instance of rodi.py was already in Memory")
sys.exit(1)
if GPIO.input(FLOATSW_HIGH_WL) == 0:
Alert("Water level in sump already high, refilling would be dangerous, exiting")
if GPIO.input(WATER_VALVE) == True:
Alert("RO/DI Valve already opened while high water in the sump, closing.")
Close_valve()
sys.exit(3)
if sys.argv[1].isdigit():
Alert("Not already refilling, sump water level normal, proceeding.")
Alert("Refilling for " + sys.argv[1] + " seconds")
try:
Open_valve()
while i<VALVE_CHGSTATE_TIMER+int(sys.argv[1]):
time.sleep(1)
i=i+1
if GPIO.input(FLOATSW_HIGH_WL) == 0:
Alert("Water level in sump is now high, stopping the refill")
Close_valve()
sys.exit(3)
if killer.kill_now:
Alert("Caught a Sigterm, Sigkill or CTRL+C, exiting.")
Close_valve()
sys.exit(2)
Alert("Refill done, exiting.")
Close_valve()
sys.exit(0)
except (RuntimeError, IOError):
Alert("Caught an exception, exiting.")
Close_valve()
sys.exit(4)
|
try:
import gnomecanvas
except ImportError:
import gnome.canvas as gnomecanvas
import gtk
from ecell.ui.model_editor.Constants import *
from ecell.ui.model_editor.Utils import *
from ecell.ui.model_editor.ResizeableText import *
class ComplexLine:
def __init__( self, anObject, aCanvas ):
self.theCanvas = aCanvas
self.parentObject = anObject
self.graphUtils = self.parentObject.getGraphUtils()
self.shapeMap = {}
self.lastmousex = 0
self.lastmousey = 0
self.buttonpressed = False
self.firstdrag=False
def show ( self ):
self.theRoot = self.parentObject.theCanvas.getRoot()
self.shapeDescriptorList = self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptorList()
self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).reCalculate()
self.__sortByZOrder( self.shapeDescriptorList )
self.isSelected = False
for aKey in self.shapeDescriptorList.keys():
aDescriptor = self.shapeDescriptorList[aKey]
if aDescriptor[SD_TYPE] == CV_TEXT:
self.createText( aDescriptor )
elif aDescriptor[SD_TYPE] == CV_LINE:
self.createLine( aDescriptor )
elif aDescriptor[SD_TYPE] == CV_BPATH:
self.createBpath( aDescriptor )
self.isSelected = False
def repaint ( self ):
self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).reCalculate()
self.shapeDescriptorList = self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptorList()
self.__sortByZOrder( self.shapeDescriptorList )
for aKey in self.shapeDescriptorList.keys():
aDescriptor = self.shapeDescriptorList[aKey]
if aDescriptor[SD_TYPE] == CV_TEXT:
self.redrawText( aDescriptor )
elif aDescriptor[SD_TYPE] == CV_LINE:
self.redrawLine( aDescriptor )
elif aDescriptor[SD_TYPE] == CV_BPATH:
self.redrawBpath( aDescriptor )
def reName( self ):
self.shapeDescriptorList = self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptorList()
self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).renameLabel( self.parentObject.getProperty( CO_NAME ) )
aDescriptor = self.shapeDescriptorList["textbox"]
self.renameText( aDescriptor )
def delete( self ):
for aShapeName in self.shapeMap.keys():
self.shapeMap[ aShapeName ].destroy()
def selected( self ):
self.isSelected = True
def unselected( self ):
self.isSelected = False
def outlineColorChanged( self ):
self.fillColorChanged()
def fillColorChanged( self ):
# find shapes with outline color
anRGB = copyValue( self.parentObject.getProperty( OB_FILL_COLOR ) )
if self.isSelected:
for i in range(0,3):
anRGB[i] = 32768 + anRGB[i]
for aKey in self.shapeDescriptorList.keys():
aDescriptor = self.shapeDescriptorList[aKey]
if aDescriptor[ SD_COLOR ] == SD_FILL:
aColor = self.graphUtils.getGdkColorByRGB( anRGB )
if aDescriptor[SD_TYPE] in CV_LINE:
self.changeLineColor( aDescriptor[ SD_NAME ] , aColor )
elif aDescriptor[SD_TYPE] in CV_BPATH:
self.changeLineColorB( aDescriptor[ SD_NAME ] , aColor )
def createBpath(self, aDescriptor):
aSpecific= aDescriptor[SD_SPECIFIC]
# get pathdef
pathdef= aSpecific[BPATH_PATHDEF]
pd = gnomecanvas.path_def_new(pathdef)
aGdkColor = self.getGdkColor( aDescriptor )
        # Note: the first argument is the Bpath, the second the width in units
        # (default 3), the third the outline colour (default black).
bpath = self.theRoot.add(gnomecanvas.CanvasBpath, width_units=3,
outline_color_gdk = aGdkColor)
bpath.set_bpath(pd)
self.addHandlers( bpath, aDescriptor[ SD_NAME ] )
self.shapeMap[ aDescriptor[ SD_NAME ] ] = bpath
    # createLine draws the simple line shape, setting its width, colour, etc.
    # It creates everything, arrowheads as well as the middle segment; but if
    # the middle segment is a bpath (e.g. a curved-line descriptor) it will
    # overwrite the middle line.
def createLine( self, aDescriptor ):
lineSpec = aDescriptor[SD_SPECIFIC]
( X1, X2, Y1, Y2 ) = [lineSpec[0], lineSpec[2], lineSpec[1], lineSpec[3] ]
aGdkColor = self.getGdkColor( aDescriptor )
firstArrow = lineSpec[4]
secondArrow = lineSpec[5]
aLine = self.theRoot.add( gnomecanvas.CanvasLine,points=[X1,Y1,X2,Y2], width_units=lineSpec[ 6 ], fill_color_gdk = aGdkColor, first_arrowhead = firstArrow, last_arrowhead = secondArrow,arrow_shape_a=5, arrow_shape_b=5, arrow_shape_c=5 )
self.addHandlers( aLine, aDescriptor[ SD_NAME ] )
self.shapeMap[ aDescriptor[ SD_NAME ] ] = aLine
def changeLineColor ( self, shapeName, aColor ):
aShape = self.shapeMap[ shapeName ]
aShape.set_property('fill_color_gdk', aColor )
def changeLineColorB ( self, shapeName, aColor ):
aShape = self.shapeMap[ shapeName ]
aShape.set_property('outline_color_gdk', aColor )
def createText( self, aDescriptor ):
textSpec = aDescriptor[SD_SPECIFIC]
(X1, Y1) = ( textSpec[TEXT_ABSX], textSpec[TEXT_ABSY] )
aGdkColor = self.getGdkColor( aDescriptor )
aText = ResizeableText( self.theRoot, self.theCanvas, X1, Y1, aGdkColor, textSpec[TEXT_TEXT], gtk.ANCHOR_NW )
self.addHandlers( aText, aDescriptor[ SD_NAME ] )
self.shapeMap[ aDescriptor[ SD_NAME ] ] = aText
def redrawLine( self, aDescriptor ):
aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ]
aSpecific = aDescriptor[ SD_SPECIFIC ]
x1 = aSpecific[0]
y1 = aSpecific[1]
x2 = aSpecific[2]
y2 = aSpecific[3]
hasFirstArrow = aSpecific[4]
hasLastArrow = aSpecific[5]
aShape.set_property( 'points', (x1, y1, x2, y2) )
aShape.set_property('first_arrowhead', hasFirstArrow )
aShape.set_property('last_arrowhead', hasLastArrow )
def redrawBpath( self, aDescriptor ):
aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ]
pathdef = aDescriptor[ SD_SPECIFIC ][BPATH_PATHDEF]
pd=gnomecanvas.path_def_new(pathdef)
aShape.set_bpath(pd)
def redrawText( self, aDescriptor ):
aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ]
aSpecific = aDescriptor[ SD_SPECIFIC ]
x = aSpecific[TEXT_ABSX]
y = aSpecific[TEXT_ABSY]
aShape.set_property( 'x', x )
aShape.set_property( 'y', y )
def renameText (self, aDescriptor ):
aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ]
aSpecific = aDescriptor[ SD_SPECIFIC ]
label = aSpecific[ TEXT_TEXT ]
aShape.set_property( 'text', label )
def getGdkColor( self, aDescriptor ):
aColorType = aDescriptor[ SD_COLOR ]
if aColorType == SD_FILL:
queryProp = OB_FILL_COLOR
elif aColorType == CV_TEXT:
queryProp = OB_TEXT_COLOR
anRGBColor = self.parentObject.getProperty( queryProp )
return self.graphUtils.getGdkColorByRGB( anRGBColor )
def __sortByZOrder ( self, desclist ):
keys = desclist.keys()
fn = lambda x, y: ( x[SD_Z] < y[SD_Z] ) - ( y[SD_Z] < x[SD_Z] )
keys.sort(fn)
def leftClick( self, shapeName, x, y, shift_pressed = False ):
# usually select
self.parentObject.doSelect( shift_pressed )
if self.getShapeDescriptor(shapeName)[SD_FUNCTION] == SD_ARROWHEAD:
self.changeCursor( shapeName, x, y, True )
def rightClick ( self, shapeName, x, y, anEvent, shift ):
# usually show menu
if not self.parentObject.isSelected:
self.parentObject.doSelect( shift )
self.parentObject.showMenu( anEvent)
def getFirstDrag(self):
return self.firstdrag
def setFirstDrag(self,aValue):
self.firstdrag=aValue
def mouseDrag( self, shapeName, deltax, deltay, origx, origy ):
# decide whether resize or move or draw arrow
if self.getShapeDescriptor(shapeName)[SD_FUNCTION] == SD_MOVINGLINE:
'''
if shapeName == SHAPE_TYPE_MULTIBCURVE_LINE:
self.parentObject.getArrowType(SHAPE_TYPE_MULTIBCURVE_LINE)
#Accessing BPATH_DEF now, the coords like above
bpathDefcheCk = self.parentObject.theSD.theDescriptorList[SHAPE_TYPE_MULTIBCURVE_LINE][SD_SPECIFIC][BPATH_PATHDEF]
self.parentObject.thePropertyMap[CO_CONTROL_POINTS] = bpathDefcheCk
bpathDefcheCk[1][1] = deltax
bpathDefcheCk[1][2] = deltay
bpathDefcheCk[1][3] = deltax
bpathDefcheCk[1][4] = deltay
bpathDefcheCk[2][1] = deltax
bpathDefcheCk[2][2] = deltay
bpathDefcheCk[2][3] = deltax
bpathDefcheCk[2][4] = deltay
#bpathDefcheCk[2][1,2,3,4] = [deltax,deltay,deltax,deltay]
'''
elif self.getShapeDescriptor(shapeName)[SD_FUNCTION] == SD_ARROWHEAD:
if not self.firstdrag:
self.firstdrag=True
self.parentObject.arrowheadDragged( shapeName,deltax, deltay, origx, origy)
def checkConnection( self ):
self.parentObject.checkConnection()
def doubleClick( self, shapeName ):
self.parentObject.popupEditor()
def getShapeDescriptor( self, shapeName ):
return self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptor( shapeName )
def addHandlers( self, canvasObject, aName ):
canvasObject.connect('event', self.rect_event, aName )
def releaseButton( self, shapeName, x, y ):
self.changeCursor( shapeName, x, y, False )
self.parentObject.mouseReleased( shapeName,x, y)
def mouseEntered( self, shapeName, x, y ):
self.changeCursor( shapeName, x, y )
def changeCursor( self, shapeName, x, y, buttonpressed = False):
aFunction = self.getShapeDescriptor(shapeName)[SD_FUNCTION]
aCursorType = self.parentObject.getCursorType( aFunction, x, y , buttonpressed)
self.theCanvas.setCursor( aCursorType )
def rect_event( self, *args ):
event = args[1]
item = args[0]
shapeName = args[2]
if event.type == gtk.gdk.BUTTON_PRESS:
            if (event.state & gtk.gdk.SHIFT_MASK) == gtk.gdk.SHIFT_MASK:
shift_press = True
else:
shift_press = False
if event.button == 1:
self.lastmousex = event.x
self.lastmousey = event.y
self.buttonpressed = True
self.leftClick( shapeName, event.x, event.y, shift_press )
elif event.button == 3:
self.rightClick(shapeName, event.x, event.y, event, shift_press )
elif event.type == gtk.gdk.BUTTON_RELEASE:
if event.button == 1:
self.buttonpressed = False
self.releaseButton(shapeName, event.x, event.y )
elif event.type == gtk.gdk.MOTION_NOTIFY:
            self.buttonpressed = (event.state & gtk.gdk.BUTTON1_MASK) > 0
if not self.buttonpressed:
return
oldx = self.lastmousex
oldy = self.lastmousey
deltax = event.x - oldx
deltay = event.y - oldy
self.lastmousex = event.x
self.lastmousey = event.y
self.mouseDrag( shapeName, deltax, deltay, oldx, oldy )
elif event.type == gtk.gdk._2BUTTON_PRESS:
if event.button == 1:
self.doubleClick( shapeName )
elif event.type == gtk.gdk.ENTER_NOTIFY:
self.mouseEntered( shapeName, event.x, event.y )
|
import base64
import struct
__version__ = "0.23"
SBP_PREAMBLE = 0x55
crc16_tab = [0x0000,0x1021,0x2042,0x3063,0x4084,0x50a5,0x60c6,0x70e7,
0x8108,0x9129,0xa14a,0xb16b,0xc18c,0xd1ad,0xe1ce,0xf1ef,
0x1231,0x0210,0x3273,0x2252,0x52b5,0x4294,0x72f7,0x62d6,
0x9339,0x8318,0xb37b,0xa35a,0xd3bd,0xc39c,0xf3ff,0xe3de,
0x2462,0x3443,0x0420,0x1401,0x64e6,0x74c7,0x44a4,0x5485,
0xa56a,0xb54b,0x8528,0x9509,0xe5ee,0xf5cf,0xc5ac,0xd58d,
0x3653,0x2672,0x1611,0x0630,0x76d7,0x66f6,0x5695,0x46b4,
0xb75b,0xa77a,0x9719,0x8738,0xf7df,0xe7fe,0xd79d,0xc7bc,
0x48c4,0x58e5,0x6886,0x78a7,0x0840,0x1861,0x2802,0x3823,
0xc9cc,0xd9ed,0xe98e,0xf9af,0x8948,0x9969,0xa90a,0xb92b,
0x5af5,0x4ad4,0x7ab7,0x6a96,0x1a71,0x0a50,0x3a33,0x2a12,
0xdbfd,0xcbdc,0xfbbf,0xeb9e,0x9b79,0x8b58,0xbb3b,0xab1a,
0x6ca6,0x7c87,0x4ce4,0x5cc5,0x2c22,0x3c03,0x0c60,0x1c41,
0xedae,0xfd8f,0xcdec,0xddcd,0xad2a,0xbd0b,0x8d68,0x9d49,
0x7e97,0x6eb6,0x5ed5,0x4ef4,0x3e13,0x2e32,0x1e51,0x0e70,
0xff9f,0xefbe,0xdfdd,0xcffc,0xbf1b,0xaf3a,0x9f59,0x8f78,
0x9188,0x81a9,0xb1ca,0xa1eb,0xd10c,0xc12d,0xf14e,0xe16f,
0x1080,0x00a1,0x30c2,0x20e3,0x5004,0x4025,0x7046,0x6067,
0x83b9,0x9398,0xa3fb,0xb3da,0xc33d,0xd31c,0xe37f,0xf35e,
0x02b1,0x1290,0x22f3,0x32d2,0x4235,0x5214,0x6277,0x7256,
0xb5ea,0xa5cb,0x95a8,0x8589,0xf56e,0xe54f,0xd52c,0xc50d,
0x34e2,0x24c3,0x14a0,0x0481,0x7466,0x6447,0x5424,0x4405,
0xa7db,0xb7fa,0x8799,0x97b8,0xe75f,0xf77e,0xc71d,0xd73c,
0x26d3,0x36f2,0x0691,0x16b0,0x6657,0x7676,0x4615,0x5634,
0xd94c,0xc96d,0xf90e,0xe92f,0x99c8,0x89e9,0xb98a,0xa9ab,
0x5844,0x4865,0x7806,0x6827,0x18c0,0x08e1,0x3882,0x28a3,
0xcb7d,0xdb5c,0xeb3f,0xfb1e,0x8bf9,0x9bd8,0xabbb,0xbb9a,
0x4a75,0x5a54,0x6a37,0x7a16,0x0af1,0x1ad0,0x2ab3,0x3a92,
0xfd2e,0xed0f,0xdd6c,0xcd4d,0xbdaa,0xad8b,0x9de8,0x8dc9,
0x7c26,0x6c07,0x5c64,0x4c45,0x3ca2,0x2c83,0x1ce0,0x0cc1,
0xef1f,0xff3e,0xcf5d,0xdf7c,0xaf9b,0xbfba,0x8fd9,0x9ff8,
0x6e17,0x7e36,0x4e55,0x5e74,0x2e93,0x3eb2,0x0ed1,0x1ef0]
def crc16(s, crc=0):
"""CRC16 implementation acording to CCITT standards.
"""
for ch in s:
crc = ((crc<<8)&0xFFFF) ^ crc16_tab[ ((crc>>8)&0xFF) ^ (ord(ch)&0xFF) ]
crc &= 0xFFFF
return crc
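# Sanity check against the standard CRC-16/XMODEM test vector:
#   crc16('123456789') == 0x31c3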
class SBP(object):
"""Swift Binary Protocol container.
"""
def __init__(self, msg_type=None, sender=None,
length=None, payload=None, crc=None):
self.preamble = SBP_PREAMBLE
self.msg_type = msg_type
self.sender = sender
self.length = length
self.payload = payload
self.crc = crc
def __eq__(self, other):
return self.__dict__ == other.__dict__
def pack(self):
"""Pack to framed binary message.
"""
framed_msg = struct.pack('<BHHB',
self.preamble,
self.msg_type,
self.sender,
len(self.payload))
framed_msg += self.payload
crc = crc16(framed_msg[1:], 0)
framed_msg += struct.pack('<H', crc)
return framed_msg
def __repr__(self):
p = (self.preamble, self.msg_type, self.sender, self.length,
self.payload, self.crc)
fmt = "<SBP (preamble=0x%X, msg_type=0x%X, sender=%s, length=%d, payload=%s, crc=0x%X)>"
return fmt % p
@staticmethod
def from_json_dict(data):
msg_type = data['msg_type']
sender = data['sender']
length = data['length']
payload = base64.standard_b64decode(data['payload'])
crc = data['crc']
return SBP(msg_type, sender, length, payload, crc)
def to_json_dict(self):
return {'preamble': self.preamble,
'msg_type': self.msg_type,
'sender': self.sender,
'length': self.length,
'payload': base64.standard_b64encode(self.payload),
'crc': self.crc}
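# Editor's usage sketch (hedged; msg_type and sender are arbitrary illustrative
# values, not real SBP message IDs):
#
#     msg = SBP(msg_type=0x10, sender=0x42, payload='\x01\x02')
#     framed = msg.pack()                  # preamble + header + payload + CRC16
#     as_dict = msg.to_json_dict()         # payload is Base64-encoded
#     again = SBP.from_json_dict(as_dict)  # decodes the payload back to bytes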
|
from time import sleep
from NaoCommunication import *
nao = NaoControle(Nao())
# Cycle the 16 head LEDs: even indices get a varying colour, odd indices solid red
# (reglerCouleur = set colour, eteindreLed = LED off, allumerLed = LED on).
for a in range(16):
    if a % 2 == 0:
        nao.reglerCouleur(a, a * 15, 50, 50)
    else:
        nao.reglerCouleur(a, 255, 0, 0)
    sleep(0.1)
for a in range(15, -1, -1):
    nao.eteindreLed(a)
    sleep(0.1)
for a in range(15, -1, -1):
    nao.allumerLed(a)
    sleep(0.1)
for a in range(0, 16, 1):
    nao.eteindreLed(a)
    sleep(0.1)
|
from gnss_analysis.runner import run as single_run
import pandas as pd
import numpy as np
def main():
import argparse
parser = argparse.ArgumentParser(description='RTK Filter SITL tests.')
parser.add_argument('infile', help='Specify the HDF5 file to use for input.')
parser.add_argument('outfile', help='Specify the HDF5 file to output into.')
parser.add_argument('baselineX', help='The baseline north component.')
parser.add_argument('baselineY', help='The baseline east component.')
parser.add_argument('baselineZ', help='The baseline down component.')
parser.add_argument('--NED', action='store_true')
    parser.add_argument('-k', '--key',
                        default='table',
                        help='The key for the output table to insert into.')
    parser.add_argument('-r', '--row',
                        default=None,
                        help='The row label to insert the results under.')
args = parser.parse_args()
hdf5_filename_in = args.infile
hdf5_filename_out = args.outfile
baselineX = args.baselineX
baselineY = args.baselineY
baselineZ = args.baselineZ
    baseline = np.array([float(x) for x in [baselineX, baselineY, baselineZ]])
out_key = args.key
row = args.row
if row is None:
row = hdf5_filename_in
reports = single_run(hdf5_filename_in, baseline, baseline_is_NED=args.NED)
out_store = pd.HDFStore(hdf5_filename_out)
if ('/' + out_key) in out_store.keys():
out_df = out_store[out_key]
else:
out_df = pd.DataFrame()
new_cols = [col for col in reports.keys() if col not in out_df.columns]
for new_col in new_cols:
out_df[new_col] = pd.Series(np.nan * np.empty_like(out_df.index),
index=out_df.index)
out_df.loc[row] = pd.Series(reports)
out_store[out_key] = out_df
out_store.close()
if __name__ == "__main__":
main()
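# Editor's note, example invocation (hedged; the script and file names are
# hypothetical):
#
#     python run_filter_sitl.py input.hdf5 results.hdf5 10.0 5.0 0.0 --NED -k table
#
# This appends one row of report values (keyed by the input file name unless
# -r/--row is given) to the 'table' DataFrame stored in results.hdf5.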
|
"""@brief Lightweight pure-Python neural network library.
@file neuralnet.py
@package pybooster.neuralnet
@version 2019.12.23
@author Devyn Collier Johnson <DevynCJohnson@Gmail.com>
@copyright LGPLv3
@section DESCRIPTION
@code{.py}
from pybooster.neuralnet import NeuroCode
data = [ # The input and output of an XOR gate
([0, 0], [0]), # The first list in the tuple represents the input(s)
([0, 1], [1]), # The last list in the tuple represents the output(s)
([1, 0], [1]),
([1, 1], [0])
] # Provide sample input and expected output
net = NeuroCode(
data, # The data table created above
    layers = [4, 3], # Number of nodes in each hidden layer (between input and output)
iterations = 40000, # Maximum training iterations
rate = 0.1 # Learning rate
)
net.train() # Returns (error, iterations)
output = net.run([1, 0]) # Execute neuralnet
net.writedump(r'xor_code.py') # Save the compressed, pickled network
net.neurocode2cfile(r'neural_xor.c', r'neural_xor') # Save the generated code as plain C code
net.neurocode2javafile(r'neural_xor.java', r'neural_xor') # Save the generated code as plain Java code
net.neurocode2pythonfile(r'neural_xor.py', r'neural_xor') # Save the generated code as plain Python code
@endcode
@section LICENSE
GNU Lesser General Public License v3
Copyright (c) Devyn Collier Johnson, All rights reserved.
This software is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software.
"""
from base64 import b64decode, b64encode
from math import exp, floor
from pickle import dumps, loads # nosec
from random import Random
from typing import Any, Dict, Generator, List, Tuple
from zlib import compress, decompress
__all__: list = [
r'flatten',
r'NeuroCode'
]
def flatten(_lst: list) -> Generator[list, None, None]:
"""Flatten list of lists."""
for _sublist in _lst:
if isinstance(_sublist, list):
            for _item in flatten(_sublist):
                yield _item
else:
yield _sublist
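# Quick illustration (editor's note): list(flatten([1, [2, [3, 4]], 5])) == [1, 2, 3, 4, 5]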
def _indent(txt: str, chars: int) -> str:
"""Indent the given code."""
result: str = r''
d: str = r' ' * chars
for line in txt.split('\n'):
result += (d + line + '\n')
return result
class NeuroCode: # pylint: disable=C0200,R0902
"""Neurocode class."""
def __init__(self, data: list, layers: list, iterations: int = 40000, rate: float = 0.2) -> None:
"""Initialize Neurocode-learning.
@param[in] data A list of lists of the input data
@param[in] layers Specify the number of hidden layers in the network and the size of each layer. For example, `layers = [3, 4]` makes two hidden layers, the first with 3 nodes and the second with 4 nodes. By default, one hidden layer is used with a size proportionate to the size of the input array
@param[in] iterations Number of times to run the training
@param[in] rate Learning rate (float less than 1.0)
"""
# Setup input data
input_size: int = len(data[0][0])
output_size: int = len(data[0][1])
# Settings
self.hidden_layers = [max(3, int(floor(input_size / 2)))] if not layers else layers
self.sizes: List[Any] = list(flatten([input_size, self.hidden_layers, output_size]))
self.iterations: int = iterations
self.rate: float = rate if rate < 1.0 else 0.4
self.io_rules: list = data
self.io_rules_len: int = len(data)
self.outputlayer: int = len(self.sizes) - 1
self.error_threshold: float = 0.0001
neural_rand = Random()
# Training State
self.deltas: List[Any] = [[]] * (self.outputlayer + 1)
self.changes: List[Any] = [[]] * (self.outputlayer + 1)
self.errors: List[Any] = [[]] * (self.outputlayer + 1)
self.outputs: List[Any] = [[]] * (self.outputlayer + 1)
self.biases: List[Any] = [[]] * (self.outputlayer + 1)
self.weights: List[Any] = [[]] * (self.outputlayer + 1)
for layer in range(self.outputlayer + 1):
_size = self.sizes[layer]
self.deltas[layer] = [0] * _size
self.errors[layer] = [0] * _size
self.outputs[layer] = [0] * _size
if layer > 0:
self.biases[layer] = [(neural_rand.random() * 0.4) - 0.2 for i in range(_size)]
                self.weights[layer] = [0] * _size
                self.changes[layer] = [0] * _size  # must be a separate list: aliasing weights would let the zero-fill below clobber the random weights
for node in range(_size):
_prev_size = self.sizes[layer - 1]
self.weights[layer][node] = [(neural_rand.random() * 0.4) - 0.2 for j in range(_prev_size)]
self.changes[layer][node] = [0] * _prev_size
def train(self) -> Tuple[float, int]: # noqa: C901
"""Neurocode training (core function)."""
error: float = 1.0
used_iterations: int = 0
for i in range(self.iterations):
used_iterations = i
if error <= self.error_threshold: # Error Threshold
break
_sum = 0.0
for d in self.io_rules:
self.run(d[0])
self._calculate_deltas(d[1])
# Adjust Weights
for _layer in range(1, self.outputlayer + 1):
incoming = self.outputs[_layer - 1]
for _node in range(self.sizes[_layer]):
delta = self.deltas[_layer][_node]
for k in range(len(incoming)):
change = (self.rate * delta * incoming[k]) + (0.1 * self.changes[_layer][_node][k]) # 0.1 = momentum
self.changes[_layer][_node][k] = change
self.weights[_layer][_node][k] = change + self.weights[_layer][_node][k]
self.biases[_layer][_node] = self.biases[_layer][_node] + (self.rate * delta)
_errsum = 0.0
for err in self.errors[self.outputlayer]:
_errsum += err ** 2.0
_sum += _errsum / len(self.errors[self.outputlayer])
error = _sum / self.io_rules_len
return (error, used_iterations)
def run(self, _input: List[Any]) -> list:
"""Forward Propagation; Execute neuralnet."""
output = self.outputs[0] = _input # Set output state of input layer
for _layer in range(1, self.outputlayer + 1):
for _node in range(self.sizes[_layer]):
weights = self.weights[_layer][_node]
_sum = self.biases[_layer][_node]
for k in range(len(weights)):
_sum += weights[k] * _input[k]
self.outputs[_layer][_node] = 1.0 / (1.0 + exp(-_sum))
_input = self.outputs[_layer]
output = _input
return output
def _calculate_deltas(self, target: list) -> None:
"""Backward Propagation."""
layer: int = self.outputlayer
while layer >= 0:
for node in range(self.sizes[layer]):
output = self.outputs[layer][node]
if layer == self.outputlayer:
error = target[node] - output
else:
deltas = self.deltas[layer + 1]
error = 0.0
for k in range(len(deltas)):
error += (deltas[k] * self.weights[layer + 1][k][node])
self.errors[layer][node] = error
self.deltas[layer][node] = (error * output) * (1 - output)
layer -= 1
def bestof(self, generations: int = 16) -> bytes:
"""Return the best neuralnet from the given amount produced as a byte string."""
rounds: int = generations
best_result: float = 1.0 # Store the best error-rate
best_neuralnet: bytes = b''
while rounds != 0:
result = self.train()
if result[0] < best_result:
best_result = result[0]
best_neuralnet = self.dump()
rounds -= 1
return best_neuralnet
    def dump(self) -> bytes:
        """Pickle neural-network and compress it using Zlib (URL-safe Base64 alphabet, matching `load`)."""
        return b64encode(compress(dumps(self), 9), altchars=br'-_')
def writedump(self, _filename: str) -> None:
"""Pickle neural-network, compress it using Zlib, and then write it to a file."""
with open(_filename, mode=r'wt', encoding=r'utf-8') as _file:
_file.write(str(b64encode(compress(dumps(self), 9), altchars=br'-_'), encoding=r'utf-8'))
def neurocode2pythonfile(self, _filename: str, _neuroname: str) -> None:
"""Write the Neurocode to a file as Python code."""
with open(_filename, mode=r'wt', encoding=r'utf-8') as _code:
_code.write(self.to_python_function(_neuroname))
def neurocode2cfile(self, _filename: str, _neuroname: str) -> None:
"""Write the Neurocode to a file as C code."""
with open(_filename, mode=r'wt', encoding=r'utf-8') as _code:
_code.write(self.to_c_function(_neuroname))
def neurocode2javafile(self, _filename: str, _neuroname: str) -> None:
"""Write the Neurocode to a file as Java code."""
with open(_filename, mode=r'wt', encoding=r'utf-8') as _code:
_code.write(self.to_java_method(_neuroname))
@staticmethod
def load(_str: str) -> object:
"""Load the given compressed+pickled neural-network."""
return loads(decompress(b64decode(bytes(_str, encoding=r'utf-8'), altchars=br'-_')))
def to_python_function(self, fnname: str = r'nn_run', indent: int = 0) -> str:
"""Convert the neural-network to Python code."""
        fn: str = f'def {fnname}(i):\n'  # not a raw string: \n must be a real newline
for _layer in range(1, self.outputlayer + 1):
fn += ' o = [\n' if _layer < self.outputlayer else ' return [\n'
size = self.sizes[_layer]
for n in range(size):
term: str = fr'{-self.biases[_layer][n]}'
length = len(self.weights[_layer][n])
for k in range(length):
w = self.weights[_layer][n][k]
term += (r'-' if w > 0 else r'+') + fr'{abs(w)} * i[{k}]'
fn += fr' 1 / (1 + math.exp({term}))' + (',\n' if n != size - 1 else '\n')
fn += ' ]\n'
if _layer != self.outputlayer:
fn += ' i = o\n'
return _indent(fn, indent)
def to_java_method(self, fnname: str = r'nn_run', static: bool = False, scope: str = r'protected', indent: int = 4) -> str:
"""Convert the neural-network to Java code."""
        fn: str = scope + (r' static ' if static else r' ') + f'double[] {fnname}(double[] i){{\n'  # not raw: \n must be a real newline
fn += ' double[] o;\n'
for _layer in range(1, self.outputlayer + 1):
fn += ' o = new double[]{\n' if _layer < self.outputlayer else ' return new double[]{\n'
size = self.sizes[_layer]
for n in range(size):
term: str = fr'{-self.biases[_layer][n]}'
length = len(self.weights[_layer][n])
for k in range(length):
w = self.weights[_layer][n][k]
term += (r'-' if w > 0 else r'+') + fr'{abs(w)} * i[{k}]'
fn += fr' 1 / (1 + Math.exp({term}))' + (',\n' if n != size - 1 else '\n')
fn += ' };\n'
if _layer != self.outputlayer:
fn += ' i = o;\n'
fn += r'}'
return _indent(fn, indent)
def to_c_function(self, fnname: str = r'nn_run', indent: int = 0) -> str: # pylint: disable=R0914
"""Convert the neural-network to C code."""
terms: Dict[str, str] = {}
lterms: List[str] = []
for k in range(self.sizes[0]):
lterms.append(fr'o0_{k}')
terms[lterms[-1]] = fr'i[{k}]'
oterms: dict = {}
for _layer in range(1, self.outputlayer + 1):
for n in range(self.sizes[_layer]):
term: str = fr'{-self.biases[_layer][n]}'
for k in range(len(self.weights[_layer][n])):
w = self.weights[_layer][n][k]
term += (r'-' if w > 0 else r'+') + fr'{abs(w)} * o{_layer - 1}_{k}'
v = fr'(1.0 / (1.0 + exp({term})))'
for _str in lterms:
v = v.replace(_str, terms[_str])
lterms.append(fr'o{_layer}_{n}')
terms[lterms[-1]] = v
if _layer == self.outputlayer:
oterms[fr'o{_layer}_{n}'] = fr'o[{n}]'
del k, lterms
        fn: str = f'void {fnname}(double* i, double* o){{\n'  # not raw: \n must be a real newline
for _str, v in oterms.items():
fn += f' {v} = {terms[_str]};\n'
fn += '}\n'
return _indent(fn, indent)
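# Editor's round-trip sketch (hedged; assumes the XOR `data` table from the
# module docstring above):
#
#     net = NeuroCode(data, layers=[4, 3])
#     net.train()
#     blob = net.dump()                                   # zlib-compressed, Base64-encoded pickle
#     clone = NeuroCode.load(str(blob, encoding='utf-8'))
#     assert clone.run([1, 0]) == net.run([1, 0])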
|
'''
Created on 05.11.2013
@author: gena
'''
from __future__ import print_function
from PyQt4 import QtCore
from escore.plate import Plate
from escore.approximations import indexByName
class PlateRecord(object):
def __init__(self, plate, name,path):
self.plate=plate
self.name=name
self.path=path
class PlateManager(QtCore.QObject):
'''
PlateManager holds all plates, and handles related actions,
such as plate open,save,close,select, etc
'''
signalPlateListUpdated=QtCore.pyqtSignal(QtCore.QStringList)
signalCurrentPlateSet=QtCore.pyqtSignal(object)
signalCurrentIndexChanged=QtCore.pyqtSignal(int)
signalApproximationSelected = QtCore.pyqtSignal(int)
def __init__(self, parent=None):
super(PlateManager, self).__init__(parent)
self.plates=[]
self.currentPlateIndex = -1
self.defaultApproximationIndex=0
def getFileInfo(self,fileName):
fileInfo=QtCore.QFileInfo(fileName)
return fileInfo.baseName(), fileInfo.dir()
def openPlate(self, fileName):
plates = Plate.loadFromFile(fileName)
for number,plate in enumerate(plates):
plate.setParent(self)
if plate.approximation is None:
print("set default approximation for plate",self.defaultApproximationIndex)
plate.setApproximation(self.defaultApproximationIndex)
name,path = self.getFileInfo(fileName)
if len(plates)>1:
name+='_'+str(number+1)
plateRecord=PlateRecord(plate,name,path)
self.plates.append(plateRecord)
plate.signalApplyReference.connect(self.applyReference)
self.signalPlateListUpdated.emit(self.names())
if not self.isEmpty():
self.setCurrentPlate(0)
def setApproximation(self, index):
if self.defaultApproximationIndex==index:
return
self.defaultApproximationIndex=index
if self.currentPlateIndex >= 0 :
self.plates[self.currentPlateIndex].plate.setApproximation(index)
self.signalApproximationSelected.emit(index)
def openPlates(self, fileNameList):
for fileName in fileNameList :
self.openPlate(fileName)
def savePlateAs(self,fileName):
if self.currentPlateIndex < 0 :
return
plateRecord=self.plates[self.currentPlateIndex]
plateRecord.plate.saveToFile(fileName)
plateRecord.name,plateRecord.path = self.getFileInfo(fileName)
self.signalPlateListUpdated.emit(self.names())
def savePlateWithDefaultName(self, index):
plateRecord=self.plates[index]
fileInfo=QtCore.QFileInfo(plateRecord.path,plateRecord.name+'.csv')
plateRecord.plate.saveToFile(fileInfo.filePath())
def savePlate(self):
if self.currentPlateIndex < 0 :
return
self.savePlateWithDefaultName(self.currentPlateIndex)
def saveAllPlates(self):
for index in range(len(self.plates)):
self.savePlateWithDefaultName(index)
def removePlate(self):
if self.currentPlateIndex < 0 :
return
self.signalCurrentPlateSet.emit(None)
self.plates[self.currentPlateIndex].plate.signalApplyReference.disconnect()
del self.plates[self.currentPlateIndex]
self.signalPlateListUpdated.emit(self.names())
if not self.isEmpty():
self.setCurrentPlate(0)
def isDirty(self):
return self.plates[self.currentPlateIndex].plate.dirty
def isEmpty(self):
return self.plates == []
def names(self):
return QtCore.QStringList([QtCore.QString(record.name) for record in self.plates])
def setCurrentPlate(self, index):
if self.currentPlateIndex == index :
return
self.currentPlateIndex = index
if index >= 0:
plate = self.plates[index].plate
appindex= indexByName(plate.approximation.name)
self.defaultApproximationIndex = appindex
self.signalApproximationSelected.emit(appindex)
else :
plate = None
self.signalCurrentIndexChanged.emit(self.currentPlateIndex)
self.signalCurrentPlateSet.emit(plate)
def applyReference(self, reference):
print('Applying reference to all plates')
sender = self.sender()
for plateRecord in self.plates:
plate = plateRecord.plate
            if plate is not sender:
plate.setReference(reference)
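# Editor's usage sketch (hedged; assumes a running Qt event loop and existing
# plate CSV files):
#
#     manager = PlateManager()
#     manager.signalPlateListUpdated.connect(lambda names: print(list(names)))
#     manager.openPlates(['plate1.csv', 'plate2.csv'])
#     manager.saveAllPlates()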
|
"""\
"""
__revision__ = "$Id: mypydoc.py,v 1.9 2009/10/07 20:52:24 rliebscher Exp $"
import sys, inspect
from string import join, split, strip
import pydoc
from pydoc import visiblename, pkgutil, getdoc, isdata
class MyHTMLDoc(pydoc.HTMLDoc):
"""Formatter class for HTML documentation."""
def filelink(self, url, path):
"""Create link to source file."""
return '<a href="file:%s">%s</a>' % (url, path)
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = split(name, '.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
(join(parts[:i+1], '.'), parts[i]))
linkedname = join(links + parts[-1:], '.')
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
# modified
filelink = self.filelink(url, path)
# end modified
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(str(object.__date__)))
if info:
head = head + ' (%s)' % join(info, ', ')
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda (key, value), s=self: s.modulelink(value))
result = result + self.bigsection(
                'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = map(lambda (key, value): value, classes)
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
if hasattr(object, '__author__'):
contents = self.markup(str(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(str(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
pydoc.html = MyHTMLDoc()
if __name__ == '__main__': pydoc.cli()
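# Editor's note (hedged): because the module-level pydoc.html formatter is
# replaced above, the standard pydoc CLI picks up the file links, e.g.
#
#     python mypydoc.py -w some_module    # writes some_module.html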
|
from django.contrib.sites.models import Site
from django.conf import settings
def get_site_url(request, slash=False):
domain = Site.objects.get_current().domain
protocol = 'https' if request.is_secure() else 'http'
root = "%s://%s" % (protocol, domain)
if slash:
root += '/'
return root
def absolute(request):
urls = {
'ABSOLUTE_ROOT': request.build_absolute_uri('/')[:-1],
'ABSOLUTE_ROOT_URL': request.build_absolute_uri('/'),
}
if 'django.contrib.sites' in settings.INSTALLED_APPS:
urls['SITE_ROOT'] = get_site_url(request)
urls['SITE_ROOT_URL'] = get_site_url(request, True)
return urls
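# Editor's configuration sketch (hedged; the dotted path assumes this module
# lives at myproject/context_processors.py). On recent Django versions the
# processor is registered in settings.py:
#
#     TEMPLATES = [{
#         'BACKEND': 'django.template.backends.django.DjangoTemplates',
#         'OPTIONS': {
#             'context_processors': [
#                 'django.template.context_processors.request',
#                 'myproject.context_processors.absolute',
#             ],
#         },
#     }]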
|
from hpp.corbaserver.rbprm import Client as RbprmClient
from hpp.corbaserver import Client as BasicClient
from hpp import Transform  # used by getObjectPosition below (import path assumed, as in hpp.corbaserver.robot)
import hpp.gepetto.blender.exportmotion as em
class CorbaClient:
"""
Container for corba clients to various interfaces.
"""
def __init__ (self):
self.basic = BasicClient ()
self.rbprm = RbprmClient ()
class Builder (object):
## Constructor
def __init__ (self, load = True):
self.tf_root = "base_link"
self.rootJointType = dict()
self.client = CorbaClient ()
self.load = load
## Virtual function to load the robot model.
#
# \param urdfName urdf description of the robot trunk,
# \param urdfNameroms either a string, or an array of strings, indicating the urdf of the different roms to add.
# \param rootJointType type of root joint among ("freeflyer", "planar",
# "anchor"),
# \param meshPackageName name of the meshpackage from where the robot mesh will be loaded
# \param packageName name of the package from where the robot will be loaded
# \param urdfSuffix optional suffix for the urdf of the robot package
# \param srdfSuffix optional suffix for the srdf of the robot package
def loadModel (self, urdfName, urdfNameroms, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix):
if(isinstance(urdfNameroms, list)):
for urdfNamerom in urdfNameroms:
self.client.rbprm.rbprm.loadRobotRomModel(urdfNamerom, rootJointType, packageName, urdfNamerom, urdfSuffix, srdfSuffix)
else:
self.client.rbprm.rbprm.loadRobotRomModel(urdfNameroms, rootJointType, packageName, urdfNameroms, urdfSuffix, srdfSuffix)
self.client.rbprm.rbprm.loadRobotCompleteModel(urdfName, rootJointType, packageName, urdfName, urdfSuffix, srdfSuffix)
self.name = urdfName
self.displayName = urdfName
self.tf_root = "base_link"
self.rootJointType = rootJointType
self.jointNames = self.client.basic.robot.getJointNames ()
self.allJointNames = self.client.basic.robot.getAllJointNames ()
self.client.basic.robot.meshPackageName = meshPackageName
self.meshPackageName = meshPackageName
self.rankInConfiguration = dict ()
self.rankInVelocity = dict ()
self.packageName = packageName
self.urdfName = urdfName
self.urdfSuffix = urdfSuffix
self.srdfSuffix = srdfSuffix
rankInConfiguration = rankInVelocity = 0
for j in self.jointNames:
self.rankInConfiguration [j] = rankInConfiguration
rankInConfiguration += self.client.basic.robot.getJointConfigSize (j)
self.rankInVelocity [j] = rankInVelocity
rankInVelocity += self.client.basic.robot.getJointNumberDof (j)
## Init RbprmShooter
#
def initshooter (self):
return self.client.rbprm.rbprm.initshooter ()
## Sets limits on robot orientation, described according to Euler's ZYX rotation order
#
# \param bounds 6D vector with the lower and upperBound for each rotation axis in sequence
def boundSO3 (self, bounds):
return self.client.rbprm.rbprm.boundSO3 (bounds)
## Specifies a preferred affordance for a given rom.
# This constrains the planner to accept a rom configuration only if
# it collides with a surface the normal of which has these properties.
#
    # \param rom name of the rom,
# \param affordances list of affordance names
def setAffordanceFilter (self, rom, affordances):
return self.client.rbprm.rbprm.setAffordanceFilter (rom, affordances)
## Specifies a rom constraint for the planner.
# A configuration will be valid if and only if the considered rom collides
# with the environment.
#
# \param romFilter array of roms indicated by name, which determine the constraint.
def setFilter (self, romFilter):
return self.client.rbprm.rbprm.setFilter (romFilter)
## Export a computed path for blender
#
# \param problem the problem associated with the path computed for the robot
# \param stepsize increment along the path
    # \param pathId id of the considered path
# \param filename name of the output file where to save the output
def exportPath (self, viewer, problem, pathId, stepsize, filename):
em.exportPath(viewer, self.client.basic.robot, problem, pathId, stepsize, filename)
## \name Degrees of freedom
# \{
## Get size of configuration
# \return size of configuration
def getConfigSize (self):
return self.client.basic.robot.getConfigSize ()
# Get size of velocity
# \return size of velocity
def getNumberDof (self):
return self.client.basic.robot.getNumberDof ()
## \}
## \name Joints
#\{
## Get joint names in the same order as in the configuration.
def getJointNames (self):
return self.client.basic.robot.getJointNames ()
## Get joint names in the same order as in the configuration.
def getAllJointNames (self):
return self.client.basic.robot.getAllJointNames ()
## Get joint position.
def getJointPosition (self, jointName):
return self.client.basic.robot.getJointPosition (jointName)
## Set static position of joint in its parent frame
def setJointPosition (self, jointName, position):
return self.client.basic.robot.setJointPosition (jointName, position)
## Get joint number degrees of freedom.
def getJointNumberDof (self, jointName):
return self.client.basic.robot.getJointNumberDof (jointName)
## Get joint number config size.
def getJointConfigSize (self, jointName):
return self.client.basic.robot.getJointConfigSize (jointName)
## set bounds for the joint
def setJointBounds (self, jointName, inJointBound):
return self.client.basic.robot.setJointBounds (jointName, inJointBound)
## Set bounds on the translation part of the freeflyer joint.
#
# Valid only if the robot has a freeflyer joint.
def setTranslationBounds (self, xmin, xmax, ymin, ymax, zmin, zmax):
self.client.basic.robot.setJointBounds \
(self.displayName + "base_joint_x", [xmin, xmax])
self.client.basic.robot.setJointBounds \
(self.displayName + "base_joint_y", [ymin, ymax])
self.client.basic.robot.setJointBounds \
(self.displayName + "base_joint_z", [zmin, zmax])
## Get link position in joint frame
#
# Joints are oriented in a different way as in urdf standard since
# rotation and uni-dimensional translation joints act around or along
# their x-axis. This method returns the position of the urdf link in
# world frame.
#
# \param jointName name of the joint
# \return position of the link in world frame.
def getLinkPosition (self, jointName):
return self.client.basic.robot.getLinkPosition (jointName)
## Get link name
#
# \param jointName name of the joint,
# \return name of the link.
def getLinkName (self, jointName):
return self.client.basic.robot.getLinkName (jointName)
## \}
## \name Access to current configuration
#\{
## Set current configuration of composite robot
#
# \param q configuration of the composite robot
def setCurrentConfig (self, q):
self.client.basic.robot.setCurrentConfig (q)
## Get current configuration of composite robot
#
# \return configuration of the composite robot
def getCurrentConfig (self):
return self.client.basic.robot.getCurrentConfig ()
## Shoot random configuration
# \return dofArray Array of degrees of freedom.
def shootRandomConfig(self):
return self.client.basic.robot.shootRandomConfig ()
## \}
## \name Bodies
# \{
## Get the list of objects attached to a joint.
# \param inJointName name of the joint.
# \return list of names of CollisionObject attached to the body.
def getJointInnerObjects (self, jointName):
return self.client.basic.robot.getJointInnerObjects (jointName)
## Get list of collision objects tested with the body attached to a joint
# \param inJointName name of the joint.
# \return list of names of CollisionObject
def getJointOuterObjects (self, jointName):
return self.client.basic.robot.getJointOuterObjects (jointName)
## Get position of robot object
# \param objectName name of the object.
# \return transformation as a hpp.Transform object
def getObjectPosition (self, objectName):
return Transform (self.client.basic.robot.getObjectPosition
(objectName))
## \brief Remove an obstacle from outer objects of a joint body
#
# \param objectName name of the object to remove,
# \param jointName name of the joint owning the body,
# \param collision whether collision with object should be computed,
# \param distance whether distance to object should be computed.
def removeObstacleFromJoint (self, objectName, jointName, collision,
distance):
return self.client.basic.obstacle.removeObstacleFromJoint \
(objectName, jointName, collision, distance)
## \}
## \name Collision checking and distance computation
# \{
## Test collision with obstacles and auto-collision.
#
# Check whether current configuration of robot is valid by calling
# CkwsDevice::collisionTest ().
# \return whether configuration is valid
# \note Deprecated. Use isConfigValid instead.
def collisionTest (self):
print "Deprecated. Use isConfigValid instead"
return self.client.basic.robot.collisionTest ()
## Check the validity of a configuration.
#
# Check whether a configuration of robot is valid.
# \param cfg a configuration
# \return whether configuration is valid
def isConfigValid (self, cfg):
return self.client.basic.robot.isConfigValid (cfg)
## Compute distances between bodies and obstacles
#
# \return list of distances,
# \return names of the objects belonging to a body
# \return names of the objects tested with inner objects,
# \return closest points on the body,
# \return closest points on the obstacles
# \note outer objects for a body can also be inner objects of another
# body.
def distancesToCollision (self):
return self.client.basic.robot.distancesToCollision ()
## \}
## \}
## \name Mass and inertia
# \{
## Get mass of robot
def getMass (self):
return self.client.basic.robot.getMass ()
## Get position of center of mass
def getCenterOfMass (self):
return self.client.basic.robot.getCenterOfMass ()
## Get Jacobian of the center of mass
def getJacobianCenterOfMass (self):
return self.client.basic.robot.getJacobianCenterOfMass ()
##\}
## Get the dimension of the extra configuration space
def getDimensionExtraConfigSpace(self):
return self.client.basic.robot.getDimensionExtraConfigSpace()
## Convert a direction vector to a quaternion (use Eigen::Quaterniond::FromTwoVectors with Z vector)
    # \param vector the direction vector
def quaternionFromVector(self,vector):
return self.client.basic.robot.quaternionFromVector(vector)
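# Editor's usage sketch (hedged; package and urdf names are illustrative,
# mirroring the hpp-rbprm tutorials):
#
#     robot = Builder()
#     robot.loadModel('robot_trunk', ['robot_lleg_rom', 'robot_rleg_rom'],
#                     'freeflyer', 'robot_meshes', 'robot_description', '', '')
#     robot.setFilter(['robot_lleg_rom', 'robot_rleg_rom'])
#     robot.boundSO3([-0.4, 0.4, -3.0, 3.0, -3.0, 3.0])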
|
import os
import numpy as np
import flopy
ml = flopy.modflow.Modflow.load('l2a_2k.nam', version='mf2005', verbose=True)
delx = ml.dis.delr.array
dely = ml.dis.delc.array
f = open('l2a_2k.lst', 'r')
for line in f:
if 'LAYER # ROW # COLUMN # LAKE # INTERFACE TYPE LAKEBED LEAKANCE' in line:
break
cdata = []
for idx, line in enumerate(f):
if (len(line.strip()) < 1):
break
cdata.append(line)
f.close()
tpth = 'mf5.conn.dat'
f = open(tpth, 'w')
for c in cdata:
f.write(c)
f.close()
dir_dict = {1:'HORIZONTAL',
2:'HORIZONTAL',
3:'HORIZONTAL',
4:'HORIZONTAL',
6:'VERTICAL'}
dtype = [('k', int), ('i', int), ('j', int),  # builtin int/float: np.int and np.float were removed from NumPy
         ('lake', int), ('itype', int),
         ('bedleak', float)]
cdata = np.loadtxt(tpth, dtype=dtype)
cdata['k'] -= 1
cdata['i'] -= 1
cdata['j'] -= 1
nlakes = np.unique(cdata['lake'])
print(nlakes)
lake_cnt = {}
for lake in nlakes:
lake_cnt[lake] = 0
print(lake_cnt)
dtype2 = [('iconn', int), ('belev', float), ('telev', float),
          ('dx', float), ('width', float)]
cdata2 = np.zeros((cdata.shape[0]), dtype=dtype2)
for idx in range(cdata.shape[0]):
k = cdata['k'][idx]
i = cdata['i'][idx]
j = cdata['j'][idx]
ilak = cdata['lake'][idx]
lake_cnt[ilak] += 1
itype = cdata['itype'][idx]
cdir = dir_dict[itype]
belev = 0.
telev = 0.
if cdir == 'HORIZONTAL':
if itype == 1 or itype == 2:
dx = 0.5 * delx[j]
width = dely[i]
elif itype == 3 or itype == 4:
dx = 0.5 * dely[i]
width = delx[j]
else:
dx = 0.
width = 0.
cdata2['iconn'][idx] = lake_cnt[ilak]
cdata2['belev'][idx] = belev
cdata2['telev'][idx] = telev
cdata2['dx'][idx] = dx
cdata2['width'][idx] = width
tpth = 'mf6.conn.dat'
f = open(tpth, 'w')
f.write('begin lakes\n')
c = '# lakeno strt lakeconn boundname'
f.write('{}\n'.format(c))
for lake in nlakes:
f.write(' LAKE {:10d}{:10.3g}{:10d} LAKE_{:03d}\n'.format(lake, 130., lake_cnt[lake], lake))
f.write('end lakes\n\n')
f.write('begin lake_connections\n')
c = '# lakeno iconn layer row ' + \
'column ctype bedleak belev '+ \
'telev dx width'
f.write('{}\n'.format(c))
for idx in range(cdata.shape[0]):
itype = cdata['itype'][idx]
c = ' LAKE'
c += ' {:10d}{:10d}{:10d}{:10d}{:10d}'.format(cdata['lake'][idx],
cdata2['iconn'][idx],
cdata['k'][idx]+1,
cdata['i'][idx]+1,
cdata['j'][idx]+1)
c += '{:>15s} '.format(dir_dict[itype])
c += '{:10.3g}'.format(cdata['bedleak'][idx])
c += '{:10.3g}'.format(cdata2['belev'][idx])
c += '{:10.3g}'.format(cdata2['telev'][idx])
c += '{:10.3g}'.format(cdata2['dx'][idx])
c += '{:10.3g}'.format(cdata2['width'][idx])
f.write('{}\n'.format(c))
f.write('end lake_connections\n\n')
f.close()
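# Editor's summary (hedged): the script above scrapes the lake-connection table
# from an MF2005 listing file (l2a_2k.lst), writes the raw table to
# mf5.conn.dat, and re-emits it as MODFLOW 6 LAK 'lakes'/'lake_connections'
# blocks in mf6.conn.dat; horizontal connections get half-cell lengths from
# DELR/DELC, vertical ones get zero length and width.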
|
from rbnics.problems.base import NonlinearProblem
from rbnics.problems.elliptic import EllipticProblem
from rbnics.backends import product, sum, transpose
NonlinearEllipticProblem_Base = NonlinearProblem(EllipticProblem)
class NonlinearEllipticProblem(NonlinearEllipticProblem_Base):
# Default initialization of members
def __init__(self, V, **kwargs):
# Call to parent
NonlinearEllipticProblem_Base.__init__(self, V, **kwargs)
# Form names for nonlinear problems
self.terms = ["a", "c", "dc", "f", "s"]
self.terms_order = {"a": 2, "c": 1, "dc": 2, "f": 1, "s": 1}
class ProblemSolver(NonlinearEllipticProblem_Base.ProblemSolver):
def residual_eval(self, solution):
problem = self.problem
assembled_operator = dict()
assembled_operator["a"] = sum(product(problem.compute_theta("a"), problem.operator["a"]))
assembled_operator["c"] = sum(product(problem.compute_theta("c"), problem.operator["c"]))
assembled_operator["f"] = sum(product(problem.compute_theta("f"), problem.operator["f"]))
return assembled_operator["a"] * solution + assembled_operator["c"] - assembled_operator["f"]
def jacobian_eval(self, solution):
problem = self.problem
assembled_operator = dict()
assembled_operator["a"] = sum(product(problem.compute_theta("a"), problem.operator["a"]))
assembled_operator["dc"] = sum(product(problem.compute_theta("dc"), problem.operator["dc"]))
return assembled_operator["a"] + assembled_operator["dc"]
# Perform a truth evaluation of the output
def _compute_output(self):
self._output = transpose(self._solution) * sum(product(self.compute_theta("s"), self.operator["s"]))
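# Editor's note (hedged): residual_eval and jacobian_eval above assemble the
# Newton system for the nonlinear elliptic problem
#     F(u) = A(mu) u + c(u; mu) - f(mu),    J(u) = A(mu) + dc(u; mu),
# so each Newton iteration solves J(u_k) du = -F(u_k), u_{k+1} = u_k + du.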
|