repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
CollinsIchigo/hdx_2 | venv/lib/python2.7/site-packages/requests/packages/urllib3/__init__.py | 155 | 1864 | """
urllib3 - Thread-safe connection pooling and re-using.
"""
__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
__version__ = '1.10.2'
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util.request import make_headers
from .util.url import get_host
from .util.timeout import Timeout
from .util.retry import Retry
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
    from logging import NullHandler
except ImportError:
    # Pre-2.7 fallback: a handler that silently discards every record,
    # mirroring the stdlib logging.NullHandler added in Python 2.7.
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass
# Attach the no-op handler so importing applications that configure no
# logging do not see "No handler found" warnings from this package.
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
    """
    Helper for quickly adding a StreamHandler to the logger. Useful for
    debugging.
    Returns the handler after adding it.
    """
    # This method needs to be in this __init__.py to get the __name__ correct
    # even if urllib3 is vendored within another package.
    module_logger = logging.getLogger(__name__)
    stream_handler = logging.StreamHandler()
    log_format = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    stream_handler.setFormatter(log_format)
    module_logger.addHandler(stream_handler)
    module_logger.setLevel(level)
    module_logger.debug('Added a stderr logging handler to logger: %s' % __name__)
    return stream_handler
# ... Clean up.
# NullHandler was only needed to initialise the package logger above;
# drop it from the module namespace so it is not part of the public API.
del NullHandler
# Set security warning to always go off by default.
import warnings
warnings.simplefilter('always', exceptions.SecurityWarning)
def disable_warnings(category=exceptions.HTTPWarning):
    """
    Helper for quickly disabling all urllib3 warnings.

    :param category: warning class to silence; defaults to HTTPWarning,
        the base of all urllib3 warnings, so everything is suppressed.
    """
    warnings.simplefilter('ignore', category)
| mit |
Rudde/pyroscope | pyrocore/src/pyrocore/torrent/jobs.py | 2 | 5864 | # -*- coding: utf-8 -*-
# pylint: disable=I0011
""" rTorrent Daemon Jobs.
Copyright (c) 2012 The PyroScope Project <pyroscope.project@gmail.com>
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import time
try:
import json
except ImportError:
import simplejson as json # pylint: disable=F0401
import requests
from requests.exceptions import RequestException
from pyrobase.parts import Bunch
from pyrocore import error
from pyrocore import config as config_ini
from pyrocore.util import fmt, xmlrpc, pymagic, stats
def _flux_engine_data(engine):
    """ Return rTorrent data set for pushing to InfluxDB.
    """
    data = stats.engine_data(engine)
    # Flatten the nested rate / version tuples into scalar columns
    flat_fields = (
        ("up_rate", "upload", 0),
        ("up_limit", "upload", 1),
        ("down_rate", "download", 0),
        ("down_limit", "download", 1),
        ("version", "versions", 0),
    )
    for target, source, idx in flat_fields:
        data[target] = data[source][idx]
    # Views are reported as their own series; remove the now-flattened keys
    views = data.pop("views")
    for obsolete in ("upload", "download", "versions"):
        del data[obsolete]
    return data, views
class EngineStats(object):
    """ rTorrent connection statistics logger.
    """
    def __init__(self, config=None):
        """ Set up statistics logger.

            ``config`` is an optional job configuration Bunch; an empty
            Bunch is used when none is given.
        """
        self.config = config or Bunch()
        self.LOG = pymagic.get_class_logger(self)
        self.LOG.debug("Statistics logger created with config %r" % self.config)
    def run(self):
        """ Statistics logger job callback.

            Logs engine ID, uptime and proxy details; engine / XMLRPC
            errors are logged as warnings instead of being raised.
        """
        try:
            proxy = config_ini.engine.open()
            self.LOG.info("Stats for %s - up %s, %s" % (
                config_ini.engine.engine_id,
                # uptime = rTorrent's clock minus its recorded startup time
                fmt.human_duration(proxy.system.time() - config_ini.engine.startup, 0, 2, True).strip(),
                proxy
            ))
        except (error.LoggableError, xmlrpc.ERRORS), exc:
            self.LOG.warn(str(exc))
class InfluxDBStats(object):
    """ Push rTorrent and host statistics to InfluxDB.
    """
    def __init__(self, config=None):
        """ Set up statistics logger.

            Reads connection parameters from the [influxdb] section of
            the configuration; timeout defaults to 250 ms.
        """
        self.config = config or Bunch()
        self.influxdb = Bunch(config_ini.influxdb)
        self.influxdb.timeout = float(self.influxdb.timeout or '0.250')
        self.LOG = pymagic.get_class_logger(self)
        self.LOG.debug("InfluxDB statistics feed created with config %r" % self.config)
    def _influxdb_url(self):
        """ Return REST API URL to access time series.

            NOTE(review): credentials are embedded as query parameters
            and may end up in server logs — confirm this is acceptable.
        """
        url = "{0}/db/{1}/series".format(self.influxdb.url.rstrip('/'), self.config.dbname)
        if self.influxdb.user and self.influxdb.password:
            url += "?u={0}&p={1}".format(self.influxdb.user, self.influxdb.password)
        return url
    def _push_data(self):
        """ Push stats data to InfluxDB.

            Best-effort: engine errors and HTTP errors are logged and
            swallowed so the scheduler keeps running.
        """
        if not (self.config.series or self.config.series_host):
            self.LOG.info("Misconfigured InfluxDB job, neither 'series' nor 'series_host' is set!")
            return
        # Assemble data
        fluxdata = []
        if self.config.series:
            try:
                config_ini.engine.open()
                data, views = _flux_engine_data(config_ini.engine)
                # InfluxDB 0.8 line format: one dict per series with
                # parallel 'columns' / 'points' lists
                fluxdata.append(dict(
                    name = self.config.series,
                    columns = data.keys(),
                    points = [data.values()]
                ))
                fluxdata.append(dict(
                    name = self.config.series + '_views',
                    columns = views.keys(),
                    points = [views.values()]
                ))
            except (error.LoggableError, xmlrpc.ERRORS), exc:
                self.LOG.warn("InfluxDB stats: {0}".format(exc))
        # TODO: host statistics series not implemented yet
        # if self.config.series_host:
        #     fluxdata.append(dict(
        #         name = self.config.series_host,
        #         columns = .keys(),
        #         points = [.values()]
        #     ))
        if not fluxdata:
            self.LOG.debug("InfluxDB stats: no data (previous errors?)")
            return
        # Encode into InfluxDB data packet
        fluxurl = self._influxdb_url()
        fluxjson = json.dumps(fluxdata)
        # strip the query part so credentials are not logged
        self.LOG.debug("POST to {0} with {1}".format(fluxurl.split('?')[0], fluxjson))
        # Push it!
        try:
            # TODO: Use a session
            requests.post(fluxurl, data=fluxjson, timeout=self.influxdb.timeout)
        except RequestException, exc:
            self.LOG.info("InfluxDB POST error: {0}".format(exc))
    def run(self):
        """ Statistics feed job callback.
        """
        self._push_data()
def module_test():
    """ Quick test using…
            python -m pyrocore.daemon.webapp

        Connects to the configured rTorrent instance and pretty-prints
        the flattened stats data (Python 2 print statements below).
    """
    import pprint
    from pyrocore import connect
    try:
        engine = connect()
        print("%s - %s" % (engine.engine_id, engine.open()))
        data, views = _flux_engine_data(engine)
        print "data = ",
        pprint.pprint(data)
        print "views = ",
        pprint.pprint(views)
        print("%s - %s" % (engine.engine_id, engine.open()))
    except (error.LoggableError, xmlrpc.ERRORS), torrent_exc:
        print("ERROR: %s" % torrent_exc)
if __name__ == "__main__":
    module_test()
| gpl-2.0 |
alexmogavero/home-assistant | tests/components/test_ffmpeg.py | 19 | 7287 | """The tests for Home Assistant ffmpeg."""
import asyncio
from unittest.mock import patch, MagicMock
import homeassistant.components.ffmpeg as ffmpeg
from homeassistant.setup import setup_component, async_setup_component
from tests.common import (
get_test_home_assistant, assert_setup_component, mock_coro)
class MockFFmpegDev(ffmpeg.FFmpegBase):
    """FFmpeg device mock.

    Records which lifecycle hooks ran (start/stop) and the entity ids
    they were called with, instead of spawning a real ffmpeg process.
    """
    def __init__(self, hass, initial_state=True,
                 entity_id='test.ffmpeg_device'):
        """Initialize mock."""
        super().__init__(initial_state)
        self.hass = hass
        self.entity_id = entity_id
        # NOTE(review): this assigns the MagicMock *class*, not an
        # instance — confirm whether MagicMock() was intended.
        self.ffmpeg = MagicMock
        self.called_stop = False
        self.called_start = False
        self.called_restart = False
        self.called_entities = None
    @asyncio.coroutine
    def _async_start_ffmpeg(self, entity_ids):
        """Mock start."""
        self.called_start = True
        self.called_entities = entity_ids
    @asyncio.coroutine
    def _async_stop_ffmpeg(self, entity_ids):
        """Mock stop."""
        self.called_stop = True
        self.called_entities = entity_ids
class TestFFmpegSetup(object):
    """Test class for ffmpeg (synchronous setup tests)."""
    def setup_method(self):
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
    def teardown_method(self):
        """Stop everything that was started."""
        self.hass.stop()
    def test_setup_component(self):
        """Setup ffmpeg component."""
        with assert_setup_component(2):
            setup_component(self.hass, ffmpeg.DOMAIN, {ffmpeg.DOMAIN: {}})
        # default binary path when none is configured
        assert self.hass.data[ffmpeg.DATA_FFMPEG].binary == 'ffmpeg'
    def test_setup_component_test_service(self):
        """Setup ffmpeg component test services."""
        with assert_setup_component(2):
            setup_component(self.hass, ffmpeg.DOMAIN, {ffmpeg.DOMAIN: {}})
        # the component registers start/stop/restart services
        assert self.hass.services.has_service(ffmpeg.DOMAIN, 'start')
        assert self.hass.services.has_service(ffmpeg.DOMAIN, 'stop')
        assert self.hass.services.has_service(ffmpeg.DOMAIN, 'restart')
@asyncio.coroutine
def test_setup_component_test_register(hass):
    """Setup ffmpeg component test register."""
    with assert_setup_component(2):
        yield from async_setup_component(
            hass, ffmpeg.DOMAIN, {ffmpeg.DOMAIN: {}})
    hass.bus.async_listen_once = MagicMock()
    ffmpeg_dev = MockFFmpegDev(hass)
    yield from ffmpeg_dev.async_added_to_hass()
    assert hass.bus.async_listen_once.called
    # initial_state=True registers both a startup and a shutdown listener
    assert hass.bus.async_listen_once.call_count == 2
@asyncio.coroutine
def test_setup_component_test_register_no_startup(hass):
    """Setup ffmpeg component test register without startup."""
    with assert_setup_component(2):
        yield from async_setup_component(
            hass, ffmpeg.DOMAIN, {ffmpeg.DOMAIN: {}})
    hass.bus.async_listen_once = MagicMock()
    ffmpeg_dev = MockFFmpegDev(hass, False)
    yield from ffmpeg_dev.async_added_to_hass()
    assert hass.bus.async_listen_once.called
    # initial_state=False: only the shutdown listener is registered
    assert hass.bus.async_listen_once.call_count == 1
# NOTE(review): "servcie" in the three test names below is a typo for
# "service"; left unrenamed to keep test identifiers stable.
@asyncio.coroutine
def test_setup_component_test_servcie_start(hass):
    """Setup ffmpeg component test service start."""
    with assert_setup_component(2):
        yield from async_setup_component(
            hass, ffmpeg.DOMAIN, {ffmpeg.DOMAIN: {}})
    ffmpeg_dev = MockFFmpegDev(hass, False)
    yield from ffmpeg_dev.async_added_to_hass()
    ffmpeg.async_start(hass)
    yield from hass.async_block_till_done()
    assert ffmpeg_dev.called_start
@asyncio.coroutine
def test_setup_component_test_servcie_stop(hass):
    """Setup ffmpeg component test service stop."""
    with assert_setup_component(2):
        yield from async_setup_component(
            hass, ffmpeg.DOMAIN, {ffmpeg.DOMAIN: {}})
    ffmpeg_dev = MockFFmpegDev(hass, False)
    yield from ffmpeg_dev.async_added_to_hass()
    ffmpeg.async_stop(hass)
    yield from hass.async_block_till_done()
    assert ffmpeg_dev.called_stop
@asyncio.coroutine
def test_setup_component_test_servcie_restart(hass):
    """Setup ffmpeg component test service restart."""
    with assert_setup_component(2):
        yield from async_setup_component(
            hass, ffmpeg.DOMAIN, {ffmpeg.DOMAIN: {}})
    ffmpeg_dev = MockFFmpegDev(hass, False)
    yield from ffmpeg_dev.async_added_to_hass()
    ffmpeg.async_restart(hass)
    yield from hass.async_block_till_done()
    # restart must invoke both the stop and start hooks
    assert ffmpeg_dev.called_stop
    assert ffmpeg_dev.called_start
@asyncio.coroutine
def test_setup_component_test_servcie_start_with_entity(hass):
    """Setup ffmpeg component test service start."""
    with assert_setup_component(2):
        yield from async_setup_component(
            hass, ffmpeg.DOMAIN, {ffmpeg.DOMAIN: {}})
    ffmpeg_dev = MockFFmpegDev(hass, False)
    yield from ffmpeg_dev.async_added_to_hass()
    ffmpeg.async_start(hass, 'test.ffmpeg_device')
    yield from hass.async_block_till_done()
    assert ffmpeg_dev.called_start
    # the explicit entity id must be forwarded to the start hook
    assert ffmpeg_dev.called_entities == ['test.ffmpeg_device']
@asyncio.coroutine
def test_setup_component_test_run_test_false(hass):
    """Setup ffmpeg component test run_test false."""
    with assert_setup_component(2):
        yield from async_setup_component(
            hass, ffmpeg.DOMAIN, {ffmpeg.DOMAIN: {
                'run_test': False,
            }})
    manager = hass.data[ffmpeg.DATA_FFMPEG]
    with patch('haffmpeg.Test.run_test', return_value=mock_coro(False)):
        yield from manager.async_run_test("blabalblabla")
        # with run_test disabled nothing is tested, so nothing is cached
        assert len(manager._cache) == 0
@asyncio.coroutine
def test_setup_component_test_run_test(hass):
    """Setup ffmpeg component test run_test."""
    with assert_setup_component(2):
        yield from async_setup_component(
            hass, ffmpeg.DOMAIN, {ffmpeg.DOMAIN: {}})
    manager = hass.data[ffmpeg.DATA_FFMPEG]
    with patch('haffmpeg.Test.run_test', return_value=mock_coro(True)) \
            as mock_test:
        yield from manager.async_run_test("blabalblabla")
        assert mock_test.called
        assert mock_test.call_count == 1
        assert len(manager._cache) == 1
        assert manager._cache['blabalblabla']
        # second call hits the cache: no additional test run
        yield from manager.async_run_test("blabalblabla")
        assert mock_test.called
        assert mock_test.call_count == 1
        assert len(manager._cache) == 1
        assert manager._cache['blabalblabla']
@asyncio.coroutine
def test_setup_component_test_run_test_test_fail(hass):
    """Setup ffmpeg component test run_test."""
    with assert_setup_component(2):
        yield from async_setup_component(
            hass, ffmpeg.DOMAIN, {ffmpeg.DOMAIN: {}})
    manager = hass.data[ffmpeg.DATA_FFMPEG]
    with patch('haffmpeg.Test.run_test', return_value=mock_coro(False)) \
            as mock_test:
        yield from manager.async_run_test("blabalblabla")
        assert mock_test.called
        assert mock_test.call_count == 1
        assert len(manager._cache) == 1
        assert not manager._cache['blabalblabla']
        # failures are cached too: no re-test on the second call
        yield from manager.async_run_test("blabalblabla")
        assert mock_test.called
        assert mock_test.call_count == 1
        assert len(manager._cache) == 1
        assert not manager._cache['blabalblabla']
| apache-2.0 |
daicang/Leetcode-solutions | 330-patching-array.py | 1 | 1730 | # 330-patching-array.py
class Solution(object):
    def minPatches_wa(self, nums, n): # Wrong answer. not this method.
        """
        :type nums: List[int]
        :type n: int
        :rtype: int

        Abandoned attempt that patches by inserting powers of two into
        the list in place. Kept for reference only; see minPatches()
        below for the accepted greedy solution.
        """
        # Example:
        # nums = [1, 2, 4, 9]
        # rbound = 1 + 2 + 4 = 7
        count = 0
        toInsert = 2
        rbound = 1
        size = len(nums)
        if size == 0 or nums[0] != 1:
            nums.insert(0, 1)
            count += 1
        insert_idx = 1 # nums[0] is always 1
        while rbound < n:
            print "r: ", rbound, " count: ", count
            while insert_idx < size:
                curr = nums[insert_idx]
                if curr == toInsert:
                    rbound += curr
                    insert_idx += 1
                    toInsert *= 2
                elif curr <= rbound:
                    rbound += curr
                    insert_idx += 1
                else:
                    break
            if rbound >= n: break
            nums.insert(insert_idx, toInsert)
            count += 1
            rbound += toInsert
            toInsert *= 2
            insert_idx += 1
            size += 1
        print nums
        return count
    def minPatches(self, nums, n):
        """Greedy solution: 'top' is the largest value V such that every
        sum in [1, V] is reachable. Consume the next array element while
        it is <= top + 1; otherwise patch by adding the value top + 1
        (which doubles the reachable range). Returns the patch count."""
        count, top = 0, 0
        i = 0
        while top < n:
            if i < len(nums) and nums[i] <= top + 1:
                top += nums[i]
                i += 1
            else:
                print 'top: ', top, ' count: ', count
                top = top * 2 + 1
                count += 1
        return count
# Ad-hoc smoke tests (Python 2 print statements).
s = Solution()
print s.minPatches([1,5,10], 20)
print s.minPatches([1,2,2], 5)
print s.minPatches([1,2,31,33], 2147483647)
| mit |
RavuAlHemio/bcusdk | contrib/swig/eibclient/__init__.py | 2 | 3140 | """EIBclient module.
Allows to connect to eibd from bcusdk.
"""
#EIBD client library
#Copyright (C) 2006 Tony Przygienda, Z2 GmbH
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#In addition to the permissions in the GNU General Public License,
#you may link the compiled version of this file into combinations
#with other programs, and distribute those combinations without any
#restriction coming from the use of this file. (The General Public
#License restrictions do apply in other respects; for example, they
#cover modification of the file, and distribution when not linked into
#a combine executable.)
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Public API: the re-exported eibd client constants and functions
# (EIB_* request/error codes, socket constructors, management and
# memory/connection helpers). 'from eibclient import *' yields exactly
# these names.
__all__ = [ 'EIB_INVALID_REQUEST', 'EIB_CONNECTION_INUSE', 'EIB_PROCESSING_ERROR', 'EIB_CLOSED', 'EIB_OPEN_BUSMONITOR', 'EIB_OPEN_BUSMONITOR_TEXT', 'EIB_OPEN_VBUSMONITOR', 'EIB_OPEN_VBUSMONITOR_TEXT', 'EIB_BUSMONITOR_PACKET', 'EIB_OPEN_T_CONNECTION', 'EIB_OPEN_T_INDIVIDUAL', 'EIB_OPEN_T_GROUP', 'EIB_OPEN_T_BROADCAST', 'EIB_OPEN_T_TPDU', 'EIB_APDU_PACKET', 'EIB_OPEN_GROUPCON', 'EIB_GROUP_PACKET', 'EIB_PROG_MODE', 'EIB_MASK_VERSION', 'EIB_M_INDIVIDUAL_ADDRESS_READ', 'EIB_M_INDIVIDUAL_ADDRESS_WRITE', 'EIB_ERROR_ADDR_EXISTS', 'EIB_ERROR_MORE_DEVICE', 'EIB_ERROR_TIMEOUT', 'EIB_MC_CONNECTION', 'EIB_MC_READ', 'EIB_MC_WRITE', 'EIB_MC_PROP_READ', 'EIB_MC_PROP_WRITE', 'EIB_MC_PEI_TYPE', 'EIB_MC_ADC_READ', 'EIB_MC_AUTHORIZE', 'EIB_MC_KEY_WRITE', 'EIB_MC_MASK_VERSION', 'EIB_MC_PROG_MODE', 'EIB_MC_PROP_DESC', 'EIB_MC_PROP_SCAN', 'EIB_LOAD_IMAGE', 'EIBSocketURL', 'EIBSocketLocal', 'EIBSocketRemote', 'EIBClose', 'EIBOpenBusmonitor', 'EIBOpenBusmonitorText', 'EIBOpenVBusmonitor', 'EIBOpenVBusmonitorText', 'EIBOpenT_Connection', 'EIBOpenT_Individual', 'EIBOpenT_Group', 'EIBOpenT_Broadcast', 'EIBOpenT_TPDU', 'EIBSendAPDU', 'EIBGetAPDU', 'EIBGetAPDU_Src', 'EIBSendTPDU', 'EIBOpen_GroupSocket', 'EIBSendGroup', 'EIBGetGroup_Src', 'EIB_M_ReadIndividualAddresses', 'EIB_M_Progmode_On', 'EIB_M_Progmode_Off', 'EIB_M_Progmode_Toggle', 'EIB_M_Progmode_Status', 'EIB_M_GetMaskVersion', 'EIB_M_WriteIndividualAddress', 'EIB_MC_Connect', 'EIB_MC_Read', 'EIB_MC_Write', 'EIB_MC_Progmode_On', 'EIB_MC_Progmode_Off', 'EIB_MC_Progmode_Toggle', 'EIB_MC_Progmode_Status', 'EIB_MC_GetMaskVersion', 'EIB_MC_PropertyRead', 'EIB_MC_PropertyWrite', 'EIB_MC_PropertyDesc', 'EIB_MC_PropertyScan', 'EIB_MC_GetPEIType', 'EIB_MC_ReadADC', 'EIB_MC_Authorize', 'EIB_MC_SetKey', 'EIB_LoadImage', 'EIBGetBusmonitorPacket' ]
| gpl-2.0 |
PietPtr/FinalProject | backend/restaurant/views.py | 1 | 17334 | from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required, permission_required
import json
import random
from django.views.decorators.csrf import csrf_exempt
from restaurant.models import Order, CardSwipe, Account, Food, Card, Variables
def checkurl(request):
    """This url is called by the arduino to check if the server is compatible"""
    # Fixed token the arduino firmware matches to validate the host.
    return HttpResponse("Correct IP")
@login_required
@permission_required('restaurant.isBoss')
def reset(request):
    """Make the user complete a captcha so the system doesn't get reset by accident"""
    # get the desired result in a variable (stored by a previous visit)
    variable = getvalue("reset")
    # get the user input
    userinput = request.GET.get("reset", "0")
    # check if the variable is unset or the entry was wrong
    if not variable or userinput != variable:
        # generate two random numbers between 1 and 15
        a, b = random.randint(1, 15), random.randint(1, 15)
        # store the result of their addition in the database
        setvalue("reset", a + b)
        # send both variables as a response (the "captcha" challenge)
        return HttpResponse(str(a) + " + " + str(b))
    else:
        # if the variable matches the userinput, reset all tables
        for account in Account.objects.all():
            account.delete()
        for cardswipe in CardSwipe.objects.all():
            cardswipe.delete()
        for variable in Variables.objects.all():
            variable.delete()
        for order in Order.objects.all():
            order.delete()
        # send a confirmation message if everything worked out
        # (otherwise the system would show an error page)
        return HttpResponse("Deleted!")
def login_view(request):
    """This method serves the login interface"""
    # serve the login page
    return render(request, "login.html")
def verify(request):
    """This method is called to verify a users credentials (login form POST)"""
    # ask for authentication
    username = request.POST.get("username", "")
    password = request.POST.get("password", "")
    user = authenticate(username=username, password=password)
    if user is not None:
        # if auth. successful, start the session and redirect to menu
        login(request, user)
        return HttpResponseRedirect("menu")
    else:
        # if auth. failed, return to login with error message
        return render(request, "login.html", {'Error': True})
# @login_required defines if you need permission to enter the .html page
# @permission_required defines which specific permission you need to view
# the content that the decorated view produces
@login_required
def logout_view(request):
    """This method serves the login page after logging out"""
    # return to login page but with a logout message
    logout(request)
    return render(request, "login.html", {'Logout': True})
@login_required
def menu(request):
    """This method serves the menu interface"""
    # serve the menu page
    return render(request, "menu.html")
@login_required
@permission_required('restaurant.isCook')
def cook(request):
    """This method serves the cooks interface"""
    # get all orders that are not done
    orders = Order.objects.filter(done=0)
    # render the orders to the cooks page
    return render(request, "cook.html", {'orders': orders})
def confirmorder(request):
    """This method is called when a cook marks an order as done

    Marks one (arbitrary) undone order of the given food as done;
    responds "Done!" regardless of whether a matching order existed.
    """
    # get the food from the request or emptystring
    name = request.GET.get("food", "")
    # get the undone orders containing that food from the DB
    orders = Order.objects.filter(food__name=name, done=0)
    # if at least one was found
    if orders:
        # get the first order in the set
        order = orders[0]
        # mark it done
        order.done = 1
        # and store the change in the DB
        order.save()
    return HttpResponse("Done!")
@login_required
@permission_required('restaurant.isWaiter')
def waiter(request):
    """This method serves the waiter interface"""
    # get all foods from database, alphabetically ordered for the menu
    foods = Food.objects.all().order_by('name')
    context = {'foods': foods}
    # and render them into the template
    return render(request, "waiter.html", context)
def cleanitems(request):
    """This method is called to check if the waiter page should be refreshed

    Consumes one queued waiter card-swipe (if any) and answers with JSON
    telling the client whether to reload and whether the card was rejected.
    """
    # check if there is a swiped card in queue
    swipes = CardSwipe.objects.filter(device="waiter")
    if swipes:
        # get the first swipe
        swipe = swipes[0]
        try:
            # try to get an account that belongs to that card
            # NOTE(review): unlike cashier(), this does not filter on
            # active=1 — confirm whether inactive accounts should match.
            Account.objects.filter(card=swipe.card)[0]
        except IndexError:
            # Narrowed from a bare 'except:': indexing an empty queryset
            # raises IndexError; any other error should not be swallowed.
            # Delete the swipe, since it has been processed
            swipe.delete()
            # and show an error page
            return HttpResponse(json.dumps({'doreload': True, 'error': True, 'message': "Card rejected!"}),
                                content_type="text/json")
        # if account was found, delete the swipe since it was processed
        swipe.delete()
        # and reload the page to clean the ordered items
        return HttpResponse(json.dumps({'doreload': True, 'error': False}), content_type="text/json")
    else:
        # if no swipe was returned, then just return the default JSON-object
        return HttpResponse(json.dumps({'doreload': False, 'error': False}), content_type="text/json")
def error(request):
    """This method serves an all-purpose error page"""
    # get the error message from the request
    message = request.GET.get("message", "")
    # and render it to the client, linking back to the waiter page
    return render(request, "error.html", {'message': message, 'returnpage': "waiter"})
def addorder(request):
    """This function is called, when the waiter adds an item to the checkout-list"""
    # get the food that was selected
    # NOTE(review): request.GET["food"] raises KeyError (HTTP 500) when
    # the parameter is missing — confirm the client always sends it.
    food = Food.objects.filter(name=request.GET["food"])[:1][0]
    print("Adding: " + request.GET.get("food", "ERROR"))
    # create a new order without an assigned account (claimed on card swipe)
    order = Order(food=food)
    order.save()
    print("Added: " + request.GET.get("food", "ERROR"))
    return HttpResponse("Done!")
def rmorder(request):
    """This function is called, when the waiter removes an item from the checkout-list"""
    print("Removing: " + request.GET.get("food", "ERROR"))
    # get the food that has been ordered and delete one order
    # (it doesn't matter which, since there is only one waiter that can have orders) of that food
    food = Food.objects.filter(name=request.GET.get("food", "ERROR"))[:1][0]
    order = Order.objects.filter(food=food)[:1][0]
    order.delete()
    print("Removed: " + request.GET.get("food", "ERROR"))
    return HttpResponse("Done!")
@login_required
@permission_required('restaurant.isCashier')
def cashier(request):
    """This method serves the cashier interface

    If a cashier card-swipe is queued, shows the bill for the account
    that owns the card; otherwise serves an empty page that polls for
    new swipes (doreload=True).
    """
    # get all recent card-swipes
    swipes = (CardSwipe.objects.filter(device="cashier"))
    # if there are any card-swipes
    if swipes:
        # get the first one
        swiped = swipes[0]
        # get the first (and probably only) active account for that card
        accounts = Account.objects.filter(card=swiped.card, active=1)
        if not accounts:
            swiped.delete()
            return render(request, "error.html",
                          {'message': "This card does not belong to an account!", 'returnpage': "cashier"})
        # get all items that account has bought
        items = Order.objects.filter(account=accounts[0])
        price = 0
        if items:
            for item in items:
                # add up the price
                price += item.food.price
        else:
            items = []
        # pack it into the context
        context = {'id': swiped.identifier, 'items': items, 'price': price, 'doreload': False}
    else:
        # if there are no swipes, then just serve the page
        context = {'id': 0, 'price': "0,00", 'doreload': True}
    # render the template
    return render(request, 'cashier.html', context)
def checkout(request):
    """This function is called when the checkout-button in the cashier interface is pressed

    Marks the swiped card's account as paid/inactive, consumes the
    pending cashier card-swipe and redirects back to the cashier page.
    Failures (e.g. no matching swipe or account) are best-effort and
    also redirect back.
    """
    try:
        # this is called, when the pay-button is pressed
        print("Doing the checkout for cardswipe: " + request.GET.get("swipeid", "0"))
        # get the swipe-object belonging to that swipeid
        swipe = CardSwipe.objects.filter(identifier=request.GET.get("swipeid", "0"))[:1][0]
        # get the account which the order belongs to
        account = Account.objects.filter(card=swipe.card, active=1)[:1][0]
        # get the orders that account has made
        orders = Order.objects.filter(account=account, done=0)
        # set the new parameters and save it
        account.paid = 1
        account.active = 0
        account.save()
        # go through all orders
        for order in orders:
            # NOTE(review): the original comment said "mark them done",
            # but 'done' is never set before saving — confirm whether
            # 'order.done = 1' was intended here.
            order.save()
        # finally delete the swipe-object, so the job is done
        swipe.delete()
        return HttpResponseRedirect("cashier", False)
    except Exception:
        # Narrowed from a bare 'except:' so system-exiting exceptions
        # (KeyboardInterrupt, SystemExit) propagate; lookup failures
        # (IndexError on empty querysets) still redirect as before.
        return HttpResponseRedirect("cashier", False)
def getbill(request):
    """This function is called by the cashier to check if new data is available"""
    # Report whether any unprocessed cashier card-swipes are queued.
    pending = CardSwipe.objects.filter(device='cashier')
    payload = {'isdata': bool(pending)}
    return HttpResponse(json.dumps(payload), content_type="text/json")
@csrf_exempt
def cardswiped(request):
    """This function is called when a card is swiped at either arduino

    Expects a JSON body {"id": <encrypted card id>, "type": "waiter"|"cashier"}.
    In add-mode, unknown cards are registered; otherwise the swipe is
    queued for the matching web interface (and, at the cashier, a new
    account may be activated for the card).
    """
    # get the data from the client and parse the json-data
    data = json.loads(request.body.decode('utf-8'))
    # if the data-object has been correctly parsed
    if data:
        # the arduino sends the card id Treyfer-encrypted; decrypt it
        cardid = treyferdec(data["id"])
        # cardid = data["id"]
        cardtype = data["type"]
        # print the information for debug-purposes
        print("Card swiped:")
        print(cardid)
        print(cardtype)
        # get all the cards with this specific card-id from the system (expected is either 1 or 0 entries)
        cards = Card.objects.filter(identifier=str(cardid))
        if getvalue("addmode") == "1":
            # if the card is already known to the system, the cards-object will be empty and return False
            if not cards:
                # create a new card with the received identifier
                newcard = Card(identifier=cardid)
                # and save it
                newcard.save()
                print("Card was unknown to the system and has been saved!")
            else:
                print("Card was already known to the system!")
        else:
            # if the system is not in addmode, add the already known card to a new CardSwipe-object,
            # which will later be processed by the webinterfaces
            # NOTE(review): cards[0] raises IndexError (HTTP 500) when an
            # unknown card is swiped outside add-mode — confirm intended.
            swipe = CardSwipe(card=Card.objects.filter(identifier=cardid)[0], device=cardtype)
            # we also get a list of possible accounts
            accounts = Account.objects.filter(card=cards[0], active=1)
            # if the source of the packet is a waiter, then
            if cardtype == "waiter":
                try:
                    # we know that the accounts-list contains exactly one item
                    account = accounts[0]
                except:
                    swipe.save()
                    return HttpResponse("Ok!")
                # we also want to know what orders have no account assigned to it
                orders = Order.objects.filter(account=None)
                # assign the account to the new orders
                for order in orders:
                    order.account = account
                    order.save()
                swipe.save()
                print("Card has been swiped at the waiter-terminal to make an order!")
            elif cardtype == "cashier":
                # if the card has been swiped at the cashier
                if not accounts:
                    # create a new account since the card has been picked up by a customer and
                    # is now going to be used
                    newaccount = Account(card=Card.objects.filter(identifier=cardid)[0], active=1)
                    newaccount.save()
                    print("Card has been swiped at the cashier-terminal to activate a Card!")
                else:
                    swipe.save()
                    print("Card has been swiped at the cashier-terminal to make a Payment and return the Card!")
            else:
                print("Packet contained an invalid type!")
    return HttpResponse("OK")
@login_required
@permission_required('restaurant.isBoss')
def bookkeeping(request):
    """This method serves the bookkeeping interface

    Shows the total revenue of all paid accounts plus per-food sales
    counts. NOTE(review): the sales count uses all orders of a food,
    not only paid ones — confirm whether that is intended.
    """
    # get all orders that are paid
    orders = Order.objects.filter(account__paid=1)
    totalprice = 0
    if orders:
        # add up their total cost
        for order in orders:
            totalprice += order.food.price
        foodlist = []
        # create a list of all foods and their quantities
        for food in Food.objects.all():
            torders = Order.objects.filter(food=food)
            if torders:
                foodlist.append({'date': "Today", 'food': food.name, 'quantity': len(torders)})
        # render the foodlist to the interface
        return render(request, "bookkeeping.html", {'payments': [
            {'date': "Today", 'cash': "€" + str(totalprice), 'pin': "€0,00", 'credit': "€0,00", 'check': "€0,00"}],
            'sales': foodlist})
    # if there are no orders, there is nothing to render
    return render(request, "bookkeeping.html", {})
def getvalue(key):
    """gets a stored variable from the database or an empty string if there is no variable stored"""
    matches = Variables.objects.filter(key=key)
    # An empty queryset means the key was never stored; report that as "".
    return matches[0].value if matches else ""
def setvalue(key, value):
    """creates or changes a variable stored in the database"""
    existing = Variables.objects.filter(key=key)
    if existing:
        # key already stored: update the first (only) matching row
        variable = existing[0]
        variable.value = str(value)
    else:
        # unknown key: create a fresh row
        variable = Variables(key=key, value=str(value))
    # persist the created/updated variable
    variable.save()
    return None
# All functions below are used for decryption
def rotl(x):
    """Rotate the 8-bit value ``x`` one bit to the left.

    Fix: mask the result to 8 bits. Python integers are unbounded, so
    without ``& 0xFF`` the ``x << 1`` term leaves bit 8 set instead of
    wrapping it around (e.g. rotl(0x80) returned 0x101, not 0x01).
    """
    return ((x << 1) | (x >> 7)) & 0xFF
def rotr(x):
    """Rotate the 8-bit value ``x`` one bit to the right.

    Fix: mask the result to 8 bits. Without ``& 0xFF`` the ``x << 7``
    term keeps high bits set and the result is only correct modulo 256;
    treyferdec() happened to reduce it mod 256 afterwards, so this mask
    preserves its behaviour while making rotr a true rotate on its own.
    """
    return ((x >> 1) | (x << 7)) & 0xFF
def treyferdec(text):
    """This method decrypts the data from the arduino

    ``text`` is a hex string of the 8-byte Treyfer-encrypted card id;
    the 32 decryption rounds below undo the arduino's encryption and
    the digits of bytes 3..5 are concatenated into the card id string.
    The substitution box and key must match the arduino firmware.
    """
    sbox = [0x02, 0x03, 0x05, 0x07, 0x0B, 0x0D, 0x11, 0x13,
            0x17, 0x1D, 0x1F, 0x25, 0x29, 0x2B, 0x2F, 0x35,
            0x3B, 0x3D, 0x43, 0x47, 0x49, 0x4F, 0x53, 0x59,
            0x61, 0x65, 0x67, 0x6B, 0x6D, 0x71, 0x7F, 0x83,
            0x89, 0x8B, 0x95, 0x97, 0x9D, 0xA3, 0xA7, 0xAD,
            0xB3, 0xB5, 0xBF, 0xC1, 0xC5, 0xC7, 0xD3, 0xDF,
            0xE3, 0xE5, 0xE9, 0xEF, 0xF1, 0xFB, 0x01, 0x07,
            0x0D, 0x0F, 0x15, 0x19, 0x1B, 0x25, 0x33, 0x37,
            0x39, 0x3D, 0x4B, 0x51, 0x5B, 0x5D, 0x61, 0x67,
            0x6F, 0x75, 0x7B, 0x7F, 0x85, 0x8D, 0x91, 0x99,
            0xA3, 0xA5, 0xAF, 0xB1, 0xB7, 0xBB, 0xC1, 0xC9,
            0xCD, 0xCF, 0xD3, 0xDF, 0xE7, 0xEB, 0xF3, 0xF7,
            0xFD, 0x09, 0x0B, 0x1D, 0x23, 0x2D, 0x33, 0x39,
            0x3B, 0x41, 0x4B, 0x51, 0x57, 0x59, 0x5F, 0x65,
            0x69, 0x6B, 0x77, 0x81, 0x83, 0x87, 0x8D, 0x93,
            0x95, 0xA1, 0xA5, 0xAB, 0xB3, 0xBD, 0xC5, 0xCF,
            0xD7, 0xDD, 0xE3, 0xE7, 0xEF, 0xF5, 0xF9, 0x01,
            0x05, 0x13, 0x1D, 0x29, 0x2B, 0x35, 0x37, 0x3B,
            0x3D, 0x47, 0x55, 0x59, 0x5B, 0x5F, 0x6D, 0x71,
            0x73, 0x77, 0x8B, 0x8F, 0x97, 0xA1, 0xA9, 0xAD,
            0xB3, 0xB9, 0xC7, 0xCB, 0xD1, 0xD7, 0xDF, 0xE5,
            0xF1, 0xF5, 0xFB, 0xFD, 0x07, 0x09, 0x0F, 0x19,
            0x1B, 0x25, 0x27, 0x2D, 0x3F, 0x43, 0x45, 0x49,
            0x4F, 0x55, 0x5D, 0x63, 0x69, 0x7F, 0x81, 0x8B,
            0x93, 0x9D, 0xA3, 0xA9, 0xB1, 0xBD, 0xC1, 0xC7,
            0xCD, 0xCF, 0xD5, 0xE1, 0xEB, 0xFD, 0xFF, 0x03,
            0x09, 0x0B, 0x11, 0x15, 0x17, 0x1B, 0x27, 0x29,
            0x2F, 0x51, 0x57, 0x5D, 0x65, 0x77, 0x81, 0x8F,
            0x93, 0x95, 0x99, 0x9F, 0xA7, 0xAB, 0xAD, 0xB3,
            0xBF, 0xC9, 0xCB, 0xCF, 0xD1, 0xD5, 0xDB, 0xE7,
            0xF3, 0xFB, 0x07, 0x0D, 0x11, 0x17, 0x1F, 0x23,
            0x2B, 0x2F, 0x3D, 0x41, 0x47, 0x49, 0x4D, 0x53,
            ]
    # shared secret; must be identical to the key in the arduino sketch
    key = [0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xFF]
    NUMROUNDS = 32
    data = []
    # parse the hex string two characters (= one byte) at a time
    for i in range(0, len(text), 2):
        data.append(int(text[i] + text[i + 1], 16))
    # run the Treyfer rounds in reverse to undo the encryption
    for i in range(8 * NUMROUNDS - 1, 0, -1):
        t = data[i % 8]
        t = (t + key[i % 8]) % 256
        data[(i + 1) % 8] = rotr(data[(i + 1) % 8])
        data[(i + 1) % 8] = (data[(i + 1) % 8] - sbox[t]) % 256
    # the card id is the decimal digits of bytes 3..5 concatenated
    output = ''
    for i in range(3, 6, 1):
        output += str(data[i])
    return output
| gpl-3.0 |
MassStash/m8whl_sense | tools/perf/scripts/python/net_dropmonitor.py | 4235 | 1554 | # Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
linecount = 0
for line in f:
linecount = linecount+1
f.seek(0)
except:
return
j = 0
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
j = j +1
if ((j % 100) == 0):
print "\r" + str(j) + "/" + str(linecount),
kallsyms.append({ 'loc': loc, 'name' : name})
print "\r" + str(j) + "/" + str(linecount)
kallsyms.sort()
return
def get_sym(sloc):
loc = int(sloc)
for i in kallsyms:
if (i['loc'] >= loc):
return (i['name'], i['loc']-loc)
return (None, 0)
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
| gpl-2.0 |
466152112/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 248 | 2903 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a closed formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired by Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
| bsd-3-clause |
maxpinto/Ptz | bootcamp/messages/models.py | 16 | 2116 | from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.db.models import Max
class Message(models.Model):
user = models.ForeignKey(User, related_name='+')
message = models.TextField(max_length=1000, blank=True)
date = models.DateTimeField(auto_now_add=True)
conversation = models.ForeignKey(User, related_name='+')
from_user = models.ForeignKey(User, related_name='+')
is_read = models.BooleanField(default=False)
class Meta:
verbose_name = _('Message')
verbose_name_plural = _('Messages')
ordering = ('date',)
def __unicode__(self):
return self.message
@staticmethod
def send_message(from_user, to_user, message):
message = message[:1000]
current_user_message = Message(from_user=from_user,
message=message,
user=from_user,
conversation=to_user,
is_read=True)
current_user_message.save()
Message(from_user=from_user,
conversation=from_user,
message=message,
user=to_user).save()
return current_user_message
@staticmethod
def get_conversations(user):
conversations = Message.objects.filter(user=user).values('conversation').annotate(last=Max('date')).order_by('-last')
users = []
for conversation in conversations:
users.append({
'user': User.objects.get(pk=conversation['conversation']),
'last': conversation['last'],
'unread': Message.objects.filter(user=user, conversation__pk=conversation['conversation'], is_read=False).count(),
})
return users
#Message.objects.filter(is_read=False, user__username='vitorfs').values('to_user').annotate(Count('to_user')).order_by()
#Message.objects.filter(user__username='vitorfs').values('to_user', 'last').annotate(last=Max('date')).order_by()
#Message.objects.filter(user__username='vitorfs').values('to_user').annotate(last=Max('date')).order_by('last') | mit |
ajmendez/PyMix | pymix/examples/csi.py | 1 | 3862 | import mixture
# Example for context-specific independence (CSI) structure learning.
# First we generate a data set from a three component mixture with a CSI like structure
# in the distribution parameters. Then a five component CSI mixture is trained.
# The training should recover the true number of components (three),
# the CSI structure of the generating model as well as the distribution parameters.
# Setting up the generating model. This is a benign case in the
# sense that the components are reasonably well separated and we
# allow ourselves plenty of training data.
# Component distributions
n11 = mixture.NormalDistribution(1.0,0.5)
n12 = mixture.NormalDistribution(2.0,1.5)
n13 = mixture.NormalDistribution(3.0,0.7)
d14 = mixture.DiscreteDistribution(4,[0.4,0.3,0.1,0.2])
c1 = mixture.ProductDistribution([n11,n12,n13,d14])
n21 = mixture.NormalDistribution(1.0,0.5)
n22 = mixture.NormalDistribution(-6.0,0.5)
n23 = mixture.NormalDistribution(3.0,0.7)
d24 = mixture.DiscreteDistribution(4,[0.1,0.1,0.4,0.4])
c2 = mixture.ProductDistribution([n21,n22,n23,d24])
n31 = mixture.NormalDistribution(2.0,0.5)
n32 = mixture.NormalDistribution(-3.0,0.5)
n33 = mixture.NormalDistribution(3.0,0.7)
d34 = mixture.DiscreteDistribution(4,[0.4,0.3,0.1,0.2])
c3 = mixture.ProductDistribution([n31,n32,n33,d34])
# creating the model
pi = [0.4,0.3,0.3]
m = mixture.MixtureModel(3,pi,[c1,c2,c3])
# sampling of the training data
data = m.sampleDataSet(800)
#---------------------------------------------------
# setting up the five component model we are going to train
tn11 = mixture.NormalDistribution(1.0,0.5)
tn12 = mixture.NormalDistribution(2.0,0.5)
tn13 = mixture.NormalDistribution(-3.0,0.5)
td14 = mixture.DiscreteDistribution(4,[0.25]*4)
tc1 = mixture.ProductDistribution([tn11,tn12,tn13,td14])
tn21 = mixture.NormalDistribution(4.0,0.5)
tn22 = mixture.NormalDistribution(-6.0,0.5)
tn23 = mixture.NormalDistribution(1.0,0.5)
td24 = mixture.DiscreteDistribution(4,[0.25]*4)
tc2 = mixture.ProductDistribution([tn21,tn22,tn23,td24])
tn31 = mixture.NormalDistribution(1.0,0.5)
tn32 = mixture.NormalDistribution(2.0,0.5)
tn33 = mixture.NormalDistribution(-3.0,0.5)
td34 = mixture.DiscreteDistribution(4,[0.25]*4)
tc3 = mixture.ProductDistribution([tn31,tn32,tn33,td34])
tn41 = mixture.NormalDistribution(4.0,0.5)
tn42 = mixture.NormalDistribution(-6.0,0.5)
tn43 = mixture.NormalDistribution(1.0,0.5)
td44 = mixture.DiscreteDistribution(4,[0.25]*4)
tc4 = mixture.ProductDistribution([tn41,tn42,tn43,td44])
tn51 = mixture.NormalDistribution(4.0,0.5)
tn52 = mixture.NormalDistribution(-6.0,0.5)
tn53 = mixture.NormalDistribution(1.0,0.5)
td54 = mixture.DiscreteDistribution(4,[0.25]*4)
tc5 = mixture.ProductDistribution([tn51,tn52,tn53,td54])
tpi = [0.3,0.2,0.2,0.2,0.1]
# the hyperparameter of the NormalGamma distributions are
# estimated heuristically in .setParams(...)
sp1 = mixture.NormalGammaPrior(1.0,1.0,1.0,1.0)
sp1.setParams(data.getInternalFeature(0),5)
sp2 = mixture.NormalGammaPrior(1.0,1.0,1.0,1.0)
sp2.setParams(data.getInternalFeature(1),5)
sp3 = mixture.NormalGammaPrior(1.0,1.0,1.0,1.0)
sp3.setParams(data.getInternalFeature(2),5)
sp4 = mixture.DirichletPrior(4,[1.02]*4)
pipr = mixture.DirichletPrior(5,[1.0]*5)
# the hyperparameter alpha is chosen based on the heuristic below
delta = 0.1
structPrior = 1.0 / (1.0+delta)**data.N
# creating the model prior
prior = mixture.MixtureModelPrior(structPrior,0.03, pipr,[sp1,sp2,sp3,sp4])
# creating the model
tm = mixture.BayesMixtureModel(5,tpi,[tc1,tc2,tc3,tc4,tc5],prior,struct=1)
# call to the learning algorithm
tm.bayesStructureEM(data,1,5,40,0.1)
# printing out the result of the training. The model should have three components and
# parameters closely matching the generating model.
print "---------------------"
print tm
print tm.leaders
print tm.groups
| gpl-2.0 |
GheRivero/ansible | lib/ansible/modules/network/nxos/nxos_ospf_vrf.py | 36 | 16422 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_ospf_vrf
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages a VRF for an OSPF router.
description:
- Manages a VRF for an OSPF router.
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Value I(default) restores params default value, if any.
Otherwise it removes the existing param configuration.
options:
vrf:
description:
- Name of the resource instance. Valid value is a string.
The name 'default' is a valid VRF representing the global OSPF.
default: default
ospf:
description:
- Name of the OSPF instance.
required: true
router_id:
description:
- Router Identifier (ID) of the OSPF router VRF instance.
default_metric:
description:
- Specify the default Metric value. Valid values are an integer
or the keyword 'default'.
log_adjacency:
description:
- Controls the level of log messages generated whenever a
neighbor changes state. Valid values are 'log', 'detail',
and 'default'.
choices: ['log','detail','default']
timer_throttle_lsa_start:
description:
- Specify the start interval for rate-limiting Link-State
Advertisement (LSA) generation. Valid values are an integer,
in milliseconds, or the keyword 'default'.
timer_throttle_lsa_hold:
description:
- Specify the hold interval for rate-limiting Link-State
Advertisement (LSA) generation. Valid values are an integer,
in milliseconds, or the keyword 'default'.
timer_throttle_lsa_max:
description:
- Specify the max interval for rate-limiting Link-State
Advertisement (LSA) generation. Valid values are an integer,
in milliseconds, or the keyword 'default'.
timer_throttle_spf_start:
description:
- Specify initial Shortest Path First (SPF) schedule delay.
Valid values are an integer, in milliseconds, or
the keyword 'default'.
timer_throttle_spf_hold:
description:
- Specify minimum hold time between Shortest Path First (SPF)
calculations. Valid values are an integer, in milliseconds,
or the keyword 'default'.
timer_throttle_spf_max:
description:
- Specify the maximum wait time between Shortest Path First (SPF)
calculations. Valid values are an integer, in milliseconds,
or the keyword 'default'.
auto_cost:
description:
- Specifies the reference bandwidth used to assign OSPF cost.
Valid values are an integer, in Mbps, or the keyword 'default'.
passive_interface:
description:
- Setting to C(yes) will suppress routing update on interface.
version_added: "2.4"
type: bool
state:
description:
- State of ospf vrf configuration.
default: present
choices: ['present', 'absent']
'''
EXAMPLES = '''
- nxos_ospf_vrf:
ospf: 1
timer_throttle_spf_start: 50
timer_throttle_spf_hold: 1000
timer_throttle_spf_max: 2000
timer_throttle_lsa_start: 60
timer_throttle_lsa_hold: 1100
timer_throttle_lsa_max: 3000
vrf: test
state: present
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["router ospf 1", "vrf test", "timers throttle lsa 60 1100 3000",
"ospf 1", "timers throttle spf 50 1000 2000", "vrf test"]
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
BOOL_PARAMS = [
'passive_interface'
]
PARAM_TO_COMMAND_KEYMAP = {
'vrf': 'vrf',
'router_id': 'router-id',
'default_metric': 'default-metric',
'log_adjacency': 'log-adjacency-changes',
'timer_throttle_lsa_start': 'timers throttle lsa',
'timer_throttle_lsa_max': 'timers throttle lsa',
'timer_throttle_lsa_hold': 'timers throttle lsa',
'timer_throttle_spf_max': 'timers throttle spf',
'timer_throttle_spf_start': 'timers throttle spf',
'timer_throttle_spf_hold': 'timers throttle spf',
'auto_cost': 'auto-cost reference-bandwidth',
'passive_interface': 'passive-interface default'
}
PARAM_TO_DEFAULT_KEYMAP = {
'timer_throttle_lsa_start': '0',
'timer_throttle_lsa_max': '5000',
'timer_throttle_lsa_hold': '5000',
'timer_throttle_spf_start': '200',
'timer_throttle_spf_max': '5000',
'timer_throttle_spf_hold': '1000',
'auto_cost': '40000',
'default_metric': '',
'passive_interface': False,
'router_id': '',
'log_adjacency': '',
}
def get_existing(module, args):
existing = {}
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
parents = ['router ospf {0}'.format(module.params['ospf'])]
if module.params['vrf'] != 'default':
parents.append('vrf {0}'.format(module.params['vrf']))
config = netcfg.get_section(parents)
for arg in args:
if arg not in ['ospf', 'vrf']:
existing[arg] = PARAM_TO_DEFAULT_KEYMAP.get(arg)
if config:
if module.params['vrf'] == 'default':
splitted_config = config.splitlines()
vrf_index = False
for index in range(0, len(splitted_config) - 1):
if 'vrf' in splitted_config[index].strip():
vrf_index = index
break
if vrf_index:
config = '\n'.join(splitted_config[0:vrf_index])
splitted_config = config.splitlines()
for line in splitted_config:
if 'passive' in line:
existing['passive_interface'] = True
elif 'router-id' in line:
existing['router_id'] = re.search(r'router-id (\S+)', line).group(1)
elif 'metric' in line:
existing['default_metric'] = re.search(r'default-metric (\S+)', line).group(1)
elif 'adjacency' in line:
log = re.search(r'log-adjacency-changes(?: (\S+))?', line).group(1)
if log:
existing['log_adjacency'] = log
else:
existing['log_adjacency'] = 'log'
elif 'auto' in line:
cost = re.search(r'auto-cost reference-bandwidth (\d+) (\S+)', line).group(1)
if 'Gbps' in line:
cost *= 1000
existing['auto_cost'] = str(cost)
elif 'timers throttle lsa' in line:
tmp = re.search(r'timers throttle lsa (\S+) (\S+) (\S+)', line)
existing['timer_throttle_lsa_start'] = tmp.group(1)
existing['timer_throttle_lsa_hold'] = tmp.group(2)
existing['timer_throttle_lsa_max'] = tmp.group(3)
elif 'timers throttle spf' in line:
tmp = re.search(r'timers throttle spf (\S+) (\S+) (\S+)', line)
existing['timer_throttle_spf_start'] = tmp.group(1)
existing['timer_throttle_spf_hold'] = tmp.group(2)
existing['timer_throttle_spf_max'] = tmp.group(3)
existing['vrf'] = module.params['vrf']
existing['ospf'] = module.params['ospf']
return existing
def apply_key_map(key_map, table):
    """Translate table's keys through key_map, keeping only entries whose
    key has a truthy mapping."""
    return {key_map[key]: table[key]
            for key in table
            if key_map.get(key)}
def get_timer_prd(key, proposed):
    """Return the proposed timer value for key, falling back to the
    platform default when the key is missing or its value is falsy."""
    value = proposed.get(key)
    return value if value else PARAM_TO_DEFAULT_KEYMAP.get(key)
def state_present(module, existing, proposed, candidate):
    """Build the CLI commands that move the device from `existing` to the
    `proposed` configuration and add them to `candidate` under the
    router-ospf (and optional vrf) parents."""
    commands = list()
    proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
    existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
    for key, value in proposed_commands.items():
        if value is True:
            commands.append(key)
        elif value is False:
            if key == 'passive-interface default':
                # Only negate passive-interface if it is actually set.
                if existing_commands.get(key):
                    commands.append('no {0}'.format(key))
            else:
                commands.append('no {0}'.format(key))
        elif value == 'default' or value == '':
            # Restore the default: plain 'no <cmd>' for log-adjacency,
            # otherwise negate the currently configured value.
            if key == 'log-adjacency-changes':
                commands.append('no {0}'.format(key))
            elif existing_commands.get(key):
                existing_value = existing_commands.get(key)
                commands.append('no {0} {1}'.format(key, existing_value))
        else:
            if key == 'timers throttle lsa':
                # The three lsa timer args collapse into one command; fill
                # unspecified positions with the device defaults.
                command = '{0} {1} {2} {3}'.format(
                    key,
                    get_timer_prd('timer_throttle_lsa_start', proposed),
                    get_timer_prd('timer_throttle_lsa_hold', proposed),
                    get_timer_prd('timer_throttle_lsa_max', proposed))
            elif key == 'timers throttle spf':
                command = '{0} {1} {2} {3}'.format(
                    key,
                    get_timer_prd('timer_throttle_spf_start', proposed),
                    get_timer_prd('timer_throttle_spf_hold', proposed),
                    get_timer_prd('timer_throttle_spf_max', proposed))
            elif key == 'log-adjacency-changes':
                # NOTE(review): `command` is only assigned for 'log' and
                # 'detail'; any other value would leave it stale/unbound
                # below.  The module's argument choices restrict input to
                # log/detail/default, so this is currently unreachable.
                if value == 'log':
                    command = key
                elif value == 'detail':
                    command = '{0} {1}'.format(key, value)
            elif key == 'auto-cost reference-bandwidth':
                # Values of 5+ digits are expressed in Gbps on the CLI.
                # NOTE(review): under Python 3, `/` is true division and
                # yields e.g. '40.0'; likely intended `int(value) // 1000`
                # — confirm against supported interpreter versions.
                if len(value) < 5:
                    command = '{0} {1} Mbps'.format(key, value)
                else:
                    value = str(int(value) / 1000)
                    command = '{0} {1} Gbps'.format(key, value)
            else:
                command = '{0} {1}'.format(key, value.lower())
            if command not in commands:
                commands.append(command)
    if commands:
        parents = ['router ospf {0}'.format(module.params['ospf'])]
        if module.params['vrf'] != 'default':
            parents.append('vrf {0}'.format(module.params['vrf']))
        candidate.add(commands, parents=parents)
def state_absent(module, existing, proposed, candidate):
    """Queue the commands that remove the OSPF VRF configuration.

    For the global VRF each configured setting is negated individually;
    a named VRF is removed wholesale with 'no vrf <name>'.
    """
    commands = []
    parents = ['router ospf {0}'.format(module.params['ospf'])]
    if module.params['vrf'] == 'default':
        existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
        for key, value in existing_commands.items():
            # BUG FIX: reset `command` on every iteration.  Previously it
            # kept its value from the prior pass (or was unbound on the
            # first), so a timers key already at its defaults could
            # re-append a stale command or raise UnboundLocalError.
            command = None
            if value and key != 'vrf':
                if key == 'passive-interface default':
                    command = 'no {0}'.format(key)
                elif key == 'timers throttle lsa':
                    # Negate only when any field differs from the default.
                    if (existing['timer_throttle_lsa_start'] !=
                            PARAM_TO_DEFAULT_KEYMAP.get('timer_throttle_lsa_start') or
                            existing['timer_throttle_lsa_hold'] !=
                            PARAM_TO_DEFAULT_KEYMAP.get('timer_throttle_lsa_hold') or
                            existing['timer_throttle_lsa_max'] !=
                            PARAM_TO_DEFAULT_KEYMAP.get('timer_throttle_lsa_max')):
                        command = 'no {0} {1} {2} {3}'.format(
                            key,
                            existing['timer_throttle_lsa_start'],
                            existing['timer_throttle_lsa_hold'],
                            existing['timer_throttle_lsa_max'])
                elif key == 'timers throttle spf':
                    if (existing['timer_throttle_spf_start'] !=
                            PARAM_TO_DEFAULT_KEYMAP.get('timer_throttle_spf_start') or
                            existing['timer_throttle_spf_hold'] !=
                            PARAM_TO_DEFAULT_KEYMAP.get('timer_throttle_spf_hold') or
                            existing['timer_throttle_spf_max'] !=
                            PARAM_TO_DEFAULT_KEYMAP.get('timer_throttle_spf_max')):
                        command = 'no {0} {1} {2} {3}'.format(
                            key,
                            existing['timer_throttle_spf_start'],
                            existing['timer_throttle_spf_hold'],
                            existing['timer_throttle_spf_max'])
                elif key == 'log-adjacency-changes':
                    command = 'no {0}'.format(key)
                elif key == 'auto-cost reference-bandwidth':
                    # Nothing to remove when already at the default.
                    if value != PARAM_TO_DEFAULT_KEYMAP.get('auto_cost'):
                        command = 'no {0}'.format(key)
                else:
                    existing_value = existing_commands.get(key)
                    command = 'no {0} {1}'.format(key, existing_value)
                if command and command not in commands:
                    commands.append(command)
    else:
        if (existing.get('vrf') and
                existing.get('vrf') == module.params['vrf']):
            commands = ['no vrf {0}'.format(module.params['vrf'])]
    if commands:
        candidate.add(commands, parents=parents)
def main():
    """Module entry point: compare existing vs proposed OSPF VRF state and
    push the resulting commands to the device via load_config()."""
    argument_spec = dict(
        vrf=dict(required=False, type='str', default='default'),
        ospf=dict(required=True, type='str'),
        router_id=dict(required=False, type='str'),
        default_metric=dict(required=False, type='str'),
        log_adjacency=dict(required=False, type='str', choices=['log', 'detail', 'default']),
        timer_throttle_lsa_start=dict(required=False, type='str'),
        timer_throttle_lsa_hold=dict(required=False, type='str'),
        timer_throttle_lsa_max=dict(required=False, type='str'),
        timer_throttle_spf_start=dict(required=False, type='str'),
        timer_throttle_spf_hold=dict(required=False, type='str'),
        timer_throttle_spf_max=dict(required=False, type='str'),
        auto_cost=dict(required=False, type='str'),
        passive_interface=dict(required=False, type='bool'),
        state=dict(choices=['present', 'absent'], default='present', required=False)
    )
    argument_spec.update(nxos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    result = dict(changed=False, warnings=warnings)
    state = module.params['state']
    args = PARAM_TO_COMMAND_KEYMAP.keys()
    existing = get_existing(module, args)
    # Only consider args the user actually supplied.
    proposed_args = dict((k, v) for k, v in module.params.items()
                         if v is not None and k in args)
    proposed = {}
    for key, value in proposed_args.items():
        if key != 'interface':
            # Normalize string 'true'/'false'/'default' inputs to native
            # bools / default values so the comparison with `existing`
            # is type-consistent.
            if str(value).lower() == 'true':
                value = True
            elif str(value).lower() == 'false':
                value = False
            elif str(value).lower() == 'default':
                value = PARAM_TO_DEFAULT_KEYMAP.get(key)
                if value is None:
                    value = 'default'
            # Only keep values that differ from the device state.
            if existing.get(key) != value:
                proposed[key] = value
    candidate = CustomNetworkConfig(indent=3)
    if state == 'present':
        state_present(module, existing, proposed, candidate)
    if state == 'absent' and existing:
        state_absent(module, existing, proposed, candidate)
    if candidate:
        candidate = candidate.items_text()
        load_config(module, candidate)
        result['changed'] = True
        result['commands'] = candidate
    else:
        result['commands'] = []
    module.exit_json(**result)
| gpl-3.0 |
RubenKelevra/rethinkdb | scripts/generate_rpc_templates.py | 31 | 6985 | #!/usr/bin/env python
# Copyright 2010-2014 RethinkDB, all rights reserved.
import sys
"""This script is used to generate the mailbox templates in
`rethinkdb/src/rpc/mailbox/typed.hpp`. It is meant to be run as follows
(assuming that the current directory is `rethinkdb/src/`):
$ ../scripts/generate_rpc_templates.py > rpc/mailbox/typed.hpp
"""
def ncsep(template, nargs):
    """Return `nargs` copies of `template` joined by ", ", with every '#'
    replaced by the copy's 0-based index.

    Uses range() instead of the Python-2-only xrange(): identical output
    under Python 2, and the helper also works under Python 3.
    """
    return ", ".join(template.replace("#", str(i)) for i in range(nargs))
def ncpre(template, nargs):
    """Return the concatenation of ", " + expanded `template` for each
    0-based index — a comma-PREfixed list suitable for appending to an
    existing C++ argument list.

    Uses range() instead of the Python-2-only xrange(): identical output
    under Python 2, and the helper also works under Python 3.
    """
    return "".join(", " + template.replace("#", str(i)) for i in range(nargs))
def generate_async_message_template(nargs):
    """Emit (via print) the C++ mailbox_t< void(arg0_t, ...) >
    specialization and the matching send() overload for arity `nargs`."""
    def csep(template):
        # Comma-separated expansion of `template` for this arity.
        return ncsep(template, nargs)
    def cpre(template):
        # Comma-prefixed expansion of `template` for this arity.
        return ncpre(template, nargs)
    mailbox_t_str = "mailbox_t< void(%s) >" % csep("arg#_t")
    print
    print "template<%s>" % csep("class arg#_t")
    print "class %s {" % mailbox_t_str
    # Nested write_impl_t: serializes the arguments into a message.
    print "    class write_impl_t : public mailbox_write_callback_t {"
    if nargs == 0:
        print "    public:"
        print "        write_impl_t() { }"
    else:
        print "    private:"
        for i in xrange(nargs):
            print "        const arg%d_t &arg%d;" % (i, i)
        print "    public:"
        if nargs == 1:
            # Single-argument constructors are `explicit` to avoid
            # implicit conversions in the generated C++.
            print "        explicit write_impl_t(%s) :" % csep("const arg#_t& _arg#")
        else:
            print "        write_impl_t(%s) :" % csep("const arg#_t& _arg#")
        print "            %s" % csep("arg#(_arg#)")
        print "        { }"
    if nargs == 0:
        print "        void write(DEBUG_VAR cluster_version_t cluster_version, write_message_t *) {"
    else:
        print "        void write(DEBUG_VAR cluster_version_t cluster_version, write_message_t *wm) {"
    print "            rassert(cluster_version == cluster_version_t::CLUSTER);"
    for i in xrange(nargs):
        print "            serialize<cluster_version_t::CLUSTER>(wm, arg%d);" % i
    print "        }"
    print "#ifdef ENABLE_MESSAGE_PROFILER"
    print "        const char *message_profiler_tag() const {"
    if nargs == 0:
        print "            return \"mailbox<>\";"
    else:
        print "            static const std::string tag = "
        print "                strprintf(\"mailbox<%s>\", %s);" % \
            (csep("%s"), csep("typeid(arg#_t).name()"))
        print "            return tag.c_str();"
    print "        }"
    print "#endif"
    print "    };"
    print
    # Nested read_impl_t: deserializes and dispatches to the callback.
    print "    class read_impl_t : public mailbox_read_callback_t {"
    print "    public:"
    print "        explicit read_impl_t(%s *_parent) : parent(_parent) { }" % mailbox_t_str
    if nargs == 0:
        print "        void read(UNUSED read_stream_t *stream, signal_t *interruptor) {"
    else:
        print "        void read(read_stream_t *stream, signal_t *interruptor) {"
    for i in xrange(nargs):
        print "            arg%d_t arg%d;" % (i, i)
        print "            %sres = deserialize<cluster_version_t::CLUSTER>(stream, &arg%d);" % ("archive_result_t " if i == 0 else "", i)
        print "            if (bad(res)) { throw fake_archive_exc_t(); }"
    print "            parent->fun(interruptor%s);" % cpre("std::move(arg#)")
    print "        }"
    print "    private:"
    print "        %s *parent;" % mailbox_t_str
    print "    };"
    print
    print "    read_impl_t reader;"
    print
    print "public:"
    print "    typedef mailbox_addr_t< void(%s) > address_t;" % csep("arg#_t")
    print
    print "    mailbox_t(mailbox_manager_t *manager,"
    print "              const std::function< void(signal_t *%s)> &f) :" % cpre("arg#_t")
    print "        reader(this), fun(f), mailbox(manager, &reader)"
    print "        { }"
    print
    print "    void begin_shutdown() {"
    print "        mailbox.begin_shutdown();"
    print "    }"
    print
    print "    address_t get_address() const {"
    print "        address_t a;"
    print "        a.addr = mailbox.get_address();"
    print "        return a;"
    print "    }"
    print
    print "private:"
    if nargs == 0:
        print "    friend void send(mailbox_manager_t*, address_t);"
    else:
        print "    template<%s>" % csep("class a#_t")
        print "    friend void send(mailbox_manager_t*,"
        print "                     typename mailbox_t< void(%s) >::address_t%s);" % (csep("a#_t"), cpre("const a#_t&"))
    print
    print "    std::function< void(signal_t *%s) > fun;" % cpre("arg#_t")
    print "    raw_mailbox_t mailbox;"
    print "};"
    print
    # Matching free function send() overload for this arity.
    if nargs == 0:
        print "inline"
    else:
        print "template<%s>" % csep("class arg#_t")
    print "void send(mailbox_manager_t *src,"
    print "          %s %s::address_t dest%s) {" % (("typename" if nargs > 0 else ""),
                                                    mailbox_t_str,
                                                    cpre("const arg#_t &arg#"))
    if nargs == 0:
        print "    %s::write_impl_t writer;" % mailbox_t_str
    else:
        print "    typename %s::write_impl_t writer(%s);" % (mailbox_t_str, csep("arg#"))
    print "    send(src, dest.addr, &writer);"
    print "}"
    print
# Script entry point: prints the complete typed.hpp header to stdout
# (file prologue, mailbox_addr_t, then all arities 0..14).
if __name__ == "__main__":
    print "// Copyright 2010-2014 RethinkDB, all rights reserved."
    print "#ifndef RPC_MAILBOX_TYPED_HPP_"
    print "#define RPC_MAILBOX_TYPED_HPP_"
    print
    print "/* This file is automatically generated by '%s'." % " ".join(sys.argv)
    print "Please modify '%s' instead of modifying this file.*/" % sys.argv[0]
    print
    print "#include <functional>"
    print
    print "#include \"containers/archive/versioned.hpp\""
    print "#include \"rpc/serialize_macros.hpp\""
    print "#include \"rpc/mailbox/mailbox.hpp\""
    print "#include \"rpc/semilattice/joins/macros.hpp\""
    print
    print "template <class> class mailbox_t;"
    print
    print "template <class T>"
    print "class mailbox_addr_t {"
    print "public:"
    print "    bool operator<(const mailbox_addr_t<T> &other) const {"
    print "        return addr < other.addr;"
    print "    }"
    print "    bool is_nil() const { return addr.is_nil(); }"
    print "    peer_id_t get_peer() const { return addr.get_peer(); }"
    print
    print "    friend class mailbox_t<T>;"
    print
    print "    RDB_MAKE_ME_SERIALIZABLE_1(mailbox_addr_t, addr);"
    print "    RDB_MAKE_ME_EQUALITY_COMPARABLE_1(mailbox_addr_t<T>, addr);"
    print
    print "private:"
    print "    friend void send(mailbox_manager_t *, mailbox_addr_t<void()>);"
    # One friend declaration per supported send() arity (1..14).
    for nargs in xrange(1, 15):
        print "    template <%s>" % ncsep("class a#_t", nargs)
        print "    friend void send(mailbox_manager_t *,"
        print "                     typename mailbox_t< void(%s) >::address_t%s);" % (ncsep("a#_t", nargs), ncpre("const a#_t&", nargs))
    print
    print "    raw_mailbox_t::address_t addr;"
    print "};"
    # Emit the mailbox_t specialization + send() for arities 0..14.
    for nargs in xrange(15):
        generate_async_message_template(nargs)
    print "#endif // RPC_MAILBOX_TYPED_HPP_"
| agpl-3.0 |
Quikling/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/basic/partition/manual/TEST.py | 9 | 2617 | #!/usr/bin/env python
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
PARTITION
"""
############################################################################
# Set up some globals, and import gptest
# [YOU DO NOT NEED TO CHANGE THESE]
#
import sys, unittest, os, string
# Resolve paths relative to this file so the suite can run from any cwd.
MYD = os.path.abspath(os.path.dirname(__file__))
mkpath = lambda *x: os.path.join(MYD, *x)
# Make the tinc framework (two directories up) importable.
UPD = os.path.abspath(mkpath('../..'))
if UPD not in sys.path:
    sys.path.append(UPD)
import gptest
from gptest import psql, shell
USER = os.environ.get("LOGNAME")
LEN = len(USER)
PATH = os.getcwd()
# Ensure PATH points at the partition test directory.
pos = PATH.find("partition")
if pos==-1:
    PATH += "/partition"
###########################################################################
# A Test class must inherit from gptest.GPTestCase
# [CREATE A CLASS FOR YOUR TESTS]
#
class partition(gptest.GPTestCase):
    """Partition-table stress tests: each case runs a .sql file through
    psql and diffs the output against the expected result file."""
    #def setUp(self):
    #    pass
    # def tearDown(self):
    #     pass
    def doTest(self, num, default=''):
        """Run query<num>.sql from this directory and verify its output."""
        # get file path to queryXX.sql
        file = mkpath('query%d.sql' % num)
        # run psql on file, and check result
        psql.runfile(file,default)
        self.checkResult(file)
    def doTestFile(self, filename, default=''):
        """Run an arbitrary .sql file from this directory and verify it."""
        file = mkpath(filename)
        psql.runfile(file,default)
        self.checkResult(file)
    def testQuery001(self):
        "Partition: Drop partition table with lots of child partitions 43k"
        self.doTestFile("partition_outofmemory.sql")
    def testQuery002(self):
        "Partition: Drop schema with lots of partitions 86k"
        self.doTestFile("partition_outofmemory2.sql")
    def testQuery003(self):
        "Partition: Drop schema with lots of partitions"
        self.doTestFile("partition_outofmemory3.sql")
    def testQuery004(self):
        "Partition: Drop lots of partitions for BE 5k"
        self.doTestFile("partition_outofmemory_BE.sql")
###########################################################################
# Try to run if user launched this script directly
# [YOU SHOULD NOT CHANGE THIS]
# Allow launching this suite directly; gptest discovers the class above.
if __name__ == '__main__':
    gptest.main()
| apache-2.0 |
luiseduardohdbackup/odoo | addons/purchase_requisition/__openerp__.py | 260 | 2424 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest for the Purchase Requisitions addon.
{
    'name': 'Purchase Requisitions',
    'version': '0.1',
    'author': 'OpenERP SA',
    'category': 'Purchase Management',
    'website': 'https://www.odoo.com/page/purchase',
    'description': """
This module allows you to manage your Purchase Requisition.
===========================================================
When a purchase order is created, you now have the opportunity to save the
related requisition. This new object will regroup and will allow you to easily
keep track and order all your purchase orders.
""",
    # Requires the base purchase addon to be installed first.
    'depends' : ['purchase'],
    'demo': ['purchase_requisition_demo.xml'],
    # Views, security rules, wizards, workflow and report definitions loaded
    # at module installation, in this order.
    'data': ['views/purchase_requisition.xml',
        'security/purchase_tender.xml',
        'wizard/purchase_requisition_partner_view.xml',
        'wizard/bid_line_qty_view.xml',
        'purchase_requisition_data.xml',
        'purchase_requisition_view.xml',
        'purchase_requisition_report.xml',
        'purchase_requisition_workflow.xml',
        'security/ir.model.access.csv','purchase_requisition_sequence.xml',
        'views/report_purchaserequisition.xml',
    ],
    'auto_install': False,
    # YAML test scenarios executed by the Odoo test runner.
    'test': [
        'test/purchase_requisition_users.yml',
        'test/purchase_requisition_demo.yml',
        'test/cancel_purchase_requisition.yml',
        'test/purchase_requisition.yml',
    ],
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
RockySteveJobs/python-for-android | python-modules/twisted/twisted/test/test_sip.py | 59 | 33079 | # -*- test-case-name: twisted.test.test_sip -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Session Initialization Protocol tests."""
from twisted.trial import unittest, util
from twisted.protocols import sip
from twisted.internet import defer, reactor, utils
from twisted.python.versions import Version
from twisted.test import proto_helpers
from twisted import cred
import twisted.cred.portal
import twisted.cred.checkers
from zope.interface import implements
# Raw SIP wire-format fixtures; LF is rewritten to CRLF at definition time.
# NOTE(review): the blank-line separator SIP requires between headers and
# body appears to have been stripped from these literals -- confirm against
# the upstream twisted.test.test_sip before relying on exact byte content.
# request, prefixed by random CRLFs
request1 = "\n\r\n\n\r" + """\
INVITE sip:foo SIP/2.0
From: mo
To: joe
Content-Length: 4
abcd""".replace("\n", "\r\n")
# request, no content-length
request2 = """INVITE sip:foo SIP/2.0
From: mo
To: joe
1234""".replace("\n", "\r\n")
# request, with garbage after
request3 = """INVITE sip:foo SIP/2.0
From: mo
To: joe
Content-Length: 4
1234
lalalal""".replace("\n", "\r\n")
# three requests
request4 = """INVITE sip:foo SIP/2.0
From: mo
To: joe
Content-Length: 0
INVITE sip:loop SIP/2.0
From: foo
To: bar
Content-Length: 4
abcdINVITE sip:loop SIP/2.0
From: foo
To: bar
Content-Length: 4
1234""".replace("\n", "\r\n")
# response, no content
response1 = """SIP/2.0 200 OK
From: foo
To:bar
Content-Length: 0
""".replace("\n", "\r\n")
# short header version (single-letter compact header names)
request_short = """\
INVITE sip:foo SIP/2.0
f: mo
t: joe
l: 4
abcd""".replace("\n", "\r\n")
# request from behind a NAT, requesting rport echo
request_natted = """\
INVITE sip:foo SIP/2.0
Via: SIP/2.0/UDP 10.0.0.1:5060;rport
""".replace("\n", "\r\n")
class TestRealm:
    """Minimal cred realm: hands out a null IContact avatar for any user."""
    def requestAvatar(self, avatarId, mind, *interfaces):
        # (interface, avatar, logout-callable) triple expected by Portal.
        return sip.IContact, None, lambda: None
class MessageParsingTestCase(unittest.TestCase):
    """Tests feeding raw wire data to L{sip.MessagesParser}."""

    def setUp(self):
        # Fully-parsed messages are appended to self.l by the parser callback.
        self.l = []
        self.parser = sip.MessagesParser(self.l.append)

    def feedMessage(self, message):
        # Deliver the data in one chunk; a subclass overrides this to
        # deliver it character by character.
        self.parser.dataReceived(message)
        self.parser.dataDone()

    def validateMessage(self, m, method, uri, headers, body):
        """Validate Requests."""
        self.assertEquals(m.method, method)
        self.assertEquals(m.uri.toString(), uri)
        self.assertEquals(m.headers, headers)
        self.assertEquals(m.body, body)
        self.assertEquals(m.finished, 1)

    def testSimple(self):
        # One request with an explicit Content-Length.
        l = self.l
        self.feedMessage(request1)
        self.assertEquals(len(l), 1)
        self.validateMessage(
            l[0], "INVITE", "sip:foo",
            {"from": ["mo"], "to": ["joe"], "content-length": ["4"]},
            "abcd")

    def testTwoMessages(self):
        # Two sequential requests; the second has no Content-Length header.
        l = self.l
        self.feedMessage(request1)
        self.feedMessage(request2)
        self.assertEquals(len(l), 2)
        self.validateMessage(
            l[0], "INVITE", "sip:foo",
            {"from": ["mo"], "to": ["joe"], "content-length": ["4"]},
            "abcd")
        self.validateMessage(l[1], "INVITE", "sip:foo",
            {"from": ["mo"], "to": ["joe"]},
            "1234")

    def testGarbage(self):
        # Trailing bytes beyond Content-Length are discarded.
        l = self.l
        self.feedMessage(request3)
        self.assertEquals(len(l), 1)
        self.validateMessage(
            l[0], "INVITE", "sip:foo",
            {"from": ["mo"], "to": ["joe"], "content-length": ["4"]},
            "1234")

    def testThreeInOne(self):
        # Three pipelined requests delivered as one datagram.
        l = self.l
        self.feedMessage(request4)
        self.assertEquals(len(l), 3)
        self.validateMessage(
            l[0], "INVITE", "sip:foo",
            {"from": ["mo"], "to": ["joe"], "content-length": ["0"]},
            "")
        self.validateMessage(
            l[1], "INVITE", "sip:loop",
            {"from": ["foo"], "to": ["bar"], "content-length": ["4"]},
            "abcd")
        self.validateMessage(
            l[2], "INVITE", "sip:loop",
            {"from": ["foo"], "to": ["bar"], "content-length": ["4"]},
            "1234")

    def testShort(self):
        # Compact (single-letter) header names expand to the long form.
        l = self.l
        self.feedMessage(request_short)
        self.assertEquals(len(l), 1)
        self.validateMessage(
            l[0], "INVITE", "sip:foo",
            {"from": ["mo"], "to": ["joe"], "content-length": ["4"]},
            "abcd")

    def testSimpleResponse(self):
        # Responses carry code/phrase instead of method/uri.
        l = self.l
        self.feedMessage(response1)
        self.assertEquals(len(l), 1)
        m = l[0]
        self.assertEquals(m.code, 200)
        self.assertEquals(m.phrase, "OK")
        self.assertEquals(
            m.headers,
            {"from": ["foo"], "to": ["bar"], "content-length": ["0"]})
        self.assertEquals(m.body, "")
        self.assertEquals(m.finished, 1)
class MessageParsingTestCase2(MessageParsingTestCase):
    """Re-run every parsing test, stressing the parser's internal
    buffering by delivering the data one character at a time."""

    def feedMessage(self, message):
        deliver = self.parser.dataReceived
        for ch in message:
            deliver(ch)
        self.parser.dataDone()
class MakeMessageTestCase(unittest.TestCase):
    """Serialization of Request/Response objects to wire format."""

    def testRequest(self):
        r = sip.Request("INVITE", "sip:foo")
        r.addHeader("foo", "bar")
        # Header names are capitalized on output.
        self.assertEquals(
            r.toString(),
            "INVITE sip:foo SIP/2.0\r\nFoo: bar\r\n\r\n")

    def testResponse(self):
        r = sip.Response(200, "OK")
        r.addHeader("foo", "bar")
        r.addHeader("Content-Length", "4")
        r.bodyDataReceived("1234")
        self.assertEquals(
            r.toString(),
            "SIP/2.0 200 OK\r\nFoo: bar\r\nContent-Length: 4\r\n\r\n1234")

    def testStatusCode(self):
        # Omitting the phrase fills in the standard one for the code.
        r = sip.Response(200)
        self.assertEquals(r.toString(), "SIP/2.0 200 OK\r\n\r\n")
class ViaTestCase(unittest.TestCase):
    """Parsing and serialization of SIP Via headers."""

    def checkRoundtrip(self, v):
        # toString -> parse -> toString must be stable.
        s = v.toString()
        self.assertEquals(s, sip.parseViaHeader(s).toString())

    def testExtraWhitespace(self):
        # NOTE(review): both literals are identical here; the second
        # presumably contained extra internal whitespace originally
        # (collapsed by extraction) -- confirm against upstream.
        v1 = sip.parseViaHeader('SIP/2.0/UDP 192.168.1.1:5060')
        v2 = sip.parseViaHeader('SIP/2.0/UDP 192.168.1.1:5060')
        self.assertEquals(v1.transport, v2.transport)
        self.assertEquals(v1.host, v2.host)
        self.assertEquals(v1.port, v2.port)

    def test_complex(self):
        """
        Test parsing a Via header with one of everything.
        """
        s = ("SIP/2.0/UDP first.example.com:4000;ttl=16;maddr=224.2.0.1"
             " ;branch=a7c6a8dlze (Example)")
        v = sip.parseViaHeader(s)
        self.assertEquals(v.transport, "UDP")
        self.assertEquals(v.host, "first.example.com")
        self.assertEquals(v.port, 4000)
        self.assertEquals(v.rport, None)
        self.assertEquals(v.rportValue, None)
        self.assertEquals(v.rportRequested, False)
        self.assertEquals(v.ttl, 16)
        self.assertEquals(v.maddr, "224.2.0.1")
        self.assertEquals(v.branch, "a7c6a8dlze")
        self.assertEquals(v.hidden, 0)
        self.assertEquals(v.toString(),
            "SIP/2.0/UDP first.example.com:4000"
            ";ttl=16;branch=a7c6a8dlze;maddr=224.2.0.1")
        self.checkRoundtrip(v)

    def test_simple(self):
        """
        Test parsing a simple Via header.
        """
        s = "SIP/2.0/UDP example.com;hidden"
        v = sip.parseViaHeader(s)
        self.assertEquals(v.transport, "UDP")
        self.assertEquals(v.host, "example.com")
        # Port defaults to 5060 when omitted.
        self.assertEquals(v.port, 5060)
        self.assertEquals(v.rport, None)
        self.assertEquals(v.rportValue, None)
        self.assertEquals(v.rportRequested, False)
        self.assertEquals(v.ttl, None)
        self.assertEquals(v.maddr, None)
        self.assertEquals(v.branch, None)
        self.assertEquals(v.hidden, True)
        self.assertEquals(v.toString(),
            "SIP/2.0/UDP example.com:5060;hidden")
        self.checkRoundtrip(v)

    def testSimpler(self):
        v = sip.Via("example.com")
        self.checkRoundtrip(v)

    def test_deprecatedRPort(self):
        """
        Setting rport to True is deprecated, but still produces a Via header
        with the expected properties.
        """
        v = sip.Via("foo.bar", rport=True)
        warnings = self.flushWarnings(
            offendingFunctions=[self.test_deprecatedRPort])
        self.assertEqual(len(warnings), 1)
        self.assertEqual(
            warnings[0]['message'],
            'rport=True is deprecated since Twisted 9.0.')
        self.assertEqual(
            warnings[0]['category'],
            DeprecationWarning)
        self.assertEqual(v.toString(), "SIP/2.0/UDP foo.bar:5060;rport")
        self.assertEqual(v.rport, True)
        self.assertEqual(v.rportRequested, True)
        self.assertEqual(v.rportValue, None)

    def test_rport(self):
        """
        An rport setting of None should insert the parameter with no value.
        """
        v = sip.Via("foo.bar", rport=None)
        self.assertEqual(v.toString(), "SIP/2.0/UDP foo.bar:5060;rport")
        self.assertEqual(v.rportRequested, True)
        self.assertEqual(v.rportValue, None)

    def test_rportValue(self):
        """
        An rport numeric setting should insert the parameter with the number
        value given.
        """
        v = sip.Via("foo.bar", rport=1)
        self.assertEqual(v.toString(), "SIP/2.0/UDP foo.bar:5060;rport=1")
        self.assertEqual(v.rportRequested, False)
        self.assertEqual(v.rportValue, 1)
        self.assertEqual(v.rport, 1)

    def testNAT(self):
        # received/rport parameters from a NAT traversal scenario.
        s = "SIP/2.0/UDP 10.0.0.1:5060;received=22.13.1.5;rport=12345"
        v = sip.parseViaHeader(s)
        self.assertEquals(v.transport, "UDP")
        self.assertEquals(v.host, "10.0.0.1")
        self.assertEquals(v.port, 5060)
        self.assertEquals(v.received, "22.13.1.5")
        self.assertEquals(v.rport, 12345)
        self.assertNotEquals(v.toString().find("rport=12345"), -1)

    def test_unknownParams(self):
        """
        Parsing and serializing Via headers with unknown parameters should work.
        """
        s = "SIP/2.0/UDP example.com:5060;branch=a12345b;bogus;pie=delicious"
        v = sip.parseViaHeader(s)
        self.assertEqual(v.toString(), s)
class URLTestCase(unittest.TestCase):
    """Parsing and serialization of SIP URLs."""

    def testRoundtrip(self):
        # parse -> toString must reproduce each input exactly.
        for url in [
            "sip:j.doe@big.com",
            "sip:j.doe:secret@big.com;transport=tcp",
            "sip:j.doe@big.com?subject=project",
            "sip:example.com",
            ]:
            self.assertEquals(sip.parseURL(url).toString(), url)

    def testComplex(self):
        # A URL exercising every recognized component at once.
        s = ("sip:user:pass@hosta:123;transport=udp;user=phone;method=foo;"
             "ttl=12;maddr=1.2.3.4;blah;goo=bar?a=b&c=d")
        url = sip.parseURL(s)
        for k, v in [("username", "user"), ("password", "pass"),
                     ("host", "hosta"), ("port", 123),
                     ("transport", "udp"), ("usertype", "phone"),
                     ("method", "foo"), ("ttl", 12),
                     ("maddr", "1.2.3.4"), ("other", ["blah", "goo=bar"]),
                     ("headers", {"a": "b", "c": "d"})]:
            self.assertEquals(getattr(url, k), v)
class ParseTestCase(unittest.TestCase):
    """Parsing of SIP address fields (display name, URL, parameters)."""

    def testParseAddress(self):
        for address, name, urls, params in [
            ('"A. G. Bell" <sip:foo@example.com>',
             "A. G. Bell", "sip:foo@example.com", {}),
            ("Anon <sip:foo@example.com>", "Anon", "sip:foo@example.com", {}),
            ("sip:foo@example.com", "", "sip:foo@example.com", {}),
            ("<sip:foo@example.com>", "", "sip:foo@example.com", {}),
            ("foo <sip:foo@example.com>;tag=bar;foo=baz", "foo",
             "sip:foo@example.com", {"tag": "bar", "foo": "baz"}),
            ]:
            gname, gurl, gparams = sip.parseAddress(address)
            self.assertEquals(name, gname)
            self.assertEquals(gurl.toString(), urls)
            self.assertEquals(gparams, params)
class DummyLocator:
    """ILocator stub resolving every logical URL to server.com:5060."""
    implements(sip.ILocator)
    def getAddress(self, logicalURL):
        return defer.succeed(sip.URL("server.com", port=5060))
class FailingLocator:
    """ILocator stub that fails every lookup with LookupError."""
    implements(sip.ILocator)
    def getAddress(self, logicalURL):
        return defer.fail(LookupError())
class ProxyTestCase(unittest.TestCase):
    """Request/response forwarding behavior of sip.Proxy."""

    def setUp(self):
        self.proxy = sip.Proxy("127.0.0.1")
        self.proxy.locator = DummyLocator()
        # Capture outgoing (destination, message) pairs instead of sending.
        self.sent = []
        self.proxy.sendMessage = lambda dest, msg: self.sent.append((dest, msg))

    def testRequestForward(self):
        # Forwarded requests gain the proxy's own Via at the front.
        r = sip.Request("INVITE", "sip:foo")
        r.addHeader("via", sip.Via("1.2.3.4").toString())
        r.addHeader("via", sip.Via("1.2.3.5").toString())
        r.addHeader("foo", "bar")
        r.addHeader("to", "<sip:joe@server.com>")
        r.addHeader("contact", "<sip:joe@1.2.3.5>")
        self.proxy.datagramReceived(r.toString(), ("1.2.3.4", 5060))
        self.assertEquals(len(self.sent), 1)
        dest, m = self.sent[0]
        self.assertEquals(dest.port, 5060)
        self.assertEquals(dest.host, "server.com")
        self.assertEquals(m.uri.toString(), "sip:foo")
        self.assertEquals(m.method, "INVITE")
        self.assertEquals(m.headers["via"],
            ["SIP/2.0/UDP 127.0.0.1:5060",
             "SIP/2.0/UDP 1.2.3.4:5060",
             "SIP/2.0/UDP 1.2.3.5:5060"])

    def testReceivedRequestForward(self):
        # When the source address differs from the top Via host, a
        # received= parameter is appended to that Via.
        r = sip.Request("INVITE", "sip:foo")
        r.addHeader("via", sip.Via("1.2.3.4").toString())
        r.addHeader("foo", "bar")
        r.addHeader("to", "<sip:joe@server.com>")
        r.addHeader("contact", "<sip:joe@1.2.3.4>")
        self.proxy.datagramReceived(r.toString(), ("1.1.1.1", 5060))
        dest, m = self.sent[0]
        self.assertEquals(m.headers["via"],
            ["SIP/2.0/UDP 127.0.0.1:5060",
             "SIP/2.0/UDP 1.2.3.4:5060;received=1.1.1.1"])

    def testResponseWrongVia(self):
        # first via must match proxy's address
        r = sip.Response(200)
        r.addHeader("via", sip.Via("foo.com").toString())
        self.proxy.datagramReceived(r.toString(), ("1.1.1.1", 5060))
        self.assertEquals(len(self.sent), 0)

    def testResponseForward(self):
        # The proxy strips its own Via and forwards to the next one.
        r = sip.Response(200)
        r.addHeader("via", sip.Via("127.0.0.1").toString())
        r.addHeader("via", sip.Via("client.com", port=1234).toString())
        self.proxy.datagramReceived(r.toString(), ("1.1.1.1", 5060))
        self.assertEquals(len(self.sent), 1)
        dest, m = self.sent[0]
        self.assertEquals((dest.host, dest.port), ("client.com", 1234))
        self.assertEquals(m.code, 200)
        self.assertEquals(m.headers["via"], ["SIP/2.0/UDP client.com:1234"])

    def testReceivedResponseForward(self):
        # A received= parameter overrides the Via host for routing.
        r = sip.Response(200)
        r.addHeader("via", sip.Via("127.0.0.1").toString())
        r.addHeader(
            "via",
            sip.Via("10.0.0.1", received="client.com").toString())
        self.proxy.datagramReceived(r.toString(), ("1.1.1.1", 5060))
        self.assertEquals(len(self.sent), 1)
        dest, m = self.sent[0]
        self.assertEquals((dest.host, dest.port), ("client.com", 5060))

    def testResponseToUs(self):
        # With only our own Via left, the response is delivered locally.
        r = sip.Response(200)
        r.addHeader("via", sip.Via("127.0.0.1").toString())
        l = []
        self.proxy.gotResponse = lambda *a: l.append(a)
        self.proxy.datagramReceived(r.toString(), ("1.1.1.1", 5060))
        self.assertEquals(len(l), 1)
        m, addr = l[0]
        self.assertEquals(len(m.headers.get("via", [])), 0)
        self.assertEquals(m.code, 200)

    def testLoop(self):
        # A request already bearing our Via is dropped (loop detection).
        r = sip.Request("INVITE", "sip:foo")
        r.addHeader("via", sip.Via("1.2.3.4").toString())
        r.addHeader("via", sip.Via("127.0.0.1").toString())
        self.proxy.datagramReceived(r.toString(), ("client.com", 5060))
        self.assertEquals(self.sent, [])

    def testCantForwardRequest(self):
        # Failed location lookup produces a 404 back to the sender.
        r = sip.Request("INVITE", "sip:foo")
        r.addHeader("via", sip.Via("1.2.3.4").toString())
        r.addHeader("to", "<sip:joe@server.com>")
        self.proxy.locator = FailingLocator()
        self.proxy.datagramReceived(r.toString(), ("1.2.3.4", 5060))
        self.assertEquals(len(self.sent), 1)
        dest, m = self.sent[0]
        self.assertEquals((dest.host, dest.port), ("1.2.3.4", 5060))
        self.assertEquals(m.code, 404)
        self.assertEquals(m.headers["via"], ["SIP/2.0/UDP 1.2.3.4:5060"])

    def testCantForwardResponse(self):
        pass

    #testCantForwardResponse.skip = "not implemented yet"
class RegistrationTestCase(unittest.TestCase):
    """REGISTER handling in sip.RegisterProxy with an in-memory registry."""

    def setUp(self):
        self.proxy = sip.RegisterProxy(host="127.0.0.1")
        self.registry = sip.InMemoryRegistry("bell.example.com")
        self.proxy.registry = self.proxy.locator = self.registry
        # Capture outgoing (destination, message) pairs instead of sending.
        self.sent = []
        self.proxy.sendMessage = lambda dest, msg: self.sent.append((dest, msg))
    # RegisterProxy instantiates DigestAuthorizer, which is deprecated.
    setUp = utils.suppressWarnings(setUp,
        util.suppress(category=DeprecationWarning,
            message=r'twisted.protocols.sip.DigestAuthorizer was deprecated'))

    def tearDown(self):
        # Cancel pending expiry timers so the reactor is left clean.
        for d, uri in self.registry.users.values():
            d.cancel()
        del self.proxy

    def register(self):
        r = sip.Request("REGISTER", "sip:bell.example.com")
        r.addHeader("to", "sip:joe@bell.example.com")
        r.addHeader("contact", "sip:joe@client.com:1234")
        r.addHeader("via", sip.Via("client.com").toString())
        self.proxy.datagramReceived(r.toString(), ("client.com", 5060))

    def unregister(self):
        # Contact "*" with Expires: 0 removes all bindings.
        r = sip.Request("REGISTER", "sip:bell.example.com")
        r.addHeader("to", "sip:joe@bell.example.com")
        r.addHeader("contact", "*")
        r.addHeader("via", sip.Via("client.com").toString())
        r.addHeader("expires", "0")
        self.proxy.datagramReceived(r.toString(), ("client.com", 5060))

    def testRegister(self):
        self.register()
        dest, m = self.sent[0]
        self.assertEquals((dest.host, dest.port), ("client.com", 5060))
        self.assertEquals(m.code, 200)
        self.assertEquals(m.headers["via"], ["SIP/2.0/UDP client.com:5060"])
        self.assertEquals(m.headers["to"], ["sip:joe@bell.example.com"])
        self.assertEquals(m.headers["contact"], ["sip:joe@client.com:5060"])
        # Tolerate timer jitter around the default 3600s expiry.
        self.failUnless(
            int(m.headers["expires"][0]) in (3600, 3601, 3599, 3598))
        self.assertEquals(len(self.registry.users), 1)
        dc, uri = self.registry.users["joe"]
        self.assertEquals(uri.toString(), "sip:joe@client.com:5060")
        d = self.proxy.locator.getAddress(sip.URL(username="joe",
            host="bell.example.com"))
        d.addCallback(lambda desturl : (desturl.host, desturl.port))
        d.addCallback(self.assertEquals, ('client.com', 5060))
        return d

    def testUnregister(self):
        self.register()
        self.unregister()
        dest, m = self.sent[1]
        self.assertEquals((dest.host, dest.port), ("client.com", 5060))
        self.assertEquals(m.code, 200)
        self.assertEquals(m.headers["via"], ["SIP/2.0/UDP client.com:5060"])
        self.assertEquals(m.headers["to"], ["sip:joe@bell.example.com"])
        self.assertEquals(m.headers["contact"], ["sip:joe@client.com:5060"])
        self.assertEquals(m.headers["expires"], ["0"])
        self.assertEquals(self.registry.users, {})

    def addPortal(self):
        # Attach a cred portal with a single known user to the proxy.
        r = TestRealm()
        p = cred.portal.Portal(r)
        c = cred.checkers.InMemoryUsernamePasswordDatabaseDontUse()
        c.addUser('userXname@127.0.0.1', 'passXword')
        p.registerChecker(c)
        self.proxy.portal = p

    def testFailedAuthentication(self):
        # Unauthenticated REGISTER against a portal yields 401.
        self.addPortal()
        self.register()
        self.assertEquals(len(self.registry.users), 0)
        self.assertEquals(len(self.sent), 1)
        dest, m = self.sent[0]
        self.assertEquals(m.code, 401)

    def test_basicAuthentication(self):
        """
        Test that registration with basic authentication suceeds.
        """
        self.addPortal()
        self.proxy.authorizers = self.proxy.authorizers.copy()
        self.proxy.authorizers['basic'] = sip.BasicAuthorizer()
        warnings = self.flushWarnings(
            offendingFunctions=[self.test_basicAuthentication])
        self.assertEqual(len(warnings), 1)
        self.assertEqual(
            warnings[0]['message'],
            "twisted.protocols.sip.BasicAuthorizer was deprecated in "
            "Twisted 9.0.0")
        self.assertEqual(
            warnings[0]['category'],
            DeprecationWarning)
        r = sip.Request("REGISTER", "sip:bell.example.com")
        r.addHeader("to", "sip:joe@bell.example.com")
        r.addHeader("contact", "sip:joe@client.com:1234")
        r.addHeader("via", sip.Via("client.com").toString())
        r.addHeader("authorization",
            "Basic " + "userXname:passXword".encode('base64'))
        self.proxy.datagramReceived(r.toString(), ("client.com", 5060))
        self.assertEquals(len(self.registry.users), 1)
        self.assertEquals(len(self.sent), 1)
        dest, m = self.sent[0]
        self.assertEquals(m.code, 200)

    def test_failedBasicAuthentication(self):
        """
        Failed registration with basic authentication results in an
        unauthorized error response.
        """
        self.addPortal()
        self.proxy.authorizers = self.proxy.authorizers.copy()
        self.proxy.authorizers['basic'] = sip.BasicAuthorizer()
        warnings = self.flushWarnings(
            offendingFunctions=[self.test_failedBasicAuthentication])
        self.assertEqual(len(warnings), 1)
        self.assertEqual(
            warnings[0]['message'],
            "twisted.protocols.sip.BasicAuthorizer was deprecated in "
            "Twisted 9.0.0")
        self.assertEqual(
            warnings[0]['category'],
            DeprecationWarning)
        r = sip.Request("REGISTER", "sip:bell.example.com")
        r.addHeader("to", "sip:joe@bell.example.com")
        r.addHeader("contact", "sip:joe@client.com:1234")
        r.addHeader("via", sip.Via("client.com").toString())
        # Deliberately wrong password.
        r.addHeader(
            "authorization", "Basic " + "userXname:password".encode('base64'))
        self.proxy.datagramReceived(r.toString(), ("client.com", 5060))
        self.assertEquals(len(self.registry.users), 0)
        self.assertEquals(len(self.sent), 1)
        dest, m = self.sent[0]
        self.assertEquals(m.code, 401)

    def testWrongDomainRegister(self):
        # REGISTER for a domain we do not serve is silently dropped.
        r = sip.Request("REGISTER", "sip:wrong.com")
        r.addHeader("to", "sip:joe@bell.example.com")
        r.addHeader("contact", "sip:joe@client.com:1234")
        r.addHeader("via", sip.Via("client.com").toString())
        self.proxy.datagramReceived(r.toString(), ("client.com", 5060))
        self.assertEquals(len(self.sent), 0)

    def testWrongToDomainRegister(self):
        # A To: header outside our domain is likewise dropped.
        r = sip.Request("REGISTER", "sip:bell.example.com")
        r.addHeader("to", "sip:joe@foo.com")
        r.addHeader("contact", "sip:joe@client.com:1234")
        r.addHeader("via", sip.Via("client.com").toString())
        self.proxy.datagramReceived(r.toString(), ("client.com", 5060))
        self.assertEquals(len(self.sent), 0)

    def testWrongDomainLookup(self):
        self.register()
        url = sip.URL(username="joe", host="foo.com")
        d = self.proxy.locator.getAddress(url)
        self.assertFailure(d, LookupError)
        return d

    def testNoContactLookup(self):
        self.register()
        url = sip.URL(username="jane", host="bell.example.com")
        d = self.proxy.locator.getAddress(url)
        self.assertFailure(d, LookupError)
        return d
class Client(sip.Base):
    """SIP endpoint that records incoming responses and fires a Deferred
    when one arrives."""

    def __init__(self):
        sip.Base.__init__(self)
        self.received = []
        self.deferred = defer.Deferred()

    def handle_response(self, response, addr):
        self.received.append(response)
        # NOTE(review): fires on every response; a second response would
        # call back an already-fired Deferred. The tests below only ever
        # expect one response -- confirm before reusing this class.
        self.deferred.callback(self.received)
class LiveTest(unittest.TestCase):
    """End-to-end registration over real UDP ports on the loopback."""

    def setUp(self):
        self.proxy = sip.RegisterProxy(host="127.0.0.1")
        self.registry = sip.InMemoryRegistry("bell.example.com")
        self.proxy.registry = self.proxy.locator = self.registry
        # Bind server and client to ephemeral loopback ports.
        self.serverPort = reactor.listenUDP(
            0, self.proxy, interface="127.0.0.1")
        self.client = Client()
        self.clientPort = reactor.listenUDP(
            0, self.client, interface="127.0.0.1")
        self.serverAddress = (self.serverPort.getHost().host,
                              self.serverPort.getHost().port)
    # RegisterProxy instantiates DigestAuthorizer, which is deprecated.
    setUp = utils.suppressWarnings(setUp,
        util.suppress(category=DeprecationWarning,
            message=r'twisted.protocols.sip.DigestAuthorizer was deprecated'))

    def tearDown(self):
        # Cancel expiry timers, then shut both ports down.
        for d, uri in self.registry.users.values():
            d.cancel()
        d1 = defer.maybeDeferred(self.clientPort.stopListening)
        d2 = defer.maybeDeferred(self.serverPort.stopListening)
        return defer.gatherResults([d1, d2])

    def testRegister(self):
        p = self.clientPort.getHost().port
        r = sip.Request("REGISTER", "sip:bell.example.com")
        r.addHeader("to", "sip:joe@bell.example.com")
        r.addHeader("contact", "sip:joe@127.0.0.1:%d" % p)
        r.addHeader("via", sip.Via("127.0.0.1", port=p).toString())
        self.client.sendMessage(
            sip.URL(host="127.0.0.1", port=self.serverAddress[1]), r)
        d = self.client.deferred
        def check(received):
            self.assertEquals(len(received), 1)
            r = received[0]
            self.assertEquals(r.code, 200)
        d.addCallback(check)
        return d

    def test_amoralRPort(self):
        """
        rport is allowed without a value, apparently because server
        implementors might be too stupid to check the received port
        against 5060 and see if they're equal, and because client
        implementors might be too stupid to bind to port 5060, or set a
        value on the rport parameter they send if they bind to another
        port.
        """
        p = self.clientPort.getHost().port
        r = sip.Request("REGISTER", "sip:bell.example.com")
        r.addHeader("to", "sip:joe@bell.example.com")
        r.addHeader("contact", "sip:joe@127.0.0.1:%d" % p)
        r.addHeader("via", sip.Via("127.0.0.1", port=p, rport=True).toString())
        warnings = self.flushWarnings(
            offendingFunctions=[self.test_amoralRPort])
        self.assertEqual(len(warnings), 1)
        self.assertEqual(
            warnings[0]['message'],
            'rport=True is deprecated since Twisted 9.0.')
        self.assertEqual(
            warnings[0]['category'],
            DeprecationWarning)
        self.client.sendMessage(sip.URL(host="127.0.0.1",
            port=self.serverAddress[1]),
            r)
        d = self.client.deferred
        def check(received):
            self.assertEquals(len(received), 1)
            r = received[0]
            self.assertEquals(r.code, 200)
        d.addCallback(check)
        return d
# Byte-exact digest-authentication fixtures (explicit \r at line ends) used
# by AuthorizationTestCase below; nonce/opaque match FakeDigestAuthorizer.
registerRequest = """
REGISTER sip:intarweb.us SIP/2.0\r
Via: SIP/2.0/UDP 192.168.1.100:50609\r
From: <sip:exarkun@intarweb.us:50609>\r
To: <sip:exarkun@intarweb.us:50609>\r
Contact: "exarkun" <sip:exarkun@192.168.1.100:50609>\r
Call-ID: 94E7E5DAF39111D791C6000393764646@intarweb.us\r
CSeq: 9898 REGISTER\r
Expires: 500\r
User-Agent: X-Lite build 1061\r
Content-Length: 0\r
\r
"""
# Expected 401 challenge the proxy must emit for the request above.
challengeResponse = """\
SIP/2.0 401 Unauthorized\r
Via: SIP/2.0/UDP 192.168.1.100:50609;received=127.0.0.1;rport=5632\r
To: <sip:exarkun@intarweb.us:50609>\r
From: <sip:exarkun@intarweb.us:50609>\r
Call-ID: 94E7E5DAF39111D791C6000393764646@intarweb.us\r
CSeq: 9898 REGISTER\r
WWW-Authenticate: Digest nonce="92956076410767313901322208775",opaque="1674186428",qop-options="auth",algorithm="MD5",realm="intarweb.us"\r
\r
"""
# Follow-up REGISTER carrying the digest answer to that challenge.
authRequest = """\
REGISTER sip:intarweb.us SIP/2.0\r
Via: SIP/2.0/UDP 192.168.1.100:50609\r
From: <sip:exarkun@intarweb.us:50609>\r
To: <sip:exarkun@intarweb.us:50609>\r
Contact: "exarkun" <sip:exarkun@192.168.1.100:50609>\r
Call-ID: 94E7E5DAF39111D791C6000393764646@intarweb.us\r
CSeq: 9899 REGISTER\r
Expires: 500\r
Authorization: Digest username="exarkun",realm="intarweb.us",nonce="92956076410767313901322208775",response="4a47980eea31694f997369214292374b",uri="sip:intarweb.us",algorithm=MD5,opaque="1674186428"\r
User-Agent: X-Lite build 1061\r
Content-Length: 0\r
\r
"""
# Expected 200 OK completing the authenticated registration.
okResponse = """\
SIP/2.0 200 OK\r
Via: SIP/2.0/UDP 192.168.1.100:50609;received=127.0.0.1;rport=5632\r
To: <sip:exarkun@intarweb.us:50609>\r
From: <sip:exarkun@intarweb.us:50609>\r
Call-ID: 94E7E5DAF39111D791C6000393764646@intarweb.us\r
CSeq: 9899 REGISTER\r
Contact: sip:exarkun@127.0.0.1:5632\r
Expires: 3600\r
Content-Length: 0\r
\r
"""
class FakeDigestAuthorizer(sip.DigestAuthorizer):
    """Digest authorizer with deterministic nonce and opaque values so the
    challenge/response fixtures above can be matched byte-for-byte."""

    _NONCE = '92956076410767313901322208775'
    _OPAQUE = '1674186428'

    def generateNonce(self):
        return self._NONCE

    def generateOpaque(self):
        return self._OPAQUE
class FakeRegistry(sip.InMemoryRegistry):
    """Make sure expiration is always seen to be 3600.
    Otherwise slow reactors fail tests incorrectly.
    """
    def _cbReg(self, reg):
        # Allow up to 2 seconds of reactor lag before declaring failure.
        if 3600 < reg.secondsToExpiry or reg.secondsToExpiry < 3598:
            raise RuntimeError(
                "bad seconds to expire: %s" % reg.secondsToExpiry)
        reg.secondsToExpiry = 3600
        return reg

    def getRegistrationInfo(self, uri):
        d = sip.InMemoryRegistry.getRegistrationInfo(self, uri)
        return d.addCallback(self._cbReg)

    def registerAddress(self, domainURL, logicalURL, physicalURL):
        d = sip.InMemoryRegistry.registerAddress(
            self, domainURL, logicalURL, physicalURL)
        return d.addCallback(self._cbReg)
class AuthorizationTestCase(unittest.TestCase):
    """Full digest challenge/response round trip against byte-exact
    fixtures, using the deterministic authorizer and registry above."""

    def setUp(self):
        self.proxy = sip.RegisterProxy(host="intarweb.us")
        self.proxy.authorizers = self.proxy.authorizers.copy()
        self.proxy.authorizers['digest'] = FakeDigestAuthorizer()
        self.registry = FakeRegistry("intarweb.us")
        self.proxy.registry = self.proxy.locator = self.registry
        # Fake transport records every write for byte-level comparison.
        self.transport = proto_helpers.FakeDatagramTransport()
        self.proxy.transport = self.transport
        r = TestRealm()
        p = cred.portal.Portal(r)
        c = cred.checkers.InMemoryUsernamePasswordDatabaseDontUse()
        c.addUser('exarkun@intarweb.us', 'password')
        p.registerChecker(c)
        self.proxy.portal = p
    # RegisterProxy instantiates DigestAuthorizer, which is deprecated.
    setUp = utils.suppressWarnings(setUp,
        util.suppress(category=DeprecationWarning,
            message=r'twisted.protocols.sip.DigestAuthorizer was deprecated'))

    def tearDown(self):
        # Cancel pending expiry timers so the reactor is left clean.
        for d, uri in self.registry.users.values():
            d.cancel()
        del self.proxy

    def testChallenge(self):
        # Unauthenticated REGISTER -> byte-exact 401 challenge.
        self.proxy.datagramReceived(registerRequest, ("127.0.0.1", 5632))
        self.assertEquals(
            self.transport.written[-1],
            ((challengeResponse, ("127.0.0.1", 5632)))
        )
        self.transport.written = []
        # Authenticated retry -> byte-exact 200 OK.
        self.proxy.datagramReceived(authRequest, ("127.0.0.1", 5632))
        self.assertEquals(
            self.transport.written[-1],
            ((okResponse, ("127.0.0.1", 5632)))
        )
    # Silence the deprecation warnings raised by the digest machinery.
    testChallenge.suppress = [
        util.suppress(
            category=DeprecationWarning,
            message=r'twisted.protocols.sip.DigestAuthorizer was deprecated'),
        util.suppress(
            category=DeprecationWarning,
            message=r'twisted.protocols.sip.DigestedCredentials was deprecated'),
        util.suppress(
            category=DeprecationWarning,
            message=r'twisted.protocols.sip.DigestCalcHA1 was deprecated'),
        util.suppress(
            category=DeprecationWarning,
            message=r'twisted.protocols.sip.DigestCalcResponse was deprecated')]
class DeprecationTests(unittest.TestCase):
    """
    Tests for deprecation of obsolete components of L{twisted.protocols.sip}.
    """
    # All of these were deprecated in the same release.
    _version = Version("Twisted", 9, 0, 0)

    def _assertDeprecated(self, *call):
        """Assert that invoking ``call`` emits the Twisted 9.0.0
        deprecation warning."""
        self.callDeprecated(self._version, *call)

    def test_deprecatedDigestCalcHA1(self):
        """
        L{sip.DigestCalcHA1} is deprecated.
        """
        self._assertDeprecated(sip.DigestCalcHA1, '', '', '', '', '', '')

    def test_deprecatedDigestCalcResponse(self):
        """
        L{sip.DigestCalcResponse} is deprecated.
        """
        self._assertDeprecated(
            sip.DigestCalcResponse, '', '', '', '', '', '', '', '')

    def test_deprecatedBasicAuthorizer(self):
        """
        L{sip.BasicAuthorizer} is deprecated.
        """
        self._assertDeprecated(sip.BasicAuthorizer)

    def test_deprecatedDigestAuthorizer(self):
        """
        L{sip.DigestAuthorizer} is deprecated.
        """
        self._assertDeprecated(sip.DigestAuthorizer)

    def test_deprecatedDigestedCredentials(self):
        """
        L{sip.DigestedCredentials} is deprecated.
        """
        self._assertDeprecated(sip.DigestedCredentials, '', {}, {})
| apache-2.0 |
GdZ/scriptfile | software/googleAppEngine/google/appengine/_internal/django/core/management/commands/syncdb.py | 23 | 8047 | from optparse import make_option
import sys
from google.appengine._internal.django.conf import settings
from google.appengine._internal.django.core.management.base import NoArgsCommand
from google.appengine._internal.django.core.management.color import no_style
from google.appengine._internal.django.core.management.sql import custom_sql_for_model, emit_post_sync_signal
from google.appengine._internal.django.db import connections, router, transaction, models, DEFAULT_DB_ALIAS
from google.appengine._internal.django.utils.datastructures import SortedDict
from google.appengine._internal.django.utils.importlib import import_module
class Command(NoArgsCommand):
    # Implements `manage.py syncdb`: creates the database tables for every
    # installed app whose tables do not exist yet, runs the apps' custom SQL,
    # builds indexes, and loads initial_data fixtures.
    # NOTE(review): Python 2-only syntax throughout (`except X, e`, print
    # statements); this is vendored Django and must stay byte-compatible.
    option_list = NoArgsCommand.option_list + (
        make_option('--noinput', action='store_false', dest='interactive', default=True,
            help='Tells Django to NOT prompt the user for input of any kind.'),
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
                'Defaults to the "default" database.'),
    )
    help = "Create the database tables for all apps in INSTALLED_APPS whose tables haven't already been created."

    def handle_noargs(self, **options):
        """Synchronize the chosen database with INSTALLED_APPS.

        Creates missing tables (with pending foreign-key references resolved
        as models become available), emits post_syncdb, installs each new
        model's custom SQL and indexes, then loads initial_data fixtures.
        """
        verbosity = int(options.get('verbosity', 1))
        interactive = options.get('interactive')
        show_traceback = options.get('traceback', False)

        # Stealth option -- 'load_initial_data' is used by the testing setup
        # process to disable initial fixture loading.
        load_initial_data = options.get('load_initial_data', True)

        self.style = no_style()

        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        for app_name in settings.INSTALLED_APPS:
            try:
                import_module('.management', app_name)
            except ImportError, exc:
                # This is slightly hackish. We want to ignore ImportErrors
                # if the "management" module itself is missing -- but we don't
                # want to ignore the exception if the management module exists
                # but raises an ImportError for some reason. The only way we
                # can do this is to check the text of the exception. Note that
                # we're a bit broad in how we check the text, because different
                # Python implementations may not use the same text.
                # CPython uses the text "No module named management"
                # PyPy uses "No module named myproject.myapp.management"
                msg = exc.args[0]
                if not msg.startswith('No module named') or 'management' not in msg:
                    raise

        db = options.get('database', DEFAULT_DB_ALIAS)
        connection = connections[db]
        cursor = connection.cursor()

        # Get a list of already installed *models* so that references work right.
        tables = connection.introspection.table_names()
        seen_models = connection.introspection.installed_models(tables)
        created_models = set()
        # Maps a referenced model -> list of FK references waiting for its
        # table to exist; flushed once the target model has been seen.
        pending_references = {}

        # Build the manifest of apps and models that are to be synchronized
        all_models = [
            (app.__name__.split('.')[-2],
                [m for m in models.get_models(app, include_auto_created=True)
                if router.allow_syncdb(db, m)])
            for app in models.get_apps()
        ]

        def model_installed(model):
            # True when neither the model's table nor its auto-created
            # (m2m through) table already exists in the database.
            opts = model._meta
            converter = connection.introspection.table_name_converter
            return not ((converter(opts.db_table) in tables) or
                (opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))

        manifest = SortedDict(
            (app_name, filter(model_installed, model_list))
            for app_name, model_list in all_models
        )

        # Create the tables for each model
        for app_name, model_list in manifest.items():
            for model in model_list:
                # Create the model's database table, if it doesn't already exist.
                if verbosity >= 2:
                    print "Processing %s.%s model" % (app_name, model._meta.object_name)
                sql, references = connection.creation.sql_create_model(model, self.style, seen_models)
                seen_models.add(model)
                created_models.add(model)
                for refto, refs in references.items():
                    pending_references.setdefault(refto, []).extend(refs)
                    if refto in seen_models:
                        sql.extend(connection.creation.sql_for_pending_references(refto, self.style, pending_references))
                sql.extend(connection.creation.sql_for_pending_references(model, self.style, pending_references))
                if verbosity >= 1 and sql:
                    print "Creating table %s" % model._meta.db_table
                for statement in sql:
                    cursor.execute(statement)
                tables.append(connection.introspection.table_name_converter(model._meta.db_table))

        transaction.commit_unless_managed(using=db)

        # Send the post_syncdb signal, so individual apps can do whatever they need
        # to do at this point.
        emit_post_sync_signal(created_models, verbosity, interactive, db)

        # The connection may have been closed by a syncdb handler.
        cursor = connection.cursor()

        # Install custom SQL for the app (but only if this
        # is a model we've just created)
        for app_name, model_list in manifest.items():
            for model in model_list:
                if model in created_models:
                    custom_sql = custom_sql_for_model(model, self.style, connection)
                    if custom_sql:
                        if verbosity >= 1:
                            print "Installing custom SQL for %s.%s model" % (app_name, model._meta.object_name)
                        try:
                            for sql in custom_sql:
                                cursor.execute(sql)
                        except Exception, e:
                            # Best-effort: report and roll back, but keep
                            # syncing the remaining models.
                            sys.stderr.write("Failed to install custom SQL for %s.%s model: %s\n" % (app_name, model._meta.object_name, e))
                            if show_traceback:
                                import traceback
                                traceback.print_exc()
                            transaction.rollback_unless_managed(using=db)
                        else:
                            transaction.commit_unless_managed(using=db)
                    else:
                        if verbosity >= 2:
                            print "No custom SQL for %s.%s model" % (app_name, model._meta.object_name)

        # Install SQL indicies for all newly created models
        for app_name, model_list in manifest.items():
            for model in model_list:
                if model in created_models:
                    index_sql = connection.creation.sql_indexes_for_model(model, self.style)
                    if index_sql:
                        if verbosity >= 1:
                            print "Installing index for %s.%s model" % (app_name, model._meta.object_name)
                        try:
                            for sql in index_sql:
                                cursor.execute(sql)
                        except Exception, e:
                            sys.stderr.write("Failed to install index for %s.%s model: %s\n" % (app_name, model._meta.object_name, e))
                            transaction.rollback_unless_managed(using=db)
                        else:
                            transaction.commit_unless_managed(using=db)

        # Load initial_data fixtures (unless that has been disabled)
        if load_initial_data:
            from google.appengine._internal.django.core.management import call_command
            call_command('loaddata', 'initial_data', verbosity=verbosity, database=db)
| mit |
ayumilong/rethinkdb | test/common/unit.py | 28 | 2161 | #!/usr/bin/env python
# Copyright 2014-2015 RethinkDB, all rights reserved.
import collections, os, subprocess, sys
import test_framework, utils
class AllUnitTests(test_framework.Test):
    """Test-tree node that expands into one UnitTest per gtest group.

    The groups are discovered by running the `rethinkdb-unittest` binary
    with --gtest_list_tests.
    """

    def __init__(self, filters=None):
        """Create the node.

        filters - optional list of filters applied when the tree is built
                  in configure().  (Fixed: previously a mutable default
                  argument `filters=[]` was shared across instances.)
        """
        super(AllUnitTests, self).__init__()
        self.filters = list(filters) if filters else []
        self.configured = False
        self.tests = None

    def filter(self, filter):
        # Filters are only accumulated here; the tree is actually pruned
        # when configure() expands it.
        return AllUnitTests(self.filters + [filter])

    def configure(self, conf):
        """Locate the unit-test binary and build a TestTree of its groups.

        Returns an empty tree (after a warning on stderr) when the binary
        is missing or not executable.
        """
        unit_executable = os.path.join(conf['BUILD_DIR'], "rethinkdb-unittest")
        if not os.access(unit_executable, os.X_OK):
            sys.stderr.write('Warning: no useable rethinkdb-unittest executable at: %s\n' % unit_executable)
            return test_framework.TestTree()
        output = subprocess.check_output([unit_executable, "--gtest_list_tests"])
        # gtest prints "Group." header lines followed by indented test names.
        group_name = None
        # Renamed from `dict`, which shadowed the builtin.
        groups = collections.defaultdict(list)
        for line in output.split("\n"):
            if not line:
                continue
            elif line[-1] == '.':
                group_name = line[:-1]
            else:
                groups[group_name].append(line.strip())
        tests = test_framework.TestTree(
            (group, UnitTest(unit_executable, group, tests))
            for group, tests in groups.iteritems())
        # Renamed loop variable from `filter` (shadowed the builtin).
        for test_filter in self.filters:
            tests = tests.filter(test_filter)
        return tests
class UnitTest(test_framework.Test):
    """A single gtest group, or one individual test within a group."""

    def __init__(self, unit_executable, test, child_tests=None):
        """Create the test node.

        unit_executable - path to the rethinkdb-unittest binary
        test            - gtest group name, or "Group.TestName" for a leaf
        child_tests     - names of the individual tests in this group;
                          empty/None for a leaf.  (Fixed: previously a
                          mutable default argument `child_tests=[]`.)
        """
        super(UnitTest, self).__init__()
        self.unit_executable = unit_executable
        self.test = test
        self.child_tests = list(child_tests) if child_tests else []

    def run(self):
        """Run the group (or single test) via gtest's --gtest_filter."""
        gtest_filter = self.test
        if self.child_tests:
            # A whole group: match every test under it.
            gtest_filter = gtest_filter + ".*"
        subprocess.check_call([self.unit_executable, "--gtest_filter=" + gtest_filter])

    def filter(self, filter):
        """Prune to the requested children.

        Returns self (or None) for a leaf / uniform filter, otherwise a
        filtered TestTree of per-child UnitTests.
        """
        if filter.all_same() or not self.child_tests:
            return self if filter.match() else None
        tests = test_framework.TestTree((
            (child, UnitTest(self.unit_executable, self.test + "." + child))
            for child in self.child_tests))
        return tests.filter(filter)
| agpl-3.0 |
wrobell/libms5803 | ms5803/glue.py | 1 | 1493 | #
# libms5803 - MS5803 pressure sensor library
#
# Copyright (C) 2014-2016 by Artur Wroblewski <wrobell@pld-linux.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from _ms5803 import ffi, lib
class Sensor(object):
    """
    Communication interface for an MS5803 pressure sensor on an I2C bus.
    """
    def __init__(self, f_dev, address):
        """
        Initialize the sensor and let the C library read its calibration
        coefficients.

        :param f_dev: I2C device filename, i.e. /dev/i2c-0.
        :param address: I2C device address, i.e. 0x77.
        """
        lib.ms5803_init(f_dev.encode(), address)
        # Output parameters for the C call, allocated once and reused by
        # every read().
        self._p_value = ffi.new('int32_t *')
        self._t_value = ffi.new('int32_t *')

    def read(self):
        """
        Return a (pressure, temperature) pair read from the sensor.
        """
        lib.ms5803_read(self._p_value, self._t_value)
        return self._p_value[0], self._t_value[0]
# vim: sw=4:et:ai
| gpl-3.0 |
kotamat/vundle | sparkup/vim/ftplugin/html/sparkup.py | 6 | 35770 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
version = "0.1.4"
import getopt
import sys
import re
# =============================================================================
def iteritems(obj):
    """Return the (key, value) pairs of a mapping, portably.

    Dispatches to ``obj.iteritems()`` on Python 2 and ``obj.items()`` on
    Python 3 so callers can stay version-agnostic.
    """
    # sys.version_info is the robust way to test the major version;
    # the previous check inspected sys.version[0], the first character of
    # a human-readable string, which is fragile.
    if sys.version_info[0] == 2:
        return obj.iteritems()
    else:
        return obj.items()
class Dialect:
    # Base class for output dialects.  Subclasses override these class-level
    # tables to control how abbreviations are expanded.
    shortcuts = {}   # abbreviation -> element-definition overrides
    synonyms = {}    # alternative name -> canonical shortcut/tag name
    required = {}    # tag name -> attributes always added to that tag
    short_tags = ()  # tags rendered as self-closing (e.g. <br />)
class XmlDialect(Dialect):
    # Generic XML dialect: no HTML-specific shortcuts, synonyms, required
    # attributes or self-closing tags are assumed.
    shortcuts = {}
    synonyms = {}
    short_tags = ()
    required = {}
class HtmlDialect(Dialect):
    # HTML dialect: the expansion tables used when the "xml" option is off.
    # TODO: the indentation in snippets should also be based on the user's
    # indentation configuration
    shortcuts = {
        # --- Internet Explorer conditional comments -------------------------
        'cc:ie': {
            'opening_tag': '<!--[if IE]>',
            'closing_tag': '<![endif]-->'},
        'cc:ie6': {
            'opening_tag': '<!--[if lte IE 6]>',
            'closing_tag': '<![endif]-->'},
        'cc:ie7': {
            'opening_tag': '<!--[if lte IE 7]>',
            'closing_tag': '<![endif]-->'},
        'cc:noie': {
            'opening_tag': '<!--[if !IE]><!-->',
            'closing_tag': '<!--<![endif]-->'},
        'php:t': {
            'expand': True,
            'opening_tag': '<?php',
            'closing_tag': '?>',
        },
        # --- Full-document skeletons, one per doctype -----------------------
        'html:4t': {
            'expand': True,
            'opening_tag':
                '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n' +
                '<html lang="en">\n' +
                '<head>\n' +
                ' ' + '<meta http-equiv="Content-Type" content="text/html;charset=UTF-8" />\n' +
                ' ' + '<title></title>\n' +
                '</head>\n' +
                '<body>',
            'closing_tag':
                '</body>\n' +
                '</html>'},
        'html:4s': {
            'expand': True,
            'opening_tag':
                '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n' +
                '<html lang="en">\n' +
                '<head>\n' +
                ' ' + '<meta http-equiv="Content-Type" content="text/html;charset=UTF-8" />\n' +
                ' ' + '<title></title>\n' +
                '</head>\n' +
                '<body>',
            'closing_tag':
                '</body>\n' +
                '</html>'},
        'html:xt': {
            'expand': True,
            'opening_tag':
                '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n' +
                '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">\n' +
                '<head>\n' +
                ' ' + '<meta http-equiv="Content-Type" content="text/html;charset=UTF-8" />\n' +
                ' ' + '<title></title>\n' +
                '</head>\n' +
                '<body>',
            'closing_tag':
                '</body>\n' +
                '</html>'},
        'html:xs': {
            'expand': True,
            'opening_tag':
                '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n' +
                '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">\n' +
                '<head>\n' +
                ' ' + '<meta http-equiv="Content-Type" content="text/html;charset=UTF-8" />\n' +
                ' ' + '<title></title>\n' +
                '</head>\n' +
                '<body>',
            'closing_tag':
                '</body>\n' +
                '</html>'},
        'html:xxs': {
            'expand': True,
            'opening_tag':
                '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n' +
                '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">\n' +
                '<head>\n' +
                ' ' + '<meta http-equiv="Content-Type" content="text/html;charset=UTF-8" />\n' +
                ' ' + '<title></title>\n' +
                '</head>\n' +
                '<body>',
            'closing_tag':
                '</body>\n' +
                '</html>'},
        'html:5': {
            'expand': True,
            'opening_tag':
                '<!DOCTYPE html>\n' +
                '<html lang="en">\n' +
                '<head>\n' +
                ' ' + '<meta charset="UTF-8">\n' +
                ' ' + '<title></title>\n' +
                '</head>\n' +
                '<body>',
            'closing_tag':
                '</body>\n' +
                '</html>'},
        # --- <input> variants ------------------------------------------------
        'input:button': {
            'name': 'input',
            'attributes': { 'class': 'button', 'type': 'button', 'name': '', 'value': '' }
        },
        'input:password': {
            'name': 'input',
            'attributes': { 'class': 'text password', 'type': 'password', 'name': '', 'value': '' }
        },
        'input:radio': {
            'name': 'input',
            'attributes': { 'class': 'radio', 'type': 'radio', 'name': '', 'value': '' }
        },
        'input:checkbox': {
            'name': 'input',
            'attributes': { 'class': 'checkbox', 'type': 'checkbox', 'name': '', 'value': '' }
        },
        'input:file': {
            'name': 'input',
            'attributes': { 'class': 'file', 'type': 'file', 'name': '', 'value': '' }
        },
        'input:text': {
            'name': 'input',
            'attributes': { 'class': 'text', 'type': 'text', 'name': '', 'value': '' }
        },
        'input:submit': {
            'name': 'input',
            'attributes': { 'class': 'submit', 'type': 'submit', 'value': '' }
        },
        'input:hidden': {
            'name': 'input',
            'attributes': { 'type': 'hidden', 'name': '', 'value': '' }
        },
        # --- <script> variants -----------------------------------------------
        'script:src': {
            'name': 'script',
            'attributes': { 'src': '' }
        },
        'script:jquery': {
            'name': 'script',
            'attributes': { 'src': 'http://ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js' }
        },
        'script:jquery2': {
            'name': 'script',
            'attributes': { 'src': 'http://ajax.googleapis.com/ajax/libs/jquery/2.0.3/jquery.min.js' }
        },
        'script:jsapi': {
            'name': 'script',
            'attributes': { 'src': 'http://www.google.com/jsapi' }
        },
        'script:jsapix': {
            'name': 'script',
            'text': '\n google.load("jquery", "1.3.2");\n google.setOnLoadCallback(function() {\n \n });\n'
        },
        # --- <link> variants -------------------------------------------------
        'link:css': {
            'name': 'link',
            'attributes': { 'rel': 'stylesheet', 'type': 'text/css', 'href': '', 'media': 'all' },
        },
        'link:print': {
            'name': 'link',
            'attributes': { 'rel': 'stylesheet', 'type': 'text/css', 'href': '', 'media': 'print' },
        },
        'link:favicon': {
            'name': 'link',
            'attributes': { 'rel': 'shortcut icon', 'type': 'image/x-icon', 'href': '' },
        },
        'link:touch': {
            'name': 'link',
            'attributes': { 'rel': 'apple-touch-icon', 'href': '' },
        },
        'link:rss': {
            'name': 'link',
            'attributes': { 'rel': 'alternate', 'type': 'application/rss+xml', 'title': 'RSS', 'href': '' },
        },
        'link:atom': {
            'name': 'link',
            'attributes': { 'rel': 'alternate', 'type': 'application/atom+xml', 'title': 'Atom', 'href': '' },
        },
        # --- <meta> / <form> variants ----------------------------------------
        'meta:ie7': {
            'name': 'meta',
            'attributes': { 'http-equiv': 'X-UA-Compatible', 'content': 'IE=7' },
        },
        'meta:ie8': {
            'name': 'meta',
            'attributes': { 'http-equiv': 'X-UA-Compatible', 'content': 'IE=8' },
        },
        'form:get': {
            'name': 'form',
            'attributes': { 'method': 'get' },
        },
        'form:g': {
            'name': 'form',
            'attributes': { 'method': 'get' },
        },
        'form:post': {
            'name': 'form',
            'attributes': { 'method': 'post' },
        },
        'form:p': {
            'name': 'form',
            'attributes': { 'method': 'post' },
        },
    }
    # Alternative spellings resolved to the canonical shortcut/tag name above.
    synonyms = {
        'php': 'php:t',
        'checkbox': 'input:checkbox',
        'check': 'input:checkbox',
        'input:c': 'input:checkbox',
        'input:b': 'input:button',
        'input:h': 'input:hidden',
        'hidden': 'input:hidden',
        'submit': 'input:submit',
        'input:s': 'input:submit',
        'radio': 'input:radio',
        'input:r': 'input:radio',
        'text': 'input:text',
        'passwd': 'input:password',
        'password': 'input:password',
        'pw': 'input:password',
        'input:t': 'input:text',
        'linkcss': 'link:css',
        'scriptsrc': 'script:src',
        'jquery': 'script:jquery',
        'jsapi': 'script:jsapi',
        'html5': 'html:5',
        'html4': 'html:4s',
        'html4s': 'html:4s',
        'html4t': 'html:4t',
        'xhtml': 'html:xxs',
        'xhtmlt': 'html:xt',
        'xhtmls': 'html:xs',
        'xhtml11': 'html:xxs',
        'opt': 'option',
        'st': 'strong',
        'css': 'style',
        'csss': 'link:css',
        'css:src': 'link:css',
        'csssrc': 'link:css',
        'js': 'script',
        'jss': 'script:src',
        'js:src': 'script:src',
        'jssrc': 'script:src',
    }
    # Tags rendered without a closing tag (as <br /> or <br>).
    short_tags = (
        'area', 'base', 'basefont', 'br', 'embed', 'hr', \
        'input', 'img', 'link', 'param', 'meta')
    # Attributes automatically added to these tags when expanded.
    # NOTE(review): 'base' appears twice below; the later entry wins (both
    # define the same {'href': ''}, so this is harmless).
    required = {
        'a': {'href':''},
        'base': {'href':''},
        'abbr': {'title': ''},
        'acronym':{'title': ''},
        'bdo': {'dir': ''},
        'link': {'rel': 'stylesheet', 'href': ''},
        'style': {'type': 'text/css'},
        'script': {'type': 'text/javascript'},
        'img': {'src':'', 'alt':''},
        'iframe': {'src': '', 'frameborder': '0'},
        'embed': {'src': '', 'type': ''},
        'object': {'data': '', 'type': ''},
        'param': {'name': '', 'value': ''},
        'form': {'action': '', 'method': 'post'},
        'table': {'cellspacing': '0'},
        'input': {'type': '', 'name': '', 'value': ''},
        'base': {'href': ''},
        'area': {'shape': '', 'coords': '', 'href': '', 'alt': ''},
        'select': {'name': ''},
        'option': {'value': ''},
        'textarea':{'name': ''},
        'meta': {'content': ''},
    }
class Parser:
    """The parser.

    Turns an abbreviation string (e.g. ``div>ul>li*3``) into a tree of
    [[Element]]s rooted at [[self.root]], which [[render()]] serializes
    to HTML.
    """

    # Constructor
    # -------------------------------------------------------------------------

    def __init__(self, options=None, str=''):
        """Constructor.

        NOTE(review): assumes `options` is always supplied (it is
        dereferenced immediately); passing None would raise AttributeError.
        """
        self.tokens = []
        self.str = str
        self.options = options
        if self.options.has("xml"):
            self.dialect = XmlDialect()
        else:
            self.dialect = HtmlDialect()
        self.root = Element(parser=self)
        # The caret is the list of current insertion points; starts at root.
        self.caret = []
        self.caret.append(self.root)
        self._last = []

    # Methods
    # -------------------------------------------------------------------------

    def load_string(self, str):
        """Loads a string to parse.
        """
        self.str = str
        self._tokenize()
        self._parse()

    def render(self):
        """Renders.
        Called by [[Router]].
        """
        # Get the initial render of the root node
        output = self.root.render()

        # Indent by whatever the input is indented with
        indent = re.findall("^[\r\n]*(\s*)", self.str)[0]
        output = indent + output.replace("\n", "\n" + indent)

        # Strip newline if not needed
        if self.options.has("no-last-newline") \
            or self.prefix or self.suffix:
            output = re.sub(r'\n\s*$', '', output)

        # TextMate mode
        if self.options.has("textmate"):
            output = self._textmatify(output)

        return output

    # Protected methods
    # -------------------------------------------------------------------------

    def _textmatify(self, output):
        """Returns a version of the output with TextMate placeholders in it.

        Empty tag bodies, empty attribute values and blank indented lines
        each become numbered $n tab stops; a final $0 is appended.
        """
        matches = re.findall(r'(></)|("")|(\n\s+)\n|(.|\s)', output)
        output = ''
        n = 1
        for i in matches:
            if i[0]:
                output += '>$%i</' % n
                n += 1
            elif i[1]:
                output += '"$%i"' % n
                n += 1
            elif i[2]:
                output += i[2] + '$%i\n' % n
                n += 1
            elif i[3]:
                output += i[3]
        output += "$0"
        return output

    def _tokenize(self):
        """Tokenizes.
        Initializes [[self.tokens]].
        """
        str = self.str.strip()

        # Find prefix/suffix: literal HTML tags wrapped around the
        # abbreviation are peeled off and re-attached verbatim on render.
        while True:
            match = re.match(r"^(\s*<[^>]+>\s*)", str)
            if match is None: break
            if self.prefix is None: self.prefix = ''
            self.prefix += match.group(0)
            str = str[len(match.group(0)):]

        while True:
            match = re.findall(r"(\s*<[^>]+>[\s\n\r]*)$", str)
            if not match: break
            if self.suffix is None: self.suffix = ''
            self.suffix = match[0] + self.suffix
            str = str[:-len(match[0])]

        # Split by the element separators
        for token in re.split('(<|>|\+(?!\\s*\+|$))', str):
            if token.strip() != '':
                self.tokens.append(Token(token, parser=self))

    def _parse(self):
        """Takes the tokens and does its thing.
        Populates [[self.root]].
        """
        # Carry it over to the root node.
        if self.prefix or self.suffix:
            self.root.prefix = self.prefix
            self.root.suffix = self.suffix
            self.root.depth += 1

        for token in self.tokens:
            if token.type == Token.ELEMENT:
                # Reset the "last elements added" list. We will
                # repopulate this with the new elements added now.
                self._last[:] = []

                # Create [[Element]]s from a [[Token]].
                # They will be created as many as the multiplier specifies,
                # multiplied by how many carets we have
                count = 0
                for caret in self.caret:
                    local_count = 0
                    for i in range(token.multiplier):
                        count += 1
                        local_count += 1
                        new = Element(token, caret,
                                count = count,
                                local_count = local_count,
                                parser = self)
                        self._last.append(new)
                        caret.append(new)

            # For >
            elif token.type == Token.CHILD:
                # The last children added.
                self.caret[:] = self._last

            # For <
            elif token.type == Token.PARENT:
                # If we're the root node, don't do anything
                parent = self.caret[0].parent
                if parent is not None:
                    self.caret[:] = [parent]
        return

    # Properties
    # -------------------------------------------------------------------------

    # Property: dialect
    # The dialect of XML
    dialect = None

    # Property: str
    # The string
    str = ''

    # Property: tokens
    # The list of tokens
    tokens = []

    # Property: options
    # Reference to the [[Options]] instance
    options = None

    # Property: root
    # The root [[Element]] node.
    root = None

    # Property: caret
    # The current insertion point.
    caret = None

    # Property: _last
    # List of the last appended stuff
    _last = None

    # Property: indent
    # Yeah
    indent = ''

    # Property: prefix
    # (String) The trailing tag in the beginning.
    #
    # Description:
    # For instance, in `<div>ul>li</div>`, the `prefix` is `<div>`.
    prefix = ''

    # Property: suffix
    # (string) The trailing tag at the end.
    suffix = ''

    pass
# =============================================================================
class Element:
    """An element.

    A node in the tree built by [[Parser]]; knows how to render itself
    (and its children, recursively) to HTML.
    """
    def __init__(self, token=None, parent=None, count=None, local_count=None, \
                 parser=None, opening_tag=None, closing_tag=None, \
                 attributes=None, name=None, text=None):
        """Constructor.

        This is called by ???.

        Description:
        All parameters are optional.

        token - (Token) The token (required)
        parent - (Element) Parent element; `None` if root
        count - (Int) The number to substitute for `&` (e.g., in `li.item-$`)
        local_count - (Int) The number to substitute for `$` (e.g., in `li.item-&`)
        parser - (Parser) The parser
        attributes - ...
        name - ...
        text - ...
        """
        self.children = []
        self.attributes = {}
        self.parser = parser

        if token is not None:
            # Assumption is that token is of type [[Token]] and is
            # a [[Token.ELEMENT]].
            self.name = token.name
            self.attributes = token.attributes.copy()
            self.text = token.text
            self.populate = token.populate
            self.expand = token.expand
            self.opening_tag = token.opening_tag
            self.closing_tag = token.closing_tag

        # `count` can be given. This will substitude & in classname and ID
        if count is not None:
            for key in self.attributes:
                attrib = self.attributes[key]
                attrib = attrib.replace('&', ("%i" % count))
                if local_count is not None:
                    attrib = attrib.replace('$', ("%i" % local_count))
                self.attributes[key] = attrib

        # Copy over from parameters
        if attributes: self.attributes = attributes
        if name: self.name = name
        if text: self.text = text

        self._fill_attributes()

        self.parent = parent
        if parent is not None:
            self.depth = parent.depth + 1

        if self.populate: self._populate()

    def render(self):
        """Renders the element, along with it's subelements, into HTML code.

        [Grouped under "Rendering methods"]
        """
        output = ""
        # Indentation unit: a tab, or N spaces, per user options.
        try: tabs = bool(self.parser.options.options['indent-tabs'])
        except: tabs = False
        if tabs:
            spaces = '\t'
        else:
            try: spaces_count = int(self.parser.options.options['indent-spaces'])
            except: spaces_count = 4
            spaces = ' ' * spaces_count
        indent = self.depth * spaces

        prefix, suffix = ('', '')
        if self.prefix: prefix = self.prefix + "\n"
        if self.suffix: suffix = self.suffix

        # Make the guide from the ID (/#header), or the class if there's no ID (/.item)
        # This is for the start-guide, end-guide and post-tag-guides
        guide_str = ''
        if 'id' in self.attributes:
            guide_str += "#%s" % self.attributes['id']
        elif 'class' in self.attributes:
            guide_str += ".%s" % self.attributes['class'].replace(' ', '.')

        # Build the post-tag guide (e.g., </div><!-- /#header -->),
        # the start guide, and the end guide.
        guide = ''
        start_guide = ''
        end_guide = ''
        if ((self.name == 'div') and \
            (('id' in self.attributes) or ('class' in self.attributes))):
            if (self.parser.options.has('post-tag-guides')):
                guide = "<!-- /%s -->" % guide_str
            if (self.parser.options.has('start-guide-format')):
                format = self.parser.options.get('start-guide-format')
                try: start_guide = format % guide_str
                except: start_guide = (format + " " + guide_str).strip()
                start_guide = "%s<!-- %s -->\n" % (indent, start_guide)
            if (self.parser.options.has('end-guide-format')):
                format = self.parser.options.get('end-guide-format')
                try: end_guide = format % guide_str
                except: end_guide = (format + " " + guide_str).strip()
                end_guide = "\n%s<!-- %s -->" % (indent, end_guide)

        # Short, self-closing tags (<br />)
        short_tags = self.parser.dialect.short_tags

        # When it should be expanded..
        # (That is, <div>\n...\n</div> or similar -- wherein something must go
        # inside the opening/closing tags)
        if len(self.children) > 0 \
            or self.expand \
            or prefix or suffix \
            or (self.parser.options.has('expand-divs') and self.name == 'div'):
            for child in self.children:
                output += child.render()

            # For expand divs: if there are no children (that is, `output`
            # is still blank despite above), fill it with a blank line.
            if (output == ''): output = indent + spaces + "\n"

            # If we're a root node and we have a prefix or suffix...
            # (Only the root node can have a prefix or suffix.)
            if prefix or suffix:
                output = "%s%s%s%s%s\n" % \
                    (indent, prefix, output, suffix, guide)

            # Uh..
            elif self.name != '' or \
                 self.opening_tag is not None or \
                 self.closing_tag is not None:
                output = start_guide + \
                    indent + self.get_opening_tag() + "\n" + \
                    output + \
                    indent + self.get_closing_tag() + \
                    guide + end_guide + "\n"

        # Short, self-closing tags (<br> or <br /> depending on configuration)
        elif self.name in short_tags:
            if self.parser.options.has('no-html5-self-closing'):
                output = "%s<%s />\n" % (indent, self.get_default_tag())
            else:
                output = "%s<%s>\n" % (indent, self.get_default_tag())

        # Tags with text, possibly
        elif self.name != '' or \
             self.opening_tag is not None or \
             self.closing_tag is not None:
            output = "%s%s%s%s%s%s%s%s" % \
                (start_guide, indent, self.get_opening_tag(), \
                self.text, \
                self.get_closing_tag(), \
                guide, end_guide, "\n")

        # Else, it's an empty-named element (like the root). Pass.
        else:
            pass

        return output

    def get_default_tag(self):
        """Returns the opening tag (without brackets).

        Usage:
        element.get_default_tag()

        [Grouped under "Rendering methods"]
        """
        output = '%s' % (self.name)
        for key, value in iteritems(self.attributes):
            output += ' %s="%s"' % (key, value)
        return output

    def get_opening_tag(self):
        # Explicit opening_tag (from a shortcut) wins over the generated one.
        if self.opening_tag is None:
            return "<%s>" % self.get_default_tag()
        else:
            return self.opening_tag

    def get_closing_tag(self):
        # Explicit closing_tag (from a shortcut) wins over the generated one.
        if self.closing_tag is None:
            return "</%s>" % self.name
        else:
            return self.closing_tag

    def append(self, object):
        """Registers an element as a child of this element.

        Usage:
        element.append(child)

        Description:
        Adds a given element `child` to the children list of this element. It
        will be rendered when [[render()]] is called on the element.

        See also:
        - [[get_last_child()]]

        [Grouped under "Traversion methods"]
        """
        self.children.append(object)

    def get_last_child(self):
        """Returns the last child element which was [[append()]]ed to this element.

        Usage:
        element.get_last_child()

        Description:
        This is the same as using `element.children[-1]`.

        [Grouped under "Traversion methods"]
        """
        return self.children[-1]

    def _populate(self):
        """Expands with default items.

        This is called when the [[populate]] flag is turned on.
        """
        if self.name == 'ul':
            elements = [Element(name='li', parent=self, parser=self.parser)]
        elif self.name == 'dl':
            elements = [
                Element(name='dt', parent=self, parser=self.parser),
                Element(name='dd', parent=self, parser=self.parser)]
        elif self.name == 'table':
            tr = Element(name='tr', parent=self, parser=self.parser)
            td = Element(name='td', parent=tr, parser=self.parser)
            tr.children.append(td)
            elements = [tr]
        else:
            elements = []
        for el in elements:
            self.children.append(el)

    def _fill_attributes(self):
        """Fills default attributes for certain elements.

        Description:
        This is called by the constructor.

        [Protected, grouped under "Protected methods"]
        """
        # Make sure <a>'s have a href, <img>'s have an src, etc.
        required = self.parser.dialect.required
        for element, attribs in iteritems(required):
            if self.name == element:
                for attrib in attribs:
                    if attrib not in self.attributes:
                        self.attributes[attrib] = attribs[attrib]

    # -------------------------------------------------------------------------

    # Property: last_child
    # [Read-only]
    last_child = property(get_last_child)

    # -------------------------------------------------------------------------

    # Property: parent
    # (Element) The parent element.
    parent = None

    # Property: name
    # (String) The name of the element (e.g., `div`)
    name = ''

    # Property: attributes
    # (Dict) The dictionary of attributes (e.g., `{'src': 'image.jpg'}`)
    attributes = None

    # Property: children
    # (List of Elements) The children
    children = None

    # Property: opening_tag
    # (String or None) The opening tag. Optional; will use `name` and
    # `attributes` if this is not given.
    opening_tag = None

    # Property: closing_tag
    # (String or None) The closing tag
    closing_tag = None

    # Inner text of the tag (from `{...}` in the abbreviation).
    text = ''
    # Nesting depth; the root sits at -1 so its children render at column 0.
    depth = -1
    # Force <tag>\n...\n</tag> expansion even without children.
    expand = False
    # Auto-fill default children (ul -> li, table -> tr>td, ...).
    populate = False
    parser = None

    # Property: prefix
    # Only the root note can have this.
    prefix = None
    suffix = None
# =============================================================================
class Token:
    def __init__(self, str, parser=None):
        """Token.

        Description:
        str - The string to parse

        In the string `div > ul`, there are 3 tokens. (`div`, `>`, and `ul`)
        For `>`, it will be a `Token` with `type` set to `Token.CHILD`
        """
        self.str = str.strip()
        self.attributes = {}
        self.parser = parser

        # Set the type.
        if self.str == '<':
            self.type = Token.PARENT
        elif self.str == '>':
            self.type = Token.CHILD
        elif self.str == '+':
            self.type = Token.SIBLING
        else:
            self.type = Token.ELEMENT
            self._init_element()

    def _init_element(self):
        """Initializes. Only called if the token is an element token.

        Parses the element syntax out of self.str, mutating self.str as
        each piece ([attr], {text}, .class, #id, *N, trailing +/!) is
        consumed.

        [Private]
        """
        # Get the tag name. Default to DIV if none given.
        name = re.findall('^([\w\-:]*)', self.str)[0]
        name = name.lower().replace('-', ':')

        # Find synonyms through this thesaurus
        synonyms = self.parser.dialect.synonyms
        if name in synonyms.keys():
            name = synonyms[name]

        # Colon names are dialect shortcuts: copy the shortcut's fields
        # (name, attributes, opening/closing tags, ...) onto this token.
        if ':' in name:
            shortcuts = self.parser.dialect.shortcuts
            if name in shortcuts.keys():
                for key, value in iteritems(shortcuts[name]):
                    setattr(self, key, value)
                if 'html' in name:
                    return
            else:
                self.name = name
        elif (name == ''): self.name = 'div'
        else: self.name = name

        # Look for attributes
        attribs = []
        for attrib in re.findall('\[([^\]]*)\]', self.str):
            attribs.append(attrib)
            self.str = self.str.replace("[" + attrib + "]", "")
        if len(attribs) > 0:
            for attrib in attribs:
                try: key, value = attrib.split('=', 1)
                except: key, value = attrib, ''
                self.attributes[key] = value

        # Try looking for text
        text = None
        for text in re.findall('\{(.*?)\}(?!\})', self.str):
            self.str = self.str.replace("{" + text + "}", "")
        if text is not None:
            self.text = text

        # Get the class names
        classes = []
        for classname in re.findall('\.([\$a-zA-Z0-9_\-\&]+)', self.str):
            classes.append(classname)
        if len(classes) > 0:
            try: self.attributes['class']
            except: self.attributes['class'] = ''
            self.attributes['class'] += ' ' + ' '.join(classes)
            self.attributes['class'] = self.attributes['class'].strip()

        # Get the ID (the last #id wins)
        id = None
        for id in re.findall('#([\$a-zA-Z0-9_\-\&]+)', self.str): pass
        if id is not None:
            self.attributes['id'] = id

        # See if there's a multiplier (e.g., "li*3")
        multiplier = None
        for multiplier in re.findall('\*\s*([0-9]+)', self.str): pass
        if multiplier is not None:
            self.multiplier = int(multiplier)

        # Populate flag (e.g., ul+)
        flags = None
        for flags in re.findall('[\+\!]+$', self.str): pass
        if flags is not None:
            if '+' in flags: self.populate = True
            if '!' in flags: self.expand = True

    def __str__(self):
        return self.str

    str = ''
    parser = None

    # For elements
    # See the properties of `Element` for description on these.
    name = ''
    attributes = None
    multiplier = 1
    expand = False
    populate = False
    text = ''
    opening_tag = None
    closing_tag = None

    # Type
    type = 0
    ELEMENT = 2
    CHILD = 4
    PARENT = 8
    SIBLING = 16
# =============================================================================
class Router:
    """The router.

    Driver/entry point: builds an [[Options]] instance, then dispatches to
    help(), version() or parse().
    """

    # Constructor
    # -------------------------------------------------------------------------

    def __init__(self):
        pass

    # Methods
    # -------------------------------------------------------------------------

    def start(self, options=None, str=None, ret=None):
        """Main entry point.

        options - optional dict of preloaded options (used when embedded,
                  e.g. by an editor plugin); when absent, sys.argv is parsed.
        str     - optional list of input lines; when None, stdin is read.
        ret     - when true, parse() returns the output instead of printing.
        """
        if (options):
            self.options = Options(router=self, options=options, argv=None)
        else:
            self.options = Options(router=self, argv=sys.argv[1:], options=None)
        if (self.options.has('help')):
            return self.help()
        elif (self.options.has('version')):
            return self.version()
        else:
            return self.parse(str=str, ret=ret)

    def help(self):
        """Print usage information and the list of command-line arguments."""
        print("Usage: %s [OPTIONS]" % sys.argv[0])
        print("Expands input into HTML.")
        print("")
        # Renamed loop vars from short/long (`long` is a Python 2 builtin).
        for short_opt, long_opt, info in self.options.cmdline_keys:
            if "Deprecated" in info: continue
            if not short_opt == '': short_opt = '-%s,' % short_opt
            if not long_opt == '': long_opt = '--%s' % long_opt.replace("=", "=XXX")
            print("%6s %-25s %s" % (short_opt, long_opt, info))
        print("")
        print("\n".join(self.help_content))

    def version(self):
        """Print version information."""
        print("Uhm, yeah.")

    def parse(self, str=None, ret=None):
        """Read the input, expand it, and write (or return) the result."""
        self.parser = Parser(self.options)

        try:
            # Read the files
            if str is not None:
                lines = str
            else:
                lines = [sys.stdin.read()]
            lines = " ".join(lines)
        except KeyboardInterrupt:
            pass
        # Narrowed from a bare `except:` so SystemExit and other
        # non-Exception interrupts are no longer silently swallowed.
        except Exception:
            sys.stderr.write("Reading failed.\n")
            return

        try:
            self.parser.load_string(lines)
            output = self.parser.render()
            if ret: return output
            sys.stdout.write(output)
        # Narrowed from a bare `except:` for the same reason as above.
        except Exception:
            sys.stderr.write("Parse error. Check your input.\n")
            print(sys.exc_info()[0])
            print(sys.exc_info()[1])

    def exit(self):
        """Terminate the process."""
        sys.exit()

    help_content = [
        "Please refer to the manual for more information.",
    ]
# =============================================================================
class Options:
    """Parses and stores command-line (or preloaded) options.

    Values are looked up with `get`/`has`, or via attribute access
    (`opts.version`) through `__getattr__`.
    """

    # Default option values. __init__ copies this per-instance; the old
    # code mutated this shared class-level dict, leaking state between
    # instances.
    options = {
        'indent-spaces': 4
    }

    # (short, long, description) triples understood on the command line.
    # A trailing '=' on the long name means the option takes a value.
    cmdline_keys = [
        ('h', 'help', 'Shows help'),
        ('v', 'version', 'Shows the version'),
        ('', 'no-guides', 'Deprecated'),
        ('', 'post-tag-guides', 'Adds comments at the end of DIV tags'),
        ('', 'textmate', 'Adds snippet info (textmate mode)'),
        ('', 'indent-spaces=', 'Indent spaces'),
        ('', 'indent-tabs', 'Indent with tabs'),
        ('', 'expand-divs', 'Automatically expand divs'),
        ('', 'no-last-newline', 'Skip the trailing newline'),
        ('', 'start-guide-format=', 'To be documented'),  # << TODO
        ('', 'end-guide-format=', 'To be documented'),  # << TODO
        ('', 'xml', 'Skip html attribute fillings'),
        ('', 'no-html5-self-closing', 'Use HTML4 <br /> instead of HTML5 <br>'),
    ]

    # Property: router
    # Router
    router = 1

    def __init__(self, router, argv, options=None):
        """Build the option set.

        `router`  -- owning Router (used for exit() on a getopt error).
        `argv`    -- argument list to parse with getopt (ignored when
                     `options` is given).
        `options` -- optional dict of values to preload directly.
        """
        self.router = router
        # Per-instance copy so defaults are never mutated on the class.
        self.options = dict(self.options)
        # `options` can be given as a dict of stuff to preload
        if options:
            for k, v in options.items():
                self.options[k] = v
            return
        # Prepare for getopt()
        short_keys, long_keys = "", []
        for short, long_name, info in self.cmdline_keys:  # 'v', 'version'
            short_keys += short
            long_keys.append(long_name)
        try:
            getoptions, arguments = getopt.getopt(argv, short_keys, long_keys)
        except getopt.GetoptError:
            err = sys.exc_info()[1]
            sys.stderr.write("Options error: %s\n" % err)
            sys.stderr.write("Try --help for a list of arguments.\n")
            return router.exit()
        # Sort the parsed pairs into self.options
        for key, value in getoptions:  # '--version', ''
            # Value-less options are stored as flags.
            if value == '':
                value = True
            if key[0:2] == '--':
                # Long option: strip the leading dashes.
                self.options[key[2:]] = value
            elif key[0:1] == '-':
                # Short option: record it under its long name.
                # (A leftover debug print of the long name was removed here.)
                for short, long_name, info in self.cmdline_keys:
                    if short == key[1:]:
                        self.options[long_name] = True

    def __getattr__(self, attr):
        """Allow `opts.foo` as shorthand for `opts.get('foo')`."""
        return self.get(attr)

    def get(self, attr):
        """Return the value for `attr`, or None when unset."""
        return self.options.get(attr)

    def has(self, attr):
        """Return True if `attr` was set.

        (The previous implementation used dict.has_key, which no longer
        exists on Python 3 and made this always return False there.)
        """
        return attr in self.options
# =============================================================================
if __name__ == "__main__":
    # Script entry point: run the router against argv / stdin.
    Router().start()
| mit |
yokose-ks/edx-platform | common/lib/capa/capa/tests/test_input_templates.py | 38 | 38744 | """
Tests for the logic in input type mako templates.
"""
import unittest
import capa
import os.path
import json
from lxml import etree
from mako.template import Template as MakoTemplate
from mako import exceptions
from capa.inputtypes import Status
class TemplateError(Exception):
    """Raised when a Mako template fails to render or yields bad XML."""
class TemplateTestCase(unittest.TestCase):
    """
    Utilities for testing capa input-type Mako templates.

    Subclasses set TEMPLATE_NAME, build a template context, and use the
    render/assert helpers below.
    """

    # Subclasses override this to specify the file name of the template
    # to be loaded from capa/templates.
    # The template name should include the .html extension:
    # for example: choicegroup.html
    TEMPLATE_NAME = None

    def setUp(self):
        """
        Load the template under test.
        """
        capa_path = capa.__path__[0]
        self.template_path = os.path.join(capa_path,
                                          'templates',
                                          self.TEMPLATE_NAME)
        with open(self.template_path) as f:
            self.template = MakoTemplate(f.read())

    def render_to_xml(self, context_dict):
        """
        Render the template using the `context_dict` dict.

        Returns an `etree` XML element.

        Raises TemplateError if rendering fails or the output is not
        parseable XML.
        """
        # add dummy STATIC_URL to template context
        context_dict.setdefault("STATIC_URL", "/dummy-static/")
        try:
            xml_str = self.template.render_unicode(**context_dict)
        # Deliberately broad: re-raise any Mako failure as TemplateError,
        # carrying Mako's formatted error text for easier debugging.
        except:
            raise TemplateError(exceptions.text_error_template().render())
        # Attempt to construct an XML tree from the template
        # This makes it easy to use XPath to make assertions, rather
        # than dealing with a string.
        # We modify the string slightly by wrapping it in <test>
        # tags, to ensure it has one root element.
        try:
            xml = etree.fromstring("<test>" + xml_str + "</test>")
        except Exception as exc:
            raise TemplateError("Could not parse XML from '{0}': {1}".format(
                xml_str, str(exc)))
        else:
            return xml

    def assert_has_xpath(self, xml_root, xpath, context_dict, exact_num=1):
        """
        Asserts that the xml tree has an element satisfying `xpath`.

        `xml_root` is an etree XML element
        `xpath` is an XPath string, such as `'/foo/bar'`
        `context` is used to print a debugging message
        `exact_num` is the exact number of matches to expect.
        """
        message = ("XML does not have %d match(es) for xpath '%s'\nXML: %s\nContext: %s"
                   % (exact_num, str(xpath), etree.tostring(xml_root), str(context_dict)))
        self.assertEqual(len(xml_root.xpath(xpath)), exact_num, msg=message)

    def assert_no_xpath(self, xml_root, xpath, context_dict):
        """
        Asserts that the xml tree does NOT have an element
        satisfying `xpath`.

        `xml_root` is an etree XML element
        `xpath` is an XPath string, such as `'/foo/bar'`
        `context` is used to print a debugging message
        """
        # Implemented as "exactly zero matches".
        self.assert_has_xpath(xml_root, xpath, context_dict, exact_num=0)

    def assert_has_text(self, xml_root, xpath, text, exact=True):
        """
        Find the element at `xpath` in `xml_root` and assert
        that its text is `text`.

        `xml_root` is an etree XML element
        `xpath` is an XPath string, such as `'/foo/bar'`
        `text` is the expected text that the element should contain

        If multiple elements are found, checks the first one.
        If no elements are found, the assertion fails.
        """
        element_list = xml_root.xpath(xpath)
        self.assertTrue(len(element_list) > 0,
                        "Could not find element at '%s'" % str(xpath))
        if exact:
            self.assertEqual(text, element_list[0].text)
        else:
            self.assertIn(text, element_list[0].text)
class ChoiceGroupTemplateTest(TemplateTestCase):
    """
    Test mako template for `<choicegroup>` input.
    """

    TEMPLATE_NAME = 'choicegroup.html'

    def setUp(self):
        """
        Build a baseline context; individual tests override
        status / input_type / value as needed.
        """
        choices = [('1', 'choice 1'), ('2', 'choice 2'), ('3', 'choice 3')]
        self.context = {'id': '1',
                        'choices': choices,
                        'status': Status('correct'),
                        'label': 'test',
                        'input_type': 'checkbox',
                        'name_array_suffix': '1',
                        'value': '3'}
        super(ChoiceGroupTemplateTest, self).setUp()

    def test_problem_marked_correct(self):
        """
        Test conditions under which the entire problem
        (not a particular option) is marked correct.
        """
        self.context['status'] = Status('correct')
        self.context['input_type'] = 'checkbox'
        self.context['value'] = ['1', '2']

        # Should mark the entire problem correct
        xml = self.render_to_xml(self.context)
        xpath = "//div[@class='indicator_container']/span[@class='status correct']"
        self.assert_has_xpath(xml, xpath, self.context)

        # Should NOT mark individual options
        self.assert_no_xpath(xml, "//label[@class='choicegroup_incorrect']",
                             self.context)
        self.assert_no_xpath(xml, "//label[@class='choicegroup_correct']",
                             self.context)

    def test_problem_marked_incorrect(self):
        """
        Test all conditions under which the entire problem
        (not a particular option) is marked incorrect.
        """
        conditions = [
            {'status': Status('incorrect'), 'input_type': 'radio', 'value': ''},
            {'status': Status('incorrect'), 'input_type': 'checkbox', 'value': []},
            {'status': Status('incorrect'), 'input_type': 'checkbox', 'value': ['2']},
            {'status': Status('incorrect'), 'input_type': 'checkbox', 'value': ['2', '3']},
            {'status': Status('incomplete'), 'input_type': 'radio', 'value': ''},
            {'status': Status('incomplete'), 'input_type': 'checkbox', 'value': []},
            {'status': Status('incomplete'), 'input_type': 'checkbox', 'value': ['2']},
            {'status': Status('incomplete'), 'input_type': 'checkbox', 'value': ['2', '3']}]

        for test_conditions in conditions:
            self.context.update(test_conditions)
            xml = self.render_to_xml(self.context)
            xpath = "//div[@class='indicator_container']/span[@class='status incorrect']"
            self.assert_has_xpath(xml, xpath, self.context)

            # Should NOT mark individual options
            self.assert_no_xpath(xml,
                                 "//label[@class='choicegroup_incorrect']",
                                 self.context)
            self.assert_no_xpath(xml,
                                 "//label[@class='choicegroup_correct']",
                                 self.context)

    def test_problem_marked_unsubmitted(self):
        """
        Test all conditions under which the entire problem
        (not a particular option) is marked unanswered.
        """
        conditions = [
            {'status': Status('unsubmitted'), 'input_type': 'radio', 'value': ''},
            {'status': Status('unsubmitted'), 'input_type': 'radio', 'value': []},
            {'status': Status('unsubmitted'), 'input_type': 'checkbox', 'value': []},
            {'input_type': 'radio', 'value': ''},
            {'input_type': 'radio', 'value': []},
            {'input_type': 'checkbox', 'value': []},
            {'input_type': 'checkbox', 'value': ['1']},
            {'input_type': 'checkbox', 'value': ['1', '2']}]

        self.context['status'] = Status('unanswered')

        for test_conditions in conditions:
            self.context.update(test_conditions)
            xml = self.render_to_xml(self.context)
            xpath = "//div[@class='indicator_container']/span[@class='status unanswered']"
            self.assert_has_xpath(xml, xpath, self.context)

            # Should NOT mark individual options
            self.assert_no_xpath(xml,
                                 "//label[@class='choicegroup_incorrect']",
                                 self.context)
            self.assert_no_xpath(xml,
                                 "//label[@class='choicegroup_correct']",
                                 self.context)

    def test_option_marked_correct(self):
        """
        Test conditions under which a particular option
        (not the entire problem) is marked correct.
        """
        conditions = [
            {'input_type': 'radio', 'value': '2'},
            {'input_type': 'radio', 'value': ['2']}]

        self.context['status'] = Status('correct')

        for test_conditions in conditions:
            self.context.update(test_conditions)
            xml = self.render_to_xml(self.context)
            xpath = "//label[@class='choicegroup_correct']"
            self.assert_has_xpath(xml, xpath, self.context)

            # Should NOT mark the whole problem
            xpath = "//div[@class='indicator_container']/span"
            self.assert_no_xpath(xml, xpath, self.context)

    def test_option_marked_incorrect(self):
        """
        Test conditions under which a particular option
        (not the entire problem) is marked incorrect.
        """
        conditions = [
            {'input_type': 'radio', 'value': '2'},
            {'input_type': 'radio', 'value': ['2']}]

        self.context['status'] = Status('incorrect')

        for test_conditions in conditions:
            self.context.update(test_conditions)
            xml = self.render_to_xml(self.context)
            xpath = "//label[@class='choicegroup_incorrect']"
            self.assert_has_xpath(xml, xpath, self.context)

            # Should NOT mark the whole problem
            xpath = "//div[@class='indicator_container']/span"
            self.assert_no_xpath(xml, xpath, self.context)

    def test_never_show_correctness(self):
        """
        Test conditions under which we tell the template to
        NOT show correct/incorrect, but instead show a message.

        This is used, for example, by the Justice course to ask
        questions without specifying a correct answer.  When
        the student responds, the problem displays "Thank you
        for your response"
        """
        conditions = [
            {'input_type': 'radio', 'status': Status('correct'), 'value': ''},
            {'input_type': 'radio', 'status': Status('correct'), 'value': '2'},
            {'input_type': 'radio', 'status': Status('correct'), 'value': ['2']},
            {'input_type': 'radio', 'status': Status('incorrect'), 'value': '2'},
            {'input_type': 'radio', 'status': Status('incorrect'), 'value': []},
            {'input_type': 'radio', 'status': Status('incorrect'), 'value': ['2']},
            {'input_type': 'checkbox', 'status': Status('correct'), 'value': []},
            {'input_type': 'checkbox', 'status': Status('correct'), 'value': ['2']},
            {'input_type': 'checkbox', 'status': Status('incorrect'), 'value': []},
            {'input_type': 'checkbox', 'status': Status('incorrect'), 'value': ['2']}]

        self.context['show_correctness'] = 'never'
        self.context['submitted_message'] = 'Test message'

        for test_conditions in conditions:
            self.context.update(test_conditions)
            xml = self.render_to_xml(self.context)

            # Should NOT mark the entire problem correct/incorrect
            xpath = "//div[@class='indicator_container']/span[@class='status correct']"
            self.assert_no_xpath(xml, xpath, self.context)

            xpath = "//div[@class='indicator_container']/span[@class='status incorrect']"
            self.assert_no_xpath(xml, xpath, self.context)

            # Should NOT mark individual options
            self.assert_no_xpath(xml,
                                 "//label[@class='choicegroup_incorrect']",
                                 self.context)
            self.assert_no_xpath(xml,
                                 "//label[@class='choicegroup_correct']",
                                 self.context)

            # Expect to see the message
            self.assert_has_text(xml, "//div[@class='capa_alert']",
                                 self.context['submitted_message'])

    def test_no_message_before_submission(self):
        """
        Ensure that we don't show the `submitted_message`
        before submitting.
        """
        conditions = [
            {'input_type': 'radio', 'status': Status('unsubmitted'), 'value': ''},
            {'input_type': 'radio', 'status': Status('unsubmitted'), 'value': []},
            {'input_type': 'checkbox', 'status': Status('unsubmitted'), 'value': []},

            # These tests expose bug #365
            # When the bug is fixed, uncomment these cases.
            #{'input_type': 'radio', 'status': 'unsubmitted', 'value': '2'},
            #{'input_type': 'radio', 'status': 'unsubmitted', 'value': ['2']},
            #{'input_type': 'radio', 'status': 'unsubmitted', 'value': '2'},
            #{'input_type': 'radio', 'status': 'unsubmitted', 'value': ['2']},
            #{'input_type': 'checkbox', 'status': 'unsubmitted', 'value': ['2']},
            #{'input_type': 'checkbox', 'status': 'unsubmitted', 'value': ['2']}]
        ]

        self.context['show_correctness'] = 'never'
        self.context['submitted_message'] = 'Test message'

        for test_conditions in conditions:
            self.context.update(test_conditions)
            xml = self.render_to_xml(self.context)

            # Expect that we do NOT see the message yet
            self.assert_no_xpath(xml, "//div[@class='capa_alert']", self.context)

    def test_label(self):
        """The group's fieldset carries the label as `aria-label`."""
        xml = self.render_to_xml(self.context)
        xpath = "//fieldset[@aria-label='%s']" % self.context['label']
        self.assert_has_xpath(xml, xpath, self.context)
class TextlineTemplateTest(TemplateTestCase):
    """
    Test mako template for `<textline>` input.
    """

    TEMPLATE_NAME = 'textline.html'

    def setUp(self):
        """Baseline context shared by the tests below."""
        self.context = {'id': '1',
                        'status': Status('correct'),
                        'label': 'test',
                        'value': '3',
                        'preprocessor': None,
                        'trailing_text': None}
        super(TextlineTemplateTest, self).setUp()

    def test_section_class(self):
        """Wrapper <div> CSS class reflects the do_math/inline flags."""
        cases = [({}, ' capa_inputtype textline'),
                 ({'do_math': True}, 'text-input-dynamath capa_inputtype textline'),
                 ({'inline': True}, ' capa_inputtype inline textline'),
                 ({'do_math': True, 'inline': True}, 'text-input-dynamath capa_inputtype inline textline'), ]

        for (context, css_class) in cases:
            base_context = self.context.copy()
            base_context.update(context)
            xml = self.render_to_xml(base_context)
            xpath = "//div[@class='%s']" % css_class
            self.assert_has_xpath(xml, xpath, self.context)

    def test_status(self):
        """Each status renders the matching div class and status text."""
        cases = [('correct', 'correct', 'correct'),
                 ('unsubmitted', 'unanswered', 'unanswered'),
                 ('incorrect', 'incorrect', 'incorrect'),
                 ('incomplete', 'incorrect', 'incomplete')]

        for (context_status, div_class, status_mark) in cases:
            self.context['status'] = Status(context_status)
            xml = self.render_to_xml(self.context)

            # Expect that we get a <div> with correct class
            xpath = "//div[@class='%s ']" % div_class
            self.assert_has_xpath(xml, xpath, self.context)

            # Expect that we get a <p> with class="status"
            # (used by CSS to draw the green check / red x)
            self.assert_has_text(xml, "//p[@class='status']",
                                 status_mark, exact=False)

    def test_label(self):
        """The input element carries the label as `aria-label`."""
        xml = self.render_to_xml(self.context)
        xpath = "//input[@aria-label='%s']" % self.context['label']
        self.assert_has_xpath(xml, xpath, self.context)

    def test_hidden(self):
        """`hidden` hides both the wrapper div and the input."""
        self.context['hidden'] = True
        xml = self.render_to_xml(self.context)

        xpath = "//div[@style='display:none;']"
        self.assert_has_xpath(xml, xpath, self.context)

        xpath = "//input[@style='display:none;']"
        self.assert_has_xpath(xml, xpath, self.context)

    def test_do_math(self):
        """`do_math` adds the dynamath input, equation div and textarea."""
        self.context['do_math'] = True
        xml = self.render_to_xml(self.context)

        xpath = "//input[@class='math']"
        self.assert_has_xpath(xml, xpath, self.context)

        xpath = "//div[@class='equation']"
        self.assert_has_xpath(xml, xpath, self.context)

        xpath = "//textarea[@id='input_1_dynamath']"
        self.assert_has_xpath(xml, xpath, self.context)

    def test_size(self):
        """A provided `size` is passed through to the input element."""
        self.context['size'] = '20'
        xml = self.render_to_xml(self.context)

        xpath = "//input[@size='20']"
        self.assert_has_xpath(xml, xpath, self.context)

    def test_preprocessor(self):
        """Preprocessor class/script are rendered as data attributes."""
        self.context['preprocessor'] = {'class_name': 'test_class',
                                        'script_src': 'test_script'}
        xml = self.render_to_xml(self.context)

        xpath = "//div[contains(@class, 'text-input-dynamath_data') and @data-preprocessor='test_class']"
        self.assert_has_xpath(xml, xpath, self.context)

        xpath = "//div[@class='script_placeholder' and @data-src='test_script']"
        self.assert_has_xpath(xml, xpath, self.context)

    def test_do_inline_and_preprocessor(self):
        """Preprocessor data div picks up the `inline` class too."""
        self.context['preprocessor'] = {'class_name': 'test_class',
                                        'script_src': 'test_script'}
        self.context['inline'] = True
        xml = self.render_to_xml(self.context)

        xpath = "//div[contains(@class, 'text-input-dynamath_data inline') and @data-preprocessor='test_class']"
        self.assert_has_xpath(xml, xpath, self.context)

    def test_do_inline(self):
        """Status div class combines the status name with `inline`."""
        cases = [('correct', 'correct'),
                 ('unsubmitted', 'unanswered'),
                 ('incorrect', 'incorrect'),
                 ('incomplete', 'incorrect')]

        self.context['inline'] = True

        for (context_status, div_class) in cases:
            self.context['status'] = Status(context_status)
            xml = self.render_to_xml(self.context)

            # Expect that we get a <div> with correct class
            xpath = "//div[@class='%s inline']" % div_class
            self.assert_has_xpath(xml, xpath, self.context)

    def test_message(self):
        """A `msg` value is shown in a span with class `message`."""
        self.context['msg'] = "Test message"
        xml = self.render_to_xml(self.context)

        xpath = "//span[@class='message']"
        self.assert_has_text(xml, xpath, self.context['msg'])
class FormulaEquationInputTemplateTest(TemplateTestCase):
    """
    Test mako template for `<formulaequationinput>`s.
    """

    TEMPLATE_NAME = 'formulaequationinput.html'

    def setUp(self):
        """Baseline context; dummy values only need to render."""
        self.context = {
            'id': 2,
            'value': 'PREFILLED_VALUE',
            'status': Status('unsubmitted'),
            'label': 'test',
            'previewer': 'file.js',
            'reported_status': 'REPORTED_STATUS',
        }
        super(FormulaEquationInputTemplateTest, self).setUp()

    def test_no_size(self):
        """Without a `size` in context, the input has no size attribute."""
        xml = self.render_to_xml(self.context)
        self.assert_no_xpath(xml, "//input[@size]", self.context)

    def test_size(self):
        """A provided `size` is passed through to the input element."""
        self.context['size'] = '40'
        xml = self.render_to_xml(self.context)

        self.assert_has_xpath(xml, "//input[@size='40']", self.context)
class AnnotationInputTemplateTest(TemplateTestCase):
    """
    Test mako template for `<annotationinput>` input.
    """

    TEMPLATE_NAME = 'annotationinput.html'

    def setUp(self):
        """Baseline context with HTML-bearing values for escaping tests."""
        self.context = {'id': 2,
                        'value': '<p>Test value</p>',
                        'title': '<h1>This is a title</h1>',
                        'text': '<p><b>This</b> is a test.</p>',
                        'comment': '<p>This is a test comment</p>',
                        'comment_prompt': '<p>This is a test comment prompt</p>',
                        'comment_value': '<p>This is the value of a test comment</p>',
                        'tag_prompt': '<p>This is a tag prompt</p>',
                        'options': [],
                        'has_options_value': False,
                        'debug': False,
                        'status': Status('unsubmitted'),
                        'return_to_annotation': False,
                        'msg': '<p>This is a test message</p>', }
        super(AnnotationInputTemplateTest, self).setUp()

    def test_return_to_annotation(self):
        """
        Test link for `Return to Annotation` appears if and only if
        the flag is set.
        """
        xpath = "//a[@class='annotation-return']"

        # If return_to_annotation set, then show the link
        self.context['return_to_annotation'] = True
        xml = self.render_to_xml(self.context)
        self.assert_has_xpath(xml, xpath, self.context)

        # Otherwise, do not show the links
        self.context['return_to_annotation'] = False
        xml = self.render_to_xml(self.context)
        self.assert_no_xpath(xml, xpath, self.context)

    def test_option_selection(self):
        """
        Test that selected options are selected.
        """
        # Create options 0-4 and select option 2
        self.context['options_value'] = [2]
        self.context['options'] = [
            {'id': id_num,
             'choice': 'correct',
             'description': '<p>Unescaped <b>HTML {0}</b></p>'.format(id_num)}
            for id_num in range(0, 5)]

        xml = self.render_to_xml(self.context)

        # Expect that each option description is visible
        # with unescaped HTML.
        # Since the HTML is unescaped, we can traverse the XML tree
        for id_num in range(0, 5):
            xpath = "//span[@data-id='{0}']/p/b".format(id_num)
            self.assert_has_text(xml, xpath, 'HTML {0}'.format(id_num), exact=False)

        # Expect that the correct option is selected
        xpath = "//span[contains(@class,'selected')]/p/b"
        self.assert_has_text(xml, xpath, 'HTML 2', exact=False)

    def test_submission_status(self):
        """
        Test that the submission status displays correctly.
        """
        # Test cases of `(input_status, expected_css_class)` tuples
        test_cases = [('unsubmitted', 'unanswered'),
                      ('incomplete', 'incorrect'),
                      ('incorrect', 'incorrect')]

        for (input_status, expected_css_class) in test_cases:
            self.context['status'] = Status(input_status)
            xml = self.render_to_xml(self.context)

            xpath = "//span[@class='status {0}']".format(expected_css_class)
            self.assert_has_xpath(xml, xpath, self.context)

        # If individual options are being marked, then expect
        # just the option to be marked incorrect, not the whole problem
        self.context['has_options_value'] = True
        self.context['status'] = Status('incorrect')
        xpath = "//span[@class='incorrect']"
        xml = self.render_to_xml(self.context)
        self.assert_no_xpath(xml, xpath, self.context)

    def test_display_html_comment(self):
        """
        Test that HTML comment and comment prompt render.
        """
        self.context['comment'] = "<p>Unescaped <b>comment HTML</b></p>"
        self.context['comment_prompt'] = "<p>Prompt <b>prompt HTML</b></p>"
        self.context['text'] = "<p>Unescaped <b>text</b></p>"
        xml = self.render_to_xml(self.context)

        # Because the HTML is unescaped, we should be able to
        # descend to the <b> tag
        xpath = "//div[@class='block']/p/b"
        self.assert_has_text(xml, xpath, 'prompt HTML')

        xpath = "//div[@class='block block-comment']/p/b"
        self.assert_has_text(xml, xpath, 'comment HTML')

        xpath = "//div[@class='block block-highlight']/p/b"
        self.assert_has_text(xml, xpath, 'text')

    def test_display_html_tag_prompt(self):
        """
        Test that HTML tag prompts render.
        """
        self.context['tag_prompt'] = "<p>Unescaped <b>HTML</b></p>"
        xml = self.render_to_xml(self.context)

        # Because the HTML is unescaped, we should be able to
        # descend to the <b> tag
        xpath = "//div[@class='block']/p/b"
        self.assert_has_text(xml, xpath, 'HTML')
class MathStringTemplateTest(TemplateTestCase):
    """
    Test mako template for `<mathstring>` input.
    """

    TEMPLATE_NAME = 'mathstring.html'

    def setUp(self):
        """Baseline context; tests set `isinline`, `mathstr`, `tail`."""
        self.context = {'isinline': False, 'mathstr': '', 'tail': ''}
        super(MathStringTemplateTest, self).setUp()

    def test_math_string_inline(self):
        """Inline math is wrapped in [mathjaxinline] markers."""
        self.context['isinline'] = True
        self.context['mathstr'] = 'y = ax^2 + bx + c'

        xml = self.render_to_xml(self.context)
        xpath = "//section[@class='math-string']/span[1]"
        self.assert_has_text(xml, xpath,
                             '[mathjaxinline]y = ax^2 + bx + c[/mathjaxinline]')

    def test_math_string_not_inline(self):
        """Display math is wrapped in [mathjax] markers."""
        self.context['isinline'] = False
        self.context['mathstr'] = 'y = ax^2 + bx + c'

        xml = self.render_to_xml(self.context)
        xpath = "//section[@class='math-string']/span[1]"
        self.assert_has_text(xml, xpath,
                             '[mathjax]y = ax^2 + bx + c[/mathjax]')

    def test_tail_html(self):
        """`tail` HTML is rendered unescaped after the math span."""
        self.context['tail'] = "<p>This is some <b>tail</b> <em>HTML</em></p>"
        xml = self.render_to_xml(self.context)

        # HTML from `tail` should NOT be escaped.
        # We should be able to traverse it as part of the XML tree
        xpath = "//section[@class='math-string']/span[2]/p/b"
        self.assert_has_text(xml, xpath, 'tail')

        xpath = "//section[@class='math-string']/span[2]/p/em"
        self.assert_has_text(xml, xpath, 'HTML')
class OptionInputTemplateTest(TemplateTestCase):
    """
    Test mako template for `<optioninput>` input.
    """

    TEMPLATE_NAME = 'optioninput.html'

    def setUp(self):
        """Baseline context; tests override `options`, `value`, `status`."""
        self.context = {
            'id': 2,
            'options': [],
            'status': Status('unsubmitted'),
            'label': 'test',
            'value': 0
        }
        super(OptionInputTemplateTest, self).setUp()

    def test_select_options(self):
        """Options render unescaped, with the chosen one selected."""
        # Create options 0-4, and select option 2
        self.context['options'] = [(id_num, '<b>Option {0}</b>'.format(id_num))
                                   for id_num in range(0, 5)]
        self.context['value'] = 2

        xml = self.render_to_xml(self.context)

        # Should have a dummy default
        xpath = "//option[@value='option_2_dummy_default']"
        self.assert_has_xpath(xml, xpath, self.context)

        # Should have each of the options, with the correct description
        # The description HTML should NOT be escaped
        # (that's why we descend into the <b> tag)
        for id_num in range(0, 5):
            xpath = "//option[@value='{0}']/b".format(id_num)
            self.assert_has_text(xml, xpath, 'Option {0}'.format(id_num))

        # Should have the correct option selected
        xpath = "//option[@selected='true']/b"
        self.assert_has_text(xml, xpath, 'Option 2')

    def test_status(self):
        """Each status renders a span with the expected CSS class."""
        # Test cases, where each tuple represents
        # `(input_status, expected_css_class)`
        test_cases = [('unsubmitted', 'status unanswered'),
                      ('correct', 'status correct'),
                      ('incorrect', 'status incorrect'),
                      ('incomplete', 'status incorrect')]

        for (input_status, expected_css_class) in test_cases:
            self.context['status'] = Status(input_status)
            xml = self.render_to_xml(self.context)

            xpath = "//span[@class='{0}']".format(expected_css_class)
            self.assert_has_xpath(xml, xpath, self.context)

    def test_label(self):
        """The select element carries the label as `aria-label`."""
        xml = self.render_to_xml(self.context)
        xpath = "//select[@aria-label='%s']" % self.context['label']
        self.assert_has_xpath(xml, xpath, self.context)
class DragAndDropTemplateTest(TemplateTestCase):
    """
    Test mako template for `<draganddropinput>` input.
    """

    TEMPLATE_NAME = 'drag_and_drop_input.html'

    def setUp(self):
        """Baseline context; tests override `status` and the JSON blob."""
        self.context = {'id': 2,
                        'drag_and_drop_json': '',
                        'value': 0,
                        'status': Status('unsubmitted'),
                        'msg': ''}
        super(DragAndDropTemplateTest, self).setUp()

    def test_status(self):
        """Status renders both a div class and a status <p> text."""
        # Test cases, where each tuple represents
        # `(input_status, expected_css_class, expected_text)`
        test_cases = [('unsubmitted', 'unanswered', 'unanswered'),
                      ('correct', 'correct', 'correct'),
                      ('incorrect', 'incorrect', 'incorrect'),
                      ('incomplete', 'incorrect', 'incomplete')]

        for (input_status, expected_css_class, expected_text) in test_cases:
            self.context['status'] = Status(input_status)
            xml = self.render_to_xml(self.context)

            # Expect a <div> with the status
            xpath = "//div[@class='{0}']".format(expected_css_class)
            self.assert_has_xpath(xml, xpath, self.context)

            # Expect a <p> with the status
            xpath = "//p[@class='status']"
            self.assert_has_text(xml, xpath, expected_text, exact=False)

    def test_drag_and_drop_json_html(self):
        """The JSON payload is inserted without HTML escaping."""
        json_with_html = json.dumps({'test': '<p>Unescaped <b>HTML</b></p>'})
        self.context['drag_and_drop_json'] = json_with_html
        xml = self.render_to_xml(self.context)

        # Assert that the JSON-encoded string was inserted without
        # escaping the HTML.  We should be able to traverse the XML tree.
        xpath = "//div[@class='drag_and_drop_problem_json']/p/b"
        self.assert_has_text(xml, xpath, 'HTML')
class ChoiceTextGroupTemplateTest(TemplateTestCase):
"""Test mako template for `<choicetextgroup>` input"""
TEMPLATE_NAME = 'choicetext.html'
VALUE_DICT = {'1_choiceinput_0bc': '1_choiceinput_0bc', '1_choiceinput_0_textinput_0': '0',
'1_choiceinput_1_textinput_0': '0'}
EMPTY_DICT = {'1_choiceinput_0_textinput_0': '',
'1_choiceinput_1_textinput_0': ''}
BOTH_CHOICE_CHECKBOX = {'1_choiceinput_0bc': 'choiceinput_0',
'1_choiceinput_1bc': 'choiceinput_1',
'1_choiceinput_0_textinput_0': '0',
'1_choiceinput_1_textinput_0': '0'}
WRONG_CHOICE_CHECKBOX = {'1_choiceinput_1bc': 'choiceinput_1',
'1_choiceinput_0_textinput_0': '0',
'1_choiceinput_1_textinput_0': '0'}
def setUp(self):
choices = [('1_choiceinput_0bc',
[{'tail_text': '', 'type': 'text', 'value': '', 'contents': ''},
{'tail_text': '', 'type': 'textinput', 'value': '', 'contents': 'choiceinput_0_textinput_0'}]),
('1_choiceinput_1bc', [{'tail_text': '', 'type': 'text', 'value': '', 'contents': ''},
{'tail_text': '', 'type': 'textinput', 'value': '', 'contents': 'choiceinput_1_textinput_0'}])]
self.context = {'id': '1',
'choices': choices,
'status': Status('correct'),
'input_type': 'radio',
'label': 'choicetext label',
'value': self.VALUE_DICT}
super(ChoiceTextGroupTemplateTest, self).setUp()
def test_grouping_tag(self):
"""
Tests whether we are using a section or a label to wrap choice elements.
Section is used for checkbox, so inputting text does not deselect
"""
input_tags = ('radio', 'checkbox')
self.context['status'] = Status('correct')
xpath = "//section[@id='forinput1_choiceinput_0bc']"
self.context['value'] = {}
for input_type in input_tags:
self.context['input_type'] = input_type
xml = self.render_to_xml(self.context)
self.assert_has_xpath(xml, xpath, self.context)
def test_problem_marked_correct(self):
"""Test conditions under which the entire problem
(not a particular option) is marked correct"""
self.context['status'] = Status('correct')
self.context['input_type'] = 'checkbox'
self.context['value'] = self.VALUE_DICT
# Should mark the entire problem correct
xml = self.render_to_xml(self.context)
xpath = "//div[@class='indicator_container']/span[@class='status correct']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark individual options
self.assert_no_xpath(xml, "//label[@class='choicetextgroup_incorrect']",
self.context)
self.assert_no_xpath(xml, "//label[@class='choicetextgroup_correct']",
self.context)
def test_problem_marked_incorrect(self):
"""Test all conditions under which the entire problem
(not a particular option) is marked incorrect"""
grouping_tags = {'radio': 'label', 'checkbox': 'section'}
conditions = [
{'status': Status('incorrect'), 'input_type': 'radio', 'value': {}},
{'status': Status('incorrect'), 'input_type': 'checkbox', 'value': self.WRONG_CHOICE_CHECKBOX},
{'status': Status('incorrect'), 'input_type': 'checkbox', 'value': self.BOTH_CHOICE_CHECKBOX},
{'status': Status('incorrect'), 'input_type': 'checkbox', 'value': self.VALUE_DICT},
{'status': Status('incomplete'), 'input_type': 'radio', 'value': {}},
{'status': Status('incomplete'), 'input_type': 'checkbox', 'value': self.WRONG_CHOICE_CHECKBOX},
{'status': Status('incomplete'), 'input_type': 'checkbox', 'value': self.BOTH_CHOICE_CHECKBOX},
{'status': Status('incomplete'), 'input_type': 'checkbox', 'value': self.VALUE_DICT}]
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//div[@class='indicator_container']/span[@class='status incorrect']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark individual options
grouping_tag = grouping_tags[test_conditions['input_type']]
self.assert_no_xpath(xml,
"//{0}[@class='choicetextgroup_incorrect']".format(grouping_tag),
self.context)
self.assert_no_xpath(xml,
"//{0}[@class='choicetextgroup_correct']".format(grouping_tag),
self.context)
def test_problem_marked_unsubmitted(self):
"""Test all conditions under which the entire problem
(not a particular option) is marked unanswered"""
grouping_tags = {'radio': 'label', 'checkbox': 'section'}
conditions = [
{'status': Status('unsubmitted'), 'input_type': 'radio', 'value': {}},
{'status': Status('unsubmitted'), 'input_type': 'radio', 'value': self.EMPTY_DICT},
{'status': Status('unsubmitted'), 'input_type': 'checkbox', 'value': {}},
{'status': Status('unsubmitted'), 'input_type': 'checkbox', 'value': self.EMPTY_DICT},
{'status': Status('unsubmitted'), 'input_type': 'checkbox', 'value': self.VALUE_DICT},
{'status': Status('unsubmitted'), 'input_type': 'checkbox', 'value': self.BOTH_CHOICE_CHECKBOX}]
self.context['status'] = Status('unanswered')
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//div[@class='indicator_container']/span[@class='status unanswered']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark individual options
grouping_tag = grouping_tags[test_conditions['input_type']]
self.assert_no_xpath(xml,
"//{0}[@class='choicetextgroup_incorrect']".format(grouping_tag),
self.context)
self.assert_no_xpath(xml,
"//{0}[@class='choicetextgroup_correct']".format(grouping_tag),
self.context)
    def test_option_marked_correct(self):
        """Test conditions under which a particular option
        (not the entire problem) is marked correct."""
        conditions = [
            {'input_type': 'radio', 'value': self.VALUE_DICT}]
        self.context['status'] = 'correct'
        for test_conditions in conditions:
            self.context.update(test_conditions)
            xml = self.render_to_xml(self.context)
            # The chosen option's <section> must carry the "correct" CSS
            # class.  (The backslash continues the string literal; the
            # embedded whitespace is insignificant inside an XPath "and".)
            xpath = "//section[@id='forinput1_choiceinput_0bc' and\
 @class='choicetextgroup_correct']"
            self.assert_has_xpath(xml, xpath, self.context)
            # Should NOT mark the whole problem
            xpath = "//div[@class='indicator_container']/span"
            self.assert_no_xpath(xml, xpath, self.context)
    def test_option_marked_incorrect(self):
        """Test conditions under which a particular option
        (not the entire problem) is marked incorrect."""
        conditions = [
            {'input_type': 'radio', 'value': self.VALUE_DICT}]
        self.context['status'] = 'incorrect'
        for test_conditions in conditions:
            self.context.update(test_conditions)
            xml = self.render_to_xml(self.context)
            # The chosen option's <section> must carry the "incorrect" CSS
            # class.  (The backslash continues the string literal; the
            # embedded whitespace is insignificant inside an XPath "and".)
            xpath = "//section[@id='forinput1_choiceinput_0bc' and\
 @class='choicetextgroup_incorrect']"
            self.assert_has_xpath(xml, xpath, self.context)
            # Should NOT mark the whole problem
            xpath = "//div[@class='indicator_container']/span"
            self.assert_no_xpath(xml, xpath, self.context)
    def test_label(self):
        """The rendered fieldset must expose the problem label via aria-label
        so screen readers can announce it."""
        xml = self.render_to_xml(self.context)
        xpath = "//fieldset[@aria-label='%s']" % self.context['label']
        self.assert_has_xpath(xml, xpath, self.context)
| agpl-3.0 |
nesl/sos-2x | modules/unit_test/modules/kernel/shared_mem/close/close_test.py | 2 | 3085 | import sys
import os
import pysos
import signal
# TEST_MODULE / MSG_TEST_DATA should match the test driver's module PID and
# the message type it sends.  When using the stock generic_test.c driver,
# these defaults are usually correct.
TEST_MODULE = 0x80
MSG_TEST_DATA= 33
# Watchdog interval in seconds: if no message arrives within this window the
# node is assumed to have panicked (see panic_handler below).
ALARM_LEN = 60
# node_state codes the driver reports in each message (see generic_test).
START_DATA = 100
FINAL_DATA = 200
TEST_FAIL = 155
TEST_PASS = 255
# Per-node message counters, keyed by node id; populated lazily as messages
# arrive.  The exact contents are specific to what the test driver sends.
oldstate = {}
state = {}
# SIGALRM handler: using an alarm is the easiest way for the script to detect
# that the node has stopped responding (i.e. likely entered panic mode).
def panic_handler(signum, frame):
    """Report a suspected node panic and abort the test with exit code 1."""
    print "it is highly likely that your node has entered panic mode"
    print "please reset the node"
    sys.exit(1)
# message handler for messages of type MSG_TEST_DATA
def generic_test(msg):
    """Handle a status message from the kernel shared-mem close test driver.

    Each message carries (node_id, node_state, data); node_state selects the
    check to perform (see the START_DATA/FINAL_DATA/TEST_FAIL/TEST_PASS
    constants) and data is a sequence counter or test-item id.
    """
    global oldstate
    global state
    print "message recieved"
    # Re-arm the watchdog: receiving any message proves the node is alive.
    signal.alarm(ALARM_LEN)
    # unpack the expected payload: node id, state code and a data byte
    # (format "<BBB": three little-endian unsigned bytes)
    (node_id, node_state, data) = pysos.unpack("<BBB", msg['data'])
    # First message from this node: initialise its counters.
    if node_id not in state.keys():
        state[node_id] = 0
        oldstate[node_id] = 0
    # Dispatch on the reported state code and verify the driver's progress.
    if (node_state == START_DATA):
        print "initialization began correctly"
    if (node_state == 0):
        # state code 0 carries the current sequence counter; remember it so
        # the matching state-1 message can be checked for lost messages.
        state[node_id] = data
    if (node_state == TEST_FAIL):
        print >> sys.stderr, "the test for item %d has failed" %data
    if (node_state == TEST_PASS):
        print "the test for item %d has passed" %data
    if (node_state == 1 and state[node_id] != data):
        # Counter mismatch between the paired state-0/state-1 messages.
        print >> sys.stderr, " a message was lost somewhere on node %d before count %d" %(node_id,data)
    if (node_state == FINAL_DATA):
        print "finalization worked correctly"
if __name__ == "__main__":
    # Connect to sossrv via pysos and register generic_test so it is called
    # whenever a message with the expected module id/type is received.
    srv = pysos.sossrv()
    srv.register_trigger(generic_test, did=TEST_MODULE, type=MSG_TEST_DATA)
    # Install the watchdog: if no message arrives within ALARM_LEN seconds,
    # panic_handler fires and aborts the run.  Tune ALARM_LEN per test.
    signal.signal(signal.SIGALRM, panic_handler)
    signal.alarm(ALARM_LEN)
    # Spin forever: the test_suite harness knows each test's duration (from
    # test.lst) and will terminate this script before starting the next test.
    while(1):
        continue
| bsd-3-clause |
alex4108/scLikesDownloader | requests/__init__.py | 327 | 1856 | # -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('http://python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post("http://httpbin.org/post", data=payload)
>>> print(r.text)
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2014 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
# Package metadata.
__title__ = 'requests'
__version__ = '2.3.0'
__build__ = 0x020300
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2014 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible.  pyOpenSSL is an
# optional dependency, so a missing import is silently tolerated.
try:
    from .packages.urllib3.contrib import pyopenssl
    pyopenssl.inject_into_urllib3()
except ImportError:
    pass
# Public API re-exports.
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
    RequestException, Timeout, URLRequired,
    TooManyRedirects, HTTPError, ConnectionError
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try:  # Python 2.7+ ships logging.NullHandler
    from logging import NullHandler
except ImportError:
    # Minimal stand-in for older Pythons: a handler that discards records.
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass
logging.getLogger(__name__).addHandler(NullHandler())
| gpl-2.0 |
tboyce021/home-assistant | homeassistant/components/soma/__init__.py | 1 | 4506 | """Support for Soma Smartshades."""
import logging
from api.soma_api import SomaApi
from requests import RequestException
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_PORT
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from .const import API, DOMAIN, HOST, PORT
DEVICES = "devices"
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{vol.Required(CONF_HOST): cv.string, vol.Required(CONF_PORT): cv.string}
)
},
extra=vol.ALLOW_EXTRA,
)
SOMA_COMPONENTS = ["cover", "sensor"]
async def async_setup(hass, config):
    """Set up the Soma component."""
    if DOMAIN in config:
        # YAML configuration present: hand it over to the config-entry
        # import flow so it becomes a regular config entry.
        import_flow = hass.config_entries.flow.async_init(
            DOMAIN,
            data=config[DOMAIN],
            context={"source": config_entries.SOURCE_IMPORT},
        )
        hass.async_create_task(import_flow)
    return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
    """Set up Soma from a config entry.

    Creates one shared SomaApi client, caches the shade list in hass.data and
    forwards setup to the cover and sensor platforms.
    """
    hass.data[DOMAIN] = {}
    hass.data[DOMAIN][API] = SomaApi(entry.data[HOST], entry.data[PORT])
    # list_devices performs blocking HTTP I/O, so run it in the executor.
    devices = await hass.async_add_executor_job(hass.data[DOMAIN][API].list_devices)
    hass.data[DOMAIN][DEVICES] = devices["shades"]
    for component in SOMA_COMPONENTS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, component)
        )
    return True
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
    """Unload a config entry.

    Nothing beyond hass.data is held per entry, so there is no teardown work.
    """
    return True
class SomaEntity(Entity):
    """Representation of a generic Soma device."""
    def __init__(self, device, api):
        """Initialize the Soma device.

        device -- device description dict from SOMA Connect (uses the
                  "mac" and "name" keys)
        api    -- shared SomaApi client
        """
        self.device = device
        self.api = api
        # Optimistic defaults until the first async_update() succeeds.
        self.current_position = 50
        self.battery_state = 0
        self.is_available = True
    @property
    def available(self):
        """Return true if the last API commands returned successfully."""
        return self.is_available
    @property
    def unique_id(self):
        """Return the unique id base on the id returned by pysoma API."""
        return self.device["mac"]
    @property
    def name(self):
        """Return the name of the device."""
        return self.device["name"]
    @property
    def device_info(self):
        """Return device specific attributes.
        Implemented by platform classes.
        """
        return {
            "identifiers": {(DOMAIN, self.unique_id)},
            "name": self.name,
            "manufacturer": "Wazombi Labs",
        }
    async def async_update(self):
        """Update the device with the latest data.

        Fetches shade position and battery level via blocking API calls run
        in the executor; any failure marks the entity unavailable and leaves
        the previous values untouched.
        """
        try:
            response = await self.hass.async_add_executor_job(
                self.api.get_shade_state, self.device["mac"]
            )
        except RequestException:
            _LOGGER.error("Connection to SOMA Connect failed")
            self.is_available = False
            return
        if response["result"] != "success":
            _LOGGER.error(
                "Unable to reach device %s (%s)", self.device["name"], response["msg"]
            )
            self.is_available = False
            return
        # Inverted: HA's position scale runs opposite to the API's
        # "position" value (assumed 0-100 — confirm against the SOMA docs).
        self.current_position = 100 - response["position"]
        try:
            response = await self.hass.async_add_executor_job(
                self.api.get_battery_level, self.device["mac"]
            )
        except RequestException:
            _LOGGER.error("Connection to SOMA Connect failed")
            self.is_available = False
            return
        if response["result"] != "success":
            _LOGGER.error(
                "Unable to reach device %s (%s)", self.device["name"], response["msg"]
            )
            self.is_available = False
            return
        # https://support.somasmarthome.com/hc/en-us/articles/360026064234-HTTP-API
        # battery_level response is expected to be min = 360, max 410 for
        # 0-100% levels above 410 are consider 100% and below 360, 0% as the
        # device considers 360 the minimum to move the motor.
        _battery = round(2 * (response["battery_level"] - 360))
        battery = max(min(100, _battery), 0)  # clamp to the 0-100 range
        self.battery_state = battery
        self.is_available = True
| apache-2.0 |
kflavin/cobbler | cobbler/action_report.py | 15 | 12031 | """
Report from a cobbler master.
FIXME: reinstante functionality for 2.0
Copyright 2007-2009, Red Hat, Inc and Others
Anderson Silva <ansilva@redhat.com>
Michael DeHaan <michael.dehaan AT gmail>
This software may be freely redistributed under the terms of the GNU
general public license.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA.
"""
import re
import clogger
import utils
class Report:
    """
    Renders reports over cobbler collections (distros, profiles, systems,
    repos, ...) in several output formats: plain text, csv, and trac / doku /
    mediawiki table markup.
    """

    def __init__(self, collection_mgr, logger=None):
        """
        Constructor

        @param collection_mgr: gives access to settings, the API and all
               cobbler collections
        @param logger: optional logger; a default clogger.Logger is created
               when none is supplied
        """
        self.collection_mgr = collection_mgr
        self.settings = collection_mgr.settings()
        self.api = collection_mgr.api
        self.report_type = None
        self.report_what = None
        self.report_name = None
        self.report_fields = None
        self.report_noheaders = None
        # Matches "outer[inner]" field selectors, e.g. "kernel_options[foo]".
        # Raw string: the previous non-raw literal only worked because Python
        # preserves the invalid escapes '\[' and '\]' verbatim.
        self.array_re = re.compile(r'([^[]+)\[([^]]+)\]')
        if logger is None:
            logger = clogger.Logger()
        self.logger = logger

    def fielder(self, structure, fields_list):
        """
        Return data from a subset of fields of some item.

        A field can be a top-level key, an "outer[inner]" selector into a
        nested dict, or a per-interface attribute looked up across all
        entries of the item's 'interfaces' dict.
        """
        item = {}
        for field in fields_list:
            internal = self.array_re.search(field)
            # check if field is primary field
            if field in structure.keys():
                item[field] = structure[field]
            # check if subfield in 'interfaces' field
            elif internal and internal.group(1) in structure.keys():
                outer = internal.group(1)
                inner = internal.group(2)
                if isinstance(structure[outer], dict) and inner in structure[outer]:
                    item[field] = structure[outer][inner]
            elif "interfaces" in structure.keys():
                # fall back to searching every network interface; the value is
                # prefixed with the interface name it was found on
                for device in structure['interfaces'].keys():
                    if field in structure['interfaces'][device]:
                        item[field] = device + ': ' + structure['interfaces'][device][field]
        return item

    def reporting_csv(self, info, order, noheaders):
        """
        Formats data on 'info' for csv output.

        @param info: list of per-item dicts
        @param order: ordered list of column names
        @param noheaders: when True the header row is omitted
        """
        outputheaders = ''
        outputbody = ''
        sep = ','
        info_count = 0
        for item in info:
            item_count = 0
            for key in order:
                # emit the header row while processing the first item only
                if info_count == 0:
                    outputheaders += str(key) + sep
                if key in item.keys():
                    outputbody += str(item[key]) + sep
                else:
                    # missing field for this item
                    outputbody += '-' + sep
                item_count += 1
            info_count += 1
            outputbody += '\n'
        outputheaders += '\n'
        if noheaders:
            outputheaders = ''
        return outputheaders + outputbody

    def reporting_trac(self, info, order, noheaders):
        """
        Formats data on 'info' for trac wiki table output.

        Parameters are as for reporting_csv.
        """
        outputheaders = ''
        outputbody = ''
        sep = '||'
        info_count = 0
        for item in info:
            item_count = 0
            for key in order:
                if info_count == 0:
                    outputheaders += sep + str(key)
                if key in item.keys():
                    outputbody += sep + str(item[key])
                else:
                    outputbody += sep + '-'
                item_count = item_count + 1
            info_count = info_count + 1
            outputbody += '||\n'
        outputheaders += '||\n'
        if noheaders:
            outputheaders = ''
        return outputheaders + outputbody

    def reporting_doku(self, info, order, noheaders):
        """
        Formats data on 'info' for doku wiki table output.

        Parameters are as for reporting_csv.
        """
        outputheaders = ''
        outputbody = ''
        sep1 = '^'
        sep2 = '|'
        info_count = 0
        for item in info:
            item_count = 0
            for key in order:
                if info_count == 0:
                    outputheaders += sep1 + str(key)
                # str() added for consistency with the other formatters;
                # previously non-string values crashed only the doku output.
                if key in item.keys():
                    outputbody += sep2 + str(item[key])
                else:
                    outputbody += sep2 + '-'
                item_count = item_count + 1
            info_count = info_count + 1
            outputbody += sep2 + '\n'
        outputheaders += sep1 + '\n'
        if noheaders:
            outputheaders = ''
        return outputheaders + outputbody

    def reporting_mediawiki(self, info, order, noheaders):
        """
        Formats data on 'info' for mediawiki table output.

        Parameters are as for reporting_csv.
        """
        outputheaders = ''
        outputbody = ''
        opentable = '{| border="1"\n'
        closetable = '|}\n'
        sep1 = '||'
        sep2 = '|'
        sep3 = '|-'
        info_count = 0
        for item in info:
            item_count = 0
            for key in order:
                # mediawiki rows start with '|' and use '||' between cells
                if info_count == 0 and item_count == 0:
                    outputheaders += sep2 + str(key)
                elif info_count == 0:
                    outputheaders += sep1 + str(key)
                if item_count == 0:
                    if key in item.keys():
                        outputbody += sep2 + str(item[key])
                    else:
                        outputbody += sep2 + '-'
                else:
                    if key in item.keys():
                        outputbody += sep1 + str(item[key])
                    else:
                        outputbody += sep1 + '-'
                item_count = item_count + 1
            info_count = info_count + 1
            outputbody += '\n' + sep3 + '\n'
        outputheaders += '\n' + sep3 + '\n'
        if noheaders:
            outputheaders = ''
        return opentable + outputheaders + outputbody + closetable

    def print_formatted_data(self, data, order, report_type, noheaders):
        """
        Used for picking the correct format to output data as.
        """
        if report_type == "csv":
            self.logger.flat(self.reporting_csv(data, order, noheaders))
        elif report_type == "mediawiki":
            self.logger.flat(self.reporting_mediawiki(data, order, noheaders))
        elif report_type == "trac":
            self.logger.flat(self.reporting_trac(data, order, noheaders))
        elif report_type == "doku":
            self.logger.flat(self.reporting_doku(data, order, noheaders))

    def reporting_sorter(self, a, b):
        """
        Used for sorting cobbler objects by name (Python 2 cmp-style).
        """
        return cmp(a.name, b.name)

    def reporting_print_sorted(self, collection):
        """
        Prints all objects in a collection sorted by name.
        """
        collection = [x for x in collection]
        collection.sort(self.reporting_sorter)
        for x in collection:
            self.logger.flat(x.to_string())

    def reporting_list_names2(self, collection, name):
        """
        Prints a specific object in a collection (no-op if absent).
        """
        obj = collection.get(name)
        if obj is not None:
            self.logger.flat(obj.to_string())

    def reporting_print_all_fields(self, collection, report_name, report_type, report_noheaders):
        """
        Prints all fields in a collection as a table given the report type.
        """
        # per-item hack: narrow the collection to a single named object
        if report_name:
            collection = collection.find(name=report_name)
            if collection:
                collection = [collection]
            else:
                return
        collection = [x for x in collection]
        collection.sort(self.reporting_sorter)
        data = []
        out_order = []
        count = 0
        for x in collection:
            item = {}
            if x.ITEM_TYPE == "settings":
                structure = x.to_dict()
            else:
                structure = x.to_list()
            for (key, value) in structure.iteritems():
                # exception for systems which could have > 1 interface
                if key == "interfaces":
                    for (device, info) in value.iteritems():
                        for (info_header, info_value) in info.iteritems():
                            item[info_header] = str(device) + ': ' + str(info_value)
                            # needs to create order list for print_formatted_fields
                            if count == 0:
                                out_order.append(info_header)
                else:
                    item[key] = value
                    # needs to create order list for print_formatted_fields
                    if count == 0:
                        out_order.append(key)
            count = count + 1
            data.append(item)
        self.print_formatted_data(data=data, order=out_order, report_type=report_type, noheaders=report_noheaders)

    def reporting_print_x_fields(self, collection, report_name, report_type, report_fields, report_noheaders):
        """
        Prints specific fields in a collection as a table given the report type.
        """
        # per-item hack: narrow the collection to a single named object
        if report_name:
            collection = collection.find(name=report_name)
            if collection:
                collection = [collection]
            else:
                return
        collection = [x for x in collection]
        collection.sort(self.reporting_sorter)
        data = []
        fields_list = report_fields.replace(' ', '').split(',')
        for x in collection:
            if x.ITEM_TYPE == "settings":
                structure = x.to_dict()
            else:
                structure = x.to_list()
            item = self.fielder(structure, fields_list)
            data.append(item)
        self.print_formatted_data(data=data, order=fields_list, report_type=report_type, noheaders=report_noheaders)

    # -------------------------------------------------------

    def run(self, report_what=None, report_name=None, report_type=None, report_fields=None, report_noheaders=None):
        """
        Get remote profiles and distros and sync them locally

        1. Handles original report output
        2. Handles all fields of report outputs as table given a format
        3. Handles specific fields of report outputs as table given a format
        """
        if report_type == 'text' and report_fields == 'all':
            for collection_name in ["distro", "profile", "system", "repo", "network", "image", "mgmtclass", "package", "file"]:
                if report_what == "all" or report_what == collection_name or report_what == "%ss" % collection_name or report_what == "%ses" % collection_name:
                    if report_name:
                        self.reporting_list_names2(self.api.get_items(collection_name), report_name)
                    else:
                        self.reporting_print_sorted(self.api.get_items(collection_name))
        elif report_type == 'text' and report_fields != 'all':
            utils.die(self.logger, "The 'text' type can only be used with field set to 'all'")
        elif report_type != 'text' and report_fields == 'all':
            for collection_name in ["distro", "profile", "system", "repo", "network", "image", "mgmtclass", "package", "file"]:
                if report_what == "all" or report_what == collection_name or report_what == "%ss" % collection_name or report_what == "%ses" % collection_name:
                    self.reporting_print_all_fields(self.api.get_items(collection_name), report_name, report_type, report_noheaders)
        else:
            for collection_name in ["distro", "profile", "system", "repo", "network", "image", "mgmtclass", "package", "file"]:
                if report_what == "all" or report_what == collection_name or report_what == "%ss" % collection_name or report_what == "%ses" % collection_name:
                    self.reporting_print_x_fields(self.api.get_items(collection_name), report_name, report_type, report_fields, report_noheaders)
| gpl-2.0 |
gbaty/shiboken2 | tests/samplebinding/bug_554_test.py | 6 | 1283 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of the Shiboken Python Bindings Generator project.
#
# Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
#
# Contact: PySide team <contact@pyside.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# version 2.1 as published by the Free Software Foundation. Please
# review the following information to ensure the GNU Lesser General
# Public License version 2.1 requirements will be met:
# http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
# #
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
'''Unit test for bug#554'''
from sample import *
class Bug554:
    """Regression reproducer: subclassing a wrapped type used to crash."""
    def crash(self):
        # Merely defining a Python subclass of the wrapped ObjectType
        # triggered the original crash; no instantiation is needed.
        class Crasher(ObjectType):
            pass
# Run the reproducer; the test passes simply by not segfaulting.
if __name__ == '__main__':
    bug = Bug554()
    bug.crash()
| gpl-2.0 |
sovicak/AnonymniAnalytici | 2018_02_15_cryptocurrencies_trading/algorithms/shared/mtma-1502733393810.py | 1 | 6908 | from pyalgotrade import strategy
from pyalgotrade import plotter
from pyalgotrade.tools import yahoofinance
from pyalgotrade.technical import ma
from pyalgotrade.technical import cumret
from pyalgotrade.stratanalyzer import sharpe
from pyalgotrade.stratanalyzer import returns
class MarketTiming(strategy.BacktestingStrategy):
    """Monthly-rebalanced momentum strategy: for each asset class, hold the
    instrument with the best 20-day return, but only while its price is above
    its 200-day SMA."""
    def __init__(self, feed, instrumentsByClass, initialCash):
        """
        :param feed: bar feed providing price series for every instrument
        :param instrumentsByClass: dict mapping asset-class name -> list of
            instrument symbols
        :param initialCash: starting cash for the backtest broker
        """
        super(MarketTiming, self).__init__(feed, initialCash)
        self.setUseAdjustedValues(True)
        self.__instrumentsByClass = instrumentsByClass
        # Month of the last rebalance; None forces a rebalance on first bar.
        self.__rebalanceMonth = None
        # instrument -> signed share count still pending to buy/sell.
        self.__sharesToBuy = {}
        # Initialize indicators for each instrument.
        self.__sma = {}
        for assetClass in instrumentsByClass:
            for instrument in instrumentsByClass[assetClass]:
                priceDS = feed[instrument].getPriceDataSeries()
                self.__sma[instrument] = ma.SMA(priceDS, 200)
    def _shouldRebalance(self, dateTime):
        # Rebalance once per calendar month.
        return dateTime.month != self.__rebalanceMonth
    def _getRank(self, instrument):
        """Return the instrument's momentum rank, or None if it is untradable
        (insufficient data or price below its 200-day SMA)."""
        # If the price is below the SMA, then this instrument doesn't rank at
        # all.
        smas = self.__sma[instrument]
        price = self.getLastPrice(instrument)
        if len(smas) == 0 or smas[-1] is None or price < smas[-1]:
            return None
        # Rank based on 20 day returns.
        ret = None
        lookBack = 20
        priceDS = self.getFeed()[instrument].getPriceDataSeries()
        if len(priceDS) >= lookBack and smas[-1] is not None and smas[-1*lookBack] is not None:
            ret = (priceDS[-1] - priceDS[-1*lookBack]) / float(priceDS[-1*lookBack])
        return ret
    def _getTopByClass(self, assetClass):
        # Find the instrument with the highest rank.
        ret = None
        highestRank = None
        for instrument in self.__instrumentsByClass[assetClass]:
            rank = self._getRank(instrument)
            if rank is not None and (highestRank is None or rank > highestRank):
                highestRank = rank
                ret = instrument
        return ret
    def _getTop(self):
        # Map each asset class to its best-ranked instrument (or None).
        ret = {}
        for assetClass in self.__instrumentsByClass:
            ret[assetClass] = self._getTopByClass(assetClass)
        return ret
    def _placePendingOrders(self):
        """Submit market orders for the pending share deltas, capped by the
        currently available cash."""
        remainingCash = self.getBroker().getCash() * 0.9  # Use less cash just in case price changes too much.
        for instrument in self.__sharesToBuy:
            orderSize = self.__sharesToBuy[instrument]
            if orderSize > 0:
                # Adjust the order size based on available cash.
                lastPrice = self.getLastPrice(instrument)
                cost = orderSize * lastPrice
                while cost > remainingCash and orderSize > 0:
                    orderSize -= 1
                    cost = orderSize * lastPrice
                if orderSize > 0:
                    remainingCash -= cost
                    assert(remainingCash >= 0)
            if orderSize != 0:
                self.info("Placing market order for %d %s shares" % (orderSize, instrument))
                self.marketOrder(instrument, orderSize, goodTillCanceled=True)
                # Keep whatever could not be afforded pending for next time.
                self.__sharesToBuy[instrument] -= orderSize
    def _logPosSize(self):
        # Debug helper: log each open position as a % of total equity.
        totalEquity = self.getBroker().getEquity()
        positions = self.getBroker().getPositions()
        for instrument in self.getBroker().getPositions():
            posSize = positions[instrument] * self.getLastPrice(instrument) / totalEquity * 100
            self.info("%s - %0.2f %%" % (instrument, posSize))
    def _rebalance(self):
        """Recompute the target portfolio and queue the share deltas needed
        to reach it (executed later by _placePendingOrders)."""
        self.info("Rebalancing")
        # Cancel all active/pending orders.
        for order in self.getBroker().getActiveOrders():
            self.getBroker().cancelOrder(order)
        # Equal-weight the portfolio across asset classes.
        cashPerAssetClass = self.getBroker().getEquity() / float(len(self.__instrumentsByClass))
        self.__sharesToBuy = {}
        # Calculate which positions should be open during the next period.
        topByClass = self._getTop()
        for assetClass in topByClass:
            instrument = topByClass[assetClass]
            self.info("Best for class %s: %s" % (assetClass, instrument))
            if instrument is not None:
                lastPrice = self.getLastPrice(instrument)
                cashForInstrument = cashPerAssetClass - self.getBroker().getShares(instrument) * lastPrice
                # This may yield a negative value and we have to reduce this
                # position.
                self.__sharesToBuy[instrument] = int(cashForInstrument / lastPrice)
        # Calculate which positions should be closed.
        for instrument in self.getBroker().getPositions():
            if instrument not in topByClass.values():
                currentShares = self.getBroker().getShares(instrument)
                assert(instrument not in self.__sharesToBuy)
                self.__sharesToBuy[instrument] = currentShares * -1
    def getSMA(self, instrument):
        # Expose the 200-day SMA series (used by plotting code).
        return self.__sma[instrument]
    def onBars(self, bars):
        # Rebalance at most once per month, then work through pending orders.
        currentDateTime = bars.getDateTime()
        if self._shouldRebalance(currentDateTime):
            self.__rebalanceMonth = currentDateTime.month
            self._rebalance()
        self._placePendingOrders()
def main(plot):
    """Run the MarketTiming backtest over 2007-2013 Yahoo Finance data and
    print Sharpe ratio and cumulative returns; optionally plot vs. SPY.
    (Python 2 script: note the print statements.)"""
    initialCash = 10000
    # One candidate instrument (ETF) per asset class.
    instrumentsByClass = {
        "US Stocks": ["VTI"],
        "Foreign Stocks": ["VEU"],
        "US 10 Year Government Bonds": ["IEF"],
        "Real Estate": ["VNQ"],
        "Commodities": ["DBC"],
    }
    # Download the bars.  SPY is included only as a benchmark for plotting.
    instruments = ["SPY"]
    for assetClass in instrumentsByClass:
        instruments.extend(instrumentsByClass[assetClass])
    feed = yahoofinance.build_feed(instruments, 2007, 2013, "data", skipErrors=True)
    strat = MarketTiming(feed, instrumentsByClass, initialCash)
    sharpeRatioAnalyzer = sharpe.SharpeRatio()
    strat.attachAnalyzer(sharpeRatioAnalyzer)
    returnsAnalyzer = returns.Returns()
    strat.attachAnalyzer(returnsAnalyzer)
    if plot:
        plt = plotter.StrategyPlotter(strat, False, False, True)
        plt.getOrCreateSubplot("cash").addCallback("Cash", lambda x: strat.getBroker().getCash())
        # Plot strategy vs. SPY cumulative returns.
        plt.getOrCreateSubplot("returns").addDataSeries("SPY", cumret.CumulativeReturn(feed["SPY"].getPriceDataSeries()))
        plt.getOrCreateSubplot("returns").addDataSeries("Strategy", returnsAnalyzer.getCumulativeReturns())
    strat.run()
    # 0.05 = assumed annual risk-free rate for the Sharpe calculation.
    print "Sharpe ratio: %.2f" % sharpeRatioAnalyzer.getSharpeRatio(0.05)
    print "Returns: %.2f %%" % (returnsAnalyzer.getCumulativeReturns()[-1] * 100)
    if plot:
        plt.plot()
if __name__ == "__main__":
    main(True)
| mit |
Ernesto99/odoo | addons/website_certification/controllers/main.py | 373 | 2149 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.web import http
from openerp.addons.web.http import request
class WebsiteCertifiedPartners(http.Controller):
    """Public website controller listing certified partners."""
    @http.route(['/certifications',
                 '/certifications/<model("certification.type"):cert_type>'], type='http', auth='public',
                website=True)
    def certified_partners(self, cert_type=None, **post):
        """Render the certified-partners page.

        :param cert_type: optional certification.type record used to filter
            the listed certifications; when absent, all are shown.
        """
        cr, uid, context = request.cr, request.uid, request.context
        certification_obj = request.registry['certification.certification']
        cert_type_obj = request.registry['certification.type']
        # Restrict to the requested certification type, if any.
        domain = []
        if cert_type:
            domain.append(('type_id', '=', cert_type.id))
        certifications_ids = certification_obj.search(cr, uid, domain, context=context)
        certifications = certification_obj.browse(cr, uid, certifications_ids, context=context)
        # All types are always needed to build the filter navigation.
        types = cert_type_obj.browse(cr, uid, cert_type_obj.search(cr, uid, [], context=context), context=context)
        data = {
            'certifications': certifications,
            'types': types
        }
        return request.website.render("website_certification.certified_partners", data)
| agpl-3.0 |
La0/garmin-uploader | garmin_uploader/api.py | 1 | 10018 | import requests
import re
from garmin_uploader import logger
# Garmin Connect / SSO endpoints used by the login and upload flows below.
URL_HOSTNAME = 'https://connect.garmin.com/modern/auth/hostname'
URL_LOGIN = 'https://sso.garmin.com/sso/login'
URL_POST_LOGIN = 'https://connect.garmin.com/modern/'
URL_PROFILE = 'https://connect.garmin.com/modern/currentuser-service/user/info'  # noqa
URL_HOST_SSO = 'sso.garmin.com'
URL_HOST_CONNECT = 'connect.garmin.com'
URL_SSO_SIGNIN = 'https://sso.garmin.com/sso/signin'
URL_UPLOAD = 'https://connect.garmin.com/modern/proxy/upload-service/upload'
URL_ACTIVITY_BASE = 'https://connect.garmin.com/modern/proxy/activity-service/activity'  # noqa
URL_ACTIVITY_TYPES = 'https://connect.garmin.com/modern/proxy/activity-service/activity/activityTypes'  # noqa
class GarminAPIException(Exception):
    """
    Raised when a Garmin Connect API call fails (rejected upload,
    refused rename, unexpected response payload, ...).
    """
class GarminAPI:
"""
Low level Garmin Connect api connector
"""
activity_types = None
# This strange header is needed to get auth working
common_headers = {
'NK': 'NT',
}
    def authenticate(self, username, password):
        """
        That's where the magic happens !
        Try to mimick a browser behavior trying to login
        on Garmin Connect as closely as possible
        Outputs a Requests session, loaded with precious cookies

        :param username: Garmin Connect account name
        :param password: Garmin Connect password
        :raises Exception: on any failed step of the SSO dance
        :return: an authenticated requests.Session
        """
        # Use a valid Browser user agent
        # TODO: use several UA picked randomly
        session = requests.Session()
        session.headers.update({
            'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:48.0) Gecko/20100101 Firefox/50.0',  # noqa
        })
        # Request sso hostname (Garmin returns it as JSON: {"host": ...})
        sso_hostname = None
        resp = session.get(URL_HOSTNAME)
        if not resp.ok:
            raise Exception('Invalid SSO first request status code {}'.format(resp.status_code))  # noqa
        sso_hostname = resp.json().get('host')
        # Load login page to get login ticket
        # Full parameters from Firebug, we have to maintain
        # Fuck this shit.
        # Who needs mandatory urls in a request parameters !
        params = [
            ('service', 'https://connect.garmin.com/modern/'),
            ('webhost', 'https://connect.garmin.com/modern/'),
            ('source', 'https://connect.garmin.com/signin/'),
            ('redirectAfterAccountLoginUrl', 'https://connect.garmin.com/modern/'),  # noqa
            ('redirectAfterAccountCreationUrl', 'https://connect.garmin.com/modern/'),  # noqa
            ('gauthHost', sso_hostname),
            ('locale', 'fr_FR'),
            ('id', 'gauth-widget'),
            ('cssUrl', 'https://connect.garmin.com/gauth-custom-v3.2-min.css'),
            ('privacyStatementUrl', 'https://www.garmin.com/fr-FR/privacy/connect/'),  # noqa
            ('clientId', 'GarminConnect'),
            ('rememberMeShown', 'true'),
            ('rememberMeChecked', 'false'),
            ('createAccountShown', 'true'),
            ('openCreateAccount', 'false'),
            ('displayNameShown', 'false'),
            ('consumeServiceTicket', 'false'),
            ('initialFocus', 'true'),
            ('embedWidget', 'false'),
            ('generateExtraServiceTicket', 'true'),
            ('generateTwoExtraServiceTickets', 'true'),
            ('generateNoServiceTicket', 'false'),
            ('globalOptInShown', 'true'),
            ('globalOptInChecked', 'false'),
            ('mobile', 'false'),
            ('connectLegalTerms', 'true'),
            ('showTermsOfUse', 'false'),
            ('showPrivacyPolicy', 'false'),
            ('showConnectLegalAge', 'false'),
            ('locationPromptShown', 'true'),
            ('showPassword', 'true'),
            ('useCustomHeader', 'false'),
            ('mfaRequired', 'false'),
            ('performMFACheck', 'false'),
            ('rememberMyBrowserShown', 'false'),
            ('rememberMyBrowserChecked', 'false'),
        ]
        res = session.get(URL_LOGIN, params=params)
        if res.status_code != 200:
            raise Exception('No login form')
        # Lookup for CSRF token embedded as a hidden form field
        csrf = re.search(r'<input type="hidden" name="_csrf" value="(\w+)" />', res.content.decode('utf-8'))  # noqa
        if csrf is None:
            raise Exception('No CSRF token')
        csrf_token = csrf.group(1)
        logger.debug('Found CSRF token {}'.format(csrf_token))
        # Login/Password with login ticket
        data = {
            'embed': 'false',
            'username': username,
            'password': password,
            '_csrf': csrf_token,
        }
        # Browser-like headers; Referer must be the login page we just loaded
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',  # noqa
            'Accept-Language': 'fr,en-US;q=0.7,en;q=0.3',
            'Accept-Encoding': 'gzip, deflate, br',
            'Origin': 'https://sso.garmin.com',
            'DNT': '1',
            'Connection': 'keep-alive',
            'Referer': res.url,
            'Upgrade-Insecure-Requests': '1',
            'Sec-Fetch-Dest': 'iframe',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'same-origin',
            'Sec-Fetch-User': '?1',
            'TE': 'Trailers',
        }
        res = session.post(URL_LOGIN, params=params, data=data,
                           headers=headers)
        if not res.ok:
            if res.status_code == 429:
                raise Exception('Authentication failed due to too many requests (429). Retry later...')  # noqa
            raise Exception('Authentification failed.')
        # Check we have sso guid in cookies
        if 'GARMIN-SSO-GUID' not in session.cookies:
            raise Exception('Missing Garmin auth cookie')
        # Second auth step
        # Needs a service ticket from previous response
        headers = {
            'Host': URL_HOST_CONNECT,
        }
        res = session.get(URL_POST_LOGIN, params=params, headers=headers)
        # A redirect chain (res.history) also counts as success here
        if res.status_code != 200 and not res.history:
            raise Exception('Second auth step failed.')
        # Check login by fetching the user profile
        res = session.get(URL_PROFILE)
        if not res.ok:
            raise Exception("Login check failed.")
        garmin_user = res.json()
        logger.info('Logged in as {}'.format(garmin_user['username']))
        return session
def upload_activity(self, session, activity):
"""
Upload an activity on Garmin
Support multiple formats
"""
assert activity.id is None
# Upload file as multipart form
files = {
"file": (activity.filename, activity.open()),
}
url = '{}/{}'.format(URL_UPLOAD, activity.extension)
res = session.post(url, files=files, headers=self.common_headers)
# HTTP Status can either be OK or Conflict
if res.status_code not in (200, 201, 409):
if res.status_code == 412:
logger.error('You may have to give explicit consent for uploading files to Garmin') # noqa
raise GarminAPIException('Failed to upload {}'.format(activity))
response = res.json()['detailedImportResult']
if len(response["successes"]) == 0:
if len(response["failures"]) > 0:
if response["failures"][0]["messages"][0]['code'] == 202:
# Activity already exists
return response["failures"][0]["internalId"], False
else:
raise GarminAPIException(response["failures"][0]["messages"]) # noqa
else:
raise GarminAPIException('Unknown error: {}'.format(response))
else:
# Upload was successsful
return response["successes"][0]["internalId"], True
def set_activity_name(self, session, activity):
"""
Update the activity name
"""
assert activity.id is not None
assert activity.name is not None
url = '{}/{}'.format(URL_ACTIVITY_BASE, activity.id)
data = {
'activityId': activity.id,
'activityName': activity.name,
}
headers = dict(self.common_headers) # clone
headers['X-HTTP-Method-Override'] = 'PUT' # weird. again.
res = session.post(url, json=data, headers=headers)
if not res.ok:
raise GarminAPIException('Activity name not set: {}'.format(res.content)) # noqa
def load_activity_types(self):
"""
Fetch valid activity types from Garmin Connect
"""
# Only fetch once
if self.activity_types:
return self.activity_types
logger.debug('Fetching activity types')
resp = requests.get(URL_ACTIVITY_TYPES, headers=self.common_headers)
if not resp.ok:
raise GarminAPIException('Failed to retrieve activity types')
# Store as a clean dict, mapping keys and lower case common name
types = resp.json()
self.activity_types = {t['typeKey']: t for t in types}
logger.debug('Fetched {} activity types'.format(len(self.activity_types))) # noqa
return self.activity_types
def set_activity_type(self, session, activity):
"""
Update the activity type
"""
assert activity.id is not None
assert activity.type is not None
# Load the corresponding type key on Garmin Connect
types = self.load_activity_types()
type_key = types.get(activity.type)
if type_key is None:
logger.error("Activity type '{}' not valid".format(activity.type))
return False
url = '{}/{}'.format(URL_ACTIVITY_BASE, activity.id)
data = {
'activityId': activity.id,
'activityTypeDTO': type_key
}
headers = dict(self.common_headers) # clone
headers['X-HTTP-Method-Override'] = 'PUT' # weird. again.
res = session.post(url, json=data, headers=headers)
if not res.ok:
raise GarminAPIException('Activity type not set: {}'.format(res.content)) # noqa
| gpl-2.0 |
BlueShells/openshift-ansible | inventory/openstack/hosts/nova.py | 11 | 6774 | #!/usr/bin/env python2
# pylint: skip-file
# (c) 2012, Marco Vito Moscaritolo <marco@agavee.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import sys
import re
import os
import ConfigParser
from novaclient import client as nova_client
try:
import json
except ImportError:
import simplejson as json
###################################################
# executed with no parameters, return the list of
# all groups and hosts

# Candidate locations for nova.ini; the first existing one wins (see
# nova_load_config_file below).
NOVA_CONFIG_FILES = [os.path.join(os.path.dirname(os.path.realpath(__file__)), "nova.ini"),
                     os.path.expanduser(os.environ.get('ANSIBLE_CONFIG', "~/nova.ini")),
                     "/etc/ansible/nova.ini"]

# Defaults applied when the ini file omits these options.
NOVA_DEFAULTS = {
    'auth_system': None,
    'region_name': None,
    'service_type': 'compute',
}
def nova_load_config_file():
    """Return a config parser loaded from the first existing nova.ini.

    Returns None when none of the candidate paths exist.
    """
    parser = ConfigParser.SafeConfigParser(NOVA_DEFAULTS)
    for candidate in NOVA_CONFIG_FILES:
        if not os.path.exists(candidate):
            continue
        parser.read(candidate)
        return parser
    return None
def get_fallback(config, value, section="openstack"):
    """
    Look up option *value* in *section* of the config object.

    Returns the configured value, or False when the option is absent.
    """
    try:
        result = config.get(section, value)
    except ConfigParser.NoOptionError:
        return False
    return result
def push(data, key, element):
    """
    Append *element* to the list stored under *key* in *data*,
    creating the list on first use.

    A falsy *key* or *element* is silently ignored (original behavior,
    preserved).
    """
    if (not element) or (not key):
        return
    # setdefault replaces the hand-rolled "key in dict" branch with the
    # idiomatic single-lookup form.
    data.setdefault(key, []).append(element)
def to_safe(word):
    '''
    Replace every character that is not alphanumeric or a dash with an
    underscore, so the result is usable as an Ansible group name.
    '''
    pattern = re.compile(r"[^A-Za-z0-9\-]")
    return pattern.sub("_", word)
def get_ips(server, access_ip=True):
    """
    Returns a list of the server's IPs, or the preferred
    access IP

    With access_ip=False: returns [accessIPv4, fixed..., floating...].
    With access_ip=True: returns a single preferred address (accessIPv4,
    else a floating IP, else a fixed IP), or None if the server has none.
    """
    private = []
    public = []
    address_list = []
    # Iterate through each servers network(s), get addresses and get type
    addresses = getattr(server, 'addresses', {})
    if len(addresses) > 0:
        for network in addresses.itervalues():  # Python 2 dict API
            for address in network:
                if address.get('OS-EXT-IPS:type', False) == 'fixed':
                    private.append(address['addr'])
                elif address.get('OS-EXT-IPS:type', False) == 'floating':
                    public.append(address['addr'])

    if not access_ip:
        # Full list requested: access IP first, then fixed, then floating.
        address_list.append(server.accessIPv4)
        address_list.extend(private)
        address_list.extend(public)
        return address_list

    access_ip = None
    # Append group to list
    if server.accessIPv4:
        access_ip = server.accessIPv4
    # NOTE(review): `prefer_private` is a module-level global assigned later
    # in this script's import-time code — this function only works after
    # that assignment has run.
    if (not access_ip) and public and not (private and prefer_private):
        access_ip = public[0]
    if private and not access_ip:
        access_ip = private[0]

    return access_ip
def get_metadata(server):
    """Returns dictionary of all host metadata"""
    # NOTE(review): the result of this call is discarded and get_ips has no
    # visible side effects — looks like dead code; confirm before removing.
    get_ips(server, False)
    results = {}
    for key in vars(server):
        # Extract value
        value = getattr(server, key)

        # Generate sanitized key; attribute names become os_<name> with
        # non-alphanumerics replaced by underscores.
        key = 'os_' + re.sub(r"[^A-Za-z0-9\-]", "_", key).lower()

        # Att value to instance result (exclude manager class)
        #TODO: maybe use value.__class__ or similar inside of key_name
        if key != 'os_manager':
            results[key] = value
    return results
# ---------------------------------------------------------------------------
# Script entry: build the Ansible dynamic-inventory JSON.
# NOTE(review): this file is Python 2 only (print statement, iteritems).
# ---------------------------------------------------------------------------
config = nova_load_config_file()
if not config:
    sys.exit('Unable to find configfile in %s' % ', '.join(NOVA_CONFIG_FILES))

# Load up connections info based on config and then environment
# variables
username = (get_fallback(config, 'username') or
            os.environ.get('OS_USERNAME', None))
api_key = (get_fallback(config, 'api_key') or
           os.environ.get('OS_PASSWORD', None))
auth_url = (get_fallback(config, 'auth_url') or
            os.environ.get('OS_AUTH_URL', None))
project_id = (get_fallback(config, 'project_id') or
              os.environ.get('OS_TENANT_NAME', None))
region_name = (get_fallback(config, 'region_name') or
               os.environ.get('OS_REGION_NAME', None))
auth_system = (get_fallback(config, 'auth_system') or
               os.environ.get('OS_AUTH_SYSTEM', None))

# Determine what type of IP is preferred to return
prefer_private = False
try:
    prefer_private = config.getboolean('openstack', 'prefer_private')
except ConfigParser.NoOptionError:
    pass

client = nova_client.Client(
    version=config.get('openstack', 'version'),
    username=username,
    api_key=api_key,
    auth_url=auth_url,
    region_name=region_name,
    project_id=project_id,
    auth_system=auth_system,
    service_type=config.get('openstack', 'service_type'),
)

# Default or added list option
if (len(sys.argv) == 2 and sys.argv[1] == '--list') or len(sys.argv) == 1:
    groups = {'_meta': {'hostvars': {}}}
    # Cycle on servers
    for server in client.servers.list():
        access_ip = get_ips(server)

        # Push to name group of 1
        push(groups, server.name, access_ip)

        # Run through each metadata item and add instance to it
        for key, value in server.metadata.iteritems():  # Python 2 dict API
            composed_key = to_safe('tag_{0}_{1}'.format(key, value))
            push(groups, composed_key, access_ip)

        # Do special handling of group for backwards compat
        # inventory groups
        group = server.metadata['group'] if 'group' in server.metadata else 'undefined'
        push(groups, group, access_ip)

        # Add vars to _meta key for performance optimization in
        # Ansible 1.3+
        groups['_meta']['hostvars'][access_ip] = get_metadata(server)

    # Return server list
    print(json.dumps(groups, sort_keys=True, indent=2))
    sys.exit(0)

#####################################################
# executed with a hostname as a parameter, return the
# variables for that host
elif len(sys.argv) == 3 and (sys.argv[1] == '--host'):
    results = {}
    ips = []
    for server in client.servers.list():
        if sys.argv[2] in (get_ips(server) or []):
            results = get_metadata(server)
    print(json.dumps(results, sort_keys=True, indent=2))
    sys.exit(0)

else:
    print "usage: --list ..OR.. --host <hostname>"
    sys.exit(1)
| apache-2.0 |
thecodinghub/news-for-good | news/Lib/site-packages/setuptools/command/upload_docs.py | 173 | 7311 | # -*- coding: utf-8 -*-
"""upload_docs
Implements a Distutils 'upload_docs' subcommand (upload documentation to
PyPI's pythonhosted.org).
"""
from base64 import standard_b64encode
from distutils import log
from distutils.errors import DistutilsOptionError
import os
import socket
import zipfile
import tempfile
import shutil
import itertools
import functools
from setuptools.extern import six
from setuptools.extern.six.moves import http_client, urllib
from pkg_resources import iter_entry_points
from .upload import upload
def _encode(s):
    """Encode *s* as UTF-8, tolerating lone surrogates on Python 3."""
    if six.PY3:
        return s.encode('utf-8', 'surrogateescape')
    return s.encode('utf-8', 'strict')
class upload_docs(upload):
    """Distutils command: zip the built docs and POST them to PyPI."""

    # override the default repository as upload_docs isn't
    # supported by Warehouse (and won't be).
    DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/'

    description = 'Upload documentation to PyPI'

    user_options = [
        ('repository=', 'r',
         "url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
        ('show-response', None,
         'display full response text from server'),
        ('upload-dir=', None, 'directory to upload'),
    ]
    boolean_options = upload.boolean_options

    def has_sphinx(self):
        # True only when no explicit upload dir was given AND a
        # build_sphinx command is registered via entry points; returns
        # None (falsy) otherwise.
        if self.upload_dir is None:
            for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
                return True

    sub_commands = [('build_sphinx', has_sphinx)]

    def initialize_options(self):
        upload.initialize_options(self)
        self.upload_dir = None
        self.target_dir = None

    def finalize_options(self):
        upload.finalize_options(self)
        if self.upload_dir is None:
            # No explicit dir: prefer the sphinx output, else build/docs.
            if self.has_sphinx():
                build_sphinx = self.get_finalized_command('build_sphinx')
                self.target_dir = build_sphinx.builder_target_dir
            else:
                build = self.get_finalized_command('build')
                self.target_dir = os.path.join(build.build_base, 'docs')
        else:
            self.ensure_dirname('upload_dir')
            self.target_dir = self.upload_dir
        if 'pypi.python.org' in self.repository:
            log.warn("Upload_docs command is deprecated. Use RTD instead.")
        self.announce('Using upload directory %s' % self.target_dir)

    def create_zipfile(self, filename):
        # Zip target_dir, storing entries relative to it; an empty top
        # level directory is treated as a configuration error.
        zip_file = zipfile.ZipFile(filename, "w")
        try:
            self.mkpath(self.target_dir)  # just in case
            for root, dirs, files in os.walk(self.target_dir):
                if root == self.target_dir and not files:
                    tmpl = "no files found in upload directory '%s'"
                    raise DistutilsOptionError(tmpl % self.target_dir)
                for name in files:
                    full = os.path.join(root, name)
                    relative = root[len(self.target_dir):].lstrip(os.path.sep)
                    dest = os.path.join(relative, name)
                    zip_file.write(full, dest)
        finally:
            zip_file.close()

    def run(self):
        # Run sub commands
        for cmd_name in self.get_sub_commands():
            self.run_command(cmd_name)

        tmp_dir = tempfile.mkdtemp()
        name = self.distribution.metadata.get_name()
        zip_file = os.path.join(tmp_dir, "%s.zip" % name)
        try:
            self.create_zipfile(zip_file)
            self.upload_file(zip_file)
        finally:
            shutil.rmtree(tmp_dir)

    @staticmethod
    def _build_part(item, sep_boundary):
        # Yield the byte chunks of one multipart/form-data part.
        key, values = item
        title = '\nContent-Disposition: form-data; name="%s"' % key
        # handle multiple entries for the same name
        if not isinstance(values, list):
            values = [values]
        for value in values:
            if isinstance(value, tuple):
                title += '; filename="%s"' % value[0]
                value = value[1]
            else:
                value = _encode(value)
            yield sep_boundary
            yield _encode(title)
            yield b"\n\n"
            yield value
            if value and value[-1:] == b'\r':
                yield b'\n'  # write an extra newline (lurve Macs)

    @classmethod
    def _build_multipart(cls, data):
        """
        Build up the MIME payload for the POST data
        """
        boundary = b'--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
        sep_boundary = b'\n--' + boundary
        end_boundary = sep_boundary + b'--'
        end_items = end_boundary, b"\n",
        builder = functools.partial(
            cls._build_part,
            sep_boundary=sep_boundary,
        )
        part_groups = map(builder, data.items())
        parts = itertools.chain.from_iterable(part_groups)
        body_items = itertools.chain(parts, end_items)
        content_type = 'multipart/form-data; boundary=%s' % boundary.decode('ascii')
        return b''.join(body_items), content_type

    def upload_file(self, filename):
        # POST the zip as a doc_upload action with HTTP Basic auth.
        with open(filename, 'rb') as f:
            content = f.read()
        meta = self.distribution.metadata
        data = {
            ':action': 'doc_upload',
            'name': meta.get_name(),
            'content': (os.path.basename(filename), content),
        }
        # set up the authentication
        credentials = _encode(self.username + ':' + self.password)
        credentials = standard_b64encode(credentials)
        if six.PY3:
            credentials = credentials.decode('ascii')
        auth = "Basic " + credentials

        body, ct = self._build_multipart(data)

        msg = "Submitting documentation to %s" % (self.repository)
        self.announce(msg, log.INFO)

        # build the Request
        # We can't use urllib2 since we need to send the Basic
        # auth right with the first request
        schema, netloc, url, params, query, fragments = \
            urllib.parse.urlparse(self.repository)
        assert not params and not query and not fragments
        if schema == 'http':
            conn = http_client.HTTPConnection(netloc)
        elif schema == 'https':
            conn = http_client.HTTPSConnection(netloc)
        else:
            raise AssertionError("unsupported schema " + schema)

        data = ''
        try:
            conn.connect()
            conn.putrequest("POST", url)
            content_type = ct
            conn.putheader('Content-type', content_type)
            conn.putheader('Content-length', str(len(body)))
            conn.putheader('Authorization', auth)
            conn.endheaders()
            conn.send(body)
        except socket.error as e:
            self.announce(str(e), log.ERROR)
            return

        r = conn.getresponse()
        if r.status == 200:
            msg = 'Server response (%s): %s' % (r.status, r.reason)
            self.announce(msg, log.INFO)
        elif r.status == 301:
            location = r.getheader('Location')
            if location is None:
                location = 'https://pythonhosted.org/%s/' % meta.get_name()
            msg = 'Upload successful. Visit %s' % location
            self.announce(msg, log.INFO)
        else:
            msg = 'Upload failed (%s): %s' % (r.status, r.reason)
            self.announce(msg, log.ERROR)
        if self.show_response:
            print('-' * 75, r.read(), '-' * 75)
| bsd-3-clause |
hpcloud/php-buildpack | lib/build_pack_utils/utils.py | 41 | 10306 | import os
import sys
import shutil
import logging
import codecs
import inspect
import re
from string import Template
from runner import check_output
_log = logging.getLogger('utils')
def safe_makedirs(path):
    """Create *path* (with parents), ignoring 'already exists' errors."""
    try:
        os.makedirs(path)
    except OSError, e:  # Python 2 except syntax (whole file is py2)
        # Ignore if it exists
        if e.errno != 17:  # 17 == EEXIST
            raise e
def load_env(path):
    """Parse a NAME=VALUE-per-line file into a dict of stripped strings."""
    _log.info("Loading environment from [%s]", path)
    env = {}
    with open(path, 'rt') as env_file:
        for raw_line in env_file:
            # Split on the first '=' only; values may contain '='.
            key, value = raw_line.strip().split('=', 1)
            env[key.strip()] = value.strip()
    _log.debug("Loaded environment [%s]", env)
    return env
def load_processes(path):
    """Parse a name:command-per-line Procfile into a dict of stripped strings."""
    _log.info("Loading processes from [%s]", path)
    procs = {}
    with open(path, 'rt') as proc_file:
        for raw_line in proc_file:
            # Split on the first ':' only; commands may contain ':'.
            name, cmd = raw_line.strip().split(':', 1)
            procs[name.strip()] = cmd.strip()
    _log.debug("Loaded processes [%s]", procs)
    return procs
def load_extension(path):
    """Import and return the `extension` module of the package at *path*.

    Ensures *path* is importable as a package by creating an empty
    __init__.py when missing, and temporarily adds the parent directory
    to sys.path for the duration of the import.
    """
    _log.debug("Loading extension from [%s]", path)
    init = os.path.join(path, '__init__.py')
    if not os.path.exists(init):
        with open(init, 'w'):
            pass  # just create an empty file
    try:
        sys.path.append(os.path.dirname(path))
        extn = __import__('%s.extension' % os.path.basename(path),
                          fromlist=['extension'])
    finally:
        # Always undo the sys.path mutation, even if the import fails.
        sys.path.remove(os.path.dirname(path))
    return extn
def process_extension(path, ctx, to_call, success, args=None, ignore=False):
    """Load the extension at *path* and, if it defines *to_call*, invoke it.

    The method's return value is handed to the *success* callback.  When
    *ignore* is true, exceptions are logged and swallowed; otherwise they
    propagate to the caller.
    """
    _log.debug('Processing extension from [%s] with method [%s]',
               path, to_call)
    if not args:
        # Default to passing the build context as the sole argument.
        args = [ctx]
    extn = load_extension(path)
    try:
        if hasattr(extn, to_call):
            success(getattr(extn, to_call)(*args))
    except Exception:
        if ignore:
            _log.exception("Error with extension [%s]" % path)
        else:
            raise
def process_extensions(ctx, to_call, success, args=None, ignore=False):
    """Run process_extension for every path listed in ctx['EXTENSIONS']."""
    for extn_path in ctx['EXTENSIONS']:
        process_extension(extn_path, ctx, to_call, success, args, ignore)
def rewrite_with_template(template, cfgPath, ctx):
with codecs.open(cfgPath, encoding='utf-8') as fin:
data = fin.read()
with codecs.open(cfgPath, encoding='utf-8', mode='wt') as out:
out.write(template(data).safe_substitute(ctx))
def rewrite_cfgs(toPath, ctx, delim='#'):
    """Substitute *ctx* into one config file, or every file under a directory.

    *delim* sets the template placeholder delimiter (default '#', so
    placeholders look like #{NAME}).
    """
    # Closure over `delim`: a fresh Template subclass per call.
    class RewriteTemplate(Template):
        delimiter = delim

    if os.path.isdir(toPath):
        _log.info("Rewriting configuration under [%s]", toPath)
        for root, dirs, files in os.walk(toPath):
            for f in files:
                cfgPath = os.path.join(root, f)
                _log.debug("Rewriting [%s]", cfgPath)
                rewrite_with_template(RewriteTemplate, cfgPath, ctx)
    else:
        _log.info("Rewriting configuration file [%s]", toPath)
        rewrite_with_template(RewriteTemplate, toPath, ctx)
def find_git_url(bp_dir):
    """Return '<origin-url>#<short-commit>' for the git checkout at *bp_dir*.

    Returns None (implicitly) when *bp_dir* is not a git checkout, git is
    not installed, or either git command produces no output.
    """
    if os.path.exists(os.path.join(bp_dir, '.git')):
        try:
            url = check_output(['git', '--git-dir=%s/.git' % bp_dir,
                                'config', '--get', 'remote.origin.url'])
            commit = check_output(['git', '--git-dir=%s/.git' % bp_dir,
                                   'rev-parse', '--short', 'HEAD'])
            if url and commit:
                return "%s#%s" % (url.strip(), commit.strip())
        except OSError:
            # git binary missing from PATH
            _log.debug("Git does not seem to be installed / available",
                       exc_info=True)
class FormattedDictWrapper(object):
    """Opaque holder that shields a value from FormattedDict formatting."""

    def __init__(self, obj):
        self.obj = obj

    def unwrap(self):
        """Return the wrapped value unchanged."""
        return self.obj

    def __str__(self):
        return str(self.obj)

    def __repr__(self):
        return repr(self.obj)
def wrap(obj):
    """Wrap *obj* so FormattedDict returns it verbatim (no .format calls)."""
    return FormattedDictWrapper(obj)
class FormattedDict(dict):
    """dict whose string values are str.format-expanded against the dict.

    Placeholder expansion repeats until a fixed point, so values may
    reference other keys that themselves contain placeholders.  Values
    wrapped with FormattedDictWrapper are returned unformatted.
    """
    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)

    def format(self, val):
        # Expand {placeholders} repeatedly until the value stops changing,
        # resolving nested references.
        if hasattr(val, 'format'):
            val = val.format(**self)
            newVal = val.format(**self)
            while val != newVal:
                val = newVal
                newVal = newVal.format(**self)
            return val
        # Non-string values: unwrap FormattedDictWrapper, else pass through.
        return val.unwrap() if hasattr(val, 'unwrap') else val

    def __getitem__(self, key):
        return self.format(dict.__getitem__(self, key))

    def get(self, *args, **kwargs):
        # The extra `format=False` keyword skips placeholder expansion.
        if kwargs.get('format', True):
            return self.format(dict.get(self, *args))
        else:
            tmp = dict.get(self, *args)
            return tmp.unwrap() if hasattr(tmp, 'unwrap') else tmp

    def __setitem__(self, key, val):
        # Debug aid: record which caller set each key (inspect is costly,
        # so only when DEBUG logging is enabled).
        if _log.isEnabledFor(logging.DEBUG):
            frame = inspect.currentframe()
            caller = inspect.getouterframes(frame, 2)
            info = caller[1]
            _log.debug('line #%s in %s, "%s" is setting [%s] = [%s]',
                       info[2], info[1], info[3], key, val)
        dict.__setitem__(self, key, val)
class ConfigFileEditor(object):
    """Line-oriented editor for small text configuration files."""

    def __init__(self, cfgPath):
        with open(cfgPath, 'rt') as cfg:
            self._lines = cfg.readlines()

    @staticmethod
    def _as_regex(regex):
        """Coerce a string to a compiled pattern; reject anything else."""
        if hasattr(regex, 'strip'):
            regex = re.compile(regex)
        if not hasattr(regex, 'match'):
            raise ValueError("must be str or RegexObject")
        return regex

    def find_lines_matching(self, regex):
        """Return the stripped lines whose start matches *regex*."""
        pattern = self._as_regex(regex)
        return [line.strip() for line in self._lines if pattern.match(line)]

    def update_lines(self, regex, repl):
        """Apply pattern.sub(repl, ...) to every buffered line."""
        pattern = self._as_regex(regex)
        self._lines = [pattern.sub(repl, line) for line in self._lines]

    def append_lines(self, lines):
        """Append raw lines (newlines included) to the buffer."""
        self._lines.extend(lines)

    def insert_after(self, regex, lines):
        """Insert *lines* (newline appended) after the first match of *regex*."""
        pattern = self._as_regex(regex)
        for index, line in enumerate(self._lines):
            if not pattern.match(line):
                continue
            insertion = ["%s\n" % entry for entry in lines]
            for offset, new_line in enumerate(insertion):
                self._lines.insert(index + offset + 1, new_line)
            break

    def save(self, cfgPath):
        """Write the buffered lines back out to *cfgPath*."""
        with open(cfgPath, 'wt') as cfg:
            cfg.writelines(self._lines)
def unique(seq):
    """Return only the unique items in the given list, but preserve order"""
    seen = set()
    ordered = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            ordered.append(item)
    return ordered
# This is copytree from PyPy 2.7 source code.
# https://bitbucket.org/pypy/pypy/src/9d88b4875d6e/lib-python/2.7/shutil.py
# Modifying this so that it doesn't care about an initial directory existing
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
def copytree(src, dst, symlinks=False, ignore=None):
"""Recursively copy a directory tree using copy2().
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
XXX Consider this example code rather than the ultimate tool.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
try:
os.makedirs(dst)
except OSError, e:
if e.errno != 17: # File exists
raise e
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore)
else:
# Will raise a SpecialFileError for unsupported file types
shutil.copy2(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except shutil.Error, err:
errors.extend(err.args[0])
except EnvironmentError, why:
errors.append((srcname, dstname, str(why)))
try:
shutil.copystat(src, dst)
except OSError, why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
errors.extend((src, dst, str(why)))
if errors:
raise shutil.Error, errors
| apache-2.0 |
ApuliaSoftware/odoo | addons/resource/faces/observer.py | 433 | 2328 | #@+leo-ver=4
#@+node:@file observer.py
#@@language python
#@<< Copyright >>
#@+node:<< Copyright >>
############################################################################
# Copyright (C) 2005, 2006, 2007, 2008 by Reithinger GmbH
# mreithinger@web.de
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
#@-node:<< Copyright >>
#@nl
"""
This module contains the base class for all observer objects
"""
#@<< Imports >>
#@+node:<< Imports >>
#@-node:<< Imports >>
#@nl
_is_source_ = True
#@+others
#@+node:class Observer
class Observer(object):
    """
    Base Class for all charts and reports.

    @var visible: Specifies if the observer is visible
                  at the navigation bar inside the gui.

    @var link_view: syncronizes the marked objects in all views.
    """
    # NOTE: the #@... lines are Leo literate-programming markers; keep them
    # verbatim so the Leo outline stays in sync with this file.
    #@ << declarations >>
    #@+node:<< declarations >>
    __type_name__ = None
    __type_image__ = None
    visible = True
    link_view = True

    # Completion snippets offered to the gui attribute editor.
    __attrib_completions__ = { "visible" : 'visible = False',
                               "link_view" : "link_view = False" }
    #@-node:<< declarations >>
    #@nl
    #@ @+others
    #@+node:register_editors
    def register_editors(cls, registry):
        # Hook for subclasses to register attribute editors; no-op here.
        pass
    register_editors = classmethod(register_editors)  # pre-decorator idiom
    #@-node:register_editors
    #@-others
#@-node:class Observer
#@-others
factories = { }
clear_cache_funcs = {}
#@-node:@file observer.py
#@-leo
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.4/Lib/plat-mac/lib-scriptpackages/Finder/Files.py | 80 | 6439 | """Suite Files: Classes representing files
Level 1, version 1
Generated from /System/Library/CoreServices/Finder.app
AETE/AEUT resource version 0/144, language 0, script 0
"""
import aetools
import MacOS
_code = 'fndr'
# NOTE: this file is machine-generated from the Finder AETE/AEUT resource;
# class/property names and the four-char codes below are protocol constants.
class Files_Events:
    # Empty event mix-in emitted by the suite generator.
    pass

class alias_file(aetools.ComponentItem):
    """alias file - An alias file (created with \xd2Make Alias\xd3) """
    want = 'alia'
class _Prop__3c_Inheritance_3e_(aetools.NProperty):
    """<Inheritance> - inherits some of its properties from the file class """
    which = 'c@#^'
    want = 'file'
class _Prop_original_item(aetools.NProperty):
    """original item - the original item pointed to by the alias """
    which = 'orig'
    want = 'obj '

alias_files = alias_file
# Generated suite class: an application's on-disk file and its properties.
class application_file(aetools.ComponentItem):
    """application file - An application's file on disk """
    want = 'appf'
class _Prop_accepts_high_level_events(aetools.NProperty):
    """accepts high level events - Is the application high-level event aware? (OBSOLETE: always returns true) """
    which = 'isab'
    want = 'bool'
class _Prop_has_scripting_terminology(aetools.NProperty):
    """has scripting terminology - Does the process have a scripting terminology, i.e., can it be scripted? """
    which = 'hscr'
    want = 'bool'
class _Prop_minimum_size(aetools.NProperty):
    """minimum size - the smallest memory size with which the application can be launched """
    which = 'mprt'
    want = 'long'
class _Prop_opens_in_Classic(aetools.NProperty):
    """opens in Classic - Should the application launch in the Classic environment? """
    which = 'Clsc'
    want = 'bool'
class _Prop_preferred_size(aetools.NProperty):
    """preferred size - the memory size with which the application will be launched """
    which = 'appt'
    want = 'long'
class _Prop_suggested_size(aetools.NProperty):
    """suggested size - the memory size with which the developer recommends the application be launched """
    which = 'sprt'
    want = 'long'

application_files = application_file
# Generated suite classes: clipping, document, generic file, internet
# location, and package item types with their AppleEvent property codes.
class clipping(aetools.ComponentItem):
    """clipping - A clipping """
    want = 'clpf'
class _Prop_clipping_window(aetools.NProperty):
    """clipping window - (NOT AVAILABLE YET) the clipping window for this clipping """
    which = 'lwnd'
    want = 'obj '

clippings = clipping

class document_file(aetools.ComponentItem):
    """document file - A document file """
    want = 'docf'

document_files = document_file

class file(aetools.ComponentItem):
    """file - A file """
    want = 'file'
class _Prop_creator_type(aetools.NProperty):
    """creator type - the OSType identifying the application that created the item """
    which = 'fcrt'
    want = 'type'
class _Prop_file_type(aetools.NProperty):
    """file type - the OSType identifying the type of data contained in the item """
    which = 'asty'
    want = 'type'
class _Prop_product_version(aetools.NProperty):
    """product version - the version of the product (visible at the top of the \xd2Get Info\xd3 window) """
    which = 'ver2'
    want = 'utxt'
class _Prop_stationery(aetools.NProperty):
    """stationery - Is the file a stationery pad? """
    which = 'pspd'
    want = 'bool'
class _Prop_version(aetools.NProperty):
    """version - the version of the file (visible at the bottom of the \xd2Get Info\xd3 window) """
    which = 'vers'
    want = 'utxt'

files = file

class internet_location_file(aetools.ComponentItem):
    """internet location file - An file containing an internet location """
    want = 'inlf'
class _Prop_location(aetools.NProperty):
    """location - the internet location """
    which = 'iloc'
    want = 'utxt'

internet_location_files = internet_location_file

class package(aetools.ComponentItem):
    """package - A package """
    want = 'pack'

packages = package
# ---------------------------------------------------------------------------
# Generated wiring: per-class inheritance lists and private property/element
# tables, followed by the module-level declaration indices used by aetools.
# ---------------------------------------------------------------------------
alias_file._superclassnames = ['file']
alias_file._privpropdict = {
    '_3c_Inheritance_3e_' : _Prop__3c_Inheritance_3e_,
    'original_item' : _Prop_original_item,
}
alias_file._privelemdict = {
}
application_file._superclassnames = ['file']
application_file._privpropdict = {
    '_3c_Inheritance_3e_' : _Prop__3c_Inheritance_3e_,
    'accepts_high_level_events' : _Prop_accepts_high_level_events,
    'has_scripting_terminology' : _Prop_has_scripting_terminology,
    'minimum_size' : _Prop_minimum_size,
    'opens_in_Classic' : _Prop_opens_in_Classic,
    'preferred_size' : _Prop_preferred_size,
    'suggested_size' : _Prop_suggested_size,
}
application_file._privelemdict = {
}
clipping._superclassnames = ['file']
clipping._privpropdict = {
    '_3c_Inheritance_3e_' : _Prop__3c_Inheritance_3e_,
    'clipping_window' : _Prop_clipping_window,
}
clipping._privelemdict = {
}
document_file._superclassnames = ['file']
document_file._privpropdict = {
    '_3c_Inheritance_3e_' : _Prop__3c_Inheritance_3e_,
}
document_file._privelemdict = {
}
# `item` (the superclass of file/package) lives in the Finder_items suite.
import Finder_items
file._superclassnames = ['item']
file._privpropdict = {
    '_3c_Inheritance_3e_' : _Prop__3c_Inheritance_3e_,
    'creator_type' : _Prop_creator_type,
    'file_type' : _Prop_file_type,
    'product_version' : _Prop_product_version,
    'stationery' : _Prop_stationery,
    'version' : _Prop_version,
}
file._privelemdict = {
}
internet_location_file._superclassnames = ['file']
internet_location_file._privpropdict = {
    '_3c_Inheritance_3e_' : _Prop__3c_Inheritance_3e_,
    'location' : _Prop_location,
}
internet_location_file._privelemdict = {
}
package._superclassnames = ['item']
package._privpropdict = {
    '_3c_Inheritance_3e_' : _Prop__3c_Inheritance_3e_,
}
package._privelemdict = {
}

#
# Indices of types declared in this module
#
_classdeclarations = {
    'alia' : alias_file,
    'appf' : application_file,
    'clpf' : clipping,
    'docf' : document_file,
    'file' : file,
    'inlf' : internet_location_file,
    'pack' : package,
}

_propdeclarations = {
    'Clsc' : _Prop_opens_in_Classic,
    'appt' : _Prop_preferred_size,
    'asty' : _Prop_file_type,
    'c@#^' : _Prop__3c_Inheritance_3e_,
    'fcrt' : _Prop_creator_type,
    'hscr' : _Prop_has_scripting_terminology,
    'iloc' : _Prop_location,
    'isab' : _Prop_accepts_high_level_events,
    'lwnd' : _Prop_clipping_window,
    'mprt' : _Prop_minimum_size,
    'orig' : _Prop_original_item,
    'pspd' : _Prop_stationery,
    'sprt' : _Prop_suggested_size,
    'ver2' : _Prop_product_version,
    'vers' : _Prop_version,
}

_compdeclarations = {
}

_enumdeclarations = {
}
| mit |
tensorflow/lingvo | lingvo/core/base_input_generator_test.py | 1 | 21940 | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for base_input_generator."""
import contextlib
import copy
import os
import shutil
import tempfile
from absl.testing import flagsaver
import lingvo.compat as tf
from lingvo.core import base_input_generator
from lingvo.core import cluster_factory
from lingvo.core import datasource
from lingvo.core import hyperparams
from lingvo.core import py_utils
from lingvo.core import test_utils
import mock
import numpy as np
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.tpu import device_assignment
# pylint: enable=g-direct-tensorflow-import
def _CreateFakeTFRecordFiles(record_count=10):
  """Writes a TFRecord file of random-audio tf.train.Examples.

  Args:
    record_count: number of Examples to write.

  Returns:
    A (tmpdir, data_path) tuple. The caller owns tmpdir and is
    responsible for deleting it.
  """
  tmpdir = tempfile.mkdtemp()
  data_path = os.path.join(tmpdir, 'fake.tfrecord')
  with tf.io.TFRecordWriter(data_path) as writer:
    for _ in range(record_count):
      # Each record holds 48000 random float samples under the 'audio' key.
      audio_feature = tf.train.Feature(
          float_list=tf.train.FloatList(
              value=np.random.uniform(-1.0, 1.0, 48000)))
      example = tf.train.Example(
          features=tf.train.Features(feature={'audio': audio_feature}))
      writer.write(example.SerializeToString())
  return tmpdir, data_path
class BaseInputGeneratorTest(test_utils.TestCase):
  """Tests batch-size bookkeeping and TPU infeed ops of BaseInputGenerator."""

  @flagsaver.flagsaver(xla_device='tpu', enable_asserts=False)
  def testBatchSizeSingleHostInfeed(self):
    # Single infeed host: infeed batch == global batch == 16 per core * 128.
    with cluster_factory.ForTestingWorker(tpus=128):
      p = base_input_generator.BaseInputGenerator.Params()
      p.batch_size = 16
      p.use_per_host_infeed = False
      input_generator = p.Instantiate()
      self.assertEqual(2048, input_generator.InfeedBatchSize())
      self.assertEqual(2048, input_generator.GlobalBatchSize())

  @flagsaver.flagsaver(xla_device='tpu', enable_asserts=False)
  def testBatchSizePerHostInfeed(self):
    # Per-host infeed: each of the 8 hosts feeds 1/8 of the 2048 global batch.
    with cluster_factory.ForTestingWorker(tpus=128, num_tpu_hosts=8):
      p = base_input_generator.BaseInputGenerator.Params()
      p.batch_size = 16
      p.use_per_host_infeed = True
      input_generator = p.Instantiate()
      self.assertEqual(256, input_generator.InfeedBatchSize())
      self.assertEqual(2048, input_generator.GlobalBatchSize())

  @contextlib.contextmanager
  def _DeviceAssignment(self):
    """A context for tpu device assignment of a JF 8x8 slice."""
    # 8x8x1 mesh with 2 cores per chip: 128 cores spread over 16 hosts.
    mesh_shape = [8, 8, 1, 2]
    device_coordinates = np.zeros([16, 8, 4], dtype=np.int32)
    for i in range(np.prod(mesh_shape)):
      x = i // 16
      y = i % 16 // 2
      core = i % 2
      # Map each (x, y, core) mesh coordinate onto a (host, device) pair.
      task = x // 2 * 4 + y // 2
      device = x % 2 * 4 + y % 2 * 2 + core
      device_coordinates[task, device] = [x, y, 0, core]
    topology = tf.tpu.experimental.Topology(
        mesh_shape=mesh_shape, device_coordinates=device_coordinates)
    assignment = device_assignment.device_assignment(
        topology, computation_shape=[1, 1, 1, 1], num_replicas=128)
    py_utils.SetTpuDeviceAssignment(assignment)
    try:
      yield
    finally:
      # Always clear the process-global assignment so later tests start clean.
      py_utils.SetTpuDeviceAssignment(None)

  @flagsaver.flagsaver(xla_device='tpu', enable_asserts=False)
  def testCreateTpuEnqueueOpsSingleHostInfeed(self):

    class FooInputGenerator(base_input_generator.BaseInputGenerator):
      # Produces the full global batch (2048) from a single host.

      def _InputBatch(self):
        return py_utils.NestedMap(
            inp=tf.constant(1.0, shape=[2048, 3], dtype=tf.float32))

    with cluster_factory.ForTestingWorker(
        tpus=128, num_tpu_hosts=16, add_summary=True):
      with self._DeviceAssignment():
        p = FooInputGenerator.Params()
        p.use_per_host_infeed = False
        input_generator = p.Instantiate()
        input_generator.CreateTpuEnqueueOps()
        batch = input_generator.TpuDequeueBatch()
        # Each of the 128 cores dequeues 2048 / 128 = 16 examples.
        self.assertEqual(batch.inp.shape.as_list(), [16, 3])

  @flagsaver.flagsaver(xla_device='tpu', enable_asserts=False)
  def testCreateTpuEnqueueOpsPerHostInfeed(self):

    class FooInputGenerator(base_input_generator.BaseInputGenerator):
      # Produces one host's share of the batch (2048 / 16 hosts = 128).

      def _InputBatch(self):
        return py_utils.NestedMap(
            inp=tf.constant(1.0, shape=[128, 3], dtype=tf.float32))

    with cluster_factory.ForTestingWorker(tpus=128, num_tpu_hosts=16):
      with self._DeviceAssignment():
        p = FooInputGenerator.Params()
        p.use_per_host_infeed = True
        input_generator = p.Instantiate()
        input_generator.CreateTpuEnqueueOps()
        batch = input_generator.TpuDequeueBatch()
        # Per-core batch is unchanged: 16 examples per core.
        self.assertEqual(batch.inp.shape.as_list(), [16, 3])

  @flagsaver.flagsaver(xla_device='tpu', enable_asserts=False)
  def testCreateTpuEnqueueOpsPerHostInfeed_Sharded(self):

    class FooInputGenerator(base_input_generator.BaseInputGenerator):
      # Produces a host's share as a list of 8 shards of 16 examples each.

      def _InputBatch(self):
        return [
            py_utils.NestedMap(
                inp=tf.constant(1.0, shape=[16, 3], dtype=tf.float32))
            for _ in range(8)
        ]

    with cluster_factory.ForTestingWorker(tpus=128, num_tpu_hosts=16):
      with self._DeviceAssignment():
        p = FooInputGenerator.Params()
        p.use_per_host_infeed = True
        input_generator = p.Instantiate()
        input_generator.CreateTpuEnqueueOps()
        batch = input_generator.TpuDequeueBatch()
        self.assertEqual(batch.inp.shape.as_list(), [16, 3])

  def testGetPreprocessedBatchWithDatasource(self):

    class TestDataset(datasource.TFDatasetSource):

      def GetDataset(self):
        return tf.data.Dataset.from_tensors(0)

    # Using a file_datasource is allowed as long as the generator does not
    # override the batch-producing hooks.
    with self.subTest('AllowedWithNoOverrides'):
      p = base_input_generator.BaseInputGenerator.Params()
      p.file_datasource = TestDataset.Params()
      p.Instantiate().GetPreprocessedInputBatch()

    with self.subTest('AllowedWithBaseInputGeneratorFromFiles'):
      p = base_input_generator.BaseInputGeneratorFromFiles.Params()
      p.file_datasource = TestDataset.Params()
      p.Instantiate().GetPreprocessedInputBatch()

    msg = 'Batches obtained through p.file_datasource'
    with self.subTest('DisallowedWhenOverridingInputBatch'):

      class OverrideInputBatch(base_input_generator.BaseInputGenerator):

        def _InputBatch(self):
          return 0

      p = OverrideInputBatch.Params()
      p.file_datasource = TestDataset.Params()
      with self.assertRaisesRegex(ValueError, msg):
        p.Instantiate().GetPreprocessedInputBatch()

    with self.subTest('DisallowedWhenOverridingPreprocessInputBatch'):

      class OverridePreprocessInputBatch(
          base_input_generator.BaseInputGenerator):

        def _PreprocessInputBatch(self, batch):
          return batch

      p = OverridePreprocessInputBatch.Params()
      p.file_datasource = TestDataset.Params()
      with self.assertRaisesRegex(ValueError, msg):
        p.Instantiate().GetPreprocessedInputBatch()

    # Clusters configured with multiple trainer input replicas are rejected
    # for TFDatasetSource-based datasources.
    with self.subTest('DisallowedWithTrainerInputReplicas'):

      def WithInputTargets():
        ret = copy.deepcopy(cluster_factory.Current())
        ret.params.input.targets = 'a,b'
        ret.params.input.replicas = 2
        return ret

      p = base_input_generator.BaseInputGenerator.Params()
      p.file_datasource = TestDataset.Params()
      msg = 'TFDatasetSource subclassed DataSources do not support'
      with WithInputTargets(), self.assertRaisesRegex(ValueError, msg):
        p.Instantiate().GetPreprocessedInputBatch()
class ToyInputGenerator(base_input_generator.BaseDataExampleInputGenerator):
  """Minimal example-based input generator: one fixed-length audio feature."""

  def GetFeatureSpec(self):
    # Matches the Examples produced by _CreateFakeTFRecordFiles().
    return {'audio': tf.io.FixedLenFeature([48000], tf.float32)}
class BaseExampleInputGeneratorTest(test_utils.TestCase):
  """Tests BaseDataExampleInputGenerator against real TFRecord files."""

  def setUp(self):
    super().setUp()
    tf.reset_default_graph()

  def tearDown(self):
    super().tearDown()
    # Tests stash the fake-record directory in self._tmpdir; remove it.
    if hasattr(self, '_tmpdir'):
      shutil.rmtree(self._tmpdir)

  def testTfRecordFile(self):
    p = ToyInputGenerator.Params()
    p.batch_size = 2
    self._tmpdir, p.input_files = _CreateFakeTFRecordFiles()
    p.dataset_type = tf.data.TFRecordDataset
    p.randomize_order = False
    p.parallel_readers = 1
    ig = p.Instantiate()
    with self.session():
      inputs = ig.GetPreprocessedInputBatch()
      eval_inputs = self.evaluate(inputs)
      input_shapes = eval_inputs.Transform(lambda t: t.shape)
      self.assertEqual(input_shapes.audio, (2, 48000))

  def testTfRecordFileLargeBatch(self):
    # Batch size (200) far exceeds the 10 records on disk; a full batch
    # must still be assembled.
    p = ToyInputGenerator.Params()
    p.batch_size = 200
    self._tmpdir, p.input_files = _CreateFakeTFRecordFiles()
    p.dataset_type = tf.data.TFRecordDataset
    p.randomize_order = False
    p.parallel_readers = 1
    ig = p.Instantiate()
    with self.session():
      inputs = ig.GetPreprocessedInputBatch()
      eval_inputs = self.evaluate(inputs)
      input_shapes = eval_inputs.Transform(lambda t: t.shape)
      self.assertEqual(input_shapes.audio, (200, 48000))

  def testNumEpochs(self):
    # record_count == batch_size, so each batch is exactly one epoch:
    # num_epochs evaluations succeed, the next raises OutOfRangeError.
    p = ToyInputGenerator.Params()
    p.batch_size = 3
    p.num_epochs = 7
    self._tmpdir, p.input_files = _CreateFakeTFRecordFiles(
        record_count=p.batch_size)
    p.dataset_type = tf.data.TFRecordDataset
    p.randomize_order = False
    p.parallel_readers = 1
    ig = p.Instantiate()
    with self.session():
      inputs = ig.GetPreprocessedInputBatch()
      for _ in range(p.num_epochs):
        eval_inputs = self.evaluate(inputs)
        self.assertEqual(eval_inputs.audio.shape, (p.batch_size, 48000))
      with self.assertRaisesRegex(tf.errors.OutOfRangeError,
                                  'End of sequence'):
        self.evaluate(inputs)

  def testRespectsInfeedBatchSize(self):
    p = ToyInputGenerator.Params()
    p.batch_size = 3
    self._tmpdir, p.input_files = _CreateFakeTFRecordFiles()
    p.dataset_type = tf.data.TFRecordDataset
    ig = p.Instantiate()
    batch = ig.GetPreprocessedInputBatch()
    self.assertEqual(batch.audio.shape[0], p.batch_size)
    self.assertEqual(p.batch_size, ig.InfeedBatchSize())
    tf.reset_default_graph()
    # The batch dimension must track InfeedBatchSize(), not p.batch_size.
    ig = p.Instantiate()
    with mock.patch.object(
        ig, 'InfeedBatchSize', return_value=42) as mock_method:
      batch = ig.GetPreprocessedInputBatch()
      self.assertEqual(batch.audio.shape[0], 42)
      mock_method.assert_called()
# Dataset pipelines for TFDataInputTest.
def _TestDatasetFn(begin=0, end=10):
  """Builds a test tf.data pipeline of scalars in [begin, end)."""
  values = tf.range(begin, end)
  dataset = tf.data.Dataset.from_tensor_slices(values)
  return dataset.map(lambda v: {'value': v})
def _TestDatasetFnWithoutDefault(begin, end=10):
  """Same pipeline as _TestDatasetFn, but `begin` has no default."""
  values = tf.range(begin, end)
  dataset = tf.data.Dataset.from_tensor_slices(values)
  return dataset.map(lambda v: {'value': v})
def _TestDatasetFnWithRepeat(begin=0, end=10):
  """Same pipeline as _TestDatasetFn, repeated forever."""
  values = tf.range(begin, end)
  dataset = tf.data.Dataset.from_tensor_slices(values).repeat()
  return dataset.map(lambda v: {'value': v})
def _TestDatasetFnV1(begin=0, end=10):
  """Like _TestDatasetFn, but built explicitly on the TFv1 Dataset API."""
  values = tf.range(begin, end)
  dataset = tf.tf1.data.Dataset.from_tensor_slices(values)
  return dataset.map(lambda v: {'value': v})
def _TestDatasetFnV2(begin=0, end=10):
  """Like _TestDatasetFn, but built explicitly on the TFv2 Dataset API."""
  values = tf.range(begin, end)
  dataset = tf.tf2.data.Dataset.from_tensor_slices(values)
  return dataset.map(lambda v: {'value': v})
class _TestDatasetClass:
  """Generates a test tf.data pipeline from a bound member function."""

  def __init__(self, begin):
    self._begin = begin

  def DatasetFn(self, end=10):
    # `begin` comes from instance state; only `end` is a pipeline argument.
    values = tf.range(self._begin, end)
    return tf.data.Dataset.from_tensor_slices(values).map(
        lambda v: {'value': v})
# A class object which will be instantiated at importing the module.
# It can be used in DefineTFDataInput().
_TestDatasetObject = _TestDatasetClass(begin=0)

# InputGenerators for TFDataInputTest: each DefineTFDataInput() call
# synthesizes an input-generator class wrapping the given dataset function.
_TestTFDataInput = base_input_generator.DefineTFDataInput(
    '_TestTFDataInput', _TestDatasetFn)
_TestTFDataInputWithIgnoreArgs = base_input_generator.DefineTFDataInput(
    '_TestTFDataInputWithIgnoreArgs', _TestDatasetFn, ignore_args=('begin',))
_TestTFDataInputWithMapArgs = base_input_generator.DefineTFDataInput(
    '_TestTFDataInputWithMapArgs',
    _TestDatasetFn,
    map_args={'end': 'num_samples'})
_TestTFDataInputWithoutDefault = base_input_generator.DefineTFDataInput(
    '_TestTFDataInputWithoutDefault', _TestDatasetFnWithoutDefault)
_TestTFDataInputWithRepeat = base_input_generator.DefineTFDataInput(
    '_TestTFDataInputWithRepeat', _TestDatasetFnWithRepeat)
_TestTFDataInputWithBoundMethod = base_input_generator.DefineTFDataInput(
    '_TestTFDataInputWithBoundMethod', _TestDatasetObject.DatasetFn)
_TestTFDataInputV1 = base_input_generator.DefineTFDataInput(
    '_TestTFDataInputV1', _TestDatasetFnV1)
_TestTFDataInputV2 = base_input_generator.DefineTFDataInput(
    '_TestTFDataInputV2', _TestDatasetFnV2)
class TFDataInputTest(test_utils.TestCase):
  """Tests the input-generator classes synthesized by DefineTFDataInput()."""

  def testModule(self):
    # Generated classes must report the defining module, not lingvo's.
    self.assertEqual(_TestTFDataInput.__module__, '__main__')
    self.assertEqual(_TestTFDataInputWithIgnoreArgs.__module__, '__main__')
    self.assertEqual(_TestTFDataInputWithMapArgs.__module__, '__main__')
    self.assertEqual(_TestTFDataInputWithoutDefault.__module__, '__main__')
    self.assertEqual(_TestTFDataInputWithRepeat.__module__, '__main__')
    self.assertEqual(_TestTFDataInputWithBoundMethod.__module__, '__main__')
    self.assertEqual(_TestTFDataInputV1.__module__, '__main__')
    self.assertEqual(_TestTFDataInputV2.__module__, '__main__')

  def testExample(self):
    """Tests the example code in the function docstring."""
    p = _TestTFDataInput.Params()
    # The dataset function's keyword arguments surface as p.args.*.
    self.assertIn('args', p)
    self.assertIn('begin', p.args)
    self.assertIn('end', p.args)
    self.assertEqual(p.args.begin, 0)
    self.assertEqual(p.args.end, 10)
    ig = p.Instantiate()
    self.assertIsInstance(ig, _TestTFDataInput)
    with self.session() as sess:
      data = ig.GetPreprocessedInputBatch()
      self.assertIsInstance(data, py_utils.NestedMap)
      self.assertIsInstance(data.value, tf.Tensor)
      self.assertAllEqual(data.value.shape, ())
      self.assertEqual(data.value.dtype, tf.int32)
      # Consumes all data.
      for i in range(p.args.begin, p.args.end):
        self.assertEqual(sess.run(data).value, i)
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(data)

  def testToFromProto(self):
    """Similar to `testExample` but params will be restored from a proto."""
    serialized_proto = _TestTFDataInput.Params().ToProto()
    p = hyperparams.Params.FromProto(serialized_proto)
    self.assertIn('args', p)
    self.assertIn('begin', p.args)
    self.assertIn('end', p.args)
    self.assertEqual(p.args.begin, 0)
    self.assertEqual(p.args.end, 10)
    ig = p.Instantiate()
    self.assertIsInstance(ig, _TestTFDataInput)
    with self.session() as sess:
      data = ig.GetPreprocessedInputBatch()
      self.assertIsInstance(data, py_utils.NestedMap)
      self.assertIsInstance(data.value, tf.Tensor)
      self.assertAllEqual(data.value.shape, ())
      self.assertEqual(data.value.dtype, tf.int32)
      # Consumes all data.
      for i in range(p.args.begin, p.args.end):
        self.assertEqual(sess.run(data).value, i)
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(data)

  def testWithIgnoreArgs(self):
    """Tests the `ignore_args` parameter."""
    p = _TestTFDataInputWithIgnoreArgs.Params()
    self.assertIn('args', p)
    # 'begin' was ignored, so it must not surface as a param.
    self.assertNotIn('begin', p.args)
    self.assertIn('end', p.args)
    self.assertEqual(p.args.end, 10)
    ig = p.Instantiate()
    self.assertIsInstance(ig, _TestTFDataInputWithIgnoreArgs)
    with self.session() as sess:
      data = ig.GetPreprocessedInputBatch()
      self.assertIsInstance(data, py_utils.NestedMap)
      self.assertIsInstance(data.value, tf.Tensor)
      self.assertAllEqual(data.value.shape, ())
      self.assertEqual(data.value.dtype, tf.int32)
      # Consumes all data.
      for i in range(p.args.end):
        self.assertEqual(sess.run(data).value, i)
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(data)

  def testWithMapArgs(self):
    """Tests the `map_args` parameter."""
    p = _TestTFDataInputWithMapArgs.Params()
    self.assertIn('args', p)
    self.assertIn('num_samples', p)  # Defined by BaseInputGenerator.
    self.assertIn('begin', p.args)
    # 'end' was mapped onto the existing p.num_samples param.
    self.assertNotIn('end', p.args)
    self.assertEqual(p.num_samples, 0)
    self.assertEqual(p.args.begin, 0)
    p.num_samples = 20
    ig = p.Instantiate()
    self.assertIsInstance(ig, _TestTFDataInputWithMapArgs)
    with self.session() as sess:
      data = ig.GetPreprocessedInputBatch()
      self.assertIsInstance(data, py_utils.NestedMap)
      self.assertIsInstance(data.value, tf.Tensor)
      self.assertAllEqual(data.value.shape, ())
      self.assertEqual(data.value.dtype, tf.int32)
      # Consumes all data.
      for i in range(p.args.begin, p.num_samples):
        self.assertEqual(sess.run(data).value, i)
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(data)

  def testWithoutDefault(self):
    """Tests parameters without defaults."""
    p = _TestTFDataInputWithoutDefault.Params()
    self.assertIn('args', p)
    self.assertIn('begin', p.args)
    self.assertIn('end', p.args)
    # A dataset-fn parameter without a default surfaces as None.
    self.assertIsNone(p.args.begin)
    self.assertEqual(p.args.end, 10)
    p.args.begin = 0
    ig = p.Instantiate()
    self.assertIsInstance(ig, _TestTFDataInputWithoutDefault)
    with self.session() as sess:
      data = ig.GetPreprocessedInputBatch()
      self.assertIsInstance(data, py_utils.NestedMap)
      self.assertIsInstance(data.value, tf.Tensor)
      self.assertAllEqual(data.value.shape, ())
      self.assertEqual(data.value.dtype, tf.int32)
      # Consumes all data.
      for i in range(p.args.begin, p.args.end):
        self.assertEqual(sess.run(data).value, i)
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(data)

  def testWithRepeat(self):
    """Tests if the repeated dataset runs forever."""
    p = _TestTFDataInputWithRepeat.Params()
    self.assertIn('args', p)
    self.assertIn('begin', p.args)
    self.assertIn('end', p.args)
    self.assertEqual(p.args.begin, 0)
    self.assertEqual(p.args.end, 10)
    ig = p.Instantiate()
    self.assertIsInstance(ig, _TestTFDataInputWithRepeat)
    with self.session() as sess:
      data = ig.GetPreprocessedInputBatch()
      self.assertIsInstance(data, py_utils.NestedMap)
      self.assertIsInstance(data.value, tf.Tensor)
      self.assertAllEqual(data.value.shape, ())
      self.assertEqual(data.value.dtype, tf.int32)
      # Runs the dataset several times: it should not raise OutOfRangeError.
      for _ in range(3):
        for i in range(p.args.begin, p.args.end):
          self.assertEqual(sess.run(data).value, i)

  def testWithBoundMethod(self):
    """Tests pipeline defined by a bound method: member function with self."""
    p = _TestTFDataInputWithBoundMethod.Params()
    self.assertIn('args', p)
    # `self`/`begin` are bound to the instance; only `end` is a param.
    self.assertNotIn('begin', p.args)
    self.assertIn('end', p.args)
    self.assertEqual(p.args.end, 10)
    ig = p.Instantiate()
    self.assertIsInstance(ig, _TestTFDataInputWithBoundMethod)
    with self.session() as sess:
      data = ig.GetPreprocessedInputBatch()
      self.assertIsInstance(data, py_utils.NestedMap)
      self.assertIsInstance(data.value, tf.Tensor)
      self.assertAllEqual(data.value.shape, ())
      self.assertEqual(data.value.dtype, tf.int32)
      # Consumes all data.
      for i in range(p.args.end):
        self.assertEqual(sess.run(data).value, i)
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(data)

  def testDatasetV1(self):
    """Tests the TFv1 Dataset."""
    p = _TestTFDataInputV1.Params()
    self.assertIn('args', p)
    self.assertIn('begin', p.args)
    self.assertIn('end', p.args)
    self.assertEqual(p.args.begin, 0)
    self.assertEqual(p.args.end, 10)
    ig = p.Instantiate()
    self.assertIsInstance(ig, _TestTFDataInputV1)
    with self.session() as sess:
      data = ig.GetPreprocessedInputBatch()
      self.assertIsInstance(data, py_utils.NestedMap)
      self.assertIsInstance(data.value, tf.Tensor)
      self.assertAllEqual(data.value.shape, ())
      self.assertEqual(data.value.dtype, tf.int32)
      # Consumes all data.
      for i in range(p.args.begin, p.args.end):
        self.assertEqual(sess.run(data).value, i)
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(data)

  def testDatasetV2(self):
    """Tests the TFv2 Dataset."""
    p = _TestTFDataInputV2.Params()
    self.assertIn('args', p)
    self.assertIn('begin', p.args)
    self.assertIn('end', p.args)
    self.assertEqual(p.args.begin, 0)
    self.assertEqual(p.args.end, 10)
    ig = p.Instantiate()
    self.assertIsInstance(ig, _TestTFDataInputV2)
    # We keep the TFv1's Session here since v1/v2 behaviors would not coexist.
    # TODO(oday): write TFv2-specific tests.
    with self.session() as sess:
      data = ig.GetPreprocessedInputBatch()
      self.assertIsInstance(data, py_utils.NestedMap)
      self.assertIsInstance(data.value, tf.Tensor)
      self.assertAllEqual(data.value.shape, ())
      self.assertEqual(data.value.dtype, tf.int32)
      # Consumes all data.
      for i in range(p.args.begin, p.args.end):
        self.assertEqual(sess.run(data).value, i)
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(data)
# Test-binary entry point.
if __name__ == '__main__':
  tf.test.main()
| apache-2.0 |
mnschmit/piano-note-recognition | midi_comparaison.py | 1 | 3581 | #!/usr/bin/python
usage='''
Usage: own_midi_comparaison.py filename.wav filename.mid [pitch_min pitch_max filtering]
Mandatory arguments : two files to compare
Optional arguments : pitch_min (smallest pitch considered), pitch_max (biggest pitch considered), filtering (true or false)
'''
import sys
from librosa import load, stft, logamplitude, note_to_midi, midi_to_hz
import numpy as np
if len(sys.argv) <= 2:
print usage
sys.exit(-1)
filename = sys.argv[1]
midi_filename = sys.argv[2]
pitch_min = note_to_midi('C1')
if len(sys.argv) > 3:
pitch_min = note_to_midi(sys.argv[3])
pitch_max = note_to_midi('C7')
if len(sys.argv) > 4:
pitch_max = note_to_midi(sys.argv[4])
pitches = range(pitch_min, pitch_max + 1)
#pitches = note_to_midi(['C4', 'D4', 'E4', 'F4', 'G4', 'A4', 'B4', 'C5'])
filtering = True
if len(sys.argv) > 5:
if sys.argv[5] == "false":
filtering = False
elif sys.argv[4] == "true":
filtering = True
else:
print "Error reading filtering argument. Assuming true."
### main program ###
x, sr = load(filename)

# compute normal STFT
n_components = len(pitches)  # one NMF component per candidate pitch
n_fft = 2048
hop_length = n_fft * 3 / 4# big hop_length
X = stft(x, n_fft=n_fft, hop_length=hop_length)

### midi visualization ###
from Midi import midi_matrix
midi_mat = midi_matrix(midi_filename, min_pitch=pitch_min, max_pitch=pitch_max)

### NMF ###
# Factorize the magnitude spectrogram V ~= W * H.
V = np.abs(X)

## custom initialisation ##
# While transposed, each row of W_zero is the harmonic template of one pitch
# (length = number of frequency bins, V.shape[0]).
W_zero = np.zeros((V.shape[0], n_components)).transpose()
threshold = 0.1
index = 0
#pitch = pitch_min
for comp in W_zero:
    h = 1
    fund_freq = midi_to_hz(pitches[index])
    # Mark every harmonic h of the fundamental: fill the bins within a
    # +/- `threshold`-octave band around bin fund_freq*h*n_fft/sr with
    # weight 1/h (higher harmonics get smaller weights).
    while int(fund_freq*h*n_fft/sr) < W_zero.shape[1]:
        for freq in range(int(fund_freq*h*n_fft/sr * (2**(-threshold))), int(fund_freq*h*n_fft/sr * (2**threshold))):
            if freq < W_zero.shape[1]:
                comp[freq] = 1.0 / h
        h += 1
    index += 1
W_zero = W_zero.transpose()
# Activations start flat (all ones).
H_zero = np.ones((n_components, V.shape[1]))

from NMF import factorize
comps, acts = factorize(V, W_zero, H_zero)
# filtering activations
if filtering:
    # For each time frame, fold an activation into the next-lower pitch when
    # that lower pitch is both strong (above threshold) and dominant; then
    # zero every activation still below the threshold.
    filter_threshold = np.max(acts) / 5
    for i in range(1, acts.shape[0]):
        for j in range(0, acts.shape[1]):
            if acts[i-1][j] > filter_threshold and acts[i-1][j] > acts[i][j]:
                acts[i-1][j] += acts[i][j]
                acts[i][j] = 0
    acts[acts < filter_threshold] = 0
# visualisation matters
# Side-by-side plot: the MIDI ground truth on the left, the activations
# determined by NMF on the right. The commented blocks below are alternative
# panels (initialisation, input spectrum, reconstruction) kept for debugging.
import matplotlib.pyplot as plt
from librosa.display import specshow
import matplotlib.gridspec as gridspec

plt.close('all')

#plt.subplot2grid((4, 2), (0,0))
#specshow(W_zero, sr=sr, hop_length=n_fft/4, n_yticks=10, y_axis='linear')
#plt.title('Initialised components')
#plt.subplot2grid((4, 2), (0,1))
#specshow(H_zero, sr=sr, x_axis='time')
#plt.title('Randomly initialised activations')
#plt.subplot2grid((3, 2), (0,0), colspan=2)
#specshow(V, sr=sr, x_axis='time', y_axis='linear')
#plt.colorbar()
#plt.title('Input power spectrogram')

plt.subplot2grid((1, 2), (0,0))
specshow(midi_mat, n_yticks=25, y_axis='cqt_note', fmin=midi_to_hz(pitch_min))
plt.xlabel('Ticks')
plt.title('Midi reference')

plt.subplot2grid((1, 2), (0,1))
specshow(acts, sr=sr, hop_length=hop_length, n_yticks=25, y_axis='cqt_note', x_axis='time', fmin=midi_to_hz(pitch_min))
plt.colorbar()
plt.ylabel('Components')
plt.title('Determined Activations')

#plt.subplot2grid((3, 2), (2,0), colspan=2)
#V_approx = comps.dot(acts)
#specshow(V_approx, sr=sr, x_axis='time', y_axis='linear')
#plt.colorbar()
#plt.title('Reconstructed spectrum')

plt.tight_layout()
plt.show()
| gpl-2.0 |
lnielsen/invenio | invenio/legacy/websubmit/functions/Send_Modify_Mail.py | 4 | 3919 | ## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
## Description: function Send_Modify_Mail
## This function sends an email saying the document has been
## correctly updated
## Author: T.Baron
## PARAMETERS: addressesMBI: email addresses to which the mail is sent
## fieldnameMBI: name of the file containing the modified
## fields
## sourceDoc: name of the type of document
## emailFile: name of the file containing the email of the
## user
import os
import re
from invenio.config import CFG_SITE_URL, \
CFG_SITE_NAME, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_SITE_RECORD
from invenio.legacy.websubmit.config import CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN
from invenio.ext.email import send_email
def Send_Modify_Mail (parameters, curdir, form, user_info=None):
    """
    Sends an email notifying that a document has been modified and that
    the user's modifications have been taken into account.

    Parameters:

       * addressesMBI: email addresses of the people who will receive
         this email (comma separated list).

       * fieldnameMBI: name of the file containing the modified
         fields.

       * sourceDoc: Long name for the type of document. This name will
         be displayed in the mail.

       * emailFile: name of the file in which the email of the modifier
         will be found.

    Returns the empty string (websubmit function convention).
    """
    FROMADDR = '%s Submission Engine <%s>' % (CFG_SITE_NAME,CFG_SITE_SUPPORT_EMAIL)
    # sysno (record id) and rn (report number) are injected into this
    # module's globals by the websubmit engine before the call.
    global sysno,rn
    # Read the modifier's email address (the mail recipient) from emailFile.
    if parameters['emailFile'] is not None and parameters['emailFile']!= "" and os.path.exists("%s/%s" % (curdir,parameters['emailFile'])):
        fp = open("%s/%s" % (curdir,parameters['emailFile']),"r")
        sub = fp.read()
        fp.close()
        sub = sub.replace ("\n","")
    else:
        sub = ""
    # Copy mail to:
    addresses = parameters['addressesMBI']
    addresses = addresses.strip()
    # NOTE(review): `addresses` is read and stripped but never used below;
    # recipients come solely from emailFile. Confirm whether addressesMBI
    # was meant to be passed to send_email().
    m_fields = parameters['fieldnameMBI']
    type = parameters['sourceDoc']  # NB: shadows the builtin `type`
    rn = re.sub("[\n\r ]+","",rn)
    # Build the "Modified fields" summary from the fieldnameMBI file
    # (one field per line -> " | "-separated list).
    if os.path.exists("%s/%s" % (curdir,m_fields)):
        fp = open("%s/%s" % (curdir,m_fields),"r")
        fields = fp.read()
        fp.close()
        fields = fields.replace ("\n"," | ")
        fields = re.sub("[| \n\r]+$","",fields)
    else:
        fields = ""
    email_txt = "Dear Sir or Madam, \n%s %s has just been modified.\nModified fields: %s\n\n" % (type,rn,fields)
    # Only link to the record when both the site URL and record id are known.
    if CFG_SITE_URL != "" and sysno != "":
        email_txt += "You can check the modified document here:\n"
        email_txt += "<%s/%s/%s>\n\n" % (CFG_SITE_URL,CFG_SITE_RECORD,sysno)
    email_txt += "Please note that the modifications will be taken into account in a couple of minutes.\n\nBest regards,\nThe %s Server support Team" % CFG_SITE_NAME
    # send the mail if any recipients or copy to admin
    if sub or CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN:
        send_email(FROMADDR,sub,"%s modified" % rn,email_txt,copy_to_admin=CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN)
    return ""
| gpl-2.0 |
kinsney/sport | bike/models.py | 1 | 5857 | from django.db import models
from participator.models import Participator,University
from . import suitHeightChoices,howOldChoices,equipmentChoices,statusChoices,pledgeChoices,GENDER,speedChangeChoices,suspensionChoices,wheelSizeChoices,brakeTypeChoices,handlebarChoices
from datetime import timedelta
# Create your models here.
class Brand(models.Model):
    """A bicycle brand; `name` is the primary key."""
    name = models.CharField(u'名称', max_length=32, primary_key=True)
    thumbnail = models.ImageField(u'缩略图', max_length=128, upload_to='brand/', blank=True)
    # Manual display ordering (lower values first), then name.
    order = models.SmallIntegerField(u'顺序', default=0)

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = u'品牌'
        verbose_name_plural = u'品牌'
        ordering = ('order', 'name')
class Category(models.Model):
    """A bicycle category (e.g. road, mountain); `name` is the primary key."""
    name = models.CharField(u'名称', max_length=32, primary_key=True)
    # Manual display ordering (lower values first).
    order = models.SmallIntegerField(u'顺序', default=0)

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = u'分类'
        verbose_name_plural = u'分类'
        ordering = ['order']
class Version(models.Model):
    """A concrete bicycle model (brand + category + technical specs)."""
    name = models.CharField(u'型号名称', max_length=32, primary_key=True)
    brand = models.ForeignKey(Brand, verbose_name=u'品牌')
    order = models.SmallIntegerField(u'顺序', default=0)
    category = models.ForeignKey(Category, verbose_name=u'单车类型')
    # Original retail price; 0 means unknown.
    price = models.IntegerField(u'原价', blank=True, default=0)
    # Technical specs; the choice lists are imported from this package's
    # __init__ (speedChangeChoices, wheelSizeChoices, ...).
    speedChange = models.CharField(u'变速', choices=speedChangeChoices, blank=True, max_length=10)
    wheelSize = models.CharField(u'车轮尺寸', choices=wheelSizeChoices, blank=True, max_length=10)
    brakeType = models.CharField(u'刹车类型', choices=brakeTypeChoices, blank=True, max_length=10)
    handlebar = models.CharField(u'车把类型', choices=handlebarChoices, blank=True, max_length=10)
    suspension = models.CharField(u'避震类型', choices=suspensionChoices, blank=True, max_length=10)
    quickRelease = models.BooleanField(u'是否快拆', default=True)

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = u'型号'
        verbose_name_plural = u'型号'
        ordering = ('order', 'name')
class Address(models.Model):
    """A named geographic point (name plus lat/long strings) where a bike is parked."""
    name = models.CharField(u'位置名称', max_length=32)
    longitude = models.CharField(u'经度', max_length=20)
    latitude = models.CharField(u'纬度', max_length=20)

    def __str__(self):
        # Display as "<owner's school> <place name>"; fall back to a
        # placeholder when no bike references this address (or the lookup
        # fails for any other reason).
        try:
            bike = Bike.objects.get(address=self)
            return u'%s %s' % (
                bike.owner.school,
                self.name
            )
        except Exception:
            # Fix: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt. `Exception` keeps the original "any failure
            # -> placeholder" behavior without catching those.
            return '无用地址'

    class Meta:
        verbose_name = u'地理位置'
        verbose_name_plural = u'地理位置'
# Default rental-duration limits used by Bike.maxDuration / Bike.minDuration.
default_maxDuration = timedelta(weeks=1)
default_minDuration = timedelta(hours=8)
class Bike(models.Model):
    """A bicycle listed for rent (and optionally for sale) by a participator."""
    name = models.CharField(u'单车名称', max_length=30, null=True)
    number = models.CharField(u'编号', max_length=20, blank=True, null=True)
    version = models.ForeignKey(Version, verbose_name=u'型号', null=True)
    owner = models.ForeignKey(Participator, verbose_name=u'车主', null=True)
    amount = models.IntegerField(u'单车数量', default=1)
    address = models.ForeignKey(Address, verbose_name='具体位置', null=True)
    # Listing state; new listings start as 'checking' (pending review).
    status = models.CharField(u'状态', choices=statusChoices, max_length=10, default='checking')
    # Rent per hour/day/week, in whole currency units.
    hourRent = models.IntegerField("每小时租金", default=0)
    dayRent = models.IntegerField("每天租金", default=0)
    weekRent = models.IntegerField("每周租金", default=0)
    deposit = models.IntegerField(u'押金', blank=True, null=True, default=0)
    # Whether student renters are exempted from the deposit.
    studentDeposit = models.BooleanField(u'学生租客是否免押金', default=True)
    pledge = models.CharField(u'抵押', choices=pledgeChoices, max_length=10, blank=True, null=True, default='noPledge')
    suitHeight = models.CharField(u'适合身高', choices=suitHeightChoices, max_length=10, blank=True, null=True)
    howOld = models.IntegerField(u'新旧程度', choices=howOldChoices, blank=True, null=True)
    sexualFix = models.CharField(u'适合男女', choices=GENDER, max_length=15, default=None, null=True)
    equipment = models.CharField(u'提供装备', max_length=100, blank=True, null=True)
    # Allowed rental-duration window, defaulting to the module-level constants.
    maxDuration = models.DurationField('最长租期', blank=True, null=True, default=default_maxDuration)
    minDuration = models.DurationField('最短租期', blank=True, null=True, default=default_minDuration)
    added = models.DateTimeField('发布时间', auto_now_add=True)
    # Optional window during which the listing is suspended.
    beginTime = models.DateTimeField('暂停起始时间', null=True, blank=True)
    endTime = models.DateTimeField('暂停结束时间', blank=True, null=True)
    description = models.TextField(u'描述', blank=True, null=True)
    # Sale (as opposed to rental) options.
    soldable = models.BooleanField("是否卖车", default=False)
    soldMoney = models.IntegerField("出售价格", blank=True, null=True)

    def user_directory_path(instance, filename):
        # file will be uploaded to MEDIA_ROOT/user_<id>/<filename>
        # NOTE(review): references `instance.bike`, but Bike has no `bike`
        # attribute -- this looks copy-pasted from Photo.user_directory_path
        # and is not used by any field on this model; confirm before reuse.
        return 'user/{0}/{1}'.format(instance.bike.owner.user.username, filename)

    def __str__(self):
        return self.name

    def thumbnail(self):
        # First photo attached to this bike serves as its thumbnail.
        photo = Photo.objects.filter(bike=self)[0]
        return photo

    class Meta:
        verbose_name = u'自行车'
        verbose_name_plural = u'自行车'
class Photo(models.Model):
    """An image attached to a Bike listing."""
    def user_directory_path(instance, filename):
        # file will be uploaded to MEDIA_ROOT/user_<id>/<filename>
        return 'user/{0}/{1}'.format(instance.bike.owner.user.username, filename)
    title = models.CharField(u'图片说明', max_length=10, blank=True)
    content = models.ImageField(u'图片内容', upload_to=user_directory_path)
    bike = models.ForeignKey(Bike, verbose_name='所属车')

    class Meta:
        verbose_name = u'缩略图'
        verbose_name_plural = u'缩略图'
| mit |
josefschneider/ptheremin | ptheremin.py | 1 | 57135 | #!/bin/env python
"""A software simulation of a theremin.
A 2-dimension area serves for control of the instrument; the user drags the mouse on this area to control the frequency and amplitude. Several modes are provided that adds virtual "frets" to allow for the playing of the equal tempered tuning (or subsets of it).
For better musical sound run your sound card into a guitar amp or similar.
Requires Python 2.3+ and PyGTK 2.4+ (not tested on anything older).
http://ptheremin.sourceforge.net
"""
import array
import math
import struct
import threading
import time
import wave
import pyaudio
import serial
import pygtk
pygtk.require('2.0')
import gtk
import pango
# Fret modes selectable in the UI.
SCALES = ("chromatic", "diatonic major", "pentatonic major", "pentatonic minor", "blues")
# Initial instrument base frequency in Hz.
INIT_FREQ = 20

NAME="PTheremin"
VERSION="0.2.1"

# from "Musical Instrument Design" by Bart Hopkin
# One equal-tempered semitone ratio; the tables below describe octave 0,
# with '*' in the labels as a placeholder for the octave number.
sharp = 1.05946
equal_temp_freqs = [16.352, 16.352*sharp, 18.354, 18.354*sharp, 20.602, 21.827, 21.827*sharp, 24.500, 24.500*sharp, 27.500, 27.500*sharp, 30.868]
equal_temp_labels = ['C*', 'C*#', 'D*', 'D*#', 'E*', 'F*', 'F*#', 'G*', 'G*#', 'A*', 'A*#', 'B*']
equal_temp_tuning = zip(equal_temp_labels, equal_temp_freqs)
# NOTE(review): zip() returns a list under Python 2 (this file uses `print`
# statements); under Python 3 the repeated iteration in the loop below would
# see an exhausted iterator -- revisit if porting.

# Scale definitions as semitone offsets within an octave.
diatonic_major_intervals = (0, 2, 4, 5, 7, 9, 11)
pentatonic_major_intervals = (0, 2, 4, 7, 9)
pentatonic_minor_intervals = (0, 3, 5, 7, 10)
blues_intervals = (0, 3, 5, 6, 7, 10)

# build up several octaves of notes: (label, frequency) pairs where each
# octave doubles the base frequency and '*' is replaced by the octave number.
NOTES = []
for octave in range(11):
    for label,freq in equal_temp_tuning:
        NOTES.append((label.replace('*', "%d" % octave), (2**octave)*freq))
def just_freqs(notes):
    """Strip the labels from a list of (label, frequency) pairs."""
    return [pair[1] for pair in notes]
class PlaybackThread(threading.Thread):
    """A thread that generates and plays the theremin tone.

    The GUI thread steers playback through set_new_freq() and the
    ``paused``/``alive`` flags.  Every sample played is also appended to
    ``self.recording`` so it can later be saved as a WAV file.
    """

    def __init__(self, name):
        threading.Thread.__init__(self, name=name)
        #####################################################
        # Constants that come from the pyAudio sample scripts
        # Short int audio format
        self.WIDTH = 2
        self.CHANNELS = 2
        self.RATE = 44100
        # Signed short range
        self.maxVolume = 10000
        self.fs = 44100      # the sample frequency
        self.ft = INIT_FREQ  # the base frequency of the instrument
        self.vol = 1         # current volume, 0..1
        ######################################################
        self.p = pyaudio.PyAudio()
        self.stream = self.p.open(format = self.p.get_format_from_width(self.WIDTH),
                                  channels = self.CHANNELS,
                                  rate = self.RATE,
                                  output = True)
        self.paused = True
        self.alive = True
        self.recording = array.array('h')  # *way* faster than a list for data access

    def run(self):
        def tone_gen(fs):
            """A tone sample generator."""
            x = 0
            pi = math.pi
            sin = math.sin
            ft = self.ft
            sample = 0
            prev_sample = 0
            while 1:
                prev_sample = sample
                sample = sin(2*pi*ft*x/fs)
                # The idea here is to keep the waveform continuous by only changing
                # the frequency at the end of the previous frequency's period. And
                # it works!
                if ft != self.ft and 0.01 > sample > -0.01 and prev_sample < sample:
                    ft = self.ft
                    x = 0
                x += 1
                yield sample*self.vol*0.95  # don't max out the range otherwise we clip

        # to optimize loop performance, dereference everything ahead of time
        tone = tone_gen(self.fs)
        pack_func = struct.pack
        record_func = self.recording.append
        while self.alive:
            if not self.paused:
                # next() builtin instead of .next(): works on Python 2.6+ and 3.x
                clean = next(tone)
                val_i = int(clean*(2**15 - 1))
                sample = pack_func("h", val_i)
                for c in range(self.CHANNELS):
                    # write one sample to each channel
                    self.stream.write(sample, 1)
                record_func(val_i)
            else:
                time.sleep(0.1)

    def stop(self):
        """Stop the worker loop, then tear down the audio stream."""
        print(" [*] Stopping toner...")
        # Stop the run() loop *before* closing the stream, otherwise the
        # worker can race us and write to an already-closed stream.
        self.alive = False
        if self.is_alive():
            self.join()
        self.stream.stop_stream()
        self.stream.close()
        self.p.terminate()
        print(" [*] Toner Done.")

    def set_new_freq(self, freq, vol):
        """Updates the input frequency (Hz) and volume (0..1)."""
        self.ft = freq
        self.vol = vol

    def get_wav_data(self):
        """Return the recorded samples (array of signed shorts)."""
        return self.recording

    def clear_wav_data(self):
        # Clear in place rather than rebinding (the original did
        # ``self.recording = []``): run() caches a bound ``append`` on this
        # array, and rebinding would both silently change the attribute's
        # type and leave the worker appending to the discarded object.
        del self.recording[:]
def iir_2pole(coeff1, coeff2):
    """A two-pole IIR filter generator from that one guy's filter design page
    that I always use.

    Returns a function ``iir(sample)`` that yields successive filtered values
    for a constant input ``sample``; the x/y delay lines persist across calls.

    BUG FIX: the original version defined ``iir`` but never returned it, so
    ``iir_2pole(...)`` evaluated to None.
    """
    xv = [0, 0, 0]  # input delay line
    yv = [0, 0, 0]  # output (feedback) delay line
    def iir(sample):
        while 1:
            xv[0] = xv[1]
            xv[1] = xv[2]
            xv[2] = sample
            yv[0] = yv[1]
            yv[1] = yv[2]
            yv[2] = xv[0] + xv[2] - 2*xv[1] + coeff1*yv[0] + coeff2*yv[1]
            yield yv[2]
    return iir
def discrete_tones(tones):
"""Makes a discrete-tone filter that latches to particular tones."""
def filt(x):
closest = tones[0]
err = 500000
mean = 0
iir = iir_2pole(-.9979871157, 1.997850878)
for i,tone in enumerate(tones):
tone_err = abs(x - tone)
if tone_err < err:
closest = tone
err = tone_err
elif tone_err > err:
if i > 0:
mean = (x - closest)/2
break
return closest + mean
return filt
class ThereminApp(object):
"""The GUI part of the theremin."""
# Window-close request: returning False lets GTK proceed to emit "destroy",
# where the real cleanup happens.
def delete_event(self, w, e, d=None): return False
def destroy(self, w=None, d=None):
    """Stop all worker threads, then exit the GTK main loop."""
    for thread in self.threads.values():
        thread.stop()
    gtk.main_quit()
# the next 5 functions were ripped from the scribblesimple.py example
def configure_event(self, widget, event):
    """Rebuild the backing pixmap (fret lines, note labels, volume grid)
    whenever the drawing area is created or resized."""
    # Create a new backing pixmap of the appropriate size
    x, y, width, height = widget.get_allocation()
    self.pixmap = gtk.gdk.Pixmap(widget.window, width, height)
    self.pixmap.draw_rectangle(widget.get_style().black_gc,
                               True, 0, 0, width, height)
    # Map each note frequency linearly onto an x pixel in [freq_min, freq_max].
    notes = [(label, int(float(x - self.freq_min)*width/(self.freq_max - self.freq_min))) for label,x in self.discrete_notes]
    root_notes = [(label, int(float(x - self.freq_min)*width/(self.freq_max - self.freq_min))) for label,x in self.root_notes]
    ygrid = height/10
    # this is the "intuitive" way to get the gc to be different colors... why isn't this in the pygtk tutorial???
    gc = widget.window.new_gc()
    gc.foreground = gtk.gdk.colormap_get_system().alloc_color(56360, 56360, 56360)
    # TODO when things are cleaner we need to color the root notes differently
    root_gc = widget.window.new_gc()
    root_gc.foreground = gtk.gdk.colormap_get_system().alloc_color(20000, 60000, 20000)
    root_gc.line_width = 3
    pc = widget.get_pango_context()
    layout = pango.Layout(pc)
    layout.set_font_description(pango.FontDescription("sans 8"))
    for label,x in notes:
        # Drop the octave digit, e.g. "C4#" -> "C#", for a compact label.
        if len(label) == 3:
            l = label[0] + label[2]
        else:
            l = label[0]
        layout.set_text(l)
        if (label,x) in root_notes:
            # Root notes get the thicker green line.
            self.pixmap.draw_line(root_gc, x, 0, x, height)
            self.pixmap.draw_layout(root_gc, x + 2, 0, layout)
        else:
            self.pixmap.draw_line(gc, x, 0, x, height)
            self.pixmap.draw_layout(gc, x + 2, 0, layout)
    # Horizontal guide lines every 10% of the height (volume reference).
    for y in range(height):
        if y % ygrid == 0:
            self.pixmap.draw_line(gc, 0, y, width, y)
    return True
def expose_event(self, widget, event):
    """Repaint only the damaged region, copying from the backing pixmap."""
    # Redraw the screen from the backing pixmap
    x , y, width, height = event.area
    widget.window.draw_drawable(widget.get_style().fg_gc[gtk.STATE_NORMAL],
                                self.pixmap, x, y, x, y, width, height)
    return False
def redraw_input(self, widget):
    """Rebuild the backing pixmap and force the control area to repaint
    (used after the scale/key changes move the fret lines)."""
    # redraw the pixmap
    self.configure_event(widget, None)
    # force the drawing area to be redrawn
    alloc = widget.get_allocation()
    rect = gtk.gdk.Rectangle(alloc.x, alloc.y, alloc.width, alloc.height)
    widget.window.invalidate_rect(rect, True)
def draw_brush(self, widget, x, y):
    """Draw a 10x10 black square centred on (x, y).

    Only referenced from the commented-out call in button_press_event,
    so it is effectively unused at the moment."""
    # Draw a rectangle on the screen
    rect = (int(x-5), int(y-5), 10, 10)
    self.pixmap.draw_rectangle(widget.get_style().black_gc, True,
                               rect[0], rect[1], rect[2], rect[3])
    widget.queue_draw_area(rect[0], rect[1], rect[2], rect[3])
def button_press_event(self, widget, event):
    """Left-button press handler; the brush drawing is deliberately
    disabled (see the commented-out call)."""
    if event.button == 1 and self.pixmap != None:
        pass#self.draw_brush(widget, event.x, event.y)
    return True
def motion_notify_event(self, widget, event):
    """Map a button-1 drag position to (frequency, volume) and retune.

    x maps linearly onto [freq_min, freq_max]; y maps onto 0..1 with the
    top of the area loudest, then is rescaled to 1..10 and log-compressed
    before being handed to set_tone().
    """
    if event.is_hint:
        # Hint mode: fetch the pointer so GTK keeps sending motion events.
        x, y, state = event.window.get_pointer()
    else:
        x = event.x
        y = event.y
        state = event.state
    if state & gtk.gdk.BUTTON1_MASK and self.pixmap != None:
        width, height = widget.window.get_size()
        freq = (x/float(width))*(self.freq_max - self.freq_min) + self.freq_min
        # Clamp to the playable range.
        if freq > self.freq_max:
            freq = self.freq_max
        if freq < self.freq_min:
            freq = self.freq_min
        vol = (height - y)/float(height)
        if vol > 1:
            vol = 1
        if vol < 0:
            vol = 0
        vol = 9*vol + 1 # scale to the range 1 - 10
        vol = math.log10(vol) # log scale
        self.set_tone(freq, vol)
    return True
    #return widget.emit("motion_notify_event", event)
def make_menu(self):
    """Build the menu bar and toolbar via gtk.UIManager.

    Returns a (menubar_widget, toolbar_widget) pair ready for packing.
    """
    menu_def = """
    <ui>
    <menubar name="MenuBar">
    <menu action="File">
    <menuitem action="SaveAs"/>
    <separator/>
    <menuitem action="Quit"/>
    </menu>
    <menu action="Help">
    <menuitem action="About"/>
    </menu>
    </menubar>
    <toolbar name="ToolBar">
    <toolitem action="Play"/>
    <toolitem action="Stop"/>
    </toolbar>
    </ui>
    """
    # Toolbar callbacks just flip the playback thread's pause flag.
    def stop(w):
        self.threads['playback'].paused = True
    def play(w):
        self.threads['playback'].paused = False
    # so this runs on older GTK versions (2.2?)
    try:
        self.about_dialog = gtk.AboutDialog()
        self.about_dialog.set_name(NAME)
        self.about_dialog.set_authors(["nbm_clan@yahoo.com"])
        self.about_dialog.set_comments("A software simulation of a Theremin (see http://en.wikipedia.org/wiki/Theremin) with a few added features.")
        self.about_dialog.set_version(VERSION)
        self.about_dialog.set_license("GPLv2")
        self.about_dialog.set_logo(gtk.gdk.pixbuf_new_from_inline(len(self.logo), self.logo, False))
        about_icon = gtk.STOCK_ABOUT
        play_icon = gtk.STOCK_MEDIA_PLAY
        stop_icon = gtk.STOCK_MEDIA_STOP
    except AttributeError, e:
        # Older PyGTK lacks AboutDialog / stock media icons: degrade gracefully.
        self.about_dialog = None
        about_icon = None
        play_icon = None
        stop_icon = None
    # (name, stock icon, label, accelerator, tooltip, callback)
    actions = [
        ('File', None, '_File'),
        ('SaveAs', gtk.STOCK_SAVE_AS, 'Save Recording _As...', None, 'Save recording', self.saveas),
        ('Quit', gtk.STOCK_QUIT, '_Quit', None, 'Quit', self.destroy),
        ('Help', None, '_Help'),
        ('About', about_icon, '_About', None, 'About', lambda w: self.about_dialog and self.about_dialog.show_all() and self.about_dialog.run()),
        ('Play', play_icon, 'Play', None, 'Play', play),
        ('Stop', stop_icon, 'Stop', None, 'Stop', stop),
    ]
    ag = gtk.ActionGroup('menu')
    ag.add_actions(actions)
    ui = gtk.UIManager()
    ui.insert_action_group(ag, 0)
    ui.add_ui_from_string(menu_def)
    return ui.get_widget('/MenuBar'), ui.get_widget('/ToolBar')
def make_input_widget(self, lower, upper):
    """Build the "Control" frame: the playing surface plus a frequency
    ruler (top, spanning lower..upper Hz) and a volume ruler (left)."""
    input_frame = gtk.Frame("Control")
    input = gtk.DrawingArea()
    input.set_size_request(100, 100)
    input.show()
    # Signals used to handle backing pixmap
    input.connect("expose_event", self.expose_event)
    input.connect("configure_event", self.configure_event)
    # Event signals
    input.connect("button_press_event", self.button_press_event)
    input.set_events(gtk.gdk.EXPOSURE_MASK
                     | gtk.gdk.LEAVE_NOTIFY_MASK
                     | gtk.gdk.BUTTON_PRESS_MASK
                     | gtk.gdk.POINTER_MOTION_MASK)
                     #| gtk.gdk.POINTER_MOTION_HINT_MASK)
    input_table = gtk.Table(4, 3, False)
    input_table.attach(input, 2, 3, 2, 3, gtk.EXPAND | gtk.FILL, gtk.FILL, 0, 0)
    # Forward the drawing area's motion events to a ruler so its position
    # marker tracks the mouse.
    def motion_notify(ruler, event):
        return ruler.emit("motion_notify_event", event)
    hrule = gtk.HRuler()
    hrule.set_range(lower, upper, lower, upper)
    input.connect_object("motion_notify_event", motion_notify, hrule)
    input_table.attach(hrule, 2, 3, 1, 2, gtk.EXPAND | gtk.SHRINK | gtk.FILL, gtk.FILL, 0, 0)
    vrule = gtk.VRuler()
    vrule.set_range(1, 0, 0, 1)
    input.connect_object("motion_notify_event", motion_notify, vrule)
    input_table.attach(vrule, 1, 2, 2, 3, gtk.FILL, gtk.EXPAND | gtk.SHRINK | gtk.FILL, 0, 0)
    input.connect("motion_notify_event", self.motion_notify_event)
    input_table.attach(gtk.Label("V\no\nl\nu\nm\ne"), 0, 1, 0, 3, gtk.FILL, gtk.EXPAND | gtk.SHRINK | gtk.FILL, 0, 0)
    input_table.attach(gtk.Label("Frequency (Hz)"), 1, 3, 0, 1, gtk.EXPAND | gtk.SHRINK | gtk.FILL, gtk.FILL, 0, 0)
    input_frame.add(input_table)
    return input_frame
def init_ui(self):
    """All the gory details of the GUI."""
    self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
    self.window.set_size_request(600, 600)
    self.window.set_title(NAME)
    self.window.set_icon(gtk.gdk.pixbuf_new_from_inline(len(self.logo), self.logo, False))
    # so the close button works
    self.window.connect("delete_event", self.delete_event)
    self.window.connect("destroy", self.destroy)
    self.root = gtk.VBox(False, 1)
    self.window.add(self.root)
    self.root.show()
    # Menu bar and toolbar across the top.
    menubar, toolbar = self.make_menu()
    self.root.pack_start(menubar, False)
    self.root.pack_start(toolbar, False)
    # "Options" row: output mode + key on the left, scale, master volume.
    opts_box = gtk.HBox(False, 1)
    opts_frame = gtk.Frame("Options")
    opts_frame.add(opts_box)
    self.root.pack_start(opts_frame, False, False)
    mode_and_key = gtk.VBox(False, 1)
    mode_frame = gtk.Frame("Output mode")
    mode_frame.set_shadow_type(gtk.SHADOW_NONE)
    mode_ctls = gtk.VBox(False, 1)
    mode_frame.add(mode_ctls)
    mode_and_key.pack_start(mode_frame, False, False)
    opts_box.pack_start(mode_and_key, False, False)
    rb1 = gtk.RadioButton(None, 'continuous')
    rb1.connect("toggled", self.mode_changed, 'continuous')
    mode_ctls.pack_start(rb1, False, False)
    rb2 = gtk.RadioButton(rb1, 'discrete')
    rb2.connect("toggled", self.mode_changed, 'discrete')
    mode_ctls.pack_start(rb2, False, False)
    # One radio button per entry in SCALES; the first ("chromatic") starts
    # active, matching the default set in __init__.
    scale_frame = gtk.Frame("Scale")
    scale_frame.set_shadow_type(gtk.SHADOW_NONE)
    scale_ctls = gtk.VBox(False, 1)
    scale_frame.add(scale_ctls)
    opts_box.pack_start(scale_frame, False, False)
    first_rb = None
    for scale in SCALES:
        rb = gtk.RadioButton(first_rb, scale)
        rb.connect("toggled", self.scale_changed, scale)
        if first_rb == None:
            first_rb = rb
            rb.set_active(True)
        scale_ctls.pack_start(rb, False, False)
    key_frame = gtk.Frame("Key")
    key_frame.set_shadow_type(gtk.SHADOW_NONE)
    key_ctl = gtk.combo_box_new_text()
    for key in ["A", "A#", "B", "C", "C#", "D", "D#", "E", "F", "F#", "G", "G#"]:
        key_ctl.append_text(key)
    # Index 3 is "C", matching the default key in __init__.
    key_ctl.set_active(3)
    key_ctl.connect("changed", self.key_changed, key_ctl)
    key_frame.add(key_ctl)
    mode_and_key.pack_start(key_frame, False, False)
    volume_frame = gtk.Frame("Volume")
    volume_frame.set_shadow_type(gtk.SHADOW_NONE)
    volume = gtk.VScale(gtk.Adjustment(value=7, lower=1, upper=10))
    volume.set_draw_value(False)
    volume_frame.add(volume)
    volume.set_inverted(True)
    opts_box.pack_start(volume_frame, False, False)
    volume.connect("value-changed", self.master_volume_changed)
    self.root.pack_start(gtk.HSeparator(), False, False)
    # The main playing surface.
    self.pixmap = None
    self.inputs = []
    self.inputs.append(self.make_input_widget(self.freq_min, self.freq_max))
    self.root.pack_start(self.inputs[0], True, True)
    self.window.show_all()
    # status
    self.status = gtk.Statusbar()
    self.status.show()
    self.root.pack_end(self.status, False, False)
def saveas(self, w):
    """Prompt for a filename and save the recording as a mono 16-bit WAV,
    showing a cancellable progress dialog while writing."""
    open_diag = gtk.FileChooserDialog(title="Save Recording", parent=self.window, action=gtk.FILE_CHOOSER_ACTION_SAVE,
                                      buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_SAVE,gtk.RESPONSE_OK))
    ffilt = gtk.FileFilter()
    ffilt.add_pattern("*.wav")
    open_diag.add_filter(ffilt)
    response = open_diag.run()
    if response == gtk.RESPONSE_OK:
        # Mono / 16-bit / 44.1 kHz matches what PlaybackThread records
        # (one signed short per frame).
        output = wave.open(open_diag.get_filename(), 'w')
        output.setnchannels(1)
        output.setsampwidth(2)
        output.setframerate(44100)
        pbar = gtk.ProgressBar()
        pbar.set_fraction(0)
        d = gtk.Dialog(title="Saving recording . . .", parent=self.window,
                       flags=gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                       buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT))
        d.action_area.pack_start(pbar, True, True, 0)
        d.set_has_separator(True)
        d.show_all()
        # One-element list so the nested handler can set the flag (Python 2
        # has no ``nonlocal``).
        abort = [False]
        def print_response(w, r):
            if abort[0] == False:
                abort[0] = True
        d.connect("response", print_response)
        n = len(self.threads['playback'].get_wav_data())
        for i,sample in enumerate(self.threads['playback'].get_wav_data()):
            # Update the progress bar (and service pending GTK events) only
            # every 256 samples to keep the write loop fast.
            if i % 256 == 0:
                pbar.set_fraction(float(i)/n)
                # so that the progress bar dialog shows/updates
                while gtk.events_pending():
                    gtk.mainiteration()
                if abort[0]:
                    break
            output.writeframes(struct.pack('h', sample))
        output.close()
        d.destroy()
    open_diag.destroy()
def new_tone_filter(self):
    """Rebuild the discrete-note filter for the current key and scale,
    then redraw the fret lines on every input widget."""
    # Every 12th entry of the key-shifted note list is the key's root.
    self.root_notes = [n for i,n in enumerate(self.shifted_notes) if i % 12 == 0]
    if self.scale == 'chromatic':
        # All 12 semitones are included, so the key shift is irrelevant
        # and the unshifted NOTES list works here.
        key_notes = NOTES
    elif self.scale == 'diatonic major':
        key_notes = [n for i,n in enumerate(self.shifted_notes) if i % 12 in diatonic_major_intervals]
    elif self.scale == 'pentatonic major':
        key_notes = [n for i,n in enumerate(self.shifted_notes) if i % 12 in pentatonic_major_intervals]
    elif self.scale == 'pentatonic minor':
        key_notes = [n for i,n in enumerate(self.shifted_notes) if i % 12 in pentatonic_minor_intervals]
    elif self.scale == 'blues':
        key_notes = [n for i,n in enumerate(self.shifted_notes) if i % 12 in blues_intervals]
    self.tone_filter = discrete_tones(just_freqs(key_notes))
    self.discrete_notes = key_notes
    for input in self.inputs:
        self.redraw_input(input)
def scale_changed(self, button, scale_name):
    """Radio-button handler: act only on the button becoming active
    (each switch toggles two buttons, so this fires twice)."""
    if button.get_active():
        self.scale = scale_name
        self.new_tone_filter()
def mode_changed(self, button, mode):
    """Radio-button handler: switch between 'continuous' and 'discrete'
    output modes (see set_tone for the difference)."""
    if button.get_active():
        self.mode = mode
        self.new_tone_filter()
def key_changed(self, button, key):
    """Combo-box handler: rotate the note table so the chosen key's root
    lands on index 0 (NOTES starts at C)."""
    self.key = key.get_active_text()
    self.shifted_notes = list(NOTES)
    # Semitone offset of each key above C.
    shifts = {
        'A': 9,
        'A#': 10,
        'B': 11,
        'C': 0,
        'C#': 1,
        'D': 2,
        'D#': 3,
        'E': 4,
        'F': 5,
        'F#': 6,
        'G': 7,
        'G#': 8,
    }
    # Rotate left: pop from the front, append to the back, once per semitone.
    for i in range(shifts[self.key]):
        self.shifted_notes.append(self.shifted_notes.pop(0))
    self.new_tone_filter()
def master_volume_changed(self, slider):
    """Slider handler: log-compress the 1..10 slider value and re-apply
    the current tone so the change is audible immediately."""
    self.master_volume = math.log10(slider.get_value())
    self.set_tone(self.freq, self.vol)
def set_tone(self, freq, vol):
    """Route a (frequency Hz, volume 0..1) pair to the playback thread.

    In 'discrete' mode the frequency is first snapped by the tone filter
    (virtual frets); in 'continuous' mode it passes straight through.
    """
    self.freq = freq
    self.vol = vol
    if self.mode == 'discrete':
        closest = self.tone_filter(freq)
    else:
        closest = freq
    self.status.push(self.status.get_context_id("note"), "Output frequency: %.2f Hz - volume %.2f%%" % (closest, vol))
    self.threads['playback'].set_new_freq(closest, vol*self.master_volume)
def pause(self, button):
    """Toggle-button handler: active means play, inactive means pause."""
    if button.get_active():
        self.threads['playback'].paused = False
    else:
        self.threads['playback'].paused = True
def __init__(self):
    """Create the playback thread, the instrument state, and the GUI,
    then start all worker threads."""
    self.threads = {}
    self.threads['playback'] = PlaybackThread("playback")
    # Start at the same base frequency the playback thread uses.  (The
    # original assigned ``self.freq = INIT_FREQ`` and then immediately
    # dead-stored it with ``self.freq = 0`` -- a value below freq_min and
    # inconsistent with PlaybackThread.ft; the duplicate is removed.)
    self.freq = INIT_FREQ
    self.freq_max = 2000
    self.freq_min = 20
    # Defaults must agree with the widgets built in init_ui (chromatic
    # radio active, combo box set to "C").
    self.mode = 'continuous'
    self.scale = 'chromatic'
    self.key = 'C'
    self.shifted_notes = NOTES
    self.discrete_notes = NOTES
    self.root_notes = [x for i, x in enumerate(NOTES) if i % 12 == 0]
    # Matches the volume slider's initial value of ~7 on its log scale.
    self.master_volume = math.log10(7.2)
    self.vol = 0
    self.tone_filter = discrete_tones(just_freqs(NOTES))
    self.init_ui()
    gtk.gdk.threads_init()
    for thread in self.threads.values():
        thread.start()
def main(self):
    """Run the GTK main loop, holding the gdk lock for threaded use."""
    gtk.gdk.threads_enter()
    gtk.main()
    gtk.gdk.threads_leave()
logo = "" +\
"GdkP" +\
"\0\0$\30" +\
"\1\1\0\2" +\
"\0\0\0\300" +\
"\0\0\0""0" +\
"\0\0\0""0" +\
"\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\376\376\376\0\334\334\334" +\
"\377uuu\377AAA\377>>>\377\77\77\77\377CCC\377\247\247\247\377\371\371" +\
"\371\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\376\376\376\0\360\360" +\
"\360\0\223\223\223\377bbb\377:::\377333\377///\377///\377===\377\317" +\
"\317\317\377\376\376\376\0\377\377\377\0\377\377\377\0\377\377\377\0" +\
"\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\375\375\375" +\
"\0\376\376\376\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\376\376\376" +\
"\0\353\353\353\252\335\335\335\377\322\322\322\377\276\276\276\377\225" +\
"\225\225\377GGG\377---\377///\377\204\204\204\377\375\375\375\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\362\362\362\0\376\376\376\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\372\372\372\0\340\340\340\377\343\343\343\377\335" +\
"\335\335\377\325\325\325\377\241\241\241\377```\377---\377)))\377```" +\
"\377\375\375\375\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\376\376\376\0\355\355\355f\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\362\362\362\0\337\337" +\
"\337\377\340\340\340\377\331\331\331\377\310\310\310\377\200\200\200" +\
"\377999\377)))\377***\377QQQ\377\373\373\373\0\376\376\376\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\376\376" +\
"\376\0\356\356\356D\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\356\356\356D\262\262\262\377\277\277\277\377}}}\377\226\226\226" +\
"\377NNN\377AAA\377)))\377+++\377iii\377\374\374\374\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\376" +\
"\376\376\0\360\360\360\0\376\376\376\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\375\375\375\0\375\375\375\0\375\375\375\0\371" +\
"\371\371\0\374\374\374\0\374\374\374\0\376\376\376\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\372\372\372\0\326\326\326\377\211\211\211\377\306\306\306" +\
"\377\301\301\301\377\205\205\205\377FFF\377;;;\377rrr\377\302\302\302" +\
"\377\375\375\375\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\376\376\376\0\360\360\360\0\376\376" +\
"\376\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\375\375" +\
"\375\0\361\361\361\0\331\331\331\377\227\227\227\377\222\222\222\377" +\
"\335\335\335\377\375\375\375\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\374\374\374" +\
"\0\314\314\314\377ccc\377\303\303\303\377\315\315\315\377vvv\377AAA\377" +\
"WWW\377\213\213\213\377\365\365\365\0\376\376\376\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\376" +\
"\376\376\0\357\357\357\"\376\376\376\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\376\376\376\0\311\311\311\377bbb\377@@@\377" +\
">>>\377\254\254\254\377\375\375\375\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\375" +\
"\375\375\0\314\314\314\377}}}\377\271\271\271\377\257\257\257\377ZZZ" +\
"\377DDD\377ddd\377\335\335\335\377\375\375\375\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\375\375\375\0\355\355\355f\376\376\376\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\375\375\375\0\334\334\334\377\215" +\
"\215\215\377\214\214\214\377\202\202\202\377\232\232\232\377\370\370" +\
"\370\0\376\376\376\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\375\375\375\0\325\325\325\377\237" +\
"\237\237\377\235\235\235\377\211\211\211\377WWW\377DDD\377\213\213\213" +\
"\377\373\373\373\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\375\375" +\
"\375\0\355\355\355f\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\375\375\375\0\322\322\322\377\261\261\261\377\315" +\
"\315\315\377\271\271\271\377www\377\311\311\311\377\376\376\376\0\377" +\
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\376\376\376\0\352\352\352\314\320\320\320\377\260\260\260" +\
"\377ccc\377AAA\377FFF\377\237\237\237\377\371\371\371\0\375\375\375\0" +\
"\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\376\376\376\0\355\355\355f\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\376\376\376" +\
"\0\323\323\323\377\307\307\307\377\326\326\326\377\317\317\317\377\237" +\
"\237\237\377\354\354\354\210\376\376\376\0\376\376\376\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\376\376\376" +\
"\0\367\367\367\0\312\312\312\377\211\211\211\377YYY\377XXX\377fff\377" +\
"\241\241\241\377\322\322\322\377\375\375\375\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\376\376\376\0\355\355\355f\376\376\376\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\376\376\376\0\375\375\375\0\337\337\337\377\322" +\
"\322\322\377\314\314\314\377\210\210\210\377\265\265\265\377\374\374" +\
"\374\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\376\376\376\0\376\376" +\
"\376\0\376\376\376\0\377\377\377\0\375\375\375\0\371\371\371\0\277\277" +\
"\277\377\324\324\324\377\245\245\245\377\251\251\251\377\314\314\314" +\
"\377rrr\377\77\77\77\377\322\322\322\377\375\375\375\0\375\375\375\0" +\
"\376\376\376\0\376\376\376\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\376\376\376\0\355\355\355f\376\376\376\0\377\377\377\0\377\377\377" +\
"\0\375\375\375\0\374\374\374\0\361\361\361\0\320\320\320\377\243\243" +\
"\243\377fff\377TTT\377\355\355\355f\374\374\374\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\374\374\374\0\370\370\370\0\347\347\347\377\331" +\
"\331\331\377\334\334\334\377\223\223\223\377zzz\377\343\343\343\377\337" +\
"\337\337\377\277\277\277\377OOO\377>>>\377'''\377888\377\211\211\211" +\
"\377\343\343\343\377\374\374\374\0\375\375\375\0\376\376\376\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\375\375\375\0\356\356\356D\376\376\376\0\377\377" +\
"\377\0\377\377\377\0\375\375\375\0\351\351\351\356\272\272\272\377\303" +\
"\303\303\377\276\276\276\377ooo\377666\377\230\230\230\377\372\372\372" +\
"\0\376\376\376\0\376\376\376\0\376\376\376\0\377\377\377\0\377\377\377" +\
"\0\376\376\376\0\376\376\376\0\375\375\375\0\264\264\264\377MMM\3772" +\
"22\377+++\377666\377111\377777\377___\377\203\203\203\377UUU\377\227" +\
"\227\227\377KKK\377%%%\377'''\377(((\377888\377|||\377\267\267\267\377" +\
"\361\361\361\0\376\376\376\0\376\376\376\0\376\376\376\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\376\376\376\0\375\375\375\0\356\356\356" +\
"D\377\377\377\0\377\377\377\0\377\377\377\0\375\375\375\0\365\365\365" +\
"\0yyy\377III\377EEE\377###\377!!!\377111\377\320\320\320\377\372\372" +\
"\372\0\374\374\374\0\375\375\375\0\374\374\374\0\371\371\371\0\373\373" +\
"\373\0\345\345\345\377\235\235\235\377---\377&&&\377\"\"\"\377///\377" +\
"333\377555\377---\377eee\377\207\207\207\377HHH\377\233\233\233\377'" +\
"''\377$$$\377%%%\377###\377###\377$$$\377'''\377\77\77\77\377\247\247" +\
"\247\377\373\373\373\0\374\374\374\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\376\376\376\0\375\375\375\0\357\357\357\"\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\376\376\376\0\374\374\374\0\354\354\354\210" +\
"VVV\377!!!\377\40\40\40\377\37\37\37\377!!!\377555\377\255\255\255\377" +\
"\340\340\340\377\304\304\304\377\301\301\301\377\202\202\202\377vvv\377" +\
"BBB\377+++\377%%%\377\"\"\"\377\37\37\37\377...\377///\377///\377jjj" +\
"\377\336\336\336\377\342\342\342\377\321\321\321\377ggg\377$$$\377!!" +\
"!\377$$$\377'''\377\40\40\40\377\37\37\37\377!!!\377%%%\377,,,\377\311" +\
"\311\311\377\374\374\374\0\376\376\376\0\377\377\377\0\377\377\377\0" +\
"\377\377\377\0\376\376\376\0\361\361\361\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\374\374\374\0\336\336\336" +\
"\377===\377!!!\377\36\36\36\377\36\36\36\377\40\40\40\377\"\"\"\377*" +\
"**\377&&&\377&&&\377&&&\377$$$\377&&&\377'''\377!!!\377\36\36\36\377" +\
"\36\36\36\377...\377)))\377(((\377\201\201\201\377\342\342\342\377\343" +\
"\343\343\377\314\314\314\377777\377###\377\36\36\36\377%%%\377\35\35" +\
"\35\377\36\36\36\377\36\36\36\377\37\37\37\377$$$\377%%%\377WWW\377\361" +\
"\361\361\0\375\375\375\0\375\375\375\0\376\376\376\0\377\377\377\0\376" +\
"\376\376\0\361\361\361\0\377\377\377\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\376\376\376\0\375\375\375\0\307\307\307\377" +\
"...\377\37\37\37\377\36\36\36\377\37\37\37\377\35\35\35\377!!!\377\40" +\
"\40\40\377\37\37\37\377!!!\377!!!\377\"\"\"\377$$$\377!!!\377\35\35\35" +\
"\377\34\34\34\377---\377###\377,,,\377\203\203\203\377\341\341\341\377" +\
"\342\342\342\377\220\220\220\377&&&\377\37\37\37\377\36\36\36\377###" +\
"\377\35\35\35\377\34\34\34\377\34\34\34\377\37\37\37\377$$$\377$$$\377" +\
"$$$\377hhh\377\350\350\350\377\374\374\374\0\377\377\377\0\377\377\377" +\
"\0\375\375\375\0\360\360\360\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\375\375\375" +\
"\0\251\251\251\377+++\377\40\40\40\377!!!\377\36\36\36\377\36\36\36\377" +\
"\40\40\40\377\36\36\36\377\34\34\34\377\36\36\36\377!!!\377\"\"\"\377" +\
"!!!\377\35\35\35\377\33\33\33\377,,,\377$$$\377000\377\177\177\177\377" +\
"\341\341\341\377\341\341\341\377JJJ\377\"\"\"\377\37\37\37\377(((\377" +\
"\37\37\37\377\33\33\33\377\34\34\34\377\34\34\34\377\35\35\35\377\40" +\
"\40\40\377!!!\377!!!\377\37\37\37\377JJJ\377\352\352\352\377\375\375" +\
"\375\0\377\377\377\0\374\374\374\0\354\354\354\210\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\375\375\375\0\372\372\372\0\267\267\267\377999\377\"\"\"\377" +\
"\37\37\37\377\40\40\40\377\"\"\"\377\36\36\36\377\40\40\40\377\"\"\"" +\
"\377!!!\377%%%\377!!!\377\36\36\36\377\33\33\33\377(((\377$$$\377666" +\
"\377vvv\377\342\342\342\377\266\266\266\377---\377\40\40\40\377$$$\377" +\
"xxx\377\200\200\200\377ggg\377\35\35\35\377\35\35\35\377\34\34\34\377" +\
"!!!\377\"\"\"\377\36\36\36\377\35\35\35\377\35\35\35\377jjj\377\346\346" +\
"\346\377\377\377\377\0\375\375\375\0\353\353\353\252\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\377\377\377\0\375\375\375\0\343\343\343\377" +\
"\233\233\233\377KKK\377///\377LLL\377jjj\377\205\205\205\377\222\222" +\
"\222\377\233\233\233\377\240\240\240\377999\377!!!\377\35\35\35\377'" +\
"''\377$$$\377:::\377lll\377\342\342\342\377xxx\377%%%\377!!!\377---\377" +\
"ggg\377hhh\377KKK\377\34\34\34\377\32\32\32\377\33\33\33\377###\377\40" +\
"\40\40\377***\377***\377\"\"\"\377\37\37\37\377777\377\377\377\377\0" +\
"\376\376\376\0\353\353\353\252\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\376\376\376\0\375\375\375\0\374\374\374\0\366\366\366" +\
"\0\362\362\362\0\373\373\373\0\375\375\375\0\373\373\373\0\374\374\374" +\
"\0\374\374\374\0\373\373\373\0nnn\377!!!\377\35\35\35\377%%%\377)))\377" +\
"999\377aaa\377\324\324\324\377\77\77\77\377!!!\377(((\377$$$\377\35\35" +\
"\35\377\34\34\34\377\34\34\34\377\31\31\31\377\33\33\33\377\32\32\32" +\
"\377\40\40\40\377:::\377\257\257\257\377\267\267\267\377sss\377LLL\377" +\
"$$$\377\377\377\377\0\375\375\375\0\354\354\354\210\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\376" +\
"\376\376\0\377\377\377\0\376\376\376\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\377\377\377\0\376\376\376\0\244\244\244\377" +\
"$$$\377\37\37\37\377!!!\377000\377555\377LLL\377\240\240\240\377...\377" +\
"###\377'''\377\34\34\34\377\32\32\32\377\32\32\32\377\32\32\32\377\33" +\
"\33\33\377\34\34\34\377$$$\377\276\276\276\377\237\237\237\377\272\272" +\
"\272\377\236\236\236\377bbb\377\240\240\240\377LLL\377\377\377\377\0" +\
"\374\374\374\0\356\356\356D\377\377\377\0\377\377\377\0\377\377\377\0" +\
"\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\272\272\272\377&&&\377\37\37\37\377\36" +\
"\36\36\377666\377000\377222\377TTT\377---\377&&&\377\33\33\33\377\33" +\
"\33\33\377\32\32\32\377\32\32\32\377\32\32\32\377\34\34\34\377\35\35" +\
"\35\377WWW\377\367\367\367\0\326\326\326\377\222\222\222\377fff\377K" +\
"KK\377mmm\377BBB\377\376\376\376\0\374\374\374\0\356\356\356D\376\376" +\
"\376\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\310\310" +\
"\310\377(((\377\36\36\36\377\34\34\34\377***\377+++\377!!!\377777\377" +\
"%%%\377\32\32\32\377\31\31\31\377\32\32\32\377\32\32\32\377\32\32\32" +\
"\377\32\32\32\377\33\33\33\377\37\37\37\377\230\230\230\377\370\370\370" +\
"\0\202\202\202\377\232\232\232\377ccc\377>>>\377)))\377kkk\377\376\376" +\
"\376\0\364\364\364\0\325\325\325\377\374\374\374\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\376\376\376\0\323\323\323\377,,,\377\40\40\40" +\
"\377\35\35\35\377\37\37\37\377///\377,,,\377222\377\32\32\32\377\31\31" +\
"\31\377\31\31\31\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32" +\
"\377\36\36\36\377&&&\377\333\333\333\377\375\375\375\0\374\374\374\0" +\
"\364\364\364\0\251\251\251\377uuu\377DDD\377\215\215\215\377\376\376" +\
"\376\0\335\335\335\377\214\214\214\377\372\372\372\0\377\377\377\0\376" +\
"\376\376\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\376\376\376\0\337\337\337\377000\377\"\"\"\377" +\
"\37\37\37\377\37\37\37\377'''\377...\377\34\34\34\377\31\31\31\377\32" +\
"\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\33\33" +\
"\33\377\37\37\37\377bbb\377\373\373\373\0\376\376\376\0\376\376\376\0" +\
"\374\374\374\0\361\361\361\0\371\371\371\0\307\307\307\377vvv\377\375" +\
"\375\375\0\320\320\320\377xxx\377\353\353\353\252\377\377\377\0\377\377" +\
"\377\0\376\376\376\0\376\376\376\0\375\375\375\0\376\376\376\0\375\375" +\
"\375\0\376\376\376\0\376\376\376\0\376\376\376\0\376\376\376\0\375\375" +\
"\375\0\376\376\376\0\376\376\376\0\376\376\376\0\376\376\376\0\376\376" +\
"\376\0\376\376\376\0\376\376\376\0\353\353\353\252999\377&&&\377###\377" +\
"!!!\377\37\37\37\377\37\37\37\377\33\33\33\377\32\32\32\377\33\33\33" +\
"\377\33\33\33\377\32\32\32\377\32\32\32\377\32\32\32\377\33\33\33\377" +\
"\37\37\37\377\235\235\235\377\376\376\376\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\376\376\376\0\374\374\374" +\
"\0\331\331\331\377\311\311\311\377\236\236\236\377\314\314\314\377\310" +\
"\310\310\377\305\305\305\377\302\302\302\377\300\300\300\377\300\300" +\
"\300\377\306\306\306\377\311\311\311\377\305\305\305\377\310\310\310" +\
"\377\313\313\313\377\321\321\321\377\326\326\326\377\331\331\331\377" +\
"\333\333\333\377\340\340\340\377\345\345\345\377\347\347\347\377\353" +\
"\353\353\252\360\360\360\0\341\341\341\377999\377%%%\377###\377\"\"\"" +\
"\377\37\37\37\377\32\32\32\377\32\32\32\377\32\32\32\377\34\34\34\377" +\
"\33\33\33\377\32\32\32\377\32\32\32\377\32\32\32\377\33\33\33\377\40" +\
"\40\40\377\313\313\313\377\376\376\376\0\377\377\377\0\377\377\377\0" +\
"\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\244\244\244\377\226\226\226\377\201\201\201\377qqq\377|||\377{{{\377" +\
"iii\377mmm\377iii\377bbb\377ccc\377ZZZ\377YYY\377UUU\377XXX\377PPP\377" +\
"GGG\377;;;\377AAA\377MMM\377TTT\377\\\\\\\377VVV\377NNN\377222\37777" +\
"7\377\77\77\77\377EEE\377\77\77\77\377\34\34\34\377\32\32\32\377\31\31" +\
"\31\377\33\33\33\377\33\33\33\377\34\34\34\377\34\34\34\377\35\35\35" +\
"\377\35\35\35\377+++\377\355\355\355f\374\374\374\0\376\376\376\0\376" +\
"\376\376\0\376\376\376\0\376\376\376\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0\236\236\236\377www\377UUU\377UUU\377kkk\377sss\377jjj\377" +\
"bbb\377eee\377kkk\377iii\377BBB\377###\377+++\377aaa\377KKK\377DDD\377" +\
"BBB\377:::\377888\377444\377;;;\377;;;\377999\377444\377,,,\377555\377" +\
"HHH\377\\\\\\\377,,,\377333\377<<<\377YYY\377zzz\377\204\204\204\377" +\
"ttt\377GGG\377///\377HHH\377\313\313\313\377\320\320\320\377\322\322" +\
"\322\377\344\344\344\377\372\372\372\0\375\375\375\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\202\202\202\377###\377\35\35\35\377\35\35\35" +\
"\377\36\36\36\377\37\37\37\377\35\35\35\377\33\33\33\377\33\33\33\377" +\
"\34\34\34\377\33\33\33\377\40\40\40\377&&&\377,,,\377000\377\34\34\34" +\
"\377\33\33\33\377\36\36\36\377\36\36\36\377\40\40\40\377\40\40\40\377" +\
"###\377(((\377111\377555\377TTT\377,,,\377\77\77\77\377YYY\377{{{\377" +\
"sss\377bbb\377@@@\377###\377\36\36\36\377\36\36\36\377\34\34\34\377\35" +\
"\35\35\377VVV\377\373\373\373\0\376\376\376\0\374\374\374\0\354\354\354" +\
"\210\333\333\333\377\332\332\332\377\357\357\357\"\373\373\373\0\374" +\
"\374\374\0^^^\377\40\40\40\377\33\33\33\377\34\34\34\377\33\33\33\377" +\
"\32\32\32\377\31\31\31\377\32\32\32\377\31\31\31\377\31\31\31\377\27" +\
"\27\27\377\32\32\32\377\35\35\35\377\35\35\35\377\33\33\33\377\31\31" +\
"\31\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32" +\
"\377\32\32\32\377\32\32\32\377\32\32\32\377\33\33\33\377&&&\377%%%\377" +\
"FFF\377RRR\377222\377\"\"\"\377###\377\37\37\37\377\36\36\36\377\36\36" +\
"\36\377\33\33\33\377\34\34\34\377\37\37\37\377PPP\377\372\372\372\0\375" +\
"\375\375\0\376\376\376\0\375\375\375\0\375\375\375\0\372\372\372\0\344" +\
"\344\344\377\337\337\337\377\374\374\374\0>>>\377\40\40\40\377\34\34" +\
"\34\377\35\35\35\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32" +\
"\377\32\32\32\377\32\32\32\377\31\31\31\377\30\30\30\377\33\33\33\377" +\
"\33\33\33\377\33\33\33\377\32\32\32\377\32\32\32\377\32\32\32\377\32" +\
"\32\32\377\32\32\32\377\34\34\34\377\33\33\33\377\32\32\32\377\32\32" +\
"\32\377\32\32\32\377\36\36\36\377///\377WWW\377GGG\377>>>\377...\377" +\
"###\377&&&\377\"\"\"\377\34\34\34\377\34\34\34\377\34\34\34\377\36\36" +\
"\36\377>>>\377\366\366\366\0\376\376\376\0\377\377\377\0\377\377\377" +\
"\0\376\376\376\0\375\375\375\0\374\374\374\0\355\355\355f\351\351\351" +\
"\356888\377\37\37\37\377\34\34\34\377\34\34\34\377\32\32\32\377\32\32" +\
"\32\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\31\31\31" +\
"\377\32\32\32\377\33\33\33\377\33\33\33\377\33\33\33\377\33\33\33\377" +\
"\33\33\33\377\33\33\33\377\33\33\33\377\34\34\34\377\34\34\34\377\34" +\
"\34\34\377\33\33\33\377\33\33\33\377\35\35\35\377!!!\377%%%\377HHH\377" +\
"TTT\377CCC\377\35\35\35\377\33\33\33\377\33\33\33\377'''\377>>>\3770" +\
"00\377,,,\377$$$\377+++\377\353\353\353\252\376\376\376\0\376\376\376" +\
"\0\376\376\376\0\375\375\375\0\375\375\375\0\365\365\365\0\337\337\337" +\
"\377\362\362\362\0""666\377\37\37\37\377\34\34\34\377\33\33\33\377\32" +\
"\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32" +\
"\32\377\32\32\32\377\33\33\33\377\34\34\34\377\32\32\32\377\33\33\33" +\
"\377\34\34\34\377\34\34\34\377\34\34\34\377\34\34\34\377\34\34\34\377" +\
"\33\33\33\377\34\34\34\377\34\34\34\377\34\34\34\377\36\36\36\377\"\"" +\
"\"\377&&&\377JJJ\377LLL\377BBB\377\35\35\35\377\34\34\34\377\34\34\34" +\
"\377\35\35\35\377\35\35\35\377\35\35\35\377'''\377222\377111\377\277" +\
"\277\277\377\340\340\340\377\343\343\343\377\345\345\345\377\355\355" +\
"\355f\353\353\353\252\352\352\352\314\370\370\370\0\375\375\375\0<<<" +\
"\377\35\35\35\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377" +\
"\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\32" +\
"\32\32\377\33\33\33\377\32\32\32\377\33\33\33\377\33\33\33\377\34\34" +\
"\34\377\34\34\34\377\33\33\33\377\33\33\33\377\34\34\34\377\34\34\34" +\
"\377\34\34\34\377\35\35\35\377\36\36\36\377!!!\377///\377FFF\377888\377" +\
"444\377\36\36\36\377\35\35\35\377\37\37\37\377\35\35\35\377\33\33\33" +\
"\377\33\33\33\377\35\35\35\377\37\37\37\377$$$\377\325\325\325\377\375" +\
"\375\375\0\376\376\376\0\376\376\376\0\376\376\376\0\376\376\376\0\377" +\
"\377\377\0\376\376\376\0\376\376\376\0""777\377\35\35\35\377\32\32\32" +\
"\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377" +\
"\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\33\33\33\377\32" +\
"\32\32\377\32\32\32\377\34\34\34\377\34\34\34\377\34\34\34\377\33\33" +\
"\33\377\33\33\33\377\34\34\34\377\34\34\34\377\34\34\34\377\33\33\33" +\
"\377\34\34\34\377!!!\377***\377AAA\377\35\35\35\377\34\34\34\377\34\34" +\
"\34\377)))\377$$$\377%%%\377\35\35\35\377\33\33\33\377\34\34\34\377\37" +\
"\37\37\377%%%\377\321\321\321\377\376\376\376\0\376\376\376\0\377\377" +\
"\377\0\377\377\377\0\376\376\376\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0;;;\377\34\34\34\377\31\31\31\377\32\32\32\377\32\32\32\377\32" +\
"\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\31\31" +\
"\31\377\30\30\30\377\33\33\33\377\31\31\31\377\31\31\31\377\33\33\33" +\
"\377\34\34\34\377\34\34\34\377\34\34\34\377\34\34\34\377\34\34\34\377" +\
"\34\34\34\377\34\34\34\377\34\34\34\377\34\34\34\377!!!\377///\377@@" +\
"@\377\34\34\34\377\34\34\34\377\34\34\34\377000\377)))\377\"\"\"\377" +\
"\35\35\35\377\36\36\36\377\36\36\36\377!!!\377(((\377\315\315\315\377" +\
"\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0<<<\377\35\35\35\377\31\31" +\
"\31\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32" +\
"\377\32\32\32\377\32\32\32\377\31\31\31\377\30\30\30\377\31\31\31\377" +\
"\31\31\31\377\32\32\32\377\32\32\32\377\34\34\34\377\35\35\35\377\34" +\
"\34\34\377\34\34\34\377\34\34\34\377\34\34\34\377\34\34\34\377\34\34" +\
"\34\377\34\34\34\377\"\"\"\377)))\377>>>\377\37\37\37\377\36\36\36\377" +\
"\35\35\35\377777\377000\377&&&\377---\377$$$\377\"\"\"\377%%%\377)))" +\
"\377\324\324\324\377\376\376\376\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0""999\377" +\
"\36\36\36\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\32" +\
"\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\31\31\31\377\30\30" +\
"\30\377\30\30\30\377\31\31\31\377\32\32\32\377\32\32\32\377\33\33\33" +\
"\377\33\33\33\377\33\33\33\377\33\33\33\377\33\33\33\377\34\34\34\377" +\
"\35\35\35\377\34\34\34\377\34\34\34\377\"\"\"\377)))\377===\377\37\37" +\
"\37\377\36\36\36\377000\377@@@\377222\377777\377HHH\377&&&\377\"\"\"" +\
"\377&&&\377,,,\377\334\334\334\377\376\376\376\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\77\77\77\377\40\40\40\377\34\34\34\377\32\32\32\377\32\32\32\377" +\
"\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\32" +\
"\32\32\377\32\32\32\377\30\30\30\377\32\32\32\377\33\33\33\377\33\33" +\
"\33\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32" +\
"\377\33\33\33\377\35\35\35\377\34\34\34\377\33\33\33\377\40\40\40\377" +\
",,,\377===\377\"\"\"\377\"\"\"\377\"\"\"\377\35\35\35\377\33\33\33\377" +\
";;;\377GGG\377%%%\377\"\"\"\377'''\377///\377\334\334\334\377\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0""888\377\37\37\37\377\33\33\33\377" +\
"\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\32" +\
"\32\32\377\32\32\32\377\32\32\32\377\30\30\30\377\31\31\31\377\31\31" +\
"\31\377\33\33\33\377\33\33\33\377\34\34\34\377\33\33\33\377\32\32\32" +\
"\377\32\32\32\377\32\32\32\377\33\33\33\377\33\33\33\377\34\34\34\377" +\
"\34\34\34\377\"\"\"\377@@@\377;;;\377\40\40\40\377###\377\"\"\"\377\35" +\
"\35\35\377\35\35\35\377999\377DDD\377$$$\377\"\"\"\377&&&\377...\377" +\
"\336\336\336\377\376\376\376\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0""999\377\37" +\
"\37\37\377\33\33\33\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32" +\
"\32\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\30\30\30" +\
"\377\30\30\30\377\31\31\31\377\33\33\33\377\33\33\33\377\34\34\34\377" +\
"\34\34\34\377\33\33\33\377\32\32\32\377\32\32\32\377\32\32\32\377\32" +\
"\32\32\377\34\34\34\377\34\34\34\377!!!\377666\377<<<\377\"\"\"\377#" +\
"##\377%%%\377!!!\377!!!\377;;;\377FFF\377%%%\377\"\"\"\377&&&\377000" +\
"\377\340\340\340\377\376\376\376\0\377\377\377\0\377\377\377\0\377\377" +\
"\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0:::\377" +\
"\37\37\37\377\33\33\33\377\32\32\32\377\32\32\32\377\32\32\32\377\32" +\
"\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\33\33\33\377\31\31" +\
"\31\377\30\30\30\377\31\31\31\377\34\34\34\377\33\33\33\377\33\33\33" +\
"\377\34\34\34\377\34\34\34\377\34\34\34\377\32\32\32\377\32\32\32\377" +\
"\32\32\32\377\32\32\32\377\33\33\33\377\37\37\37\377)))\377BBB\377**" +\
"*\377+++\377(((\377###\377###\377<<<\377===\377\"\"\"\377\37\37\37\377" +\
"%%%\377===\377\354\354\354\210\376\376\376\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0""555\377\35\35\35\377\32\32\32\377\32\32\32\377\32\32\32\377\32\32" +\
"\32\377\32\32\32\377\32\32\32\377\32\32\32\377\34\34\34\377\33\33\33" +\
"\377\31\31\31\377\30\30\30\377\31\31\31\377\34\34\34\377\33\33\33\377" +\
"\32\32\32\377\32\32\32\377\34\34\34\377\34\34\34\377\34\34\34\377\33" +\
"\33\33\377\33\33\33\377\33\33\33\377\33\33\33\377\40\40\40\377)))\377" +\
"\77\77\77\377...\377///\377,,,\377,,,\377///\377444\377\"\"\"\377!!!" +\
"\377!!!\377&&&\377ddd\377\374\374\374\0\376\376\376\0\377\377\377\0\377" +\
"\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377" +\
"\377\377\0""333\377\33\33\33\377\31\31\31\377\32\32\32\377\32\32\32\377" +\
"\32\32\32\377\32\32\32\377\32\32\32\377\32\32\32\377\34\34\34\377\34" +\
"\34\34\377\32\32\32\377\31\31\31\377\31\31\31\377\34\34\34\377\32\32" +\
"\32\377\32\32\32\377\32\32\32\377\34\34\34\377\34\34\34\377\34\34\34" +\
"\377\34\34\34\377\34\34\34\377\34\34\34\377\34\34\34\377###\377)))\377" +\
"\77\77\77\377***\377&&&\377***\377...\377+++\377$$$\377\37\37\37\377" +\
"\40\40\40\377###\377(((\377\177\177\177\377\374\374\374\0\376\376\376" +\
"\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377\0\377\377\377" +\
"\0\377\377\377\0\377\377\377\0"
def usage(pname):
    """Print command-line usage help for this program.

    pname -- program name to show in the usage line (typically sys.argv[0]).
    """
    # print(...) with a single string argument behaves identically under
    # Python 2 and Python 3, unlike the bare py2 print statement used before.
    print("""Usage: %s [OPTIONS]
Options:
  --device=DEV  The device filename to open. Defaults to /dev/dsp.
  --help        Display this help text and exit.
""" % pname)
def main():
    """Parse command-line options, then launch the theremin application."""
    import getopt
    import sys

    # NOTE(review): the help text advertises --device, but only --help is
    # accepted here, so --device raises getopt.GetoptError -- confirm intent.
    options, _args = getopt.getopt(sys.argv[1:], '', ['help'])
    for name, _value in options:
        if name == '--help':
            usage(sys.argv[0])
            sys.exit(0)
    ThereminApp().main()
if __name__ == '__main__':
    # Script entry point: run only when executed directly, not on import.
    main()
| gpl-2.0 |
75651/kbengine_cloud | kbe/res/scripts/common/Lib/encodings/cp875.py | 272 | 12854 | """ Python Character Mapping Codec cp875 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP875.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless codec translating between text and cp875 byte sequences."""

    def encode(self, input, errors='strict'):
        """Encode *input* to bytes using the module-level encoding_table."""
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        """Decode *input* to text using the module-level decoding_table."""
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; the mapping is stateless, so chunks are independent."""

    def encode(self, input, final=False):
        """Return encoded bytes for *input*; *final* is ignored (no state)."""
        return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; the mapping is stateless, so chunks are independent."""

    def decode(self, input, final=False):
        """Return decoded text for *input*; *final* is ignored (no state)."""
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer for cp875; inherits all behaviour from Codec."""
    pass
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader for cp875; inherits all behaviour from Codec."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record used to register this codec as 'cp875'."""
    # Codec is stateless, so a single shared instance can supply both
    # the encode and decode entry points.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp875',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x9c' # 0x04 -> CONTROL
'\t' # 0x05 -> HORIZONTAL TABULATION
'\x86' # 0x06 -> CONTROL
'\x7f' # 0x07 -> DELETE
'\x97' # 0x08 -> CONTROL
'\x8d' # 0x09 -> CONTROL
'\x8e' # 0x0A -> CONTROL
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x9d' # 0x14 -> CONTROL
'\x85' # 0x15 -> CONTROL
'\x08' # 0x16 -> BACKSPACE
'\x87' # 0x17 -> CONTROL
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x92' # 0x1A -> CONTROL
'\x8f' # 0x1B -> CONTROL
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
'\x80' # 0x20 -> CONTROL
'\x81' # 0x21 -> CONTROL
'\x82' # 0x22 -> CONTROL
'\x83' # 0x23 -> CONTROL
'\x84' # 0x24 -> CONTROL
'\n' # 0x25 -> LINE FEED
'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
'\x1b' # 0x27 -> ESCAPE
'\x88' # 0x28 -> CONTROL
'\x89' # 0x29 -> CONTROL
'\x8a' # 0x2A -> CONTROL
'\x8b' # 0x2B -> CONTROL
'\x8c' # 0x2C -> CONTROL
'\x05' # 0x2D -> ENQUIRY
'\x06' # 0x2E -> ACKNOWLEDGE
'\x07' # 0x2F -> BELL
'\x90' # 0x30 -> CONTROL
'\x91' # 0x31 -> CONTROL
'\x16' # 0x32 -> SYNCHRONOUS IDLE
'\x93' # 0x33 -> CONTROL
'\x94' # 0x34 -> CONTROL
'\x95' # 0x35 -> CONTROL
'\x96' # 0x36 -> CONTROL
'\x04' # 0x37 -> END OF TRANSMISSION
'\x98' # 0x38 -> CONTROL
'\x99' # 0x39 -> CONTROL
'\x9a' # 0x3A -> CONTROL
'\x9b' # 0x3B -> CONTROL
'\x14' # 0x3C -> DEVICE CONTROL FOUR
'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
'\x9e' # 0x3E -> CONTROL
'\x1a' # 0x3F -> SUBSTITUTE
' ' # 0x40 -> SPACE
'\u0391' # 0x41 -> GREEK CAPITAL LETTER ALPHA
'\u0392' # 0x42 -> GREEK CAPITAL LETTER BETA
'\u0393' # 0x43 -> GREEK CAPITAL LETTER GAMMA
'\u0394' # 0x44 -> GREEK CAPITAL LETTER DELTA
'\u0395' # 0x45 -> GREEK CAPITAL LETTER EPSILON
'\u0396' # 0x46 -> GREEK CAPITAL LETTER ZETA
'\u0397' # 0x47 -> GREEK CAPITAL LETTER ETA
'\u0398' # 0x48 -> GREEK CAPITAL LETTER THETA
'\u0399' # 0x49 -> GREEK CAPITAL LETTER IOTA
'[' # 0x4A -> LEFT SQUARE BRACKET
'.' # 0x4B -> FULL STOP
'<' # 0x4C -> LESS-THAN SIGN
'(' # 0x4D -> LEFT PARENTHESIS
'+' # 0x4E -> PLUS SIGN
'!' # 0x4F -> EXCLAMATION MARK
'&' # 0x50 -> AMPERSAND
'\u039a' # 0x51 -> GREEK CAPITAL LETTER KAPPA
'\u039b' # 0x52 -> GREEK CAPITAL LETTER LAMDA
'\u039c' # 0x53 -> GREEK CAPITAL LETTER MU
'\u039d' # 0x54 -> GREEK CAPITAL LETTER NU
'\u039e' # 0x55 -> GREEK CAPITAL LETTER XI
'\u039f' # 0x56 -> GREEK CAPITAL LETTER OMICRON
'\u03a0' # 0x57 -> GREEK CAPITAL LETTER PI
'\u03a1' # 0x58 -> GREEK CAPITAL LETTER RHO
'\u03a3' # 0x59 -> GREEK CAPITAL LETTER SIGMA
']' # 0x5A -> RIGHT SQUARE BRACKET
'$' # 0x5B -> DOLLAR SIGN
'*' # 0x5C -> ASTERISK
')' # 0x5D -> RIGHT PARENTHESIS
';' # 0x5E -> SEMICOLON
'^' # 0x5F -> CIRCUMFLEX ACCENT
'-' # 0x60 -> HYPHEN-MINUS
'/' # 0x61 -> SOLIDUS
'\u03a4' # 0x62 -> GREEK CAPITAL LETTER TAU
'\u03a5' # 0x63 -> GREEK CAPITAL LETTER UPSILON
'\u03a6' # 0x64 -> GREEK CAPITAL LETTER PHI
'\u03a7' # 0x65 -> GREEK CAPITAL LETTER CHI
'\u03a8' # 0x66 -> GREEK CAPITAL LETTER PSI
'\u03a9' # 0x67 -> GREEK CAPITAL LETTER OMEGA
'\u03aa' # 0x68 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
'\u03ab' # 0x69 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
'|' # 0x6A -> VERTICAL LINE
',' # 0x6B -> COMMA
'%' # 0x6C -> PERCENT SIGN
'_' # 0x6D -> LOW LINE
'>' # 0x6E -> GREATER-THAN SIGN
'?' # 0x6F -> QUESTION MARK
'\xa8' # 0x70 -> DIAERESIS
'\u0386' # 0x71 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
'\u0388' # 0x72 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
'\u0389' # 0x73 -> GREEK CAPITAL LETTER ETA WITH TONOS
'\xa0' # 0x74 -> NO-BREAK SPACE
'\u038a' # 0x75 -> GREEK CAPITAL LETTER IOTA WITH TONOS
'\u038c' # 0x76 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
'\u038e' # 0x77 -> GREEK CAPITAL LETTER UPSILON WITH TONOS
'\u038f' # 0x78 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
'`' # 0x79 -> GRAVE ACCENT
':' # 0x7A -> COLON
'#' # 0x7B -> NUMBER SIGN
'@' # 0x7C -> COMMERCIAL AT
"'" # 0x7D -> APOSTROPHE
'=' # 0x7E -> EQUALS SIGN
'"' # 0x7F -> QUOTATION MARK
'\u0385' # 0x80 -> GREEK DIALYTIKA TONOS
'a' # 0x81 -> LATIN SMALL LETTER A
'b' # 0x82 -> LATIN SMALL LETTER B
'c' # 0x83 -> LATIN SMALL LETTER C
'd' # 0x84 -> LATIN SMALL LETTER D
'e' # 0x85 -> LATIN SMALL LETTER E
'f' # 0x86 -> LATIN SMALL LETTER F
'g' # 0x87 -> LATIN SMALL LETTER G
'h' # 0x88 -> LATIN SMALL LETTER H
'i' # 0x89 -> LATIN SMALL LETTER I
'\u03b1' # 0x8A -> GREEK SMALL LETTER ALPHA
'\u03b2' # 0x8B -> GREEK SMALL LETTER BETA
'\u03b3' # 0x8C -> GREEK SMALL LETTER GAMMA
'\u03b4' # 0x8D -> GREEK SMALL LETTER DELTA
'\u03b5' # 0x8E -> GREEK SMALL LETTER EPSILON
'\u03b6' # 0x8F -> GREEK SMALL LETTER ZETA
'\xb0' # 0x90 -> DEGREE SIGN
'j' # 0x91 -> LATIN SMALL LETTER J
'k' # 0x92 -> LATIN SMALL LETTER K
'l' # 0x93 -> LATIN SMALL LETTER L
'm' # 0x94 -> LATIN SMALL LETTER M
'n' # 0x95 -> LATIN SMALL LETTER N
'o' # 0x96 -> LATIN SMALL LETTER O
'p' # 0x97 -> LATIN SMALL LETTER P
'q' # 0x98 -> LATIN SMALL LETTER Q
'r' # 0x99 -> LATIN SMALL LETTER R
'\u03b7' # 0x9A -> GREEK SMALL LETTER ETA
'\u03b8' # 0x9B -> GREEK SMALL LETTER THETA
'\u03b9' # 0x9C -> GREEK SMALL LETTER IOTA
'\u03ba' # 0x9D -> GREEK SMALL LETTER KAPPA
'\u03bb' # 0x9E -> GREEK SMALL LETTER LAMDA
'\u03bc' # 0x9F -> GREEK SMALL LETTER MU
'\xb4' # 0xA0 -> ACUTE ACCENT
'~' # 0xA1 -> TILDE
's' # 0xA2 -> LATIN SMALL LETTER S
't' # 0xA3 -> LATIN SMALL LETTER T
'u' # 0xA4 -> LATIN SMALL LETTER U
'v' # 0xA5 -> LATIN SMALL LETTER V
'w' # 0xA6 -> LATIN SMALL LETTER W
'x' # 0xA7 -> LATIN SMALL LETTER X
'y' # 0xA8 -> LATIN SMALL LETTER Y
'z' # 0xA9 -> LATIN SMALL LETTER Z
'\u03bd' # 0xAA -> GREEK SMALL LETTER NU
'\u03be' # 0xAB -> GREEK SMALL LETTER XI
'\u03bf' # 0xAC -> GREEK SMALL LETTER OMICRON
'\u03c0' # 0xAD -> GREEK SMALL LETTER PI
'\u03c1' # 0xAE -> GREEK SMALL LETTER RHO
'\u03c3' # 0xAF -> GREEK SMALL LETTER SIGMA
'\xa3' # 0xB0 -> POUND SIGN
'\u03ac' # 0xB1 -> GREEK SMALL LETTER ALPHA WITH TONOS
'\u03ad' # 0xB2 -> GREEK SMALL LETTER EPSILON WITH TONOS
'\u03ae' # 0xB3 -> GREEK SMALL LETTER ETA WITH TONOS
'\u03ca' # 0xB4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
'\u03af' # 0xB5 -> GREEK SMALL LETTER IOTA WITH TONOS
'\u03cc' # 0xB6 -> GREEK SMALL LETTER OMICRON WITH TONOS
'\u03cd' # 0xB7 -> GREEK SMALL LETTER UPSILON WITH TONOS
'\u03cb' # 0xB8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
'\u03ce' # 0xB9 -> GREEK SMALL LETTER OMEGA WITH TONOS
'\u03c2' # 0xBA -> GREEK SMALL LETTER FINAL SIGMA
'\u03c4' # 0xBB -> GREEK SMALL LETTER TAU
'\u03c5' # 0xBC -> GREEK SMALL LETTER UPSILON
'\u03c6' # 0xBD -> GREEK SMALL LETTER PHI
'\u03c7' # 0xBE -> GREEK SMALL LETTER CHI
'\u03c8' # 0xBF -> GREEK SMALL LETTER PSI
'{' # 0xC0 -> LEFT CURLY BRACKET
'A' # 0xC1 -> LATIN CAPITAL LETTER A
'B' # 0xC2 -> LATIN CAPITAL LETTER B
'C' # 0xC3 -> LATIN CAPITAL LETTER C
'D' # 0xC4 -> LATIN CAPITAL LETTER D
'E' # 0xC5 -> LATIN CAPITAL LETTER E
'F' # 0xC6 -> LATIN CAPITAL LETTER F
'G' # 0xC7 -> LATIN CAPITAL LETTER G
'H' # 0xC8 -> LATIN CAPITAL LETTER H
'I' # 0xC9 -> LATIN CAPITAL LETTER I
'\xad' # 0xCA -> SOFT HYPHEN
'\u03c9' # 0xCB -> GREEK SMALL LETTER OMEGA
'\u0390' # 0xCC -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
'\u03b0' # 0xCD -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
'\u2018' # 0xCE -> LEFT SINGLE QUOTATION MARK
'\u2015' # 0xCF -> HORIZONTAL BAR
'}' # 0xD0 -> RIGHT CURLY BRACKET
'J' # 0xD1 -> LATIN CAPITAL LETTER J
'K' # 0xD2 -> LATIN CAPITAL LETTER K
'L' # 0xD3 -> LATIN CAPITAL LETTER L
'M' # 0xD4 -> LATIN CAPITAL LETTER M
'N' # 0xD5 -> LATIN CAPITAL LETTER N
'O' # 0xD6 -> LATIN CAPITAL LETTER O
'P' # 0xD7 -> LATIN CAPITAL LETTER P
'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
'R' # 0xD9 -> LATIN CAPITAL LETTER R
'\xb1' # 0xDA -> PLUS-MINUS SIGN
'\xbd' # 0xDB -> VULGAR FRACTION ONE HALF
'\x1a' # 0xDC -> SUBSTITUTE
'\u0387' # 0xDD -> GREEK ANO TELEIA
'\u2019' # 0xDE -> RIGHT SINGLE QUOTATION MARK
'\xa6' # 0xDF -> BROKEN BAR
'\\' # 0xE0 -> REVERSE SOLIDUS
'\x1a' # 0xE1 -> SUBSTITUTE
'S' # 0xE2 -> LATIN CAPITAL LETTER S
'T' # 0xE3 -> LATIN CAPITAL LETTER T
'U' # 0xE4 -> LATIN CAPITAL LETTER U
'V' # 0xE5 -> LATIN CAPITAL LETTER V
'W' # 0xE6 -> LATIN CAPITAL LETTER W
'X' # 0xE7 -> LATIN CAPITAL LETTER X
'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
'\xb2' # 0xEA -> SUPERSCRIPT TWO
'\xa7' # 0xEB -> SECTION SIGN
'\x1a' # 0xEC -> SUBSTITUTE
'\x1a' # 0xED -> SUBSTITUTE
'\xab' # 0xEE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xEF -> NOT SIGN
'0' # 0xF0 -> DIGIT ZERO
'1' # 0xF1 -> DIGIT ONE
'2' # 0xF2 -> DIGIT TWO
'3' # 0xF3 -> DIGIT THREE
'4' # 0xF4 -> DIGIT FOUR
'5' # 0xF5 -> DIGIT FIVE
'6' # 0xF6 -> DIGIT SIX
'7' # 0xF7 -> DIGIT SEVEN
'8' # 0xF8 -> DIGIT EIGHT
'9' # 0xF9 -> DIGIT NINE
'\xb3' # 0xFA -> SUPERSCRIPT THREE
'\xa9' # 0xFB -> COPYRIGHT SIGN
'\x1a' # 0xFC -> SUBSTITUTE
'\x1a' # 0xFD -> SUBSTITUTE
'\xbb' # 0xFE -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\x9f' # 0xFF -> CONTROL
)
### Encoding table
# Inverse of decoding_table: maps each decoded character back to its cp875 byte.
encoding_table=codecs.charmap_build(decoding_table)
| lgpl-3.0 |
annahs/atmos_research | NC_MAC_vs_alt_single_coating_thickness.py | 1 | 16412 | from pymiecoated import Mie
import sys
import os
import numpy as np
from pprint import pprint
from datetime import datetime
import mysql.connector
import math
import matplotlib.pyplot as plt
import matplotlib.colors
import calendar
from scipy.optimize import curve_fit
# Assumed rBC coating thickness per altitude bin (nm).
# NOTE(review): the first assignment (sc1-7 values) is immediately overwritten
# by the second (sc10 values), so only the second list takes effect -- confirm
# this is intentional and not a forgotten toggle.
assumed_coating_th = [41,43,44,40,38,40,44,41,38,38] #nm sc1-7
assumed_coating_th = [43,57,50,57,51,47,46,40,30,17] #nm sc10
wavelength = 550 #nm
rBC_RI = complex(2.26,1.26)  # complex refractive index assumed for the rBC core
savefig = False
show_distr_plots = False
#alt parameters
min_alt = 0      # lowest altitude bin edge (m)
max_alt = 5000   # highest altitude bin edge (m)
alt_incr = 500   # altitude bin width (m)
#distr parameters
bin_value_min = 80   # smallest core-diameter bin edge (nm)
bin_value_max = 220  # largest core-diameter bin edge (nm)
bin_incr = 10        # core-diameter bin width (nm)
bin_number_lim = (bin_value_max-bin_value_min)/bin_incr  # number of diameter bins spanned
#constants
R = 8.3144621 # in m3*Pa/(K*mol)
# Flight windows: name -> [UTC start, UTC end, longitude, latitude].
# Commented-out entries are alternative flights that can be toggled on.
flight_times = {
'science 1' : [datetime(2015,4,5,9,0),datetime(2015,4,5,14,0),15.6500, 78.2200] ,
##'ferry 1' : [datetime(2015,4,6,9,0),datetime(2015,4,6,11,0),15.6500, 78.2200] ,
##'ferry 2' : [datetime(2015,4,6,15,0),datetime(2015,4,6,18,0),-16.6667, 81.6000] ,
#'science 2' : [datetime(2015,4,7,16,0),datetime(2015,4,7,21,0),-62.338, 82.5014] ,
#'science 3' : [datetime(2015,4,8,13,0),datetime(2015,4,8,17,0),-62.338, 82.5014] ,
#'science 4' : [datetime(2015,4,8,17,30),datetime(2015,4,8,22,0),-70.338, 82.5014] ,
#'science 5' : [datetime(2015,4,9,13,30),datetime(2015,4,9,18,0),-62.338, 82.0] ,
##'ferry 3' : [datetime(2015,4,10,14,0),datetime(2015,4,10,17,0),-75.338, 81] ,
#'science 6' : [datetime(2015,4,11,15,0),datetime(2015,4,11,22,0),-90.9408, 80.5] ,
#'science 7' : [datetime(2015,4,13,15,0),datetime(2015,4,13,21,0),-95, 80.1] ,
#'science 8' : [datetime(2015,4,20,15,0),datetime(2015,4,20,20,0),-133.7306, 67.1],
#'science 9' : [datetime(2015,4,20,21,0),datetime(2015,4,21,2,0),-133.7306, 69.3617] ,
#'science 10' : [datetime(2015,4,21,16,0),datetime(2015,4,21,22,0),-131, 69.55],
}
#database connection
# SECURITY(review): credentials are hard-coded in source; consider loading them
# from environment variables or a config file kept out of version control.
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
def lognorm(x_vals, A, w, xc):
    """Lognormal distribution evaluated at x_vals (scalar or numpy array).

    A  -- amplitude (area) parameter
    w  -- geometric width (standard deviation of ln x)
    xc -- geometric mean (scale) parameter
    """
    prefactor = A / (np.sqrt(2 * math.pi) * w * x_vals)
    exponent = -(np.log(x_vals / xc)) ** 2 / (2 * w ** 2)
    return prefactor * np.exp(exponent)
def MieCalc(wavelength, core_dia, coat_th):
    """Mie cross-sections for an rBC core with a non-absorbing coating.

    wavelength -- light wavelength in nm
    core_dia   -- rBC core diameter in nm
    coat_th    -- coating (shell) thickness in nm

    Returns [abs_xs, sca_xs, ext_xs], the absorption, scattering and
    extinction cross-sections in cm^2.  Uses the module-level rBC_RI as the
    core refractive index; the shell RI is fixed at 1.5 + 0.0i.
    """
    core_rad = core_dia / 2  # nm
    shell_rad = core_rad + coat_th  # nm

    # Coated-sphere model: x/m are the core size parameter and RI,
    # y/m2 are the outer-shell size parameter and RI.
    mie = Mie()
    mie.x = 2 * math.pi * core_rad / wavelength
    mie.m = rBC_RI
    mie.y = 2 * math.pi * shell_rad / wavelength
    mie.m2 = complex(1.5, 0.0)  # non-absorbing shell

    # Efficiency * geometric cross-section (nm^2), converted to cm^2
    # (1 nm^2 = 1e-14 cm^2).  Renamed locals so builtins abs/... are not shadowed.
    abs_xs = mie.qabs() * math.pi * shell_rad ** 2 * 1e-14
    sca_xs = mie.qsca() * math.pi * shell_rad ** 2 * 1e-14
    ext_xs = sca_xs + abs_xs
    return [abs_xs, sca_xs, ext_xs]
#bin and step size for extrapolating to the full distr
# Diameter grid (nm, 1-nm steps) used when extrapolating the fitted
# distribution over the full size range; built with list(range(...))
# instead of a manual append loop.
fit_bins = list(range(30, 1000))
# Accumulator for per-flight results, filled in the loop below.
plot_data = {}
for flight in flight_times:
print flight
lower_alt = min_alt
start_time = flight_times[flight][0]
end_time = flight_times[flight][1]
UNIX_start_time = calendar.timegm(start_time.utctimetuple())
UNIX_end_time = calendar.timegm(end_time.utctimetuple())
alt = 0
while (lower_alt + alt_incr) <= max_alt:
#make data binning dicts for the interval
mass_binned_data = {}
number_binned_data = {}
i = bin_value_min
while i < bin_value_max:
mass_binned_data[i] = []
number_binned_data[i] = []
i+=bin_incr
#get mass data
cursor.execute(('SELECT bnm.70t80,bnm.80t90,bnm.90t100,bnm.100t110,bnm.110t120,bnm.120t130,bnm.130t140,bnm.140t150,bnm.150t160,bnm.160t170,bnm.170t180,bnm.180t190,bnm.190t200,bnm.200t210,bnm.210t220,bnm.sampled_vol,bnm.total_mass, ftd.temperature_C,ftd.BP_Pa from polar6_binned_mass_and_sampled_volume_alertcalib bnm join polar6_flight_track_details ftd ON bnm.flight_track_data_id = ftd.id WHERE ftd.alt >=%s and ftd.alt < %s and bnm.UNIX_UTC_ts >= %s and bnm.UNIX_UTC_ts < %s'),(lower_alt,(lower_alt + alt_incr),UNIX_start_time,UNIX_end_time))
mass_data = cursor.fetchall()
for row in mass_data:
volume_sampled = row[15]
total_mass = row[16]
temperature = row[17] + 273.15 #convert to Kelvin
pressure = row[18]
correction_factor_for_STP = (101325/pressure)*(temperature/273)
total_mass_conc_value = total_mass*correction_factor_for_STP/volume_sampled
#append STP corrected mass conc to dict of binned data
i=1
j=bin_value_min
while i <= bin_number_lim:
mass_binned_data[j].append(row[i]*correction_factor_for_STP/volume_sampled)
i+=1
j+=10
#get number data
cursor.execute(('SELECT bnn.70t80,bnn.80t90,bnn.90t100,bnn.100t110,bnn.110t120,bnn.120t130,bnn.130t140,bnn.140t150,bnn.150t160,bnn.160t170,bnn.170t180,bnn.180t190,bnn.190t200,bnn.200t210,bnn.210t220,bnn.sampled_vol,bnn.total_number, ftd.temperature_C,ftd.BP_Pa from polar6_binned_number_and_sampled_volume_alertcalib bnn join polar6_flight_track_details ftd ON bnn.flight_track_data_id = ftd.id WHERE ftd.alt >=%s and ftd.alt < %s and bnn.UNIX_UTC_ts >= %s and bnn.UNIX_UTC_ts < %s'),(lower_alt,(lower_alt + alt_incr),UNIX_start_time,UNIX_end_time))
number_data = cursor.fetchall()
for row in number_data:
volume_sampled = row[15]
total_number = row[16]
temperature = row[17] + 273.15 #convert to Kelvin
pressure = row[18]
correction_factor_for_STP = (101325/pressure)*(temperature/273)
#append STP corrected number conc to dict of binned data
i=1
j=bin_value_min
while i <= bin_number_lim:
number_binned_data[j].append(row[i]*correction_factor_for_STP/volume_sampled)
i+=1
j+=10
#make lists from binned data and sort
binned_list = []
number_binned_list = []
for key in mass_binned_data:
abs_xsec = MieCalc(wavelength,(key+bin_incr/2),assumed_coating_th[alt])[0]
sca_xsec = MieCalc(wavelength,(key+bin_incr/2),assumed_coating_th[alt])[1]
abs_xsec_bare = MieCalc(wavelength,(key+bin_incr/2),0)[0]
sca_xsec_bare = MieCalc(wavelength,(key+bin_incr/2),0)[1]
binned_list.append([(key+bin_incr/2), np.mean(mass_binned_data[key]), np.mean(number_binned_data[key]), abs_xsec,sca_xsec, abs_xsec_bare,sca_xsec_bare])
binned_list.sort()
#optical constants for the measured mass range
optical_data_meas = []
for row in binned_list:
row[1] = row[1]/(math.log((row[0]+bin_incr/2))-math.log(row[0]-bin_incr/2)) #normalize mass
row[2] = row[2]/(math.log((row[0]+bin_incr/2))-math.log(row[0]-bin_incr/2)) #normalize number
bin_midpoint = row[0]
bin_mass = row[1] #in fg/cm3
bin_number = row[2] #in #/cm3
bin_abs_xsec = row[3] #in cm2
bin_sca_xsec = row[4] #in cm2
bin_abs_xsec_bare = row[5] #in cm2
bin_sca_xsec_bare = row[6] #in cm2
vol_abs_coeff = bin_number*bin_abs_xsec #in cm-1
vol_sca_coeff = bin_number*bin_sca_xsec #in cm-1
vol_abs_coeff_bare = bin_number*bin_abs_xsec_bare #in cm-1
vol_sca_coeff_bare = bin_number*bin_sca_xsec_bare #in cm-1
mass_abs_coeff_int = (vol_abs_coeff)/bin_mass #in cm2/fg
mass_abs_coeff = mass_abs_coeff_int*(10**11) #in m2/g
optical_data_meas.append([bin_midpoint,bin_mass,bin_number,vol_abs_coeff,vol_sca_coeff,vol_abs_coeff_bare,vol_sca_coeff_bare])
bin_midpoints = np.array([row[0] for row in optical_data_meas])
mass_concs = [row[1] for row in optical_data_meas]
mass_concs_sum = np.sum([row[1] for row in optical_data_meas])
number_concs = np.array([row[2] for row in optical_data_meas])
vol_abs_coeff_sum = np.sum([row[3] for row in optical_data_meas])
vol_sca_coeff_sum = np.sum([row[4] for row in optical_data_meas])
vol_abs_coeff_sum_bare = np.sum([row[5] for row in optical_data_meas])
vol_sca_coeff_sum_bare = np.sum([row[6] for row in optical_data_meas])
MAC_meas = vol_abs_coeff_sum*(10**11)/mass_concs_sum
SSA_meas = vol_sca_coeff_sum/(vol_sca_coeff_sum+vol_abs_coeff_sum)
MAC_meas_bare = vol_abs_coeff_sum_bare*(10**11)/mass_concs_sum
SSA_meas_bare = vol_sca_coeff_sum_bare/(vol_sca_coeff_sum_bare+vol_abs_coeff_sum_bare)
abs_enhancement_meas = vol_abs_coeff_sum/vol_abs_coeff_sum_bare
#fit mass distr with lognormal
#get Dg and sigma and write to dict
try:
popt, pcov = curve_fit(lognorm, bin_midpoints, mass_concs)
fit_binned_mass_concs = []
for bin in fit_bins:
fit_val = lognorm(bin, popt[0], popt[1], popt[2])
fit_binned_mass_concs.append([bin,fit_val])
except:
print 'fit failure'
#fit number distr with lognormal
try:
popt, pcov = curve_fit(lognorm, bin_midpoints, number_concs)
fit_binned_number_concs = []
fit_binned_mass_concs_c = []
for bin in fit_bins:
fit_val = lognorm(bin, popt[0], popt[1], popt[2])
fit_binned_number_concs.append([bin,fit_val])
except:
print 'fit failure'
#optical constants for the extrapolated (from fit) full mass range
i=0
optical_data = []
for row in fit_binned_number_concs:
bin_midpoint = row[0]
bin_mass = fit_binned_mass_concs[i][1] #in fg/cm3
bin_number = row[1] #in #/cm3
abs_xsec = MieCalc(wavelength,bin_midpoint,assumed_coating_th[alt])[0]
sca_xsec = MieCalc(wavelength,bin_midpoint,assumed_coating_th[alt])[1]
abs_xsec_bare = MieCalc(wavelength,bin_midpoint,0)[0]
sca_xsec_bare = MieCalc(wavelength,bin_midpoint,0)[1]
vol_abs_coeff = bin_number*abs_xsec #in cm-1
vol_sca_coeff = bin_number*sca_xsec #in cm-1
vol_abs_coeff_bare = bin_number*abs_xsec_bare #in cm-1
vol_sca_coeff_bare = bin_number*sca_xsec_bare #in cm-1
mass_abs_coeff_int = (vol_abs_coeff)/bin_mass #in cm2/fg
mass_abs_coeff = mass_abs_coeff_int*(10**11) #in m2/g
optical_data.append([bin_mass,vol_abs_coeff,vol_sca_coeff,vol_abs_coeff_bare,vol_sca_coeff_bare,bin_midpoint])
i+=1
mass_concs_sum_calc = np.sum([row[0] for row in optical_data])
vol_abs_coeff_sum_calc = np.sum([row[1] for row in optical_data])
vol_sca_coeff_sum_calc = np.sum([row[2] for row in optical_data])
vol_abs_coeff_sum_calc_bare = np.sum([row[3] for row in optical_data])
vol_sca_coeff_sum_calc_bare = np.sum([row[4] for row in optical_data])
MAC_calc = vol_abs_coeff_sum_calc*(10**11)/mass_concs_sum_calc
SSA_calc = vol_sca_coeff_sum_calc/(vol_sca_coeff_sum_calc+vol_abs_coeff_sum_calc)
MAC_calc_bare = vol_abs_coeff_sum_calc_bare*(10**11)/mass_concs_sum_calc
SSA_calc_bare = vol_sca_coeff_sum_calc_bare/(vol_sca_coeff_sum_calc_bare+vol_abs_coeff_sum_calc_bare)
abs_enhancement_calc = vol_abs_coeff_sum_calc/vol_abs_coeff_sum_calc_bare
#add overall data to dict
mean_alt = lower_alt + alt_incr/2
print mean_alt
if mean_alt in plot_data:
plot_data[mean_alt].append([MAC_calc,SSA_calc,MAC_calc_bare,SSA_calc_bare,MAC_meas,SSA_meas,MAC_meas_bare,SSA_meas_bare,abs_enhancement_meas,abs_enhancement_calc])
else:
plot_data[mean_alt] = [[MAC_calc,SSA_calc,MAC_calc_bare,SSA_calc_bare,MAC_meas,SSA_meas,MAC_meas_bare,SSA_meas_bare,abs_enhancement_meas,abs_enhancement_calc]]
####plotting distrs if desired
fit_binned_mass_conc_vals = [row[1] for row in fit_binned_mass_concs]
fit_binned_number_conc_vals = [row[1] for row in fit_binned_number_concs]
if show_distr_plots == True:
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.semilogx(bin_midpoints,number_concs, color = 'g',marker='o')
ax1.semilogx(bin_midpoints,mass_concs, color = 'b',marker='o')
ax1.semilogx(fit_bins,fit_binned_mass_conc_vals, color = 'b',marker=None)
ax1.semilogx(fit_bins,fit_binned_number_conc_vals, color = 'g',marker=None)
plt.ylabel('dM/dlog(VED)')
ax1.set_xlabel('VED (nm)')
plt.show()
lower_alt += alt_incr
alt += 1
cnx.close()
print 'next step . . .'
##
plot_list = []
for mean_alt in plot_data:
mean_MAC_calc = np.mean([row[0] for row in plot_data[mean_alt]])
min_MAC_calc = mean_MAC_calc - np.min([row[0] for row in plot_data[mean_alt]])
max_MAC_calc = np.max([row[0] for row in plot_data[mean_alt]]) - mean_MAC_calc
mean_SSA_calc = np.mean([row[1] for row in plot_data[mean_alt]])
min_SSA_calc = mean_SSA_calc - np.min([row[1] for row in plot_data[mean_alt]])
max_SSA_calc = np.max([row[1] for row in plot_data[mean_alt]]) - mean_SSA_calc
mean_MAC_calc_bare = np.mean([row[2] for row in plot_data[mean_alt]])
min_MAC_calc_bare = mean_MAC_calc_bare - np.min([row[2] for row in plot_data[mean_alt]])
max_MAC_calc_bare = np.max([row[2] for row in plot_data[mean_alt]]) - mean_MAC_calc_bare
mean_SSA_calc_bare = np.mean([row[3] for row in plot_data[mean_alt]])
min_SSA_calc_bare = mean_SSA_calc_bare - np.min([row[3] for row in plot_data[mean_alt]])
max_SSA_calc_bare = np.max([row[3] for row in plot_data[mean_alt]]) - mean_SSA_calc_bare
mean_MAC_meas = np.mean([row[4] for row in plot_data[mean_alt]])
min_MAC_meas = mean_MAC_meas - np.min([row[4] for row in plot_data[mean_alt]])
max_MAC_meas = np.max([row[4] for row in plot_data[mean_alt]]) - mean_MAC_meas
mean_SSA_meas = np.mean([row[5] for row in plot_data[mean_alt]])
min_SSA_meas = mean_SSA_meas - np.min([row[5] for row in plot_data[mean_alt]])
max_SSA_meas = np.max([row[5] for row in plot_data[mean_alt]]) - mean_SSA_meas
mean_MAC_meas_bare = np.mean([row[6] for row in plot_data[mean_alt]])
min_MAC_meas_bare = mean_MAC_meas_bare - np.min([row[6] for row in plot_data[mean_alt]])
max_MAC_meas_bare = np.max([row[6] for row in plot_data[mean_alt]]) - mean_MAC_meas_bare
mean_SSA_meas_bare = np.mean([row[7] for row in plot_data[mean_alt]])
min_SSA_meas_bare = mean_SSA_meas_bare - np.min([row[7] for row in plot_data[mean_alt]])
max_SSA_meas_bare = np.max([row[7] for row in plot_data[mean_alt]]) - mean_SSA_meas_bare
mean_abse_meas = np.mean([row[8] for row in plot_data[mean_alt]])
mean_abse_calc = np.mean([row[9] for row in plot_data[mean_alt]])
plot_list.append([mean_alt,mean_MAC_calc,mean_SSA_calc,mean_MAC_calc_bare,mean_SSA_calc_bare,mean_MAC_meas,mean_SSA_meas,mean_MAC_meas_bare,mean_SSA_meas_bare,mean_abse_calc,mean_abse_meas])
plot_list.sort()
altitudes = [row[0] for row in plot_list]
MAC_calc_mean = [row[1] for row in plot_list]
SSA_calc_mean = [row[2] for row in plot_list]
MAC_calc_mean_bare = [row[3] for row in plot_list]
SSA_calc_mean_bare = [row[4] for row in plot_list]
MAC_meas_mean = [row[5] for row in plot_list]
SSA_meas_mean = [row[6] for row in plot_list]
MAC_meas_mean_bare = [row[7] for row in plot_list]
SSA_meas_mean_bare = [row[8] for row in plot_list]
mean_abse_calc = [row[9] for row in plot_list]
mean_abse_meas = [row[10] for row in plot_list]
fig = plt.figure(figsize=(10,10))
ax1 = plt.subplot2grid((2,2), (0,0), colspan=1)
ax2 = plt.subplot2grid((2,2), (0,1), colspan=1)
ax3 = plt.subplot2grid((2,2), (1,0), colspan=1)
ax1.plot(MAC_calc_mean,altitudes,marker='o',linestyle='-', color = 'b', label = 'coated rBC')
ax1.plot(MAC_calc_mean_bare,altitudes,marker='o',linestyle='--', color = 'b',alpha = 0.5, label = 'bare rBC')
#ax1.plot(MAC_meas_mean,altitudes,marker='o',linestyle='-', color = 'r', label = 'coated rBC')
#ax1.plot(MAC_meas_mean_bare,altitudes,marker='o',linestyle='--', color = 'r',alpha = 0.5, label = 'bare rBC')
ax1.set_ylabel('altitude (m)')
ax1.set_xlabel('MAC (m2/g)')
ax1.set_xlim(5,18)
ax1.set_ylim(0,5000)
handles, labels = ax1.get_legend_handles_labels()
ax1.legend(handles, labels,loc=7)
ax2.plot(SSA_calc_mean,altitudes,marker='o',linestyle='-', color = 'b')
ax2.plot(SSA_calc_mean_bare,altitudes,marker='o',linestyle='--', color = 'b',alpha = 0.5)
#ax2.plot(SSA_meas_mean,altitudes,marker='o',linestyle='-', color = 'r')
#ax2.plot(SSA_meas_mean_bare,altitudes,marker='o',linestyle='--', color = 'r',alpha = 0.5)
ax2.set_xlabel('SSA')
ax2.set_ylabel('altitude (m)')
ax2.set_xlim(0.38,0.5)
ax2.set_ylim(0,5000)
#ax3.plot(SSA_calc_mean,altitudes,marker='o',linestyle='-', color = 'b')
#ax3.plot(SSA_calc_mean_bare,altitudes,marker='o',linestyle='--', color = 'b',alpha = 0.5)
ax3.plot(mean_abse_calc,altitudes,marker='o',linestyle='-', color = 'b')
#ax3.plot(mean_abse_meas,altitudes,marker='o',linestyle='-', color = 'r')
ax3.set_xlabel('absorption enhancement')
ax3.set_ylabel('altitude (m)')
ax3.set_xlim(1.3,1.7)
ax3.set_ylim(0,5000)
dir = 'C:/Users/Sarah Hanna/Documents/Data/Netcare/Spring 2015/'
os.chdir(dir)
if savefig == True:
plt.savefig('MAC SSA abs enhancement - Sc 1-7 full mass range.png', bbox_inches='tight')
plt.show()
| mit |
CPedrini/TateTRES | tornado/process.py | 15 | 5233 | #!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for working with multiple processes."""
from __future__ import absolute_import, division, with_statement
import errno
import logging
import os
import sys
import time
from binascii import hexlify
from tornado import ioloop
try:
import multiprocessing # Python 2.6+
except ImportError:
multiprocessing = None
def cpu_count():
    """Return the number of processors on this machine.

    Tries the `multiprocessing` module first (when it imported
    successfully), then the POSIX sysconf interface; if neither can
    answer, logs an error and assumes a single processor.
    """
    # Candidate detectors, in preference order, paired with the
    # exception each one raises when it cannot answer.
    detectors = []
    if multiprocessing is not None:
        detectors.append((multiprocessing.cpu_count, NotImplementedError))
    detectors.append((lambda: os.sysconf("SC_NPROCESSORS_CONF"), ValueError))
    for detect, failure in detectors:
        try:
            return detect()
        except failure:
            pass
    logging.error("Could not detect number of processors; assuming 1")
    return 1
def _reseed_random():
if 'random' not in sys.modules:
return
import random
# If os.urandom is available, this method does the same thing as
# random.seed (at least as of python 2.6). If os.urandom is not
# available, we mix in the pid in addition to a timestamp.
try:
seed = long(hexlify(os.urandom(16)), 16)
except NotImplementedError:
seed = int(time.time() * 1000) ^ os.getpid()
random.seed(seed)
_task_id = None
def fork_processes(num_processes, max_restarts=100):
    """Starts multiple worker processes.
    If ``num_processes`` is None or <= 0, we detect the number of cores
    available on this machine and fork that number of child
    processes. If ``num_processes`` is given and > 0, we fork that
    specific number of sub-processes.
    Since we use processes and not threads, there is no shared memory
    between any server code.
    Note that multiple processes are not compatible with the autoreload
    module (or the debug=True option to `tornado.web.Application`).
    When using multiple processes, no IOLoops can be created or
    referenced until after the call to ``fork_processes``.
    In each child process, ``fork_processes`` returns its *task id*, a
    number between 0 and ``num_processes``. Processes that exit
    abnormally (due to a signal or non-zero exit status) are restarted
    with the same id (up to ``max_restarts`` times). In the parent
    process, ``fork_processes`` returns None if all child processes
    have exited normally, but will otherwise only exit by throwing an
    exception.
    """
    global _task_id
    # May only be called once per process (and never from a child).
    assert _task_id is None
    if num_processes is None or num_processes <= 0:
        num_processes = cpu_count()
    # Forking after an IOLoop exists would make parent and children share
    # its file descriptors, so this is rejected up front.
    if ioloop.IOLoop.initialized():
        raise RuntimeError("Cannot run in multiple processes: IOLoop instance "
                           "has already been initialized. You cannot call "
                           "IOLoop.instance() before calling start_processes()")
    logging.info("Starting %d processes", num_processes)
    # Maps child pid -> task id, so abnormal exits can be restarted
    # under the same id.
    children = {}
    def start_child(i):
        pid = os.fork()
        if pid == 0:
            # child process: reseed the PRNG (forked children share the
            # parent's state), record the task id, and return it.
            _reseed_random()
            global _task_id
            _task_id = i
            return i
        else:
            # parent process: remember the child and return None.
            children[pid] = i
            return None
    for i in range(num_processes):
        id = start_child(i)
        if id is not None:
            # We are in a child: hand the task id back to the caller.
            return id
    # Parent: supervise the children, restarting any that die abnormally.
    num_restarts = 0
    while children:
        try:
            pid, status = os.wait()
        except OSError, e:
            # EINTR: os.wait() was interrupted by a signal; just retry.
            if e.errno == errno.EINTR:
                continue
            raise
        if pid not in children:
            # Reaped a process we did not start here; ignore it.
            continue
        id = children.pop(pid)
        if os.WIFSIGNALED(status):
            logging.warning("child %d (pid %d) killed by signal %d, restarting",
                            id, pid, os.WTERMSIG(status))
        elif os.WEXITSTATUS(status) != 0:
            logging.warning("child %d (pid %d) exited with status %d, restarting",
                            id, pid, os.WEXITSTATUS(status))
        else:
            # Clean exit: do not restart this task id.
            logging.info("child %d (pid %d) exited normally", id, pid)
            continue
        num_restarts += 1
        if num_restarts > max_restarts:
            raise RuntimeError("Too many child restarts, giving up")
        new_id = start_child(id)
        if new_id is not None:
            # The replacement child also returns its task id to the caller.
            return new_id
    # All child processes exited cleanly, so exit the master process
    # instead of just returning to right after the call to
    # fork_processes (which will probably just start up another IOLoop
    # unless the caller checks the return value).
    sys.exit(0)
def task_id():
    """Return the task id `fork_processes` assigned to this process.

    None in any process that was not created by `fork_processes`
    (including the parent).
    """
    # Reading a module-level name needs no `global` declaration.
    return _task_id
| apache-2.0 |
spaceof7/QGIS | tests/src/python/test_provider_tabfile.py | 24 | 4558 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for the OGR/MapInfo tab provider.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '2016-01-28'
__copyright__ = 'Copyright 2016, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import tempfile
from qgis.core import QgsVectorLayer, QgsFeatureRequest, QgsVectorDataProvider, QgsField
from qgis.PyQt.QtCore import QDate, QTime, QDateTime, QVariant, QDir
from qgis.testing import start_app, unittest
import osgeo.gdal # NOQA
from utilities import unitTestDataPath
import shutil
start_app()
TEST_DATA_DIR = unitTestDataPath()
# Note - doesn't implement ProviderTestCase as OGR provider is tested by the shapefile provider test
class TestPyQgsTabfileProvider(unittest.TestCase):
    """Tests of the OGR provider's handling of MapInfo .tab files."""
    @classmethod
    def setUpClass(cls):
        """Run before all tests"""
        # Scratch directory, removed again in tearDownClass.
        cls.basetestpath = tempfile.mkdtemp()
        cls.dirs_to_cleanup = [cls.basetestpath]
    @classmethod
    def tearDownClass(cls):
        """Run after all tests"""
        for dirname in cls.dirs_to_cleanup:
            # ignore_errors=True: best-effort cleanup of temp dirs.
            shutil.rmtree(dirname, True)
    def testDateTimeFormats(self):
        """Date/Time/DateTime .tab fields map to the matching QVariant types."""
        # check that date and time formats are correctly interpreted
        basetestfile = os.path.join(TEST_DATA_DIR, 'tab_file.tab')
        vl = QgsVectorLayer('{}|layerid=0'.format(basetestfile), 'test', 'ogr')
        fields = vl.dataProvider().fields()
        self.assertEqual(fields.at(fields.indexFromName('date')).type(), QVariant.Date)
        self.assertEqual(fields.at(fields.indexFromName('time')).type(), QVariant.Time)
        self.assertEqual(fields.at(fields.indexFromName('date_time')).type(), QVariant.DateTime)
        # Check the attribute *values* of the first feature as well.
        f = next(vl.getFeatures(QgsFeatureRequest()))
        date_idx = vl.fields().lookupField('date')
        assert isinstance(f.attributes()[date_idx], QDate)
        self.assertEqual(f.attributes()[date_idx], QDate(2004, 5, 3))
        time_idx = vl.fields().lookupField('time')
        assert isinstance(f.attributes()[time_idx], QTime)
        self.assertEqual(f.attributes()[time_idx], QTime(13, 41, 00))
        datetime_idx = vl.fields().lookupField('date_time')
        assert isinstance(f.attributes()[datetime_idx], QDateTime)
        self.assertEqual(f.attributes()[datetime_idx], QDateTime(QDate(2004, 5, 3), QTime(13, 41, 00)))
    def testUpdateMode(self):
        """ Test that on-the-fly re-opening in update/read-only mode works """
        basetestfile = os.path.join(TEST_DATA_DIR, 'tab_file.tab')
        vl = QgsVectorLayer('{}|layerid=0'.format(basetestfile), 'test', 'ogr')
        caps = vl.dataProvider().capabilities()
        self.assertTrue(caps & QgsVectorDataProvider.AddFeatures)
        # We should be really opened in read-only mode even if write capabilities are declared
        self.assertEqual(vl.dataProvider().property("_debug_open_mode"), "read-only")
        # Test that startEditing() / commitChanges() plays with enterUpdateMode() / leaveUpdateMode()
        self.assertTrue(vl.startEditing())
        self.assertEqual(vl.dataProvider().property("_debug_open_mode"), "read-write")
        self.assertTrue(vl.dataProvider().isValid())
        self.assertTrue(vl.commitChanges())
        # After committing, the provider drops back to read-only.
        self.assertEqual(vl.dataProvider().property("_debug_open_mode"), "read-only")
        self.assertTrue(vl.dataProvider().isValid())
    def testInteger64WriteTabfile(self):
        """Check writing Integer64 fields to an MapInfo tabfile (which does not support that type)."""
        base_dest_file_name = os.path.join(str(QDir.tempPath()), 'integer64')
        dest_file_name = base_dest_file_name + '.tab'
        # Work on a scratch copy of the fixture dataset (a .tab layer is
        # made up of four companion files).
        shutil.copy(os.path.join(TEST_DATA_DIR, 'tab_file.tab'), base_dest_file_name + '.tab')
        shutil.copy(os.path.join(TEST_DATA_DIR, 'tab_file.dat'), base_dest_file_name + '.dat')
        shutil.copy(os.path.join(TEST_DATA_DIR, 'tab_file.map'), base_dest_file_name + '.map')
        shutil.copy(os.path.join(TEST_DATA_DIR, 'tab_file.id'), base_dest_file_name + '.id')
        vl = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
        self.assertTrue(vl.isValid())
        # Adding the field should succeed even though the format has no
        # native 64-bit integer type (see docstring).
        self.assertTrue(vl.dataProvider().addAttributes([QgsField("int8", QVariant.LongLong, "integer64")]))
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
RedhawkSDR/integration-gnuhawk | gnuradio/gr-howto-write-a-block/python/qa_howto.py | 8 | 1947 | #!/usr/bin/env python
#
# Copyright 2004,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import howto_swig
class qa_howto (gr_unittest.TestCase):
    """Unit tests for the howto example blocks.

    Both square_ff and square2_ff implement the same element-wise
    squaring of a float stream, so a single shared check drives both
    tests against identical input/expected data.
    """

    def setUp (self):
        self.tb = gr.top_block ()

    def tearDown (self):
        self.tb = None

    def _check_square (self, sqr):
        """Run src -> sqr -> sink on fixed data and compare to the squares.

        sqr: a GNU Radio block taking one float stream in and producing
        the squared float stream out.
        """
        src_data = (-3, 4, -5.5, 2, 3)
        expected_result = (9, 16, 30.25, 4, 9)
        src = gr.vector_source_f (src_data)
        dst = gr.vector_sink_f ()
        self.tb.connect (src, sqr)
        self.tb.connect (sqr, dst)
        self.tb.run ()
        result_data = dst.data ()
        # Compare with 6 decimal places of tolerance (float path).
        self.assertFloatTuplesAlmostEqual (expected_result, result_data, 6)

    def test_001_square_ff (self):
        self._check_square (howto_swig.square_ff ())

    def test_002_square2_ff (self):
        self._check_square (howto_swig.square2_ff ())
if __name__ == '__main__':
    gr_unittest.main ()
| gpl-3.0 |
camptocamp/odoo | addons/sale_analytic_plans/__openerp__.py | 119 | 1718 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Sales Analytic Distribution',
    'version': '1.0',
    'category': 'Sales Management',
    'description': """
The base module to manage analytic distribution and sales orders.
=================================================================
Using this module you will be able to link analytic accounts to sales orders.
""",
    'author': 'OpenERP SA',
    'website': 'http://www.openerp.com',
    # Images referenced by the module listing.
    'images': ['images/sale_order_analytic_account.jpeg', 'images/sales_order_line.jpeg'],
    # Modules that must be installed before this one.
    'depends': ['sale', 'account_analytic_plans'],
    # Data files loaded on install/update.
    'data': ['sale_analytic_plans_view.xml'],
    'demo': [],
    'installable': True,
    # Do not install automatically just because dependencies are present.
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
gusyussh/learntosolveit | languages/python/algorithm_bipartite.py | 2 | 2496 | #!/usr/bin/python
#$Id$
"""
Program to find the bipartite match.
Hopcroft-Karp bipartite max-cardinality matching and max independent set
David Eppstein, UC Irvine, 27 Apr 2002
Explaination: TODO
"""
def bipartiteMatch(graph):
    '''Maximum-cardinality matching of a bipartite graph (U, V, E) by
    the Hopcroft-Karp layered augmenting-path method.

    ``graph`` maps each member of U to the list of its neighbors in V.
    Returns a triple (M, A, B): M maps members of V to their matched
    partner in U, A is the U-part and B the V-part of a maximum
    independent set.  An object occurring in both U and V is treated as
    two distinct vertices.'''
    # Seed with a greedy matching: cheaper than finding the same edges
    # one augmenting phase at a time.
    matched_to = {}
    for u in graph:
        for v in graph[u]:
            if v not in matched_to:
                matched_to[v] = u
                break

    while True:
        # --- BFS phase: layer the residual graph. ---
        # v_preds[v]: neighbors of v in the previous (U) layer.
        # u_prev[u]:  the V-vertex that reached u; the shared `exposed`
        #             object marks unmatched U-vertices in layer zero.
        # `exposed` also collects the free V-vertices of the last layer.
        v_preds = {}
        exposed = []
        u_prev = dict.fromkeys(graph, exposed)
        for v in matched_to:
            del u_prev[matched_to[v]]
        frontier = list(u_prev)

        while frontier and not exposed:
            discovered = {}
            for u in frontier:
                for v in graph[u]:
                    if v not in v_preds:
                        discovered.setdefault(v, []).append(u)
            frontier = []
            for v in discovered:
                v_preds[v] = discovered[v]
                if v in matched_to:
                    partner = matched_to[v]
                    frontier.append(partner)
                    u_prev[partner] = v
                else:
                    exposed.append(v)

        if not exposed:
            # No augmenting path left: the matching is maximum.  The
            # never-layered V-vertices plus the U-vertices remaining in
            # u_prev form the maximum independent set.
            unlayered = {}
            for u in graph:
                for v in graph[u]:
                    if v not in v_preds:
                        unlayered[v] = None
            return (matched_to, list(u_prev), list(unlayered))

        # --- DFS phase: flip a maximal set of vertex-disjoint paths. ---
        def augment(v):
            if v in v_preds:
                candidates = v_preds[v]
                del v_preds[v]
                for u in candidates:
                    if u in u_prev:
                        back = u_prev[u]
                        del u_prev[u]
                        if back is exposed or augment(back):
                            matched_to[v] = u
                            return True
            return False

        for v in exposed:
            augment(v)
if __name__ == '__main__':
    # Demo run on a small graph.  print() with a single argument behaves
    # identically on Python 2 and 3; the bare `print obj` statement is a
    # SyntaxError under Python 3.
    obj = bipartiteMatch({0:[0],1:[0,2],2:[1,2,3],3:[2],4:[2]})
    print(obj)
| bsd-3-clause |
pysv/djep | pyconde/checkin/forms.py | 1 | 5252 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.forms import formsets
from django.utils.translation import ugettext_lazy as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Field, Fieldset, Layout, Submit
from crispy_forms.bootstrap import FieldWithButtons
from ..accounts.forms import UserModelChoiceField
from ..attendees.models import TicketType
from ..sponsorship.models import Sponsor
def get_ticket_types():
    """Ticket types that may be sold on-desk (at the check-in counter)."""
    return TicketType.objects.filter_ondesk()
def get_users():
    """All user accounts; used as choices for assigning a ticket to a user."""
    return User.objects.all()
def get_sponsors():
    """All sponsors currently flagged active."""
    return Sponsor.objects.filter(active=True).all()
class SearchForm(forms.Form):
    """Single-field attendee search box for the check-in screens.

    Submits via GET to the ``checkin_search`` view; labels are hidden
    and the field is rendered with an attached submit button.
    """
    query = forms.CharField(required=False)
    def __init__(self, *args, **kwargs):
        super(SearchForm, self).__init__(*args, **kwargs)
        # crispy-forms rendering helper: horizontal layout, no labels.
        self.helper = FormHelper()
        self.helper.form_show_labels = False
        self.helper.form_action = reverse('checkin_search')
        self.helper.form_class = 'form-horizontal'
        # GET keeps the query in the URL.
        self.helper.form_method = 'GET'
        self.helper.layout = Layout(
            FieldWithButtons(
                # autofocus: cursor lands in the search box on page load.
                Field('query', autofocus='autofocus'),
                Submit('', _('Search'))
            )
        )
class OnDeskPurchaseForm(forms.Form):
    """Invoice/billing details for an on-desk ticket purchase.

    Only the purchaser's name is mandatory; address and tax fields are
    optional.
    """
    first_name = forms.CharField(label=_('First name'), max_length=250)
    last_name = forms.CharField(label=_('Last name'), max_length=250)
    company_name = forms.CharField(label=_('Company'), max_length=100, required=False)
    street = forms.CharField(label=_('Street and house number'), max_length=100, required=False)
    zip_code = forms.CharField(label=_('Zip code'), max_length=20, required=False)
    city = forms.CharField(label=_('City'), max_length=100, required=False)
    country = forms.CharField(label=_('Country'), max_length=100, required=False)
    vat_id = forms.CharField(label=_('VAT-ID'), max_length=16, required=False)
    def __init__(self, *args, **kwargs):
        super(OnDeskPurchaseForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        # Group all fields under a single "Invoice" legend.
        invoice_fields = Fieldset(
            _('Invoice'),
            'first_name',
            'last_name',
            'company_name',
            'street',
            'zip_code',
            'city',
            'country',
            'vat_id',
        )
        # form_tag=False: the enclosing template supplies the <form> tag.
        self.helper.form_tag = False
        self.helper.layout = Layout(
            invoice_fields,
        )
class EditOnDeskTicketForm(forms.Form):
    """Attendee data for a single on-desk ticket.

    The ``users`` and ``sponsors`` querysets are injected by the caller
    (see BaseOnDeskTicketFormSet) so they are built once per formset
    rather than once per form.
    """
    first_name = forms.CharField(label=_('First name'), max_length=250)
    last_name = forms.CharField(label=_('Last name'), max_length=250)
    organisation = forms.CharField(label=_('Organization'), max_length=100, required=False)
    # queryset=None placeholders; the real querysets are bound in __init__.
    user_id = UserModelChoiceField(label=_('User'), queryset=None, required=False)
    sponsor_id = forms.ModelChoiceField(label=_('Sponsor'), queryset=None, required=False)
    def __init__(self, users, sponsors, *args, **kwargs):
        super(EditOnDeskTicketForm, self).__init__(*args, **kwargs)
        self.fields['user_id'].queryset = users
        self.fields['sponsor_id'].queryset = sponsors
        self.helper = FormHelper()
        # Group all fields under a single "Ticket" legend.
        ticket_fields = Fieldset(
            _('Ticket'),
            'first_name',
            'last_name',
            'organisation',
            'user_id',
            'sponsor_id',
        )
        # form_tag=False: the enclosing template supplies the <form> tag.
        self.helper.form_tag = False
        self.helper.layout = Layout(
            ticket_fields
        )
class NewOnDeskTicketForm(EditOnDeskTicketForm):
    """Ticket form for a brand-new on-desk sale; adds the ticket type."""
    # queryset=None placeholder; the real queryset is bound in __init__.
    ticket_type_id = forms.ModelChoiceField(label=_('Ticket type'), queryset=None)
    def __init__(self, ticket_types, *args, **kwargs):
        super(NewOnDeskTicketForm, self).__init__(*args, **kwargs)
        self.fields['ticket_type_id'].queryset = ticket_types
        # NOTE(review): presumably CSRF is handled by the enclosing
        # formset/page form rather than per-form -- confirm.
        self.helper.disable_csrf = True
        # Show the ticket type first inside the parent's "Ticket" fieldset.
        self.helper.layout[0].fields.insert(0,
            'ticket_type_id')
class BaseOnDeskTicketFormSet(formsets.BaseFormSet):
    """Formset of on-desk ticket forms sharing one set of querysets.

    Fetches the ticket-type/user/sponsor querysets once per formset and
    hands them to every member form, which expects them as leading
    __init__ arguments.
    """
    def __init__(self, *args, **kwargs):
        super(BaseOnDeskTicketFormSet, self).__init__(*args, **kwargs)
        # At least one ticket must be entered: the first form may not be
        # submitted empty.
        self.forms[0].empty_permitted = False
    def _construct_forms(self):
        # Build the shared querysets once, just before the member forms
        # are constructed.
        self.ticket_types = get_ticket_types()
        self.users = get_users()
        self.sponsors = get_sponsors()
        return super(BaseOnDeskTicketFormSet, self)._construct_forms()
    def _construct_form(self, i, **kwargs):
        # Forward the shared querysets to each member form's __init__.
        kwargs.update({
            'ticket_types': self.ticket_types,
            'users': self.users,
            'sponsors': self.sponsors,
        })
        return super(BaseOnDeskTicketFormSet, self)._construct_form(i, **kwargs)
    @property
    def empty_form(self):
        # Mirrors Django's BaseFormSet.empty_form but injects the extra
        # queryset arguments our form class requires.
        form = self.form(
            ticket_types=self.ticket_types,
            users=self.users,
            sponsors=self.sponsors,
            auto_id=self.auto_id,
            prefix=self.add_prefix('__prefix__'),
            empty_permitted=True,
        )
        self.add_fields(form, None)
        return form
    @property
    def changed_forms(self):
        """Yield only the member forms whose data actually changed."""
        for f in self.forms:
            if not f.has_changed():
                continue
            yield f
| bsd-3-clause |
akesandgren/easybuild-easyblocks | easybuild/easyblocks/generic/cmakeninja.py | 3 | 2149 | ##
# Copyright 2019-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for software that uses
CMake configure step and Ninja build install.
@author: Kenneth Hoste (Ghent University)
@author: Pavel Grochal (INUITS)
"""
from easybuild.easyblocks.generic.cmakemake import CMakeMake
from easybuild.easyblocks.generic.mesonninja import MesonNinja
class CMakeNinja(CMakeMake, MesonNinja):
    """Support for configuring with CMake, building and installing with MesonNinja.

    Each step is dispatched explicitly to the intended parent class,
    sidestepping MRO ambiguity from the double inheritance.
    """
    @staticmethod
    def extra_options(extra_vars=None):
        """Define extra easyconfig parameters specific to CMakeMake."""
        extra_vars = CMakeMake.extra_options(extra_vars)
        # Override the default 'generator' value so CMake emits Ninja
        # build files instead of Makefiles.
        extra_vars['generator'][0] = 'Ninja'
        return extra_vars
    def configure_step(self, *args, **kwargs):
        """Configure using CMake."""
        CMakeMake.configure_step(self, *args, **kwargs)
    def build_step(self, *args, **kwargs):
        """Build using MesonNinja."""
        MesonNinja.build_step(self, *args, **kwargs)
    def install_step(self, *args, **kwargs):
        """Install using MesonNinja."""
        MesonNinja.install_step(self, *args, **kwargs)
| gpl-2.0 |
portnov/sverchok | utils/csg_core.py | 6 | 2180 | import math
from sverchok.utils.csg_geom import *
class CSG(object):
    """
    Constructive solid geometry on lists of polygons.

    ## License
    Copyright (c) 2011 Evan Wallace (http://madebyevan.com/), under the MIT license.
    Python port Copyright (c) 2012 Tim Knip (http://www.floorplanner.com), under the MIT license.
    """
    def __init__(self):
        # Polygons describing the surface of this solid.
        self.polygons = []

    @classmethod
    def fromPolygons(cls, polygons):
        """Wrap an existing polygon list in a CSG solid (no copy made)."""
        csg = CSG()
        csg.polygons = polygons
        return csg

    def clone(self):
        """Return a deep copy of this solid (every polygon cloned)."""
        csg = CSG()
        # List comprehension instead of map(): under Python 3, map()
        # returns a one-shot iterator, leaving csg.polygons unusable
        # after a single traversal.  Under Python 2 the result is the
        # same list as before.
        csg.polygons = [p.clone() for p in self.polygons]
        return csg

    def toPolygons(self):
        """Return the underlying polygon list of this solid."""
        return self.polygons

    def union(self, csg):
        """Return a new solid covering the volume of self OR csg."""
        a = CSGNode(self.clone().polygons)
        b = CSGNode(csg.clone().polygons)
        a.clipTo(b)
        b.clipTo(a)
        b.invert()
        b.clipTo(a)
        b.invert()
        a.build(b.allPolygons())
        return CSG.fromPolygons(a.allPolygons())

    def subtract(self, csg):
        """Return a new solid covering the volume of self AND NOT csg."""
        a = CSGNode(self.clone().polygons)
        b = CSGNode(csg.clone().polygons)
        a.invert()
        a.clipTo(b)
        b.clipTo(a)
        b.invert()
        b.clipTo(a)
        b.invert()
        a.build(b.allPolygons())
        a.invert()
        return CSG.fromPolygons(a.allPolygons())

    def intersect(self, csg):
        """Return a new solid covering the volume of self AND csg."""
        a = CSGNode(self.clone().polygons)
        b = CSGNode(csg.clone().polygons)
        a.invert()
        b.clipTo(a)
        b.invert()
        a.clipTo(b)
        b.clipTo(a)
        a.build(b.allPolygons())
        a.invert()
        return CSG.fromPolygons(a.allPolygons())

    def inverse(self):
        """
        Return a new CSG solid with solid and empty space switched. This solid is
        not modified.
        """
        csg = self.clone()
        # Flip each polygon with an explicit loop.  The previous code
        # called map() purely for its side effect, which runs eagerly on
        # Python 2 but silently does nothing on Python 3 (map() is lazy).
        for p in csg.polygons:
            p.flip()
        return csg

    @classmethod
    def Obj_from_pydata(cls, verts, faces):
        """Build a CSG solid from mesh data: a list of vertex coordinates
        plus faces given as lists of vertex indices."""
        polygons = []
        for face in faces:
            polyg = []
            for idx in face:
                co = verts[idx]
                polyg.append(CSGVertex(co))
            polygons.append(CSGPolygon(polyg))
        return CSG.fromPolygons(polygons)
| gpl-3.0 |
davidshepherd7/Landau-Lifshitz-Gilbert-ODE-model | core/utils.py | 1 | 16649 |
from __future__ import division
from __future__ import absolute_import
import collections
import scipy as sp
import scipy.linalg
import itertools as it
import operator as op
import functools as ft
import sympy
import math
from functools import partial as par
from os.path import join as pjoin
from math import sin, cos, tan, log, atan2, acos, pi, sqrt
# General
# ============================================================
def unzip(iterable_of_iterables):
    """Inverse of zip: regroup an iterable of n-tuples into n sequences.

    Works because *-unpacking feeds each inner tuple to zip as a separate
    argument, which then pairs up elements positionally.
    See http://www.shocksolution.com/2011/07/python-lists-to-tuples-and-tuples-to-lists/
    """
    return zip(*iterable_of_iterables)
def _apply_to_list_and_print_args(function, list_of_args):
    """Print the argument list, then apply ``function`` to it unpacked.

    Lives at module level (rather than being a lambda) because
    multiprocessing can only pickle named, importable functions.
    """
    # Python 2 print statement: echo the parameter set being evaluated so
    # sweep progress is visible on stdout.
    print list_of_args
    return function(*list_of_args)
def parallel_parameter_sweep(function, parameter_lists, serial_mode=False):
    """Run function with all combinations of parameters in parallel using
    all available cores.

    parameter_lists should be a list of lists of parameters,
    """
    import multiprocessing

    # Generate a complete set of combinations of parameters
    parameter_sets = it.product(*parameter_lists)

    # multiprocessing doesn't include a "starmap", requires all functions
    # to take a single argument. Use a function wrapper to fix this. Also
    # print the list of args while we're in there.
    wrapped_function = par(_apply_to_list_and_print_args, function)

    # For debugging we often need to run in serial (to get useful stack
    # traces).
    if serial_mode:
        # NOTE: itertools.imap exists only on Python 2 (removed in 3.x).
        results_iterator = it.imap(wrapped_function, parameter_sets)
        # Force evaluation (to be exactly the same as in parallel)
        results_iterator = list(results_iterator)
    else:
        # Run in all parameter sets in parallel
        pool = multiprocessing.Pool()
        results_iterator = pool.imap_unordered(
            wrapped_function, parameter_sets)
        pool.close()

        # wait for everything to finish
        pool.join()

    return results_iterator
def partial_lists(l, min_list_length=1):
    """Return the successive prefixes of ``l`` ("partial lists").

    Prefixes shorter than ``min_list_length`` are dropped, so the empty
    prefix is excluded by default.

    e.g. partial_lists([0, 1, 2, 3]) gives
    [[0], [0, 1], [0, 1, 2], [0, 1, 2, 3]]
    """
    prefixes = (l[:length] for length in range(len(l) + 1))
    return filter(lambda prefix: len(prefix) >= min_list_length, prefixes)
def myfigsave(
        figure, name, texpath="/home/david/Dropbox/phd/reports/ongoing-writeup/images"):
    """Fix up layout and save a pdf of an image into my latex folder.

    figure: a matplotlib Figure instance.
    name: file name to save under (joined onto texpath).
    """
    # Fix layout
    figure.tight_layout(pad=0.3)

    # Save a pdf into my tex image dir
    figpath = pjoin(texpath, name)
    figure.savefig(figpath, dpi=300, orientation='portrait',
                   transparent=False)
    # Python 2 print statement: report where the figure went.
    print "Saved to", figpath
    return
def memoize(f):
    """Memoization decorator for a function of multiple (hashable)
    positional arguments.

    Based on http://code.activestate.com/recipes/578231-probably-the-fastest-memoization-decorator-in-the-/
    (from the comments): the cache is a dict subclass whose __missing__
    hook computes and stores results on first use.
    """
    class _MemoCache(dict):
        """dict that fills itself from the wrapped function on misses."""

        def __init__(self, func):
            self.f = func

        def __call__(self, *args):
            # The tuple of arguments is the cache key; a miss triggers
            # __missing__ below.
            return self[args]

        def __missing__(self, key):
            result = self[key] = self.f(*key)
            return result

    return _MemoCache(f)
def latex_escape(s):
    """Return ``s`` with LaTeX special characters escaped.

    Backslashes themselves are not handled (escaping them afterwards would
    corrupt the escapes inserted here).
    """
    # Order matters: braces are escaped first, so the literal brace pair
    # inserted for '^' at the end is left alone, as intended.
    replacements = [
        ('{', r'\{'),
        ('}', r'\}'),
        ('&', r'\&'),
        ('%', r'\%'),
        ('$', r'\$'),
        ('#', r'\#'),
        ('_', r'\_'),
        ('^', r'\^{}'),
    ]
    for char, escaped in replacements:
        s = s.replace(char, escaped)
    return s
# Testing helpers
# ============================================================
def almost_equal(a, b, tol=1e-9):
    """True when ``a`` and ``b`` differ by less than ``tol``."""
    return abs(a - b) < tol


def abs_list_diff(list_a, list_b):
    """Element-wise absolute differences of two equal-length sequences."""
    return [abs(x - y) for (x, y) in zip(list_a, list_b)]


def list_almost_zero(list_x, tol=1e-9):
    """True when every entry of ``list_x`` is below ``tol``.

    Entries are assumed non-negative (as produced by abs_list_diff); only
    the maximum is tested.
    """
    return max(list_x) < tol


def list_almost_equal(list_a, list_b, tol=1e-9):
    """True when the two sequences agree element-wise to within ``tol``."""
    return list_almost_zero(abs_list_diff(list_a, list_b), tol)


def same_order_of_magnitude(a, b, fp_zero):
    """True when a and b are within roughly a factor of ten of each other.

    Magnitudes below ``fp_zero`` are treated as zero, and zero only
    matches zero.
    """
    a_is_zero = abs(a) < fp_zero
    b_is_zero = abs(b) < fp_zero
    if a_is_zero or b_is_zero:
        return a_is_zero and b_is_zero
    return abs(sp.log10(abs(a)) - sp.log10(abs(b))) < 1
def same_sign(a, b, fp_zero):
    """Check if two floats (or probably fine for other numbers) have the
    same sign. Throw an error on NaN values. Treat small floats as zero and
    treat zero as not having a sign.

    Raises:
        ValueError: if either input is NaN.
    """
    # Bug fix: the original tested ``a == sp.NaN``, but NaN never compares
    # equal to anything (including itself), so the documented error could
    # never fire. Use math.isnan for a real NaN check.
    if math.isnan(a) or math.isnan(b):
        raise ValueError("NaN(s) passed to sign comparison functions")
    elif (abs(a) < fp_zero) and (abs(b) < fp_zero):
        # Both effectively zero: zero has no sign, count as matching.
        return True
    else:
        return math.copysign(1, a) == math.copysign(1, b)
# Some useful asserts. We explicitly use the assert command in each
# (instead of defining the asserts in terms of the bool-returning functions
# above) to get useful output from nose -d.
def assert_almost_equal(a, b, tol=1e-9):
    """Assert that ``a`` and ``b`` differ by less than ``tol``."""
    assert abs(a - b) < tol


def assert_almost_zero(a, tol=1e-9):
    """Assert that ``a`` is within ``tol`` of zero."""
    assert abs(a) < tol


def assert_list_almost_equal(list_a, list_b, tol=1e-9):
    """Assert equal lengths and element-wise agreement of two iterables."""
    assert len(list(list_a)) == len(list(list_b))
    for x, y in zip(list_a, list_b):
        assert abs(x - y) < tol


def assert_list_almost_zero(values, tol=1e-9):
    """Assert that every entry of ``values`` is within ``tol`` of zero."""
    for value in values:
        assert abs(value) < tol
def assert_sym_eq(a, b):
    """Compare symbolic expressions. Note that the simplification algorithm
    is not completely robust: might give false negatives (but never false
    positives).

    Try adding extra simplifications if needed, e.g. add .trigsimplify() to
    the end of my_simp.
    """
    def my_simp(expr):
        # Can't .expand() ints, so catch the zero case separately.
        try:
            return expr.expand().simplify()
        except AttributeError:
            return expr

    # Python 2 print statements: show both simplified forms for debugging
    # when the assertion below fails.
    print
    print sympy.pretty(my_simp(a))
    print "equals"
    print sympy.pretty(my_simp(b))
    print

    # Try to simplify the difference to zero
    assert (my_simp(a - b) == 0)
def assert_same_sign(a, b, fp_zero=1e-9):
    """Assert that ``a`` and ``b`` share a sign (mirrors ``same_sign``).

    Magnitudes below ``fp_zero`` are treated as (signless) zero.

    Raises:
        ValueError: if either input is NaN.
    """
    # Bug fix: ``a == sp.NaN`` can never be true because NaN compares
    # unequal to everything; use math.isnan for a real NaN check.
    if math.isnan(a) or math.isnan(b):
        raise ValueError("NaN(s) passed to sign comparison functions")
    elif (abs(a) < fp_zero) and (abs(b) < fp_zero):
        # Both effectively zero: nothing to compare.
        assert True
    else:
        assert math.copysign(1, a) == math.copysign(1, b)
def assert_same_order_of_magnitude(a, b, fp_zero=1e-14):
    """Check that log10(abs(.)) are nearby for a and b. If a or b is below
    fp_zero then the other is checked in the same way for closeness to
    fp_zero (after checking that it is not also below fp_zero, for safety
    with log10).
    """
    # NOTE(review): the first branch neither returns nor chains via elif,
    # so when only ``a`` is below fp_zero control falls through into the
    # second if/else and log10 is evaluated on the tiny ``a`` too --
    # possibly an intended elif; confirm against the boolean variant
    # same_order_of_magnitude above.
    if abs(a) < fp_zero:
        assert abs(b) < fp_zero or (
            sp.log10(abs(b)) - sp.log10(abs(fp_zero)) < 1)

    if abs(b) < fp_zero:
        assert abs(a) < fp_zero or (
            sp.log10(abs(a)) - sp.log10(abs(fp_zero)) < 1)
    else:
        assert (abs(sp.log10(abs(a)) - sp.log10(abs(b))) < 1)
# Spherical polar coordinates asserts
def assert_azi_in_range(sph):
    """Assert the azimuthal angle lies in [0, 2*pi] (inclusive, with
    floating-point tolerance at the endpoints)."""
    assert sph.azi > 0 or almost_equal(sph.azi, 0.0)
    assert sph.azi < 2*pi or almost_equal(sph.azi, 2*pi)


def assert_polar_in_range(sph):
    """Assert the polar angle lies in [0, pi]."""
    assert 0 <= sph.pol <= pi
# Convert symbolic expressions to useful python functions
# ============================================================
def symb2deriv(exact_symb, order):
    """Build a callable (t, y) -> the order-th time derivative of
    ``exact_symb``, with occurrences of the exact solution rewritten in
    terms of the symbol y."""
    t, y, Dy = sympy.symbols('t y Dy')
    dnydtn = sympy.diff(exact_symb, t, order).subs(exact_symb, y)
    return sympy.lambdify((t, y), dnydtn)
def symb2residual(exact_symb):
    """Build the ODE residual r(t, y, Dy) = Dy - dy/dt for the equation
    satisfied by ``exact_symb``, as a plain callable."""
    t, y, Dy = sympy.symbols('t y Dy')
    rhs = sympy.diff(exact_symb, t, 1).subs(exact_symb, y)
    return sympy.lambdify((t, y, Dy), Dy - rhs)
def symb2jacobian(exact_symb):
    """Build the scalar Jacobian d(dy/dt)/dy of the ODE satisfied by
    ``exact_symb``, as a callable of (t, y)."""
    t, y, Dy = sympy.symbols('t y Dy')
    rhs = sympy.diff(exact_symb, t, 1).subs(exact_symb, y)
    jac = sympy.diff(rhs, y, 1).subs(exact_symb, y)
    return sympy.lambdify((t, y), jac)
def symb2functions(exact_symb):
    """Bundle the exact solution, residual, derivatives 1..9 and Jacobian
    as plain callables.

    Returns (exact, residual, dys, jacobian), where dys[n] is the n-th
    time derivative (dys[0] is a None placeholder so the index matches
    the derivative order).
    """
    t, y, Dy = sympy.symbols('t y Dy')
    exact = sympy.lambdify(t, exact_symb)
    residual = symb2residual(exact_symb)
    # Compatibility fix: ``[None] + map(...)`` relies on Python 2's map()
    # returning a list and raises TypeError on Python 3; building the list
    # explicitly is identical on Python 2.
    dys = [None] + [symb2deriv(exact_symb, order) for order in range(1, 10)]
    jacobian = symb2jacobian(exact_symb)
    return exact, residual, dys, jacobian
# Coordinate systems
# ============================================================
# Some data structures
# Simple value types for the two coordinate systems.
SphPoint = collections.namedtuple('SphPoint', ['r', 'azi', 'pol'])
CartPoint = collections.namedtuple('CartPoint', ['x', 'y', 'z'])


def cart2sph(cartesian_point):
    """Convert a 3D cartesian tuple (x, y, z) into spherical polars.

    Returned as SphPoint(r, azi, pol) = (r, theta, phi) following the
    convention from mathworld. In Mallinson's notation this is
    (r, phi, theta).
    """
    x, y, z = cartesian_point
    r = sp.linalg.norm(cartesian_point, 2)

    # atan2 gives the azimuth in [-pi, pi]; shift into [0, 2*pi].
    azi = atan2(y, x)
    if azi < 0:
        azi += 2*pi

    # The polar angle is undefined at the origin; pick 0 there to dodge
    # the singular point (and the division by ~zero).
    polar = 0 if r < 1e-9 else acos(z/r)

    return SphPoint(r, azi, polar)


def sph2cart(spherical_point):
    """Convert (r, azi, pol) spherical polars back to cartesian
    coordinates. See cart2sph for the angle convention."""
    r, azi, pol = spherical_point
    sin_pol = sin(pol)
    return CartPoint(r * cos(azi) * sin_pol,
                     r * sin(azi) * sin_pol,
                     r * cos(pol))
def array2sph(point_as_array):
    """Convert a 1D array representation to a SphPoint.

    Length-2 arrays are taken as (azi, pol) angles on the unit sphere;
    length-3 arrays as cartesian (x, y, z) coordinates.

    Raises:
        IndexError: for any other length.
    """
    assert point_as_array.ndim == 1

    n_entries = point_as_array.shape[0]
    if n_entries == 2:
        # Two entries: spherical angles on the unit sphere.
        return SphPoint(1.0, point_as_array[0], point_as_array[1])
    elif n_entries == 3:
        # Three entries: presumably cartesian.
        return cart2sph(SphPoint(point_as_array[0],
                                 point_as_array[1],
                                 point_as_array[2]))
    else:
        raise IndexError
def plot_sph_points(sphs, title='Path of m'):
    """Plot a sequence of SphPoints as a 3D path on/near the unit sphere,
    marking the start point and the z-axis. Returns the matplotlib figure.
    """
    # Bug fix: matplotlib was never imported at module level, so ``plt``
    # raised NameError when this function was called. Import locally; the
    # mplot3d import registers the '3d' projection on older matplotlib.
    import matplotlib.pyplot as plt
    from mpl_toolkits import mplot3d  # noqa: F401

    # NOTE: relies on Python 2 map() returning a list (indexed below).
    carts = map(sph2cart, sphs)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    # Plot the path
    xs, ys, zs = unzip(carts)
    ax.plot(xs, ys, zs)

    # Draw on the starting point
    start_point = carts[0]
    ax.scatter(start_point.x, start_point.y, start_point.z)

    # Draw on z-axis
    ax.plot([0, 0], [0, 0], [-1, 1], '--')

    plt.title(title)

    # Axes
    ax.set_zlim(-1, 1)
    ax.set_xlim(-1, 1)
    ax.set_ylim(-1, 1)

    return fig
def plot_polar_vs_time(sphs, times, title='Polar angle vs time'):
    """Plot the polar angle of a sequence of SphPoints against time.
    Returns the matplotlib figure.
    """
    # Bug fix: matplotlib was never imported at module level, so ``plt``
    # raised NameError when this function was called. Import locally.
    import matplotlib.pyplot as plt

    fig = plt.figure()
    ax = fig.add_subplot(111)

    rs, azis, pols = unzip(sphs)
    ax.plot(times, pols)

    plt.xlabel('time/ arb. units')
    plt.ylabel('polar angle/ radians')
    plt.title(title)

    return fig
class MagParameters():
    """Material and applied-field parameters for LLG-style calculations.

    Defaults are in normalised units (Ms = mu0 = gamma = 1, no anisotropy).
    """

    # Class-level defaults, shared by all instances.
    gamma = 1.0
    K1 = 0.0
    Ms = 1.0
    mu0 = 1.0
    easy_axis = sp.array([0, 0, 1])

    def __init__(self, alpha=1.0):
        self.alpha = alpha

    def Hvec(self, t):
        """Applied field vector at time t (constant -2 along z by default)."""
        return sp.array([0, 0, -2])

    def dimensional_H(self, t):
        """Magnitude of the applied field."""
        return sp.linalg.norm(self.Hvec(t), ord=2)

    def H(self, t):
        """Magnitude of the applied field, normalised by Ms."""
        return sp.linalg.norm(self.Hvec(t)/self.Ms, ord=2)

    def dimensional_Hk(self):
        """Ansiotropy field strength."""
        # ??ds if m is always unit vector then this is right, if not we
        # need extra factor of Ms on bottom...
        return (2 * self.K1) / (self.mu0 * self.Ms)

    def Hk(self):
        """Ansiotropy field strength, normalised by Ms."""
        return self.dimensional_Hk() / self.Ms

    def dimensional_Hk_vec(self, m_cart):
        """Uniaxial anisotropy field. Magnetisation should be in normalised
        cartesian form."""
        projection = sp.dot(m_cart, self.easy_axis)
        return self.dimensional_Hk() * projection * self.easy_axis

    def Hk_vec(self, m_cart):
        """Normalised uniaxial anisotropy field. Magnetisation should be in
        normalised cartesian form."""
        return self.dimensional_Hk_vec(m_cart) / self.Ms

    def __repr__(self):
        """Return a string representation of the parameters."""
        return ("alpha = " + str(self.alpha)
                + ", gamma = " + str(self.gamma) + ",\n"
                + "H(0) = " + str(self.Hvec(0))
                + ", K1 = " + str(self.K1)
                + ", Ms = " + str(self.Ms))
# Smaller helper functions
# ============================================================
def relative_error(exact, estimate):
    """Relative error of ``estimate`` against the ``exact`` value.

    The sign of ``exact`` is kept in the denominator, so a negative exact
    value gives a negative result.
    """
    return abs(exact - estimate) / exact


def dts_from_ts(ts):
    """Successive differences (time steps) of a list of times."""
    return [after - before for before, after in zip(ts[:-1], ts[1:])]


# Alias: ts -> dts conversion.
ts2dts = dts_from_ts


def dts2ts(base, dts):
    """Inverse of dts_from_ts: accumulate time steps starting from ``base``."""
    ts = [base]
    for dt in dts:
        ts.append(ts[-1] + dt)
    return ts


def ts2dtn(ts):
    """Most recent time step."""
    return ts[-1] - ts[-2]


def ts2dtnm1(ts):
    """Second most recent time step."""
    return ts[-2] - ts[-3]
# Matrices
# ============================================================
def skew(vector_with_length_3):
    """Return the 3x3 skew-symmetric (cross-product) matrix of a 3-vector,
    i.e. dot(skew(a), b) == a x b.

    Raises:
        TypeError: if the input does not have length 3.
    """
    v = vector_with_length_3
    if len(v) != 3:
        raise TypeError("skew is only defined for vectors of length 3")

    vx, vy, vz = v[0], v[1], v[2]
    return sp.array([[0, -vz, vy],
                     [vz, 0, -vx],
                     [-vy, vx, 0]])
# Test this file's code
# ============================================================
import unittest
from random import random
import nose.tools as nt
class TestCoordinateConversion(unittest.TestCase):
    """Round-trip and range checks for cart2sph / sph2cart."""

    # Pick some coordinate lists to try out
    def setUp(self):
        def carttuple(x):
            # Random cartesian point with entries in [0, x).
            return (x*random(), x*random(), x*random())
        # NOTE(review): map() returns one-shot iterators on Python 3; these
        # fixtures rely on Python 2 list semantics (reused across checks).
        self.carts = map(carttuple, sp.linspace(0, 2, 20))
        self.sphs = map(cart2sph, self.carts)

    # Check that applying both operations gives back the same thing
    def check_cart_sph_composition(self, cart, sph):
        assert_list_almost_equal(cart, sph2cart(sph))

    def test_composition_is_identity(self):
        for (cart, sph) in zip(self.carts, self.sphs):
            self.check_cart_sph_composition(cart, sph)

    # Check that the azimuthal angle is in the correct range
    def test_azi_range(self):
        for sph in self.sphs:
            assert_azi_in_range(sph)

    def test_azimuthal_edge_cases(self):
        # (-1, -1, 0) lies in the third quadrant: azi = pi + pi/4.
        assert_almost_equal(cart2sph((-1, -1, 0)).azi, 5*pi/4)

    # Check that the polar angle is in the correct range
    def test_polar_range(self):
        for sph in self.sphs:
            assert_polar_in_range(sph)
def example_f(x, y):
    """Smooth two-parameter example function used by the sweep test."""
    return cos(x) + sin(y)
def test_parallel_sweep():
    """Check that a parallel run gives the same results as a non-parallel
    run for a simple function.
    """
    xs = sp.linspace(-pi, +pi, 30)
    ys = sp.linspace(-pi, +pi, 30)
    parallel_result = list(parallel_parameter_sweep(example_f, [xs, ys]))
    serial_result = list(parallel_parameter_sweep(example_f, [xs, ys], True))
    exact_result = list(it.starmap(example_f, it.product(xs, ys)))

    # Use sets for the comparison because the parallel computation destroys
    # any ordering we had before (and sets order their elements).
    # NOTE(review): set iteration order is not guaranteed to be sorted for
    # floats; this comparison relies on CPython hashing behaviour - confirm.
    assert_list_almost_equal(set(parallel_result), set(exact_result))
    assert_list_almost_equal(set(serial_result), set(exact_result))
def test_skew_size_check():
    """Check that skew rejects inputs that are not length-3 vectors."""
    # NOTE(review): each call passes [x] (a one-element list) to skew, so
    # the TypeError always comes from len([x]) == 1 regardless of x --
    # probably intended as skew(x); confirm before "fixing", since
    # skew(sp.identity(3)) would NOT raise (its len is 3).
    xs = [sp.linspace(0.0, 1.0, 4), 1.0, sp.identity(3)]
    for x in xs:
        nt.assert_raises(TypeError, skew, [x])
def test_skew():
    """Sanity-check skew via cross-product identities."""
    xs = [sp.linspace(0.0, 1.0, 3), sp.zeros((3, 1)), [1, 2, 3], ]
    a = sp.rand(3)
    for x in xs:
        # Anything crossed with itself is zero:
        skew_mat = skew(x)
        assert_list_almost_zero(sp.dot(skew_mat, sp.array(x)))

        # a x b = - b x a
        assert_list_almost_zero(sp.dot(skew_mat, a) + sp.dot(a, skew_mat))
def test_dts2ts():
    """Check that ts2dts and dts2ts are the inverse of each other (except for
    the requirement for a "base" value in dts2ts).
    """
    t = sympy.symbols('t')
    # Bug fix: the range syntax 'Delta9:0' is empty (start after stop), so
    # the original created no symbols and the loop below compared nothing.
    # 'Delta0:9' gives the nine symbols Delta0..Delta8 as intended.
    # NOTE(review): sympy's assumption keyword is normally lowercase
    # 'real'; 'Real' is kept here unchanged -- confirm.
    dts = sympy.symbols('Delta0:9', Real=True)
    results = ts2dts(dts2ts(t, dts))
    for a, b in zip(results, dts):
        assert_sym_eq(a, b)
| gpl-3.0 |
overtherain/scriptfile | software/googleAppEngine/lib/django_1_4/django/conf/locale/ko/formats.py | 313 | 2016 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y년 n월 j일'
TIME_FORMAT = 'A g:i:s'
DATETIME_FORMAT = 'Y년 n월 j일 g:i:s A'
YEAR_MONTH_FORMAT = 'Y년 F월'
MONTH_DAY_FORMAT = 'F월 j일'
SHORT_DATE_FORMAT = 'Y-n-j.'
SHORT_DATETIME_FORMAT = 'Y-n-j H:i'
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
'%Y년 %m월 %d일', # '2006년 10월 25일', with localized suffix.
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
'%H시 %M분 %S초', # '14시 30분 59초'
'%H시 %M분', # '14시 30분'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
'%Y년 %m월 %d일 %H시 %M분 %S초', # '2006년 10월 25일 14시 30분 59초'
'%Y년 %m월 %d일 %H시 %M분', # '2006년 10월 25일 14시 30분'
)
# Number formatting for the Korean locale: period decimal point, comma
# thousands separator, digits grouped in threes.
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| mit |
boundary/plugin-rabbitmq | rabbitmq_monitoring.py | 3 | 5104 | #!/usr/bin/env python
"""
This script extracts metrics from a RabbitMQ instance.
The usage of this script is as follows:
rabbitmq_monitoring.py
"""
import json
from time import sleep
import collections
import sys
from os.path import basename
import urllib2
from base64 import b64encode
from string import replace
#
# Maps the API path names to Boundary Metric Identifiers
#
KEY_MAPPING = [
("object_totals_queues", "RABBITMQ_OBJECT_TOTALS_QUEUES"),
("object_totals_channels", "RABBITMQ_OBJECT_TOTALS_CHANNELS"),
("object_totals_exchanges", "RABBITMQ_OBJECT_TOTALS_EXCHANGES"),
("object_totals_consumers", "RABBITMQ_OBJECT_TOTALS_CONSUMERS"),
("object_totals_connections", "RABBITMQ_OBJECT_TOTALS_CONNECTIONS"),
("message_stats_deliver", "RABBITMQ_MESSAGE_STATS_DELIVER"),
("message_stats_deliver_details_rate", "RABBITMQ_MESSAGE_STATS_DELIVER_DETAILS_RATE"),
("message_stats_deliver_no_ack", "RABBITMQ_MESSAGE_STATS_DELIVER_NO_ACK"),
("message_stats_deliver_no_ack_details_rate", "RABBITMQ_MESSAGE_STATS_DELIVER_NO_ACK_DETAILS_RATE"),
("message_stats_deliver_get", "RABBITMQ_MESSAGE_STATS_DELIVER_GET"),
("message_stats_deliver_get_details_rate", "RABBITMQ_MESSAGE_STATS_DELIVER_GET_DETAILS_RATE"),
("message_stats_redeliver", "RABBITMQ_MESSAGE_STATS_REDELIVER"),
("message_stats_redeliver_details_rate", "RABBITMQ_MESSAGE_STATS_REDELIVER_DETAILS_RATE"),
("message_stats_publish", "RABBITMQ_MESSAGE_STATS_PUBLISH"),
("message_stats_publish_details_rate", "RABBITMQ_MESSAGE_STATS_PUBLISH_DETAILS_RATE"),
("queue_totals_messages", "RABBITMQ_QUEUE_TOTALS_MESSAGES"),
("queue_totals_messages_details_rate", "RABBITMQ_QUEUE_TOTALS_MESSAGES_DETAILS_RATE"),
("queue_totals_messages_ready", "RABBITMQ_QUEUE_TOTALS_MESSAGES_READY"),
("queue_totals_messages_ready_details_rate", "RABBITMQ_QUEUE_TOTALS_MESSAGES_READY_DETAILS_RATE"),
("queue_totals_messages_unacknowledged", "RABBITMQ_QUEUE_TOTALS_MESSAGES_UNACKNOWLEDGED"),
("queue_totals_messages_unacknowledged_details_rate","RABBITMQ_QUEUE_TOTALS_MESSAGES_UNACKNOWLEDGED_DETAILS_RATE"),
("mem_used","RABBITMQ_MEM_USED"),
("disk_free","RABBITMQ_DISK_FREE")
]
class RabitMQMonitoring():
    """Polls a RabbitMQ management API and prints Boundary-format metrics.

    Configuration (host, port, credentials, poll interval) is read from a
    ``param.json`` file in the working directory via get_configuration().
    """

    def __init__(self):
        self.pollInterval = None
        self.hostname = None
        self.port = None
        self.user = None
        self.password = None
        self.url = None

    def send_get(self, url):
        """GET ``url`` with basic auth and return the decoded JSON body.

        NOTE(review): this references the third-party ``requests`` package,
        which is never imported in this module, so calling it raises
        NameError. It appears unused; kept for interface compatibility.
        """
        response = requests.get(url, auth=(self.user, self.password))
        return response.json()

    def call_api(self, endpoint):
        """GET ``endpoint`` from the RabbitMQ management API, parsed as JSON.

        Raises:
            urllib2.HTTPError: on a non-2xx HTTP response.
            urllib2.URLError: on a connection failure.
        """
        url = self.url + endpoint
        auth = b64encode(self.user + ":" + self.password)
        headers = {
            "Accept": "application/json",
            "Authorization": "Basic %s" % auth,
        }
        request = urllib2.Request(url, headers=headers)
        try:
            response = urllib2.urlopen(request)
        except urllib2.HTTPError as e:
            # Bug fix: HTTPError is a subclass of URLError, so it must be
            # caught first (the original order made this branch
            # unreachable). The original handler also referenced an
            # undefined name ``h`` and passed multiple arguments to
            # sys.stderr.write(), which takes exactly one string.
            sys.stderr.write(
                "Error getting data from RabbitMQ API: %s (%d), Error: %s" %
                (getattr(e, "reason", "Unknown Reason"), e.code, e.read()))
            raise
        except urllib2.URLError as e:
            sys.stderr.write(
                "Error connecting to host: {0} ({1}), Error: {2}".format(
                    self.hostname, e.errno, e.message))
            raise
        return json.load(response)

    def print_dict(self, dic):
        """Print 'METRIC value name' lines for every known metric in ``dic``."""
        for (key, metric_id) in KEY_MAPPING:
            # Only emit metrics actually present in the flattened API data.
            if dic.get(key, "-") != "-":
                name = dic.get("name")
                print("%s %s %s" % (metric_id.upper(), dic.get(key, "-"), name))
        sys.stdout.flush()

    def get_details(self):
        """Fetch /overview and /nodes, merge, flatten and print the metrics."""
        overview = self.call_api("overview")
        nodes = self.call_api("nodes")
        if nodes:
            # Merge the first node's stats (mem_used, disk_free, ...) in.
            overview.update(nodes[0])
        if overview:
            data = self.flatten_dict(overview)
            self.print_dict(data)

    def flatten_dict(self, dic, parent_key='', sep='_'):
        """Flatten nested dicts into one dict with ``sep``-joined keys."""
        # Compatibility fix: MutableMapping moved to collections.abc in
        # Python 3 (and was removed from collections in 3.10).
        try:
            from collections import abc as _collections_abc
        except ImportError:
            import collections as _collections_abc
        items = []
        for k, v in dic.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, _collections_abc.MutableMapping):
                items.extend(self.flatten_dict(v, new_key, sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    def extractMetrics(self):
        """One-shot metric extraction (alias for get_details)."""
        self.get_details()

    def get_configuration(self):
        '''
        1) Reads the param.json file that contains the configuration of the plugin.
        2) Sets the values to member variables of the class instance.
        '''
        with open('param.json') as f:
            parameters = json.loads(f.read())
        self.hostname = parameters['hostname']
        self.port = parameters['port']
        self.pollInterval = float(parameters['pollInterval']) / 1000.0
        self.user = parameters['user']
        self.password = parameters['password']
        self.url = "http://" + self.hostname + ":" + self.port + "/api/"

    def continuous_monitoring(self):
        """Poll get_details forever, sleeping pollInterval between polls.

        Failures are reported to stderr and retried rather than fatal.
        """
        while True:
            try:
                self.get_details()
                sleep(float(self.pollInterval))
            except Exception:
                sys.stderr.write(
                    "\nTrying to re-connect to host: {0} \n".format(self.hostname))
                sleep(float(self.pollInterval))
if __name__ == "__main__":
    # Entry point: load config from param.json, then poll forever.
    monitor = RabitMQMonitoring()
    monitor.get_configuration()
    monitor.continuous_monitoring()
| apache-2.0 |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/tensorflow/python/ops/resource_variable_ops.py | 13 | 11428 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops to use variables as resources."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_resource_variable_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_resource_variable_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import compat
def _register_variable_read(read, collections, trainable):
  """Add a variable-read op to the requested graph collections.

  Defaults to GLOBAL_VARIABLES when no collections are given; additionally
  registers in TRAINABLE_VARIABLES when `trainable` is set and it is not
  already listed.
  """
  target_collections = collections
  if target_collections is None:
    target_collections = [ops.GraphKeys.GLOBAL_VARIABLES]
  trainable_key = ops.GraphKeys.TRAINABLE_VARIABLES
  if trainable and trainable_key not in target_collections:
    target_collections = list(target_collections) + [trainable_key]
  ops.add_to_collections(target_collections, read)
class ResourceVariable(object):
  """Variable based on resource handles.

  TODO(apassos): fill this out explaining the semantics and Variable
  compatibility when the API has settled more.
  """

  # pylint: disable=unused-argument
  def __init__(self,
               initial_value=None,
               trainable=True,
               collections=None,
               validate_shape=True,
               caching_device=None,
               name=None,
               dtype=None):
    """Creates a variable.

    Args:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called.
        (Note that initializer functions from init_ops.py must first be bound
        to a shape before being used here.)
      trainable: If `True`, the default, also adds the variable to the graph
        collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
        the default list of variables to use by the `Optimizer` classes.
      collections: List of graph collections keys. The new variable is added to
        these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
      validate_shape: Ignored. Provided for compatibility with tf.Variable.
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading. Defaults to the Variable's
        device. If not `None`, caches on another device. Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type.
        If None, either the datatype will be kept (if initial_value is
        a Tensor) or float32 will be used (if it is a Python object convertible
        to a Tensor).

    Raises:
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.
    """
    if initial_value is None:
      raise ValueError("initial_value must be specified.")
    init_from_fn = callable(initial_value)

    if collections is None:
      collections = [ops.GraphKeys.GLOBAL_VARIABLES]
    if not isinstance(collections, (list, tuple, set)):
      raise ValueError(
          "collections argument to Variable constructor must be a list, tuple, "
          "or set. Got %s of type %s" % (collections, type(collections)))
    if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
      collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
    with ops.control_dependencies(None):
      with ops.name_scope(name, "Variable", [] if init_from_fn else
                          [initial_value]) as name:
        if init_from_fn:
          # Use attr_scope and device(None) to simulate the behavior of
          # colocate_with when the variable we want to colocate with doesn't
          # yet exist.
          # pylint: disable=protected-access
          true_name = ops._name_from_scope_name(name)
          attr = attr_value_pb2.AttrValue(
              list=attr_value_pb2.AttrValue.ListValue(
                  s=[compat.as_bytes("loc:@%s" % true_name)]))
          # pylint: disable=protected-access
          with ops.get_default_graph()._attr_scope({"_class": attr}):
            with ops.name_scope("Initializer"), ops.device(None):
              self._initial_value = ops.convert_to_tensor(
                  initial_value(), name="initial_value", dtype=dtype)
            self._handle = gen_resource_variable_ops.var_handle_op(
                shape=self._initial_value.get_shape(),
                dtype=self._initial_value.dtype.base_dtype,
                shared_name=name, name=name)

        # Or get the initial value from a Tensor or Python object.
        else:
          self._initial_value = ops.convert_to_tensor(
              initial_value, name="initial_value", dtype=dtype)
          self._handle = gen_resource_variable_ops.var_handle_op(
              shape=self._initial_value.get_shape(),
              dtype=self._initial_value.dtype.base_dtype,
              shared_name=name, name=name)

        self._dtype = self._initial_value.dtype.base_dtype

        with ops.name_scope("IsInitialized"):
          self._is_initialized_op = (
              gen_resource_variable_ops.var_is_initialized_op(self._handle))
        if initial_value is not None:
          with ops.name_scope("Create"):
            self._initialize_op = gen_resource_variable_ops.assign_variable_op(
                self._handle, self._initial_value)

        with ops.name_scope("Read"):
          self._value = gen_resource_variable_ops.read_variable_op(
              self._handle, dtype=self._dtype)
          if caching_device is not None:
            with ops.device(caching_device):
              self._cached_value = array_ops.identity(self._value)
          else:
            # Default: cache the read on the same device as the handle op.
            with ops.colocate_with(self._handle.op):
              self._cached_value = array_ops.identity(self._value)
        # TODO(apassos) this is terrible monkey-patching required to make
        # initialize_all_variables work. Replace self._value with an explicit
        # class instead of monkey-patching.
        self._value.initializer = self._initialize_op

    ops.add_to_collections(collections, self)

  @property
  def dtype(self):
    """The dtype of this variable."""
    return self._dtype

  @property
  def name(self):
    """The name of the handle for this variable."""
    return self._handle.name

  def get_shape(self):
    """The shape of this variable."""
    return self._value.get_shape()

  @property
  def create(self):
    """The op responsible for initializing this variable."""
    return self._initialize_op

  @property
  def handle(self):
    """The handle by which this variable can be accessed."""
    return self._handle

  @property
  def value(self):
    """A cached operation which reads the value of this variable."""
    return self._cached_value

  def _as_graph_element(self):
    """Conversion function for Graph.as_graph_element()."""
    return self._value

  @property
  def initializer(self):
    """The op responsible for initializing this variable."""
    return self._initialize_op

  @property
  def op(self):
    """The op for this variable."""
    return self._handle.op

  def eval(self, session=None):
    """Evaluates and returns the value of this variable."""
    return self._value.eval(session=session)

  def read_value(self, collections=None, trainable=True):
    """Constructs an op which reads the value of this variable.

    Should be used when there are multiple reads, or when it is desirable to
    read the value only after some condition is true.

    Args:
      collections: any collections in which this operation should be inserted.
      trainable: whether this read is to be used for training.

    Returns:
      the read operation.
    """
    with ops.name_scope("Read"):
      value = gen_resource_variable_ops.read_variable_op(
          self._handle, dtype=self._dtype)
    _register_variable_read(value, collections=collections, trainable=trainable)
    return array_ops.identity(value)

  def sparse_read(self, indices, collections=None, trainable=True, name=None):
    """Reads the value of this variable sparsely, using `gather`."""
    with ops.name_scope("Gather" if name is None else name):
      value = gen_resource_variable_ops.resource_gather(
          self._handle, indices, dtype=self._dtype)
    _register_variable_read(value, collections=collections, trainable=trainable)
    return array_ops.identity(value)

  @staticmethod
  def _OverloadAllOperators():  # pylint: disable=invalid-name
    """Register overloads for all operators."""
    for operator in ops.Tensor.OVERLOADABLE_OPERATORS:
      ResourceVariable._OverloadOperator(operator)
    # For slicing, bind getitem differently than a tensor (use SliceHelperVar
    # instead)
    # pylint: disable=protected-access
    setattr(ResourceVariable, "__getitem__", array_ops._SliceHelperVar)

  def _AsTensor(self):
    # Used by the operator overloads below to read the (cached) value.
    return self.value

  @staticmethod
  def _OverloadOperator(operator):  # pylint: disable=invalid-name
    """Defer an operator overload to `ops.Tensor`.

    We pull the operator out of ops.Tensor dynamically to avoid ordering issues.

    Args:
      operator: string. The operator name.
    """
    def _run_op(a, *args):
      # pylint: disable=protected-access
      return getattr(ops.Tensor, operator)(a._AsTensor(), *args)
    # Propagate __doc__ to wrapper
    try:
      _run_op.__doc__ = getattr(ops.Tensor, operator).__doc__
    except AttributeError:
      pass

    setattr(ResourceVariable, operator, _run_op)

  # Ensure numpy defers binary operations to our overloads.
  __array_priority__ = 100
# pylint: disable=unused-argument,protected-access
def _dense_var_to_tensor(var, dtype=None, name=None, as_ref=False):
  """Tensor-conversion function registered for ResourceVariable below."""
  if dtype is not None and dtype != var.value.dtype:
    # NOTE(review): debug print left in -- a mismatched dtype is rejected
    # (NotImplemented) rather than cast.
    print("trying to switch the dtype to ", dtype, " from ", var.value.dtype)
    return NotImplemented
  if as_ref:
    # Reference requested: hand back the raw read op, not the cached copy.
    return var._value
  return var._cached_value
# pylint: enable=unused-argument,protected-access
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
ops.register_tensor_conversion_function(ResourceVariable, _dense_var_to_tensor)

# pylint: disable=protected-access
# Install Tensor operator overloads (+, *, slicing, ...) on ResourceVariable.
ResourceVariable._OverloadAllOperators()
dvliman/jaikuengine | explore/tests.py | 30 | 2261 | # Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import simplejson
from django.conf import settings
from common import api
from common import normalize
from common import profile
from common import util
from common.tests import ViewTestCase
class ExploreTest(ViewTestCase):
def test_explore_when_signed_out(self):
l = profile.label('explore_get_public')
r = self.client.get('/explore')
l.stop()
self.assertContains(r, "Latest Public Posts")
self.assertTemplateUsed(r, 'explore/templates/recent.html')
def test_explore_when_signed_in(self):
self.login('popular')
l = profile.label('explore_get_logged_in')
r = self.client.get('/explore')
l.stop()
self.assertContains(r, "Latest Public Posts")
self.assertTemplateUsed(r, 'explore/templates/recent.html')
def test_rss_and_atom_feeds(self):
r = self.client.get('/explore')
self.assertContains(r, 'href="/explore/rss"')
self.assertContains(r, 'href="/explore/atom"')
def test_json_feed(self):
urls = ['/feed/json', '/explore/json']
for u in urls:
r = self.client.get(u)
self.assertEqual(r.status_code, 200)
j = simplejson.loads(r.content)
self.assertEqual(j['url'], '/explore')
self.assertTemplateUsed(r, 'explore/templates/recent.json')
def test_json_feed_with_callback(self):
urls = ['/feed/json', '/explore/json']
for u in urls:
r = self.client.get('/feed/json', {'callback': 'callback'})
self.assertContains(r, '"url": "\/explore",', status_code=200)
self.failIf(not re.match('callback\(', r.content))
self.failIf(not re.search('\);$', r.content))
self.assertTemplateUsed(r, 'explore/templates/recent.json')
| apache-2.0 |
harmy/kbengine | kbe/res/scripts/common/Lib/distutils/command/build_py.py | 2 | 17369 | """distutils.command.build_py
Implements the Distutils 'build_py' command."""
import sys, os
import sys
from glob import glob
from distutils.core import Command
from distutils.errors import *
from distutils.util import convert_path, Mixin2to3
from distutils import log
class build_py (Command):
description = "\"build\" pure Python modules (copy to build directory)"
user_options = [
('build-lib=', 'd', "directory to \"build\" (copy) to"),
('compile', 'c', "compile .py to .pyc"),
('no-compile', None, "don't compile .py files [default]"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('force', 'f', "forcibly build everything (ignore file timestamps)"),
]
boolean_options = ['compile', 'force']
negative_opt = {'no-compile' : 'compile'}
def initialize_options(self):
self.build_lib = None
self.py_modules = None
self.package = None
self.package_data = None
self.package_dir = None
self.compile = 0
self.optimize = 0
self.force = None
def finalize_options(self):
self.set_undefined_options('build',
('build_lib', 'build_lib'),
('force', 'force'))
# Get the distribution options that are aliases for build_py
# options -- list of packages and list of modules.
self.packages = self.distribution.packages
self.py_modules = self.distribution.py_modules
self.package_data = self.distribution.package_data
self.package_dir = {}
if self.distribution.package_dir:
for name, path in self.distribution.package_dir.items():
self.package_dir[name] = convert_path(path)
self.data_files = self.get_data_files()
# Ick, copied straight from install_lib.py (fancy_getopt needs a
# type system! Hell, *everything* needs a type system!!!)
if not isinstance(self.optimize, int):
try:
self.optimize = int(self.optimize)
assert 0 <= self.optimize <= 2
except (ValueError, AssertionError):
raise DistutilsOptionError("optimize must be 0, 1, or 2")
def run(self):
# XXX copy_file by default preserves atime and mtime. IMHO this is
# the right thing to do, but perhaps it should be an option -- in
# particular, a site administrator might want installed files to
# reflect the time of installation rather than the last
# modification time before the installed release.
# XXX copy_file by default preserves mode, which appears to be the
# wrong thing to do: if a file is read-only in the working
# directory, we want it to be installed read/write so that the next
# installation of the same module distribution can overwrite it
# without problems. (This might be a Unix-specific issue.) Thus
# we turn off 'preserve_mode' when copying to the build directory,
# since the build directory is supposed to be exactly what the
# installation will look like (ie. we preserve mode when
# installing).
# Two options control which modules will be installed: 'packages'
# and 'py_modules'. The former lets us work with whole packages, not
# specifying individual modules at all; the latter is for
# specifying modules one-at-a-time.
if self.py_modules:
self.build_modules()
if self.packages:
self.build_packages()
self.build_package_data()
self.byte_compile(self.get_outputs(include_bytecode=0))
def get_data_files(self):
"""Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
data = []
if not self.packages:
return data
for package in self.packages:
# Locate package source directory
src_dir = self.get_package_dir(package)
# Compute package build directory
build_dir = os.path.join(*([self.build_lib] + package.split('.')))
# Length of path to strip from found files
plen = 0
if src_dir:
plen = len(src_dir)+1
# Strip directory from globbed filenames
filenames = [
file[plen:] for file in self.find_data_files(package, src_dir)
]
data.append((package, src_dir, build_dir, filenames))
return data
def find_data_files(self, package, src_dir):
"""Return filenames for package's data files in 'src_dir'"""
globs = (self.package_data.get('', [])
+ self.package_data.get(package, []))
files = []
for pattern in globs:
# Each pattern has to be converted to a platform-specific path
filelist = glob(os.path.join(src_dir, convert_path(pattern)))
# Files that match more than one pattern are only added once
files.extend([fn for fn in filelist if fn not in files])
return files
def build_package_data(self):
"""Copy data files into build directory"""
lastdir = None
for package, src_dir, build_dir, filenames in self.data_files:
for filename in filenames:
target = os.path.join(build_dir, filename)
self.mkpath(os.path.dirname(target))
self.copy_file(os.path.join(src_dir, filename), target,
preserve_mode=False)
def get_package_dir(self, package):
"""Return the directory, relative to the top of the source
distribution, where package 'package' should be found
(at least according to the 'package_dir' option, if any)."""
path = package.split('.')
if not self.package_dir:
if path:
return os.path.join(*path)
else:
return ''
else:
tail = []
while path:
try:
pdir = self.package_dir['.'.join(path)]
except KeyError:
tail.insert(0, path[-1])
del path[-1]
else:
tail.insert(0, pdir)
return os.path.join(*tail)
else:
# Oops, got all the way through 'path' without finding a
# match in package_dir. If package_dir defines a directory
# for the root (nameless) package, then fallback on it;
# otherwise, we might as well have not consulted
# package_dir at all, as we just use the directory implied
# by 'tail' (which should be the same as the original value
# of 'path' at this point).
pdir = self.package_dir.get('')
if pdir is not None:
tail.insert(0, pdir)
if tail:
return os.path.join(*tail)
else:
return ''
def check_package(self, package, package_dir):
# Empty dir name means current directory, which we can probably
# assume exists. Also, os.path.exists and isdir don't know about
# my "empty string means current dir" convention, so we have to
# circumvent them.
if package_dir != "":
if not os.path.exists(package_dir):
raise DistutilsFileError(
"package directory '%s' does not exist" % package_dir)
if not os.path.isdir(package_dir):
raise DistutilsFileError(
"supposed package directory '%s' exists, "
"but is not a directory" % package_dir)
# Require __init__.py for all but the "root package"
if package:
init_py = os.path.join(package_dir, "__init__.py")
if os.path.isfile(init_py):
return init_py
else:
log.warn(("package init file '%s' not found " +
"(or not a regular file)"), init_py)
# Either not in a package at all (__init__.py not expected), or
# __init__.py doesn't exist -- so don't return the filename.
return None
def check_module(self, module, module_file):
if not os.path.isfile(module_file):
log.warn("file %s (for module %s) not found", module_file, module)
return False
else:
return True
def find_package_modules(self, package, package_dir):
self.check_package(package, package_dir)
module_files = glob(os.path.join(package_dir, "*.py"))
modules = []
setup_script = os.path.abspath(self.distribution.script_name)
for f in module_files:
abs_f = os.path.abspath(f)
if abs_f != setup_script:
module = os.path.splitext(os.path.basename(f))[0]
modules.append((package, module, f))
else:
self.debug_print("excluding %s" % setup_script)
return modules
def find_modules(self):
"""Finds individually-specified Python modules, ie. those listed by
module name in 'self.py_modules'. Returns a list of tuples (package,
module_base, filename): 'package' is a tuple of the path through
package-space to the module; 'module_base' is the bare (no
packages, no dots) module name, and 'filename' is the path to the
".py" file (relative to the distribution root) that implements the
module.
"""
# Map package names to tuples of useful info about the package:
# (package_dir, checked)
# package_dir - the directory where we'll find source files for
# this package
# checked - true if we have checked that the package directory
# is valid (exists, contains __init__.py, ... ?)
packages = {}
# List of (package, module, filename) tuples to return
modules = []
# We treat modules-in-packages almost the same as toplevel modules,
# just the "package" for a toplevel is empty (either an empty
# string or empty list, depending on context). Differences:
# - don't check for __init__.py in directory for empty package
for module in self.py_modules:
path = module.split('.')
package = '.'.join(path[0:-1])
module_base = path[-1]
try:
(package_dir, checked) = packages[package]
except KeyError:
package_dir = self.get_package_dir(package)
checked = 0
if not checked:
init_py = self.check_package(package, package_dir)
packages[package] = (package_dir, 1)
if init_py:
modules.append((package, "__init__", init_py))
# XXX perhaps we should also check for just .pyc files
# (so greedy closed-source bastards can distribute Python
# modules too)
module_file = os.path.join(package_dir, module_base + ".py")
if not self.check_module(module, module_file):
continue
modules.append((package, module_base, module_file))
return modules
def find_all_modules(self):
"""Compute the list of all modules that will be built, whether
they are specified one-module-at-a-time ('self.py_modules') or
by whole packages ('self.packages'). Return a list of tuples
(package, module, module_file), just like 'find_modules()' and
'find_package_modules()' do."""
modules = []
if self.py_modules:
modules.extend(self.find_modules())
if self.packages:
for package in self.packages:
package_dir = self.get_package_dir(package)
m = self.find_package_modules(package, package_dir)
modules.extend(m)
return modules
def get_source_files(self):
return [module[-1] for module in self.find_all_modules()]
def get_module_outfile(self, build_dir, package, module):
outfile_path = [build_dir] + list(package) + [module + ".py"]
return os.path.join(*outfile_path)
def get_outputs(self, include_bytecode=1):
modules = self.find_all_modules()
outputs = []
for (package, module, module_file) in modules:
package = package.split('.')
filename = self.get_module_outfile(self.build_lib, package, module)
outputs.append(filename)
if include_bytecode:
if self.compile:
outputs.append(filename + "c")
if self.optimize > 0:
outputs.append(filename + "o")
outputs += [
os.path.join(build_dir, filename)
for package, src_dir, build_dir, filenames in self.data_files
for filename in filenames
]
return outputs
def build_module(self, module, module_file, package):
if isinstance(package, str):
package = package.split('.')
elif not isinstance(package, (list, tuple)):
raise TypeError(
"'package' must be a string (dot-separated), list, or tuple")
# Now put the module source file into the "build" area -- this is
# easy, we just copy it somewhere under self.build_lib (the build
# directory for Python source).
outfile = self.get_module_outfile(self.build_lib, package, module)
dir = os.path.dirname(outfile)
self.mkpath(dir)
return self.copy_file(module_file, outfile, preserve_mode=0)
def build_modules(self):
modules = self.find_modules()
for (package, module, module_file) in modules:
# Now "build" the module -- ie. copy the source file to
# self.build_lib (the build directory for Python source).
# (Actually, it gets copied to the directory for this package
# under self.build_lib.)
self.build_module(module, module_file, package)
def build_packages(self):
for package in self.packages:
# Get list of (package, module, module_file) tuples based on
# scanning the package directory. 'package' is only included
# in the tuple so that 'find_modules()' and
# 'find_package_tuples()' have a consistent interface; it's
# ignored here (apart from a sanity check). Also, 'module' is
# the *unqualified* module name (ie. no dots, no package -- we
# already know its package!), and 'module_file' is the path to
# the .py file, relative to the current directory
# (ie. including 'package_dir').
package_dir = self.get_package_dir(package)
modules = self.find_package_modules(package, package_dir)
# Now loop over the modules we found, "building" each one (just
# copy it to self.build_lib).
for (package_, module, module_file) in modules:
assert package == package_
self.build_module(module, module_file, package)
def byte_compile(self, files):
if sys.dont_write_bytecode:
self.warn('byte-compiling is disabled, skipping.')
return
from distutils.util import byte_compile
prefix = self.build_lib
if prefix[-1] != os.sep:
prefix = prefix + os.sep
# XXX this code is essentially the same as the 'byte_compile()
# method of the "install_lib" command, except for the determination
# of the 'prefix' string. Hmmm.
if self.compile:
byte_compile(files, optimize=0,
force=self.force, prefix=prefix, dry_run=self.dry_run)
if self.optimize > 0:
byte_compile(files, optimize=self.optimize,
force=self.force, prefix=prefix, dry_run=self.dry_run)
class build_py_2to3(build_py, Mixin2to3):
def run(self):
self.updated_files = []
# Base class code
if self.py_modules:
self.build_modules()
if self.packages:
self.build_packages()
self.build_package_data()
# 2to3
self.run_2to3(self.updated_files)
# Remaining base class code
self.byte_compile(self.get_outputs(include_bytecode=0))
def build_module(self, module, module_file, package):
res = build_py.build_module(self, module, module_file, package)
if res[1]:
# file was copied
self.updated_files.append(res[0])
return res
| lgpl-3.0 |
TJ-VMT/vmtsite | plugins/render_math/pelican_mathjax_markdown_extension.py | 348 | 6929 | # -*- coding: utf-8 -*-
"""
Pelican Mathjax Markdown Extension
==================================
An extension for the Python Markdown module that enables
the Pelican python blog to process mathjax. This extension
gives Pelican the ability to use Mathjax as a "first class
citizen" of the blog
"""
import markdown
from markdown.util import etree
from markdown.util import AtomicString
class PelicanMathJaxPattern(markdown.inlinepatterns.Pattern):
"""Inline markdown processing that matches mathjax"""
def __init__(self, pelican_mathjax_extension, tag, pattern):
super(PelicanMathJaxPattern,self).__init__(pattern)
self.math_tag_class = pelican_mathjax_extension.getConfig('math_tag_class')
self.pelican_mathjax_extension = pelican_mathjax_extension
self.tag = tag
def handleMatch(self, m):
node = markdown.util.etree.Element(self.tag)
node.set('class', self.math_tag_class)
prefix = '\\(' if m.group('prefix') == '$' else m.group('prefix')
suffix = '\\)' if m.group('suffix') == '$' else m.group('suffix')
node.text = markdown.util.AtomicString(prefix + m.group('math') + suffix)
# If mathjax was successfully matched, then JavaScript needs to be added
# for rendering. The boolean below indicates this
self.pelican_mathjax_extension.mathjax_needed = True
return node
class PelicanMathJaxCorrectDisplayMath(markdown.treeprocessors.Treeprocessor):
"""Corrects invalid html that results from a <div> being put inside
a <p> for displayed math"""
def __init__(self, pelican_mathjax_extension):
self.pelican_mathjax_extension = pelican_mathjax_extension
def correct_html(self, root, children, div_math, insert_idx, text):
"""Separates out <div class="math"> from the parent tag <p>. Anything
in between is put into its own parent tag of <p>"""
current_idx = 0
for idx in div_math:
el = markdown.util.etree.Element('p')
el.text = text
el.extend(children[current_idx:idx])
# Test to ensure that empty <p> is not inserted
if len(el) != 0 or (el.text and not el.text.isspace()):
root.insert(insert_idx, el)
insert_idx += 1
text = children[idx].tail
children[idx].tail = None
root.insert(insert_idx, children[idx])
insert_idx += 1
current_idx = idx+1
el = markdown.util.etree.Element('p')
el.text = text
el.extend(children[current_idx:])
if len(el) != 0 or (el.text and not el.text.isspace()):
root.insert(insert_idx, el)
def run(self, root):
"""Searches for <div class="math"> that are children in <p> tags and corrects
the invalid HTML that results"""
math_tag_class = self.pelican_mathjax_extension.getConfig('math_tag_class')
for parent in root:
div_math = []
children = list(parent)
for div in parent.findall('div'):
if div.get('class') == math_tag_class:
div_math.append(children.index(div))
# Do not process further if no displayed math has been found
if not div_math:
continue
insert_idx = list(root).index(parent)
self.correct_html(root, children, div_math, insert_idx, parent.text)
root.remove(parent) # Parent must be removed last for correct insertion index
return root
class PelicanMathJaxAddJavaScript(markdown.treeprocessors.Treeprocessor):
"""Tree Processor for adding Mathjax JavaScript to the blog"""
def __init__(self, pelican_mathjax_extension):
self.pelican_mathjax_extension = pelican_mathjax_extension
def run(self, root):
# If no mathjax was present, then exit
if (not self.pelican_mathjax_extension.mathjax_needed):
return root
# Add the mathjax script to the html document
mathjax_script = etree.Element('script')
mathjax_script.set('type','text/javascript')
mathjax_script.text = AtomicString(self.pelican_mathjax_extension.getConfig('mathjax_script'))
root.append(mathjax_script)
# Reset the boolean switch to false so that script is only added
# to other pages if needed
self.pelican_mathjax_extension.mathjax_needed = False
return root
class PelicanMathJaxExtension(markdown.Extension):
"""A markdown extension enabling mathjax processing in Markdown for Pelican"""
def __init__(self, config):
try:
# Needed for markdown versions >= 2.5
self.config['mathjax_script'] = ['', 'Mathjax JavaScript script']
self.config['math_tag_class'] = ['math', 'The class of the tag in which mathematics is wrapped']
self.config['auto_insert'] = [True, 'Determines if mathjax script is automatically inserted into content']
super(PelicanMathJaxExtension,self).__init__(**config)
except AttributeError:
# Markdown versions < 2.5
config['mathjax_script'] = [config['mathjax_script'], 'Mathjax JavaScript script']
config['math_tag_class'] = [config['math_tag_class'], 'The class of the tag in which mathematic is wrapped']
config['auto_insert'] = [config['auto_insert'], 'Determines if mathjax script is automatically inserted into content']
super(PelicanMathJaxExtension,self).__init__(config)
# Used as a flag to determine if javascript
# needs to be injected into a document
self.mathjax_needed = False
def extendMarkdown(self, md, md_globals):
# Regex to detect mathjax
mathjax_inline_regex = r'(?P<prefix>\$)(?P<math>.+?)(?P<suffix>(?<!\s)\2)'
mathjax_display_regex = r'(?P<prefix>\$\$|\\begin\{(.+?)\})(?P<math>.+?)(?P<suffix>\2|\\end\{\3\})'
# Process mathjax before escapes are processed since escape processing will
# intefer with mathjax. The order in which the displayed and inlined math
# is registered below matters
md.inlinePatterns.add('mathjax_displayed', PelicanMathJaxPattern(self, 'div', mathjax_display_regex), '<escape')
md.inlinePatterns.add('mathjax_inlined', PelicanMathJaxPattern(self, 'span', mathjax_inline_regex), '<escape')
# Correct the invalid HTML that results from teh displayed math (<div> tag within a <p> tag)
md.treeprocessors.add('mathjax_correctdisplayedmath', PelicanMathJaxCorrectDisplayMath(self), '>inline')
# If necessary, add the JavaScript Mathjax library to the document. This must
# be last in the ordered dict (hence it is given the position '_end')
if self.getConfig('auto_insert'):
md.treeprocessors.add('mathjax_addjavascript', PelicanMathJaxAddJavaScript(self), '_end')
| gpl-3.0 |
dannyperry571/theapprentice | script.module.urlresolver/lib/urlresolver/lib/pyaes/util.py | 124 | 2032 | # The MIT License (MIT)
#
# Copyright (c) 2014 Richard Moore
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Why to_bufferable?
# Python 3 is very different from Python 2.x when it comes to strings of text
# and strings of bytes; in Python 3, strings of bytes do not exist, instead to
# represent arbitrary binary data, we must use the "bytes" object. This method
# ensures the object behaves as we need it to.
def to_bufferable(binary):
return binary
def _get_byte(c):
return ord(c)
try:
xrange
except:
def to_bufferable(binary):
if isinstance(binary, bytes):
return binary
return bytes(ord(b) for b in binary)
def _get_byte(c):
return c
def append_PKCS7_padding(data):
pad = 16 - (len(data) % 16)
return data + to_bufferable(chr(pad) * pad)
def strip_PKCS7_padding(data):
if len(data) % 16 != 0:
raise ValueError("invalid length")
pad = _get_byte(data[-1])
if not pad or pad > 16:
return data
return data[:-pad]
| gpl-2.0 |
ericflo/awesomestream | setup.py | 1 | 5909 | from setuptools import setup, find_packages
version = '0.1'
LONG_DESCRIPTION = """
AwesomeStream
=============
AwesomeStream is a set of tools for creating a "stream server". That is, a
server which can store information about events that happen, and can query back
those events in reverse-chronological order, sliced in interesting ways.
Example and Use Case
====================
Say that you run a website like GitHub, where people interact in various
different ways. People can create repositories, fork them, watch or unwatch
repositories, add friends, etc. There are all kinds of things that a user
can do on the site. Let's look at how AwesomeStream can help.
First, we'll set up a simple redis-based server:
>>> from awesomestream.backends import RedisBackend
>>> from awesomestream.jsonrpc import create_app, run_server
>>> backend = RedisBackend(
... keys=['user', 'kind', 'repo'],
... host='127.0.0.1',
... port=6379
... )
>>>
>>> app = create_app(backend)
>>> run_server(app, 8080)
This simple script sets up a Redis-based AwesomeStream server--one that pays
special attention to the 'user', 'kind', and 'repo' keys. This will make a
bit more sense in a bit.
In another console, we're going to instantiate a client.
>>> from awesomestream.jsonrpc import Client
>>> c = Client('http://127.0.0.1:8080/')
OK, now that we've set up our client, lets start logging user actions. Look,
a user has just created a new repo!
>>> c.insert({
... 'kind': 'create-repo',
... 'repo': 17,
... 'user': 291,
... 'name': 'frist',
... 'description': 'This is my first repo ever!',
... })
>>>
But the user made a mistake, and named it 'frist' instead of 'first'. So they
go ahead and delete it:
>>> c.insert({
... 'kind': 'delete-repo',
... 'repo': 17,
... 'user': 291,
... 'reason': 'Made a typo :(',
... })
>>>
Then they give up and decide to watch another user's repo instead:
>>> c.insert({'kind': 'watch', 'repo': 2842, 'user': 291, 'owner': 23})
And finally they add that user as a friend:
>>> c.insert({'kind': 'friend', 'user': 291, 'friend': 23})
That second user notices that someone is following them, and follows back:
>>> c.insert({'kind': 'friend', 'user': 23, 'friend': 291})
Now that we have data inserted into the stream server, we can query it to get
back the full stream. Here's how something like that might look:
>>> c.items()
[{'kind': 'friend', 'user': 23, 'friend': 291},
{'kind': 'friend', 'user': 291, 'friend': 23},
{'repo': 2842, 'owner': 23, 'kind': 'watch', 'user': 291},
{'repo': 17, 'kind': 'delete-repo', 'reason': 'Made a typo :(', 'user': 291},
{'repo': 17, 'kind': 'create-repo', 'user': 291, 'name': 'frist', 'description': 'This is my first repo ever!'}
]
As you can see, we got the entire stream back, in reverse chronological order.
But let's say we want to filter this out, to only see 'friend' requests. We
can do that easily:
>>> c.items(kind='friend')
[{'kind': 'friend', 'user': 23, 'friend': 291},
{'kind': 'friend', 'user': 291, 'friend': 23}
]
Notice that they are still in reverse chronological order. We can also combine
our predicates, to get only friend requests made by a specific user.
>>> c.items(kind='friend', user=23)
[{'kind': 'friend', 'user': 23, 'friend': 291}]
But an extremely common case is that you want to see only your activity
that is generated by your friends. With AwesomeStream, that's simple:
>>> c.items(user=[23, 291])
[{'kind': 'friend', 'user': 23, 'friend': 291},
{'kind': 'friend', 'user': 291, 'friend': 23},
{'repo': 2842, 'owner': 23, 'kind': 'watch', 'user': 291},
{'repo': 17, 'kind': 'delete-repo', 'reason': 'Made a typo :(', 'user': 291},
{'repo': 17, 'kind': 'create-repo', 'user': 291, 'name': 'frist', 'description': 'This is my first repo ever!'}
]
As you can see, every user ID passed into that list is retrieved. By default,
the items() function retrieves 20 items, but often times we'll need to
customize that. Here's how that would look:
>>> c.items(user=[23, 291], start=1, end=3)
[{'kind': 'friend', 'user': 291, 'friend': 23},
{'repo': 2842, 'owner': 23, 'kind': 'watch', 'user': 291}
]
Supported Backends
==================
* In-Memory (mostly for testing)
* SQL
* Redis
Planned Support
===============
* CouchDB
* Cassandra
Maturity
========
I'm writing this for eventual deployment on http://radiosox.com/, but have not
yet deployed it in production. Do so at your own risk.
Requirements
============
Short Summary:
Use pip, and do `pip install -U -r requirements.txt`
Longer Summary:
Strictly speaking, the only requirement is *simplejson*. That being said,
if you want redis support, you need *redis* installed. If you want SQL
support, you need *SQLAlchemy* installed. If you want support for creating
a WSGI app to expose this over HTTP, you'll need *werkzeug* installed.
Finally, if you want a simple, pure-python way of running that WSGI app,
you'll want to install *cherrypy*.
"""
setup(
name='awesomestream',
version=version,
description="AwesomeStream makes awesome streams",
long_description=LONG_DESCRIPTION,
classifiers=[
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Web Environment",
],
keywords='json-rpc,stream,feed,werkzeug,cherrypy,sqlalchemy,redis',
author='Eric Florenzano',
author_email='floguy@gmail.com',
url='http://github.com/ericflo/awesomestream',
license='BSD',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=['setuptools'],
) | bsd-3-clause |
adam111316/SickGear | lib/unidecode/x0fc.py | 253 | 3595 | data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'', # 0x5f
'', # 0x60
'', # 0x61
'', # 0x62
'', # 0x63
'', # 0x64
'', # 0x65
'', # 0x66
'', # 0x67
'', # 0x68
'', # 0x69
'', # 0x6a
'', # 0x6b
'', # 0x6c
'', # 0x6d
'', # 0x6e
'', # 0x6f
'', # 0x70
'', # 0x71
'', # 0x72
'', # 0x73
'', # 0x74
'', # 0x75
'', # 0x76
'', # 0x77
'', # 0x78
'', # 0x79
'', # 0x7a
'', # 0x7b
'', # 0x7c
'', # 0x7d
'', # 0x7e
'', # 0x7f
'', # 0x80
'', # 0x81
'', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'', # 0x8c
'', # 0x8d
'', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'', # 0xb0
'', # 0xb1
'', # 0xb2
'', # 0xb3
'', # 0xb4
'', # 0xb5
'', # 0xb6
'', # 0xb7
'', # 0xb8
'', # 0xb9
'', # 0xba
'', # 0xbb
'', # 0xbc
'', # 0xbd
'', # 0xbe
'', # 0xbf
'', # 0xc0
'', # 0xc1
'', # 0xc2
'', # 0xc3
'', # 0xc4
'', # 0xc5
'', # 0xc6
'', # 0xc7
'', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'', # 0xcc
'', # 0xcd
'', # 0xce
'', # 0xcf
'', # 0xd0
'', # 0xd1
'', # 0xd2
'', # 0xd3
'', # 0xd4
'', # 0xd5
'', # 0xd6
'', # 0xd7
'', # 0xd8
'', # 0xd9
'', # 0xda
'', # 0xdb
'', # 0xdc
'', # 0xdd
'', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'', # 0xe4
'', # 0xe5
'', # 0xe6
'', # 0xe7
'', # 0xe8
'', # 0xe9
'', # 0xea
'', # 0xeb
'', # 0xec
'', # 0xed
'', # 0xee
'', # 0xef
'', # 0xf0
'', # 0xf1
'', # 0xf2
'', # 0xf3
'', # 0xf4
'', # 0xf5
'', # 0xf6
'', # 0xf7
'', # 0xf8
'', # 0xf9
'', # 0xfa
'', # 0xfb
'', # 0xfc
'', # 0xfd
'', # 0xfe
'', # 0xff
)
| gpl-3.0 |
kjhsdgf/Collin-Neural-Network | Network/dlib-19.7/python_examples/svm_struct.py | 10 | 17274 | #!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
# This is an example illustrating the use of the structural SVM solver from
# the dlib C++ Library. Therefore, this example teaches you the central ideas
# needed to setup a structural SVM model for your machine learning problems. To
# illustrate the process, we use dlib's structural SVM solver to learn the
# parameters of a simple multi-class classifier. We first discuss the
# multi-class classifier model and then walk through using the structural SVM
# tools to find the parameters of this classification model. As an aside,
# dlib's C++ interface to the structural SVM solver is threaded. So on a
# multi-core computer it is significantly faster than using the python
# interface. So consider using the C++ interface instead if you find that
# running it in python is slow.
#
#
# COMPILING/INSTALLING THE DLIB PYTHON INTERFACE
# You can install dlib using the command:
# pip install dlib
#
# Alternatively, if you want to compile dlib yourself then go into the dlib
# root folder and run:
# python setup.py install
# or
# python setup.py install --yes USE_AVX_INSTRUCTIONS
# if you have a CPU that supports AVX instructions, since this makes some
# things run faster.
#
# Compiling dlib should work on any operating system so long as you have
# CMake and boost-python installed. On Ubuntu, this can be done easily by
# running the command:
# sudo apt-get install libboost-python-dev cmake
#
import dlib
def main():
    """Train the toy 3-class structural SVM and report its predictions.

    Builds a tiny training set, hands it to dlib's structural SVM solver via
    ThreeClassClassifierProblem, then prints the learned weights and the
    predicted label for every training sample.
    """
    # Each sample vector is zero everywhere except at one place; the index of
    # the non-zero dimension determines the class of the vector (e.g. the
    # first sample is class 1 because samples[0][1] is its only non-zero
    # element).
    samples = [[0, 2, 0], [1, 0, 0], [0, 4, 0], [0, 0, 3]]
    # labels[i] is the class of samples[i].
    labels = [1, 0, 1, 2]
    # Let the structural SVM find the weight vector such that
    # predict_label(weights, sample) returns the correct label for every
    # training sample.
    problem = ThreeClassClassifierProblem(samples, labels)
    weights = dlib.solve_structural_svm_problem(problem)
    # Show the learned weights, then verify each training sample's prediction.
    print(weights)
    for idx, sample in enumerate(samples):
        prediction = predict_label(weights, sample)
        print("Predicted label for sample[{0}]: {1}".format(idx, prediction))
def predict_label(weights, sample):
    """Given the 9-dimensional weight vector which defines a 3 class classifier,
    predict the class of the given 3-dimensional sample vector. Therefore, the
    output of this function is either 0, 1, or 2 (i.e. one of the three possible
    labels)."""
    # The model is three separate linear classifiers stacked into one weight
    # vector (the popular one-vs-all scheme): weights[0:3] scores class 0,
    # weights[3:6] scores class 1 and weights[6:9] scores class 2. The
    # classifier with the largest response "wins" and its index is the label.
    scores = [dot(weights[start:start + 3], sample) for start in (0, 3, 6)]
    return scores.index(max(scores))
def dot(a, b):
    """Compute the dot product between the two vectors a and b."""
    total = 0
    for left, right in zip(a, b):
        total += left * right
    return total
################################################################################
class ThreeClassClassifierProblem:
    """Structural SVM problem definition for a toy 3-class linear classifier.

    Implements the interface expected by dlib.solve_structural_svm_problem():
    the fields C, num_samples and num_dimensions, plus the methods
    get_truth_joint_feature_vector() and separation_oracle().
    """
    # Now we arrive at the meat of this example program. To use the
    # dlib.solve_structural_svm_problem() routine you need to define an object
    # which tells the structural SVM solver what to do for your problem. In
    # this example, this is done by defining the ThreeClassClassifierProblem
    # object. Before we get into the details, we first discuss some background
    # information on structural SVMs.
    #
    # A structural SVM is a supervised machine learning method for learning to
    # predict complex outputs. This is contrasted with a binary classifier
    # which makes only simple yes/no predictions. A structural SVM, on the
    # other hand, can learn to predict complex outputs such as entire parse
    # trees or DNA sequence alignments. To do this, it learns a function F(x,y)
    # which measures how well a particular data sample x matches a label y,
    # where a label is potentially a complex thing like a parse tree. However,
    # to keep this example program simple we use only a 3 category label output.
    #
    # At test time, the best label for a new x is given by the y which
    # maximizes F(x,y). To put this into the context of the current example,
    # F(x,y) computes the score for a given sample and class label. The
    # predicted class label is therefore whatever value of y which makes F(x,y)
    # the biggest. This is exactly what predict_label() does. That is, it
    # computes F(x,0), F(x,1), and F(x,2) and then reports which label has the
    # biggest value.
    #
    # At a high level, a structural SVM can be thought of as searching the
    # parameter space of F(x,y) for the set of parameters that make the
    # following inequality true as often as possible:
    #     F(x_i,y_i) > max{over all incorrect labels of x_i} F(x_i, y_incorrect)
    # That is, it seeks to find the parameter vector such that F(x,y) always
    # gives the highest score to the correct output. To define the structural
    # SVM optimization problem precisely, we first introduce some notation:
    #    - let PSI(x,y)    == the joint feature vector for input x and a label y
    #    - let F(x,y|w)    == dot(w,PSI(x,y)).
    #      (we use the | notation to emphasize that F() has the parameter vector
    #      of weights called w)
    #    - let LOSS(idx,y) == the loss incurred for predicting that the
    #      idx-th training sample has a label of y. Note that LOSS()
    #      should always be >= 0 and should become exactly 0 when y is the
    #      correct label for the idx-th sample. Moreover, it should notionally
    #      indicate how bad it is to predict y for the idx'th sample.
    #    - let x_i == the i-th training sample.
    #    - let y_i == the correct label for the i-th training sample.
    #    - The number of data samples is N.
    #
    # Then the optimization problem solved by a structural SVM using
    # dlib.solve_structural_svm_problem() is the following:
    #     Minimize: h(w) == 0.5*dot(w,w) + C*R(w)
    #
    #     Where R(w) == sum from i=1 to N: 1/N * sample_risk(i,w) and
    #     sample_risk(i,w) == max over all
    #     Y: LOSS(i,Y) + F(x_i,Y|w) - F(x_i,y_i|w) and C > 0
    #
    # You can think of the sample_risk(i,w) as measuring the degree of error
    # you would make when predicting the label of the i-th sample using
    # parameters w. That is, it is zero only when the correct label would be
    # predicted and grows larger the more "wrong" the predicted output becomes.
    # Therefore, the objective function is minimizing a balance between making
    # the weights small (typically this reduces overfitting) and fitting the
    # training data. The degree to which you try to fit the data is controlled
    # by the C parameter.
    #
    # For a more detailed introduction to structured support vector machines
    # you should consult the following paper:
    #     Predicting Structured Objects with Support Vector Machines by
    #     Thorsten Joachims, Thomas Hofmann, Yisong Yue, and Chun-nam Yu
    #
    # Finally, we come back to the code. To use
    # dlib.solve_structural_svm_problem() you need to provide the things
    # discussed above. This is the value of C, the number of training samples,
    # the dimensionality of PSI(), as well as methods for calculating the loss
    # values and PSI() vectors. You will also need to write code that can
    # compute:
    # max over all Y: LOSS(i,Y) + F(x_i,Y|w). To summarize, the
    # ThreeClassClassifierProblem class is required to have the following
    # fields:
    #   - C
    #   - num_samples
    #   - num_dimensions
    #   - get_truth_joint_feature_vector()
    #   - separation_oracle()
    C = 1
    # There are also a number of optional arguments:
    # epsilon is the stopping tolerance. The optimizer will run until R(w) is
    # within epsilon of its optimal value. If you don't set this then it
    # defaults to 0.001.
    # epsilon = 1e-13
    # Uncomment this and the optimizer will print its progress to standard
    # out. You will be able to see things like the current risk gap. The
    # optimizer continues until the
    # risk gap is below epsilon.
    # be_verbose = True
    # If you want to require that the learned weights are all non-negative
    # then set this field to True.
    # learns_nonnegative_weights = True
    # The optimizer uses an internal cache to avoid unnecessary calls to your
    # separation_oracle() routine. This parameter controls the size of that
    # cache. Bigger values use more RAM and might make the optimizer run
    # faster. You can also disable it by setting it to 0 which is good to do
    # when your separation_oracle is very fast. If you don't set this
    # field it defaults to a value of 5.
    # max_cache_size = 20
    def __init__(self, samples, labels):
        """Store the training data and expose the fields dlib reads."""
        # dlib.solve_structural_svm_problem() expects the class to have
        # num_samples and num_dimensions fields. These fields should contain
        # the number of training samples and the dimensionality of the PSI
        # feature vector respectively.
        self.num_samples = len(samples)
        # One weight sub-vector per class, hence 3x the sample dimension.
        self.num_dimensions = len(samples[0])*3
        self.samples = samples
        self.labels = labels
    def make_psi(self, x, label):
        """Compute PSI(x,label): copy x into the slot of the 9-dim joint
        feature vector that corresponds to the given class label."""
        # All we are doing here is taking x, which is a 3 dimensional sample
        # vector in this example program, and putting it into one of 3 places in
        # a 9 dimensional PSI vector, which we then return. So this function
        # returns PSI(x,label). To see why we setup PSI like this, recall how
        # predict_label() works. It takes in a 9 dimensional weight vector and
        # breaks the vector into 3 pieces. Each piece then defines a different
        # classifier and we use them in a one-vs-all manner to predict the
        # label. So now that we are in the structural SVM code we have to
        # define the PSI vector to correspond to this usage. That is, we need
        # to setup PSI so that argmax_y dot(weights,PSI(x,y)) ==
        # predict_label(weights,x). This is how we tell the structural SVM
        # solver what kind of problem we are trying to solve.
        #
        # It's worth emphasizing that the single biggest step in using a
        # structural SVM is deciding how you want to represent PSI(x,label). It
        # is always a vector, but deciding what to put into it to solve your
        # problem is often not a trivial task. Part of the difficulty is that
        # you need an efficient method for finding the label that makes
        # dot(w,PSI(x,label)) the biggest. Sometimes this is easy, but often
        # finding the max scoring label turns into a difficult combinatorial
        # optimization problem. So you need to pick a PSI that doesn't make the
        # label maximization step intractable but also still well models your
        # problem.
        #
        # Create a dense vector object (note that you can also use unsorted
        # sparse vectors (i.e. dlib.sparse_vector objects) to represent your
        # PSI vector. This is useful if you have very high dimensional PSI
        # vectors that are mostly zeros. In the context of this example, you
        # would simply return a dlib.sparse_vector at the end of make_psi() and
        # the rest of the example would still work properly. ).
        psi = dlib.vector()
        # Set it to have 9 dimensions. Note that the elements of the vector
        # are 0 initialized.
        psi.resize(self.num_dimensions)
        dims = len(x)
        if label == 0:
            for i in range(0, dims):
                psi[i] = x[i]
        elif label == 1:
            for i in range(dims, 2 * dims):
                psi[i] = x[i - dims]
        else:  # the label must be 2
            for i in range(2 * dims, 3 * dims):
                psi[i] = x[i - 2 * dims]
        return psi
    # Now we get to the two member functions that are directly called by
    # dlib.solve_structural_svm_problem().
    #
    # In get_truth_joint_feature_vector(), all you have to do is return the
    # PSI() vector for the idx-th training sample when it has its true label.
    # So here it returns
    # PSI(self.samples[idx], self.labels[idx]).
    def get_truth_joint_feature_vector(self, idx):
        """Return PSI(samples[idx], labels[idx]) for the true label."""
        return self.make_psi(self.samples[idx], self.labels[idx])
    # separation_oracle() is more interesting.
    # dlib.solve_structural_svm_problem() will call separation_oracle() many
    # times during the optimization. Each time it will give it the current
    # value of the parameter weights and the separation_oracle() is supposed to
    # find the label that most violates the structural SVM objective function
    # for the idx-th sample. Then the separation oracle reports the
    # corresponding PSI vector and loss value. To state this more precisely,
    # the separation_oracle() member function has the following contract:
    #   requires
    #      - 0 <= idx < self.num_samples
    #      - len(current_solution) == self.num_dimensions
    #   ensures
    #      - runs the separation oracle on the idx-th sample.
    #        We define this as follows:
    #         - let X         == the idx-th training sample.
    #         - let PSI(X,y)  == the joint feature vector for input X
    #                            and an arbitrary label y.
    #         - let F(X,y)    == dot(current_solution,PSI(X,y)).
    #         - let LOSS(idx,y) == the loss incurred for predicting that the
    #           idx-th sample has a label of y. Note that LOSS()
    #           should always be >= 0 and should become exactly 0 when y is the
    #           correct label for the idx-th sample.
    #
    #            Then the separation oracle finds a Y such that:
    #               Y = argmax over all y: LOSS(idx,y) + F(X,y)
    #            (i.e. It finds the label which maximizes the above expression.)
    #
    #            Finally, separation_oracle() returns LOSS(idx,Y),PSI(X,Y)
    def separation_oracle(self, idx, current_solution):
        """Find the most-violating label for sample idx under the current
        weights and return (LOSS(idx,Y), PSI(X,Y)) for that label Y."""
        samp = self.samples[idx]
        dims = len(samp)
        scores = [0, 0, 0]
        # compute scores for each of the three classifiers
        scores[0] = dot(current_solution[0:dims], samp)
        scores[1] = dot(current_solution[dims:2*dims], samp)
        scores[2] = dot(current_solution[2*dims:3*dims], samp)
        # Add in the loss-augmentation. Recall that we maximize
        # LOSS(idx,y) + F(X,y) in the separate oracle, not just F(X,y) as we
        # normally would in predict_label(). Therefore, we must add in this
        # extra amount to account for the loss-augmentation. For our simple
        # multi-class classifier, we incur a loss of 1 if we don't predict the
        # correct label and a loss of 0 if we get the right label.
        if self.labels[idx] != 0:
            scores[0] += 1
        if self.labels[idx] != 1:
            scores[1] += 1
        if self.labels[idx] != 2:
            scores[2] += 1
        # Now figure out which classifier has the largest loss-augmented score.
        max_scoring_label = scores.index(max(scores))
        # And finally record the loss that was associated with that predicted
        # label. Again, the loss is 1 if the label is incorrect and 0 otherwise.
        if max_scoring_label == self.labels[idx]:
            loss = 0
        else:
            loss = 1
        # Finally, return the loss and PSI vector corresponding to the label
        # we just found.
        psi = self.make_psi(samp, max_scoring_label)
        return loss, psi
# Script entry point: run the structural SVM demo when executed directly.
if __name__ == "__main__":
    main()
| gpl-3.0 |
WmHHooper/aima-python | submissions/Marszalkowski/myKMeans.py | 1 | 3040 | from sklearn import preprocessing
import matplotlib.pyplot as plt
import numpy as np
nbaPGData = [
[ 15.8, 8.2, 8.1, 1.7 ],
[ 25.4, 10.3, 10.1, 1.8 ],
[ 22.1, 5.6, 3.1, 1.1 ],
[ 16.7, 3.4, 3.7, 1.0 ],
[ 16.2, 6.9, 5.6, 1.1 ],
[ 13.1, 5.3, 4.6, 1.6 ],
[ 17.3, 4.8, 4.1, 0.8 ],
[ 17.7, 5.0, 3.8, 2.0 ],
[ 26.9, 6.6, 4.5, 1.1 ],
[ 14.2, 7.0, 3.0, 1.5 ],
[ 15.2, 5.2, 3.8, 1.0 ],
[ 19.4, 6.2, 3.1, 1.1 ],
[ 12.4, 5.3, 2.6, 1.3 ],
[ 12.7, 6.2, 4.3, 1.3 ],
[ 8.3, 8.2, 4.0, 1.1 ],
[ 24.4, 5.1, 3.8, 1.1 ],
[ 11.6, 4.4, 2.8, 1.0 ],
[ 10.0, 2.8, 2.7, 0.9 ],
[ 18.6, 7.9, 5.4, 1.7 ],
[ 12.6, 6.6, 3.2, 0.9 ],
[ 7.5, 5.6, 3.1, 0.6 ],
[ 26.4, 6.1, 5.1, 1.6 ],
[ 10.2, 7.2, 6.9, 1.7 ],
[ 8.1, 2.9, 5.7, 1.2 ],
[ 9.5, 3.2, 2.3, 0.7 ],
[ 14.6, 5.3, 2.8, 0.6 ],
[ 13.4, 6.0, 4.3, 2.0 ],
[ 7.8, 4.4, 1.8, 1.0 ],
[ 19.4, 9.6, 3.7, 1.4 ],
[ 15.3, 7.8, 4.0, 1.2 ],
[ 29.1, 11.2, 8.1, 1.5 ],
[ 31.6, 10.4, 10.7, 1.6 ],
[ 25.3, 6.6, 4.5, 1.8 ],
[ 23.2, 5.5, 3.9, 1.1 ],
[ 17.9, 6.3, 3.1, 0.9 ],
[ 23.1, 10.7, 4.2, 2.0 ],
[ 28.9, 5.9, 2.7, 0.9 ],
[ 27.0, 5.9, 4.9, 0.9 ],
[ 11.1, 9.1, 4.1, 1.7 ],
[ 20.3, 5.8, 3.8, 1.2 ],
[ 25.2, 5.8, 3.2, 1.2 ],
[ 20.5, 6.3, 3.5, 1.3 ],
[ 21.1, 6.3, 4.8, 1.4 ],
[ 13.2, 4.6, 2.2, 1.0 ],
[ 18.0, 4.4, 3.8, 0.7 ],
[ 10.1, 4.5, 1.8, 0.5 ],
[ 15.4, 7.3, 3.9, 1.5 ],
[ 18.1, 9.2, 5.0, 2.0 ],
[ 22.4, 7.0, 4.8, 1.5 ],
[ 15.6, 4.8, 3.5, 1.4 ],
[ 12.8, 6.5, 4.7, 1.1 ],
[ 7.6, 4.7, 1.9, 0.7 ],
[ 6.9, 6.6, 3.1, 1.7 ],
[ 14.5, 5.2, 2.2, 0.7 ],
[ 16.9, 4.2, 3.4, 1.0 ],
[ 11.0, 5.6, 2.3, 0.5 ],
[ 12.8, 2.7, 2.6, 1.1 ],
[ 7.8, 6.7, 5.1, 1.4 ],
[ 11.0, 3.9, 3.2, 0.7 ],
[ 20.9, 5.2, 4.4, 1.6 ],
[ 23.5, 10.4, 7.8, 2.0 ],
[ 16.9, 4.3, 7.7, 1.2 ],
[ 30.1, 6.7, 5.4, 2.1 ],
[ 18.8, 6.2, 3.0, 1.1 ],
[ 22.2, 6.2, 3.0, 1.1 ],
[ 15.7, 5.9, 2.7, 1.2 ],
[ 21.2, 6.4, 4.7, 2.1 ],
[ 19.9, 10.2, 4.9, 1.9 ],
[ 10.1, 8.7, 4.3, 2.1 ],
[ 25.1, 6.8, 4.0, 0.9 ],
[ 19.5, 10.0, 4.2, 2.1 ],
[ 12.1, 3.5, 4.0, 1.1 ],
[ 19.0, 4.6, 4.1, 1.1 ],
[ 7.6, 4.1, 3.2, 0.9 ],
[ 14.1, 5.8, 3.8, 1.0 ],
[ 11.9, 5.3, 2.4, 0.8 ],
[ 11.9, 11.7, 6.0, 2.0 ],
[ 10.7, 6.4, 3.6, 1.2 ],
[ 12.8, 5.5, 3.4, 1.0 ],
[ 16.4, 4.7, 3.4, 0.7 ],
[ 9.9, 3.4, 3.5, 1.3 ],
[ 14.1, 5.8, 2.9, 0.9 ],
[ 15.3, 6.1, 2.9, 1.2 ],
[ 19.6, 4.7, 3.0, 1.1 ],
[ 12.6, 6.5, 4.0, 3.4 ],
[ 13.2, 3.3, 3.4, 1.2 ],
[ 10.3, 5.4, 2.2, 0.5 ],
[ 15.6, 10.2, 4.3, 1.3 ],
[ 12.2, 6.4, 3.4, 1.5 ],
[ 17.6, 5.6, 4.0, 1.2 ],
[ 15.5, 7.9, 8.7, 1.4 ],
[ 15.9, 7.6, 3.0, 0.7 ],
[ 15.0, 6.0, 4.5, 1.3 ],
[ 9.0, 4.8, 2.3, 1.5 ],
[ 12.6, 2.3, 1.8, 0.7 ],
[ 27.1, 6.0, 5.3, 0.8 ],
[ 27.4, 6.3, 4.3, 1.3 ],
[ 21.5, 8.1, 3.6, 1.7 ],
[ 20.3, 6.6, 3.4, 1.2 ],
[ 17.5, 7.5, 4.0, 1.2 ],
[ 22.0, 6.4, 4.9, 1.9 ],
[ 17.5, 4.5, 4.3, 0.9 ],
[ 8.2, 4.5, 5.6, 1.0 ],
[ 16.0, 4.2, 2.7, 0.7 ],
[ 13.9, 3.9, 2.8, 1.2 ],
[ 6.7, 4.0, 4.0, 0.7 ],
[ 12.6, 7.6, 2.3, 1.3 ],
[ 7.5, 3.3, 2.6, 0.6 ],
]
# Scale each player's stat row with sklearn's normalize() (default L2 norm)
# so rows are comparable regardless of overall production volume.
normalized_data = preprocessing.normalize(nbaPGData)
Examples = {
'pgNotNormalized': {
'data': nbaPGData,
'k': [3, 2, 4],
},
'pgNormalized': {
'data': normalized_data,
'k': [2, 4, 3],
},
} | mit |
firebase/grpc | tools/distrib/python/grpcio_tools/grpc_tools/protoc.py | 20 | 1198 | #!/usr/bin/env python
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pkg_resources
import sys
from grpc_tools import _protoc_compiler
def main(command_arguments):
    """Run the protocol buffer compiler with the given command-line arguments.

    Args:
      command_arguments: a list of strings representing command line arguments to
        `protoc`.
    """
    # The C-extension entry point expects byte strings, so encode each
    # argument before handing the list over.
    encoded_arguments = []
    for argument in command_arguments:
        encoded_arguments.append(argument.encode())
    return _protoc_compiler.run_main(encoded_arguments)
# Script entry point: run protoc with the bundled well-known-type protos
# appended to the include path, and exit with protoc's return code.
if __name__ == '__main__':
    proto_include = pkg_resources.resource_filename('grpc_tools', '_proto')
    sys.exit(main(sys.argv + ['-I{}'.format(proto_include)]))
| apache-2.0 |
sanford1/BulletLua | example/bootstrap.py | 1 | 2966 | #!/usr/bin/env python
import os
import sys
import fnmatch
import argparse
import itertools
sys.path.insert(0, '..')
import ninja_syntax
objdir = 'obj'
def flags(*groups):
    """Flatten the given flag groups one level and join them with spaces."""
    parts = []
    for group in groups:
        parts.extend(group)
    return ' '.join(parts)
def object_file(f):
    """Map a source path to its object-file path under the objdir tree."""
    base, _ext = os.path.splitext(f)
    return os.path.join(objdir, base + '.o')
def generate_files_from(directory, glob):
    """Yield every path under *directory* whose basename matches *glob*."""
    for dirpath, _subdirs, filenames in os.walk(directory):
        # fnmatch.filter applies the same shell-style matching as
        # fnmatch.fnmatch, just over the whole name list at once.
        for name in fnmatch.filter(filenames, glob):
            yield os.path.join(dirpath, name)
def files_from(directory, glob):
    """Eager counterpart of generate_files_from(): return the matches as a list."""
    matches = generate_files_from(directory, glob)
    return list(matches)
# command line stuff
parser = argparse.ArgumentParser(usage='%(prog)s [options...]')
parser.add_argument('--debug', action='store_true', help='compile with debug flags')
parser.add_argument('--ci', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--cxx', metavar='<compiler>', help='compiler name to use (default: g++)', default='g++')
args = parser.parse_args()
# NOTE(review): BUILD_FILENAME duplicates the 'build.ninja' literal that is
# also hard-coded further down when the ninja file is opened -- keep in sync.
BUILD_FILENAME = 'build.ninja'
compiler = args.cxx
# Base compile/link flag sets; the branches below extend them per platform.
include = ['-I../include', '-I../ext/sol']
depends = []
libdirs = ['-L../lib']
ldflags = ['-lSDL2', '-lGL', '-lGLEW', '-lbulletlua']
cxxflags = ['-Wall', '-Wextra', '-pedantic', '-pedantic-errors', '-std=c++11']
if sys.platform == 'win32':
    # NOTE(review): `project` is never defined in this script, so this branch
    # raises NameError on Windows; it presumably meant to add mingw32 to the
    # link libraries (e.g. ldflags) -- confirm intended target.
    project.libraries = ['mingw32']
if args.ci:
    # CI images link against the distro's lua5.2 packages.
    ldflags.extend(['-llua5.2'])
    include.extend(['-I/usr/include/lua5.2', '-I./lua-5.2.2/src'])
else:
    ldflags.extend(['-llua'])
def warning(string):
    """Emit *string* to stdout with a 'warning: ' prefix."""
    print('warning: ' + string)
# configuration
# 'g++' is a substring of 'clang++', so this only warns for truly unknown
# compiler names.
if 'g++' not in args.cxx:
    warning('compiler not explicitly supported: {}'.format(args.cxx))
if args.debug:
    cxxflags.extend(['-g', '-O0', '-DDEBUG'])
else:
    cxxflags.extend(['-DNDEBUG', '-O3'])
if args.cxx == 'clang++':
    cxxflags.extend(['-Wno-constexpr-not-const', '-Wno-unused-value', '-Wno-mismatched-tags'])
### Build our ninja file
# Consistency fix: use the BUILD_FILENAME constant declared above instead of
# repeating the 'build.ninja' literal here and in the bootstrap build edge.
ninja = ninja_syntax.Writer(open(BUILD_FILENAME, 'w'))
# Variables
ninja.variable('ninja_required_version', '1.3')
ninja.variable('ar', 'ar')
ninja.variable('cxx', compiler)
ninja.variable('cxxflags', flags(cxxflags + include + libdirs + depends))
ninja.variable('ldflags', flags(ldflags))
ninja.newline()
# Rules: regenerating the ninja file re-runs this script with its original
# arguments; compile/link use the variables above with gcc-style depfiles.
ninja.rule('bootstrap', command = ' '.join(['python'] + sys.argv), generator = True)
ninja.rule('compile', command = '$cxx -MMD -MF $out.d -c $cxxflags $in -o $out',
           deps = 'gcc', depfile = '$out.d',
           description = 'Compiling $in to $out')
ninja.rule('link', command = '$cxx $cxxflags $in -o $out $ldflags', description = 'Creating $out')
ninja.rule('ar', command = 'rm -f $out && $ar crs $out $in', description = 'AR $out')
ninja.newline()
# Builds: one compile edge per source file, then a single link edge.
ninja.build(BUILD_FILENAME, 'bootstrap', implicit = 'bootstrap.py')
testobjs = []
for f in files_from('src/', '**.cpp'):
    obj = object_file(f)
    testobjs.append(obj)
    ninja.build(obj, 'compile', inputs = f)
ninja.newline()
ninja.build('./bin/sdl_test', 'link', inputs = testobjs)
| mit |
lokirius/python-for-android | python3-alpha/python3-src/Lib/lib2to3/fixes/fix_filter.py | 164 | 2102 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes filter(F, X) into list(filter(F, X)).
We avoid the transformation if the filter() call is directly contained
in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or
for V in <>:.
NOTE: This is still not correct if the original code was depending on
filter(F, X) to return a string if X is a string and a tuple if X is a
tuple. That would require type inference, which we don't do. Let
Python 2.6 figure it out.
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ListComp, in_special_context
class FixFilter(fixer_base.ConditionalFix):
    """lib2to3 fixer that rewrites Python 2 filter() calls for Python 3,
    wrapping them in list() or turning them into list comprehensions,
    unless the call already appears in an iteration context."""
    BM_compatible = True
    # Three alternatives, tried in order:
    #   1. filter(lambda fp: xp, it)  -> captured as filter_lambda
    #   2. filter(None, seq)          -> captured as none/seq
    #   3. any other filter(...) call -> captured as args
    PATTERN = """
    filter_lambda=power<
        'filter'
        trailer<
            '('
            arglist<
                lambdef< 'lambda'
                         (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
                >
                ','
                it=any
            >
            ')'
        >
    >
    |
    power<
        'filter'
        trailer< '(' arglist< none='None' ',' seq=any > ')' >
    >
    |
    power<
        'filter'
        args=trailer< '(' [any] ')' >
    >
    """
    # Skip the fix entirely when the module imports future_builtins.filter.
    skip_on = "future_builtins.filter"
    def transform(self, node, results):
        """Return the replacement node for a matched filter() call, or None."""
        if self.should_skip(node):
            return
        if "filter_lambda" in results:
            # filter(lambda fp: xp, it) -> [fp for fp in it if xp]
            new = ListComp(results.get("fp").clone(),
                           results.get("fp").clone(),
                           results.get("it").clone(),
                           results.get("xp").clone())
        elif "none" in results:
            # filter(None, seq) -> [_f for _f in seq if _f]
            new = ListComp(Name("_f"),
                           Name("_f"),
                           results["seq"].clone(),
                           Name("_f"))
        else:
            # Generic call: leave it alone inside iter()/list()/for-loops etc.,
            # otherwise wrap the whole call in list(...).
            if in_special_context(node):
                return None
            new = node.clone()
            new.prefix = ""
            new = Call(Name("list"), [new])
        # Preserve the original leading whitespace/comments.
        new.prefix = node.prefix
        return new
| apache-2.0 |
DJTobias/Cherry-Autonomous-Racecar | car/scripts/pyfestival.py | 2 | 10003 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2008 John Paulett
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Python bindings for The Festival Speech Synthesis System.
http://code.google.com/p/pyfestival/
http://www.cstr.ed.ac.uk/projects/festival/
"""
__version__ = "0.2.0"
__all__ = [
'Festival', 'FestivalServer', 'FestivalClient', 'say'
]
import os
import signal
import subprocess
import socket
class Festival(object):
    """High-level interface to the Festival speech synthesis tools.

    Supports two modes: one-shot (each say() spawns its own `festival --tts`
    process) and server mode (open() keeps a FestivalServer alive between
    calls until close() is invoked).
    """
    def __init__(self, port=None, language=None, heap=None):
        # Server handle; None means one-shot mode.
        self._server = None
        self._festival_bin = "festival"
        self._text2wave_bin = "text2wave"
        self._port = port
        self._language = language
        # NOTE(review): `heap` is accepted but discarded, and port/language
        # are stored but never forwarded to the binaries -- confirm intended.
    def open(self):
        """Opens a connection to the server.
        Must be called before say() when using Festival in server mode.
        Server mode creates extra overhead when opening the connection,
        but is beneficial when making multiple calls to the festival engine.
        """
        if self._server is not None:
            # if an instance of the server already exists, close it
            # before opening a new one
            self.close()
        self._server = FestivalServer()
        # NOTE(review): FestivalServer.__init__ already calls start(), so
        # this spawns the server a second time -- preserved as-is.
        self._server.start()
    def close(self):
        """Stops the server and closes the connection.
        Should be called when finished using the program in order to free
        the port the server is running on.
        """
        if self._server is not None:
            self._server.stop()
            self._server = None
    def say(self, text):
        """Orders Festival to speak the text provided.
        Uses the server connection when open() was called beforehand,
        otherwise spawns a fresh festival process for this one call.
        """
        if self._server is not None:
            # speak to the server via sockets
            self._say_server(text)
        else:
            # no server, so use a single instance of festival
            self._say_single(text)
    def say_file(self, filename):
        """Speaks the contents of a file.
        If there is an issue opening or reading the file, nothing will
        be spoken.
        """
        text = self._read_file(filename)
        if text is not None:
            self.say(text)
    def wave(self, text=None, source=None, dest=None,
             wave_type=None, frequency=None, scale=None):
        """Uses Festival's text2wave program to generate a wave file.

        Must be given either `text` or `source` (if both are given, `text`
        wins). When `dest` is given the wav is written there and None is
        returned.

        Options:
            text      - the text to be spoken in the wav
            source    - the file to be read in
            dest      - the output file
            wave_type - the output waveform type (alaw, ulaw, snd, aiff,
                        riff, nist, ...); defaults to the program's default
            frequency - the output frequency; defaults to the program's default
            scale     - the volume factor; defaults to the program's default
        """
        args = [self._text2wave_bin]
        if text is None:
            # check to make sure something will act as input
            if source is None:
                raise TooFewArgumentsError("Need an input value.")
            # have the program read the source file
            args.append(source)
        else:
            # tts mode: text arrives on stdin
            args.append("-")
        # BUG FIX: options and their values are now separate argv elements
        # ("-o", dest) instead of one fused string ("-o dest"), so this list
        # would work if passed to subprocess without a shell.
        if dest is not None:
            args.extend(["-o", dest])
        if wave_type is not None:
            # BUG FIX: this previously referenced the misspelled name
            # `wave_tye`, raising NameError whenever wave_type was supplied.
            args.extend(["-mode", wave_type])
        if frequency is not None:
            args.extend(["-F", str(frequency)])
        if scale is not None:
            args.extend(["-scale", str(scale)])
        if dest is not None and text is not None:
            # SECURITY NOTE: `text` is interpolated into a shell command; do
            # not pass untrusted input here. Preserved from the original.
            subprocess.call("echo \"" + text + "\" | " + self._text2wave_bin + " -o " + dest, shell=True)
        # Only return a value when no destination file was given.
        if dest is None:
            # BUG FIX: the original returned the undefined name `stdout`
            # here (the Popen capture code was commented out), which raised
            # NameError. TODO(review): implement stdout capture via
            # subprocess if callers need the raw wav bytes.
            return None
        else:
            return None
    def version(self):
        """Returns the version of Festival and of PyFestival."""
        args = [self._festival_bin, "--version"]
        p = subprocess.Popen(args,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             close_fds=True)
        stdout, stderr = p.communicate()
        # NOTE(review): on Python 3 communicate() yields bytes, so the string
        # concatenation below assumes Python 2 (or text-mode pipes) -- confirm.
        stdout += "\n"
        stdout += "PyFestival version: " + __version__
        return stdout
    def _save_file(self, filename, contents, mode="w"):
        """Saves the contents to the filename.
        Mode indicates the mode with which the file should be opened
        ('w','wb','a', or 'ab'). On failure a message is printed and the
        error is deliberately swallowed (best-effort semantics).
        """
        try:
            # `with` guarantees the handle is closed even if write() fails.
            with open(filename, mode) as f:
                f.write(contents)
        except Exception:
            print("Exception: " + filename + " could not be saved.")
    def _read_file(self, filename, mode="r"):
        """Reads the contents of a file.
        Returns the contents, or None if the file could not be read.
        The mode should be 'r' or 'rb'.
        """
        contents = None
        try:
            # assumes the file fits into memory
            with open(filename, mode) as f:
                contents = f.read()
        except Exception:
            print("Exception: " + filename + " could not be read.")
        return contents
    def _say_server(self, text):
        """Uses the Festival server to speak the text.
        A connection to the server must be open.
        """
        if self._server is None:
            raise ServerError("No server started")
        # TODO(review): actually transmitting `text` to the server (e.g. via
        # FestivalClient) is not implemented; this is currently a no-op when
        # a server exists, matching the original behaviour.
    def _say_single(self, text):
        """Uses Festival tts mode and the command line.
        Does not bother using the server connection."""
        args = [self._festival_bin, "--tts", "-"]
        p = subprocess.Popen(args,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             close_fds=True)
        stdout, stderr = p.communicate(text)
class FestivalServer(object):
    """Manages a `festival --server` subprocess."""
    def __init__(self):
        # initialize the process handle before anything can fail
        self._process = None
        # BUG FIX: start() reads self._festival_bin, but the assignment only
        # existed as a commented-out line (see the old TODO by dellanar04),
        # so constructing a FestivalServer raised AttributeError.
        self._festival_bin = "festival"
        try:
            self.start()
        except ServerError:
            # The port is already bound, i.e. a server is already running;
            # treat that as success.
            pass
    def start(self):
        """Spawn `festival --server`; raise ServerError if the bind fails.

        NOTE(review): communicate() waits for the child to exit, so this call
        blocks until the server terminates -- confirm intended behaviour.
        On Python 3 stderr is bytes, so the comparison below never matches;
        decode it if this module is ported.
        """
        args = [self._festival_bin, "--server"]
        self._process = subprocess.Popen(args,
                                         stdin=subprocess.PIPE,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE,
                                         close_fds=True)
        stdout, stderr = self._process.communicate()
        if stderr.rstrip() == "socket: bind failed":
            raise ServerError(stderr.rstrip())
    def stop(self):
        """Kill the running festival server, if any."""
        # BUG FIX: the original killed self._pid, which is never assigned
        # (it only existed in commented-out spawnlp code) and so raised
        # AttributeError; use the tracked subprocess pid instead.
        pid = self.get_pid()
        if pid is not None:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError:
                print("Error killing the festival server")
        else:
            print("No festival server process to stop")
    def restart(self):
        """Stop the server and start a fresh one."""
        self.stop()
        self.start()
    def get_pid(self):
        """Return the server subprocess pid, or None if none was started."""
        if self._process is not None:
            return self._process.pid
        return None
class FestivalClient(object):
    """Minimal TCP client for a running festival server (default port 1314)."""
    def __init__(self, port=1314, host="localhost"):
        self._host = host
        self._port = port
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    def open(self):
        """Connect the socket to the festival server."""
        self._sock.connect((self._host, self._port))
    def close(self):
        """Close the connection to the server."""
        self._sock.close()
    def send(self, cmd):
        """Send a raw command to the server.

        BUG FIX: Python 3 sockets only accept bytes, so text commands are
        encoded first; sendall() is used so partial writes are retried.
        """
        if isinstance(cmd, str):
            cmd = cmd.encode("utf-8")
        self._sock.sendall(cmd)
    def recv(self, bufsize=4096):
        """Receive up to bufsize bytes from the server and return them.

        BUG FIX: the original called self._sock.recv() without the required
        buffer size (a TypeError) and then discarded the data by returning
        None unconditionally.
        """
        data = self._sock.recv(bufsize)
        return data
    def say(self, text):
        """Ask the server to speak `text`.

        BUG FIX: the Scheme command previously lacked its closing
        parenthesis ('(SayText "%s"'), so festival would never evaluate it.
        """
        self.send('(SayText "%s")' % text)
class TooFewArgumentsError(Exception):
    """Raised when an operation is invoked without any usable input."""
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return "%r" % (self.value,)
class ServerError(Exception):
    """Raised when the festival server cannot be started or reached."""
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return "{0!r}".format(self.value)
def say(text):
    """Quick access for the impatient.
    Speaks the provided text.
    """
    # One throw-away engine instance per call keeps this helper stateless.
    Festival().say(text)
| mit |
Neural-Network/TicTacToe | examples/supervised/test_network_read_write/jpq2layersWriter.py | 26 | 3987 | from __future__ import print_function
from pybrain.structure import FeedForwardNetwork
from pybrain.structure import LinearLayer, SigmoidLayer
from pybrain.structure import BiasUnit,TanhLayer
from pybrain.structure import FullConnection
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer, RPropMinusTrainer
from pybrain.tools.validation import ModuleValidator,Validator
from pybrain.utilities import percentError
from pybrain.tools.customxml import NetworkWriter
import numpy
import pylab
import os
def myplot(trns, ctrns, tsts=None, ctsts=None, iter=0):
    """Plot training targets/outputs (and optionally the test curve) and save
    the figure as plot/jpq2layers_plot<iter>.

    Args:
        trns:  training dataset with 'input' and 'target' fields
        ctrns: network outputs for the training inputs
        tsts:  optional test dataset with 'input' and 'target' fields
        ctsts: optional network outputs for the test inputs
        iter:  iteration number used in the saved filename
    """
    plotdir = os.path.join(os.getcwd(), 'plot')
    pylab.clf()
    # Probe whether usable test data was supplied. BUG FIX: the original
    # used bare `except:` clauses, which also swallow KeyboardInterrupt and
    # SystemExit; only the probe's own failures are caught now (no object /
    # no len() -> TypeError, len() <= 1 -> AssertionError).
    # NOTE(review): this assert-based probe is disabled under `python -O`.
    try:
        assert len(tsts) > 1
        tstsplot = True
    except (TypeError, AssertionError):
        tstsplot = False
    try:
        assert len(ctsts) > 1
        ctstsplot = True
    except (TypeError, AssertionError):
        ctstsplot = False
    if tstsplot:
        pylab.plot(tsts['input'], tsts['target'], c='b')
    pylab.scatter(trns['input'], trns['target'], c='r')
    pylab.scatter(trns['input'], ctrns, c='y')
    if tstsplot and ctstsplot:
        pylab.plot(tsts['input'], ctsts, c='g')
    pylab.xlabel('x')
    pylab.ylabel('y')
    # `nneuron` is a module-level constant defined after this function but
    # before the first call.
    pylab.title('Neuron Number:' + str(nneuron))
    pylab.grid(True)
    plotname = os.path.join(plotdir, ('jpq2layers_plot' + str(iter)))
    pylab.savefig(plotname)
# set-up the neural network
# Topology: 1 linear input -> `nneuron` sigmoid hidden units -> 1 linear
# output, with dedicated bias units feeding the hidden and output layers.
nneuron = 5
mom = 0.98
netname="LSL-"+str(nneuron)+"-"+str(mom)
mv=ModuleValidator()
v = Validator()
n=FeedForwardNetwork(name=netname)
inLayer = LinearLayer(1,name='in')
hiddenLayer = SigmoidLayer(nneuron,name='hidden0')
outLayer = LinearLayer(1,name='out')
biasinUnit = BiasUnit(name="bhidden0")
biasoutUnit = BiasUnit(name="bout")
n.addInputModule(inLayer)
n.addModule(hiddenLayer)
n.addModule(biasinUnit)
n.addModule(biasoutUnit)
n.addOutputModule(outLayer)
in_to_hidden = FullConnection(inLayer,hiddenLayer)
bias_to_hidden = FullConnection(biasinUnit,hiddenLayer)
bias_to_out = FullConnection(biasoutUnit,outLayer)
hidden_to_out = FullConnection(hiddenLayer,outLayer)
n.addConnection(in_to_hidden)
n.addConnection(bias_to_hidden)
n.addConnection(bias_to_out)
n.addConnection(hidden_to_out)
# Finalize the topology and (re)initialize the weights.
n.sortModules()
n.reset()
# read the initial weight values from myparam2.txt; on the first run the
# randomly initialized weights are written out instead, making later runs
# start from the same parameters.
filetoopen = os.path.join(os.getcwd(),'myparam2.txt')
if os.path.isfile(filetoopen):
    myfile = open('myparam2.txt','r')
    c=[]
    for line in myfile:
        c.append(float(line))
    n._setParameters(c)
else:
    myfile = open('myparam2.txt','w')
    for i in n.params:
        myfile.write(str(i)+'\n')
    myfile.close()
# activate the neural network on a single sample
# (presumably a smoke test of the freshly loaded weights -- the output is
# not checked; TODO confirm this is intentional)
act = SupervisedDataSet(1,1)
act.addSample((0.2,),(0.880422606518061,))
n.activateOnDataset(act)
# create the test DataSet
# x in [0, 1] sampled every 0.01; target is a sine shifted into (0.1, 0.9).
x = numpy.arange(0.0, 1.0+0.01, 0.01)
s = 0.5+0.4*numpy.sin(2*numpy.pi*x)
tsts = SupervisedDataSet(1,1)
tsts.setField('input',x.reshape(len(x),1))
tsts.setField('target',s.reshape(len(s),1))
# read the train DataSet from file
trndata = SupervisedDataSet.loadFromFile(os.path.join(os.getcwd(),'trndata'))
# create the trainer
t = BackpropTrainer(n, learningrate = 0.01 ,
                    momentum = mom)
# train the neural network from the train DataSet: up to 25 rounds of 1000
# backprop epochs each, plotting progress after every round.
# (the loop variable shadows the builtin ``iter``)
cterrori=1.0
print("trainer momentum:"+str(mom))
for iter in range(25):
    t.trainOnDataset(trndata, 1000)
    ctrndata = mv.calculateModuleOutput(n,trndata)
    cterr = v.MSE(ctrndata,trndata['target'])
    relerr = abs(cterr-cterrori)
    cterrori = cterr
    print('iteration:',iter+1,'MSE error:',cterr)
    myplot(trndata,ctrndata,iter=iter+1)
    # Stop early once the error is tiny or has stopped improving.
    if cterr < 1.e-5 or relerr < 1.e-7:
        break
# write the network using xml file
myneuralnet = os.path.join(os.getcwd(),'myneuralnet.xml')
if os.path.isfile(myneuralnet):
    NetworkWriter.appendToFile(n,myneuralnet)
else:
    NetworkWriter.writeToFile(n,myneuralnet)
# calculate the test DataSet based on the trained Neural Network
ctsts = mv.calculateModuleOutput(n,tsts)
tserr = v.MSE(ctsts,tsts['target'])
print('MSE error on TSTS:',tserr)
myplot(trndata,ctrndata,tsts,ctsts)
pylab.show()
| bsd-3-clause |
lpirl/ansible | contrib/inventory/nova.py | 131 | 7030 | #!/usr/bin/env python
# (c) 2012, Marco Vito Moscaritolo <marco@agavee.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# WARNING: This file is deprecated. New work should focus on the openstack.py
# inventory module, which properly handles multiple clouds as well as keystone
# v3 and keystone auth plugins
import sys
import re
import os
import ConfigParser
from novaclient import client as nova_client
from six import iteritems
try:
import json
except ImportError:
import simplejson as json
sys.stderr.write("WARNING: this inventory module is deprecated. please migrate usage to openstack.py\n")
###################################################
# executed with no parameters, return the list of
# all groups and hosts
# Config file search order: ./nova.ini, then $ANSIBLE_CONFIG (falling back
# to ~/nova.ini), then the system-wide /etc/ansible/nova.ini.
NOVA_CONFIG_FILES = [os.getcwd() + "/nova.ini",
                     os.path.expanduser(os.environ.get('ANSIBLE_CONFIG', "~/nova.ini")),
                     "/etc/ansible/nova.ini"]
# Defaults injected into every section of the parsed ini file.
NOVA_DEFAULTS = {
    'auth_system': None,
    'region_name': None,
    'service_type': 'compute',
}
def nova_load_config_file():
    """Parse the first nova.ini found among NOVA_CONFIG_FILES.

    Returns the populated SafeConfigParser (seeded with NOVA_DEFAULTS),
    or None when none of the candidate paths exists.
    """
    parser = ConfigParser.SafeConfigParser(NOVA_DEFAULTS)
    for candidate in NOVA_CONFIG_FILES:
        if not os.path.exists(candidate):
            continue
        parser.read(candidate)
        return parser
    return None
def get_fallback(config, value, section="openstack"):
    """
    Look up option *value* in *section* of *config*.

    Returns the option's value, or False when the option is not defined
    in the configuration.
    """
    try:
        result = config.get(section, value)
    except ConfigParser.NoOptionError:
        result = False
    return result
def push(data, key, element):
    """
    Append *element* to the list stored at data[key], creating the list
    on first use.  A falsy key or element makes this a no-op.
    """
    if not key or not element:
        return
    data.setdefault(key, []).append(element)
def to_safe(word):
    '''
    Sanitize *word* for use as an Ansible group name: every character
    outside [A-Za-z0-9-] is replaced with an underscore.
    '''
    unsafe = re.compile(r"[^A-Za-z0-9\-]")
    return unsafe.sub("_", word)
def get_ips(server, access_ip=True):
    """Collect a server's IP addresses.

    When *access_ip* is true (default) a single preferred address is
    returned: the server's ``accessIPv4`` if set, otherwise the first
    floating IP (unless the module-level ``prefer_private`` flag is set
    and a fixed IP exists), otherwise the first fixed IP, or ``None``.

    When *access_ip* is false, a list of every known address is
    returned: ``accessIPv4`` first, then fixed ("private") IPs, then
    floating ("public") IPs.
    """
    private = []
    public = []
    address_list = []
    # Iterate through each server's network(s); classify each address by
    # its OS-EXT-IPS:type ('fixed' -> private, 'floating' -> public).
    addresses = getattr(server, 'addresses', {})
    if len(addresses) > 0:
        # FIX: dict.itervalues() is Python 2 only; .values() behaves the
        # same here on both Python 2 and 3 (this file already uses
        # six.iteritems elsewhere, so it is meant to be portable).
        for network in addresses.values():
            for address in network:
                if address.get('OS-EXT-IPS:type', False) == 'fixed':
                    private.append(address['addr'])
                elif address.get('OS-EXT-IPS:type', False) == 'floating':
                    public.append(address['addr'])
    if not access_ip:
        address_list.append(server.accessIPv4)
        address_list.extend(private)
        address_list.extend(public)
        return address_list
    access_ip = None
    # Preferred single address: accessIPv4 wins, then a public IP unless
    # the user asked to prefer private addresses, then a private IP.
    if server.accessIPv4:
        access_ip = server.accessIPv4
    if (not access_ip) and public and not (private and prefer_private):
        access_ip = public[0]
    if private and not access_ip:
        access_ip = private[0]
    return access_ip
def get_metadata(server):
    """Return all of *server*'s attributes as a host-variable dict.

    Keys are sanitized, lower-cased and prefixed with 'os_'; the nova
    manager handle is excluded from the result.
    """
    # The return value of this call was discarded in the original code as
    # well; kept verbatim to preserve behavior exactly.
    get_ips(server, False)
    results = {}
    for attribute in vars(server):
        value = getattr(server, attribute)
        # Generate a sanitized 'os_'-prefixed key.
        safe_key = 'os_' + re.sub(r"[^A-Za-z0-9\-]", "_", attribute).lower()
        # Everything except the manager class becomes a host var.
        # TODO: maybe use value.__class__ or similar inside of key_name
        if safe_key != 'os_manager':
            results[safe_key] = value
    return results
config = nova_load_config_file()
if not config:
    sys.exit('Unable to find configfile in %s' % ', '.join(NOVA_CONFIG_FILES))
# Load up connections info based on config and then environment
# variables (the ini file wins; OS_* variables are the fallback).
username = (get_fallback(config, 'username') or
            os.environ.get('OS_USERNAME', None))
api_key = (get_fallback(config, 'api_key') or
           os.environ.get('OS_PASSWORD', None))
auth_url = (get_fallback(config, 'auth_url') or
            os.environ.get('OS_AUTH_URL', None))
project_id = (get_fallback(config, 'project_id') or
              os.environ.get('OS_TENANT_NAME', None))
region_name = (get_fallback(config, 'region_name') or
               os.environ.get('OS_REGION_NAME', None))
auth_system = (get_fallback(config, 'auth_system') or
               os.environ.get('OS_AUTH_SYSTEM', None))
# Determine what type of IP is preferred to return
# (module-level flag consumed by get_ips() above)
prefer_private = False
try:
    prefer_private = config.getboolean('openstack', 'prefer_private')
except ConfigParser.NoOptionError:
    pass
client = nova_client.Client(
    version=config.get('openstack', 'version'),
    username=username,
    api_key=api_key,
    auth_url=auth_url,
    region_name=region_name,
    project_id=project_id,
    auth_system=auth_system,
    service_type=config.get('openstack', 'service_type'),
)
# Default or added list option: emit the full inventory as JSON.
if (len(sys.argv) == 2 and sys.argv[1] == '--list') or len(sys.argv) == 1:
    groups = {'_meta': {'hostvars': {}}}
    # Cycle on servers
    for server in client.servers.list():
        access_ip = get_ips(server)
        # Push to name group of 1
        push(groups, server.name, access_ip)
        # Run through each metadata item and add instance to it
        # (iteritems comes from six, so this works on Python 2 and 3)
        for key, value in iteritems(server.metadata):
            composed_key = to_safe('tag_{0}_{1}'.format(key, value))
            push(groups, composed_key, access_ip)
        # Do special handling of group for backwards compat
        # inventory groups
        group = server.metadata['group'] if 'group' in server.metadata else 'undefined'
        push(groups, group, access_ip)
        # Add vars to _meta key for performance optimization in
        # Ansible 1.3+
        groups['_meta']['hostvars'][access_ip] = get_metadata(server)
    # Return server list
    print(json.dumps(groups, sort_keys=True, indent=2))
    sys.exit(0)
#####################################################
# executed with a hostname as a parameter, return the
# variables for that host
elif len(sys.argv) == 3 and (sys.argv[1] == '--host'):
    results = {}
    ips = []
    for server in client.servers.list():
        if sys.argv[2] in (get_ips(server) or []):
            results = get_metadata(server)
    print(json.dumps(results, sort_keys=True, indent=2))
    sys.exit(0)
else:
    print("usage: --list ..OR.. --host <hostname>")
    sys.exit(1)
| gpl-3.0 |
alajfit/gulp_test | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSUserFile.py | 2710 | 5094 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
  """Visual Studio XML user file writer."""

  def __init__(self, user_file_path, version, name):
    """Initializes the user file.

    Args:
      user_file_path: Path to the user file.
      version: Version info.
      name: Name of the user file.
    """
    self.user_file_path = user_file_path
    self.version = version
    self.name = name
    # Maps configuration name -> easy_xml specification list.
    self.configurations = {}

  def AddConfig(self, name):
    """Adds a configuration to the project.

    Args:
      name: Configuration name.
    """
    self.configurations[name] = ['Configuration', {'Name': name}]

  def AddDebugSettings(self, config_name, command, environment=None,
                       working_directory=""):
    """Adds a DebugSettings node to the user file for a particular config.

    Args:
      config_name: Name of the configuration the settings belong to.
      command: command line to run. First element in the list is the
          executable. All elements of the command will be quoted if
          necessary.
      environment: optional dict of environment variables for the
          debuggee.  (Default changed from a mutable ``{}`` literal to
          None; behavior is identical since the dict was never mutated,
          but the mutable-default pitfall is gone.)
      working_directory: other files which may trigger the rule. (optional)
    """
    command = _QuoteWin32CommandLineArgs(command)

    abs_command = _FindCommandInPath(command[0])

    if environment and isinstance(environment, dict):
      # FIX: .items() instead of the Python-2-only .iteritems().
      env_list = ['%s="%s"' % (key, val)
                  for (key, val) in environment.items()]
      environment = ' '.join(env_list)
    else:
      environment = ''

    n_cmd = ['DebugSettings',
             {'Command': abs_command,
              'WorkingDirectory': working_directory,
              'CommandArguments': " ".join(command[1:]),
              'RemoteMachine': socket.gethostname(),
              'Environment': environment,
              'EnvironmentMerge': 'true',
              # Currently these are all "dummy" values that we're just setting
              # in the default manner that MSVS does it. We could use some of
              # these to add additional capabilities, I suppose, but they might
              # not have parity with other platforms then.
              'Attach': 'false',
              'DebuggerType': '3',  # 'auto' debugger
              'Remote': '1',
              'RemoteCommand': '',
              'HttpUrl': '',
              'PDBPath': '',
              'SQLDebugging': '',
              'DebuggerFlavor': '0',
              'MPIRunCommand': '',
              'MPIRunArguments': '',
              'MPIRunWorkingDirectory': '',
              'ApplicationCommand': '',
              'ApplicationArguments': '',
              'ShimCommand': '',
              'MPIAcceptMode': '',
              'MPIAcceptFilter': ''
             }]

    # Find the config, and add it if it doesn't exist.
    if config_name not in self.configurations:
      self.AddConfig(config_name)

    # Add the DebugSettings onto the appropriate config.
    self.configurations[config_name].append(n_cmd)

  def WriteIfChanged(self):
    """Writes the user file."""
    configs = ['Configurations']
    # sorted(...) keeps the XML deterministic; .items() instead of the
    # Python-2-only .iteritems().
    for config, spec in sorted(self.configurations.items()):
      configs.append(spec)

    content = ['VisualStudioUserFile',
               {'Version': self.version.ProjectVersion(),
                'Name': self.name
               },
               configs]
    easy_xml.WriteXmlIfChanged(content, self.user_file_path,
                               encoding="Windows-1252")
| gpl-2.0 |
carthach/essentia | test/src/unittests/io/test_audioonsetsmarker_streaming.py | 1 | 3046 | #!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from essentia import *
from essentia.streaming import AudioOnsetsMarker, MonoWriter
from numpy import pi, sin
import os
# Scratch WAV file written and removed by the tests below.
# (The name shadows the Python 2 builtin ``file``; kept as-is.)
file = "foo.wav"
class TestAudioOnsetsMarker_Streaming(TestCase):
    """Tests for the streaming AudioOnsetsMarker algorithm."""
    def testRegression(self):
        """Mark onsets on a sine tone, reload the written file and verify
        that bursts are found at the configured onset times."""
        sr = 44100
        inputSize = sr # 1 second of audio
        input = [0.5*sin(2.0*pi*440.0*i/inputSize) for i in range(inputSize)]
        onsets = [0.15, 0.5, 0.9]
        # Wire the streaming network: source -> onsets marker -> wav writer.
        signal = VectorInput(input)
        marker = AudioOnsetsMarker(sampleRate = sr, onsets=onsets)
        writer = MonoWriter(filename=file)
        signal.data >> marker.signal;
        marker.signal>> writer.audio;
        run(signal);
        # Reload the marked file and compute the residual vs. the input.
        left = MonoLoader(filename = file, downmix='left', sampleRate=sr)()
        diff = zeros(inputSize)
        for i in range(inputSize):
            # due to AudioOnsetsMarker downmixing by 0.5,
            # input[i] must be divided accordingly:
            diff[i] = left[i] - input[i]/2
        os.remove(file)
        # Scan the residual for bursts; each burst marks one onset.
        onsetWidth = 0.04*sr
        epsilon = 1e-3
        found = []
        j = 0
        i = 0
        while i < inputSize:
            if diff[i] > epsilon:
                found.append(float(i)/float(sr))
                j += 1
                # Jump past the remainder of this onset burst.
                i = int(i + onsetWidth)
            else: i+=1
        self.assertAlmostEqualVector(found, onsets, 1.5e-3)
    def testEmptySignal(self):
        """An empty input signal must not produce an output file."""
        sr = 44100
        signal = VectorInput([])
        onsets = [0.15, 0.5, 0.9]
        marker = AudioOnsetsMarker(sampleRate = sr, onsets=onsets)
        writer = MonoWriter(filename=file)
        signal.data >> marker.signal
        marker.signal >> writer.audio
        run(signal)
        self.assertTrue( not os.path.exists(file) )
    def testInvalidParam(self):
        """Invalid configuration values must be rejected."""
        self.assertConfigureFails(AudioOnsetsMarker(), { 'sampleRate' : 0 })
        self.assertConfigureFails(AudioOnsetsMarker(), { 'type' : 'burst' })
        # These onset lists contain negative or out-of-order values, so
        # configuration should fail for each of them.
        self.assertConfigureFails(AudioOnsetsMarker(), { 'onsets' : [-1, -2, 9]})
        self.assertConfigureFails(AudioOnsetsMarker(), { 'onsets' : [1, -2, 9]})
        self.assertConfigureFails(AudioOnsetsMarker(), { 'onsets' : [2, 0, 9]})
# Collect all tests in the case; run them when invoked as a script.
suite = allTests(TestAudioOnsetsMarker_Streaming)
if __name__ == '__main__':
    TextTestRunner(verbosity=2).run(suite)
| agpl-3.0 |
VizGrimoire/GrimoireLib | vizgrimoire/analysis/quarters_data.py | 4 | 3493 | #!/usr/bin/env python
# Copyright (C) 2014 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#
# Authors:
# Alvaro del Castillor <acs@bitergia.com>
#
""" People and Companies evolution per quarters """
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from vizgrimoire.analysis.analyses import Analyses
from vizgrimoire.metrics.query_builder import DSQuery
from vizgrimoire.metrics.metrics_filter import MetricFilters
from vizgrimoire.GrimoireUtils import createJSON
from vizgrimoire.SCR import SCR
class QuartersData(Analyses):
    """Per-quarter evolution of people and organizations for the SCR
    (source code review) data source; results are written as JSON files."""
    id = "quarters_data"
    name = "Quarters Data"
    desc = "Metrics by Quarter"
    def create_report(self, data_source, destdir):
        # Only the source-code-review data source is supported.
        if data_source != SCR: return None
        self.result(data_source, destdir)
    def result(self, data_source, destdir = None):
        """Write the all-time and per-quarter people/organization JSON
        reports into *destdir* (no-op for non-SCR sources)."""
        if data_source != SCR or destdir is None: return None
        period = self.filters.period
        startdate = self.filters.startdate
        enddate = self.filters.enddate
        idb = self.db.identities_db
        bots = SCR.get_bots()
        # people = self.db.GetPeopleList("'"+startdate+"'", "'"+enddate+"'", SCR.get_bots())
        people = self.db.GetPeopleList(startdate, enddate, bots)
        createJSON(people, destdir+"/scr-people-all.json", False)
        organizations = self.db.GetCompaniesName(startdate, enddate)
        createJSON(organizations, destdir+"/scr-organizations-all.json", False)
        # NOTE(review): the quarter index below uses (month-1)%3 + 1, which
        # cycles monthly; (month-1)//3 + 1 looks intended -- confirm.
        start = datetime.strptime(startdate.replace("'",""), "%Y-%m-%d")
        start_quarter = (start.month-1)%3 + 1
        end = datetime.strptime(enddate.replace("'",""), "%Y-%m-%d")
        end_quarter = (end.month-1)%3 + 1
        organizations_quarters = {}
        people_quarters = {}
        quarters = (end.year - start.year) * 4 + (end_quarter - start_quarter)
        for i in range(0, quarters+1):
            year = start.year
            quarter = (i%4)+1
            # NOTE(review): `quarter` is derived from the loop index alone,
            # which labels quarters correctly only when the start date falls
            # in Q1 -- verify against callers.
            # logging.info("Analyzing organizations and people quarter " + str(year) + " " + str(quarter))
            data = self.db.GetCompaniesQuarters(year, quarter)
            organizations_quarters[str(year)+" "+str(quarter)] = data
            data_people = self.db.GetPeopleQuarters(year, quarter, 25, bots)
            people_quarters[str(year)+" "+str(quarter)] = data_people
            # Advance to the next quarter; `year` follows the shifted date.
            start = start + relativedelta(months=3)
        createJSON(organizations_quarters, destdir+"/scr-organizations-quarters.json")
        createJSON(people_quarters, destdir+"/scr-people-quarters.json")
    def get_report_files(self, data_source = None):
        """Names of the JSON files produced by this analysis."""
        if data_source is not SCR: return []
        return ["scr-people-all.json",
                "scr-organizations-all.json",
                "scr-organizations-quarters.json",
                "scr-people-quarters.json"]
| gpl-3.0 |
dpwolfe/otucha_pre_rebase | src/external/gtest-1.7.0/test/gtest_color_test.py | 3259 | 4911 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# FIX: the original line read ``IS_WINDOWS = os.name = 'nt'`` -- a chained
# assignment that clobbered os.name for the whole process and made
# IS_WINDOWS the always-truthy string 'nt' on every platform, silently
# skipping the non-Windows assertions in the tests below.
IS_WINDOWS = os.name == 'nt'

COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
# Path to the compiled gtest_color_test_ binary exercised by the tests.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
  """Assign *value* to the environment variable *env_var*.

  Passing None removes the variable from the environment (a no-op when
  it is not currently set).
  """
  if value is None:
    os.environ.pop(env_var, None)
  else:
    os.environ[env_var] = value
def UsesColor(term, color_env_var, color_flag):
  """Run gtest_color_test_ under the given TERM / GTEST_COLOR /
  --gtest_color combination and report whether it used colored output."""
  SetEnvVar('TERM', term)
  SetEnvVar(COLOR_ENV_VAR, color_env_var)
  if color_flag is None:
    flag_args = []
  else:
    flag_args = ['--%s=%s' % (COLOR_FLAG, color_flag)]
  proc = gtest_test_utils.Subprocess([COMMAND] + flag_args)
  return not proc.exited or proc.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
  """End-to-end checks of Google Test's color-output decision, driven by
  TERM, the GTEST_COLOR env var and the --gtest_color flag.

  (self.assert_ is the deprecated unittest alias of assertTrue; kept to
  match the rest of the gtest python test suite.)
  """
  def testNoEnvVarNoFlag(self):
    """Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
    if not IS_WINDOWS:
      # Dumb / mono / unknown terminals must not get color by default.
      self.assert_(not UsesColor('dumb', None, None))
      self.assert_(not UsesColor('emacs', None, None))
      self.assert_(not UsesColor('xterm-mono', None, None))
      self.assert_(not UsesColor('unknown', None, None))
      self.assert_(not UsesColor(None, None, None))
    self.assert_(UsesColor('linux', None, None))
    self.assert_(UsesColor('cygwin', None, None))
    self.assert_(UsesColor('xterm', None, None))
    self.assert_(UsesColor('xterm-color', None, None))
    self.assert_(UsesColor('xterm-256color', None, None))
  def testFlagOnly(self):
    """Tests the case when there's --gtest_color but not GTEST_COLOR."""
    self.assert_(not UsesColor('dumb', None, 'no'))
    self.assert_(not UsesColor('xterm-color', None, 'no'))
    if not IS_WINDOWS:
      # 'auto' defers to the terminal's capabilities.
      self.assert_(not UsesColor('emacs', None, 'auto'))
      self.assert_(UsesColor('xterm', None, 'auto'))
    # 'yes' forces color even on a dumb terminal.
    self.assert_(UsesColor('dumb', None, 'yes'))
    self.assert_(UsesColor('xterm', None, 'yes'))
  def testEnvVarOnly(self):
    """Tests the case when there's GTEST_COLOR but not --gtest_color."""
    self.assert_(not UsesColor('dumb', 'no', None))
    self.assert_(not UsesColor('xterm-color', 'no', None))
    if not IS_WINDOWS:
      self.assert_(not UsesColor('dumb', 'auto', None))
      self.assert_(UsesColor('xterm-color', 'auto', None))
    self.assert_(UsesColor('dumb', 'yes', None))
    self.assert_(UsesColor('xterm-color', 'yes', None))
  def testEnvVarAndFlag(self):
    """Tests the case when there are both GTEST_COLOR and --gtest_color."""
    # The flag takes precedence over the environment variable.
    self.assert_(not UsesColor('xterm-color', 'no', 'no'))
    self.assert_(UsesColor('dumb', 'no', 'yes'))
    self.assert_(UsesColor('xterm-color', 'no', 'auto'))
  def testAliasesOfYesAndNo(self):
    """Tests using aliases in specifying --gtest_color."""
    self.assert_(UsesColor('dumb', None, 'true'))
    self.assert_(UsesColor('dumb', None, 'YES'))
    self.assert_(UsesColor('dumb', None, 'T'))
    self.assert_(UsesColor('dumb', None, '1'))
    self.assert_(not UsesColor('xterm', None, 'f'))
    self.assert_(not UsesColor('xterm', None, 'false'))
    self.assert_(not UsesColor('xterm', None, '0'))
    self.assert_(not UsesColor('xterm', None, 'unknown'))
# Run the suite through Google Test's python test driver when executed
# directly.
if __name__ == '__main__':
  gtest_test_utils.Main()
| mit |
AVOXI/b2bua | sippy/SipReferTo.py | 2 | 1535 | # Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.
# Copyright (c) 2006-2014 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from SipAddressHF import SipAddressHF
class SipReferTo(SipAddressHF):
    """'Refer-To' SIP header field; parsing and serialization are
    inherited from the SipAddressHF address-header base class."""
    # Full and compact header names recognized for this field.
    hf_names = ('refer-to', 'r')
| bsd-2-clause |
cchurch/ansible | lib/ansible/plugins/cliconf/exos.py | 31 | 9970 | #
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
cliconf: exos
short_description: Use exos cliconf to run command on Extreme EXOS platform
description:
- This exos plugin provides low level abstraction apis for
sending and receiving CLI commands from Extreme EXOS network devices.
version_added: "2.6"
"""
import re
import json
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.connection import ConnectionError
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.network.common.config import NetworkConfig, dumps
from ansible.plugins.cliconf import CliconfBase
class Cliconf(CliconfBase):
def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
diff = {}
device_operations = self.get_device_operations()
option_values = self.get_option_values()
if candidate is None and device_operations['supports_generate_diff']:
raise ValueError("candidate configuration is required to generate diff")
if diff_match not in option_values['diff_match']:
raise ValueError("'match' value %s in invalid, valid values are %s" % (diff_match, ', '.join(option_values['diff_match'])))
if diff_replace not in option_values['diff_replace']:
raise ValueError("'replace' value %s in invalid, valid values are %s" % (diff_replace, ', '.join(option_values['diff_replace'])))
# prepare candidate configuration
candidate_obj = NetworkConfig(indent=1)
candidate_obj.load(candidate)
if running and diff_match != 'none' and diff_replace != 'config':
# running configuration
running_obj = NetworkConfig(indent=1, contents=running, ignore_lines=diff_ignore_lines)
configdiffobjs = candidate_obj.difference(running_obj, path=path, match=diff_match, replace=diff_replace)
else:
configdiffobjs = candidate_obj.items
diff['config_diff'] = dumps(configdiffobjs, 'commands') if configdiffobjs else ''
return diff
def get_device_info(self):
device_info = {}
device_info['network_os'] = 'exos'
reply = self.run_commands({'command': 'show switch detail', 'output': 'text'})
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'ExtremeXOS version (\S+)', data)
if match:
device_info['network_os_version'] = match.group(1)
match = re.search(r'System Type: +(\S+)', data)
if match:
device_info['network_os_model'] = match.group(1)
match = re.search(r'SysName: +(\S+)', data)
if match:
device_info['network_os_hostname'] = match.group(1)
return device_info
def get_default_flag(self):
# The flag to modify the command to collect configuration with defaults
return 'detail'
def get_config(self, source='running', format='text', flags=None):
options_values = self.get_option_values()
if format not in options_values['format']:
raise ValueError("'format' value %s is invalid. Valid values are %s" % (format, ','.join(options_values['format'])))
lookup = {'running': 'show configuration', 'startup': 'debug cfgmgr show configuration file'}
if source not in lookup:
raise ValueError("fetching configuration from %s is not supported" % source)
cmd = {'command': lookup[source], 'output': 'text'}
if source == 'startup':
reply = self.run_commands({'command': 'show switch', 'format': 'text'})
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'Config Selected: +(\S+)\.cfg', data, re.MULTILINE)
if match:
cmd['command'] += match.group(1)
else:
# No Startup(/Selected) Config
return {}
cmd['command'] += ' '.join(to_list(flags))
cmd['command'] = cmd['command'].strip()
return self.run_commands(cmd)[0]
def edit_config(self, candidate=None, commit=True, replace=None, diff=False, comment=None):
resp = {}
operations = self.get_device_operations()
self.check_edit_config_capability(operations, candidate, commit, replace, comment)
results = []
requests = []
if commit:
for line in to_list(candidate):
if not isinstance(line, Mapping):
line = {'command': line}
results.append(self.send_command(**line))
requests.append(line['command'])
else:
raise ValueError('check mode is not supported')
resp['request'] = requests
resp['response'] = results
return resp
def get(self, command, prompt=None, answer=None, sendonly=False, output=None, newline=True, check_all=False):
if output:
command = self._get_command_with_output(command, output)
return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)
def run_commands(self, commands=None, check_rc=True):
if commands is None:
raise ValueError("'commands' value is required")
responses = list()
for cmd in to_list(commands):
if not isinstance(cmd, Mapping):
cmd = {'command': cmd}
output = cmd.pop('output', None)
if output:
cmd['command'] = self._get_command_with_output(cmd['command'], output)
try:
out = self.send_command(**cmd)
except AnsibleConnectionFailure as e:
if check_rc is True:
raise
out = getattr(e, 'err', e)
if out is not None:
try:
out = to_text(out, errors='surrogate_or_strict').strip()
except UnicodeError:
raise ConnectionError(message=u'Failed to decode output from %s: %s' % (cmd, to_text(out)))
if output and output == 'json':
try:
out = json.loads(out)
except ValueError:
raise ConnectionError('Response was not valid JSON, got {0}'.format(
to_text(out)
))
responses.append(out)
return responses
def get_device_operations(self):
return {
'supports_diff_replace': False, # identify if config should be merged or replaced is supported
'supports_commit': False, # identify if commit is supported by device or not
'supports_rollback': False, # identify if rollback is supported or not
'supports_defaults': True, # identify if fetching running config with default is supported
'supports_commit_comment': False, # identify if adding comment to commit is supported of not
'supports_onbox_diff': False, # identify if on box diff capability is supported or not
'supports_generate_diff': True, # identify if diff capability is supported within plugin
'supports_multiline_delimiter': False, # identify if multiline delimiter is supported within config
'supports_diff_match': True, # identify if match is supported
'supports_diff_ignore_lines': True, # identify if ignore line in diff is supported
'supports_config_replace': False, # identify if running config replace with candidate config is supported
'supports_admin': False, # identify if admin configure mode is supported or not
'supports_commit_label': False, # identify if commit label is supported or not
'supports_replace': False
}
def get_option_values(self):
    """Return the valid values for each cliconf plugin option."""
    # 'format' and 'output' accept the same two encodings.
    encodings = ['text', 'json']
    return {
        'format': list(encodings),
        'diff_match': ['line', 'strict', 'exact', 'none'],
        'diff_replace': ['line', 'block'],
        'output': list(encodings),
    }
def get_capabilities(self):
    """Return a JSON string describing this plugin's capabilities.

    Aggregates the base-class capabilities with the device operations,
    device info and option values supported by this platform.
    """
    result = super(Cliconf, self).get_capabilities()
    # BUG FIX: the rpc list advertised only the misspelled 'run_commmands'.
    # Advertise the correctly spelled name as well; the old misspelling is
    # kept so any existing caller that tests for it keeps working.
    result['rpc'] += ['run_commands', 'run_commmands', 'get_default_flag', 'get_diff']
    result['device_operations'] = self.get_device_operations()
    result['device_info'] = self.get_device_info()
    result.update(self.get_option_values())
    return json.dumps(result)
def _get_command_with_output(self, command, output):
if output not in self.get_option_values().get('output'):
raise ValueError("'output' value is %s is invalid. Valid values are %s" % (output, ','.join(self.get_option_values().get('output'))))
if output == 'json' and not command.startswith('run script cli2json.py'):
cmd = 'run script cli2json.py %s' % command
else:
cmd = command
return cmd
| gpl-3.0 |
apache/incubator-airflow | airflow/api_connexion/schemas/config_schema.py | 10 | 1667 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import List, NamedTuple
from marshmallow import Schema, fields
class ConfigOptionSchema(Schema):
    """Schema for one configuration option: a required key/value string pair."""
    key = fields.String(required=True)
    value = fields.String(required=True)
class ConfigOption(NamedTuple):
    """Immutable key/value pair representing a single config option."""
    key: str
    value: str
class ConfigSectionSchema(Schema):
    """Schema for a named config section and its list of options."""
    name = fields.String(required=True)
    options = fields.List(fields.Nested(ConfigOptionSchema))
class ConfigSection(NamedTuple):
    """A named config section holding its list of ConfigOption entries."""
    name: str
    options: List[ConfigOption]
class ConfigSchema(Schema):
    """Top-level schema: a configuration is a list of sections."""
    sections = fields.List(fields.Nested(ConfigSectionSchema))
class Config(NamedTuple):
    """Full configuration: the list of sections with their options."""
    sections: List[ConfigSection]
config_schema = ConfigSchema()
| apache-2.0 |
borzunov/cpmoptimize | tests/perfomance/arith_sum.py | 1 | 1024 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import tests_common as common
from cpmoptimize import cpmoptimize, xrange
# Parameters of the arithmetic progression being summed: first term and
# common difference.
start = 32
step = 43
def naive(count):
    """Sum `count` terms of the progression with an explicit loop.

    Written so cpmoptimize can rewrite the loop; `xrange` here is the
    cpmoptimize replacement imported above, not the builtin.
    """
    res = 0
    for elem in xrange(start, start + step * count, step):
        res += elem
    return res
def formula(count):
    """Closed-form arithmetic-series sum of `count` terms (no loop)."""
    # Sum of an arithmetic progression: (first + last) * count / 2,
    # where first + last == 2 * start + step * (count - 1).
    first_plus_last = start * 2 + step * (count - 1)
    return first_plus_last * count / 2
def pow10_wrapper(func):
    """Wrap `func` so its argument is interpreted as a power of ten."""
    def wrapped(exponent):
        return func(10 ** exponent)
    return wrapped
if __name__ == '__main__':
    # Benchmark 1: scale linearly in the number of summed elements,
    # comparing the (optimized) loop against the closed-form formula.
    common.run(
        'arith_sum', 'N elements',
        common.optimized(naive) + [
            ('formula', formula),
        ],
        [
            ('linear', None, common.linear_scale(600000, 5)),
        ],
        exec_compare=False, draw_plot=False,
    )
    # Benchmark 2: exponential scale -- each argument N means 10 ** N
    # elements, so only the cpmoptimized loop and the formula are feasible.
    common.run(
        'arith_sum', '10 ** N elements',
        [
            ('cpm', pow10_wrapper(cpmoptimize()(naive))),
            ('formula', pow10_wrapper(formula)),
        ],
        [
            ('exp', None, common.linear_scale(10000, 5)),
        ],
        exec_compare=False, draw_plot=False,
    )
| mit |
berendkleinhaneveld/VTK | ThirdParty/Twisted/twisted/conch/test/test_default.py | 42 | 6363 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.client.default}.
"""
# Conch's client support needs PyCrypto and pyasn1. When either is missing,
# defining `skip` at module level makes trial skip this whole test module
# instead of failing on import.
try:
    import Crypto.Cipher.DES3
    import pyasn1
except ImportError:
    skip = "PyCrypto and PyASN1 required for twisted.conch.client.default."
else:
    from twisted.conch.client.agent import SSHAgentClient
    from twisted.conch.client.default import SSHUserAuthClient
    from twisted.conch.client.options import ConchOptions
    from twisted.conch.ssh.keys import Key
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from twisted.conch.test import keydata
from twisted.test.proto_helpers import StringTransport
class SSHUserAuthClientTest(TestCase):
    """
    Tests for L{SSHUserAuthClient}.
    @type rsaPublic: L{Key}
    @ivar rsaPublic: A public RSA key.
    """
    def setUp(self):
        # Create an on-disk RSA keypair (id_rsa / id_rsa.pub) in a scratch
        # directory for the tests to load.
        self.rsaPublic = Key.fromString(keydata.publicRSA_openssh)
        self.tmpdir = FilePath(self.mktemp())
        self.tmpdir.makedirs()
        self.rsaFile = self.tmpdir.child('id_rsa')
        self.rsaFile.setContent(keydata.privateRSA_openssh)
        self.tmpdir.child('id_rsa.pub').setContent(keydata.publicRSA_openssh)
    def test_signDataWithAgent(self):
        """
        When connected to an agent, L{SSHUserAuthClient} can use it to
        request signatures of particular data with a particular L{Key}.
        """
        client = SSHUserAuthClient("user", ConchOptions(), None)
        agent = SSHAgentClient()
        transport = StringTransport()
        agent.makeConnection(transport)
        client.keyAgent = agent
        cleartext = "Sign here"
        client.signData(self.rsaPublic, cleartext)
        # Expected agent request bytes: overall length, request type byte,
        # the key blob, the data to sign, and a trailing flags word (0).
        self.assertEqual(
            transport.value(),
            "\x00\x00\x00\x8b\r\x00\x00\x00u" + self.rsaPublic.blob() +
            "\x00\x00\x00\t" + cleartext +
            "\x00\x00\x00\x00")
    def test_agentGetPublicKey(self):
        """
        L{SSHUserAuthClient} looks up public keys from the agent using the
        L{SSHAgentClient} class. That L{SSHAgentClient.getPublicKey} returns a
        L{Key} object with one of the public keys in the agent. If no more
        keys are present, it returns C{None}.
        """
        agent = SSHAgentClient()
        agent.blobs = [self.rsaPublic.blob()]
        key = agent.getPublicKey()
        self.assertEqual(key.isPublic(), True)
        self.assertEqual(key, self.rsaPublic)
        self.assertEqual(agent.getPublicKey(), None)
    def test_getPublicKeyFromFile(self):
        """
        L{SSHUserAuthClient.getPublicKey()} is able to get a public key from
        the first file described by its options' C{identitys} list, and return
        the corresponding public L{Key} object.
        """
        options = ConchOptions()
        options.identitys = [self.rsaFile.path]
        client = SSHUserAuthClient("user", options, None)
        key = client.getPublicKey()
        self.assertEqual(key.isPublic(), True)
        self.assertEqual(key, self.rsaPublic)
    def test_getPublicKeyAgentFallback(self):
        """
        If an agent is present, but doesn't return a key,
        L{SSHUserAuthClient.getPublicKey} continue with the normal key lookup.
        """
        options = ConchOptions()
        options.identitys = [self.rsaFile.path]
        agent = SSHAgentClient()
        client = SSHUserAuthClient("user", options, None)
        client.keyAgent = agent
        key = client.getPublicKey()
        self.assertEqual(key.isPublic(), True)
        self.assertEqual(key, self.rsaPublic)
    def test_getPublicKeyBadKeyError(self):
        """
        If L{keys.Key.fromFile} raises a L{keys.BadKeyError}, the
        L{SSHUserAuthClient.getPublicKey} tries again to get a public key by
        calling itself recursively.
        """
        options = ConchOptions()
        self.tmpdir.child('id_dsa.pub').setContent(keydata.publicDSA_openssh)
        dsaFile = self.tmpdir.child('id_dsa')
        dsaFile.setContent(keydata.privateDSA_openssh)
        options.identitys = [self.rsaFile.path, dsaFile.path]
        # Overwrite the RSA public key file with junk so loading it raises
        # BadKeyError and the client falls through to the DSA identity.
        self.tmpdir.child('id_rsa.pub').setContent('not a key!')
        client = SSHUserAuthClient("user", options, None)
        key = client.getPublicKey()
        self.assertEqual(key.isPublic(), True)
        self.assertEqual(key, Key.fromString(keydata.publicDSA_openssh))
        self.assertEqual(client.usedFiles, [self.rsaFile.path, dsaFile.path])
    def test_getPrivateKey(self):
        """
        L{SSHUserAuthClient.getPrivateKey} will load a private key from the
        last used file populated by L{SSHUserAuthClient.getPublicKey}, and
        return a L{Deferred} which fires with the corresponding private L{Key}.
        """
        rsaPrivate = Key.fromString(keydata.privateRSA_openssh)
        options = ConchOptions()
        options.identitys = [self.rsaFile.path]
        client = SSHUserAuthClient("user", options, None)
        # Populate the list of used files
        client.getPublicKey()
        def _cbGetPrivateKey(key):
            self.assertEqual(key.isPublic(), False)
            self.assertEqual(key, rsaPrivate)
        return client.getPrivateKey().addCallback(_cbGetPrivateKey)
    def test_getPrivateKeyPassphrase(self):
        """
        L{SSHUserAuthClient} can get a private key from a file, and return a
        Deferred called back with a private L{Key} object, even if the key is
        encrypted.
        """
        rsaPrivate = Key.fromString(keydata.privateRSA_openssh)
        passphrase = 'this is the passphrase'
        self.rsaFile.setContent(rsaPrivate.toString('openssh', passphrase))
        options = ConchOptions()
        options.identitys = [self.rsaFile.path]
        client = SSHUserAuthClient("user", options, None)
        # Populate the list of used files
        client.getPublicKey()
        def _getPassword(prompt):
            self.assertEqual(prompt,
                             "Enter passphrase for key '%s': " % (
                             self.rsaFile.path,))
            return passphrase
        def _cbGetPrivateKey(key):
            self.assertEqual(key.isPublic(), False)
            self.assertEqual(key, rsaPrivate)
        self.patch(client, '_getPassword', _getPassword)
        return client.getPrivateKey().addCallback(_cbGetPrivateKey)
| bsd-3-clause |
joachimwolff/bioconda-recipes | recipes/mobster/mobster.py | 45 | 3272 | #!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
# Program Parameters
#
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
# Jar to launch, and the default JVM heap options used when the caller
# passes no -Xm* options and _JAVA_OPTIONS is unset.
jar_file = 'MobileInsertions-0.2.4.jar'
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
    """Return the symlink-resolved, canonicalized directory-portion of path."""
    resolved = os.path.realpath(path)
    return os.path.dirname(resolved)
def java_executable():
    """Return the executable name of the Java interpreter."""
    # Prefer $JAVA_HOME/bin/java when it exists and is executable;
    # otherwise fall back to whatever 'java' resolves to on PATH.
    java_home = getenv('JAVA_HOME')
    if not java_home:
        return 'java'
    candidate = os.path.join(java_home, 'bin', 'java')
    if access(candidate, X_OK):
        return candidate
    return 'java'
def jvm_opts(argv):
    """Construct lists of Java arguments based on our argument list.

    The argument list passed in argv must not include the script name.

    BUG FIX: the old docstring claimed a 3-tuple; the function actually
    returns a 4-tuple of lists/values of the form:

        (memory_options, prop_options, passthrough_options, exec_dir)

    where exec_dir is the directory named by a --exec_dir=... argument
    (seeded with a copy of this script's directory if missing), or None.
    """
    mem_opts = []
    prop_opts = []
    pass_args = []
    exec_dir = None
    for arg in argv:
        if arg.startswith('-D'):
            prop_opts.append(arg)
        elif arg.startswith('-XX'):
            prop_opts.append(arg)
        elif arg.startswith('-Xm'):
            mem_opts.append(arg)
        elif arg.startswith('--exec_dir='):
            exec_dir = arg.split('=')[1].strip('"').strip("'")
            if not os.path.exists(exec_dir):
                # First use of this exec_dir: seed it with a copy of the
                # distribution directory that contains this wrapper script.
                shutil.copytree(real_dirname(sys.argv[0]), exec_dir, symlinks=False, ignore=None)
        else:
            pass_args.append(arg)
    # In the original shell script the test coded below read:
    #   if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
    # To reproduce the behaviour of the above shell code fragment
    # it is important to explicitly check for equality with None
    # in the second condition, so a null envar value counts as True!
    if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
        mem_opts = default_jvm_mem_opts
    return (mem_opts, prop_opts, pass_args, exec_dir)
def main():
    """Entry point: assemble the java command line and exec the jar."""
    java = java_executable()
    """
    PeptideShaker updates files relative to the path of the jar file.
    In a multiuser setting, the option --exec_dir="exec_dir"
    can be used as the location for the peptide-shaker distribution.
    If the exec_dir dies not exist,
    we copy the jar file, lib, and resources to the exec_dir directory.
    """
    (mem_opts, prop_opts, pass_args, exec_dir) = jvm_opts(sys.argv[1:])
    # Run out of exec_dir when requested, otherwise out of this script's dir.
    jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0])
    # A first passthrough argument starting with 'eu' is treated as a main
    # class name, so launch with -cp; otherwise launch the jar directly.
    if pass_args != [] and pass_args[0].startswith('eu'):
        jar_arg = '-cp'
    else:
        jar_arg = '-jar'
    jar_path = os.path.join(jar_dir, jar_file)
    java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
    # Propagate the JVM's exit status as our own.
    sys.exit(subprocess.call(java_args))
# Standard script entry guard.
if __name__ == '__main__':
    main()
| mit |
kr41/ggrc-core | src/ggrc/models/reflection.py | 1 | 13581 | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Utilties to deal with introspecting gGRC models for publishing, creation,
and update from resource format representations, such as JSON."""
from sqlalchemy.sql.schema import UniqueConstraint
from ggrc.utils import get_mapping_rules
from ggrc.utils import title_from_camelcase
from ggrc.utils import underscore_from_camelcase
# Canonical ordering of default attribute columns; get_column_order() places
# these first, in this exact order, before custom and mapping attributes.
ATTRIBUTE_ORDER = (
    "slug",
    "assessment_template",
    "audit",
    "assessment_object",
    "request_audit",
    "control",
    "program",
    "task_group",
    "workflow",
    "title",
    "description",
    "notes",
    "test_plan",
    "owners",
    "request_type",
    "related_assessors",
    "related_creators",
    "related_requesters",
    "related_assignees",
    "related_verifiers",
    "program_owner",
    "program_editor",
    "program_reader",
    "workflow_owner",
    "workflow_member",
    "task_type",
    "due_on",
    "start_date",
    "end_date",
    "report_start_date",
    "report_end_date",
    "relative_start_date",
    "relative_end_date",
    "finished_date",
    "verified_date",
    "status",
    "assertions",
    "categories",
    "contact",
    "design",
    "directive",
    "fraud_related",
    "key_control",
    "kind",
    "link",
    "means",
    "network_zone",
    "operationally",
    "principal_assessor",
    "secondary_assessor",
    "secondary_contact",
    "url",
    "reference_url",
    "verify_frequency",
    "name",
    "email",
    "is_enabled",
    "company",
    "user_role",
    "test",
    "recipients",
    "send_by_default",
    "document_url",
    "document_evidence",
    "delete",
)
# Models whose custom attribute definitions are excluded from the generated
# attribute definitions (set literal instead of set([...]) per C405).
EXCLUDE_CUSTOM_ATTRIBUTES = {
    "AssessmentTemplate",
}

# Models for which mapping/unmapping columns are not generated.
EXCLUDE_MAPPINGS = {
    "AssessmentTemplate",
}
class DontPropagate(object):
  """Attributes wrapped by ``DontPropagate`` instances should not be considered
  to be a part of an inherited list. For example, ``_update_attrs`` can be
  inherited from ``_publish_attrs`` if left unspecified. This class provides
  a mechanism to use that inheritance while excluding some elements from the
  resultant inherited list. For example, this:
  .. sourcecode::
    _publish_attrs = [
        'inherited_attr',
        DontPropagate('not_inherited_attr'),
    ]
  is equivalent to this:
  .. sourcecode::
    _publish_attrs = [
        'inherited_attr',
        'not_inherited_attr',
    ]
    _update_attrs = [
        'inherited_attr',
    ]
  """
  # pylint: disable=too-few-public-methods
  def __init__(self, attr_name):
    # Name of the wrapped attribute; unwrapped by gather_attrs().
    self.attr_name = attr_name
class PublishOnly(DontPropagate):
  """Alias of ``DontPropagate`` for use in a ``_publish_attrs`` specification.
  """
  # pylint: disable=too-few-public-methods
  pass
class AttributeInfo(object):
  """Gather model CRUD information by reflecting on model classes. Builds and
  caches a list of the publishing properties for a class by walking the
  class inheritance tree.
  """
  # Prefixes used to encode mapping, unmapping and custom-attribute columns
  # in attribute-definition keys (see get_mapping_definitions and
  # get_custom_attr_definitions below).
  MAPPING_PREFIX = "__mapping__:"
  UNMAPPING_PREFIX = "__unmapping__:"
  CUSTOM_ATTR_PREFIX = "__custom__:"
  class Type(object):
    """Types of model attributes."""
    # TODO: change to enum.
    # pylint: disable=too-few-public-methods
    PROPERTY = "property"
    MAPPING = "mapping"
    SPECIAL_MAPPING = "special_mapping"
    CUSTOM = "custom"  # normal custom attribute
    OBJECT_CUSTOM = "object_custom"  # object level custom attribute
    USER_ROLE = "user_role"
  def __init__(self, tgt_class):
    # Eagerly gather every attribute category for tgt_class.
    self._publish_attrs = AttributeInfo.gather_publish_attrs(tgt_class)
    self._stub_attrs = AttributeInfo.gather_stub_attrs(tgt_class)
    self._update_attrs = AttributeInfo.gather_update_attrs(tgt_class)
    self._create_attrs = AttributeInfo.gather_create_attrs(tgt_class)
    self._include_links = AttributeInfo.gather_include_links(tgt_class)
    self._aliases = AttributeInfo.gather_aliases(tgt_class)
  @classmethod
  def gather_attr_dicts(cls, tgt_class, src_attr):
    """ Gather dictionaries from target class parets """
    result = {}
    # Depth-first over base classes so the target class's own entries win.
    for base_class in tgt_class.__bases__:
      base_result = cls.gather_attr_dicts(base_class, src_attr)
      result.update(base_result)
    attrs = getattr(tgt_class, src_attr, {})
    result.update(attrs)
    return result
  @classmethod
  def gather_attrs(cls, tgt_class, src_attrs, accumulator=None,
                   main_class=None):
    """Gathers the attrs to be included in a list for publishing, update, or
    some other purpose. Supports inheritance by iterating the list of
    ``src_attrs`` until a list is found.
    Inheritance of some attributes can be circumvented through use of the
    ``DontPropoagate`` decorator class.
    """
    if main_class is None:
      main_class = tgt_class
    src_attrs = src_attrs if isinstance(src_attrs, list) else [src_attrs]
    accumulator = accumulator if accumulator is not None else set()
    ignore_dontpropagate = True
    for attr in src_attrs:
      attrs = None
      # Only get the attribute if it is defined on the target class, but
      # get it via `getattr`, to handle `@declared_attr`
      if attr in tgt_class.__dict__:
        attrs = getattr(tgt_class, attr, None)
        if callable(attrs):
          attrs = attrs(main_class)
      if attrs is not None:
        if not ignore_dontpropagate:
          attrs = [a for a in attrs if not isinstance(a, DontPropagate)]
        else:
          attrs = [a if not isinstance(a, DontPropagate) else a.attr_name for
                   a in attrs]
        accumulator.update(attrs)
        break
      else:
        ignore_dontpropagate = False
    for base in tgt_class.__bases__:
      cls.gather_attrs(base, src_attrs, accumulator, main_class=main_class)
    return accumulator
  @classmethod
  def gather_publish_attrs(cls, tgt_class):
    return cls.gather_attrs(tgt_class, '_publish_attrs')
  @classmethod
  def gather_aliases(cls, tgt_class):
    return cls.gather_attr_dicts(tgt_class, '_aliases')
  @classmethod
  def gather_stub_attrs(cls, tgt_class):
    return cls.gather_attrs(tgt_class, '_stub_attrs')
  @classmethod
  def gather_update_attrs(cls, tgt_class):
    # _update_attrs falls back to _publish_attrs when unspecified.
    attrs = cls.gather_attrs(tgt_class, ['_update_attrs', '_publish_attrs'])
    return attrs
  @classmethod
  def gather_create_attrs(cls, tgt_class):
    return cls.gather_attrs(tgt_class, [
        '_create_attrs', '_update_attrs', '_publish_attrs'])
  @classmethod
  def gather_include_links(cls, tgt_class):
    return cls.gather_attrs(tgt_class, ['_include_links'])
  @classmethod
  def get_mapping_definitions(cls, object_class):
    """ Get column definitions for allowed mappings for object_class """
    definitions = {}
    mapping_rules = get_mapping_rules()
    object_mapping_rules = mapping_rules.get(object_class.__name__, [])
    # Each mappable class yields a map: and an unmap: column definition.
    for mapping_class in object_mapping_rules:
      class_name = title_from_camelcase(mapping_class)
      mapping_name = "{}{}".format(cls.MAPPING_PREFIX, class_name)
      definitions[mapping_name.lower()] = {
          "display_name": "map:{}".format(class_name),
          "attr_name": mapping_class.lower(),
          "mandatory": False,
          "unique": False,
          "description": "",
          "type": cls.Type.MAPPING,
      }
      unmapping_name = "{}{}".format(cls.UNMAPPING_PREFIX, class_name)
      definitions[unmapping_name.lower()] = {
          "display_name": "unmap:{}".format(class_name),
          "attr_name": mapping_class.lower(),
          "mandatory": False,
          "unique": False,
          "description": "",
          "type": cls.Type.MAPPING,
      }
    return definitions
  @classmethod
  def get_custom_attr_definitions(cls, object_class, ca_cache=None,
                                  include_oca=True):
    """Get column definitions for custom attributes on object_class.
    Args:
      object_class: Model for which we want the attribute definitions.
      ca_cache: dictionary containing custom attribute definitions. If it's set
        this function will not look for CAD in the database. This should be
        used for bulk operations, and eventually replaced with memcache.
      include_oca: Flag for including object level custom attributes. This
        should be true only for defenitions needed for csv imports.
    returns:
      dict of custom attribute definitions.
    """
    definitions = {}
    if not hasattr(object_class, "get_custom_attribute_definitions"):
      return definitions
    object_name = underscore_from_camelcase(object_class.__name__)
    if isinstance(ca_cache, dict) and object_name:
      custom_attributes = ca_cache.get(object_name, [])
    else:
      custom_attributes = object_class.get_custom_attribute_definitions()
    for attr in custom_attributes:
      description = attr.helptext or u""
      if (attr.attribute_type == attr.ValidTypes.DROPDOWN and
              attr.multi_choice_options):
        if description:
          description += "\n\n"
        description += u"Accepted values are:\n{}".format(
            attr.multi_choice_options.replace(",", "\n")
        )
      # A definition_id marks an object-level (as opposed to global) CAD.
      if attr.definition_id:
        ca_type = cls.Type.OBJECT_CUSTOM
      else:
        ca_type = cls.Type.CUSTOM
      attr_name = u"{}{}".format(cls.CUSTOM_ATTR_PREFIX, attr.title).lower()
      definition_ids = definitions.get(attr_name, {}).get("definition_ids", [])
      definition_ids.append(attr.id)
      definitions[attr_name] = {
          "display_name": attr.title,
          "attr_name": attr.title,
          "mandatory": attr.mandatory,
          "unique": False,
          "description": description,
          "type": ca_type,
          "definition_ids": definition_ids,
      }
    return definitions
  @classmethod
  def get_unique_constraints(cls, object_class):
    """ Return a set of attribute names for single unique columns """
    constraints = object_class.__table__.constraints
    unique = [con for con in constraints if isinstance(con, UniqueConstraint)]
    # we only handle single column unique constraints
    unique_columns = [u.columns.keys() for u in unique if len(u.columns) == 1]
    return set(sum(unique_columns, []))
  @classmethod
  def get_object_attr_definitions(cls, object_class, ca_cache=None,
                                  include_oca=True):
    """Get all column definitions for object_class.
    This function joins custom attribute definitions, mapping definitions and
    the extra delete column.
    Args:
      object_class: Model for which we want the attribute definitions.
      ca_cache: dictionary containing custom attribute definitions.
      include_oca: Flag for including object level custom attributes.
    """
    definitions = {}
    aliases = AttributeInfo.gather_aliases(object_class)
    filtered_aliases = [(k, v) for k, v in aliases.items() if v is not None]
    # push the extra delete column at the end to override any custom behavior
    if hasattr(object_class, "slug"):
      filtered_aliases.append(("delete", {
          "display_name": "Delete",
          "description": "",
      }))
    unique_columns = cls.get_unique_constraints(object_class)
    for key, value in filtered_aliases:
      column = object_class.__table__.columns.get(key)
      definition = {
          "display_name": value,
          "attr_name": key,
          "mandatory": False if column is None else not column.nullable,
          "unique": key in unique_columns,
          "description": "",
          "type": cls.Type.PROPERTY,
          "handler_key": key,
      }
      # Dict-valued aliases can override any of the defaults above.
      if isinstance(value, dict):
        definition.update(value)
      definitions[key] = definition
    if object_class.__name__ not in EXCLUDE_CUSTOM_ATTRIBUTES:
      definitions.update(
          cls.get_custom_attr_definitions(object_class, ca_cache=ca_cache,
                                          include_oca=include_oca))
    if object_class.__name__ not in EXCLUDE_MAPPINGS:
      definitions.update(cls.get_mapping_definitions(object_class))
    return definitions
  @classmethod
  def get_attr_definitions_array(cls, object_class, ca_cache=None):
    """ get all column definitions containing only json serializable data """
    definitions = cls.get_object_attr_definitions(object_class,
                                                  ca_cache=ca_cache)
    order = cls.get_column_order(definitions.keys())
    result = []
    for key in order:
      item = definitions[key]
      item["key"] = key
      result.append(item)
    return result
  @classmethod
  def get_column_order(cls, attr_list):
    """ Sort attribute list
    Attribute list should be sorted with 3 rules:
    - attributes in ATTRIBUTE_ORDER variable must be fist and in the same
      order as defined in that variable.
    - Custom Attributes are sorted alphabetically after default attributes
    - mapping attributes are sorted alphabetically and placed last
    """
    attr_set = set(attr_list)
    default_attrs = [v for v in ATTRIBUTE_ORDER if v in attr_set]
    default_set = set(default_attrs)
    other_attrs = [v for v in attr_list if v not in default_set]
    custom_attrs = [v for v in other_attrs if not v.lower().startswith("map:")]
    mapping_attrs = [v for v in other_attrs if v.lower().startswith("map:")]
    custom_attrs.sort(key=lambda x: x.lower())
    mapping_attrs.sort(key=lambda x: x.lower())
    return default_attrs + custom_attrs + mapping_attrs
class SanitizeHtmlInfo(AttributeInfo):
  """AttributeInfo variant that collects the `_sanitize_html` attribute list."""
  def __init__(self, tgt_class):
    self._sanitize_html = SanitizeHtmlInfo.gather_attrs(
        tgt_class, '_sanitize_html')
| apache-2.0 |
adamcandy/QGIS-Meshing | plugins/mesh_surface/scripts/define_boundary_id.py | 2 | 3691 |
##########################################################################
#
# QGIS-meshing plugins.
#
# Copyright (C) 2012-2013 Imperial College London and others.
#
# Please see the AUTHORS file in the main source directory for a
# full list of copyright holders.
#
# Dr Adam S. Candy, adam.candy@imperial.ac.uk
# Applied Modelling and Computation Group
# Department of Earth Science and Engineering
# Imperial College London
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation,
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
##########################################################################
# Take the coords of the domain as well as that of the boundary polygons, a default ID and list of ID's contained inside the boundary-polygons. For the exterior of the domain it will create Shapely Lines from sequential points and see if the line intersects one of the boundary-polygons. For the islands: creates a Shapely polygon for each island and sees if it is fully enclosed within any of the boundary-ID polygons.
#Returns a tuple of 2 elements: element 1 contains a list of lines that compose the external line loop of the domain; element 2 is the ID associated with each line along with a list of tuples, element 1 of the tuple is the island and element 2 is the ID of that island.
# ([line1, line2, linex], [1, 2, x]), [(polygon1, ID1), (polygon2, ID2)...]
# Second function, connectLines joins sequential lines if they share the same ID number.
import shapefile
from shapely.geometry import *
class assignIDs():
def assignIDsMethod(self, idShapeFile):
# Generate a list of Shapely polygons from the coordinates of the boundary-ID polygons.
self.boundaryIDList = []
self.idShapeFile = idShapeFile
self.IDPolygons = []
if idShapeFile:
for j in range(len(self.boundaryData.points)):
self.IDPolygons.append(Polygon([self.boundaryData.points[j][i] for i in range(len(self.boundaryData.points[j]))]))
# Break into component lines and see which intersect the boundary polygons.
for i in range(len(self.domainData.points)):
self.generateIds(i)
def generateIds(self, part):
localIdList = []
for j in range(len(self.domainData.points[part]) - 1):
if not self.idShapeFile:
localIdList.append(self.defID)
continue
self.methodIDPolygons(localIdList, part, j)
self.boundaryIDList.append(localIdList)
def methodIDPolygons(self, localIdList, part, j):
# Want to make a shapely line from sequential points.
line = LineString([tuple(self.domainData.points[part][j]), tuple(self.domainData.points[part][j + 1])])
done = False
for n in range(len(self.IDPolygons)):
if line.intersects(self.IDPolygons[-(n+1)]):
localIdList.append(self.boundaryData.records[-(n+1)][0])
done = True
break
if not done:
localIdList.append(self.defID)
def connectLines (bounds):
lineLists = []
for points in bounds:
localLines = []
for i in range(len(points)-1):
point1 = points[i]
point2 = points[i+1]
localLines.append((point1, point2))
lineLists.append(localLines)
return lineLists
| lgpl-2.1 |
openhatch/oh-mainline | vendor/packages/Django/django/conf/locale/de_CH/formats.py | 107 | 1389 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
from __future__ import unicode_literals
# Output formats (Django date-format syntax, see the *_FORMAT docs link above).
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i:s'
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
    # '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M', # '25.10.2006 14:30'
    '%d.%m.%Y', # '25.10.2006'
)
# these are the separators for non-monetary numbers. For monetary numbers,
# the DECIMAL_SEPARATOR is a . (decimal point) and the THOUSAND_SEPARATOR is a
# ' (single quote).
# For details, please refer to http://www.bk.admin.ch/dokumentation/sprachen/04915/05016/index.html?lang=de
# (in German) and the documentation
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
| agpl-3.0 |
axilleas/ansible-modules-core | packaging/os/rpm_key.py | 60 | 7339 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Ansible module to import third party repo keys to your rpm db
# (c) 2013, Héctor Acosta <hector.acosta@gazzang.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module documentation rendered by ansible-doc.
# BUG FIX: "Wheather" -> "Whether" in the state description.
DOCUMENTATION = '''
---
module: rpm_key
author: Hector Acosta <hector.acosta@gazzang.com>
short_description: Adds or removes a gpg key from the rpm db
description:
    - Adds or removes (rpm --import) a gpg key to your rpm database.
version_added: "1.3"
options:
    key:
      required: true
      default: null
      aliases: []
      description:
          - Key that will be modified. Can be a url, a file, or a keyid if the key already exists in the database.
    state:
      required: false
      default: "present"
      choices: [present, absent]
      description:
          - Whether the key will be imported or removed from the rpm db.
    validate_certs:
      description:
          - If C(no) and the C(key) is a url starting with https, SSL certificates will not be validated. This should only be used
            on personally controlled sites using self-signed certificates.
      required: false
      default: 'yes'
      choices: ['yes', 'no']
'''
# Usage examples rendered by ansible-doc alongside DOCUMENTATION.
EXAMPLES = '''
# Example action to import a key from a url
- rpm_key: state=present key=http://apt.sw.be/RPM-GPG-KEY.dag.txt
# Example action to import a key from a file
- rpm_key: state=present key=/path/to/key.gpg
# Example action to ensure a key is not present in the db
- rpm_key: state=absent key=DEADB33F
'''
import syslog
import os.path
import re
import tempfile
def is_pubkey(string):
    """Verifies if string is a pubkey"""
    # DOTALL lets the armored block span multiple lines; the leading .*?
    # permits arbitrary text before the BEGIN marker.
    armor_regex = ".*?(-----BEGIN PGP PUBLIC KEY BLOCK-----.*?-----END PGP PUBLIC KEY BLOCK-----).*"
    return re.match(armor_regex, string, re.DOTALL)
class RpmKey:
    def __init__(self, module):
        """Resolve the key argument and immediately apply the requested state.

        Note: this constructor drives the whole module run and exits the
        process through module.exit_json / fail_json.
        """
        self.syslogging = False
        # If the key is a url, we need to check if it's present to be idempotent,
        # to do that, we need to check the keyid, which we can get from the armor.
        keyfile = None
        should_cleanup_keyfile = False
        self.module = module
        self.rpm = self.module.get_bin_path('rpm', True)
        state = module.params['state']
        key = module.params['key']
        if '://' in key:
            # URL: download to a temp file, then read the keyid from it.
            keyfile = self.fetch_key(key)
            keyid = self.getkeyid(keyfile)
            should_cleanup_keyfile = True
        elif self.is_keyid(key):
            keyid = key
        elif os.path.isfile(key):
            keyfile = key
            keyid = self.getkeyid(keyfile)
        else:
            self.module.fail_json(msg="Not a valid key %s" % key)
        keyid = self.normalize_keyid(keyid)
        if state == 'present':
            if self.is_key_imported(keyid):
                module.exit_json(changed=False)
            else:
                if not keyfile:
                    self.module.fail_json(msg="When importing a key, a valid file must be given")
                self.import_key(keyfile, dryrun=module.check_mode)
                if should_cleanup_keyfile:
                    self.module.cleanup(keyfile)
                module.exit_json(changed=True)
        else:
            if self.is_key_imported(keyid):
                self.drop_key(keyid, dryrun=module.check_mode)
                module.exit_json(changed=True)
            else:
                module.exit_json(changed=False)
def fetch_key(self, url):
"""Downloads a key from url, returns a valid path to a gpg key"""
try:
rsp, info = fetch_url(self.module, url)
key = rsp.read()
if not is_pubkey(key):
self.module.fail_json(msg="Not a public key: %s" % url)
tmpfd, tmpname = tempfile.mkstemp()
tmpfile = os.fdopen(tmpfd, "w+b")
tmpfile.write(key)
tmpfile.close()
return tmpname
except urllib2.URLError, e:
self.module.fail_json(msg=str(e))
def normalize_keyid(self, keyid):
"""Ensure a keyid doesn't have a leading 0x, has leading or trailing whitespace, and make sure is lowercase"""
ret = keyid.strip().lower()
if ret.startswith('0x'):
return ret[2:]
elif ret.startswith('0X'):
return ret[2:]
else:
return ret
def getkeyid(self, keyfile):
gpg = self.module.get_bin_path('gpg', True)
stdout, stderr = self.execute_command([gpg, '--no-tty', '--batch', '--with-colons', '--fixed-list-mode', '--list-packets', keyfile])
for line in stdout.splitlines():
line = line.strip()
if line.startswith(':signature packet:'):
# We want just the last 8 characters of the keyid
keyid = line.split()[-1].strip()[8:]
return keyid
self.json_fail(msg="Unexpected gpg output")
def is_keyid(self, keystr):
"""Verifies if a key, as provided by the user is a keyid"""
return re.match('(0x)?[0-9a-f]{8}', keystr, flags=re.IGNORECASE)
def execute_command(self, cmd):
if self.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd))
rc, stdout, stderr = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg=stderr)
return stdout, stderr
def is_key_imported(self, keyid):
stdout, stderr = self.execute_command([self.rpm, '-qa', 'gpg-pubkey'])
for line in stdout.splitlines():
line = line.strip()
if not line:
continue
match = re.match('gpg-pubkey-([0-9a-f]+)-([0-9a-f]+)', line)
if not match:
self.module.fail_json(msg="rpm returned unexpected output [%s]" % line)
else:
if keyid == match.group(1):
return True
return False
def import_key(self, keyfile, dryrun=False):
if not dryrun:
self.execute_command([self.rpm, '--import', keyfile])
def drop_key(self, key, dryrun=False):
if not dryrun:
self.execute_command([self.rpm, '--erase', '--allmatches', "gpg-pubkey-%s" % key])
def main():
    """Module entry point: build the argument spec and hand off to RpmKey."""
    argument_spec = dict(
        state=dict(default='present', choices=['present', 'absent'], type='str'),
        key=dict(required=True, type='str'),
        validate_certs=dict(default='yes', type='bool'),
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    # RpmKey performs all the work and exits via exit_json/fail_json
    RpmKey(module)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
# The star imports above provide AnsibleModule and fetch_url and therefore
# must precede the entry-point call.
main()
| gpl-3.0 |
sebi-hgdata/ansible | lib/ansible/runner/action_plugins/template.py | 4 | 6978 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import pipes
from ansible.utils import template
from ansible import utils
from ansible import errors
from ansible.runner.return_data import ReturnData
import base64
class ActionModule(object):
    """Action plugin: render a Jinja2 template locally, then delegate to the
    ``copy`` module (content changed) or ``file`` module (content unchanged)."""

    # rendering happens on the control machine; the result is transferred
    TRANSFERS_FILES = True

    def __init__(self, runner):
        self.runner = runner

    def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
        ''' handler for template operations '''

        if not self.runner.is_playbook:
            raise errors.AnsibleError("in current versions of ansible, templates are only usable in playbooks")

        # load up options
        options = {}
        if complex_args:
            options.update(complex_args)
        options.update(utils.parse_kv(module_args))

        source = options.get('src', None)
        dest = options.get('dest', None)

        if (source is None and 'first_available_file' not in inject) or dest is None:
            result = dict(failed=True, msg="src and dest are required")
            return ReturnData(conn=conn, comm_ok=False, result=result)

        # if we have first_available_file in our vars
        # look up the files and use the first one we find as src
        if 'first_available_file' in inject:
            found = False
            for fn in self.runner.module_vars.get('first_available_file'):
                fn_orig = fn
                fnt = template.template(self.runner.basedir, fn, inject)
                fnd = utils.path_dwim(self.runner.basedir, fnt)
                if not os.path.exists(fnd) and '_original_file' in inject:
                    # retry relative to the including file's templates/ dir
                    fnd = utils.path_dwim_relative(inject['_original_file'], 'templates', fnt, self.runner.basedir, check=False)
                if os.path.exists(fnd):
                    source = fnd
                    found = True
                    break
            if not found:
                result = dict(failed=True, msg="could not find src in first_available_file list")
                return ReturnData(conn=conn, comm_ok=False, result=result)
        else:
            # the src path itself may contain template expressions
            source = template.template(self.runner.basedir, source, inject)
            if '_original_file' in inject:
                source = utils.path_dwim_relative(inject['_original_file'], 'templates', source, self.runner.basedir)
            else:
                source = utils.path_dwim(self.runner.basedir, source)

        # Expand any user home dir specification
        dest = self.runner._remote_expand_user(conn, dest, tmp)

        if dest.endswith("/"):  # CCTODO: Fix path for Windows hosts.
            base = os.path.basename(source)
            dest = os.path.join(dest, base)

        # template the source data locally & get ready to transfer
        try:
            resultant = template.template_from_file(self.runner.basedir, source, inject, vault_password=self.runner.vault_pass)
        except Exception, e:
            result = dict(failed=True, msg=type(e).__name__ + ": " + str(e))
            return ReturnData(conn=conn, comm_ok=False, result=result)

        # compare rendered content against the remote file to decide action
        local_checksum = utils.checksum_s(resultant)
        remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject)

        if local_checksum != remote_checksum:
            # template is different from the remote value

            # if showing diffs, we need to get the remote value
            dest_contents = ''
            if self.runner.diff:
                # using persist_files to keep the temp directory around to avoid needing to grab another
                dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % dest, inject=inject, persist_files=True)
                if 'content' in dest_result.result:
                    dest_contents = dest_result.result['content']
                    if dest_result.result['encoding'] == 'base64':
                        dest_contents = base64.b64decode(dest_contents)
                    else:
                        raise Exception("unknown encoding, failed: %s" % dest_result.result)

            xfered = self.runner._transfer_str(conn, tmp, 'source', resultant)

            # fix file permissions when the copy is done as a different user
            if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root':
                self.runner._remote_chmod(conn, 'a+r', xfered, tmp)

            # run the copy module
            new_module_args = dict(
                src=xfered,
                dest=dest,
                original_basename=os.path.basename(source),
                follow=True,
            )
            module_args_tmp = utils.merge_module_args(module_args, new_module_args)

            if self.runner.noop_on_check(inject):
                # check mode: report the would-be change including a diff
                return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=source, before=dest_contents, after=resultant))
            else:
                res = self.runner._execute_module(conn, tmp, 'copy', module_args_tmp, inject=inject, complex_args=complex_args)
                if res.result.get('changed', False):
                    res.diff = dict(before=dest_contents, after=resultant)
                return res
        else:
            # when running the file module based on the template data, we do
            # not want the source filename (the name of the template) to be used,
            # since this would mess up links, so we clear the src param and tell
            # the module to follow links. When doing that, we have to set
            # original_basename to the template just in case the dest is
            # a directory.
            new_module_args = dict(
                src=None,
                original_basename=os.path.basename(source),
                follow=True,
            )
            # be sure to inject the check mode param into the module args and
            # rely on the file module to report its changed status
            if self.runner.noop_on_check(inject):
                new_module_args['CHECKMODE'] = True
            module_args = utils.merge_module_args(module_args, new_module_args)
            return self.runner._execute_module(conn, tmp, 'file', module_args, inject=inject, complex_args=complex_args)
| gpl-3.0 |
divio/django-shop | shop/admin/delivery.py | 1 | 10294 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url
from django.contrib import admin
from django.db.models import Sum
from django.forms import models, ValidationError
from django.http import HttpResponse
from django.template.loader import select_template
from django.urls import reverse
from django.utils import timezone
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from shop.conf import app_settings
from shop.admin.order import OrderItemInline
from shop.models.order import OrderItemModel
from shop.models.delivery import DeliveryModel
from shop.modifiers.pool import cart_modifiers_pool
from shop.serializers.delivery import DeliverySerializer
from shop.serializers.order import OrderDetailSerializer
class OrderItemForm(models.ModelForm):
    """
    This form handles an ordered item, but adds a number field to modify the number of
    items to deliver.
    """
    # NOTE: the extra ``deliver_quantity`` field is attached dynamically to a
    # subclass of this form (built with ``type()``) by the inline admin class.
    class Meta:
        model = OrderItemModel
        exclude = ()

    def __init__(self, *args, **kwargs):
        # Pre-populate ``deliver_quantity`` with what is still outstanding
        # for this item: ordered quantity minus already delivered quantity.
        if 'instance' in kwargs:
            kwargs.setdefault('initial', {})
            deliver_quantity = kwargs['instance'].quantity - self.get_delivered(kwargs['instance'])
            kwargs['initial'].update(deliver_quantity=deliver_quantity)
        else:
            deliver_quantity = None
        super(OrderItemForm, self).__init__(*args, **kwargs)
        if deliver_quantity == 0:
            # nothing left to deliver: render the field read-only
            self['deliver_quantity'].field.widget.attrs.update(readonly='readonly')

    @classmethod
    def get_delivered(cls, instance):
        """
        Returns the quantity already delivered for this order item.
        """
        aggr = instance.deliver_item.aggregate(delivered=Sum('quantity'))
        return aggr['delivered'] or 0

    def clean(self):
        # Reject negative amounts and amounts exceeding the undelivered rest.
        cleaned_data = super(OrderItemForm, self).clean()
        if cleaned_data.get('deliver_quantity') is not None:
            if cleaned_data['deliver_quantity'] < 0:
                raise ValidationError(_("Only a positive number of items can be delivered"), code='invalid')
            if cleaned_data['deliver_quantity'] > self.instance.quantity - self.get_delivered(self.instance):
                raise ValidationError(_("The number of items to deliver exceeds the ordered quantity"), code='invalid')
        return cleaned_data

    def has_changed(self):
        """Force form to changed"""
        return True
class OrderItemInlineDelivery(OrderItemInline):
    """Variant of ``OrderItemInline`` adding per-item delivery columns."""

    def get_fields(self, request, obj=None):
        fields = list(super(OrderItemInlineDelivery, self).get_fields(request, obj))
        if obj:
            # while goods are being picked the quantity is editable,
            # afterwards only the delivered state is displayed
            if obj.status == 'pick_goods' and obj.unfulfilled_items > 0:
                fields[1] += ('deliver_quantity', 'canceled',)
            else:
                fields[1] += ('get_delivered', 'show_ready',)
        return fields

    def get_readonly_fields(self, request, obj=None):
        readonly_fields = list(super(OrderItemInlineDelivery, self).get_readonly_fields(request, obj))
        if obj:
            if not (obj.status == 'pick_goods' and obj.unfulfilled_items > 0):
                readonly_fields.extend(['get_delivered', 'show_ready'])
        return readonly_fields

    def get_formset(self, request, obj=None, **kwargs):
        """
        Add field `quantity` to the form on the fly, using the same numeric type as `OrderItem.quantity`
        """
        labels = {'quantity': _("Deliver quantity")}
        attrs = models.fields_for_model(obj.items.model, fields=['quantity'], labels=labels)
        # rename to deliver_quantity, since quantity is already used
        attrs['deliver_quantity'] = attrs.pop('quantity')
        if obj.status == 'pick_goods' and obj.unfulfilled_items > 0:
            attrs['deliver_quantity'].widget.attrs.update(style='width: 50px;')
        else:
            attrs['deliver_quantity'].required = False
        # build a one-off subclass of OrderItemForm carrying the extra field
        form = type(str('OrderItemForm'), (OrderItemForm,), attrs)
        labels = {'canceled': _("Cancel this item")}
        kwargs.update(form=form, labels=labels)
        formset = super(OrderItemInlineDelivery, self).get_formset(request, obj, **kwargs)
        return formset

    def get_delivered(self, obj=None):
        # admin column callback: quantity already delivered for this item
        return OrderItemForm.get_delivered(obj)
    get_delivered.short_description = _("Delivered quantity")

    def show_ready(self, obj=None):
        # admin column callback: a non-canceled item is ready for delivery
        return not obj.canceled
    show_ready.boolean = True
    show_ready.short_description = _("Ready for delivery")
def get_shipping_choices():
    """Collect the form choices offered by the registered shipping cart modifiers."""
    return [modifier.get_choice() for modifier in cart_modifiers_pool.get_shipping_modifiers()]
class DeliveryForm(models.ModelForm):
    """Admin form for a single delivery, adding a selectable shipping method."""
    # NOTE: ``models`` is ``django.forms.models`` here, which re-exports
    # ``ChoiceField``; the choices callable is evaluated at render time.
    shipping_method = models.ChoiceField(
        label=_("Shipping by"),
        choices=get_shipping_choices,
    )

    class Meta:
        model = DeliveryModel
        exclude = []

    def has_changed(self):
        # always report a change so the inline row is saved even if untouched
        return True

    def clean_shipping_method(self):
        # keep the stored value when the field was rendered readonly/empty
        if not self.cleaned_data['shipping_method']:
            return self.instance.shipping_method
        return self.cleaned_data['shipping_method']
class DeliveryInline(admin.TabularInline):
    """Inline listing the deliveries that belong to an order."""
    model = DeliveryModel
    form = DeliveryForm
    extra = 0
    fields = ['shipping_id', 'shipping_method', 'delivered_items', 'print_out', 'fulfilled_at', 'shipped_at']
    readonly_fields = ['delivered_items', 'print_out', 'fulfilled_at', 'shipped_at']

    def has_delete_permission(self, request, obj=None):
        # deliveries can never be removed through the admin
        return False

    def get_max_num(self, request, obj=None, **kwargs):
        # never offer more inline forms than existing deliveries for this order
        qs = self.model.objects.filter(order=obj)
        return qs.count()

    def get_fields(self, request, obj=None):
        assert obj is not None, "An Order object can not be added through the Django-Admin"
        fields = list(super(DeliveryInline, self).get_fields(request, obj))
        if not obj.allow_partial_delivery:
            fields.remove('delivered_items')
        return fields

    def get_readonly_fields(self, request, obj=None):
        readonly_fields = list(super(DeliveryInline, self).get_readonly_fields(request, obj))
        if not app_settings.SHOP_OVERRIDE_SHIPPING_METHOD or obj.status == 'ready_for_delivery':
            readonly_fields.append('shipping_method')
        return readonly_fields

    def get_formset(self, request, obj=None, **kwargs):
        formset = super(DeliveryInline, self).get_formset(request, obj, **kwargs)
        if not app_settings.SHOP_OVERRIDE_SHIPPING_METHOD or obj.status == 'ready_for_delivery':
            # make readonly field optional
            formset.form.base_fields['shipping_method'].required = False
        return formset

    def delivered_items(self, obj):
        # admin column callback: "<delivered quantity>/<number of items>"
        aggr = obj.items.aggregate(quantity=Sum('quantity'))
        aggr['quantity'] = aggr['quantity'] or 0
        aggr.update(items=obj.items.count())
        return '{quantity}/{items}'.format(**aggr)
    delivered_items.short_description = _("Quantity/Items")

    def print_out(self, obj):
        # admin column callback: link to the printable delivery note, shown
        # only once the delivery has been fulfilled
        if obj.fulfilled_at is None:
            return ''
        link = reverse('admin:print_delivery_note', args=(obj.id,)), _("Delivery Note")
        return format_html(
            '<span class="object-tools"><a href="{0}" class="viewsitelink" target="_new">{1}</a></span>',
            *link)
    print_out.short_description = _("Print out")

    def fulfilled(self, obj):
        if obj.fulfilled_at:
            return timezone.localtime(obj.fulfilled_at).ctime()  # TODO: find the correct time format
        return _("Pending")
    fulfilled.short_description = _("Fulfilled at")
class DeliveryOrderAdminMixin(object):
    """
    Add this mixin to the class defining the OrderAdmin
    """
    def get_urls(self):
        # expose the delivery-note print-out view under the order admin
        my_urls = [
            url(r'^(?P<delivery_pk>\d+)/print_delivery_note/$',
                self.admin_site.admin_view(self.render_delivery_note),
                name='print_delivery_note'),
        ]
        my_urls.extend(super(DeliveryOrderAdminMixin, self).get_urls())
        return my_urls

    def render_delivery_note(self, request, delivery_pk=None):
        # the project template (APP_LABEL) wins over the default shop template
        template = select_template([
            '{}/print/delivery-note.html'.format(app_settings.APP_LABEL.lower()),
            'shop/print/delivery-note.html'
        ])
        delivery = DeliveryModel.objects.get(pk=delivery_pk)
        context = {'request': request, 'render_label': 'print'}
        customer_serializer = app_settings.CUSTOMER_SERIALIZER(delivery.order.customer)
        order_serializer = OrderDetailSerializer(delivery.order, context=context)
        delivery_serializer = DeliverySerializer(delivery, context=context)
        content = template.render({
            'customer': customer_serializer.data,
            'order': order_serializer.data,
            'delivery': delivery_serializer.data,
            'object': delivery,
        })
        return HttpResponse(content)

    def get_inline_instances(self, request, obj=None):
        assert obj is not None, "An Order object can not be added through the Django-Admin"
        assert hasattr(obj, 'associate_with_delivery'), "Add 'shop.shipping.workflows.SimpleShippingWorkflowMixin' " \
            "(or a class inheriting from thereof) to SHOP_ORDER_WORKFLOWS."
        inline_instances = list(super(DeliveryOrderAdminMixin, self).get_inline_instances(request, obj))
        if obj.associate_with_delivery:
            if obj.allow_partial_delivery:
                # replace `OrderItemInline` by `OrderItemInlineDelivery` for that instance.
                inline_instances = [
                    OrderItemInlineDelivery(self.model, self.admin_site) if isinstance(instance, OrderItemInline) else instance
                    for instance in inline_instances
                ]
            inline_instances.append(DeliveryInline(self.model, self.admin_site))
        return inline_instances

    def save_related(self, request, form, formsets, change):
        super(DeliveryOrderAdminMixin, self).save_related(request, form, formsets, change)
        if form.instance.status == 'pack_goods' and 'status' in form.changed_data:
            # on transition to 'pack_goods', create/refresh the delivery from
            # the cleaned order-item formset data
            orderitem_formset = [fs for fs in formsets if issubclass(fs.model, OrderItemModel)][0]
            form.instance.update_or_create_delivery(orderitem_formset.cleaned_data)
| bsd-3-clause |
grengojbo/st2 | st2common/tests/unit/test_model_base.py | 5 | 6102 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import pecan
import unittest
from webob import exc
from st2common.models.api import base
class FakeModel(base.BaseAPI):
    """Minimal BaseAPI subclass used as ``body_cls`` in the schema test below:
    accepts only an optional string property ``a``."""
    model = None
    schema = {
        "$schema": "http://json-schema.org/draft-04/schema#",
        "type": "object",
        "properties": {
            "a": {"type": "string"}
        },
        "additionalProperties": False
    }
@mock.patch.object(pecan, 'request', mock.MagicMock(json={'a': 'b'}))
@mock.patch.object(pecan, 'response', mock.MagicMock())
class TestModelBase(unittest.TestCase):
    """Tests for the ``jsexpose`` decorator: positional/keyword argument
    passing, type casting via ``arg_types``, body deserialization via
    ``body_cls`` and schema validation failures."""

    def setUp(self):
        super(TestModelBase, self).setUp()
        # records how the decorated function was ultimately invoked
        self.f = mock.MagicMock(__name__="Name")

    def test_expose_decorator(self):
        @base.jsexpose()
        def f(self, *args, **kwargs):
            self.f(self, args, kwargs)
        f(self)
        self.f.assert_called_once_with(self, (), {})

    def test_expose_argument(self):
        @base.jsexpose()
        def f(self, id, *args, **kwargs):
            self.f(self, id, args, kwargs)
        f(self, '11')
        self.f.assert_called_once_with(self, '11', (), {})

    def test_expose_argument_unused(self):
        # without a named parameter the value ends up in *args
        @base.jsexpose()
        def f(self, *args, **kwargs):
            self.f(self, args, kwargs)
        f(self, '11')
        self.f.assert_called_once_with(self, ('11',), {})

    def test_expose_argument_type_casting(self):
        @base.jsexpose(arg_types=[int])
        def f(self, id, *args, **kwargs):
            self.f(self, id, args, kwargs)
        f(self, '11')
        self.f.assert_called_once_with(self, 11, (), {})

    def test_expose_argument_with_default(self):
        @base.jsexpose(arg_types=[int])
        def f(self, id, some=None, *args, **kwargs):
            self.f(self, id, some, args, kwargs)
        f(self, '11')
        self.f.assert_called_once_with(self, 11, None, (), {})

    def test_expose_kv_unused(self):
        # keyword values not matched by named parameters stay in **kwargs
        @base.jsexpose([int, int, str])
        def f(self, id, *args, **kwargs):
            self.f(self, id, args, kwargs)
        f(self, '11', number='7', name="fox")
        self.f.assert_called_once_with(self, 11, (), {'number': '7', 'name': 'fox'})

    def test_expose_kv_type_casting(self):
        @base.jsexpose([int, int, str])
        def f(self, id, number, name, *args, **kwargs):
            self.f(self, id, number, name, args, kwargs)
        f(self, '11', number='7', name="fox")
        self.f.assert_called_once_with(self, 11, 7, 'fox', (), {})

    def test_expose_body_unused(self):
        # the deserialized request body is appended to *args when there is
        # no explicit ``body`` parameter
        APIModelMock = mock.MagicMock()
        @base.jsexpose(body_cls=APIModelMock)
        def f(self, *args, **kwargs):
            self.f(self, args, kwargs)
        f(self)
        APIModelMock.assert_called_once_with(a='b')
        self.f.assert_called_once_with(self, (APIModelMock(),), {})

    def test_expose_body(self):
        APIModelMock = mock.MagicMock()
        @base.jsexpose(body_cls=APIModelMock)
        def f(self, body, *args, **kwargs):
            self.f(self, body, args, kwargs)
        f(self)
        APIModelMock.assert_called_once_with(a='b')
        self.f.assert_called_once_with(self, APIModelMock(), (), {})

    def test_expose_body_and_arguments_unused(self):
        APIModelMock = mock.MagicMock()
        @base.jsexpose(body_cls=APIModelMock)
        def f(self, body, *args, **kwargs):
            self.f(self, body, args, kwargs)
        f(self, '11')
        APIModelMock.assert_called_once_with(a='b')
        self.f.assert_called_once_with(self, APIModelMock(), ('11', ), {})

    def test_expose_body_and_arguments_type_casting(self):
        APIModelMock = mock.MagicMock()
        @base.jsexpose(arg_types=[int], body_cls=APIModelMock)
        def f(self, id, body, *args, **kwargs):
            self.f(self, id, body, args, kwargs)
        f(self, '11')
        APIModelMock.assert_called_once_with(a='b')
        self.f.assert_called_once_with(self, 11, APIModelMock(), (), {})

    # FIX: ``@unittest.skip`` was used without a reason argument; on the
    # Python versions this targets, a bare ``skip`` does not mark the test
    # as skipped.  A reason string restores the intended behavior.
    @unittest.skip("disabled upstream")
    def test_expose_body_and_typed_arguments_unused(self):
        APIModelMock = mock.MagicMock()
        @base.jsexpose(arg_types=[int], body_cls=APIModelMock)
        def f(self, id, body, *args, **kwargs):
            self.f(self, id, body, args, kwargs)
        f(self, '11', 'some')
        APIModelMock.assert_called_once_with(a='b')
        self.f.assert_called_once_with(self, 11, APIModelMock(), ('some', ), {})

    @unittest.skip("disabled upstream")
    def test_expose_body_and_typed_kw_unused(self):
        APIModelMock = mock.MagicMock()
        @base.jsexpose(arg_types=[int], body_cls=APIModelMock)
        def f(self, body, id, *args, **kwargs):
            self.f(self, body, id, args, kwargs)
        f(self, id='11')
        APIModelMock.assert_called_once_with(a='b')
        self.f.assert_called_once_with(self, APIModelMock(), 11, (), {})

    @mock.patch.object(pecan, 'response', mock.MagicMock(status=200))
    def test_expose_schema_validation_failed(self):
        @base.jsexpose(body_cls=FakeModel)
        def f(self, body, *args, **kwargs):
            self.f(self, body, *args, **kwargs)
        # a valid body passes validation and the handler returns 'null'
        pecan.request.json = {'a': '123'}
        rtn_val = f(self)
        self.assertEqual(rtn_val, 'null')
        # an extra property 'b' violates additionalProperties=False
        # BUGFIX: literal was mangled to ''b' was unexpected' (an implicit
        # str/bytes concatenation); the intended regex is below.
        pecan.request.json = {'a': '123', 'b': '456'}
        self.assertRaisesRegexp(exc.HTTPBadRequest, "'b' was unexpected", f, self)
| apache-2.0 |
PaulWay/insights-core | insights/parsers/systemd/unitfiles.py | 1 | 1843 | from ... import Parser, parser
from .. import get_active_lines
@parser('systemctl_list-unit-files')
class UnitFiles(Parser):
    """
    A parser for working with data gathered from `systemctl list-unit-files` utility.
    """

    def __init__(self, *args, **kwargs):
        # service name -> True when the unit is enabled (or 'static')
        self.services = {}
        # service names in order of appearance
        self.service_list = []
        # service name -> raw content line it was parsed from
        self.parsed_lines = {}
        # Parser.__init__ drives parsing, so the containers above must
        # already exist when it runs.
        super(UnitFiles, self).__init__(*args, **kwargs)

    def parse_content(self, content):
        """
        Main parsing class method which stores all interesting data from the content.

        Args:
            content (context.content): Parser context content
        """
        # 'static' means 'on' to fulfill dependency of something else that is on
        recognized_states = {'enabled', 'static', 'disabled', 'invalid'}
        enabled_states = {'enabled', 'static'}
        for line in get_active_lines(content):
            tokens = line.split()
            if len(tokens) != 2 or not any(tok in recognized_states for tok in tokens):
                continue
            name, state = tokens
            self.services[name] = state in enabled_states
            self.parsed_lines[name] = line
            self.service_list.append(name)

    def is_on(self, service_name):
        """
        Checks if the service is enabled in systemctl.

        Args:
            service_name (str): service name including '.service'

        Returns:
            bool: True if service is enabled, False otherwise
        """
        return self.services.get(service_name, False)
| apache-2.0 |
totallybradical/temp_servo2 | tests/wpt/web-platform-tests/WebCryptoAPI/tools/generate.py | 49 | 2532 | # script to generate the generateKey tests
import os

# all generated files are written to ../generateKey relative to this script
here = os.path.dirname(__file__)

# HTML page template for the "successful generateKey calls" tests; the
# single %s placeholder receives the quoted algorithm name for run_test()
successes_html = """<!DOCTYPE html>
<meta charset=utf-8>
<meta name="timeout" content="long">
<title>WebCryptoAPI: generateKey() Successful Calls</title>
<link rel="author" title="Charles Engelke" href="mailto:w3c@engelke.com">
<link rel="help" href="https://www.w3.org/TR/WebCryptoAPI/#dfn-SubtleCrypto-method-generateKey">
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/WebCryptoAPI/util/helpers.js"></script>
<script src="successes.js"></script>
<h1>generateKey Tests for Good Parameters</h1>
<p>
<strong>Warning!</strong> RSA key generation is intrinsically
very slow, so the related tests can take up to
several minutes to complete, depending on browser!
</p>
<div id="log"></div>
<script>
run_test([%s]);
</script>"""

# HTML page template for the "bad parameters" tests
failures_html = """<!DOCTYPE html>
<meta charset=utf-8>
<meta name="timeout" content="long">
<title>WebCryptoAPI: generateKey() for Failures</title>
<link rel="author" title="Charles Engelke" href="mailto:w3c@engelke.com">
<link rel="help" href="https://www.w3.org/TR/WebCryptoAPI/#dfn-SubtleCrypto-method-generateKey">
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/WebCryptoAPI/util/helpers.js"></script>
<script src="failures.js"></script>
<h1>generateKey Tests for Bad Parameters</h1>
<div id="log"></div>
<script>
run_test([%s]);
</script>
"""

# worker-scope counterparts of the two page templates above
successes_worker = """// META: timeout=long
importScripts("/resources/testharness.js");
importScripts("../util/helpers.js");
importScripts("successes.js");
run_test([%s]);
done();"""

failures_worker = """// META: timeout=long
importScripts("/resources/testharness.js");
importScripts("../util/helpers.js");
importScripts("failures.js");
run_test([%s]);
done();"""

# algorithm names: one test file per algorithm per template
names = ["AES-CTR", "AES-CBC", "AES-GCM", "AES-KW", "HMAC", "RSASSA-PKCS1-v1_5",
         "RSA-PSS", "RSA-OAEP", "ECDSA", "ECDH"]

# Emit every (template, algorithm) combination.  Note the double formatting:
# ``template % '"%s"'`` first injects a quoted placeholder, then ``% name``
# fills in the algorithm name.
for filename_pattern, template in [("test_successes_%s.html", successes_html),
                                   ("test_failures_%s.html", failures_html),
                                   ("successes_%s.worker.js", successes_worker),
                                   ("failures_%s.worker.js", failures_worker)]:
    for name in names:
        path = os.path.join(here, os.pardir, "generateKey", filename_pattern % name)
        with open(path, "w") as f:
            f.write(template % '"%s"' % name)
| mpl-2.0 |
CiscoSystems/neutron | neutron/plugins/cisco/extensions/_qos_view.py | 50 | 1662 | # Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def get_view_builder(req):
    """Return a ViewBuilder rooted at the request's application URL."""
    return ViewBuilder(req.application_url)
class ViewBuilder(object):
    """ViewBuilder for QoS, derived from neutron.views.networks."""

    def __init__(self, base_url):
        """Remember the root URL of the wsgi application.

        :param base_url: url of the root wsgi application
        """
        self.base_url = base_url

    def build(self, qos_data, is_detail=False):
        """Generic method used to generate a QoS entity."""
        render = self._build_detail if is_detail else self._build_simple
        return render(qos_data)

    def _build_simple(self, qos_data):
        """Return a simple description of qos."""
        return {'qos': {'id': qos_data['qos_id']}}

    def _build_detail(self, qos_data):
        """Return a detailed description of qos."""
        return {'qos': {'id': qos_data['qos_id'],
                        'name': qos_data['qos_name'],
                        'description': qos_data['qos_desc']}}
| apache-2.0 |
CUCWD/edx-platform | cms/djangoapps/contentstore/views/course.py | 4 | 76075 | """
Views related to operations on course objects
"""
import copy
import json
import logging
import random
import re
import string # pylint: disable=deprecated-module
import django.utils
import six
from ccx_keys.locator import CCXLocator
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied, ValidationError
from django.urls import reverse
from django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseNotFound
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_GET, require_http_methods
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import BlockUsageLocator
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.djangoapps.waffle_utils import WaffleSwitchNamespace
from openedx.features.course_experience.waffle import waffle as course_experience_waffle
from openedx.features.course_experience.waffle import ENABLE_COURSE_ABOUT_SIDEBAR_HTML
from six import text_type
from contentstore.course_group_config import (
COHORT_SCHEME,
ENROLLMENT_SCHEME,
RANDOM_SCHEME,
GroupConfiguration,
GroupConfigurationsValidationError
)
from contentstore.course_info_model import delete_course_update, get_course_updates, update_course_updates
from contentstore.courseware_index import CoursewareSearchIndexer, SearchIndexingError
from contentstore.push_notification import push_notification_enabled
from contentstore.tasks import rerun_course as rerun_course_task
from contentstore.utils import (
add_instructor,
get_lms_link_for_item,
initialize_permissions,
remove_all_instructors,
reverse_course_url,
reverse_library_url,
reverse_url,
reverse_usage_url
)
from contentstore.views.entrance_exam import create_entrance_exam, delete_entrance_exam, update_entrance_exam
from course_action_state.managers import CourseActionStateItemNotFoundError
from course_action_state.models import CourseRerunState, CourseRerunUIStateManager
from course_creators.views import add_user_with_status_unrequested, get_course_creator_status
from edxmako.shortcuts import render_to_response
from milestones import api as milestones_api
from models.settings.course_grading import CourseGradingModel
from models.settings.course_metadata import CourseMetadata
from models.settings.encoder import CourseSettingsEncoder
from openedx.core.djangoapps.credit.api import get_credit_requirements, is_credit_course
from openedx.core.djangoapps.credit.tasks import update_credit_course_requirements
from openedx.core.djangoapps.models.course_details import CourseDetails
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangolib.js_utils import dump_js_escaped_json
from openedx.core.lib.course_tabs import CourseTabPluginManager
from openedx.core.lib.courses import course_image_url
from student import auth
from student.auth import has_course_author_access, has_studio_read_access, has_studio_write_access
from student.roles import CourseCreatorRole, CourseInstructorRole, CourseStaffRole, GlobalStaff, UserBasedRole
from util.course import get_link_for_about_page
from util.date_utils import get_default_time_display
from util.json_request import JsonResponse, JsonResponseBadRequest, expect_json
from util.milestones_helpers import (
is_entrance_exams_enabled,
is_prerequisite_courses_enabled,
is_valid_course_key,
remove_prerequisite_course,
set_prerequisite_courses
)
from util.organizations_helpers import add_organization_course, get_organization_by_short_name, organizations_enabled
from util.string_utils import _has_non_ascii_characters
from xblock_django.api import deprecated_xblocks
from xmodule.contentstore.content import StaticContent
from xmodule.course_module import DEFAULT_START_DATE, CourseFields
from xmodule.error_module import ErrorDescriptor
from xmodule.modulestore import EdxJSONEncoder
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import DuplicateCourseError, ItemNotFoundError
from xmodule.tabs import CourseTab, CourseTabList, InvalidTabsException
from .component import ADVANCED_COMPONENT_TYPES
from .item import create_xblock_info
from .library import LIBRARIES_ENABLED, get_library_creator_status
# Module-level logger; callers below use it for best-effort error reporting.
log = logging.getLogger(__name__)

# Public view entry points exported to URL routing.
__all__ = ['course_info_handler', 'course_handler', 'course_listing',
           'course_info_update_handler', 'course_search_index_handler',
           'course_rerun_handler',
           'settings_handler',
           'grading_handler',
           'advanced_settings_handler',
           'course_notifications_handler',
           'textbooks_list_handler', 'textbooks_detail_handler',
           'group_configurations_list_handler', 'group_configurations_detail_handler']

# Waffle switch namespace used by course_listing to gate the global-staff
# org-filter optimization.
WAFFLE_NAMESPACE = 'studio_home'
class AccessListFallback(Exception):
    """
    Raised when course access cannot be computed from the (faster)
    group-based lookup, signalling the caller to fall back to fetching
    *all* courses available to the user instead.
    """
def get_course_and_check_access(course_key, user, depth=0):
    """
    Fetch the course module for ``course_key`` on behalf of ``user``.

    Used by the view functions in this file to resolve a locator into a
    course module.

    Raises:
        PermissionDenied: if ``user`` lacks Studio read access to the course.
    """
    if has_studio_read_access(user, course_key):
        return modulestore().get_course(course_key, depth=depth)
    raise PermissionDenied()
def reindex_course_and_check_access(course_key, user):
    """
    Restart search indexing for the given course.

    Raises:
        PermissionDenied: if ``user`` lacks course author access.
    """
    if has_course_author_access(user, course_key):
        return CoursewareSearchIndexer.do_course_reindex(modulestore(), course_key)
    raise PermissionDenied()
@login_required
def course_notifications_handler(request, course_key_string=None, action_state_id=None):
    """
    RESTful handler for course action-state notifications.

    Both ``course_key_string`` and ``action_state_id`` are required; if
    either is missing an HttpResponseBadRequest is returned. The requesting
    user must have Studio write access to the course, otherwise
    PermissionDenied is raised.

    GET
        json: information about the notification (action, state, etc)
    DELETE
        json: success/failure of dismissing/deleting the notification
    PUT, POST
        Raise NotImplementedError.
    """
    # A notification is addressed by course AND action-state id; reject
    # requests that omit either piece.
    if not course_key_string or not action_state_id:
        return HttpResponseBadRequest()
    response_format = request.GET.get('format') or request.POST.get('format') or 'html'
    course_key = CourseKey.from_string(course_key_string)
    # Only JSON is supported for this endpoint.
    if response_format != 'json' and 'application/json' not in request.META.get('HTTP_ACCEPT', 'application/json'):
        return HttpResponseNotFound()
    if not has_studio_write_access(request.user, course_key):
        raise PermissionDenied()
    method = request.method
    if method == 'GET':
        return _course_notifications_json_get(action_state_id)
    if method == 'DELETE':
        # Any DELETE is treated as dismissing the notification from the UI.
        return _dismiss_notification(request, action_state_id)
    if method in ('PUT', 'POST'):
        raise NotImplementedError()
    return HttpResponseBadRequest()
def _course_notifications_json_get(course_action_state_id):
    """
    Return a JSON response describing the rerun action state with the
    given id (action, state, and display flag).
    """
    try:
        state = CourseRerunState.objects.find_first(id=course_action_state_id)
    except CourseActionStateItemNotFoundError:
        return HttpResponseBadRequest()
    return JsonResponse({
        'action': state.action,
        'state': state.state,
        'should_display': state.should_display
    })
def _dismiss_notification(request, course_action_state_id):  # pylint: disable=unused-argument
    """
    Dismiss the course rerun notification with the given id and delete
    its backing record.
    """
    try:
        action_state = CourseRerunState.objects.find_first(id=course_action_state_id)
    except CourseActionStateItemNotFoundError:
        # There is nothing to dismiss if the notification never existed.
        return HttpResponseBadRequest()
    if action_state.state == CourseRerunUIStateManager.State.FAILED:
        # A failed rerun's course key will never be used again, so revoke
        # every permission that was granted for it.
        remove_all_instructors(action_state.course_key)
    # Once dismissed, the UI no longer needs this record; delete it.
    action_state.delete()
    return JsonResponse({'success': True})
# pylint: disable=unused-argument
@login_required
def course_handler(request, course_key_string=None):
    """
    The restful handler for course specific requests.
    It provides the course tree with the necessary information for identifying and labeling the parts. The root
    will typically be a 'course' object but may not be especially as we support modules.
    GET
        html: return course listing page if not given a course id
        html: return html page overview for the given course if given a course id
        json: return json representing the course branch's index entry as well as dag w/ all of the children
        replaced w/ json docs where each doc has {'_id': , 'display_name': , 'children': }
    POST
        json: create a course, return resulting json
        descriptor (same as in GET course/...). Leaving off /branch/draft would imply create the course w/ default
        branches. Cannot change the structure contents ('_id', 'display_name', 'children') but can change the
        index entry.
    PUT
        json: update this course (index entry not xblock) such as repointing head, changing display name, org,
        course, run. Return same json as above.
    DELETE
        json: delete this branch from this course (leaving off /branch/draft would imply delete the course)
    """
    try:
        response_format = request.GET.get('format') or request.POST.get('format') or 'html'
        if response_format == 'json' or 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
            if request.method == 'GET':
                course_key = CourseKey.from_string(course_key_string)
                with modulestore().bulk_operations(course_key):
                    course_module = get_course_and_check_access(course_key, request.user, depth=None)
                    return JsonResponse(_course_outline_json(request, course_module))
            # POST (course creation/rerun) is deliberately dispatched BEFORE
            # the write-access check below: there is no existing course key
            # to check yet, and _create_or_rerun_course performs its own
            # permission check (CourseCreatorRole).
            elif request.method == 'POST':  # not sure if this is only post. If one will have ids, it goes after access
                return _create_or_rerun_course(request)
            elif not has_studio_write_access(request.user, CourseKey.from_string(course_key_string)):
                raise PermissionDenied()
            elif request.method == 'PUT':
                raise NotImplementedError()
            elif request.method == 'DELETE':
                raise NotImplementedError()
            else:
                return HttpResponseBadRequest()
        elif request.method == 'GET':  # assume html
            if course_key_string is None:
                # No course id: show the Studio home (course listing) page.
                return redirect(reverse('home'))
            else:
                return course_index(request, CourseKey.from_string(course_key_string))
        else:
            return HttpResponseNotFound()
    except InvalidKeyError:
        # Malformed course key strings surface as 404s, not 500s.
        raise Http404
@login_required
@ensure_csrf_cookie
@require_http_methods(["GET"])
def course_rerun_handler(request, course_key_string):
    """
    RESTful handler for course reruns.
    GET
        html: render the form for rerunning the given course
    """
    # During the soft launch, rerunning is restricted to global staff (PMs).
    if not GlobalStaff().has_user(request.user):
        raise PermissionDenied()
    course_key = CourseKey.from_string(course_key_string)
    with modulestore().bulk_operations(course_key):
        course_module = get_course_and_check_access(course_key, request.user, depth=3)
        if request.method == 'GET':
            context = {
                'source_course_key': course_key,
                'display_name': course_module.display_name,
                'user': request.user,
                'course_creator_status': _get_course_creator_status(request.user),
                'allow_unicode_course_id': settings.FEATURES.get('ALLOW_UNICODE_COURSE_ID', False)
            }
            return render_to_response('course-create-rerun.html', context)
@login_required
@ensure_csrf_cookie
@require_GET
def course_search_index_handler(request, course_key_string):
    """
    RESTful handler for triggering course (re)indexing.
    GET
        html/json: status of the indexing task
    """
    # Indexing is restricted to global staff (PMs).
    if not GlobalStaff().has_user(request.user):
        raise PermissionDenied()
    course_key = CourseKey.from_string(course_key_string)
    # Echo back the request's content type; default to JSON when absent.
    # (Only a missing header gets the default; an empty string is kept as-is.)
    content_type = request.META.get('CONTENT_TYPE', None)
    if content_type is None:
        content_type = "application/json; charset=utf-8"
    with modulestore().bulk_operations(course_key):
        try:
            reindex_course_and_check_access(course_key, request.user)
        except SearchIndexingError as search_err:
            error_body = dump_js_escaped_json({"user_message": search_err.error_list})
            return HttpResponse(error_body, content_type=content_type, status=500)
    success_body = dump_js_escaped_json({"user_message": _("Course has been successfully reindexed.")})
    return HttpResponse(success_body, content_type=content_type, status=200)
def _course_outline_json(request, course_module):
    """
    Build a JSON-serializable outline of ``course_module`` and, recursively,
    its children. The 'concise' format recurses into every xblock that has
    children; the full format stops at verticals.
    """
    is_concise = request.GET.get('format') == 'concise'
    if is_concise:
        def include_children(xblock):
            """Concise outline: descend into anything that has children."""
            return xblock.has_children
    else:
        def include_children(xblock):
            """Full outline: descend into everything except verticals."""
            return not xblock.category == 'vertical'
    return create_xblock_info(
        course_module,
        include_child_info=True,
        course_outline=not is_concise,
        include_children_predicate=include_children,
        is_concise=is_concise,
        user=request.user
    )
def get_in_process_course_actions(request):
    """
    Return the in-process (not yet succeeded, displayable) course rerun
    actions readable by the requesting user.
    """
    pending_actions = CourseRerunState.objects.find_all(
        exclude_args={'state': CourseRerunUIStateManager.State.SUCCEEDED},
        should_display=True,
    )
    return [
        action for action in pending_actions
        if has_studio_read_access(request.user, action.course_key)
    ]
def _accessible_courses_summary_iter(request, org=None):
    """
    Iterate over summaries of every course the logged-in user can read.
    Arguments:
        request: the request object
        org (string): if not None, this value limits the courses returned.
            An empty string results in no courses; otherwise only courses
            with the specified org are returned. Defaults to None.
    Returns:
        (course summary iterator, list of in-process course actions)
    """
    def readable(course_summary):
        """Reject template artifacts and courses the user cannot read."""
        # pylint: disable=fixme
        # TODO remove this condition when templates purged from db
        if course_summary.location.course == 'templates':
            return False
        return has_studio_read_access(request.user, course_summary.id)
    if org is None:
        candidates = modulestore().get_course_summaries()
    elif org == '':
        candidates = []
    else:
        candidates = CourseOverview.get_all_courses(orgs=[org])
    courses_summary = six.moves.filter(readable, candidates)
    return courses_summary, get_in_process_course_actions(request)
def _accessible_courses_iter(request):
    """
    Iterate over every course the logged-in user can read.
    Returns:
        (course iterator, list of in-process course actions)
    """
    def readable(course):
        """Reject broken, CCX, and template courses plus inaccessible ones."""
        if isinstance(course, ErrorDescriptor):
            return False
        # Custom Courses for edX (CCX) is an edX feature for re-using course content.
        # CCXs cannot be edited in Studio (aka cms) and should not be shown in this dashboard.
        if isinstance(course.id, CCXLocator):
            return False
        # pylint: disable=fixme
        # TODO remove this condition when templates purged from db
        if course.location.course == 'templates':
            return False
        return has_studio_read_access(request.user, course.id)
    accessible = six.moves.filter(readable, modulestore().get_courses())
    return accessible, get_in_process_course_actions(request)
def _accessible_courses_iter_for_tests(request):
    """
    Iterate over every course the logged-in user can read, using
    CourseSummary objects for listing purposes.
    This method is only used by tests.
    """
    def readable(course):
        """Reject CCX and template courses plus inaccessible ones."""
        # Custom Courses for edX (CCX) is an edX feature for re-using course content.
        # CCXs cannot be edited in Studio (aka cms) and should not be shown in this dashboard.
        if isinstance(course.id, CCXLocator):
            return False
        # pylint: disable=fixme
        # TODO remove this condition when templates purged from db
        if course.location.course == 'templates':
            return False
        return has_studio_read_access(request.user, course.id)
    summaries = six.moves.filter(readable, modulestore().get_course_summaries())
    return summaries, get_in_process_course_actions(request)
def _accessible_courses_list_from_groups(request):
    """
    List all courses available to the logged in user by reversing access group names.
    Returns:
        (list of course summaries, empty list of in-process actions)
    Raises:
        AccessListFallback: when a role entry carries no course_id, meaning
            the group data cannot be trusted and the caller must fall back
            to iterating over all courses.
    """
    def filter_ccx(course_access):
        """ CCXs cannot be edited in Studio and should not be shown in this dashboard """
        return not isinstance(course_access.course_id, CCXLocator)
    instructor_courses = UserBasedRole(request.user, CourseInstructorRole.ROLE).courses_with_role()
    staff_courses = UserBasedRole(request.user, CourseStaffRole.ROLE).courses_with_role()
    # Use six.moves.filter for consistency with the other accessibility
    # helpers in this module (plain py2 filter would eagerly build a list).
    all_courses = six.moves.filter(filter_ccx, instructor_courses | staff_courses)
    # Deduplicate with a set instead of abusing a dict's keys.
    course_keys = set()
    for course_access in all_courses:
        if course_access.course_id is None:
            raise AccessListFallback
        course_keys.add(course_access.course_id)
    courses_list = []
    if course_keys:
        courses_list = modulestore().get_course_summaries(course_keys=list(course_keys))
    return courses_list, []
def _accessible_libraries_iter(user, org=None):
    """
    Iterate over every library the given user can read.
    org (string): if not None, this value limits the libraries returned.
        An empty string results in no libraries; otherwise only libraries
        with the specified org are returned. Defaults to None.
    """
    if org is None:
        libraries = modulestore().get_library_summaries()
    elif org == '':
        libraries = []
    else:
        libraries = modulestore().get_libraries(org=org)
    # No need to worry about ErrorDescriptors - split's get_libraries() never returns them.
    return (lib for lib in libraries if has_studio_read_access(user, lib.location.library_key))
@login_required
@ensure_csrf_cookie
def course_listing(request):
    """
    List all courses and libraries available to the logged in user.

    Renders the Studio home ('index.html') with active/archived courses,
    in-process rerun actions, and (when enabled) content libraries.
    """
    # Global staff may opt into an org-filtered listing via the
    # 'enable_global_staff_optimization' waffle switch; everyone else gets
    # the unfiltered (org=None) listing.
    optimization_enabled = GlobalStaff().has_user(request.user) and \
        WaffleSwitchNamespace(name=WAFFLE_NAMESPACE).is_enabled(u'enable_global_staff_optimization')
    org = request.GET.get('org', '') if optimization_enabled else None
    courses_iter, in_process_course_actions = get_courses_accessible_to_user(request, org)
    user = request.user
    libraries = _accessible_libraries_iter(request.user, org) if LIBRARIES_ENABLED else []

    def format_in_process_course_view(uca):
        """
        Return a dict of the data which the view requires for each unsucceeded course
        """
        return {
            u'display_name': uca.display_name,
            u'course_key': unicode(uca.course_key),
            u'org': uca.course_key.org,
            u'number': uca.course_key.course,
            u'run': uca.course_key.run,
            u'is_failed': True if uca.state == CourseRerunUIStateManager.State.FAILED else False,
            u'is_in_progress': True if uca.state == CourseRerunUIStateManager.State.IN_PROGRESS else False,
            # Only failed reruns are dismissible from the UI.
            u'dismiss_link': reverse_course_url(
                u'course_notifications_handler',
                uca.course_key,
                kwargs={
                    u'action_state_id': uca.id,
                },
            ) if uca.state == CourseRerunUIStateManager.State.FAILED else u''
        }

    def format_library_for_view(library):
        """
        Return a dict of the data which the view requires for each library
        """
        return {
            u'display_name': library.display_name,
            u'library_key': unicode(library.location.library_key),
            u'url': reverse_library_url(u'library_handler', unicode(library.location.library_key)),
            u'org': library.display_org_with_default,
            u'number': library.display_number_with_default,
            u'can_edit': has_studio_write_access(request.user, library.location.library_key),
        }

    split_archived = settings.FEATURES.get(u'ENABLE_SEPARATE_ARCHIVED_COURSES', False)
    active_courses, archived_courses = _process_courses_list(courses_iter, in_process_course_actions, split_archived)
    in_process_course_actions = [format_in_process_course_view(uca) for uca in in_process_course_actions]
    return render_to_response(u'index.html', {
        u'courses': active_courses,
        u'archived_courses': archived_courses,
        u'in_process_course_actions': in_process_course_actions,
        u'libraries_enabled': LIBRARIES_ENABLED,
        u'libraries': [format_library_for_view(lib) for lib in libraries],
        u'show_new_library_button': get_library_creator_status(user),
        u'user': user,
        u'request_course_creator_url': reverse('request_course_creator'),
        u'course_creator_status': _get_course_creator_status(user),
        u'rerun_creator_status': GlobalStaff().has_user(user),
        u'allow_unicode_course_id': settings.FEATURES.get(u'ALLOW_UNICODE_COURSE_ID', False),
        u'allow_course_reruns': settings.FEATURES.get(u'ALLOW_COURSE_RERUNS', True),
        u'optimization_enabled': optimization_enabled
    })
def _get_rerun_link_for_item(course_key):
    """Return the URL of the course-rerun page for ``course_key``."""
    return reverse_course_url('course_rerun_handler', course_key)
def _deprecated_blocks_info(course_module, deprecated_block_types):
    """
    Returns deprecation information about `deprecated_block_types`
    Arguments:
        course_module (CourseDescriptor): course object
        deprecated_block_types (list): list of deprecated blocks types
    Returns:
        Dict with following keys:
        deprecated_enabled_block_types (list): list containing all deprecated blocks types enabled on this course
        blocks (list): List of `deprecated_enabled_block_types` instances and their parent's url
        advance_settings_url (str): URL to advance settings page
    """
    data = {
        'deprecated_enabled_block_types': [
            block_type for block_type in course_module.advanced_modules if block_type in deprecated_block_types
        ],
        'blocks': [],
        'advance_settings_url': reverse_course_url('advanced_settings_handler', course_module.id)
    }
    # Escape each block type before interpolating it into the category
    # regex, so names containing regex metacharacters match literally
    # instead of corrupting the pattern. (No-op for plain alphanumeric
    # category names.)
    deprecated_blocks = modulestore().get_items(
        course_module.id,
        qualifiers={
            'category': re.compile(
                '^' + '$|^'.join(re.escape(block_type) for block_type in deprecated_block_types) + '$'
            )
        }
    )
    for block in deprecated_blocks:
        data['blocks'].append([
            reverse_usage_url('container_handler', block.parent),
            block.display_name
        ])
    return data
@login_required
@ensure_csrf_cookie
def course_index(request, course_key):
    """
    Display an editable course overview.

    org, course, name: Attributes of the Location for the item to edit

    Raises:
        Http404: if the course does not exist or the user cannot read it.
    """
    # A depth of None implies the whole course. The course outline needs this in order to compute has_changes.
    # A unit may not have a draft version, but one of its components could, and hence the unit itself has changes.
    with modulestore().bulk_operations(course_key):
        course_module = get_course_and_check_access(course_key, request.user, depth=None)
        if not course_module:
            raise Http404
        lms_link = get_lms_link_for_item(course_module.location)
        # Reindex link only appears when courseware search indexing is on.
        reindex_link = None
        if settings.FEATURES.get('ENABLE_COURSEWARE_INDEX', False):
            reindex_link = "/course/{course_id}/search_reindex".format(course_id=unicode(course_key))
        sections = course_module.get_children()
        course_structure = _course_outline_json(request, course_module)
        # 'show' query param names an xblock to expand/scroll to on load.
        locator_to_show = request.GET.get('show', None)
        course_release_date = get_default_time_display(course_module.start) if course_module.start != DEFAULT_START_DATE else _("Unscheduled")
        settings_url = reverse_course_url('settings_handler', course_key)
        # Surface any in-progress/failed rerun notification for this course.
        try:
            current_action = CourseRerunState.objects.find_first(course_key=course_key, should_display=True)
        except (ItemNotFoundError, CourseActionStateItemNotFoundError):
            current_action = None
        deprecated_block_names = [block.name for block in deprecated_xblocks()]
        deprecated_blocks_info = _deprecated_blocks_info(course_module, deprecated_block_names)
        return render_to_response('course_outline.html', {
            'language_code': request.LANGUAGE_CODE,
            'context_course': course_module,
            'lms_link': lms_link,
            'sections': sections,
            'course_structure': course_structure,
            'initial_state': course_outline_initial_state(locator_to_show, course_structure) if locator_to_show else None,
            'rerun_notification_id': current_action.id if current_action else None,
            'course_release_date': course_release_date,
            'settings_url': settings_url,
            'reindex_link': reindex_link,
            'deprecated_blocks_info': deprecated_blocks_info,
            'notification_dismiss_url': reverse_course_url(
                'course_notifications_handler',
                current_action.course_key,
                kwargs={
                    'action_state_id': current_action.id,
                },
            ) if current_action else None,
        })
def get_courses_accessible_to_user(request, org=None):
    """
    Get all courses the requesting user can access, preferring the fast
    group-reversal lookup and falling back to a full scan when it fails.
    Note: overhead of pymongo reads will increase if getting courses from
    django groups fails.
    Arguments:
        request: the request object
        org (string): for global staff users ONLY, limits the courses
            returned. None means no restriction, an empty string means no
            courses, and any other value restricts results to that org.
    Returns:
        (course iterator/list, list of in-process course actions)
    """
    if GlobalStaff().has_user(request.user):
        # Global staff can read everything; skip the group lookup entirely.
        return _accessible_courses_summary_iter(request, org)
    try:
        return _accessible_courses_list_from_groups(request)
    except AccessListFallback:
        # Stale/legacy group data or a lookup error - fall back to
        # iterating through all courses.
        return _accessible_courses_summary_iter(request)
def _process_courses_list(courses_iter, in_process_course_actions, split_archived=False):
    """
    Prepare the user's course list for rendering:
    * Drops in-process courses (those currently being generated by a rerun)
      and any ErrorDescriptor entries.
    * When split_archived is True, courses with has_ended() == True are
      returned in a separate archived list.
    * Formats each remaining course into the dict the view template expects.
    Returns:
        (active course dicts, archived course dicts)
    """
    def as_view_dict(course):
        """Shape one course into the dict required by the view."""
        return {
            'display_name': course.display_name,
            'course_key': unicode(course.location.course_key),
            'url': reverse_course_url('course_handler', course.id),
            'lms_link': get_lms_link_for_item(course.location),
            'rerun_link': _get_rerun_link_for_item(course.id),
            'org': course.display_org_with_default,
            'number': course.display_number_with_default,
            'run': course.location.run
        }
    in_process_keys = {uca.course_key for uca in in_process_course_actions}
    active_courses = []
    archived_courses = []
    for course in courses_iter:
        if isinstance(course, ErrorDescriptor) or course.id in in_process_keys:
            continue
        bucket = archived_courses if (split_archived and course.has_ended()) else active_courses
        bucket.append(as_view_dict(course))
    return active_courses, archived_courses
def course_outline_initial_state(locator_to_show, course_structure):
    """
    Compute the initial state for the course outline view. When the 'show'
    request parameter names an xblock present in ``course_structure``, the
    outline opens with that xblock fully expanded and scrolled into view;
    otherwise None is returned.
    """
    def _children_of(info):
        """Return the child xblock-info list, or None when there is none."""
        child_info = info.get('child_info', None)
        return child_info['children'] if child_info else None

    def _locate(info, locator):
        """Depth-first search for the xblock info whose id is ``locator``."""
        if info['id'] == locator:
            return info
        for child in _children_of(info) or []:
            found = _locate(child, locator)
            if found:
                return found
        return None

    def _gather_ids(info, collected):
        """Append the ids of ``info`` and all of its descendants."""
        collected.append(info['id'])
        for child in _children_of(info) or []:
            _gather_ids(child, collected)

    target = _locate(course_structure, locator_to_show)
    if not target:
        return None
    expanded_locators = []
    _gather_ids(target, expanded_locators)
    return {
        'locator_to_show': locator_to_show,
        'expanded_locators': expanded_locators
    }
@expect_json
def _create_or_rerun_course(request):
    """
    To be called by requests that create a new destination course (i.e., create_new_course and rerun_course)
    Returns the destination course_key and overriding fields for the new course.
    Raises DuplicateCourseError and InvalidKeyError

    A JSON body with 'source_course_key' triggers a rerun; otherwise a brand
    new course is created. Requires the CourseCreatorRole.
    """
    if not auth.user_has_role(request.user, CourseCreatorRole()):
        raise PermissionDenied()
    try:
        org = request.json.get('org')
        # 'number' is preferred; fall back to legacy 'course' key.
        course = request.json.get('number', request.json.get('course'))
        display_name = request.json.get('display_name')
        # force the start date for reruns and allow us to override start via the client
        start = request.json.get('start', CourseFields.start.default)
        run = request.json.get('run')
        # allow/disable unicode characters in course_id according to settings
        if not settings.FEATURES.get('ALLOW_UNICODE_COURSE_ID'):
            if _has_non_ascii_characters(org) or _has_non_ascii_characters(course) or _has_non_ascii_characters(run):
                return JsonResponse(
                    {'error': _('Special characters not allowed in organization, course number, and course run.')},
                    status=400
                )
        fields = {'start': start}
        if display_name is not None:
            fields['display_name'] = display_name
        # Set a unique wiki_slug for newly created courses. To maintain active wiki_slugs for
        # existing xml courses this cannot be changed in CourseDescriptor.
        # # TODO get rid of defining wiki slug in this org/course/run specific way and reconcile
        # w/ xmodule.course_module.CourseDescriptor.__init__
        wiki_slug = u"{0}.{1}.{2}".format(org, course, run)
        definition_data = {'wiki_slug': wiki_slug}
        fields.update(definition_data)
        source_course_key = request.json.get('source_course_key')
        if source_course_key:
            # Rerun path: copy an existing course into a new run.
            source_course_key = CourseKey.from_string(source_course_key)
            destination_course_key = rerun_course(request.user, source_course_key, org, course, run, fields)
            return JsonResponse({
                'url': reverse_url('course_handler'),
                'destination_course_key': unicode(destination_course_key)
            })
        else:
            # Creation path: build a brand new course.
            try:
                new_course = create_new_course(request.user, org, course, run, fields)
                return JsonResponse({
                    'url': reverse_course_url('course_handler', new_course.id),
                    'course_key': unicode(new_course.id),
                })
            except ValidationError as ex:
                return JsonResponse({'error': text_type(ex)}, status=400)
    except DuplicateCourseError:
        return JsonResponse({
            'ErrMsg': _(
                'There is already a course defined with the same '
                'organization and course number. Please '
                'change either organization or course number to be unique.'
            ),
            'OrgErrMsg': _(
                'Please change either the organization or '
                'course number so that it is unique.'),
            'CourseErrMsg': _(
                'Please change either the organization or '
                'course number so that it is unique.'),
        })
    except InvalidKeyError as error:
        return JsonResponse({
            "ErrMsg": _("Unable to create course '{name}'.\n\n{err}").format(name=display_name, err=text_type(error))}
        )
def create_new_course(user, org, number, run, fields):
    """
    Create a brand new course run.
    Raises:
        DuplicateCourseError: Course run already exists.
        ValidationError: organizations are enabled but ``org`` is unknown.
    """
    org_data = get_organization_by_short_name(org)
    if not org_data and organizations_enabled():
        raise ValidationError(_('You must link this course to an organization in order to continue. Organization '
                                'you selected does not exist in the system, you will need to add it to the system'))
    default_store = modulestore().default_modulestore.get_modulestore_type()
    new_course = create_new_course_in_store(default_store, user, org, number, run, fields)
    add_organization_course(org_data, new_course.id)
    return new_course
def create_new_course_in_store(store, user, org, number, run, fields):
    """
    Create a course in the given store, handling instructor enrollment and
    permissions. Separated out because command-line course creation uses
    this as well as the web interface.
    """
    # Every new course gets the platform's default language and web certs.
    fields.update({
        'language': getattr(settings, 'DEFAULT_COURSE_LANGUAGE', 'en'),
        'cert_html_view_enabled': True,
    })
    with modulestore().default_store(store):
        # create_course raises DuplicateCourseError when a course with this
        # org/name already exists.
        new_course = modulestore().create_course(org, number, run, user.id, fields=fields)
    # Grant the creator instructor and staff access to the new course.
    add_instructor(new_course.id, user, user)
    # Seed the new course's permission groups for this user.
    initialize_permissions(new_course.id, user)
    return new_course
def rerun_course(user, source_course_key, org, number, run, fields, async=True):
    """
    Rerun an existing course.

    Builds the destination course key, records the rerun as initiated, and
    delegates the content copy to ``rerun_course_task`` (as a task when
    ``async`` is True, synchronously otherwise).

    Returns the destination course key.

    Raises:
        PermissionDenied: if ``user`` lacks write access to the source course.
        DuplicateCourseError: if the destination org/number/run already exists.
    """
    # NOTE(review): `async` is a reserved keyword from Python 3.7 onward;
    # this parameter must be renamed before any Python 3 migration.
    # verify user has access to the original course
    if not has_studio_write_access(user, source_course_key):
        raise PermissionDenied()
    # create destination course key
    store = modulestore()
    with store.default_store('split'):
        destination_course_key = store.make_course_key(org, number, run)
    # verify org course and run don't already exist
    if store.has_course(destination_course_key, ignore_case=True):
        raise DuplicateCourseError(source_course_key, destination_course_key)
    # Make sure user has instructor and staff access to the destination course
    # so the user can see the updated status for that course
    add_instructor(destination_course_key, user, user)
    # Mark the action as initiated
    CourseRerunState.objects.initiated(source_course_key, destination_course_key, user, fields['display_name'])
    # Clear the fields that must be reset for the rerun
    fields['advertised_start'] = None
    fields['enrollment_start'] = None
    fields['enrollment_end'] = None
    fields['video_upload_pipeline'] = {}
    # Serialize fields so they can cross the task boundary.
    json_fields = json.dumps(fields, cls=EdxJSONEncoder)
    args = [unicode(source_course_key), unicode(destination_course_key), user.id, json_fields]
    if async:
        rerun_course_task.delay(*args)
    else:
        rerun_course_task(*args)
    return destination_course_key
# pylint: disable=unused-argument
@login_required
@ensure_csrf_cookie
@require_http_methods(["GET"])
def course_info_handler(request, course_key_string):
    """
    GET
        html: return html for editing the course info handouts and updates.
    """
    try:
        course_key = CourseKey.from_string(course_key_string)
    except InvalidKeyError:
        raise Http404
    with modulestore().bulk_operations(course_key):
        course_module = get_course_and_check_access(course_key, request.user)
        if not course_module:
            raise Http404
        # This endpoint only serves HTML.
        if 'text/html' not in request.META.get('HTTP_ACCEPT', 'text/html'):
            return HttpResponseBadRequest("Only supports html requests")
        context = {
            'context_course': course_module,
            'updates_url': reverse_course_url('course_info_update_handler', course_key),
            'handouts_locator': course_key.make_usage_key('course_info', 'handouts'),
            'base_asset_url': StaticContent.get_base_url_path_for_course_assets(course_module.id),
            'push_notification_enabled': push_notification_enabled()
        }
        return render_to_response('course_info.html', context)
# pylint: disable=unused-argument
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
@expect_json
def course_info_update_handler(request, course_key_string, provided_id=None):
    """
    restful CRUD operations on course_info updates.
    provided_id should be none if it's new (create) and index otherwise.
    GET
        json: return the course info update models
    POST
        json: create an update
    PUT or DELETE
        json: change an existing update
    """
    if 'application/json' not in request.META.get('HTTP_ACCEPT', 'application/json'):
        return HttpResponseBadRequest("Only supports json requests")
    course_key = CourseKey.from_string(course_key_string)
    usage_key = course_key.make_usage_key('course_info', 'updates')
    if provided_id == '':
        provided_id = None
    # check that logged in user has permissions to this item (GET shouldn't require this level?)
    if not has_studio_write_access(request.user, usage_key.course_key):
        raise PermissionDenied()
    if request.method == 'GET':
        course_updates = get_course_updates(usage_key, provided_id, request.user.id)
        if isinstance(course_updates, dict) and course_updates.get('error'):
            return JsonResponse(course_updates, course_updates.get('status', 400))
        else:
            return JsonResponse(course_updates)
    elif request.method == 'DELETE':
        try:
            return JsonResponse(delete_course_update(usage_key, request.json, provided_id, request.user))
        # Was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort 400 response but narrow
        # the clause and log the failure so it is no longer silent.
        except Exception:  # pylint: disable=broad-except
            log.exception("Failed to delete course update %s in %s", provided_id, course_key)
            return HttpResponseBadRequest(
                "Failed to delete",
                content_type="text/plain"
            )
    # can be either and sometimes django is rewriting one to the other:
    elif request.method in ('POST', 'PUT'):
        try:
            return JsonResponse(update_course_updates(usage_key, request.json, provided_id, request.user))
        # See note on the DELETE branch: narrowed from a bare `except:`.
        except Exception:  # pylint: disable=broad-except
            log.exception("Failed to save course update %s in %s", provided_id, course_key)
            return HttpResponseBadRequest(
                "Failed to save",
                content_type="text/plain"
            )
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "PUT", "POST"))
@expect_json
def settings_handler(request, course_key_string):
    """
    Course settings for dates and about pages.

    Dispatches on the Accept header: 'text/html' renders the Studio settings
    page; 'application/json' exposes the CourseDetails model.
    GET
        html: get the page
        json: get the CourseDetails model
    PUT
        json: update the Course and About xblocks through the CourseDetails model
    """
    course_key = CourseKey.from_string(course_key_string)
    credit_eligibility_enabled = settings.FEATURES.get('ENABLE_CREDIT_ELIGIBILITY', False)
    with modulestore().bulk_operations(course_key):
        # Raises if the user lacks access to this course.
        course_module = get_course_and_check_access(course_key, request.user)
        if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
            upload_asset_url = reverse_course_url('assets_handler', course_key)
            # see if the ORG of this course can be attributed to a defined configuration . In that case, the
            # course about page should be editable in Studio
            marketing_site_enabled = configuration_helpers.get_value_for_org(
                course_module.location.org,
                'ENABLE_MKTG_SITE',
                settings.FEATURES.get('ENABLE_MKTG_SITE', False)
            )
            enable_extended_course_details = configuration_helpers.get_value_for_org(
                course_module.location.org,
                'ENABLE_EXTENDED_COURSE_DETAILS',
                settings.FEATURES.get('ENABLE_EXTENDED_COURSE_DETAILS', False)
            )
            # When the marketing site owns the about page, Studio must not edit it.
            about_page_editable = not marketing_site_enabled
            enrollment_end_editable = GlobalStaff().has_user(request.user) or not marketing_site_enabled
            short_description_editable = configuration_helpers.get_value_for_org(
                course_module.location.org,
                'EDITABLE_SHORT_DESCRIPTION',
                settings.FEATURES.get('EDITABLE_SHORT_DESCRIPTION', True)
            )
            sidebar_html_enabled = course_experience_waffle().is_enabled(ENABLE_COURSE_ABOUT_SIDEBAR_HTML)
            # self_paced_enabled = SelfPacedConfiguration.current().enabled
            # Base template context; optional keys are added below.
            settings_context = {
                'context_course': course_module,
                'course_locator': course_key,
                'lms_link_for_about_page': get_link_for_about_page(course_module),
                'course_image_url': course_image_url(course_module, 'course_image'),
                'banner_image_url': course_image_url(course_module, 'banner_image'),
                'video_thumbnail_image_url': course_image_url(course_module, 'video_thumbnail_image'),
                'details_url': reverse_course_url('settings_handler', course_key),
                'about_page_editable': about_page_editable,
                'short_description_editable': short_description_editable,
                'sidebar_html_enabled': sidebar_html_enabled,
                'upload_asset_url': upload_asset_url,
                'course_handler_url': reverse_course_url('course_handler', course_key),
                'language_options': settings.ALL_LANGUAGES,
                'credit_eligibility_enabled': credit_eligibility_enabled,
                'is_credit_course': False,
                'show_min_grade_warning': False,
                'enrollment_end_editable': enrollment_end_editable,
                'is_prerequisite_courses_enabled': is_prerequisite_courses_enabled(),
                'is_entrance_exams_enabled': is_entrance_exams_enabled(),
                'enable_extended_course_details': enable_extended_course_details
            }
            if is_prerequisite_courses_enabled():
                courses, in_process_course_actions = get_courses_accessible_to_user(request)
                # exclude current course from the list of available courses
                courses = (course for course in courses if course.id != course_key)
                # NOTE(review): 'courses' is a generator here, so this truthiness
                # check always passes even when it yields nothing — confirm intended.
                if courses:
                    courses, __ = _process_courses_list(courses, in_process_course_actions)
                    settings_context.update({'possible_pre_requisite_courses': list(courses)})
            if credit_eligibility_enabled:
                if is_credit_course(course_key):
                    # get and all credit eligibility requirements
                    credit_requirements = get_credit_requirements(course_key)
                    # pair together requirements with same 'namespace' values
                    paired_requirements = {}
                    for requirement in credit_requirements:
                        namespace = requirement.pop("namespace")
                        paired_requirements.setdefault(namespace, []).append(requirement)
                    # if 'minimum_grade_credit' of a course is not set or 0 then
                    # show warning message to course author.
                    show_min_grade_warning = False if course_module.minimum_grade_credit > 0 else True
                    settings_context.update(
                        {
                            'is_credit_course': True,
                            'credit_requirements': paired_requirements,
                            'show_min_grade_warning': show_min_grade_warning,
                        }
                    )
            return render_to_response('settings.html', settings_context)
        elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
            if request.method == 'GET':
                course_details = CourseDetails.fetch(course_key)
                return JsonResponse(
                    course_details,
                    # encoder serializes dates, old locations, and instances
                    encoder=CourseSettingsEncoder
                )
            # For every other possible method type submitted by the caller...
            else:
                # if pre-requisite course feature is enabled set pre-requisite course
                if is_prerequisite_courses_enabled():
                    prerequisite_course_keys = request.json.get('pre_requisite_courses', [])
                    if prerequisite_course_keys:
                        if not all(is_valid_course_key(course_key) for course_key in prerequisite_course_keys):
                            return JsonResponseBadRequest({"error": _("Invalid prerequisite course key")})
                        set_prerequisite_courses(course_key, prerequisite_course_keys)
                    else:
                        # None is chosen, so remove the course prerequisites
                        course_milestones = milestones_api.get_course_milestones(course_key=course_key, relationship="requires")
                        for milestone in course_milestones:
                            remove_prerequisite_course(course_key, milestone)
                # If the entrance exams feature has been enabled, we'll need to check for some
                # feature-specific settings and handle them accordingly
                # We have to be careful that we're only executing the following logic if we actually
                # need to create or delete an entrance exam from the specified course
                if is_entrance_exams_enabled():
                    course_entrance_exam_present = course_module.entrance_exam_enabled
                    entrance_exam_enabled = request.json.get('entrance_exam_enabled', '') == 'true'
                    ee_min_score_pct = request.json.get('entrance_exam_minimum_score_pct', None)
                    # If the entrance exam box on the settings screen has been checked...
                    if entrance_exam_enabled:
                        # Load the default minimum score threshold from settings, then try to override it
                        entrance_exam_minimum_score_pct = float(settings.ENTRANCE_EXAM_MIN_SCORE_PCT)
                        if ee_min_score_pct:
                            entrance_exam_minimum_score_pct = float(ee_min_score_pct)
                            # A whole number (e.g. 60) is taken as a percentage
                            # and converted to a fraction (0.6).
                            if entrance_exam_minimum_score_pct.is_integer():
                                entrance_exam_minimum_score_pct = entrance_exam_minimum_score_pct / 100
                        # If there's already an entrance exam defined, we'll update the existing one
                        if course_entrance_exam_present:
                            exam_data = {
                                'entrance_exam_minimum_score_pct': entrance_exam_minimum_score_pct
                            }
                            update_entrance_exam(request, course_key, exam_data)
                        # If there's no entrance exam defined, we'll create a new one
                        else:
                            create_entrance_exam(request, course_key, entrance_exam_minimum_score_pct)
                    # If the entrance exam box on the settings screen has been unchecked,
                    # and the course has an entrance exam attached...
                    elif not entrance_exam_enabled and course_entrance_exam_present:
                        delete_entrance_exam(request, course_key)
                # Perform the normal update workflow for the CourseDetails model
                return JsonResponse(
                    CourseDetails.update_from_json(course_key, request.json, request.user),
                    encoder=CourseSettingsEncoder
                )
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
@expect_json
def grading_handler(request, course_key_string, grader_index=None):
    """
    Course Grading policy configuration.

    GET
        html: get the page
        json no grader_index: get the CourseGrading model (graceperiod, cutoffs, and graders)
        json w/ grader_index: get the specific grader
    PUT
        json no grader_index: update the Course through the CourseGrading model
        json w/ grader_index: create or update the specific grader (create if index out of range)
    """
    course_key = CourseKey.from_string(course_key_string)
    with modulestore().bulk_operations(course_key):
        course_module = get_course_and_check_access(course_key, request.user)
        accept_header = request.META.get('HTTP_ACCEPT', '')
        if 'text/html' in accept_header and request.method == 'GET':
            # Render the Studio grading page (Backbone app bootstrap data).
            return render_to_response('settings_graders.html', {
                'context_course': course_module,
                'course_locator': course_key,
                'course_details': CourseGradingModel.fetch(course_key),
                'grading_url': reverse_course_url('grading_handler', course_key),
                'is_credit_course': is_credit_course(course_key),
            })
        if 'application/json' in accept_header:
            if request.method == 'GET':
                if grader_index is None:
                    # Whole grading model: cutoffs, grace period, and graders.
                    return JsonResponse(
                        CourseGradingModel.fetch(course_key),
                        # encoder serializes dates, old locations, and instances
                        encoder=CourseSettingsEncoder
                    )
                return JsonResponse(CourseGradingModel.fetch_grader(course_key, grader_index))
            if request.method in ('POST', 'PUT'):  # post or put, doesn't matter.
                # Propagate a changed 'minimum_grade_credit' to the credit
                # requirements asynchronously.
                if 'minimum_grade_credit' in request.json:
                    update_credit_course_requirements.delay(unicode(course_key))
                if grader_index is None:
                    # Update the whole model (cutoffs, graceperiod, and graders).
                    return JsonResponse(
                        CourseGradingModel.update_from_json(course_key, request.json, request.user),
                        encoder=CourseSettingsEncoder
                    )
                return JsonResponse(
                    CourseGradingModel.update_grader_from_json(course_key, request.json, request.user)
                )
            if request.method == "DELETE" and grader_index is not None:
                CourseGradingModel.delete_grader(course_key, grader_index, request.user)
                return JsonResponse()
def _refresh_course_tabs(request, course_module):
    """
    Automatically adds/removes tabs if changes to the course require them.

    Raises:
        InvalidTabsException: raised if there's a problem with the new version of the tabs.
    """
    def _sync_tab(tab_list, tab_type, enabled):
        """Ensure a tab of ``tab_type`` is present in ``tab_list`` iff ``enabled``."""
        descriptor = {
            "type": tab_type.type,
        }
        present = descriptor in tab_list
        if enabled and not present:
            tab_list.append(CourseTab.from_json(descriptor))
        elif present and not enabled:
            tab_list.remove(descriptor)

    tabs = copy.copy(course_module.tabs)
    # Reconcile tabs that are provided by non-dynamic course views.
    for tab_type in CourseTabPluginManager.get_tab_types():
        if not tab_type.is_dynamic and tab_type.is_default:
            _sync_tab(tabs, tab_type, tab_type.is_enabled(course_module, user=request.user))
    CourseTabList.validate_tabs(tabs)
    # Only write back into the course when something actually changed.
    if tabs != course_module.tabs:
        course_module.tabs = tabs
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
@expect_json
def advanced_settings_handler(request, course_key_string):
    """
    Course settings configuration.

    GET
        html: get the page
        json: get the model
    PUT, POST
        json: update the Course's settings. The payload is a json rep of the
            metadata dicts.
    """
    course_key = CourseKey.from_string(course_key_string)
    with modulestore().bulk_operations(course_key):
        course_module = get_course_and_check_access(course_key, request.user)
        if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
            return render_to_response('settings_advanced.html', {
                'context_course': course_module,
                'advanced_dict': CourseMetadata.fetch(course_module),
                'advanced_settings_url': reverse_course_url('advanced_settings_handler', course_key)
            })
        elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
            if request.method == 'GET':
                return JsonResponse(CourseMetadata.fetch(course_module))
            else:
                try:
                    # validate data formats and update the course module.
                    # Note: don't update mongo yet, but wait until after any tabs are changed
                    is_valid, errors, updated_data = CourseMetadata.validate_and_update_from_json(
                        course_module,
                        request.json,
                        user=request.user,
                    )
                    if is_valid:
                        try:
                            # update the course tabs if required by any setting changes
                            _refresh_course_tabs(request, course_module)
                        except InvalidTabsException as err:
                            log.exception(text_type(err))
                            response_message = [
                                {
                                    'message': _('An error occurred while trying to save your tabs'),
                                    'model': {'display_name': _('Tabs Exception')}
                                }
                            ]
                            return JsonResponseBadRequest(response_message)
                        # now update mongo
                        modulestore().update_item(course_module, request.user.id)
                        return JsonResponse(updated_data)
                    else:
                        return JsonResponseBadRequest(errors)
                # Handle all errors that validation doesn't catch
                except (TypeError, ValueError, InvalidTabsException) as err:
                    # escape() so the raw error text cannot inject markup into the response
                    return HttpResponseBadRequest(
                        django.utils.html.escape(text_type(err)),
                        content_type="text/plain"
                    )
class TextbookValidationError(Exception):
    """Raised when a textbook input (a single textbook or a textbook list) is invalid."""
def validate_textbooks_json(text):
    """
    Validate the given text as representing a JSON *list* of PDF textbooks.

    (The original docstring said "a single PDF textbook" — it was swapped
    with validate_textbook_json's; this function validates the whole list.)

    Raises TextbookValidationError if the text is not valid JSON, is not a
    list/tuple, contains an invalid textbook entry, or declares duplicate IDs.
    Returns the parsed list of textbook dicts on success.
    """
    try:
        textbooks = json.loads(text)
    except ValueError:
        raise TextbookValidationError("invalid JSON")
    if not isinstance(textbooks, (list, tuple)):
        raise TextbookValidationError("must be JSON list")
    # Each entry must itself be a valid single-textbook object.
    for textbook in textbooks:
        validate_textbook_json(textbook)
    # check specified IDs for uniqueness
    all_ids = [textbook["id"] for textbook in textbooks if "id" in textbook]
    unique_ids = set(all_ids)
    if len(all_ids) > len(unique_ids):
        raise TextbookValidationError("IDs must be unique")
    return textbooks
def validate_textbook_json(textbook):
    """
    Validate the given input as representing a *single* PDF textbook.

    (The original docstring said "a list of PDF textbooks" — it was swapped
    with validate_textbooks_json's; this function validates one textbook.)

    Accepts either a dict or a JSON string encoding a dict. Raises
    TextbookValidationError if the JSON is invalid, the value is not an
    object, 'tab_title' is missing/empty, or a provided 'id' does not start
    with a digit. Returns the textbook dict on success.
    """
    if isinstance(textbook, basestring):
        try:
            textbook = json.loads(textbook)
        except ValueError:
            raise TextbookValidationError("invalid JSON")
    if not isinstance(textbook, dict):
        raise TextbookValidationError("must be JSON object")
    if not textbook.get("tab_title"):
        raise TextbookValidationError("must have tab_title")
    tid = unicode(textbook.get("id", ""))
    # IDs are used in URLs; the convention requires a leading digit.
    if tid and not tid[0].isdigit():
        raise TextbookValidationError("textbook ID must start with a digit")
    return textbook
def assign_textbook_id(textbook, used_ids=()):
    """
    Return an ID for ``textbook`` that does not collide with ``used_ids``.

    The ID is derived from the textbook's tab title and always begins with
    a digit, per the textbook-ID convention.
    """
    candidate = BlockUsageLocator.clean(textbook["tab_title"])
    if not candidate[0].isdigit():
        # IDs must start with a digit; prepend a random one.
        candidate = random.choice(string.digits) + candidate
    # Disambiguate collisions by appending random lowercase letters.
    while candidate in used_ids:
        candidate += random.choice(string.ascii_lowercase)
    return candidate
@require_http_methods(("GET", "POST", "PUT"))
@login_required
@ensure_csrf_cookie
def textbooks_list_handler(request, course_key_string):
    """
    A RESTful handler for textbook collections.

    GET
        html: return textbook list page (Backbone application)
        json: return JSON representation of all textbooks in this course
    POST
        json: create a new textbook for this course
    PUT
        json: overwrite all textbooks in the course with the given list
    """
    course_key = CourseKey.from_string(course_key_string)
    store = modulestore()
    with store.bulk_operations(course_key):
        course = get_course_and_check_access(course_key, request.user)
        # Anything that did not explicitly ask for JSON gets the HTML page.
        if "application/json" not in request.META.get('HTTP_ACCEPT', 'text/html'):
            # return HTML page
            upload_asset_url = reverse_course_url('assets_handler', course_key)
            textbook_url = reverse_course_url('textbooks_list_handler', course_key)
            return render_to_response('textbooks.html', {
                'context_course': course,
                'textbooks': course.pdf_textbooks,
                'upload_asset_url': upload_asset_url,
                'textbook_url': textbook_url,
            })
        # from here on down, we know the client has requested JSON
        if request.method == 'GET':
            return JsonResponse(course.pdf_textbooks)
        elif request.method == 'PUT':
            try:
                textbooks = validate_textbooks_json(request.body)
            except TextbookValidationError as err:
                return JsonResponse({"error": text_type(err)}, status=400)
            # Assign IDs to any textbooks that arrived without one,
            # avoiding collisions with the IDs already in the payload.
            tids = set(t["id"] for t in textbooks if "id" in t)
            for textbook in textbooks:
                if "id" not in textbook:
                    tid = assign_textbook_id(textbook, tids)
                    textbook["id"] = tid
                    tids.add(tid)
            # Make sure the course shows a Textbooks tab.
            if not any(tab['type'] == 'pdf_textbooks' for tab in course.tabs):
                course.tabs.append(CourseTab.load('pdf_textbooks'))
            course.pdf_textbooks = textbooks
            store.update_item(course, request.user.id)
            return JsonResponse(course.pdf_textbooks)
        elif request.method == 'POST':
            # create a new textbook for the course
            try:
                textbook = validate_textbook_json(request.body)
            except TextbookValidationError as err:
                return JsonResponse({"error": text_type(err)}, status=400)
            if not textbook.get("id"):
                tids = set(t["id"] for t in course.pdf_textbooks if "id" in t)
                textbook["id"] = assign_textbook_id(textbook, tids)
            existing = course.pdf_textbooks
            existing.append(textbook)
            course.pdf_textbooks = existing
            # Make sure the course shows a Textbooks tab.
            if not any(tab['type'] == 'pdf_textbooks' for tab in course.tabs):
                course.tabs.append(CourseTab.load('pdf_textbooks'))
            store.update_item(course, request.user.id)
            # 201 Created, with a Location header pointing at the new resource.
            resp = JsonResponse(textbook, status=201)
            resp["Location"] = reverse_course_url(
                'textbooks_detail_handler',
                course.id,
                kwargs={'textbook_id': textbook["id"]}
            )
            return resp
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
def textbooks_detail_handler(request, course_key_string, textbook_id):
    """
    JSON API endpoint for manipulating a textbook via its internal ID.
    Used by the Backbone application.

    GET
        json: return JSON representation of textbook
    POST or PUT
        json: update textbook based on provided information
    DELETE
        json: remove textbook
    """
    course_key = CourseKey.from_string(course_key_string)
    store = modulestore()
    with store.bulk_operations(course_key):
        course_module = get_course_and_check_access(course_key, request.user)
        # Compare IDs as text: stored IDs may be ints or strings.
        matching_id = [tb for tb in course_module.pdf_textbooks
                       if unicode(tb.get("id")) == unicode(textbook_id)]
        if matching_id:
            textbook = matching_id[0]
        else:
            textbook = None
        if request.method == 'GET':
            if not textbook:
                return JsonResponse(status=404)
            return JsonResponse(textbook)
        elif request.method in ('POST', 'PUT'):  # can be either and sometimes
            # django is rewriting one to the other
            try:
                new_textbook = validate_textbook_json(request.body)
            except TextbookValidationError as err:
                return JsonResponse({"error": text_type(err)}, status=400)
            # The URL's ID wins over whatever the payload carries.
            new_textbook["id"] = textbook_id
            if textbook:
                # Replace the existing entry in place, preserving order.
                i = course_module.pdf_textbooks.index(textbook)
                new_textbooks = course_module.pdf_textbooks[0:i]
                new_textbooks.append(new_textbook)
                new_textbooks.extend(course_module.pdf_textbooks[i + 1:])
                course_module.pdf_textbooks = new_textbooks
            else:
                course_module.pdf_textbooks.append(new_textbook)
            store.update_item(course_module, request.user.id)
            return JsonResponse(new_textbook, status=201)
        elif request.method == 'DELETE':
            if not textbook:
                return JsonResponse(status=404)
            i = course_module.pdf_textbooks.index(textbook)
            remaining_textbooks = course_module.pdf_textbooks[0:i]
            remaining_textbooks.extend(course_module.pdf_textbooks[i + 1:])
            course_module.pdf_textbooks = remaining_textbooks
            store.update_item(course_module, request.user.id)
            return JsonResponse()
def remove_content_or_experiment_group(request, store, course, configuration, group_configuration_id, group_id=None):
    """
    Remove content group or experiment group configuration only if it's not in use.

    For a RANDOM (experiment) scheme the whole partition is removed; for a
    COHORT scheme only the single group identified by ``group_id`` is removed.
    Returns 400 when the target is still in use, 404 when it cannot be found,
    204 on success.
    """
    configuration_index = course.user_partitions.index(configuration)
    if configuration.scheme.name == RANDOM_SCHEME:
        usages = GroupConfiguration.get_content_experiment_usage_info(store, course)
        # presumably 'usages' is keyed by partition id — verify against GroupConfiguration
        used = int(group_configuration_id) in usages
        if used:
            return JsonResponse(
                {"error": _("This group configuration is in use and cannot be deleted.")},
                status=400
            )
        # Remove the entire experiment partition.
        course.user_partitions.pop(configuration_index)
    elif configuration.scheme.name == COHORT_SCHEME:
        # NOTE(review): 'not group_id' also rejects group_id == 0 — confirm group IDs are never 0.
        if not group_id:
            return JsonResponse(status=404)
        group_id = int(group_id)
        usages = GroupConfiguration.get_partitions_usage_info(store, course)
        used = group_id in usages
        if used:
            return JsonResponse(
                {"error": _("This content group is in use and cannot be deleted.")},
                status=400
            )
        matching_groups = [group for group in configuration.groups if group.id == group_id]
        if matching_groups:
            group_index = configuration.groups.index(matching_groups[0])
            configuration.groups.pop(group_index)
        else:
            return JsonResponse(status=404)
        # Write the pruned partition back into the course.
        course.user_partitions[configuration_index] = configuration
    store.update_item(course, request.user.id)
    return JsonResponse(status=204)
@require_http_methods(("GET", "POST"))
@login_required
@ensure_csrf_cookie
def group_configurations_list_handler(request, course_key_string):
    """
    A RESTful handler for Group Configurations.

    GET
        html: return Group Configurations list page (Backbone application)
    POST
        json: create new group configuration
    """
    course_key = CourseKey.from_string(course_key_string)
    store = modulestore()
    with store.bulk_operations(course_key):
        course = get_course_and_check_access(course_key, request.user)
        # Default of 'text/html' means a missing Accept header falls into this branch.
        if 'text/html' in request.META.get('HTTP_ACCEPT', 'text/html'):
            group_configuration_url = reverse_course_url('group_configurations_list_handler', course_key)
            course_outline_url = reverse_course_url('course_handler', course_key)
            should_show_experiment_groups = are_content_experiments_enabled(course)
            if should_show_experiment_groups:
                experiment_group_configurations = GroupConfiguration.get_split_test_partitions_with_usage(store, course)
            else:
                experiment_group_configurations = None
            all_partitions = GroupConfiguration.get_all_user_partition_details(store, course)
            should_show_enrollment_track = False
            has_content_groups = False
            displayable_partitions = []
            # Classify partitions by scheme to decide what the page displays.
            for partition in all_partitions:
                if partition['scheme'] == COHORT_SCHEME:
                    has_content_groups = True
                    displayable_partitions.append(partition)
                elif partition['scheme'] == ENROLLMENT_SCHEME:
                    # Only meaningful when there is more than one enrollment track.
                    should_show_enrollment_track = len(partition['groups']) > 1
                    # Add it to the front of the list if it should be shown.
                    if should_show_enrollment_track:
                        displayable_partitions.insert(0, partition)
                elif partition['scheme'] != RANDOM_SCHEME:
                    # Experiment group configurations are handled explicitly above. We don't
                    # want to display their groups twice.
                    displayable_partitions.append(partition)
            # Add empty content group if there is no COHORT User Partition in the list.
            # This will add ability to add new groups in the view.
            if not has_content_groups:
                displayable_partitions.append(GroupConfiguration.get_or_create_content_group(store, course))
            return render_to_response('group_configurations.html', {
                'context_course': course,
                'group_configuration_url': group_configuration_url,
                'course_outline_url': course_outline_url,
                'experiment_group_configurations': experiment_group_configurations,
                'should_show_experiment_groups': should_show_experiment_groups,
                'all_group_configurations': displayable_partitions,
                'should_show_enrollment_track': should_show_enrollment_track
            })
        # No default here: unreachable with a missing Accept header because the
        # branch above defaults to 'text/html' and matches first.
        elif "application/json" in request.META.get('HTTP_ACCEPT'):
            if request.method == 'POST':
                # create a new group configuration for the course
                try:
                    new_configuration = GroupConfiguration(request.body, course).get_user_partition()
                except GroupConfigurationsValidationError as err:
                    return JsonResponse({"error": text_type(err)}, status=400)
                course.user_partitions.append(new_configuration)
                # 201 Created with a Location header pointing at the new resource.
                response = JsonResponse(new_configuration.to_json(), status=201)
                response["Location"] = reverse_course_url(
                    'group_configurations_detail_handler',
                    course.id,
                    kwargs={'group_configuration_id': new_configuration.id}
                )
                store.update_item(course, request.user.id)
                return response
        else:
            return HttpResponse(status=406)
@login_required
@ensure_csrf_cookie
@require_http_methods(("POST", "PUT", "DELETE"))
def group_configurations_detail_handler(request, course_key_string, group_configuration_id, group_id=None):
    """
    JSON API endpoint for manipulating a group configuration via its internal ID.
    Used by the Backbone application.

    POST or PUT
        json: update group configuration based on provided information
    DELETE
        json: remove the configuration (or a single group within it)
    """
    course_key = CourseKey.from_string(course_key_string)
    store = modulestore()
    with store.bulk_operations(course_key):
        course = get_course_and_check_access(course_key, request.user)
        # Compare IDs as text: stored IDs may be ints or strings.
        matching_id = [p for p in course.user_partitions
                       if unicode(p.id) == unicode(group_configuration_id)]
        if matching_id:
            configuration = matching_id[0]
        else:
            configuration = None
        if request.method in ('POST', 'PUT'):  # can be either and sometimes
            # django is rewriting one to the other
            try:
                new_configuration = GroupConfiguration(request.body, course, group_configuration_id).get_user_partition()
            except GroupConfigurationsValidationError as err:
                return JsonResponse({"error": text_type(err)}, status=400)
            if configuration:
                # Replace the existing partition in place, preserving order.
                index = course.user_partitions.index(configuration)
                course.user_partitions[index] = new_configuration
            else:
                course.user_partitions.append(new_configuration)
            store.update_item(course, request.user.id)
            configuration = GroupConfiguration.update_usage_info(store, course, new_configuration)
            return JsonResponse(configuration, status=201)
        elif request.method == "DELETE":
            if not configuration:
                return JsonResponse(status=404)
            # Delegates the in-use checks and the actual removal.
            return remove_content_or_experiment_group(
                request=request,
                store=store,
                course=course,
                configuration=configuration,
                group_configuration_id=group_configuration_id,
                group_id=group_id
            )
def are_content_experiments_enabled(course):
    """
    Returns True if content experiments have been enabled for the course.
    """
    # Both the platform's advanced component types and the course's own
    # advanced_modules setting must include the split_test module.
    module_name = 'split_test'
    return module_name in ADVANCED_COMPONENT_TYPES and module_name in course.advanced_modules
def _get_course_creator_status(user):
    """
    Helper method for returning the course creator status for a particular user,
    taking into account the values of DISABLE_COURSE_CREATION and ENABLE_CREATOR_GROUP.

    If the user passed in has not previously visited the index page, it will be
    added with status 'unrequested' if the course creator group is in use.
    """
    # Staff can always create courses.
    if user.is_staff:
        return 'granted'
    if settings.FEATURES.get('DISABLE_COURSE_CREATION', False):
        return 'disallowed_for_this_site'
    if not settings.FEATURES.get('ENABLE_CREATOR_GROUP', False):
        # Without the creator-group feature, everyone may create courses.
        return 'granted'
    status = get_course_creator_status(user)
    if status is None:
        # User not grandfathered in as an existing user, has not previously visited the dashboard page.
        # Add the user to the course creator admin table with status 'unrequested'.
        add_user_with_status_unrequested(user)
        status = get_course_creator_status(user)
    return status
| agpl-3.0 |
akhmadMizkat/odoo | addons/base_action_rule/base_action_rule.py | 10 | 21424 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from collections import defaultdict
from datetime import datetime
from dateutil.relativedelta import relativedelta
import datetime as DT
import dateutil
import time
import logging
import openerp
from openerp import SUPERUSER_ID
from openerp.modules.registry import RegistryManager
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.safe_eval import safe_eval as eval
_logger = logging.getLogger(__name__)
# Maps a rule's trg_date_range_type value to a function building the matching
# relativedelta for a given integer interval. The False key covers rules with
# no delay type set and yields a zero delta.
DATE_RANGE_FUNCTION = {
    'minutes': lambda interval: relativedelta(minutes=interval),
    'hour': lambda interval: relativedelta(hours=interval),
    'day': lambda interval: relativedelta(days=interval),
    'month': lambda interval: relativedelta(months=interval),
    False: lambda interval: relativedelta(0),
}
def get_datetime(date_str):
    '''Return a datetime from a date string or a datetime string'''
    # A bare date has no time component; normalize to midnight so the single
    # server datetime format below always applies.
    normalized = date_str if ' ' in date_str else date_str + " 00:00:00"
    return datetime.strptime(normalized, DEFAULT_SERVER_DATETIME_FORMAT)
class base_action_rule(osv.osv):
    """ Base Action Rules

    Automated actions triggered on record creation/update/deletion, form
    changes, or timed conditions, per the 'kind' field below.
    """
    _name = 'base.action.rule'
    _description = 'Action Rules'
    _order = 'sequence'
    # Old-API column declarations for the rule model.
    _columns = {
        'name': fields.char('Rule Name', required=True),
        'model_id': fields.many2one('ir.model', 'Related Document Model',
            required=True, domain=[('transient', '=', False)]),
        'model': fields.related('model_id', 'model', type="char", string='Model'),
        'create_date': fields.datetime('Create Date', readonly=1),
        'active': fields.boolean('Active',
            help="When unchecked, the rule is hidden and will not be executed."),
        'sequence': fields.integer('Sequence',
            help="Gives the sequence order when displaying a list of rules."),
        'kind': fields.selection(
            [('on_create', 'On Creation'),
             ('on_write', 'On Update'),
             ('on_create_or_write', 'On Creation & Update'),
             ('on_unlink', 'On Deletion'),
             ('on_change', 'Based on Form Modification'),
             ('on_time', 'Based on Timed Condition')],
            string='When to Run'),
        'trg_date_id': fields.many2one('ir.model.fields', string='Trigger Date',
            help="When should the condition be triggered. If present, will be checked by the scheduler. If empty, will be checked at creation and update.",
            domain="[('model_id', '=', model_id), ('ttype', 'in', ('date', 'datetime'))]"),
        'trg_date_range': fields.integer('Delay after trigger date',
            help="Delay after the trigger date." \
            "You can put a negative number if you need a delay before the" \
            "trigger date, like sending a reminder 15 minutes before a meeting."),
        'trg_date_range_type': fields.selection([('minutes', 'Minutes'), ('hour', 'Hours'),
                                ('day', 'Days'), ('month', 'Months')], 'Delay type'),
        'trg_date_calendar_id': fields.many2one(
            'resource.calendar', 'Use Calendar',
            help='When calculating a day-based timed condition, it is possible to use a calendar to compute the date based on working days.',
            ondelete='set null',
        ),
        'act_user_id': fields.many2one('res.users', 'Set Responsible'),
        'act_followers': fields.many2many("res.partner", string="Add Followers"),
        'server_action_ids': fields.many2many('ir.actions.server', string='Server Actions',
            domain="[('model_id', '=', model_id)]",
            help="Examples: email reminders, call object service, etc."),
        'filter_pre_id': fields.many2one(
            'ir.filters', string='Before Update Filter',
            ondelete='restrict', domain="[('model_id', '=', model_id.model)]",
            help="If present, this condition must be satisfied before the update of the record."),
        'filter_pre_domain': fields.char(string='Before Update Domain', help="If present, this condition must be satisfied before the update of the record."),
        'filter_id': fields.many2one(
            'ir.filters', string='Filter',
            ondelete='restrict', domain="[('model_id', '=', model_id.model)]",
            help="If present, this condition must be satisfied before executing the action rule."),
        'filter_domain': fields.char(string='Domain', help="If present, this condition must be satisfied before executing the action rule."),
        'last_run': fields.datetime('Last Run', readonly=1, copy=False),
        'on_change_fields': fields.char(string="On Change Fields Trigger",
            help="Comma-separated list of field names that triggers the onchange."),
    }
    # which fields have an impact on the registry (changing them must rebuild
    # the per-model trigger hooks)
    CRITICAL_FIELDS = ['model_id', 'active', 'kind', 'on_change_fields']
    _defaults = {
        'active': True,
        'trg_date_range_type': 'day',
    }
def onchange_kind(self, cr, uid, ids, kind, context=None):
clear_fields = []
if kind in ['on_create', 'on_create_or_write', 'on_unlink']:
clear_fields = ['filter_pre_id', 'filter_pre_domain', 'trg_date_id', 'trg_date_range', 'trg_date_range_type']
elif kind in ['on_write', 'on_create_or_write']:
clear_fields = ['trg_date_id', 'trg_date_range', 'trg_date_range_type']
elif kind == 'on_time':
clear_fields = ['filter_pre_id', 'filter_pre_domain']
return {'value': dict.fromkeys(clear_fields, False)}
def onchange_filter_pre_id(self, cr, uid, ids, filter_pre_id, context=None):
ir_filter = self.pool['ir.filters'].browse(cr, uid, filter_pre_id, context=context)
return {'value': {'filter_pre_domain': ir_filter.domain}}
def onchange_filter_id(self, cr, uid, ids, filter_id, context=None):
ir_filter = self.pool['ir.filters'].browse(cr, uid, filter_id, context=context)
return {'value': {'filter_domain': ir_filter.domain}}
@openerp.api.model
def _get_actions(self, records, kinds):
""" Return the actions of the given kinds for records' model. The
returned actions' context contain an object to manage processing.
"""
if '__action_done' not in self._context:
self = self.with_context(__action_done={})
domain = [('model', '=', records._name), ('kind', 'in', kinds)]
actions = self.with_context(active_test=True).search(domain)
return actions.with_env(self.env)
@openerp.api.model
def _get_eval_context(self):
""" Prepare the context used when evaluating python code
:returns: dict -- evaluation context given to (safe_)eval """
return {
'datetime': DT,
'dateutil': dateutil,
'time': time,
'uid': self.env.uid,
'user': self.env.user,
}
    @openerp.api.model
    def _filter_pre(self, records):
        """ Filter the records that satisfy the precondition of action ``self``. """
        # 'eval' below is openerp's safe_eval (aliased at import time), so the
        # stored domain/context strings are evaluated in a restricted sandbox.
        if self.filter_pre_id and records:
            eval_context = self._get_eval_context()
            domain = [('id', 'in', records.ids)] + eval(self.filter_pre_id.domain, eval_context)
            ctx = eval(self.filter_pre_id.context)
            # search with the filter's own context, then restore records' env
            return records.with_context(**ctx).search(domain).with_env(records.env)
        elif self.filter_pre_domain and records:
            eval_context = self._get_eval_context()
            domain = [('id', 'in', records.ids)] + eval(self.filter_pre_domain, eval_context)
            return records.search(domain)
        else:
            # No precondition configured (or no records): everything passes.
            return records
    @openerp.api.model
    def _filter_post(self, records):
        """ Filter the records that satisfy the postcondition of action ``self``.

        :param records: candidate recordset
        :return: subset of ``records`` matching the rule's post-filter
            (either the linked ir.filters record or the inline domain)
        """
        if self.filter_id and records:
            eval_context = self._get_eval_context()
            # NOTE(review): eval() of stored domain/context strings; upstream
            # uses safe_eval here -- confirm these values are admin-only.
            domain = [('id', 'in', records.ids)] + eval(self.filter_id.domain, eval_context)
            ctx = eval(self.filter_id.context)
            # search under the filter's context, then restore the caller's env
            return records.with_context(**ctx).search(domain).with_env(records.env)
        elif self.filter_domain and records:
            eval_context = self._get_eval_context()
            domain = [('id', 'in', records.ids)] + eval(self.filter_domain, eval_context)
            return records.search(domain)
        else:
            # no postcondition configured: every record qualifies
            return records
    @openerp.api.multi
    def _process(self, records):
        """ Process action ``self`` on the ``records`` that have not been done yet.

        Side effects: may write date_action_last/user_id on the records,
        subscribe the configured followers, and run the rule's server actions
        once per record.
        """
        # filter out the records on which self has already been done, then mark
        # remaining records as done (to avoid recursive processing)
        action_done = self._context['__action_done']
        records -= action_done.setdefault(self, records.browse())
        if not records:
            return
        action_done[self] |= records
        # modify records
        values = {}
        if 'date_action_last' in records._fields:
            values['date_action_last'] = openerp.fields.Datetime.now()
        if self.act_user_id and 'user_id' in records._fields:
            values['user_id'] = self.act_user_id.id
        if values:
            records.write(values)
        # subscribe followers (only models inheriting mail.thread have it)
        if self.act_followers and hasattr(records, 'message_subscribe'):
            records.message_subscribe(self.act_followers.ids)
        # execute server actions, one record at a time
        if self.server_action_ids:
            for record in records:
                ctx = {'active_model': record._name, 'active_ids': record.ids, 'active_id': record.id}
                self.server_action_ids.with_context(**ctx).run()
def _register_hook(self, cr):
""" Patch models that should trigger action rules based on creation,
modification, deletion of records and form onchanges.
"""
#
# Note: the patched methods must be defined inside another function,
# otherwise their closure may be wrong. For instance, the function
# create refers to the outer variable 'create', which you expect to be
# bound to create itself. But that expectation is wrong if create is
# defined inside a loop; in that case, the variable 'create' is bound to
# the last function defined by the loop.
#
def make_create():
""" Instanciate a create method that processes action rules. """
@openerp.api.model
def create(self, vals, **kw):
# retrieve the action rules to possibly execute
actions = self.env['base.action.rule']._get_actions(self, ['on_create', 'on_create_or_write'])
# call original method
record = create.origin(self.with_env(actions.env), vals, **kw)
# check postconditions, and execute actions on the records that satisfy them
for action in actions.with_context(old_values=None):
action._process(action._filter_post(record))
return record.with_env(self.env)
return create
def make_write():
""" Instanciate a _write method that processes action rules. """
#
# Note: we patch method _write() instead of write() in order to
# catch updates made by field recomputations.
#
@openerp.api.multi
def _write(self, vals, **kw):
# retrieve the action rules to possibly execute
actions = self.env['base.action.rule']._get_actions(self, ['on_write', 'on_create_or_write'])
records = self.with_env(actions.env)
# check preconditions on records
pre = {action: action._filter_pre(records) for action in actions}
# read old values before the update
old_values = {
old_vals.pop('id'): old_vals
for old_vals in records.read(list(vals))
}
# call original method
_write.origin(records, vals, **kw)
# check postconditions, and execute actions on the records that satisfy them
for action in actions.with_context(old_values=old_values):
action._process(action._filter_post(pre[action]))
return True
return _write
def make_unlink():
""" Instanciate an unlink method that processes action rules. """
@openerp.api.multi
def unlink(self, **kwargs):
# retrieve the action rules to possibly execute
actions = self.env['base.action.rule']._get_actions(self, ['on_unlink'])
records = self.with_env(actions.env)
# check conditions, and execute actions on the records that satisfy them
for action in actions:
action._process(action._filter_post(pre[action]))
# call original method
return unlink.origin(self, **kwargs)
return unlink
def make_onchange(action_rule_id):
""" Instanciate an onchange method for the given action rule. """
def base_action_rule_onchange(self):
action_rule = self.env['base.action.rule'].browse(action_rule_id)
server_actions = action_rule.server_action_ids.with_context(active_model=self._name, onchange_self=self)
result = {}
for server_action in server_actions:
res = server_action.run()
if res and 'value' in res:
res['value'].pop('id', None)
self.update(self._convert_to_cache(res['value'], validate=False))
if res and 'domain' in res:
result.setdefault('domain', {}).update(res['domain'])
if res and 'warning' in res:
result['warning'] = res['warning']
return result
return base_action_rule_onchange
patched_models = defaultdict(set)
def patch(model, name, method):
""" Patch method `name` on `model`, unless it has been patched already. """
if model not in patched_models[name]:
patched_models[name].add(model)
model._patch_method(name, method)
# retrieve all actions, and patch their corresponding model
ids = self.search(cr, SUPERUSER_ID, [])
for action_rule in self.browse(cr, SUPERUSER_ID, ids):
model = action_rule.model_id.model
model_obj = self.pool.get(model)
if not model_obj:
continue
if action_rule.kind == 'on_create':
patch(model_obj, 'create', make_create())
elif action_rule.kind == 'on_create_or_write':
patch(model_obj, 'create', make_create())
patch(model_obj, '_write', make_write())
elif action_rule.kind == 'on_write':
patch(model_obj, '_write', make_write())
elif action_rule.kind == 'on_unlink':
patch(model_obj, 'unlink', make_unlink())
elif action_rule.kind == 'on_change':
# register an onchange method for the action_rule
method = make_onchange(action_rule.id)
for field_name in action_rule.on_change_fields.split(","):
field_name = field_name.strip()
model_obj._onchange_methods[field_name].append(method)
    def _update_cron(self, cr, uid, context=None):
        """ Activate the cron job depending on whether there exists action rules
        based on time conditions.

        :return: result of cron.toggle(), or False when the cron XML id is
            missing (e.g. module data not fully loaded).
        """
        try:
            cron = self.pool['ir.model.data'].get_object(
                cr, uid, 'base_action_rule', 'ir_cron_crm_action', context=context)
        except ValueError:
            # XML id not found: nothing to toggle
            return False
        return cron.toggle(model=self._name, domain=[('kind', '=', 'on_time')])
    def _update_registry(self, cr, uid, context=None):
        """ Update the registry after a modification on action rules. """
        if self.pool.ready:
            # for the sake of simplicity, simply force the registry to reload
            # (commit first so the new rules are visible to the fresh registry)
            cr.commit()
            openerp.api.Environment.reset()
            RegistryManager.new(cr.dbname)
            RegistryManager.signal_registry_change(cr.dbname)
    def create(self, cr, uid, vals, context=None):
        """Create a rule, then refresh the cron and the patched registry."""
        res_id = super(base_action_rule, self).create(cr, uid, vals, context=context)
        self._update_cron(cr, uid, context=context)
        self._update_registry(cr, uid, context=context)
        return res_id
    def write(self, cr, uid, ids, vals, context=None):
        """Write on rules; refresh cron/registry only for CRITICAL_FIELDS."""
        super(base_action_rule, self).write(cr, uid, ids, vals, context=context)
        if set(vals) & set(self.CRITICAL_FIELDS):
            self._update_cron(cr, uid, context=context)
            self._update_registry(cr, uid, context=context)
        return True
    def unlink(self, cr, uid, ids, context=None):
        """Delete rules, then refresh the cron and the patched registry."""
        res = super(base_action_rule, self).unlink(cr, uid, ids, context=context)
        self._update_cron(cr, uid, context=context)
        self._update_registry(cr, uid, context=context)
        return res
def onchange_model_id(self, cr, uid, ids, model_id, context=None):
data = {'model': False, 'filter_pre_id': False, 'filter_id': False}
if model_id:
model = self.pool.get('ir.model').browse(cr, uid, model_id, context=context)
data.update({'model': model.model})
return {'value': data}
    def _check_delay(self, cr, uid, action, record, record_dt, context=None):
        """ Return the datetime at which ``action`` should fire for ``record``.

        :param record_dt: the record's trigger-date value (datetime string).
        :return: datetime -- trigger date plus the configured delay; uses the
            working-day calendar when one is set and the delay unit is 'day'.
        """
        if action.trg_date_calendar_id and action.trg_date_range_type == 'day':
            # count working days on the configured resource calendar
            start_dt = get_datetime(record_dt)
            action_dt = self.pool['resource.calendar'].schedule_days_get_date(
                cr, uid, action.trg_date_calendar_id.id, action.trg_date_range,
                day_date=start_dt, compute_leaves=True, context=context
            )
        else:
            # plain timedelta: unit mapped via DATE_RANGE_FUNCTION
            delay = DATE_RANGE_FUNCTION[action.trg_date_range_type](action.trg_date_range)
            action_dt = get_datetime(record_dt) + delay
        return action_dt
    def _check(self, cr, uid, automatic=False, use_new_cursor=False, context=None):
        """ This Function is called by scheduler.

        Runs every 'on_time' rule: for each record matched by the rule's
        filter, fire the rule if its computed trigger datetime falls in the
        window [last_run, now).
        """
        context = context or {}
        if '__action_done' not in context:
            # shared memo used by _process to avoid recursive processing
            context = dict(context, __action_done={})
        # retrieve all the action rules to run based on a timed condition
        action_dom = [('kind', '=', 'on_time')]
        action_ids = self.search(cr, uid, action_dom, context=dict(context, active_test=True))
        eval_context = self._get_eval_context(cr, uid, context=context)
        for action in self.browse(cr, uid, action_ids, context=context):
            now = datetime.now()
            if action.last_run:
                last_run = get_datetime(action.last_run)
            else:
                # never run before: consider everything since the epoch
                last_run = datetime.utcfromtimestamp(0)
            # retrieve all the records that satisfy the action's condition
            model = self.pool[action.model_id.model]
            domain = []
            ctx = dict(context)
            if action.filter_domain is not False:
                # NOTE(review): eval of stored domain strings; upstream uses
                # safe_eval -- confirm these are admin-controlled
                domain = eval(action.filter_domain, eval_context)
            elif action.filter_id:
                domain = eval(action.filter_id.domain, eval_context)
                ctx.update(eval(action.filter_id.context))
                if 'lang' not in ctx:
                    # Filters might be language-sensitive, attempt to reuse creator lang
                    # as we are usually running this as super-user in background
                    [filter_meta] = action.filter_id.get_metadata()
                    user_id = filter_meta['write_uid'] and filter_meta['write_uid'][0] or \
                        filter_meta['create_uid'][0]
                    ctx['lang'] = self.pool['res.users'].browse(cr, uid, user_id).lang
            record_ids = model.search(cr, uid, domain, context=ctx)
            # determine when action should occur for the records
            date_field = action.trg_date_id.name
            if date_field == 'date_action_last' and 'create_date' in model._fields:
                # fall back on creation date when the record was never acted on
                get_record_dt = lambda record: record[date_field] or record.create_date
            else:
                get_record_dt = lambda record: record[date_field]
            # process action on the records that should be executed
            for record in model.browse(cr, uid, record_ids, context=context):
                record_dt = get_record_dt(record)
                if not record_dt:
                    continue
                action_dt = self._check_delay(cr, uid, action, record, record_dt, context=context)
                if last_run <= action_dt < now:
                    try:
                        action._process(record)
                    except Exception:
                        # one failing record must not abort the whole batch
                        import traceback
                        _logger.error(traceback.format_exc())
            action.write({'last_run': now.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})
            if automatic:
                # auto-commit for batch processing
                cr.commit()
| gpl-3.0 |
RomainBrault/scikit-learn | examples/decomposition/plot_sparse_coding.py | 60 | 4016 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size, heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
    """Discrete sub-sampled Ricker (Mexican hat) wavelet.

    :param resolution: number of sample points.
    :param center: sample index of the wavelet's peak.
    :param width: scale parameter of the wavelet.
    :return: 1-D ndarray of length ``resolution``.
    """
    x = np.linspace(0, resolution - 1, resolution)
    # BUGFIX: the normalization constant is 2 / (sqrt(3*width) * pi**(1/4)).
    # The original wrote `np.pi ** 1 / 4`, which is (pi**1)/4 == pi/4 by
    # operator precedence. The rows are re-normalized in ricker_matrix anyway,
    # but the standalone wavelet amplitude was wrong.
    x = ((2 / (np.sqrt(3 * width) * np.pi ** 0.25))
         * (1 - ((x - center) ** 2 / width ** 2))
         * np.exp((-(x - center) ** 2) / (2 * width ** 2)))
    return x
def ricker_matrix(width, resolution, n_components):
    """Dictionary of Ricker (Mexican hat) wavelets, one per row, unit L2 norm."""
    centers = np.linspace(0, resolution - 1, n_components)
    atoms = np.empty((n_components, resolution))
    for row, center in enumerate(centers):
        atoms[row] = ricker_function(resolution, center, width)
    norms = np.sqrt(np.sum(atoms ** 2, axis=1))
    return atoms / norms[:, np.newaxis]
# Demo parameters: signal length and dictionary subsampling
resolution = 1024
subsampling = 3  # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
                        n_components=n_components)
# Multi-width dictionary: same total atom count, split across five widths
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                                    n_components=n_components // 5)
                for w in (10, 50, 100, 500, 1000))]
# Generate a signal: piecewise constant (3 on the first quarter, -1 after)
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs, color)
estimators = [('OMP', 'omp', None, 15, 'navy'),
              ('Lasso', 'lasso_cd', 2, None, 'turquoise'), ]
lw = 2
plt.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
                                         ('fixed width', 'multiple widths'))):
    plt.subplot(1, 2, subplot + 1)
    plt.title('Sparse coding against %s dictionary' % title)
    plt.plot(y, lw=lw, linestyle='--', label='Original signal')
    # Do a wavelet approximation
    for title, algo, alpha, n_nonzero, color in estimators:
        coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
                            transform_alpha=alpha, transform_algorithm=algo)
        x = coder.transform(y.reshape(1, -1))
        density = len(np.flatnonzero(x))
        # reconstruct the signal from the sparse code
        x = np.ravel(np.dot(x, D))
        squared_error = np.sum((y - x) ** 2)
        plt.plot(x, color=color, lw=lw,
                 label='%s: %s nonzero coefs,\n%.2f error'
                 % (title, density, squared_error))
    # Soft thresholding debiasing
    coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
                        transform_alpha=20)
    x = coder.transform(y.reshape(1, -1))
    _, idx = np.where(x != 0)
    # least-squares refit on the selected atoms removes the shrinkage bias
    x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
    x = np.ravel(np.dot(x, D))
    squared_error = np.sum((y - x) ** 2)
    plt.plot(x, color='darkorange', lw=lw,
             label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error'
             % (len(idx), squared_error))
plt.axis('tight')
plt.legend(shadow=False, loc='best')
plt.subplots_adjust(.04, .07, .97, .90, .09, .2)
plt.show()
| bsd-3-clause |
empireryan/director | src/python/ddapp/trackers.py | 6 | 2726 | import time
from ddapp import segmentationroutines
from ddapp import segmentation
from ddapp.timercallback import TimerCallback
from ddapp.visualization import *
class TrackDrillOnTable(object):
    """Re-fits a drill standing on a table in successive disparity clouds.

    The first updateFit() call bootstraps by locating the drill barrel; later
    calls refit around the cached table centroid, which is much cheaper.
    (Python 2 source: note the print statements below.)
    """

    def __init__(self):
        # centroid of the supporting table; None until the bootstrap fit
        self.tableCentroid = None

    def updateFit(self):
        # get and display: .1sec
        polyData = segmentation.getDisparityPointCloud()
        if (polyData is None):
            return
        updatePolyData(polyData, 'pointcloud snapshot', colorByName='rgb_colors', visible=False)
        t0 = time.time()  # NOTE(review): t0 is never read -- leftover timing probe?
        if (self.tableCentroid is None):
            # initial fit .75 sec
            print "Boot Strapping tracker"
            self.tableCentroid = segmentation.findAndFitDrillBarrel(polyData)
        else:
            # refit .07 sec
            #print "current centroid"
            #print self.tableCentroid
            viewFrame = segmentationroutines.SegmentationContext.getGlobalInstance().getViewFrame()
            # robot forward axis expressed in world coordinates
            forwardDirection = np.array([1.0, 0.0, 0.0])
            viewFrame.TransformVector(forwardDirection, forwardDirection)
            robotOrigin = viewFrame.GetPosition()
            robotForward =forwardDirection
            fitResults = []
            drillFrame = segmentation.segmentDrillBarrelFrame(self.tableCentroid, polyData, robotForward)
            clusterObj = updatePolyData(polyData, 'surface cluster refit', color=[1,1,0], parent=segmentation.getDebugFolder(), visible=False)
            fitResults.append((clusterObj, drillFrame))
            segmentation.sortFittedDrills(fitResults, robotOrigin, robotForward)
class PointerTracker(object):

    '''
    Periodically estimates the pointer tip position in the stereo point cloud.
    See segmentation.estimatePointerTip() documentation.
    '''

    def __init__(self, robotModel, stereoPointCloudItem):
        self.robotModel = robotModel
        self.stereoPointCloudItem = stereoPointCloudItem
        # BUGFIX: initialize tipPosition so getPointerTip() cannot raise
        # AttributeError if it is called before the first updateFit().
        self.tipPosition = None
        self.timer = TimerCallback(targetFps=5)
        self.timer.callback = self.updateFit

    def start(self):
        """Start the periodic fitting timer."""
        self.timer.start()

    def stop(self):
        """Stop the periodic fitting timer."""
        self.timer.stop()

    def cleanup(self):
        """Remove the segmentation debug objects from the object model."""
        om.removeFromObjectModel(om.findObjectByName('segmentation'))

    def updateFit(self, polyData=None):
        """Re-estimate the tip; falls back to the stereo cloud when no polyData given."""
        #if not self.stereoPointCloudItem.getProperty('Visible'):
        #    return
        if not polyData:
            self.stereoPointCloudItem.update()
            polyData = self.stereoPointCloudItem.polyData
        if not polyData or not polyData.GetNumberOfPoints():
            self.cleanup()
            return
        self.tipPosition = segmentation.estimatePointerTip(self.robotModel, polyData)
        if self.tipPosition is None:
            self.cleanup()

    def getPointerTip(self):
        """Return the last fitted tip position, or None when unavailable."""
        return self.tipPosition
| bsd-3-clause |
basicthinker/ThyNVM | src/cpu/DummyChecker.py | 69 | 2259 | # Copyright (c) 2010-2011 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Geoffrey Blake
from m5.params import *
from CheckerCPU import CheckerCPU
class DummyChecker(CheckerCPU):
    # Minimal CheckerCPU SimObject; its C++ side is declared in
    # cpu/dummy_checker.hh (see cxx_header below).
    type = 'DummyChecker'
    cxx_header = 'cpu/dummy_checker.hh'
| bsd-3-clause |
joyxu/autotest | frontend/afe/rpc_interface.py | 2 | 48269 | """
Functions to expose over the RPC interface.
For all modify* and delete* functions that ask for an 'id' parameter to
identify the object to operate on, the id may be either
* the database row ID
* the name of the object (label name, hostname, user login, etc.)
* a dictionary containing uniquely identifying field (this option should seldom
be used)
When specifying foreign key fields (i.e. adding hosts to a label, or adding
users to an ACL group), the given value may be either the database row ID or the
name of the object.
All get* functions return lists of dictionaries. Each dictionary represents one
object and maps field names to values.
Some examples:
modify_host(2, hostname='myhost') # modify hostname of host with database ID 2
modify_host('ipaj2', hostname='myhost') # modify hostname of host 'ipaj2'
modify_test('sleeptest', test_type='Client', params=', seconds=60')
delete_acl_group(1) # delete by ID
delete_acl_group('Everyone') # delete by name
acl_group_add_users('Everyone', ['mbligh', 'showard'])
get_jobs(owner='showard', status='Queued')
See doctests/001_rpc_test.txt for (lots) more examples.
"""
__author__ = 'showard@google.com (Steve Howard)'
import datetime
import xmlrpclib
import logging
import os
# psutil is a non stdlib import, it needs to be installed
import psutil
try:
import autotest.common as common
except ImportError:
import common
from autotest.frontend.afe import models, model_logic, model_attributes
from autotest.frontend.afe import control_file, rpc_utils, reservations
from autotest.server.hosts.remote import get_install_server_info
from autotest.client.shared import version
from autotest.client.shared.settings import settings
#
# IMPORTANT: please update INTERFACE_VERSION with the current date whenever
# the interface changes, so that RPC clients can handle the changes
#
INTERFACE_VERSION = (2013, 9, 11)
# labels
def add_label(name, kernel_config=None, platform=None, only_if_needed=None):
    """
    Add (create) label.

    :param name: The name of the label.
    :param kernel_config: Kernel configuration (optional).
    :param platform: Platform (optional).
    :param only_if_needed: Only if needed (optional).
    :return: ID of the new label.
    """
    return models.Label.add_object(
        name=name, kernel_config=kernel_config, platform=platform,
        only_if_needed=only_if_needed).id
def modify_label(id, **data):
    """
    Modify (update) label.

    :param id: Label identification (row ID, name, or unique-field dict).
    :param data: Fields to modify.
    :return: None.
    """
    label = models.Label.smart_get(id)
    label.update_object(data)
def delete_label(id):
    """
    Delete label.

    :param id: Label identification.
    :return: None.
    """
    label = models.Label.smart_get(id)
    label.delete()
def label_add_hosts(id, hosts):
    """
    Add multiple hosts to a label.

    :param id: Label identification.
    :param hosts: A sequence of hosts (IDs or hostnames).
    :return: None.
    """
    host_objs = models.Host.smart_get_bulk(hosts)
    label = models.Label.smart_get(id)
    if label.platform:
        # presumably rejects hosts that already carry a platform label
        # (cf. the explicit one-platform check in host_add_labels)
        models.Host.check_no_platform(host_objs)
    label.host_set.add(*host_objs)
def label_remove_hosts(id, hosts):
    """
    Remove hosts from label.

    :param id: Label identification.
    :param hosts: A sequence of hosts.
    :return: None.
    """
    label = models.Label.smart_get(id)
    host_objs = models.Host.smart_get_bulk(hosts)
    label.host_set.remove(*host_objs)
def get_labels(**filter_data):
    """
    Get labels.

    :param filter_data: Filters out which labels to get.
    :return: A sequence of nested dictionaries of label information, with
        the related 'atomic_group' row expanded inline.
    """
    return rpc_utils.prepare_rows_as_nested_dicts(
        models.Label.query_objects(filter_data),
        ('atomic_group',))
# atomic groups


def add_atomic_group(name, max_number_of_machines=None, description=None):
    """
    Add (create) atomic group.

    :param name: Name of the atomic group.
    :param max_number_of_machines: Maximum number of machines (optional).
    :param description: Description (optional).
    :return: ID of the new atomic group.
    """
    return models.AtomicGroup.add_object(
        name=name, max_number_of_machines=max_number_of_machines,
        description=description).id


def modify_atomic_group(id, **data):
    """
    Modify (update) atomic group.

    :param id: Atomic group identification.
    :param data: Fields to modify.
    :return: None.
    """
    models.AtomicGroup.smart_get(id).update_object(data)


def delete_atomic_group(id):
    """
    Delete atomic group.

    :param id: Atomic group identification.
    :return: None.
    """
    models.AtomicGroup.smart_get(id).delete()


def atomic_group_add_labels(id, labels):
    """
    Add labels to atomic group.

    :param id: Atomic group identification.
    :param labels: Sequence of labels.
    :return: None.
    """
    label_objs = models.Label.smart_get_bulk(labels)
    models.AtomicGroup.smart_get(id).label_set.add(*label_objs)


def atomic_group_remove_labels(id, labels):
    """
    Remove labels from atomic group.

    :param id: Atomic group identification.
    :param labels: Sequence of labels.
    :return: None.
    """
    label_objs = models.Label.smart_get_bulk(labels)
    models.AtomicGroup.smart_get(id).label_set.remove(*label_objs)


def get_atomic_groups(**filter_data):
    """
    Get atomic groups.

    :param filter_data: Filters out which atomic groups to get.
    :return: Sequence of atomic groups (serialized dicts).
    """
    return rpc_utils.prepare_for_serialization(
        models.AtomicGroup.list_objects(filter_data))
# hosts


def add_host(hostname, status=None, locked=None, protection=None):
    """
    Add (create) host.

    :param hostname: The hostname.
    :param status: Status (optional).
    :param locked: Locked (optional).
    :param protection: Protection (optional).
    :return: ID of the new host.
    """
    host = models.Host.add_object(hostname=hostname, status=status,
                                  locked=locked, protection=protection)
    return host.id
def modify_host(id, **data):
    """
    Modify (update) host.

    :param id: Host identification.
    :param data: Fields to modify.
    :return: None.
    """
    # validate the requested changes before touching the host
    rpc_utils.check_modify_host(data)
    host = models.Host.smart_get(id)
    # extra validation for lock/unlock changes -- see rpc_utils
    rpc_utils.check_modify_host_locking(host, data)
    host.update_object(data)
def modify_hosts(host_filter_data, update_data):
    """
    Modify multiple hosts.

    :param host_filter_data: Filters out which hosts to modify.
    :param update_data: A dictionary with the changes to make to the hosts.
    :return: None.
    """
    # validate once; apply per host (no lock-change check here, unlike
    # modify_host -- NOTE(review): confirm that is intentional)
    rpc_utils.check_modify_host(update_data)
    hosts = models.Host.query_objects(host_filter_data)
    for host in hosts:
        host.update_object(update_data)
def host_add_labels(id, labels):
    """
    Add labels to host.

    :param id: Host identification.
    :param labels: Sequence of labels.
    :return: None.
    :raises model_logic.ValidationError: when more than one of the given
        labels is a platform, or the host already has a platform label.
    """
    labels = models.Label.smart_get_bulk(labels)
    host = models.Host.smart_get(id)
    # a host may carry at most one platform label
    platforms = [label.name for label in labels if label.platform]
    if len(platforms) > 1:
        raise model_logic.ValidationError(
            {'labels': 'Adding more than one platform label: %s' %
             ', '.join(platforms)})
    if len(platforms) == 1:
        models.Host.check_no_platform([host])
    host.labels.add(*labels)
def host_remove_labels(id, labels):
    """
    Remove labels from host.

    :param id: Host identification.
    :param labels: Sequence of labels.
    :return: None.
    """
    label_objs = models.Label.smart_get_bulk(labels)
    host = models.Host.smart_get(id)
    host.labels.remove(*label_objs)
def set_host_attribute(attribute, value, **host_filter_data):
    """
    Set host attribute.

    :param attribute: string name of attribute.
    :param value: string, or None to delete an attribute.
    :param host_filter_data: filter data to apply to Hosts to choose hosts
            to act upon.
    :return: None.
    """
    # disallow accidental actions on all hosts
    # NOTE(review): asserts are stripped under `python -O`; consider raising
    assert host_filter_data
    hosts = models.Host.query_objects(host_filter_data)
    models.AclGroup.check_for_acl_violation_hosts(hosts)
    for host in hosts:
        host.set_or_delete_attribute(attribute, value)
def delete_host(id):
    """
    Delete host.

    :param id: Host identification.
    :return: None.
    """
    host = models.Host.smart_get(id)
    host.delete()
def get_hosts(multiple_labels=(), exclude_only_if_needed_labels=False,
              exclude_atomic_group_hosts=False, valid_only=True, **filter_data):
    """
    Get hosts.

    :param multiple_labels: match hosts in all of the labels given (optional).
            Should be a list of label names.
    :param exclude_only_if_needed_labels: Exclude hosts with
            at least one "only_if_needed" label applied (optional).
    :param exclude_atomic_group_hosts: Exclude hosts that have one or more
            atomic group labels associated with them.
    :param valid_only: Filter valid hosts (optional).
    :param filter_data: Filters out which hosts to get.
    :return: Sequence of host dicts, each augmented with labels, platform,
        atomic_group, acls, attributes, and -- when a cobbler install server
        is configured -- the available profiles and current profile.
    """
    hosts = rpc_utils.get_host_query(multiple_labels,
                                     exclude_only_if_needed_labels,
                                     exclude_atomic_group_hosts,
                                     valid_only, filter_data)
    hosts = list(hosts)
    # bulk-fetch related rows to avoid one query per host
    models.Host.objects.populate_relationships(hosts, models.Label,
                                               'label_list')
    models.Host.objects.populate_relationships(hosts, models.AclGroup,
                                               'acl_list')
    models.Host.objects.populate_relationships(hosts, models.HostAttribute,
                                               'attribute_list')
    install_server = None
    install_server_info = get_install_server_info()
    install_server_type = install_server_info.get('type', None)
    install_server_url = install_server_info.get('xmlrpc_url', None)
    # only cobbler install servers are supported for profile lookup
    if install_server_type == 'cobbler' and install_server_url:
        install_server = xmlrpclib.ServerProxy(install_server_url)
    host_dicts = []
    for host_obj in hosts:
        host_dict = host_obj.get_object_dict()
        host_dict['labels'] = [label.name for label in host_obj.label_list]
        host_dict['platform'], host_dict['atomic_group'] = (rpc_utils.
                                                            find_platform_and_atomic_group(host_obj))
        host_dict['acls'] = [acl.name for acl in host_obj.acl_list]
        host_dict['attributes'] = dict((attribute.attribute, attribute.value)
                                       for attribute in host_obj.attribute_list)
        # pessimistic default; cleared only when a unique system with a
        # platform is found on the install server
        error_encountered = True
        if install_server is not None:
            system_params = {"name": host_dict['hostname']}
            system_list = install_server.find_system(system_params, True)
            if len(system_list) < 1:
                msg = 'System "%s" not found on install server'
                rpc_logger = logging.getLogger('rpc_logger')
                rpc_logger.info(msg, host_dict['hostname'])
            elif len(system_list) > 1:
                msg = 'Found multiple systems on install server named %s'
                if install_server_type == 'cobbler':
                    msg = '%s. This should never happen on cobbler' % msg
                rpc_logger = logging.getLogger('rpc_logger')
                rpc_logger.error(msg, host_dict['hostname'])
            else:
                system = system_list[0]
                if host_dict['platform']:
                    error_encountered = False
                    profiles = sorted(install_server.get_item_names('profile'))
                    host_dict['profiles'] = profiles
                    host_dict['profiles'].insert(0, 'Do_not_install')
                    use_current_profile = settings.get_value('INSTALL_SERVER',
                                                             'use_current_profile', type=bool, default=True)
                    if use_current_profile:
                        host_dict['current_profile'] = system['profile']
                    else:
                        host_dict['current_profile'] = 'Do_not_install'
        if error_encountered:
            host_dict['profiles'] = ['N/A']
            host_dict['current_profile'] = 'N/A'
        host_dicts.append(host_dict)
    return rpc_utils.prepare_for_serialization(host_dicts)
def get_num_hosts(multiple_labels=(), exclude_only_if_needed_labels=False,
                  exclude_atomic_group_hosts=False, valid_only=True,
                  **filter_data):
    """
    Get the number of hosts. Same parameters as get_hosts().

    :return: The number of matching hosts.
    """
    query = rpc_utils.get_host_query(multiple_labels,
                                     exclude_only_if_needed_labels,
                                     exclude_atomic_group_hosts,
                                     valid_only, filter_data)
    return query.count()
def get_install_server_profiles():
    """
    Get install server profiles.

    :return: Sequence of profile names, or None when no cobbler install
        server is configured.
    """
    server_info = get_install_server_info()
    if server_info.get('type', None) == 'cobbler':
        url = server_info.get('xmlrpc_url', None)
        if url:
            server = xmlrpclib.ServerProxy(url)
            return server.get_item_names('profile')
    return None
def get_profiles():
    """
    Get profiles.

    :return: Sequence of dicts with a 'name' key: 'Do_not_install' followed
        by the sorted profile names, or a single 'N/A' entry when no install
        server is configured or it defines no profiles.
    """
    # Simplified from a convoluted error_encountered-flag structure: the two
    # failure cases (no server -> None, empty profile list) both yield 'N/A'.
    profile_dicts = []
    profiles = get_install_server_profiles()
    if profiles is None or len(profiles) < 1:
        if profiles is not None:
            # server reachable but nothing defined on it
            msg = 'No profiles defined on install server'
            rpc_logger = logging.getLogger('rpc_logger')
            rpc_logger.info(msg)
        profile_dicts.append(dict(name="N/A"))
    else:
        profiles.sort()
        # 'Do_not_install' is always offered first
        profile_dicts.append(dict(name="Do_not_install"))
        for profile in profiles:
            profile_dicts.append(dict(name=profile))
    return rpc_utils.prepare_for_serialization(profile_dicts)
def get_num_profiles():
    """
    Get the number of profiles. Same parameters as get_profiles().

    :return: The number of defined profiles plus one for the implicit
        'Do_not_install' entry, or 1 (the 'N/A' placeholder) when the
        install server is unavailable or defines no profiles.
    """
    # Simplified: the original tracked a dead 'error_encountered' flag; both
    # failure cases (no server -> None, empty list) return 1 for 'N/A'.
    profiles = get_install_server_profiles()
    if profiles is None or len(profiles) < 1:
        # 'N/A'
        return 1
    # include 'Do_not_install'
    return len(profiles) + 1
def reserve_hosts(host_filter_data, username=None):
    """
    Reserve some hosts.

    :param host_filter_data: Filters out which hosts to reserve.
    :param username: login of the user reserving hosts
    :type username: str
    :return: None.
    """
    hosts = models.Host.query_objects(host_filter_data)
    hostnames = [host.hostname for host in hosts]
    reservations.create(hosts_to_reserve=hostnames, username=username)
def release_hosts(host_filter_data, username=None):
    """
    Release some hosts.

    :param host_filter_data: Filters out which hosts to release.
    :param username: login of the user releasing hosts
    :type username: str
    :return: None.
    """
    matched_hosts = models.Host.query_objects(host_filter_data)
    hostnames = [host.hostname for host in matched_hosts]
    reservations.release(hosts_to_release=hostnames, username=username)
# tests
def add_test(name, test_type, path, author=None, dependencies=None,
             experimental=True, run_verify=None, test_class=None,
             test_time=None, test_category=None, description=None,
             sync_count=1):
    """
    Add (create) test.

    :param name: Test name.
    :param test_type: Test type (Client or Server).
    :param path: Relative path to the test.
    :param author: The author of the test (optional).
    :param dependencies: Dependencies (optional).
    :param experimental: Experimental? (True or False) (optional).
    :param run_verify: Run verify? (True or False) (optional).
    :param test_class: Test class (optional).
    :param test_time: Test time (optional).
    :param test_category: Test category (optional).
    :param description: Description (optional).
    :param sync_count: Sync count (optional).
    :return: ID of the newly created test.
    """
    new_test = models.Test.add_object(
        name=name, test_type=test_type, path=path, author=author,
        dependencies=dependencies, experimental=experimental,
        run_verify=run_verify, test_class=test_class, test_time=test_time,
        test_category=test_category, description=description,
        sync_count=sync_count)
    return new_test.id
def modify_test(id, **data):
    """
    Modify (update) test.

    :param id: Test identification.
    :param data: Test data to modify.
    :return: None.
    """
    test = models.Test.smart_get(id)
    test.update_object(data)
def delete_test(id):
    """
    Delete test.

    :param id: Test identification.
    :return: None.
    """
    test = models.Test.smart_get(id)
    test.delete()
def get_tests(**filter_data):
    """
    Get tests.

    :param filter_data: Filters out which tests to get.
    :return: Sequence of tests, prepared for serialization.
    """
    tests = models.Test.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(tests)
# profilers
def add_profiler(name, description=None):
    """
    Add (create) profiler.

    :param name: The name of the profiler.
    :param description: Description (optional).
    :return: ID of the newly created profiler.
    """
    profiler = models.Profiler.add_object(name=name, description=description)
    return profiler.id
def modify_profiler(id, **data):
    """
    Modify (update) profiler.

    :param id: Profiler identification.
    :param data: Profiler data to modify.
    :return: None.
    """
    profiler = models.Profiler.smart_get(id)
    profiler.update_object(data)
def delete_profiler(id):
    """
    Delete profiler.

    :param id: Profiler identification.
    :return: None.
    """
    profiler = models.Profiler.smart_get(id)
    profiler.delete()
def get_profilers(**filter_data):
    """
    Get all profilers.

    :param filter_data: Filters out which profilers to get.
    :return: Sequence of profilers, prepared for serialization.
    """
    profilers = models.Profiler.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(profilers)
# users
def add_user(login, access_level=None):
    """
    Add (create) user.

    :param login: The login name.
    :param access_level: Access level (optional).
    :return: ID of the newly created user.
    """
    return models.User.add_object(login=login, access_level=access_level).id
def modify_user(id, **data):
    """
    Modify (update) user.

    :param id: User identification.
    :param data: User data to modify.
    :return: None.
    """
    user = models.User.smart_get(id)
    user.update_object(data)
def delete_user(id):
    """
    Delete user.

    :param id: User identification.
    :return: None.
    """
    user = models.User.smart_get(id)
    user.delete()
def get_users(**filter_data):
    """
    Get users.

    :param filter_data: Filters out which users to get.
    :return: Sequence of users, prepared for serialization.
    """
    users = models.User.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(users)
# acl groups
def add_acl_group(name, description=None):
    """
    Add (create) ACL group.

    The user making the RPC call is automatically added as a member of the
    new group.

    :param name: The name of the ACL group.
    :param description: Description (optional).
    :return: ID of the newly created ACL group.
    """
    new_group = models.AclGroup.add_object(name=name, description=description)
    new_group.users.add(models.User.current_user())
    return new_group.id
def modify_acl_group(id, **data):
    """
    Modify (update) ACL group.

    :param id: ACL group identification.
    :param data: ACL group data to modify.
    :return: None.
    """
    acl_group = models.AclGroup.smart_get(id)
    # the caller must be allowed to touch this group
    acl_group.check_for_acl_violation_acl_group()
    acl_group.update_object(data)
    # a group must never be left without members
    acl_group.add_current_user_if_empty()
def acl_group_add_users(id, users):
    """
    Add users to an ACL group.

    :param id: ACL group identification.
    :param users: Sequence of users.
    :return: None.
    """
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    user_objects = models.User.smart_get_bulk(users)
    acl_group.users.add(*user_objects)
def acl_group_remove_users(id, users):
    """
    Remove users from an ACL group.

    :param id: ACL group identification.
    :param users: Sequence of users.
    :return: None.
    """
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    user_objects = models.User.smart_get_bulk(users)
    acl_group.users.remove(*user_objects)
    # a group must never be left without members
    acl_group.add_current_user_if_empty()
def acl_group_add_hosts(id, hosts):
    """
    Add hosts to an ACL group.

    :param id: ACL group identification.
    :param hosts: Sequence of hosts to add.
    :return: None.
    """
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    host_objects = models.Host.smart_get_bulk(hosts)
    acl_group.hosts.add(*host_objects)
    # keep dependent ACL bookkeeping (e.g. 'Everyone') in sync
    acl_group.on_host_membership_change()
def acl_group_remove_hosts(id, hosts):
    """
    Remove hosts from an ACL group.

    :param id: ACL group identification.
    :param hosts: Sequence of hosts to remove.
    :return: None.
    """
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    host_objects = models.Host.smart_get_bulk(hosts)
    acl_group.hosts.remove(*host_objects)
    # keep dependent ACL bookkeeping (e.g. 'Everyone') in sync
    acl_group.on_host_membership_change()
def delete_acl_group(id):
    """
    Delete ACL group.

    :param id: ACL group identification.
    :return: None.
    """
    acl_group = models.AclGroup.smart_get(id)
    acl_group.delete()
def get_acl_groups(**filter_data):
    """
    Get ACL groups.

    :param filter_data: Filters out which ACL groups to get.
    :return: Sequence of ACL group dicts, each augmented with 'users'
             (login names) and 'hosts' (hostnames), prepared for
             serialization.
    """
    group_dicts = models.AclGroup.list_objects(filter_data)
    for group_dict in group_dicts:
        # re-fetch the model object to expand the many-to-many relations
        group = models.AclGroup.objects.get(id=group_dict['id'])
        group_dict['users'] = [user.login for user in group.users.all()]
        group_dict['hosts'] = [host.hostname for host in group.hosts.all()]
    return rpc_utils.prepare_for_serialization(group_dicts)
# jobs
def generate_control_file(tests=(), kernel=None, label=None, profilers=(),
                          client_control_file='', use_container=False,
                          profile_only=None, upload_kernel_config=False):
    """
    Generates a client-side control file to load a kernel and run tests.

    :param tests: List of tests to run.
    :param kernel: A list of kernel info dictionaries configuring which
        kernels to boot for this job and other options for them.
    :param label: Name of label to grab kernel config from.
    :param profilers: List of profilers to activate during the job.
    :param client_control_file: The contents of a client-side control file
        to run at the end of all tests.  If this is supplied, all tests
        must be client side.
        TODO: in the future we should support server control files directly
        to wrap with a kernel.  That'll require changing the parameter name
        and adding a boolean to indicate if it is a client or server control
        file.
    :param use_container: unused argument today.  TODO: Enable containers
        on the host during a client side test.
    :param profile_only: A boolean that indicates what default profile_only
        mode to use in the control file.  Passing None will generate a
        control file that does not explicitly set the default mode at all.
    :param upload_kernel_config: if enabled it will generate server control
        file code that uploads the kernel config file to the client and
        tells the client of the new (local) path when compiling the kernel;
        the tests must be server side tests.
    :return: a dict with the following keys:
        control_file: str, The control file text.
        is_server: bool, is the control file a server-side control file?
        synch_count: How many machines the job uses per autoserv execution.
            synch_count == 1 means the job is asynchronous.
        dependencies: A list of the names of labels on which the job depends.
    """
    if not tests and not client_control_file:
        # nothing to run: an empty, asynchronous, client-side control file
        return dict(control_file='', is_server=False, synch_count=1,
                    dependencies=[])
    cf_info, test_objects, profiler_objects, label = (
        rpc_utils.prepare_generate_control_file(tests, kernel, label,
                                                profilers))
    cf_info['control_file'] = control_file.generate_control(
        tests=test_objects, kernels=kernel, platform=label,
        profilers=profiler_objects, is_server=cf_info['is_server'],
        client_control_file=client_control_file, profile_only=profile_only,
        upload_kernel_config=upload_kernel_config)
    return cf_info
def create_parameterized_job(name, priority, test, parameters, kernel=None,
                             label=None, profiles=[], profilers=(),
                             profiler_parameters=None,
                             use_container=False, profile_only=None,
                             upload_kernel_config=False, hosts=[],
                             meta_hosts=[], meta_host_profiles=[], one_time_hosts=[],
                             atomic_group_name=None, synch_count=None,
                             is_template=False, timeout=None,
                             max_runtime_hrs=None, run_verify=True,
                             email_list='', dependencies=(), reboot_before=None,
                             reboot_after=None, parse_failed_repair=None,
                             hostless=False, keyvals=None, drone_set=None,
                             reserve_hosts=False):
    """
    Creates and enqueues a parameterized job.

    Most parameters a combination of the parameters for generate_control_file()
    and create_job(), with the exception of:

    :param test name or ID of the test to run
    :param parameters a map of parameter name ->
                          tuple of (param value, param type)
    :param profiler_parameters a dictionary of parameters for the profilers:
                                   key: profiler name
                                   value: dict of param name -> tuple of
                                                                (param value,
                                                                 param type)
    """
    # NOTE(review): the mutable list defaults (profiles=[], hosts=[], ...)
    # are shared across calls.  They appear to be read-only here (consumed
    # via locals() below), but callers should not rely on mutating them.
    # Save the values of the passed arguments here. What we're going to do with
    # them is pass them all to rpc_utils.get_create_job_common_args(), which
    # will extract the subset of these arguments that apply for
    # rpc_utils.create_job_common(), which we then pass in to that function.
    args = locals()
    # Set up the parameterized job configs
    test_obj = models.Test.smart_get(test)
    if test_obj.test_type == model_attributes.TestTypes.SERVER:
        control_type = models.Job.ControlType.SERVER
    else:
        control_type = models.Job.ControlType.CLIENT
    # an unknown/missing label simply means "no label"
    try:
        label = models.Label.smart_get(label)
    except models.Label.DoesNotExist:
        label = None
    kernel_objs = models.Kernel.create_kernels(kernel)
    profiler_objs = [models.Profiler.smart_get(profiler)
                     for profiler in profilers]
    parameterized_job = models.ParameterizedJob.objects.create(
        test=test_obj, label=label, use_container=use_container,
        profile_only=profile_only,
        upload_kernel_config=upload_kernel_config)
    parameterized_job.kernels.add(*kernel_objs)
    # attach each requested profiler together with its parameters
    # NOTE(review): this indexes profiler_parameters (default None) when any
    # profilers are given — presumably callers always pass a dict then;
    # confirm before relying on it.
    for profiler in profiler_objs:
        parameterized_profiler = models.ParameterizedJobProfiler.objects.create(
            parameterized_job=parameterized_job,
            profiler=profiler)
        profiler_params = profiler_parameters.get(profiler.name, {})
        # NOTE: `name` below shadows the job-name argument; harmless only
        # because `args` captured locals() earlier.
        for name, (value, param_type) in profiler_params.iteritems():
            models.ParameterizedJobProfilerParameter.objects.create(
                parameterized_job_profiler=parameterized_profiler,
                parameter_name=name,
                parameter_value=value,
                parameter_type=param_type)
    # from here on, clean up the half-built parameterized job on any failure
    try:
        # consume the caller-supplied parameters that the test declares;
        # anything left over in `parameters` afterwards is an error
        for parameter in test_obj.testparameter_set.all():
            if parameter.name in parameters:
                param_value, param_type = parameters.pop(parameter.name)
                parameterized_job.parameterizedjobparameter_set.create(
                    test_parameter=parameter, parameter_value=param_value,
                    parameter_type=param_type)
        if parameters:
            raise Exception('Extra parameters remain: %r' % parameters)
        return rpc_utils.create_job_common(
            parameterized_job=parameterized_job.id,
            control_type=control_type,
            **rpc_utils.get_create_job_common_args(args))
    except:
        parameterized_job.delete()
        raise
def create_job(name, priority, control_file, control_type,
               hosts=[], profiles=[], meta_hosts=[], meta_host_profiles=[],
               one_time_hosts=[], atomic_group_name=None, synch_count=None,
               is_template=False, timeout=None, max_runtime_hrs=None,
               run_verify=True, email_list='', dependencies=(), reboot_before=None,
               reboot_after=None, parse_failed_repair=None, hostless=False,
               keyvals=None, drone_set=None, reserve_hosts=False):
    """
    Create and enqueue a job.

    :param name: name of this job
    :param priority: Low, Medium, High, Urgent
    :param control_file: String contents of the control file.
    :param control_type: Type of control file, Client or Server.
    :param synch_count: How many machines the job uses per autoserv execution.
                        synch_count == 1 means the job is asynchronous.  If an
                        atomic group is given this value is treated as a
                        minimum.
    :param is_template: If true then create a template job.
    :param timeout: Hours after this call returns until the job times out.
    :param max_runtime_hrs: Hours from job starting time until job times out
    :param run_verify: Should the host be verified before running the test?
    :param email_list: String containing emails to mail when the job is done
    :param dependencies: List of label names on which this job depends
    :param reboot_before: Never, If dirty, or Always
    :param reboot_after: Never, If all tests passed, or Always
    :param parse_failed_repair: if true, results of failed repairs launched by
                                this job will be parsed as part of the job.
    :param hostless: if true, create a hostless job
    :param keyvals: dict of keyvals to associate with the job
    :param hosts: List of hosts to run job on.
    :param profiles: List of profiles to use, in sync with @hosts list
    :param meta_hosts: List where each entry is a label name, and for each
                       entry one host will be chosen from that label to run
                       the job on.
    :param one_time_hosts: List of hosts not in the database to run the job on.
    :param atomic_group_name: name of an atomic group to schedule the job on.
    :param drone_set: The name of the drone set to run this test on.
    :param reserve_hosts: If set we will reseve the hosts that were allocated
                          for this job
    :returns: The created Job id number.
    :rtype: integer
    """
    # NOTE(review): the mutable list defaults (hosts=[], profiles=[], ...)
    # are shared across calls; they appear to be read-only here, consumed
    # via locals() below.
    # locals() captures every argument; get_create_job_common_args() picks
    # out the subset that create_job_common() accepts.
    return rpc_utils.create_job_common(
        **rpc_utils.get_create_job_common_args(locals()))
def abort_host_queue_entries(**filter_data):
    """
    Abort a set of host queue entries.

    Only entries that have not yet completed are aborted; the caller must
    have abort permission on all of them.

    :param filter_data: Filters out which host queue entries to abort.
    :return: None.
    """
    incomplete = models.HostQueueEntry.query_objects(filter_data).filter(
        complete=False)
    models.AclGroup.check_abort_permissions(incomplete)
    entries = list(incomplete.select_related())
    # synchronous jobs can only be aborted as a whole
    rpc_utils.check_abort_synchronous_jobs(entries)
    for entry in entries:
        entry.abort()
def reverify_hosts(**filter_data):
    """
    Schedules a set of hosts for verify.

    :param filter_data: Filters out which hosts.
    :return: A sorted list of hostnames that a verify task was created for.
    """
    hosts = models.Host.query_objects(filter_data)
    models.AclGroup.check_for_acl_violation_hosts(hosts)
    for host in hosts:
        models.SpecialTask.schedule_special_task(
            host, models.SpecialTask.Task.VERIFY)
    return sorted(host.hostname for host in hosts)
def get_jobs(not_yet_run=False, running=False, finished=False, **filter_data):
    """
    Get jobs, with extra filter args:

    -not_yet_run: Include only jobs that have not yet started running.
    -running: Include only jobs that have start running but for which not
        all hosts have completed.
    -finished: Include only jobs for which all hosts have completed (or
        aborted).

    At most one of these three fields should be specified.
    """
    filter_data['extra_args'] = rpc_utils.extra_job_filters(
        not_yet_run, running, finished)
    jobs = list(models.Job.query_objects(filter_data))
    # bulk-fetch relations to avoid one query per job
    models.Job.objects.populate_relationships(jobs, models.Label,
                                              'dependencies')
    models.Job.objects.populate_relationships(jobs, models.JobKeyval,
                                              'keyvals')
    job_dicts = []
    for job in jobs:
        info = job.get_object_dict()
        info['dependencies'] = ','.join(label.name
                                        for label in job.dependencies)
        info['keyvals'] = dict((keyval.key, keyval.value)
                               for keyval in job.keyvals)
        job_dicts.append(info)
    return rpc_utils.prepare_for_serialization(job_dicts)
def get_num_jobs(not_yet_run=False, running=False, finished=False,
                 **filter_data):
    """
    Get the number of jobs matching the filters.

    See get_jobs() for documentation of extra filter parameters.
    """
    filter_data['extra_args'] = rpc_utils.extra_job_filters(
        not_yet_run, running, finished)
    return models.Job.query_count(filter_data)
def get_jobs_summary(**filter_data):
    """
    Like get_jobs(), but adds a 'status_counts' field, which is a dictionary
    mapping status strings to the number of hosts currently with that
    status, i.e. {'Queued' : 4, 'Running' : 2}.
    """
    job_dicts = get_jobs(**filter_data)
    job_ids = [job_dict['id'] for job_dict in job_dicts]
    status_counts = models.Job.objects.get_status_counts(job_ids)
    for job_dict in job_dicts:
        job_dict['status_counts'] = status_counts[job_dict['id']]
    return rpc_utils.prepare_for_serialization(job_dicts)
def get_info_for_clone(id, preserve_metahosts, queue_entry_filter_data=None):
    """
    Retrieves all the information needed to clone a job.

    :param id: ID of the job to clone.
    :param preserve_metahosts: whether scheduled hosts should be kept as
        metahosts or resolved to concrete hosts.
    :param queue_entry_filter_data: optional extra filters for which queue
        entries of the job to consider.
    :return: serializable dict with 'job', 'meta_host_counts', 'hosts',
        'atomic_group_name', 'hostless' and 'drone_set' keys.
    """
    job = models.Job.objects.get(id=id)
    job_info = rpc_utils.get_job_info(job,
                                      preserve_metahosts,
                                      queue_entry_filter_data)
    host_dicts = []
    # concrete hosts and their profiles are kept as parallel lists
    for host, profile in zip(job_info['hosts'], job_info['profiles']):
        host_dict = get_hosts(id=host.id)[0]
        # 'other_labels' is every label except the platform label
        other_labels = host_dict['labels']
        if host_dict['platform']:
            other_labels.remove(host_dict['platform'])
        host_dict['other_labels'] = ', '.join(other_labels)
        host_dict['profile'] = profile
        host_dicts.append(host_dict)
    # one-time hosts are not in the database, so build minimal dicts
    for host in job_info['one_time_hosts']:
        host_dict = dict(hostname=host.hostname,
                         id=host.id,
                         platform='(one-time host)',
                         locked_text='')
        host_dicts.append(host_dict)
    # convert keys from Label objects to strings (names of labels)
    meta_host_counts = dict((meta_host.name, count) for meta_host, count
                            in job_info['meta_host_counts'].iteritems())
    info = dict(job=job.get_object_dict(),
                meta_host_counts=meta_host_counts,
                hosts=host_dicts)
    info['job']['dependencies'] = job_info['dependencies']
    if job_info['atomic_group']:
        info['atomic_group_name'] = (job_info['atomic_group']).name
    else:
        info['atomic_group_name'] = None
    info['hostless'] = job_info['hostless']
    # drone_set may be None; report its name otherwise
    info['drone_set'] = job.drone_set and job.drone_set.name
    return rpc_utils.prepare_for_serialization(info)
# host queue entries
def get_host_queue_entries(**filter_data):
    """
    Get host queue entries.

    :return: A sequence of nested dictionaries of host and job information.
    """
    entries = models.HostQueueEntry.query_objects(filter_data)
    return rpc_utils.prepare_rows_as_nested_dicts(
        entries, ('host', 'atomic_group', 'job'))
def get_num_host_queue_entries(**filter_data):
    """
    Get the number of host queue entries matching the filters.

    :param filter_data: Filters out which host queue entries to count.
    """
    count = models.HostQueueEntry.query_count(filter_data)
    return count
def get_hqe_percentage_complete(**filter_data):
    """
    Computes the fraction of host queue entries matching the given filter
    data that are complete.
    """
    query = models.HostQueueEntry.query_objects(filter_data)
    num_complete = query.filter(complete=True).count()
    num_total = query.count()
    if num_total == 0:
        # no matching entries: report fully complete rather than divide by 0
        return 1
    return float(num_complete) / num_total
# special tasks
def get_special_tasks(**filter_data):
    """
    Get special tasks as nested dictionaries.

    :param filter_data: Filters out which special tasks to get.
    :return: Sequence of special-task dicts with nested 'host' and
             'queue_entry' sub-dictionaries.
    """
    return rpc_utils.prepare_rows_as_nested_dicts(
        models.SpecialTask.query_objects(filter_data),
        ('host', 'queue_entry'))
# support for host detail view
def get_host_queue_entries_and_special_tasks(hostname, query_start=None,
                                             query_limit=None):
    """
    Get an interleaved view of a host's queue entries and special tasks.

    :param hostname: name of the host whose history is requested.
    :param query_start: optional offset of the first entry to return.
    :param query_limit: optional maximum number of entries to return.
    :return: an interleaved list of HostQueueEntries and SpecialTasks,
             in approximate run order. each dict contains keys for type,
             host, job, status, started_on, execution_path, and ID.
    """
    total_limit = None
    if query_limit is not None:
        # Fetch enough rows from each table to cover the requested window.
        # query_start may legitimately be omitted while query_limit is
        # given; the previous code raised TypeError (None + int) here.
        total_limit = (query_start or 0) + query_limit
    filter_data = {'host__hostname': hostname,
                   'query_limit': total_limit,
                   'sort_by': ['-id']}
    queue_entries = list(models.HostQueueEntry.query_objects(filter_data))
    special_tasks = list(models.SpecialTask.query_objects(filter_data))
    interleaved_entries = rpc_utils.interleave_entries(queue_entries,
                                                       special_tasks)
    # apply the requested window to the merged list
    if query_start is not None:
        interleaved_entries = interleaved_entries[query_start:]
    if query_limit is not None:
        interleaved_entries = interleaved_entries[:query_limit]
    return rpc_utils.prepare_for_serialization(interleaved_entries)
def get_num_host_queue_entries_and_special_tasks(hostname):
    """
    Get the total number of entries get_host_queue_entries_and_special_tasks
    would return for this host (queue entries plus special tasks).
    """
    filter_data = {'host__hostname': hostname}
    return (models.HostQueueEntry.query_count(filter_data)
            + models.SpecialTask.query_count(filter_data))
# recurring run
def get_recurring(**filter_data):
    """
    Return recurring jobs.

    :param filter_data: Filters out which recurring jobs to get.
    :return: Sequence of recurring jobs as nested dictionaries.
    """
    runs = models.RecurringRun.query_objects(filter_data)
    return rpc_utils.prepare_rows_as_nested_dicts(runs, ('job', 'owner'))
def get_num_recurring(**filter_data):
    """
    Get the number of recurring jobs.

    :param filter_data: Filters out which recurring jobs to count.
    :return: Number of recurring jobs.
    """
    count = models.RecurringRun.query_count(filter_data)
    return count
def delete_recurring_runs(**filter_data):
    """
    Delete recurring jobs.

    :param filter_data: Filters out which recurring jobs to delete.
    :return: None.
    """
    models.RecurringRun.query_objects(filter_data).delete()
def create_recurring_run(job_id, start_date, loop_period, loop_count):
    """
    Create (add) a recurring job.

    :param job_id: Job identification.
    :param start_date: Start date.
    :param loop_period: Loop period.
    :param loop_count: Loop count.
    :return: The newly created recurring run, owned by the calling user.
    """
    owner = models.User.current_user().login
    job = models.Job.objects.get(id=job_id)
    return job.create_recurring_job(start_date=start_date,
                                    loop_period=loop_period,
                                    loop_count=loop_count,
                                    owner=owner)
# other
def echo(data=""):
    """
    Echo - a basic connectivity test for the RPC interface.

    :param data: Object to echo; it must be serializable.
    :return: The same object, unchanged.
    """
    return data
def get_motd():
    """
    Returns the message of the day (MOTD).

    :return: String with MOTD.
    """
    motd = rpc_utils.get_motd()
    return motd
def get_static_data():
    """
    Returns a dictionary containing a bunch of data that shouldn't change
    often and is otherwise inaccessible.  This includes:

    priorities: List of job priority choices.
    default_priority: Default priority value for new jobs.
    users: Sorted list of all users.
    labels: Sorted list of all labels.
    atomic_groups: Sorted list of all atomic groups.
    tests: Sorted list of all tests.
    profilers: Sorted list of all profilers.
    current_user: Logged-in username.
    host_statuses: Sorted list of possible Host statuses.
    job_statuses: Sorted list of possible HostQueueEntry statuses.
    job_timeout_default: The default job timeout length in hours.
    parse_failed_repair_default: Default value for the parse_failed_repair job
        option.
    reboot_before_options: A list of valid RebootBefore string enums.
    reboot_after_options: A list of valid RebootAfter string enums.
    motd: Server's message of the day.
    status_dictionary: A mapping from one word job status names to a more
        informative description.
    """
    job_fields = models.Job.get_field_dict()
    # default drone set first, then the remaining sets sorted by name
    default_drone_set_name = models.DroneSet.default_drone_set_name()
    drone_sets = ([default_drone_set_name] +
                  sorted(drone_set.name for drone_set in
                         models.DroneSet.objects.exclude(
                             name=default_drone_set_name)))
    result = {}
    result['priorities'] = models.Job.Priority.choices()
    # expose the default priority as its display string, not its raw value
    default_priority = job_fields['priority'].default
    default_string = models.Job.Priority.get_string(default_priority)
    result['default_priority'] = default_string
    result['users'] = get_users(sort_by=['login'])
    result['labels'] = get_labels(sort_by=['-platform', 'name'])
    result['atomic_groups'] = get_atomic_groups(sort_by=['name'])
    result['tests'] = get_tests(sort_by=['name'])
    result['profilers'] = get_profilers(sort_by=['name'])
    result['current_user'] = rpc_utils.prepare_for_serialization(
        models.User.current_user().get_object_dict())
    result['host_statuses'] = sorted(models.Host.Status.names)
    result['job_statuses'] = sorted(models.HostQueueEntry.Status.names)
    result['job_timeout_default'] = models.Job.DEFAULT_TIMEOUT
    result['job_max_runtime_hrs_default'] = models.Job.DEFAULT_MAX_RUNTIME_HRS
    result['parse_failed_repair_default'] = bool(
        models.Job.DEFAULT_PARSE_FAILED_REPAIR)
    result['reboot_before_options'] = model_attributes.RebootBefore.names
    result['reboot_after_options'] = model_attributes.RebootAfter.names
    result['motd'] = rpc_utils.get_motd()
    result['drone_sets_enabled'] = models.DroneSet.drone_sets_enabled()
    result['drone_sets'] = drone_sets
    result['parameterized_jobs'] = models.Job.parameterized_jobs_enabled()
    # human-readable descriptions for the one-word HQE statuses
    result['status_dictionary'] = {"Aborted": "Aborted",
                                   "Verifying": "Verifying Host",
                                   "Pending": "Waiting on other hosts",
                                   "Running": "Running autoserv",
                                   "Completed": "Autoserv completed",
                                   "Failed": "Failed to complete",
                                   "Queued": "Queued",
                                   "Starting": "Next in host's queue",
                                   "Stopped": "Other host(s) failed verify",
                                   "Parsing": "Awaiting parse of final results",
                                   "Gathering": "Gathering log files",
                                   "Template": "Template job for recurring run",
                                   "Waiting": "Waiting for scheduler action",
                                   "Archiving": "Archiving results"}
    return result
def get_server_time():
    """
    Return the server's current local time.

    :return: Date string in format YYYY-MM-DD HH:MM
    """
    now = datetime.datetime.now()
    return now.strftime("%Y-%m-%d %H:%M")
def get_version():
    """
    Return autotest version.

    :return: String with version, as reported by the version module.
    """
    return version.get_version()
def get_interface_version():
    """
    Return the RPC interface version.

    :return: Sequence with year, month number, day (the module-level
             INTERFACE_VERSION constant).
    """
    return INTERFACE_VERSION
def _get_logs_used_space():
    """
    (Internal) Return disk usage (percentage) for the results directory.

    :return: Usage in percents (integer value).
    """
    logs_dir = settings.get_value('COMMON', 'test_output_dir', default=None)
    if logs_dir is None:
        # fall back to <autotest root>/results, two levels above this module
        autodir = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                               '..', '..'))
        logs_dir = os.path.join(autodir, 'results')
    return int(psutil.disk_usage(logs_dir).percent)
def _process_running(process_name):
    """
    (Internal) Return whether a given process name is running.

    :param process_name: The name of the process.
    :return: True (running) or False (no).
    """
    # NOTE: .cmdline / .is_running as attributes match the old psutil 0.x
    # property API this module was written against.
    for proc in psutil.process_iter():
        for arg in proc.cmdline:
            if os.path.basename(arg) == process_name and proc.is_running:
                return True
    return False
def get_server_status():
    """
    Get autotest server system information.

    :return: Dict with keys:
        * 'used_space_logs' Autotest log directory disk usage (percent)
        * 'scheduler_running' Whether the autotest scheduler is running
        * 'scheduler_watcher_running' Whether the scheduler watcher is
          running
        * 'install_server_running' Whether the configured install server
          (if any) responded
        * 'concerns' Global evaluation of whether there are problems to
          be addressed
    """
    server_status = {}
    concerns = False
    # configurable disk-usage threshold, in percent ("treshold" spelling is
    # the historical settings key; do not fix it here)
    disk_treshold = int(settings.get_value('SERVER', 'logs_disk_usage_treshold',
                                           default="80"))
    used_space_logs = _get_logs_used_space()
    if used_space_logs > disk_treshold:
        concerns = True
    server_status['used_space_logs'] = used_space_logs
    scheduler_running = _process_running('autotest-scheduler')
    if not scheduler_running:
        concerns = True
    server_status['scheduler_running'] = scheduler_running
    watcher_running = _process_running('autotest-scheduler-watcher')
    if not watcher_running:
        concerns = True
    server_status['scheduler_watcher_running'] = watcher_running
    # only probe the install server when one is configured at all
    if settings.get_value('INSTALL_SERVER', 'xmlrpc_url', default=''):
        install_server_running = get_install_server_profiles() is not None
        if not install_server_running:
            concerns = True
    else:
        install_server_running = False
    server_status['install_server_running'] = install_server_running
    server_status['concerns'] = concerns
    return server_status
| gpl-2.0 |
blindpenguin/orangelime | node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/flock_tool.py | 1835 | 1748 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""These functions are executed via gyp-flock-tool when using the Makefile
generator. Used on systems that don't have a built-in flock."""
import fcntl
import os
import struct
import subprocess
import sys
def main(args):
  """Entry point: dispatch the command-line arguments to a FlockTool."""
  FlockTool().Dispatch(args)
class FlockTool(object):
  """This class emulates the 'flock' command."""
  def Dispatch(self, args):
    """Dispatches a string command to a method.

    args[0] is the command name (e.g. 'flock'), mapped to an Exec* method;
    the remaining args are passed through to it.
    """
    if len(args) < 1:
      raise Exception("Not enough arguments")
    method = "Exec%s" % self._CommandifyName(args[0])
    getattr(self, method)(*args[1:])
  def _CommandifyName(self, name_string):
    """Transforms a tool name like copy-info-plist to CopyInfoPlist"""
    return name_string.title().replace('-', '')
  def ExecFlock(self, lockfile, *cmd_list):
    """Emulates the most basic behavior of Linux's flock(1).

    Acquires an exclusive lock on *lockfile*, then runs *cmd_list* and
    returns its exit status.  The lock is released when this process exits
    (the fd is deliberately never closed).
    """
    # Rely on exception handling to report errors.
    # Note that the stock python on SunOS has a bug
    # where fcntl.flock(fd, LOCK_EX) always fails
    # with EBADF, that's why we use this F_SETLK
    # hack instead.
    # NOTE: 0666 is a Python 2 octal literal (file mode rw-rw-rw-).
    fd = os.open(lockfile, os.O_WRONLY|os.O_NOCTTY|os.O_CREAT, 0666)
    if sys.platform.startswith('aix'):
      # Python on AIX is compiled with LARGEFILE support, which changes the
      # struct size.
      op = struct.pack('hhIllqq', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
    else:
      # struct flock: l_type, l_whence, l_start, l_len, ... (platform layout)
      op = struct.pack('hhllhhl', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
    fcntl.fcntl(fd, fcntl.F_SETLK, op)
    return subprocess.call(cmd_list)
# Script entry point: forward CLI arguments (minus the program name) to main().
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| mit |
bbc/kamaelia | Code/Python/Kamaelia/Kamaelia/Util/LossyConnector.py | 9 | 3299 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
====================================
Lossy connections between components
====================================
A component that passes on any data it receives, but will throw it away if the
next component's inbox is unable to accept new items.
Example Usage
-------------
Using a lossy connector to drop excess data::
src = fastProducer().activate()
lsy = LossyConnector().activate()
dst = slowConsumer().activate()
src.link( (src,"outbox"), (lsy,"inbox") )
src.link( (lsy,"outbox"), (dst,"inbox"), pipewidth=1 )
The outbox of the lossy connector is joined to a linkage that can buffer a
maximum of one item. Once full, the lossy connector causes items to be dropped.
How does it work?
-----------------
This component receives data on its "inbox" inbox and immediately sends it on
out of its "oubox" outbox.
If the act of sending the data causes a noSpaceInBox exception, then it is
caught, and the data that it was trying to send is simply discarded.
If a producerFinished or shutdownMicroprocess message is received on the
component's "control" inbox, then the message is forwarded on out of its
"signal" outbox and the component then immediately terminates.
"""
from Axon.Component import component
from Axon.AxonExceptions import noSpaceInBox
from Axon.Ipc import producerFinished, shutdownMicroprocess
class LossyConnector(component):
    """\
    LossyConnector() -> new LossyConnector component

    Component that forwards data from inbox to outbox, but discards data if
    destination is full.
    """
    Inboxes = { "inbox" : "Data to be passed on",
                "control" : "Shutdown signalling",
              }
    Outboxes = { "outbox" : "Data received on 'inbox' inbox",
                 "signal" : "Shutdown signalling",
               }
    def mainBody(self):
        """Main loop body.

        Returns 0 to terminate the component after a shutdown message,
        non-zero to keep running.
        """
        # Forward everything queued on "inbox"; if the destination linkage
        # is full, send() raises noSpaceInBox and the item is dropped.
        while self.dataReady("inbox"):
            try:
                self.send(self.recv())
            except noSpaceInBox:
                pass # This is the lossy bit, although most data will get through normally.
        # Pass any shutdown message through on "signal" and stop.
        if self.dataReady("control"):
            mes = self.recv("control")
            if isinstance(mes, producerFinished) or isinstance(mes, shutdownMicroprocess):
                self.send(mes,"signal")
                return 0
        return 1
# Public component list picked up by the Kamaelia framework.
__kamaelia_components__ = ( LossyConnector, )
| apache-2.0 |
helenst/django | tests/view_tests/tests/test_specials.py | 66 | 1428 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase, override_settings
@override_settings(ROOT_URLCONF='view_tests.generic_urls')
class URLHandling(TestCase):
    """
    Tests for URL handling in views and responses.
    """
    # Percent-encoded form of the path every redirect test resolves to.
    redirect_target = "/%E4%B8%AD%E6%96%87/target/"

    def test_combining_redirect(self):
        """
        A redirect to an IRI must be encoded before being used in an HTTP
        response.  Here the HttpRedirect argument is ASCII, but the current
        request path is non-ASCII, so this exercises building the full path
        from a non-ASCII base.
        """
        resp = self.client.get('/中文/')
        self.assertRedirects(resp, self.redirect_target)

    def test_nonascii_redirect(self):
        """A non-ASCII argument to HttpRedirect is handled properly."""
        resp = self.client.get('/nonascii_redirect/')
        self.assertRedirects(resp, self.redirect_target)

    def test_permanent_nonascii_redirect(self):
        """A non-ASCII argument to HttpPermanentRedirect is handled properly."""
        resp = self.client.get('/permanent_nonascii_redirect/')
        self.assertRedirects(resp, self.redirect_target, status_code=301)
| bsd-3-clause |
CodingVault/LeetCodeInPython | binary_tree_inorder.py | 1 | 1714 | #!/usr/bin/env python
# encoding: utf-8
"""
binary_tree_inorder.py
Created by Shengwei on 2014-07-04.
"""
# https://oj.leetcode.com/problems/binary-tree-inorder-traversal/
# tags: easy, tree, traversal, dfs
"""
Given a binary tree, return the inorder traversal of its nodes' values.
For example:
Given binary tree {1,#,2,3},
1
\
2
/
3
return [1,3,2].
Note: Recursive solution is trivial, could you do it iteratively?
confused what "{1,#,2,3}" means? > read more on how binary tree is serialized on OJ.
"""
# http://leetcode.com/2010/04/binary-search-tree-in-order-traversal.html
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # @param root, a tree node
    # @return a list of integers
    def inorderTraversal(self, root):
        """Iterative inorder traversal: left subtree, node, right subtree."""
        values = []
        pending = []  # ancestors whose left subtrees are being explored
        node = root
        while pending or node is not None:
            if node is not None:
                # Dive left, remembering each node so we can revisit it.
                pending.append(node)
                node = node.left
            else:
                # Left side exhausted: visit the node, then take its right.
                node = pending.pop()
                values.append(node.val)
                node = node.right
        return values
| apache-2.0 |
cahdudul/akh8960_cm | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict whose missing keys transparently become nested autodicts.

    Lets callers write e.g. ``flag_fields[ev][field]['values'][v] = s``
    without creating the intermediate levels first.
    """
    return defaultdict(autodict)

flag_fields = autodict()      # per-event/field flag names and delimiter
symbolic_fields = autodict()  # per-event/field exact-match value names
def define_flag_field(event_name, field_name, delim):
    # Register the delimiter used when joining multiple flag names for
    # this event field (see flag_str()).
    flag_fields[event_name][field_name]['delim'] = delim

def define_flag_value(event_name, field_name, value, field_str):
    # Map one numeric flag bit to its human-readable name.
    flag_fields[event_name][field_name]['values'][value] = field_str

def define_symbolic_field(event_name, field_name):
    # nothing to do, really
    pass

def define_symbolic_value(event_name, field_name, value, field_str):
    # Map one numeric value to its symbolic (exact-match) name.
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Format *value* as a delimiter-separated list of registered flag names.

    Uses the names and delimiter registered via define_flag_value() /
    define_flag_field() for (event_name, field_name).  Bits of *value* with
    no registered name are silently ignored; an empty string is returned
    when nothing matches.
    """
    string = ""
    if flag_fields[event_name][field_name]:
        print_delim = 0
        # sorted() is equivalent to the old keys();keys.sort() on Python 2
        # and, unlike it, also works on Python 3 dict views.
        keys = sorted(flag_fields[event_name][field_name]['values'].keys())
        for idx in keys:
            # A zero value maps directly to the name registered for bit 0.
            if not value and not idx:
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                value &= ~idx
    return string
def symbol_str(event_name, field_name, value):
    """Return the symbolic name registered for *value*, or "" if unknown.

    Looks up the exact-match names registered via define_symbolic_value()
    for (event_name, field_name).
    """
    string = ""
    if symbolic_fields[event_name][field_name]:
        # sorted() replaces the Python-2-only keys();keys.sort() idiom and
        # keeps working on Python 3 dict views.
        keys = sorted(symbolic_fields[event_name][field_name]['values'].keys())
        for idx in keys:
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
    return string
# Names for the bits of the common_flags trace field.
trace_flags = {0x00: "NONE",
               0x01: "IRQS_OFF",
               0x02: "IRQS_NOSUPPORT",
               0x04: "NEED_RESCHED",
               0x08: "HARDIRQ",
               0x10: "SOFTIRQ"}

def trace_flag_str(value):
    """Render *value* as a ' | '-separated list of trace flag names."""
    # Zero maps straight to the dedicated "NONE" entry.
    if not value:
        return "NONE"
    names = []
    remaining = value
    for bit, name in trace_flags.items():
        # Collect each registered bit present in the value; bits with no
        # registered name are simply ignored.
        if bit and (remaining & bit) == bit:
            names.append(name)
            remaining &= ~bit
    return " | ".join(names)
def taskState(state):
    """Translate a numeric scheduler task state into its letter code."""
    # Any state not listed here is reported as "Unknown".
    return {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD",
    }.get(state, "Unknown")
class EventHeaders:
    """Common header fields shared by every perf trace event."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm

    def ts(self):
        """Return the event timestamp in nanoseconds."""
        return self.secs * 1000000000 + self.nsecs

    def ts_format(self):
        """Return the timestamp as a 'seconds.microseconds' string."""
        usecs = int(self.nsecs / 1000)
        return "%d.%d" % (self.secs, usecs)
| gpl-2.0 |
wuxianghou/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py | 124 | 6488 | # Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.host_mock import MockHost
from .layouttestresultsreader import LayoutTestResultsReader
class LayoutTestResultsReaderTest(unittest.TestCase):
    """Tests for LayoutTestResultsReader using MockHost/MockFileSystem doubles."""

    def test_missing_layout_test_results(self):
        # results() must tolerate missing results files instead of raising.
        host = MockHost()
        reader = LayoutTestResultsReader(host, "/mock-results", "/var/logs")
        layout_tests_results_path = '/mock-results/full_results.json'
        unit_tests_results_path = '/mock-results/webkit_unit_tests_output.xml'
        host.filesystem = MockFileSystem({layout_tests_results_path: None,
                                          unit_tests_results_path: None})
        # Make sure that our filesystem mock functions as we expect.
        self.assertRaises(IOError, host.filesystem.read_text_file, layout_tests_results_path)
        self.assertRaises(IOError, host.filesystem.read_text_file, unit_tests_results_path)
        # layout_test_results shouldn't raise even if the results.json file is missing.
        self.assertIsNone(reader.results())

    def test_create_unit_test_results(self):
        # An all-passing unit-test XML report yields an empty failure list.
        host = MockHost()
        reader = LayoutTestResultsReader(host, "/mock-results", "/var/logs")
        unit_tests_results_path = '/mock-results/webkit_unit_tests_output.xml'
        no_failures_xml = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="3" failures="0" disabled="0" errors="0" time="11.35" name="AllTests">
<testsuite name="RenderTableCellDeathTest" tests="3" failures="0" disabled="0" errors="0" time="0.677">
<testcase name="CanSetColumn" status="run" time="0.168" classname="RenderTableCellDeathTest" />
<testcase name="CrashIfSettingUnsetColumnIndex" status="run" time="0.129" classname="RenderTableCellDeathTest" />
<testcase name="CrashIfSettingUnsetRowIndex" status="run" time="0.123" classname="RenderTableCellDeathTest" />
</testsuite>
</testsuites>"""
        host.filesystem = MockFileSystem({unit_tests_results_path: no_failures_xml})
        self.assertEqual(reader._create_unit_test_results(), [])

    def test_missing_unit_test_results_path(self):
        # Missing unit-test XML must not prevent layout results from loading.
        host = MockHost()
        reader = LayoutTestResultsReader(host, "/mock-results", "/var/logs")
        reader._create_layout_test_results = lambda: LayoutTestResults([])
        reader._create_unit_test_results = lambda: None
        # layout_test_results shouldn't raise even if the unit tests xml file is missing.
        self.assertIsNotNone(reader.results(), None)
        self.assertEqual(reader.results().failing_tests(), [])

    def test_layout_test_results(self):
        # results() is None for missing or empty results file contents.
        reader = LayoutTestResultsReader(MockHost(), "/mock-results", "/var/logs")
        reader._read_file_contents = lambda path: None
        self.assertIsNone(reader.results())
        reader._read_file_contents = lambda path: ""
        self.assertIsNone(reader.results())
        reader._create_layout_test_results = lambda: LayoutTestResults([])
        results = reader.results()
        self.assertIsNotNone(results)
        self.assertEqual(results.failure_limit_count(), 30)  # This value matches RunTests.NON_INTERACTIVE_FAILURE_LIMIT_COUNT

    def test_archive_last_layout_test_results(self):
        host = MockHost()
        results_directory = "/mock-results"
        reader = LayoutTestResultsReader(host, results_directory, "/var/logs")
        patch = host.bugs.fetch_attachment(10001)
        host.filesystem = MockFileSystem()
        # Should fail because the results_directory does not exist.
        expected_logs = "/mock-results does not exist, not archiving.\n"
        archive = OutputCapture().assert_outputs(self, reader.archive, [patch], expected_logs=expected_logs)
        self.assertIsNone(archive)
        host.filesystem.maybe_make_directory(results_directory)
        self.assertTrue(host.filesystem.exists(results_directory))
        # Archiving succeeds once the directory exists, and removes it.
        self.assertIsNotNone(reader.archive(patch))
        self.assertFalse(host.filesystem.exists(results_directory))

    def test_archive_last_layout_test_results_with_relative_path(self):
        host = MockHost()
        results_directory = "/mock-checkout/layout-test-results"
        host.filesystem.maybe_make_directory(results_directory)
        host.filesystem.maybe_make_directory('/var/logs')
        self.assertTrue(host.filesystem.exists(results_directory))
        host.filesystem.chdir('/var')
        reader = LayoutTestResultsReader(host, results_directory, 'logs')
        patch = host.bugs.fetch_attachment(10001)
        # NOTE(review): original comment said "Should fail", but the assertion
        # expects success -- looks copy-pasted from the previous test.
        self.assertIsNotNone(reader.archive(patch))
        self.assertEqual(host.workspace.source_path, results_directory)
        self.assertEqual(host.workspace.zip_path, '/var/logs/50000-layout-test-results.zip')
| bsd-3-clause |
cneill/designate | designate/storage/impl_sqlalchemy/migrate_repo/versions/052_secondary_zones.py | 7 | 3803 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
from sqlalchemy import DateTime, Enum, Integer, String, ForeignKeyConstraint
from sqlalchemy.schema import Column, MetaData, Table
from sqlalchemy.sql import select
from migrate.changeset.constraint import UniqueConstraint
from designate import utils
from designate.sqlalchemy.types import UUID
# SQLAlchemy metadata shared by the table definitions in this migration.
# (The original bound a second, identical MetaData() two lines later; the
# later assignment shadowed the first, so a single instance is sufficient.)
meta = MetaData()

ZONE_ATTRIBUTE_KEYS = ('master',)  # allowed keys in domain_attributes
ZONE_TYPES = ('PRIMARY', 'SECONDARY')  # zone types introduced by this migration
def upgrade(migrate_engine):
    """Apply the migration: add secondary-zone support.

    Creates the ``domain_attributes`` table, adds a ``type`` column
    (PRIMARY/SECONDARY) and a ``transferred_at`` timestamp to ``domains``,
    and on SQLite recreates the unique (name, deleted) index.
    """
    meta.bind = migrate_engine

    # Enum of allowed attribute keys (currently just 'master').
    keys = Enum(name='key', *ZONE_ATTRIBUTE_KEYS)

    domain_attributes_table = Table(
        'domain_attributes', meta,
        Column('id', UUID(), default=utils.generate_uuid, primary_key=True),
        Column('version', Integer(), default=1, nullable=False),
        Column('created_at', DateTime, default=lambda: timeutils.utcnow()),
        Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()),
        Column('key', keys),
        Column('value', String(255), nullable=False),
        Column('domain_id', UUID(), nullable=False),
        UniqueConstraint('key', 'value', 'domain_id',
                         name='unique_attributes'),
        ForeignKeyConstraint(['domain_id'], ['domains.id'],
                             ondelete='CASCADE'),
        mysql_engine='INNODB',
        mysql_charset='utf8'
    )

    domains_table = Table('domains', meta, autoload=True)
    types = Enum(name='types', metadata=meta, *ZONE_TYPES)
    # Create the enum type in the database before any column references it.
    types.create()

    # Add type and transferred_at to domains
    type_ = Column('type', types, default='PRIMARY', server_default='PRIMARY')
    transferred_at = Column('transferred_at', DateTime, default=None)
    type_.create(domains_table, populate_default=True)
    transferred_at.create(domains_table, populate_default=True)

    domain_attributes_table.create()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint(
            'name', 'deleted', name='unique_domain_name', table=domains_table)
        # Add missing unique index
        constraint.create()
def downgrade(migrate_engine):
    """Revert the migration: remove secondary-zone support.

    Deletes all SECONDARY zones, drops the ``type`` and ``transferred_at``
    columns, the ``domain_attributes`` table and both enum types, and on
    SQLite recreates the unique (name, deleted) index.
    """
    meta.bind = migrate_engine

    keys = Enum(name='key', metadata=meta, *ZONE_ATTRIBUTE_KEYS)
    types = Enum(name='types', metadata=meta, *ZONE_TYPES)

    domains_attributes_table = Table('domain_attributes', meta, autoload=True)
    domains_table = Table('domains', meta, autoload=True)

    domains = select(columns=[domains_table.c.id, domains_table.c.type])\
        .where(domains_table.c.type == 'SECONDARY')\
        .execute().fetchall()

    for dom in domains:
        # BUG FIX: columns are accessed via Table.c; the original used
        # domains_table.id, which is not a column attribute and raised
        # AttributeError, so SECONDARY zones were never deleted.
        delete = domains_table.delete()\
            .where(domains_table.c.id == dom.id)
        delete.execute()

    domains_table.c.type.drop()
    domains_table.c.transferred_at.drop()

    domains_attributes_table.drop()
    keys.drop()
    types.drop()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint(
            'name', 'deleted', name='unique_domain_name', table=domains_table)
        # Add missing unique index
        constraint.create()
| apache-2.0 |
mesosphere/dcos-cli | cli/tests/integrations/test_package.py | 1 | 36558 | import base64
import contextlib
import json
import os
import sys
import pytest
import six
from dcos import config, constants, subcommand
from .helpers.common import (assert_command, assert_lines, base64_to_dict,
delete_zk_node, delete_zk_nodes, exec_command,
file_json, update_config)
from .helpers.marathon import watch_all_deployments
from .helpers.package import (package_install, package_uninstall,
setup_universe_server, teardown_universe_server,
UNIVERSE_REPO, UNIVERSE_TEST_REPOS)
from .helpers.service import get_services, service_shutdown
from ..common import file_bytes
@pytest.fixture
def env():
    """Return a copy of the current environment for subprocess invocations."""
    r = os.environ.copy()
    # NOTE(review): this re-assigns PATH to its existing value, so it is
    # effectively a no-op on the copy -- presumably a placeholder for tests
    # that override PATH; confirm before removing.
    r.update({constants.PATH_ENV: os.environ[constants.PATH_ENV]})
    return r

def setup_module(module):
    """Start the local test Universe package server for this module."""
    setup_universe_server()

def teardown_module(module):
    """Shut down any leftover chronos service, then stop the test Universe."""
    services = get_services()
    for framework in services:
        if framework['name'] == 'chronos':
            service_shutdown(framework['id'])
    teardown_universe_server()

@pytest.fixture(scope="module")
def zk_znode(request):
    """Module-scoped fixture that deletes ZooKeeper nodes after the tests."""
    request.addfinalizer(delete_zk_nodes)
    return request

def test_package():
    """`dcos package --help` prints the canned help text."""
    with open('dcoscli/data/help/package.txt') as content:
        assert_command(['dcos', 'package', '--help'],
                       stdout=content.read().encode('utf-8'))

def test_info():
    """`dcos package --info` prints the one-line summary."""
    info = b"Install and manage DC/OS software packages\n"
    assert_command(['dcos', 'package', '--info'],
                   stdout=info)

def test_version():
    """`dcos package --version` reports the subcommand version."""
    assert_command(['dcos', 'package', '--version'],
                   stdout=b'dcos-package version SNAPSHOT\n')

def test_repo_list():
    """`dcos package repo list` shows the configured test repos."""
    repo_list = bytes(
        (
            "test-universe: {test-universe}\n"
            "helloworld-universe: {helloworld-universe}\n"
        ).format(**UNIVERSE_TEST_REPOS),
        'utf-8'
    )
    assert_command(['dcos', 'package', 'repo', 'list'], stdout=repo_list)
    # test again, but override the dcos_url with a cosmos_url config
    dcos_url = config.get_config_val("core.dcos_url")
    with update_config('package.cosmos_url', dcos_url):
        assert_command(['dcos', 'package', 'repo', 'list'], stdout=repo_list)
def test_repo_add():
    """Adding the public Universe repo appends it to the end of the list."""
    repo_list = bytes(
        (
            "test-universe: {test-universe}\n"
            "helloworld-universe: {helloworld-universe}\n"
            "Universe: {0}\n"
        ).format(UNIVERSE_REPO, **UNIVERSE_TEST_REPOS),
        'utf-8'
    )
    args = ["Universe", UNIVERSE_REPO]
    _repo_add(args, repo_list)

def test_repo_add_index():
    """--index=1 inserts the new repo at position 1 of the repo list."""
    repo17 = "http://universe.mesosphere.com/repo-1.7"
    repo_list = bytes(
        (
            "test-universe: {test-universe}\n"
            "1.7-universe: {0}\n"
            "helloworld-universe: {helloworld-universe}\n"
            "Universe: {1}\n"
        ).format(repo17, UNIVERSE_REPO, **UNIVERSE_TEST_REPOS),
        'utf-8'
    )
    args = ["1.7-universe", repo17, '--index=1']
    _repo_add(args, repo_list)

def test_repo_remove():
    """Repos can be removed one at a time.

    NOTE(review): depends on the repos added by the two tests above --
    module test order matters here.
    """
    repo_list = bytes(
        (
            "test-universe: {test-universe}\n"
            "helloworld-universe: {helloworld-universe}\n"
            "Universe: {0}\n"
        ).format(UNIVERSE_REPO, **UNIVERSE_TEST_REPOS),
        'utf-8'
    )
    _repo_remove(['1.7-universe'], repo_list)
    repo_list = bytes(
        (
            "test-universe: {test-universe}\n"
            "helloworld-universe: {helloworld-universe}\n"
        ).format(**UNIVERSE_TEST_REPOS),
        'utf-8'
    )
    _repo_remove(['Universe'], repo_list)

def test_repo_remove_multi():
    """`repo remove` accepts multiple repo names in a single invocation."""
    # Add "Universe" repo so we can test removing it
    repo_list = bytes(
        (
            "test-universe: {test-universe}\n"
            "helloworld-universe: {helloworld-universe}\n"
            "Universe: {0}\n"
        ).format(UNIVERSE_REPO, **UNIVERSE_TEST_REPOS),
        'utf-8'
    )
    args = ["Universe", UNIVERSE_REPO]
    _repo_add(args, repo_list)
    # Add "1.7-universe" repo so we can test removing it
    repo17 = "http://universe.mesosphere.com/repo-1.7"
    repo_list = bytes(
        (
            "test-universe: {test-universe}\n"
            "1.7-universe: {1}\n"
            "helloworld-universe: {helloworld-universe}\n"
            "Universe: {0}\n"
        ).format(UNIVERSE_REPO, repo17, **UNIVERSE_TEST_REPOS),
        'utf-8'
    )
    args = ["1.7-universe", repo17, '--index=1']
    _repo_add(args, repo_list)
    # Remove both in one command; the remaining list is the original pair.
    repo_list = bytes(
        (
            "test-universe: {test-universe}\n"
            "helloworld-universe: {helloworld-universe}\n"
        ).format(**UNIVERSE_TEST_REPOS),
        'utf-8'
    )
    _repo_remove(['1.7-universe', 'Universe'], repo_list)
def test_repo_empty():
    """With no repos configured, `repo list` fails with guidance; then restore."""
    # Drop every configured test repo.
    for repo_name in UNIVERSE_TEST_REPOS:
        assert_command(['dcos', 'package', 'repo', 'remove', repo_name])

    code, out, err = exec_command(['dcos', 'package', 'repo', 'list'])

    stderr_msg = (b"There are currently no repos configured. "
                  b"Please use `dcos package repo add` to add a repo\n")
    assert code == 1
    assert out == b''
    assert err == stderr_msg

    # Put the repos back so later tests see the expected configuration.
    for repo_name, repo_url in UNIVERSE_TEST_REPOS.items():
        assert_command(['dcos', 'package', 'repo', 'add', repo_name, repo_url])
def test_describe_nonexistent():
    """Describing an unknown package fails with 'not found'."""
    assert_command(['dcos', 'package', 'describe', 'xyzzy'],
                   stderr=b'Package [xyzzy] not found\n',
                   returncode=1)

def test_describe_nonexistent_version():
    """Describing a known package at an unknown version fails."""
    stderr = b'Version [a.b.c] of package [marathon] not found\n'
    assert_command(['dcos', 'package', 'describe', 'marathon',
                    '--package-version=a.b.c'],
                   stderr=stderr,
                   returncode=1)

def test_describe_cli():
    """--cli prints the CLI-subcommand portion of the package."""
    stdout = file_json(
        'tests/data/package/json/test_describe_cli_kafka.json')
    assert_command(['dcos', 'package', 'describe', 'kafka', '--cli'],
                   stdout=stdout)

def test_describe_app():
    """--app prints the raw (unrendered) Marathon app template."""
    stdout = file_bytes(
        'tests/data/package/json/test_describe_app_marathon.json')
    assert_command(['dcos', 'package', 'describe', 'marathon', '--app'],
                   stdout=stdout)

def test_describe_config():
    """--config prints the package's configuration schema."""
    stdout = file_json(
        'tests/data/package/json/test_describe_marathon_config.json')
    assert_command(['dcos', 'package', 'describe', 'marathon', '--config'],
                   stdout=stdout)

def test_describe_render():
    """--render produces the fully rendered Marathon app definition."""
    stdout = file_json(
        'tests/data/package/json/test_describe_marathon_app_render.json')
    stdout = json.loads(stdout.decode('utf-8'))
    # Labels are compared separately so mismatches are easier to read.
    expected_labels = stdout.pop("labels", None)
    returncode, stdout_, stderr = exec_command(
        ['dcos', 'package', 'describe', 'marathon', '--app', '--render'])
    stdout_ = json.loads(stdout_.decode('utf-8'))
    actual_labels = stdout_.pop("labels", None)
    for label, value in expected_labels.items():
        if label == "DCOS_PACKAGE_METADATA":
            # We convert the metadata into a dictionary
            # so that failures in equality are more descriptive
            assert base64_to_dict(value) == \
                base64_to_dict(actual_labels.get(label))
        else:
            assert value == actual_labels.get(label)
    assert stdout == stdout_
    assert stderr == b''
    assert returncode == 0

def test_describe_package_version():
    """--package-version selects a specific version to describe."""
    stdout = file_json(
        'tests/data/package/json/test_describe_marathon_package_version.json')
    returncode_, stdout_, stderr_ = exec_command(
        ['dcos', 'package', 'describe', 'marathon',
         '--package-version=1.3.10'])
    assert returncode_ == 0
    output = json.loads(stdout_.decode('utf-8'))
    assert output == json.loads(stdout.decode('utf-8'))
    assert stderr_ == b''

def test_describe_package_version_missing():
    """A bogus --package-version yields a version-not-found error."""
    stderr = b'Version [bogus] of package [marathon] not found\n'
    assert_command(
        ['dcos', 'package', 'describe', 'marathon', '--package-version=bogus'],
        returncode=1,
        stderr=stderr)

def test_describe_package_versions():
    """--package-versions lists every available version of a package."""
    stdout = file_bytes(
        'tests/data/package/json/test_describe_kafka_package_versions.json')
    assert_command(
        ['dcos', 'package', 'describe', 'kafka', '--package-versions'],
        stdout=stdout)

def test_describe_options():
    """--options merges a user options file into the rendered app."""
    stdout = file_json(
        'tests/data/package/json/test_describe_app_options.json')
    stdout = json.loads(stdout.decode('utf-8'))
    expected_labels = stdout.pop("labels", None)
    returncode, stdout_, stderr = exec_command(
        ['dcos', 'package', 'describe', '--app', '--options',
         'tests/data/package/marathon.json', 'marathon'])
    stdout_ = json.loads(stdout_.decode('utf-8'))
    actual_labels = stdout_.pop("labels", None)
    for label, value in expected_labels.items():
        if label == "DCOS_PACKAGE_METADATA":
            # We convert the metadata into a dictionary
            # so that failures in equality are more descriptive
            assert base64_to_dict(value) == \
                base64_to_dict(actual_labels.get(label))
        else:
            assert value == actual_labels.get(label)
    assert stdout == stdout_
    assert stderr == b''
    assert returncode == 0

def test_describe_app_cli():
    """--app --cli prints both the app and CLI portions for kafka."""
    stdout = file_bytes(
        'tests/data/package/json/test_describe_app_cli.json')
    assert_command(
        ['dcos', 'package', 'describe', 'kafka', '--app', '--cli'],
        stdout=stdout)
def test_bad_install():
    """A malformed options file produces a helpful error message."""
    args = ['--options=tests/data/package/chronos-bad.json', '--yes']
    stderr = """\
Please create a JSON file with the appropriate options, and pass the \
/path/to/file as an --options argument.
"""
    _install_bad_chronos(args=args,
                         stderr=stderr)

def test_bad_install_helloworld_msg():
    """Installing under a group id that collides with the app id fails."""
    stdout = (
        b'By Deploying, you agree to the Terms '
        b'and Conditions https://mesosphere.com/'
        b'catalog-terms-conditions/#community-services\n'
        b'A sample pre-installation message\n'
        b'Installing Marathon app for package [helloworld] version '
        b'[0.1.0] with app id [/foo]\n'
        b'Usage of --app-id is deprecated. Use --options instead and specify '
        b'a file that contains [service.name] property\n'
        b'Installing CLI subcommand for package [helloworld] '
        b'version [0.1.0]\n'
        b'New command available: dcos ' +
        _executable_name(b'helloworld') +
        b'\nA sample post-installation message\n'
    )
    _install_helloworld(['--yes', '--app-id=/foo'],
                        stdout=stdout)
    stdout2 = (
        b'By Deploying, you agree to the Terms '
        b'and Conditions https://mesosphere.com/'
        b'catalog-terms-conditions/#community-services\n'
        b'A sample pre-installation message\n'
        b'Installing Marathon app for package [helloworld] version '
        b'[0.1.0] with app id [/foo/bar]\n'
        b'Usage of --app-id is deprecated. Use --options instead and specify '
        b'a file that contains [service.name] property\n'
    )
    # Marathon rejects an app id nested under a group with the same name.
    stderr = (b'Object is not valid\n'
              b'Groups and Applications may not have the same '
              b'identifier.\n')
    _install_helloworld(['--yes', '--app-id=/foo/bar'],
                        stdout=stdout2,
                        stderr=stderr,
                        returncode=1)
    _uninstall_helloworld()

def test_install_missing_options_file():
    """Test that a missing options file results in the expected stderr
    message."""
    assert_command(
        ['dcos', 'package', 'install', 'chronos', '--yes',
         '--options=asdf.json'],
        returncode=1,
        stderr=b"Error opening file [asdf.json]: No such file or directory\n")

def test_install_specific_version():
    """--package-version installs exactly the requested version."""
    stdout = (
        b'By Deploying, you agree to the Terms '
        b'and Conditions https://mesosphere.com/'
        b'catalog-terms-conditions/#certified-services\n'
        b'We recommend a minimum of one node with at least 2 '
        b'CPU\'s and 1GB of RAM available for the Marathon Service.\n'
        b'Installing Marathon app for package [marathon] '
        b'version [0.11.1]\n'
        b'Marathon DCOS Service has been successfully installed!\n\n'
        b'\tDocumentation: https://mesosphere.github.io/marathon\n'
        b'\tIssues: https://github.com/mesosphere/marathon/issues\n\n'
    )
    uninstall_stderr = (
        b'Uninstalled package [marathon] version [0.11.1]\n'
        b'The Marathon DCOS Service has been uninstalled and will no '
        b'longer run.\nPlease follow the instructions at http://docs.'
        b'mesosphere.com/services/marathon/#uninstall to clean up any '
        b'persisted state\n'
    )
    with _package(name='marathon',
                  args=['--yes', '--package-version=0.11.1'],
                  stdout=stdout,
                  uninstall_stderr=uninstall_stderr):
        returncode, stdout, stderr = exec_command(
            ['dcos', 'package', 'list', 'marathon', '--json'])
        assert returncode == 0
        assert stderr == b''
        assert json.loads(stdout.decode('utf-8'))[0]['version'] == "0.11.1"

def test_install_bad_package_version():
    """An unknown --package-version fails before any install happens."""
    stderr = b'Version [a.b.c] of package [cassandra] not found\n'
    assert_command(
        ['dcos', 'package', 'install', 'cassandra',
         '--package-version=a.b.c'],
        returncode=1,
        stderr=stderr)
def test_package_metadata():
    """Package metadata appears in Marathon labels and local CLI state."""
    _install_helloworld()
    # test marathon labels
    expected_metadata = {
        'maintainer': 'support@mesosphere.io',
        'framework': False,
        'name': 'helloworld',
        'version': '0.1.0',
        'packagingVersion': '3.0',
        'preInstallNotes': 'A sample pre-installation message',
        'selected': False,
        'website': 'https://github.com/mesosphere/dcos-helloworld',
        'description': 'Example DCOS application package',
        'tags': ['mesosphere', 'example', 'subcommand'],
        'postInstallNotes': 'A sample post-installation message'
    }
    expected_source = bytes(
        UNIVERSE_TEST_REPOS['helloworld-universe'],
        'utf-8'
    )
    expected_labels = {
        'DCOS_PACKAGE_NAME': b'helloworld',
        'DCOS_PACKAGE_VERSION': b'0.1.0',
        'DCOS_PACKAGE_SOURCE': expected_source,
    }
    app_labels = _get_app_labels('helloworld')
    for label, value in expected_labels.items():
        assert value == six.b(app_labels.get(label))
    # The metadata label is base64-encoded JSON; compare as a dict.
    assert expected_metadata == base64_to_dict(six.b(
        app_labels.get('DCOS_PACKAGE_METADATA')))
    # test local package.json
    package = file_json(
        'tests/data/package/json/test_package_metadata.json')
    package = json.loads(package.decode("UTF-8"))
    helloworld_subcommand = subcommand.InstalledSubcommand("helloworld")
    # test local package.json
    assert helloworld_subcommand.package_json() == package
    # uninstall helloworld
    _uninstall_helloworld()

def test_images_in_metadata():
    """Cassandra's package metadata carries all three icon sizes."""
    package_install('cassandra')
    labels = _get_app_labels('/cassandra')
    dcos_package_metadata = labels.get("DCOS_PACKAGE_METADATA")
    images = json.loads(
        base64.b64decode(dcos_package_metadata).decode('utf-8'))["images"]
    assert images.get("icon-small") is not None
    assert images.get("icon-medium") is not None
    assert images.get("icon-large") is not None
    # uninstall
    stderr = (
        b'Uninstalled package [cassandra] version [1.0.25-3.0.10]\n'
        b'DC/OS Apache Cassandra service has been uninstalled.\n'
        b'Please follow the instructions at https://docs.mesosphere.com/'
        b'current/usage/service-guides/cassandra/uninstall to remove any '
        b'persistent state if required.\n'
    )
    package_uninstall('cassandra', stderr=stderr)
    delete_zk_node('dcos-service-cassandra')

def test_install_with_id(zk_znode):
    """Two installs of the same package under different app ids succeed."""
    args = ['--app-id=chronos-1', '--yes']
    stdout = (
        b'Installing Marathon app for package [chronos] version [3.0.1] with '
        b'app id [chronos-1]\n'
        b'Usage of --app-id is deprecated. Use --options instead and specify '
        b'a file that contains [service.name] property\n'
    )
    _install_chronos(args=args, stdout=stdout)
    args = ['--app-id=chronos-2', '--yes']
    stdout = (
        b'Installing Marathon app for package [chronos] version [3.0.1] with '
        b'app id [chronos-2]\n'
        b'Usage of --app-id is deprecated. Use --options instead and specify '
        b'a file that contains [service.name] property\n'
    )
    _install_chronos(args=args, stdout=stdout)

def test_install_missing_package():
    """Installing an unknown package fails with 'not found'."""
    stderr = b'Package [missing-package] not found\n'
    assert_command(['dcos', 'package', 'install', 'missing-package'],
                   returncode=1,
                   stderr=stderr)

def test_uninstall_with_id(zk_znode):
    """--app-id removes only the named instance (chronos-1)."""
    _uninstall_chronos(args=['--app-id=chronos-1'])

def test_uninstall_all(zk_znode):
    """--all removes every remaining chronos instance."""
    _uninstall_chronos(args=['--all'])

def test_uninstall_missing():
    """Uninstalling when nothing is installed reports the right errors."""
    stderr = 'Package [chronos] is not installed\n'
    _uninstall_chronos(returncode=1, stderr=stderr)
    stderr = 'Package [chronos] with id [/chronos-1] is not installed\n'
    _uninstall_chronos(
        args=['--app-id=chronos-1'],
        returncode=1,
        stderr=stderr)
def test_uninstall_subcommand():
    """Uninstall removes both the app and the CLI subcommand."""
    _install_helloworld()
    _uninstall_helloworld()
    _list(args=['--json'], stdout=b'[]\n')

def test_uninstall_cli():
    """--cli removes only the subcommand; the app keeps running."""
    _install_helloworld()
    _uninstall_cli_helloworld()
    stdout_json = {
        "apps": [
            "/helloworld"
        ],
        "description": "Example DCOS application package",
        "framework": False,
        "maintainer": "support@mesosphere.io",
        "name": "helloworld",
        "packagingVersion": "3.0",
        "postInstallNotes": "A sample post-installation message",
        "preInstallNotes": "A sample pre-installation message",
        "selected": False,
        "tags": [
            "mesosphere",
            "example",
            "subcommand"
        ],
        "version": "0.1.0",
        "website": "https://github.com/mesosphere/dcos-helloworld"
    }
    returncode_, stdout_, stderr_ = exec_command(
        ['dcos', 'package', 'list', '--json'])
    assert stderr_ == b''
    assert returncode_ == 0
    output = json.loads(stdout_.decode('utf-8'))[0]
    assert output == stdout_json
    _uninstall_helloworld()

def test_uninstall_multiple_apps():
    """With two instances installed, a bare uninstall demands --app-id/--all."""
    stdout = (
        b'By Deploying, you agree to the Terms '
        b'and Conditions https://mesosphere.com/'
        b'catalog-terms-conditions/#community-services\n'
        b'A sample pre-installation message\n'
        b'Installing Marathon app for package [helloworld] version '
        b'[0.1.0] with app id [/helloworld-1]\n'
        b'Usage of --app-id is deprecated. Use --options instead and specify '
        b'a file that contains [service.name] property\n'
        b'A sample post-installation message\n'
    )
    _install_helloworld(['--yes', '--app-id=/helloworld-1', '--app'],
                        stdout=stdout)
    stdout = (
        b'By Deploying, you agree to the Terms '
        b'and Conditions https://mesosphere.com/'
        b'catalog-terms-conditions/#community-services\n'
        b'A sample pre-installation message\n'
        b'Installing Marathon app for package [helloworld] version '
        b'[0.1.0] with app id [/helloworld-2]\n'
        b'Usage of --app-id is deprecated. Use --options instead and specify '
        b'a file that contains [service.name] property\n'
        b'A sample post-installation message\n'
    )
    _install_helloworld(['--yes', '--app-id=/helloworld-2', '--app'],
                        stdout=stdout)
    stderr = (b"Multiple apps named [helloworld] are installed: "
              b"[/helloworld-1, /helloworld-2].\n"
              b"Please use --app-id to specify the ID of the app "
              b"to uninstall, or use --all to uninstall all apps.\n")
    returncode = 1
    _uninstall_helloworld(stderr=stderr,
                          returncode=returncode,
                          uninstalled=b'')
    _uninstall_helloworld(args=['--all'], stdout=b'', stderr=b'', returncode=0)
    # Wait for Marathon to finish tearing both apps down.
    watch_all_deployments()
def test_list(zk_znode):
    """`dcos package list` filters: empty before install, populated during."""
    no_packages = b'[]\n'

    # Nothing installed yet: every filter yields an empty JSON list.
    for extra in ([], ['xyzzy'], ['--app-id=/xyzzy']):
        _list(args=extra + ['--json'], stdout=no_packages)

    with _chronos_package():
        listed = file_json(
            'tests/data/package/json/test_list_chronos.json')

        # Matching filters all report the installed package ...
        _list(args=['--json'], stdout=listed)
        _list(args=['--json', 'chronos'], stdout=listed)
        _list(args=['--json', '--app-id=/chronos'], stdout=listed)

        # ... while non-matching filters still yield an empty list.
        missing = 'ceci-nest-pas-une-package'
        _list(args=['--json', missing], stdout=no_packages)
        _list(args=['--json', '--app-id=/' + missing], stdout=no_packages)
def test_list_table():
    """The table form of `dcos package list` emits exactly two lines
    (header plus one row) while helloworld is installed."""
    with _helloworld():
        list_cmd = ['dcos', 'package', 'list']
        assert_lines(list_cmd, 2)
def test_install_yes():
    """Feeding "yes" to the confirmation prompt completes the install."""
    expected = (
        b'By Deploying, you agree to the Terms '
        b'and Conditions https://mesosphere.com/'
        b'catalog-terms-conditions/#community-services\n'
        b'A sample pre-installation message\n'
        b'Continue installing? [yes/no] '
        b'Installing Marathon app for package [helloworld] version '
        b'[0.1.0]\n'
        b'Installing CLI subcommand for package [helloworld] '
        b'version [0.1.0]\n'
        b'New command available: dcos ' +
        _executable_name(b'helloworld') +
        b'\nA sample post-installation message\n'
    )
    # The fixture file supplies the interactive "yes" answer on stdin.
    with open('tests/data/package/assume_yes.txt') as yes_file:
        _install_helloworld(args=[], stdin=yes_file, stdout=expected)

        _uninstall_helloworld()
def test_install_no():
    """Feeding "no" to the confirmation prompt aborts the install."""
    expected = (
        b'By Deploying, you agree to the Terms '
        b'and Conditions https://mesosphere.com/'
        b'catalog-terms-conditions/#community-services\n'
        b'A sample pre-installation message\n'
        b'Continue installing? [yes/no] Exiting installation.\n'
    )
    # The fixture file supplies the interactive "no" answer on stdin.
    with open('tests/data/package/assume_no.txt') as no_file:
        _install_helloworld(args=[], stdin=no_file, stdout=expected)
def test_list_cli():
    """`package list --json` reflects both full and CLI-only installs."""
    # Full install (Marathon app + CLI subcommand).
    _install_helloworld()
    _list(args=['--json'],
          stdout=file_json(
              'tests/data/package/json/test_list_helloworld.json'))
    _uninstall_helloworld()

    # CLI-only install.
    cli_install_stdout = (
        b'By Deploying, you agree to the Terms '
        b'and Conditions https://mesosphere.com/'
        b'catalog-terms-conditions/#community-services\n'
        b"Installing CLI subcommand for package [helloworld] " +
        b"version [0.1.0]\n"
        b"New command available: dcos " +
        _executable_name(b'helloworld') +
        b"\n"
    )
    _install_helloworld(args=['--cli', '--yes'], stdout=cli_install_stdout)
    _list(args=['--json'],
          stdout=file_json(
              'tests/data/package/json/test_list_helloworld_cli.json'))
    _uninstall_cli_helloworld()
def test_list_cli_only(env):
    """CLI-only packages are listed even when Cosmos is unreachable."""
    expected = file_json(
        'tests/data/package/json/test_list_helloworld_cli.json')

    # Point the CLI at a dead Cosmos URL: listing --cli packages must
    # succeed from local state alone.
    with _helloworld_cli(), \
            update_config('package.cosmos_url', 'http://nohost', env):
        base_cmd = ['dcos', 'package', 'list', '--json', '--cli']

        assert_command(cmd=base_cmd, stdout=expected)

        # An app-id filter can never match a CLI-only package.
        assert_command(cmd=base_cmd + ['--app-id=/helloworld'],
                       stdout=b'[]\n')

        assert_command(cmd=base_cmd + ['helloworld'], stdout=expected)
def test_cli_global():
    """A --global CLI install lands in the global package dir and is
    reported by `package list --cli`."""
    expected = file_json(
        'tests/data/package/json/test_list_helloworld_cli.json')

    with _helloworld_cli(global_=True):
        assert os.path.exists(subcommand.global_package_dir("helloworld"))
        assert_command(
            cmd=['dcos', 'package', 'list', '--json', '--cli'],
            stdout=expected)
def test_uninstall_multiple_frameworknames(zk_znode):
    """Uninstall is refused when several framework ids share one name."""
    # Two chronos instances configured with the same framework name.
    for options_file in ('tests/data/package/chronos-1.json',
                         'tests/data/package/chronos-2.json'):
        _install_chronos(args=['--yes', '--options=' + options_file])

    watch_all_deployments()

    both_listed = file_json(
        'tests/data/package/json/test_list_chronos_two_users.json')
    _list(args=['--json'], stdout=both_listed)
    _list(args=['--json', 'chronos'], stdout=both_listed)
    _list(args=['--json', '--app-id=/chronos-user-1'], stdout=file_json(
        'tests/data/package/json/test_list_chronos_user_1.json'))
    _list(args=['--json', '--app-id=/chronos-user-2'], stdout=file_json(
        'tests/data/package/json/test_list_chronos_user_2.json'))

    # Either app id uninstalls the app, but the framework shutdown is
    # refused because the name maps to multiple framework ids.
    ambiguous_stderr = ('Uninstalled package [chronos] version [3.0.1]\n'
                        'Unable to shutdown [chronos] service framework with '
                        'name [chronos-user] because there are multiple '
                        'framework ids '
                        'matching this name: ')
    for app_id in ('chronos-user-1', 'chronos-user-2'):
        _uninstall_chronos(
            args=['--app-id=' + app_id],
            returncode=1,
            stderr=ambiguous_stderr)

    # Shut down the leftover frameworks explicitly.
    for framework in get_services(args=['--inactive']):
        if framework['name'] == 'chronos-user':
            service_shutdown(framework['id'])
def test_search():
    """JSON search: a hit, empty JSON results, a table miss, and the
    unfiltered catalogue."""
    code, out, err = exec_command(
        ['dcos', 'package', 'search', 'cron', '--json'])
    assert code == 0
    assert b'chronos' in out
    assert err == b''

    # A query with no matches still succeeds in JSON mode.
    code, out, err = exec_command(
        ['dcos', 'package', 'search', 'xyzzy', '--json'])
    assert code == 0
    assert b'"packages": []' in out
    assert err == b''

    # ... but fails in table mode.
    code, out, err = exec_command(
        ['dcos', 'package', 'search', 'xyzzy'])
    assert code == 1
    assert out == b''
    assert err == b'No packages found.\n'

    code, out, err = exec_command(
        ['dcos', 'package', 'search', '--json'])
    catalogue = json.loads(out.decode('utf-8'))
    # assert the number of packages is gte the number at the time
    # this test was written
    assert len(catalogue['packages']) >= 5
    assert code == 0
    assert err == b''
def test_search_table():
    """Table-mode search prints a header plus several package rows."""
    code, out, err = exec_command(['dcos', 'package', 'search'])
    assert code == 0
    assert b'chronos' in out
    # More than five lines of output implies multiple result rows.
    assert len(out.decode('utf-8').split('\n')) > 5
    assert err == b''
def test_search_ends_with_wildcard():
    """A trailing-wildcard query matches several package names."""
    code, out, err = exec_command(
        ['dcos', 'package', 'search', 'c*', '--json'])
    assert code == 0
    assert b'chronos' in out
    assert b'cassandra' in out
    assert err == b''

    results = json.loads(out.decode('utf-8'))
    # cosmos matches wildcards in name/description/tags
    # so will find more results (3 instead of 2)
    assert len(results['packages']) >= 2
def test_search_start_with_wildcard():
    """A leading-wildcard query matches exactly one package."""
    code, out, err = exec_command(
        ['dcos', 'package', 'search', '*nos', '--json'])
    assert code == 0
    assert b'chronos' in out
    assert err == b''

    results = json.loads(out.decode('utf-8'))
    assert len(results['packages']) == 1
def test_search_middle_with_wildcard():
    """A mid-word wildcard query matches the expected package count."""
    code, out, err = exec_command(
        ['dcos', 'package', 'search', 'c*s', '--json'])
    assert code == 0
    assert b'chronos' in out
    assert err == b''

    results = json.loads(out.decode('utf-8'))
    assert len(results['packages']) == 4
def _get_app_labels(app_id):
    """Return the `labels` dict of the Marathon app *app_id*.

    Returns None if the app definition has no `labels` key.
    """
    code, out, err = exec_command(
        ['dcos', 'marathon', 'app', 'show', app_id])
    assert code == 0
    assert err == b''

    app_definition = json.loads(out.decode('utf-8'))
    return app_definition.get('labels')
def _executable_name(name):
    """Return *name* (bytes) with the platform executable suffix appended
    (".exe" on Windows, nothing elsewhere)."""
    suffix = b'.exe' if sys.platform == 'win32' else b''
    return name + suffix
def _install_helloworld(
        args=None,
        stdout=(
            b'By Deploying, you agree to the Terms '
            b'and Conditions https://mesosphere.com/'
            b'catalog-terms-conditions/#community-services\n'
            b'A sample pre-installation message\n'
            b'Installing Marathon app for package [helloworld] '
            b'version [0.1.0]\n'
            b'Installing CLI subcommand for package [helloworld] '
            b'version [0.1.0]\n'
            b'New command available: dcos ' +
            _executable_name(b'helloworld') +
            b'\nA sample post-installation message\n'
        ),
        stderr=b'',
        returncode=0,
        stdin=None):
    """Install the helloworld package and assert the command's output.

    :param args: extra CLI arguments (default: ['--yes'])
    :type args: [str] or None
    :param stdout: expected stdout
    :type stdout: bytes
    :param stderr: expected stderr
    :type stderr: bytes
    :param returncode: expected exit code
    :type returncode: int
    :param stdin: file to feed to the command's stdin, if any
    :rtype: None
    """
    # None sentinel instead of a mutable list default (shared across calls).
    if args is None:
        args = ['--yes']
    assert_command(
        ['dcos', 'package', 'install', 'helloworld'] + args,
        stdout=stdout,
        returncode=returncode,
        stdin=stdin,
        stderr=stderr)
def _uninstall_helloworld(
        args=None,
        stdout=b'',
        stderr=b'',
        returncode=0,
        uninstalled=b'Uninstalled package [helloworld] version [0.1.0]\n'):
    """Uninstall helloworld, assert the output, and wait for deployments.

    :param args: extra CLI arguments (default: [])
    :type args: [str] or None
    :param stdout: expected stdout
    :type stdout: bytes
    :param stderr: expected stderr, appended to *uninstalled*
    :type stderr: bytes
    :param returncode: expected exit code
    :type returncode: int
    :param uninstalled: expected stderr prefix for the uninstall itself
    :type uninstalled: bytes
    :rtype: None
    """
    # None sentinel instead of a mutable list default (shared across calls).
    if args is None:
        args = []
    assert_command(['dcos', 'package', 'uninstall', 'helloworld',
                    '--yes'] + args,
                   stdout=stdout,
                   stderr=uninstalled+stderr,
                   returncode=returncode)

    watch_all_deployments()
def _uninstall_cli_helloworld(
        args=None,
        stdout=b'',
        stderr=b'',
        returncode=0):
    """Uninstall the helloworld CLI subcommand and assert the output.

    :param args: extra CLI arguments (default: [])
    :type args: [str] or None
    :param stdout: expected stdout
    :type stdout: bytes
    :param stderr: expected stderr
    :type stderr: bytes
    :param returncode: expected exit code
    :type returncode: int
    :rtype: None
    """
    # None sentinel instead of a mutable list default (shared across calls).
    if args is None:
        args = []
    assert_command(['dcos', 'package', 'uninstall', 'helloworld',
                    '--cli'] + args,
                   stdout=stdout,
                   stderr=stderr,
                   returncode=returncode)
def _uninstall_chronos(args=None, returncode=0, stdout=b'', stderr=''):
    """Uninstall chronos and assert the command's result.

    Note that *stderr* is a str prefix the decoded stderr must start with,
    not an exact bytes match.

    :param args: extra CLI arguments (default: [])
    :type args: [str] or None
    :param returncode: expected exit code
    :type returncode: int
    :param stdout: expected stdout (exact)
    :type stdout: bytes
    :param stderr: expected stderr prefix
    :type stderr: str
    :rtype: None
    """
    # None sentinel instead of a mutable list default (shared across calls).
    if args is None:
        args = []
    result_returncode, result_stdout, result_stderr = exec_command(
        ['dcos', 'package', 'uninstall', 'chronos', '--yes'] + args)

    assert result_returncode == returncode
    assert result_stdout == stdout
    assert result_stderr.decode('utf-8').startswith(stderr)
def _install_bad_chronos(
        args=None,
        stdout=(
            b'By Deploying, you agree to the Terms '
            b'and Conditions https://mesosphere.com/'
            b'catalog-terms-conditions/#community-services\n'
            b'We recommend a minimum of one node with at least 2 '
            b'CPUs and 2.5GiB of RAM available for the Chronos '
            b'Service.\n'
        ),
        stderr=''):
    """Attempt a chronos install that is expected to fail (exit code 1).

    :param args: extra CLI arguments (default: ['--yes'])
    :type args: [str] or None
    :param stdout: expected stdout (exact)
    :type stdout: bytes
    :param stderr: substring expected somewhere in the decoded stderr
    :type stderr: str
    :rtype: None
    """
    # None sentinel instead of a mutable list default (shared across calls).
    if args is None:
        args = ['--yes']
    cmd = ['dcos', 'package', 'install', 'chronos'] + args
    returncode_, stdout_, stderr_ = exec_command(cmd)

    assert returncode_ == 1
    assert stderr in stderr_.decode('utf-8')
    assert stdout_ == stdout
def _install_chronos(
        args=None,
        returncode=0,
        stdout=b'Installing Marathon app for package [chronos] '
               b'version [3.0.1]\n',
        stderr=b'',
        pre_install_notes=(
            b'By Deploying, you agree to the Terms '
            b'and Conditions https://mesosphere.com/'
            b'catalog-terms-conditions/#community-services\n'
            b'We recommend a minimum of one node with at least '
            b'2 CPUs and 2.5GiB of RAM available for the '
            b'Chronos Service.\n'
        ),
        post_install_notes=b'Chronos DCOS Service has been successfully '
                           b'installed!\n\n'
                           b'\tDocumentation: http://mesos.github.io/'
                           b'chronos\n'
                           b'\tIssues: https://github.com/mesos/chronos/'
                           b'issues\n',
        stdin=None):
    """Install chronos and assert the command's output.

    The expected stdout is the concatenation
    pre_install_notes + stdout + post_install_notes.

    :param args: extra CLI arguments (default: ['--yes'])
    :type args: [str] or None
    :param returncode: expected exit code
    :type returncode: int
    :param stdout: expected install message
    :type stdout: bytes
    :param stderr: expected stderr
    :type stderr: bytes
    :param pre_install_notes: expected pre-install notes
    :type pre_install_notes: bytes
    :param post_install_notes: expected post-install notes
    :type post_install_notes: bytes
    :param stdin: file to feed to the command's stdin, if any
    :rtype: None
    """
    # None sentinel instead of a mutable list default (shared across calls).
    if args is None:
        args = ['--yes']
    cmd = ['dcos', 'package', 'install', 'chronos'] + args
    assert_command(
        cmd,
        returncode,
        pre_install_notes + stdout + post_install_notes,
        stderr,
        stdin=stdin)
@contextlib.contextmanager
def _chronos_package(
        args=None,
        returncode=0,
        stdout=b'Installing Marathon app for package [chronos] '
               b'version [3.0.1]\n',
        stderr=b'',
        pre_install_notes=(
            b'By Deploying, you agree to the Terms '
            b'and Conditions https://mesosphere.com/'
            b'catalog-terms-conditions/#community-services\n'
            b'We recommend a minimum of one node with at least '
            b'2 CPUs and 2.5GiB of RAM available for the '
            b'Chronos Service.\n'
        ),
        post_install_notes=b'Chronos DCOS Service has been successfully '
                           b'installed!\n\n'
                           b'\tDocumentation: http://mesos.github.io/'
                           b'chronos\n'
                           b'\tIssues: https://github.com/mesos/chronos/'
                           b'issues\n',
        stdin=None):
    """Context manager that installs chronos on entrance and cleans up
    (uninstall, ZK node deletion, deployment wait) on exit.

    Parameters mirror :func:`_install_chronos`, to which they are forwarded.

    :param args: extra CLI arguments (default: ['--yes'])
    :type args: [str] or None
    :rtype: None
    """
    # None sentinel instead of a mutable list default (shared across calls).
    if args is None:
        args = ['--yes']
    _install_chronos(
        args,
        returncode,
        stdout,
        stderr,
        pre_install_notes,
        post_install_notes,
        stdin)
    try:
        yield
    finally:
        # Always attempt full cleanup, even if the body raised.
        _uninstall_chronos()
        delete_zk_node('chronos')
        watch_all_deployments()
def _list(args, stdout):
    """Run `dcos package list` with *args*, asserting exact *stdout*."""
    list_cmd = ['dcos', 'package', 'list'] + args
    assert_command(list_cmd, stdout=stdout)
# Output emitted when the helloworld CLI subcommand is installed; the
# executable name is platform-dependent (".exe" suffix on Windows).
HELLOWORLD_CLI_STDOUT = (
    b'Installing CLI subcommand for package [helloworld] '
    b'version [0.1.0]\n'
    b'New command available: dcos ' +
    _executable_name(b'helloworld') + b'\n'
)
def _helloworld():
    """Return a context manager that installs the full helloworld package
    (app + CLI) on entrance and uninstalls it on exit."""
    install_stdout = (
        b'By Deploying, you agree to the Terms '
        b'and Conditions https://mesosphere.com/'
        b'catalog-terms-conditions/#community-services\n'
        b'A sample pre-installation message\n'
        b'Installing Marathon app for package [helloworld] version '
        b'[0.1.0]\n' + HELLOWORLD_CLI_STDOUT +
        b'A sample post-installation message\n'
    )
    uninstall_stderr = b'Uninstalled package [helloworld] version [0.1.0]\n'
    return _package(
        name='helloworld',
        args=['--yes'],
        stdout=install_stdout,
        uninstall_stderr=uninstall_stderr)
def _helloworld_cli(global_=False):
    """Return a context manager that installs only the helloworld CLI
    subcommand (optionally with --global) and uninstalls it on exit."""
    install_args = ['--yes', '--cli'] + (['--global'] if global_ else [])
    expected_stdout = (
        b'By Deploying, you agree to the Terms '
        b'and Conditions https://mesosphere.com/'
        b'catalog-terms-conditions/#community-services\n' +
        HELLOWORLD_CLI_STDOUT
    )
    return _package(name='helloworld',
                    args=install_args,
                    stdout=expected_stdout,
                    uninstall_stderr=b'')
@contextlib.contextmanager
def _package(name,
             args,
             stdout=b'',
             uninstall_stderr=b''):
    """Context manager that installs a package on entrance, and uninstalls it on
    exit.

    :param name: package name
    :type name: str
    :param args: extra CLI args
    :type args: [str]
    :param stdout: Expected stdout
    :type stdout: bytes
    :param uninstall_stderr: Expected stderr
    :type uninstall_stderr: bytes
    :rtype: None
    """
    install_cmd = ['dcos', 'package', 'install', name] + args
    installed = False
    try:
        code, out, _ = exec_command(install_cmd)
        installed = (code == 0)
        assert installed
        assert out == stdout
        yield
    finally:
        # Only clean up if the install actually succeeded.
        if installed:
            assert_command(
                ['dcos', 'package', 'uninstall', name, '--yes'],
                stderr=uninstall_stderr)
            watch_all_deployments()
def _repo_add(args=None, repo_list=None):
    """Add a package repo and assert the resulting `repo list` output.

    :param args: extra CLI arguments for `repo add` (default: [])
    :type args: [str] or None
    :param repo_list: expected `repo list` stdout after the add
    :rtype: None
    """
    # None sentinels instead of mutable list defaults (shared across calls).
    if args is None:
        args = []
    if repo_list is None:
        repo_list = []
    assert_command(['dcos', 'package', 'repo', 'add'] + args)
    assert_command(['dcos', 'package', 'repo', 'list'], stdout=repo_list)
def _repo_remove(args=None, repo_list=None):
    """Remove a package repo and assert the resulting `repo list` output.

    :param args: extra CLI arguments for `repo remove` (default: [])
    :type args: [str] or None
    :param repo_list: expected `repo list` stdout after the removal
    :rtype: None
    """
    # None sentinels instead of mutable list defaults (shared across calls).
    if args is None:
        args = []
    if repo_list is None:
        repo_list = []
    assert_command(['dcos', 'package', 'repo', 'remove'] + args)
    assert_command(['dcos', 'package', 'repo', 'list'], stdout=repo_list)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.