repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
|---|---|---|---|---|---|
ahmadio/edx-platform
|
common/djangoapps/student/tests/test_userstanding.py
|
106
|
3869
|
"""
These are tests for disabling and enabling student accounts, and for making sure
that students with disabled accounts are unable to access the courseware.
"""
import unittest
from student.tests.factories import UserFactory, UserStandingFactory
from student.models import UserStanding
from django.conf import settings
from django.test import TestCase, Client
from django.core.urlresolvers import reverse
class UserStandingTest(TestCase):
    """Test suite for the user standing view for enabling and disabling accounts."""

    def setUp(self):
        """Create four users (one pre-disabled, one staff admin) plus a
        logged-in Django test client for each."""
        super(UserStandingTest, self).setUp()
        # create users
        self.bad_user = UserFactory.create(
            username='bad_user',
        )
        self.good_user = UserFactory.create(
            username='good_user',
        )
        self.non_staff = UserFactory.create(
            username='non_staff',
        )
        self.admin = UserFactory.create(
            username='admin',
            is_staff=True,
        )

        # create clients
        self.bad_user_client = Client()
        self.good_user_client = Client()
        self.non_staff_client = Client()
        self.admin_client = Client()

        for user, client in [
            (self.bad_user, self.bad_user_client),
            (self.good_user, self.good_user_client),
            (self.non_staff, self.non_staff_client),
            (self.admin, self.admin_client),
        ]:
            # assumes UserFactory sets the password to 'test' -- TODO confirm
            client.login(username=user.username, password='test')

        # bad_user starts out with a disabled standing, changed by admin
        UserStandingFactory.create(
            user=self.bad_user,
            account_status=UserStanding.ACCOUNT_DISABLED,
            changed_by=self.admin
        )

        # set stock url to test disabled accounts' access to site
        self.some_url = '/'

    # since it's only possible to disable accounts from lms, we're going
    # to skip tests for cms
    @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
    def test_disable_account(self):
        """A staff POST to disable_account_ajax creates a disabled standing
        for a user that previously had none."""
        self.assertEqual(
            UserStanding.objects.filter(user=self.good_user).count(), 0
        )
        response = self.admin_client.post(reverse('disable_account_ajax'), {
            'username': self.good_user.username,
            'account_action': 'disable',
        })
        self.assertEqual(
            UserStanding.objects.get(user=self.good_user).account_status,
            UserStanding.ACCOUNT_DISABLED
        )

    def test_disabled_account_403s(self):
        """A disabled account is refused (403) on an ordinary page."""
        response = self.bad_user_client.get(self.some_url)
        self.assertEqual(response.status_code, 403)

    @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
    def test_reenable_account(self):
        """A staff POST with 'reenable' switches a disabled account back on."""
        response = self.admin_client.post(reverse('disable_account_ajax'), {
            'username': self.bad_user.username,
            'account_action': 'reenable'
        })
        self.assertEqual(
            UserStanding.objects.get(user=self.bad_user).account_status,
            UserStanding.ACCOUNT_ENABLED
        )

    @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
    def test_non_staff_cant_access_disable_view(self):
        """Non-staff users get a 404 from the management page."""
        response = self.non_staff_client.get(reverse('manage_user_standing'), {
            'user': self.non_staff,
        })
        self.assertEqual(response.status_code, 404)

    @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
    def test_non_staff_cant_disable_account(self):
        """Non-staff POSTs are rejected (404) and create no standing row."""
        response = self.non_staff_client.post(reverse('disable_account_ajax'), {
            'username': self.good_user.username,
            'user': self.non_staff,
            'account_action': 'disable'
        })
        self.assertEqual(response.status_code, 404)
        self.assertEqual(
            UserStanding.objects.filter(user=self.good_user).count(), 0
        )
|
agpl-3.0
|
schleichdi2/OPENNFR-6.1-CORE
|
bitbake/lib/bb/ui/uievent.py
|
4
|
5163
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer
# Copyright (C) 2006 - 2007 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Use this class to fork off a thread to receive event callbacks from the bitbake
server and queue them for the UI to process. This process must be used to avoid
client/server deadlocks.
"""
import collections
import collections.abc
import pickle
import socket
import threading

from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
class BBUIEventQueue:
    """Receive bitbake server events on a background XML-RPC thread and queue
    them for the UI to drain.

    Decouples event delivery from UI processing to avoid client/server
    deadlocks.  NOTE(review): relies on module-level names ``bb`` and
    ``logger`` being provided elsewhere in this file -- confirm.
    """

    def __init__(self, BBServer, clientinfo=("localhost, 0")):
        self.eventQueue = []
        self.eventQueueLock = threading.Lock()
        self.eventQueueNotify = threading.Event()

        self.BBServer = BBServer
        self.clientinfo = clientinfo

        server = UIXMLRPCServer(self.clientinfo)
        self.host, self.port = server.socket.getsockname()

        server.register_function(self.system_quit, "event.quit")
        server.register_function(self.send_event, "event.sendpickle")
        server.socket.settimeout(1)

        self.EventHandle = None

        # the event handler registration may fail here due to cooker being in
        # an invalid state; this is a transient situation, so retry a few
        # times before giving up.
        errmsg = ""
        for count_tries in range(5):
            ret = self.BBServer.registerEventHandler(self.host, self.port)

            # Newer servers return a (handle, error) pair; older ones return
            # just the handle.
            # BUG FIX: collections.Iterable was removed in Python 3.10; the
            # ABC lives in collections.abc (since 3.3).
            if isinstance(ret, collections.abc.Iterable):
                self.EventHandle, error = ret
            else:
                self.EventHandle = ret
                error = ""

            if self.EventHandle is not None:
                break

            errmsg = "Could not register UI event handler. Error: %s, host %s, "\
                     "port %d" % (error, self.host, self.port)
            bb.warn("%s, retry" % errmsg)

            import time
            time.sleep(1)
        else:
            raise Exception(errmsg)

        self.server = server

        self.t = threading.Thread()
        self.t.daemon = True
        self.t.run = self.startCallbackHandler
        self.t.start()

    def getEvent(self):
        """Pop and return the oldest queued event, or None if the queue is
        empty.  Clears the notify flag when the queue drains."""
        with self.eventQueueLock:
            if not self.eventQueue:
                return None
            item = self.eventQueue.pop(0)
            if not self.eventQueue:
                self.eventQueueNotify.clear()
            return item

    def waitEvent(self, delay):
        """Wait up to `delay` seconds for an event, then return the oldest
        queued event or None."""
        self.eventQueueNotify.wait(delay)
        return self.getEvent()

    def queue_event(self, event):
        """Append `event` to the queue and wake any waiter."""
        with self.eventQueueLock:
            self.eventQueue.append(event)
            self.eventQueueNotify.set()

    def send_event(self, event):
        """XML-RPC entry point: unpickle `event` and queue it.

        NOTE: pickle.loads() is acceptable here only because the payload
        comes from the trusted local bitbake server.
        """
        self.queue_event(pickle.loads(event))

    def startCallbackHandler(self):
        """Thread body: service XML-RPC requests until system_quit() sets
        the server's quit flag, then close the server socket."""
        self.server.timeout = 1
        bb.utils.set_process_name("UIEventQueue")
        while not self.server.quit:
            try:
                self.server.handle_request()
            except Exception as e:
                import traceback
                logger.error("BBUIEventQueue.startCallbackHandler: Exception while trying to handle request: %s\n%s" % (e, traceback.format_exc()))

        self.server.server_close()

    def system_quit(self):
        """
        Shut down the callback thread
        """
        try:
            self.BBServer.unregisterEventHandler(self.EventHandle)
        except Exception:
            # Best effort: the server side may already be gone.
            pass
        self.server.quit = True
class UIXMLRPCServer (SimpleXMLRPCServer):
    """An XML-RPC server whose accept loop can be interrupted cleanly.

    The ``quit`` flag is polled between (timed-out) accept attempts, so
    setting it causes get_request() to hand back (None, None), which the
    request-processing hooks below treat as a no-op.
    """

    def __init__(self, interface):
        # Polled by get_request(); flipped by the owner to stop serving.
        self.quit = False
        SimpleXMLRPCServer.__init__(
            self,
            interface,
            requestHandler=SimpleXMLRPCRequestHandler,
            logRequests=False, allow_none=True, use_builtin_types=True)

    def get_request(self):
        """Accept a connection, re-checking ``quit`` on every socket
        timeout; return (None, None) once asked to stop."""
        while True:
            if self.quit:
                return (None, None)
            try:
                conn, address = self.socket.accept()
            except socket.timeout:
                continue
            conn.settimeout(1)
            return (conn, address)

    def close_request(self, request):
        # A None request is the shutdown sentinel from get_request().
        if request is not None:
            SimpleXMLRPCServer.close_request(self, request)

    def process_request(self, request, client_address):
        # Skip the sentinel produced while shutting down.
        if request is not None:
            SimpleXMLRPCServer.process_request(self, request, client_address)
|
gpl-2.0
|
yugang/crosswalk-test-suite
|
misc/wrt-androidarm-UA-tests/inst.wgt.py
|
136
|
6856
|
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = "/home/app/content"
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
def doCMD(cmd):
    """Run `cmd` in a shell, echoing output as it arrives, and return a
    (exit_code, output_lines) tuple."""
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)

    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # poll() stays None while the process runs; stop once it has exited
        # and its output is fully drained.
        if output_line == '' and cmd_return_code != None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)
    return (cmd_return_code, output)
def updateCMD(cmd=None):
    """Wrap pkgcmd invocations so they run as the target user with XW_ENV
    exported; any other command is returned unchanged."""
    if "pkgcmd" not in cmd:
        return cmd
    return "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
def getUSERID():
    """Query the numeric uid of PARAMETERS.user on the target device.

    Returns the (exit_code, output_lines) tuple produced by doCMD().
    """
    if PARAMETERS.mode == "SDB":
        remote_cmd = "sdb -s %s shell id -u %s" % (
            PARAMETERS.device, PARAMETERS.user)
    else:
        remote_cmd = "ssh %s \"id -u %s\"" % (
            PARAMETERS.device, PARAMETERS.user)
    return doCMD(remote_cmd)
def getPKGID(pkg_name=None):
    """Look up the package id of `pkg_name` in the device's `pkgcmd -l`
    listing.  Returns the id string, or None if the listing fails or the
    name is not found."""
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell %s" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    else:
        cmd = "ssh %s \"%s\"" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))

    (return_code, output) = doCMD(cmd)
    if return_code != 0:
        return None

    test_pkg_id = None
    for line in output:
        pkg_infos = line.split()
        # assumes package lines have 6+ whitespace-separated fields with the
        # id in field 3 and the name in field 5, both wrapped in brackets;
        # lines with exactly 4 fields are headers -- TODO confirm against
        # real pkgcmd output
        if len(pkg_infos) == 4:
            continue
        name = pkg_infos[5]
        name = name.lstrip('[').rstrip(']')
        print "name is: %s" % name
        if pkg_name == name:
            test_pkg_id = pkg_infos[3]
            test_pkg_id = test_pkg_id.lstrip('[').rstrip(']')
            print test_pkg_id
            break
    return test_pkg_id
def doRemoteCMD(cmd=None):
    """Run `cmd` on the target device (over sdb or ssh) via doCMD()."""
    if PARAMETERS.mode == "SDB":
        full_cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
    else:
        full_cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
    return doCMD(full_cmd)
def doRemoteCopy(src=None, dest=None):
    """Copy `src` to `dest` on the target device and sync.

    Returns True on success, False on failure -- matching how callers test
    the result (`if not doRemoteCopy(...): action_status = False`).
    """
    if PARAMETERS.mode == "SDB":
        cmd_prefix = "sdb -s %s push" % PARAMETERS.device
        cmd = "%s %s %s" % (cmd_prefix, src, dest)
    else:
        cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)

    (return_code, output) = doCMD(cmd)
    doRemoteCMD("sync")

    # BUG FIX: the original returned True when the copy FAILED
    # (return_code != 0), so callers treated failed copies as successes
    # and successful copies as failures.
    return return_code == 0
def uninstPKGs():
    """Uninstall every .wgt package found under SCRIPT_DIR from the device
    and remove the on-device source directory.  Returns True only if every
    step succeeded."""
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        # media source payloads are not installable packages
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if file.endswith(".wgt"):
                pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
                if not pkg_id:
                    action_status = False
                    continue
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -u -t wgt -q -n %s" % pkg_id)
                # pkgcmd reports failure in its output, not its exit code
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break

    (return_code, output) = doRemoteCMD(
        "rm -rf %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False

    return action_status
def instPKGs():
    """Push every .wgt under SCRIPT_DIR to the device, install it via
    pkgcmd, then delete the pushed file.  Returns True only if every step
    succeeded."""
    action_status = True
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False

    for root, dirs, files in os.walk(SCRIPT_DIR):
        # media source payloads are not installable packages
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if file.endswith(".wgt"):
                if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
                    action_status = False
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
                doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
                # pkgcmd reports failure in its output, not its exit code
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break

    # Do some special copy/delete... steps
    '''
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s/tests" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False

    if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
        action_status = False
    '''

    return action_status
def main():
    """Parse command-line options, resolve the target device and user
    environment, then install (-i) or uninstall (-u) the widget packages."""
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-m", dest="mode", action="store", help="Specify mode")
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        opts_parser.add_option(
            "-a", dest="user", action="store", help="User name")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception, e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)

    # Defaults: run as the "app" user over SDB
    if not PARAMETERS.user:
        PARAMETERS.user = "app"
    if not PARAMETERS.mode:
        PARAMETERS.mode = "SDB"

    if PARAMETERS.mode == "SDB":
        if not PARAMETERS.device:
            # Autodetect the first attached sdb device
            (return_code, output) = doCMD("sdb devices")
            for line in output:
                if str.find(line, "\tdevice") != -1:
                    PARAMETERS.device = line.split("\t")[0]
                    break
    else:
        PARAMETERS.mode = "SSH"

    if not PARAMETERS.device:
        print "No device provided"
        sys.exit(1)

    # Resolve the user's uid to build the DBus session bus address (XW_ENV)
    user_info = getUSERID()
    re_code = user_info[0]
    if re_code == 0 :
        global XW_ENV
        userid = user_info[1][0]
        XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
    else:
        print "[Error] cmd commands error : %s"%str(user_info[1])
        sys.exit(1)

    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)

    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        if not instPKGs():
            sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
|
bsd-3-clause
|
SeaFalcon/Musicool_Pr
|
lib/sqlalchemy/testing/warnings.py
|
33
|
1682
|
# testing/warnings.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
import warnings
from .. import exc as sa_exc
from .. import util
import re
def testing_warn(msg, stacklevel=3):
    """Replaces sqlalchemy.util.warn during tests.

    Emits `msg` via warnings.warn_explicit() so each warning appears to
    originate from a fixed, stable location regardless of the real caller.
    """
    filename = "sqlalchemy.testing.warnings"
    lineno = 1
    if isinstance(msg, util.string_types):
        warnings.warn_explicit(msg, sa_exc.SAWarning, filename, lineno)
    else:
        # BUG FIX: warn_explicit() requires (message, category, filename,
        # lineno); the original passed only (msg, filename, lineno), which
        # raises TypeError whenever msg is a Warning instance.  The category
        # is ignored when msg is a Warning instance, so its own type is a
        # safe placeholder.
        warnings.warn_explicit(msg, type(msg), filename, lineno)
def resetwarnings():
    """Reset warning behavior to testing defaults."""
    util.warn = util.langhelpers.warn = testing_warn

    # Pending deprecations are silenced; real deprecations and SAWarnings
    # become hard errors during the test run.
    warnings.filterwarnings(
        'ignore', category=sa_exc.SAPendingDeprecationWarning)
    for strict_category in (sa_exc.SADeprecationWarning, sa_exc.SAWarning):
        warnings.filterwarnings('error', category=strict_category)
def assert_warnings(fn, warnings, regex=False):
    """Assert that each of the given warnings are emitted by fn.

    ``warnings`` is the ordered list of expected warning messages (or
    regex patterns when ``regex=True``).  Note the parameter deliberately
    shadows the stdlib ``warnings`` module, which is not used here.
    """
    from .assertions import eq_, emits_warning

    canary = []
    orig_warn = util.warn

    def capture_warnings(*args, **kw):
        # Delegate to the real warn, then match this warning against the
        # next expected entry, strictly in order.
        orig_warn(*args, **kw)
        popwarn = warnings.pop(0)
        canary.append(popwarn)
        if regex:
            assert re.match(popwarn, args[0])
        else:
            eq_(args[0], popwarn)

    util.warn = util.langhelpers.warn = capture_warnings
    try:
        result = emits_warning()(fn)()
    finally:
        # BUG FIX: restore the real warn implementation even if fn raises;
        # the original left the capturing hook installed permanently.
        util.warn = util.langhelpers.warn = orig_warn
    assert canary, "No warning was emitted"
    return result
|
apache-2.0
|
skyscrapers/monitoring-plugins
|
check_elb.py
|
1
|
1132
|
#!/usr/bin/env python
# Written by filip@skyscrape.rs
# 2014-04-29
import json
import os
import sys
from pprint import pprint
import commands

# Nagios-style plugin: exit 2 = CRITICAL, 1 = WARNING, 0 = OK.
paramList = sys.argv
if len(paramList) <= 1:
    print "USAGE " + paramList[0] + " <ELB name>"
    sys.exit(2)
elbName = paramList[1]

# Query instance health for the ELB through the aws CLI (read-only profile).
cmd = "/usr/local/bin/aws --profile nagiosro elb describe-instance-health --load-balancer-name " + elbName
output = commands.getoutput(cmd)
jsondata = json.loads(output)

# Tally healthy (InService) vs unhealthy instances and collect their ids.
counterH = 0
instancesH = ""
counterUH = 0
instancesUH = ""
for item in jsondata['InstanceStates']:
    if item['State'] == 'InService':
        counterH += 1
        instancesH += " " + item['InstanceId']
    else:
        counterUH += 1
        instancesUH += " " + item['InstanceId']

if not instancesH:
    instancesH = " none"
if not instancesUH:
    instancesUH = " none"

msg = "InService count " + str(counterH) + ":" + instancesH + ". OutOfService count " + str(counterUH) + ":" + instancesUH

# No healthy instance -> CRITICAL; any unhealthy -> WARNING; otherwise OK.
if counterH == 0:
    print "CRITICAL - " + msg
    sys.exit(2)
elif counterUH > 0:
    print "WARNING - " + msg
    sys.exit(1)
else:
    print "OK - " + msg
    sys.exit(0)
|
gpl-3.0
|
jayvdb/mwparserfromhell
|
src/mwparserfromhell/nodes/comment.py
|
2
|
1655
|
# Copyright (C) 2012-2020 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from ._base import Node
__all__ = ["Comment"]
class Comment(Node):
    """A hidden HTML comment node, e.g. ``<!-- foobar -->``."""

    def __init__(self, contents):
        super().__init__()
        self.contents = contents

    def __str__(self):
        return f"<!--{self.contents}-->"

    @property
    def contents(self):
        """The hidden text contained between ``<!--`` and ``-->``."""
        return self._contents

    @contents.setter
    def contents(self, value):
        # Normalize any assigned value to its string form.
        self._contents = str(value)
|
mit
|
sappjw/gourmet
|
gourmet/gglobals.py
|
6
|
6335
|
import os, os.path, gobject, re, gtk
import tempfile
from gdebug import debug
from OptionParser import args
from util import windows
tmpdir = tempfile.gettempdir()
# Resolve the per-user configuration directory, with fallbacks.
if args.gourmetdir:
    # An explicit --gourmetdir option always wins.
    gourmetdir = args.gourmetdir
    debug("User specified gourmetdir %s"%gourmetdir,0)
else:
    if os.name =='nt':
        # Under Windows, we cannot unfortunately just use os.environ, see
        # http://stackoverflow.com/questions/2608200/problems-with-umlauts-in-python-appdata-environvent-variable
        # We might drop this workaround with Python 3 (all strings are unicode)
        # and/or GTK+ 3 (use Glib.get_home_dir()).
        APPDATA = windows.getenv(u'APPDATA').decode('utf-8')
        gourmetdir = os.path.join(APPDATA,'gourmet')
    else:
        gourmetdir = os.path.join(os.path.expanduser('~'),'.gourmet')
try:
    if not os.path.exists(gourmetdir):
        debug('Creating %s'%gourmetdir,0)
        os.makedirs(gourmetdir)
except OSError:
    # Home directory not writable: fall back to .gourmet in the working
    # directory before giving up entirely.
    try:
        debug("Unable to create standard config directory in home directory. Looking for .gourmet in working directory instead.",0)
        gourmetdir = '.gourmet'
        if not os.path.exists(gourmetdir):
            debug("Creating .gourmet in working directory",0)
            os.makedirs(gourmetdir)
    except OSError:
        print "Unable to create gourmet directory."
        raise
        # NOTE(review): the two lines below are unreachable after the raise
        # above -- confirm intent.
        import sys
        sys.exit()
if not os.access(gourmetdir,os.W_OK):
    # A directory we cannot write to is as bad as no directory at all.
    debug('Cannot write to configuration directory, %s'%gourmetdir,-1)
    import sys
    sys.exit()
debug('gourmetdir=%s'%gourmetdir,2)
use_threads = args.threads
# Uncomment the below to test FauxThreads
#use_threads = False
# note: this stuff must be kept in sync with changes in setup.py
import settings
uibase = os.path.join(settings.ui_base)
lib_dir = os.path.join(settings.lib_dir,'gourmet')
# To have strings from .ui files (gtk.Builder) translated on all platforms,
# we need the following module to enable localization on all platforms.
try:
import elib.intl
elib.intl.install('gourmet', settings.locale_base)
except ImportError:
print 'elib.intl failed to load.'
print 'IF YOU HAVE TROUBLE WITH TRANSLATIONS, MAKE SURE YOU HAVE THIS LIBRARY INSTALLED.'
from gettext import gettext as _
data_dir = settings.data_dir
imagedir = os.path.join(settings.data_dir,'images')
style_dir = os.path.join(settings.data_dir,'style')
icondir = os.path.join(settings.icon_base,"48x48","apps")
doc_base = settings.doc_base
plugin_base = settings.plugin_base
# GRAB PLUGIN DIR FOR HTML IMPORT
if args.html_plugin_dir:
html_plugin_dir = args.html_plugin_dir
else:
html_plugin_dir = os.path.join(gourmetdir,'html_plugins')
if not os.path.exists(html_plugin_dir):
os.makedirs(html_plugin_dir)
template_file = os.path.join(settings.data_dir,'RULES_TEMPLATE')
if os.path.exists(template_file):
import shutil
shutil.copy(template_file,
os.path.join(html_plugin_dir,'RULES_TEMPLATE')
)
REC_ATTRS = [('title',_('Title'),'Entry'),
('category',_('Category'),'Combo'),
('cuisine',_('Cuisine'),'Combo'),
('rating',_('Rating'),'Entry'),
('source',_('Source'),'Combo'),
('link',_('Website'),'Entry'),
('yields',_('Yield'),'Entry'),
('yield_unit',_('Yield Unit'),'Combo'),
('preptime',_('Preparation Time'),'Entry'),
('cooktime',_('Cooking Time'),'Entry'),
]
INT_REC_ATTRS = ['rating','preptime','cooktime']
FLOAT_REC_ATTRS = ['yields']
TEXT_ATTR_DIC = {'instructions':_('Instructions'),
'modifications':_('Notes'),
}
REC_ATTR_DIC={}
NAME_TO_ATTR = {_('Instructions'):'instructions',
_('Notes'):'modifications',
_('Modifications'):'modifications',
}
DEFAULT_ATTR_ORDER = ['title',
#'servings',
'yields',
'cooktime',
'preptime',
'category',
'cuisine',
'rating',
'source',
'link',
]
DEFAULT_TEXT_ATTR_ORDER = ['instructions',
'modifications',]
def build_rec_attr_dic ():
    """Populate REC_ATTR_DIC (attr -> display name) and NAME_TO_ATTR
    (display name -> attr) from the REC_ATTRS table."""
    for rec_attr in REC_ATTRS:
        attr, display_name = rec_attr[0], rec_attr[1]
        REC_ATTR_DIC[attr] = display_name
        NAME_TO_ATTR[display_name] = attr
build_rec_attr_dic()
DEFAULT_HIDDEN_COLUMNS = [REC_ATTR_DIC[attr] for attr in
['link','yields','yield_unit','preptime','cooktime']
]
from gtk_extras import dialog_extras
def launch_url (url, ext=""):
    """Open `url` with the platform's default handler; on POSIX failure,
    show an error dialog.  `ext` is accepted but unused here."""
    if os.name == 'nt':
        os.startfile(url)
    elif os.name == 'posix':
        try:
            gtk.show_uri(gtk.gdk.Screen(),url,0L)
        except gobject.GError, err:
            #print dir(err)
            # Pick an error label matching the URL scheme, defaulting to a
            # generic message.
            label = _('Unable to open URL')
            for reg, msg in [('mailto:',_('Unable to launch mail reader.')),
                             ('http:',_('Unable to open website.')),
                             ('file:',_('Unable to open file.'))]:
                if re.match(reg,url.lower()): label = msg
            dialog_extras.show_message(
                label=label,
                sublabel=err.message,
                expander=[_('_Details'),
                          _("There was an error launching the url: %s"%url)]
                )
# Set up custom STOCK items and ICONS!
icon_factory = gtk.IconFactory()
def add_icon (file_name, stock_id, label=None, modifier=0, keyval=0):
    """Register the image at `file_name` as a GTK stock icon under
    `stock_id`, optionally with a menu label and accelerator
    (modifier + keyval)."""
    pb = gtk.gdk.pixbuf_new_from_file(file_name)
    iconset = gtk.IconSet(pb)
    icon_factory.add(stock_id,iconset)
    icon_factory.add_default()
    gtk.stock_add([(stock_id,
                    label,
                    modifier,
                    keyval,
                    "")])
for filename,stock_id,label,modifier,keyval in [
('AddToShoppingList.png','add-to-shopping-list',_('Add to _Shopping List'),gtk.gdk.CONTROL_MASK,gtk.gdk.keyval_from_name('l')),
('reccard.png','recipe-card',None,0,0),
('reccard_edit.png','edit-recipe-card',None,0,0),
]:
add_icon(os.path.join(imagedir,filename),stock_id,label,modifier,keyval)
|
gpl-2.0
|
GiedriusM/openthread
|
tools/spinel-cli/spinel/config.py
|
5
|
3527
|
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
""" Module-wide logging configuration for spinel package. """
import logging
import logging.config
# Verbosity flags for the spinel package.  Most default to off (0) and are
# rewritten by debug_set_level() below from a single verbosity level.
DEBUG_ENABLE = 0
DEBUG_TUN = 0
DEBUG_HDLC = 0
DEBUG_STREAM_TX = 0
DEBUG_STREAM_RX = 0
# These three mirror DEBUG_ENABLE at import time.
DEBUG_LOG_PKT = DEBUG_ENABLE
DEBUG_LOG_SERIAL = DEBUG_ENABLE
DEBUG_LOG_PROP = DEBUG_ENABLE
DEBUG_CMD_RESPONSE = 0
DEBUG_EXPERIMENTAL = 1
LOGGER = logging.getLogger(__name__)
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'minimal': {
'format': '%(message)s'
},
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
},
'handlers': {
'console': {
#'level':'INFO',
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
#'syslog': {
# 'level':'DEBUG',
# 'class':'logging.handlers.SysLogHandler',
# 'address': '/dev/log'
#},
},
'loggers': {
'spinel': {
'handlers': ['console'], # ,'syslog'],
'level': 'DEBUG',
'propagate': True
}
}
})
def debug_set_level(level):
    """ Set logging level for spinel module.

    A falsy level disables everything; otherwise each flag tier switches
    on once `level` reaches its threshold (props at 1, packets at 2,
    serial at 3, HDLC at 4, raw streams at 5).
    """
    global DEBUG_ENABLE, DEBUG_LOG_PROP
    global DEBUG_LOG_PKT, DEBUG_LOG_SERIAL
    global DEBUG_STREAM_RX, DEBUG_STREAM_TX, DEBUG_HDLC

    # Start from everything disabled, then enable tiers up to `level`.
    DEBUG_ENABLE = level if level else 0
    DEBUG_LOG_PROP = 1 if level and level >= 1 else 0
    DEBUG_LOG_PKT = 1 if level and level >= 2 else 0
    DEBUG_LOG_SERIAL = 1 if level and level >= 3 else 0
    DEBUG_HDLC = 1 if level and level >= 4 else 0
    DEBUG_STREAM_RX = 1 if level and level >= 5 else 0
    DEBUG_STREAM_TX = 1 if level and level >= 5 else 0

    print("DEBUG_ENABLE = " + str(DEBUG_ENABLE))
|
bsd-3-clause
|
ctiller/grpc
|
tools/run_tests/xds_k8s_test_driver/framework/xds_flags.py
|
5
|
3275
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import flags
# NOTE(review): googleapiclient.discovery is not referenced anywhere in this
# module -- confirm it is needed (e.g. imported for side effects) before
# removing.
import googleapiclient.discovery

# GCP
PROJECT = flags.DEFINE_string("project",
                              default=None,
                              help="GCP Project ID. Required")
NAMESPACE = flags.DEFINE_string(
    "namespace",
    default=None,
    help="Isolate GCP resources using given namespace / name prefix. Required")
NETWORK = flags.DEFINE_string("network",
                              default="default",
                              help="GCP Network ID")
# Mirrors --xds-server-uri argument of Traffic Director gRPC Bootstrap
XDS_SERVER_URI = flags.DEFINE_string(
    "xds_server_uri",
    default=None,
    help="Override Traffic Director server uri, for testing")
ENSURE_FIREWALL = flags.DEFINE_bool(
    "ensure_firewall",
    default=False,
    help="Ensure the allow-health-check firewall exists before each test case")
FIREWALL_SOURCE_RANGE = flags.DEFINE_list(
    "firewall_source_range",
    default=['35.191.0.0/16', '130.211.0.0/22'],
    help="Update the source range of the firewall rule.")
FIREWALL_ALLOWED_PORTS = flags.DEFINE_list(
    "firewall_allowed_ports",
    default=['8080-8100'],
    help="Update the allowed ports of the firewall rule.")

# Test server
SERVER_NAME = flags.DEFINE_string("server_name",
                                  default="psm-grpc-server",
                                  help="Server deployment and service name")
SERVER_PORT = flags.DEFINE_integer("server_port",
                                   default=8080,
                                   lower_bound=0,
                                   upper_bound=65535,
                                   help="Server test port")
SERVER_MAINTENANCE_PORT = flags.DEFINE_integer(
    "server_maintenance_port",
    lower_bound=0,
    upper_bound=65535,
    default=None,
    help="Server port running maintenance services: health check, channelz, etc"
)
SERVER_XDS_HOST = flags.DEFINE_string("server_xds_host",
                                      default='xds-test-server',
                                      help="Test server xDS hostname")
SERVER_XDS_PORT = flags.DEFINE_integer("server_xds_port",
                                       default=8000,
                                       help="Test server xDS port")

# Test client
CLIENT_NAME = flags.DEFINE_string("client_name",
                                  default="psm-grpc-client",
                                  help="Client deployment and service name")
CLIENT_PORT = flags.DEFINE_integer("client_port",
                                   default=8079,
                                   help="Client test port")

# These flags have no usable default; absl enforces that they are provided.
flags.mark_flags_as_required([
    "project",
    "namespace",
])
|
apache-2.0
|
thomasbarillot/DAQ
|
HHGMonitor/ADQ14_FWDAQ_streaming_example.py
|
1
|
10160
|
#!/usr/bin/env python3
#
# Copyright 2015 Signal Processing Devices Sweden AB. All rights reserved.
#
# Description: ADQ14 FWDAQ streaming example
# Documentation:
#
import numpy as np
import ctypes as ct
import matplotlib.pyplot as plt
import sys
import time
import os
sys.path.insert(1, os.path.dirname(os.path.realpath(__file__))+'/..')
from modules.example_helpers import *
# Record settings: how many triggered records to collect and how many
# samples each record holds.
number_of_records = 1000
samples_per_record = 512

# Plot data if set to True
plot_data = True

# Print metadata in headers
print_headers = True

# DMA transfer buffer settings (size in bytes, count of buffers in flight)
transfer_buffer_size = 65536
num_transfer_buffers = 8

# DMA flush timeout in seconds: if no buffer arrives within this window
# the DMA is force-flushed (see the collection loop below).
flush_timeout = 0.5
# Load ADQAPI (vendor shared library wrapper from modules.example_helpers)
ADQAPI = adqapi_load()

# Create ADQControlUnit (opaque handle used by all subsequent API calls)
adq_cu = ct.c_void_p(ADQAPI.CreateADQControlUnit())

# Enable error logging from ADQAPI (trace level 3, log files in cwd)
ADQAPI.ADQControlUnit_EnableErrorTrace(adq_cu, 3, '.')

# Find ADQ devices
ADQAPI.ADQControlUnit_FindDevices(adq_cu)
n_of_ADQ = ADQAPI.ADQControlUnit_NofADQ(adq_cu)
print('Number of ADQ found: {}'.format(n_of_ADQ))

# Exit if no devices were found; release the control unit first.
if n_of_ADQ < 1:
    print('No ADQ connected.')
    ADQAPI.DeleteADQControlUnit(adq_cu)
    adqapi_unload(ADQAPI)
    sys.exit(1)

# Select ADQ: prompt only when more than one device is attached
# (device numbering is 1-based).
if n_of_ADQ > 1:
    adq_num = int(input('Select ADQ device 1-{:d}: '.format(n_of_ADQ)))
else:
    adq_num = 1

print_adq_device_revisions(ADQAPI, adq_cu, adq_num)

# Set clock source to the internal clock with internal reference
ADQ_CLOCK_INT_INTREF = 0
ADQAPI.ADQ_SetClockSource(adq_cu, adq_num, ADQ_CLOCK_INT_INTREF)

# Maximum number of channels for ADQ14 FWPD is four
max_number_of_channels = ADQAPI.ADQ_GetNofChannels(adq_cu, adq_num)

# Setup test pattern
# 0 enables the analog input from the ADCs
# > 0 enables a specific test pattern
# Note: Default is to enable a test pattern (4) and disconnect the
# analog inputs inside the FPGA.
ADQAPI.ADQ_SetTestPatternMode(adq_cu, adq_num, 4)
# Set trig mode.  Identifiers mirror the ADQAPI trigger mode constants.
SW_TRIG = 1
EXT_TRIG_1 = 2
EXT_TRIG_2 = 7
EXT_TRIG_3 = 8
LVL_TRIG = 3
INT_TRIG = 4
LVL_FALLING = 0
LVL_RISING = 1

trig_type = EXT_TRIG_1

# Apply the trigger configuration: each entry is (API function name, last
# argument).  Every call takes (adq_cu, adq_num, value) and returns 0 on
# failure, in which case a diagnostic naming the failed call is printed.
for _setup_name, _setup_arg in (
        ('ADQ_SetTriggerMode', trig_type),
        ('ADQ_SetLvlTrigLevel', 0),
        ('ADQ_SetTrigLevelResetValue', 1000),
        ('ADQ_SetLvlTrigChannel', 1),
        ('ADQ_SetLvlTrigEdge', LVL_RISING)):
    success = getattr(ADQAPI, _setup_name)(adq_cu, adq_num, _setup_arg)
    if (success == 0):
        print('{} failed.'.format(_setup_name))
# Setup acquisition: stream all four channels (bitmask 0xf), triggered
# records of samples_per_record samples each.
channels_mask = 0xf
ADQAPI.ADQ_TriggeredStreamingSetup(adq_cu, adq_num, number_of_records, samples_per_record, 0, 0, channels_mask)
ADQAPI.ADQ_SetStreamStatus(adq_cu, adq_num, 1);

# Get number of channels from device
number_of_channels = ADQAPI.ADQ_GetNofChannels(adq_cu, adq_num)

# Setup size of transfer buffers
print('Setting up streaming...')
ADQAPI.ADQ_SetTransferBuffers(adq_cu, adq_num, num_transfer_buffers, transfer_buffer_size)

# Start streaming.  Stop first — presumably to guarantee a clean restart
# if a previous stream was left running; TODO confirm against the ADQAPI
# reference.
print('Collecting data, please wait...')
ADQAPI.ADQ_StopStreaming(adq_cu, adq_num)
ADQAPI.ADQ_StartStreaming(adq_cu, adq_num)

# Allocate target buffers for intermediate data storage:
# one C array of int16 per channel, addressed through pointers.
target_buffers = (ct.POINTER(ct.c_int16*transfer_buffer_size)*number_of_channels)()
for bufp in target_buffers:
    bufp.contents = (ct.c_int16*transfer_buffer_size)()

# Create some buffers for the full records (grown incrementally with
# np.append in the collection loop; fixed at 4 channels).
data_16bit = [np.array([], dtype=np.int16),
              np.array([], dtype=np.int16),
              np.array([], dtype=np.int16),
              np.array([], dtype=np.int16)]

# Allocate target buffers for headers (HEADER ctypes struct comes from
# modules.example_helpers)
headerbuf_list = [(HEADER*number_of_records)() for ch in range(number_of_channels)]
# Create an C array of pointers to header buffers
headerbufp_list = ((ct.POINTER(HEADER*number_of_records))*number_of_channels)()
# Initiate pointers with allocated header buffers
for ch,headerbufp in enumerate(headerbufp_list):
    headerbufp.contents = headerbuf_list[ch]
# Create a second level pointer to each buffer pointer,
# these will only be used to change the bufferp_list pointer values
headerbufvp_list = [ct.cast(ct.pointer(headerbufp_list[ch]), ct.POINTER(ct.c_void_p)) for ch in range(number_of_channels)]

# Allocate length output variables (one counter slot per channel, fixed
# at 4), zero-initialized.
samples_added = (4*ct.c_uint)()
for ind in range(len(samples_added)):
    samples_added[ind] = 0
headers_added = (4*ct.c_uint)()
for ind in range(len(headers_added)):
    headers_added[ind] = 0
header_status = (4*ct.c_uint)()
for ind in range(len(header_status)):
    header_status[ind] = 0
# Generate triggers if software trig is used (one SWTrig per record)
if (trig_type == 1):
    for trig in range(number_of_records):
        ADQAPI.ADQ_SWTrig(adq_cu, adq_num)

print('Waiting for data...')

# Collect data until all requested records have been received.
records_completed = [0, 0, 0, 0]
headers_completed = [0, 0, 0, 0]
records_completed_cnt = 0   # snapshot used for throughput reporting
ltime = time.time()         # timestamp of the last throughput report
buffers_filled = ct.c_uint(0)

# Read out data until records_completed for ch A is number_of_records
while (number_of_records > records_completed[0]):
    buffers_filled.value = 0
    collect_result = 1
    poll_time_diff_prev = time.time()
    # Wait for next data buffer; poll until a DMA buffer is filled or the
    # status call itself fails (collect_result == 0).
    while ((buffers_filled.value == 0) and (collect_result)):
        collect_result = ADQAPI.ADQ_GetTransferBufferStatus(adq_cu, adq_num,
                                                            ct.byref(buffers_filled))
        poll_time_diff = time.time()
        if ((poll_time_diff - poll_time_diff_prev) > flush_timeout):
            # Force flush: push any partially filled DMA buffer to the host
            # so the last records of a burst are not stuck in the device.
            print('No data for {}s, flushing the DMA buffer.'.format(flush_timeout))
            status = ADQAPI.ADQ_FlushDMA(adq_cu, adq_num);
            print('ADQAPI.ADQ_FlushDMA returned {}'.format(adq_status(status)))
            poll_time_diff_prev = time.time()
    # Fetch data and headers into target buffers
    status = ADQAPI.ADQ_GetDataStreaming(adq_cu, adq_num,
                                         target_buffers,
                                         headerbufp_list,
                                         channels_mask,
                                         ct.byref(samples_added),
                                         ct.byref(headers_added),
                                         ct.byref(header_status))
    if status == 0:
        print('GetDataStreaming failed!')
        sys.exit()
    for ch in range(number_of_channels):
        if (headers_added[ch] > 0):
            # The last call to GetDataStreaming has generated header data.
            # A zero header_status means the final header is incomplete and
            # must not be counted yet.
            if (header_status[ch]):
                headers_done = headers_added[ch]
            else:
                # One incomplete header
                headers_done = headers_added[ch]-1
            # Update counter counting completed records
            headers_completed[ch] += headers_done
            # Update the number of completed records if at least one header has completed
            if (headers_done > 0):
                records_completed[ch] = headerbuf_list[ch][headers_completed[ch]-1].RecordNumber + 1
            # Update header pointer so that it points to the current header
            # (advance by the byte size of the completed headers).
            headerbufvp_list[ch].contents.value += headers_done*ct.sizeof(headerbuf_list[ch]._type_)
            # Report throughput roughly every 1000 completed records.
            if headers_done > 0 and (np.sum(records_completed)-records_completed_cnt) > 1000:
                dtime = time.time()-ltime
                if (dtime > 0):
                    # 2 bytes per 16-bit sample.
                    print('{:d} {:.2f} MB/s'.format(np.sum(records_completed),
                                                    ((samples_per_record
                                                      *2
                                                      *(np.sum(records_completed)-records_completed_cnt))
                                                     /(dtime))/(1024*1024)))
                    sys.stdout.flush()
                records_completed_cnt = np.sum(records_completed)
                ltime = time.time()
        if (samples_added[ch] > 0 and plot_data):
            # Copy channel data to continuous buffer
            data_buf = np.frombuffer(target_buffers[ch].contents, dtype=np.int16, count=samples_added[ch])
            data_16bit[ch] = np.append(data_16bit[ch], data_buf)
    # Progress: completed records on channel A.
    print(records_completed[0])
# Stop streaming
ADQAPI.ADQ_StopStreaming(adq_cu, adq_num)

# Print received headers (one metadata dump per record and channel)
if print_headers:
    for ch in range(max_number_of_channels):
        if number_of_records > 0:
            print('------------------')
            print('Headers channel {}'.format(ch))
            print('------------------')
            for rec in range(number_of_records):
                header = headerbuf_list[ch][rec]
                # Bug fix: this line used to print header.RecordNumber under
                # the 'RecordStatus' label (RecordNumber is printed below).
                # Assumes the HEADER struct in modules.example_helpers has a
                # RecordStatus field — TODO confirm.
                print('RecordStatus: {}'.format(header.RecordStatus))
                print('UserID: {}'.format(header.UserID))
                print('SerialNumber: {}'.format(header.SerialNumber))
                print('Channel: {}'.format(header.Channel))
                print('DataFormat: {}'.format(header.DataFormat))
                print('RecordNumber: {}'.format(header.RecordNumber))
                # Timestamps and periods are in units of 125 ps (0.125 ns).
                print('Timestamp: {} ns'.format(header.Timestamp * 0.125))
                print('RecordStart: {} ns'.format(header.RecordStart * 0.125))
                print('SamplePeriod: {} ns'.format(header.SamplePeriod * 0.125))
                print('RecordLength: {} ns'.format(header.RecordLength * (header.SamplePeriod* 0.125)))
                print('------------------')
# Plot data: one figure per channel with records marked by alternating
# background stripes.
if plot_data:
    for ch in range(max_number_of_channels):
        if number_of_records > 0:
            widths = np.array([], dtype=np.uint32)
            record_end_offset = 0
            # Extract record lengths from headers
            for rec in range(number_of_records):
                header = headerbuf_list[ch][rec]
                if rec > 0:
                    # Bug fix: this was a Python 2 `print` statement, which
                    # is a SyntaxError under the python3 shebang of this
                    # script.  Prints the trigger-to-trigger time in ns.
                    print(header.Timestamp * 0.125
                          - headerbuf_list[ch][rec - 1].Timestamp * 0.125)
                widths = np.append(widths, header.RecordLength)
            # Get new figure
            plt.figure(ch)
            plt.clf()
            # Plot data
            plt.plot(data_16bit[ch].T, '.-')
            # Set window title.  NOTE(review): canvas.set_window_title is
            # deprecated in matplotlib >= 3.4 (use canvas.manager); kept for
            # compatibility with the original environment.
            plt.gcf().canvas.set_window_title('Channel {}'.format(ch))
            # Set grid mode ('major' is the documented value; the original
            # passed 'Major').
            plt.grid(which='major')
            # Mark records in plot
            alternate_background(plt.gca(), 0, widths, labels=True)
            # Show plot
            plt.show()

# Delete ADQ device handle
ADQAPI.ADQControlUnit_DeleteADQ(adq_cu, adq_num)
# Delete ADQControlunit
ADQAPI.DeleteADQControlUnit(adq_cu)

print('Done.')
|
mit
|
caesar2164/edx-platform
|
common/djangoapps/third_party_auth/tests/test_views.py
|
13
|
6457
|
"""
Test the views served by third_party_auth.
"""
import ddt
from lxml import etree
from onelogin.saml2.errors import OneLogin_Saml2_Error
import unittest
from django.conf import settings
from .testutil import AUTH_FEATURE_ENABLED, SAMLTestCase
# Define some XML namespaces:
from third_party_auth.tasks import SAML_XML_NS
XMLDSIG_XML_NS = 'http://www.w3.org/2000/09/xmldsig#'
@unittest.skipUnless(AUTH_FEATURE_ENABLED, 'third_party_auth not enabled')
@ddt.ddt
class SAMLMetadataTest(SAMLTestCase):
    """
    Test the SAML metadata view
    """
    METADATA_URL = '/auth/saml/metadata.xml'

    def test_saml_disabled(self):
        """ When SAML is not enabled, the metadata view should return 404 """
        self.enable_saml(enabled=False)
        response = self.client.get(self.METADATA_URL)
        self.assertEqual(response.status_code, 404)

    def test_metadata(self):
        """ The metadata must advertise the AssertionConsumerService URL """
        self.enable_saml()
        doc = self._fetch_metadata()
        # Check the ACS URL:
        acs_node = doc.find(".//{}".format(etree.QName(SAML_XML_NS, 'AssertionConsumerService')))
        self.assertIsNotNone(acs_node)
        self.assertEqual(acs_node.attrib['Location'], 'http://example.none/auth/complete/tpa-saml/')

    def test_default_contact_info(self):
        """ Without contact config, both contacts use the platform defaults """
        self.enable_saml()
        self.check_metadata_contacts(
            xml=self._fetch_metadata(),
            tech_name=u"{} Support".format(settings.PLATFORM_NAME),
            tech_email="technical@example.com",
            support_name=u"{} Support".format(settings.PLATFORM_NAME),
            support_email="technical@example.com"
        )

    def test_custom_contact_info(self):
        """ TECHNICAL_CONTACT / SUPPORT_CONTACT config overrides the defaults """
        self.enable_saml(
            other_config_str=(
                '{'
                '"TECHNICAL_CONTACT": {"givenName": "Jane Tech", "emailAddress": "jane@example.com"},'
                '"SUPPORT_CONTACT": {"givenName": "Joe Support", "emailAddress": "joe@example.com"}'
                '}'
            )
        )
        self.check_metadata_contacts(
            xml=self._fetch_metadata(),
            tech_name="Jane Tech",
            tech_email="jane@example.com",
            support_name="Joe Support",
            support_email="joe@example.com"
        )

    @ddt.data(
        # Test two slightly different key pair export formats
        ('saml_key', 'MIICsDCCAhmgAw'),
        ('saml_key_alt', 'MIICWDCCAcGgAw'),
    )
    @ddt.unpack
    def test_signed_metadata(self, key_name, pub_key_starts_with):
        """ Signed metadata must embed a signature made with the configured key pair """
        self.enable_saml(
            private_key=self._get_private_key(key_name),
            public_key=self._get_public_key(key_name),
            other_config_str='{"SECURITY_CONFIG": {"signMetadata": true} }',
        )
        self._validate_signed_metadata(pub_key_starts_with=pub_key_starts_with)

    def test_secure_key_configuration(self):
        """ Test that the SAML private key can be stored in Django settings and not the DB """
        self.enable_saml(
            public_key='',
            private_key='',
            other_config_str='{"SECURITY_CONFIG": {"signMetadata": true} }',
        )
        # With no key in the DB and none in settings, signing must fail:
        with self.assertRaises(OneLogin_Saml2_Error):
            self._fetch_metadata()  # OneLogin_Saml2_Error: Cannot sign metadata: missing SP private key.
        # With the key pair supplied via Django settings, signing succeeds:
        with self.settings(
            SOCIAL_AUTH_SAML_SP_PRIVATE_KEY=self._get_private_key('saml_key'),
            SOCIAL_AUTH_SAML_SP_PUBLIC_CERT=self._get_public_key('saml_key'),
        ):
            self._validate_signed_metadata()

    def _validate_signed_metadata(self, pub_key_starts_with='MIICsDCCAhmgAw'):
        """ Fetch the SAML metadata and do some validation """
        doc = self._fetch_metadata()
        sig_node = doc.find(".//{}".format(etree.QName(XMLDSIG_XML_NS, 'SignatureValue')))
        self.assertIsNotNone(sig_node)
        # Check that the right public key was used:
        pub_key_node = doc.find(".//{}".format(etree.QName(XMLDSIG_XML_NS, 'X509Certificate')))
        self.assertIsNotNone(pub_key_node)
        self.assertIn(pub_key_starts_with, pub_key_node.text)

    def _fetch_metadata(self):
        """ Fetch and parse the metadata XML at self.METADATA_URL """
        response = self.client.get(self.METADATA_URL)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'text/xml')
        # The result should be valid XML:
        try:
            metadata_doc = etree.fromstring(response.content)
        except etree.LxmlError:
            self.fail('SAML metadata must be valid XML')
        self.assertEqual(metadata_doc.tag, etree.QName(SAML_XML_NS, 'EntityDescriptor'))
        return metadata_doc

    def check_metadata_contacts(self, xml, tech_name, tech_email, support_name, support_email):
        """ Validate that the contact info in the metadata has the expected values """
        technical_node = xml.find(".//{}[@contactType='technical']".format(etree.QName(SAML_XML_NS, 'ContactPerson')))
        self.assertIsNotNone(technical_node)
        tech_name_node = technical_node.find(etree.QName(SAML_XML_NS, 'GivenName'))
        self.assertEqual(tech_name_node.text, tech_name)
        tech_email_node = technical_node.find(etree.QName(SAML_XML_NS, 'EmailAddress'))
        self.assertEqual(tech_email_node.text, tech_email)
        support_node = xml.find(".//{}[@contactType='support']".format(etree.QName(SAML_XML_NS, 'ContactPerson')))
        self.assertIsNotNone(support_node)
        support_name_node = support_node.find(etree.QName(SAML_XML_NS, 'GivenName'))
        self.assertEqual(support_name_node.text, support_name)
        support_email_node = support_node.find(etree.QName(SAML_XML_NS, 'EmailAddress'))
        self.assertEqual(support_email_node.text, support_email)
@unittest.skipUnless(AUTH_FEATURE_ENABLED, 'third_party_auth not enabled')
class SAMLAuthTest(SAMLTestCase):
    """
    Test the SAML auth views
    """
    LOGIN_URL = '/auth/login/tpa-saml/'

    def _login_status(self):
        """Issue a GET against the SAML login endpoint; return the HTTP status."""
        return self.client.get(self.LOGIN_URL).status_code

    def test_login_without_idp(self):
        """ Accessing the login endpoint without an idp query param should return 302 """
        self.enable_saml()
        self.assertEqual(self._login_status(), 302)

    def test_login_disabled(self):
        """ When SAML is not enabled, the login view should return 404 """
        self.enable_saml(enabled=False)
        self.assertEqual(self._login_status(), 404)
|
agpl-3.0
|
ytanay/thinglang
|
setup.py
|
1
|
1198
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# To use a consistent encoding
from codecs import open
from os import path
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# Resolve paths relative to this setup script.
here = path.abspath(path.dirname(__file__))

# Use the README as the long package description shown on PyPI.
readme_path = path.join(here, 'README.md')
with open(readme_path, encoding='utf-8') as readme_file:
    long_description = readme_file.read()

setup(
    name='thinglang',
    version='0.0.0',
    description='Yet another general purpose programming language',
    long_description=long_description,
    url='https://github.com/ytanay/thinglang',
    author='Yotam Tanay',
    author_email='yotam@yotamtanay.com',
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
    ],
    packages=find_packages(exclude=['docs', 'tests']),
    package_data={'': ['*.thingsymbols']},
    include_package_data=True,
)
|
mit
|
Lujeni/ansible
|
lib/ansible/modules/network/cloudengine/ce_vrf.py
|
13
|
10999
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_vrf
version_added: "2.4"
short_description: Manages VPN instance on HUAWEI CloudEngine switches.
description:
- Manages VPN instance of HUAWEI CloudEngine switches.
author: Yang yang (@QijunPan)
notes:
- If I(state=absent), the route will be removed, regardless of the non-required options.
- This module requires the netconf system service be enabled on the remote device being managed.
- Recommended connection is C(netconf).
- This module also works with C(local) connections for legacy playbooks.
options:
vrf:
description:
- VPN instance, the length of vrf name is 1 - 31, i.e. "test", but can not be C(_public_).
required: true
description:
description:
- Description of the vrf, the string length is 1 - 242 .
state:
description:
- Manage the state of the resource.
choices: ['present','absent']
default: present
'''
EXAMPLES = '''
- name: vrf module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Config a vpn install named vpna, description is test
ce_vrf:
vrf: vpna
description: test
state: present
provider: "{{ cli }}"
- name: Delete a vpn install named vpna
ce_vrf:
vrf: vpna
state: absent
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"vrf": "vpna",
"description": "test",
"state": "present"}
existing:
description: k/v pairs of existing switchport
returned: always
type: dict
sample: {}
end_state:
description: k/v pairs of switchport after module execution
returned: always
type: dict
sample: {"vrf": "vpna",
"description": "test",
"present": "present"}
updates:
description: command list sent to the device
returned: always
type: list
sample: ["ip vpn-instance vpna",
"description test"]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec
CE_NC_GET_VRF = """
<filter type="subtree">
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName></vrfName>
<vrfDescription></vrfDescription>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
</filter>
"""
CE_NC_CREATE_VRF = """
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance operation="merge">
<vrfName>%s</vrfName>
<vrfDescription>%s</vrfDescription>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
"""
CE_NC_DELETE_VRF = """
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance operation="delete">
<vrfName>%s</vrfName>
<vrfDescription>%s</vrfDescription>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
"""
def build_config_xml(xmlstr):
    """Wrap a netconf payload fragment in a ``<config>`` element."""
    return '<config> {} </config>'.format(xmlstr)
class Vrf(object):
    """Manage a VPN instance (VRF) on a HUAWEI CloudEngine switch via netconf."""

    def __init__(self, argument_spec, ):
        self.spec = argument_spec
        self.module = None
        self.init_module()

        # vpn instance info (module parameters)
        self.vrf = self.module.params['vrf']
        self.description = self.module.params['description']
        self.state = self.module.params['state']

        # state accumulated while the module runs
        self.changed = False
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()

    def init_module(self):
        """Instantiate the AnsibleModule (check mode is supported)."""
        self.module = AnsibleModule(
            argument_spec=self.spec, supports_check_mode=True)

    def check_response(self, xml_str, xml_name):
        """Fail the module unless the netconf reply contains <ok/>."""
        if "<ok/>" not in xml_str:
            self.module.fail_json(msg='Error: %s failed.' % xml_name)

    def set_update_cmd(self):
        """Record the CLI equivalents of the applied change in updates_cmd."""
        if not self.changed:
            return
        if self.state == "present":
            self.updates_cmd.append('ip vpn-instance %s' % (self.vrf))
            if self.description:
                self.updates_cmd.append('description %s' % (self.description))
        else:
            self.updates_cmd.append('undo ip vpn-instance %s' % (self.vrf))

    def get_vrf(self):
        """Query the device; return True when a config change is required."""
        getxmlstr = CE_NC_GET_VRF
        xml_str = get_nc_config(self.module, getxmlstr)
        # Strip newlines and namespace declarations so plain (un-namespaced)
        # ElementTree paths can be used below.
        xml_str = xml_str.replace('\r', '').replace('\n', '').\
            replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
            replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
        root = ElementTree.fromstring(xml_str)
        vpn_instances = root.findall(
            "l3vpn/l3vpncomm/l3vpnInstances/l3vpnInstance")
        if vpn_instances:
            for vpn_instance in vpn_instances:
                if vpn_instance.find('vrfName').text == self.vrf:
                    if vpn_instance.find('vrfDescription').text == self.description:
                        # Name and description both match: a change is only
                        # needed when the request is to remove the VRF.
                        return self.state != "present"
                    else:
                        # Name exists with a different description.
                        return True
            # The requested VRF is not configured on the device.
            return self.state == "present"
        else:
            return self.state == "present"

    def check_params(self):
        """Validate the vrf name and description parameters."""
        # vrf and description check
        if self.vrf == '_public_':
            self.module.fail_json(
                msg='Error: The vrf name _public_ is reserved.')
        if len(self.vrf) < 1 or len(self.vrf) > 31:
            # Bug fix: the message used to say "between 1 and 242" (copied
            # from the description check); the valid name range is 1 - 31.
            self.module.fail_json(
                msg='Error: The vrf name length must between 1 and 31.')
        if self.description:
            if len(self.description) < 1 or len(self.description) > 242:
                self.module.fail_json(
                    msg='Error: The vrf description length must between 1 and 242.')

    def operate_vrf(self):
        """Send the create/merge or delete netconf payload when changed."""
        if not self.changed:
            return
        if self.state == "present":
            if self.description is None:
                configxmlstr = CE_NC_CREATE_VRF % (self.vrf, '')
            else:
                configxmlstr = CE_NC_CREATE_VRF % (self.vrf, self.description)
        else:
            configxmlstr = CE_NC_DELETE_VRF % (self.vrf, self.description)

        conf_str = build_config_xml(configxmlstr)
        recv_xml = set_nc_config(self.module, conf_str)
        self.check_response(recv_xml, "OPERATE_VRF")

    def get_proposed(self):
        """Populate self.proposed from the requested parameters."""
        if self.state == 'present':
            self.proposed['vrf'] = self.vrf
            if self.description:
                self.proposed['description'] = self.description
        else:
            self.proposed = dict()
        self.proposed['state'] = self.state

    def get_existing(self):
        """Populate self.existing from the device and set self.changed."""
        change = self.get_vrf()
        if change:
            if self.state == 'present':
                # VRF missing (or different) while it should exist.
                self.existing = dict()
            else:
                self.existing['vrf'] = self.vrf
                if self.description:
                    self.existing['description'] = self.description
            self.changed = True
        else:
            if self.state == 'absent':
                self.existing = dict()
            else:
                self.existing['vrf'] = self.vrf
                if self.description:
                    self.existing['description'] = self.description
            self.changed = False

    def get_end_state(self):
        """Populate self.end_state by re-querying the device after the change."""
        change = self.get_vrf()
        if not change:
            if self.state == 'present':
                self.end_state['vrf'] = self.vrf
                if self.description:
                    self.end_state['description'] = self.description
            else:
                self.end_state = dict()
        else:
            if self.state == 'present':
                self.end_state = dict()
            else:
                self.end_state['vrf'] = self.vrf
                if self.description:
                    self.end_state['description'] = self.description

    def work(self):
        """Run the full validate/query/apply/report cycle and exit the module."""
        self.check_params()
        self.get_existing()
        self.get_proposed()
        self.operate_vrf()
        self.set_update_cmd()
        self.get_end_state()
        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()
        self.module.exit_json(**self.results)
def main():
    """Module entry point: build the argument spec and run the VRF worker."""
    argument_spec = dict(
        vrf=dict(required=True, type='str'),
        description=dict(required=False, type='str'),
        state=dict(choices=['absent', 'present'],
                   default='present', required=False),
    )
    # Merge in the common CloudEngine connection arguments.
    argument_spec.update(ce_argument_spec)
    Vrf(argument_spec).work()


if __name__ == '__main__':
    main()
|
gpl-3.0
|
SkyLined/headsup
|
decode/GIF_EXTENSION_COMMENT.py
|
1
|
1171
|
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Structure import Structure;
# http://www.w3.org/Graphics/GIF/spec-gif89a.txt
class GIF_EXTENSION_COMMENT(Structure):
    """GIF 89a comment extension: data sub-blocks carrying a text comment."""
    type_name = 'COMMENT_EXTENSION'

    def __init__(self, stream, offset, max_size, parent, name):
        # Imported lazily, matching the project's per-structure import style.
        import C
        from GIF_BLOCK import GIF_BLOCK
        Structure.__init__(self, stream, offset, max_size, parent, name)
        self._data = self.Member(GIF_BLOCK, 'data')
        self._comment = self._data.ContainMember(
            C.STRING, 'comment', self._data.contained_current_max_size)
        self._data.ContainUnused()  # Should always be 0.
|
apache-2.0
|
tudorvio/nova
|
nova/virt/hyperv/vmutils.py
|
18
|
32674
|
# Copyright (c) 2010 Cloud.com, Inc
# Copyright 2012 Cloudbase Solutions Srl / Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VM related operations on Hyper-V.
"""
import sys
import time
import uuid
if sys.platform == 'win32':
import wmi
from oslo_config import cfg
from oslo_log import log as logging
import six
from six.moves import range
from nova import exception
from nova.i18n import _, _LW
from nova.virt.hyperv import constants
from nova.virt.hyperv import hostutils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# TODO(alexpilotti): Move the exceptions to a separate module
# TODO(alexpilotti): Add more domain exceptions
class HyperVException(exception.NovaException):
    """Base exception for errors raised by the Hyper-V driver."""
    def __init__(self, message=None):
        super(HyperVException, self).__init__(message)
# TODO(alexpilotti): Add a storage exception base class
class VHDResizeException(HyperVException):
    """Raised when a VHD resize operation fails."""
    def __init__(self, message=None):
        # Bug fix: the original called super(HyperVException, self), which
        # names the parent instead of this class and therefore skips
        # HyperVException.__init__ in the MRO.
        super(VHDResizeException, self).__init__(message)
class HyperVAuthorizationException(HyperVException):
    """Raised when the nova-compute account lacks Hyper-V permissions."""
    def __init__(self, message=None):
        # Bug fix: the original called super(HyperVException, self), which
        # skips HyperVException.__init__ in the MRO; super() must name the
        # class it is called from.
        super(HyperVAuthorizationException, self).__init__(message)
class UnsupportedConfigDriveFormatException(HyperVException):
    """Raised when the requested config drive format is not supported."""
    def __init__(self, message=None):
        # Bug fix: the original called super(HyperVException, self), which
        # skips HyperVException.__init__ in the MRO; super() must name the
        # class it is called from.
        super(UnsupportedConfigDriveFormatException, self).__init__(message)
class VMUtils(object):
    """Utility wrapper around the Hyper-V root/virtualization WMI namespace."""

    # These constants can be overridden by inherited classes

    # WMI ResourceSubType identifiers for drive/disk/controller resources:
    _PHYS_DISK_RES_SUB_TYPE = 'Microsoft Physical Disk Drive'
    _DISK_DRIVE_RES_SUB_TYPE = 'Microsoft Synthetic Disk Drive'
    _DVD_DRIVE_RES_SUB_TYPE = 'Microsoft Synthetic DVD Drive'
    _HARD_DISK_RES_SUB_TYPE = 'Microsoft Virtual Hard Disk'
    _DVD_DISK_RES_SUB_TYPE = 'Microsoft Virtual CD/DVD Disk'
    _IDE_CTRL_RES_SUB_TYPE = 'Microsoft Emulated IDE Controller'
    _SCSI_CTRL_RES_SUB_TYPE = 'Microsoft Synthetic SCSI Controller'
    _SERIAL_PORT_RES_SUB_TYPE = 'Microsoft Serial Port'

    # WMI class names used for associator queries:
    _SETTINGS_DEFINE_STATE_CLASS = 'Msvm_SettingsDefineState'
    _VIRTUAL_SYSTEM_SETTING_DATA_CLASS = 'Msvm_VirtualSystemSettingData'
    _RESOURCE_ALLOC_SETTING_DATA_CLASS = 'Msvm_ResourceAllocationSettingData'
    _PROCESSOR_SETTING_DATA_CLASS = 'Msvm_ProcessorSettingData'
    _MEMORY_SETTING_DATA_CLASS = 'Msvm_MemorySettingData'
    _STORAGE_ALLOC_SETTING_DATA_CLASS = _RESOURCE_ALLOC_SETTING_DATA_CLASS
    _SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS = \
        'Msvm_SyntheticEthernetPortSettingData'
    _AFFECTED_JOB_ELEMENT_CLASS = "Msvm_AffectedJobElement"
    _SHUTDOWN_COMPONENT = "Msvm_ShutdownComponent"

    # SettingType value selecting the realized (non-snapshot) settings.
    _VIRTUAL_SYSTEM_CURRENT_SETTINGS = 3
    _AUTOMATIC_STARTUP_ACTION_NONE = 0

    # Map nova power states to Hyper-V EnabledState request codes.
    _vm_power_states_map = {constants.HYPERV_VM_STATE_ENABLED: 2,
                            constants.HYPERV_VM_STATE_DISABLED: 3,
                            constants.HYPERV_VM_STATE_SHUTTING_DOWN: 4,
                            constants.HYPERV_VM_STATE_REBOOT: 10,
                            constants.HYPERV_VM_STATE_PAUSED: 32768,
                            constants.HYPERV_VM_STATE_SUSPENDED: 32769}
    def __init__(self, host='.'):
        """Initialize WMI connections for `host` ('.' = local machine)."""
        # Reverse map: Hyper-V numeric EnabledState -> nova power state name.
        self._enabled_states_map = {v: k for k, v in
                                    six.iteritems(self._vm_power_states_map)}
        # The wmi module only exists on Windows; skip connections elsewhere
        # (unit tests on other platforms construct the object without them).
        if sys.platform == 'win32':
            self._init_hyperv_wmi_conn(host)
            self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)

        # On version of Hyper-V prior to 2012 trying to directly set properties
        # in default setting data WMI objects results in an exception
        self._clone_wmi_objs = False
        if sys.platform == 'win32':
            hostutls = hostutils.HostUtils()
            # Windows 6.2 == Server 2012; older hosts need object cloning.
            self._clone_wmi_objs = not hostutls.check_min_windows_version(6, 2)
    def _init_hyperv_wmi_conn(self, host):
        """Open the Hyper-V virtualization WMI namespace on `host`."""
        self._conn = wmi.WMI(moniker='//%s/root/virtualization' % host)
def list_instance_notes(self):
instance_notes = []
for vs in self._conn.Msvm_VirtualSystemSettingData(
['ElementName', 'Notes'],
SettingType=self._VIRTUAL_SYSTEM_CURRENT_SETTINGS):
if vs.Notes is not None:
instance_notes.append(
(vs.ElementName, [v for v in vs.Notes.split('\n') if v]))
return instance_notes
def list_instances(self):
"""Return the names of all the instances known to Hyper-V."""
return [v.ElementName for v in
self._conn.Msvm_VirtualSystemSettingData(
['ElementName'],
SettingType=self._VIRTUAL_SYSTEM_CURRENT_SETTINGS)]
def get_vm_summary_info(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vmsettings = vm.associators(
wmi_association_class=self._SETTINGS_DEFINE_STATE_CLASS,
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
settings_paths = [v.path_() for v in vmsettings]
# See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
(ret_val, summary_info) = vs_man_svc.GetSummaryInformation(
[constants.VM_SUMMARY_NUM_PROCS,
constants.VM_SUMMARY_ENABLED_STATE,
constants.VM_SUMMARY_MEMORY_USAGE,
constants.VM_SUMMARY_UPTIME],
settings_paths)
if ret_val:
raise HyperVException(_('Cannot get VM summary data for: %s')
% vm_name)
si = summary_info[0]
memory_usage = None
if si.MemoryUsage is not None:
memory_usage = long(si.MemoryUsage)
up_time = None
if si.UpTime is not None:
up_time = long(si.UpTime)
# Nova requires a valid state to be returned. Hyper-V has more
# states than Nova, typically intermediate ones and since there is
# no direct mapping for those, ENABLED is the only reasonable option
# considering that in all the non mappable states the instance
# is running.
enabled_state = self._enabled_states_map.get(si.EnabledState,
constants.HYPERV_VM_STATE_ENABLED)
summary_info_dict = {'NumberOfProcessors': si.NumberOfProcessors,
'EnabledState': enabled_state,
'MemoryUsage': memory_usage,
'UpTime': up_time}
return summary_info_dict
def _lookup_vm_check(self, vm_name):
vm = self._lookup_vm(vm_name)
if not vm:
raise exception.NotFound(_('VM not found: %s') % vm_name)
return vm
def _lookup_vm(self, vm_name):
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
n = len(vms)
if n == 0:
return None
elif n > 1:
raise HyperVException(_('Duplicate VM name found: %s') % vm_name)
else:
return vms[0]
def vm_exists(self, vm_name):
return self._lookup_vm(vm_name) is not None
def get_vm_id(self, vm_name):
vm = self._lookup_vm_check(vm_name)
return vm.Name
def _get_vm_setting_data(self, vm):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
# Avoid snapshots
return [s for s in vmsettings if s.SettingType == 3][0]
def _set_vm_memory(self, vm, vmsetting, memory_mb, dynamic_memory_ratio):
    """Configure the VM memory limit/reservation via the WMI setting object.

    A ratio > 1 enables dynamic memory with a reduced reservation; the VM
    boots with the reservation amount (VirtualQuantity).
    """
    mem_settings = vmsetting.associators(
        wmi_result_class=self._MEMORY_SETTING_DATA_CLASS)[0]
    max_mem = long(memory_mb)
    mem_settings.Limit = max_mem
    if dynamic_memory_ratio > 1:
        mem_settings.DynamicMemoryEnabled = True
        # Must be a multiple of 2
        reserved_mem = min(
            long(max_mem / dynamic_memory_ratio) >> 1 << 1,
            max_mem)
    else:
        mem_settings.DynamicMemoryEnabled = False
        reserved_mem = max_mem
    mem_settings.Reservation = reserved_mem
    # Start with the minimum memory
    mem_settings.VirtualQuantity = reserved_mem
    self._modify_virt_resource(mem_settings, vm.path_())
def _set_vm_vcpus(self, vm, vmsetting, vcpus_num, limit_cpu_features):
    """Configure the VM's virtual processor count and CPU feature limiting."""
    procsetting = vmsetting.associators(
        wmi_result_class=self._PROCESSOR_SETTING_DATA_CLASS)[0]
    vcpus = long(vcpus_num)
    procsetting.VirtualQuantity = vcpus
    procsetting.Reservation = vcpus
    procsetting.Limit = 100000  # static assignment to 100%
    procsetting.LimitProcessorFeatures = limit_cpu_features
    self._modify_virt_resource(procsetting, vm.path_())
def update_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features,
              dynamic_memory_ratio):
    """Apply new memory and vCPU settings to an existing VM."""
    vm = self._lookup_vm_check(vm_name)
    settings = self._get_vm_setting_data(vm)
    self._set_vm_memory(vm, settings, memory_mb, dynamic_memory_ratio)
    self._set_vm_vcpus(vm, settings, vcpus_num, limit_cpu_features)
def check_admin_permissions(self):
    """Verify the current account can reach the VM management service.

    :raises HyperVAuthorizationException: when the service is inaccessible.
    """
    if self._conn.Msvm_VirtualSystemManagementService():
        return
    msg = _("The Windows account running nova-compute on this Hyper-V"
            " host doesn't have the required permissions to create or"
            " operate the virtual machine.")
    raise HyperVAuthorizationException(msg)
def create_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features,
              dynamic_memory_ratio, vm_gen, instance_path, notes=None):
    """Creates a VM and configures its memory and vCPU settings."""
    vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
    LOG.debug('Creating VM %s', vm_name)
    vm = self._create_vm_obj(vs_man_svc, vm_name, vm_gen, notes,
                             dynamic_memory_ratio, instance_path)
    vmsetting = self._get_vm_setting_data(vm)
    LOG.debug('Setting memory for vm %s', vm_name)
    self._set_vm_memory(vm, vmsetting, memory_mb, dynamic_memory_ratio)
    LOG.debug('Set vCPUs for vm %s', vm_name)
    self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features)
def _create_vm_obj(self, vs_man_svc, vm_name, vm_gen, notes,
                   dynamic_memory_ratio, instance_path):
    """Define a new virtual system and return its WMI object.

    NOTE(review): vm_gen and dynamic_memory_ratio are accepted but unused
    here — presumably for signature parity with other WMI API versions;
    confirm against the caller.
    """
    vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
    vs_gs_data.ElementName = vm_name
    # Don't start automatically on host boot
    vs_gs_data.AutomaticStartupAction = self._AUTOMATIC_STARTUP_ACTION_NONE
    vs_gs_data.ExternalDataRoot = instance_path
    vs_gs_data.SnapshotDataRoot = instance_path
    (vm_path,
     job_path,
     ret_val) = vs_man_svc.DefineVirtualSystem([], None,
                                               vs_gs_data.GetText_(1))
    self.check_ret_val(ret_val, job_path)
    vm = self._get_wmi_obj(vm_path)
    if notes:
        vmsetting = self._get_vm_setting_data(vm)
        vmsetting.Notes = '\n'.join(notes)
        self._modify_virtual_system(vs_man_svc, vm_path, vmsetting)
    # Re-fetch so the returned object reflects any note modification.
    return self._get_wmi_obj(vm_path)
def _modify_virtual_system(self, vs_man_svc, vm_path, vmsetting):
    """Push updated system settings to the VM and wait for completion."""
    # [1:] drops the first element of the returned tuple, keeping
    # (job_path, ret_val).
    (job_path, ret_val) = vs_man_svc.ModifyVirtualSystem(
        ComputerSystem=vm_path,
        SystemSettingData=vmsetting.GetText_(1))[1:]
    self.check_ret_val(ret_val, job_path)
def get_vm_scsi_controller(self, vm_name):
    """Return the WMI path of the named VM's SCSI controller."""
    found = self._lookup_vm_check(vm_name)
    return self._get_vm_scsi_controller(found)
def _get_vm_scsi_controller(self, vm):
    """Return the WMI path of the first SCSI controller resource of *vm*."""
    vmsettings = vm.associators(
        wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
    rasds = vmsettings[0].associators(
        wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
    res = [r for r in rasds
           if r.ResourceSubType == self._SCSI_CTRL_RES_SUB_TYPE][0]
    return res.path_()
def _get_vm_ide_controller(self, vm, ctrller_addr):
    """Return the WMI path of the IDE controller at *ctrller_addr*, or None."""
    vmsettings = vm.associators(
        wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
    rasds = vmsettings[0].associators(
        wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
    ide_ctrls = [r for r in rasds
                 if r.ResourceSubType == self._IDE_CTRL_RES_SUB_TYPE
                 and r.Address == str(ctrller_addr)]
    return ide_ctrls[0].path_() if ide_ctrls else None
def get_vm_ide_controller(self, vm_name, ctrller_addr):
    """Return the IDE controller path at *ctrller_addr* for the named VM."""
    vm = self._lookup_vm_check(vm_name)
    return self._get_vm_ide_controller(vm, ctrller_addr)
def get_attached_disks(self, scsi_controller_path):
    """Return the disk resources attached to the given controller."""
    query = self._get_attached_disks_query_string(scsi_controller_path)
    return self._conn.query(query)
def _get_attached_disks_query_string(self, scsi_controller_path):
return ("SELECT * FROM %(class_name)s WHERE ("
"ResourceSubType='%(res_sub_type)s' OR "
"ResourceSubType='%(res_sub_type_virt)s') AND "
"Parent='%(parent)s'" % {
'class_name': self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
'res_sub_type': self._PHYS_DISK_RES_SUB_TYPE,
'res_sub_type_virt': self._DISK_DRIVE_RES_SUB_TYPE,
'parent': scsi_controller_path.replace("'", "''")})
def _get_new_setting_data(self, class_name):
    """Return a fresh setting-data object cloned from the class default."""
    obj = self._conn.query("SELECT * FROM %s WHERE InstanceID "
                           "LIKE '%%\\Default'" % class_name)[0]
    return self._check_clone_wmi_obj(class_name, obj)
def _get_new_resource_setting_data(self, resource_sub_type,
                                   class_name=None):
    """Return a fresh resource setting object of the given sub-type,
    cloned from the WMI default instance.
    """
    if class_name is None:
        class_name = self._RESOURCE_ALLOC_SETTING_DATA_CLASS
    obj = self._conn.query("SELECT * FROM %(class_name)s "
                           "WHERE ResourceSubType = "
                           "'%(res_sub_type)s' AND "
                           "InstanceID LIKE '%%\\Default'" %
                           {"class_name": class_name,
                            "res_sub_type": resource_sub_type})[0]
    return self._check_clone_wmi_obj(class_name, obj)
def _check_clone_wmi_obj(self, class_name, obj):
if self._clone_wmi_objs:
return self._clone_wmi_obj(class_name, obj)
else:
return obj
def _clone_wmi_obj(self, class_name, obj):
    """Return a new instance of *class_name* with *obj*'s property values."""
    wmi_class = getattr(self._conn, class_name)
    new_obj = wmi_class.new()
    # Copy the properties from the original.
    for prop in obj._properties:
        value = obj.Properties_.Item(prop).Value
        new_obj.Properties_.Item(prop).Value = value
    return new_obj
def attach_scsi_drive(self, vm_name, path, drive_type=constants.DISK):
    """Attach *path* to the first free slot of the VM's SCSI controller."""
    vm = self._lookup_vm_check(vm_name)
    ctrller_path = self._get_vm_scsi_controller(vm)
    drive_addr = self.get_free_controller_slot(ctrller_path)
    self.attach_drive(vm_name, path, ctrller_path, drive_addr, drive_type)
def attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
                     drive_type=constants.DISK):
    """Attach *path* at a given IDE controller/slot address of the VM."""
    vm = self._lookup_vm_check(vm_name)
    ctrller_path = self._get_vm_ide_controller(vm, ctrller_addr)
    self.attach_drive(vm_name, path, ctrller_path, drive_addr, drive_type)
def attach_drive(self, vm_name, path, ctrller_path, drive_addr,
                 drive_type=constants.DISK):
    """Create a drive and attach it to the vm.

    :param vm_name: name of the target VM.
    :param path: host path of the image/device to connect.
    :param ctrller_path: WMI path of the controller to attach to.
    :param drive_addr: slot address on the controller.
    :param drive_type: constants.DISK or constants.DVD.
    :raises HyperVException: for an unsupported drive type (previously an
        unsupported type crashed later with UnboundLocalError).
    """
    vm = self._lookup_vm_check(vm_name)
    if drive_type == constants.DISK:
        drive_sub_type = self._DISK_DRIVE_RES_SUB_TYPE
        disk_sub_type = self._HARD_DISK_RES_SUB_TYPE
    elif drive_type == constants.DVD:
        drive_sub_type = self._DVD_DRIVE_RES_SUB_TYPE
        disk_sub_type = self._DVD_DISK_RES_SUB_TYPE
    else:
        raise HyperVException(_('Unsupported drive type: %s') % drive_type)
    drive = self._get_new_resource_setting_data(drive_sub_type)
    # Set the ctrller as parent.
    drive.Parent = ctrller_path
    drive.Address = drive_addr
    # Add the cloned disk drive object to the vm.
    new_resources = self._add_virt_resource(drive, vm.path_())
    drive_path = new_resources[0]
    res = self._get_new_resource_setting_data(disk_sub_type)
    # Set the new drive as the parent.
    res.Parent = drive_path
    res.Connection = [path]
    # Add the new vhd object as a virtual hard disk to the vm.
    self._add_virt_resource(res, vm.path_())
def create_scsi_controller(self, vm_name):
    """Create an iscsi controller ready to mount volumes."""
    vm = self._lookup_vm_check(vm_name)
    scsicontrl = self._get_new_resource_setting_data(
        self._SCSI_CTRL_RES_SUB_TYPE)
    # Each controller needs a unique identifier.
    scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
    self._add_virt_resource(scsicontrl, vm.path_())
def attach_volume_to_controller(self, vm_name, controller_path, address,
                                mounted_disk_path):
    """Attach a volume to a controller."""
    vm = self._lookup_vm_check(vm_name)
    diskdrive = self._get_new_resource_setting_data(
        self._PHYS_DISK_RES_SUB_TYPE)
    diskdrive.Address = address
    diskdrive.Parent = controller_path
    diskdrive.HostResource = [mounted_disk_path]
    self._add_virt_resource(diskdrive, vm.path_())
def _get_disk_resource_address(self, disk_resource):
return disk_resource.Address
def set_disk_host_resource(self, vm_name, controller_path, address,
                           mounted_disk_path):
    """Point the disk at (controller, address) to a new host resource.

    Scans both virtual-disk and passthrough-volume resources; logs a
    warning when no disk occupies the given slot.
    """
    disk_found = False
    vm = self._lookup_vm_check(vm_name)
    (disk_resources, volume_resources) = self._get_vm_disks(vm)
    for disk_resource in disk_resources + volume_resources:
        if (disk_resource.Parent == controller_path and
                self._get_disk_resource_address(disk_resource) ==
                str(address)):
            # Only rewrite when the resource actually points elsewhere.
            if (disk_resource.HostResource and
                    disk_resource.HostResource[0] != mounted_disk_path):
                LOG.debug('Updating disk host resource "%(old)s" to '
                          '"%(new)s"' %
                          {'old': disk_resource.HostResource[0],
                           'new': mounted_disk_path})
                disk_resource.HostResource = [mounted_disk_path]
                self._modify_virt_resource(disk_resource, vm.path_())
            disk_found = True
            break
    if not disk_found:
        LOG.warning(_LW('Disk not found on controller '
                        '"%(controller_path)s" with '
                        'address "%(address)s"'),
                    {'controller_path': controller_path,
                     'address': address})
def set_nic_connection(self, vm_name, nic_name, vswitch_conn_data):
    """Connect the named NIC of the VM to the given vswitch port."""
    nic_data = self._get_nic_data_by_name(nic_name)
    nic_data.Connection = [vswitch_conn_data]
    vm = self._lookup_vm_check(vm_name)
    self._modify_virt_resource(nic_data, vm.path_())
def _get_nic_data_by_name(self, name):
    """Return the synthetic NIC setting data whose ElementName is *name*."""
    return self._conn.Msvm_SyntheticEthernetPortSettingData(
        ElementName=name)[0]
def create_nic(self, vm_name, nic_name, mac_address):
    """Create a (synthetic) nic and attach it to the vm."""
    # Create a new nic
    new_nic_data = self._get_new_setting_data(
        self._SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS)
    # Configure the nic
    new_nic_data.ElementName = nic_name
    # Hyper-V expects the MAC without separators.
    new_nic_data.Address = mac_address.replace(':', '')
    new_nic_data.StaticMacAddress = 'True'
    new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
    # Add the new nic to the vm
    vm = self._lookup_vm_check(vm_name)
    self._add_virt_resource(new_nic_data, vm.path_())
def soft_shutdown_vm(self, vm_name):
    """Ask the guest OS (via integration services) to shut down cleanly."""
    vm = self._lookup_vm_check(vm_name)
    shutdown_component = vm.associators(
        wmi_result_class=self._SHUTDOWN_COMPONENT)
    if not shutdown_component:
        # If no shutdown_component is found, it means the VM is already
        # in a shutdown state.
        return
    reason = 'Soft shutdown requested by OpenStack Nova.'
    (ret_val, ) = shutdown_component[0].InitiateShutdown(Force=False,
                                                         Reason=reason)
    self.check_ret_val(ret_val, None)
def set_vm_state(self, vm_name, req_state):
    """Set the desired state of the VM."""
    vm = self._lookup_vm_check(vm_name)
    (job_path,
     ret_val) = vm.RequestStateChange(self._vm_power_states_map[req_state])
    # Invalid state for current operation (32775) typically means that
    # the VM is already in the state requested
    self.check_ret_val(ret_val, job_path, [0, 32775])
    LOG.debug("Successfully changed vm state of %(vm_name)s "
              "to %(req_state)s",
              {'vm_name': vm_name, 'req_state': req_state})
def _get_disk_resource_disk_path(self, disk_resource):
return disk_resource.Connection
def get_vm_storage_paths(self, vm_name):
    """Return (disk_files, volume_drives) used by the named VM.

    disk_files are the backing file paths of virtual disk resources;
    volume_drives are host resource paths of passthrough volumes.
    """
    vm = self._lookup_vm_check(vm_name)
    (disk_resources, volume_resources) = self._get_vm_disks(vm)
    volume_drives = [r.HostResource[0] for r in volume_resources]
    disk_files = []
    for disk_resource in disk_resources:
        # Connection may hold several paths; extend directly instead of
        # copying it element by element first.
        disk_files.extend(self._get_disk_resource_disk_path(disk_resource))
    return (disk_files, volume_drives)
def _get_vm_disks(self, vm):
    """Return (disk_resources, volume_resources) for *vm*.

    Disk resources come from the storage-allocation class; passthrough
    volumes from the resource-allocation class (re-queried when the two
    WMI classes differ, as on newer Hyper-V namespaces).
    """
    vmsettings = vm.associators(
        wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
    rasds = vmsettings[0].associators(
        wmi_result_class=self._STORAGE_ALLOC_SETTING_DATA_CLASS)
    disk_resources = [r for r in rasds if
                      r.ResourceSubType in
                      [self._HARD_DISK_RES_SUB_TYPE,
                       self._DVD_DISK_RES_SUB_TYPE]]
    if (self._RESOURCE_ALLOC_SETTING_DATA_CLASS !=
            self._STORAGE_ALLOC_SETTING_DATA_CLASS):
        rasds = vmsettings[0].associators(
            wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
    volume_resources = [r for r in rasds if
                        r.ResourceSubType == self._PHYS_DISK_RES_SUB_TYPE]
    return (disk_resources, volume_resources)
def destroy_vm(self, vm_name):
    """Delete the named VM definition from the host."""
    vm = self._lookup_vm_check(vm_name)
    vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
    # Remove the VM. Does not destroy disks.
    (job_path, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
    self.check_ret_val(ret_val, job_path)
def check_ret_val(self, ret_val, job_path, success_values=(0,)):
    """Validate a WMI method return value.

    Waits for the started asynchronous job when applicable.
    The default is now an immutable tuple instead of a shared mutable
    list (mutable default argument).

    :raises HyperVException: when ret_val is not in success_values.
    """
    if ret_val == constants.WMI_JOB_STATUS_STARTED:
        return self._wait_for_job(job_path)
    elif ret_val not in success_values:
        raise HyperVException(_('Operation failed with return value: %s')
                              % ret_val)
def _wait_for_job(self, job_path):
    """Poll WMI job state and wait for completion."""
    job = self._get_wmi_obj(job_path)
    while job.JobState == constants.WMI_JOB_STATE_RUNNING:
        time.sleep(0.1)
        # Re-fetch: WMI objects are snapshots, not live views.
        job = self._get_wmi_obj(job_path)
    if job.JobState != constants.WMI_JOB_STATE_COMPLETED:
        job_state = job.JobState
        # Concrete jobs carry inline error fields; other job classes
        # expose errors through GetError().
        if job.path().Class == "Msvm_ConcreteJob":
            err_sum_desc = job.ErrorSummaryDescription
            err_desc = job.ErrorDescription
            err_code = job.ErrorCode
            raise HyperVException(_("WMI job failed with status "
                                    "%(job_state)d. Error details: "
                                    "%(err_sum_desc)s - %(err_desc)s - "
                                    "Error code: %(err_code)d") %
                                  {'job_state': job_state,
                                   'err_sum_desc': err_sum_desc,
                                   'err_desc': err_desc,
                                   'err_code': err_code})
        else:
            (error, ret_val) = job.GetError()
            if not ret_val and error:
                raise HyperVException(_("WMI job failed with status "
                                        "%(job_state)d. Error details: "
                                        "%(error)s") %
                                      {'job_state': job_state,
                                       'error': error})
            else:
                raise HyperVException(_("WMI job failed with status "
                                        "%d. No error "
                                        "description available") %
                                      job_state)
    desc = job.Description
    elap = job.ElapsedTime
    LOG.debug("WMI job succeeded: %(desc)s, Elapsed=%(elap)s",
              {'desc': desc, 'elap': elap})
    return job
def _get_wmi_obj(self, path):
    """Fetch a WMI object by moniker; backslashes must become slashes."""
    return wmi.WMI(moniker=path.replace('\\', '/'))
def _add_virt_resource(self, res_setting_data, vm_path):
    """Adds a new resource to the VM."""
    vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
    res_xml = [res_setting_data.GetText_(1)]
    (job_path,
     new_resources,
     ret_val) = vs_man_svc.AddVirtualSystemResources(res_xml, vm_path)
    self.check_ret_val(ret_val, job_path)
    # Paths of the resources that were created.
    return new_resources
def _modify_virt_resource(self, res_setting_data, vm_path):
    """Updates a VM resource."""
    vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
    (job_path, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
        ResourceSettingData=[res_setting_data.GetText_(1)],
        ComputerSystem=vm_path)
    self.check_ret_val(ret_val, job_path)
def _remove_virt_resource(self, res_setting_data, vm_path):
    """Removes a VM resource."""
    vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
    res_path = [res_setting_data.path_()]
    (job_path, ret_val) = vs_man_svc.RemoveVirtualSystemResources(res_path,
                                                                  vm_path)
    self.check_ret_val(ret_val, job_path)
def take_vm_snapshot(self, vm_name):
    """Create a snapshot of the named VM and return its settings path."""
    vm = self._lookup_vm_check(vm_name)
    vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
    (job_path, ret_val,
     snp_setting_data) = vs_man_svc.CreateVirtualSystemSnapshot(vm.path_())
    self.check_ret_val(ret_val, job_path)
    # The snapshot settings are retrieved through the finished job object.
    job_wmi_path = job_path.replace('\\', '/')
    job = wmi.WMI(moniker=job_wmi_path)
    snp_setting_data = job.associators(
        wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)[0]
    return snp_setting_data.path_()
def remove_vm_snapshot(self, snapshot_path):
    """Delete the snapshot identified by its WMI settings path."""
    vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
    (job_path, ret_val) = vs_man_svc.RemoveVirtualSystemSnapshot(
        snapshot_path)
    self.check_ret_val(ret_val, job_path)
def detach_vm_disk(self, vm_name, disk_path, is_physical=True):
    """Detach the disk backed by *disk_path* from the named VM.

    For virtual disks the parent drive resource is removed as well.
    """
    vm = self._lookup_vm_check(vm_name)
    disk_resource = self._get_mounted_disk_resource_from_path(disk_path,
                                                              is_physical)
    if disk_resource:
        parent = self._conn.query("SELECT * FROM "
                                  "Msvm_ResourceAllocationSettingData "
                                  "WHERE __PATH = '%s'" %
                                  disk_resource.Parent)[0]
        self._remove_virt_resource(disk_resource, vm.path_())
        if not is_physical:
            self._remove_virt_resource(parent, vm.path_())
def _get_mounted_disk_resource_from_path(self, disk_path, is_physical):
    """Return the disk resource whose host resource matches *disk_path*.

    Returns None implicitly when no resource matches.
    """
    if is_physical:
        class_name = self._RESOURCE_ALLOC_SETTING_DATA_CLASS
        res_sub_type = self._PHYS_DISK_RES_SUB_TYPE
    else:
        class_name = self._STORAGE_ALLOC_SETTING_DATA_CLASS
        res_sub_type = self._HARD_DISK_RES_SUB_TYPE
    disk_resources = self._conn.query("SELECT * FROM %(class_name)s "
                                      "WHERE ResourceSubType = "
                                      "'%(res_sub_type)s'" %
                                      {"class_name": class_name,
                                       "res_sub_type": res_sub_type})
    for disk_resource in disk_resources:
        if disk_resource.HostResource:
            # Windows paths are case-insensitive.
            if disk_resource.HostResource[0].lower() == disk_path.lower():
                return disk_resource
def get_mounted_disk_by_drive_number(self, device_number):
    """Return the WMI path of the Msvm_DiskDrive with the given drive
    number, or None when no such disk is mounted.
    """
    # Cast to int so a malformed value cannot alter the WQL query.
    mounted_disks = self._conn.query("SELECT * FROM Msvm_DiskDrive "
                                     "WHERE DriveNumber=%s" %
                                     int(device_number))
    if mounted_disks:
        return mounted_disks[0].path_()
def get_controller_volume_paths(self, controller_path):
    """Map each physical disk RelPath on the controller to its host path."""
    disks = self._conn.query("SELECT * FROM %(class_name)s "
                             "WHERE ResourceSubType = '%(res_sub_type)s' "
                             "AND Parent='%(parent)s'" %
                             {"class_name":
                              self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
                              "res_sub_type":
                              self._PHYS_DISK_RES_SUB_TYPE,
                              "parent":
                              controller_path})
    disk_data = {}
    for disk in disks:
        if disk.HostResource:
            disk_data[disk.path().RelPath] = disk.HostResource[0]
    return disk_data
def get_free_controller_slot(self, scsi_controller_path):
    """Return the first unused slot index on the given SCSI controller.

    :raises HyperVException: when every slot is occupied.
    """
    attached_disks = self.get_attached_disks(scsi_controller_path)
    # A set keeps each membership test O(1) instead of scanning a list.
    used_slots = set(int(disk.AddressOnParent) for disk in attached_disks)
    for slot in range(constants.SCSI_CONTROLLER_SLOTS_NUMBER):
        if slot not in used_slots:
            return slot
    raise HyperVException(_("Exceeded the maximum number of slots"))
def enable_vm_metrics_collection(self, vm_name):
    # Intentionally unsupported on this Hyper-V/WMI API version;
    # subclasses for newer versions are expected to override.
    raise NotImplementedError(_("Metrics collection is not supported on "
                                "this version of Hyper-V"))
def get_vm_serial_port_connection(self, vm_name, update_connection=None):
    """Return the VM serial port connection path, optionally updating it.

    When *update_connection* is given, the port is reconfigured first
    (a read-modify-write side effect).
    """
    vm = self._lookup_vm_check(vm_name)
    vmsettings = vm.associators(
        wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
    rasds = vmsettings[0].associators(
        wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
    serial_port = (
        [r for r in rasds if
         r.ResourceSubType == self._SERIAL_PORT_RES_SUB_TYPE][0])
    if update_connection:
        serial_port.Connection = [update_connection]
        self._modify_virt_resource(serial_port, vm.path_())
    if len(serial_port.Connection) > 0:
        return serial_port.Connection[0]
def get_active_instances(self):
    """Return the names of all the active instances known to Hyper-V."""
    vm_names = self.list_instances()
    vms = [self._lookup_vm(vm_name) for vm_name in vm_names]
    # A VM may disappear between list_instances() and the lookup
    # (_lookup_vm then returns None); skip those instead of crashing.
    active_vm_names = [v.ElementName for v in vms
                       if v is not None and
                       v.EnabledState == constants.HYPERV_VM_STATE_ENABLED]
    return active_vm_names
|
apache-2.0
|
setminami/TokageUtil
|
util/vectorutil.py
|
1
|
6987
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, traceback, glob, threading
import datetime as dt
import numpy as np
from . import printutil as pu
# Directory scanned for .npy motion-vector files.
DEFAULTVECTOR_DIR = os.path.expanduser('~/workspace/img-buff/motions/vector')
# Edge length (pixels) of one macro-block; vector coords scale by this.
MACROSQURE = 16
# Fixed 5x2 grid, chosen by hand (human decision).
# With BIAS taken into account, the 200 -> 100 and 200 -> 140 regions map
# onto 4x2 cells; a -y slope is applied the further right you go.
CAM02_SHOOT_ORIGINS = [[(195, 125), (150, 125), (130, 125), (100, 120), (80, 117)],
                       [(195, 195), (150, 180), (130, 170), (100, 165), (60, 150)]]
# 'noLenz': [[(180, 117), (130, 117), (100, 117), (65, 90)],
#            [(180, 195), (130, 180), (100, 160), (60, 130)]]
# (columns, rows) of the shoot-origin grid above.
AREA_DIV = (len(CAM02_SHOOT_ORIGINS[0]), len(CAM02_SHOOT_ORIGINS))
class VectorReverseIndex(object):
    """Thread-safe singleton mapping image names to their .npy vector file.

    updateVectors() incrementally scans the vector directory: files that
    were already parsed are skipped on subsequent calls.
    """
    _instance = None
    _lock = threading.Lock()

    def __init__(self, path):
        # Shared state lives on the class (set up in __new__); only record
        # the requested path here.
        self.__vectPath = path

    def __new__(cls, path):
        with cls._lock:
            if cls._instance is None:
                cls._instance = super().__new__(cls)
                cls.__vectPath = path
                cls.__checkedFiles = set([])
                cls.__Img2Vect = {}
            else:
                if cls.__vectPath != path:
                    cls.__vectPath = path
        return cls._instance

    @property
    def queryVectFile(self):
        """Mapping of image name -> vector file basename built so far."""
        return self.__Img2Vect

    def __expandImgNamesInVect(self, vectFile):
        """Register every image name stored in *vectFile* under its basename."""
        imgs = np.load(vectFile)[1]
        root = os.path.basename(vectFile)
        for i in imgs:
            self.__Img2Vect[i] = root

    @staticmethod
    def updateVectors(path=None):
        # Late-bound default keeps the module constant overridable.
        if path is None:
            path = DEFAULTVECTOR_DIR
        ins = VectorReverseIndex(path=path)
        files = glob.glob(os.path.join(ins.__vectPath, '*.npy'))
        notYets = set(files) - ins.__checkedFiles
        if len(notYets) == 0:
            return
        for name in notYets:
            ins.__expandImgNamesInVect(name)
        # BUG FIX: processed files were never recorded before, so every
        # call re-parsed the entire directory.
        ins.__checkedFiles |= notYets
class ScreenRatio(object):
    """Convert Cam001 screen coordinates into Cam002 servo grid cells."""

    def __init__(self, screenSize, dim, origins):
        """
        :param screenSize: (width, height) in pixels, both ints.
        :param dim: (columns, rows) of the division grid, both ints.
        :param origins: non-empty servo origin table.
        """
        assert len(screenSize) == 2
        assert (isinstance(screenSize[0], int) and isinstance(screenSize[1], int)) == True
        self.__screen_size = screenSize
        assert len(dim) == 2
        assert (isinstance(dim[0], int) and isinstance(dim[1], int)) == True
        self.__dim = dim
        # Triangular sums are computed once; they never change afterwards.
        self.includeX = [self.__sumFromOne(i + 1) for i in range(self.dim[0])]
        self.includeY = [self.__sumFromOne(i + 1) for i in range(self.dim[1])]
        assert len(origins) > 0, 'origins type donot fulfill requirements'
        self.__origins = origins
        # Cache slot for divided_units (None = not computed yet).
        self.__divided_units = None

    def __sumFromOne(self, n):
        """Return 1 + 2 + ... + n."""
        assert n > 0
        return (1 + n) * n / 2

    @property
    def screen_size(self):
        return self.__screen_size

    @property
    def dim(self):
        return self.__dim

    @property
    def origins(self):
        return self.__origins

    @origins.setter
    def origins(self, val):
        assert isinstance(val, list)
        assert len(val) > 0, 'value type donot fulfill requirements'
        self.__origins = val

    @property
    def divided_units(self):
        """Pixel size (w, h) of one grid cell, computed lazily.

        BUG FIX: the previous code tested hasattr(self, '__divided_units'),
        which never matches the name-mangled attribute, so the cached value
        was recomputed on every access.
        """
        if self.__divided_units is None:
            assert self.__origins is not None, 'not fulfill requirements'
            self.__divided_units = (self.screen_size[0] // self.dim[0],
                                    self.screen_size[1] // self.dim[1])
        return self.__divided_units

    def whereisHitArea(self, x, y, isCenter=True):
        """Return the (column, row) grid cell containing the point.

        Integer args are used as-is; otherwise the mid (or min) point of
        the coordinate vectors is used.
        """
        if isinstance(x, int) and isinstance(y, int):
            o = x, y
        else:
            o = midXY(x, y) if isCenter else minXY(x, y)
        rawX, rawY = int(o[0] // self.divided_units[0]), int(o[1] // self.divided_units[1])
        return rawX, rawY
def minXY(xVector, yVector):
    """Minimum x/y vector components scaled up to pixel coordinates."""
    return (xVector.min() * MACROSQURE,
            yVector.min() * MACROSQURE)
def maxXY(xVector, yVector):
    """Maximum x/y vector components scaled up to pixel coordinates."""
    return (xVector.max() * MACROSQURE,
            yVector.max() * MACROSQURE)
def midXY(xVector, yVector):
    """Midpoint between the min and max pixel coordinates."""
    lowX, lowY = minXY(xVector, yVector)
    highX, highY = maxXY(xVector, yVector)
    # (max - min) / 2 + min == (max + min) / 2
    return (lowX + highX) / 2, (lowY + highY) / 2
def motionFrame(xVector, yVector):
    """Bounding box (x, y, width, height) of a motion-vector cloud."""
    assert len(xVector) == len(yVector), 'xy not matches %d:%d'%(len(xVector), len(yVector))
    left, top = minXY(xVector, yVector)
    right, bottom = maxXY(xVector, yVector)
    return (left, top, right - left, bottom - top)
def cropedFrame(x, y, isCenter: bool, box):
    """Frame of size *box* centred on the midpoint of the vectors.

    NOTE(review): *isCenter* is currently unused — the frame is always
    centred on midXY; confirm whether min-anchored cropping was intended.
    """
    assert len(x) == len(y), 'xy not matches %d:%d'%(len(x), len(y))
    assert len(box) == 2, 'Illegal box has found %s'%box
    centerX, centerY = midXY(x, y)
    halfW, halfH = box[0] / 2, box[1] / 2
    return (centerX - halfW, centerY - halfH, box[0], box[1])
def __divideArea(screenSize, dividedBy=AREA_DIV):
    """Pixel size of one grid cell when the screen is split *dividedBy* ways."""
    assert len(screenSize) == 2
    assert len(dividedBy) == 2
    cellW = screenSize[0] // dividedBy[0]
    cellH = screenSize[1] // dividedBy[1]
    return cellW, cellH
def whereIsHitArea(screenSize, x, y, isCenter=True):
    """
    Return the (column, row) grid cell of AREA_DIV that contains the
    midpoint (or min point) of the x/y coordinates.
    """
    divArea = __divideArea(screenSize)
    if isinstance(x, int) and isinstance(y, int):
        o = x, y
    else:
        o = midXY(x, y) if isCenter else minXY(x, y)
    # Dividing by the cell size yields the cell index in each axis.
    hitX, hitY = int(o[0] // divArea[0]), int(o[1] // divArea[1])
    # BUG FIX: the previous asserts compared the cell index against the
    # cell's pixel size (divArea); the proper upper bound is the number of
    # cells per axis (AREA_DIV).
    assert hitX < AREA_DIV[0]
    assert hitY < AREA_DIV[1]
    return hitX, hitY
|
apache-2.0
|
dfunckt/django
|
tests/forms_tests/field_tests/test_timefield.py
|
42
|
2043
|
from __future__ import unicode_literals
import datetime
from django.forms import TimeField, ValidationError
from django.test import SimpleTestCase
from . import FormFieldAssertionsMixin
class TimeFieldTest(FormFieldAssertionsMixin, SimpleTestCase):
    """Tests for django.forms.TimeField parsing and change detection."""

    def test_timefield_1(self):
        # Default input formats: accepts time objects, 'HH:MM', 'HH:MM:SS'.
        f = TimeField()
        self.assertEqual(datetime.time(14, 25), f.clean(datetime.time(14, 25)))
        self.assertEqual(datetime.time(14, 25, 59), f.clean(datetime.time(14, 25, 59)))
        self.assertEqual(datetime.time(14, 25), f.clean('14:25'))
        self.assertEqual(datetime.time(14, 25, 59), f.clean('14:25:59'))
        with self.assertRaisesMessage(ValidationError, "'Enter a valid time.'"):
            f.clean('hello')
        with self.assertRaisesMessage(ValidationError, "'Enter a valid time.'"):
            f.clean('1:24 p.m.')

    def test_timefield_2(self):
        # Custom 12-hour format replaces (not extends) the default formats.
        f = TimeField(input_formats=['%I:%M %p'])
        self.assertEqual(datetime.time(14, 25), f.clean(datetime.time(14, 25)))
        self.assertEqual(datetime.time(14, 25, 59), f.clean(datetime.time(14, 25, 59)))
        self.assertEqual(datetime.time(4, 25), f.clean('4:25 AM'))
        self.assertEqual(datetime.time(16, 25), f.clean('4:25 PM'))
        with self.assertRaisesMessage(ValidationError, "'Enter a valid time.'"):
            f.clean('14:30:45')

    def test_timefield_3(self):
        f = TimeField()
        # Test whitespace stripping behavior (#5714)
        self.assertEqual(datetime.time(14, 25), f.clean(' 14:25 '))
        self.assertEqual(datetime.time(14, 25, 59), f.clean(' 14:25:59 '))
        with self.assertRaisesMessage(ValidationError, "'Enter a valid time.'"):
            f.clean(' ')

    def test_timefield_changed(self):
        # has_changed compares against the rendered initial value; microseconds
        # in the initial make it differ from the re-parsed '12:51'.
        t1 = datetime.time(12, 51, 34, 482548)
        t2 = datetime.time(12, 51)
        f = TimeField(input_formats=['%H:%M', '%H:%M %p'])
        self.assertTrue(f.has_changed(t1, '12:51'))
        self.assertFalse(f.has_changed(t2, '12:51'))
        self.assertFalse(f.has_changed(t2, '12:51 PM'))
|
bsd-3-clause
|
lebauce/artub
|
gouzi/__init__.py
|
1
|
12352
|
# Glumol - An adventure game creator
# Copyright (C) 1998-2008 Sylvain Baubeau & Alexis Contour
# This file is part of Glumol.
# Glumol is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# Glumol is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Glumol. If not, see <http://www.gnu.org/licenses/>.
import sys
import xml.dom
import xml.dom.minidom
import string
from os.path import dirname
from compiler import parse, walk
import types
import pprint
import pyunparse
import compiler.ast as ast
from types import ListType, TupleType
class ClassNotFound(Exception):
    """Raised when a class name cannot be located in a parsed module.

    Now derives from Exception (it is raised elsewhere in this module;
    the old plain class relied on Python 2 old-style raise semantics).
    """

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return self.name
class MethodNotFound: pass
class Visitor:
    """Base AST visitor: stores a target name and walks statement lists.

    compiler.walk injects self.visit; self.p remembers the most recent
    statement list so finders can report a parent node.
    """

    def __init__(self, name):
        self.name = name
        self.result = None

    def visitStmt(self, node, scope=None):
        self.p = node
        for child in node.nodes:
            self.visit(child, scope)
class ClassFinder(Visitor):
    """Find the first class node whose name matches; remembers its parent."""

    def visitClass(self, node, scope=None):
        if node.name == self.name:
            # Keep only the first match.
            if not self.result:
                self.result = node
                self.parent = self.p
        for n in node.bases:
            self.visit(n, node.name)
        # Recurse into the class body to find nested classes.
        self.visit(node.code, node.name)
class AllClassesFinder(Visitor):
    """Collect every class node, optionally filtered by base-class names."""

    def __init__(self, bases):
        # Falsy *bases* means "collect all classes".
        self.bases = bases
        self.result = []

    def visitClass(self, node, scope=None):
        if self.bases:
            for i in self.bases:
                if i in node.bases:
                    self.result.append(node)
        else:
            self.result.append(node)
        # Recurse for nested classes too.
        self.visit(node.code, node.name)
class MethodFinder(Visitor):
    """Find a function node by name, restricted to a given class name.

    An empty classname matches no visited class, so only functions reached
    outside visitClass (module level) are considered in that case.
    """

    def __init__(self, name, classname):
        self.name = name
        self.classname = classname
        self.result = None

    def visitClass(self, node, scope=None):
        # Only descend into the class we were asked about.
        if node.name == self.classname:
            self.visit(node.code, scope)

    def visitFunction(self, node, scope=None):
        if node.name == self.name:
            self.result = node
            self.parent = self.p
        self.visit(node.code, scope)
class PropFinder(Visitor):
    """Find the first 'self.<name> = ...' (attribute or slice) assignment."""

    def visitAssign(self, node, scope=None):
        if isinstance(node.nodes[0], ast.Slice):
            # Matches 'self.<name>[...] = ...'.
            if node.nodes[0].expr.expr.name == "self" and \
               node.nodes[0].expr.attrname == self.name:
                if not self.result:
                    self.result = node
                    self.parent = self.p
        elif isinstance(node.nodes[0], ast.AssAttr):
            # Matches plain 'obj.<name> = ...'.
            if node.nodes[0].attrname == self.name:
                if not self.result:
                    self.result = node
                    self.parent = self.p
        for n in node.nodes:
            self.visit(n, scope)
        self.visit(node.expr, scope)
class GlobalPropFinder(Visitor):
    """Find the first module-level '<name> = ...' assignment.

    Function bodies are deliberately skipped (visitFunction is a no-op),
    so only global assignments match.
    """

    def visitAssign(self, node, scope=None):
        if isinstance(node.nodes[0], ast.AssName):
            if node.nodes[0].name == self.name:
                if not self.result:
                    self.result = node
                    self.parent = self.p
        for n in node.nodes:
            self.visit(n, scope)
        self.visit(node.expr, scope)

    def visitFunction(self, node, scope=None):
        pass
class Canard:
    """Thin wrapper pairing an AST node with its parent node."""

    def __init__(self, ast, parent=None):
        self.ast = ast
        self.parent = parent
class Method(Canard):
    """Wrapper around a function/method AST node."""

    def get_nodes(self, ast=None):
        """Return the statement nodes of the method body."""
        if ast is None:
            ast = self.ast
        return ast.code.nodes

    def get_prop(self, name, ast=None):
        """Find the 'self.<name> = ...' assignment in the body.

        BUG FIX: the *ast* argument was previously ignored — the search
        always walked self.ast.
        """
        if ast is None:
            ast = self.ast
        cf = PropFinder(name)
        walk(ast, cf)
        return Prop(cf.result)

    def add_property(self, name, valuecode, _ast=None, position=-1):
        """Insert 'self.<name>=<valuecode>' into the method body.

        BUG FIX: body-manipulation calls now consistently use *_ast*
        instead of mixing it with the default body.
        """
        if _ast is None:
            _ast = self.ast
        valuecode = "self." + name + "=" + valuecode
        ast2 = parse(valuecode)
        nodes = self.get_nodes(_ast)
        # A body consisting of a lone 'pass' is replaced, not appended to.
        if len(nodes) == 1 and isinstance(nodes[0], ast.Pass):
            del nodes[0]
        if position != -1:
            nodes.insert(position, ast2.node.nodes[0])
        else:
            nodes.append(ast2.node.nodes[0])
class Prop(Canard):
    """Wrapper around a property assignment node (self.<name> = <expr>)."""

    def change_property(self, valuecode, ast = None):
        # Replace the assigned expression with parsed *valuecode*.
        # NOTE(review): the *ast* parameter is defaulted but the write
        # targets self.ast — confirm whether that is intended.
        if ast == None: ast = self.ast
        ast2 = parse("a = " + valuecode)
        self.ast.expr = ast2.node.nodes[0].expr

    def get_repr(self):
        # Return the source text of the assigned expression, or ''.
        # NOTE(review): Manipulator is not defined in this module —
        # presumably provided by the embedding application; verify.
        if self.ast and self.ast.expr:
            b = Manipulator.buf()
            pyunparse.ast2py(self.ast.expr, b)
            return b.listing
        else:
            return ""
class GlobalProp(Canard):
    """Wrapper around a module-level assignment node (<name> = <expr>)."""

    def change_property(self, valuecode, ast=None):
        """Replace the assigned expression with parsed *valuecode*."""
        # NOTE(review): like Prop.change_property, the write targets
        # self.ast regardless of the *ast* argument — confirm intent.
        if ast is None:
            ast = self.ast
        ast2 = parse("a = " + valuecode)
        self.ast.expr = ast2.node.nodes[0].expr

    def get_repr(self):
        """Return the source text of the assigned expression, or ''.

        BUG FIX: removed the stray 'buf = buffer()' line — buffer() with
        no argument raises TypeError and its result was unused; this now
        matches Prop.get_repr.
        """
        if self.ast and self.ast.expr:
            b = Manipulator.buf()
            pyunparse.ast2py(self.ast.expr, b)
            return b.listing
        else:
            return ""
class Module(Canard):
    """Wrapper around a parsed module AST offering class/function queries."""

    def remove_class(self, name, ast=None):
        """Remove class *name* from the module body."""
        c = self.get_class(name, ast)
        self.get_nodes().remove(c.ast)

    def get_all_classes(self, ast=None, derived_from=()):
        """Return Classe wrappers for every class, optionally filtered by base.

        The filter default is now an immutable empty tuple instead of a
        shared mutable list (the falsy behaviour is unchanged).
        """
        if ast is None:
            ast = self.ast
        cf = AllClassesFinder(derived_from)
        walk(ast, cf)
        return map(Classe, cf.result)

    def get_class(self, name, ast=None):
        """Resolve a possibly dotted class name.

        :raises ClassNotFound: when any path component cannot be found.
        """
        if ast is None:
            ast = self.ast
        names = name.split('.')
        try:
            for name in names:
                cf = ClassFinder(name)
                walk(ast, cf)
                # Descend into the found class for the next component.
                ast = Classe(cf.result, cf.parent).ast
            return Classe(cf.result, cf.parent)
        except Exception:
            # Narrowed from a bare except; any lookup failure is reported
            # as ClassNotFound.
            raise ClassNotFound(name)

    def get_method(self, name, ast=None):
        """Find a top-level function.

        BUG FIX: the *ast* argument was previously ignored (the walk always
        started at self.ast.node).

        :raises MethodNotFound: when the function is absent.
        """
        if ast is None:
            ast = self.ast
        cf = MethodFinder(name, "")
        walk(ast.node, cf)
        if not cf.result:
            raise MethodNotFound()
        return Method(cf.result)

    def remove_function(self, name, ast=None):
        """Remove function *name* from this module's body."""
        m = self.get_method(name, ast)
        # NOTE(review): removal always targets self.ast even when *ast*
        # is supplied — confirm whether that asymmetry is intended.
        self.ast.node.nodes.remove(m.ast)

    def get_node(self, ast=None):
        """Return the statement container of the module."""
        if ast is None:
            ast = self.ast
        return ast.node

    def get_nodes(self, ast=None):
        """Return the list of top-level statement nodes."""
        if ast is None:
            ast = self.ast
        return ast.node.nodes

    def insert(self, node, position=-1):
        """Insert *node* into the body, appending when position is -1."""
        if position != -1:
            self.get_nodes().insert(position, node)
        else:
            self.get_nodes().append(node)

    def add_function(self, funccode, ast=None, position=-1):
        """Parse *funccode* and insert the resulting function definition."""
        if ast is None:
            ast = self.ast
        ast2 = parse(funccode)
        ast2.node.nodes[0].lineno = self.ast.lineno
        self.insert(ast2.node.nodes[0], position)
        return Method(ast2.node.nodes[0])

    def add_class(self, name, base_classes, body, ast=None, position=-1):
        """Build and insert 'class name(bases): body' from source fragments."""
        if ast is None:
            ast = self.ast
        s = "class " + name
        if base_classes:
            # join() replaces the old manual comma loop; output identical.
            s = s + "(" + ", ".join(base_classes) + ")"
        s = s + ":\n"
        for line in body:
            s = s + " " + line + "\n"
        ast2 = parse(s)
        self.insert(ast2.node.nodes[0], position)
        return Classe(ast2.node.nodes[0])
class Classe(Module):
    """AST wrapper for a class statement.

    Instance "properties" are stored as assignments inside a well-known
    method (self.method, default '__glumolinit__'); class-level properties
    are plain assignments in the class body."""
    def __init__(self, ast, parent = None):
        Module.__init__(self, ast, parent)
        self.name = ast.name
        # Method that holds the instance properties, and its signature.
        self.method = '__glumolinit__'
        self.method_args = '(self)'
    def get_node(self, ast = None):
        """Return the Stmt node holding the class body."""
        if ast == None: ast = self.ast
        return ast.code
    def get_nodes(self, ast = None):
        """Return the list of statements in the class body."""
        if ast == None: ast = self.ast
        return ast.code.nodes
    def get_all_classes(self, ast = None, derived_from = None):
        """Return the classes nested in this class body."""
        # Fix: 'derived_from' used to default to a shared mutable list ([]).
        if derived_from is None: derived_from = []
        if ast == None: ast = self.ast.code
        return Module.get_all_classes(self, ast, derived_from)
    def get_constructor(self, ast = None):
        """Return the property-holding method (see self.method)."""
        return self.get_method(self.method, ast)
    def remove_child_class(self, name, ast = None):
        """Remove a nested definition named *name* from the class body."""
        m = self.get_method(name, ast)
        self.ast.code.nodes.remove(m.ast)
    def get_method(self, name, ast = None):
        """Find method *name* in this class.  Raises MethodNotFound."""
        if ast == None: ast = self.ast
        cf = MethodFinder(name, self.ast.name)
        walk(ast, cf)
        if not cf.result: raise MethodNotFound()
        return Method(cf.result)
    def set_global_property(self, name, value, ast = None):
        """Set a class-level assignment, creating it when missing."""
        if ast == None: ast = self.ast
        try:
            p = self.get_global_prop(name)
            p.change_property(value)
        except:
            self.add_global_property(name, value)
    def set_property(self, name, value, ast = None, method = None):
        """Set an instance property inside *method* (default: the
        constructor method), creating the method when it is missing."""
        if ast == None: ast = self.ast
        cons = None
        try:
            if method: cons = self.get_method(method, ast)
            else: cons = self.get_constructor(ast)
        except:
            # Fix: the original re-raised here and then *unconditionally*
            # replaced the method it had just found with a fresh empty one.
            # Create the target method only when the lookup failed.
            if not method: method = self.method
            cons = self.add_function("def " + method + "(" + self.method_args + "): pass")
        try:
            i = name.find('[') # ugly, but handles the case: self.prop[:] = ...
            pname = name
            if i != -1:
                pname = name[:i]
            p = cons.get_prop(pname)
            p.change_property(value)
        except:
            p = cons.add_property(name, value) # No such property yet: create it
    def remove_global_property(self, name):
        """Remove a class-level assignment."""
        m = self.get_global_prop(name)
        self.get_nodes().remove(m.ast)
    def remove_property(self, name, ast = None):
        """Remove an instance property from the constructor method."""
        cons = self.get_constructor(ast)
        m = cons.get_prop(name, ast)
        cons.ast.code.nodes.remove(m.ast)
        if not cons.ast.code.nodes:
            # Keep the emptied method syntactically valid.
            cons.ast.code.nodes.append(parse("pass"))
    def remove_function(self, name, ast = None):
        """Remove method *name* from the class body."""
        m = self.get_method(name, ast)
        self.ast.code.nodes.remove(m.ast)
    def add_property(self, name, valuecode, ast = None, position = -1):
        """Add an instance property to the constructor method."""
        cons = self.get_constructor(ast)
        return cons.add_property(name, valuecode, ast, position)
    def add_global_property(self, name, valuecode, ast = None, position = -1):
        """Add a class-level assignment "name=valuecode"."""
        if ast == None: ast = self.ast
        valuecode = name + "=" + valuecode
        ast2 = parse(valuecode)
        if position != -1:
            self.get_nodes().insert(position, ast2.node.nodes[0])
        else:
            self.get_nodes().append(ast2.node.nodes[0])
    def get_global_prop(self, name, ast = None):
        """Find a class-level assignment, wrapped in Prop."""
        if ast == None: ast = self.ast
        pf = GlobalPropFinder(name)
        walk(ast, pf)
        return Prop(pf.result)
    def get_prop(self, name, ast = None):
        """Find an instance property inside the constructor method."""
        if ast == None: ast = self.ast
        cons = self.get_constructor(ast)
        return cons.get_prop(name)
class Manipulator(Module):
    # Top-level entry point: wraps a whole script's AST for editing.
    def __init__(self, script):
        # *script* is expected to expose its source text in script.listing.
        Module.__init__(self, self.parse_script(script))
    def sync(self, script):
        # Re-parse the (possibly edited) script source.
        self.ast = self.parse_script(script)
    def parse_script(self, script):
        # parse() comes from the (Python 2) 'compiler' package.
        return parse(script.__dict__["listing"])
    class buf:
        # Minimal write-only stream; collects everything written in .listing.
        def __init__(self):
            self.listing = ""
        def write(self, s):
            self.listing = self.listing + s
    def topy(self):
        # Unparse the wrapped AST back to Python source text.
        b = Manipulator.buf()
        v = pyunparse.UnparsingVisitor(stream=b)
        v.v(self.ast)
        # NOTE(review): self.breakpoints is never set in this class;
        # presumably attached by callers -- confirm.  Breakpoint lines are
        # remapped onto the freshly generated source via the unparser's
        # line_map (has_key => Python 2 only).
        for k, j in self.breakpoints.items():
            if v.line_map.has_key(j.line + 1):
                # print "changing breakpoint from line", j.line + 1, "to line", v.line_map[j.line + 1]
                j.line = v.line_map[j.line + 1] - 1
        return b.listing + "\n"
|
gpl-2.0
|
Starch/paperwork
|
scripts/simulate-workdir_3d.py
|
2
|
8178
|
#!/usr/bin/env python3
import csv
import os
import multiprocessing
import multiprocessing.pool
import sys
import tempfile
import traceback
import threading
import gi
gi.require_version('Gdk', '3.0')
gi.require_version('PangoCairo', '1.0')
gi.require_version('Poppler', '0.18')
from paperwork_backend import config
from paperwork_backend import docimport
from paperwork_backend import docsearch
from paperwork_backend.util import rm_rf
"""
Create a work directory progressively, like a user would.
Uses an existing work directory for reference.
Compute statistics regarding label guessing
Scenario tested here:
for each document:
- the user scan the first page
- labels are guessed and added
- user fixes the labels
- user scans the remaining pages of the document
"""
# Serializes console/CSV output across the worker threads.
g_lock = threading.Lock()
def upd_index(dst_dsearch, doc, new):
    """Push *doc* into the destination index (add when *new*, else update)."""
    updater = dst_dsearch.get_index_updater(optimize=False)
    add_or_update = updater.add_doc if new else updater.upd_doc
    add_or_update(doc, index_update=False)
    updater.commit(index_update=False)
def label_guess(dst_dsearch, src_doc, dst_doc):
    """Guess the labels, and apply the guess on the document."""
    for guessed in dst_dsearch.guess_labels(dst_doc):
        dst_dsearch.add_label(dst_doc, guessed, update_index=False)
    upd_index(dst_dsearch, dst_doc, new=True)
def fix_labels(stats, dst_dsearch, src_doc, dst_doc):
    """Act like the user fixing the labels.

    Compare the guessed labels on *dst_doc* against the reference labels on
    *src_doc*, correct the destination document, update *stats* in place,
    and print a per-document progress line.
    """
    stats['nb_documents'] += 1
    stats['nb_src_labels'] += len(src_doc.labels)
    stats['nb_dst_labels'] += len(dst_doc.labels)
    changed = False
    correct = 0
    missing = 0
    wrong = 0
    to_remove = set()
    to_add = set()
    # Guessed labels absent from the reference document are wrong guesses.
    for dst_label in dst_doc.labels:
        if dst_label not in src_doc.labels:
            stats['wrong_guess'] += 1
            wrong += 1
            to_remove.add(dst_label)
            changed = True
    for label in to_remove:
        dst_dsearch.remove_label(dst_doc, label, update_index=False)
    # Reference labels missing from the guess must be added back.
    for src_label in src_doc.labels:
        if src_label in dst_doc.labels:
            stats['correct_guess'] += 1
            correct += 1
        else:
            stats['missing_guess'] += 1
            missing += 1
            to_add.add(src_label)
            changed = True
    for label in to_add:
        if label not in dst_dsearch.labels.values():
            dst_dsearch.create_label(label)
        dst_dsearch.add_label(dst_doc, label, update_index=False)
    if changed:
        upd_index(dst_dsearch, dst_doc, new=False)
    else:
        stats['perfect'] += 1
    # Fix: the original printed `out` *after* releasing g_lock, so the two
    # prints from one document could interleave with other threads' output.
    # Also: with-statement instead of manual acquire/try/finally/release.
    with g_lock:
        print("Document [{}|{}]".format(
            dst_dsearch.label_guesser.min_yes,
            src_doc.docid
        ))
        out = u"success: {}%/{} || ".format(
            int(stats['perfect'] * 100 / stats['nb_documents']),
            stats['nb_documents']
        )
        out += "ok: {}".format(correct)
        if missing:
            out += " / MISSING: {}".format(missing)
        if wrong:
            out += " / WRONG: {}".format(wrong)
        print(out)
def print_stats(stats):
    """Print the aggregate label-guessing statistics.

    Fix: uses local copies of the divisors instead of writing -1 back into
    the caller's dict (the original mutated *stats* as a side effect).
    A divisor of -1 keeps the original "no data" marker behaviour
    (percentages come out negative instead of raising ZeroDivisionError).
    """
    nb_src_labels = stats['nb_src_labels'] or -1
    nb_dst_labels = stats['nb_dst_labels'] or -1
    nb_documents = stats['nb_documents']
    if nb_documents == 0:
        nb_documents += 1
    with g_lock:
        print("---")
        print("Success/total: {}/{} = {}%".format(
            stats['perfect'], nb_documents,
            int(stats['perfect'] * 100 / nb_documents)
        ))
        print("Labels correctly guessed: {}/{} = {}%".format(
            stats['correct_guess'], nb_src_labels,
            int(stats['correct_guess'] * 100 / nb_src_labels)
        ))
        print("Labels not guessed: {}/{} = {}%".format(
            stats['missing_guess'], nb_src_labels,
            int(stats['missing_guess'] * 100 / nb_src_labels)
        ))
        print("Labels wrongly guessed: {}/{} = {}%".format(
            stats['wrong_guess'], nb_dst_labels,
            int(stats['wrong_guess'] * 100 / nb_dst_labels)
        ))
def run_simulation(
    src_dsearch,
    min_yes,
    csvwriter
):
    """Replay the documents of *src_dsearch* into a fresh work directory,
    guessing labels on each first page, and record the success rate.

    src_dsearch -- reference DocSearch (already indexed)
    min_yes -- label guesser threshold to test for this run
    csvwriter -- shared csv.writer; one row written: [min_yes, nb_documents, perfect]
    """
    stats = {
        'nb_documents': 0,
        'correct_guess': 0,
        'missing_guess': 0,
        'wrong_guess': 0,
        'nb_src_labels': 0,
        'nb_dst_labels': 0,
        'perfect': 0,
    }
    # Fresh destination work dir + index for this simulation run.
    dst_doc_dir = tempfile.mkdtemp(suffix="paperwork-simulate-docs")
    dst_index_dir = tempfile.mkdtemp(suffix="paperwork-simulate-index")
    print(
        "Destination directories : {} | {}".format(dst_doc_dir, dst_index_dir)
    )
    dst_dsearch = docsearch.DocSearch(dst_doc_dir, indexdir=dst_index_dir)
    dst_dsearch.reload_index()
    dst_dsearch.label_guesser.min_yes = min_yes
    try:
        # Deterministic replay order: sort by docid.
        documents = [x for x in src_dsearch.docs]
        documents.sort(key=lambda doc: doc.docid)
        for src_doc in documents:
            files = os.listdir(src_doc.path)
            files.sort()
            current_doc = None
            for filename in files:
                if "thumb" in filename:
                    # Skip thumbnails: they are not document pages.
                    continue
                filepath = os.path.join(src_doc.path, filename)
                fileuri = "file://" + filepath
                importers = docimport.get_possible_importers(
                    fileuri, current_doc=current_doc
                )
                if len(importers) <= 0:
                    continue
                assert(len(importers) == 1)
                importer = importers[0]
                (docs, page, new) = importer.import_doc(
                    fileuri, dst_dsearch, current_doc
                )
                dst_doc = docs[0]
                for page_nb in range(0, dst_doc.nb_pages):
                    if dst_doc.can_edit:
                        # Reuse the reference OCR boxes instead of re-running OCR.
                        dst_doc.pages[page_nb].boxes = \
                            src_doc.pages[page_nb].boxes
                        dst_doc.pages[page_nb].drop_cache()
                if current_doc is None:
                    # first page --> guess labels and see if it matchs
                    label_guess(dst_dsearch, src_doc, dst_doc)
                    fix_labels(stats, dst_dsearch, src_doc, dst_doc)
                else:
                    # just update the index
                    upd_index(dst_dsearch, dst_doc, new=False)
                current_doc = docs[0]
    finally:
        # Always record the (possibly partial) result and clean up.
        g_lock.acquire()
        try:
            csvwriter.writerow([
                min_yes,
                stats['nb_documents'], stats['perfect'],
            ])
        finally:
            g_lock.release()
        rm_rf(dst_doc_dir)
        rm_rf(dst_index_dir)
        print_stats(stats)
def _run_simulation(*args):
    # Thread-pool wrapper: apply_async() swallows exceptions silently, so
    # log the traceback here before re-raising.
    try:
        run_simulation(*args)
    except Exception as exc:
        print("EXCEPTION: {}".format(exc))
        traceback.print_exc()
        raise
def main():
    """Entry point: run one simulation per min_yes value, in a thread pool,
    appending the results to the given CSV file."""
    if len(sys.argv) < 3:
        print("Syntax:")
        print(
            " {} [min_yeses] [out_csv_file]".format(
                sys.argv[0]
            )
        )
        sys.exit(1)
    # NOTE(review): eval() on a command-line argument executes arbitrary
    # code.  Acceptable for a developer-only script, but ast.literal_eval
    # would be safer if inputs are always literal lists -- confirm.
    min_yeses = eval(sys.argv[1])
    out_csv_file = sys.argv[2]
    pconfig = config.PaperworkConfig()
    pconfig.read()
    src_dir = pconfig.settings['workdir'].value
    print("Source work directory : {}".format(src_dir))
    src_dsearch = docsearch.DocSearch(src_dir)
    src_dsearch.reload_index()
    # One worker thread per CPU.
    nb_threads = multiprocessing.cpu_count()
    pool = multiprocessing.pool.ThreadPool(processes=nb_threads)
    with open(out_csv_file, 'a', newline='') as csvfile:
        csvwriter = csv.writer(csvfile)
        for min_yes in min_yeses:
            pool.apply_async(
                _run_simulation,
                (src_dsearch, min_yes, csvwriter,)
            )
        pool.close()
        pool.join()
    print("All done !")
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C: exit quietly instead of dumping a traceback.
        print("Interrupted")
|
gpl-3.0
|
beernarrd/gramps
|
gramps/gen/filters/rules/note/__init__.py
|
4
|
1568
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
# Copyright (C) 2007 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Package providing filter rules for GRAMPS.
"""
from ._allnotes import AllNotes
from ._hasidof import HasIdOf
from ._regexpidof import RegExpIdOf
from ._matchesregexpof import MatchesRegexpOf
from ._matchessubstringof import MatchesSubstringOf
from ._hasreferencecountof import HasReferenceCountOf
from ._noteprivate import NotePrivate
from ._matchesfilter import MatchesFilter
from ._hasnote import HasNote
from ._changedsince import ChangedSince
from ._hastag import HasTag
from ._hastype import HasType
# Note filter rules offered by the filter editor, in display order.
editor_rule_list = [
    AllNotes,
    HasIdOf,
    RegExpIdOf,
    HasNote,
    MatchesRegexpOf,
    HasReferenceCountOf,
    NotePrivate,
    MatchesFilter,
    ChangedSince,
    HasTag,
    HasType,
]
|
gpl-2.0
|
toobaz/pandas
|
pandas/core/groupby/generic.py
|
1
|
62983
|
"""
Define the SeriesGroupBy and DataFrameGroupBy
classes that hold the groupby interfaces (and some implementations).
These are user facing as the result of the ``df.groupby(...)`` operations,
which here returns a DataFrameGroupBy object.
"""
from collections import OrderedDict, abc, namedtuple
import copy
import functools
from functools import partial
from textwrap import dedent
import typing
from typing import Any, Callable, FrozenSet, Iterator, Sequence, Type, Union
import warnings
import numpy as np
from pandas._libs import Timestamp, lib
from pandas.compat import PY36
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.cast import maybe_convert_objects, maybe_downcast_to_dtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
is_bool,
is_datetimelike,
is_dict_like,
is_integer_dtype,
is_interval_dtype,
is_list_like,
is_numeric_dtype,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import _isna_ndarraylike, isna, notna
from pandas._typing import FrameOrSeries
import pandas.core.algorithms as algorithms
from pandas.core.base import DataError, SpecificationError
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import ABCDataFrame, ABCSeries, NDFrame, _shared_docs
from pandas.core.groupby import base
from pandas.core.groupby.groupby import (
GroupBy,
_apply_docs,
_transform_template,
groupby,
)
from pandas.core.index import Index, MultiIndex, _all_indexes_same
import pandas.core.indexes.base as ibase
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.sparse.frame import SparseDataFrame
from pandas.plotting import boxplot_frame_groupby
# (column, aggfunc) pair used by "named aggregation":
# df.groupby(...).agg(result_name=NamedAgg(column="a", aggfunc="max"))
NamedAgg = namedtuple("NamedAgg", ["column", "aggfunc"])
# TODO(typing) the return value on this callable should be any *scalar*.
AggScalar = Union[str, Callable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
ScalarResult = typing.TypeVar("ScalarResult")
def whitelist_method_generator(
    base_class: Type[GroupBy], klass: Type[FrameOrSeries], whitelist: FrozenSet[str]
) -> Iterator[str]:
    """
    Yield source code for property definitions that delegate whitelisted
    DataFrame/Series attribute names on a GroupBy subclass.

    Parameters
    ----------
    base_class : Groupby class
        base class
    klass : DataFrame or Series class
        class where members are defined.
    whitelist : frozenset
        Set of names of klass methods to be constructed

    Returns
    -------
    A sequence of strings, each suitable for exec'ing, defining a property
    of the given name that forwards to ``self.__getattr__``.  Names already
    defined on *base_class* are skipped, so explicit definitions win.
    """
    property_wrapper_template = """@property
def %(name)s(self) :
    \"""%(doc)s\"""
    return self.__getattr__('%(name)s')"""
    for name in whitelist:
        if hasattr(base_class, name):
            # Explicitly defined on the base class -- leave it alone.
            continue
        # We need the name string itself inside the generated method.
        member_doc = getattr(klass, name).__doc__
        if type(member_doc) != str:
            member_doc = ""
        yield property_wrapper_template % {"name": name, "doc": member_doc}
class NDFrameGroupBy(GroupBy):
    # Shared implementation for groupby objects whose underlying data is
    # block-based (DataFrame); DataFrameGroupBy builds on this.
    def _iterate_slices(self):
        # Yield (name, 1-D slice) pairs across the non-grouped axis.
        if self.axis == 0:
            # kludge
            if self._selection is None:
                slice_axis = self.obj.columns
            else:
                slice_axis = self._selection_list
            slicer = lambda x: self.obj[x]
        else:
            slice_axis = self.obj.index
            slicer = self.obj.xs
        for val in slice_axis:
            if val in self.exclusions:
                continue
            yield val, slicer(val)
    def _cython_agg_general(self, how, alt=None, numeric_only=True, min_count=-1):
        # Aggregate via the cython kernels, then re-wrap the result blocks.
        new_items, new_blocks = self._cython_agg_blocks(
            how, alt=alt, numeric_only=numeric_only, min_count=min_count
        )
        return self._wrap_agged_blocks(new_items, new_blocks)
    # Axis along which block-wise aggregation happens.
    _block_agg_axis = 0
    def _cython_agg_blocks(self, how, alt=None, numeric_only=True, min_count=-1):
        # TODO: the actual managing of mgr_locs is a PITA
        # here, it should happen via BlockManager.combine
        data, agg_axis = self._get_data_to_aggregate()
        if numeric_only:
            data = data.get_numeric_data(copy=False)
        new_blocks = []
        new_items = []
        deleted_items = []
        no_result = object()
        for block in data.blocks:
            # Avoid inheriting result from earlier in the loop
            result = no_result
            locs = block.mgr_locs.as_array
            try:
                result, _ = self.grouper.aggregate(
                    block.values, how, axis=agg_axis, min_count=min_count
                )
            except NotImplementedError:
                # generally if we have numeric_only=False
                # and non-applicable functions
                # try to python agg
                if alt is None:
                    # we cannot perform the operation
                    # in an alternate way, exclude the block
                    deleted_items.append(locs)
                    continue
                # call our grouper again with only this block
                obj = self.obj[data.items[locs]]
                s = groupby(obj, self.grouper)
                try:
                    result = s.aggregate(lambda x: alt(x, axis=self.axis))
                except TypeError:
                    # we may have an exception in trying to aggregate
                    # continue and exclude the block
                    deleted_items.append(locs)
                    continue
            finally:
                if result is not no_result:
                    dtype = block.values.dtype
                    # see if we can cast the block back to the original dtype
                    result = block._try_coerce_and_cast_result(result, dtype=dtype)
                    newb = block.make_block(result)
                    new_items.append(locs)
                    new_blocks.append(newb)
        if len(new_blocks) == 0:
            raise DataError("No numeric types to aggregate")
        # reset the locs in the blocks to correspond to our
        # current ordering
        indexer = np.concatenate(new_items)
        new_items = data.items.take(np.sort(indexer))
        if len(deleted_items):
            # we need to adjust the indexer to account for the
            # items we have removed
            # really should be done in internals :<
            deleted = np.concatenate(deleted_items)
            ai = np.arange(len(data))
            mask = np.zeros(len(data))
            mask[deleted] = 1
            indexer = (ai - mask.cumsum())[indexer]
        offset = 0
        for b in new_blocks:
            loc = len(b.mgr_locs)
            b.mgr_locs = indexer[offset : (offset + loc)]
            offset += loc
        return new_items, new_blocks
    def aggregate(self, func, *args, **kwargs):
        # Dispatcher for .agg(): handles named aggregation (func is None),
        # a single function, or list/dict-of-functions specifications.
        _level = kwargs.pop("_level", None)
        relabeling = func is None and _is_multi_agg_with_relabel(**kwargs)
        if relabeling:
            func, columns, order = _normalize_keyword_aggregation(kwargs)
            kwargs = {}
        elif func is None:
            # nicer error message
            raise TypeError("Must provide 'func' or tuples of " "'(column, aggfunc).")
        func = _maybe_mangle_lambdas(func)
        result, how = self._aggregate(func, _level=_level, *args, **kwargs)
        if how is None:
            return result
        if result is None:
            # grouper specific aggregations
            if self.grouper.nkeys > 1:
                return self._python_agg_general(func, *args, **kwargs)
            else:
                # try to treat as if we are passing a list
                try:
                    assert not args and not kwargs
                    result = self._aggregate_multiple_funcs(
                        [func], _level=_level, _axis=self.axis
                    )
                    result.columns = Index(
                        result.columns.levels[0], name=self._selected_obj.columns.name
                    )
                    if isinstance(self.obj, SparseDataFrame):
                        # Backwards compat for groupby.agg() with sparse
                        # values. concat no longer converts DataFrame[Sparse]
                        # to SparseDataFrame, so we do it here.
                        result = SparseDataFrame(result._data)
                except Exception:
                    result = self._aggregate_generic(func, *args, **kwargs)
        if not self.as_index:
            self._insert_inaxis_grouper_inplace(result)
            result.index = np.arange(len(result))
        if relabeling:
            result = result[order]
            result.columns = columns
        return result._convert(datetime=True)
    agg = aggregate
    def _aggregate_generic(self, func, *args, **kwargs):
        # Fallback: apply *func* group-by-group in python, falling back to
        # item-by-item aggregation when that raises.
        if self.grouper.nkeys != 1:
            raise AssertionError("Number of keys must be 1")
        axis = self.axis
        obj = self._obj_with_exclusions
        result = OrderedDict()
        if axis != obj._info_axis_number:
            try:
                for name, data in self:
                    result[name] = self._try_cast(func(data, *args, **kwargs), data)
            except Exception:
                return self._aggregate_item_by_item(func, *args, **kwargs)
        else:
            for name in self.indices:
                try:
                    data = self.get_group(name, obj=obj)
                    result[name] = self._try_cast(func(data, *args, **kwargs), data)
                except Exception:
                    wrapper = lambda x: func(x, *args, **kwargs)
                    result[name] = data.apply(wrapper, axis=axis)
        return self._wrap_generic_output(result, obj)
    def _wrap_aggregated_output(self, output, names=None):
        # Implemented by the concrete subclasses.
        raise AbstractMethodError(self)
    def _aggregate_item_by_item(self, func, *args, **kwargs):
        # only for axis==0
        obj = self._obj_with_exclusions
        result = OrderedDict()
        cannot_agg = []
        errors = None
        for item in obj:
            try:
                data = obj[item]
                colg = SeriesGroupBy(data, selection=item, grouper=self.grouper)
                cast = self._transform_should_cast(func)
                result[item] = colg.aggregate(func, *args, **kwargs)
                if cast:
                    result[item] = self._try_cast(result[item], data)
            except ValueError:
                cannot_agg.append(item)
                continue
            except TypeError as e:
                cannot_agg.append(item)
                errors = e
                continue
        result_columns = obj.columns
        if cannot_agg:
            result_columns = result_columns.drop(cannot_agg)
            # GH6337
            if not len(result_columns) and errors is not None:
                raise errors
        return DataFrame(result, columns=result_columns)
    def _decide_output_index(self, output, labels):
        # Pick the index for aggregated output: the group labels when they
        # line up, otherwise the sorted output keys.
        if len(output) == len(labels):
            output_keys = labels
        else:
            output_keys = sorted(output)
            try:
                output_keys.sort()
            except Exception:  # pragma: no cover
                pass
            if isinstance(labels, MultiIndex):
                output_keys = MultiIndex.from_tuples(output_keys, names=labels.names)
        return output_keys
    def _wrap_applied_output(self, keys, values, not_indexed_same=False):
        # Re-assemble the per-group results of .apply() into a DataFrame or
        # Series, depending on the shape/type of the pieces.
        if len(keys) == 0:
            return DataFrame(index=keys)
        key_names = self.grouper.names
        # GH12824.
        def first_not_none(values):
            # First non-None element of *values*, or None if all are None.
            try:
                return next(com._not_none(*values))
            except StopIteration:
                return None
        v = first_not_none(values)
        if v is None:
            # GH9684. If all values are None, then this will throw an error.
            # We'd prefer it return an empty dataframe.
            return DataFrame()
        elif isinstance(v, DataFrame):
            return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
        elif self.grouper.groupings is not None:
            if len(self.grouper.groupings) > 1:
                key_index = self.grouper.result_index
            else:
                ping = self.grouper.groupings[0]
                if len(keys) == ping.ngroups:
                    key_index = ping.group_index
                    key_index.name = key_names[0]
                    key_lookup = Index(keys)
                    indexer = key_lookup.get_indexer(key_index)
                    # reorder the values
                    values = [values[i] for i in indexer]
                else:
                    key_index = Index(keys, name=key_names[0])
                # don't use the key indexer
                if not self.as_index:
                    key_index = None
            # make Nones an empty object
            v = first_not_none(values)
            if v is None:
                return DataFrame()
            elif isinstance(v, NDFrame):
                values = [
                    x if x is not None else v._constructor(**v._construct_axes_dict())
                    for x in values
                ]
            v = values[0]
            if isinstance(v, (np.ndarray, Index, Series)):
                if isinstance(v, Series):
                    applied_index = self._selected_obj._get_axis(self.axis)
                    all_indexed_same = _all_indexes_same([x.index for x in values])
                    singular_series = len(values) == 1 and applied_index.nlevels == 1
                    # GH3596
                    # provide a reduction (Frame -> Series) if groups are
                    # unique
                    if self.squeeze:
                        # assign the name to this series
                        if singular_series:
                            values[0].name = keys[0]
                            # GH2893
                            # we have series in the values array, we want to
                            # produce a series:
                            # if any of the sub-series are not indexed the same
                            # OR we don't have a multi-index and we have only a
                            # single values
                            return self._concat_objects(
                                keys, values, not_indexed_same=not_indexed_same
                            )
                        # still a series
                        # path added as of GH 5545
                        elif all_indexed_same:
                            from pandas.core.reshape.concat import concat
                            return concat(values)
                    if not all_indexed_same:
                        # GH 8467
                        return self._concat_objects(keys, values, not_indexed_same=True)
                try:
                    if self.axis == 0:
                        # GH6124 if the list of Series have a consistent name,
                        # then propagate that name to the result.
                        index = v.index.copy()
                        if index.name is None:
                            # Only propagate the series name to the result
                            # if all series have a consistent name. If the
                            # series do not have a consistent name, do
                            # nothing.
                            names = {v.name for v in values}
                            if len(names) == 1:
                                index.name = list(names)[0]
                        # normally use vstack as its faster than concat
                        # and if we have mi-columns
                        if (
                            isinstance(v.index, MultiIndex)
                            or key_index is None
                            or isinstance(key_index, MultiIndex)
                        ):
                            stacked_values = np.vstack([np.asarray(v) for v in values])
                            result = DataFrame(
                                stacked_values, index=key_index, columns=index
                            )
                        else:
                            # GH5788 instead of stacking; concat gets the
                            # dtypes correct
                            from pandas.core.reshape.concat import concat
                            result = concat(
                                values,
                                keys=key_index,
                                names=key_index.names,
                                axis=self.axis,
                            ).unstack()
                            result.columns = index
                    else:
                        stacked_values = np.vstack([np.asarray(v) for v in values])
                        result = DataFrame(
                            stacked_values.T, index=v.index, columns=key_index
                        )
                except (ValueError, AttributeError):
                    # GH1738: values is list of arrays of unequal lengths fall
                    # through to the outer else caluse
                    return Series(values, index=key_index, name=self._selection_name)
                # if we have date/time like in the original, then coerce dates
                # as we are stacking can easily have object dtypes here
                so = self._selected_obj
                if so.ndim == 2 and so.dtypes.apply(is_datetimelike).any():
                    result = _recast_datetimelike_result(result)
                else:
                    result = result._convert(datetime=True)
                return self._reindex_output(result)
            # values are not series or array-like but scalars
            else:
                # only coerce dates if we find at least 1 datetime
                coerce = any(isinstance(x, Timestamp) for x in values)
                # self._selection_name not passed through to Series as the
                # result should not take the name of original selection
                # of columns
                return Series(values, index=key_index)._convert(
                    datetime=True, coerce=coerce
                )
        else:
            # Handle cases like BinGrouper
            return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
    def _transform_general(self, func, *args, **kwargs):
        # Generic (python-level) transform: apply *func* per group and
        # concatenate the pieces back in the original order.
        from pandas.core.reshape.concat import concat
        applied = []
        obj = self._obj_with_exclusions
        gen = self.grouper.get_iterator(obj, axis=self.axis)
        fast_path, slow_path = self._define_paths(func, *args, **kwargs)
        path = None
        for name, group in gen:
            object.__setattr__(group, "name", name)
            if path is None:
                # Try slow path and fast path.
                try:
                    path, res = self._choose_path(fast_path, slow_path, group)
                except TypeError:
                    return self._transform_item_by_item(obj, fast_path)
                except ValueError:
                    msg = "transform must return a scalar value for each group"
                    raise ValueError(msg)
            else:
                res = path(group)
            if isinstance(res, Series):
                # we need to broadcast across the
                # other dimension; this will preserve dtypes
                # GH14457
                if not np.prod(group.shape):
                    continue
                elif res.index.is_(obj.index):
                    r = concat([res] * len(group.columns), axis=1)
                    r.columns = group.columns
                    r.index = group.index
                else:
                    r = DataFrame(
                        np.concatenate([res.values] * len(group.index)).reshape(
                            group.shape
                        ),
                        columns=group.columns,
                        index=group.index,
                    )
                applied.append(r)
            else:
                applied.append(res)
        concat_index = obj.columns if self.axis == 0 else obj.index
        other_axis = 1 if self.axis == 0 else 0  # switches between 0 & 1
        concatenated = concat(applied, axis=self.axis, verify_integrity=False)
        concatenated = concatenated.reindex(concat_index, axis=other_axis, copy=False)
        return self._set_result_index_ordered(concatenated)
    @Substitution(klass="DataFrame", selected="")
    @Appender(_transform_template)
    def transform(self, func, *args, **kwargs):
        # optimized transforms
        func = self._get_cython_func(func) or func
        if isinstance(func, str):
            if not (func in base.transform_kernel_whitelist):
                msg = "'{func}' is not a valid function name for transform(name)"
                raise ValueError(msg.format(func=func))
            if func in base.cythonized_kernels:
                # cythonized transformation or canned "reduction+broadcast"
                return getattr(self, func)(*args, **kwargs)
            else:
                # If func is a reduction, we need to broadcast the
                # result to the whole group. Compute func result
                # and deal with possible broadcasting below.
                result = getattr(self, func)(*args, **kwargs)
        else:
            return self._transform_general(func, *args, **kwargs)
        # a reduction transform
        if not isinstance(result, DataFrame):
            return self._transform_general(func, *args, **kwargs)
        obj = self._obj_with_exclusions
        # nuisance columns
        if not result.columns.equals(obj.columns):
            return self._transform_general(func, *args, **kwargs)
        return self._transform_fast(result, obj, func)
    def _transform_fast(self, result, obj, func_nm):
        """
        Fast transform path for aggregations
        """
        # if there were groups with no observations (Categorical only?)
        # try casting data to original dtype
        cast = self._transform_should_cast(func_nm)
        # for each col, reshape to to size of original frame
        # by take operation
        ids, _, ngroup = self.grouper.group_info
        output = []
        for i, _ in enumerate(result.columns):
            res = algorithms.take_1d(result.iloc[:, i].values, ids)
            if cast:
                res = self._try_cast(res, obj.iloc[:, i])
            output.append(res)
        return DataFrame._from_arrays(output, columns=result.columns, index=obj.index)
    def _define_paths(self, func, *args, **kwargs):
        # Build the two candidate transform callables: "fast" applies func
        # to the whole group, "slow" goes through DataFrame.apply.
        if isinstance(func, str):
            fast_path = lambda group: getattr(group, func)(*args, **kwargs)
            slow_path = lambda group: group.apply(
                lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis
            )
        else:
            fast_path = lambda group: func(group, *args, **kwargs)
            slow_path = lambda group: group.apply(
                lambda x: func(x, *args, **kwargs), axis=self.axis
            )
        return fast_path, slow_path
    def _choose_path(self, fast_path, slow_path, group):
        # Evaluate the slow path, then switch to the fast path only when it
        # demonstrably produces the same result on this group.
        path = slow_path
        res = slow_path(group)
        # if we make it here, test if we can use the fast path
        try:
            res_fast = fast_path(group)
            # verify fast path does not change columns (and names), otherwise
            # its results cannot be joined with those of the slow path
            if res_fast.columns != group.columns:
                return path, res
            # verify numerical equality with the slow path
            if res.shape == res_fast.shape:
                res_r = res.values.ravel()
                res_fast_r = res_fast.values.ravel()
                mask = notna(res_r)
                if (res_r[mask] == res_fast_r[mask]).all():
                    path = fast_path
        except Exception:
            pass
        return path, res
    def _transform_item_by_item(self, obj, wrapper):
        # iterate through columns
        output = {}
        inds = []
        for i, col in enumerate(obj):
            try:
                output[col] = self[col].transform(wrapper)
                inds.append(i)
            except Exception:
                pass
        if len(output) == 0:  # pragma: no cover
            raise TypeError("Transform function invalid for data types")
        columns = obj.columns
        if len(output) < len(obj.columns):
            columns = columns.take(inds)
        return DataFrame(output, index=obj.index, columns=columns)
    def filter(self, func, dropna=True, *args, **kwargs):  # noqa
        """
        Return a copy of a DataFrame excluding elements from groups that
        do not satisfy the boolean criterion specified by func.
        Parameters
        ----------
        f : function
            Function to apply to each subframe. Should return True or False.
        dropna : Drop groups that do not pass the filter. True by default;
            if False, groups that evaluate False are filled with NaNs.
        Returns
        -------
        filtered : DataFrame
        Notes
        -----
        Each subframe is endowed the attribute 'name' in case you need to know
        which group you are working on.
        Examples
        --------
        >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
        ...                           'foo', 'bar'],
        ...                    'B' : [1, 2, 3, 4, 5, 6],
        ...                    'C' : [2.0, 5., 8., 1., 2., 9.]})
        >>> grouped = df.groupby('A')
        >>> grouped.filter(lambda x: x['B'].mean() > 3.)
             A  B    C
        1  bar  2  5.0
        3  bar  4  1.0
        5  bar  6  9.0
        """
        indices = []
        obj = self._selected_obj
        gen = self.grouper.get_iterator(obj, axis=self.axis)
        for name, group in gen:
            object.__setattr__(group, "name", name)
            res = func(group, *args, **kwargs)
            try:
                res = res.squeeze()
            except AttributeError:  # allow e.g., scalars and frames to pass
                pass
            # interpret the result of the filter
            if is_bool(res) or (is_scalar(res) and isna(res)):
                if res and notna(res):
                    indices.append(self._get_index(name))
            else:
                # non scalars aren't allowed
                raise TypeError(
                    "filter function returned a %s, "
                    "but expected a scalar bool" % type(res).__name__
                )
        return self._apply_filter(indices, dropna)
class SeriesGroupBy(GroupBy):
#
# Make class defs of attributes on SeriesGroupBy whitelist
_apply_whitelist = base.series_apply_whitelist
for _def_str in whitelist_method_generator(GroupBy, Series, _apply_whitelist):
exec(_def_str)
    # Name to use for aggregated output: the current selection if any,
    # otherwise the Series' own name.
    @property
    def _selection_name(self):
        """
        since we are a series, we by definition only have
        a single name, but may be the result of a selection or
        the name of our object
        """
        if self._selection is None:
            return self.obj.name
        else:
            return self._selection
_agg_see_also_doc = dedent(
"""
See Also
--------
pandas.Series.groupby.apply
pandas.Series.groupby.transform
pandas.Series.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.groupby([1, 1, 2, 2]).min()
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg('min')
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])
min max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.groupby([1, 1, 2, 2]).agg(
... minimum='min',
... maximum='max',
... )
minimum maximum
1 1 2
2 3 4
"""
)
    @Appender(
        _apply_docs["template"].format(
            input="series", examples=_apply_docs["series_examples"]
        )
    )
    def apply(self, func, *args, **kwargs):
        # Docstring is injected from the shared _apply_docs template above.
        return super().apply(func, *args, **kwargs)
    @Substitution(
        see_also=_agg_see_also_doc,
        examples=_agg_examples_doc,
        versionadded="",
        klass="Series",
        axis="",
    )
    @Appender(_shared_docs["aggregate"])
    def aggregate(self, func_or_funcs=None, *args, **kwargs):
        # Dispatches on the type of ``func_or_funcs``:
        #   None      -> "named aggregation" via **kwargs
        #   str       -> named method on self (e.g. "mean")
        #   iterable  -> multiple aggregations, one column per func
        #   callable  -> python-level aggregation with cython fast path
        _level = kwargs.pop("_level", None)
        relabeling = func_or_funcs is None
        columns = None
        no_arg_message = (
            "Must provide 'func_or_funcs' or named " "aggregation **kwargs."
        )
        if relabeling:
            columns = list(kwargs)
            if not PY36:
                # sort for 3.5 and earlier
                columns = list(sorted(columns))
            func_or_funcs = [kwargs[col] for col in columns]
            kwargs = {}
            if not columns:
                raise TypeError(no_arg_message)
        if isinstance(func_or_funcs, str):
            return getattr(self, func_or_funcs)(*args, **kwargs)
        if isinstance(func_or_funcs, abc.Iterable):
            # Catch instances of lists / tuples
            # but not the class list / tuple itself.
            func_or_funcs = _maybe_mangle_lambdas(func_or_funcs)
            ret = self._aggregate_multiple_funcs(func_or_funcs, (_level or 0) + 1)
            if relabeling:
                ret.columns = columns
        else:
            # Prefer the cython-implemented method when no extra args given.
            cyfunc = self._get_cython_func(func_or_funcs)
            if cyfunc and not args and not kwargs:
                return getattr(self, cyfunc)()
            if self.grouper.nkeys > 1:
                return self._python_agg_general(func_or_funcs, *args, **kwargs)
            try:
                return self._python_agg_general(func_or_funcs, *args, **kwargs)
            except Exception:
                # Fall back to a per-group python loop when the general
                # path fails (e.g. func returns non-castable objects).
                result = self._aggregate_named(func_or_funcs, *args, **kwargs)
            index = Index(sorted(result), name=self.grouper.names[0])
            ret = Series(result, index=index)
        if not self.as_index:  # pragma: no cover
            print("Warning, ignoring as_index=True")
        # _level handled at higher
        if not _level and isinstance(ret, dict):
            from pandas import concat
            ret = concat(ret, axis=1)
        return ret

    agg = aggregate
    def _aggregate_multiple_funcs(self, arg, _level):
        """Aggregate with several functions, producing one column per func.

        ``arg`` may be a dict (deprecated for Series), a list of
        (name, func) pairs, or a plain list of functions/names.
        """
        if isinstance(arg, dict):
            # show the deprecation, but only if we
            # have not shown a higher level one
            # GH 15931
            if isinstance(self._selected_obj, Series) and _level <= 1:
                msg = dedent(
                    """\
                using a dict on a Series for aggregation
                is deprecated and will be removed in a future version. Use \
                named aggregation instead.

                    >>> grouper.agg(name_1=func_1, name_2=func_2)
                """
                )
                warnings.warn(msg, FutureWarning, stacklevel=3)
            columns = list(arg.keys())
            arg = arg.items()
        elif any(isinstance(x, (tuple, list)) for x in arg):
            arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg]
            # indicated column order
            columns = next(zip(*arg))
        else:
            # list of functions / function names
            columns = []
            for f in arg:
                columns.append(com.get_callable_name(f) or f)
            arg = zip(columns, arg)
        results = OrderedDict()
        for name, func in arg:
            obj = self
            if name in results:
                raise SpecificationError(
                    "Function names must be unique, found multiple named "
                    "{}".format(name)
                )
            # reset the cache so that we
            # only include the named selection
            if name in self._selected_obj:
                obj = copy.copy(obj)
                obj._reset_cache()
                obj._selection = name
            results[name] = obj.aggregate(func)
        if any(isinstance(x, DataFrame) for x in results.values()):
            # let higher level handle
            if _level:
                return results
        return DataFrame(results, columns=columns)
def _wrap_output(self, output, index, names=None):
""" common agg/transform wrapping logic """
output = output[self._selection_name]
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self._selection_name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_aggregated_output(self, output, names=None):
result = self._wrap_output(
output=output, index=self.grouper.result_index, names=names
)
return self._reindex_output(result)._convert(datetime=True)
def _wrap_transformed_output(self, output, names=None):
return self._wrap_output(output=output, index=self.obj.index, names=names)
    def _wrap_applied_output(self, keys, values, not_indexed_same=False):
        """Box the per-group results of ``apply`` into a Series/DataFrame.

        The result shape depends on what the applied function returned per
        group: dicts stack into a Series with a MultiIndex, Series/DataFrames
        are concatenated, scalars become a plain Series.
        """
        if len(keys) == 0:
            # GH #6265
            return Series([], name=self._selection_name, index=keys)

        def _get_index():
            # Group keys become the result index (MultiIndex for >1 key).
            if self.grouper.nkeys > 1:
                index = MultiIndex.from_tuples(keys, names=self.grouper.names)
            else:
                index = Index(keys, name=self.grouper.names[0])
            return index

        if isinstance(values[0], dict):
            # GH #823 #24880
            index = _get_index()
            result = self._reindex_output(DataFrame(values, index=index))
            # if self.observed is False,
            # keep all-NaN rows created while re-indexing
            result = result.stack(dropna=self.observed)
            result.name = self._selection_name
            return result
        if isinstance(values[0], Series):
            return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
        elif isinstance(values[0], DataFrame):
            # possible that Series -> DataFrame by applied function
            return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
        else:
            # GH #6265 #24880
            result = Series(data=values, index=_get_index(), name=self._selection_name)
            return self._reindex_output(result)
def _aggregate_named(self, func, *args, **kwargs):
result = OrderedDict()
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception("Must produce aggregated value")
result[name] = self._try_cast(output, group)
return result
    @Substitution(klass="Series", selected="A.")
    @Appender(_transform_template)
    def transform(self, func, *args, **kwargs):
        # Docstring injected from _transform_template.
        # Fast paths for string function names; otherwise apply the UDF
        # per group and stitch results back in original row order.
        func = self._get_cython_func(func) or func
        if isinstance(func, str):
            if not (func in base.transform_kernel_whitelist):
                msg = "'{func}' is not a valid function name for transform(name)"
                raise ValueError(msg.format(func=func))
            if func in base.cythonized_kernels:
                # cythonized transform or canned "agg+broadcast"
                return getattr(self, func)(*args, **kwargs)
            else:
                # If func is a reduction, we need to broadcast the
                # result to the whole group. Compute func result
                # and deal with possible broadcasting below.
                return self._transform_fast(
                    lambda: getattr(self, func)(*args, **kwargs), func
                )
        # reg transform
        klass = self._selected_obj.__class__
        results = []
        wrapper = lambda x: func(x, *args, **kwargs)
        for name, group in self:
            # Expose the group key to the UDF via `.name`.
            object.__setattr__(group, "name", name)
            res = wrapper(group)
            if isinstance(res, (ABCDataFrame, ABCSeries)):
                res = res._values
            # Re-key each group's result by its original row positions.
            indexer = self._get_index(name)
            s = klass(res, indexer)
            results.append(s)
        # check for empty "results" to avoid concat ValueError
        if results:
            from pandas.core.reshape.concat import concat
            result = concat(results).sort_index()
        else:
            result = Series()
        # we will only try to coerce the result type if
        # we have a numeric dtype, as these are *always* udfs
        # the cython take a different path (and casting)
        dtype = self._selected_obj.dtype
        if is_numeric_dtype(dtype):
            result = maybe_downcast_to_dtype(result, dtype)
        result.name = self._selected_obj.name
        result.index = self._selected_obj.index
        return result
    def _transform_fast(self, func, func_nm):
        """
        fast version of transform, only applicable to
        builtin/cythonizable functions

        ``func`` computes one aggregated value per group; the result is
        broadcast back to the original rows via the group ids.
        """
        if isinstance(func, str):
            func = getattr(self, func)
        ids, _, ngroup = self.grouper.group_info
        cast = self._transform_should_cast(func_nm)
        # take_1d broadcasts the per-group aggregate to every member row.
        out = algorithms.take_1d(func()._values, ids)
        if cast:
            out = self._try_cast(out, self.obj)
        return Series(out, index=self.obj.index, name=self.obj.name)
    def filter(self, func, dropna=True, *args, **kwargs):  # noqa
        """
        Return a copy of a Series excluding elements from groups that
        do not satisfy the boolean criterion specified by func.

        Parameters
        ----------
        func : function
            To apply to each group. Should return True or False.
        dropna : Drop groups that do not pass the filter. True by default;
            if False, groups that evaluate False are filled with NaNs.

        Examples
        --------
        >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
        ...                           'foo', 'bar'],
        ...                    'B' : [1, 2, 3, 4, 5, 6],
        ...                    'C' : [2.0, 5., 8., 1., 2., 9.]})
        >>> grouped = df.groupby('A')
        >>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
        1    2
        3    4
        5    6
        Name: B, dtype: int64

        Returns
        -------
        filtered : Series
        """
        if isinstance(func, str):
            wrapper = lambda x: getattr(x, func)(*args, **kwargs)
        else:
            wrapper = lambda x: func(x, *args, **kwargs)

        # Interpret np.nan as False.
        def true_and_notna(x, *args, **kwargs):
            b = wrapper(x, *args, **kwargs)
            return b and notna(b)

        try:
            indices = [
                self._get_index(name) for name, group in self if true_and_notna(group)
            ]
        except ValueError:
            raise TypeError("the filter must return a boolean result")
        except TypeError:
            raise TypeError("the filter must return a boolean result")
        filtered = self._apply_filter(indices, dropna)
        return filtered
    def nunique(self, dropna=True):
        """
        Return number of unique elements in the group.

        Returns
        -------
        Series
            Number of unique values within each group.
        """
        ids, _, _ = self.grouper.group_info
        val = self.obj._internal_get_values()
        try:
            # Sort by (group id, value) so duplicates become adjacent.
            sorter = np.lexsort((val, ids))
        except TypeError:  # catches object dtypes
            msg = "val.dtype must be object, got {}".format(val.dtype)
            assert val.dtype == object, msg
            # Unsortable object values: factorize to integer codes first.
            val, _ = algorithms.factorize(val, sort=False)
            sorter = np.lexsort((val, ids))
            _isna = lambda a: a == -1
        else:
            _isna = isna
        ids, val = ids[sorter], val[sorter]
        # group boundaries are where group ids change
        # unique observations are where sorted values change
        idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
        inc = np.r_[1, val[1:] != val[:-1]]
        # 1st item of each group is a new unique observation
        mask = _isna(val)
        if dropna:
            inc[idx] = 1
            inc[mask] = 0
        else:
            inc[mask & np.r_[False, mask[:-1]]] = 0
            inc[idx] = 1
        # Sum the "new unique value" flags per group.
        out = np.add.reduceat(inc, idx).astype("int64", copy=False)
        if len(ids):
            # NaN/NaT group exists if the head of ids is -1,
            # so remove it from res and exclude its index from idx
            if ids[0] == -1:
                res = out[1:]
                idx = idx[np.flatnonzero(idx)]
            else:
                res = out
        else:
            res = out[1:]
        ri = self.grouper.result_index
        # we might have duplications among the bins
        if len(res) != len(ri):
            res, out = np.zeros(len(ri), dtype=out.dtype), res
            res[ids[idx]] = out
        return Series(res, index=ri, name=self._selection_name)
@Appender(Series.describe.__doc__)
def describe(self, **kwargs):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
    def value_counts(
        self, normalize=False, sort=True, ascending=False, bins=None, dropna=True
    ):
        """Per-group value counts, optionally binned and/or normalized.

        Implemented with a single lexsort over (value, group id) plus
        vectorized boundary detection, instead of a per-group python loop.
        """
        from pandas.core.reshape.tile import cut
        from pandas.core.reshape.merge import _get_join_indexers
        if bins is not None and not np.iterable(bins):
            # scalar bins cannot be done at top level
            # in a backward compatible way
            return self.apply(
                Series.value_counts,
                normalize=normalize,
                sort=sort,
                ascending=ascending,
                bins=bins,
            )
        ids, _, _ = self.grouper.group_info
        val = self.obj._internal_get_values()
        # groupby removes null keys from groupings
        mask = ids != -1
        ids, val = ids[mask], val[mask]
        if bins is None:
            lab, lev = algorithms.factorize(val, sort=True)
            llab = lambda lab, inc: lab[inc]
        else:
            # lab is a Categorical with categories an IntervalIndex
            lab = cut(Series(val), bins, include_lowest=True)
            lev = lab.cat.categories
            lab = lev.take(lab.cat.codes)
            llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
        if is_interval_dtype(lab):
            # TODO: should we do this inside II?
            sorter = np.lexsort((lab.left, lab.right, ids))
        else:
            sorter = np.lexsort((lab, ids))
        ids, lab = ids[sorter], lab[sorter]
        # group boundaries are where group ids change
        idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
        # new values are where sorted labels change
        lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
        inc = np.r_[True, lchanges]
        inc[idx] = True  # group boundaries are also new values
        out = np.diff(np.nonzero(np.r_[inc, True])[0])  # value counts
        # num. of times each group should be repeated
        rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
        # multi-index components
        labels = list(map(rep, self.grouper.recons_labels)) + [llab(lab, inc)]
        levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
        names = self.grouper.names + [self._selection_name]
        if dropna:
            mask = labels[-1] != -1
            if mask.all():
                dropna = False
            else:
                out, labels = out[mask], [label[mask] for label in labels]
        if normalize:
            # Divide each count by its group's (possibly NaN-adjusted) size.
            out = out.astype("float")
            d = np.diff(np.r_[idx, len(ids)])
            if dropna:
                m = ids[lab == -1]
                np.add.at(d, m, -1)
                acc = rep(d)[mask]
            else:
                acc = rep(d)
            out /= acc
        if sort and bins is None:
            cat = ids[inc][mask] if dropna else ids[inc]
            sorter = np.lexsort((out if ascending else -out, cat))
            out, labels[-1] = out[sorter], labels[-1][sorter]
        if bins is None:
            mi = MultiIndex(
                levels=levels, codes=labels, names=names, verify_integrity=False
            )
            if is_integer_dtype(out):
                out = ensure_int64(out)
            return Series(out, index=mi, name=self._selection_name)
        # for compat. with libgroupby.value_counts need to ensure every
        # bin is present at every index level, null filled with zeros
        diff = np.zeros(len(out), dtype="bool")
        for lab in labels[:-1]:
            diff |= np.r_[True, lab[1:] != lab[:-1]]
        ncat, nbin = diff.sum(), len(levels[-1])
        left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
        right = [diff.cumsum() - 1, labels[-1]]
        # Left-join the full (group, bin) grid against observed counts.
        _, idx = _get_join_indexers(left, right, sort=False, how="left")
        out = np.where(idx != -1, out[idx], 0)
        if sort:
            sorter = np.lexsort((out if ascending else -out, left[0]))
            out, left[-1] = out[sorter], left[-1][sorter]
        # build the multi-index w/ full levels
        codes = list(map(lambda lab: np.repeat(lab[diff], nbin), labels[:-1]))
        codes.append(left[-1])
        mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
        if is_integer_dtype(out):
            out = ensure_int64(out)
        return Series(out, index=mi, name=self._selection_name)
def count(self):
"""
Compute count of group, excluding missing values.
Returns
-------
Series
Count of values within each group.
"""
ids, _, ngroups = self.grouper.group_info
val = self.obj._internal_get_values()
mask = (ids != -1) & ~isna(val)
ids = ensure_platform_int(ids)
minlength = ngroups or 0
out = np.bincount(ids[mask], minlength=minlength)
return Series(
out,
index=self.grouper.result_index,
name=self._selection_name,
dtype="int64",
)
    def _apply_to_column_groupbys(self, func):
        """ return a pass thru """
        # A SeriesGroupBy is single-column, so it is its own only "column".
        return func(self)
    def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None):
        """Calculate pct_change of each value to previous entry in group"""
        # TODO: Remove this conditional when #23918 is fixed
        if freq:
            # freq-aware path must go through apply for correctness.
            return self.apply(
                lambda x: x.pct_change(
                    periods=periods, fill_method=fill_method, limit=limit, freq=freq
                )
            )
        # Fast path: fill, shift within groups, then take the ratio.
        filled = getattr(self, fill_method)(limit=limit)
        fill_grp = filled.groupby(self.grouper.labels)
        shifted = fill_grp.shift(periods=periods, freq=freq)
        return (filled / shifted) - 1
class DataFrameGroupBy(NDFrameGroupBy):
    _apply_whitelist = base.dataframe_apply_whitelist
    #
    # Make class defs of attributes on DataFrameGroupBy whitelist.
    # Dynamically generated pass-throughs, exec'd at class-definition time.
    for _def_str in whitelist_method_generator(GroupBy, DataFrame, _apply_whitelist):
        exec(_def_str)
    # Blocks are aggregated along axis 1 (the per-block values axis).
    _block_agg_axis = 1
_agg_see_also_doc = dedent(
"""
See Also
--------
pandas.DataFrame.groupby.apply
pandas.DataFrame.groupby.transform
pandas.DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 2],
... 'B': [1, 2, 3, 4],
... 'C': np.random.randn(4)})
>>> df
A B C
0 1 1 0.362838
1 1 2 0.227877
2 2 3 1.267767
3 2 4 -0.562860
The aggregation is for each column.
>>> df.groupby('A').agg('min')
B C
A
1 1 0.227877
2 3 -0.562860
Multiple aggregations
>>> df.groupby('A').agg(['min', 'max'])
B C
min max min max
A
1 1 2 0.227877 0.362838
2 3 4 -0.562860 1.267767
Select a column for aggregation
>>> df.groupby('A').B.agg(['min', 'max'])
min max
A
1 1 2
2 3 4
Different aggregations per column
>>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'})
B C
min max sum
A
1 1 2 0.590716
2 3 4 0.704907
To control the output names with different aggregations per column,
pandas supports "named aggregation"
>>> df.groupby("A").agg(
... b_min=pd.NamedAgg(column="B", aggfunc="min"),
... c_sum=pd.NamedAgg(column="C", aggfunc="sum"))
b_min c_sum
A
1 1 -1.956929
2 3 -0.322183
- The keywords are the *output* column names
- The values are tuples whose first element is the column to select
and the second element is the aggregation to apply to that column.
Pandas provides the ``pandas.NamedAgg`` namedtuple with the fields
``['column', 'aggfunc']`` to make it clearer what the arguments are.
As usual, the aggregation can be a callable or a string alias.
See :ref:`groupby.aggregate.named` for more.
"""
)
    @Substitution(
        see_also=_agg_see_also_doc,
        examples=_agg_examples_doc,
        versionadded="",
        klass="DataFrame",
        axis="",
    )
    @Appender(_shared_docs["aggregate"])
    def aggregate(self, arg=None, *args, **kwargs):
        # Docstring injected from _shared_docs; logic lives in NDFrameGroupBy.
        return super().aggregate(arg, *args, **kwargs)

    agg = aggregate
    def _gotitem(self, key, ndim, subset=None):
        """
        sub-classes to define
        return a sliced object

        Parameters
        ----------
        key : string / list of selections
        ndim : 1,2
            requested ndim of result
        subset : object, default None
            subset to act on
        """
        if ndim == 2:
            if subset is None:
                subset = self.obj
            # 2-D selection stays a DataFrameGroupBy with the same grouping.
            return DataFrameGroupBy(
                subset,
                self.grouper,
                selection=key,
                grouper=self.grouper,
                exclusions=self.exclusions,
                as_index=self.as_index,
                observed=self.observed,
            )
        elif ndim == 1:
            if subset is None:
                subset = self.obj[key]
            # 1-D selection demotes to a SeriesGroupBy on that column.
            return SeriesGroupBy(
                subset, selection=key, grouper=self.grouper, observed=self.observed
            )
        raise AssertionError("invalid ndim for _gotitem")
def _wrap_generic_output(self, result, obj):
result_index = self.grouper.levels[0]
if self.axis == 0:
return DataFrame(result, index=obj.columns, columns=result_index).T
else:
return DataFrame(result, index=obj.index, columns=result_index)
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._data, 1
else:
return obj._data, 1
    def _insert_inaxis_grouper_inplace(self, result):
        """Insert in-axis group key columns into ``result`` (mutates it).

        Used for ``as_index=False`` so key columns appear as regular data.
        """
        # zip in reverse so we can always insert at loc 0
        izip = zip(
            *map(
                reversed,
                (
                    self.grouper.names,
                    self.grouper.get_group_levels(),
                    [grp.in_axis for grp in self.grouper.groupings],
                ),
            )
        )
        for name, lev, in_axis in izip:
            # Only keys that came from the frame itself are re-inserted.
            if in_axis:
                result.insert(0, name, lev)
    def _wrap_aggregated_output(self, output, names=None):
        """Box aggregation output into a DataFrame honoring ``as_index``."""
        agg_axis = 0 if self.axis == 1 else 1
        agg_labels = self._obj_with_exclusions._get_axis(agg_axis)
        output_keys = self._decide_output_index(output, agg_labels)
        if not self.as_index:
            # Group keys become ordinary columns instead of the index.
            result = DataFrame(output, columns=output_keys)
            self._insert_inaxis_grouper_inplace(result)
            result = result._consolidate()
        else:
            index = self.grouper.result_index
            result = DataFrame(output, index=index, columns=output_keys)
        if self.axis == 1:
            result = result.T
        return self._reindex_output(result)._convert(datetime=True)
    def _wrap_transformed_output(self, output, names=None):
        # Transforms keep the original row index; columns come from output.
        return DataFrame(output, index=self.obj.index)
    def _wrap_agged_blocks(self, items, blocks):
        """Rebuild a DataFrame from per-block aggregation results."""
        if not self.as_index:
            # Default integer index; keys are inserted as columns below.
            index = np.arange(blocks[0].values.shape[-1])
            mgr = BlockManager(blocks, [items, index])
            result = DataFrame(mgr)
            self._insert_inaxis_grouper_inplace(result)
            result = result._consolidate()
        else:
            index = self.grouper.result_index
            mgr = BlockManager(blocks, [items, index])
            result = DataFrame(mgr)
        if self.axis == 1:
            result = result.T
        return self._reindex_output(result)._convert(datetime=True)
def _iterate_column_groupbys(self):
for i, colname in enumerate(self._selected_obj.columns):
yield colname, SeriesGroupBy(
self._selected_obj.iloc[:, i],
selection=colname,
grouper=self.grouper,
exclusions=self.exclusions,
)
def _apply_to_column_groupbys(self, func):
from pandas.core.reshape.concat import concat
return concat(
(func(col_groupby) for _, col_groupby in self._iterate_column_groupbys()),
keys=self._selected_obj.columns,
axis=1,
)
    def count(self):
        """
        Compute count of group, excluding missing values.

        Returns
        -------
        DataFrame
            Count of values within each group.
        """
        data, _ = self._get_data_to_aggregate()
        ids, _, ngroups = self.grouper.group_info
        # Rows assigned to a real group (null keys get id -1).
        mask = ids != -1
        # Per-block boolean arrays of valid (grouped and non-missing) cells.
        val = (
            (mask & ~_isna_ndarraylike(np.atleast_2d(blk.get_values())))
            for blk in data.blocks
        )
        loc = (blk.mgr_locs for blk in data.blocks)
        counter = partial(lib.count_level_2d, labels=ids, max_bin=ngroups, axis=1)
        blk = map(make_block, map(counter, val), loc)
        return self._wrap_agged_blocks(data.items, list(blk))
    def nunique(self, dropna=True):
        """
        Return DataFrame with number of distinct observations per group for
        each column.

        .. versionadded:: 0.20.0

        Parameters
        ----------
        dropna : boolean, default True
            Don't include NaN in the counts.

        Returns
        -------
        nunique: DataFrame

        Examples
        --------
        >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
        ...                           'ham', 'ham'],
        ...                    'value1': [1, 5, 5, 2, 5, 5],
        ...                    'value2': list('abbaxy')})
        >>> df
             id  value1 value2
        0  spam       1      a
        1   egg       5      b
        2   egg       5      b
        3  spam       2      a
        4   ham       5      x
        5   ham       5      y

        >>> df.groupby('id').nunique()
              id  value1  value2
        id
        egg    1       1       1
        ham    1       1       2
        spam   1       2       1

        Check for rows with the same id but conflicting values:

        >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
             id  value1 value2
        0  spam       1      a
        3  spam       2      a
        4   ham       5      x
        5   ham       5      y
        """
        obj = self._selected_obj

        def groupby_series(obj, col=None):
            # Delegate the per-column work to SeriesGroupBy.nunique.
            return SeriesGroupBy(obj, selection=col, grouper=self.grouper).nunique(
                dropna=dropna
            )

        if isinstance(obj, Series):
            results = groupby_series(obj)
        else:
            from pandas.core.reshape.concat import concat

            results = [groupby_series(obj[col], col) for col in obj.columns]
            results = concat(results, axis=1)
            results.columns.names = obj.columns.names
        if not self.as_index:
            results.index = ibase.default_index(len(results))
        return results

    # Plotting pass-through defined in pandas.plotting.
    boxplot = boxplot_frame_groupby
def _is_multi_agg_with_relabel(**kwargs):
"""
Check whether kwargs passed to .agg look like multi-agg with relabeling.
Parameters
----------
**kwargs : dict
Returns
-------
bool
Examples
--------
>>> _is_multi_agg_with_relabel(a='max')
False
>>> _is_multi_agg_with_relabel(a_max=('a', 'max'),
... a_min=('a', 'min'))
True
>>> _is_multi_agg_with_relabel()
False
"""
return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values()) and kwargs
def _normalize_keyword_aggregation(kwargs):
    """
    Normalize user-provided "named aggregation" kwargs.

    Transforms from the new ``Dict[str, NamedAgg]`` style kwargs
    to the old OrderedDict[str, List[scalar]]].

    Parameters
    ----------
    kwargs : dict

    Returns
    -------
    aggspec : dict
        The transformed kwargs.
    columns : List[str]
        The user-provided keys.
    order : List[Tuple[str, str]]
        Pairs of the input and output column names.

    Examples
    --------
    >>> _normalize_keyword_aggregation({'output': ('input', 'sum')})
    (OrderedDict([('input', ['sum'])]), ('output',), [('input', 'sum')])
    """
    if not PY36:
        # kwargs are unordered pre-3.6; sort for a deterministic result.
        kwargs = OrderedDict(sorted(kwargs.items()))
    # Normalize the aggregation functions as Dict[column, List[func]],
    # process normally, then fixup the names.
    # TODO(Py35): When we drop python 3.5, change this to
    # defaultdict(list)
    # TODO: aggspec type: typing.OrderedDict[str, List[AggScalar]]
    # May be hitting https://github.com/python/mypy/issues/5958
    # saying it doesn't have an attribute __name__
    aggspec = OrderedDict()
    order = []
    columns, pairs = list(zip(*kwargs.items()))
    for name, (column, aggfunc) in zip(columns, pairs):
        if column in aggspec:
            aggspec[column].append(aggfunc)
        else:
            aggspec[column] = [aggfunc]
        # Record (input column, func name) in user-given output order.
        order.append((column, com.get_callable_name(aggfunc) or aggfunc))
    return aggspec, columns, order
# TODO: Can't use, because mypy doesn't like us setting __name__
# error: "partial[Any]" has no attribute "__name__"
# the type is:
# typing.Sequence[Callable[..., ScalarResult]]
# -> typing.Sequence[Callable[..., ScalarResult]]:
def _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]:
    """
    Possibly mangle a list of aggfuncs.

    Each ``<lambda>`` is wrapped in a ``functools.partial`` whose
    ``__name__`` is a unique ``<lambda_N>``, so multiple lambdas can
    coexist as distinct output column names.

    Parameters
    ----------
    aggfuncs : Sequence

    Returns
    -------
    mangled: list-like
        A new AggSpec sequence, where lambdas have been converted
        to have unique names.

    Notes
    -----
    If just one aggfunc is passed, the name will not be mangled.
    """
    if len(aggfuncs) <= 1:
        # A single spec needs no disambiguation — return it untouched.
        return aggfuncs
    mangled = []
    lambda_count = 0
    for func in aggfuncs:
        if com.get_callable_name(func) == "<lambda>":
            # partial() yields a fresh object whose __name__ we may set.
            func = functools.partial(func)
            func.__name__ = "<lambda_{}>".format(lambda_count)
            lambda_count += 1
        mangled.append(func)
    return mangled
def _maybe_mangle_lambdas(agg_spec: Any) -> Any:
    """
    Make new lambdas with unique names.

    Parameters
    ----------
    agg_spec : Any
        An argument to NDFrameGroupBy.agg.
        Non-dict-like `agg_spec` are pass through as is.
        For dict-like `agg_spec` a new spec is returned
        with name-mangled lambdas.

    Returns
    -------
    mangled : Any
        Same type as the input.

    Examples
    --------
    >>> _maybe_mangle_lambdas('sum')
    'sum'

    >>> _maybe_mangle_lambdas([lambda: 1, lambda: 2])  # doctest: +SKIP
    [<function __main__.<lambda_0>,
     <function pandas...._make_lambda.<locals>.f(*args, **kwargs)>]
    """
    is_dict = is_dict_like(agg_spec)
    if not (is_dict or is_list_like(agg_spec)):
        # Scalars (e.g. 'sum', np.mean) pass through unchanged.
        return agg_spec
    mangled_aggspec = type(agg_spec)()  # dict or OrderdDict
    if is_dict:
        for key, aggfuncs in agg_spec.items():
            # Only mangle list-valued entries; single funcs keep their name.
            if is_list_like(aggfuncs) and not is_dict_like(aggfuncs):
                mangled_aggfuncs = _managle_lambda_list(aggfuncs)
            else:
                mangled_aggfuncs = aggfuncs
            mangled_aggspec[key] = mangled_aggfuncs
    else:
        mangled_aggspec = _managle_lambda_list(agg_spec)
    return mangled_aggspec
def _recast_datetimelike_result(result: DataFrame) -> DataFrame:
    """
    If we have date/time like in the original, then coerce dates
    as we are stacking can easily have object dtypes here.

    Parameters
    ----------
    result : DataFrame

    Returns
    -------
    DataFrame

    Notes
    -----
    - Assumes Groupby._selected_obj has ndim==2 and at least one
      datetimelike column
    """
    recast = result.copy()
    # See GH#26285: stacking can leave datetimelike data as object dtype.
    for pos in range(len(recast.columns)):
        if not is_object_dtype(recast.dtypes[pos]):
            continue
        recast.iloc[:, pos] = maybe_convert_objects(
            recast.iloc[:, pos].values, convert_numeric=False
        )
    return recast
|
bsd-3-clause
|
googlearchive/titan
|
titan/files/mixins/versions.py
|
1
|
61817
|
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Titan version control system, including atomic commits of groups of files.
Documentation:
http://googlecloudplatform.github.io/titan/files/versions.html
"""
import hashlib
import logging
import os
import re
from google.appengine.ext import ndb
from titan.common import strong_counters
from titan import files
from titan import users
from titan.common import utils
from titan.files import dirs
class ChangesetStatus(object):
  """String constants for the lifecycle states of a changeset."""

  staging = 'new'
  presubmit = 'pre-submit'
  # A "final changeset".
  submitted = 'submitted'
  deleted = 'deleted'
  deleted_by_submit = 'deleted-by-submit'
class FileStatus(object):
  """String constants for a file's state within a changeset."""

  created = 'created'
  edited = 'edited'
  deleted = 'deleted'
# Matches versioned paths like "/_titan/ver/123/..."; group 1 is the
# changeset number.
VERSIONS_PATH_BASE_REGEX = re.compile('^/_titan/ver/([0-9]+)')
# For formatting "/_titan/ver/123/some/file/path"
VERSIONS_PATH_FORMAT = '/_titan/ver/%d%s'
# Name of the strong counter used to allocate changeset numbers.
_CHANGESET_COUNTER_NAME = 'num_changesets'
# Maximum number of file paths stored per manifest shard entity.
_MAX_MANIFEST_SHARD_PATHS = 1000
class Error(Exception):
  """Base exception for the Titan versions module."""
  pass

class ChangesetError(Error, ValueError):
  """A changeset-related operation failed."""
  pass

class InvalidChangesetError(ChangesetError):
  """The changeset is in a state invalid for the requested operation."""
  pass

class ChangesetNotFoundError(ChangesetError):
  """The referenced changeset does not exist."""
  pass

class FileVersionError(Error, ValueError):
  """A file-version-related operation failed."""
  pass

class CommitError(Error, ValueError):
  """A commit could not be completed."""
  pass

class InvalidBaseChangesetCommitError(CommitError):
  """The commit's base changeset is not valid."""
  pass

class NoBaseManifestCommitError(CommitError):
  """The commit requires a base manifest that does not exist."""
  pass

class NamespaceMismatchError(Error):
  """Objects from different namespaces were mixed in one operation."""
  pass

class NoManifestError(ChangesetError):
  """The changeset has no manifest."""
  pass

class ChangesetRebaseError(ChangesetError):
  """The changeset could not be rebased."""
  pass
class FileVersioningMixin(dirs.DirManagerMixin, files.File):
  """Mixin to provide versioned file handling.

  If created without an associated changeset, this object will dynamically
  determine the real file location from its latest committed changeset.
  """
  @classmethod
  def should_apply_mixin(cls, **kwargs):
    """Return whether to apply versioning; also marks it in mixin_state."""
    # Enable always, unless microversions is enabled.
    mixin_state = kwargs.get('_mixin_state')
    if mixin_state and mixin_state.get('is_microversions_enabled'):
      return False
    if mixin_state is not None:
      # Let other mixins know that versioning is in effect.
      mixin_state['is_versions_enabled'] = True
    return True
  @utils.compose_method_kwargs
  def __init__(self, path, **kwargs):
    """Initialize a versioned file, optionally bound to a changeset.

    Args:
      path: The file path, optionally in "/_titan/ver/<num>/<path>" form.
      **kwargs: May include 'changeset' (int or Changeset) and the
          internal-only '_allow_deleted_files' and
          '_enable_manifested_views' flags.
    Raises:
      NamespaceMismatchError: If the file and changeset namespaces differ.
    """
    # If given, this File represents the file at the given changeset.
    # If not, this File represents the latest committed file version,
    # but it cannot be written or changed (since that must happen with an
    # associated changeset).
    self.changeset = kwargs.get('changeset', None)
    # Internal-only flag to make this file not error if marked for delete.
    # This is used internally when listing files in a changeset, reverting
    # files, and in _copy_file_from_root.
    self._allow_deleted_files = kwargs.pop('_allow_deleted_files', False)
    # Internal-only state flag of whether manifested views are enabled or not.
    # This is used when reading/writing files, and internally in Changeset.
    # NOTE: This makes single File objects even less thread-safe, but there is
    # not yet another mechanism to make real_path be context aware.
    self.__set_enable_manifested_views(
        kwargs.pop('_enable_manifested_views', True))
    super(FileVersioningMixin, self).__init__(path, **kwargs)
    # Support initing with a /_titan/ver path if changeset is not given.
    # If it is, the changeset arg wins.
    versioned_path_match = VERSIONS_PATH_BASE_REGEX.match(path)
    if versioned_path_match and not self.changeset:
      self.changeset = int(versioned_path_match.group(1))
    # Strip /_titan/ver/123 from the path.
    if versioned_path_match:
      self._path = VERSIONS_PATH_BASE_REGEX.sub('', path)
    self._real_path = None
    if self.changeset and isinstance(self.changeset, int):
      # Support integer changeset argument.
      self.changeset = Changeset(self.changeset, namespace=self.namespace)
    if self.changeset and self.namespace != self.changeset.namespace:
      raise NamespaceMismatchError(
          'File namespace "{}" does not match changeset namespace "{}".'.format(
              self.namespace, self.changeset.namespace))
    # Make "changeset" a part of the file's composite key for hashing.
    if self.changeset:
      self._composite_key_elements['changeset'] = str(self.changeset.num)
def __repr__(self):
return '<File %s (cs:%r)>' % (self._path, getattr(self, 'changeset', None))
  @property
  def _file(self):
    """Handle dynamic determination of correct file entity.

    Raises:
      files.BadFileError: If no committed version exists, or the file is
          marked deleted and _allow_deleted_files is False.
    """
    if not self.changeset:
      # No associated changeset. Dynamically pick the file entity based on
      # the latest FilePointers.
      root_file_pointer = _FilePointer.get_root_key(namespace=self.namespace)
      file_pointer = _FilePointer.get_by_id(
          self.path, parent=root_file_pointer, namespace=self.namespace)
      if file_pointer:
        # Associate to the final changeset.
        self.changeset = Changeset(
            file_pointer.changeset_num,
            namespace=self.namespace).linked_changeset
        self._composite_key_elements['changeset'] = str(self.changeset.num)
      else:
        raise files.BadFileError('File does not exist: %s' % self.path)
    # A changeset exists, so real_path will resolve correctly. Fall through to
    # finding the file entity normally.
    file_ent = super(FileVersioningMixin, self)._file
    # For normal file interface interactions, if a file is "marked for delete"
    # in a changeset it should look deleted in this interface.
    if not self._allow_deleted_files and file_ent.status == FileStatus.deleted:
      raise files.BadFileError('File does not exist: %s' % self.path)
    return file_ent
@property
def created_by(self):
created_by = super(FileVersioningMixin, self).created_by
if not self._file.created_by:
# Backwards-compatibility: before microversions had user passthrough,
# files were written within tasks without a user. For these files, we
# incur an extra RPC and fetch the data from the changelist instead.
return self.changeset.created_by
return created_by
@property
def modified_by(self):
modified_by = super(FileVersioningMixin, self).modified_by
if not self._file.modified_by:
# Backwards-compatibility: before microversions had user passthrough,
# files were written within tasks without a user. For these files, we
# incur an extra RPC and fetch the data from the changelist instead.
#
# NOTE: This is correctly "created_by" and not "modified_by".
return self.changeset.created_by
return modified_by
  @property
  def real_path(self):
    """Override the storage location of the file to the versioned path.

    Returns:
      The memoized, changeset-dependent storage path for this file (for
      example '/_titan/ver/123/foo.html'), resolved through the base
      changeset's manifest when manifested views are enabled.
    """
    if self._real_path:
      return self._real_path
    # MANIFESTED FILESYSTEM VIEWS.
    # Allow the entire filesystem tree to be viewed at all changesets,
    # not just the delta of file changes at that changeset. Also, provide
    # copy-on-write behavior for staging changesets.
    #
    # Rules:
    # - Files read through a 'new' or 'submitted' changeset: first pull
    #   from the changeset, then fall back to the base_changeset's manifest.
    #   The manifested filesystem should be overlaid by the changeset's files.
    # - Files read through a 'deleted' or 'deleted-by-submit' changeset:
    #   always pull from the base_changeset's manifest, ignore the changeset
    #   since its changes will manifest when viewed at its corresponding
    #   submitted changeset.
    # - File modifications must go through 'new' changesets.
    _require_file_has_changeset(self)
    if not self._enable_manifested_views:
      # WRITE/DELETE and existence checking through a changeset.
      self._real_path = _make_versioned_path(self._path, self.changeset)
      return self._real_path
    # READ.
    kwargs = self._original_kwargs.copy()
    kwargs.pop('changeset', None)
    # Point to a non-existent changeset by default (mostly to catch
    # implementation errors).
    self._real_path = _make_versioned_path(
        self._path, Changeset(0, namespace=self.namespace))
    if self.changeset.status == ChangesetStatus.staging:
      if self.path in self.changeset:
        self._real_path = _make_versioned_path(self._path, self.changeset)
      elif self.changeset.base_changeset:
        # Read non-changeset file through manifest.
        titan_file = self.changeset.base_changeset.get_file_from_manifest(
            **kwargs)
        if titan_file is not None:
          self._real_path = titan_file.real_path
    elif self.changeset.status == ChangesetStatus.submitted:
      if self.path not in self.changeset.linked_changeset:
        # Read non-changeset file through manifest.
        titan_file = self.changeset.get_file_from_manifest(**kwargs)
        if titan_file is not None:
          self._real_path = titan_file.real_path
      else:
        # Optimization: instead of reading through the manifest (which would
        # still be correct), read directly from the submitted changeset.
        self._real_path = _make_versioned_path(
            self._path, self.changeset.linked_changeset)
    elif self.changeset.status in (ChangesetStatus.deleted_by_submit,
                                   ChangesetStatus.deleted):
      if not self.changeset.base_changeset:
        # Edge case: the first-ever staging changeset will not have a
        # base_changeset, and no files should exist yet. Point to the
        # non-existent "0" changeset.
        self._real_path = _make_versioned_path(
            self._path, Changeset(0, namespace=self.namespace))
        return self._real_path
      # Read all files through the deleted changeset's base_changeset manifest
      # so we don't need to search for the last submitted changeset.
      # This assumes that:
      #   - 'deleted-by-submit' changesets MUST have been rebased to head
      #     before submit (even if HEAD is past the deleted changeset number).
      #   - 'deleted' changesets MUST have been rebased to the last submitted
      #     changeset, but only up to the deleted changeset's number.
      titan_file = self.changeset.base_changeset.get_file_from_manifest(
          **kwargs)
      if titan_file is not None:
        self._real_path = titan_file.real_path
    elif self.changeset.status == ChangesetStatus.presubmit:
      # Pre-submit changesets should only read from the current changeset.
      pass
    else:
      raise NotImplementedError(
          'Changeset status: "{}".'.format(self.changeset.status))
    return self._real_path
  @property
  def versioned_path(self):
    # Public alias of real_path: the physical storage path under the
    # changeset-versioned directory (e.g. '/_titan/ver/123/foo.html').
    return self.real_path
  def __set_enable_manifested_views(self, enabled):
    """Sets whether or not manifested views are enabled.

    Args:
      enabled: Boolean; when False, real_path resolves directly to this
          changeset's versioned path without consulting any manifest.
    """
    # NOTE: This makes single File objects even less thread-safe, but there is
    # not yet another mechanism to make real_path be context aware.
    self._enable_manifested_views = enabled
    self._real_path = None  # Un-memoize property.
  @utils.compose_method_kwargs
  def write(self, **kwargs):
    """Write method. See superclass docstring.

    Writes are staged in the associated staging changeset: content is stored
    under the changeset's versioned path and the file's meta 'status' is set
    to 'edited' (or 'deleted' for staged deletions). The first time a path is
    touched within a changeset, content and properties are branched
    (copy-on-write) from the base changeset's version of the file.

    Raises:
      InvalidChangesetError: If a changeset was not associated to this file.
    """
    _require_file_has_changeset(self)
    # File modifications must go through staging ('new') changesets.
    _require_file_has_staging_changeset(self)
    self.__set_enable_manifested_views(False)
    # Consumed here so it is not passed through to the superclass.
    kwargs.pop('_run_mixins_only', False)
    # Internal flag set by delete() to stage a deletion marker.
    mark_version_for_delete = kwargs.pop('_mark_version_for_delete', False)
    self.changeset.associate_file(self)
    # Update meta data.
    kwargs['meta'] = kwargs.get('meta') or {}
    # This is unfortunately generically named, but we need to reserve this name.
    assert 'status' not in kwargs['meta']
    # Never delete blobs when using versions.
    # This will orphan blobs if a large file is uploaded many times in a
    # changeset without committing, but that's better than losing the data.
    # TODO(user): add a flag to entities signifying if they have been
    # copied or deleted, so that we can notice and delete orphaned blobs.
    kwargs['_delete_old_blob'] = False
    if mark_version_for_delete:
      kwargs['content'] = ''
      kwargs['meta']['status'] = FileStatus.deleted
    else:
      kwargs['meta']['status'] = FileStatus.edited
    # The first time the versioned file is touched in the changeset, we have
    # to branch all content and properties from the base_changeset's file.
    # If the versioned file is marked for delete in the changeset and then
    # un-deleted by being touched, properties are NOT copied since the file
    # is treated as a brand new file, not a copy-on-write.
    if self.path not in self.changeset and self.changeset.base_changeset:
      if self.changeset.base_changeset.has_manifest:
        # If there's a manifest on the base_changeset, use that to determine
        # what the root file should be.
        root_file = self.changeset.base_changeset.get_file_from_manifest(
            self.path, namespace=self.namespace, **kwargs)
      else:
        # If there's not a manifest, there is no way to know what the right
        # root is without searching through historical changesets.
        # For now, just pull from the dynamically-determined root.
        root_file = files.File(self.path, namespace=self.namespace)
      if root_file is not None and root_file.exists:
        # Copy properties that were not changed in the current write request.
        # Don't use root_file.copy_to because it is troublesome.
        kwargs['mime_type'] = kwargs.get('mime_type', root_file.mime_type)
        kwargs['created'] = kwargs.get('created', root_file.created)
        kwargs['created_by'] = kwargs.get(
            'created_by', root_file.created_by)
        kwargs['modified_by'] = kwargs.get(
            'modified_by', root_file.modified_by)
        kwargs['modified'] = kwargs.get('modified', root_file.modified)
        if kwargs['content'] is None and kwargs['blob'] is None:
          # Neither content or blob given, copy content from root_file.
          if root_file.blob:
            kwargs['blob'] = root_file.blob
          else:
            kwargs['content'] = root_file.content
          # Unset encoding, let it be set based on the root content.
          kwargs['encoding'] = None
        # Copy meta attributes.
        for key, value in root_file.meta.serialize().iteritems():
          if key not in kwargs['meta']:
            kwargs['meta'][key] = value
    try:
      return super(FileVersioningMixin, self).write(**kwargs)
    finally:
      # Always restore manifested (read) views, even if the write failed.
      self.__set_enable_manifested_views(True)
  @utils.compose_method_kwargs
  def delete(self, **kwargs):
    """Mark the file for deletion upon commit.

    The file is not removed immediately; a deletion marker is staged in the
    associated changeset (via write with _mark_version_for_delete=True).

    To revert a file, use changeset.revert_file().

    Args:
      **kwargs: All composed keyword arguments for this method.
    Raises:
      InvalidChangesetError: If a changeset was not associated to this file.
    Returns:
      Self-reference.
    """
    _require_file_has_changeset(self)
    # From changeset.revert_file().
    if kwargs.pop('_revert', False):
      # Allow files that are marked for deletion to be reverted.
      self._allow_deleted_files = True
      result = super(FileVersioningMixin, self).delete(**kwargs)
      self._allow_deleted_files = False
      return result
    # Mark file for deletion.
    self.__set_enable_manifested_views(False)
    try:
      return self.write(content='', _mark_version_for_delete=True, **kwargs)
    finally:
      # Always restore manifested (read) views, even if the write failed.
      self.__set_enable_manifested_views(True)
# ------------------------------------------------------------------------------
class Changeset(object):
  """Unit of consistency over a group of files.

  Attributes:
    num: An integer of the changeset number.
    namespace: The datastore namespace, or None for the default namespace.
    created: datetime.datetime object of when the changeset was created.
    created_by: The User object of who created this changeset.
    status: An integer of one of the CHANGESET_* constants.
    base_path: The path prefix for all files in this changeset,
        for example: '/_titan/ver/123'
    linked_changeset_base_path: Same as base_path, but for the linked changeset.
    exists: If the given changeset exists.
  """

  # TODO(user): make changeset_ent protected.
  def __init__(self, num, namespace=None, changeset_ent=None):
    utils.validate_namespace(namespace)
    self._changeset_ent = changeset_ent
    self._num = int(num)
    self._namespace = namespace
    self._associated_files = set()
    self._finalized_files = False

  def __eq__(self, other):
    """Compare equality of two Changeset objects."""
    return (isinstance(other, self.__class__)
            and self.num == other.num
            and self.namespace == other.namespace)

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    # Objects that compare equal must hash equal. Without this, instances
    # fall back to identity hashing while __eq__ compares by value, which
    # breaks use of equal changesets in sets and as dict keys.
    return hash((self.num, self.namespace))

  def __contains__(self, path):
    assert isinstance(path, basestring)
    if self._finalized_files:
      return any([path == f.path for f in self._associated_files])
    # Not finalized: probe the datastore directly for a staged file entity.
    return files.File(
        path,
        namespace=self.namespace, changeset=self,
        _internal=True, _allow_deleted_files=True,
        _enable_manifested_views=False).exists

  def __repr__(self):
    return '<Changeset %d evaluated: %s>' % (self._num,
                                             bool(self._changeset_ent))

  @property
  def changeset_ent(self):
    """Lazy-load the _Changeset entity.

    Raises:
      ChangesetNotFoundError: If no entity exists for this changeset number.
    """
    if not self._changeset_ent:
      root_changeset = _Changeset.get_root_key(namespace=self.namespace)
      self._changeset_ent = _Changeset.get_by_id(
          str(self._num), parent=root_changeset, namespace=self.namespace)
      if not self._changeset_ent:
        raise ChangesetNotFoundError('Changeset %s does not exist.' % self._num)
    return self._changeset_ent

  @property
  def num(self):
    return self._num

  @property
  def namespace(self):
    return self._namespace

  @property
  def created(self):
    return self.changeset_ent.created

  @property
  def created_by(self):
    return self.changeset_ent.created_by

  @property
  def status(self):
    return self.changeset_ent.status

  @property
  def base_path(self):
    return VERSIONS_PATH_FORMAT % (self.num, '')

  @property
  def linked_changeset_base_path(self):
    if self.linked_changeset:
      return VERSIONS_PATH_FORMAT % (self.linked_changeset_num, '')

  @property
  def linked_changeset(self):
    if self.linked_changeset_num:
      return Changeset(num=self.linked_changeset_num, namespace=self.namespace)

  @property
  def linked_changeset_num(self):
    if self.status not in (ChangesetStatus.staging, ChangesetStatus.deleted):
      return int(self.changeset_ent.linked_changeset.id())
    raise ChangesetError(
        'Cannot reference linked_changeset_num for "{}" changesets.'.format(
            self.status))

  @property
  def base_changeset(self):
    if self.base_changeset_num:
      return Changeset(num=self.base_changeset_num, namespace=self.namespace)

  @property
  def base_changeset_num(self):
    if self.status in (
        ChangesetStatus.staging, ChangesetStatus.deleted_by_submit):
      if self.changeset_ent.base_changeset:
        return int(self.changeset_ent.base_changeset.id())
      return
    raise ChangesetError(
        'Cannot reference base_changeset_num for "{}" changesets.'.format(
            self.status))

  @property
  def associated_paths(self):
    return set([f.path for f in self._associated_files])

  @property
  def has_manifest(self):
    if self.status != ChangesetStatus.submitted:
      raise ChangesetError(
          'Cannot reference has_manifest for "{}" changesets.'.format(
              self.status))
    return bool(self.changeset_ent.num_manifest_shards)

  @property
  def _num_manifest_shards(self):
    return self.changeset_ent.num_manifest_shards

  @property
  def exists(self):
    try:
      return bool(self.changeset_ent)
    except ChangesetError:
      return False

  def _get_manifest_shard_ent(self, path):
    """Get the shard entity which may contain the given path."""
    shard_index = _get_manifest_shard_index(path, self._num_manifest_shards)
    shard_id = _make_manifest_shard_id(self, shard_index)
    parent = _ChangesetManifestShard.get_root_key(
        final_changeset_num=self.num, namespace=self.namespace)
    return _ChangesetManifestShard.get_by_id(id=shard_id, parent=parent)

  def get_file_from_manifest(self, path, **kwargs):
    """Gets a file through the manifest.

    Args:
      path: The file path to look into the manifest.
      **kwargs: Other keyword args to pass through to the File object.
    Raises:
      NoManifestError: If the status of the changeset is not 'submitted', or
          if a manifest was not saved at this changeset.
    Returns:
      A files.File object associated to the changeset which last affected the
      path, or None if the file doesn't exist at this changeset.
    """
    if self.status != ChangesetStatus.submitted:
      raise NoManifestError(
          'Changeset {:d} with status "{}" does not have a manifest. Only '
          '"submitted" changesets may have manifests.'.format(
              self.num, self.status))
    if not self._num_manifest_shards:
      raise NoManifestError(
          'Changeset {:d} was not committed with a manifest.'.format(
              self.num))
    manifest_shard_ent = self._get_manifest_shard_ent(path=path)
    paths_to_changeset_num = manifest_shard_ent.paths_to_changeset_num
    if path not in paths_to_changeset_num:
      # We know deterministically that the given file did not exist
      # at this changeset.
      return None
    kwargs.pop('changeset', None)
    kwargs.pop('namespace', None)
    changeset = self.__class__(
        num=paths_to_changeset_num[path], namespace=self.namespace)
    titan_file = files.File(
        path, changeset=changeset, namespace=self.namespace, **kwargs)
    return titan_file

  def get_files(self):
    """Gets all files associated with this changeset.

    Guarantees strong consistency, but requires that associated file paths
    have been finalized on this specific Changeset instance.

    Raises:
      ChangesetError: If associated file paths have not been finalized.
    Returns:
      A files.Files object.
    """
    if self.status in (
        ChangesetStatus.deleted_by_submit, ChangesetStatus.deleted):
      raise ChangesetError(
          'Cannot get files from changeset {:d} which is "{}".'.format(
              self.num, self.status))
    if not self._finalized_files:
      raise ChangesetError(
          'Cannot guarantee strong consistency when associated file paths '
          'have not been finalized. See finalize_associated_files() '
          'or use changeset.list_files() for an eventually-consistent view.')
    titan_files = files.Files(
        files=list(self._associated_files), namespace=self.namespace)
    return titan_files

  def list_files(self, dir_path, recursive=False, depth=None, filters=None,
                 limit=None, offset=None, order=None,
                 include_deleted=True, include_manifested=False, **kwargs):
    """Queries and returns a Files object containing this changeset's files.

    This method is always eventually consistent and may not contain recently
    changed files.

    Args:
      dir_path: Absolute directory path.
      recursive: Whether to list files recursively.
      depth: If recursive, a positive integer to limit the recursion depth.
          1 is one folder deep, 2 is two folders deep, etc.
      filters: An iterable of FileProperty comparisons, for example:
          [files.FileProperty('created_by') == 'example@example.com']
      order: An iterable of files.FileProperty objects to sort the result set.
      limit: An integer limiting the number of files returned.
      offset: Number of files to offset the query by.
      include_deleted: Whether or not to include deleted files.
      include_manifested: Whether or not to include manifested files in
          the result set. If given, the following arguments cannot be passed:
          depth, filters, order, limit, offset.
    Raises:
      ChangesetError: If the status is 'deleted', 'deleted-by-submit',
          or 'presubmit'.
      ValueError: If include_manifested is set and unsupported args are passed.
    Returns:
      A files.OrderedFiles object.
    """
    utils.validate_dir_path(dir_path)
    manifested_paths_to_changeset_num = {}
    # Initialize to None so that unsupported statuses fall through to the
    # explicit ChangesetError below instead of raising an unbound-local error
    # inside the include_manifested branch.
    changeset_with_manifest = None
    if include_manifested:
      if (depth is not None or filters is not None or order is not None
          or limit is not None or offset is not None):
        raise ValueError(
            'The following arguments are not supported when '
            'include_manifested is set: depth, filters, order, limit, offset.')
      if self.status == ChangesetStatus.staging:
        # If staging changeset, fetch manifest from base_changeset.
        changeset_with_manifest = self.base_changeset
      elif self.status == ChangesetStatus.submitted:
        # If submitted changeset, fetch manifest from self.
        changeset_with_manifest = self
      if changeset_with_manifest:  # Skip if this is the first-ever changeset.
        manifested_paths_to_changeset_num = _list_manifested_paths(
            changeset=changeset_with_manifest,
            dir_path=dir_path, recursive=recursive)
    if self.status in (
        ChangesetStatus.deleted_by_submit, ChangesetStatus.deleted,
        ChangesetStatus.presubmit):
      raise ChangesetError(
          'Cannot list files from changeset {:d} which is "{}".'.format(
              self.num, self.status))
    changeset = self
    if changeset.status == ChangesetStatus.submitted:
      # The files stored for submitted changesets are actually stored under the
      # the staging changeset's number, since they are never moved.
      changeset = changeset.linked_changeset
    if not include_deleted:
      if filters is None:
        filters = []
      filters.append(files.FileProperty('status') != FileStatus.deleted)
    versioned_files = files.OrderedFiles.list(
        utils.safe_join(changeset.base_path, dir_path[1:]),
        namespace=self.namespace, recursive=recursive,
        depth=depth, filters=filters, order=order, limit=limit, offset=offset,
        # Important: use changeset=self, not changeset=changeset, to make sure
        # submitted changesets are viewed correctly and not manifested.
        changeset=self,
        _allow_deleted_files=include_deleted,
        **kwargs)
    if include_manifested and manifested_paths_to_changeset_num:
      new_versioned_files = files.OrderedFiles(namespace=self.namespace)
      for path in manifested_paths_to_changeset_num:
        titan_file = files.File(
            path, changeset=self, namespace=self.namespace, **kwargs)
        new_versioned_files.update(
            files.Files(files=[titan_file], namespace=self.namespace, **kwargs))
      # Overlay changeset files on top of manifested files:
      new_versioned_files.update(versioned_files)
      versioned_files = new_versioned_files
    versioned_files.sort()
    # Recreate a Files object to get rid of versioned paths in the keys:
    return files.OrderedFiles(
        files=versioned_files.values(), namespace=self.namespace)

  def list_directories(self, dir_path, include_manifested=False, **kwargs):
    """Lists directories.

    This method is always eventually consistent and may not contain recently
    changed directories.

    Args:
      dir_path: Absolute directory path.
      include_manifested: Whether or not to include manifested directories in
          the result set.
      **kwargs: Keyword arguments to pass through to Dir objects.
    Raises:
      ChangesetError: If list_directories is called on unsupported changesets.
    Returns:
      A dirs.Dirs object.
    """
    utils.validate_dir_path(dir_path)
    if self.status == ChangesetStatus.staging:
      versioned_dir_path = self.base_path
      # If staging changeset, fetch manifest from base_changeset.
      changeset_with_manifest = self.base_changeset
    elif self.status == ChangesetStatus.submitted:
      versioned_dir_path = self.linked_changeset_base_path
      # If submitted changeset, fetch manifest from self.
      changeset_with_manifest = self
    else:
      raise ChangesetError(
          'Unsupported changeset status: {}'.format(self.status))
    if include_manifested:
      manifested_paths_to_changeset_num = _list_manifested_paths(
          # Must always be recursive to include sparse, deep directory trees.
          changeset=changeset_with_manifest, dir_path=dir_path, recursive=True)
    titan_dirs = dirs.Dirs.list(
        utils.safe_join(versioned_dir_path, dir_path[1:]),
        namespace=self.namespace, strip_prefix=versioned_dir_path, **kwargs)
    complete_dir_paths = set()
    if include_manifested and manifested_paths_to_changeset_num:
      for path in manifested_paths_to_changeset_num:
        # These are file paths, strip them down to directory paths.
        complete_dir_paths.add(os.path.dirname(path))
      complete_dir_paths -= set(['/', dir_path])
      depth = 1 if dir_path == '/' else dir_path.count('/') + 1
      for manifested_dir_path in complete_dir_paths:
        # Strip path name down to just one directory deeper than dir_path.
        manifested_dir_path = manifested_dir_path[1:]
        path = os.path.join('/', *manifested_dir_path.split('/')[:depth])
        titan_dir = dirs.Dir(path, namespace=self.namespace, **kwargs)
        titan_dirs[titan_dir.path] = titan_dir
    titan_dirs.sort()
    # Recreate the Dirs object to get rid of versioned paths in the keys:
    return dirs.Dirs(
        dirs=titan_dirs.values(), strip_prefix=versioned_dir_path, **kwargs)

  def revert_file(self, titan_file):
    """Reverts a file's staged changes and disassociates it from the changeset.

    Args:
      titan_file: File object staged in this changeset.
    """
    _require_file_has_staging_changeset(titan_file)
    self.disassociate_file(titan_file)
    titan_file.delete(_revert=True)

  def rebase(self, new_base_changeset):
    """Rebase the changeset to a new base_changeset.

    This assumes that all neccessary merges have already happened. Calling this
    is the equivalent of a "choose all mine" merge strategy.

    Args:
      new_base_changeset: The new, already-submitted changeset from which
          the changeset will now be based. The base_changeset number might be
          greater than the current staging changeset's number.
    Raises:
      ChangesetRebaseError: If new_base_changeset is not a submitted changeset.
      NamespaceMismatchError: If the new_base_changeset's namespace differs.
    """
    if self.namespace != new_base_changeset.namespace:
      raise NamespaceMismatchError(
          'Current namespace "{}" does not match new_base_changeset namespace '
          '"{}".'.format(
              self.namespace, new_base_changeset.namespace))
    if new_base_changeset.status != ChangesetStatus.submitted:
      raise ChangesetRebaseError(
          'new_base_changeset must be a submitted changeset. Got: {!r}'.format(
              new_base_changeset))
    self.changeset_ent.base_changeset = new_base_changeset.changeset_ent.key
    self.changeset_ent.put()

  def serialize(self):
    """Serializes changeset data into simple types."""
    data = {
        'num': self.num,
        'namespace': self.namespace,
        'created': self.created,
        'status': self.status,
        'base_path': self.base_path,
        'created_by': str(self.created_by) if self.created_by else None,
    }
    if self.status != ChangesetStatus.staging:
      # Cannot reference linked_changeset_num for "new" changesets.
      data['linked_changeset_num'] = self.linked_changeset_num
      data['linked_changeset_base_path'] = self.linked_changeset_base_path
      if self.status == ChangesetStatus.submitted:
        data['has_manifest'] = self.has_manifest
    else:
      data['base_changeset_num'] = self.base_changeset_num
    return data

  def associate_file(self, titan_file):
    """Associate a file temporally to this changeset object before commit.

    Args:
      titan_file: File object.
    """
    # Internal-only flag to allow files which have been marked for deletion.
    titan_file._allow_deleted_files = True
    self._associated_files.add(titan_file)
    self._finalized_files = False

  def disassociate_file(self, titan_file):
    """Disassociate a file from this changeset object before commit.

    Args:
      titan_file: File object.
    """
    self._associated_files.remove(titan_file)
    self._finalized_files = False

  def finalize_associated_files(self):
    """Indicate that this specific Changeset object was used for all operations.

    This flag is used during commit to indicate if this object can be trusted
    for strong consistency guarantees of which files paths will be committed.
    Only call this method if you are sure that this same Changeset instance was
    passed in for all file operations associated with this changeset.

    Raises:
      ChangesetError: if no files have been associated.
    """
    if not self._associated_files:
      raise ChangesetError('Cannot finalize: no associated file objects.')
    self._finalized_files = True
class _Changeset(ndb.Model):
  """Model representing a changeset.

  All _Changeset entities are in the same entity group, as well as all
  _FileVersion entities. An example entity group relationship might look like:

    _Changeset 0 (root ancestor, non-existent)
    |
    + _Changeset 1 (deleted-by-submit)
    |
    + _Changeset 2 (submitted)
    |  |
    |  + _FileVersion /foo
    |  + _FileVersion /bar
    |
    + _Changeset 3 (staging)
    |
    ...

  Attributes:
    num: Integer of the entity's key.id().
    created: datetime.datetime object of when this entity was created.
    created_by: A users.TitanUser object of the user who created the changeset.
    status: A string status of the changeset.
    linked_changeset: A reference between staging and finalized changesets.
    base_changeset: A reference to the current base for staging changesets.
    num_manifest_shards: The number of shards of the filesystem manifest. Only
        set for final changesets and only if the manifest was saved.
  """
  # NOTE: This model should be kept as lightweight as possible. Anything
  # else added here increases the amount of time that commit() will take,
  # though not as drastically as additions to _FileVersion and _FilePointer.

  # Integer copy of key.id(); the key id itself is a string (see get_root_key
  # usage and the class docstring).
  num = ndb.IntegerProperty(required=True)
  created = ndb.DateTimeProperty(auto_now_add=True)
  created_by = users.TitanUserProperty()
  status = ndb.StringProperty(choices=[ChangesetStatus.staging,
                                       ChangesetStatus.presubmit,
                                       ChangesetStatus.submitted,
                                       ChangesetStatus.deleted,
                                       ChangesetStatus.deleted_by_submit])
  linked_changeset = ndb.KeyProperty(kind='_Changeset')
  base_changeset = ndb.KeyProperty(kind='_Changeset')
  num_manifest_shards = ndb.IntegerProperty()

  def __repr__(self):
    return ('<_Changeset %d namespace:%r status:%s base_changeset:%r '
            'num_manifest_shards:%r>') % (
                self.num, self.key.namespace(), self.status,
                self.base_changeset, self.num_manifest_shards)

  @staticmethod
  def get_root_key(namespace):
    """Get the root key, the parent of all changeset entities."""
    # All changesets are in the same entity group by being children of the
    # arbitrary, non-existent "0" changeset.
    return ndb.Key(_Changeset, '0', namespace=namespace)
class _ChangesetManifestShard(ndb.Model):
  """Model for one shard of a snapshot of a filesystem manifest at a changeset.

  Attributes:
    key.id(): The key for this model is "<changeset_num>:<shard_num>".
        Example: "3:0" is the first shard for Changeset 3.
    paths_to_changeset_num: A manifest of filesystem paths to the last
        changeset that affected the path.
  """
  # NOTE: This model should be kept as lightweight as possible. Anything
  # else added here increases the amount of time that commit() will take,
  # and decreases the number of files that can be committed at once.

  # JSON-encoded mapping of file path -> changeset number (see class
  # docstring); consumed by Changeset.get_file_from_manifest.
  paths_to_changeset_num = ndb.JsonProperty()

  @staticmethod
  def get_root_key(final_changeset_num, namespace):
    """Get the root key, the parent of each group of manifest shard entities."""
    # These entities are grouped in an entity group if they are part of the same
    # manifest. Use an arbitrary, non-existent key named after the changeset.
    return ndb.Key(
        _ChangesetManifestShard, final_changeset_num, namespace=namespace)
class FileVersion(object):
  """Metadata about a committed file version.

  NOTE: Always trust FileVersions as the canonical source of a file's revision
  history metadata. Don't use the 'status' meta property or other properties of
  File objects as authoritative.

  Attributes:
    path: The committed file path. Example: /foo.html
    namespace: The datastore namespace, or None for the default namespace.
    versioned_path: The path of the versioned file. Ex: /_titan/ver/123/foo.html
    changeset: A final Changeset object.
    changeset_created_by: The TitanUser who created the changeset.
    created_by: The TitanUser who created the file version. This usually is the
        same as changeset_created_by.
    created: datetime.datetime object of when the file version was created.
    status: The edit type of the affected file.
  """

  def __init__(self, path, namespace, changeset, file_version_ent=None):
    self._path = path
    self._namespace = namespace
    self._file_version_ent = file_version_ent
    self._changeset = changeset
    # Support integer changeset arguments for convenience.
    if isinstance(changeset, int):
      self._changeset = Changeset(changeset, namespace=self.namespace)

  @property
  def _file_version(self):
    """Lazy-load the _FileVersion entity.

    Raises:
      FileVersionError: If no file version exists for this path at the
          associated changeset.
    """
    if not self._file_version_ent:
      file_version_id = _FileVersion.make_key_name(self._changeset, self._path)
      self._file_version_ent = _FileVersion.get_by_id(
          file_version_id, parent=self._changeset.changeset_ent.key,
          namespace=self.namespace)
      if not self._file_version_ent:
        raise FileVersionError('No file version of %s at %s.'
                               % (self._path, self._changeset.num))
    return self._file_version_ent

  def __repr__(self):
    return ('<FileVersion path: %s namespace: %s versioned_path: %s '
            'created: %s status: %s>' % (
                self.path, self.namespace, self.versioned_path, self.created,
                self.status))

  @property
  def path(self):
    return self._path

  @property
  def namespace(self):
    return self._namespace

  @property
  def versioned_path(self):
    # Content is stored under the linked (staging) changeset's number, since
    # files are never physically moved on commit.
    return VERSIONS_PATH_FORMAT % (self._changeset.linked_changeset_num,
                                   self._path)

  @property
  def changeset(self):
    return self._changeset

  @property
  def content_changeset(self):
    """Convenience property for finding the committed content changeset.

    Raises:
      TypeError: If accessed on a non-submitted changeset.
    """
    if self.changeset.status != ChangesetStatus.submitted:
      raise TypeError(
          'content_changeset can only be accessed from final, committed '
          'changesets. Current changeset: %r' % self)
    return self.changeset.linked_changeset

  @property
  def changeset_created_by(self):
    return self._file_version.changeset_created_by

  @property
  def created_by(self):
    return self._file_version.created_by

  @property
  def created(self):
    return self._file_version.created

  @property
  def status(self):
    return self._file_version.status

  def serialize(self):
    """Serializes a FileVersion into native types."""
    cs_created_by = self.changeset_created_by
    result = {
        'path': self.path,
        'versioned_path': self.versioned_path,
        'created': self.created,
        'status': self.status,
        'changeset_num': self._changeset.num,
        'changeset_created_by': str(cs_created_by) if cs_created_by else None,
        'created_by': str(self.created_by) if self.created_by else None,
        'linked_changeset_num': self.changeset.linked_changeset_num,
    }
    return result
class _FileVersion(ndb.Model):
  """Model representing metadata about a committed file version.

  A _FileVersion entity will only exist for committed file changes.

  Attributes:
    key.id(): '<changeset num>:<path>', such as '123:/foo.html'.
    path: The Titan File path.
    changeset_num: The changeset number in which the file was changed.
    changeset_created_by: A users.TitanUser object of who created the changeset.
    created: datetime.datetime object of when the entity was created.
    status: The edit type of the file at this version.
  """
  # NOTE: This model should be kept as lightweight as possible. Anything
  # else added here increases the amount of time that commit() will take,
  # and decreases the number of files that can be committed at once.
  path = ndb.StringProperty(required=True)
  changeset_num = ndb.IntegerProperty(required=True)
  changeset_created_by = users.TitanUserProperty()
  # In limited cases, the user who created the file version may be different
  # than the changeset user (such as with microversions, where the changeset
  # user is None, but each file version has an overwritten real author).
  created_by = users.TitanUserProperty()
  created = ndb.DateTimeProperty(auto_now_add=True)
  status = ndb.StringProperty(
      required=True,
      choices=[FileStatus.created, FileStatus.edited, FileStatus.deleted])

  def __repr__(self):
    # Note the trailing space before the continuation literal; without it the
    # implicit string concatenation produced "changeset_num:%screated:%s".
    return ('<_FileVersion id:%s path:%s namespace:%s changeset_num:%s '
            'created:%s status:%s>' % (
                self.key.id(), self.path, self.key.namespace(),
                self.changeset_num, self.created, self.status))

  @staticmethod
  def make_key_name(changeset, path):
    """Builds the '<changeset num>:<path>' key id for a file version."""
    return ':'.join([str(changeset.num), path])
class _FilePointer(ndb.Model):
  """Pointer from a root file path to its current file version.

  All _FilePointers are in the same entity group. As such, the entities
  are updated atomically to point a set of files at new versions.

  Attributes:
    key.id(): Root file path string. Example: '/foo.html'
    changeset_num: An integer pointing to the file's latest committed changeset.
        Technically, this is the 'deleted-by-submit' content changeset.
    versioned_path: Versioned file path. Example: '/_titan/ver/1/foo.html'
  """
  # NOTE: This model should be kept as lightweight as possible. Anything
  # else added here increases the amount of time that commit() will take,
  # and decreases the number of files that can be committed at once.
  changeset_num = ndb.IntegerProperty(required=True)

  def __repr__(self):
    return '<_FilePointer path:%s namespace:%s changeset: %s>' % (
        self.key.id(), self.key.namespace(), self.changeset_num)

  @property
  def versioned_path(self):
    # Derived storage path of the current version; not stored on the entity.
    return VERSIONS_PATH_FORMAT % (self.changeset_num, self.key.id())

  @staticmethod
  def get_root_key(namespace):
    # The parent of all _FilePointers is a non-existent _FilePointer arbitrarily
    # named '/', since no file path can be a single slash.
    return ndb.Key(_FilePointer, '/', namespace=namespace)
class VersionControlService(object):
  """A service object providing version control methods."""

  def new_staging_changeset(self, created_by=None, namespace=None):
    """Create a new staging changeset with a unique number ID.

    Args:
      created_by: A users.TitanUser object, will default to the current user.
      namespace: The namespace in which to create the new changeset, or None
          to use the default namespace.
    Returns:
      A Changeset.
    """
    return self._new_changeset(
        status=ChangesetStatus.staging,
        created_by=created_by, namespace=namespace)

  def _new_changeset(self, status, created_by, namespace,
                     nested_transaction=False):
    """Create a changeset with the given status."""
    utils.validate_namespace(namespace)

    def transaction():
      """The changeset creation transaction."""
      new_changeset_num = strong_counters.Increment(
          _CHANGESET_COUNTER_NAME, namespace=namespace, nested_transaction=True)
      base_changeset_key = None
      # Set the base_changeset, but only for new staging changesets.
      if status == ChangesetStatus.staging:
        base_changeset = self.get_last_submitted_changeset(namespace=namespace)
        if base_changeset:
          base_changeset_key = base_changeset.changeset_ent.key
      changeset_ent = _Changeset(
          # NDB properties:
          # NDB can support integer keys, but this needs to be a string for
          # support of legacy IDs created when using db.
          id=str(new_changeset_num),
          parent=_Changeset.get_root_key(namespace=namespace),
          namespace=namespace,
          # Model properties:
          num=new_changeset_num,
          status=status,
          base_changeset=base_changeset_key)
      if created_by:
        changeset_ent.created_by = created_by
      else:
        changeset_ent.created_by = users.get_current_user()
      changeset_ent.put()
      staging_changeset = Changeset(
          num=new_changeset_num,
          namespace=namespace,
          # TODO(user): maybe pass _base_changeset_ent for optimization.
          changeset_ent=changeset_ent)
      return staging_changeset

    if nested_transaction:
      return transaction()
    # xg-transaction between the StrongCounter and query over _Changeset when
    # calling get_last_submitted_changeset().
    return ndb.transaction(transaction, xg=True)

  def get_last_submitted_changeset(self, namespace=None):
    """Returns a Changeset object of the last submitted changeset.

    Args:
      namespace: The datastore namespace, or None for the default namespace.
    Raises:
      ChangesetError: If no changesets currently exist.
    Returns:
      A Changeset, or None if nothing has been submitted yet.
    """
    changeset_root_key = _Changeset.get_root_key(namespace=namespace)
    # Use an ancestor query to maintain strong consistency.
    changeset_query = _Changeset.query(
        ancestor=changeset_root_key, namespace=namespace)
    changeset_query = changeset_query.filter(
        _Changeset.status == ChangesetStatus.submitted)
    changeset_query = changeset_query.order(-_Changeset.num)
    latest_changeset = list(changeset_query.fetch(1))
    if not latest_changeset:
      return None
    return Changeset(num=latest_changeset[0].num, namespace=namespace)

  def get_file_versions(self, path, namespace=None, limit=1000):
    """Get FileVersion objects of the revisions of this file path.

    Args:
      path: An absolute file path.
      namespace: The datastore namespace, or None for the default namespace.
      limit: The limit to the number of objects returned.
    Returns:
      A list of FileVersion objects, ordered from latest to earliest.
    """
    changeset_root_key = _Changeset.get_root_key(namespace=namespace)
    file_version_ents = _FileVersion.query(
        ancestor=changeset_root_key, namespace=namespace)
    file_version_ents = file_version_ents.filter(_FileVersion.path == path)
    # Order in descending chronological order, which will also happen to
    # order by changeset_num.
    file_version_ents = file_version_ents.order(-_FileVersion.created)
    # Encapsulate all the _FileVersion objects in public FileVersion objects.
    file_versions = []
    for file_version_ent in file_version_ents.fetch(limit=limit):
      file_version = FileVersion(
          path=file_version_ent.path,
          namespace=namespace,
          changeset=Changeset(
              file_version_ent.changeset_num, namespace=namespace),
          file_version_ent=file_version_ent)
      file_versions.append(file_version)
    return file_versions

  def _verify_staging_changeset_ready_for_commit(
      self, namespace, staging_changeset, save_manifest):
    """Raises a CommitError subclass if the changeset cannot be committed.

    Only enforced when save_manifest is True, since both checks exist to
    guarantee a consistent manifest chain.
    """
    if not save_manifest:
      return
    # Fail-fast: save_manifest is True and base_changeset is not up to date.
    last_changeset = self.get_last_submitted_changeset(namespace=namespace)
    if staging_changeset.base_changeset != last_changeset:
      raise InvalidBaseChangesetCommitError(
          'Changeset {:d} with base_changeset {} needs rebase to head before '
          'commit. Last committed changeset: {}.'.format(
              staging_changeset.num,
              getattr(staging_changeset.base_changeset, 'num', None),
              getattr(last_changeset, 'num', None)))
    # Fail-fast: save_manifest is True and no manifest exists on base_changeset.
    if (staging_changeset.base_changeset  # May be None if first changeset.
        and not staging_changeset.base_changeset.has_manifest):
      raise NoBaseManifestCommitError(
          'The base_changeset for changeset {:d} was not originally committed '
          'with a manifest, so the manifest shards cannot be copied.'.format(
              staging_changeset.num))

  def commit(self, staging_changeset, force=False, save_manifest=True):
    """Commit the given changeset.

    Args:
      staging_changeset: A Changeset object with a status of staging.
      force: Commit a changeset even if using an eventually-consistent query.
          This could cause files recently added to the changeset to be missed
          on commit.
      save_manifest: Whether or not to save a manifest of the entire
          filesystem state upon commit. This requires that the associated
          base_changeset was also committed with a snapshot.
    Raises:
      CommitError: If a changeset contains no files or it is already committed.
    Returns:
      The final Changeset object.
    """
    if staging_changeset.status != ChangesetStatus.staging:
      raise CommitError('Cannot commit changeset with status "%s".'
                        % staging_changeset.status)
    try:
      staged_files = staging_changeset.get_files()
    except ChangesetError:
      if not force:
        raise
      # Got force=True, get files with an eventually-consistent query.
      staged_files = staging_changeset.list_files(
          '/', recursive=True, include_deleted=True)
    # Preload files so that they are not lazily loaded inside of the commit
    # transaction and count against the xg-transaction limit.
    staged_files.load()
    if not staged_files:
      raise CommitError('Changeset %d contains no file changes.'
                        % staging_changeset.num)
    # Fail if rebase is needed or the base manifest is missing and needed.
    # This is also in the _commit path below to guarantee strong consistency,
    # but is duplicated here as an optimization to fail outside the transaction.
    namespace = staging_changeset.namespace
    self._verify_staging_changeset_ready_for_commit(
        namespace, staging_changeset, save_manifest=save_manifest)
    transaction_func = (
        lambda: self._commit(staging_changeset, staged_files, save_manifest))
    final_changeset = ndb.transaction(transaction_func, xg=True)
    return final_changeset

  def _commit(self, staging_changeset, staged_files, save_manifest):
    """Commit a staged changeset (runs inside an xg transaction)."""
    # Fail if rebase is needed or the base manifest is missing and needed.
    namespace = staging_changeset.namespace
    base_changeset = staging_changeset.base_changeset
    self._verify_staging_changeset_ready_for_commit(
        namespace, staging_changeset, save_manifest=save_manifest)
    final_changeset = self._new_changeset(
        status=ChangesetStatus.presubmit,
        created_by=staging_changeset.created_by,
        namespace=namespace, nested_transaction=True)
    changes = ['%s: %s' % (f.meta.status, f.path)
               for f in staged_files.values()]
    logging.info(
        'Submitting staging changeset %d in namespace %s as final changeset %d '
        'with %d files:\n%s',
        staging_changeset.num, namespace, final_changeset.num,
        len(staged_files), '\n'.join(changes))
    # Copy the manifest data from the base_changeset to the final_changeset.
    # TODO(user): optimize this by pulling the data reading for small
    # manifests out of the commit path.
    new_manifest_shards = []
    if save_manifest:
      new_manifest = {}
      # If this isn't the first-ever committed changeset, copy the old manifest.
      if staging_changeset.base_changeset:
        full_manifest = _fetch_full_manifest(base_changeset)
        new_manifest.update(full_manifest)
      for staged_file in staged_files.itervalues():
        if staged_file.meta.status == FileStatus.deleted:
          # Remove from new manifest if it existed in previous manifests.
          new_manifest.pop(staged_file.path, None)
        else:
          # New file or edited file: point to the current final_changeset.
          new_manifest[staged_file.path] = final_changeset.num
      # Split up the new_manifest into shards by hashing each path and modding
      # it into the right bucket. Hashing the paths provides a relatively even
      # distribution over the available buckets, and also provides a
      # deterministic O(1) way to know which manifest shard a path may exist in.
      # Integer (floor) division; at least one shard is always created.
      num_manifest_shards = len(new_manifest) // _MAX_MANIFEST_SHARD_PATHS + 1
      # Bug fix: '[{}] * n' would alias ONE dict n times, making every
      # shard entity contain the full manifest. Build distinct dicts.
      manifest_buckets = [{} for _ in range(num_manifest_shards)]
      while new_manifest:
        path, changeset_num = new_manifest.popitem()
        index = _get_manifest_shard_index(path, num_manifest_shards)
        manifest_buckets[index][path] = changeset_num
      # Create the new manifest entities.
      parent = _ChangesetManifestShard.get_root_key(
          final_changeset_num=final_changeset.num, namespace=namespace)
      for i, manifest_bucket in enumerate(manifest_buckets):
        shard_ent = _ChangesetManifestShard(
            id=_make_manifest_shard_id(final_changeset, i),
            paths_to_changeset_num=manifest_bucket, parent=parent)
        new_manifest_shards.append(shard_ent)
    # Update status of the staging and final changesets.
    staging_changeset_ent = staging_changeset.changeset_ent
    staging_changeset_ent.status = ChangesetStatus.deleted_by_submit
    staging_changeset_ent.linked_changeset = final_changeset.changeset_ent.key
    final_changeset_ent = final_changeset.changeset_ent
    final_changeset_ent.status = ChangesetStatus.submitted
    final_changeset_ent.linked_changeset = staging_changeset.changeset_ent.key
    if save_manifest:
      final_changeset_ent.num_manifest_shards = num_manifest_shards
    ndb.put_multi([
        staging_changeset_ent,
        final_changeset_ent,
    ])
    # Get a mapping of paths to current _FilePointers (or None).
    file_pointers = {}
    root_file_pointer = _FilePointer.get_root_key(namespace=namespace)
    ordered_paths = staged_files.keys()
    file_pointer_keys = []
    for path in ordered_paths:
      key = ndb.Key(
          _FilePointer, path, parent=root_file_pointer, namespace=namespace)
      file_pointer_keys.append(key)
    file_pointer_ents = ndb.get_multi(file_pointer_keys)
    for i, file_pointer_ent in enumerate(file_pointer_ents):
      file_pointers[ordered_paths[i]] = file_pointer_ent
    new_file_versions = []
    updated_file_pointers = []
    deleted_file_pointers = []
    for path, titan_file in staged_files.iteritems():
      file_pointer = file_pointers[titan_file.path]
      # Update "edited" status to be "created" on commit if file doesn't exist.
      status = titan_file.meta.status
      if titan_file.meta.status == FileStatus.edited and not file_pointer:
        status = FileStatus.created
      # Create a _FileVersion entity containing revision metadata.
      new_file_version = _FileVersion(
          # NDB args:
          id=_FileVersion.make_key_name(final_changeset, titan_file.path),
          namespace=namespace,
          parent=final_changeset.changeset_ent.key,
          # Model args:
          path=titan_file.path,
          changeset_num=final_changeset.num,
          changeset_created_by=final_changeset.created_by,
          # This is correctly modified_by, not created_by. We want to store the
          # user who made this file revision, not the original created_by user.
          created_by=titan_file.modified_by,
          status=status)
      new_file_versions.append(new_file_version)
      # Create or change the _FilePointer for this file.
      if not file_pointer and status != FileStatus.deleted:
        # New file, setup the pointer.
        file_pointer = _FilePointer(id=titan_file.path,
                                    parent=root_file_pointer,
                                    changeset_num=staging_changeset.num,
                                    namespace=namespace)
      elif file_pointer:
        # Important: the file pointer is pointed to the staged changeset number,
        # since a file is not copied on commit from ver/1/file to ver/2/file.
        file_pointer.changeset_num = staging_changeset.num
      # Files versions marked as "deleted" should delete the _FilePointer.
      if status == FileStatus.deleted:
        # Only delete file_pointer if it exists.
        if file_pointer:
          deleted_file_pointers.append(file_pointer)
      else:
        updated_file_pointers.append(file_pointer)
    # For all file changes and updated pointers, do the RPCs.
    if new_file_versions:
      ndb.put_multi(new_file_versions)
    if updated_file_pointers:
      ndb.put_multi(updated_file_pointers)
    if deleted_file_pointers:
      ndb.delete_multi([p.key for p in deleted_file_pointers])
    if new_manifest_shards:
      ndb.put_multi(new_manifest_shards)
    logging.info('Submitted staging changeset %d as final changeset %d.',
                 staging_changeset.num, final_changeset.num)
    return final_changeset
def _get_manifest_shard_index(path, num_manifest_shards):
  """Deterministically maps a file path to one of the manifest shard buckets."""
  # MD5 gives a well-spread, stable hash of the path; mod picks the bucket.
  digest = hashlib.md5(path).hexdigest()
  return int(digest, 16) % num_manifest_shards
def _make_manifest_shard_keys(changeset):
  """Gets a list of ndb.Key objects for all of a changeset's manifest shards."""
  namespace = changeset.namespace
  parent = _ChangesetManifestShard.get_root_key(
      changeset.num, namespace=namespace)
  shard_keys = []
  for index in range(changeset._num_manifest_shards):
    shard_id = _make_manifest_shard_id(changeset, index)
    shard_keys.append(ndb.Key(
        _ChangesetManifestShard, shard_id, namespace=namespace, parent=parent))
  return shard_keys
def _make_manifest_shard_id(changeset, shard_index):
  """Composes the '<changeset num>:<shard index>' entity ID for one shard."""
  return '%d:%d' % (changeset.num, shard_index)
def _make_versioned_path(path, changeset):
  """Return the versioned file path string for path within changeset.

  Example: ('/foo.html', changeset 12) -> '/_titan/ver/12/foo.html'.
  (The previous docstring incorrectly described a two-tuple return.)
  """
  # Make sure we're not accidentally using non-strings,
  # which could create a path like /_titan/ver/123<Some object>
  if not isinstance(path, basestring):
    raise TypeError('path argument must be a string: %r' % path)
  return VERSIONS_PATH_FORMAT % (changeset.num, path)
def _fetch_full_manifest(base_changeset):
  """Loads every manifest shard of base_changeset and merges them into a dict."""
  shard_keys = _make_manifest_shard_keys(base_changeset)
  manifest_shards = ndb.get_multi(shard_keys)
  # A missing shard means the manifest is corrupt/incomplete; abort.
  if not all(manifest_shards):
    raise CommitError(
        'Expected complete manifest shards, but got: {!r}'.format(
            manifest_shards))
  full_manifest = {}
  for shard in manifest_shards:
    full_manifest.update(shard.paths_to_changeset_num)
  return full_manifest
def _list_manifested_paths(changeset, dir_path, recursive=False):
  """Filters a changeset's full manifest down to the paths under dir_path."""
  manifest = _fetch_full_manifest(changeset)
  # Normalize to a trailing slash so startswith() matches whole directories.
  if dir_path != '/' and not dir_path.endswith('/'):
    dir_path += '/'
  # Depth 0 is root, depth 1 is one folder deep, etc.
  wanted_depth = dir_path.count('/') - 1
  results = {}
  for path, changeset_num in manifest.iteritems():
    path_depth = path.count('/') - 1
    # Discard paths which are not deep enough or too deep.
    if path_depth < wanted_depth or (
        not recursive and path_depth > wanted_depth):
      continue
    # Discard paths in different directories.
    if not path.startswith(dir_path):
      continue
    results[path] = changeset_num
  return results
def _require_file_has_changeset(titan_file):
  """Raises InvalidChangesetError unless the file has an associated changeset."""
  if titan_file.changeset:
    return
  raise InvalidChangesetError(
      'File modification requires an associated changeset.')
def _require_file_has_staging_changeset(titan_file):
  """If changeset is committed, don't allow files to be changed."""
  status = titan_file.changeset.status
  if status != ChangesetStatus.staging:
    raise ChangesetError('Cannot change files in a "%s" changeset.' % status)
def _verify_root_paths(paths):
  """Make sure all given paths are not versioned paths."""
  # Accept either a single path or any iterable of paths.
  if not hasattr(paths, '__iter__'):
    paths = [paths]
  for path in paths:
    if VERSIONS_PATH_BASE_REGEX.match(path):
      raise ValueError('Not a root file path: %s' % path)
def _verify_versioned_paths(paths):
  """Make sure all given paths are versioned paths."""
  # Accept either a single path or any iterable of paths.
  if not hasattr(paths, '__iter__'):
    paths = [paths]
  for path in paths:
    if not VERSIONS_PATH_BASE_REGEX.match(path):
      raise ValueError('Not a versioned file path: %s' % path)
|
apache-2.0
|
Zhongqilong/kbengine
|
kbe/src/lib/python/Lib/lib2to3/fixes/fix_renames.py
|
203
|
2221
|
"""Fix incompatible renames
Fixes:
* sys.maxint -> sys.maxsize
"""
# Author: Christian Heimes
# based on Collin Winter's fix_import
# Local imports
from .. import fixer_base
from ..fixer_util import Name, attr_chain
# module name -> {old attribute name -> new attribute name}; the renames
# this fixer applies (currently only sys.maxint -> sys.maxsize).
MAPPING = {"sys": {"maxint" : "maxsize"},
          }
# Populated as a side effect of build_pattern():
# (module, old_attr) -> new_attr, consulted by FixRenames.transform().
LOOKUP = {}
def alternates(members):
    """Builds a pattern alternation such as "('a'|'b')" over repr'd members."""
    return "(%s)" % "|".join(repr(member) for member in members)
def build_pattern():
    """Yields lib2to3 match patterns for each (module, old_attr) rename.

    Side effect: fills the module-level LOOKUP table, which transform()
    consults to find the replacement attribute name.
    """
    #bare = set()
    for module, replace in list(MAPPING.items()):
        for old_attr, new_attr in list(replace.items()):
            LOOKUP[(module, old_attr)] = new_attr
            #bare.add(module)
            #bare.add(old_attr)
            #yield """
            #      import_name< 'import' (module=%r
            #          | dotted_as_names< any* module=%r any* >) >
            #      """ % (module, module)
            # Matches 'from sys import maxint' / 'from sys import maxint as x'.
            yield """
                  import_from< 'from' module_name=%r 'import'
                      ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) >
                  """ % (module, old_attr, old_attr)
            # Matches attribute access such as 'sys.maxint'.
            yield """
                  power< module_name=%r trailer< '.' attr_name=%r > any* >
                  """ % (module, old_attr)
    #yield """bare_name=%s""" % alternates(bare)
class FixRenames(fixer_base.BaseFix):
    """Fixer that renames module attributes, e.g. sys.maxint -> sys.maxsize."""

    BM_compatible = True
    PATTERN = "|".join(build_pattern())

    order = "pre" # Pre-order tree traversal

    def match(self, node):
        """Matches node, but rejects nodes nested within another match."""
        base_match = super(FixRenames, self).match
        results = base_match(node)
        if not results:
            return False
        # Don't match the node if it's within another match.
        if any(base_match(ancestor) for ancestor in attr_chain(node, "parent")):
            return False
        return results

    def transform(self, node, results):
        """Replaces the matched old attribute name with its new spelling."""
        mod_name = results.get("module_name")
        attr_name = results.get("attr_name")
        if mod_name and attr_name:
            new_attr = LOOKUP[(mod_name.value, attr_name.value)]
            attr_name.replace(Name(new_attr, prefix=attr_name.prefix))
|
lgpl-3.0
|
lenstr/rethinkdb
|
external/v8_3.30.33.16/testing/gtest/scripts/pump.py
|
2471
|
23673
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
# (compiled regex, token type) pairs scanned by FindFirstInLine(). When two
# patterns match at the same start position the EARLIER entry wins, so the
# specific directives precede the generic '$id' and bare '$' patterns.
TOKEN_TABLE = [
    (re.compile(r'\$var\s+'), '$var'),
    (re.compile(r'\$elif\s+'), '$elif'),
    (re.compile(r'\$else\s+'), '$else'),
    (re.compile(r'\$for\s+'), '$for'),
    (re.compile(r'\$if\s+'), '$if'),
    (re.compile(r'\$range\s+'), '$range'),
    (re.compile(r'\$[_A-Za-z]\w*'), '$id'),
    (re.compile(r'\$\(\$\)'), '$($)'),
    (re.compile(r'\$'), '$'),
    (re.compile(r'\[\[\n?'), '[['),
    (re.compile(r'\]\]\n?'), ']]'),
    ]
class Cursor:
  """Represents a position (line and column) in a text file.

  Cursors order lexicographically: first by line, then by column.
  """

  def __init__(self, line=-1, column=-1):
    self.line = line
    self.column = column

  def __eq__(self, rhs):
    return (self.line, self.column) == (rhs.line, rhs.column)

  def __ne__(self, rhs):
    return (self.line, self.column) != (rhs.line, rhs.column)

  def __lt__(self, rhs):
    return (self.line, self.column) < (rhs.line, rhs.column)

  def __le__(self, rhs):
    return (self.line, self.column) <= (rhs.line, rhs.column)

  def __gt__(self, rhs):
    return (rhs.line, rhs.column) < (self.line, self.column)

  def __ge__(self, rhs):
    return (rhs.line, rhs.column) <= (self.line, self.column)

  def __str__(self):
    if self == Eof():
      return 'EOF'
    else:
      return '%s(%s)' % (self.line + 1, self.column)

  def __add__(self, offset):
    # Offsets move only within the current line.
    return Cursor(self.line, self.column + offset)

  def __sub__(self, offset):
    return Cursor(self.line, self.column - offset)

  def Clone(self):
    """Returns a copy of self."""
    return Cursor(self.line, self.column)
# Special cursor to indicate the end-of-file.
def Eof():
  """Returns the special cursor to denote the end-of-file.

  Because Cursor compares by value, any Cursor(-1, -1) equals Eof().
  """
  return Cursor(-1, -1)
class Token:
  """Represents a token in a Pump source file."""

  def __init__(self, start=None, end=None, value=None, token_type=None):
    # Missing boundaries default to the EOF sentinel cursor.
    self.start = Eof() if start is None else start
    self.end = Eof() if end is None else end
    self.value = value
    self.token_type = token_type

  def __str__(self):
    return 'Token @%s: \'%s\' type=%s' % (
        self.start, self.value, self.token_type)

  def Clone(self):
    """Returns a copy of self."""
    return Token(self.start.Clone(), self.end.Clone(), self.value,
                 self.token_type)
def StartsWith(lines, pos, string):
  """Returns True iff the given position in lines starts with 'string'."""
  remainder = lines[pos.line][pos.column:]
  return remainder.startswith(string)
def FindFirstInLine(line, token_table):
  """Returns (start, length, token_type) of the earliest match, or None.

  Ties on start position go to the earlier table entry.
  """
  best = None
  for regex, token_type in token_table:
    m = regex.search(line)
    if not m:
      continue
    # Strict '<' keeps the first table entry on equal start positions.
    if best is None or m.start() < best[0]:
      best = (m.start(), m.end() - m.start(), token_type)
  return best
def FindFirst(lines, token_table, cursor):
  """Finds the first occurrence of any string in strings in lines."""
  start = cursor.Clone()
  for line_number in range(start.line, len(lines)):
    line = lines[line_number]
    column_offset = 0
    if line_number == start.line:
      # Resume mid-line on the first searched line.
      line = line[start.column:]
      column_offset = start.column
    m = FindFirstInLine(line, token_table)
    if m:
      # We found a regex in line.
      start_column, length, token_type = m
      found_start = Cursor(line_number, start_column + column_offset)
      return MakeToken(lines, found_start, found_start + length, token_type)
  # We failed to find str in lines.
  return None
def SubString(lines, start, end):
  """Returns a substring in lines."""
  if end == Eof():
    end = Cursor(len(lines) - 1, len(lines[-1]))
  if start >= end:
    return ''
  if start.line == end.line:
    return lines[start.line][start.column:end.column]
  # Spans multiple lines: partial first line, whole middle, partial last.
  pieces = [lines[start.line][start.column:]]
  pieces.extend(lines[start.line + 1:end.line])
  pieces.append(lines[end.line][:end.column])
  return ''.join(pieces)
def StripMetaComments(str):
  """Strip meta comments from each line in the given string."""
  # First, completely remove lines containing nothing but a meta
  # comment, including the trailing \n.
  without_comment_lines = re.sub(r'^\s*\$\$.*\n', '', str)
  # Then, remove meta comments from contentful lines.
  return re.sub(r'\s*\$\$.*', '', without_comment_lines)
def MakeToken(lines, start, end, token_type):
  """Creates a new instance of Token."""
  # The token's value is the literal source text spanned by [start, end).
  return Token(start, end, SubString(lines, start, end), token_type)
def ParseToken(lines, pos, regex, token_type):
  # The regex must match immediately AT pos (m.start() == 0); anything else
  # is a fatal parse error that aborts the program.
  line = lines[pos.line][pos.column:]
  m = regex.search(line)
  if m and not m.start():
    return MakeToken(lines, pos, pos + m.end(), token_type)
  else:
    print 'ERROR: %s expected at %s.' % (token_type, pos)
    sys.exit(1)
# Regexes used by the tokenizer; each is matched at the current parse position.
ID_REGEX = re.compile(r'[_A-Za-z]\w*')
EQ_REGEX = re.compile(r'=')
# Everything up to end-of-line or the start of a '$$' meta comment.
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')
def Skip(lines, pos, regex):
  """Returns the position just past a match of regex at pos, or pos itself."""
  line = lines[pos.line][pos.column:]
  m = re.search(regex, line)
  # Only advance when the match starts exactly at pos.
  if m and not m.start():
    return pos + m.end()
  return pos
def SkipUntil(lines, pos, regex, token_type):
  # Advances to the start of the next regex match on the current line;
  # failure to find one is a fatal parse error that aborts the program.
  line = lines[pos.line][pos.column:]
  m = re.search(regex, line)
  if m:
    return pos + m.start()
  else:
    print ('ERROR: %s expected on line %s after column %s.' %
           (token_type, pos.line + 1, pos.column))
    sys.exit(1)
def ParseExpTokenInParens(lines, pos):
  """Parses a parenthesized expression starting at pos into one 'exp' token."""
  def ParseInParens(pos):
    # Consume optional whitespace, the opening '(', the balanced body,
    # then the closing ')'.
    pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
    pos = Skip(lines, pos, r'\(')
    pos = Parse(pos)
    pos = Skip(lines, pos, r'\)')
    return pos

  def Parse(pos):
    # Scan to the next paren; recurse on '(' so nesting stays balanced.
    pos = SkipUntil(lines, pos, r'\(|\)', ')')
    if SubString(lines, pos, pos + 1) == '(':
      pos = Parse(pos + 1)
      pos = Skip(lines, pos, r'\)')
      return Parse(pos)
    else:
      return pos

  start = pos.Clone()
  pos = ParseInParens(pos)
  return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
  """Returns token with one trailing newline stripped from its value."""
  if not token.value.endswith('\n'):
    return token
  return Token(token.start, token.end, token.value[:-1], token.token_type)
def TokenizeLines(lines, pos):
  """Generator yielding Pump tokens from lines, starting at pos."""
  while True:
    found = FindFirst(lines, TOKEN_TABLE, pos)
    if not found:
      # No more directives: the rest of the input is one 'code' token.
      yield MakeToken(lines, pos, Eof(), 'code')
      return

    # Raw code between the current position and the found directive.
    if found.start == pos:
      prev_token = None
      prev_token_rstripped = None
    else:
      prev_token = MakeToken(lines, pos, found.start, 'code')
      prev_token_rstripped = RStripNewLineFromToken(prev_token)

    if found.token_type == '$var':
      # $var ID = <exp or [[ code ]]>
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)

      eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
      yield eq_token

      pos = Skip(lines, eq_token.end, r'\s*')

      if SubString(lines, pos, pos + 2) != '[[':
        # The value is an inline expression rather than a code block.
        exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
        yield exp_token
        pos = Cursor(exp_token.end.line + 1, 0)
    elif found.token_type == '$for':
      # $for ID SEPARATOR [[ code ]]
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
    elif found.token_type == '$range':
      # $range ID EXP..EXP
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)

      dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
      yield MakeToken(lines, pos, dots_pos, 'exp')
      yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
      pos = dots_pos + 2
      new_pos = Cursor(pos.line + 1, 0)
      yield MakeToken(lines, pos, new_pos, 'exp')
      pos = new_pos
    elif found.token_type == '$':
      # $(EXPRESSION) — note the preceding raw code keeps its newline here.
      if prev_token:
        yield prev_token
      yield found
      exp_token = ParseExpTokenInParens(lines, found.end)
      yield exp_token
      pos = exp_token.end
    elif (found.token_type == ']]' or found.token_type == '$if' or
          found.token_type == '$elif' or found.token_type == '$else'):
      # Structural tokens swallow the newline of the preceding raw code.
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      pos = found.end
    else:
      if prev_token:
        yield prev_token
      yield found
      pos = found.end
def Tokenize(s):
  """A generator that yields the tokens in the given string."""
  if not s:
    return
  for token in TokenizeLines(s.splitlines(True), Cursor(0, 0)):
    yield token
class CodeNode:
  """AST node: an ordered sequence of atomic code nodes."""

  def __init__(self, atomic_code_list=None):
    self.atomic_code = atomic_code_list
class VarNode:
  """AST node: a $var definition binding an identifier to code/expression."""

  def __init__(self, identifier=None, atomic_code=None):
    self.identifier = identifier
    self.atomic_code = atomic_code
class RangeNode:
  """AST node: a $range declaration with lower/upper bound expressions."""

  def __init__(self, identifier=None, exp1=None, exp2=None):
    self.identifier = identifier
    self.exp1 = exp1
    self.exp2 = exp2
class ForNode:
  """AST node: a $for loop with an optional separator between iterations."""

  def __init__(self, identifier=None, sep=None, code=None):
    self.identifier = identifier
    self.sep = sep
    self.code = code
class ElseNode:
  """AST node: the $else branch of a conditional."""

  def __init__(self, else_branch=None):
    self.else_branch = else_branch
class IfNode:
  """AST node: a $if with a condition, then-branch and optional else-branch."""

  def __init__(self, exp=None, then_branch=None, else_branch=None):
    self.exp = exp
    self.then_branch = then_branch
    self.else_branch = else_branch
class RawCodeNode:
  """AST node: literal output text, emitted verbatim."""

  def __init__(self, token=None):
    self.raw_code = token
class LiteralDollarNode:
  """AST node: the $($) escape, which renders a literal '$'."""

  def __init__(self, token):
    self.token = token
class ExpNode:
  """AST node: a meta expression plus its rewritten Python form."""

  def __init__(self, token, python_exp):
    self.token = token
    self.python_exp = python_exp
def PopFront(a_list):
  """Removes and returns the first element of a_list."""
  return a_list.pop(0)
def PushFront(a_list, elem):
  """Inserts elem at the front of a_list, in place."""
  a_list.insert(0, elem)
def PopToken(a_list, token_type=None):
  # Pops the front token; if token_type is given and doesn't match, this is
  # a fatal parse error that aborts the program.
  token = PopFront(a_list)
  if token_type is not None and token.token_type != token_type:
    print 'ERROR: %s expected at %s' % (token_type, token.start)
    print 'ERROR: %s found instead' % (token,)
    sys.exit(1)
  return token
def PeekToken(a_list):
  """Returns the first element of a_list without removing it, or None."""
  return a_list[0] if a_list else None
def ParseExpNode(token):
  """Wraps token in an ExpNode whose identifiers resolve via self.GetValue()."""
  rewritten = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
  return ExpNode(token, rewritten)
def ParseElseNode(tokens):
  """Parses an optional $else/$elif continuation; returns a node or None."""
  def Pop(token_type=None):
    return PopToken(tokens, token_type)

  next = PeekToken(tokens)
  if not next:
    return None
  if next.token_type == '$else':
    Pop('$else')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  elif next.token_type == '$elif':
    # $elif desugars into a nested IfNode inside the else branch.
    Pop('$elif')
    exp = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    inner_else_node = ParseElseNode(tokens)
    return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
  elif not next.value.strip():
    # Whitespace-only code token between branches: skip it and retry.
    Pop('code')
    return ParseElseNode(tokens)
  else:
    return None
def ParseAtomicCodeNode(tokens):
  """Parses one ATOMIC_CODE production; returns a node or None if impossible."""
  def Pop(token_type=None):
    return PopToken(tokens, token_type)

  head = PopFront(tokens)
  t = head.token_type
  if t == 'code':
    return RawCodeNode(head)
  elif t == '$var':
    # $var ID = <exp>  |  $var ID = [[ code ]]
    id_token = Pop('id')
    Pop('=')
    next = PeekToken(tokens)
    if next.token_type == 'exp':
      exp_token = Pop()
      return VarNode(id_token, ParseExpNode(exp_token))
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return VarNode(id_token, code_node)
  elif t == '$for':
    # $for ID [separator-code] [[ code ]]
    id_token = Pop('id')
    next_token = PeekToken(tokens)
    if next_token.token_type == 'code':
      sep_token = next_token
      Pop('code')
    else:
      sep_token = None
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return ForNode(id_token, sep_token, code_node)
  elif t == '$if':
    exp_token = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    else_node = ParseElseNode(tokens)
    return IfNode(ParseExpNode(exp_token), code_node, else_node)
  elif t == '$range':
    id_token = Pop('id')
    exp1_token = Pop('exp')
    Pop('..')
    exp2_token = Pop('exp')
    return RangeNode(id_token, ParseExpNode(exp1_token),
                     ParseExpNode(exp2_token))
  elif t == '$id':
    # Strip the leading '$' so only the identifier becomes the expression.
    return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
  elif t == '$($)':
    return LiteralDollarNode(head)
  elif t == '$':
    exp_token = Pop('exp')
    return ParseExpNode(exp_token)
  elif t == '[[':
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  else:
    # Not an atomic production: restore the token for the caller.
    PushFront(tokens, head)
    return None
def ParseCodeNode(tokens):
  """Parses a maximal run of atomic constructs into a single CodeNode."""
  nodes = []
  while tokens:
    node = ParseAtomicCodeNode(tokens)
    if not node:
      break
    nodes.append(node)
  return CodeNode(nodes)
def ParseToAST(pump_src_text):
  """Convert the given Pump source text into an AST."""
  return ParseCodeNode(list(Tokenize(pump_src_text)))
class Env:
  """Evaluation environment: stacks of Pump meta variables and ranges."""
  def __init__(self):
    self.variables = []  # Stack of (name, value) pairs; innermost first.
    self.ranges = []     # Stack of (name, lower, upper) triples; innermost first.
  def Clone(self):
    """Returns a shallow copy so nested scopes don't disturb the parent env."""
    clone = Env()
    clone.variables = self.variables[:]
    clone.ranges = self.ranges[:]
    return clone
  def PushVariable(self, var, value):
    # If value looks like an int, store it as an int.
    try:
      int_value = int(value)
      if ('%s' % int_value) == value:
        value = int_value
    except Exception:
      pass
    # Prepend so lookup in GetValue finds the innermost binding first.
    self.variables[:0] = [(var, value)]
  def PopVariable(self):
    # Drops the most recently pushed variable binding.
    self.variables[:1] = []
  def PushRange(self, var, lower, upper):
    self.ranges[:0] = [(var, lower, upper)]
  def PopRange(self):
    self.ranges[:1] = []
  def GetValue(self, identifier):
    """Returns the innermost value bound to identifier; exits on failure."""
    for (var, value) in self.variables:
      if identifier == var:
        return value
    print 'ERROR: meta variable %s is undefined.' % (identifier,)
    sys.exit(1)
  def EvalExp(self, exp):
    """Evaluates a meta expression via eval(); prints and exits on failure.

    NOTE(review): eval() on template-supplied text is only safe because .pump
    inputs are trusted, developer-authored files.
    """
    try:
      result = eval(exp.python_exp)
    except Exception, e:
      print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
      print ('ERROR: failed to evaluate meta expression %s at %s' %
             (exp.python_exp, exp.token.start))
      sys.exit(1)
    return result
  def GetRange(self, identifier):
    """Returns the (lower, upper) pair of a $range binding; exits on failure."""
    for (var, lower, upper) in self.ranges:
      if identifier == var:
        return (lower, upper)
    print 'ERROR: range %s is undefined.' % (identifier,)
    sys.exit(1)
class Output:
  """Accumulates generated text and exposes the current (last) line."""

  def __init__(self):
    self.string = ''

  def GetLastLine(self):
    """Returns the text after the final newline, or '' if there is none."""
    _, newline, tail = self.string.rpartition('\n')
    if not newline:
      # No newline seen yet: the original contract returns '' here.
      return ''
    return tail

  def Append(self, s):
    """Appends s verbatim to the accumulated output."""
    self.string = self.string + s
def RunAtomicCode(env, node, output):
  """Executes one AST node against env, appending generated text to output."""
  if isinstance(node, VarNode):
    # $var: evaluate the body in a child scope; bind the result (a string,
    # or an int if it looks like one — see Env.PushVariable).
    identifier = node.identifier.value.strip()
    result = Output()
    RunAtomicCode(env.Clone(), node.atomic_code, result)
    value = result.string
    env.PushVariable(identifier, value)
  elif isinstance(node, RangeNode):
    identifier = node.identifier.value.strip()
    lower = int(env.EvalExp(node.exp1))
    upper = int(env.EvalExp(node.exp2))
    env.PushRange(identifier, lower, upper)
  elif isinstance(node, ForNode):
    # $for: iterate the previously declared $range (inclusive), joining
    # iterations with the optional separator.
    identifier = node.identifier.value.strip()
    if node.sep is None:
      sep = ''
    else:
      sep = node.sep.value
    (lower, upper) = env.GetRange(identifier)
    for i in range(lower, upper + 1):
      new_env = env.Clone()
      new_env.PushVariable(identifier, i)
      RunCode(new_env, node.code, output)
      if i != upper:
        output.Append(sep)
  elif isinstance(node, RawCodeNode):
    output.Append(node.raw_code.value)
  elif isinstance(node, IfNode):
    cond = env.EvalExp(node.exp)
    if cond:
      RunCode(env.Clone(), node.then_branch, output)
    elif node.else_branch is not None:
      RunCode(env.Clone(), node.else_branch, output)
  elif isinstance(node, ExpNode):
    value = env.EvalExp(node)
    output.Append('%s' % (value,))
  elif isinstance(node, LiteralDollarNode):
    output.Append('$')
  elif isinstance(node, CodeNode):
    RunCode(env.Clone(), node, output)
  else:
    # Unknown node type: report and abort (Python 2 print statements).
    print 'BAD'
    print node
    sys.exit(1)
def RunCode(env, code_node, output):
  """Executes every atomic node of code_node in order against env."""
  for node in code_node.atomic_code:
    RunAtomicCode(env, node, output)
def IsSingleLineComment(cur_line):
  """Returns True if the line contains a // comment marker anywhere."""
  return cur_line.find('//') >= 0
def IsInPreprocessorDirective(prev_lines, cur_line):
  """True if cur_line starts a #-directive or continues one from prev_lines.

  A continuation is signalled by the previous emitted line ending in '\\'.
  """
  stripped = cur_line.lstrip()
  if stripped.startswith('#'):
    return True
  # Keep the original truthy-chain result shape: falsy when prev_lines is
  # empty, otherwise the endswith() boolean.
  return prev_lines and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
  """Wraps a line containing a // comment to 80 columns, appending to output.

  Any code before the comment is emitted on its own line; the comment text is
  re-flowed at word boundaries under a '// ' prefix at the proper indent.
  """
  marker = line.find('//')
  code_part = line[:marker].rstrip()
  if code_part:
    # Emit the code portion first; the comment inherits its indentation.
    output.append(code_part)
    indent = len(code_part) - len(code_part.lstrip())
  else:
    indent = marker
  prefix = ' ' * indent + '// '
  limit = 80 - len(prefix)
  body = line[marker + 2:].strip()
  # Split into word-sized chunks so wrapping happens at word boundaries.
  chunks = [c for c in re.split(r'(\w+\W*)', body) if c]
  pending = ''
  for chunk in chunks:
    if len((pending + chunk).rstrip()) < limit:
      pending = pending + chunk
    else:
      if pending.strip():
        output.append(prefix + pending.rstrip())
      pending = chunk.lstrip()
  if pending.strip():
    output.append(prefix + pending.strip())
def WrapCode(line, line_concat, output):
  """Wraps an overlong code line to 80 columns, appending pieces to output.

  line_concat is appended to every wrapped (non-final) piece — e.g. ' \\' for
  preprocessor directives, '' for plain code. Continuation lines get 4 extra
  spaces of indent, and prefer to break after a ',' or ';' (falling back to
  breaking at a space).
  """
  indent = len(line) - len(line.lstrip())
  prefix = indent*' '  # Prefix of the current line
  max_len = 80 - indent - len(line_concat)  # Maximum length of the current line
  new_prefix = prefix + 4*' '  # Prefix of a continuation line
  new_max_len = max_len - 4  # Maximum length of a continuation line
  # Prefers to wrap a line after a ',' or ';'.
  segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
  cur_line = ''  # The current line without leading spaces.
  for seg in segs:
    # If the line is still too long, wrap at a space.
    while cur_line == '' and len(seg.strip()) > max_len:
      seg = seg.lstrip()
      split_at = seg.rfind(' ', 0, max_len)
      output.append(prefix + seg[:split_at].strip() + line_concat)
      seg = seg[split_at + 1:]
      # After the first emitted piece, switch to continuation-line geometry.
      prefix = new_prefix
      max_len = new_max_len
    if len((cur_line + seg).rstrip()) < max_len:
      cur_line = (cur_line + seg).lstrip()
    else:
      output.append(prefix + cur_line.rstrip() + line_concat)
      prefix = new_prefix
      max_len = new_max_len
      cur_line = seg.lstrip()
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
def WrapPreprocessorDirective(line, output):
  """Wraps an overlong #-directive, marking continuations with a trailing ' \\'."""
  continuation = ' \\'
  WrapCode(line, continuation, output)
def WrapPlainCode(line, output):
  """Wraps an overlong ordinary code line with no continuation marker."""
  WrapCode(line, '', output)
_MULTILINE_IWYU_PATTERN = re.compile(r'/\* IWYU pragma: ')


def IsMultiLineIWYUPragma(line):
  """Returns a truthy match when line contains a C-style IWYU pragma."""
  return _MULTILINE_IWYU_PATTERN.search(line)
def IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
  """Truthy for header-guard directives, #include lines, and // IWYU pragmas."""
  guard = re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line)
  include = re.match(r'^#include\s', line)
  # Don't break IWYU pragmas, either; that causes iwyu.py problems.
  pragma = re.search(r'// IWYU pragma: ', line)
  return guard or include or pragma
def WrapLongLine(line, output):
  """Appends line to output, wrapping it when it exceeds 80 columns."""
  line = line.rstrip()
  if len(line) <= 80:
    output.append(line)
    return
  if IsSingleLineComment(line):
    if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
      # The style guide made an exception to allow long header guard lines,
      # includes and IWYU pragmas.
      output.append(line)
    else:
      WrapComment(line, output)
    return
  # output doubles as the "previous lines" history for continuation checks.
  if IsInPreprocessorDirective(output, line):
    if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
      # Same exception as above for long guards/includes/pragmas.
      output.append(line)
    else:
      WrapPreprocessorDirective(line, output)
    return
  if IsMultiLineIWYUPragma(line):
    output.append(line)
    return
  WrapPlainCode(line, output)
def BeautifyCode(string):
  """Wraps long lines and strips trailing whitespace; returns the new text."""
  wrapped = []
  for source_line in string.splitlines():
    WrapLongLine(source_line, wrapped)
  return '\n'.join(l.rstrip() for l in wrapped) + '\n'
def ConvertFromPumpSource(src_text):
  """Return the text generated from the given Pump source text."""
  ast = ParseToAST(StripMetaComments(src_text))
  generated = Output()
  RunCode(Env(), ast, generated)
  return BeautifyCode(generated.string)
def main(argv):
  """Entry point: converts the .pump file named by the last argument.

  When the input name ends in '.pump', writes the generated text (with a
  DO-NOT-EDIT banner) to the same name minus the extension; otherwise the
  result goes to stdout.
  """
  if len(argv) == 1:
    print __doc__
    sys.exit(1)
  file_path = argv[-1]
  # Python 2 file() builtin; the whole source is read into memory at once.
  output_str = ConvertFromPumpSource(file(file_path, 'r').read())
  if file_path.endswith('.pump'):
    output_file_path = file_path[:-5]
  else:
    output_file_path = '-'  # '-' means "write to stdout" below.
  if output_file_path == '-':
    print output_str,
  else:
    output_file = file(output_file_path, 'w')
    output_file.write('// This file was GENERATED by command:\n')
    output_file.write('// %s %s\n' %
                      (os.path.basename(__file__), os.path.basename(file_path)))
    output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
    output_file.write(output_str)
    output_file.close()
# Script entry point: the last command-line argument is the .pump input file.
if __name__ == '__main__':
  main(sys.argv)
|
agpl-3.0
|
lseyesl/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link.py
|
196
|
2272
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utility module for reftests."""
from HTMLParser import HTMLParser
class ExtractReferenceLinkParser(HTMLParser):
    """Collects reftest reference URLs from <link rel="match|mismatch"> tags."""

    def __init__(self):
        HTMLParser.__init__(self)
        self.matches = []      # hrefs of rel="match" links
        self.mismatches = []   # hrefs of rel="mismatch" links

    def handle_starttag(self, tag, attrs):
        # Only <link> elements carrying both rel and href are of interest.
        if tag != "link":
            return
        attributes = dict(attrs)
        if "rel" not in attributes or "href" not in attributes:
            return
        rel = attributes["rel"]
        href = attributes["href"]
        if rel == "match":
            self.matches.append(href)
        elif rel == "mismatch":
            self.mismatches.append(href)
def get_reference_link(html_string):
    """Returns reference links in the given html_string.

    Returns:
        a tuple of two URL lists, (matches, mismatches).
    """
    extractor = ExtractReferenceLinkParser()
    extractor.feed(html_string)
    extractor.close()
    return extractor.matches, extractor.mismatches
|
bsd-3-clause
|
zoggn/kernel_tcl_msm8610
|
Documentation/target/tcm_mod_builder.py
|
4981
|
41422
|
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
# Module-level state shared by the generator functions below.
tcm_dir = ""               # Root of the kernel tree being scanned
fabric_ops = []            # Function-pointer lines harvested from target_core_fabric.h
fabric_mod_dir = ""        # Output directory for the generated fabric module
fabric_mod_port = ""       # Target-port prefix ("lport"/"tport"), set per protocol
fabric_mod_init_port = ""  # Initiator-port prefix ("nport"/"iport"), set per protocol
def tcm_mod_err(msg):
	"""Prints msg and terminates the script with exit status 1."""
	print msg
	sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
	"""Creates the fabric module output directory; returns 1 if it already exists."""
	if os.path.isdir(fabric_mod_dir_var) == True:
		return 1
	print "Creating fabric_mod_dir: " + fabric_mod_dir_var
	ret = os.mkdir(fabric_mod_dir_var)
	# NOTE(review): os.mkdir() returns None, so this error branch can never
	# trigger; a failure raises OSError instead — confirm intended handling.
	if ret:
		tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
	return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
	"""Generates <module>_base.h with FC (Fibre Channel) nacl/tpg/lport structs.

	Side effect: sets the module-level fabric_mod_port ("lport") and
	fabric_mod_init_port ("nport") prefixes used by later generators.
	"""
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print "Writing file: " + f
	p = open(f, 'w');
	# NOTE(review): open() raises IOError on failure in Python 2; this
	# falsy-handle check is effectively dead — confirm intended handling.
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
	buf += " u64 nport_wwpn;\n"
	buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
	buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += " struct se_node_acl se_node_acl;\n"
	buf += "};\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += " /* FC lport target portal group tag for TCM */\n"
	buf += " u16 lport_tpgt;\n"
	buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
	buf += " struct " + fabric_mod_name + "_lport *lport;\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += " struct se_portal_group se_tpg;\n"
	buf += "};\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_lport {\n"
	buf += " /* SCSI protocol the lport is providing */\n"
	buf += " u8 lport_proto_id;\n"
	buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
	buf += " u64 lport_wwpn;\n"
	buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
	buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
	buf += " struct se_wwn lport_wwn;\n"
	buf += "};\n"
	ret = p.write(buf)
	# NOTE(review): file.write() returns None in Python 2, so this check
	# never fires; a write error raises IOError instead — confirm.
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	fabric_mod_port = "lport"
	fabric_mod_init_port = "nport"
	return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
	"""Generates <module>_base.h with SAS nacl/tpg/tport structs.

	Side effect: sets the module-level fabric_mod_port ("tport") and
	fabric_mod_init_port ("iport") prefixes used by later generators.
	"""
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print "Writing file: " + f
	p = open(f, 'w');
	# NOTE(review): open() raises IOError on failure; falsy-handle check is
	# effectively dead — confirm intended handling.
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
	buf += " u64 iport_wwpn;\n"
	buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
	buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += " struct se_node_acl se_node_acl;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += " /* SAS port target portal group tag for TCM */\n"
	buf += " u16 tport_tpgt;\n"
	buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
	buf += " struct " + fabric_mod_name + "_tport *tport;\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += " struct se_portal_group se_tpg;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tport {\n"
	buf += " /* SCSI protocol the tport is providing */\n"
	buf += " u8 tport_proto_id;\n"
	buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
	buf += " u64 tport_wwpn;\n"
	buf += " /* ASCII formatted WWPN for SAS Target port */\n"
	buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
	buf += " struct se_wwn tport_wwn;\n"
	buf += "};\n"
	ret = p.write(buf)
	# NOTE(review): file.write() returns None in Python 2; dead check.
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	fabric_mod_port = "tport"
	fabric_mod_init_port = "iport"
	return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
	"""Generates <module>_base.h with iSCSI nacl/tpg/tport structs (name-based,
	no binary WWPN fields).

	Side effect: sets the module-level fabric_mod_port ("tport") and
	fabric_mod_init_port ("iport") prefixes used by later generators.
	"""
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print "Writing file: " + f
	p = open(f, 'w');
	# NOTE(review): open() raises IOError on failure; falsy-handle check is
	# effectively dead — confirm intended handling.
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += " /* ASCII formatted InitiatorName */\n"
	buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += " struct se_node_acl se_node_acl;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += " /* iSCSI target portal group tag for TCM */\n"
	buf += " u16 tport_tpgt;\n"
	buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
	buf += " struct " + fabric_mod_name + "_tport *tport;\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += " struct se_portal_group se_tpg;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tport {\n"
	buf += " /* SCSI protocol the tport is providing */\n"
	buf += " u8 tport_proto_id;\n"
	buf += " /* ASCII formatted TargetName for IQN */\n"
	buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
	buf += " struct se_wwn tport_wwn;\n"
	buf += "};\n"
	ret = p.write(buf)
	# NOTE(review): file.write() returns None in Python 2; dead check.
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	fabric_mod_port = "tport"
	fabric_mod_init_port = "iport"
	return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
	"""Dispatches header generation to the protocol-specific builder.

	proto_ident must be one of "FC", "SAS" or "iSCSI"; anything else aborts.
	"""
	if proto_ident == "FC":
		tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
	elif proto_ident == "SAS":
		tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
	elif proto_ident == "iSCSI":
		tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
	else:
		print "Unsupported proto_ident: " + proto_ident
		sys.exit(1)
	return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
	"""Generates <module>_configfs.c: configfs glue, the target_core_fabric_ops
	table, and module init/exit for the new fabric module.

	NOTE(review): leading indentation in this source was reconstructed; the
	exact spans of the `if proto_ident` conditionals below should be verified
	against the upstream Documentation/target/tcm_mod_builder.py.
	"""
	buf = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
	print "Writing file: " + f
	p = open(f, 'w');
	# NOTE(review): open() raises IOError on failure; falsy-handle check is
	# effectively dead — confirm intended handling.
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	# Header includes for the generated source file.
	buf = "#include <linux/module.h>\n"
	buf += "#include <linux/moduleparam.h>\n"
	buf += "#include <linux/version.h>\n"
	buf += "#include <generated/utsrelease.h>\n"
	buf += "#include <linux/utsname.h>\n"
	buf += "#include <linux/init.h>\n"
	buf += "#include <linux/slab.h>\n"
	buf += "#include <linux/kthread.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/string.h>\n"
	buf += "#include <linux/configfs.h>\n"
	buf += "#include <linux/ctype.h>\n"
	buf += "#include <asm/unaligned.h>\n\n"
	buf += "#include <target/target_core_base.h>\n"
	buf += "#include <target/target_core_fabric.h>\n"
	buf += "#include <target/target_core_fabric_configfs.h>\n"
	buf += "#include <target/target_core_configfs.h>\n"
	buf += "#include <target/configfs_macros.h>\n\n"
	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
	buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
	buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
	# make_nodeacl(): NodeACL creation callback.
	buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
	buf += " struct se_portal_group *se_tpg,\n"
	buf += " struct config_group *group,\n"
	buf += " const char *name)\n"
	buf += "{\n"
	buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
	buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
	# WWPN declarations only apply to WWN-addressed protocols (FC/SAS).
	if proto_ident == "FC" or proto_ident == "SAS":
		buf += " u64 wwpn = 0;\n"
	buf += " u32 nexus_depth;\n\n"
	buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
	buf += " return ERR_PTR(-EINVAL); */\n"
	buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
	buf += " if (!se_nacl_new)\n"
	buf += " return ERR_PTR(-ENOMEM);\n"
	buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
	buf += " nexus_depth = 1;\n"
	buf += " /*\n"
	buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
	buf += " * when converting a NodeACL from demo mode -> explict\n"
	buf += " */\n"
	buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
	buf += " name, nexus_depth);\n"
	buf += " if (IS_ERR(se_nacl)) {\n"
	buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
	buf += " return se_nacl;\n"
	buf += " }\n"
	buf += " /*\n"
	buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
	buf += " */\n"
	buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
	if proto_ident == "FC" or proto_ident == "SAS":
		buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
	buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
	buf += " return se_nacl;\n"
	buf += "}\n\n"
	# drop_nodeacl(): NodeACL teardown callback.
	buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
	buf += "{\n"
	buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
	buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
	buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
	buf += " kfree(nacl);\n"
	buf += "}\n\n"
	# make_tpg()/drop_tpg(): target portal group lifecycle.
	buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
	buf += " struct se_wwn *wwn,\n"
	buf += " struct config_group *group,\n"
	buf += " const char *name)\n"
	buf += "{\n"
	buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
	buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
	buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
	buf += " unsigned long tpgt;\n"
	buf += " int ret;\n\n"
	buf += " if (strstr(name, \"tpgt_\") != name)\n"
	buf += " return ERR_PTR(-EINVAL);\n"
	buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
	buf += " return ERR_PTR(-EINVAL);\n\n"
	buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
	buf += " if (!tpg) {\n"
	buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
	buf += " return ERR_PTR(-ENOMEM);\n"
	buf += " }\n"
	buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
	buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
	buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
	buf += " &tpg->se_tpg, (void *)tpg,\n"
	buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
	buf += " if (ret < 0) {\n"
	buf += " kfree(tpg);\n"
	buf += " return NULL;\n"
	buf += " }\n"
	buf += " return &tpg->se_tpg;\n"
	buf += "}\n\n"
	buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
	buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
	buf += " core_tpg_deregister(se_tpg);\n"
	buf += " kfree(tpg);\n"
	buf += "}\n\n"
	# make_<port>()/drop_<port>(): WWN endpoint lifecycle.
	buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
	buf += " struct target_fabric_configfs *tf,\n"
	buf += " struct config_group *group,\n"
	buf += " const char *name)\n"
	buf += "{\n"
	buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
	if proto_ident == "FC" or proto_ident == "SAS":
		buf += " u64 wwpn = 0;\n\n"
	buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
	buf += " return ERR_PTR(-EINVAL); */\n\n"
	buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
	buf += " if (!" + fabric_mod_port + ") {\n"
	buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
	buf += " return ERR_PTR(-ENOMEM);\n"
	buf += " }\n"
	if proto_ident == "FC" or proto_ident == "SAS":
		buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
	buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
	buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
	buf += "}\n\n"
	buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
	buf += "{\n"
	buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
	buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
	buf += " kfree(" + fabric_mod_port + ");\n"
	buf += "}\n\n"
	# version attribute shown under the fabric's configfs wwn directory.
	buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
	buf += " struct target_fabric_configfs *tf,\n"
	buf += " char *page)\n"
	buf += "{\n"
	buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
	buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
	buf += " utsname()->machine);\n"
	buf += "}\n\n"
	buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
	buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
	buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
	buf += " NULL,\n"
	buf += "};\n\n"
	# The fabric ops table: maps TCM callbacks to the generated stubs.
	buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
	buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
	buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
	buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
	buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
	buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
	buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
	buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
	buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
	buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
	buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
	buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
	buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
	buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
	buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
	buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
	buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
	buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
	buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
	buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
	buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
	buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
	buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
	buf += " .sess_get_initiator_sid = NULL,\n"
	buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
	buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
	buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
	buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
	buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
	buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
	buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
	buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
	buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
	buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
	buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
	buf += " /*\n"
	buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
	buf += " */\n"
	buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
	buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
	buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
	buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
	buf += " .fabric_post_link = NULL,\n"
	buf += " .fabric_pre_unlink = NULL,\n"
	buf += " .fabric_make_np = NULL,\n"
	buf += " .fabric_drop_np = NULL,\n"
	buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
	buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
	buf += "};\n\n"
	# register/deregister glue and module init/exit.
	buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
	buf += "{\n"
	buf += " struct target_fabric_configfs *fabric;\n"
	buf += " int ret;\n\n"
	buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
	buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
	buf += " utsname()->machine);\n"
	buf += " /*\n"
	buf += " * Register the top level struct config_item_type with TCM core\n"
	buf += " */\n"
	buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
	buf += " if (IS_ERR(fabric)) {\n"
	buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
	buf += " return PTR_ERR(fabric);\n"
	buf += " }\n"
	buf += " /*\n"
	buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
	buf += " */\n"
	buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
	buf += " /*\n"
	buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
	buf += " */\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
	buf += " /*\n"
	buf += " * Register the fabric for use within TCM\n"
	buf += " */\n"
	buf += " ret = target_fabric_configfs_register(fabric);\n"
	buf += " if (ret < 0) {\n"
	buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
	buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
	buf += " return ret;\n"
	buf += " }\n"
	buf += " /*\n"
	buf += " * Setup our local pointer to *fabric\n"
	buf += " */\n"
	buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
	buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
	buf += " return 0;\n"
	buf += "};\n\n"
	buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
	buf += "{\n"
	buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
	buf += " return;\n\n"
	buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
	buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
	buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
	buf += "};\n\n"
	buf += "static int __init " + fabric_mod_name + "_init(void)\n"
	buf += "{\n"
	buf += " int ret;\n\n"
	buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
	buf += " if (ret < 0)\n"
	buf += " return ret;\n\n"
	buf += " return 0;\n"
	buf += "};\n\n"
	buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
	buf += "{\n"
	buf += " " + fabric_mod_name + "_deregister_configfs();\n"
	buf += "};\n\n"
	buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
	buf += "MODULE_LICENSE(\"GPL\");\n"
	buf += "module_init(" + fabric_mod_name + "_init);\n"
	buf += "module_exit(" + fabric_mod_name + "_exit);\n"
	ret = p.write(buf)
	# NOTE(review): file.write() returns None in Python 2; dead check.
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	return
def tcm_mod_scan_fabric_ops(tcm_dir):
	"""Harvests function-pointer member lines from target_core_fabric.h into
	the module-level fabric_ops list.

	NOTE(review): this state machine looks suspect — process_fo flips to 1 on
	the first line that is NOT the struct opening, so collection effectively
	starts immediately and never stops at the closing brace; verify intent
	against the upstream script. Indentation here was also reconstructed.
	"""
	fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
	print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
	process_fo = 0;
	p = open(fabric_ops_api, 'r')
	line = p.readline()
	while line:
		if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
			line = p.readline()
			continue
		if process_fo == 0:
			process_fo = 1;
			line = p.readline()
			# Search for function pointer
			if not re.search('\(\*', line):
				continue
			fabric_ops.append(line.rstrip())
			continue
		line = p.readline()
		# Search for function pointer
		if not re.search('\(\*', line):
			continue
		fabric_ops.append(line.rstrip())
	p.close()
	return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
	# Generate <mod>_fabric.c (accumulated in 'buf') and its prototype
	# header <mod>_fabric.h (accumulated in 'bufi'): one stub function per
	# entry previously collected into the global 'fabric_ops' list by
	# tcm_mod_scan_fabric_ops().  proto_ident selects which transport
	# helpers (FC / SAS / iSCSI) the generated switch statements call.
	buf = ""
	bufi = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
	print "Writing file: " + f
	p = open(f, 'w')
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
	print "Writing file: " + fi
	pi = open(fi, 'w')
	if not pi:
		tcm_mod_err("Unable to open file: " + fi)
	buf = "#include <linux/slab.h>\n"
	buf += "#include <linux/kthread.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/list.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/string.h>\n"
	buf += "#include <linux/ctype.h>\n"
	buf += "#include <asm/unaligned.h>\n"
	buf += "#include <scsi/scsi.h>\n"
	buf += "#include <scsi/scsi_host.h>\n"
	buf += "#include <scsi/scsi_device.h>\n"
	buf += "#include <scsi/scsi_cmnd.h>\n"
	buf += "#include <scsi/libfc.h>\n\n"
	buf += "#include <target/target_core_base.h>\n"
	buf += "#include <target/target_core_fabric.h>\n"
	buf += "#include <target/target_core_configfs.h>\n\n"
	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
	buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += "	return 1;\n"
	buf += "}\n\n"
	bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
	buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += "	return 0;\n"
	buf += "}\n\n"
	bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
	total_fabric_ops = len(fabric_ops)
	i = 0
	# Emit one stub per recognised fabric op; entries not matched by any
	# pattern below are silently skipped.
	while i < total_fabric_ops:
		fo = fabric_ops[i]
		i += 1
		# print "fabric_ops: " + fo
		if re.search('get_fabric_name', fo):
			buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
			buf += "{\n"
			buf += "	return \"" + fabric_mod_name[4:] + "\";\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
			continue
		# NOTE(review): unlike the branch above, the remaining branches do
		# not 'continue' and fall through to the following if-tests —
		# harmless as long as an entry matches only one pattern.
		if re.search('get_fabric_proto_ident', fo):
			buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += "	u8 proto_id;\n\n"
			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += "	case SCSI_PROTOCOL_FCP:\n"
				buf += "	default:\n"
				buf += "		proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
				buf += "		break;\n"
			elif proto_ident == "SAS":
				buf += "	case SCSI_PROTOCOL_SAS:\n"
				buf += "	default:\n"
				buf += "		proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
				buf += "		break;\n"
			elif proto_ident == "iSCSI":
				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
				buf += "	default:\n"
				buf += "		proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
				buf += "		break;\n"
			buf += "	}\n\n"
			buf += "	return proto_id;\n"
			buf += "}\n\n"
			bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
		if re.search('get_wwn', fo):
			buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
			buf += "	return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
		if re.search('get_tag', fo):
			buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	return tpg->" + fabric_mod_port + "_tpgt;\n"
			buf += "}\n\n"
			bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
		if re.search('get_default_depth', fo):
			buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	return 1;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
		if re.search('get_pr_transport_id\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
			buf += "	struct se_portal_group *se_tpg,\n"
			buf += "	struct se_node_acl *se_nacl,\n"
			buf += "	struct t10_pr_registration *pr_reg,\n"
			buf += "	int *format_code,\n"
			buf += "	unsigned char *buf)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += "	int ret = 0;\n\n"
			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += "	case SCSI_PROTOCOL_FCP:\n"
				buf += "	default:\n"
				buf += "		ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code, buf);\n"
				buf += "		break;\n"
			elif proto_ident == "SAS":
				buf += "	case SCSI_PROTOCOL_SAS:\n"
				buf += "	default:\n"
				buf += "		ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code, buf);\n"
				buf += "		break;\n"
			elif proto_ident == "iSCSI":
				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
				buf += "	default:\n"
				buf += "		ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code, buf);\n"
				buf += "		break;\n"
			buf += "	}\n\n"
			buf += "	return ret;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
			bufi += "			struct se_node_acl *, struct t10_pr_registration *,\n"
			bufi += "			int *, unsigned char *);\n"
		if re.search('get_pr_transport_id_len\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
			buf += "	struct se_portal_group *se_tpg,\n"
			buf += "	struct se_node_acl *se_nacl,\n"
			buf += "	struct t10_pr_registration *pr_reg,\n"
			buf += "	int *format_code)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += "	int ret = 0;\n\n"
			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += "	case SCSI_PROTOCOL_FCP:\n"
				buf += "	default:\n"
				buf += "		ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code);\n"
				buf += "		break;\n"
			elif proto_ident == "SAS":
				buf += "	case SCSI_PROTOCOL_SAS:\n"
				buf += "	default:\n"
				buf += "		ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code);\n"
				buf += "		break;\n"
			elif proto_ident == "iSCSI":
				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
				buf += "	default:\n"
				buf += "		ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code);\n"
				buf += "		break;\n"
			buf += "	}\n\n"
			buf += "	return ret;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
			bufi += "			struct se_node_acl *, struct t10_pr_registration *,\n"
			bufi += "			int *);\n"
		if re.search('parse_pr_out_transport_id\)\(', fo):
			buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
			buf += "	struct se_portal_group *se_tpg,\n"
			buf += "	const char *buf,\n"
			buf += "	u32 *out_tid_len,\n"
			buf += "	char **port_nexus_ptr)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += "	char *tid = NULL;\n\n"
			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += "	case SCSI_PROTOCOL_FCP:\n"
				buf += "	default:\n"
				buf += "		tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
				buf += "					port_nexus_ptr);\n"
			elif proto_ident == "SAS":
				buf += "	case SCSI_PROTOCOL_SAS:\n"
				buf += "	default:\n"
				buf += "		tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
				buf += "					port_nexus_ptr);\n"
			elif proto_ident == "iSCSI":
				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
				buf += "	default:\n"
				buf += "		tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
				buf += "					port_nexus_ptr);\n"
			buf += "	}\n\n"
			buf += "	return tid;\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
			bufi += "			const char *, u32 *, char **);\n"
		if re.search('alloc_fabric_acl\)\(', fo):
			buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_nacl *nacl;\n\n"
			buf += "	nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
			buf += "	if (!nacl) {\n"
			buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
			buf += "		return NULL;\n"
			buf += "	}\n\n"
			buf += "	return &nacl->se_node_acl;\n"
			buf += "}\n\n"
			bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
		if re.search('release_fabric_acl\)\(', fo):
			buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
			buf += "	struct se_portal_group *se_tpg,\n"
			buf += "	struct se_node_acl *se_nacl)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
			buf += "			struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
			buf += "	kfree(nacl);\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
			bufi += "			struct se_node_acl *);\n"
		if re.search('tpg_get_inst_index\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	return 1;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
		if re.search('\*release_cmd\)\(', fo):
			buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
		if re.search('shutdown_session\)\(', fo):
			buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
		if re.search('close_session\)\(', fo):
			buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
		if re.search('stop_session\)\(', fo):
			buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
		if re.search('fall_back_to_erl0\)\(', fo):
			buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
		if re.search('sess_logged_in\)\(', fo):
			buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
		if re.search('sess_get_index\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
		if re.search('write_pending\)\(', fo):
			buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
		if re.search('write_pending_status\)\(', fo):
			buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
		if re.search('set_default_node_attributes\)\(', fo):
			buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
		if re.search('get_task_tag\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
		if re.search('get_cmd_state\)\(', fo):
			buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
		if re.search('queue_data_in\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
		if re.search('queue_status\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
		if re.search('queue_tm_rsp\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
		if re.search('get_fabric_sense_len\)\(', fo):
			buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
		if re.search('set_fabric_sense_len\)\(', fo):
			buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
		if re.search('is_state_remove\)\(', fo):
			buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
	# Flush the accumulated C source and header prototypes to disk.
	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	ret = pi.write(bufi)
	if ret:
		tcm_mod_err("Unable to write fi: " + fi)
	pi.close()
	return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
	# Append an obj-$(CONFIG_<NAME>) rule to the tree-level target
	# Makefile so kbuild descends into the new fabric module directory.
	kbuild = tcm_dir + "/drivers/target/Makefile"
	entry = "obj-$(CONFIG_%s) += %s/\n" % (fabric_mod_name.upper(),
					    fabric_mod_name.lower())
	with open(kbuild, 'a') as fd:
		fd.write(entry)
	return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
	# Append a 'source' directive for the new module's Kconfig to the
	# tree-level target Kconfig.
	kconfig = tcm_dir + "/drivers/target/Kconfig"
	entry = "source \"drivers/target/%s/Kconfig\"\n" % fabric_mod_name.lower()
	with open(kconfig, 'a') as fd:
		fd.write(entry)
	return
def main(modname, proto_ident):
	# Entry point: generate a complete skeleton TCM fabric module under
	# drivers/target/<modname> for one of the supported transports, then
	# optionally wire it into the tree-level Makefile/Kconfig.
	# proto_ident = "FC"
	# proto_ident = "SAS"
	# proto_ident = "iSCSI"
	tcm_dir = os.getcwd();
	# NOTE(review): assumes the script runs from a directory two levels
	# below the kernel tree root — confirm invocation location.
	tcm_dir += "/../../"
	print "tcm_dir: " + tcm_dir
	fabric_mod_name = modname
	fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
	print "Set fabric_mod_name: " + fabric_mod_name
	print "Set fabric_mod_dir: " + fabric_mod_dir
	print "Using proto_ident: " + proto_ident
	# Only these three transports have matching helper templates.
	if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
		print "Unsupported proto_ident: " + proto_ident
		sys.exit(1)
	ret = tcm_mod_create_module_subdir(fabric_mod_dir)
	if ret:
		print "tcm_mod_create_module_subdir() failed because module already exists!"
		sys.exit(1)
	# Emit the generated sources, then the module build fragments.
	tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
	tcm_mod_scan_fabric_ops(tcm_dir)
	tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
	tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
	tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
	tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
	# Interactive opt-in before touching shared tree-level files.
	# NOTE(review): prompt text is missing a space before "to" — cosmetic.
	input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
	if input == "yes" or input == "y":
		tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
	input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
	if input == "yes" or input == "y":
		tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
	return
# Command-line front end: both -m/--modulename and -p/--protoident are
# mandatory; bail out with usage text when either is missing.
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
		action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
		action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
	if not opts.__dict__[m]:
		print "mandatory option is missing\n"
		parser.print_help()
		exit(-1)
if __name__ == "__main__":
	main(str(opts.modname), opts.protoident)
|
gpl-2.0
|
RazielSun/juma-editor
|
editor/lib/juma/SearchView/ArrayView.py
|
2
|
3871
|
#!/usr/bin/env python
from PySide import QtGui, QtCore
from PySide.QtCore import Qt, QEventLoop, QEvent, QObject
from juma.core import *
from juma.core.ModelManager import *
from juma.qt.helpers import addWidgetWithLayout, restrainWidgetToScreen
from juma.qt.IconCache import getIcon
from ui.array_view_container_ui import Ui_ArrayViewContainer as ArrayViewContainer
##----------------------------------------------------------------##
def getModulePath( path ):
	# Resolve a resource path relative to this module's directory.
	import os.path
	base = os.path.dirname( __file__ )
	return '/'.join( [ base, path ] )
##----------------------------------------------------------------##
class ArrayLineView( QtGui.QLineEdit ):
	# One editable row of the array editor.  Kept as a QLineEdit subclass
	# so per-row behaviour (validators, styling) can be added in one place.
	def __init__(self, *args ):
		super( ArrayLineView, self ).__init__( *args )
##----------------------------------------------------------------##
class ArrayViewWidget( QtGui.QWidget ):
	# Popup widget showing one line-edit per array element, a "total"
	# field to grow/shrink the array, and a Save button that hands the
	# collected values back through the owning ArrayView module.
	def __init__(self, *args ):
		super( ArrayViewWidget, self ).__init__( *args )
		self.views = []		# pool of ArrayLineView rows; hidden rows are reused
		self.counts = 0		# number of rows currently in use
		self.setWindowFlags( Qt.Popup )
		self.ui = ui = ArrayViewContainer()
		ui.setupUi( self )
		ui.bodyLayout.setAlignment(QtCore.Qt.AlignTop)
		intValidator = QtGui.QIntValidator()
		ui.totalEdit.setText( str(self.counts) )
		ui.totalEdit.setValidator( intValidator )
		ui.totalEdit.returnPressed.connect( self.onTotalEditPressed )
		ui.totalBtn.setText( 'Save' )
		ui.totalBtn.clicked.connect( self.onTotalButtonClick )
		self.setMinimumSize( 300, 200 )
	def body( self ):
		# Layout that receives the per-element row widgets.
		return self.ui.bodyLayout
	def hideAll( self ):
		# Hide every pooled row; rows are never destroyed, only hidden.
		for view in self.views:
			view.hide()
	def setup( self, typeId, data ):
		# Populate the editor from 'data' (a sequence, or None for empty).
		# Fix: the original iterated 'data' unconditionally, raising
		# TypeError when data was None (ArrayView.request() passes
		# initial=None by default); guard the loop as well as len().
		self.hideAll()
		total = 0
		if data:
			total = len(data)
		self.createLines( total )
		if data:
			i = 0
			for d in data:
				self.fill( i, d )
				i += 1
		self.ui.totalEdit.setText(str(total))
	def createLines( self, total ):
		# Ensure exactly 'total' rows are visible, creating new rows only
		# when the pool is too small, then resize to fit.
		self.hideAll()
		count = len(self.views)
		self.counts = total
		for i in range(0, total):
			view = None
			if count > i:
				view = self.views[i]
				view.show()
			else:
				view = self.getLine()
				self.body().addWidget( view )
				self.views.append( view )
		# 28px per row plus fixed chrome for the total/save controls.
		height = total * 28 + 40
		self.resize( 350, height )
	def fill( self, index, value ):
		# Write one element's string representation into its row.
		view = self.views[index]
		view.setText(str(value))
	def getLine( self ):
		# Factory for a single row widget.
		return ArrayLineView( self )
	##----------------------------------------------------------------##
	def collect( self ):
		# Return the current row texts (strings) for the rows in use.
		arr = []
		for i in range(0, self.counts):
			view = self.views[i]
			txt = view.text()
			arr.append( txt )
		return arr
	##----------------------------------------------------------------##
	def onTotalEditPressed( self ):
		# Enter pressed in the total field: regrow the row list.
		total = int(self.ui.totalEdit.text())
		self.createLines( total )
	def onTotalButtonClick( self ):
		# Save button: delegate to the owning module (set by ArrayView.onLoad).
		self.module.saveData()
##----------------------------------------------------------------##
## Array View
##----------------------------------------------------------------##
class ArrayView( EditorModule ):
	# Editor module wrapping ArrayViewWidget as a reusable popup array
	# editor.  Callers use request(); the widget calls back into
	# saveData() when the user presses Save.
	_name = 'array_view'
	_dependency = [ 'qt' ]
	def __init__( self ):
		# Callback invoked with the collected values on save.
		self.onSave = None
	def onLoad( self ):
		# Lazily create the popup widget and link it back to this module.
		self.window = ArrayViewWidget( None )
		self.window.module = self
	def request( self, **option ):
		# Show the editor popup.
		# Options: pos (screen point, default cursor), type, context,
		# initial (sequence of values), on_save (callback).
		pos = option.get( 'pos', QtGui.QCursor.pos() )
		typeId = option.get( 'type', None )
		context = option.get( 'context', None )
		initial = option.get( 'initial', None )
		self.onSave = option.get('on_save', None)
		self.window.move( pos )
		# Keep the popup fully on-screen before showing it.
		restrainWidgetToScreen( self.window )
		self.window.setup( typeId, initial )
		self.window.show()
		self.window.raise_()
		self.window.setFocus()
	def saveData( self ):
		# Deliver the edited values to the requester, then dismiss.
		if self.onSave:
			self.onSave( self.window.collect() )
		self.window.hide()
##----------------------------------------------------------------##
# Module-level singleton, registered with the editor on import.
arrayView = ArrayView()
arrayView.register()
##----------------------------------------------------------------##
def requestArrayView( **option ):
	# Convenience wrapper around the singleton's request().
	return arrayView.request( **option )
|
mit
|
miguelparaiso/PracticaOdoo
|
addons/base_report_designer/plugin/openerp_report_designer/bin/script/Expression.py
|
384
|
4146
|
#########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import string
import unohelper
import xmlrpclib
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from lib.error import ErrorDialog
from lib.functions import *
database="test"
uid = 3
class Expression(unohelper.Base, XJobExecutor ):
    # Modal OpenOffice dialog used by the OpenERP report designer to insert
    # (or, with bFromModify=True, edit) a report expression.  The expression
    # is stored in the document as a DropDown text field whose item pair is
    # (displayed name, "[[ <expression> ]]").
    def __init__(self, sExpression="", sName="", bFromModify=False):
        LoginTest()
        # Abort when running as a packaged extension without a valid login.
        if not loginstatus and __name__=="package":
            exit(1)
        self.win = DBModalDialog(60, 50, 180, 65, "Expression Builder")
        self.win.addFixedText("lblExpression",17 , 10, 35, 15, "Expression :")
        self.win.addEdit("txtExpression", -5, 5, 123, 15)
        self.win.addFixedText("lblName", 2, 30, 50, 15, "Displayed Name :")
        self.win.addEdit("txtName", -5, 25, 123, 15)
        self.win.addButton( "btnOK", -5, -5, 40, 15, "OK", actionListenerProc = self.btnOk_clicked )
        self.win.addButton( "btnCancel", -5 - 40 -5, -5, 40, 15, "Cancel", actionListenerProc = self.btnCancel_clicked )
        self.bModify=bFromModify
        # Pre-fill the fields when editing an existing expression.
        if self.bModify==True:
            self.win.setEditText("txtExpression",sExpression)
            self.win.setEditText("txtName",sName)
        self.win.doModalDialog("",None)
    def btnOk_clicked(self, oActionEvent):
        desktop=getDesktop()
        doc = desktop.getCurrentComponent()
        text = doc.Text
        cursor = doc.getCurrentController().getViewCursor()
        if self.bModify==True:
            # Editing: rewrite the items of the text field under the cursor.
            oCurObj=cursor.TextField
            sKey=u""+self.win.getEditText("txtName")
            sValue=u"[[ " + self.win.getEditText("txtExpression") + " ]]"
            oCurObj.Items = (sKey,sValue)
            oCurObj.update()
            self.win.endExecute()
        else:
            oInputList = doc.createInstance("com.sun.star.text.TextField.DropDown")
            # Both fields must be non-empty before inserting a new field.
            if self.win.getEditText("txtName")!="" and self.win.getEditText("txtExpression")!="":
                sKey=u""+self.win.getEditText("txtName")
                sValue=u"[[ " + self.win.getEditText("txtExpression") + " ]]"
                # Inside a table the content must be inserted through the
                # cell's own text object, not the document body text.
                if cursor.TextTable==None:
                    oInputList.Items = (sKey,sValue)
                    text.insertTextContent(cursor,oInputList,False)
                else:
                    oTable = cursor.TextTable
                    oCurCell = cursor.Cell
                    tableText = oTable.getCellByName( oCurCell.CellName )
                    oInputList.Items = (sKey,sValue)
                    tableText.insertTextContent(cursor,oInputList,False)
                self.win.endExecute()
            else:
                ErrorDialog("Please fill appropriate data in Name field or in Expression field.")
    def btnCancel_clicked(self, oActionEvent):
        self.win.endExecute()
if __name__<>"package" and __name__=="__main__":
    # Standalone run (e.g. from the OpenOffice macro editor).
    Expression()
elif __name__=="package":
    # Registered as a UNO component job by the extension packaging.
    g_ImplementationHelper.addImplementation( Expression, "org.openoffice.openerp.report.expression", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
redhawkci/omniEvents
|
test/EventChannelAdmin_idl.py
|
2
|
2945
|
# Python stubs generated by omniidl from ../idl/EventChannelAdmin.idl
# NOTE: machine-generated module — regenerate with omniidl rather than
# hand-editing.  It registers the typecode, object-reference class and POA
# skeleton for the EventChannelAdmin::EventChannelFactory interface.
import omniORB, _omnipy
from omniORB import CORBA, PortableServer
_0_CORBA = CORBA
_omnipy.checkVersion(2,0, __file__)
# #include "CosNaming.idl"
import CosNaming_idl
_0_CosNaming = omniORB.openModule("CosNaming")
_0_CosNaming__POA = omniORB.openModule("CosNaming__POA")
# #include "CosLifeCycle.idl"
import CosLifeCycle_idl
_0_CosLifeCycle = omniORB.openModule("CosLifeCycle")
_0_CosLifeCycle__POA = omniORB.openModule("CosLifeCycle__POA")
#
# Start of module "EventChannelAdmin"
#
# __name__ is reassigned below — presumably so classes defined here carry
# the IDL module's name; this is standard omniidl output (do not "fix").
__name__ = "EventChannelAdmin"
_0_EventChannelAdmin = omniORB.openModule("EventChannelAdmin", r"../idl/EventChannelAdmin.idl")
_0_EventChannelAdmin__POA = omniORB.openModule("EventChannelAdmin__POA", r"../idl/EventChannelAdmin.idl")
# interface EventChannelFactory
_0_EventChannelAdmin._d_EventChannelFactory = (omniORB.tcInternal.tv_objref, "IDL:EventChannelAdmin/EventChannelFactory:1.0", "EventChannelFactory")
omniORB.typeMapping["IDL:EventChannelAdmin/EventChannelFactory:1.0"] = _0_EventChannelAdmin._d_EventChannelFactory
_0_EventChannelAdmin.EventChannelFactory = omniORB.newEmptyClass()
class EventChannelFactory (_0_CosLifeCycle.GenericFactory):
    _NP_RepositoryId = _0_EventChannelAdmin._d_EventChannelFactory[1]
    def __init__(self, *args, **kw):
        raise RuntimeError("Cannot construct objects of this type.")
    _nil = CORBA.Object._nil
_0_EventChannelAdmin.EventChannelFactory = EventChannelFactory
_0_EventChannelAdmin._tc_EventChannelFactory = omniORB.tcInternal.createTypeCode(_0_EventChannelAdmin._d_EventChannelFactory)
omniORB.registerType(EventChannelFactory._NP_RepositoryId, _0_EventChannelAdmin._d_EventChannelFactory, _0_EventChannelAdmin._tc_EventChannelFactory)
# EventChannelFactory object reference
class _objref_EventChannelFactory (_0_CosLifeCycle._objref_GenericFactory):
    _NP_RepositoryId = EventChannelFactory._NP_RepositoryId
    def __init__(self):
        _0_CosLifeCycle._objref_GenericFactory.__init__(self)
    __methods__ = [] + _0_CosLifeCycle._objref_GenericFactory.__methods__
omniORB.registerObjref(EventChannelFactory._NP_RepositoryId, _objref_EventChannelFactory)
_0_EventChannelAdmin._objref_EventChannelFactory = _objref_EventChannelFactory
del EventChannelFactory, _objref_EventChannelFactory
# EventChannelFactory skeleton
__name__ = "EventChannelAdmin__POA"
class EventChannelFactory (_0_CosLifeCycle__POA.GenericFactory):
    _NP_RepositoryId = _0_EventChannelAdmin.EventChannelFactory._NP_RepositoryId
    _omni_op_d = {}
    _omni_op_d.update(_0_CosLifeCycle__POA.GenericFactory._omni_op_d)
EventChannelFactory._omni_skeleton = EventChannelFactory
_0_EventChannelAdmin__POA.EventChannelFactory = EventChannelFactory
del EventChannelFactory
__name__ = "EventChannelAdmin"
#
# End of module "EventChannelAdmin"
#
__name__ = "EventChannelAdmin_idl"
_exported_modules = ( "EventChannelAdmin", )
# The end.
|
lgpl-2.1
|
vincent-noel/SigNetSim
|
signetsim/json/validators/__init__.py
|
2
|
1148
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2017 Vincent Noel (vincent.noel@butantan.gov.br)
#
# This file is part of libSigNetSim.
#
# libSigNetSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libSigNetSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libSigNetSim. If not, see <http://www.gnu.org/licenses/>.
""" __init__.py
Initialization of the module signetsim.views.json.validators
"""
from .MathValidator import MathValidator
from .FloatValidator import FloatValidator
from .SbmlIdValidator import SbmlIdValidator
from .UnitIdValidator import UnitIdValidator
from .ModelNameValidator import ModelNameValidator
from .UsernameValidator import UsernameValidator
|
agpl-3.0
|
roadmapper/ansible
|
lib/ansible/modules/cloud/vmware/vmware_tag.py
|
10
|
8738
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_tag
short_description: Manage VMware tags
description:
- This module can be used to create / delete / update VMware tags.
- Tag feature is introduced in vSphere 6 version, so this module is not supported in the earlier versions of vSphere.
- All variables and VMware object names are case sensitive.
version_added: '2.6'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
- vSphere Automation SDK
options:
tag_name:
description:
- The name of tag to manage.
required: True
type: str
tag_description:
description:
- The tag description.
- This is required only if C(state) is set to C(present).
- This parameter is ignored, when C(state) is set to C(absent).
- Process of updating tag only allows description change.
required: False
default: ''
type: str
category_id:
description:
- The unique ID generated by vCenter should be used to.
- User can get this unique ID from facts module.
required: False
type: str
state:
description:
- The state of tag.
- If set to C(present) and tag does not exists, then tag is created.
- If set to C(present) and tag exists, then tag is updated.
- If set to C(absent) and tag exists, then tag is deleted.
- If set to C(absent) and tag does not exists, no action is taken.
required: False
default: 'present'
choices: [ 'present', 'absent' ]
type: str
extends_documentation_fragment: vmware_rest_client.documentation
'''
EXAMPLES = r'''
- name: Create a tag
vmware_tag:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
validate_certs: no
category_id: 'urn:vmomi:InventoryServiceCategory:e785088d-6981-4b1c-9fb8-1100c3e1f742:GLOBAL'
tag_name: Sample_Tag_0002
tag_description: Sample Description
state: present
delegate_to: localhost
- name: Update tag description
vmware_tag:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
tag_name: Sample_Tag_0002
tag_description: Some fancy description
state: present
delegate_to: localhost
- name: Delete tag
vmware_tag:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
tag_name: Sample_Tag_0002
state: absent
delegate_to: localhost
'''
RETURN = r'''
tag_status:
description: dictionary of tag metadata
returned: on success
type: dict
sample: {
"msg": "Tag 'Sample_Tag_0002' created.",
"tag_id": "urn:vmomi:InventoryServiceTag:bff91819-f529-43c9-80ca-1c9dfda09441:GLOBAL"
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware_rest_client import VmwareRestClient
try:
    # The vSphere Automation SDK is an optional dependency: a missing import
    # is tolerated here so the module file can still load; VmwareRestClient
    # performs its own dependency check and fails with a clean message.
    from com.vmware.vapi.std.errors_client import Error
except ImportError:
    pass
class VmwareTag(VmwareRestClient):
    """Manage vCenter tag objects (create / update / delete) via the REST API."""

    def __init__(self, module):
        super(VmwareTag, self).__init__(module)
        # Cache of all existing tags, keyed by tag name (filled below).
        self.global_tags = dict()
        # api_client to call APIs instead of individual service
        self.tag_service = self.api_client.tagging.Tag
        self.tag_name = self.params.get('tag_name')
        self.get_all_tags()
        self.category_service = self.api_client.tagging.Category

    def ensure_state(self):
        """
        Dispatch to the handler matching (desired state, current state).
        """
        desired_state = self.params.get('state')
        states = {
            'present': {
                'present': self.state_update_tag,
                'absent': self.state_create_tag,
            },
            'absent': {
                'present': self.state_delete_tag,
                'absent': self.state_unchanged,
            }
        }
        states[desired_state][self.check_tag_status()]()

    def state_create_tag(self):
        """
        Create tag

        Fails the module when 'category_id' is missing or unknown; exits the
        module with the new tag id on success.
        """
        tag_spec = self.tag_service.CreateSpec()
        tag_spec.name = self.tag_name
        tag_spec.description = self.params.get('tag_description')
        category_id = self.params.get('category_id', None)
        if category_id is None:
            self.module.fail_json(msg="'category_id' is required parameter while creating tag.")
        # Validate the category id against the categories known to vCenter.
        category_found = False
        for category in self.category_service.list():
            category_obj = self.category_service.get(category)
            if category_id == category_obj.id:
                category_found = True
                break
        if not category_found:
            self.module.fail_json(msg="Unable to find category specified using 'category_id' - %s" % category_id)
        tag_spec.category_id = category_id
        tag_id = ''
        try:
            tag_id = self.tag_service.create(tag_spec)
        except Error as error:
            self.module.fail_json(msg="%s" % self.get_error_message(error))
        if tag_id:
            self.module.exit_json(changed=True,
                                  tag_status=dict(msg="Tag '%s' created." % tag_spec.name, tag_id=tag_id))
        self.module.exit_json(changed=False,
                              tag_status=dict(msg="No tag created", tag_id=tag_id))

    def state_unchanged(self):
        """
        Return unchanged state
        """
        self.module.exit_json(changed=False)

    def state_update_tag(self):
        """
        Update tag

        The tagging API only allows the description to change, so this is a
        no-op (changed=False) unless the desired description differs.
        """
        changed = False
        tag_id = self.global_tags[self.tag_name]['tag_id']
        results = dict(msg="Tag %s is unchanged." % self.tag_name,
                       tag_id=tag_id)
        tag_update_spec = self.tag_service.UpdateSpec()
        tag_desc = self.global_tags[self.tag_name]['tag_description']
        desired_tag_desc = self.params.get('tag_description')
        if tag_desc != desired_tag_desc:
            tag_update_spec.description = desired_tag_desc
            try:
                self.tag_service.update(tag_id, tag_update_spec)
            except Error as error:
                self.module.fail_json(msg="%s" % self.get_error_message(error))
            results['msg'] = 'Tag %s updated.' % self.tag_name
            changed = True
        self.module.exit_json(changed=changed, tag_status=results)

    def state_delete_tag(self):
        """
        Delete tag
        """
        tag_id = self.global_tags[self.tag_name]['tag_id']
        try:
            self.tag_service.delete(tag_id=tag_id)
        except Error as error:
            self.module.fail_json(msg="%s" % self.get_error_message(error))
        self.module.exit_json(changed=True,
                              tag_status=dict(msg="Tag '%s' deleted." % self.tag_name, tag_id=tag_id))

    def check_tag_status(self):
        """
        Check if tag exists or not
        Returns: 'present' if tag found, else 'absent'

        Fix: AnsibleModule populates self.params with every declared option
        (defaulting to None), so the previous ``'category_id' in self.params``
        membership test was always true.  When the user did not supply
        'category_id', the ``None == tag_category_id`` comparison made an
        existing tag look absent, which broke ``state: absent`` deletion and
        caused duplicate create attempts.  Test the value instead of key
        membership.
        """
        category_id = self.params.get('category_id')
        if category_id is not None:
            if self.tag_name in self.global_tags and category_id == self.global_tags[self.tag_name]['tag_category_id']:
                ret = 'present'
            else:
                ret = 'absent'
        else:
            ret = 'present' if self.tag_name in self.global_tags else 'absent'
        return ret

    def get_all_tags(self):
        """
        Retrieve all tag information into self.global_tags, keyed by name.
        """
        for tag in self.tag_service.list():
            tag_obj = self.tag_service.get(tag)
            self.global_tags[tag_obj.name] = dict(
                tag_description=tag_obj.description,
                tag_used_by=tag_obj.used_by,
                tag_category_id=tag_obj.category_id,
                tag_id=tag_obj.id
            )
def main():
    """Module entry point: build the argument spec and apply the tag state."""
    spec = VmwareRestClient.vmware_client_argument_spec()
    spec.update(dict(
        tag_name=dict(type='str', required=True),
        tag_description=dict(type='str', default='', required=False),
        category_id=dict(type='str', required=False),
        state=dict(type='str', choices=['present', 'absent'], default='present', required=False),
    ))
    ansible_module = AnsibleModule(argument_spec=spec)
    VmwareTag(ansible_module).ensure_state()


if __name__ == '__main__':
    main()
|
gpl-3.0
|
lattwood/phantomjs
|
src/breakpad/src/tools/gyp/test/builddir/gyptest-default.py
|
147
|
2348
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify the settings that cause a set of programs to be created in
a specific build directory, and that no intermediate built files
get created outside of that build directory hierarchy even when
referred to with deeply-nested ../../.. paths.
"""
import TestGyp
# TODO(mmoss): Make only supports (theoretically) a single, global build
# directory (through GYP_GENERATOR_FLAGS 'output_dir'), rather than
# gyp-file-specific settings (e.g. the stuff in builddir.gypi) that the other
# generators support, so this doesn't work yet for make.
# TODO(mmoss) Make also has the issue that the top-level Makefile is written to
# the "--depth" location, which is one level above 'src', but then this test
# moves 'src' somewhere else, leaving the Makefile behind, so make can't find
# its sources. I'm not sure if make is wrong for writing outside the current
# directory, or if the test is wrong for assuming everything generated is under
# the current directory.
# Make is excluded: it only supports a single global build directory (see the
# TODO notes above), so this test cannot run against the make generator.
test = TestGyp.TestGyp(formats=['!make'])
test.run_gyp('prog1.gyp', '--depth=..', chdir='src')
test.relocate('src', 'relocate/src')
test.subdir('relocate/builddir')
# Make sure that all the built ../../etc. files only get put under builddir,
# by making all of relocate read-only and then making only builddir writable.
test.writable('relocate', False)
test.writable('relocate/builddir', True)
# Suppress the test infrastructure's setting SYMROOT on the command line.
test.build('prog1.gyp', SYMROOT=None, chdir='relocate/src')
# Expected stdout for each program built into the shared build directory.
expect1 = """\
Hello from prog1.c
Hello from func1.c
"""
expect2 = """\
Hello from subdir2/prog2.c
Hello from func2.c
"""
expect3 = """\
Hello from subdir2/subdir3/prog3.c
Hello from func3.c
"""
expect4 = """\
Hello from subdir2/subdir3/subdir4/prog4.c
Hello from func4.c
"""
expect5 = """\
Hello from subdir2/subdir3/subdir4/subdir5/prog5.c
Hello from func5.c
"""
def run_builddir(prog, expect):
    """Run *prog* from the shared build directory and compare its stdout.

    Local renamed from ``dir`` to ``build_dir`` so it no longer shadows the
    ``dir`` builtin.
    """
    build_dir = 'relocate/builddir/Default/'
    test.run(program=test.workpath(build_dir + prog), stdout=expect)


run_builddir('prog1', expect1)
run_builddir('prog2', expect2)
run_builddir('prog3', expect3)
run_builddir('prog4', expect4)
run_builddir('prog5', expect5)
test.pass_test()
|
bsd-3-clause
|
michalkurka/h2o-3
|
h2o-py/tests/testdir_parser/pyunit_load_sparse.py
|
6
|
1288
|
#!/usr/bin/env python
from __future__ import division
import sys; sys.path.insert(1, "../..")
import h2o
from tests import pyunit_utils
def test_load_sparse():
    """Check that H2OFrame ingests scipy sparse matrices (CSR, LIL, COO)."""
    try:
        import scipy.sparse as sp
    except ImportError:
        # scipy is an optional dependency; silently skip when unavailable.
        return
    # CSR: values must round-trip through the frame.
    A = sp.csr_matrix([[1, 2, 0, 5.5], [0, 0, 3, 6.7], [4, 0, 5, 0]])
    fr = h2o.H2OFrame(A)
    assert fr.shape == (3, 4)
    assert fr.as_data_frame(False) == \
        [['C1', 'C2', 'C3', 'C4'], ['1', '2', '0', '5.5'], ['0', '0', '3', '6.7'], ['4', '0', '5', '0.0']]
    # LIL: large tridiagonal matrix (10 on the diagonal, -3 above, -2 below).
    A = sp.lil_matrix((1000, 1000))
    A.setdiag(10)
    for i in range(999):
        A[i, i + 1] = -3
        A[i + 1, i] = -2
    fr = h2o.H2OFrame(A)
    assert fr.shape == (1000, 1000)
    # Column means: (10-3-2)/1000 in the middle; the first/last columns each
    # miss one off-diagonal entry, hence 0.008 and 0.007.
    means = fr.mean().getrow()
    assert means == [0.008] + [0.005] * 998 + [0.007]
    # COO: duplicate coordinates (three entries at (0,0), two at (1,1)) must
    # be summed on ingest -- hence the '3' and '2' below.
    I = [0, 0, 1, 3, 1, 0, 0]
    J = [0, 2, 1, 3, 1, 0, 0]
    V = [1, 1, 1, 1, 1, 1, 1]
    B = sp.coo_matrix((V, (I, J)), shape=(4, 4))
    fr = h2o.H2OFrame(B)
    assert fr.shape == (4, 4)
    assert fr.as_data_frame(False) == [['C1', 'C2', 'C3', 'C4'], ['3', '0', '1', '0'], ['0', '2', '0', '0'],
                                       ['0', '0', '0', '0'], ['0', '0', '0', '1']]


if __name__ == "__main__":
    # Standalone run goes through the pyunit harness; on import, run directly.
    pyunit_utils.standalone_test(test_load_sparse)
else:
    test_load_sparse()
|
apache-2.0
|
lafranceinsoumise/api-django
|
agir/system_pay/soap_client.py
|
1
|
5607
|
import uuid
from datetime import datetime
import hmac
from base64 import b64encode
from django.conf import settings
from django.utils import timezone
from zeep import Client, xsd
from agir.payments.models import Subscription
from agir.system_pay import SystemPayError
from agir.system_pay.utils import get_recurrence_rule
# NOTE(review): module import fetches and parses the WSDL over the network --
# importing this module therefore requires connectivity to systempay.fr.
client = Client("https://paiement.systempay.fr/vads-ws/v5?wsdl")
header_namespace = "{http://v5.ws.vads.lyra.com/Header/}"
prefix_namespace = "{http://v5.ws.vads.lyra.com/}"
# Pre-resolved WSDL types used to build the request payloads below.
cancel_subscription_type = client.get_type(prefix_namespace + "cancelSubscription")
common_request_type = client.get_type(prefix_namespace + "commonRequest")
query_request_type = client.get_type(prefix_namespace + "queryRequest")
order_request_type = client.get_type(prefix_namespace + "orderRequest")
card_request_type = client.get_type(prefix_namespace + "cardRequest")
subscription_request_type = client.get_type(prefix_namespace + "subscriptionRequest")
class SystemPaySoapClient:
    """Thin wrapper around SystemPay's v5 SOAP API (token and subscription
    operations).  Every call raises SystemPayError when the API returns a
    non-zero responseCode."""

    def __init__(self, sp_config):
        # sp_config provides site_id, certificate and the production flag.
        self.sp_config = sp_config

    def _get_header(self):
        """Build the authenticated SOAP headers required by every API call.

        authToken is base64(HMAC-SHA256(certificate, requestId + timestamp)),
        with the timestamp in second-resolution UTC ISO format.
        """
        headers = list()
        request_id = str(uuid.uuid4())
        timestamp = datetime.utcnow().replace(microsecond=0).isoformat() + "Z"
        elements = {
            "shopId": self.sp_config["site_id"],
            "requestId": request_id,
            "timestamp": timestamp,
            "mode": "PRODUCTION" if self.sp_config["production"] else "TEST",
            "authToken": b64encode(
                hmac.digest(
                    self.sp_config["certificate"].encode(),
                    (request_id + timestamp).encode(),
                    "sha256",
                )
            ).decode(),
        }
        # One single-element complex type per header entry.
        for elem in elements:
            header = xsd.ComplexType(
                [xsd.Element(header_namespace + elem, xsd.String())]
            )
            headers.append(header(**{elem: elements[elem]}))
        return headers

    def cancel_alias(self, alias):
        """Cancel a payment token (alias) on the SystemPay side."""
        res = client.service.cancelToken(
            _soapheaders=self._get_header(),
            commonRequest=common_request_type(),
            queryRequest=query_request_type(paymentToken=alias.identifier.hex),
        )
        if res["commonResponse"]["responseCode"] > 0:
            raise SystemPayError(res["commonResponse"]["responseCodeDetail"])

    def cancel_subscription(self, subscription):
        """Cancel the SystemPay subscription linked to *subscription*."""
        system_pay_subscription = subscription.system_pay_subscription
        alias = system_pay_subscription.alias
        res = client.service.cancelSubscription(
            _soapheaders=self._get_header(),
            commonRequest=common_request_type(),
            queryRequest=query_request_type(
                paymentToken=alias.identifier.hex,
                subscriptionId=system_pay_subscription.identifier,
            ),
        )
        if res["commonResponse"]["responseCode"] > 0:
            raise SystemPayError(
                res["commonResponse"]["responseCodeDetail"],
                system_pay_code=res["commonResponse"]["responseCode"],
            )

    def get_subscription_details(self, subscription):
        """Fetch subscription details; returns the subscriptionResponse fields
        plus the orderId from the order response."""
        system_pay_subscription = subscription.system_pay_subscription
        alias = system_pay_subscription.alias
        res = client.service.getSubscriptionDetails(
            _soapheaders=self._get_header(),
            queryRequest=query_request_type(
                paymentToken=alias.identifier.hex,
                subscriptionId=system_pay_subscription.identifier,
            ),
        )
        if res["commonResponse"]["responseCode"] > 0:
            raise SystemPayError(
                res["commonResponse"]["responseCodeDetail"],
                system_pay_code=res["commonResponse"]["responseCode"],
            )
        return {
            "orderId": res["orderResponse"]["orderId"],
            **res["subscriptionResponse"],
        }

    def create_subscription(self, subscription, alias):
        """
        Create a subscription on the SystemPay side and return the matching
        SystemPay subscription identifier.

        :param subscription: the Subscription object to create a SystemPay
            subscription for (must be in STATUS_WAITING)
        :param alias: the (active) alias to attach the subscription to
        :return: the SystemPay-side subscription identifier
        """
        if subscription.status != Subscription.STATUS_WAITING:
            raise ValueError("La souscription doit être en attente.")
        if not alias.active:
            raise ValueError("L'alias doit être actif.")
        res = client.service.createSubscription(
            _soapheaders=self._get_header(),
            commonRequest=common_request_type(),
            orderRequest=order_request_type(order_id=f"S{subscription.id}"),
            subscriptionRequest=subscription_request_type(
                # Effect date is pushed 2 hours into the future; presumably to
                # stay clear of timezone edge cases -- TODO confirm.
                effectDate=(timezone.now() + timezone.timedelta(hours=2)).strftime(
                    "%Y-%m-%dT%H:%M:%SZ"
                ),
                amount=subscription.price,
                currency=settings.SYSTEMPAY_CURRENCY,
                initialAmount=None,
                initialAmountNumber=0,
                rrule=get_recurrence_rule(subscription),
            ),
            cardRequest=card_request_type(paymentToken=alias.identifier.hex),
        )
        if res["commonResponse"]["responseCode"] > 0:
            raise SystemPayError(
                res["commonResponse"]["responseCodeDetail"],
                system_pay_code=res["commonResponse"]["responseCode"],
            )
        return res["subscriptionResponse"]["subscriptionId"]
|
agpl-3.0
|
belltailjp/scikit-learn
|
sklearn/datasets/base.py
|
196
|
18554
|
"""
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
    """Container object for datasets: a dict whose keys are also attributes.

    >>> b = Bunch(a=1, b=2)
    >>> b['b']
    2
    >>> b.b
    2
    >>> b.a = 3
    >>> b['a']
    3
    >>> b.c = 6
    >>> b['c']
    6
    """

    def __init__(self, **kwargs):
        # Attribute values live in the dict itself, never in __dict__.
        super(Bunch, self).__init__(kwargs)

    def __setattr__(self, key, value):
        self[key] = value

    def __getattr__(self, key):
        # Only reached for keys not found through normal attribute lookup;
        # a missing key must surface as AttributeError, not KeyError.
        if key in self:
            return self[key]
        raise AttributeError(key)

    def __getstate__(self):
        return self.__dict__
def get_data_home(data_home=None):
    """Return the path of the scikit-learn data dir, creating it if needed.

    Resolution order: the explicit *data_home* argument, then the
    SCIKIT_LEARN_DATA environment variable, then ``~/scikit_learn_data``.
    A leading '~' is expanded to the user home folder.
    """
    default = join('~', 'scikit_learn_data')
    path = data_home if data_home is not None else environ.get(
        'SCIKIT_LEARN_DATA', default)
    path = expanduser(path)
    if not exists(path):
        makedirs(path)
    return path
def clear_data_home(data_home=None):
    """Delete all the content of the data home cache.

    *data_home* is resolved exactly as in :func:`get_data_home` (which also
    re-creates the directory before it is removed here).
    """
    shutil.rmtree(get_data_home(data_home))
def load_files(container_path, description=None, categories=None,
               load_content=True, shuffle=True, encoding=None,
               decode_error='strict', random_state=0):
    """Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored a two levels folder
    structure such as the following:
        container_folder/
            category_1_folder/
                file_1.txt
                file_2.txt
                ...
                file_42.txt
            category_2_folder/
                file_43.txt
                file_44.txt
                ...
    The folder names are used as supervised signal label names. The
    individual file names are not important.
    This function does not try to extract features into a numpy array or
    scipy sparse matrix. In addition, if load_content is false it
    does not try to load the files in memory.
    To use text files in a scikit-learn classification or clustering
    algorithm, you will need to use the `sklearn.feature_extraction.text`
    module to build a feature extraction transformer that suits your
    problem.
    If you set load_content=True, you should also specify the encoding of
    the text using the 'encoding' parameter. For many modern text files,
    'utf-8' will be the correct encoding. If you leave encoding equal to None,
    then the content will be made of bytes instead of Unicode, and you will
    not be able to use most functions in `sklearn.feature_extraction.text`.
    Similar feature extractors should be built for other kind of unstructured
    data input such as images, audio, video, ...
    Read more in the :ref:`User Guide <datasets>`.
    Parameters
    ----------
    container_path : string or unicode
        Path to the main folder holding one subfolder per category
    description: string or unicode, optional (default=None)
        A paragraph describing the characteristic of the dataset: its source,
        reference, etc.
    categories : A collection of strings or None, optional (default=None)
        If None (default), load all the categories.
        If not None, list of category names to load (other categories ignored).
    load_content : boolean, optional (default=True)
        Whether to load or not the content of the different files. If
        true a 'data' attribute containing the text information is present
        in the data structure returned. If not, a filenames attribute
        gives the path to the files.
    encoding : string or None (default is None)
        If None, do not try to decode the content of the files (e.g. for
        images or other non-text content).
        If not None, encoding to use to decode text files to Unicode if
        load_content is True.
    decode_error: {'strict', 'ignore', 'replace'}, optional
        Instruction on what to do if a byte sequence is given to analyze that
        contains characters not of the given `encoding`. Passed as keyword
        argument 'errors' to bytes.decode.
    shuffle : bool, optional (default=True)
        Whether or not to shuffle the data: might be important for models that
        make the assumption that the samples are independent and identically
        distributed (i.i.d.), such as stochastic gradient descent.
    random_state : int, RandomState instance or None, optional (default=0)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are: either
        data, the raw text data to learn, or 'filenames', the files
        holding it, 'target', the classification labels (integer index),
        'target_names', the meaning of the labels, and 'DESCR', the full
        description of the dataset.
    """
    target = []
    target_names = []
    filenames = []
    # Sorted listing keeps the label <-> folder mapping deterministic across
    # runs and filesystems.
    folders = [f for f in sorted(listdir(container_path))
               if isdir(join(container_path, f))]
    if categories is not None:
        folders = [f for f in folders if f in categories]
    # One integer label per folder, in sorted folder order.
    for label, folder in enumerate(folders):
        target_names.append(folder)
        folder_path = join(container_path, folder)
        documents = [join(folder_path, d)
                     for d in sorted(listdir(folder_path))]
        target.extend(len(documents) * [label])
        filenames.extend(documents)
    # convert to array for fancy indexing
    filenames = np.array(filenames)
    target = np.array(target)
    if shuffle:
        # Shuffle filenames and targets with the same permutation so the
        # pairing is preserved.
        random_state = check_random_state(random_state)
        indices = np.arange(filenames.shape[0])
        random_state.shuffle(indices)
        filenames = filenames[indices]
        target = target[indices]
    if load_content:
        # Read raw bytes; decode afterwards only when an encoding was given.
        data = []
        for filename in filenames:
            with open(filename, 'rb') as f:
                data.append(f.read())
        if encoding is not None:
            data = [d.decode(encoding, decode_error) for d in data]
        return Bunch(data=data,
                     filenames=filenames,
                     target_names=target_names,
                     target=target,
                     DESCR=description)
    return Bunch(filenames=filenames,
                 target_names=target_names,
                 target=target,
                 DESCR=description)
def load_iris():
    """Load and return the iris dataset (classification).
    The iris dataset is a classic and very easy multi-class classification
    dataset.
    =================   ==============
    Classes                          3
    Samples per class               50
    Samples total                  150
    Dimensionality                   4
    Features            real, positive
    =================   ==============
    Read more in the :ref:`User Guide <datasets>`.
    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'data', the data to learn, 'target', the classification labels,
        'target_names', the meaning of the labels, 'feature_names', the
        meaning of the features, and 'DESCR', the
        full description of the dataset.
    Examples
    --------
    Let's say you are interested in the samples 10, 25, and 50, and want to
    know their class name.
    >>> from sklearn.datasets import load_iris
    >>> data = load_iris()
    >>> data.target[[10, 25, 50]]
    array([0, 0, 1])
    >>> list(data.target_names)
    ['setosa', 'versicolor', 'virginica']
    """
    module_path = dirname(__file__)
    # First CSV row is a header: n_samples, n_features, then the class names.
    with open(join(module_path, 'data', 'iris.csv')) as csv_file:
        data_file = csv.reader(csv_file)
        temp = next(data_file)
        n_samples = int(temp[0])
        n_features = int(temp[1])
        target_names = np.array(temp[2:])
        data = np.empty((n_samples, n_features))
        # ``np.int``/``np.float`` were deprecated aliases of the builtins and
        # were removed in NumPy 1.24; use the builtin types (same semantics).
        target = np.empty((n_samples,), dtype=int)
        for i, ir in enumerate(data_file):
            data[i] = np.asarray(ir[:-1], dtype=float)
            target[i] = np.asarray(ir[-1], dtype=int)
    with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
        fdescr = rst_file.read()
    return Bunch(data=data, target=target,
                 target_names=target_names,
                 DESCR=fdescr,
                 feature_names=['sepal length (cm)', 'sepal width (cm)',
                                'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
    """Load and return the digits dataset (classification).
    Each datapoint is a 8x8 image of a digit.
    =================   ==============
    Classes                         10
    Samples per class             ~180
    Samples total                 1797
    Dimensionality                  64
    Features             integers 0-16
    =================   ==============
    Read more in the :ref:`User Guide <datasets>`.
    Parameters
    ----------
    n_class : integer, between 0 and 10, optional (default=10)
        The number of classes to return.
    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'data', the data to learn, 'images', the images corresponding
        to each sample, 'target', the classification labels for each
        sample, 'target_names', the meaning of the labels, and 'DESCR',
        the full description of the dataset.
    Examples
    --------
    To load the data and visualize the images::
        >>> from sklearn.datasets import load_digits
        >>> digits = load_digits()
        >>> print(digits.data.shape)
        (1797, 64)
        >>> import pylab as pl #doctest: +SKIP
        >>> pl.gray() #doctest: +SKIP
        >>> pl.matshow(digits.images[0]) #doctest: +SKIP
        >>> pl.show() #doctest: +SKIP
    """
    module_path = dirname(__file__)
    data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
                      delimiter=',')
    with open(join(module_path, 'descr', 'digits.rst')) as f:
        descr = f.read()
    # Last CSV column is the label; the rest is the flattened 8x8 image.
    target = data[:, -1]
    flat_data = data[:, :-1]
    # View (not copy) the flat data as a stack of 8x8 images.
    images = flat_data.view()
    images.shape = (-1, 8, 8)
    if n_class < 10:
        # Keep only samples whose label is below n_class.
        idx = target < n_class
        flat_data, target = flat_data[idx], target[idx]
        images = images[idx]
    # ``np.int`` was a deprecated alias of the builtin and was removed in
    # NumPy 1.24; ``int`` has identical semantics here.
    return Bunch(data=flat_data,
                 target=target.astype(int),
                 target_names=np.arange(10),
                 images=images,
                 DESCR=descr)
def load_diabetes():
    """Load and return the diabetes dataset (regression).
    ==============      ==================
    Samples total       442
    Dimensionality      10
    Features            real, -.2 < x < .2
    Targets             integer 25 - 346
    ==============      ==================
    Read more in the :ref:`User Guide <datasets>`.
    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'data', the data to learn and 'target', the regression target for each
        sample.
    """
    data_dir = join(dirname(__file__), 'data')
    arrays = {
        name: np.loadtxt(join(data_dir, 'diabetes_%s.csv.gz' % name))
        for name in ('data', 'target')
    }
    return Bunch(**arrays)
def load_linnerud():
    """Load and return the linnerud dataset (multivariate regression).
    Samples total: 20
    Dimensionality: 3 for both data and targets
    Features: integer
    Targets: integer
    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are: 'data' and
        'targets', the two multivariate datasets, with 'data' corresponding to
        the exercise and 'targets' corresponding to the physiological
        measurements, as well as 'feature_names' and 'target_names'.
    """
    base_dir = join(dirname(__file__), 'data/')
    exercise_path = base_dir + 'linnerud_exercise.csv'
    physiological_path = base_dir + 'linnerud_physiological.csv'
    # Numeric payload: skip the single header row.
    data_exercise = np.loadtxt(exercise_path, skiprows=1)
    data_physiological = np.loadtxt(physiological_path, skiprows=1)
    # Column names come from that same header row.
    with open(exercise_path) as f:
        header_exercise = f.readline().split()
    with open(physiological_path) as f:
        header_physiological = f.readline().split()
    with open(dirname(__file__) + '/descr/linnerud.rst') as f:
        descr = f.read()
    return Bunch(data=data_exercise, feature_names=header_exercise,
                 target=data_physiological,
                 target_names=header_physiological,
                 DESCR=descr)
def load_boston():
    """Load and return the boston house-prices dataset (regression).
    ==============     ==============
    Samples total                 506
    Dimensionality                 13
    Features           real, positive
    Targets             real 5. - 50.
    ==============     ==============
    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'data', the data to learn, 'target', the regression targets,
        and 'DESCR', the full description of the dataset.
    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> boston = load_boston()
    >>> print(boston.data.shape)
    (506, 13)
    """
    module_path = dirname(__file__)
    fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
    with open(fdescr_name) as f:
        descr_text = f.read()
    data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
    with open(data_file_name) as f:
        data_file = csv.reader(f)
        # First row: sample and feature counts; second row: feature names.
        temp = next(data_file)
        n_samples = int(temp[0])
        n_features = int(temp[1])
        data = np.empty((n_samples, n_features))
        target = np.empty((n_samples,))
        temp = next(data_file)  # names of features
        feature_names = np.array(temp)
        # ``np.float`` was a deprecated alias of the builtin and was removed
        # in NumPy 1.24; ``float`` has identical semantics here.
        for i, d in enumerate(data_file):
            data[i] = np.asarray(d[:-1], dtype=float)
            target[i] = np.asarray(d[-1], dtype=float)
    return Bunch(data=data,
                 target=target,
                 # last column is target value
                 feature_names=feature_names[:-1],
                 DESCR=descr_text)
def load_sample_images():
    """Load sample images for image manipulation.
    Loads both, ``china`` and ``flower``.
    Returns
    -------
    data : Bunch
        Dictionary-like object with the following attributes :
        'images', the two sample images, 'filenames', the file
        names for the images, and 'DESCR'
        the full description of the dataset.
    Examples
    --------
    To load the data and visualize the images:
    >>> from sklearn.datasets import load_sample_images
    >>> dataset = load_sample_images()     #doctest: +SKIP
    >>> len(dataset.images)                #doctest: +SKIP
    2
    >>> first_img_data = dataset.images[0] #doctest: +SKIP
    >>> first_img_data.shape               #doctest: +SKIP
    (427, 640, 3)
    >>> first_img_data.dtype               #doctest: +SKIP
    dtype('uint8')
    """
    # Try to import imread from scipy. We do this lazily here to prevent
    # this module from depending on PIL.
    # NOTE(review): ``scipy.misc.imread`` was removed in SciPy 1.2 -- on
    # modern SciPy both imports below fail and this raises ImportError even
    # when PIL is installed; consider imageio/Pillow directly.
    try:
        try:
            from scipy.misc import imread
        except ImportError:
            from scipy.misc.pilutil import imread
    except ImportError:
        raise ImportError("The Python Imaging Library (PIL) "
                          "is required to load data from jpeg files")
    module_path = join(dirname(__file__), "images")
    with open(join(module_path, 'README.txt')) as f:
        descr = f.read()
    # Every .jpg next to README.txt is considered a sample image.
    filenames = [join(module_path, filename)
                 for filename in os.listdir(module_path)
                 if filename.endswith(".jpg")]
    # Load image data for each image in the source folder.
    images = [imread(filename) for filename in filenames]
    return Bunch(images=images,
                 filenames=filenames,
                 DESCR=descr)
def load_sample_image(image_name):
    """Load the numpy array of a single sample image
    Parameters
    -----------
    image_name: {`china.jpg`, `flower.jpg`}
        The name of the sample image loaded
    Returns
    -------
    img: 3D array
        The image as a numpy array: height x width x color
    Examples
    ---------
    >>> from sklearn.datasets import load_sample_image
    >>> china = load_sample_image('china.jpg')   # doctest: +SKIP
    >>> china.dtype                              # doctest: +SKIP
    dtype('uint8')
    >>> china.shape                              # doctest: +SKIP
    (427, 640, 3)
    >>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
    >>> flower.dtype                             # doctest: +SKIP
    dtype('uint8')
    >>> flower.shape                             # doctest: +SKIP
    (427, 640, 3)
    """
    dataset = load_sample_images()
    # Return the first image whose filename ends with the requested name.
    for idx, fname in enumerate(dataset.filenames):
        if fname.endswith(image_name):
            return dataset.images[idx]
    raise AttributeError("Cannot find sample image: %s" % image_name)
|
bsd-3-clause
|
TrentHouliston/VisualMesh
|
learning/test.py
|
1
|
3050
|
#!/usr/bin/env python3
import os
import re
import math
import json
import yaml
import numpy as np
import tensorflow as tf
from . import dataset
def test(sess,
         network,
         mesh_type,
         mesh_size,
         model_dir,
         input_path,
         output_path):
    """Evaluate a trained Visual Mesh network on the held-out 20% split.

    Restores the latest checkpoint from *model_dir*, runs the classifier over
    the test portion of *input_path*, and appends one JSON line per image to
    *output_path* with per-confidence-bucket true/false positive counts.
    """
    # Initialise global variables
    sess.run(tf.global_variables_initializer())
    save_vars = {v.name: v for v in tf.trainable_variables()}
    saver = tf.train.Saver(save_vars)
    # Get our model directory and load it if it exists
    # NOTE(review): model_path is computed but never used; restore() works
    # from latest_checkpoint directly.
    model_path = os.path.join(model_dir, 'model.ckpt')
    checkpoint_file = tf.train.latest_checkpoint(model_dir)
    print('Loading model {}'.format(checkpoint_file))
    saver.restore(sess, checkpoint_file)
    # Load our dataset
    print('Loading file list')
    files = dataset.get_files(input_path, mesh_type, mesh_size)
    print('Loaded {} files'.format(len(files)))
    # Load our test data: the last 20% of the file list (the first 80% is
    # presumably the training split -- mirrors the slicing used at train time).
    data_string = dataset.dataset(
        files[round(len(files) * 0.8):],
        mesh_type=mesh_type,
        variants=False,
        repeat=1,
        batch_size=10,
        shuffle=False
    )
    # Get our iterator handle
    data_handle = sess.run(data_string)
    # Fetch per-point prediction, label, and the source image filename.
    classification = [network['network'][..., 0],
                      network['Y'][..., 0],
                      network['files'][..., 1],
                      ]
    results = []
    # Histogram resolution for the confidence buckets written per image.
    buckets = 1000
    with open(os.path.join(output_path), 'w') as output_file:
        # Loop through the data
        while True:
            try:
                # Get the difference between our labels and our expectations
                # for tp, tn, fp, fn, f in zip(*sess.run(confusion, feed_dict={ network['handle']: data_handle })):
                for X, Y, f in zip(*sess.run(classification, feed_dict={ network['handle']: data_handle })):
                    # Derive the metadata path/frame number from the image name.
                    meta_file = re.sub(r'(.+)image(\d+)\.jpg', r'\1meta\2.json', f.decode('utf-8'))
                    fno = int(re.match(r'.+image(\d+)\.jpg', f.decode('utf-8')).group(1))
                    with open(meta_file, 'r') as f:
                        meta = json.load(f)
                    # Euclidean distance camera -> ball (position + height).
                    distance = np.array(meta['ball']['position'] + [meta['camera']['height']])
                    distance = np.linalg.norm(distance)
                    # Bucket each point's confidence; label decides tp vs fp.
                    tp = [0] * (buckets + 1)
                    fp = [0] * (buckets + 1)
                    for x, y in zip(X, Y):
                        b = math.floor(x * buckets)
                        if y == 0:
                            fp[b] += 1
                        else:
                            tp[b] += 1
                    json.dump({
                        'fno': fno,
                        'd': float(distance),
                        'tp': tp,
                        'fp': fp,
                    }, output_file)
                    output_file.write('\n')
                    output_file.flush()
                    print(f'Testing file {fno}')
            except tf.errors.OutOfRangeError:
                # Iterator exhausted: the single pass over the test set is done.
                print('Testing Done')
                break
|
mit
|
aliyun/aliyun-spark-deploy-tool
|
bin/core/nginx.py
|
2
|
1545
|
#!/usr/bin/python
#coding=utf-8
import sys
import os
import utils
from core.common import GlobalVar
from config_nginx import generate_config_file
def copy_file(opts, src_file, ip, dst):
    """Copy *src_file* to *dst* on host *ip* via sshpass/scp.

    NOTE(review): the command is built by %-interpolation and run through a
    shell, so values containing shell metacharacters are not escaped --
    acceptable only for trusted configuration values; prefer subprocess with
    an argument list.
    """
    try:
        os.system("sshpass -p %s scp -r %s %s %s@%s:%s" % (opts.pwd, " ".join(utils.ssh_args()), src_file, opts.user, ip, dst))
    except Exception as e:
        print(e.message)
        # Bare ``raise`` re-raises the active exception and preserves the
        # original traceback (``raise e`` resets it on Python 2).
        raise
def execute_remote_command(opts, ip, command):
    # Run *command* on the remote host via sshpass/ssh (password auth).
    # NOTE(review): built with %-interpolation and executed through a shell;
    # arguments are not escaped -- trusted config values only.
    os.system("sshpass -p %s ssh %s %s@%s %s" % (opts.pwd, " ".join(utils.ssh_args()), opts.user, ip, command))


def execute_local_command(command):
    # Run *command* locally through the shell.
    os.system(command)
def start_nginx(opts, host_info_file, ip):
    """Generate nginx.conf from *host_info_file*, push it to *ip*, start nginx.

    Returns 1 on success and -1 on failure (the error is printed, not raised).
    """
    try:
        nginx_config_template_file = "%s/conf/nginx.conf.template" % GlobalVar.SPARK_ECS_DIR
        local_nginx_config = "%s/conf/nginx.conf" % GlobalVar.SPARK_ECS_DIR
        dst = "/opt/nginx-1.9.1/conf/nginx.conf"
        generate_config_file(host_info_file, nginx_config_template_file, local_nginx_config)
        copy_file(opts, local_nginx_config, ip, dst)
        start_nginx_command = "/opt/nginx-1.9.1/sbin/nginx"
        execute_remote_command(opts, ip, start_nginx_command)
        return 1
    except Exception as e:
        # print() call form (identical output) instead of the Python-2-only
        # print statement; the rest of this file already uses print().
        print("start nginx failed %s" % str(e.message))
        return -1
def do_stop_nginx(opts, ip):
    """Stop nginx on the remote host *ip*.

    Returns 1 on success and -1 on failure (the error is printed, not raised).
    """
    try:
        stop_nginx_command = "/opt/nginx-1.9.1/sbin/nginx -s stop"
        execute_remote_command(opts, ip, stop_nginx_command)
        return 1
    except Exception as e:
        # print() call form instead of the Python-2-only print statement, and
        # the "filed" typo in the error message is corrected to "failed".
        print("stop nginx failed " + str(e.message))
        return -1
|
artistic-2.0
|
ruud-v-a/rhythmbox
|
plugins/rbzeitgeist/rbzeitgeist.py
|
1
|
6528
|
# -.- coding: utf-8 -.-
# Copyright © 2009 Markus Korn <thekorn@gmx.de>
# Copyright © 2010 Laszlo Pandy <laszlok2@gmail.com>
# Copyright © 2011 Michal Hruby <michal.mhr@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# The Rhythmbox authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and Rhythmbox. This permission is above and beyond the permissions granted
# by the GPL license by which Rhythmbox is covered. If you modify this code
# you may extend this exception to your version of the code, but you are not
# obligated to do so. If you do not wish to do so, delete this exception
# statement from your version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import time
import rb
from gi.repository import GObject, Gio, GLib, Peas
from gi.repository import RB
from zeitgeist.client import ZeitgeistClient
from zeitgeist.datamodel import Event, Subject, Interpretation, Manifestation
# Connect to the Zeitgeist daemon once at import time; all plugin instances
# share this client. When the daemon is unreachable, IFACE stays None and the
# plugin becomes a no-op (every handler below checks IFACE first).
try:
    IFACE = ZeitgeistClient()
except RuntimeError as e:
    print("Unable to connect to Zeitgeist, won't send events. Reason: '%s'" % e)
    IFACE = None
class ZeitgeistPlugin(GObject.Object, Peas.Activatable):
    """
    Rhythmbox plugin that forwards listening activity to the Zeitgeist
    event log.

    An ACCESS event is inserted when a song starts playing and a LEAVE
    event when it stops; each event's manifestation records whether the
    song switch was user-initiated or automatic (end-of-stream).
    """
    __gtype_name__ = 'ZeitgeistPlugin'
    # The RB shell object; injected by libpeas when the plugin is loaded.
    object = GObject.property(type=GObject.Object)

    def __init__(self):
        GObject.Object.__init__(self)

    def do_activate(self):
        """Wire up player signals; called by libpeas on plugin activation."""
        print("Loading Zeitgeist plugin...")
        if IFACE is not None:
            shell = self.object
            shell_player = shell.props.shell_player
            self.__psc_id = shell_player.connect("playing-song-changed", self.playing_song_changed)
            backend_player = shell_player.props.player
            self.__eos_id = backend_player.connect("eos", self.on_backend_eos)
            # True when the current song change was user-initiated; cleared
            # by the "eos" handler when the change happens automatically.
            self.__manual_switch = True
            self.__current_song = None
            if IFACE.get_version() >= [0, 3, 2, 999]:
                IFACE.register_data_source("org.gnome.Rhythmbox,dataprovider", "Rhythmbox", "Play and organize your music collection",
                                           [Event.new_for_values(actor="application://rhythmbox.desktop")])

    @staticmethod
    def get_song_info(db, entry):
        """Return a dict of album/artist/title/location for a RhythmDB entry."""
        # we don't want the PROP_MEDIA_TYPE, as it doesn't contain mimetype
        # of the audio file itself
        song = {
            "album": entry.get_string(RB.RhythmDBPropType.ALBUM),
            "artist": entry.get_string(RB.RhythmDBPropType.ARTIST),
            "title": entry.get_string(RB.RhythmDBPropType.TITLE),
            "location": entry.get_playback_uri(),
        }
        return song

    def on_backend_eos(self, backend_player, stream_data, eos_early):
        # EOS signal means that the song changed because the song is over.
        # ie. the user did not explicitly change the song.
        self.__manual_switch = False

    def playing_song_changed(self, shell, entry):
        """Emit LEAVE for the previous song and ACCESS for the new one."""
        if self.__current_song is not None:
            self.send_to_zeitgeist_async(self.__current_song, Interpretation.LEAVE_EVENT)
        if entry is not None:
            self.send_to_zeitgeist_async(entry, Interpretation.ACCESS_EVENT)
        self.__current_song = entry
        # Reset only once the mainloop goes idle, i.e. after a potential
        # "eos" signal and the queued zeitgeist sends have been processed.
        GLib.idle_add(self.reset_manual_switch)

    def reset_manual_switch(self):
        """
        After the eos signal has fired, and after the zeitgeist events have
        been sent asynchronously, reset the manual_switch variable.
        """
        self.__manual_switch = True

    def send_to_zeitgeist_async(self, entry, event_type):
        """
        We do async here because the "eos" signal is fired
        *after* the "playing-song-changed" signal.
        We don't know if the song change was manual or automatic
        until we get get the eos signal. If the mainloop goes to
        idle, it means there are no more signals scheduled, so we
        will have already received the eos if it was coming.
        """
        shell = self.object
        db = shell.props.db
        GLib.idle_add(self.send_to_zeitgeist, db, entry, event_type)

    def send_to_zeitgeist(self, db, entry, event_type):
        """Build and insert the Zeitgeist event for *entry* (idle callback)."""
        song = self.get_song_info(db, entry)
        if self.__manual_switch:
            manifest = Manifestation.USER_ACTIVITY
        else:
            manifest = Manifestation.SCHEDULED_ACTIVITY

        def file_info_complete(obj, res, user_data):
            # Completion callback of the async mimetype query issued below.
            try:
                fi = obj.query_info_finish(res)
            except:
                # Query failed (e.g. file gone or unreachable); skip logging.
                return
            uri_mimetype = fi.get_content_type()
            subject = Subject.new_for_values(
                uri=song["location"],
                interpretation=unicode(Interpretation.AUDIO),
                manifestation=unicode(Manifestation.FILE_DATA_OBJECT),
                origin=song["location"].rpartition("/")[0],
                mimetype=uri_mimetype,
                text=" - ".join([song["title"], song["artist"], song["album"]])
            )
            event = Event.new_for_values(
                timestamp=int(time.time()*1000),
                interpretation=unicode(event_type),
                manifestation=unicode(manifest),
                actor="application://rhythmbox.desktop",
                subjects=[subject,]
            )
            IFACE.insert_event(event)

        f = Gio.file_new_for_uri(song["location"])
        f.query_info_async(Gio.FILE_ATTRIBUTE_STANDARD_CONTENT_TYPE, Gio.FileQueryInfoFlags.NONE, GLib.PRIORITY_DEFAULT, None, file_info_complete, None)

    def do_deactivate(self):
        """Disconnect signal handlers; called by libpeas on deactivation."""
        print("Deactivating Zeitgeist plugin...")
        if IFACE is not None:
            shell = self.object
            shell_player = shell.props.shell_player
            shell_player.disconnect(self.__psc_id)
            self.__psc_id = None
            backend_player = shell_player.props.player
            backend_player.disconnect(self.__eos_id)
            self.__eos_id = None
            self.__current_song = None
|
gpl-2.0
|
DenniFurst3/Dennis
|
py/openage/convert/fix_data.py
|
46
|
1296
|
# data fixing script
#
# as you can imagine, the data entries may contain some semi-correct
# values, which we need to adapt. this is done in this file.
def fix_data(data):
    """
    updates given input with modifications.

    input: empiresdat object, vanilla, fully read.
    output: empiresdat object, fixed (same object, mutated in place).
    """

    ###
    # Terrain fixes
    ###

    # Remove terrains with slp_id == -1; we'll need them again in the
    # future, with fixed slp ids. (Comprehension instead of an assigned
    # lambda + filter, per PEP 8 E731.)
    data.terrains = [terrain for terrain in data.terrains if terrain.slp_id >= 0]

    # Assign correct blending modes.
    # key:   dat file stored mode
    # value: corrected mode
    # Resulting values are also priorities:
    #  -> higher => gets selected as mask for two partners.
    blendmode_map = {
        # identical modes: [0,1,7,8], [4,6]
        0: 1,  # dirt, grass, palm_desert
        1: 3,  # farms
        2: 2,  # beach
        3: 0,  # water
        4: 1,  # shallows
        5: 4,  # roads
        6: 5,  # ice
        7: 6,  # snow
        8: 4,  # no terrain has it, but the mode exists..
    }
    for terrain in data.terrains:
        terrain.blend_mode = blendmode_map[terrain.blend_mode]

    # Set correct terrain ids (indices shifted after the removal above).
    for idx, terrain in enumerate(data.terrains):
        terrain.terrain_id = idx

    return data
|
gpl-3.0
|
lizan/envoy
|
source/extensions/filters/network/kafka/protocol/generator.py
|
4
|
25849
|
#!/usr/bin/python
# Main library file containing all the protocol generation logic.
def generate_main_code(type, main_header_file, resolver_cc_file, metrics_header_file, input_files):
  """
  Main code generator.

  Takes input files and processes them into structures representing a Kafka message (request or
  response).

  These responses are then used to create:
  - main_header_file - contains definitions of Kafka structures and their deserializers
  - resolver_cc_file - contains request api key & version mapping to deserializer (from header file)
  - metrics_header_file - contains metrics with names corresponding to messages

  :param type: message kind selecting the template set (used as 'request'/'response' by templates;
               name shadows the builtin but is kept for interface compatibility).
  :param input_files: list of Kafka message spec (JSON) file paths.
  """
  processor = StatefulProcessor()
  # Parse provided input files.
  messages = processor.parse_messages(input_files)
  complex_type_template = RenderingHelper.get_template('complex_type_template.j2')
  parsers_template = RenderingHelper.get_template("%s_parser.j2" % type)
  main_header_contents = ''
  for message in messages:
    # For each child structure that is used by request/response, render its matching C++ code.
    dependencies = message.compute_declaration_chain()
    for dependency in dependencies:
      main_header_contents += complex_type_template.render(complex_type=dependency)
    # Each top-level structure (e.g. FetchRequest/FetchResponse) needs corresponding parsers.
    main_header_contents += parsers_template.render(complex_type=message)
  # Full file with headers, namespace declaration etc.
  template = RenderingHelper.get_template("%ss_h.j2" % type)
  contents = template.render(contents=main_header_contents)
  # Generate main header file.
  with open(main_header_file, 'w') as fd:
    fd.write(contents)
  # Generate ...resolver.cc file.
  template = RenderingHelper.get_template("kafka_%s_resolver_cc.j2" % type)
  contents = template.render(message_types=messages)
  with open(resolver_cc_file, 'w') as fd:
    fd.write(contents)
  # Generate ...metrics.h file.
  template = RenderingHelper.get_template("%s_metrics_h.j2" % type)
  contents = template.render(message_types=messages)
  with open(metrics_header_file, 'w') as fd:
    fd.write(contents)
def generate_test_code(type, header_test_cc_file, codec_test_cc_file, utilities_cc_file,
                       input_files):
  """
  Test code generator.

  Takes input files and processes them into structures representing a Kafka message (request or
  response).

  These responses are then used to create:
  - header_test_cc_file - tests for basic message serialization deserialization,
  - codec_test_cc_file - tests involving codec and Request/ResponseParserResolver,
  - utilities_cc_file - utilities for creating sample messages.

  :param type: message kind selecting the template set (name shadows the builtin; kept
               for interface compatibility).
  :param input_files: list of Kafka message spec (JSON) file paths.
  """
  processor = StatefulProcessor()
  # Parse provided input files.
  messages = processor.parse_messages(input_files)
  # Generate header-test file.
  template = RenderingHelper.get_template("%ss_test_cc.j2" % type)
  contents = template.render(message_types=messages)
  with open(header_test_cc_file, 'w') as fd:
    fd.write(contents)
  # Generate codec-test file.
  template = RenderingHelper.get_template("%s_codec_%s_test_cc.j2" % (type, type))
  contents = template.render(message_types=messages)
  with open(codec_test_cc_file, 'w') as fd:
    fd.write(contents)
  # Generate utilities file.
  template = RenderingHelper.get_template("%s_utilities_cc.j2" % type)
  contents = template.render(message_types=messages)
  with open(utilities_cc_file, 'w') as fd:
    fd.write(contents)
class StatefulProcessor:
  """
  Helper entity that keeps state during the processing.

  Some state needs to be shared across multiple message types, as we need to handle identical
  sub-type names (e.g. both AlterConfigsRequest & IncrementalAlterConfigsRequest have child
  AlterConfigsResource, what would cause a compile-time error if we were to handle it trivially).
  """

  def __init__(self):
    # Complex types that have been encountered during processing.
    self.known_types = set()
    # Name of parent message type that's being processed right now.
    self.currently_processed_message_type = None
    # Common structs declared in this message type.
    self.common_structs = {}

  def parse_messages(self, input_files):
    """
    Parse request/response structures from provided input files.

    Returns the parsed messages sorted by their 'api_key' extra attribute.
    """
    import re
    import json
    messages = []
    # Sort the input files, as the processing is stateful, as we want the same order every time.
    input_files.sort()
    # For each specification file, remove comments, and parse the remains.
    for input_file in input_files:
      try:
        with open(input_file, 'r') as fd:
          raw_contents = fd.read()
          # Spec files carry '//' comments, which are not valid JSON - strip them.
          without_comments = re.sub(r'\s*//.*\n', '\n', raw_contents)
          without_empty_newlines = re.sub(r'^\s*$', '', without_comments, flags=re.MULTILINE)
          message_spec = json.loads(without_empty_newlines)
          message = self.parse_top_level_element(message_spec)
          messages.append(message)
      except Exception as e:
        # Name the offending file before re-raising, to ease debugging.
        print('could not process %s' % input_file)
        raise
    # Sort messages by api_key.
    messages.sort(key=lambda x: x.get_extra('api_key'))
    return messages

  def parse_top_level_element(self, spec):
    """
    Parse a given structure into a request/response.

    Request/response is just a complex type, that has name & version information kept in differently
    named fields, compared to sub-structures in a message.
    """
    self.currently_processed_message_type = spec['name']
    # Figure out all versions of this message type.
    # NOTE(review): '2 << 16 - 1' parses as '2 << 15' (== 65536) due to operator
    # precedence; confirm whether '(2 << 16) - 1' was the intended upper bound.
    versions = Statics.parse_version_string(spec['validVersions'], 2 << 16 - 1)
    # Figure out the flexible versions.
    flexible_versions_string = spec.get('flexibleVersions', 'none')
    if 'none' != flexible_versions_string:
      flexible_versions = Statics.parse_version_string(flexible_versions_string, versions[-1])
    else:
      flexible_versions = []
    # Sanity check - all flexible versions need to be versioned.
    if [x for x in flexible_versions if x not in versions]:
      raise ValueError('invalid flexible versions')
    try:
      # In 2.4 some types are declared at top level, and only referenced inside.
      # So let's parse them and store them in state.
      common_structs = spec.get('commonStructs')
      if common_structs is not None:
        for common_struct in common_structs:
          common_struct_name = common_struct['name']
          common_struct_versions = Statics.parse_version_string(common_struct['versions'],
                                                                versions[-1])
          parsed_complex = self.parse_complex_type(common_struct_name, common_struct,
                                                   common_struct_versions)
          self.common_structs[parsed_complex.name] = parsed_complex
      # Parse the type itself.
      complex_type = self.parse_complex_type(self.currently_processed_message_type, spec, versions)
      complex_type.register_flexible_versions(flexible_versions)
      # Request / response types need to carry api key version.
      result = complex_type.with_extra('api_key', spec['apiKey'])
      return result
    finally:
      # Always clear per-message state, even if parsing failed.
      self.common_structs = {}
      self.currently_processed_message_type = None

  def parse_complex_type(self, type_name, field_spec, versions):
    """
    Parse given complex type, returning a structure that holds its name, field specification and
    allowed versions.
    """
    fields_el = field_spec.get('fields')
    if fields_el is not None:
      fields = []
      for child_field in field_spec['fields']:
        child = self.parse_field(child_field, versions[-1])
        if child is not None:
          fields.append(child)
      # Some of the types repeat multiple times (e.g. AlterableConfig).
      # In such a case, every second or later occurrence of the same name is going to be prefixed
      # with parent type, e.g. we have AlterableConfig (for AlterConfigsRequest) and then
      # IncrementalAlterConfigsRequestAlterableConfig (for IncrementalAlterConfigsRequest).
      # This keeps names unique, while keeping non-duplicate ones short.
      if type_name not in self.known_types:
        self.known_types.add(type_name)
      else:
        type_name = self.currently_processed_message_type + type_name
        self.known_types.add(type_name)
      return Complex(type_name, fields, versions)
    else:
      # No inline 'fields' element - this references a common struct parsed earlier.
      return self.common_structs[type_name]

  def parse_field(self, field_spec, highest_possible_version):
    """
    Parse given field, returning a structure holding the name, type, and versions when this field is
    actually used (nullable or not). Obviously, field cannot be used in version higher than its
    type's usage.

    Returns None for fields carrying a 'tag' attribute (callers drop them).
    """
    if field_spec.get('tag') is not None:
      return None
    version_usage = Statics.parse_version_string(field_spec['versions'], highest_possible_version)
    # range(-1) is an empty range: the field is never nullable unless declared so.
    version_usage_as_nullable = Statics.parse_version_string(
        field_spec['nullableVersions'],
        highest_possible_version) if 'nullableVersions' in field_spec else range(-1)
    parsed_type = self.parse_type(field_spec['type'], field_spec, highest_possible_version)
    return FieldSpec(field_spec['name'], parsed_type, version_usage, version_usage_as_nullable)

  def parse_type(self, type_name, field_spec, highest_possible_version):
    """
    Parse a given type element - returns an array type, primitive (e.g. uint32_t) or complex one.
    """
    if (type_name.startswith('[]')):
      # In spec files, array types are defined as `[]underlying_type` instead of having its own
      # element with type inside.
      underlying_type = self.parse_type(type_name[2:], field_spec, highest_possible_version)
      return Array(underlying_type)
    else:
      if (type_name in Primitive.USABLE_PRIMITIVE_TYPE_NAMES):
        return Primitive(type_name, field_spec.get('default'))
      else:
        versions = Statics.parse_version_string(field_spec['versions'], highest_possible_version)
        return self.parse_complex_type(type_name, field_spec, versions)
class Statics:

  @staticmethod
  def parse_version_string(raw_versions, highest_possible_version):
    """
    Translate a spec-file version string into the matching integer range.

    Accepted forms: 'N+' (N up to the highest possible version, inclusive),
    'A-B' (inclusive span), or a plain single version 'N'.
    """
    if raw_versions.endswith('+'):
      # Open-ended range, capped by the highest version the message allows.
      lower_bound = int(raw_versions[:-1])
      return range(lower_bound, highest_possible_version + 1)
    if '-' in raw_versions:
      # Explicit inclusive span 'low-high'.
      low, high = raw_versions.split('-', 1)
      return range(int(low), int(high) + 1)
    # Plain single version.
    only = int(raw_versions)
    return range(only, only + 1)
class FieldList:
  """
  List of fields used by given entity (request or child structure) in given message version
  (as fields get added or removed across versions and/or they change compaction level).
  """

  def __init__(self, version, uses_compact_fields, fields):
    # Message version this list describes.
    self.version = version
    # Whether this version uses the compact ("flexible") encoding.
    self.uses_compact_fields = uses_compact_fields
    # All declared fields of the structure (used or not in this version).
    self.fields = fields

  def used_fields(self):
    """
    Return list of fields that are actually used in this version of structure.
    """
    return filter(lambda x: x.used_in_version(self.version), self.fields)

  def constructor_signature(self):
    """
    Return constructor signature.

    Multiple versions of the same structure can have identical signatures (due to version bumps in
    Kafka).
    """
    parameter_spec = map(lambda x: x.parameter_declaration(self.version), self.used_fields())
    return ', '.join(parameter_spec)

  def constructor_init_list(self):
    """
    Renders member initialization list in constructor.

    Takes care of potential optional<T> conversions (as field could be T in V1, but optional<T>
    in V2).
    """
    init_list = []
    for field in self.fields:
      if field.used_in_version(self.version):
        if field.is_nullable():
          if field.is_nullable_in_version(self.version):
            # Field is optional<T>, and the parameter is optional<T> in this version.
            init_list_item = '%s_{%s}' % (field.name, field.name)
            init_list.append(init_list_item)
          else:
            # Field is optional<T>, and the parameter is T in this version.
            init_list_item = '%s_{absl::make_optional(%s)}' % (field.name, field.name)
            init_list.append(init_list_item)
        else:
          # Field is T, so parameter cannot be optional<T>.
          init_list_item = '%s_{%s}' % (field.name, field.name)
          init_list.append(init_list_item)
      else:
        # Field is not used in this version, so we need to put in default value.
        init_list_item = '%s_{%s}' % (field.name, field.default_value())
        init_list.append(init_list_item)
      pass  # NOTE(review): dead statement; harmless, could be removed.
    return ', '.join(init_list)

  def field_count(self):
    # Number of fields actually present in this version.
    return len(list(self.used_fields()))

  def example_value(self):
    # Comma-joined sample values for the fields used in this version (for tests).
    return ', '.join(map(lambda x: x.example_value_for_test(self.version), self.used_fields()))
class FieldSpec:
  """
  Represents a field present in a structure (request, or child structure thereof).

  Contains name, type, and versions when it is used (nullable or not).
  """

  def __init__(self, name, type, version_usage, version_usage_as_nullable):
    import re
    # Translate the spec's camelCase name into C++-friendly snake_case.
    separated = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    self.name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', separated).lower()
    self.type = type
    self.version_usage = version_usage
    self.version_usage_as_nullable = version_usage_as_nullable

  def is_nullable(self):
    # A field is considered nullable if it is nullable in at least one version.
    return len(self.version_usage_as_nullable) > 0

  def is_nullable_in_version(self, version):
    """
    Whether the field is nullable in given version.

    Fields can be non-nullable in earlier versions.
    See https://github.com/apache/kafka/tree/2.2.0-rc0/clients/src/main/resources/common/message#nullable-fields
    """
    return version in self.version_usage_as_nullable

  def used_in_version(self, version):
    return version in self.version_usage

  def field_declaration(self):
    # C++ member declaration; nullable fields are wrapped in absl::optional.
    if self.is_nullable():
      return 'absl::optional<%s> %s' % (self.type.name, self.name)
    else:
      return '%s %s' % (self.type.name, self.name)

  def parameter_declaration(self, version):
    # Constructor-parameter declaration for the given message version.
    if self.is_nullable_in_version(version):
      return 'absl::optional<%s> %s' % (self.type.name, self.name)
    else:
      return '%s %s' % (self.type.name, self.name)

  def default_value(self):
    """Return the C++ default-value expression used when the field is absent."""
    if self.is_nullable():
      type_default_value = self.type.default_value()
      # For nullable fields, it's possible to have (Java) null as default value.
      if type_default_value != 'null':
        return '{%s}' % type_default_value
      else:
        return 'absl::nullopt'
    else:
      return str(self.type.default_value())

  def example_value_for_test(self, version):
    # Sample value used in generated unit tests.
    if self.is_nullable():
      return 'absl::make_optional<%s>(%s)' % (self.type.name,
                                              self.type.example_value_for_test(version))
    else:
      return str(self.type.example_value_for_test(version))

  def deserializer_name_in_version(self, version, compact):
    # Nullable fields get wrapped in a 'Nullable...' deserializer.
    if self.is_nullable_in_version(version):
      return 'Nullable%s' % self.type.deserializer_name_in_version(version, compact)
    else:
      return self.type.deserializer_name_in_version(version, compact)

  def is_printable(self):
    return self.type.is_printable()
class TypeSpecification:
  """
  Abstract base for everything a field's type can be: Array, Primitive or Complex.
  """

  def compute_declaration_chain(self):
    """
    Computes types that need to be declared before this type can be declared, in C++ sense.
    """
    raise NotImplementedError()

  def deserializer_name_in_version(self, version, compact):
    """
    Renders the deserializer name of given type, in message with given version.
    """
    raise NotImplementedError()

  def default_value(self):
    """
    Returns a default value for given type.
    """
    raise NotImplementedError()

  def has_flexible_handling(self):
    """
    Whether the given type has special encoding when carrying message is using flexible encoding.
    """
    raise NotImplementedError()

  def example_value_for_test(self, version):
    # Sample C++ value expression for generated tests.
    raise NotImplementedError()

  def is_printable(self):
    # Whether generated printing code can render this type.
    raise NotImplementedError()
class Array(TypeSpecification):
  """
  Represents array complex type.

  To use instance of this type, it is necessary to declare structures required by self.underlying
  (e.g. to use Array<Foo>, we need to have `struct Foo {...}`).
  """

  def __init__(self, underlying):
    # Element type of the array.
    self.underlying = underlying

  @property
  def name(self):
    return 'std::vector<%s>' % self.underlying.name

  def compute_declaration_chain(self):
    # To use an array of type T, we just need to be capable of using type T.
    return self.underlying.compute_declaration_chain()

  def deserializer_name_in_version(self, version, compact):
    # For arrays, deserializer name is (Compact)(Nullable)ArrayDeserializer<ElementDeserializer>.
    element_deserializer_name = self.underlying.deserializer_name_in_version(version, compact)
    return '%sArrayDeserializer<%s>' % ("Compact" if compact else "", element_deserializer_name)

  def default_value(self):
    return 'std::vector<%s>{}' % (self.underlying.name)

  def has_flexible_handling(self):
    # Arrays are always encoded differently in flexible versions.
    return True

  def example_value_for_test(self, version):
    return 'std::vector<%s>{ %s }' % (self.underlying.name,
                                      self.underlying.example_value_for_test(version))

  def is_printable(self):
    return self.underlying.is_printable()
class Primitive(TypeSpecification):
  """
  Represents a Kafka primitive value.
  """

  # Kafka spec type names that may appear as a field's 'type'.
  USABLE_PRIMITIVE_TYPE_NAMES = ['bool', 'int8', 'int16', 'int32', 'int64', 'string', 'bytes']

  # Kafka spec type name -> C++ type used in generated code.
  KAFKA_TYPE_TO_ENVOY_TYPE = {
      'string': 'std::string',
      'bool': 'bool',
      'int8': 'int8_t',
      'int16': 'int16_t',
      'int32': 'int32_t',
      'int64': 'int64_t',
      'bytes': 'Bytes',
      'tagged_fields': 'TaggedFields',
  }

  # Kafka spec type name -> deserializer class used in generated code.
  KAFKA_TYPE_TO_DESERIALIZER = {
      'string': 'StringDeserializer',
      'bool': 'BooleanDeserializer',
      'int8': 'Int8Deserializer',
      'int16': 'Int16Deserializer',
      'int32': 'Int32Deserializer',
      'int64': 'Int64Deserializer',
      'bytes': 'BytesDeserializer',
      'tagged_fields': 'TaggedFieldsDeserializer',
  }

  # Types that have a dedicated compact-encoding deserializer.
  KAFKA_TYPE_TO_COMPACT_DESERIALIZER = {
      'string': 'CompactStringDeserializer',
      'bytes': 'CompactBytesDeserializer'
  }

  # See https://github.com/apache/kafka/tree/trunk/clients/src/main/resources/common/message#deserializing-messages
  KAFKA_TYPE_TO_DEFAULT_VALUE = {
      'string': '""',
      'bool': 'false',
      'int8': '0',
      'int16': '0',
      'int32': '0',
      'int64': '0',
      'bytes': '{}',
      'tagged_fields': 'TaggedFields({})',
  }

  # Custom values that make test code more readable.
  KAFKA_TYPE_TO_EXAMPLE_VALUE_FOR_TEST = {
      'string':
          '"string"',
      'bool':
          'false',
      'int8':
          'static_cast<int8_t>(8)',
      'int16':
          'static_cast<int16_t>(16)',
      'int32':
          'static_cast<int32_t>(32)',
      'int64':
          'static_cast<int64_t>(64)',
      'bytes':
          'Bytes({0, 1, 2, 3})',
      'tagged_fields':
          'TaggedFields{std::vector<TaggedField>{{10, Bytes({1, 2, 3})}, {20, Bytes({4, 5, 6})}}}',
  }

  def __init__(self, name, custom_default_value):
    # Keep the Kafka-spec name for later deserializer/default lookups.
    self.original_name = name
    self.name = Primitive.compute(name, Primitive.KAFKA_TYPE_TO_ENVOY_TYPE)
    self.custom_default_value = custom_default_value

  @staticmethod
  def compute(name, map):
    # Strict lookup: an unknown Kafka type name is a hard error.
    if name in map:
      return map[name]
    else:
      raise ValueError(name)

  def compute_declaration_chain(self):
    # Primitives need no declarations.
    return []

  def deserializer_name_in_version(self, version, compact):
    # string/bytes have dedicated compact deserializers; other primitives are
    # encoded identically in both modes.
    if compact and self.original_name in Primitive.KAFKA_TYPE_TO_COMPACT_DESERIALIZER.keys():
      return Primitive.compute(self.original_name, Primitive.KAFKA_TYPE_TO_COMPACT_DESERIALIZER)
    else:
      return Primitive.compute(self.original_name, Primitive.KAFKA_TYPE_TO_DESERIALIZER)

  def default_value(self):
    # A spec-provided default wins over the per-type fallback.
    if self.custom_default_value is not None:
      return self.custom_default_value
    else:
      return Primitive.compute(self.original_name, Primitive.KAFKA_TYPE_TO_DEFAULT_VALUE)

  def has_flexible_handling(self):
    return self.original_name in ['string', 'bytes', 'tagged_fields']

  def example_value_for_test(self, version):
    return Primitive.compute(self.original_name, Primitive.KAFKA_TYPE_TO_EXAMPLE_VALUE_FOR_TEST)

  def is_printable(self):
    return self.name not in ['Bytes']
class FieldSerializationSpec():
  """
  Binds a field to the set of versions and the computeSize/encode method names
  to be used for those versions (plain vs. compact encoding).
  """

  def __init__(self, field, versions, compute_size_method_name, encode_method_name):
    self.field = field
    self.versions = versions
    self.compute_size_method_name = compute_size_method_name
    self.encode_method_name = encode_method_name
class Complex(TypeSpecification):
  """
  Represents a complex type (multiple types aggregated into one).

  This type gets mapped to a C++ struct.
  """

  def __init__(self, name, fields, versions):
    self.name = name
    self.fields = fields
    self.versions = versions
    self.flexible_versions = None  # Will be set in 'register_flexible_versions'.
    # Arbitrary extra attributes (e.g. 'api_key'); see with_extra/get_extra.
    self.attributes = {}

  def register_flexible_versions(self, flexible_versions):
    # If flexible versions are present, so we need to add placeholder 'tagged_fields' field to
    # *every* type that's used in by this message type.
    for type in self.compute_declaration_chain():
      type.flexible_versions = flexible_versions
      if len(flexible_versions) > 0:
        tagged_fields_field = FieldSpec('tagged_fields', Primitive('tagged_fields', None),
                                        flexible_versions, [])
        type.fields.append(tagged_fields_field)

  def compute_declaration_chain(self):
    """
    Computes all dependencies, what means all non-primitive types used by this type.

    They need to be declared before this struct is declared.
    """
    result = []
    for field in self.fields:
      field_dependencies = field.type.compute_declaration_chain()
      for field_dependency in field_dependencies:
        # De-duplicate while preserving declaration order.
        if field_dependency not in result:
          result.append(field_dependency)
    result.append(self)
    return result

  def with_extra(self, key, value):
    # Attach an arbitrary attribute; returns self for chaining.
    self.attributes[key] = value
    return self

  def get_extra(self, key):
    return self.attributes[key]

  def compute_constructors(self):
    """
    Field lists for different versions may not differ (as Kafka can bump version without any
    changes). But constructors need to be unique, so we need to remove duplicates if the signatures
    match.
    """
    signature_to_constructor = {}
    for field_list in self.compute_field_lists():
      signature = field_list.constructor_signature()
      constructor = signature_to_constructor.get(signature)
      if constructor is None:
        entry = {}
        entry['versions'] = [field_list.version]
        entry['signature'] = signature
        if (len(signature) > 0):
          entry['full_declaration'] = '%s(%s): %s {};' % (self.name, signature,
                                                          field_list.constructor_init_list())
        else:
          # Zero-argument constructor for versions with no used fields.
          entry['full_declaration'] = '%s() {};' % self.name
        signature_to_constructor[signature] = entry
      else:
        # Same signature as an earlier version - just record the extra version.
        constructor['versions'].append(field_list.version)
    return sorted(signature_to_constructor.values(), key=lambda x: x['versions'][0])

  def compute_field_lists(self):
    """
    Return field lists representing each of structure versions.
    """
    field_lists = []
    for version in self.versions:
      field_list = FieldList(version, version in self.flexible_versions, self.fields)
      field_lists.append(field_list)
    return field_lists

  def compute_serialization_specs(self):
    # Split each field's versions into flexible/non-flexible buckets, as each
    # bucket uses different computeSize/encode method names in generated C++.
    result = []
    for field in self.fields:
      if field.type.has_flexible_handling():
        flexible = [x for x in field.version_usage if x in self.flexible_versions]
        non_flexible = [x for x in field.version_usage if x not in flexible]
        if non_flexible:
          result.append(FieldSerializationSpec(field, non_flexible, 'computeSize', 'encode'))
        if flexible:
          result.append(
              FieldSerializationSpec(field, flexible, 'computeCompactSize', 'encodeCompact'))
      else:
        result.append(FieldSerializationSpec(field, field.version_usage, 'computeSize', 'encode'))
    return result

  def deserializer_name_in_version(self, version, compact):
    # NOTE(review): 'compact' is unused here; kept for interface parity with
    # the other TypeSpecification subclasses.
    return '%sV%dDeserializer' % (self.name, version)

  def name_in_c_case(self):
    # CamelCase -> snake_case conversion of the type name.
    import re
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', self.name)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

  def default_value(self):
    raise NotImplementedError('unable to create default value of complex type')

  def has_flexible_handling(self):
    return False

  def example_value_for_test(self, version):
    # Build 'TypeName(field1_example, field2_example, ...)' for this version.
    field_list = next(fl for fl in self.compute_field_lists() if fl.version == version)
    example_values = map(lambda x: x.example_value_for_test(version), field_list.used_fields())
    return '%s(%s)' % (self.name, ', '.join(example_values))

  def is_printable(self):
    return True
class RenderingHelper:
  """
  Helper for jinja templates.
  """

  @staticmethod
  def get_template(template):
    """Load the named jinja2 template from the entry script's directory."""
    import jinja2
    import os
    import sys
    # Templates are resolved relatively to main start script, due to main & test templates being
    # stored in different directories.
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(
        searchpath=os.path.dirname(os.path.abspath(sys.argv[0]))))
    return env.get_template(template)
|
apache-2.0
|
cldershem/osf.io
|
website/addons/dropbox/model.py
|
11
|
11235
|
# -*- coding: utf-8 -*-
import os
import base64
import logging
import pymongo
from modularodm import fields
from framework.auth import Auth
from website.addons.base import exceptions
from website.addons.base import AddonUserSettingsBase, AddonNodeSettingsBase, GuidFile
from website.addons.base import StorageAddonBase
from website.addons.dropbox.utils import clean_path, DropboxNodeLogger
logger = logging.getLogger(__name__)
class DropboxFile(GuidFile):
    """A Dropbox file model with a GUID. Created lazily upon viewing a
    file's detail page.
    """
    __indices__ = [
        {
            'key_or_list': [
                ('node', pymongo.ASCENDING),
                ('path', pymongo.ASCENDING),
            ],
            'unique': True,
        }
    ]

    #: Full path to the file, e.g. 'My Pictures/foo.png'
    path = fields.StringField(required=True, index=True)

    @property
    def file_name(self):
        # NOTE(review): self.revision is not defined in this class; presumably
        # provided by GuidFile - confirm.
        if self.revision:
            return '{0}_{1}_{2}.html'.format(self._id, self.revision, base64.b64encode(self.folder))
        return '{0}_{1}_{2}.html'.format(self._id, self.unique_identifier, base64.b64encode(self.folder))

    @property
    def waterbutler_path(self):
        # Path relative to the configured Dropbox folder, always '/'-prefixed.
        path = '/' + self.path
        if self.folder == '/':
            return path
        return path.replace(self.folder, '', 1)

    @property
    def folder(self):
        # Dropbox folder configured on the owning node's addon settings.
        addon = self.node.get_addon('dropbox')
        if not addon or not addon.folder:
            return ''  # Must return a str value this will error out properly later
        return addon.folder

    @property
    def provider(self):
        # Storage provider short name.
        return 'dropbox'

    @property
    def version_identifier(self):
        # Query-parameter key used to address file versions.
        return 'revision'

    @property
    def unique_identifier(self):
        # Dropbox revision id taken from the cached waterbutler metadata.
        return self._metadata_cache['extra']['revisionId']
class DropboxUserSettings(AddonUserSettingsBase):
    """Stores user-specific dropbox information, including the Oauth access
    token.
    """
    dropbox_id = fields.StringField(required=False)
    access_token = fields.StringField(required=False)
    dropbox_info = fields.DictionaryField(required=False)

    # TODO(sloria): The `user` param in unnecessary for AddonUserSettings
    def to_json(self, user=None):
        """Return a dictionary representation of the user settings.

        The dictionary keys and values will be available as variables in
        dropbox_user_settings.mako.
        """
        output = super(DropboxUserSettings, self).to_json(self.owner)
        output['has_auth'] = self.has_auth
        return output

    @property
    def has_auth(self):
        # True iff an OAuth access token is stored.
        return bool(self.access_token)

    def delete(self, save=True):
        # Deauthorize linked nodes before deleting the settings record.
        self.clear()
        super(DropboxUserSettings, self).delete(save)

    def clear(self):
        """Clear settings and deauthorize any associated nodes."""
        self.dropbox_id = None
        self.access_token = None
        for node_settings in self.dropboxnodesettings__authorized:
            node_settings.deauthorize(Auth(self.owner))
            node_settings.save()
        return self

    def __repr__(self):
        return u'<DropboxUserSettings(user={self.owner.username!r})>'.format(self=self)
class DropboxNodeSettings(StorageAddonBase, AddonNodeSettingsBase):
user_settings = fields.ForeignField(
'dropboxusersettings', backref='authorized'
)
folder = fields.StringField(default=None)
#: Information saved at the time of registration
#: Note: This is unused right now
registration_data = fields.DictionaryField()
@property
def folder_name(self):
return os.path.split(self.folder)[1]
@property
def display_name(self):
return '{0}: {1}'.format(self.config.full_name, self.folder)
@property
def complete(self):
return self.has_auth and self.folder is not None
@property
def has_auth(self):
"""Whether an access token is associated with this node."""
return bool(self.user_settings and self.user_settings.has_auth)
    def find_or_create_file_guid(self, path):
        # Normalize to the full path inside the linked Dropbox folder before lookup.
        return DropboxFile.get_or_create(
            node=self.owner,
            path=clean_path(os.path.join(self.folder, path.lstrip('/'))),
        )
    def set_folder(self, folder, auth):
        """Set the linked Dropbox folder and record a 'folder_selected' log.

        :param folder: Path of the Dropbox folder to link.
        :param auth: Auth object for the acting user.
        """
        self.folder = folder
        # Add log to node
        nodelogger = DropboxNodeLogger(node=self.owner, auth=auth)
        nodelogger.log(action="folder_selected", save=True)
def set_user_auth(self, user_settings):
"""Import a user's Dropbox authentication and create a NodeLog.
:param DropboxUserSettings user_settings: The user settings to link.
"""
self.user_settings = user_settings
nodelogger = DropboxNodeLogger(node=self.owner, auth=Auth(user_settings.owner))
nodelogger.log(action="node_authorized", save=True)
# TODO: Is this used? If not, remove this and perhaps remove the 'deleted' field
def delete(self, save=True):
self.deauthorize(add_log=False)
super(DropboxNodeSettings, self).delete(save)
def deauthorize(self, auth=None, add_log=True):
"""Remove user authorization from this node and log the event."""
node = self.owner
folder = self.folder
self.folder = None
self.user_settings = None
if add_log:
extra = {'folder': folder}
nodelogger = DropboxNodeLogger(node=node, auth=auth)
nodelogger.log(action="node_deauthorized", extra=extra, save=True)
def serialize_waterbutler_credentials(self):
if not self.has_auth:
raise exceptions.AddonError('Addon is not authorized')
return {'token': self.user_settings.access_token}
def serialize_waterbutler_settings(self):
if not self.folder:
raise exceptions.AddonError('Folder is not configured')
return {'folder': self.folder}
def create_waterbutler_log(self, auth, action, metadata):
cleaned_path = clean_path(os.path.join(self.folder, metadata['path']))
url = self.owner.web_url_for('addon_view_or_download_file', path=cleaned_path, provider='dropbox')
self.owner.add_log(
'dropbox_{0}'.format(action),
auth=auth,
params={
'project': self.owner.parent_id,
'node': self.owner._id,
'path': metadata['path'],
'folder': self.folder,
'urls': {
'view': url,
'download': url + '?action=download'
},
},
)
def __repr__(self):
return u'<DropboxNodeSettings(node_id={self.owner._primary_key!r})>'.format(self=self)
##### Callback overrides #####
def before_register_message(self, node, user):
"""Return warning text to display if user auth will be copied to a
registration.
"""
category = node.project_or_component
if self.user_settings and self.user_settings.has_auth:
return (
u'The contents of Dropbox add-ons cannot be registered at this time; '
u'the Dropbox folder linked to this {category} will not be included '
u'as part of this registration.'
).format(**locals())
# backwards compatibility
before_register = before_register_message
def before_remove_contributor_message(self, node, removed):
"""Return warning text to display if removed contributor is the user
who authorized the Dropbox addon
"""
if self.user_settings and self.user_settings.owner == removed:
category = node.project_or_component
name = removed.fullname
return (u'The Dropbox add-on for this {category} is authenticated by {name}. '
'Removing this user will also remove write access to Dropbox '
'unless another contributor re-authenticates the add-on.'
).format(**locals())
# backwards compatibility
before_remove_contributor = before_remove_contributor_message
# Note: Registering Dropbox content is disabled for now; leaving this code
# here in case we enable registrations later on.
# @jmcarp
# def after_register(self, node, registration, user, save=True):
# """After registering a node, copy the user settings and save the
# chosen folder.
#
# :return: A tuple of the form (cloned_settings, message)
# """
# clone, message = super(DropboxNodeSettings, self).after_register(
# node, registration, user, save=False
# )
# # Copy user_settings and add registration data
# if self.has_auth and self.folder is not None:
# clone.user_settings = self.user_settings
# clone.registration_data['folder'] = self.folder
# if save:
# clone.save()
# return clone, message
def after_fork(self, node, fork, user, save=True):
"""After forking, copy user settings if the user is the one who authorized
the addon.
:return: A tuple of the form (cloned_settings, message)
"""
clone, _ = super(DropboxNodeSettings, self).after_fork(
node=node, fork=fork, user=user, save=False
)
if self.user_settings and self.user_settings.owner == user:
clone.user_settings = self.user_settings
message = (
'Dropbox authorization copied to forked {cat}.'
).format(
cat=fork.project_or_component
)
else:
message = (
u'Dropbox authorization not copied to forked {cat}. You may '
'authorize this fork on the <u><a href="{url}">Settings</a></u> '
'page.'
).format(
url=fork.web_url_for('node_setting'),
cat=fork.project_or_component
)
if save:
clone.save()
return clone, message
def after_remove_contributor(self, node, removed, auth=None):
"""If the removed contributor was the user who authorized the Dropbox
addon, remove the auth credentials from this node.
Return the message text that will be displayed to the user.
"""
if self.user_settings and self.user_settings.owner == removed:
self.user_settings = None
self.save()
message = (
u'Because the Dropbox add-on for {category} "{title}" was authenticated '
u'by {user}, authentication information has been deleted.'
).format(
category=node.category_display,
title=node.title,
user=removed.fullname
)
if not auth or auth.user != removed:
url = node.web_url_for('node_setting')
message += (
u' You can re-authenticate on the <u><a href="{url}">Settings</a></u> page.'
).format(url=url)
#
return message
def after_delete(self, node, user):
self.deauthorize(Auth(user=user), add_log=True)
self.save()
|
apache-2.0
|
maximon93/fabric-bolt
|
fabric_bolt/web_hooks/tests/test_hooks.py
|
10
|
5201
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from django.contrib.auth import get_user_model
from django.core.cache import cache
from model_mommy import mommy
from fabric_bolt.projects import models
from fabric_bolt.web_hooks import models as hook_models
from fabric_bolt.web_hooks.tasks import DeliverHook
import mock
User = get_user_model()
class TestHooks(TestCase):
    """Tests for project/global web hooks and the DeliverHook celery task."""

    project_type = None
    project = None
    stage = None
    configuration = None
    task = None
    deployment = None

    def setUp(self):
        password = 'mypassword'
        self.user = User.objects.create_superuser(email='myemail@test.com', password=password)
        # You'll need to log him in before you can send requests through the client
        self.client.login(email=self.user.email, password=password)
        self._create_project()

    def _create_project(self):
        """Create a minimal project fixture: project, stage, configuration,
        task, deployment, one global hook, and one project-scoped hook.
        """
        # Bare bones project
        project = models.Project()
        project.name = 'TEST_PROJECT'
        project.description = 'TEST_DESCRIPTION'
        project.save()

        # Bare bones stage
        stage = models.Stage()
        stage.project = project
        stage.name = 'Production'
        stage.save()
        self.stage = stage

        # Bare bones configuration
        configuration = models.Configuration()
        configuration.project = project
        configuration.stage = stage
        configuration.key = 'KEY'
        configuration.value = 'VALUE'
        configuration.prompt_me_for_input = True
        configuration.save()
        self.configuration = configuration

        # Bare bones task
        task = models.Task()
        task.name = 'TASK_NAME'
        task.save()
        self.task = task

        # Bare bones deployment
        deployment = models.Deployment()
        deployment.user = self.user
        deployment.stage = stage
        deployment.comments = 'COMMENTS'
        deployment.output = 'OUTPUT'
        deployment.task = task
        deployment.save()

        # Setup Hook (global: no project set)
        hook = hook_models.Hook()
        hook.url = 'http://example.com'
        hook.save()

        # Hook scoped to this project
        project_hook = hook_models.Hook()
        project_hook.url = 'http://example.com/project/hook/'
        project_hook.project = project
        project_hook.save()

        self.deployment = deployment
        self.hook = hook
        self.project_hook = project_hook
        self.project = project

    def test_web_hooks(self):
        # A project sees both its own hook and the global hook.
        self.assertEqual(2, self.project.web_hooks().count())

    def test_global_web_hooks(self):
        global_hooks = hook_models.Hook.objects.filter(project=None)
        self.assertEqual(1, global_hooks.count())

    def test_project_web_hooks(self):
        project_hooks = hook_models.Hook.objects.filter(project=self.project)
        self.assertEqual(1, project_hooks.count())

    @mock.patch('fabric_bolt.web_hooks.tasks.requests')
    def test_task_post_data(self, mock_requests):
        # post_data returns the response object on success.
        mock_requests.post.return_value.status_code = 200
        d = DeliverHook()
        ret = d.post_data('http://www.example.com', {'junk': 'payload'})
        self.assertEqual(ret.status_code, 200)

    # def test_task_post_data_run(self):
    #
    #     d = DeliverHook()
    #     ret = d.run('http://www.example.com', {'junk': 'payload'})

    @mock.patch('fabric_bolt.web_hooks.tasks.requests')
    def test_task_delete_hook_410(self, mock_requests):
        # post_data deletes hooks when the status code is 410
        mock_requests.post.return_value.status_code = 410
        h = hook_models.Hook()
        h.url = 'http://example.com/project/delete/me/'
        h.project = self.project
        h.save()
        hook_id = h.pk
        d = DeliverHook()
        ret = d.post_data('http://example.com/api/123', {'junk': 'payload'}, hook_id)

        def look_up_error(hook_id):
            hook_models.Hook.objects.get(pk=hook_id)

        self.assertRaises(hook_models.Hook.DoesNotExist, look_up_error, hook_id)

    @mock.patch('fabric_bolt.web_hooks.tasks.requests')
    def test_task_delete_hook(self, mock_requests):
        # A 410 normally triggers hook deletion, but without passing a hook id
        # post_data has nothing to delete -- the hook must survive.
        mock_requests.post.return_value.status_code = 410
        h = hook_models.Hook()
        h.url = 'http://example.com/project/delete/me/'
        h.project = self.project
        h.save()
        d = DeliverHook()
        # We're testing we don't have hook deleted, since we're not passing in the hook id
        ret = d.post_data('http://example.com/api/123', {'junk': 'payload'})
        # Raises Hook.DoesNotExist (failing the test) if the hook was deleted.
        hook_models.Hook.objects.get(pk=h.pk)

    # @override_settings(CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
    #                    CELERY_ALWAYS_EAGER=True,
    #                    BROKER_BACKEND='memory')
    # def test_task_wrapper(self):
    #     from fabric_bolt.web_hooks.tasks import deliver_hook_wrapper
    #
    #     deliver_hook_wrapper('http://www.example.com', {'dummy': 'payload'})
|
mit
|
jeffzheng1/tensorflow
|
tensorflow/python/kernel_tests/embedding_ops_test.py
|
3
|
26642
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for ops used with embeddings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
def _AsLong(array):
"""Casts arrays elements to long type. Used to convert from numpy tf."""
return [int(x) for x in array]
class ScatterAddSubTest(tf.test.TestCase):
  """Tests tf.scatter_add / tf.scatter_sub against a numpy reference."""

  def _TestCase(self, shape, indices, scatter_op=tf.scatter_add):
    """Run a random test case with the given shape and indices.

    Args:
      shape: Shape of the parameters array.
      indices: One-dimensional array of ints, the indices of the last dimension
               of the parameters to update.
      scatter_op: ScatterAdd or ScatterSub.
    """
    # NOTE(review): calling super().setUp() from this helper (rather than
    # from an overridden setUp) looks misplaced -- left unchanged.
    super(ScatterAddSubTest, self).setUp()
    with self.test_session(use_gpu=False):
      # Create a random parameter array of given shape
      p_init = np.random.rand(*shape).astype("f")
      # Create the shape of the update array. All dimensions except the last
      # match the parameter array, the last dimension equals the # of indices.
      vals_shape = [len(indices)] + shape[1:]
      vals_init = np.random.rand(*vals_shape).astype("f")
      v_i = [float(x) for x in vals_init.ravel()]
      p = tf.Variable(p_init)
      vals = tf.constant(v_i, shape=vals_shape, name="vals")
      ind = tf.constant(indices, dtype=tf.int32)
      p2 = scatter_op(p, ind, vals, name="updated_p")
      # p = init
      tf.global_variables_initializer().run()
      # p += vals
      result = p2.eval()
      # Compute the expected 'p' using numpy operations.
      for i, ind in enumerate(indices):
        if scatter_op == tf.scatter_add:
          p_init.reshape(shape[0], -1)[ind, :] += (
              vals_init.reshape(vals_shape[0], -1)[i, :])
        else:
          p_init.reshape(shape[0], -1)[ind, :] -= (
              vals_init.reshape(vals_shape[0], -1)[i, :])
      self.assertTrue(all((p_init == result).ravel()))

  def testNoRepetitions(self):
    self._TestCase([2, 2], [1])
    self._TestCase([4, 4, 4], [2, 0])
    self._TestCase([43, 20, 10, 10], [42, 5, 6, 1, 3, 5, 7, 9])

  def testWithRepetitions(self):
    # Repeated indices accumulate (scatter is applied per index occurrence).
    self._TestCase([2, 2], [1, 1])
    self._TestCase([5, 3, 9, 5], [2, 0, 4, 1, 3, 1, 4, 0, 4, 3])
    self._TestCase([32, 4, 4], [31] * 8)

  def testRandom(self):
    # Random shapes of rank 4, random indices
    for _ in range(5):
      shape = np.random.randint(1, 20, size=4)
      indices = np.random.randint(shape[0], size=2 * shape[0])
      self._TestCase(_AsLong(list(shape)), list(indices))

  def testSubRandom(self):
    # Random shapes of rank 4, random indices
    for _ in range(5):
      shape = np.random.randint(1, 20, size=4)
      indices = np.random.randint(shape[0], size=2 * shape[0])
      self._TestCase(_AsLong(list(shape)), list(indices),
                     tf.scatter_sub)

  def testWrongShape(self):
    # Indices and values mismatch.
    var = tf.Variable(tf.zeros(shape=[1024, 64, 64], dtype=tf.float32))
    indices = tf.placeholder(tf.int32, shape=[32])
    values = tf.placeholder(tf.float32, shape=[33, 64, 64])
    with self.assertRaises(ValueError):
      tf.scatter_add(var, indices, values)

    # Var and values mismatch.
    values = tf.placeholder(tf.float32, shape=[32, 64, 63])
    with self.assertRaises(ValueError):
      tf.scatter_add(var, indices, values)
def _PName(param_id):
return "p" + str(param_id)
def _EmbeddingParams(num_shards, vocab_size,
                     dtype=tf.float32,
                     shape=None,
                     use_shapeless_placeholder=False):
  """Build sharded embedding parameters for the tests below.

  Creates `num_shards` tensors named "p0", "p1", ... whose first dimensions
  sum to `vocab_size`; excess rows go evenly on the first shards.

  Args:
    num_shards: Number of shards to split the vocabulary across.
    vocab_size: Total number of embedding rows.
    dtype: Element type (tf.float32 or tf.float64).
    shape: Per-row embedding shape; defaults to [10] when falsy.
    use_shapeless_placeholder: If True, shards are placeholders with unknown
      static shape; otherwise constants.

  Returns:
    (p, params, feed_dict): `p` is the list of shard tensors; `params` maps
    tensor names ("p<i>:0") to random numpy values in [1, 2); `feed_dict`
    feeds those values by tensor name.
  """
  p = []
  params = {}
  feed_dict = {}
  if not shape: shape = [10]
  for i in range(num_shards):
    shard_shape = [vocab_size // num_shards] + shape
    if i < vocab_size % num_shards:  # Excess goes evenly on the first shards
      shard_shape[0] += 1
    param_name = _PName(i)
    if use_shapeless_placeholder:
      param = tf.placeholder(dtype, shape=None, name=param_name)
    else:
      param = tf.constant(1.0, shape=shard_shape, dtype=dtype, name=param_name)
    p.append(param)
    np_type = "f" if dtype == tf.float32 else "d"
    val = (np.random.rand(*shard_shape).astype(np_type)) + 1
    params[param_name + ":0"] = val
    feed_dict[param.name] = val
  return p, params, feed_dict
def _EmbeddingParamsAsPartitionedVariable(num_shards, vocab_size,
                                          dtype=tf.float32, shape=None):
  """Like _EmbeddingParams, but also builds a PartitionedVariable "p".

  Returns:
    (p, partitioned_variable, params, feed_dict) where the first, third and
    fourth elements are the _EmbeddingParams outputs and the second is a
    variable of shape [vocab_size] + shape initialized by concatenating the
    shard values along axis 0 and partitioned into at most `num_shards`.
  """
  p, params, feed_dict = _EmbeddingParams(
      num_shards, vocab_size, dtype=dtype, shape=shape)
  shape = shape or [10]
  partitioned_variable = tf.get_variable(
      "p",
      shape=[vocab_size] + shape,
      initializer=tf.concat(0, [params[p_i.name] for p_i in p]),
      partitioner=tf.min_max_variable_partitioner(
          max_partitions=num_shards, min_slice_size=1))
  return p, partitioned_variable, params, feed_dict
def _EmbeddingResult(params, id_vals, num_shards, vocab_size,
                     partition_strategy="mod",
                     weight_vals=None):
  """Numpy reference implementation of (weighted) embedding lookup.

  Args:
    params: Dict mapping shard tensor names ("p<i>:0") to numpy values.
    id_vals: Sequence of ids, or sequence of sequences of ids (one group per
      batch entry).
    num_shards: Number of shards the vocabulary is split across.
    vocab_size: Total number of embedding rows.
    partition_strategy: "mod" (row i lives in shard i % num_shards) or "div"
      (contiguous ranges of rows per shard, excess on the first shards).
    weight_vals: Optional per-id weights, same structure as `id_vals`;
      defaults to all ones.

  Returns:
    (values, weights, weights_squared): per-entry weighted embedding sums,
    weight sums, and squared-weight sums, each as a float32 numpy array.
  """
  if weight_vals is None:
    weight_vals = np.copy(id_vals)
    weight_vals.fill(1)
  values = []
  weights = []
  weights_squared = []
  for ids, wts in zip(id_vals, weight_vals):
    value_aggregation = None
    weight_aggregation = None
    squared_weight_aggregation = None
    # Allow scalar entries: treat a bare id as a singleton group.
    if isinstance(ids, tf.compat.integral_types):
      ids = [ids]
      wts = [wts]
    for i, weight_value in zip(ids, wts):
      if partition_strategy == "mod":
        val = np.copy(params[_PName(i % num_shards) + ":0"][
            i // num_shards, :]) * weight_value
      elif partition_strategy == "div":
        # "div" assigns ids_per_partition (+1 for the first `extras`
        # shards) contiguous rows to each shard.
        ids_per_partition, extras = divmod(vocab_size, num_shards)
        threshold = extras * (ids_per_partition + 1)
        if i < threshold:
          partition = i // (ids_per_partition + 1)
          offset = i % (ids_per_partition + 1)
        else:
          partition = extras + (i - threshold) // ids_per_partition
          offset = (i - threshold) % ids_per_partition
        val = np.copy(
            params[_PName(partition) + ":0"][offset, :]) * weight_value
      else:
        assert False
      if value_aggregation is None:
        assert weight_aggregation is None
        assert squared_weight_aggregation is None
        value_aggregation = val
        weight_aggregation = weight_value
        squared_weight_aggregation = weight_value * weight_value
      else:
        assert weight_aggregation is not None
        assert squared_weight_aggregation is not None
        value_aggregation += val
        weight_aggregation += weight_value
        squared_weight_aggregation += weight_value * weight_value
    values.append(value_aggregation)
    weights.append(weight_aggregation)
    weights_squared.append(squared_weight_aggregation)
  values = np.array(values).astype(np.float32)
  weights = np.array(weights).astype(np.float32)
  weights_squared = np.array(weights_squared).astype(np.float32)
  return values, weights, weights_squared
class EmbeddingLookupTest(tf.test.TestCase):
  """Tests tf.nn.embedding_lookup against the numpy reference above."""

  # This test looks up [0, 0] in a parameter matrix sharded 2 ways. Since
  # both the ids are in the first shard, one of the resulting lookup
  # vector is going to be empty. The subsequent DivOp fails because of that.
  # TODO(keveman): Disabling the test until the underlying problem is fixed.
  # NOTE(review): despite the TODO above, the test is currently enabled.
  def testSimpleSharded(self):
    with self.test_session():
      num_shards = 2
      vocab_size = 4
      p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)

      id_vals = np.array([0, 0])
      ids = tf.constant(list(id_vals), dtype=tf.int32)
      print("Construct ids", ids.get_shape())
      embedding = tf.nn.embedding_lookup(p, ids)

      tf_result = embedding.eval(feed_dict=feed_dict)
    np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
    self.assertAllEqual(np_result, tf_result)
    self.assertShapeEqual(np_result, embedding)

  def testSimpleShardedPartitionedVariable(self):
    with self.test_session() as sess:
      num_shards = 2
      vocab_size = 4
      p, p_variable, params, feed_dict = _EmbeddingParamsAsPartitionedVariable(
          num_shards, vocab_size)

      id_vals = np.array([0, 0])
      ids = tf.constant(list(id_vals), dtype=tf.int32)
      print("Construct ids", ids.get_shape())
      embedding = tf.nn.embedding_lookup(p_variable, ids)
      tf.global_variables_initializer().run()
      params_values = [params[p_i.name] for p_i in p]
      # Test that the PartitionedVariable components equal the list in p
      p_var_val = sess.run(list(p_variable))
      # Actual test
      tf_result = embedding.eval(feed_dict=feed_dict)
    np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
    self.assertAllEqual(params_values, p_var_val)
    self.assertAllEqual(np_result, tf_result)
    self.assertShapeEqual(np_result, embedding)

  def testShardedModPartitioningInt32Ids(self):
    with self.test_session():
      num_shards = 5
      vocab_size = 13
      # Embedding dimensions is 10. The vocab_size x 10 embedding
      # parameters are spread in num_shards matrices, so the first
      # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
      p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)

      num_vals = 30
      # Fetch num_vals embeddings for random word ids. Since
      # num_vals > vocab_size, this ought to have repetitions, so
      # will test that aspect.
      id_vals = np.random.randint(vocab_size, size=num_vals)
      ids = tf.constant(list(id_vals), dtype=tf.int32)

      embedding = tf.nn.embedding_lookup(p, ids)
      tf_result = embedding.eval(feed_dict=feed_dict)
    np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
    self.assertAllEqual(np_result, tf_result)
    self.assertShapeEqual(np_result, embedding)

  def testShardedModPartitioningInt64Ids(self):
    with self.test_session():
      num_shards = 5
      vocab_size = 13
      # Embedding dimensions is 10. The vocab_size x 10 embedding
      # parameters are spread in num_shards matrices, so the first
      # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
      p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)

      num_vals = 30
      # Fetch num_vals embeddings for random word ids. Since
      # num_vals > vocab_size, this ought to have repetitions, so
      # will test that aspect.
      id_vals = np.random.randint(vocab_size, size=num_vals)
      ids = tf.constant(list(id_vals), dtype=tf.int64)

      embedding = tf.nn.embedding_lookup(p, ids)
      tf_result = embedding.eval(feed_dict=feed_dict)
    np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
    self.assertAllEqual(np_result, tf_result)
    self.assertShapeEqual(np_result, embedding)

  def testShardedDivPartitioningInt32Ids(self):
    with self.test_session():
      num_shards = 5
      vocab_size = 13
      # Embedding dimensions is 10. The vocab_size x 10 embedding
      # parameters are spread in num_shards matrices, so the first
      # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
      p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)

      num_vals = 30
      # Fetch num_vals embeddings for random word ids. Since
      # num_vals > vocab_size, this ought to have repetitions, so
      # will test that aspect.
      id_vals = np.random.randint(vocab_size, size=num_vals)
      ids = tf.constant(list(id_vals), dtype=tf.int32)

      embedding = tf.nn.embedding_lookup(p, ids, partition_strategy="div")
      tf_result = embedding.eval(feed_dict=feed_dict)
    np_result, _, _ = _EmbeddingResult(
        params, id_vals, num_shards, vocab_size, partition_strategy="div")
    self.assertAllEqual(np_result, tf_result)
    self.assertShapeEqual(np_result, embedding)

  def testShardedDivPartitioningInt32IdsPartitionedVariable(self):
    with self.test_session():
      num_shards = 5
      vocab_size = 13
      # Embedding dimensions is 10. The vocab_size x 10 embedding
      # parameters are spread in num_shards matrices, so the first
      # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
      _, p_variable, params, feed_dict = _EmbeddingParamsAsPartitionedVariable(
          num_shards, vocab_size)

      num_vals = 30
      # Fetch num_vals embeddings for random word ids. Since
      # num_vals > vocab_size, this ought to have repetitions, so
      # will test that aspect.
      id_vals = np.random.randint(vocab_size, size=num_vals)
      ids = tf.constant(list(id_vals), dtype=tf.int32)

      tf.global_variables_initializer().run()
      embedding = tf.nn.embedding_lookup(
          p_variable, ids, partition_strategy="div")
      tf_result = embedding.eval(feed_dict=feed_dict)
    np_result, _, _ = _EmbeddingResult(
        params, id_vals, num_shards, vocab_size, partition_strategy="div")
    self.assertAllEqual(np_result, tf_result)
    self.assertShapeEqual(np_result, embedding)

  def testShardedDivPartitioningInt64Ids(self):
    with self.test_session():
      num_shards = 5
      vocab_size = 13
      # Embedding dimensions is 10. The vocab_size x 10 embedding
      # parameters are spread in num_shards matrices, so the first
      # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
      p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)

      num_vals = 30
      # Fetch num_vals embeddings for random word ids. Since
      # num_vals > vocab_size, this ought to have repetitions, so
      # will test that aspect.
      id_vals = np.random.randint(vocab_size, size=num_vals)
      ids = tf.constant(list(id_vals), dtype=tf.int64)

      embedding = tf.nn.embedding_lookup(p, ids, partition_strategy="div")
      tf_result = embedding.eval(feed_dict=feed_dict)
    np_result, _, _ = _EmbeddingResult(
        params, id_vals, num_shards, vocab_size, partition_strategy="div")
    self.assertAllEqual(np_result, tf_result)
    self.assertShapeEqual(np_result, embedding)

  def testShardedDivPartitioningUnknownParamShape(self):
    with self.test_session():
      num_shards = 5
      vocab_size = 13
      # Embedding dimensions is 10. The vocab_size x 10 embedding
      # parameters are spread in num_shards matrices, so the first
      # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.

      # We clear parameter shapes, to test when shape is not statically known.
      p, params, feed_dict = _EmbeddingParams(
          num_shards, vocab_size, use_shapeless_placeholder=True)

      num_vals = 30
      # Fetch num_vals embeddings for random word ids. Since
      # num_vals > vocab_size, this ought to have repetitions, so
      # will test that aspect.
      id_vals = np.random.randint(vocab_size, size=num_vals)
      ids = tf.constant(list(id_vals), dtype=tf.int64)

      embedding = tf.nn.embedding_lookup(p, ids, partition_strategy="div")
      tf_result = embedding.eval(feed_dict=feed_dict)
    np_result, _, _ = _EmbeddingResult(
        params, id_vals, num_shards, vocab_size, partition_strategy="div")
    self.assertAllEqual(np_result, tf_result)

  def testGradientsEmbeddingLookup(self):
    vocab_size = 9
    num_ids = 10
    id_vals = list(np.random.randint(vocab_size, size=num_ids))
    tf.logging.vlog(1, id_vals)
    for ids_shape in [(10,), (2, 5)]:
      for num_shards in [1, 3]:
        with self.test_session():
          ids = tf.constant(id_vals, shape=ids_shape, dtype=tf.int32)
          x, params, _ = _EmbeddingParams(
              num_shards, vocab_size, shape=[2])
          y = tf.nn.embedding_lookup(x, ids)
          y_shape = [num_ids] + list(params[_PName(0) + ":0"].shape[1:])
          x_name = [_PName(i) for i in range(num_shards)]
          x_init_value = [params[x_n + ":0"] for x_n in x_name]
          x_shape = [i.shape for i in x_init_value]
          err = tf.test.compute_gradient_error(x,
                                               x_shape,
                                               y,
                                               y_shape,
                                               x_init_value=x_init_value)
        self.assertLess(err, 1e-4)

  def testGradientsEmbeddingLookupWithComputedParams(self):
    vocab_size = 9
    num_ids = 5
    id_vals = list(np.random.randint(vocab_size, size=num_ids))
    tf.logging.vlog(1, id_vals)
    for num_shards in [1, 3]:
      with self.test_session():
        ids = tf.constant(id_vals, dtype=tf.int32)
        x, params, _ = _EmbeddingParams(
            num_shards, vocab_size, shape=[2])
        # This will force a conversion from IndexedSlices to Tensor.
        x_squared = [tf.square(elem) for elem in x]
        y = tf.nn.embedding_lookup(x_squared, ids)
        y_shape = [num_ids] + list(params[_PName(0) + ":0"].shape[1:])
        x_name = [_PName(i) for i in range(num_shards)]
        x_init_value = [params[x_n + ":0"] for x_n in x_name]
        x_shape = [i.shape for i in x_init_value]
        err = tf.test.compute_gradient_error(x,
                                             x_shape,
                                             y,
                                             y_shape,
                                             x_init_value=x_init_value)
      self.assertLess(err, 1e-3)

  def testConstructionNonSharded(self):
    # Graph construction alone should not raise.
    with tf.Graph().as_default():
      p = tf.Variable(tf.zeros(shape=[100, 100], dtype=tf.float32))
      ids = tf.constant([0, 1, 1, 7], dtype=tf.int32)
      tf.nn.embedding_lookup([p], ids)

  def testConstructionSharded(self):
    # Graph construction alone should not raise.
    with tf.Graph().as_default():
      p = []
      for _ in range(2):
        p += [tf.Variable(tf.zeros(shape=[100, 100], dtype=tf.float32))]
      ids = tf.constant([0, 1, 1, 17], dtype=tf.int32)
      tf.nn.embedding_lookup(p, ids)

  def testHigherRank(self):
    np.random.seed(8)
    with self.test_session():
      for params_shape in (12,), (6, 3):
        params = np.random.randn(*params_shape)
        for ids_shape in (3, 2), (4, 3):
          ids = np.random.randint(params.shape[0],
                                  size=np.prod(ids_shape)).reshape(ids_shape)
          # Compare nonsharded to gather
          simple = tf.nn.embedding_lookup(params, ids).eval()
          self.assertAllEqual(simple, tf.gather(params, ids).eval())
          # Run a few random sharded versions
          for procs in 1, 2, 3:
            stride = procs * tf.range(params.shape[0] // procs)
            split_params = [tf.gather(params, stride + p)
                            for p in xrange(procs)]
            sharded = tf.nn.embedding_lookup(split_params, ids).eval()
            self.assertAllEqual(simple, sharded)
class EmbeddingLookupSparseTest(tf.test.TestCase):
  """Tests tf.nn.embedding_lookup_sparse against the numpy reference."""

  def _RandomIdsAndWeights(self, batch_size, vocab_size):
    """Generate random sparse ids/weights for a batch.

    Returns:
      (sp_ids, sp_weights, ids, weights, vals_per_batch_entry): the first two
      are SparseTensors of shape [batch_size, max_val_per_entry]; `ids` and
      `weights` are the flat value arrays; the last gives the number of
      values in each batch entry.
    """
    max_val_per_entry = 6
    vals_per_batch_entry = np.random.randint(
        1, max_val_per_entry, size=batch_size)
    num_vals = np.sum(vals_per_batch_entry)

    ids = np.random.randint(vocab_size, size=num_vals)
    # Weights in [1, 2) so they are never zero.
    weights = 1 + np.random.rand(num_vals)

    indices = []
    for batch_entry, num_val in enumerate(vals_per_batch_entry):
      for val_index in range(num_val):
        indices.append([batch_entry, val_index])

    shape = [batch_size, max_val_per_entry]

    sp_ids = tf.SparseTensor(
        tf.constant(indices, tf.int64),
        tf.constant(ids, tf.int32),
        tf.constant(shape, tf.int64))
    sp_weights = tf.SparseTensor(
        tf.constant(indices, tf.int64),
        tf.constant(weights, tf.float32),
        tf.constant(shape, tf.int64))

    return sp_ids, sp_weights, ids, weights, vals_per_batch_entry

  def _GroupByBatchEntry(self, vals, vals_per_batch_entry):
    """Split the flat `vals` into per-batch-entry lists."""
    grouped_vals = []
    index = 0
    for num_val in vals_per_batch_entry:
      grouped_vals.append(list(vals[index: (index + num_val)]))
      index += num_val
    return grouped_vals

  def testEmbeddingLookupSparse(self):
    vocab_size = 13
    batch_size = 10
    param_shape = [2, 5]
    expected_lookup_result_shape = [None] + param_shape

    sp_ids, sp_weights, ids, weights, vals_per_batch_entry = (
        self._RandomIdsAndWeights(batch_size, vocab_size))

    grouped_ids = self._GroupByBatchEntry(ids, vals_per_batch_entry)
    grouped_weights = self._GroupByBatchEntry(weights, vals_per_batch_entry)
    grouped_ignored_weights = self._GroupByBatchEntry(
        np.ones(np.sum(vals_per_batch_entry)), vals_per_batch_entry)

    # Exercise every combination of sharding, combiner, dtype and whether
    # weights are supplied.
    for num_shards, combiner, dtype, ignore_weights in itertools.product(
        [1, 5],
        ["sum", "mean", "sqrtn"],
        [tf.float32, tf.float64],
        [True, False]):

      with self.test_session():
        p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size,
                                                shape=param_shape,
                                                dtype=dtype)
        embedding_sum = tf.nn.embedding_lookup_sparse(
            p, sp_ids, None if ignore_weights else sp_weights,
            combiner=combiner)

        self.assertEqual(embedding_sum.get_shape().as_list(),
                         expected_lookup_result_shape)

        tf_embedding_sum = embedding_sum.eval(feed_dict=feed_dict)

        np_embedding_sum, np_weight_sum, np_weight_sq_sum = _EmbeddingResult(
            params, grouped_ids, num_shards, vocab_size,
            weight_vals=grouped_ignored_weights
            if ignore_weights else grouped_weights)
        # Normalize the reference sums to match the combiner semantics.
        if combiner == "mean":
          np_embedding_sum /= np.reshape(np_weight_sum, (batch_size, 1, 1))
        if combiner == "sqrtn":
          np_embedding_sum /= np.reshape(
              np.sqrt(np_weight_sq_sum), (batch_size, 1, 1))
        self.assertAllClose(np_embedding_sum, tf_embedding_sum)

  def testGradientsEmbeddingLookupSparse(self):
    vocab_size = 12
    batch_size = 4
    param_shape = [2, 3]
    sp_ids, sp_weights, _, _, _ = (
        self._RandomIdsAndWeights(batch_size, vocab_size))

    for num_shards, combiner, dtype, ignore_weights in itertools.product(
        [1, 3],
        ["sum", "mean", "sqrtn"],
        [tf.float32, tf.float64],
        [True, False]):
      with self.test_session():
        x, params, _ = _EmbeddingParams(num_shards, vocab_size,
                                        shape=param_shape,
                                        dtype=dtype)

        y = tf.nn.embedding_lookup_sparse(
            x, sp_ids, None if ignore_weights else sp_weights,
            combiner=combiner)
        x_name = [_PName(i) for i in range(num_shards)]
        x_init_value = [params[x_n + ":0"] for x_n in x_name]
        x_shape = [i.shape for i in x_init_value]
        y_shape = [batch_size] + list(params[_PName(0) + ":0"].shape[1:])
        err = tf.test.compute_gradient_error(x,
                                             x_shape,
                                             y,
                                             y_shape,
                                             x_init_value=x_init_value)
      # float64 gradients are expected to be much more accurate.
      self.assertLess(err, 1e-5 if dtype == tf.float64 else 2e-3)

  def testIncompatibleShapes(self):
    with self.test_session():
      x, _, _ = _EmbeddingParams(1, 10, dtype=tf.float32)
      sp_ids = tf.SparseTensor(
          tf.constant([[0, 0], [0, 1], [1, 0]], tf.int64),
          tf.constant([0, 1, 2], tf.int32),
          tf.constant([2, 2], tf.int64))
      sp_weights = tf.SparseTensor(
          tf.constant([[0, 0], [0, 1]], tf.int64),
          tf.constant([12.0, 5.0], tf.float32),
          tf.constant([1, 2], tf.int64))

      # ids and weights have different numbers of entries -> error.
      with self.assertRaises(ValueError):
        tf.nn.embedding_lookup_sparse(x, sp_ids, sp_weights, combiner="mean")
class DynamicStitchOpTest(tf.test.TestCase):
  """Tests tf.dynamic_stitch on CPU and GPU.

  In all cases index 2 appears in both index tensors, so the value from the
  later tensor (the one containing [2, 3]) wins.
  """

  def testCint32Cpu(self):
    with self.test_session(use_gpu=False):
      indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]
      values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]
      self.assertAllEqual(
          tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])

  def testCint32Gpu(self):
    with self.test_session(use_gpu=True):
      indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]
      values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]
      self.assertAllEqual(
          tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])

  def testInt32Cpu(self):
    with self.test_session(use_gpu=False):
      indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]
      values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]
      self.assertAllEqual(
          tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])

  def testInt32Gpu(self):
    with self.test_session(use_gpu=True):
      indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]
      values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]
      self.assertAllEqual(
          tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])

  def testSumGradArgs(self):
    with self.test_session(use_gpu=False):
      indices = [tf.convert_to_tensor([0, 1, 2, 3]),
                 tf.convert_to_tensor([2, 3])]
      values = [tf.convert_to_tensor([2, 3, 5, 7]), tf.convert_to_tensor([1, 1])]
      self.assertAllEqual(
          tf.dynamic_stitch(indices, values).eval(), [2, 3, 1, 1])

  # We expect that the values are merged in order.
  def testStitchOrder(self):
    with self.test_session():
      indices = []
      np_values = []
      values = []
      # All ten tensors cover the same indices [0, 100); the last one wins.
      for _ in range(10):
        indices.extend([tf.convert_to_tensor(np.arange(100).astype(np.int32))])
        np_values.extend([np.random.uniform(size=100)])
        values.extend([tf.convert_to_tensor(np_values[-1])])
      stitched = tf.dynamic_stitch(indices, values).eval()
    self.assertAllEqual(np_values[-1], stitched)
# Standard TensorFlow test entry point: discovers and runs the TestCases above.
if __name__ == "__main__":
    tf.test.main()
|
apache-2.0
|
steelcowboy/PyFlowChart
|
pyflowchart/interface/modify_interface.py
|
1
|
11599
|
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from collections import OrderedDict
GRID_MARGIN = 5
ICON_SIZE = Gtk.IconSize.MENU
class ModifyGrid(Gtk.Grid):
    """A Gtk.Grid form for entering or editing a single course.

    Builds one labeled row per field (title, catalog name, units, prereqs,
    year/quarter, course type, GE type, notes) and exposes helpers to read
    (get_entry_values), populate (load_entry) and reset (clean_form) the form.
    """

    def __init__(self):
        Gtk.Grid.__init__(self)
        # Build all widgets first, then place them on the grid.
        self.generate_labels()
        self.generate_entries()
        self.attach_items()
        self.set_margin_top(GRID_MARGIN)
        self.set_margin_bottom(GRID_MARGIN)
        self.set_margin_start(GRID_MARGIN)
        self.set_margin_end(GRID_MARGIN)
        # Display string -> combo-box index, used by load_entry() to restore
        # a saved course into the selectors.
        self.quarter_map = {
            'Fall' : 0,
            'Winter' : 1,
            'Spring' : 2,
            'Summer' : 3
        }
        self.type_map = {
            'Major' : 0,
            'Support' : 1,
            'Concentration' : 2,
            'General Ed' : 3,
            'Free Elective' : 4,
            'Minor' : 5
        }
        # GE code -> index in the selector built by generate_ge_selector()
        # ('None' is index 0, then A1..A3, B1..B6, C1..C5, D1..D3, D4/E, D5, F).
        self.ge_type_map = {
            None: -1,
            'B6': 9,
            'D2': 16,
            'B3': 6,
            'D5': 19,
            'C1': 10,
            'B5': 8,
            'D1': 15,
            'B1': 4,
            'C2': 11,
            'D3': 17,
            'B2': 5,
            'A1': 1,
            'A2': 2,
            'C5': 14,
            'C4': 13,
            'A3': 3,
            'C3': 12,
            'B4': 7,
            'D4/E': 18,
            'F': 20
        }
        # NOTE(review): these three are never read inside this class —
        # presumably set/consumed by the surrounding UI code; confirm.
        self.add_year = None
        self.add_quarter = None
        self.tile = None
        self.show_all()

    def generate_labels(self):
        """Create the static row labels for the form."""
        self.instructions_label = Gtk.Label('Fill in the following information:')
        self.title_label = Gtk.Label('Course Title:')
        self.catalog_label = Gtk.Label('Catalog Title:')
        self.credits_label = Gtk.Label('Units:')
        self.prereqs_label = Gtk.Label('Prereqs:')
        self.time_label = Gtk.Label('Year and Quarter:')
        self.course_type_label = Gtk.Label('Course Type:')
        self.ge_type_label = Gtk.Label('GE Type: (optional)')
        self.notes_label = Gtk.Label('Notes: (optional)')
        # Order matters: attach_items() lays these out top to bottom.
        self.labels = [self.title_label, self.catalog_label, self.credits_label,
                       self.prereqs_label, self.time_label,
                       self.course_type_label, self.ge_type_label, self.notes_label]

    def generate_entries(self):
        """Create the input widgets for each form row."""
        self.title_entry = Gtk.Entry()
        self.title_entry.set_placeholder_text('Full title of the course')
        self.catalog_entry = Gtk.Entry()
        self.catalog_entry.set_placeholder_text('E.g. COMS 101')
        # Units: integer spinner clamped to 1..6.
        self.credits_spinner = Gtk.SpinButton.new_with_range(1,6,1)
        self.credits_spinner.set_digits(0)
        self.credits_spinner.set_numeric(True)
        self.credits_spinner.set_snap_to_ticks(True)
        self.credits_spinner.set_value(1)
        # Box to hold prereq entries and buttons
        self.prereqs_box = Gtk.Box()
        self.prereqs_box.set_margin_top(5)
        self.prereqs_box.set_margin_bottom(5)
        # Box to hold prereq entry
        self.prereq_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        self.change_buttons_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        # Start with one empty prereq entry and its +/- buttons.
        self.prereq_box.pack_start(Gtk.Entry(), True, True, 0)
        self.change_buttons_box.pack_start(self.create_change_box(), True, True, 0)
        self.prereqs_box.pack_start(self.prereq_box, True, True, 0)
        self.prereqs_box.pack_end(self.change_buttons_box, True, True, 0)
        self.time_box = Gtk.Box()
        self.time_box.set_homogeneous(True)
        self.year_selector = Gtk.ComboBoxText()
        self.quarter_selector = Gtk.ComboBoxText()
        for year in range(1,6):
            self.year_selector.append_text(str(year))
        for quarter in ['Fall','Winter','Spring','Summer']:
            self.quarter_selector.append_text(quarter)
        self.time_box.pack_start(self.year_selector, True, True, 0)
        self.time_box.pack_start(self.quarter_selector, True, True, 0)
        self.course_type_selector = Gtk.ComboBoxText()
        # Must stay in sync with self.type_map.
        for c_type in ['Major','Support','Concentration','General Ed','Free Elective','Minor']:
            self.course_type_selector.append_text(c_type)
        # Box to hold GE selectors and buttons
        self.ge_box = Gtk.Box()
        self.ge_box.set_margin_top(5)
        self.ge_box.set_margin_bottom(5)
        # Box to hold GE selector
        self.ge_type_selector_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        self.ge_type_selector_box.pack_start(self.generate_ge_selector(), True, True, 0)
        self.ge_box.pack_start(self.ge_type_selector_box, True, True, 0)
        self.ge_buttons_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        self.ge_buttons_box.pack_start(self.create_change_box("ge"), True, True, 0)
        self.ge_box.pack_end(self.ge_buttons_box, True, True, 0)
        self.notes_entry = Gtk.Entry()

    def generate_ge_selector(self):
        """Build a combo box of GE codes.

        The resulting item order ('None', A1..A3, B1..B6, C1..C5, D1..D3,
        'D4/E', 'D5', 'F') must match the indices in self.ge_type_map.
        """
        ge_type_selector = Gtk.ComboBoxText()
        ge_type_selector.append_text('None')
        ge_numbers = {'A':3, 'B':6, 'C':5, 'D':3}
        # Sort by letter so iteration order is deterministic (A, B, C, D).
        ge_numbers = OrderedDict(sorted(ge_numbers.items(), key=lambda t: t[0]))
        for ge_type, how_many in ge_numbers.items():
            for x in range(how_many):
                ge_type_selector.append_text('{}{}'.format(ge_type,x+1))
        ge_type_selector.append_text('D4/E')
        ge_type_selector.append_text('D5')
        ge_type_selector.append_text('F')
        return ge_type_selector

    def attach_items(self):
        """Place labels (column 0) and input widgets (column 1) on the grid."""
        self.attach(self.instructions_label, 0, 0, 2, 1)
        for pos, label in enumerate(self.labels):
            self.attach(label, 0, pos+1, 1, 1)
        self.attach(self.title_entry, 1, 1, 1, 1)
        self.attach(self.catalog_entry, 1, 2, 1, 1)
        self.attach(self.credits_spinner, 1, 3, 1, 1)
        self.attach(self.prereqs_box, 1, 4, 1, 1)
        self.attach(self.time_box, 1, 5, 1, 1)
        self.attach(self.course_type_selector, 1, 6, 1, 1)
        self.attach(self.ge_box, 1, 7, 1, 1)
        self.attach(self.notes_entry, 1, 8, 1, 1)
        # Index of the last occupied grid row.
        self.bottom_row = 8

    def create_change_box(self, button_type="prereq"):
        """Return a box with add/remove buttons wired for the given field type.

        button_type is "prereq" or "ge"; any other value yields unwired buttons.
        """
        change_box = Gtk.Box()
        add_button = Gtk.Button.new_from_icon_name('list-add', ICON_SIZE)
        if button_type == "prereq":
            add_button.connect('clicked', self.add_prereq)
        elif button_type == "ge":
            add_button.connect('clicked', self.add_ge)
        remove_button = Gtk.Button.new_from_icon_name('list-remove', ICON_SIZE)
        if button_type == "prereq":
            remove_button.connect('clicked', self.remove_prereq)
        elif button_type == "ge":
            remove_button.connect('clicked', self.remove_ge)
        change_box.pack_start(add_button, True, True, 0)
        change_box.pack_end(remove_button, True, True, 0)
        return change_box

    def get_entry_values(self):
        """Retrieve course information from the interface.

        Returns a course dict, or False when no course type is selected.
        """
        # This is a required field
        course_type = self.course_type_selector.get_active_text()
        if course_type is None:
            return False
        notes = self.notes_entry.get_text()
        if notes == '':
            notes = None
        prereqs = []
        for entry in self.prereq_box.get_children():
            prereqs.append(entry.get_text())
        # Drop empty prereq entries.
        prereqs = list(filter(None, prereqs))
        ges = []
        for ge in self.ge_type_selector_box.get_children():
            ge_text = ge.get_active_text()
            # The 'None' combo item means "no GE type".
            ge_text = None if ge_text == 'None' else ge_text
            ges.append(ge_text)
        new_course = {
            'title' : self.title_entry.get_text(),
            'catalog': self.catalog_entry.get_text(),
            'credits': self.credits_spinner.get_value(),
            'prereqs': prereqs,
            'time' : (
                int(self.year_selector.get_active_text()),
                self.quarter_selector.get_active_text()
            ),
            'course_type': course_type,
            'ge_type' : ges,
            'notes' : notes
        }
        # NOTE(review): course_id is reset here but never set elsewhere in
        # this class — presumably consumed by the caller; confirm.
        self.course_id = None
        return new_course

    def add_prereq(self, button=None, prereq=None):
        """Create a new prereq entry field."""
        new_entry = Gtk.Entry()
        change_box = self.create_change_box()
        if prereq is not None:
            new_entry.set_text(prereq)
        self.prereq_box.pack_start(new_entry, True, True, 0)
        self.change_buttons_box.pack_end(change_box, True, True, 0)
        self.show_all()

    def add_ge(self, button=None, ge=None):
        """Create a new GE type selector, optionally preset to `ge`."""
        new_selector = self.generate_ge_selector()
        change_box = self.create_change_box("ge")
        if ge is not None:
            new_selector.set_active(self.ge_type_map[ge])
        self.ge_type_selector_box.pack_start(new_selector, True, True, 0)
        self.ge_buttons_box.pack_end(change_box, True, True, 0)
        self.show_all()

    def remove_prereq(self,button):
        # TODO: removal is not implemented yet; this is a placeholder.
        print("I'll remove one eventually!")

    def remove_ge(self,button):
        # TODO: removal is not implemented yet; this is a placeholder.
        print("I'll remove one eventually!")

    def clean_form(self):
        """Clean the form, preserving the year and quarter."""
        for entry in [self.title_entry,self.catalog_entry]:
            entry.set_text('')
        # Reset units to the spinner's minimum.
        self.credits_spinner.set_value(self.credits_spinner.get_range()[0])
        self.course_type_selector.set_active(-1)
        # Destroy every extra GE selector/prereq entry, keep and reset the first.
        for selector in self.ge_type_selector_box.get_children()[1:]:
            selector.destroy()
        self.ge_type_selector_box.get_children()[0].set_active(-1)
        for entry in self.prereq_box.get_children()[1:]:
            entry.destroy()
        self.prereq_box.get_children()[0].set_text('')
        for button in self.change_buttons_box.get_children()[1:]:
            button.destroy()
        for button in self.ge_buttons_box.get_children()[1:]:
            button.destroy()

    def load_entry(self, course):
        """Load course information into the interface."""
        self.title_entry.set_text(course['title'])
        self.catalog_entry.set_text(course['catalog'])
        self.credits_spinner.set_value(course['credits'])
        prereqs = course['prereqs']
        # Prereqs may be stored as a comma-separated string or a list.
        if isinstance(prereqs, str):
            prereqs = list(filter(None, prereqs.split(',')))
        else:
            prereqs = list(filter(None, prereqs))
        if len(prereqs):
            # First prereq goes into the always-present entry; the rest get
            # their own rows.
            self.prereq_box.get_children()[0].set_text(prereqs[0])
            prereqs = prereqs[1:]
            for prereq in prereqs:
                self.add_prereq(prereq=prereq)
        # Combo indices: years start at 1 in the data but 0 in the selector.
        self.year_selector.set_active(course['time'][0]-1)
        self.quarter_selector.set_active(self.quarter_map[course['time'][1]])
        self.course_type_selector.set_active(self.type_map[course['course_type']])
        ge_types = list(filter(None, course['ge_type']))
        if len(ge_types):
            self.ge_type_selector_box.get_children()[0].set_active(self.ge_type_map[ge_types[0]])
            ge_types = ge_types[1:]
            for ge in ge_types:
                self.add_ge(ge=ge)
        if course['notes'] is not None:
            self.notes_entry.set_text(course['notes'])
|
bsd-3-clause
|
YoannDupont/SEM
|
sem/IO/keyIO.py
|
1
|
3886
|
# -*- coding: utf-8 -*-
"""
file: keyIO.py
Description: an IO module for CoNLL-formatted files when column
identifiers are available.
author: Yoann Dupont
MIT License
Copyright (c) 2018 Yoann Dupont
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from sem import PY2
import codecs
class KeyReader(object):
    """Iterate over a CoNLL-like file, yielding one paragraph at a time.

    Each non-empty line is cleaned, split into fields and zipped with
    `keys` into a {key: field} dict; blank lines separate paragraphs.

    Raises ValueError if `keys` is empty.
    """

    # Bug fix: the original default splitter was `str.strip` on Python 3
    # (a copy-paste of the cleaner default), which silently collapsed every
    # line into a single field instead of splitting it.
    def __init__(self, name, encoding, keys, cleaner=(unicode.strip if PY2 else str.strip), splitter=(unicode.split if PY2 else str.split)):
        self._name = name
        self._encoding = encoding
        self._keys = keys
        self._cleaner = cleaner
        self._splitter = splitter
        self._length = None  # lazily computed paragraph count (see `length`)
        if not self._keys:
            raise ValueError("Cannot give empty key set to KeyReader object.")
        # Fall back to the defaults when None is passed explicitly. The
        # original referenced `unicode` unconditionally here, which is a
        # NameError on Python 3.
        if self._cleaner is None:
            self._cleaner = unicode.strip if PY2 else str.strip
        if self._splitter is None:
            self._splitter = unicode.split if PY2 else str.split
        # The following instructions will raise an exception if they are not callable.
        self._cleaner.__call__
        self._splitter.__call__

    @staticmethod
    def identity(obj):
        """Return the argument unchanged (usable as a no-op cleaner/splitter)."""
        return obj

    def __iter__(self):
        """Yield paragraphs: lists of {key: field} dicts."""
        paragraph = []
        # "rU" is rejected by recent Python 3 releases; plain "r" is
        # equivalent for this decoded, line-based read.
        for line in codecs.open(self._name, "r", self._encoding):
            line = self._cleaner(line)
            if line != "":
                line = dict(zip(self._keys, self._splitter(line)))
                paragraph.append(line)
            elif paragraph != []:
                yield paragraph
                del paragraph[:]
        # Emit a trailing paragraph not followed by a blank line.
        if paragraph != []:
            yield paragraph

    @property
    def length(self):
        """Number of paragraphs in the file (computed once, then cached)."""
        if self._length is None:
            self._length = 0
            for x in self:
                self._length += 1
        return self._length
class KeyWriter(object):
    """Write CoNLL-formatted output: one line per entry, one blank line
    between paragraphs.

    `keys` fixes the column order; `joiner` separates columns (None means
    no separator). Usable as a context manager.
    """

    def __init__(self, name, encoding, keys, joiner="\t"):
        self._name = name
        self._encoding = encoding
        self._keys = keys
        # An explicit None joiner means "no separator between fields".
        self._joiner = joiner if joiner is not None else ""
        # Pre-built line template, e.g. "{word}\t{pos}". Built before the
        # file is opened so a bad key list does not leave an empty file.
        self._format = self._joiner.join([u"{{{0}}}".format(key) for key in self._keys])
        self._fd = codecs.open(self._name, "w", self._encoding)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._fd.close()

    def write_l(self, l):
        """Write a single entry (dict of key -> value) as one line."""
        self._fd.write(self._format.format(**l))
        self._fd.write(u"\n")

    def write_p(self, p):
        """Write one paragraph (list of entries) plus its blank separator."""
        for l in p:
            self.write_l(l)
        self._fd.write(u"\n")

    def write(self, entries):
        """Write a sequence of paragraphs."""
        for p in entries:
            self.write_p(p)

    def close(self):
        self._fd.close()
|
mit
|
Thoshh/wapad
|
lib/python2.7/site-packages/django/contrib/postgres/fields/hstore.py
|
151
|
2771
|
import json
from django.contrib.postgres import forms, lookups
from django.contrib.postgres.fields.array import ArrayField
from django.core import exceptions
from django.db.models import Field, TextField, Transform
from django.utils import six
from django.utils.translation import ugettext_lazy as _
__all__ = ['HStoreField']
class HStoreField(Field):
    """Model field backed by the PostgreSQL ``hstore`` extension type:
    a mapping of string keys to string values."""

    empty_strings_allowed = False
    description = _('Map of strings to strings')
    default_error_messages = {
        'not_a_string': _('The value of "%(key)s" is not a string.'),
    }

    def db_type(self, connection):
        # Provided by the PostgreSQL "hstore" extension.
        return 'hstore'

    def get_transform(self, name):
        # Registered transforms win; unknown names become key lookups.
        found = super(HStoreField, self).get_transform(name)
        return found if found else KeyTransformFactory(name)

    def validate(self, value, model_instance):
        super(HStoreField, self).validate(value, model_instance)
        # hstore values must all be strings.
        for key, val in value.items():
            if isinstance(val, six.string_types):
                continue
            raise exceptions.ValidationError(
                self.error_messages['not_a_string'],
                code='not_a_string',
                params={'key': key},
            )

    def to_python(self, value):
        # Deserialization hands us a JSON string; anything else passes through.
        return json.loads(value) if isinstance(value, six.string_types) else value

    def value_to_string(self, obj):
        return json.dumps(self.value_from_object(obj))

    def formfield(self, **kwargs):
        # Callers may still override form_class via kwargs.
        options = {'form_class': forms.HStoreField}
        options.update(kwargs)
        return super(HStoreField, self).formfield(**options)
# Enable the hstore containment and key-presence lookups on HStoreField.
for _lookup in (lookups.DataContains, lookups.ContainedBy, lookups.HasKey,
                lookups.HasKeys, lookups.HasAnyKeys):
    HStoreField.register_lookup(_lookup)
class KeyTransform(Transform):
    """Transform selecting a single hstore value by key (SQL ``->``)."""

    output_field = TextField()

    def __init__(self, key_name, *args, **kwargs):
        super(KeyTransform, self).__init__(*args, **kwargs)
        self.key_name = key_name

    def as_sql(self, compiler, connection):
        lhs, params = compiler.compile(self.lhs)
        # Pass the key as a query parameter instead of interpolating it into
        # the SQL string, so a hostile key name cannot inject SQL (the
        # original formatted self.key_name directly into the statement).
        return "(%s -> %%s)" % lhs, list(params) + [self.key_name]
class KeyTransformFactory(object):
    """Callable that builds KeyTransform instances bound to a fixed key name.

    Returned by HStoreField.get_transform() for dynamic key lookups.
    """

    def __init__(self, key_name):
        self.key_name = key_name

    def __call__(self, *args, **kwargs):
        return KeyTransform(self.key_name, *args, **kwargs)
@HStoreField.register_lookup
class KeysTransform(Transform):
    # Exposes the hstore keys via PostgreSQL's akeys() as a text array
    # (queryable as field__keys).
    lookup_name = 'keys'
    function = 'akeys'
    output_field = ArrayField(TextField())
@HStoreField.register_lookup
class ValuesTransform(Transform):
    # Exposes the hstore values via PostgreSQL's avals() as a text array
    # (queryable as field__values).
    lookup_name = 'values'
    function = 'avals'
    output_field = ArrayField(TextField())
|
mit
|
jpetto/olympia
|
src/olympia/zadmin/decorators.py
|
7
|
1616
|
import functools
from django.core.exceptions import PermissionDenied
from olympia.access.acl import action_allowed
from olympia.amo.decorators import login_required
def admin_required(reviewers=False, theme_reviewers=False):
    """
    Admin, or someone with AdminTools:View, required.

    If reviewers=True ReviewerAdminTools:View is allowed also.
    If theme_reviewers=True SeniorPersonasTools:View is allowed also.

    May be used with or without parentheses: bare ``@admin_required`` passes
    the view function itself as ``reviewers`` (handled at the bottom).
    """
    def decorator(f):
        @login_required
        @functools.wraps(f)
        def wrapper(request, *args, **kw):
            allowed = (action_allowed(request, 'Admin', '%') or
                       action_allowed(request, 'AdminTools', 'View'))
            # The "is True" comparisons guard against the paren-less usage,
            # where `reviewers` is the decorated function (truthy, not True).
            if not allowed and reviewers is True:
                allowed = action_allowed(request, 'ReviewerAdminTools', 'View')
            if not allowed and theme_reviewers is True:
                allowed = action_allowed(request, 'SeniorPersonasTools', 'View')
            if not allowed:
                raise PermissionDenied
            return f(request, *args, **kw)
        return wrapper
    # If decorator has no args, and is "paren-less", it's callable.
    return decorator(reviewers) if callable(reviewers) else decorator
|
bsd-3-clause
|
solintegra/addons
|
portal/tests/__init__.py
|
261
|
1078
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_portal
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
rintoj/ai
|
demo/valueIterationAgents.py
|
1
|
3503
|
# valueIterationAgents.py
# -----------------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
import mdp, util
from learningAgents import ValueEstimationAgent
class ValueIterationAgent(ValueEstimationAgent):
    """
    * Please read learningAgents.py before reading this.*

    A ValueIterationAgent takes a Markov decision process
    (see mdp.py) on initialization and runs value iteration
    for a given number of iterations using the supplied
    discount factor.
    """
    def __init__(self, mdp, discount = 0.9, iterations = 100):
        """
        Your value iteration agent should take an mdp on
        construction, run the indicated number of iterations
        and then act according to the resulting policy.

        Some useful mdp methods you will use:
            mdp.getStates()
            mdp.getPossibleActions(state)
            mdp.getTransitionStatesAndProbs(state, action)
            mdp.getReward(state, action, nextState)
        """
        self.mdp = mdp
        self.discount = discount
        self.iterations = iterations
        self.values = util.Counter() # A Counter is action dict with default 0
        "*** YOUR CODE HERE ***"
        # Batch value iteration: every sweep reads values from the previous
        # iteration (prevBatch), so all states update simultaneously.
        for i in range(iterations):
            self.prevBatch = self.values.copy()
            for state in mdp.getStates():
                qValues = util.Counter()
                for action in mdp.getPossibleActions(state):
                    # Bellman backup: expected immediate reward plus the
                    # discounted value of the successor, weighted by the
                    # transition probability.
                    for (statePrime, tValue) in mdp.getTransitionStatesAndProbs(state, action):
                        qValues[action] += tValue * (mdp.getReward(state, action, statePrime) + self.discount * self.prevBatch[statePrime])
                # V(s) = max_a Q(s, a).
                # NOTE(review): assumes util.Counter.argMax tolerates an empty
                # Counter for action-less (terminal) states — confirm.
                self.values[state] = qValues[qValues.argMax()]

    def getValue(self, state):
        """
        Return the value of the state (computed in __init__).
        """
        return self.values[state]

    def getQValue(self, state, action):
        """
        The q-value of the state action pair
        (after the indicated number of value iteration
        passes).  Note that value iteration does not
        necessarily create this quantity and you may have
        to derive it on the fly.
        """
        "*** YOUR CODE HERE ***"
        # Q(s, a) computed on the fly from the converged state values.
        qValue = 0
        for (sp, tValue) in self.mdp.getTransitionStatesAndProbs(state, action):
            qValue += tValue * (self.mdp.getReward(state, action, sp) + self.discount * self.values[sp] )
        return qValue;
        #util.raiseNotDefined()

    def getPolicy(self, state):
        """
        The policy is the best action in the given state
        according to the values computed by value iteration.
        You may break ties any way you see fit.  Note that if
        there are no legal actions, which is the case at the
        terminal state, you should return None.
        """
        "*** YOUR CODE HERE ***"
        if self.mdp.isTerminal(state) :
            return None
        else:
            # Key the Counter by the action's index so argMax maps directly
            # back into the actions list.
            qValues = util.Counter()
            actions = self.mdp.getPossibleActions(state)
            for action in actions:
                qValues[actions.index(action)] = self.getQValue(state, action)
            return actions[qValues.argMax()];
        #util.raiseNotDefined()

    def getAction(self, state):
        "Returns the policy at the state (no exploration)."
        return self.getPolicy(state)
|
mit
|
MISP/misp-modules
|
misp_modules/modules/import_mod/cuckooimport.py
|
2
|
26979
|
import base64
import io
import json
import logging
import os
import posixpath
import stat
import tarfile
import zipfile
from collections import OrderedDict

from pymisp import MISPEvent, MISPObject, MISPAttribute
from pymisp.tools import make_binary_objects
# Module-level logger for this import module.
log = logging.getLogger(__name__)

# Default error payload returned to MISP when the import fails.
misperrors = {'error': 'Error'}

moduleinfo = {
    'version': '1.1',
    'author': 'Pierre-Jean Grenier',
    'description': "Import a Cuckoo archive (zipfile or bzip2 tarball), "
                   "either downloaded manually or exported from the "
                   "API (/tasks/report/{task_id}/all).",
    'module-type': ['import'],
}

moduleconfig = []

mispattributes = {
    'inputSource': ['file'],
    'output': ['MISP objects', 'malware-sample'],
    'format': 'misp_standard',
}

# Attributes for which we can set the "Artifacts dropped"
# category if we want to
ARTIFACTS_DROPPED = (
    "filename",
    "md5",
    "sha1",
    "sha256",
    "sha512",
    "malware-sample",
    "mimetype",
    "ssdeep",
)

# Same for the category "Payload delivery"
PAYLOAD_DELIVERY = ARTIFACTS_DROPPED
class PrettyDict(OrderedDict):
    """
    This class is just intended for a pretty print
    of its keys and values.
    """
    # Values longer than MAX_SIZE characters are truncated in __str__ and
    # their key is suffixed with ',cut'.
    MAX_SIZE = 30

    def __str__(self):
        """Render as '; '-joined '(key) value' pairs, truncating long values."""
        tmp = []
        for k, v in self.items():
            v = str(v)
            if len(v) > self.MAX_SIZE:
                k += ',cut'
                v = v[:self.MAX_SIZE]
                # Bug fix: str.replace returns a new string; the original
                # discarded the result, leaving newlines in the output.
                v = v.replace('\n', ' ')
            tmp.append((k, v))
        return "; ".join(f"({k}) {v}" for k, v in tmp)
def search_objects(event, name, attributes=()):
    """
    Search for objects in event whose name is `name` and which
    contain at least the attributes given.
    Return a generator.
    @ param attributes: a list of (object_relation, value)
    """
    def _matches(obj):
        # Bug fix: the original folded the name test inside all() over
        # `attributes`, so an empty attribute list matched every object
        # regardless of its name (all() over an empty iterable is True).
        if obj.name != name:
            return False
        # Values are compared as strings, as in the original.
        present = {(attr.object_relation, str(attr.value)) for attr in obj.attributes}
        return all((relation, str(value)) in present
                   for relation, value in attributes)
    return filter(_matches, event.objects)
def find_process_by_pid(event, pid):
    """
    Find a 'process' MISPObject by its PID. If multiple objects are found,
    only return the first one.
    @ param pid: integer or str
    """
    candidates = search_objects(event, "process", (('pid', pid),))
    return next(candidates, None)
class CuckooParser():
# This dict is used to generate the userConfig and link the different
# options to the corresponding method of the parser. This way, we avoid
# redundancy and make future changes easier (instead of for instance
# defining all the options in userConfig directly, and then making a
# switch when running the parser).
# Careful about the order here, as we create references between
# MISPObjects/MISPAttributes at the same time we generate them.
# Hence when we create object B, which we want to reference to
# object A, we should already have created object A.
# TODO create references only after all parsing is done
options = {
"Sandbox info": {
"method": lambda self: self.add_sandbox_info(),
"userConfig": {
'type': 'Boolean',
'message': "Add info related to the sandbox",
'checked': 'true',
},
},
"Upload sample": {
"method": lambda self: self.add_sample(),
"userConfig": {
'type': 'Boolean',
'message': "Upload the sample",
'checked': 'true',
},
},
"Processes": {
"method": lambda self: self.add_process_tree(),
"userConfig": {
'type': 'Boolean',
'message': "Add info related to the processes",
'checked': 'true',
},
},
"DNS": {
"method": lambda self: self.add_dns(),
"userConfig": {
'type': 'Boolean',
'message': "Add DNS queries/answers",
'checked': 'true',
},
},
"TCP": {
"method": lambda self: self.add_network("tcp"),
"userConfig": {
'type': 'Boolean',
'message': "Add TCP connections",
'checked': 'true',
},
},
"UDP": {
"method": lambda self: self.add_network("udp"),
"userConfig": {
'type': 'Boolean',
'message': "Add UDP connections",
'checked': 'true',
},
},
"HTTP": {
"method": lambda self: self.add_http(),
"userConfig": {
'type': 'Boolean',
'message': "Add HTTP requests",
'checked': 'true',
},
},
"Signatures": {
"method": lambda self: self.add_signatures(),
"userConfig": {
'type': 'Boolean',
'message': "Add Cuckoo's triggered signatures",
'checked': 'true',
},
},
"Screenshots": {
"method": lambda self: self.add_screenshots(),
"userConfig": {
'type': 'Boolean',
'message': "Upload the screenshots",
'checked': 'true',
},
},
"Dropped files": {
"method": lambda self: self.add_dropped_files(),
"userConfig": {
'type': 'Boolean',
'message': "Upload the dropped files",
'checked': 'true',
},
},
"Dropped buffers": {
"method": lambda self: self.add_dropped_buffers(),
"userConfig": {
'type': 'Boolean',
'message': "Upload the dropped buffers",
'checked': 'true',
},
},
}
    def __init__(self, config):
        """Initialize the parser state from the user-supplied option dict.

        `config` maps option names (keys of `options`) to truthy/None values.
        """
        self.event = MISPEvent()
        self.files = None           # archive path -> file-like object (set by read_archive)
        self.malware_binary = None  # raw sample bytes (set by read_malware)
        self.report = None          # parsed reports/report.json (set by read_archive)
        self.config = {
            # if an option is missing (we receive None as a value),
            # fall back to the default specified in the options
            key: int(
                on if on is not None
                else self.options[key]["userConfig"]["checked"] == 'true'
            )
            for key, on in config.items()
        }
def get_file(self, relative_filepath):
"""Return an io.BufferedIOBase for the corresponding relative_filepath
in the Cuckoo archive. If not found, return an empty io.BufferedReader
to avoid fatal errors."""
blackhole = io.BufferedReader(open('/dev/null', 'rb'))
res = self.files.get(relative_filepath, blackhole)
if res == blackhole:
log.debug(f"Did not find file {relative_filepath}, "
f"returned an empty file instead")
return res
    def read_archive(self, archive_encoded):
        """Read the archive exported from Cuckoo and initialize the class

        @ param archive_encoded: base64-encoded zip or bzip2-tar archive.
        Populates self.files (path -> file object) and self.report.
        """
        # archive_encoded is base 64 encoded content
        # we extract the info about each file but do not retrieve
        # it automatically, as it may take too much space in memory
        buf_io = io.BytesIO(base64.b64decode(archive_encoded))

        if zipfile.is_zipfile(buf_io):
            # the archive was probably downloaded from the WebUI
            buf_io.seek(0)  # don't forget this not to read an empty buffer
            z = zipfile.ZipFile(buf_io, 'r')
            self.files = {
                info.filename: z.open(info)
                for info in z.filelist
                # only extract the regular files and dirs, we don't
                # want any symbolic link
                if stat.S_ISREG(info.external_attr >> 16)
                or stat.S_ISDIR(info.external_attr >> 16)
            }
        else:
            # the archive was probably downloaded from the API
            buf_io.seek(0)  # don't forget this not to read an empty buffer
            f = tarfile.open(fileobj=buf_io, mode='r:bz2')
            self.files = {
                info.name: f.extractfile(info)
                for info in f.getmembers()
                # only extract the regular files and dirs, we don't
                # want any symbolic link
                if info.isreg() or info.isdir()
            }

        # We want to keep the order of the keys of sub-dicts in the report,
        # eg. the signatures have marks with unknown keys such as
        # {'marks': [
        #     {"suspicious_features": "Connection to IP address",
        #      "suspicious_request": "OPTIONS http://85.20.18.18/doc"}
        # ]}
        # To render those marks properly, we can only hope the developpers
        # thought about the order in which they put the keys, and keep this
        # order so that the signature makes sense to the reader.
        # We use PrettyDict, a customization of OrderedDict to do so.
        # It will be instanced iteratively when parsing the json (ie. subdicts
        # will also be instanced as PrettyDict)
        self.report = json.load(
            self.get_file("reports/report.json"),
            object_pairs_hook=PrettyDict,
        )
def read_malware(self):
self.malware_binary = self.get_file("binary").read()
if not self.malware_binary:
log.warn("No malware binary found")
    def add_sandbox_info(self):
        """Add a 'sandbox-report' MISP object summarizing the analysis run.

        Returns False when the report has no 'info' section.
        """
        info = self.report.get("info", {})
        if not info:
            log.warning("The 'info' field was not found "
                        "in the report, skipping")
            return False
        o = MISPObject(name='sandbox-report')
        o.add_attribute('score', info['score'])
        o.add_attribute('sandbox-type', 'on-premise')
        o.add_attribute('on-premise-sandbox', 'cuckoo')
        # Condensed run metadata: start time, duration, analysis VM.
        o.add_attribute('raw-report',
                        f'started on:{info["machine"]["started_on"]} '
                        f'duration:{info["duration"]}s '
                        f'vm:{info["machine"]["name"]}/'
                        f'{info["machine"]["label"]}')
        self.event.add_object(o)
    def add_sample(self):
        """Add the sample/target of the analysis

        A file target becomes file/binary-type/binary-section objects; a URL
        target becomes a 'url' object. Returns False when no target info is
        present in the report.
        """
        target = self.report.get("target", {})
        category = target.get("category", "")
        if not category:
            log.warning("Could not find info about the sample "
                        "in the report, skipping")
            return False

        if category == "file":
            log.debug("Sample is a file, uploading it")
            self.read_malware()
            file_o, bin_type_o, bin_section_li = make_binary_objects(
                pseudofile=io.BytesIO(self.malware_binary),
                filename=target["file"]["name"],
            )
            file_o.comment = "Submitted sample"
            # fix categories
            for obj in filter(None, (file_o, bin_type_o, *bin_section_li,)):
                for attr in obj.attributes:
                    if attr.type in PAYLOAD_DELIVERY:
                        attr.category = "Payload delivery"
                self.event.add_object(obj)
        elif category == "url":
            log.debug("Sample is a URL")
            o = MISPObject(name='url')
            o.add_attribute('url', target['url'])
            o.add_attribute('text', "Submitted URL")
            self.event.add_object(o)
def add_http(self):
"""Add the HTTP requests"""
network = self.report.get("network", [])
http = network.get("http", [])
if not http:
log.info("No HTTP connection found in the report, skipping")
return False
for request in http:
o = MISPObject(name='http-request')
o.add_attribute('host', request['host'])
o.add_attribute('method', request['method'])
o.add_attribute('uri', request['uri'])
o.add_attribute('user-agent', request['user-agent'])
o.add_attribute('text', f"count:{request['count']} "
f"port:{request['port']}")
self.event.add_object(o)
def add_network(self, proto=None):
"""
Add UDP/TCP traffic
proto must be one of "tcp", "udp"
"""
network = self.report.get("network", [])
li_conn = network.get(proto, [])
if not li_conn:
log.info(f"No {proto} connection found in the report, skipping")
return False
from_to = []
# sort by time to get the "first packet seen" right
li_conn.sort(key=lambda x: x["time"])
for conn in li_conn:
src = conn['src']
dst = conn['dst']
sport = conn['sport']
dport = conn['dport']
if (src, sport, dst, dport) in from_to:
continue
from_to.append((src, sport, dst, dport))
o = MISPObject(name='network-connection')
o.add_attribute('ip-src', src)
o.add_attribute('ip-dst', dst)
o.add_attribute('src-port', sport)
o.add_attribute('dst-port', dport)
o.add_attribute('layer3-protocol', "IP")
o.add_attribute('layer4-protocol', proto.upper())
o.add_attribute('first-packet-seen', conn['time'])
self.event.add_object(o)
def add_dns(self):
"""Add DNS records"""
network = self.report.get("network", [])
dns = network.get("dns", [])
if not dns:
log.info("No DNS connection found in the report, skipping")
return False
for record in dns:
o = MISPObject(name='dns-record')
o.add_attribute('text', f"request type:{record['type']}")
o.add_attribute('queried-domain', record['request'])
for answer in record.get("answers", []):
if answer["type"] in ("A", "AAAA"):
o.add_attribute('a-record', answer['data'])
# TODO implement MX/NS
self.event.add_object(o)
def _get_marks_str(self, marks):
marks_strings = []
for m in marks:
m_type = m.pop("type") # temporarily remove the type
if m_type == "generic":
marks_strings.append(str(m))
elif m_type == "ioc":
marks_strings.append(m['ioc'])
elif m_type == "call":
call = m["call"]
arguments = call.get("arguments", {})
flags = call.get("flags", {})
info = ""
for details in (arguments, flags):
info += f" {details}"
marks_strings.append(f"Call API '{call['api']}'%s" % info)
else:
logging.debug(f"Unknown mark type '{m_type}', skipping")
m["type"] = m_type # restore key 'type'
# TODO implemented marks 'config' and 'volatility'
return marks_strings
    def _add_ttp(self, attribute, ttp_short, ttp_num):
        """
        Internal wrapper to add the TTP tag from the MITRE galaxy.
        @ params
        - attribute: MISPAttribute
        - ttp_short: short description of the TTP
          (eg. "Credential Dumping")
        - ttp_num: formatted as "T"+int
          (eg. T1003)
        """
        # Tag format follows the misp-galaxy mitre-attack-pattern convention.
        attribute.add_tag(f'misp-galaxy:mitre-attack-pattern='
                          f'"{ttp_short} - {ttp_num}"')
def add_signatures(self):
"""Add the Cuckoo signatures, with as many details as possible
regarding the marks"""
signatures = self.report.get("signatures", [])
if not signatures:
log.info("No signature found in the report")
return False
o = MISPObject(name='sb-signature')
o.add_attribute('software', "Cuckoo")
for sign in signatures:
marks = sign["marks"]
marks_strings = self._get_marks_str(marks)
summary = sign['description']
if marks_strings:
summary += "\n---\n"
marks_strings = set(marks_strings)
description = summary + "\n".join(marks_strings)
a = MISPAttribute()
a.from_dict(type='text', value=description)
for ttp_num, desc in sign.get("ttp", {}).items():
ttp_short = desc["short"]
self._add_ttp(a, ttp_short, ttp_num)
# this signature was triggered by the processes with the following
# PIDs, we can create references
triggered_by_pids = filter(
None,
(m.get("pid", None) for m in marks)
)
# remove redundancy
triggered_by_pids = set(triggered_by_pids)
for pid in triggered_by_pids:
process_o = find_process_by_pid(self.event, pid)
if process_o:
process_o.add_reference(a, "triggers")
o.add_attribute('signature', **a)
self.event.add_object(o)
def _handle_process(self, proc, accu):
"""
This is an internal recursive function to handle one process
from a process tree and then iterate on its children.
List the objects to be added, based on the tree, into the `accu` list.
The `accu` list uses a DFS-like order.
"""
o = MISPObject(name='process')
accu.append(o)
o.add_attribute('pid', proc['pid'])
o.add_attribute('command-line', proc['command_line'])
o.add_attribute('name', proc['process_name'])
o.add_attribute('parent-pid', proc['ppid'])
for child in proc.get('children', []):
pos_child = len(accu)
o.add_attribute('child-pid', child['pid'])
self._handle_process(child, accu)
child_obj = accu[pos_child]
child_obj.add_reference(o, 'child-of')
return o
def add_process_tree(self):
"""Add process tree from the report, as separated process objects"""
behavior = self.report.get("behavior", {})
tree = behavior.get("processtree", [])
if not tree:
log.warning("No process tree found in the report, skipping")
return False
for proc in tree:
objs = []
self._handle_process(proc, objs)
for o in objs:
self.event.add_object(o)
def get_relpath(self, path):
"""
Transform an absolute or relative path into a path relative to the
correct cuckoo analysis directory, without knowing the cuckoo
working directory.
Return an empty string if the path given does not refer to a
file from the analysis directory.
"""
head, tail = posixpath.split(path)
if not tail:
return ""
prev = self.get_relpath(head)
longer = posixpath.join(prev, tail)
if longer in self.files:
return longer
elif tail in self.files:
return tail
else:
return ""
def add_screenshots(self):
"""Add the screenshots taken by Cuckoo in a sandbox-report object"""
screenshots = self.report.get('screenshots', [])
if not screenshots:
log.info("No screenshot found in the report, skipping")
return False
o = MISPObject(name='sandbox-report')
o.add_attribute('sandbox-type', 'on-premise')
o.add_attribute('on-premise-sandbox', "cuckoo")
for shot in screenshots:
# The path given by Cuckoo is an absolute path, but we need a path
# relative to the analysis folder.
path = self.get_relpath(shot['path'])
img = self.get_file(path)
# .decode('utf-8') in order to avoid the b'' format
img_data = base64.b64encode(img.read()).decode('utf-8')
filename = posixpath.basename(path)
o.add_attribute(
"sandbox-file", value=filename,
data=img_data, type='attachment',
category="External analysis",
)
self.event.add_object(o)
def _get_dropped_objs(self, path, filename=None, comment=None):
"""
Internal wrapper to get dropped files/buffers as file objects
@ params
- path: relative to the cuckoo analysis directory
- filename: if not specified, deduced from the path
"""
if not filename:
filename = posixpath.basename(path)
dropped_file = self.get_file(path)
dropped_binary = io.BytesIO(dropped_file.read())
# create ad hoc objects
file_o, bin_type_o, bin_section_li = make_binary_objects(
pseudofile=dropped_binary, filename=filename,
)
if comment:
file_o.comment = comment
# fix categories
for obj in filter(None, (file_o, bin_type_o, *bin_section_li,)):
for attr in obj.attributes:
if attr.type in ARTIFACTS_DROPPED:
attr.category = "Artifacts dropped"
return file_o, bin_type_o, bin_section_li
def _add_yara(self, obj, yara_dict):
"""Internal wrapper to add Yara matches to an MISPObject"""
for yara in yara_dict:
description = yara.get("meta", {}).get("description", "")
name = yara.get("name", "")
obj.add_attribute(
"text",
f"Yara match\n(name) {name}\n(description) {description}",
comment="Yara match"
)
    def add_dropped_files(self):
        """Upload the dropped files as file objects.

        Tries to restore the original (case-preserving) filename from the
        on-VM filepath, and references the process object(s) that dropped
        each file when they exist on the event.
        """
        dropped = self.report.get("dropped", [])
        if not dropped:
            log.info("No dropped file found, skipping")
            return False
        for d in dropped:
            # Cuckoo logs three things that are of interest for us:
            # - 'filename' which is not the original name of the file
            #   but is formatted as follows:
            #   8 first bytes of SHA256 + _ + original name in lower case
            # - 'filepath' which is the original filepath on the VM,
            #   where the file was dropped
            # - 'path' which is the local path of the stored file,
            #   in the cuckoo archive
            filename = d.get("name", "")
            original_path = d.get("filepath", "")
            sha256 = d.get("sha256", "")
            if original_path and sha256:
                log.debug(f"Will now try to restore original filename from "
                          f"path {original_path}")
                try:
                    # NOTE(review): str.split never returns an empty list,
                    # so the guard below is effectively unreachable.
                    s = filename.split("_")
                    if not s:
                        raise Exception("unexpected filename read "
                                        "in the report")
                    sha256_first_8_bytes = s[0]
                    # NOTE(review): s[1] keeps only the part up to the next
                    # underscore -- original names containing '_' may not be
                    # restored; confirm against Cuckoo's naming scheme.
                    original_name = s[1]
                    # check our assumptions are valid, if so we can safely
                    # restore the filename, if not the format may have changed
                    # so we'll keep the filename of the report
                    if sha256.startswith(sha256_first_8_bytes) and \
                        original_path.lower().endswith(original_name) and \
                        filename not in original_path.lower():
                        # we can restore the original case of the filename
                        position = original_path.lower().rindex(original_name)
                        filename = original_path[position:]
                        log.debug(f"Successfully restored original filename: "
                                  f"(unknown)")
                    else:
                        raise Exception("our assumptions were wrong, "
                                        "filename format may have changed")
                except Exception as e:
                    # best-effort restoration: keep the report's filename
                    log.debug(f"Cannot restore filename: {e}")
            if not filename:
                filename = "NO NAME FOUND IN THE REPORT"
                log.warning(f'No filename found for dropped file! '
                            f'Will use "(unknown)"')
            file_o, bin_type_o, bin_section_o = self._get_dropped_objs(
                self.get_relpath(d['path']),
                filename=filename,
                comment="Dropped file"
            )
            self._add_yara(file_o, d.get("yara", []))
            file_o.add_attribute("fullpath", original_path,
                                 category="Artifacts dropped")
            # why is this a list? for when various programs drop the same file?
            for pid in d.get("pids", []):
                # if we have an object for the process that dropped the file,
                # we can link the two (we just take the first result from
                # the search)
                process_o = find_process_by_pid(self.event, pid)
                if process_o:
                    file_o.add_reference(process_o, "dropped-by")
            self.event.add_object(file_o)
def add_dropped_buffers(self):
""""Upload the dropped buffers as file objects"""
buffer = self.report.get("buffer", [])
if not buffer:
log.info("No dropped buffer found, skipping")
return False
for i, buf in enumerate(buffer):
file_o, bin_type_o, bin_section_o = self._get_dropped_objs(
self.get_relpath(buf['path']),
filename=f"buffer {i}",
comment="Dropped buffer"
)
self._add_yara(file_o, buf.get("yara", []))
self.event.add_object(file_o)
def parse(self):
"""Run the parsing"""
for name, active in self.config.items():
if active:
self.options[name]["method"](self)
def get_misp_event(self):
log.debug("Running MISP expansions")
self.event.run_expansions()
return self.event
def handler(q=False):
    """Module entry point: parse the request, run the Cuckoo parser and
    return the extracted attributes/objects."""
    # In case there's no data
    if q is False:
        return False
    request = json.loads(q)
    data = request['data']
    parser = CuckooParser(request['config'])
    parser.read_archive(data)
    parser.parse()
    event = json.loads(parser.get_misp_event().to_json())
    results = {}
    for key in ('Attribute', 'Object'):
        # only forward non-empty sections
        if event.get(key):
            results[key] = event[key]
    return {'results': results}
def introspection():
    """Expose the parser's per-step options as the module user config."""
    mispattributes['userConfig'] = {
        name: option["userConfig"]
        for name, option in CuckooParser.options.items()
    }
    return mispattributes
def version():
    """Return the module metadata, advertising the configurable options."""
    info = moduleinfo
    info['config'] = moduleconfig
    return info
|
agpl-3.0
|
kswiat/django
|
django/db/migrations/migration.py
|
29
|
6933
|
from __future__ import unicode_literals
from django.db.transaction import atomic
class Migration(object):
    """
    The base class for all migrations.
    Migration files will import this from django.db.migrations.Migration
    and subclass it as a class called Migration. It will have one or more
    of the following attributes:
    - operations: A list of Operation instances, probably from django.db.migrations.operations
    - dependencies: A list of tuples of (app_path, migration_name)
    - run_before: A list of tuples of (app_path, migration_name)
    - replaces: A list of migration_names
    Note that all migrations come out of migrations and into the Loader or
    Graph as instances, having been initialized with their app label and name.
    """

    # Operations to apply during this migration, in order.
    operations = []

    # Other migrations that should be run before this migration.
    # Should be a list of (app, migration_name).
    dependencies = []

    # Other migrations that should be run after this one (i.e. have
    # this migration added to their dependencies). Useful to make third-party
    # apps' migrations run after your AUTH_USER replacement, for example.
    run_before = []

    # Migration names in this app that this migration replaces. If this is
    # non-empty, this migration will only be applied if all these migrations
    # are not applied.
    replaces = []

    # Error class which is raised when a migration is irreversible
    class IrreversibleError(RuntimeError):
        pass

    def __init__(self, name, app_label):
        """Initialize with the migration's name and its app label."""
        self.name = name
        self.app_label = app_label
        # Copy dependencies & other attrs as we might mutate them at runtime
        self.operations = list(self.__class__.operations)
        self.dependencies = list(self.__class__.dependencies)
        self.run_before = list(self.__class__.run_before)
        self.replaces = list(self.__class__.replaces)

    def __eq__(self, other):
        # Migrations are identified by (app_label, name).
        if not isinstance(other, Migration):
            return False
        return (self.name == other.name) and (self.app_label == other.app_label)

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return "<Migration %s.%s>" % (self.app_label, self.name)

    def __str__(self):
        return "%s.%s" % (self.app_label, self.name)

    def __hash__(self):
        # Hash on the same identity as __eq__ ("app_label.name").
        return hash("%s.%s" % (self.app_label, self.name))

    def mutate_state(self, project_state):
        """
        Takes a ProjectState and returns a new one with the migration's
        operations applied to it.
        """
        new_state = project_state.clone()
        for operation in self.operations:
            operation.state_forwards(self.app_label, new_state)
        return new_state

    def apply(self, project_state, schema_editor, collect_sql=False):
        """
        Takes a project_state representing all migrations prior to this one
        and a schema_editor for a live database and applies the migration
        in a forwards order.
        Returns the resulting project state for efficient re-use by following
        Migrations.
        """
        for operation in self.operations:
            # If this operation cannot be represented as SQL, place a comment
            # there instead
            if collect_sql and not operation.reduces_to_sql:
                schema_editor.collected_sql.append("--")
                schema_editor.collected_sql.append("-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE WRITTEN AS SQL:")
                schema_editor.collected_sql.append("-- %s" % operation.describe())
                schema_editor.collected_sql.append("--")
                continue
            # Get the state after the operation has run
            new_state = project_state.clone()
            operation.state_forwards(self.app_label, new_state)
            # Run the operation
            if not schema_editor.connection.features.can_rollback_ddl and operation.atomic:
                # We're forcing a transaction on a non-transactional-DDL backend
                with atomic(schema_editor.connection.alias):
                    operation.database_forwards(self.app_label, schema_editor, project_state, new_state)
            else:
                # Normal behaviour
                operation.database_forwards(self.app_label, schema_editor, project_state, new_state)
            # Switch states so each operation sees the state produced by
            # its predecessor
            project_state = new_state
        return project_state

    def unapply(self, project_state, schema_editor, collect_sql=False):
        """
        Takes a project_state representing all migrations prior to this one
        and a schema_editor for a live database and applies the migration
        in a reverse order.
        """
        # We need to pre-calculate the stack of project states
        to_run = []
        for operation in self.operations:
            # If this operation cannot be represented as SQL, place a comment
            # there instead
            if collect_sql and not operation.reduces_to_sql:
                schema_editor.collected_sql.append("--")
                schema_editor.collected_sql.append("-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE WRITTEN AS SQL:")
                schema_editor.collected_sql.append("-- %s" % operation.describe())
                schema_editor.collected_sql.append("--")
                continue
            # If it's irreversible, error out
            if not operation.reversible:
                raise Migration.IrreversibleError("Operation %s in %s is not reversible" % (operation, self))
            new_state = project_state.clone()
            operation.state_forwards(self.app_label, new_state)
            # Record (operation, state-before, state-after) so the DDL can
            # be reversed with the correct from/to states below.
            to_run.append((operation, project_state, new_state))
            project_state = new_state
        # Now run them in reverse
        to_run.reverse()
        for operation, to_state, from_state in to_run:
            if not schema_editor.connection.features.can_rollback_ddl and operation.atomic:
                # We're forcing a transaction on a non-transactional-DDL backend
                with atomic(schema_editor.connection.alias):
                    operation.database_backwards(self.app_label, schema_editor, from_state, to_state)
            else:
                # Normal behaviour
                operation.database_backwards(self.app_label, schema_editor, from_state, to_state)
        return project_state
class SwappableTuple(tuple):
    """
    Subclass of tuple so Django can tell this was originally a swappable
    dependency when it reads the migration file.
    """

    def __new__(cls, value, setting):
        # tuples are immutable, so the payload must be set in __new__
        instance = super(SwappableTuple, cls).__new__(cls, value)
        instance.setting = setting
        return instance
def swappable_dependency(value):
    """
    Turns a setting value into a dependency.
    """
    app_label = value.split(".", 1)[0]
    return SwappableTuple((app_label, "__first__"), value)
|
bsd-3-clause
|
jmcarbo/openerp7
|
openerp/addons/stock_location/__init__.py
|
68
|
1101
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock_location
import procurement_pull
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
acrosby/pyoos
|
setup.py
|
1
|
1593
|
from __future__ import with_statement
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
from pyoos import __version__
def readme():
    """Return the contents of README.md (used as the long description)."""
    with open('README.md') as readme_file:
        return readme_file.read()
# Read the dependency pins. Use a context manager so the file handle is
# closed deterministically instead of leaking until garbage collection.
with open('requirements.txt') as req_file:
    reqs = [line.strip() for line in req_file]
class PyTest(TestCommand):
    # setuptools 'test' command that delegates to pytest,
    # enabling `python setup.py test`.

    def finalize_options(self):
        TestCommand.finalize_options(self)
        # no extra CLI args are forwarded to pytest
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # lazy import: pytest is only needed when the test command runs
        import pytest
        errno = pytest.main(self.test_args)
        # propagate pytest's exit status to the shell
        sys.exit(errno)
# Package metadata; install_requires is read from requirements.txt above.
setup(
    name                 = "pyoos",
    version              = __version__,
    description          = "A Python library for collecting Met/Ocean observations",
    long_description     = readme(),
    license              = 'GPLv3',
    author               = "Kyle Wilcox",
    author_email         = "kwilcox@sasascience.com",
    url                  = "https://github.com/asascience-open/pyoos",
    packages             = find_packages(),
    install_requires     = reqs,
    tests_require        = ['pytest'],
    cmdclass             = {'test': PyTest},
    classifiers          = [
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Topic :: Scientific/Engineering',
    ],
    include_package_data = True,
)
|
gpl-3.0
|
ramsateesh/designate
|
designate/backend/agent_backend/base.py
|
7
|
1619
|
# Copyright 2014 Rackspace Inc.
#
# Author: Tim Simmons <tim.simmons@rackspace.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_log import log as logging
from designate.plugin import DriverPlugin
LOG = logging.getLogger(__name__)
class AgentBackend(DriverPlugin):
    """Base class for backend implementations.

    Concrete backends are loaded through the stevedore namespace below
    and must implement the abstract domain operations.
    """
    __plugin_type__ = 'backend'
    __plugin_ns__ = 'designate.backend.agent_backend'

    def __init__(self, agent_service):
        super(AgentBackend, self).__init__()
        self.agent_service = agent_service

    def start(self):
        # optional lifecycle hook; no-op by default
        pass

    def stop(self):
        # optional lifecycle hook; no-op by default
        pass

    @abc.abstractmethod
    def find_domain_serial(self, domain_name):
        """Find a DNS Domain"""

    @abc.abstractmethod
    def create_domain(self, domain):
        """Create a DNS domain.

        :param domain: a DNSPython Zone object
        """

    @abc.abstractmethod
    def update_domain(self, domain):
        """Update a DNS domain.

        :param domain: a DNSPython Zone object
        """

    @abc.abstractmethod
    def delete_domain(self, domain_name):
        """Delete a DNS domain"""
|
apache-2.0
|
jaddison/ansible
|
lib/ansible/plugins/action/patch.py
|
67
|
2387
|
# (c) 2015, Brian Coca <briancoca+dev@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
class ActionModule(ActionBase):

    def run(self, tmp=None, task_vars=None):
        """Transfer the patch `src` file to the remote host (unless
        `remote_src` is set) and delegate to the `patch` module."""
        # Avoid a mutable default argument: a shared dict() default would
        # leak state between calls. None keeps the call signature
        # backward-compatible.
        if task_vars is None:
            task_vars = dict()

        src = self._task.args.get('src', None)
        remote_src = boolean(self._task.args.get('remote_src', 'no'))

        if src is None:
            return dict(failed=True, msg="src is required")
        elif remote_src:
            # everything is remote, so we just execute the module
            # without changing any of the module arguments
            return self._execute_module(task_vars=task_vars)

        # resolve the patch file relative to the role or the playbook basedir
        if self._task._role is not None:
            src = self._loader.path_dwim_relative(self._task._role._role_path, 'files', src)
        else:
            src = self._loader.path_dwim_relative(self._loader.get_basedir(), 'files', src)

        # create the remote tmp dir if needed, and put the source file there
        if tmp is None or "-tmp-" not in tmp:
            tmp = self._make_tmp_path()

        tmp_src = self._connection._shell.join_path(tmp, os.path.basename(src))
        self._connection.put_file(src, tmp_src)

        if self._play_context.become and self._play_context.become_user != 'root':
            if not self._play_context.check_mode:
                # make the uploaded file readable by the become user
                self._remote_chmod('a+r', tmp_src, tmp)

        # run the patch module against the uploaded copy
        new_module_args = self._task.args.copy()
        new_module_args.update(
            dict(
                src=tmp_src,
            )
        )
        return self._execute_module('patch', module_args=new_module_args, task_vars=task_vars)
|
gpl-3.0
|
amiguez/youtube-dl
|
youtube_dl/extractor/ndr.py
|
74
|
4573
|
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
qualities,
parse_duration,
)
class NDRBaseIE(InfoExtractor):
    # Shared extraction logic for NDR.de and N-JOY pages; subclasses only
    # provide the _VALID_URL pattern.

    def _real_extract(self, url):
        """Scrape title/description/duration and the media formats from
        the page's inline player configuration."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        page = self._download_webpage(url, video_id, 'Downloading page')

        title = self._og_search_title(page).strip()
        description = self._og_search_description(page)
        if description:
            description = description.strip()

        # duration is either given in seconds in a JS snippet, or as a
        # "min:sec" markup fragment that parse_duration can handle
        duration = int_or_none(self._html_search_regex(r'duration: (\d+),\n', page, 'duration', default=None))
        if not duration:
            duration = parse_duration(self._html_search_regex(
                r'(<span class="min">\d+</span>:<span class="sec">\d+</span>)',
                page, 'duration', default=None))

        formats = []

        # audio-only pages embed a single mp3 source
        mp3_url = re.search(r'''\{src:'(?P<audio>[^']+)', type:"audio/mp3"},''', page)
        if mp3_url:
            formats.append({
                'url': mp3_url.group('audio'),
                'format_id': 'mp3',
            })

        thumbnail = None

        video_url = re.search(r'''3: \{src:'(?P<video>.+?)\.(lo|hi|hq)\.mp4', type:"video/mp4"},''', page)
        if video_url:
            # pick the largest available thumbnail by quality label
            thumbnails = re.findall(r'''\d+: \{src: "([^"]+)"(?: \|\| '[^']+')?, quality: '([^']+)'}''', page)
            if thumbnails:
                quality_key = qualities(['xs', 's', 'm', 'l', 'xl'])
                largest = max(thumbnails, key=lambda thumb: quality_key(thumb[1]))
                thumbnail = 'http://www.ndr.de' + largest[0]

            # the player exposes three fixed quality variants
            for format_id in 'lo', 'hi', 'hq':
                formats.append({
                    'url': '%s.%s.mp4' % (video_url.group('video'), format_id),
                    'format_id': format_id,
                })

        if not formats:
            raise ExtractorError('No media links available for %s' % video_id)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        }
class NDRIE(NDRBaseIE):
    IE_NAME = 'ndr'
    IE_DESC = 'NDR.de - Mediathek'
    # any NDR.de page whose URL ends in a numeric id
    _VALID_URL = r'https?://www\.ndr\.de/.+?(?P<id>\d+)\.html'

    # Extraction lives in NDRBaseIE; these fixtures cover a removed video
    # (skipped), a regular video page and an audio-only page.
    _TESTS = [
        {
            'url': 'http://www.ndr.de/fernsehen/sendungen/nordmagazin/Kartoffeltage-in-der-Lewitz,nordmagazin25866.html',
            'md5': '5bc5f5b92c82c0f8b26cddca34f8bb2c',
            'note': 'Video file',
            'info_dict': {
                'id': '25866',
                'ext': 'mp4',
                'title': 'Kartoffeltage in der Lewitz',
                'description': 'md5:48c4c04dde604c8a9971b3d4e3b9eaa8',
                'duration': 166,
            },
            'skip': '404 Not found',
        },
        {
            'url': 'http://www.ndr.de/fernsehen/Party-Poette-und-Parade,hafengeburtstag988.html',
            'md5': 'dadc003c55ae12a5d2f6bd436cd73f59',
            'info_dict': {
                'id': '988',
                'ext': 'mp4',
                'title': 'Party, Pötte und Parade',
                'description': 'Hunderttausende feiern zwischen Speicherstadt und St. Pauli den 826. Hafengeburtstag. Die NDR Sondersendung zeigt die schönsten und spektakulärsten Bilder vom Auftakt.',
                'duration': 3498,
            },
        },
        {
            'url': 'http://www.ndr.de/info/audio51535.html',
            'md5': 'bb3cd38e24fbcc866d13b50ca59307b8',
            'note': 'Audio file',
            'info_dict': {
                'id': '51535',
                'ext': 'mp3',
                'title': 'La Valette entgeht der Hinrichtung',
                'description': 'md5:22f9541913a40fe50091d5cdd7c9f536',
                'duration': 884,
            }
        }
    ]
class NJoyIE(NDRBaseIE):
    IE_NAME = 'N-JOY'
    # same id scheme as NDR.de, on the n-joy.de domain
    _VALID_URL = r'https?://www\.n-joy\.de/.+?(?P<id>\d+)\.html'

    _TEST = {
        'url': 'http://www.n-joy.de/entertainment/comedy/comedy_contest/Benaissa-beim-NDR-Comedy-Contest,comedycontest2480.html',
        'md5': 'cb63be60cd6f9dd75218803146d8dc67',
        'info_dict': {
            'id': '2480',
            'ext': 'mp4',
            'title': 'Benaissa beim NDR Comedy Contest',
            'description': 'Von seinem sehr "behaarten" Leben lässt sich Benaissa trotz aller Schwierigkeiten nicht unterkriegen.',
            'duration': 654,
        }
    }
|
unlicense
|
IDSIA/sacred
|
sacred/config/config_summary.py
|
1
|
2228
|
#!/usr/bin/env python
# coding=utf-8
from sacred.utils import iter_prefixes, join_paths
class ConfigSummary(dict):
    # Tracks how a configuration was assembled: which keys were added,
    # which were modified, which changed type, plus per-key docstrings.

    def __init__(
        self, added=(), modified=(), typechanged=(), ignored_fallbacks=(), docs=()
    ):
        """Initialize the summary sets/dicts and normalize them."""
        super().__init__()
        self.added = set(added)
        self.modified = set(modified)  # TODO: test for this member
        self.typechanged = dict(typechanged)
        self.ignored_fallbacks = set(ignored_fallbacks)  # TODO: test
        self.docs = dict(docs)
        self.ensure_coherence()

    def update_from(self, config_mod, path=""):
        """Merge `config_mod` in, *intersecting* the 'added' set: a key
        only stays 'added' if this modification also added it."""
        added = config_mod.added
        updated = config_mod.modified
        typechanged = config_mod.typechanged
        self.added &= {join_paths(path, a) for a in added}
        self.modified |= {join_paths(path, u) for u in updated}
        self.typechanged.update(
            {join_paths(path, k): v for k, v in typechanged.items()}
        )
        self.ensure_coherence()
        # keep the first non-empty docstring seen for each key
        for k, v in config_mod.docs.items():
            if not self.docs.get(k, ""):
                self.docs[k] = v

    def update_add(self, config_mod, path=""):
        """Merge `config_mod` in, *unioning* the 'added' set."""
        added = config_mod.added
        updated = config_mod.modified
        typechanged = config_mod.typechanged
        self.added |= {join_paths(path, a) for a in added}
        self.modified |= {join_paths(path, u) for u in updated}
        self.typechanged.update(
            {join_paths(path, k): v for k, v in typechanged.items()}
        )
        # 'seed' docstrings are only taken at the top level (path == "")
        self.docs.update(
            {
                join_paths(path, k): v
                for k, v in config_mod.docs.items()
                if path == "" or k != "seed"
            }
        )
        self.ensure_coherence()

    def ensure_coherence(self):
        """Normalize the added/modified/typechanged sets."""
        # make sure parent paths show up as updated appropriately
        self.modified |= {p for a in self.added for p in iter_prefixes(a)}
        self.modified |= {p for u in self.modified for p in iter_prefixes(u)}
        self.modified |= {p for t in self.typechanged for p in iter_prefixes(t)}

        # make sure there is no overlap
        self.added -= set(self.typechanged.keys())
        self.modified -= set(self.typechanged.keys())
        self.modified -= self.added
|
mit
|
lucienfostier/gaffer
|
python/GafferCortex/ObjectReader.py
|
11
|
3001
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2013-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
##\ todo: Remove this class once SceneReader is capable of loading
# single Object scenes using IECore.Reader internally.
class ObjectReader( Gaffer.ComputeNode ) :
	"""Loads a single IECore Object from a file via IECore.Reader and
	exposes it on the "out" plug."""

	def __init__( self, name="ObjectReader" ) :

		Gaffer.ComputeNode.__init__( self, name )

		self.addChild( Gaffer.StringPlug( "fileName", Gaffer.Plug.Direction.In ) )
		self.addChild( Gaffer.ObjectPlug( "out", Gaffer.Plug.Direction.Out, IECore.NullObject.defaultNullObject() ) )

	def affects( self, input ) :

		# the output object depends only on the file name
		outputs = Gaffer.ComputeNode.affects( self, input )
		if input.isSame( self["fileName"] ) :
			outputs.append( self["out"] )
		return outputs

	def hash( self, output, context, h ) :

		assert( output.isSame( self["out"] ) )
		self["fileName"].hash( h )

	def compute( self, plug, context ) :

		assert( plug.isSame( self["out"] ) )
		reader = None
		# RuntimeError from Reader.create is deliberately ignored so we
		# fall back to the plug's default value below
		with IECore.IgnoredExceptions( RuntimeError ) :
			reader = IECore.Reader.create( self["fileName"].getValue() )
		plug.setValue( reader.read() if reader else plug.defaultValue() )

IECore.registerRunTimeTyped( ObjectReader, typeName = "GafferCortex::ObjectReader" )
|
bsd-3-clause
|
liukaijv/XlsxWriter
|
xlsxwriter/test/comparison/test_default_date_format01.py
|
8
|
3099
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from datetime import datetime
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.maxDiff = None

        filename = 'default_date_format01.xlsx'

        test_dir = 'xlsxwriter/test/comparison/'
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename

        self.ignore_files = []
        self.ignore_elements = {}

    def _check_default_date_format(self, write):
        """Shared body for the default-date-format tests: create a workbook
        with the 'default_date_format' option, let `write(worksheet, date)`
        emit the date, then compare against the Excel reference file."""
        workbook = Workbook(self.got_filename,
                            {'default_date_format': 'yyyy\\-mm\\-dd'})
        worksheet = workbook.add_worksheet()
        worksheet.set_column(0, 0, 12)

        date1 = datetime.strptime('2013-07-25', "%Y-%m-%d")
        write(worksheet, date1)

        workbook.close()
        self.assertExcelEqual()

    def test_create_file_user_date_format(self):
        """Test write_datetime with explicit date format."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        worksheet.set_column(0, 0, 12)

        format1 = workbook.add_format({'num_format': 'yyyy\\-mm\\-dd'})
        date1 = datetime.strptime('2013-07-25', "%Y-%m-%d")
        worksheet.write_datetime(0, 0, date1, format1)

        workbook.close()
        self.assertExcelEqual()

    def test_create_file_default_date_format(self):
        """Test write_datetime with default date format."""
        self._check_default_date_format(
            lambda ws, d: ws.write_datetime(0, 0, d))

    def test_create_file_default_date_format_write(self):
        """Test write() with default date format."""
        self._check_default_date_format(
            lambda ws, d: ws.write('A1', d))

    def test_create_file_default_date_format_write_row(self):
        """Test write_row with default date format."""
        self._check_default_date_format(
            lambda ws, d: ws.write_row('A1', [d]))

    def test_create_file_default_date_format_write_column(self):
        """Test write_column with default date format."""
        self._check_default_date_format(
            lambda ws, d: ws.write_column(0, 0, [d]))
|
bsd-2-clause
|
lucienfostier/gaffer
|
python/GafferUITest/CompoundEditorTest.py
|
8
|
3593
|
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import weakref
import imath
import Gaffer
import GafferTest
import GafferUI
import GafferUITest
class CompoundEditorTest( GafferUITest.TestCase ) :
    """Regression tests guarding against reference cycles that would keep
    CompoundEditor widgets (and objects they manage) alive after deletion."""

    def testAddEditorLifetime( self ) :
        # A CompoundEditor and an editor added to it must both be collectable
        # once the local references are dropped.
        s = Gaffer.ScriptNode()
        s["n"] = GafferTest.AddNode()
        c = GafferUI.CompoundEditor( s )
        e = GafferUI.GraphEditor( s )
        c.addEditor( e )
        wc = weakref.ref( c )
        we = weakref.ref( e )
        del c
        del e
        # A weakref returns None once its referent has been destroyed.
        self.assertEqual( wc(), None )
        self.assertEqual( we(), None )

    def testEditorsLifetime( self ) :
        # Querying editors() must not introduce references that outlive
        # the returned list.
        s = Gaffer.ScriptNode()
        c = GafferUI.CompoundEditor( s )
        n = GafferUI.NodeEditor( s )
        c.addEditor( n )
        wc = weakref.ref( c )
        wn = weakref.ref( n )
        e = c.editors()
        self.assertTrue( e[0] is n )
        del e
        del c
        del n
        self.assertEqual( wc(), None )
        self.assertEqual( wn(), None )

    def testDetachedPanelsLifetime( self ) :
        # Detached panels are tracked internally; dropping the editor and the
        # query result must release the panel too.
        s = Gaffer.ScriptNode()
        c = GafferUI.CompoundEditor( s )
        p = c._createDetachedPanel()
        wp = weakref.ref( p )
        ps = c._detachedPanels()
        self.assertTrue( ps[0] is p )
        del ps
        del p
        del c
        self.assertEqual( wp(), None )

    def testReprLifetime( self ) :
        # Serialising via repr() must not cache a reference to the editor.
        s = Gaffer.ScriptNode()
        c = GafferUI.CompoundEditor( s )
        wc = weakref.ref( c )
        repr( c )
        del c
        self.assertEqual( wc(), None )

    def testWindowStateCompatibility ( self ) :
        # _serializeWindowState() must evaluate to a plain dict with the
        # expected keys/types, for backward-compatible layout persistence.
        s = Gaffer.ScriptNode()
        c = GafferUI.CompoundEditor( s )
        sw = GafferUI.ScriptWindow.acquire( s )
        sw.setLayout( c )
        sw.setVisible( True )
        d = eval( c._serializeWindowState() )
        self.assertIsInstance( d, dict )
        self.assertIsInstance( d["fullScreen"], bool )
        self.assertIsInstance( d["maximized"], bool )
        self.assertIsInstance( d["screen"], int )
        self.assertIsInstance( d["bound"], imath.Box2f )
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
|
bsd-3-clause
|
karstenw/nodebox-pyobjc
|
examples/Extended Application/matplotlib/examples/event_handling/data_browser.py
|
1
|
3261
|
"""
============
Data Browser
============
"""
import numpy as np
import matplotlib.pyplot as plt
# nodebox section
if __name__ == '__builtin__':
    # We're running inside NodeBox: render matplotlib figures to temporary
    # PNG files and place them on the NodeBox canvas instead of opening a GUI.
    import os
    import tempfile
    W = 800      # canvas width in pixels
    inset = 20   # margin value; appears unused below -- TODO confirm intent
    # size(), imagesize() and image() are NodeBox builtins provided by the
    # host environment; they are not defined in this file.
    size(W, 600)
    # Reset any matplotlib state left over from a previous run.
    plt.cla()
    plt.clf()
    plt.close('all')

    def tempimage():
        """Return the path of a fresh temporary .png file (caller deletes it)."""
        fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
        fname = fob.name
        fob.close()
        return fname

    imgx = 20  # x position of the next image placed on the canvas
    imgy = 0   # y position of the next image placed on the canvas

    def pltshow(plt, dpi=150):
        """Save the current matplotlib figure and draw it on the NodeBox canvas."""
        global imgx, imgy
        temppath = tempimage()
        plt.savefig(temppath, dpi=dpi)
        dx, dy = imagesize(temppath)
        w = min(W, dx)
        image(temppath, imgx, imgy, width=w)
        # Stack successive figures vertically.
        imgy = imgy + dy + 20
        os.remove(temppath)
        # NOTE(review): HEIGHT is not defined anywhere in this file; this call
        # would raise NameError if reached -- presumably 600 was intended.
        size(W, HEIGHT + dy + 40)
else:
    def pltshow(mplpyplot):
        """Outside NodeBox, simply defer to pyplot's interactive show()."""
        mplpyplot.show()
# nodebox section end
class PointBrowser(object):
    """
    Click on a point to select and highlight it -- the data that
    generated the point will be shown in the lower axes. Use the 'n'
    and 'p' keys to browse through the next and previous points.

    Relies on the module-level ``ax``, ``ax2``, ``fig``, ``line``,
    ``xs``, ``ys`` and ``X`` objects created in the main section.
    """

    def __init__(self):
        # Start with the first point selected (but not yet highlighted).
        self.lastind = 0
        self.text = ax.text(0.05, 0.95, 'selected: none',
                            transform=ax.transAxes, va='top')
        self.selected, = ax.plot([xs[0]], [ys[0]], 'o', ms=12, alpha=0.4,
                                 color='yellow', visible=False)

    def onpress(self, event):
        """Step the selection with 'n' (next) / 'p' (previous) key presses."""
        if self.lastind is None or event.key not in ('n', 'p'):
            return
        step = 1 if event.key == 'n' else -1
        # Clamp so browsing never walks off either end of the data.
        self.lastind = np.clip(self.lastind + step, 0, len(xs) - 1)
        self.update()

    def onpick(self, event):
        """Select the picked point closest to the actual click location."""
        if event.artist != line:
            return True
        if not len(event.ind):
            return True
        click_x = event.mouseevent.xdata
        click_y = event.mouseevent.ydata
        distances = np.hypot(click_x - xs[event.ind], click_y - ys[event.ind])
        self.lastind = event.ind[distances.argmin()]
        self.update()

    def update(self):
        """Highlight the current point and plot its raw series below."""
        if self.lastind is None:
            return
        dataind = self.lastind
        ax2.cla()
        ax2.plot(X[dataind])
        ax2.text(0.05, 0.9, 'mu=%1.3f\nsigma=%1.3f' % (xs[dataind], ys[dataind]),
                 transform=ax2.transAxes, va='top')
        ax2.set_ylim(-0.5, 1.5)
        self.selected.set_visible(True)
        self.selected.set_data(xs[dataind], ys[dataind])
        self.text.set_text('selected: %d' % dataind)
        fig.canvas.draw()
if 1:  #__name__ == '__main__':
    # Fixing random state for reproducibility
    np.random.seed(19680801)
    # 100 random series of length 200; summarise each by its mean and std,
    # which become the (x, y) coordinates of the scatter plot.
    X = np.random.rand(100, 200)
    xs = np.mean(X, axis=1)
    ys = np.std(X, axis=1)
    fig, (ax, ax2) = plt.subplots(2, 1)
    ax.set_title('click on point to plot time series')
    line, = ax.plot(xs, ys, 'o', picker=5)  # 5 points tolerance
    browser = PointBrowser()
    # Wire mouse picks and key presses to the browser callbacks.
    fig.canvas.mpl_connect('pick_event', browser.onpick)
    fig.canvas.mpl_connect('key_press_event', browser.onpress)
    pltshow(plt)
|
mit
|
atomic-labs/zulip
|
zerver/management/commands/create_stream.py
|
9
|
1146
|
from __future__ import absolute_import
from __future__ import print_function
from django.core.management.base import BaseCommand
from zerver.lib.actions import do_create_stream
from zerver.models import Realm, get_realm
import sys
class Command(BaseCommand):
    help = """Create a stream, and subscribe all active users (excluding bots).
This should be used for TESTING only, unless you understand the limitations of
the command."""

    def add_arguments(self, parser):
        """Register the positional <domain> and <stream name> arguments."""
        parser.add_argument('domain', metavar='<domain>', type=str,
                            help='domain in which to create the stream')
        parser.add_argument('stream_name', metavar='<stream name>', type=str,
                            help='name of stream to create')

    def handle(self, *args, **options):
        """Create the stream in the realm identified by the given domain."""
        domain = options['domain']
        stream_name = options['stream_name']
        # Command-line arguments arrive as bytes on Python 2; decode with the
        # filesystem encoding before handing the name to the actions layer.
        encoding = sys.getfilesystemencoding()
        try:
            realm = get_realm(domain)
        except Realm.DoesNotExist:
            print("Unknown domain %s" % (domain,))
            # sys.exit() rather than the interactive-only exit() helper (which
            # is injected by the site module and not guaranteed to exist), so
            # the command aborts reliably with a nonzero status code.
            sys.exit(1)
        do_create_stream(realm, stream_name.decode(encoding))
|
apache-2.0
|
frogleg/lightblue-0.4
|
src/series60/_lightbluecommon.py
|
179
|
10831
|
# Copyright (c) 2009 Bea Lam. All rights reserved.
#
# This file is part of LightBlue.
#
# LightBlue is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LightBlue is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with LightBlue. If not, see <http://www.gnu.org/licenses/>.
# Defines attributes with common implementations across the different
# platforms.
# public attributes
__all__ = ("L2CAP", "RFCOMM", "OBEX", "BluetoothError", "splitclass")
# Protocol/service class types, used for sockets and advertising services
L2CAP, RFCOMM, OBEX = (10, 11, 12)
class BluetoothError(IOError):
    """
    Generic exception raised for Bluetooth errors. Socket-related failures
    are not reported through this class; socket objects raise the standard
    library's socket.error and socket.timeout exceptions instead.

    Error codes are platform-specific: the Mac OS X implementation returns
    IOReturn error values from the IOKit framework, and OBEXError codes from
    <IOBluetooth/OBEX.h> for OBEX operations.
    """
    pass
def splitclass(classofdevice):
    """
    Split a Bluetooth class-of-device value into a 3-item tuple of
    (major service class, major device class, minor device class).

    These values indicate the device's major services and the type of the
    device (e.g. mobile phone, laptop, etc.). See the Bluetooth baseband
    "assigned numbers" documents for how the bit fields are laid out.

    Example:

        >>> splitclass(1057036)
        (129, 1, 3)
    """
    if not isinstance(classofdevice, int):
        # Accept anything int() can convert (e.g. numeric strings).
        try:
            classofdevice = int(classofdevice)
        except (TypeError, ValueError):
            raise TypeError("Given device class '%s' cannot be split" % \
                            str(classofdevice))
    bits = classofdevice >> 2  # skip over the 2 "format" bits
    return (bits >> 11, (bits >> 6) & 0x1F, bits & 0x3F)
_validbtaddr = None
def _isbtaddr(address):
"""
Returns whether the given address is a valid bluetooth address.
For example, "00:0e:6d:7b:a2:0a" is a valid address.
Returns False if the argument is None or is not a string.
"""
# Define validity regex. Accept either ":" or "-" as separators.
global _validbtaddr
if _validbtaddr is None:
import re
_validbtaddr = re.compile("((\d|[a-f]){2}(:|-)){5}(\d|[a-f]){2}",
re.IGNORECASE)
import types
if not isinstance(address, types.StringTypes):
return False
return _validbtaddr.match(address) is not None
# --------- other attributes ---------
def _joinclass(codtuple):
"""
The opposite of splitclass(). Joins a (service, major, minor) class-of-
device tuple into a whole class of device value.
"""
if not isinstance(codtuple, tuple):
raise TypeError("argument must be tuple, was %s" % type(codtuple))
if len(codtuple) != 3:
raise ValueError("tuple must have 3 items, has %d" % len(codtuple))
serviceclass = codtuple[0] << 2 << 11
majorclass = codtuple[1] << 2 << 6
minorclass = codtuple[2] << 2
return (serviceclass | majorclass | minorclass)
# Docstrings for socket objects.
# Based on std lib socket docs.
_socketdocs = {
"accept":
"""
accept() -> (socket object, address info)
Wait for an incoming connection. Return a new socket representing the
connection, and the address of the client. For RFCOMM sockets, the address
info is a pair (hostaddr, channel).
The socket must be bound and listening before calling this method.
""",
"bind":
"""
bind(address)
Bind the socket to a local address. For RFCOMM sockets, the address is a
pair (host, channel); the host must refer to the local host.
A port value of 0 binds the socket to a dynamically assigned port.
(Note that on Mac OS X, the port value must always be 0.)
The socket must not already be bound.
""",
"close":
"""
close()
Close the socket. It cannot be used after this call.
""",
"connect":
"""
connect(address)
Connect the socket to a remote address. The address should be a
(host, channel) pair for RFCOMM sockets, and a (host, PSM) pair for L2CAP
sockets.
The socket must not be already connected.
""",
"connect_ex":
"""
connect_ex(address) -> errno
This is like connect(address), but returns an error code instead of raising
an exception when an error occurs.
""",
"dup":
"""
dup() -> socket object
Returns a new socket object connected to the same system resource.
""",
"fileno":
"""
fileno() -> integer
Return the integer file descriptor of the socket.
Raises NotImplementedError on Mac OS X and Python For Series 60.
""",
"getpeername":
"""
getpeername() -> address info
Return the address of the remote endpoint. The address info is a
(host, channel) pair for RFCOMM sockets, and a (host, PSM) pair for L2CAP
sockets.
If the socket has not been connected, socket.error will be raised.
""",
"getsockname":
"""
getsockname() -> address info
Return the address of the local endpoint. The address info is a
(host, channel) pair for RFCOMM sockets, and a (host, PSM) pair for L2CAP
sockets.
If the socket has not been connected nor bound, this returns the tuple
("00:00:00:00:00:00", 0).
""",
"getsockopt":
"""
getsockopt(level, option[, bufsize]) -> value
Get a socket option. See the Unix manual for level and option.
If a nonzero buffersize argument is given, the return value is a
string of that length; otherwise it is an integer.
Currently support for socket options are platform independent -- i.e.
depends on the underlying Series 60 or BlueZ socket options support.
The Mac OS X implementation currently does not support any options at
all and automatically raises socket.error.
""",
"gettimeout":
"""
gettimeout() -> timeout
Returns the timeout in floating seconds associated with socket
operations. A timeout of None indicates that timeouts on socket
operations are disabled.
Currently not supported on Python For Series 60 implementation, which
will always return None.
""",
"listen":
"""
listen(backlog)
Enable a server to accept connections. The backlog argument must be at
least 1; it specifies the number of unaccepted connection that the system
will allow before refusing new connections.
The socket must not be already listening.
Currently not implemented on Mac OS X.
""",
"makefile":
"""
makefile([mode[, bufsize]]) -> file object
Returns a regular file object corresponding to the socket. The mode
and bufsize arguments are as for the built-in open() function.
""",
"recv":
"""
recv(bufsize[, flags]) -> data
Receive up to bufsize bytes from the socket. For the optional flags
argument, see the Unix manual. When no data is available, block until
at least one byte is available or until the remote end is closed. When
the remote end is closed and all data is read, return the empty string.
Currently the flags argument has no effect on Mac OS X.
""",
"recvfrom":
"""
recvfrom(bufsize[, flags]) -> (data, address info)
Like recv(buffersize, flags) but also return the sender's address info.
""",
"send":
"""
send(data[, flags]) -> count
Send a data string to the socket. For the optional flags
argument, see the Unix manual. Return the number of bytes
sent.
The socket must be connected to a remote socket.
Currently the flags argument has no effect on Mac OS X.
""",
"sendall":
"""
sendall(data[, flags])
Send a data string to the socket. For the optional flags
argument, see the Unix manual. This calls send() repeatedly
until all data is sent. If an error occurs, it's impossible
to tell how much data has been sent.
""",
"sendto":
"""
sendto(data[, flags], address) -> count
Like send(data, flags) but allows specifying the destination address.
For RFCOMM sockets, the address is a pair (hostaddr, channel).
""",
"setblocking":
"""
setblocking(flag)
Set the socket to blocking (flag is true) or non-blocking (false).
setblocking(True) is equivalent to settimeout(None);
setblocking(False) is equivalent to settimeout(0.0).
Initially a socket is in blocking mode. In non-blocking mode, if a
socket operation cannot be performed immediately, socket.error is raised.
The underlying implementation on Python for Series 60 only supports
non-blocking mode for send() and recv(), and ignores it for connect() and
accept().
""",
"setsockopt":
"""
setsockopt(level, option, value)
Set a socket option. See the Unix manual for level and option.
The value argument can either be an integer or a string.
Currently support for socket options are platform independent -- i.e.
depends on the underlying Series 60 or BlueZ socket options support.
The Mac OS X implementation currently does not support any options at
all and automatically raise socket.error.
""",
"settimeout":
"""
settimeout(timeout)
Set a timeout on socket operations. 'timeout' can be a float,
giving in seconds, or None. Setting a timeout of None disables
the timeout feature and is equivalent to setblocking(1).
Setting a timeout of zero is the same as setblocking(0).
If a timeout is set, the connect, accept, send and receive operations will
raise socket.timeout if a timeout occurs.
Raises NotImplementedError on Python For Series 60.
""",
"shutdown":
"""
shutdown(how)
Shut down the reading side of the socket (flag == socket.SHUT_RD), the
writing side of the socket (flag == socket.SHUT_WR), or both ends
(flag == socket.SHUT_RDWR).
"""
}
|
gpl-3.0
|
RafaelTorrealba/odoo
|
addons/decimal_precision/__init__.py
|
450
|
1128
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#import decimal_precision
from decimal_precision import get_precision
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
rabitt/mir_eval
|
mir_eval/melody.py
|
3
|
27199
|
# CREATED:2014-03-07 by Justin Salamon <justin.salamon@nyu.edu>
'''
Melody extraction algorithms aim to produce a sequence of frequency values
corresponding to the pitch of the dominant melody from a musical
recording. For evaluation, an estimated pitch series is evaluated against a
reference based on whether the voicing (melody present or not) and the pitch
is correct (within some tolerance).
For a detailed explanation of the measures please refer to:
J. Salamon, E. Gomez, D. P. W. Ellis and G. Richard, "Melody Extraction
from Polyphonic Music Signals: Approaches, Applications and Challenges",
IEEE Signal Processing Magazine, 31(2):118-134, Mar. 2014.
Conventions
-----------
Melody annotations are assumed to be given in the format of a 1d array of
frequency values which are accompanied by a 1d array of times denoting when
each frequency value occurs. In a reference melody time series, a frequency
value of 0 denotes "unvoiced". In a estimated melody time series, unvoiced
frames can be indicated either by 0 Hz or by a negative Hz value - negative
values represent the algorithm's pitch estimate for frames it has determined as
unvoiced, in case they are in fact voiced.
Metrics are computed using a sequence of reference and estimated pitches in
cents and boolean voicing arrays, both of which are sampled to the same
timebase. The function :func:`mir_eval.melody.to_cent_voicing` can be used to
convert a sequence of estimated and reference times and frequency values in Hz
to boolean voicing arrays and frequency arrays in the format required by the
metric functions. By default, the convention is to resample the estimated
melody time series to the reference melody time series' timebase.
Metrics
-------
* :func:`mir_eval.melody.voicing_measures`: Voicing measures, including the
recall rate (proportion of frames labeled as melody frames in the reference
that are estimated as melody frames) and the false alarm
rate (proportion of frames labeled as non-melody in the reference that are
mistakenly estimated as melody frames)
* :func:`mir_eval.melody.raw_pitch_accuracy`: Raw Pitch Accuracy, which
computes the proportion of melody frames in the reference for which the
frequency is considered correct (i.e. within half a semitone of the reference
frequency)
* :func:`mir_eval.melody.raw_chroma_accuracy`: Raw Chroma Accuracy, where the
estimated and reference frequency sequences are mapped onto a single octave
before computing the raw pitch accuracy
* :func:`mir_eval.melody.overall_accuracy`: Overall Accuracy, which computes
the proportion of all frames correctly estimated by the algorithm, including
whether non-melody frames where labeled by the algorithm as non-melody
'''
import numpy as np
import scipy.interpolate
import collections
import warnings
from . import util
def validate_voicing(ref_voicing, est_voicing):
    """Checks that voicing inputs to a metric are in the correct format.

    Parameters
    ----------
    ref_voicing : np.ndarray
        Reference boolean voicing array
    est_voicing : np.ndarray
        Estimated boolean voicing array
    """
    pairs = (("Reference", ref_voicing), ("Estimated", est_voicing))
    # Degenerate inputs produce warnings rather than errors.
    for label, arr in pairs:
        if arr.size == 0:
            warnings.warn(label + " voicing array is empty.")
    for label, arr in pairs:
        if arr.sum() == 0:
            warnings.warn(label + " melody has no voiced frames.")
    # Hard errors: length mismatch or non-boolean entries.
    if ref_voicing.shape[0] != est_voicing.shape[0]:
        raise ValueError('Reference and estimated voicing arrays should '
                         'be the same length.')
    for _, arr in pairs:
        if np.logical_and(arr != 0, arr != 1).any():
            raise ValueError('Voicing arrays must be boolean.')
def validate(ref_voicing, ref_cent, est_voicing, est_cent):
    """Checks that voicing and frequency arrays are well-formed. To be used in
    conjunction with :func:`mir_eval.melody.validate_voicing`

    Parameters
    ----------
    ref_voicing : np.ndarray
        Reference boolean voicing array
    ref_cent : np.ndarray
        Reference pitch sequence in cents
    est_voicing : np.ndarray
        Estimated boolean voicing array
    est_cent : np.ndarray
        Estimate pitch sequence in cents
    """
    for label, arr in (("Reference", ref_cent), ("Estimated", est_cent)):
        if arr.size == 0:
            warnings.warn(label + " frequency array is empty.")
    # All four arrays must share a single common length.
    lengths = {ref_voicing.shape[0], ref_cent.shape[0],
               est_voicing.shape[0], est_cent.shape[0]}
    if len(lengths) != 1:
        raise ValueError('All voicing and frequency arrays must have the '
                         'same length.')
def hz2cents(freq_hz, base_frequency=10.0):
    """Convert an array of frequency values in Hz to cents.
    0 values are left in place.

    Parameters
    ----------
    freq_hz : np.ndarray
        Array of frequencies in Hz.
    base_frequency : float
        Base frequency for conversion.
        (Default value = 10.0)

    Returns
    -------
    cent : np.ndarray
        Array of frequencies in cents, relative to base_frequency
    """
    cents = np.zeros(freq_hz.shape[0])
    # Only convert the nonzero entries; zeros mean "unvoiced" and stay zero.
    # abs() handles negative values (estimated-unvoiced convention).
    nonzero = np.flatnonzero(freq_hz)
    cents[nonzero] = 1200 * np.log2(np.abs(freq_hz[nonzero]) / base_frequency)
    return cents
def freq_to_voicing(frequencies):
    """Convert from an array of frequency values to frequency array +
    voice/unvoiced array

    Parameters
    ----------
    frequencies : np.ndarray
        Array of frequencies. A frequency <= 0 indicates "unvoiced".

    Returns
    -------
    frequencies : np.ndarray
        Array of frequencies, all >= 0.
    voiced : np.ndarray
        Boolean array, same length as frequencies,
        which indicates voiced or unvoiced
    """
    voiced = frequencies > 0
    magnitudes = np.abs(frequencies)
    return magnitudes, voiced
def constant_hop_timebase(hop, end_time):
    """Generates a time series from 0 to ``end_time`` with times spaced ``hop``
    apart

    Parameters
    ----------
    hop : float
        Spacing of samples in the time series
    end_time : float
        Time series will span ``[0, end_time]``

    Returns
    -------
    times : np.ndarray
        Generated timebase
    """
    # Rounding + linspace sidestep floating-point accumulation errors that a
    # naive arange(0, end_time, hop) would introduce.
    end_time = np.round(end_time, 10)
    n_hops = int(np.floor(end_time / hop))
    return np.round(np.linspace(0, hop * n_hops, n_hops + 1), 10)
def resample_melody_series(times, frequencies, voicing,
                           times_new, kind='linear'):
    """Resamples frequency and voicing time series to a new timescale. Maintains
    any zero ("unvoiced") values in frequencies.

    If ``times`` and ``times_new`` are equivalent, no resampling will be
    performed.

    Parameters
    ----------
    times : np.ndarray
        Times of each frequency value
    frequencies : np.ndarray
        Array of frequency values, >= 0
    voicing : np.ndarray
        Boolean array which indicates voiced or unvoiced
    times_new : np.ndarray
        Times to resample frequency and voicing sequences to
    kind : str
        kind parameter to pass to scipy.interpolate.interp1d.
        (Default value = 'linear')

    Returns
    -------
    frequencies_resampled : np.ndarray
        Frequency array resampled to new timebase
    voicing_resampled : np.ndarray, dtype=bool
        Boolean voicing array resampled to new timebase
    """
    # If the timebases are already the same, no need to interpolate.
    # NOTE: uses the builtin ``bool`` -- the ``np.bool`` alias was removed in
    # NumPy 1.24, so the old code crashed on modern NumPy installations.
    if times.shape == times_new.shape and np.allclose(times, times_new):
        return frequencies, voicing.astype(bool)
    # Warn when the delta between the original times is not constant,
    # unless times[0] == 0. and frequencies[0] == frequencies[1] (see logic at
    # the beginning of to_cent_voicing)
    if not (np.allclose(np.diff(times), np.diff(times).mean()) or
            (np.allclose(np.diff(times[1:]), np.diff(times[1:]).mean()) and
             frequencies[0] == frequencies[1])):
        warnings.warn(
            "Non-uniform timescale passed to resample_melody_series. Pitch "
            "will be linearly interpolated, which will result in undesirable "
            "behavior if silences are indicated by missing values. Silences "
            "should be indicated by nonpositive frequency values.")
    # Round to avoid floating point problems
    times = np.round(times, 10)
    times_new = np.round(times_new, 10)
    # Add in an additional sample if we'll be asking for a time too large
    if times_new.max() > times.max():
        times = np.append(times, times_new.max())
        frequencies = np.append(frequencies, 0)
        voicing = np.append(voicing, 0)
    # We need to fix zero transitions if interpolation is not zero or nearest
    if kind != 'zero' and kind != 'nearest':
        # Fill in zero values with the last reported frequency
        # to avoid erroneous values when resampling
        frequencies_held = np.array(frequencies)
        for n, frequency in enumerate(frequencies[1:]):
            if frequency == 0:
                frequencies_held[n + 1] = frequencies_held[n]
        # Linearly interpolate frequencies
        frequencies_resampled = scipy.interpolate.interp1d(times,
                                                           frequencies_held,
                                                           kind)(times_new)
        # Retain zeros: zero-order interpolation of the raw (zero-containing)
        # series marks which resampled frames fall inside a silence.
        frequency_mask = scipy.interpolate.interp1d(times,
                                                    frequencies,
                                                    'zero')(times_new)
        frequencies_resampled *= (frequency_mask != 0)
    else:
        frequencies_resampled = scipy.interpolate.interp1d(times,
                                                           frequencies,
                                                           kind)(times_new)
    # Use nearest-neighbor for voicing if it was used for frequencies
    if kind == 'nearest':
        voicing_resampled = scipy.interpolate.interp1d(times,
                                                       voicing,
                                                       kind)(times_new)
    # otherwise, always use zeroth order
    else:
        voicing_resampled = scipy.interpolate.interp1d(times,
                                                       voicing,
                                                       'zero')(times_new)
    return frequencies_resampled, voicing_resampled.astype(bool)
def to_cent_voicing(ref_time, ref_freq, est_time, est_freq, base_frequency=10.,
                    hop=None, kind='linear'):
    """Converts reference and estimated time/frequency (Hz) annotations to sampled
    frequency (cent)/voicing arrays.

    A zero frequency indicates "unvoiced".

    A negative frequency indicates "Predicted as unvoiced, but if it's voiced,
    this is the frequency estimate".

    Parameters
    ----------
    ref_time : np.ndarray
        Time of each reference frequency value
    ref_freq : np.ndarray
        Array of reference frequency values
    est_time : np.ndarray
        Time of each estimated frequency value
    est_freq : np.ndarray
        Array of estimated frequency values
    base_frequency : float
        Base frequency in Hz for conversion to cents
        (Default value = 10.)
    hop : float
        Hop size, in seconds, to resample,
        default None which means use ref_time
    kind : str
        kind parameter to pass to scipy.interpolate.interp1d.
        (Default value = 'linear')

    Returns
    -------
    ref_voicing : np.ndarray, dtype=bool
        Resampled reference boolean voicing array
    ref_cent : np.ndarray
        Resampled reference frequency (cent) array
    est_voicing : np.ndarray, dtype=bool
        Resampled estimated boolean voicing array
    est_cent : np.ndarray
        Resampled estimated frequency (cent) array
    """
    # Check if missing sample at time 0 and if so add one; duplicating the
    # first frequency keeps interpolation well-defined over [0, max time].
    if ref_time[0] > 0:
        ref_time = np.insert(ref_time, 0, 0)
        ref_freq = np.insert(ref_freq, 0, ref_freq[0])
    if est_time[0] > 0:
        est_time = np.insert(est_time, 0, 0)
        est_freq = np.insert(est_freq, 0, est_freq[0])
    # Get separated frequency array and voicing boolean array
    ref_freq, ref_voicing = freq_to_voicing(ref_freq)
    est_freq, est_voicing = freq_to_voicing(est_freq)
    # convert both sequences to cents
    ref_cent = hz2cents(ref_freq, base_frequency)
    est_cent = hz2cents(est_freq, base_frequency)
    # If we received a hop, use it to resample both
    if hop is not None:
        # Resample to common time base
        ref_cent, ref_voicing = resample_melody_series(
            ref_time, ref_cent, ref_voicing,
            constant_hop_timebase(hop, ref_time.max()), kind)
        est_cent, est_voicing = resample_melody_series(
            est_time, est_cent, est_voicing,
            constant_hop_timebase(hop, est_time.max()), kind)
    # Otherwise, only resample estimated to the reference time base
    else:
        est_cent, est_voicing = resample_melody_series(
            est_time, est_cent, est_voicing, ref_time, kind)
    # ensure the estimated sequence is the same length as the reference:
    # pad with zeros (i.e. unvoiced frames), or truncate.
    len_diff = ref_cent.shape[0] - est_cent.shape[0]
    if len_diff >= 0:
        est_cent = np.append(est_cent, np.zeros(len_diff))
        est_voicing = np.append(est_voicing, np.zeros(len_diff))
    else:
        est_cent = est_cent[:ref_cent.shape[0]]
        est_voicing = est_voicing[:ref_voicing.shape[0]]
    return (ref_voicing.astype(bool), ref_cent,
            est_voicing.astype(bool), est_cent)
def voicing_measures(ref_voicing, est_voicing):
    """Compute the voicing recall and false alarm rates given two voicing
    indicator sequences, one as reference (truth) and the other as the estimate
    (prediction). The sequences must be of the same length.

    Examples
    --------
    >>> ref_time, ref_freq = mir_eval.io.load_time_series('ref.txt')
    >>> est_time, est_freq = mir_eval.io.load_time_series('est.txt')
    >>> (ref_v, ref_c,
    ...  est_v, est_c) = mir_eval.melody.to_cent_voicing(ref_time,
    ...                                                  ref_freq,
    ...                                                  est_time,
    ...                                                  est_freq)
    >>> recall, false_alarm = mir_eval.melody.voicing_measures(ref_v,
    ...                                                        est_v)

    Parameters
    ----------
    ref_voicing : np.ndarray
        Reference boolean voicing array
    est_voicing : np.ndarray
        Estimated boolean voicing array

    Returns
    -------
    vx_recall : float
        Voicing recall rate, the fraction of voiced frames in ref
        indicated as voiced in est
    vx_false_alarm : float
        Voicing false alarm rate, the fraction of unvoiced frames in ref
        indicated as voiced in est
    """
    validate_voicing(ref_voicing, est_voicing)
    ref_voicing = ref_voicing.astype(bool)
    est_voicing = est_voicing.astype(bool)
    # When input arrays are empty, return 0 for both measures by special case.
    # (Previously this returned a bare scalar, which broke callers unpacking
    # the documented (recall, false_alarm) pair.)
    if ref_voicing.size == 0 or est_voicing.size == 0:
        return 0., 0.
    # How voicing is computed
    #        | ref_v | !ref_v |
    # -------|-------|--------|
    # est_v  |  TP   |   FP   |
    # -------|-------|------- |
    # !est_v |  FN   |   TN   |
    # -------------------------
    TP = (ref_voicing*est_voicing).sum()
    FP = ((ref_voicing == 0)*est_voicing).sum()
    FN = (ref_voicing*(est_voicing == 0)).sum()
    TN = ((ref_voicing == 0)*(est_voicing == 0)).sum()
    # Voicing recall = fraction of voiced frames according the reference that
    # are declared as voiced by the estimate
    if TP + FN == 0:
        vx_recall = 0.
    else:
        vx_recall = TP/float(TP + FN)
    # Voicing false alarm = fraction of unvoiced frames according to the
    # reference that are declared as voiced by the estimate
    if FP + TN == 0:
        vx_false_alm = 0.
    else:
        vx_false_alm = FP/float(FP + TN)
    return vx_recall, vx_false_alm
def raw_pitch_accuracy(ref_voicing, ref_cent, est_voicing, est_cent,
                       cent_tolerance=50):
    """Compute the raw pitch accuracy given two pitch (frequency) sequences in
    cents and matching voicing indicator sequences. The first pitch and voicing
    arrays are treated as the reference (truth), and the second two as the
    estimate (prediction). All 4 sequences must be of the same length.

    Examples
    --------
    >>> ref_time, ref_freq = mir_eval.io.load_time_series('ref.txt')
    >>> est_time, est_freq = mir_eval.io.load_time_series('est.txt')
    >>> (ref_v, ref_c,
    ...  est_v, est_c) = mir_eval.melody.to_cent_voicing(ref_time,
    ...                                                  ref_freq,
    ...                                                  est_time,
    ...                                                  est_freq)
    >>> raw_pitch = mir_eval.melody.raw_pitch_accuracy(ref_v, ref_c,
    ...                                                est_v, est_c)

    Parameters
    ----------
    ref_voicing : np.ndarray
        Reference boolean voicing array
    ref_cent : np.ndarray
        Reference pitch sequence in cents
    est_voicing : np.ndarray
        Estimated boolean voicing array
    est_cent : np.ndarray
        Estimate pitch sequence in cents
    cent_tolerance : float
        Maximum absolute deviation for a cent value to be considered correct
        (Default value = 50)

    Returns
    -------
    raw_pitch : float
        Raw pitch accuracy, the fraction of voiced frames in ref_cent for
        which est_cent provides a correct frequency values
        (within cent_tolerance cents).
    """
    validate_voicing(ref_voicing, est_voicing)
    validate(ref_voicing, ref_cent, est_voicing, est_cent)
    ref_voicing = ref_voicing.astype(bool)
    est_voicing = est_voicing.astype(bool)
    # Degenerate inputs score zero by special case.
    if (ref_voicing.size == 0 or est_voicing.size == 0
            or ref_cent.size == 0 or est_cent.size == 0):
        return 0.
    n_voiced = ref_voicing.sum()
    # If there are no voiced frames in reference, metric is 0
    if n_voiced == 0:
        return 0.
    # Raw pitch = the number of voiced frames in the reference for which the
    # estimate provides a correct frequency value (within cent_tolerance cents)
    # NB: voicing estimation is ignored in this measure
    deviation = np.abs(ref_cent - est_cent)[ref_voicing]
    return (deviation < cent_tolerance).sum() / float(n_voiced)
def raw_chroma_accuracy(ref_voicing, ref_cent, est_voicing, est_cent,
                        cent_tolerance=50):
    """Compute raw chroma accuracy for a reference/estimate pitch pair.

    Identical to raw pitch accuracy except that octave errors are forgiven:
    the deviation is folded by the nearest whole number of octaves (1200
    cents) before comparing against ``cent_tolerance``.  The first pitch and
    voicing arrays are the reference (truth), the second two the estimate
    (prediction).  All four sequences must have the same length.

    Parameters
    ----------
    ref_voicing : np.ndarray
        Reference boolean voicing array
    ref_cent : np.ndarray
        Reference pitch sequence in cents
    est_voicing : np.ndarray
        Estimated boolean voicing array
    est_cent : np.ndarray
        Estimate pitch sequence in cents
    cent_tolerance : float
        Maximum absolute deviation for a cent value to be considered correct
        (Default value = 50)

    Returns
    -------
    raw_chroma : float
        Fraction of reference-voiced frames for which the estimate provides
        a correct frequency value (within ``cent_tolerance`` cents), ignoring
        octave errors.

    References
    ----------
    .. [#] J. Salamon, E. Gomez, D. P. W. Ellis and G. Richard, "Melody
        Extraction from Polyphonic Music Signals: Approaches, Applications
        and Challenges", IEEE Signal Processing Magazine, 31(2):118-134,
        Mar. 2014.
    .. [#] G. E. Poliner, D. P. W. Ellis, A. F. Ehmann, E. Gomez, S.
        Streich, and B. Ong. "Melody transcription from music audio:
        Approaches and evaluation", IEEE Transactions on Audio, Speech, and
        Language Processing, 15(4):1247-1256, 2007.
    """
    validate_voicing(ref_voicing, est_voicing)
    validate(ref_voicing, ref_cent, est_voicing, est_cent)
    ref_voicing = ref_voicing.astype(bool)
    est_voicing = est_voicing.astype(bool)
    # Degenerate input: any empty array yields a score of 0 by convention.
    if min(ref_voicing.size, est_voicing.size,
           ref_cent.size, est_cent.size) == 0:
        return 0.
    n_ref_voiced = ref_voicing.sum()
    # No voiced reference frames means nothing can be scored.
    if n_ref_voiced == 0:
        return 0.
    # Fold out octave errors: subtract the nearest whole number of octaves
    # (1200 cents each) from the absolute deviation before thresholding.
    deviation = np.abs(ref_cent - est_cent)
    nearest_octave = 1200 * np.floor(deviation / 1200.0 + 0.5)
    chroma_deviation = np.abs(deviation - nearest_octave)
    hits = chroma_deviation[ref_voicing] < cent_tolerance
    return hits.sum() / float(n_ref_voiced)
def overall_accuracy(ref_voicing, ref_cent, est_voicing, est_cent,
                     cent_tolerance=50):
    """Compute overall accuracy for a reference/estimate pitch pair.

    A frame is correct either when both reference and estimate mark it
    unvoiced, or when both mark it voiced and the estimated pitch is within
    ``cent_tolerance`` cents of the reference.  The first pitch and voicing
    arrays are the reference (truth), the second two the estimate
    (prediction).  All four sequences must have the same length.

    Parameters
    ----------
    ref_voicing : np.ndarray
        Reference boolean voicing array
    ref_cent : np.ndarray
        Reference pitch sequence in cents
    est_voicing : np.ndarray
        Estimated boolean voicing array
    est_cent : np.ndarray
        Estimate pitch sequence in cents
    cent_tolerance : float
        Maximum absolute deviation for a cent value to be considered correct
        (Default value = 50)

    Returns
    -------
    overall_accuracy : float
        Fraction of all frames that are correctly estimated, counting both
        correct pitch (within ``cent_tolerance`` cents) and correct
        unvoiced decisions.
    """
    validate_voicing(ref_voicing, est_voicing)
    validate(ref_voicing, ref_cent, est_voicing, est_cent)
    ref_voicing = ref_voicing.astype(bool)
    est_voicing = est_voicing.astype(bool)
    # Degenerate input: any empty array yields a score of 0 by convention.
    if min(ref_voicing.size, est_voicing.size,
           ref_cent.size, est_cent.size) == 0:
        return 0.
    # True negatives: frames both sides agree are unvoiced.
    true_negatives = (~ref_voicing & ~est_voicing).sum()
    # True positives: frames both sides call voiced AND whose pitch matches
    # within the tolerance.
    deviation = np.abs(ref_cent - est_cent)
    both_voiced = ref_voicing & est_voicing
    true_positives = (deviation[both_voiced] < cent_tolerance).sum()
    return (true_positives + true_negatives) / float(ref_cent.shape[0])
def evaluate(ref_time, ref_freq, est_time, est_freq, **kwargs):
    """Evaluate two melody (predominant f0) transcriptions.

    The first time/frequency pair is treated as the reference (ground truth)
    and the second as the estimate to be evaluated (prediction).

    Parameters
    ----------
    ref_time : np.ndarray
        Time of each reference frequency value
    ref_freq : np.ndarray
        Array of reference frequency values
    est_time : np.ndarray
        Time of each estimated frequency value
    est_freq : np.ndarray
        Array of estimated frequency values
    kwargs
        Additional keyword arguments which will be passed to the
        appropriate metric or preprocessing functions.

    Returns
    -------
    scores : dict
        Dictionary of scores, where the key is the metric name (str) and
        the value is the (float) score achieved.
    """
    # Resample both transcriptions onto a common grid of voicing flags and
    # pitch values in cents.
    (ref_voicing, ref_cent,
     est_voicing, est_cent) = util.filter_kwargs(
        to_cent_voicing, ref_time, ref_freq, est_time, est_freq, **kwargs)

    scores = collections.OrderedDict()
    # Voicing measures return a (recall, false alarm) pair.
    (scores['Voicing Recall'],
     scores['Voicing False Alarm']) = util.filter_kwargs(
        voicing_measures, ref_voicing, est_voicing, **kwargs)
    # The remaining metrics all share one signature, so dispatch in a loop.
    pitch_metrics = (('Raw Pitch Accuracy', raw_pitch_accuracy),
                     ('Raw Chroma Accuracy', raw_chroma_accuracy),
                     ('Overall Accuracy', overall_accuracy))
    for metric_name, metric in pitch_metrics:
        scores[metric_name] = util.filter_kwargs(
            metric, ref_voicing, ref_cent, est_voicing, est_cent, **kwargs)
    return scores
|
mit
|
woodymit/millstone_accidental_source
|
genome_designer/main/startup.py
|
1
|
1068
|
"""Actions to run at server startup.
"""
from django.db import connection
from django.db import transaction
def run():
    """Run server-startup actions.

    Call this from manage.py or tests.  Currently the only action is making
    sure the custom Postgres aggregate exists.
    """
    _add_custom_mult_agg_function()
def _add_custom_mult_agg_function():
    """Make sure the Postgresql database has a custom function
    array_agg_mult (an aggregate that concatenates arrays via array_cat).

    NOTE: Figured out the raw sql query by running psql with -E flag
    and then calling ``\\df`` to list functions. The -E flag causes the
    internal raw sql of the commands to be shown.
    """
    cursor = connection.cursor()
    # Check the system catalog first: CREATE AGGREGATE would fail if the
    # aggregate already exists, so only create it when it is missing.
    cursor.execute(
        'SELECT p.proname '
        'FROM pg_catalog.pg_proc p '
        'WHERE p.proname=\'array_agg_mult\''
    )
    mult_agg_exists = bool(cursor.fetchone())
    if not mult_agg_exists:
        cursor.execute(
            'CREATE AGGREGATE array_agg_mult (anyarray) ('
            '   SFUNC = array_cat'
            '   ,STYPE = anyarray'
            '   ,INITCOND = \'{}\''
            ');'
        )
    # NOTE(review): commit_unless_managed() was removed in Django 1.8 --
    # presumably this project pins an older Django; confirm before upgrading.
    transaction.commit_unless_managed()
|
mit
|
fabaff/ansible
|
lib/ansible/plugins/connection/jail.py
|
13
|
8005
|
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# and chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2013, Michael Scherer <misc@zarb.org>
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import distutils.spawn
import os
import os.path
import pipes
import subprocess
import traceback
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.connection import ConnectionBase
from ansible.utils.unicode import to_bytes
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
BUFSIZE = 65536
class Connection(ConnectionBase):
    ''' Local BSD Jail based connections '''

    transport = 'jail'
    # Pipelining may work. Someone needs to test by setting this to True and
    # having pipelining=True in their ansible.cfg
    has_pipelining = False
    # Some become_methods may work in v2 (sudo works for other chroot-based
    # plugins while su seems to be failing). If some work, check chroot.py to
    # see how to disable just some methods.
    become_methods = frozenset()

    def __init__(self, play_context, new_stdin, *args, **kwargs):
        """Validate prerequisites (root, jls/jexec, jail exists) up front."""
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)

        self.jail = self._play_context.remote_addr

        # jexec can only enter a jail as root.
        if os.geteuid() != 0:
            raise AnsibleError("jail connection requires running as root")

        self.jls_cmd = self._search_executable('jls')
        self.jexec_cmd = self._search_executable('jexec')

        if self.jail not in self.list_jails():
            raise AnsibleError("incorrect jail name %s" % self.jail)

    @staticmethod
    def _search_executable(executable):
        """Return the absolute path of *executable* or raise AnsibleError.

        BUGFIX: the '%' was previously applied outside the AnsibleError
        call (``raise AnsibleError("...") % executable``), so a missing
        executable surfaced as a TypeError instead of the intended message.
        """
        cmd = distutils.spawn.find_executable(executable)
        if not cmd:
            raise AnsibleError("%s command not found in PATH" % executable)
        return cmd

    def list_jails(self):
        """Return the names of all running jails (as reported by jls)."""
        p = subprocess.Popen([self.jls_cmd, '-q', 'name'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        return stdout.split()

    def get_jail_path(self):
        """Return the root filesystem path of the jail."""
        p = subprocess.Popen([self.jls_cmd, '-j', to_bytes(self.jail), '-q', 'path'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        # remove \n
        return stdout[:-1]

    def _connect(self):
        ''' connect to the jail; nothing to do here '''
        super(Connection, self)._connect()
        if not self._connected:
            display.vvv("THIS IS A LOCAL JAIL DIR", host=self.jail)
            self._connected = True

    def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
        ''' run a command on the jail.  This is only needed for implementing
        put_file() get_file() so that we don't have to read the whole file
        into memory.

        compared to exec_command() it looses some niceties like being able to
        return the process's exit code immediately.
        '''
        executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'
        local_cmd = [self.jexec_cmd, self.jail, executable, '-c', cmd]

        display.vvv("EXEC %s" % (local_cmd,), host=self.jail)
        local_cmd = map(to_bytes, local_cmd)
        p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        return p

    def exec_command(self, cmd, in_data=None, sudoable=False):
        ''' run a command on the jail '''
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        # TODO: Check whether we can send the command to stdin via
        # p.communicate(in_data)
        # If we can, then we can change this plugin to has_pipelining=True and
        # remove the error if in_data is given.
        if in_data:
            raise AnsibleError("Internal Error: this module does not support optimized module pipelining")

        p = self._buffered_exec_command(cmd)

        stdout, stderr = p.communicate(in_data)
        return (p.returncode, stdout, stderr)

    def _prefix_login_path(self, remote_path):
        ''' Make sure that we put files into a standard path

            If a path is relative, then we need to choose where to put it.
            ssh chooses $HOME but we aren't guaranteed that a home dir will
            exist in any given chroot.  So for now we're choosing "/" instead.
            This also happens to be the former default.

            Can revisit using $HOME instead if it's a problem
        '''
        if not remote_path.startswith(os.path.sep):
            remote_path = os.path.join(os.path.sep, remote_path)
        return os.path.normpath(remote_path)

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to jail '''
        super(Connection, self).put_file(in_path, out_path)
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail)

        out_path = pipes.quote(self._prefix_login_path(out_path))
        try:
            with open(in_path, 'rb') as in_file:
                try:
                    p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), stdin=in_file)
                except OSError:
                    raise AnsibleError("jail connection requires dd command in the jail")
                try:
                    stdout, stderr = p.communicate()
                # Narrowed from a bare 'except:' so KeyboardInterrupt and
                # SystemExit are no longer swallowed and re-labelled.
                except Exception:
                    traceback.print_exc()
                    raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
                if p.returncode != 0:
                    raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
        except IOError:
            raise AnsibleError("file or module does not exist at: %s" % in_path)

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from jail to local '''
        super(Connection, self).fetch_file(in_path, out_path)
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail)

        in_path = pipes.quote(self._prefix_login_path(in_path))
        try:
            p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
        except OSError:
            raise AnsibleError("jail connection requires dd command in the jail")

        with open(out_path, 'wb+') as out_file:
            try:
                chunk = p.stdout.read(BUFSIZE)
                while chunk:
                    out_file.write(chunk)
                    chunk = p.stdout.read(BUFSIZE)
            # Narrowed from a bare 'except:' (see put_file).
            except Exception:
                traceback.print_exc()
                raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
            stdout, stderr = p.communicate()
            if p.returncode != 0:
                raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))

    def close(self):
        ''' terminate the connection; nothing to do here '''
        super(Connection, self).close()
        self._connected = False
|
gpl-3.0
|
h2oai/h2o-2
|
py/testdir_single_jvm/test_speedrf_params_rand2.py
|
9
|
3800
|
import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_rf, h2o_import as h2i, h2o_util
# Pool of candidate values for each SpeeDRF parameter; the test picks a
# random subset per trial via h2o_util.pickRandParams.  A value of None
# means "omit the parameter / use the server default".
paramDict = {
    # 2 new
    'destination_key': ['model_keyA', '012345', '__hello'],
    'cols': [None, None, None, None, None, '0,1,2,3,4,5,6,7,8','C1,C2,C3,C4,C5,C6,C7,C8'],
    # exclusion handled below, otherwise exception:
    # ...Arguments 'cols', 'ignored_cols_by_name', and 'ignored_cols' are exclusive
    'ignored_cols_by_name': [None, None, None, None, 'C1','C2','C3','C4','C5','C6','C7','C8','C9'],
    # probably can't deal with mixtures of cols and ignore, so just use cols for now
    # could handle exclusion below
    # 'ignored_cols': [None, None, None, None, None, '0,1,2,3,4,5,6,7,8','C1,C2,C3,C4,C5,C6,C7,C8'],
    'n_folds': [None, 2, 5], # has to be >= 2?
    'keep_cross_validation_splits': [None, 0, 1],
    # 'classification': [None, 0, 1],
    # doesn't support regression yet
    'classification': [None, 1],
    'balance_classes': [None, 0, 1],
    # never run with unconstrained balance_classes size if random sets balance_classes..too slow
    'max_after_balance_size': [.1, 1, 2],
    'oobee': [None, 0, 1],
    'sampling_strategy': [None, 'RANDOM'],
    'select_stat_type': [None, 'ENTROPY', 'GINI'],
    'response': [54, 'C55'], # equivalent. None is not legal
    'validation': [None, 'covtype.data.hex'],
    'ntrees': [1], # just do one tree
    'importance': [None, 0, 1],
    'max_depth': [None, 1,10,20,100],
    'nbins': [None,5,10,100,1000],
    'sample_rate': [None,0.20,0.40,0.60,0.80,0.90],
    'seed': [None,'0','1','11111','19823134','1231231'],
    # Can't have more mtries than cols..force to 4 if cols is not None?
    'mtries': [1,3,5,7],
}
class Basic(unittest.TestCase):
    """Randomized-parameter smoke test for SpeeDRF on the covtype dataset."""

    def tearDown(self):
        # Fail the test if H2O logged any errors during the trial.
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        # Seed once per class so a failing random parameter combination can
        # be reproduced from the logged seed.
        global SEED
        SEED = h2o.setup_random_seed()
        h2o.init(java_heap_GB=10)

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()

    def test_speedrf_params_rand2_fvec(self):
        csvPathname = 'standard/covtype.data'
        hex_key = 'covtype.data.hex'
        for trial in range(10):
            # params is mutable. This is default.
            # response is required for SpeeERF
            params = {
                'response': 'C55',
                'ntrees': 1, 'mtries': 7,
                'balance_classes': 0,
                # never run with unconstrained balance_classes size if random sets balance_classes..too slow
                'max_after_balance_size': 2,
                'importance': 0}
            # Overlay a random selection from paramDict onto the defaults.
            colX = h2o_util.pickRandParams(paramDict, params)
            if 'cols' in params and params['cols']:
                # exclusion: 'cols' and 'ignored_cols_by_name' are mutually
                # exclusive server-side, so drop the latter.
                if 'ignored_cols_by_name' in params:
                    params['ignored_cols_by_name'] = None
            else:
                # mtries must not exceed the number of usable columns
                # (one fewer when a column is ignored).
                if 'ignored_cols_by_name' in params and params['ignored_cols_by_name']:
                    params['mtries'] = random.randint(1,53)
                else:
                    params['mtries'] = random.randint(1,54)
            kwargs = params.copy()
            # adjust timeoutSecs with the number of trees
            timeoutSecs = 80 + ((kwargs['ntrees']*80) * max(1,kwargs['mtries']/60) )
            start = time.time()
            parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='put', hex_key=hex_key)
            h2o_cmd.runSpeeDRF(parseResult=parseResult, timeoutSecs=timeoutSecs, retryDelaySecs=1, **kwargs)
            elapsed = time.time()-start
            print "Trial #", trial, "completed in", elapsed, "seconds.", "%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    h2o.unit_main()
|
apache-2.0
|
tjanez/ansible
|
test/units/parsing/test_splitter.py
|
181
|
3734
|
# coding: utf-8
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.parsing.splitter import split_args, parse_kv
import pytest
# Shared fixture: each entry is a triple of
#   (input string, expected split_args() result, expected parse_kv() result).
SPLIT_DATA = (
    (u'a',
     [u'a'],
     {u'_raw_params': u'a'}),
    (u'a=b',
     [u'a=b'],
     {u'a': u'b'}),
    (u'a="foo bar"',
     [u'a="foo bar"'],
     {u'a': u'foo bar'}),
    (u'"foo bar baz"',
     [u'"foo bar baz"'],
     {u'_raw_params': '"foo bar baz"'}),
    (u'foo bar baz',
     [u'foo', u'bar', u'baz'],
     {u'_raw_params': u'foo bar baz'}),
    (u'a=b c="foo bar"',
     [u'a=b', u'c="foo bar"'],
     {u'a': u'b', u'c': u'foo bar'}),
    (u'a="echo \\"hello world\\"" b=bar',
     [u'a="echo \\"hello world\\""', u'b=bar'],
     {u'a': u'echo "hello world"', u'b': u'bar'}),
    (u'a="multi\nline"',
     [u'a="multi\nline"'],
     {u'a': u'multi\nline'}),
    (u'a="blank\n\nline"',
     [u'a="blank\n\nline"'],
     {u'a': u'blank\n\nline'}),
    (u'a="blank\n\n\nlines"',
     [u'a="blank\n\n\nlines"'],
     {u'a': u'blank\n\n\nlines'}),
    (u'a="a long\nmessage\\\nabout a thing\n"',
     [u'a="a long\nmessage\\\nabout a thing\n"'],
     {u'a': u'a long\nmessage\\\nabout a thing\n'}),
    (u'a="multiline\nmessage1\\\n" b="multiline\nmessage2\\\n"',
     [u'a="multiline\nmessage1\\\n"', u'b="multiline\nmessage2\\\n"'],
     {u'a': 'multiline\nmessage1\\\n', u'b': u'multiline\nmessage2\\\n'}),
    (u'a={{jinja}}',
     [u'a={{jinja}}'],
     {u'a': u'{{jinja}}'}),
    (u'a={{ jinja }}',
     [u'a={{ jinja }}'],
     {u'a': u'{{ jinja }}'}),
    (u'a="{{jinja}}"',
     [u'a="{{jinja}}"'],
     {u'a': u'{{jinja}}'}),
    (u'a={{ jinja }}{{jinja2}}',
     [u'a={{ jinja }}{{jinja2}}'],
     {u'a': u'{{ jinja }}{{jinja2}}'}),
    (u'a="{{ jinja }}{{jinja2}}"',
     [u'a="{{ jinja }}{{jinja2}}"'],
     {u'a': u'{{ jinja }}{{jinja2}}'}),
    (u'a={{jinja}} b={{jinja2}}',
     [u'a={{jinja}}', u'b={{jinja2}}'],
     {u'a': u'{{jinja}}', u'b': u'{{jinja2}}'}),
    (u'a="{{jinja}}\n" b="{{jinja2}}\n"',
     [u'a="{{jinja}}\n"', u'b="{{jinja2}}\n"'],
     {u'a': u'{{jinja}}\n', u'b': u'{{jinja2}}\n'}),
    (u'a="café eñyei"',
     [u'a="café eñyei"'],
     {u'a': u'café eñyei'}),
    (u'a=café b=eñyei',
     [u'a=café', u'b=eñyei'],
     {u'a': u'café', u'b': u'eñyei'}),
    (u'a={{ foo | some_filter(\' \', " ") }} b=bar',
     [u'a={{ foo | some_filter(\' \', " ") }}', u'b=bar'],
     {u'a': u'{{ foo | some_filter(\' \', " ") }}', u'b': u'bar'}),
)

# Per-function views of the fixture: (input, expected) pairs for each test.
SPLIT_ARGS = ((test[0], test[1]) for test in SPLIT_DATA)
PARSE_KV = ((test[0], test[2]) for test in SPLIT_DATA)
@pytest.mark.parametrize("args, expected", SPLIT_ARGS)
def test_split_args(args, expected):
    """split_args must tokenize each fixture input as expected."""
    actual = split_args(args)
    assert actual == expected
@pytest.mark.parametrize("args, expected", PARSE_KV)
def test_parse_kv(args, expected):
    """parse_kv must produce the expected key/value mapping."""
    actual = parse_kv(args)
    assert actual == expected
|
gpl-3.0
|
samliu/servo
|
tests/wpt/css-tests/css-text-decor-3_dev/xhtml1print/support/generate-text-emphasis-position-property-tests.py
|
841
|
3343
|
#!/usr/bin/env python
# - * - coding: UTF-8 - * -
"""
This script generates tests text-emphasis-position-property-001 ~ 006
which cover all possible values of text-emphasis-position property with
all combination of three main writing modes and two orientations. Only
test files are generated by this script. It also outputs a list of all
tests it generated in the format of Mozilla reftest.list to the stdout.
"""
from __future__ import unicode_literals
import itertools
# File-name patterns for generated tests and their shared references.
TEST_FILE = 'text-emphasis-position-property-{:03}{}.html'
REF_FILE = 'text-emphasis-position-property-{:03}-ref.html'
# HTML skeleton for one generated test; filled in by write_test_file().
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Test: text-emphasis-position: {value}, {title}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-decor-3/#text-emphasis-position-property">
<meta name="assert" content="'text-emphasis-position: {value}' with 'writing-mode: {wm}' puts emphasis marks {position} the text.">
<link rel="match" href="text-emphasis-position-property-{index:03}-ref.html">
<p>Pass if the emphasis marks are {position} the text below:</p>
<div style="line-height: 5; text-emphasis: circle; writing-mode: {wm}; text-orientation: {orient}; text-emphasis-position: {value}">試験テスト</div>
'''
# Suffixes appended to a test index when several tests share one reference.
SUFFIXES = ['', 'a', 'b', 'c', 'd', 'e', 'f', 'g']
WRITING_MODES = ["horizontal-tb", "vertical-rl", "vertical-lr"]
POSITION_HORIZONTAL = ["over", "under"]
POSITION_VERTICAL = ["right", "left"]
# Map from the effective position keyword to the reference-file index.
REF_MAP_MIXED = { "over": 1, "under": 2, "right": 3, "left": 4 }
REF_MAP_SIDEWAYS = { "right": 5, "left": 6 }
# Human-readable phrasing of each position, used in test prose.
POSITION_TEXT = { "over": "over", "under": "under",
                  "right": "to the right of", "left": "to the left of" }
# One suffix iterator per reference index (6 references total).
suffixes = [iter(SUFFIXES) for i in range(6)]
# Accumulates "== test ref" lines, printed as a reftest manifest at the end.
reftest_items = []
def write_file(filename, content):
    """Write *content* to *filename*, encoded as UTF-8."""
    encoded = content.encode('UTF-8')
    with open(filename, 'wb') as handle:
        handle.write(encoded)
def write_test_file(idx, suffix, wm, orient, value, position):
    """Emit one generated test file and record its reftest manifest entry."""
    name = TEST_FILE.format(idx, suffix)
    title = wm if orient == "mixed" else "{}, {}".format(wm, orient)
    markup = TEST_TEMPLATE.format(
        value=value, wm=wm, orient=orient, index=idx, position=position,
        title=title)
    write_file(name, markup)
    reftest_items.append("== {} {}".format(name, REF_FILE.format(idx)))
def write_test_files(wm, orient, pos1, pos2):
    """Emit both value orderings ('pos1 pos2' and 'pos2 pos1') for one combo."""
    ref_map = REF_MAP_MIXED if orient == "mixed" else REF_MAP_SIDEWAYS
    idx = ref_map[pos1]
    position = POSITION_TEXT[pos1]
    suffix_iter = suffixes[idx - 1]
    for value in (pos1 + " " + pos2, pos2 + " " + pos1):
        write_test_file(idx, next(suffix_iter), wm, orient, value, position)
# Generate tests for every writing mode.  Only the axis matching the writing
# mode is "effective"; the other axis keyword is present but ignored.
for wm in WRITING_MODES:
    if wm == "horizontal-tb":
        effective_pos = POSITION_HORIZONTAL
        ineffective_pos = POSITION_VERTICAL
    else:
        effective_pos = POSITION_VERTICAL
        ineffective_pos = POSITION_HORIZONTAL
    for pos1, pos2 in itertools.product(effective_pos, ineffective_pos):
        write_test_files(wm, "mixed", pos1, pos2)
        # Sideways orientation only applies to vertical writing modes.
        if wm != "horizontal-tb":
            write_test_files(wm, "sideways", pos1, pos2)

# Print the collected manifest in Mozilla reftest.list format.
print("# START tests from {}".format(__file__))
reftest_items.sort()
for item in reftest_items:
    print(item)
print("# END tests from {}".format(__file__))
|
mpl-2.0
|
NetApp/cinder
|
cinder/tests/unit/api/contrib/test_snapshot_unmanage.py
|
6
|
4940
|
# Copyright (c) 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
import webob
from cinder import context
from cinder import exception
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
# IDs of the fake snapshots used by these tests: one that the stubbed
# get_snapshot recognizes, and one that triggers SnapshotNotFound.
snapshot_id = fake.SNAPSHOT_ID
bad_snp_id = fake.WILL_NOT_BE_FOUND_ID
def app():
    """Build a minimal WSGI app exposing the v2 API with no auth middleware.

    The request's environ['cinder.context'] passes straight through.
    """
    mapper = fakes.urlmap.URLMap()
    mapper['/v2'] = fakes.router.APIRouter()
    return mapper
def api_snapshot_get(self, context, snp_id):
    """Replacement for cinder.volume.api.API.get_snapshot.

    Returns a fake snapshot object when *snp_id* matches the known fake ID,
    and raises SnapshotNotFound for any other ID.
    """
    if snp_id != snapshot_id:
        raise exception.SnapshotNotFound(snapshot_id=snp_id)
    values = {'id': fake.SNAPSHOT_ID,
              'progress': '100%',
              'volume_id': fake.VOLUME_ID,
              'project_id': fake.PROJECT_ID,
              'status': fields.SnapshotStatus.AVAILABLE}
    return fake_snapshot.fake_snapshot_obj(context, **values)
@mock.patch('cinder.volume.api.API.get_snapshot', api_snapshot_get)
class SnapshotUnmanageTest(test.TestCase):
    """Test cases for cinder/api/contrib/snapshot_unmanage.py

    The API extension adds an action to snapshots, "os-unmanage", which will
    effectively issue a delete operation on the snapshot, but with a flag set
    that means that a different method will be invoked on the driver, so that
    the snapshot is not actually deleted in the storage backend.

    In this set of test cases, we are ensuring that the code correctly parses
    the request structure and raises the correct exceptions when things are not
    right, and calls down into cinder.volume.api.API.delete_snapshot with the
    correct arguments.
    """
    def _get_resp(self, snapshot_id):
        """Helper to build an os-unmanage req for the specified snapshot_id."""
        req = webob.Request.blank('/v2/%s/snapshots/%s/action' % (
            fake.PROJECT_ID, snapshot_id))
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        # Run as an admin context so the action is authorized.
        req.environ['cinder.context'] = context.RequestContext(fake.USER_ID,
                                                               fake.PROJECT_ID,
                                                               True)
        body = {'os-unmanage': ''}
        req.body = jsonutils.dump_as_bytes(body)
        res = req.get_response(app())
        return res

    @mock.patch('cinder.db.conditional_update', return_value=1)
    @mock.patch('cinder.db.snapshot_update')
    @mock.patch('cinder.objects.Volume.get_by_id')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_snapshot')
    def test_unmanage_snapshot_ok(self, mock_rpcapi, mock_volume_get_by_id,
                                  mock_db_update, mock_conditional_update):
        """Return success for valid and unattached volume."""
        ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
        volume = fake_volume.fake_volume_obj(ctxt, id=fake.VOLUME_ID)
        mock_volume_get_by_id.return_value = volume
        res = self._get_resp(snapshot_id)

        # The volume lookup must happen once, with (context, volume_id).
        self.assertEqual(1, mock_volume_get_by_id.call_count)
        self.assertEqual(2, len(mock_volume_get_by_id.call_args[0]),
                         mock_volume_get_by_id.call_args)
        self.assertEqual(fake.VOLUME_ID,
                         mock_volume_get_by_id.call_args[0][1])

        # delete_snapshot must be invoked once with unmanage_only=True so the
        # backend data is preserved.
        self.assertEqual(1, mock_rpcapi.call_count)
        self.assertEqual(3, len(mock_rpcapi.call_args[0]))
        self.assertEqual(1, len(mock_rpcapi.call_args[1]))
        self.assertTrue(mock_rpcapi.call_args[1]['unmanage_only'])

        self.assertEqual(202, res.status_int, res)

    def test_unmanage_snapshot_bad_snapshot_id(self):
        """Return 404 if the volume does not exist."""
        res = self._get_resp(bad_snp_id)
        self.assertEqual(404, res.status_int, res)
|
apache-2.0
|
MagicStack/MagicPython
|
test/statements/for1.py
|
1
|
1062
|
for a, b, c in b:
pass
else:
1/0
for : keyword.control.flow.python, source.python
: source.python
a : source.python
, : punctuation.separator.element.python, source.python
: source.python
b : source.python
, : punctuation.separator.element.python, source.python
: source.python
c : source.python
: source.python
in : keyword.control.flow.python, source.python
: source.python
b : source.python
: : punctuation.separator.colon.python, source.python
: source.python
pass : keyword.control.flow.python, source.python
else : keyword.control.flow.python, source.python
: : punctuation.separator.colon.python, source.python
: source.python
1 : constant.numeric.dec.python, source.python
/ : keyword.operator.arithmetic.python, source.python
0 : constant.numeric.dec.python, source.python
|
mit
|
michael-pacheco/dota2-predictor
|
visualizing/dataset_stats.py
|
2
|
3746
|
import numpy as np
from tools.metadata import get_hero_dict
import operator
import pandas as pd
import plotly.graph_objs as go
import plotly.plotly as py
def winrate_statistics(dataset_df, mmr_info):
    """Plot per-hero win rates as a horizontal bar chart on plot.ly.

    dataset_df: (x_data, y_data) pair; each row of x_data appears to be a
    228-long indicator vector (first 114 slots = one team's hero picks,
    last 114 = the other team's) -- TODO confirm against the data loader.
    y_data: per-game outcome label (1/0).
    mmr_info: str label used in the chart title and output filename.
    """
    x_data, y_data = dataset_df
    wins = np.zeros(114)
    games = np.zeros(114)
    winrate = np.zeros(114)
    for idx, game in enumerate(x_data):
        for i in range(228):
            if game[i] == 1:
                # Hero i % 114 took part in this game.
                games[i % 114] += 1
                # NOTE(review): y == 1 appears to mean "first team won"
                # (slots < 114) and y == 0 "second team won" -- confirm.
                if y_data[idx] == 1:
                    if i < 114:
                        wins[i] += 1
                else:
                    if i >= 114:
                        wins[i - 114] += 1
    # Element-wise division; heroes with zero games produce NaN here.
    winrate = wins / games
    winrate_dict = dict()
    hero_dict = get_hero_dict()
    for i in range(114):
        # Index 23 is skipped -- presumably an unused hero ID; confirm.
        if i != 23:
            winrate_dict[hero_dict[i + 1]] = winrate[i]
    # Sort ascending by win rate so the bar chart is ordered.
    sorted_winrates = sorted(winrate_dict.items(), key=operator.itemgetter(1))
    x_plot_data = [x[0] for x in sorted_winrates]
    y_plot_data = [x[1] for x in sorted_winrates]
    title = 'Hero winrates at ' + mmr_info + ' MMR'
    data = [go.Bar(
        y=x_plot_data,
        x=y_plot_data,
        orientation='h'
    )]
    layout = go.Layout(
        title=title,
        width=1000,
        height=1400,
        yaxis=dict(title='hero',
                   ticks='',
                   nticks=114,
                   tickfont=dict(
                       size=8,
                       color='black')
                   ),
        xaxis=dict(title='win rate',
                   nticks=30,
                   tickfont=dict(
                       size=10,
                       color='black')
                   )
    )
    fig = go.Figure(data=data, layout=layout)
    py.iplot(fig, filename='hero_winrates_' + mmr_info)
def pick_statistics(dataset_df, mmr_info):
    """Plot per-hero pick rates (as percentages) as a horizontal bar chart.

    dataset_df: (x_data, y_data) pair; each row of x_data appears to be a
    228-long indicator vector of both teams' hero picks -- TODO confirm
    against the data loader.
    mmr_info: str label used in the chart title and output filename.
    """
    x_data, y_data = dataset_df
    games = np.zeros(114)
    # Count appearances per hero.  (The original code also tallied wins
    # here, but they were never used for pick rates -- removed as dead code.)
    for game in x_data:
        for i in range(228):
            if game[i] == 1:
                games[i % 114] += 1
    # Fraction of all hero slots occupied by each hero.
    pick_rate = games / np.sum(games)
    pick_rate_dict = dict()
    hero_dict = get_hero_dict()
    for i in range(114):
        # Index 23 is skipped -- presumably an unused hero ID; confirm.
        if i != 23:
            pick_rate_dict[hero_dict[i + 1]] = pick_rate[i]
    # Sort ascending by pick rate so the bar chart is ordered.
    sorted_pickrates = sorted(pick_rate_dict.items(), key=operator.itemgetter(1))
    x_plot_data = [x[0] for x in sorted_pickrates]
    y_plot_data = [x[1] for x in sorted_pickrates]
    title = 'Hero pick rates at ' + mmr_info + ' MMR'
    data = [go.Bar(
        y=x_plot_data,
        # BUGFIX: 'y_plot_data * 100' replicated the Python *list* 100
        # times (list repetition) instead of converting rates to percent.
        x=[rate * 100 for rate in y_plot_data],
        orientation='h'
    )]
    layout = go.Layout(
        title=title,
        width=1000,
        height=1400,
        yaxis=dict(title='hero',
                   ticks='',
                   nticks=114,
                   tickfont=dict(
                       size=8,
                       color='black')
                   ),
        xaxis=dict(title='pick rate',
                   nticks=30,
                   tickfont=dict(
                       size=10,
                       color='black')
                   )
    )
    fig = go.Figure(data=data, layout=layout)
    py.iplot(fig, filename='hero_pickrates_' + mmr_info)
def mmr_distribution(csv_file):
    """Plot a histogram of average match MMR for the first 30k games.

    csv_file: path to a CSV with an 'avg_mmr' column.
    """
    games_df = pd.read_csv(csv_file)
    sample = games_df[:30000]['avg_mmr']
    fig = go.Figure(
        data=[go.Histogram(x=sample)],
        layout=go.Layout(title='MMR distribution (sample of 30k games)'))
    py.iplot(fig, filename='MMR_distribution')
|
mit
|
Aloomaio/googleads-python-lib
|
examples/ad_manager/v201808/user_service/get_user_by_email_address.py
|
1
|
1937
|
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets users by email.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
# Placeholder email address used to look up users; replace with a real
# address before running this example.
EMAIL_ADDRESS = 'INSERT_EMAIL_ADDRESS_HERE'
def main(client, email_address):
  """Print every Ad Manager user whose email matches ``email_address``.

  Args:
    client: an initialized ad_manager.AdManagerClient.
    email_address: the email address to bind into the PQL filter.
  """
  # Initialize appropriate service.
  user_service = client.GetService('UserService', version='v201808')
  # Create a statement to select users.
  statement = (ad_manager.StatementBuilder(version='v201808')
               .Where('email = :email')
               .WithBindVariable('email', email_address))
  # Retrieve a small amount of users at a time, paging
  # through until all users have been retrieved.
  while True:
    response = user_service.getUsersByStatement(statement.ToStatement())
    if 'results' in response and len(response['results']):
      for user in response['results']:
        # Print out some information for each user.
        print('User with ID "%d" and name "%s" was found.\n' % (user['id'],
                                                                user['name']))
      statement.offset += statement.limit
    else:
      break
  # BUG FIX: this line used the Python 2 print *statement*, which is a
  # syntax error under Python 3 even though the loop above already uses
  # the print() function. Converted to a function call.
  print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
  # Initialize client object.
  # LoadFromStorage reads credentials/network settings from the default
  # googleads.yaml configuration file.
  ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
  main(ad_manager_client, EMAIL_ADDRESS)
|
apache-2.0
|
beeftornado/sentry
|
tests/sentry/similarity/backends/base.py
|
3
|
8478
|
from __future__ import absolute_import
import abc
class MinHashIndexBackendTestMixin(object):
    """Reusable behavioural test suite for MinHash similarity index backends.

    Concrete test classes mix this in alongside a test-case base class and
    supply the backend under test through the abstract ``index`` property.
    """

    # NOTE(review): ``__meta__`` is not a recognized special attribute — the
    # Python 2 spelling is ``__metaclass__`` — so ABCMeta is not actually
    # applied and the abstract members below are not enforced at
    # instantiation time. TODO confirm whether enforcement was intended;
    # fixing it could break subclasses that rely on lax instantiation.
    __meta__ = abc.ABCMeta

    @abc.abstractproperty
    def index(self):
        # The MinHash index backend under test. Based on usage below it must
        # expose record/compare/classify/merge/delete/flush and a ``bands``
        # attribute giving the number of LSH bands (the exact-match threshold).
        pass

    def test_basic(self):
        """Exercise record, compare, classify and delete on a single index."""
        self.index.record("example", "1", [("index", "hello world")])
        self.index.record("example", "2", [("index", "hello world")])
        self.index.record("example", "3", [("index", "jello world")])
        self.index.record("example", "4", [("index", "yellow world"), ("index", "mellow world")])
        self.index.record("example", "5", [("index", "pizza world")])
        # comparison, without thresholding
        results = self.index.compare("example", "1", [("index", 0)])
        assert results[0] == ("1", [1.0])
        assert results[1] == ("2", [1.0])  # identical contents
        assert results[2][0] in ("3", "4")  # equidistant pairs, order doesn't really matter
        assert results[3][0] in ("3", "4")
        assert results[4][0] == "5"
        # comparison, low threshold
        results = self.index.compare("example", "1", [("index", 6)])
        assert len(results) == 4
        assert results[0] == ("1", [1.0])
        assert results[1] == ("2", [1.0])  # identical contents
        assert results[2][0] in ("3", "4")  # equidistant pairs, order doesn't really matter
        assert results[3][0] in ("3", "4")
        # comparison, high threshold (exact match)
        results = self.index.compare("example", "1", [("index", self.index.bands)])
        assert len(results) == 2
        assert results[0] == ("1", [1.0])
        assert results[1] == ("2", [1.0])  # identical contents
        # comparison, candidate limit (with lexicographical collision sort)
        results = self.index.compare("example", "1", [("index", 0)], limit=1)
        assert len(results) == 1
        assert results[0] == ("1", [1.0])
        # classification, without thresholding
        results = self.index.classify("example", [("index", 0, "hello world")])
        assert results[0:2] == [("1", [1.0]), ("2", [1.0])]
        assert results[2][0] in ("3", "4")  # equidistant pairs, order doesn't really matter
        assert results[3][0] in ("3", "4")
        assert results[4][0] == "5"
        # classification, low threshold
        results = self.index.classify("example", [("index", 6, "hello world")])
        assert len(results) == 4
        assert results[0] == ("1", [1.0])
        assert results[1] == ("2", [1.0])  # identical contents
        assert results[2][0] in ("3", "4")  # equidistant pairs, order doesn't really matter
        assert results[3][0] in ("3", "4")
        # classification, high threshold (exact match)
        results = self.index.classify("example", [("index", self.index.bands, "hello world")])
        assert len(results) == 2
        assert results[0] == ("1", [1.0])
        assert results[1] == ("2", [1.0])  # identical contents
        # classification, candidate limit (with lexicographical collision sort)
        results = self.index.classify("example", [("index", 0, "hello world")], limit=1)
        assert len(results) == 1
        assert results[0] == ("1", [1.0])
        self.index.delete("example", [("index", "3")])
        assert [key for key, _ in self.index.compare("example", "1", [("index", 0)])] == [
            "1",
            "2",
            "4",
            "5",
        ]

    def test_multiple_index(self):
        """Exercise compare/classify across two named sub-indices at once."""
        self.index.record("example", "1", [("index:a", "hello world"), ("index:b", "hello world")])
        self.index.record("example", "2", [("index:a", "hello world"), ("index:b", "hello world")])
        self.index.record("example", "3", [("index:a", "hello world"), ("index:b", "pizza world")])
        self.index.record("example", "4", [("index:a", "hello world")])
        self.index.record("example", "5", [("index:b", "hello world")])
        # comparison, without thresholding
        results = self.index.compare("example", "1", [("index:a", 0), ("index:b", 0)])
        assert len(results) == 5
        assert results[:2] == [("1", [1.0, 1.0]), ("2", [1.0, 1.0])]
        assert results[2][0] == "3"
        assert results[2][1][0] == 1.0
        assert results[3] == ("4", [1.0, 0.0])
        assert results[4] == ("5", [0.0, 1.0])
        # comparison, candidate limit (with lexicographical collision sort)
        results = self.index.compare("example", "1", [("index:a", 0), ("index:b", 0)], limit=4)
        assert len(results) == 4
        assert results[:2] == [("1", [1.0, 1.0]), ("2", [1.0, 1.0])]
        assert results[2][0] == "3"
        assert results[2][1][0] == 1.0
        assert results[3] == ("4", [1.0, 0.0])
        # classification, without thresholding
        results = self.index.classify(
            "example", [("index:a", 0, "hello world"), ("index:b", 0, "hello world")]
        )
        assert len(results) == 5
        assert results[:2] == [("1", [1.0, 1.0]), ("2", [1.0, 1.0])]
        assert results[2][0] == "3"
        assert results[2][1][0] == 1.0
        assert results[3] == ("4", [1.0, 0.0])
        assert results[4] == ("5", [0.0, 1.0])
        # classification, with thresholding (low)
        results = self.index.classify(
            "example",
            [
                ("index:a", self.index.bands, "pizza world"),  # no direct hits
                ("index:b", 8, "pizza world"),  # one direct hit
            ],
        )
        assert len(results) == 1
        assert results[0][0] == "3"
        # this should have a value since it's similar even thought it was not
        # considered as a candidate for this index
        assert results[0][1][0] > 0
        assert results[0][1][1] == 1.0
        # classification, with thresholding (high)
        results = self.index.classify(
            "example",
            [
                ("index:a", self.index.bands, "pizza world"),  # no direct hits
                ("index:b", self.index.bands, "hello world"),  # 3 direct hits
            ],
        )
        assert len(results) == 3
        assert results[0][0] == "1"  # tie btw first 2 items is broken by lex sort
        assert results[0][1][0] > 0
        assert results[0][1][1] == 1.0
        assert results[1][0] == "2"
        assert results[1][1][0] > 0
        assert results[1][1][1] == 1.0
        assert results[2] == ("5", [0.0, 1.0])
        # classification, candidate limit (with lexicographical collision sort)
        results = self.index.classify(
            "example", [("index:a", 0, "hello world"), ("index:b", 0, "hello world")], limit=4
        )
        assert len(results) == 4
        assert results[:2] == [("1", [1.0, 1.0]), ("2", [1.0, 1.0])]
        assert results[2][0] == "3"
        assert results[2][1][0] == 1.0
        assert results[3] == ("4", [1.0, 0.0])
        # empty query
        assert (
            self.index.classify("example", [("index:a", 0, "hello world"), ("index:b", 0, "")])
            == self.index.compare("example", "4", [("index:a", 0), ("index:b", 0)])
            == [("4", [1.0, None]), ("1", [1.0, 0.0]), ("2", [1.0, 0.0]), ("3", [1.0, 0.0])]
        )

    def test_merge(self):
        """Merging one key into another combines their signatures."""
        self.index.record("example", "1", [("index", ["foo", "bar"])])
        self.index.record("example", "2", [("index", ["baz"])])
        assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == [("1", [1.0])]
        self.index.merge("example", "1", [("index", "2")])
        assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == [("1", [0.5])]
        # merge into an empty key should act as a move
        self.index.merge("example", "2", [("index", "1")])
        assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == [("2", [0.5])]

    def test_flush_scoped(self):
        """Flushing a specific scope removes its recorded entries."""
        self.index.record("example", "1", [("index", ["foo", "bar"])])
        assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == [("1", [1.0])]
        self.index.flush("example", ["index"])
        assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == []

    def test_flush_unscoped(self):
        """Flushing with the '*' wildcard scope removes entries everywhere."""
        self.index.record("example", "1", [("index", ["foo", "bar"])])
        assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == [("1", [1.0])]
        self.index.flush("*", ["index"])
        assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == []

    @abc.abstractmethod
    def test_export_import(self):
        # Backend-specific: each concrete backend must verify that exported
        # state can be re-imported losslessly.
        pass
|
bsd-3-clause
|
mikma/tmtp
|
standalone/Jeans.py
|
1
|
30844
|
#!/usr/bin/env python
# Jeans.py
# Jeans Foundation #4
# Designer: Helen Joseph-Armstrong
# PatternMaker: Susan Spencer Conklin
#
# This pattern contains a design for a pair of jeans
from tmtpl.constants import *
from tmtpl.pattern import *
from tmtpl.document import *
from tmtpl.client import Client
#Project specific
#from math import sin, cos, radians
from math import sqrt
from pysvg.filter import *
from pysvg.gradient import *
from pysvg.linking import *
from pysvg.script import *
from pysvg.shape import *
from pysvg.structure import *
from pysvg.style import *
from pysvg.text import *
from pysvg.builders import *
class PatternDesign():
def __init__(self):
self.styledefs={}
self.markerdefs={}
return
def pattern(self):
"""
Method defining a pattern design. This is where the designer places
all elements of the design definition
"""
CM=CM_TO_PX
IN=IN_TO_PX
#The following attributes are set before calling this method:
#self.cd - Client Data, which has been loaded from the client data file
#self.styledefs - the style difinition dictionary, loaded from the styles file
#self.markerdefs - the marker definition dictionary
#self.cfg - configuration settings from the main app framework
#TODO - find a way to get this administrative cruft out of this pattern method
cd=self.cd #client data is prefaced with cd.
self.cfg['clientdata']=cd
#TODO - also extract these from this file to somewhere else
printer='36" wide carriage plotter'
if (printer=='36" wide carriage plotter'):
self.cfg['paper_width']=(36 * IN)
self.cfg['border']=(5*CM)#document borders
BORDER=self.cfg['border']
#self.cfg['verbose']=('')#debug statements
BORDER=self.cfg['border']
# 1: womens=W, mens=M, teensgirls=TG, teenboys=TB, girls=G, boys=B, toddlers=T, babies=B, crafts=C
# 2: streetwearable=S, period=P, fantasy=F
# 3: 3digit year: 1870:870, 880, 890,900,910,920,930,940,950,960,970,980,990,000,010
# 4: none=x, Gaming=g, Futuristic=f, Cosplay=c, GothLolita=g, Military=m, BasicCostumes=c
# 5: dress=d, pants=p, jeans=j, shirt/blouse=s, tshirt=t, jacket=j, coat=c, vest=v, hat=h, pjs=p, lingerie=l, swimsuits=s,
# ....maternity=m, accessories=a
# 6: casual=1, elegant=2, day&evening=3, grunge&skate=4, sports=5
# 7: followed by Absolute Number of patterns generated
#TODO - abstract these into configuration file(s)
metainfo={'companyName':'Seamly Patterns', #mandatory
'designerName':'Susan Spencer',#mandatory
'patternName':'Jeans Foundation',#mandatory
'patternNumber':'WS010-xj1-1' #mandatory
}
self.cfg['metainfo']=metainfo
#attributes for the entire svg document
docattrs={'currentscale' : "0.5 : 1",
'fitBoxtoViewport' : "True",
'preserveAspectRatio' : "xMidYMid meet",
}
doc=Document(self.cfg, name='document', attributes=docattrs)
#Set up the Title Block and Test Grid for the top of the document
TB=TitleBlock('notes', 'titleblock', 0, 0, stylename='titleblock_text_style')
doc.add(TB)
TG=TestGrid('notes', 'testgrid', self.cfg['paper_width']/3.0, 0, stylename='cuttingline_style')
doc.add(TG)
# All measurements are in pixels...CM=CM_TO_PX, IN=IN_TO_PX, etc.
#client & pattern measurements
FRONT_WAIST_ARC=(cd.front_waist_arc)
FRONT_ABDOMEN_ARC=(cd.front_abdomen_arc)
FRONT_HIP_ARC=(cd.front_hip_arc)
BACK_WAIST_ARC=(cd.waist_circumference - (2*FRONT_WAIST_ARC))/2.0
BACK_ABDOMEN_ARC=(cd.abdomen_circumference - (2*FRONT_ABDOMEN_ARC))/2.0
BACK_HIP_ARC=(cd.hip_circumference - (2*FRONT_HIP_ARC))/2.0
THIGH_ARC=(cd.thigh_circumference/4.0)
FRONT_RISE=(cd.front_rise)
SIDE_RISE=(cd.side_rise)
BACK_RISE=(cd.back_rise)
print 'Waist =', cd.waist_circumference * PX_TO_IN, cd.waist_circumference * PX_TO_CM
print 'FRONT_WAIST_ARC = ', FRONT_WAIST_ARC* PX_TO_IN, FRONT_WAIST_ARC* PX_TO_CM
print 'BACK_WAIST_ARC =', BACK_WAIST_ARC* PX_TO_IN, BACK_WAIST_ARC* PX_TO_CM
print 'THIGH_ARC =', THIGH_ARC*PX_TO_IN, THIGH_ARC*PX_TO_CM
print 'Hip =', cd.hip_circumference * PX_TO_IN, cd.hip_circumference * PX_TO_CM
print 'FRONT_HIP_ARC =', FRONT_HIP_ARC * PX_TO_IN, FRONT_HIP_ARC * PX_TO_CM
print 'BACK_HIP_ARC =', BACK_HIP_ARC * PX_TO_IN, BACK_HIP_ARC * PX_TO_CM
WAISTLINE=(1.0*IN) # Jeans waist is 1" lower than actual waist
ABDOMENLINE=WAISTLINE + cd.abdomen_height
RISELINE=WAISTLINE + max(cd.front_rise, cd.side_rise, cd.back_rise)
HIPLINE=WAISTLINE + (2/3.0)*(RISELINE)
HEMLINE=WAISTLINE + cd.outside_leg
THIGHLINE=RISELINE + (1.0*IN)
KNEELINE=RISELINE+(abs(HEMLINE-RISELINE)/2.0)-(1.0*IN)
WAISTBAND=(1.0*IN) # Height of Waistband
if ((FRONT_HIP_ARC-FRONT_WAIST_ARC)>= (2.0*IN)):
FRONT_NORMAL_WAIST=1
else:
FRONT_NORMAL_WAIST=0
if ((BACK_HIP_ARC-BACK_WAIST_ARC)>= (2.0*IN)):
BACK_NORMAL_WAIST=1
else:
BACK_NORMAL_WAIST=0
if (FRONT_HIP_ARC - (2*THIGH_ARC)>=(1.0*IN)):
FRONTNORMALTHIGH=1
else:
FRONTNORMALTHIGH=0
if (BACK_HIP_ARC - (2*THIGH_ARC) >= (1.0*IN)):
BACK_NORMAL_THIGH=1
else:
BACK_NORMAL_THIGH=0
#Begin Jeans Pattern Set
jeans=Pattern('jeans')
doc.add(jeans)
jeans.styledefs.update(self.styledefs)
jeans.markerdefs.update(self.markerdefs)
# Jeans Front 'A'
jeans.add(PatternPiece('pattern', 'front', letter='A', fabric=2, interfacing=0, lining=0))
A=jeans.front
ASTART=0.0
AEND=(FRONT_HIP_ARC+((1/8.0)*IN))
AStart=rPoint(A, 'AStart', ASTART, ASTART)
AEnd=rPoint(A, 'AEnd', AEND, ASTART)
AWaist=rPoint(A, 'AWaist', ASTART, WAISTLINE)
AAbdomen=rPoint(A, 'AAbdomen', ASTART, ABDOMENLINE)
AHip=rPoint(A, 'AHip', ASTART, HIPLINE)
ARise=rPoint(A, 'ARise', ASTART, RISELINE)
AThigh=rPoint(A, 'AThigh', ASTART, THIGHLINE)
Ap1=rPoint(A, 'Ap1', AEND, WAISTLINE) # right side of reference grid
Ap5=rPoint(A, 'Ap5', AEND/2.0, WAISTLINE) # dart midpoint
Ap6=rPoint(A, 'Ap6', Ap5.x-(.25*IN), WAISTLINE) #dart outside leg (left on pattern)
Ap7=rPoint(A, 'Ap7', Ap5.x+(.25*IN), WAISTLINE) # dart inside leg (right on pattern)
Ap8=rPoint(A, 'Ap8', Ap5.x, Ap5.y+(2.5*IN)) # dart point
Ap2=rPoint(A, 'Ap2', Ap7.x+(FRONT_WAIST_ARC/2.0), WAISTLINE)
Ap3=rPoint(A, 'Ap3', Ap2.x, WAISTLINE-(0.25)*IN) # center waist
Ap4=rPoint(A, 'Ap4', Ap6.x-(FRONT_WAIST_ARC/2.0), WAISTLINE) # side waist
Ap9=rPoint(A, 'Ap9', AEND, (RISELINE/2.0)) # center front 'pivot' point from crotch curve to front fly
Ap10=rPoint(A, 'Ap10', ASTART, HIPLINE)
Ap11=rPoint(A, 'Ap11', AEND, HIPLINE)
Ap12=rPoint(A, 'Ap12', ASTART, RISELINE)
Ap13=rPoint(A, 'Ap13', AEND, RISELINE)
Ap14=rPointP(A, 'Ap14', pntFromDistanceAndAngleP(Ap13, (1.25*IN), angleFromSlope(1.0, 1.0))) # inside crotch curve point
Ap15=rPoint(A, 'Ap15', Ap13.x+(2.0*IN), RISELINE) # point of crotch
Ap16=rPoint(A, 'Ap16', Ap15.x/2.0, RISELINE) # creaseline point
ACREASELINE=Ap16.x
Ap17=rPoint(A, 'Ap17', Ap16.x, KNEELINE)
Ap18=rPoint(A, 'Ap18', Ap16.x-(4.0*IN), KNEELINE) # outside knee
Ap19=rPoint(A, 'Ap19', Ap16.x+(4.0*IN), KNEELINE) # inside knee
Ap20=rPoint(A, 'Ap20', Ap16.x, HEMLINE)
Ap21=rPoint(A, 'Ap21', Ap20.x-(3.5*IN), HEMLINE) # outside hem
Ap22=rPoint(A, 'Ap22', Ap20.x+(3.5*IN), HEMLINE) # inside hem
Ap23=rPoint(A, 'Ap23', Ap8.x-(FRONT_ABDOMEN_ARC/2.0), ABDOMENLINE)
Ap24=rPoint(A, 'Ap24', Ap8.x+(FRONT_ABDOMEN_ARC/2.0), ABDOMENLINE)
Ap25=rPoint(A, 'Ap25', ACREASELINE - THIGH_ARC, THIGHLINE)
Ap26=rPoint(A, 'Ap26', ACREASELINE + THIGH_ARC, THIGHLINE )
# front waist AW
AW1=rPointP(A,'AW1', Ap3) # front center seam at waist
AW2=rPointP(A, 'AW2', pntIntersectLinesP(Ap3, Ap4, Ap8, Ap7)) # inside dart leg at waist
# calculate dart
DART_LEG_LENGTH=lineLengthP(Ap8, AW2)
angle1=angleP(Ap8, Ap5) # angle of center dart line
angle2=angleP(Ap8, Ap7) # angle of inside dart leg
angle3=angle1 - angle2 # half-angle of dart
angle4=angle1 + angle3 # angle of outside dart leg
angle5=angle1 + (2*angle3) # angle of sewn dart fold, towards side seam
AW4=rPointP(A, 'AW4', pntFromDistanceAndAngleP(Ap8, DART_LEG_LENGTH, angle4)) # outside dart leg at waist
pnt1=pntFromDistanceAndAngleP(Ap8, DART_LEG_LENGTH, angle5) # point along sewn dart fold
pnt2=pntIntersectLinesP(Ap8, pnt1, Ap4, AW4 ) # where sewn dart fold will cross waistline
AW3=rPointP(A, 'AW3', pntOnLineP(Ap8, Ap5, lineLengthP(Ap8, pnt2))) # center dart line at waist
AW5=rPointP(A, 'AW5', Ap4) # side waist
#front waist control points
distance=(lineLengthP(AW4, AW5)/3.0)
cAW5b=cPoint(A, 'cAW5b', AW5.x+distance, AW5.y)
cAW5a=cPointP(A, 'cAW5a', pntOnLineP(AW4, cAW5b, distance))
# front dart AD
AD1=rPointP(A, 'AD1', Ap8) # point of dart
AD2=rPointP(A, 'AD2', pntOffLineP(AW3, Ap8, SEAM_ALLOWANCE)) # center dart line at cuttingline
AD3=rPointP(A, 'AD3', pntIntersectLines(AW4.x, AW4.y-SEAM_ALLOWANCE, AW5.x, AW5.y-SEAM_ALLOWANCE, Ap8.x, Ap8.y, AW4.x, AW4.y)) # outside dart leg
AD4=rPointP(A, 'AD4', pntIntersectLines(AW1.x, AW1.y-SEAM_ALLOWANCE, AW2.x, AW2.y-SEAM_ALLOWANCE, Ap8.x, Ap8.y, AW2.x, AW2.y)) #inside dart leg
# front thigh points
AT1=rPointP(A, 'AT1', Ap25)
AT2=rPointP(A, 'AT2', Ap26)
# front side seam AS
AS1=rPointP(A, 'AS1', Ap10)
AS2=rPointP(A, 'AS2', Ap12)
AS3=rPointP(A, 'AS3', Ap18)
AS4=rPointP(A, 'AS4', Ap21)
# front side seam control points
if (FRONTNORMALTHIGH):
if (FRONT_NORMAL_WAIST):
cAS3b=cPointP(A, 'cAS3b', pntOffLineP(AS3, AS4, (lineLengthP(AS3, AS1)/2.0))) # b/w AS1 & AS3
pnts=pointList(AW5, AS1, AS3)
c1, c2=controlPoints('FrontSideSeam', pnts)
cAS1a=cPoint(A, 'cAS1a', c1[0].x, c1[0].y) #b/w AW5 & AS2
cAS1b=cPoint(A, 'cAS1b', AS1.x, c2[0].y) #b/w AW5 & AS1
cAS3a=cPoint(A, 'cAS3a', AS1.x, c1[1].y) #b/w AS1 & AW5
else:
cAS2a=cPoint(A, 'cAS2a', min(AS2.x, AW5.x), AW5.y+(lineLengthP(AW5, AS2)/3.0)) # waistline slightly less than hipline (ex: 1.25") use AS2 else AW5
cAS3b=cPointP(A, 'cAS3b', pntOffLineP(AS3, AS4, (lineLengthP(AS2, AS3)/3.0))) # b/w AS2 & AS3
pnts=pointList(cAS2a, AS2, cAS3b)
c1, c2=controlPoints('BackSideSeam', pnts)
cAS2b=cPoint(A, 'cAS2b', c2[0].x, c2[0].y) #b/w AW5 & AS2
cAS3a=cPoint(A, 'cAS3a', c1[1].x, c1[1].y) #b/w AS2 & AS3
else:
cAS3b=cPointP(A, 'cAS3b', pntOffLineP(AS3, AS4, (lineLengthP(AS3, AT1)/2.0))) # b/w AS1 & AS3
pnts=pointList(AW5, AS1, AT1, AS3)
c1, c2=controlPoints('FrontSideSeam', pnts)
cAS1a=cPoint(A, 'cAS1a', c1[0].x, c1[0].y) #b/w AW5 & AS2
cAS1b=cPoint(A, 'cAS1b', c2[0].x, c2[0].y) #b/w AW5 & AS1
cAT1a=cPoint(A, 'cAT1a', c1[1].x, c1[1].y) #b/w AS1 & AT1
cAT1b=cPoint(A, 'cAT1b', c2[1].x, c2[1].y) #b/w AS1 & AT1
cAS3a=cPoint(A, 'cAS3a', c1[2].x, c1[2].y) #b/w AS1 & AW5
# front inseam AI
AI1=rPointP(A, 'AI1', Ap22)
AI2=rPointP(A, 'AI2', Ap19)
# crotch point should be at least 1/3rd inch greater than calculated inner thigh point (Ap25)
if (Ap15.x - (0.3*IN) > Ap26.x):
AI3=rPointP(A, 'AI3', Ap15)
else:
AI3=rPoint(A, 'AI3', Ap26.x + (0.3*IN), Ap15.y)
#front inseam control points
cAI3a=cPointP(A, 'cAI3a', pntOffLineP(AI2, AI1, (lineLengthP(AI2, AI3)/2.0))) #b/w AI2 & AI3
cAI3b=cPointP(A, 'cAI3b', pntOnLineP(AI3, cAI3a, (lineLengthP(AI2, AI3)/3.0))) #b/w AI2 & AI3
#front center seam AC
AC1=rPointP(A, 'AC1', Ap14)
if (AW1.x > Ap9.x):
FRONTLARGERWAIST=1
else:
FRONTLARGERWAIST=0
if (FRONT_NORMAL_WAIST):
AC2=rPointP(A, 'AC2', Ap9)
# straight line for upper front center seam, control points for AC1 & AC2 only, with calculated control point cAC2b to smooth into straight line
cAC2b=cPointP(A, 'cAC2b', pntOffLine(AC2.x, AC2.y, AW1.x, AW1.y, (lineLengthP(AC1, AC2)/2.0)))
pnts=pointList(AI3, AC1, cAC2b)
c1, c2=controlPoints('FrontCenterSeam', pnts)
cAC1a=cPoint(A, 'cAC1a', c1[0].x, c1[0].y) #b/w AI3 & AC1
cAC1b=cPoint(A, 'cAC1b', c2[0].x, c2[0].y) #b/w AI3 & AC1
cAC2a=cPoint(A, 'cAC2a', c1[1].x, c1[1].y) #b/w AC1 & AC2
else:
if (FRONTLARGERWAIST):
# curve through AI3,AC2, straight to AW1
# move AC2 point towards center (x)
AC2=rPoint(A, 'AC2', Ap9.x + (abs(AW1.x - Ap9.x)/4.0), Ap9.y)
cAC2b=cPointP(A, 'cAC2b', pntIntersectLinesP(AC2, AW1, AS1, Ap11)) #intersection with Hipline
else:
# curve through AI3, AC2, then straight to AW1
AC2=rPointP(A, 'AC2', Ap9)
cAC2b=cPointP(A, 'cAC2b', pntOffLineP(AC2, AW1, (lineLengthP(AC2, AC1)/3.0)))
cAC2a=cPointP(A, 'cAC2a', pntOnLineP(Ap14, Ap13, (lineLengthP(Ap14, Ap13)/4.0)))
# points to create Jeans Waistband pattern 'C'
AWB1=rPointP(A, 'AWB1', pntOnLineP(AW1, AC2, WAISTBAND)) # waistband below center waist
if FRONT_NORMAL_WAIST:
pnt=pntOnLineP(AW5, cAS1a, WAISTBAND)
else:
pnt=pntOnLineP(AW5, cAS2a, WAISTBAND)
AWB4=rPointP(A, 'AWB4', pnt) # waistband line 1in. below side waist
AWB2=rPointP(A, 'AWB2', pntIntersectLinesP(AWB1, AWB4, Ap8, Ap7)) # waistband line at inside dart leg
AWB3=rPointP(A, 'AWB3', pntIntersectLinesP(AWB1, AWB4, Ap8, Ap6)) # waistband line at outside dart leg
#front grainline AG & label location
AG1=rPoint(A, 'AG1', Ap16.x, HIPLINE)
AG2=rPoint(A, 'AG2', Ap16.x, Ap18.y+abs(Ap21.y-Ap18.y)/2.0)
(A.label_x, A.label_y)=(AG2.x, AG2.y-(2.0*IN))
#grid 'Agrid' path
Agrid=path()
# vertical Agrid
addToPath(Agrid, 'M', AStart, 'L', ARise, 'M', Ap5, 'L', Ap8, 'M', Ap16, 'L', Ap20, 'M', Ap3, 'L', Ap2, 'M', AEnd, 'L', Ap13)
# horizontal Agrid
addToPath(Agrid, 'M', AStart, 'L', AEnd, 'M', AWaist, 'L', Ap1, 'M', Ap23, 'L', Ap24, 'M', AHip, 'L', Ap11)
addToPath(Agrid, 'M', ARise, 'L', Ap15, 'M', Ap18, 'L', Ap19, 'M', AWB1, 'L', AWB2, 'M', AWB3, 'L', AWB4, 'M', AT1, 'L', AT2)
# diagonal grid
addToPath(Agrid, 'M', Ap3, 'L', Ap4, 'M', Ap13, 'L', Ap14)
# dart 'd' path
d=path()
addToPath(d, 'M', AD1, 'L', AD2, 'M', AD3, 'L', AD1, 'L', AD4)
# seamline 's' & cuttingline 'c' paths
s=path()
c=path()
paths=pointList(s, c)
for p in paths:
# - addToPath(p, 'M', AW1, 'L', AW2, 'L', AW3, 'L', AW4, 'C', cAW5a, cAW5b, AW5) --> waistband from waist to 1" below waist
# - waistband from 1" below waist to 2" below waist
addToPath(p, 'M', AW1, 'L', AW2, 'L', AW3, 'L', AW4, 'C', cAW5a, cAW5b, AW5)
if (FRONTNORMALTHIGH):
if (FRONT_NORMAL_WAIST):
addToPath(p, 'C', cAS1a, cAS1b, AS1)
else:
addToPath(p, 'C', cAS2a, cAS2b, AS2)
else:
addToPath(p, 'C', cAS1a, cAS1b, AS1, 'C', cAT1a, cAT1b, AT1)
addToPath(p, 'C', cAS3a, cAS3b, AS3, 'L', AS4, 'L', AI1, 'L', AI2, 'C', cAI3a, cAI3b, AI3)
if (FRONT_NORMAL_WAIST):
cubicCurveP(p, cAC1a, cAC1b, AC1)
addToPath(p, 'C', cAC2a, cAC2b, AC2, 'L', AW1)
# add grainline, dart, seamline & cuttingline paths to pattern
A.add(grainLinePath("grainLine", "Jeans Front Grainline", AG1, AG2))
A.add(Path('reference','grid', 'Jeans Front Gridline', Agrid, 'gridline_style'))
A.add(Path('pattern', 'dartline', 'Jeans Front Dartline', d, 'dart_style'))
A.add(Path('pattern', 'seamLine', 'Jeans Front Seamline', s, 'seamline_path_style'))
A.add(Path('pattern', 'cuttingLine', 'Jeans Front Cuttingline', c, 'cuttingline_style'))
# Jeans Back 'B'
jeans.add(PatternPiece('pattern', 'back', letter='B', fabric=2, interfacing=0, lining=0))
B=jeans.back
BSTART=0.0
BEND=((1.25)*BACK_HIP_ARC)
BStart=rPoint(B, 'BStart', BSTART, BSTART)
BEnd=rPoint(B, 'BEnd', BEND, BSTART)
BWaist=rPoint(B, 'BWaist', BSTART, WAISTLINE)
BAbdomen=rPoint(B, 'BAbdomen', BSTART, ABDOMENLINE)
BHip=rPoint(B, 'BHip', BSTART, HIPLINE)
BRise=rPoint(B, 'BRise', BSTART, RISELINE)
Bp1=rPoint(B, 'Bp1', BSTART+((0.25)*BACK_HIP_ARC), WAISTLINE)
Bp2=rPoint(B, 'Bp2', BEND, WAISTLINE)
Bp5=rPoint(B, 'Bp5', Bp1.x+((BEND-Bp1.x)/2.0), WAISTLINE)
Bp6=rPoint(B, 'Bp6', Bp5.x-((3/8.0)*IN), WAISTLINE)
Bp7=rPoint(B, 'Bp7', Bp5.x + ((3/8.0)*IN), WAISTLINE)
Bp8=rPoint(B, 'Bp8', Bp5.x, (Bp5.y + (3.5*IN) ) )
if (BACK_NORMAL_WAIST):
Bp3=rPoint(B, 'Bp3', Bp1.x+(1.75*IN), WAISTLINE)
Bp4=rPoint(B, 'Bp4', Bp1.x+(BACK_WAIST_ARC)+(1.0*IN), WAISTLINE)
else:
Bp3=rPoint(B, 'Bp3', Bp6.x-(BACK_WAIST_ARC/2.0)-((1/8)*IN), WAISTLINE)
Bp4=rPoint(B, 'Bp4', Bp7.x+(BACK_WAIST_ARC/2.0)+((1/8)*IN), WAISTLINE)
Bp9=rPoint(B, 'Bp9', Bp1.x, HIPLINE-(abs(RISELINE-HIPLINE)/2.0))
Bp10=rPoint(B, 'Bp10', Bp1.x, HIPLINE)
Bp11=rPoint(B, 'Bp11', Bp2.x, HIPLINE)
Bp12=rPoint(B, 'Bp12', BStart.x, RISELINE)
Bp13=rPoint(B, 'Bp13', Bp1.x, RISELINE)
Bp14=rPointP(B, 'Bp14', pntFromDistanceAndAngleP(Bp13, (1.75*IN), angleFromSlope(1.0, -1.0)))
Bp15=rPoint(B, 'Bp15', Bp2.x, RISELINE)
Bp16=rPoint(B, 'Bp16', Bp15.x-((3./8.0)*IN), RISELINE)
Bp17=rPoint(B, 'Bp17',(Bp16.x-Bp12.x)/2., RISELINE) # Creaseline
BCREASELINE=Bp17.x
Bp18=rPoint(B, 'Bp18', Bp17.x, KNEELINE)
Bp19=rPoint(B, 'Bp19', Bp18.x-(4.50*IN), KNEELINE)
Bp20=rPoint(B, 'Bp20', Bp18.x+(4.50*IN), KNEELINE)
Bp21=rPoint(B, 'Bp21', Bp18.x, HEMLINE)
Bp22=rPoint(B, 'Bp22', Bp21.x-(4.*IN), HEMLINE)
Bp23=rPoint(B, 'Bp23', Bp21.x+(4.*IN), HEMLINE)
Bp24=rPoint(B, 'Bp24', Bp8.x-(BACK_ABDOMEN_ARC/2.0)-((1/8.0)*IN), ABDOMENLINE)
Bp25=rPoint(B, 'Bp25', Bp8.x+(BACK_ABDOMEN_ARC/2.0)+((1/8.0)*IN), ABDOMENLINE)
Bp26=rPoint(B, 'Bp26', BCREASELINE + THIGH_ARC, THIGHLINE)
Bp27=rPoint(B, 'Bp27', BCREASELINE - THIGH_ARC, THIGHLINE)
# back waist
BW1=rPoint(B,'BW1', Bp3.x, BStart.y) # back center seam at waist
if (BW1.x < Bp9.x):
# BW1 waistpoint extends past (inside) calculated inflection point on back center seam (Bp9)
BACKLARGERWAIST=1
else:
BACKLARGERWAIST=0
BW2=rPointP(B, 'BW2', pntIntersectLinesP(BW1, Bp4, Bp8, Bp6)) # inside dart leg at waist
DART_LEG_LENGTH=lineLengthP(Bp8, BW2)
angle1=angleP(Bp8, Bp5) # angle of center dart line
angle2=angleP(Bp8, Bp6) # angle of inside dart leg
if angle1 > angle2:
angle3=angle1 - angle2 # angle of half-dart
angle4=angle1 + angle3 # angle of outside dart leg
angle5=angle1 + (2*angle3) # # angle of sewn dart fold, towards side seam
else:
angle3=angle2 - angle1 # angle of half-dart
angle4=angle1 - angle3 # angle of outside dart leg
angle5=angle1 - (2*angle3) # # angle of sewn dart fold, towards side seam
BW4=rPointP(B, 'BW4', pntFromDistanceAndAngleP(Bp8, DART_LEG_LENGTH, angle4)) # outside dart leg at waist
pnt1=pntFromDistanceAndAngleP(Bp8, DART_LEG_LENGTH, angle5) # point along sewn dart fold
pnt2=pntIntersectLinesP(Bp8, pnt1, Bp4, BW4) # where sewn dart fold will cross waistline
BW3=rPointP(B, 'BW3', pntOnLineP(Bp8, Bp5, lineLengthP(Bp8, pnt2))) # center dart line at waist
BW5=rPointP(B, 'BW5', Bp4)
# back waist control points
distance=(lineLengthP(BW4, BW5)/3.0)
cBW5b=cPoint(B, 'cBW5b', BW5.x-distance, BW5.y)
cBW5a=cPointP(B, 'cBW5a', pntOnLineP(BW4, cBW5b, distance))
#back dart
BD1=rPointP(B, 'BD1', Bp8) # point of dart
BD2=rPointP(B, 'BD2', pntOffLineP(BW3, Bp8, SEAM_ALLOWANCE)) # center dart line at cuttingline
BD3=rPointP(B, 'BD3', pntIntersectLines(BW4.x, BW4.y-SEAM_ALLOWANCE, BW5.x, BW5.y-SEAM_ALLOWANCE, Bp8.x, Bp8.y, BW4.x, BW4.y)) # dart outside leg at cuttingline
BD4=rPointP(B, 'BD4', pntIntersectLines(BW1.x, BW1.y-SEAM_ALLOWANCE, BW2.x, BW2.y-SEAM_ALLOWANCE, Bp8.x, Bp8.y, BW2.x, BW2.y)) # dart inside leg at cuttingline
# back thigh points
BT1=rPointP(B, 'BT1', Bp26)
BT2=rPointP(B, 'BT2', Bp27)
#back side seam
BS1=rPointP(B, 'BS1', Bp11)
BS2=rPointP(B, 'BS2', Bp15)
BS3=rPointP(B, 'BS3', Bp20)
BS4=rPointP(B, 'BS4', Bp23)
if (BACK_NORMAL_THIGH):
# normal thigh
if (BACK_NORMAL_WAIST):
# normal waist
cBS3b=cPointP(B, 'cBS3b', pntOffLineP(BS3, BS4, (lineLengthP(BS3, BS1)/2.0))) # b/w BS1 & BS3
pnts=pointList(BW5, BS1, BS3)
c1, c2=controlPoints('BackSideSeam', pnts)
cBS1a=cPoint(B, 'cBS1a', c1[0].x, c1[0].y) #b/w BW5 & BS2
cBS1b=cPoint(B, 'cBS1b', BS1.x, c2[0].y) #b/w BW5 & BS1
cBS3a=cPoint(B, 'cBS3a', BS1.x, c1[1].y) #b/w BS1 & BW5
else:
# larger waist
cBS2a=cPoint(B, 'cBS2a', BW5.x, BW5.y+(lineLengthP(BW5, BS2)/3.0))
cBS3b=cPointP(B, 'cBS3b', pntOffLineP(BS3, BS4, (lineLengthP(BS2, BS3)/3.0))) # b/w BS2 & BS3
pnts=pointList(cBS2a, BS2, cBS3b)
c1, c2=controlPoints('BackSideSeam', pnts)
cBS2b=cPoint(B, 'cBS2b', c2[0].x, c2[0].y) #b/w BW5 & BS2
cBS3a=cPoint(B, 'cBS3a', c1[1].x, c1[1].y) #b/w BS2 & BS3
else:
# larger thigh
print 'larger thigh'
cBS3b=cPointP(B, 'cBS3b', pntOffLineP(BS3, BS4, (lineLengthP(BS3, BT1)/2.0))) # b/w BS1 & BS3
pnts=pointList(BW5, BS1, BT1, cBS3b)
c1, c2=controlPoints('FrontSideSeam', pnts)
cBS1a=cPoint(B, 'cBS1a', c1[0].x, c1[0].y) #b/w BW5 & BS1
cBS1b=cPoint(B, 'cBS1b', c2[0].x, c2[0].y) #b/w BW5 & BS1
cBT1a=cPoint(B, 'cBT1a', c1[1].x, c1[1].y) #b/w BS1 & BT1
cBT1b=cPoint(B, 'cBT1b', c2[1].x, c2[1].y) #b/w BS1 & BT1
cBS3a=cPoint(B, 'cBS3a', c1[2].x, c1[2].y) #b/w BT1 & BS3
# back inseam
BI1=rPointP(B, 'BI1', Bp22)
BI2=rPointP(B, 'BI2', Bp19)
# crotch point should be at least 1/3rd inch past inner thigh point (BT2)
if (Bp12.x <= (BT2.x - (0.3*IN)) ):
BI3=rPointP(B, 'BI3', Bp12)
else:
BI3=rPoint(B, 'BI3', BT2.x - (0.3*IN), Bp12.y)
distance=(lineLengthP(BI2, BI3)/3.0)
cBI3a=cPointP(B, 'cBI3a', pntOffLineP(BI2, BI1, distance)) #b/w BI2 & BI3
cBI3b=cPointP(B, 'cBI3b', pntOnLineP(BI3, cBI3a, distance)) #b/w BI2 & BI3
#back center seam
BC1=rPointP(B, 'BC1', Bp14)
#back center seam control points
if (BACK_NORMAL_WAIST):
# curve through BI3,BC1, BC2, then straight to BW1
BC2=rPointP(B, 'BC2', Bp9)
cBC2b=cPointP(B, 'cBC2b', pntOffLineP(BC2, BW1, (lineLengthP(BC1, BC2)/3.0)))
pnts=pointList(BI3, BC1, cBC2b)
c1, c2=controlPoints('BackCenterSeam', pnts)
cBC1a=cPoint(B, 'cBC1a', c1[0].x, c1[0].y) #b/w BI3 & BC1
cBC1b=cPoint(B, 'cBC1b', c2[0].x, c2[0].y) #b/w BI3 & BC1
cBC2a=cPoint(B, 'cBC2a', c1[1].x, c1[1].y) #b/w BC1 & BC2
else:
if (BACKLARGERWAIST):
# curve through BI3,BC2, BW1
# move BC2 point towards center (x) by 25% of extra back waist width
BC2=rPoint(B, 'BC2', Bp9.x - (abs(Bp9.x - BW1.x)/4.0), Bp9.y )
cBW1a=cPoint(B, 'cBC1a', BC2.x, BC2.y - (lineLengthP(BC2, BW1)/3.0)) #b/w BC2 & BW1
cBW1b=cPoint(B, 'cBW1b', BW1.x, BW1.y + (lineLengthP(BC2, BW1)/2.0)) #b/w BC2 & BW1 # vertical with BW1 (x)
cBC2b=cPoint(B, 'cBC2b', BC2.x, BC2.y + (lineLengthP(BC2, BI3)/3.0)) #b/w BC2 & BI3
cBC2a=cPointP(B, 'cBC2a', pntOnLineP(BC1, Bp13, lineLengthP(BC1, Bp13)/4.0)) #b/w BI3 & BC2
else:
# curve through BI3, BC2, then straight to BW1
BC2=rPointP(B, 'BC2', Bp9)
cBC2b=cPointP(B, 'cBC2b', pntOffLineP(BC2, BW1, (lineLengthP(BC2, BC1)/3.0)))
cBC2a=cPoint(B, 'cBC2a', pntOnLineP(Bp14, Bp13, (lineLengthP(Bp14, Bp13)/4.0)))
# back points to create Jeans Waistband pattern 'C'
# back waistband, center section
rise=-(BW2.y - BW1.y)# negate this b/c y increases from top to bottom of drawing
run=BW2.x - BW1.y
angle1=angleFromSlope(-run, rise) # inverse rise/run --> -run/rise
pnt1=pntFromDistanceAndAngleP(BW1, WAISTBAND, angle1)
pnt2=pntFromDistanceAndAngleP(BW2, WAISTBAND, angle1)
BWB1=rPointP(B, 'BWB1', pntIntersectLinesP(pnt1, pnt2, BW1, BC2))
BWB2=rPointP(B, 'BWB2', pntIntersectLinesP(pnt1, pnt2, BW2, BD1))
# back waistband, side section
rise=-(BW4.y - BW5.y)# negate this b/c y increases from top to bottom of drawing
run=BW4.x - BW5.y
angle1=angleFromSlope(-run, rise) # inverse rise/run --> -run/rise
pnt1=pntFromDistanceAndAngleP(BW4, WAISTBAND, angle1)
pnt2=pntFromDistanceAndAngleP(BW5, WAISTBAND, angle1)
BWB3=rPointP(B, 'BWB3', pntIntersectLinesP(pnt1, pnt2, BW4, BD1))
if BACK_NORMAL_WAIST:
BWB4=rPointP(B, 'BWB4', pntIntersectLinesP(pnt1, pnt2, BW5, cBS1a))
else:
BWB4=rPointP(B, 'BWB4', pntIntersectLinesP(pnt1, pnt2, BW5, cBS2a))
#back grainline & label location
BG1=rPoint(B, 'BG1', Bp17.x, HIPLINE)
BG2=rPoint(B, 'BG2', BG1.x, Bp18.y+(Bp21.y-Bp18.y)/2.0)
(B.label_x, B.label_y)=(BG2.x, BG2.y-(2.0*IN))
#grid 'Bgrid' path
Bgrid=path()
# vertical grid
addToPath(Bgrid, 'M', BStart, 'L', BRise, 'M', Bp1, 'L', Bp13, 'M', BEnd, 'L', Bp15, 'M', Bp17, 'L', Bp21, 'M', Bp5, 'L', Bp8)
# horizontal grid
addToPath(Bgrid, 'M', BStart, 'L', BEnd, 'M', BWaist, 'L', Bp2, 'M', BHip, 'L', Bp11, 'M', BRise, 'L', Bp15, 'M', Bp19, 'L', Bp20, 'M', BT1, 'L', BT2)
# diagonal grid
addToPath(Bgrid, 'M', BW1, 'L', BW5, 'M', Bp13, 'L', Bp14, 'M', BWB1, 'L', BWB2, 'M', BWB3, 'L', BWB4)
#dart 'd' path
d=path()
addToPath(d, 'M', BD1, 'L', BD2, 'M', BD3, 'L', BD1, 'L', BD4)
#seamline 's' & cuttingline 'c' paths
s=path()
c=path()
paths=pointList(s, c)
for p in paths:
addToPath(p, 'M', BW1, 'L', BW2, 'L', BW3, 'L', BW4, 'C', cBW5a, cBW5b, BW5)
if (BACK_NORMAL_THIGH):
# normal thigh
if (BACK_NORMAL_WAIST):
# normal waist
addToPath(p, 'C', cBS1a, cBS1b, BS1)
else:
# large waist
addToPath(p, 'C', cBS2a, cBS2b, BS2)
else:
# large thigh
addToPath(p, 'C', cBS1a, cBS1b, BS1, 'C', cBT1a, cBT1b, BT1)
addToPath(p, 'C', cBS3a, cBS3b, BS3, 'L', BS4, 'L', BI1, 'L', BI2, 'C', cBI3a, cBI3b, BI3)
if (BACK_NORMAL_WAIST) :
addToPath(p, 'C', cBC1a, cBC1b, BC1, 'C', cBC2a, cBC2b, BC2, 'L', BW1)
elif (BACKLARGERWAIST):
addToPath(p, 'C', cBC2a, cBC2b, BC2, 'C', cBW1a, cBW1b, BW1)
else:
addtoPath(p, 'C', cBC2a, cBC2b, BC2, 'L', BW1)
# add grid, dart, grainline, seamline & cuttingline paths to pattern
B.add(grainLinePath("grainLine", "Jeans Back Grainline", BG1, BG2))
B.add(Path('reference','Bgrid', 'Trousers Back Gridline', Bgrid, 'gridline_style'))
B.add(Path('pattern', 'dartline', 'Jeans Back Dartline', d, 'dart_style'))
B.add(Path('pattern', 'seamLine', 'Jeans Back Seamline', s, 'seamline_path_style'))
B.add(Path('pattern', 'cuttingLine', 'Jeans Back Cuttingline', c, 'cuttingline_style'))
# Jeans Waistband 'C'
jeans.add(PatternPiece('pattern', 'LeftWaistband', letter='C', fabric=2, interfacing=1, lining=0))
C=jeans.LeftWaistband
CSTART=0.0
CEND=(FRONT_WAIST_ARC+BACK_WAIST_ARC)
CStart=rPoint(C, 'CStart', BSTART, BSTART)
CEnd=rPoint(C, 'CEnd', BEND, BSTART)
CX1=rPoint(C,'CX1', AWB4.x, AWB4.y-WAISTBAND) # reference point to center the waistband
connector0=AWB4 #object1..front waistband,center section, right side, low...AWB4 <===> AWB4 (connector2)...no change
connector1=CX1 # object1...point vertical from AWB4...CX1<===> AW5 ...straightens up 1st object
connector2=AWB4 #object2...front waistband,side section, left side, low
connector3=AW1 #object2...front waistband,side section, left side, high
connector_pnts=pointList(connector0, connector1, connector2, connector3)
old_pnts=pointList(AWB4, AW5, AW4, AWB3) # front waistband, side section old points
new_pnts=connectObjects(connector_pnts, old_pnts) # front waistband, side section new points
C8=rPoint(C, 'C8', new_pnts[0].x, new_pnts[0].y)
C3=rPoint(C, 'C3', new_pnts[1].x, new_pnts[1].y)
C2=rPoint(C, 'C2', new_pnts[2].x, new_pnts[2].y)
C9=rPoint(C, 'C9', new_pnts[3].x, new_pnts[3].y)
connector0=AWB4 #object1..AWB4 <===> BWB4
connector1=CX1 #object1...CX1 <===> BW5
connector2=BWB4 #object2...back waistband,side section, right side, low
connector3=BW5 #object2...back waistband,side section, right side, high
connector_pnts=pointList(connector0, connector1, connector2, connector3)
old_pnts=pointList(BWB4, BW5, BW4, BWB3) # front waistband, side section old points
new_pnts=connectObjects(connector_pnts, old_pnts) # front waistband, side section new points
# new_pnts[0] =C8 ( on lower edge of waistband), new_pnts[1] =C3 (on upper edge)
C4=rPoint(C, 'C4', new_pnts[2].x, new_pnts[2].y)
C7=rPoint(C, 'C7', new_pnts[3].x, new_pnts[3].y)
connector0=C9 #object2...front waistband,side section,right side, low...C9 <===> AWB2
connector1=C2 #object2...front waistband,side section,right side, high...C2<===> AW2
connector2=AWB2 #object3...front waistband,center section,right side,low
connector3=AW2 #object3...front waistband,center section,right side,high
connector_pnts=pointList(connector0, connector1, connector2, connector3)
old_pnts=pointList(AWB2, AW2, AW1, AWB1)
new_pnts=connectObjects(connector_pnts, old_pnts)
C1=rPoint(C, 'C1', new_pnts[2].x, new_pnts[2].y)
C10=rPoint(C, 'C10', new_pnts[3].x, new_pnts[3].y)
connector0=C7 #object3...back waistband,side section,left side, low...C7 <===> BWB2 (connector6)
connector1=C4 #object3...front waistband,side section,left side, high...C4 <===> BW2 (connector7)
connector2=BWB2 #object4...back waistband,center section,right side,low
connector3=BW2 #object4...back waistband,center section,right side,high
connector_pnts=pointList(connector0, connector1, connector2, connector3)
old_pnts=pointList(BWB2, BW2, BW1, BWB1)
new_pnts=connectObjects(connector_pnts, old_pnts)
C5=rPoint(C, 'C5', new_pnts[2].x, new_pnts[2].y)
C6=rPoint(C, 'C6', new_pnts[3].x, new_pnts[3].y)
cC3a=cPointP(C, 'cC3a', pntOffLineP(C2, C1, lineLengthP(C2, C3)/3.0)) #b/w C2 & C3
cC4b=cPointP(C, 'cC4b', pntOffLineP(C4, C5, lineLengthP(C4, C3)/4.0)) #b/w C4 & C3
pnts=pointList(cC3a, C3, cC4b)
c1, c2=controlPoints('LeftWaistbandTopEdge', pnts)
cC3b=cPointP(C, 'cC3b', pntOnLineP(C3, c2[0], lineLengthP(C2, C3)/3.0)) #b/w C2 & C3
cC4a=cPointP(C, 'cC4a', pntOnLineP(C3, c1[1], lineLengthP(C4, C3)/3.0)) #b/w C4 & C3
cC8a=cPointP(C, 'cC8a', pntOffLineP(C7, C6, lineLengthP(C7, C8)/3.0)) #b/w C7 & C8
cC9b=cPointP(C, 'cC9b', pntOffLineP(C9, C10, lineLengthP(C9, C8)/3.0)) #b/w C9 & C8
pnts=pointList(cC8a, C8, cC9b)
c1, c2=controlPoints('LeftWaistbandLowerEdge', pnts)
cC8b=cPointP(C, 'cC8b', pntOnLineP(C8, c2[0], lineLengthP(C7, C8)/3.0)) #b/w C7 & C8
cC9a=cPointP(C, 'cC9a', pntOnLineP(C8, c1[1], lineLengthP(C9, C8)/3.0)) #b/w C8 & C9
#grainline points & label location
CG1=rPoint(C, 'CG1', C2.x+(1.0*IN), C2.y + (0.5*IN))
CG2=rPoint(C, 'CG2', CG1.x + (1.5*IN), CG1.y)
(C.label_x, C.label_y)=(CG2.x, CG2.y)
#grid 'Cgrid' path
Cgrid=path()
addToPath(Cgrid, 'M', C2, 'L', C9, 'M',C3,'L',C8,'M', C4, 'L', C7)
# seamline 's' & cuttingline 'c' paths
s=path()
c=path()
paths=pointList(s, c)
for p in paths:
addToPath(p, 'M', C1, 'L', C2, 'C', cC3a, cC3b, C3, 'C', cC4a, cC4b, C4, 'L', C5)
addToPath(p, 'L', C6, 'L', C7, 'C', cC8a, cC8b, C8, 'C', cC9a, cC9b, C9, 'L', C10, 'L', C1)
# add grainline, seamline & cuttingline paths to pattern
C.add(grainLinePath("grainLine", "Left Waistband Grainline", CG1, CG2))
C.add(Path('reference','grid', 'Left Waistband Reference Grid', Cgrid, 'gridline_style'))
C.add(Path('pattern', 'seamLine', 'Left Waistband Seamline', s, 'seamline_path_style'))
C.add(Path('pattern', 'cuttingLine', 'Left Waistband Cuttingline', c, 'cuttingline_style'))
#call draw once for the entire pattern
doc.draw()
return
# vi:set ts=4 sw=4 expandtab:
|
gpl-3.0
|
raoulbq/scipy
|
scipy/signal/_arraytools.py
|
91
|
5145
|
"""
Functions for acting on a axis of an array.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
def axis_slice(a, start=None, stop=None, step=None, axis=-1):
    """Take a slice along axis 'axis' from 'a'.

    Parameters
    ----------
    a : numpy.ndarray
        The array to be sliced.
    start, stop, step : int or None
        The slice parameters.
    axis : int, optional
        The axis of `a` to be sliced.

    Examples
    --------
    >>> a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> axis_slice(a, start=0, stop=1, axis=1)
    array([[1],
           [4],
           [7]])
    >>> axis_slice(a, start=1, axis=0)
    array([[4, 5, 6],
           [7, 8, 9]])

    Notes
    -----
    The keyword arguments start, stop and step are used by calling
    slice(start, stop, step).  This implies axis_slice() does not
    handle its arguments exactly the same as indexing.  To select
    a single index k, for example, use

        axis_slice(a, start=k, stop=k+1)

    In this case, the length of the axis 'axis' in the result will
    be 1; the trivial dimension is not removed.  (Use numpy.squeeze()
    to remove trivial axes.)
    """
    # Full-dimensional index: take everything on every axis except `axis`.
    a_slice = [slice(None)] * a.ndim
    a_slice[axis] = slice(start, stop, step)
    # Index with a tuple: indexing an ndarray with a plain *list* of slices
    # was deprecated in NumPy 1.15 and is an error in current releases.
    b = a[tuple(a_slice)]
    return b
def axis_reverse(a, axis=-1):
    """Reverse the 1-D slices of `a` along the given axis.

    Equivalent to ``axis_slice(a, step=-1, axis=axis)``.
    """
    reversed_view = axis_slice(a, step=-1, axis=axis)
    return reversed_view
def odd_ext(x, n, axis=-1):
    """Extend `x` by `n` elements at both ends of `axis`, odd-symmetrically.

    The data is mirrored about the end points, so each end value acts as
    a center of point symmetry for the extension.

    Parameters
    ----------
    x : ndarray
        The array to be extended.
    n : int
        The number of elements by which to extend x at each end of the axis.
    axis : int, optional
        The axis along which to extend x.  Default is -1.

    Raises
    ------
    ValueError
        If `n` exceeds ``x.shape[axis] - 1`` (not enough data to mirror).
    """
    if n < 1:
        return x
    max_ext = x.shape[axis] - 1
    if n > max_ext:
        raise ValueError(("The extension length n (%d) is too big. " +
                          "It must not exceed x.shape[axis]-1, which is %d.")
                         % (n, max_ext))
    # End values (length-1 slices, so broadcasting keeps the axis).
    left_edge = axis_slice(x, start=0, stop=1, axis=axis)
    right_edge = axis_slice(x, start=-1, axis=axis)
    # Mirrored neighbours of each end, excluding the end point itself.
    left_mirror = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
    right_mirror = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
    # 2*edge - mirror reflects the values through the edge point.
    pieces = (2 * left_edge - left_mirror,
              x,
              2 * right_edge - right_mirror)
    return np.concatenate(pieces, axis=axis)
def even_ext(x, n, axis=-1):
    """Extend `x` by `n` elements at both ends of `axis`, even-symmetrically.

    The data is mirrored about the end points; the end values themselves
    are not repeated.

    Parameters
    ----------
    x : ndarray
        The array to be extended.
    n : int
        The number of elements by which to extend x at each end of the axis.
    axis : int, optional
        The axis along which to extend x.  Default is -1.

    Raises
    ------
    ValueError
        If `n` exceeds ``x.shape[axis] - 1`` (not enough data to mirror).
    """
    if n < 1:
        return x
    max_ext = x.shape[axis] - 1
    if n > max_ext:
        raise ValueError(("The extension length n (%d) is too big. " +
                          "It must not exceed x.shape[axis]-1, which is %d.")
                         % (n, max_ext))
    # Reflections about each end point (end values excluded).
    left_mirror = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
    right_mirror = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
    return np.concatenate((left_mirror, x, right_mirror), axis=axis)
def const_ext(x, n, axis=-1):
    """Extend `x` by `n` elements at both ends of `axis` with constant values.

    The extension repeats the first and last element of the axis.

    Parameters
    ----------
    x : ndarray
        The array to be extended.
    n : int
        The number of elements by which to extend x at each end of the axis.
    axis : int, optional
        The axis along which to extend x.  Default is -1.
    """
    if n < 1:
        return x
    # Broadcasting helper: a block of ones that is `n` long on `axis`
    # and length 1 everywhere else.
    pad_shape = [1] * x.ndim
    pad_shape[axis] = n
    pad = np.ones(pad_shape, dtype=x.dtype)
    # Length-1 end slices; multiplying by `pad` tiles them n times.
    first = axis_slice(x, start=0, stop=1, axis=axis)
    last = axis_slice(x, start=-1, axis=axis)
    return np.concatenate((pad * first, x, pad * last), axis=axis)
|
bsd-3-clause
|
ivano666/tensorflow
|
tensorflow/python/kernel_tests/summary_image_op_test.py
|
15
|
3949
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for summary image op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.ops import image_ops
class SummaryImageOpTest(tf.test.TestCase):
    """Tests for tf.image_summary: float scaling/offsetting and uint8 passthrough."""

    def _AsSummary(self, s):
        # Deserialize the raw bytes produced by running a summary op.
        summ = tf.Summary()
        summ.ParseFromString(s)
        return summ

    def _CheckProto(self, image_summ, shape):
        """Verify that the non-image parts of the image_summ proto match shape."""
        # Only the first 3 images are returned.
        for v in image_summ.value:
            # Strip the PNG payload so only the structural fields are compared.
            v.image.ClearField("encoded_image_string")
        # shape is (batch, height, width, depth); shape[1:] fills the template.
        expected = '\n'.join("""
            value {
              tag: "img/image/%d"
              image { height: %d width: %d colorspace: %d }
            }""" % ((i,) + shape[1:]) for i in xrange(3))
        self.assertProtoEquals(expected, image_summ)

    def testImageSummary(self):
        # Fixed seed keeps the generated images (and the scaling derived
        # from their extrema) deterministic across runs.
        np.random.seed(7)
        with self.test_session() as sess:
            for depth in 1, 3, 4:
                shape = (4, 5, 7) + (depth,)
                # Color the op is expected to paint NaN pixels with
                # (red, plus alpha when depth == 4).
                bad_color = [255, 0, 0, 255][:depth]
                for positive in False, True:
                    # Build a mostly random image with one nan
                    const = np.random.randn(*shape).astype(np.float32)
                    const[0, 1, 2] = 0  # Make the nan entry not the max
                    if positive:
                        # All-positive input: scaled to [0, 255] per image.
                        const = 1 + np.maximum(const, 0)
                        scale = 255 / const.reshape(4, -1).max(axis=1)
                        offset = 0
                    else:
                        # Signed input: scaled to [1, 255] around midpoint 128.
                        scale = 127 / np.abs(const.reshape(4, -1)).max(axis=1)
                        offset = 128
                    # Expected pixel values after the op's per-image rescaling.
                    adjusted = np.floor(scale[:, None, None, None] * const + offset)
                    # Inject the NaN after computing `adjusted` so it does not
                    # poison the expected values.
                    const[0, 1, 2, depth // 2] = np.nan

                    # Summarize
                    summ = tf.image_summary("img", const)
                    value = sess.run(summ)
                    self.assertEqual([], summ.get_shape())
                    image_summ = self._AsSummary(value)

                    # Decode the first image and check consistency
                    image = image_ops.decode_png(
                        image_summ.value[0].image.encoded_image_string).eval()
                    # The NaN pixel must have been replaced with bad_color.
                    self.assertAllEqual(image[1, 2], bad_color)
                    image[1, 2] = adjusted[0, 1, 2]
                    self.assertAllClose(image, adjusted[0])

                    # Check the rest of the proto
                    self._CheckProto(image_summ, shape)

    def testImageSummaryUint8(self):
        np.random.seed(7)
        with self.test_session() as sess:
            for depth in 1, 3, 4:
                shape = (4, 5, 7) + (depth,)

                # Build a random uint8 image
                images = np.random.randint(256, size=shape).astype(np.uint8)
                tf_images = tf.convert_to_tensor(images)
                self.assertEqual(tf_images.dtype, tf.uint8)

                # Summarize
                summ = tf.image_summary("img", tf_images)
                value = sess.run(summ)
                self.assertEqual([], summ.get_shape())
                image_summ = self._AsSummary(value)

                # Decode the first image and check consistency.
                # Since we're uint8, everything should be exact.
                image = image_ops.decode_png(
                    image_summ.value[0].image.encoded_image_string).eval()
                self.assertAllEqual(image, images[0])

                # Check the rest of the proto
                self._CheckProto(image_summ, shape)
if __name__ == "__main__":
    # Run the test suite when this file is executed directly.
    tf.test.main()
|
apache-2.0
|
jup3/hots
|
example.py
|
1
|
1077
|
# Example nested-dict "databases" demonstrating multi-level key lookups.
# NOTE: this file used Python-2-only `print` statements; every print is now
# a single-argument print() call, which behaves identically on Python 2
# (one parenthesized expression) and Python 3 (the print function).

# Hero counter/synergy table (keys keep the original "Stiches" spelling,
# since the printed output includes them).
db1 = {"Database":
       {"Raynor": {
           "Raynor-Counter": ["Tracer", "Uther", "Zeratul"],
           "Raynor-Synergize": ["Who", "It", "is"]
       },
        "Stitches": {
            "Stiches-Counter": ["What", "ever", "it"],
            "Stiches-Synergize": ["What", "name", "is"]
        }
       }
       }

# Same shape with abstract keys.
db = {"i":
      {"a": {
          "1": ["i", "ii", "iii"],
          "2": ["iv", "v", "vi"],
          "3": ["iv", "v", "vi"]
      },
       "b": {
           "4": ["ix", "xi", "x"],
           "5": ["ix", "sa", "x"]
       }
      }
      }

# Two-level lookup example.
player_db = {"Players":
             {"Player1": ["These", "are", "my"],
              "Player2": ["Favorite", "heroes", "in"],
              "Player3": ["The", "Game"]
              }
             }

print(db1["Database"]["Stitches"])
print(db["i"]["b"]["5"])
print(player_db["Players"]["Player2"])

# Boolean short-circuit examples: `or` wins inside each group, then `and`.
print((True or True) and (True or True))
print((True or False) and (True or True))
print((True or False) and (True or False))
print((True or False) and (False or False))
print((True or False) and (False or True))
|
mit
|
zhaodelong/django
|
tests/m2m_multiple/tests.py
|
227
|
2370
|
from __future__ import unicode_literals
from datetime import datetime
from django.test import TestCase
from .models import Article, Category
class M2MMultipleTests(TestCase):
    def test_multiple(self):
        """Two independent M2M relations on Article stay independent."""
        # Fixture: four categories and two articles wired up through the
        # primary_categories and secondary_categories relations.
        categories = [Category.objects.create(name=name)
                      for name in ["Sports", "News", "Crime", "Life"]]
        c1, c2, c3, c4 = categories

        a1 = Article.objects.create(
            headline="Parrot steals", pub_date=datetime(2005, 11, 27)
        )
        a1.primary_categories.add(c2, c3)
        a1.secondary_categories.add(c4)

        a2 = Article.objects.create(
            headline="Parrot runs", pub_date=datetime(2005, 11, 28)
        )
        a2.primary_categories.add(c1, c2)
        a2.secondary_categories.add(c4)

        # Forward direction: each article sees exactly the categories it
        # was given, ordered by name.
        forward_expectations = [
            (a1.primary_categories, ["Crime", "News"]),
            (a2.primary_categories, ["News", "Sports"]),
            (a1.secondary_categories, ["Life"]),
        ]
        for relation, expected_names in forward_expectations:
            self.assertQuerysetEqual(
                relation.all(), expected_names, lambda c: c.name
            )

        # Reverse direction: each category sees the articles that added it,
        # separately per relation.
        reverse_expectations = [
            (c1.primary_article_set, ["Parrot runs"]),
            (c2.primary_article_set, ["Parrot steals", "Parrot runs"]),
            (c3.primary_article_set, ["Parrot steals"]),
            (c4.primary_article_set, []),
            (c1.secondary_article_set, []),
            (c2.secondary_article_set, []),
            (c3.secondary_article_set, []),
            (c4.secondary_article_set, ["Parrot steals", "Parrot runs"]),
        ]
        for relation, expected_headlines in reverse_expectations:
            self.assertQuerysetEqual(
                relation.all(), expected_headlines, lambda a: a.headline
            )
|
bsd-3-clause
|
PrivacyScore/PrivacyScore
|
privacyscore/backend/management/commands/rescanscanlist.py
|
1
|
2152
|
# Copyright (C) 2018 PrivacyScore Contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from time import sleep
from django.core.management import BaseCommand
from django.utils import timezone
from privacyscore.backend.models import Site, ScanList
from privacyscore.utils import normalize_url
class Command(BaseCommand):
    """Management command: trigger a rescan of every site in a ScanList."""
    # Fix: user-visible help text had the typo "exisiting".
    help = 'Rescan all sites in an existing ScanList.'

    def add_arguments(self, parser):
        # Positional: primary key of the ScanList whose sites are rescanned.
        parser.add_argument('scan_list_id')
        # Optional pause (seconds) between scheduling two scans; argparse
        # exposes it as options['sleep_between_scans'].
        parser.add_argument('-s', '--sleep-between-scans', type=float, default=0)

    def handle(self, *args, **options):
        # Raises ScanList.DoesNotExist for an unknown id, which Django
        # reports as a command error -- intentionally not caught here.
        scan_list = ScanList.objects.get(id=options['scan_list_id'])
        sites = scan_list.sites.all()
        scan_count = 0
        for site in sites:
            status_code = site.scan()
            # Skip sites the backend refuses to scan right now.
            if status_code == Site.SCAN_COOLDOWN:
                self.stdout.write(
                    'Rate limiting -- Not scanning site {}'.format(site))
                continue
            if status_code == Site.SCAN_BLACKLISTED:
                self.stdout.write(
                    'Blacklisted -- Not scanning site {}'.format(site))
                continue
            scan_count += 1
            self.stdout.write('Scanning site {}'.format(
                site))
            # Optional throttle so we do not hammer the scan backend.
            if options['sleep_between_scans']:
                self.stdout.write('Sleeping {}'.format(options['sleep_between_scans']))
                sleep(options['sleep_between_scans'])
        self.stdout.write('read {} sites, scanned {}'.format(
            len(sites), scan_count))
|
gpl-3.0
|
pyload/pyload
|
src/pyload/plugins/accounts/OboomCom.py
|
1
|
2033
|
# -*- coding: utf-8 -*-
import json
from binascii import b2a_hex
from beaker.crypto.pbkdf2 import pbkdf2
from pyload.core.network.request_factory import get_url
from ..base.account import BaseAccount
class PBKDF2:
    """Thin wrapper around beaker's pbkdf2() that returns hex-encoded output."""

    def __init__(self, passphrase, salt, iterations=1000):
        # Stored verbatim and passed straight through to pbkdf2().
        self.passphrase = passphrase
        self.salt = salt
        self.iterations = iterations

    def hexread(self, octets):
        """Derive `octets` bytes of key material and return them hex-encoded."""
        derived = pbkdf2(self.passphrase, self.salt, self.iterations, octets)
        return b2a_hex(derived)
class OboomCom(BaseAccount):
    __name__ = "OboomCom"
    __type__ = "account"
    __version__ = "0.34"
    __status__ = "testing"

    __description__ = """Oboom.com account plugin"""
    __license__ = "GPLv3"
    __authors__ = [
        ("stanley", "stanley.foerster@gmail.com"),
        ("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"),
    ]

    #: See https://www.oboom.com/api
    API_URL = "https://%s.oboom.com/1/"

    @classmethod
    def api_request(cls, subdomain, method, args={}):
        """Call an oboom.com API method and return the decoded JSON response.

        NOTE(review): the mutable default `args={}` is kept for interface
        compatibility; it is only read, never mutated, so sharing is harmless.
        """
        return json.loads(get_url(cls.API_URL % subdomain + method, post=args))

    def _login_request(self, user, password):
        """Perform the API login handshake shared by grab_info() and signin().

        The password is stretched with PBKDF2 (salt = reversed password,
        1000 rounds, 16 bytes hex) as required by the oboom.com API.
        Returns the raw API response list: [status_code, payload].
        """
        key = PBKDF2(password, password[::-1], 1000).hexread(16)
        return self.api_request("www", "login", {"auth": user, "pass": key})

    def grab_info(self, user, password, data):
        """Fetch premium status, expiry and traffic for the account."""
        res = self._login_request(user, password)

        user_data = res[1]["user"]

        # The API encodes "no value" as the literal string "null".
        premium = user_data["premium"] != "null"

        if user_data["premium_unix"] == "null":
            validuntil = -1
        else:
            validuntil = float(user_data["premium_unix"])

        trafficleft = user_data["traffic"]["current"]

        # Persist the session token for later download requests.
        data["session"] = res[1]["session"]

        return {
            "premium": premium,
            "validuntil": validuntil,
            "trafficleft": trafficleft,
        }

    def signin(self, user, password, data):
        """Validate the credentials; report a login failure on non-200 status."""
        res = self._login_request(user, password)
        if res[0] != 200:
            self.fail_login(res[1])
|
agpl-3.0
|
lmtierney/selenium
|
py/selenium/webdriver/common/action_chains.py
|
5
|
12419
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The ActionChains implementation,
"""
import time
from selenium.webdriver.remote.command import Command
from .utils import keys_to_typing
from .actions.action_builder import ActionBuilder
class ActionChains(object):
    """
    ActionChains are a way to automate low level interactions such as
    mouse movements, mouse button actions, key press, and context menu
    interactions.  This is useful for doing more complex actions like
    hover over and drag and drop.

    Generate user actions.
       When you call methods for actions on the ActionChains object,
       the actions are stored in a queue in the ActionChains object.
       When you call perform(), the events are fired in the order they
       are queued up.

    ActionChains can be used in a chain pattern::

        menu = driver.find_element_by_css_selector(".nav")
        hidden_submenu = driver.find_element_by_css_selector(".nav #submenu1")

        ActionChains(driver).move_to_element(menu).click(hidden_submenu).perform()

    Or actions can be queued up one by one, then performed.::

        menu = driver.find_element_by_css_selector(".nav")
        hidden_submenu = driver.find_element_by_css_selector(".nav #submenu1")

        actions = ActionChains(driver)
        actions.move_to_element(menu)
        actions.click(hidden_submenu)
        actions.perform()

    Either way, the actions are performed in the order they are called, one
    after another.

    Implementation note: every action method below has two code paths --
    W3C-capable remotes queue actions on an ActionBuilder (self.w3c_actions),
    legacy remotes append wire-protocol callables to self._actions.  The
    extra ``pause()`` calls on the W3C path pad the *other* input device so
    the keyboard and pointer action timelines stay the same length (the W3C
    spec replays all device timelines tick by tick in lockstep).
    """

    def __init__(self, driver):
        """
        Creates a new ActionChains.

        :Args:
         - driver: The WebDriver instance which performs user actions.
        """
        self._driver = driver
        self._actions = []
        # NOTE(review): w3c_actions is only created for W3C drivers, so the
        # attribute does not exist on legacy drivers -- every method guards
        # on self._driver.w3c before touching it.
        if self._driver.w3c:
            self.w3c_actions = ActionBuilder(driver)

    def perform(self):
        """
        Performs all stored actions.
        """
        if self._driver.w3c:
            self.w3c_actions.perform()
        else:
            # Legacy path: replay the queued wire-protocol calls in order.
            for action in self._actions:
                action()

    def reset_actions(self):
        """
        Clears actions that are already stored locally and on the remote end
        """
        if self._driver.w3c:
            self.w3c_actions.clear_actions()
            for device in self.w3c_actions.devices:
                device.clear_actions()
        self._actions = []

    def click(self, on_element=None):
        """
        Clicks an element.

        :Args:
         - on_element: The element to click.
           If None, clicks on current mouse position.
        """
        if on_element:
            self.move_to_element(on_element)
        if self._driver.w3c:
            self.w3c_actions.pointer_action.click()
            # Two key-device pauses: a click is press + release on the
            # pointer timeline, so the key timeline needs two ticks.
            self.w3c_actions.key_action.pause()
            self.w3c_actions.key_action.pause()
        else:
            self._actions.append(lambda: self._driver.execute(
                Command.CLICK, {'button': 0}))
        return self

    def click_and_hold(self, on_element=None):
        """
        Holds down the left mouse button on an element.

        :Args:
         - on_element: The element to mouse down.
           If None, clicks on current mouse position.
        """
        if on_element:
            self.move_to_element(on_element)
        if self._driver.w3c:
            self.w3c_actions.pointer_action.click_and_hold()
            self.w3c_actions.key_action.pause()
        else:
            self._actions.append(lambda: self._driver.execute(
                Command.MOUSE_DOWN, {}))
        return self

    def context_click(self, on_element=None):
        """
        Performs a context-click (right click) on an element.

        :Args:
         - on_element: The element to context-click.
           If None, clicks on current mouse position.
        """
        if on_element:
            self.move_to_element(on_element)
        if self._driver.w3c:
            self.w3c_actions.pointer_action.context_click()
            self.w3c_actions.key_action.pause()
            self.w3c_actions.key_action.pause()
        else:
            # Legacy protocol encodes the right button as button 2.
            self._actions.append(lambda: self._driver.execute(
                Command.CLICK, {'button': 2}))
        return self

    def double_click(self, on_element=None):
        """
        Double-clicks an element.

        :Args:
         - on_element: The element to double-click.
           If None, clicks on current mouse position.
        """
        if on_element:
            self.move_to_element(on_element)
        if self._driver.w3c:
            self.w3c_actions.pointer_action.double_click()
            # Four pointer ticks (press/release twice) -> four key pauses.
            for _ in range(4):
                self.w3c_actions.key_action.pause()
        else:
            self._actions.append(lambda: self._driver.execute(
                Command.DOUBLE_CLICK, {}))
        return self

    def drag_and_drop(self, source, target):
        """
        Holds down the left mouse button on the source element,
        then moves to the target element and releases the mouse button.

        :Args:
         - source: The element to mouse down.
         - target: The element to mouse up.
        """
        self.click_and_hold(source)
        self.release(target)
        return self

    def drag_and_drop_by_offset(self, source, xoffset, yoffset):
        """
        Holds down the left mouse button on the source element,
        then moves to the target offset and releases the mouse button.

        :Args:
         - source: The element to mouse down.
         - xoffset: X offset to move to.
         - yoffset: Y offset to move to.
        """
        self.click_and_hold(source)
        self.move_by_offset(xoffset, yoffset)
        self.release()
        return self

    def key_down(self, value, element=None):
        """
        Sends a key press only, without releasing it.
        Should only be used with modifier keys (Control, Alt and Shift).

        :Args:
         - value: The modifier key to send. Values are defined in `Keys` class.
         - element: The element to send keys.
           If None, sends a key to current focused element.

        Example, pressing ctrl+c::

            ActionChains(driver).key_down(Keys.CONTROL).send_keys('c').key_up(Keys.CONTROL).perform()
        """
        if element:
            self.click(element)
        if self._driver.w3c:
            self.w3c_actions.key_action.key_down(value)
            self.w3c_actions.pointer_action.pause()
        else:
            self._actions.append(lambda: self._driver.execute(
                Command.SEND_KEYS_TO_ACTIVE_ELEMENT,
                {"value": keys_to_typing(value)}))
        return self

    def key_up(self, value, element=None):
        """
        Releases a modifier key.

        :Args:
         - value: The modifier key to send. Values are defined in Keys class.
         - element: The element to send keys.
           If None, sends a key to current focused element.

        Example, pressing ctrl+c::

            ActionChains(driver).key_down(Keys.CONTROL).send_keys('c').key_up(Keys.CONTROL).perform()
        """
        if element:
            self.click(element)
        if self._driver.w3c:
            self.w3c_actions.key_action.key_up(value)
            self.w3c_actions.pointer_action.pause()
        else:
            # NOTE(review): the legacy protocol has no separate key-up
            # command; sending the same modifier again toggles it off.
            self._actions.append(lambda: self._driver.execute(
                Command.SEND_KEYS_TO_ACTIVE_ELEMENT,
                {"value": keys_to_typing(value)}))
        return self

    def move_by_offset(self, xoffset, yoffset):
        """
        Moving the mouse to an offset from current mouse position.

        :Args:
         - xoffset: X offset to move to, as a positive or negative integer.
         - yoffset: Y offset to move to, as a positive or negative integer.
        """
        if self._driver.w3c:
            self.w3c_actions.pointer_action.move_by(xoffset, yoffset)
            self.w3c_actions.key_action.pause()
        else:
            self._actions.append(lambda: self._driver.execute(
                Command.MOVE_TO, {
                    'xoffset': int(xoffset),
                    'yoffset': int(yoffset)}))
        return self

    def move_to_element(self, to_element):
        """
        Moving the mouse to the middle of an element.

        :Args:
         - to_element: The WebElement to move to.
        """
        if self._driver.w3c:
            self.w3c_actions.pointer_action.move_to(to_element)
            self.w3c_actions.key_action.pause()
        else:
            self._actions.append(lambda: self._driver.execute(
                Command.MOVE_TO, {'element': to_element.id}))
        return self

    def move_to_element_with_offset(self, to_element, xoffset, yoffset):
        """
        Move the mouse by an offset of the specified element.
        Offsets are relative to the top-left corner of the element.

        :Args:
         - to_element: The WebElement to move to.
         - xoffset: X offset to move to.
         - yoffset: Y offset to move to.
        """
        if self._driver.w3c:
            self.w3c_actions.pointer_action.move_to(to_element, xoffset, yoffset)
            self.w3c_actions.key_action.pause()
        else:
            self._actions.append(
                lambda: self._driver.execute(Command.MOVE_TO, {
                    'element': to_element.id,
                    'xoffset': int(xoffset),
                    'yoffset': int(yoffset)}))
        return self

    def pause(self, seconds):
        """ Pause all inputs for the specified duration in seconds """
        if self._driver.w3c:
            # Pause both device timelines so they stay in lockstep.
            self.w3c_actions.pointer_action.pause(seconds)
            self.w3c_actions.key_action.pause(seconds)
        else:
            # Legacy path: the pause happens client-side at perform() time.
            self._actions.append(lambda: time.sleep(seconds))
        return self

    def release(self, on_element=None):
        """
        Releasing a held mouse button on an element.

        :Args:
         - on_element: The element to mouse up.
           If None, releases on current mouse position.
        """
        if on_element:
            self.move_to_element(on_element)
        if self._driver.w3c:
            self.w3c_actions.pointer_action.release()
            self.w3c_actions.key_action.pause()
        else:
            self._actions.append(lambda: self._driver.execute(Command.MOUSE_UP, {}))
        return self

    def send_keys(self, *keys_to_send):
        """
        Sends keys to current focused element.

        :Args:
         - keys_to_send: The keys to send.  Modifier keys constants can be
           found in the 'Keys' class.
        """
        typing = keys_to_typing(keys_to_send)
        if self._driver.w3c:
            # W3C path emits an explicit down/up pair per character.
            for key in typing:
                self.key_down(key)
                self.key_up(key)
        else:
            self._actions.append(lambda: self._driver.execute(
                Command.SEND_KEYS_TO_ACTIVE_ELEMENT, {'value': typing}))
        return self

    def send_keys_to_element(self, element, *keys_to_send):
        """
        Sends keys to an element.

        :Args:
         - element: The element to send keys.
         - keys_to_send: The keys to send.  Modifier keys constants can be
           found in the 'Keys' class.
        """
        # Clicking first gives the element focus before typing.
        self.click(element)
        self.send_keys(*keys_to_send)
        return self

    # Context manager so ActionChains can be used in a 'with .. as' statements.
    def __enter__(self):
        return self  # Return created instance of self.

    def __exit__(self, _type, _value, _traceback):
        pass  # Do nothing, does not require additional cleanup.
|
apache-2.0
|
himmih/cluedo
|
venv/lib/python2.7/site-packages/pip/req/req_set.py
|
246
|
27444
|
from __future__ import absolute_import
from collections import defaultdict
import functools
import itertools
import logging
import os
from pip._vendor import pkg_resources
from pip._vendor import requests
from pip.download import (url_to_path, unpack_url)
from pip.exceptions import (InstallationError, BestVersionAlreadyInstalled,
DistributionNotFound, PreviousBuildDirError)
from pip.req.req_install import InstallRequirement
from pip.utils import (
display_path, dist_in_usersite, ensure_dir, normalize_path)
from pip.utils.logging import indent_log
from pip.vcs import vcs
logger = logging.getLogger(__name__)
class Requirements(object):
    """A mapping of requirement keys that preserves first-insertion order."""

    def __init__(self):
        # Keys in the order they were first set; _dict holds the values.
        self._keys = []
        self._dict = {}

    def keys(self):
        return self._keys

    def values(self):
        return [self._dict[k] for k in self._keys]

    def __contains__(self, item):
        return item in self._keys

    def __setitem__(self, key, value):
        # Record the key only the first time it is seen, so iteration order
        # reflects first insertion even when a value is overwritten.
        if key not in self._keys:
            self._keys.append(key)
        self._dict[key] = value

    def __getitem__(self, key):
        return self._dict[key]

    def __repr__(self):
        items = ['%s: %s' % (repr(k), repr(self._dict[k])) for k in self._keys]
        return 'Requirements({%s})' % ', '.join(items)
class DistAbstraction(object):
    """Abstracts out the wheel vs non-wheel prepare_files logic.

    The requirements for anything installable are as follows:
     - we must be able to determine the requirement name
       (or we can't correctly handle the non-upgrade case).
     - we must be able to generate a list of run-time dependencies
       without installing any additional packages (or we would
       have to either burn time by doing temporary isolated installs
       or alternatively violate pips 'don't start installing unless
       all requirements are available' rule - neither of which are
       desirable).
     - for packages with setup requirements, we must also be able
       to determine their requirements without installing additional
       packages (for the same reason as run-time dependencies)
     - we must be able to create a Distribution object exposing the
       above metadata.
    """

    def __init__(self, req_to_install):
        # The InstallRequirement this abstraction wraps.
        self.req_to_install = req_to_install

    def dist(self, finder):
        """Return a setuptools Dist object."""
        raise NotImplementedError(self.dist)

    def prep_for_dist(self):
        """Ensure that we can get a Dist for this requirement."""
        # Fix: previously raised NotImplementedError(self.dist), which made
        # the error point at the wrong method.
        raise NotImplementedError(self.prep_for_dist)
def make_abstract_dist(req_to_install):
    """Factory to make an abstract dist object.

    Preconditions: Either an editable req with a source_dir, or satisfied_by
    or a wheel link, or a non-editable req with a source_dir.

    :return: A concrete DistAbstraction.
    """
    # Only non-editable wheel links get the wheel treatment; everything
    # else (editables included) goes through the sdist path.
    if (not req_to_install.editable and req_to_install.link and
            req_to_install.link.is_wheel):
        return IsWheel(req_to_install)
    return IsSDist(req_to_install)
class IsWheel(DistAbstraction):
    """Dist abstraction for an already-unpacked wheel."""

    def dist(self, finder):
        # The unpacked wheel directory carries its metadata directly;
        # use the first distribution discovered there.
        found = list(pkg_resources.find_distributions(
            self.req_to_install.source_dir))
        return found[0]

    def prep_for_dist(self):
        # Wheels need no build step before metadata is available.
        # FIXME:https://github.com/pypa/pip/issues/1112
        pass
class IsSDist(DistAbstraction):
    """Dist abstraction for a source distribution (or editable checkout)."""

    def dist(self, finder):
        dist = self.req_to_install.get_dist()
        # FIXME: shouldn't be globally added:
        if dist.has_metadata('dependency_links.txt'):
            links = dist.get_metadata_lines('dependency_links.txt')
            finder.add_dependency_links(links)
        return dist

    def prep_for_dist(self):
        # Generate egg-info metadata (read later by get_dist()) and
        # sanity-check that the unpacked tree matches the requested version.
        self.req_to_install.run_egg_info()
        self.req_to_install.assert_source_matches_version()
class Installed(DistAbstraction):
    """Dist abstraction for a requirement already satisfied on the system."""

    def dist(self, finder):
        # The already-installed distribution recorded during resolution.
        return self.req_to_install.satisfied_by

    def prep_for_dist(self):
        # Nothing to prepare; the dist is already installed.
        pass
class RequirementSet(object):
    """The set of requirements a single pip invocation resolves and acts on.

    Holds named and unnamed InstallRequirements, the dependency edges
    between them, and the bookkeeping lists used by the download,
    install and uninstall phases.
    """

    def __init__(self, build_dir, src_dir, download_dir, upgrade=False,
                 ignore_installed=False, as_egg=False, target_dir=None,
                 ignore_dependencies=False, force_reinstall=False,
                 use_user_site=False, session=None, pycompile=True,
                 isolated=False, wheel_download_dir=None,
                 wheel_cache=None):
        """Create a RequirementSet.

        :param wheel_download_dir: Where still-packed .whl files should be
            written to. If None they are written to the download_dir
            parameter. Separate to download_dir to permit only keeping wheel
            archives for pip wheel.
        :param download_dir: Where still packed archives should be written
            to. If None they are not saved, and are deleted immediately
            after unpacking.
        :param wheel_cache: The pip wheel cache, for passing to
            InstallRequirement.
        """
        if session is None:
            raise TypeError(
                "RequirementSet() missing 1 required keyword argument: "
                "'session'"
            )
        self.build_dir = build_dir
        self.src_dir = src_dir
        # XXX: download_dir and wheel_download_dir overlap semantically and may
        # be combined if we're willing to have non-wheel archives present in
        # the wheelhouse output by 'pip wheel'.
        self.download_dir = download_dir
        self.upgrade = upgrade
        self.ignore_installed = ignore_installed
        self.force_reinstall = force_reinstall
        # Ordered mapping of canonical name -> InstallRequirement.
        self.requirements = Requirements()
        # Mapping of alias: real_name
        self.requirement_aliases = {}
        # Requirements given as bare URLs/paths without an egg fragment;
        # they get a name only after download/inspection.
        self.unnamed_requirements = []
        self.ignore_dependencies = ignore_dependencies
        self.successfully_downloaded = []
        self.successfully_installed = []
        self.reqs_to_cleanup = []
        self.as_egg = as_egg
        self.use_user_site = use_user_site
        self.target_dir = target_dir  # set from --target option
        self.session = session
        self.pycompile = pycompile
        self.isolated = isolated
        if wheel_download_dir:
            wheel_download_dir = normalize_path(wheel_download_dir)
        self.wheel_download_dir = wheel_download_dir
        self._wheel_cache = wheel_cache
        # Maps from install_req -> dependencies_of_install_req
        self._dependencies = defaultdict(list)

    def __str__(self):
        """Space-separated specifiers of the user-supplied requirements."""
        # Only top-level requirements (no comes_from) are shown.
        reqs = [req for req in self.requirements.values()
                if not req.comes_from]
        reqs.sort(key=lambda req: req.name.lower())
        return ' '.join([str(req.req) for req in reqs])

    def __repr__(self):
        # All requirements, sorted by name, with a count.
        reqs = [req for req in self.requirements.values()]
        reqs.sort(key=lambda req: req.name.lower())
        reqs_str = ', '.join([str(req.req) for req in reqs])
        return ('<%s object; %d requirement(s): %s>'
                % (self.__class__.__name__, len(reqs), reqs_str))

    def add_requirement(self, install_req, parent_req_name=None):
        """Add install_req as a requirement to install.

        :param parent_req_name: The name of the requirement that needed this
            added. The name is used because when multiple unnamed requirements
            resolve to the same name, we could otherwise end up with
            dependency links that point outside the Requirements set.
            parent_req must already be added. Note that None implies that
            this is a user supplied requirement, vs an inferred one.
        :return: Additional requirements to scan. That is either [] if
            the requirement is not applicable, or [install_req] if the
            requirement is applicable and has just been added.
        """
        name = install_req.name
        if not install_req.match_markers():
            # Environment markers exclude this requirement entirely.
            logger.warning("Ignoring %s: markers %r don't match your "
                           "environment", install_req.name,
                           install_req.markers)
            return []
        # Propagate set-wide install options onto the individual req.
        install_req.as_egg = self.as_egg
        install_req.use_user_site = self.use_user_site
        install_req.target_dir = self.target_dir
        install_req.pycompile = self.pycompile
        if not name:
            # url or path requirement w/o an egg fragment
            self.unnamed_requirements.append(install_req)
            return [install_req]
        else:
            try:
                existing_req = self.get_requirement(name)
            except KeyError:
                existing_req = None
            if (parent_req_name is None and existing_req and not
                    existing_req.constraint):
                # Two user-supplied (non-constraint) reqs for the same name.
                raise InstallationError(
                    'Double requirement given: %s (already in %s, name=%r)'
                    % (install_req, existing_req, name))
            if not existing_req:
                # Add requirement
                self.requirements[name] = install_req
                # FIXME: what about other normalizations? E.g., _ vs. -?
                if name.lower() != name:
                    self.requirement_aliases[name.lower()] = name
                result = [install_req]
            else:
                # NOTE(review): if both existing_req and install_req are
                # constraints, 'result' is never bound on this path —
                # appears unreachable in practice; confirm.
                if not existing_req.constraint:
                    # No need to scan, we've already encountered this for
                    # scanning.
                    result = []
                elif not install_req.constraint:
                    # If we're now installing a constraint, mark the existing
                    # object for real installation.
                    existing_req.constraint = False
                    # And now we need to scan this.
                    result = [existing_req]
                # Canonicalise to the already-added object for the backref
                # check below.
                install_req = existing_req
            if parent_req_name:
                # Record the dependency edge used by _to_install().
                parent_req = self.get_requirement(parent_req_name)
                self._dependencies[parent_req].append(install_req)
            return result

    def has_requirement(self, project_name):
        """Return True if a requirement (or alias) with this name exists."""
        # Check the given spelling and the lower-cased alias.
        for name in project_name, project_name.lower():
            if name in self.requirements or name in self.requirement_aliases:
                return True
        return False

    @property
    def has_requirements(self):
        """True if there is anything to act on.

        A non-constraint named requirement or any unnamed requirement
        counts; bare constraints alone do not.
        """
        return list(req for req in self.requirements.values() if not
                    req.constraint) or self.unnamed_requirements

    @property
    def is_download(self):
        """True when we are downloading (``pip download``/``--download``).

        NOTE: this property has a side effect — it expands ``~`` in
        download_dir in place — and raises if the directory is missing.
        """
        if self.download_dir:
            self.download_dir = os.path.expanduser(self.download_dir)
            if os.path.exists(self.download_dir):
                return True
            else:
                logger.critical('Could not find download directory')
                raise InstallationError(
                    "Could not find or access download directory '%s'"
                    % display_path(self.download_dir))
        return False

    def get_requirement(self, project_name):
        """Look up a requirement by name or lower-cased alias.

        :raises KeyError: if no requirement with that name exists.
        """
        for name in project_name, project_name.lower():
            if name in self.requirements:
                return self.requirements[name]
            if name in self.requirement_aliases:
                return self.requirements[self.requirement_aliases[name]]
        raise KeyError("No project with the name %r" % project_name)

    def uninstall(self, auto_confirm=False):
        """Uninstall every non-constraint requirement in the set."""
        for req in self.requirements.values():
            if req.constraint:
                # Constraints are never installed, so nothing to remove.
                continue
            req.uninstall(auto_confirm=auto_confirm)
            req.commit_uninstall()

    def _walk_req_to_install(self, handler):
        """Call handler for all pending reqs.

        :param handler: Handle a single requirement. Should take a requirement
            to install. Can optionally return an iterable of additional
            InstallRequirements to cover.
        """
        # The list() here is to avoid potential mutate-while-iterating bugs.
        # discovered_reqs at the end of the chain picks up anything the
        # handler returns while we are still iterating.
        discovered_reqs = []
        reqs = itertools.chain(
            list(self.unnamed_requirements), list(self.requirements.values()),
            discovered_reqs)
        for req_to_install in reqs:
            more_reqs = handler(req_to_install)
            if more_reqs:
                discovered_reqs.extend(more_reqs)

    def prepare_files(self, finder):
        """
        Prepare process. Create temp directories, download and/or unpack files.
        """
        # make the wheelhouse
        if self.wheel_download_dir:
            ensure_dir(self.wheel_download_dir)
        self._walk_req_to_install(
            functools.partial(self._prepare_file, finder))

    def _check_skip_installed(self, req_to_install, finder):
        """Check if req_to_install should be skipped.

        This will check if the req is installed, and whether we should upgrade
        or reinstall it, taking into account all the relevant user options.

        After calling this req_to_install will only have satisfied_by set to
        None if the req_to_install is to be upgraded/reinstalled etc. Any
        other value will be a dist recording the current thing installed that
        satisfies the requirement.

        Note that for vcs urls and the like we can't assess skipping in this
        routine - we simply identify that we need to pull the thing down,
        then later on it is pulled down and introspected to assess upgrade/
        reinstalls etc.

        :return: A text reason for why it was skipped, or None.
        """
        # Check whether to upgrade/reinstall this req or not.
        req_to_install.check_if_exists()
        if req_to_install.satisfied_by:
            skip_reason = 'satisfied (use --upgrade to upgrade)'
            if self.upgrade:
                best_installed = False
                # For link based requirements we have to pull the
                # tree down and inspect to assess the version #, so
                # its handled way down.
                if not (self.force_reinstall or req_to_install.link):
                    try:
                        finder.find_requirement(req_to_install, self.upgrade)
                    except BestVersionAlreadyInstalled:
                        skip_reason = 'up-to-date'
                        best_installed = True
                    except DistributionNotFound:
                        # No distribution found, so we squash the
                        # error - it will be raised later when we
                        # re-try later to do the install.
                        # Why don't we just raise here?
                        pass
                if not best_installed:
                    # don't uninstall conflict if user install and
                    # conflict is not user install
                    if not (self.use_user_site and not
                            dist_in_usersite(req_to_install.satisfied_by)):
                        req_to_install.conflicts_with = \
                            req_to_install.satisfied_by
                    req_to_install.satisfied_by = None
            return skip_reason
        else:
            return None

    def _prepare_file(self, finder, req_to_install):
        """Prepare a single requirements files.

        :return: A list of addition InstallRequirements to also install.
        """
        # Tell user what we are doing for this requirement:
        # obtain (editable), skipping, processing (local url), collecting
        # (remote url or package name)
        if req_to_install.constraint or req_to_install.prepared:
            return []
        req_to_install.prepared = True
        if req_to_install.editable:
            logger.info('Obtaining %s', req_to_install)
        else:
            # satisfied_by is only evaluated by calling _check_skip_installed,
            # so it must be None here.
            assert req_to_install.satisfied_by is None
            if not self.ignore_installed:
                skip_reason = self._check_skip_installed(
                    req_to_install, finder)
            if req_to_install.satisfied_by:
                assert skip_reason is not None, (
                    '_check_skip_installed returned None but '
                    'req_to_install.satisfied_by is set to %r'
                    % (req_to_install.satisfied_by,))
                logger.info(
                    'Requirement already %s: %s', skip_reason,
                    req_to_install)
            else:
                if (req_to_install.link and
                        req_to_install.link.scheme == 'file'):
                    path = url_to_path(req_to_install.link.url)
                    logger.info('Processing %s', display_path(path))
                else:
                    logger.info('Collecting %s', req_to_install)
        with indent_log():
            # ################################ #
            # # vcs update or unpack archive # #
            # ################################ #
            if req_to_install.editable:
                req_to_install.ensure_has_source_dir(self.src_dir)
                req_to_install.update_editable(not self.is_download)
                abstract_dist = make_abstract_dist(req_to_install)
                abstract_dist.prep_for_dist()
                if self.is_download:
                    req_to_install.archive(self.download_dir)
            elif req_to_install.satisfied_by:
                # Already installed and not being upgraded: nothing to fetch.
                abstract_dist = Installed(req_to_install)
            else:
                # @@ if filesystem packages are not marked
                # editable in a req, a non deterministic error
                # occurs when the script attempts to unpack the
                # build directory
                req_to_install.ensure_has_source_dir(self.build_dir)
                # If a checkout exists, it's unwise to keep going. version
                # inconsistencies are logged later, but do not fail the
                # installation.
                # FIXME: this won't upgrade when there's an existing
                # package unpacked in `req_to_install.source_dir`
                if os.path.exists(
                        os.path.join(req_to_install.source_dir, 'setup.py')):
                    raise PreviousBuildDirError(
                        "pip can't proceed with requirements '%s' due to a"
                        " pre-existing build directory (%s). This is "
                        "likely due to a previous installation that failed"
                        ". pip is being responsible and not assuming it "
                        "can delete this. Please delete it and try again."
                        % (req_to_install, req_to_install.source_dir)
                    )
                req_to_install.populate_link(finder, self.upgrade)
                # We can't hit this spot and have populate_link return None.
                # req_to_install.satisfied_by is None here (because we're
                # guarded) and upgrade has no impact except when satisfied_by
                # is not None.
                # Then inside find_requirement existing_applicable -> False
                # If no new versions are found, DistributionNotFound is raised,
                # otherwise a result is guaranteed.
                assert req_to_install.link
                try:
                    download_dir = self.download_dir
                    # We always delete unpacked sdists after pip ran.
                    autodelete_unpacked = True
                    if req_to_install.link.is_wheel \
                            and self.wheel_download_dir:
                        # when doing 'pip wheel` we download wheels to a
                        # dedicated dir.
                        download_dir = self.wheel_download_dir
                    if req_to_install.link.is_wheel:
                        if download_dir:
                            # When downloading, we only unpack wheels to get
                            # metadata.
                            autodelete_unpacked = True
                        else:
                            # When installing a wheel, we use the unpacked
                            # wheel.
                            autodelete_unpacked = False
                    unpack_url(
                        req_to_install.link, req_to_install.source_dir,
                        download_dir, autodelete_unpacked,
                        session=self.session)
                except requests.HTTPError as exc:
                    logger.critical(
                        'Could not install requirement %s because '
                        'of error %s',
                        req_to_install,
                        exc,
                    )
                    raise InstallationError(
                        'Could not install requirement %s because '
                        'of HTTP error %s for URL %s' %
                        (req_to_install, exc, req_to_install.link)
                    )
                abstract_dist = make_abstract_dist(req_to_install)
                abstract_dist.prep_for_dist()
                if self.is_download:
                    # Make a .zip of the source_dir we already created.
                    if req_to_install.link.scheme in vcs.all_schemes:
                        req_to_install.archive(self.download_dir)
                # req_to_install.req is only avail after unpack for URL
                # pkgs repeat check_if_exists to uninstall-on-upgrade
                # (#14)
                if not self.ignore_installed:
                    req_to_install.check_if_exists()
                if req_to_install.satisfied_by:
                    if self.upgrade or self.ignore_installed:
                        # don't uninstall conflict if user install and
                        # conflict is not user install
                        if not (self.use_user_site and not
                                dist_in_usersite(
                                    req_to_install.satisfied_by)):
                            req_to_install.conflicts_with = \
                                req_to_install.satisfied_by
                        req_to_install.satisfied_by = None
                    else:
                        logger.info(
                            'Requirement already satisfied (use '
                            '--upgrade to upgrade): %s',
                            req_to_install,
                        )
            # ###################### #
            # # parse dependencies # #
            # ###################### #
            dist = abstract_dist.dist(finder)
            more_reqs = []

            def add_req(subreq):
                # Wrap a setuptools sub-requirement in an InstallRequirement
                # and register it as a dependency of req_to_install.
                sub_install_req = InstallRequirement(
                    str(subreq),
                    req_to_install,
                    isolated=self.isolated,
                    wheel_cache=self._wheel_cache,
                )
                more_reqs.extend(self.add_requirement(
                    sub_install_req, req_to_install.name))

            # We add req_to_install before its dependencies, so that we
            # can refer to it when adding dependencies.
            if not self.has_requirement(req_to_install.name):
                # 'unnamed' requirements will get added here
                self.add_requirement(req_to_install, None)
            if not self.ignore_dependencies:
                if (req_to_install.extras):
                    logger.debug(
                        "Installing extra requirements: %r",
                        ','.join(req_to_install.extras),
                    )
                # Warn about requested extras the dist does not provide.
                missing_requested = sorted(
                    set(req_to_install.extras) - set(dist.extras)
                )
                for missing in missing_requested:
                    logger.warning(
                        '%s does not provide the extra \'%s\'',
                        dist, missing
                    )
                available_requested = sorted(
                    set(dist.extras) & set(req_to_install.extras)
                )
                for subreq in dist.requires(available_requested):
                    add_req(subreq)
            # cleanup tmp src
            self.reqs_to_cleanup.append(req_to_install)
            if not req_to_install.editable and not req_to_install.satisfied_by:
                # XXX: --no-install leads this to report 'Successfully
                # downloaded' for only non-editable reqs, even though we took
                # action on them.
                self.successfully_downloaded.append(req_to_install)
        return more_reqs

    def cleanup_files(self):
        """Clean up files, remove builds."""
        logger.debug('Cleaning up...')
        with indent_log():
            for req in self.reqs_to_cleanup:
                req.remove_temporary_source()

    def _to_install(self):
        """Create the installation order.

        The installation order is topological - requirements are installed
        before the requiring thing. We break cycles at an arbitrary point,
        and make no other guarantees.
        """
        # The current implementation, which we may change at any point
        # installs the user specified things in the order given, except when
        # dependencies must come earlier to achieve topological order.
        order = []
        ordered_reqs = set()

        def schedule(req):
            # Skip already-satisfied or already-scheduled requirements;
            # constraints are never installed themselves.
            if req.satisfied_by or req in ordered_reqs:
                return
            if req.constraint:
                return
            ordered_reqs.add(req)
            for dep in self._dependencies[req]:
                schedule(dep)
            order.append(req)

        for install_req in self.requirements.values():
            schedule(install_req)
        return order

    def install(self, install_options, global_options=(), *args, **kwargs):
        """
        Install everything in this set (after having downloaded and unpacked
        the packages)
        """
        to_install = self._to_install()
        if to_install:
            logger.info(
                'Installing collected packages: %s',
                ', '.join([req.name for req in to_install]),
            )
        with indent_log():
            for requirement in to_install:
                if requirement.conflicts_with:
                    # An older version is installed; remove it first so the
                    # rollback below can restore it on failure.
                    logger.info(
                        'Found existing installation: %s',
                        requirement.conflicts_with,
                    )
                    with indent_log():
                        requirement.uninstall(auto_confirm=True)
                try:
                    requirement.install(
                        install_options,
                        global_options,
                        *args,
                        **kwargs
                    )
                except:
                    # if install did not succeed, rollback previous uninstall
                    if (requirement.conflicts_with and not
                            requirement.install_succeeded):
                        requirement.rollback_uninstall()
                    raise
                else:
                    if (requirement.conflicts_with and
                            requirement.install_succeeded):
                        requirement.commit_uninstall()
                requirement.remove_temporary_source()
        self.successfully_installed = to_install
|
apache-2.0
|
kzys/buildbot
|
buildbot/changes/pb.py
|
1
|
4662
|
# -*- test-case-name: buildbot.test.test_changes -*-
from twisted.python import log
from buildbot.pbutil import NewCredPerspective
from buildbot.changes import base, changes
class ChangePerspective(NewCredPerspective):
    """PB perspective handed out to a remote 'buildbot sendchange' client.

    Receives change dictionaries over Perspective Broker, optionally
    filters and rewrites file paths by a prefix, and forwards the
    resulting Change objects to the ChangeMaster.
    """

    def __init__(self, changemaster, prefix):
        self.changemaster = changemaster
        self.prefix = prefix

    def attached(self, mind):
        # The remote side uses this same object as its perspective.
        return self

    def detached(self, mind):
        pass

    def perspective_addChange(self, changedict):
        """Remote entry point: accept one change dict from the client."""
        log.msg("perspective_addChange called")
        pathnames = []
        # Fix: removed unused local 'prefixpaths' that was never read.
        for path in changedict['files']:
            if self.prefix:
                if not path.startswith(self.prefix):
                    # this file does not start with the prefix, so ignore it
                    continue
                # Strip the prefix so Schedulers see tree-relative names.
                path = path[len(self.prefix):]
            pathnames.append(path)
        if pathnames:
            change = changes.Change(changedict['who'],
                                    pathnames,
                                    changedict['comments'],
                                    branch=changedict.get('branch'),
                                    revision=changedict.get('revision'),
                                    revlink=changedict.get('revlink', ''),
                                    category=changedict.get('category'),
                                    when=changedict.get('when'),
                                    properties=changedict.get('properties', {})
                                    )
            self.changemaster.addChange(change)
class PBChangeSource(base.ChangeSource):
    # Attributes compared when deciding whether a reconfig changed us.
    compare_attrs = ["user", "passwd", "port", "prefix"]

    def __init__(self, user="change", passwd="changepw", port=None,
                 prefix=None, sep=None):
        """I listen on a TCP port for Changes from 'buildbot sendchange'.

        I am a ChangeSource which will accept Changes from a remote source. I
        share a TCP listening port with the buildslaves.

        The 'buildbot sendchange' command, the contrib/svn_buildbot.py tool,
        and the contrib/bzr_buildbot.py tool know how to send changes to me.

        @type prefix: string (or None)
        @param prefix: if set, I will ignore any filenames that do not start
                       with this string. Moreover I will remove this string
                       from all filenames before creating the Change object
                       and delivering it to the Schedulers. This is useful
                       for changes coming from version control systems that
                       represent branches as parent directories within the
                       repository (like SVN and Perforce). Use a prefix of
                       'trunk/' or 'project/branches/foobranch/' to only
                       follow one branch and to get correct tree-relative
                       filenames.

        @param sep: DEPRECATED (with an axe). sep= was removed in
                    buildbot-0.7.4 . Instead of using it, you should use
                    prefix= with a trailing directory separator. This
                    docstring (and the better-than-nothing error message
                    which occurs when you use it) will be removed in 0.7.5 .
        """
        # sep= was removed in 0.7.4 . This more-helpful-than-nothing error
        # message will be removed in 0.7.5 .
        assert sep is None, "prefix= is now a complete string, do not use sep="
        # TODO: current limitations
        # Only the default credentials and the shared slaveport are
        # supported for now.
        assert user == "change"
        assert passwd == "changepw"
        assert port == None
        self.user = user
        self.passwd = passwd
        self.port = port
        self.prefix = prefix

    def describe(self):
        """One-line human-readable description for status displays."""
        # TODO: when the dispatcher is fixed, report the specific port
        # d = "PB listener on port %d" % self.port
        d = "PBChangeSource listener on all-purpose slaveport"
        if self.prefix is not None:
            d += " (prefix '%s')" % self.prefix
        return d

    def startService(self):
        base.ChangeSource.startService(self)
        # our parent is the ChangeMaster object
        # find the master's Dispatch object and register our username
        # TODO: the passwd should be registered here too
        master = self.parent.parent
        master.dispatcher.register(self.user, self)

    def stopService(self):
        base.ChangeSource.stopService(self)
        # unregister our username
        master = self.parent.parent
        master.dispatcher.unregister(self.user)

    def getPerspective(self):
        # Called by the dispatcher when a sendchange client authenticates;
        # self.parent is the ChangeMaster the perspective will feed.
        return ChangePerspective(self.parent, self.prefix)
|
gpl-2.0
|
juliusbierk/scikit-image
|
skimage/restoration/deconvolution.py
|
19
|
14315
|
# -*- coding: utf-8 -*-
# deconvolution.py --- Image deconvolution
"""Implementations restoration functions"""
from __future__ import division
import numpy as np
import numpy.random as npr
from scipy.signal import convolve2d
from . import uft
__keywords__ = "restoration, image, deconvolution"
def wiener(image, psf, balance, reg=None, is_real=True, clip=True):
    r"""Wiener-Hunt deconvolution

    Return the deconvolution with a Wiener-Hunt approach (i.e. with
    Fourier diagonalisation).

    Parameters
    ----------
    image : (M, N) ndarray
       Input degraded image
    psf : ndarray
       Point Spread Function. This is assumed to be the impulse
       response (input image space) if the data-type is real, or the
       transfer function (Fourier space) if the data-type is
       complex. There is no constraints on the shape of the impulse
       response. The transfer function must be of shape `(M, N)` if
       `is_real is True`, `(M, N // 2 + 1)` otherwise (see
       `np.fft.rfftn`).
    balance : float
       The regularisation parameter value that tunes the balance
       between the data adequacy that improve frequency restoration
       and the prior adequacy that reduce frequency restoration (to
       avoid noise artifacts).
    reg : ndarray, optional
       The regularisation operator. The Laplacian by default. It can
       be an impulse response or a transfer function, as for the
       psf. Shape constraint is the same as for the `psf` parameter.
    is_real : boolean, optional
       True by default. Specify if ``psf`` and ``reg`` are provided
       with hermitian hypothesis, that is only half of the frequency
       plane is provided (due to the redundancy of Fourier transform
       of real signal). It's apply only if ``psf`` and/or ``reg`` are
       provided as transfer function.  For the hermitian property see
       ``uft`` module or ``np.fft.rfftn``.
    clip : boolean, optional
       True by default. If True, pixel values of the result above 1 or
       under -1 are thresholded for skimage pipeline compatibility.

    Returns
    -------
    im_deconv : (M, N) ndarray
       The deconvolved image.

    Examples
    --------
    >>> from skimage import color, data, restoration
    >>> img = color.rgb2gray(data.astronaut())
    >>> from scipy.signal import convolve2d
    >>> psf = np.ones((5, 5)) / 25
    >>> img = convolve2d(img, psf, 'same')
    >>> img += 0.1 * img.std() * np.random.standard_normal(img.shape)
    >>> deconvolved_img = restoration.wiener(img, psf, 1100)

    Notes
    -----
    This function applies the Wiener filter to a noisy and degraded
    image by an impulse response (or PSF). If the data model is

    .. math:: y = Hx + n

    where :math:`n` is noise, :math:`H` the PSF and :math:`x` the
    unknown original image, the Wiener filter is

    .. math::

       \hat x = F^\dag (|\Lambda_H|^2 + \lambda |\Lambda_D|^2)
       \Lambda_H^\dag F y

    where :math:`F` and :math:`F^\dag` are the Fourier and inverse
    Fourier transfroms respectively, :math:`\Lambda_H` the transfer
    function (or the Fourier transfrom of the PSF, see [Hunt] below)
    and :math:`\Lambda_D` the filter to penalize the restored image
    frequencies (Laplacian by default, that is penalization of high
    frequency). The parameter :math:`\lambda` tunes the balance
    between the data (that tends to increase high frequency, even
    those coming from noise), and the regularization.

    These methods are then specific to a prior model. Consequently,
    the application or the true image nature must corresponds to the
    prior model. By default, the prior model (Laplacian) introduce
    image smoothness or pixel correlation. It can also be interpreted
    as high-frequency penalization to compensate the instability of
    the solution with respect to the data (sometimes called noise
    amplification or "explosive" solution).

    Finally, the use of Fourier space implies a circulant property of
    :math:`H`, see [Hunt].

    References
    ----------
    .. [1] François Orieux, Jean-François Giovannelli, and Thomas
           Rodet, "Bayesian estimation of regularization and point
           spread function parameters for Wiener-Hunt deconvolution",
           J. Opt. Soc. Am. A 27, 1593-1607 (2010)

           http://www.opticsinfobase.org/josaa/abstract.cfm?URI=josaa-27-7-1593

           http://research.orieux.fr/files/papers/OGR-JOSA10.pdf

    .. [2] B. R. Hunt "A matrix theory proof of the discrete
           convolution theorem", IEEE Trans. on Audio and
           Electroacoustics, vol. au-19, no. 4, pp. 285-288, dec. 1971
    """
    # NOTE: the docstring is now a raw string (r""") — it contains LaTeX
    # commands such as \hat and \Lambda, which are invalid escape
    # sequences in a plain string literal.
    if reg is None:
        # Default prior: Laplacian transfer function (high-freq penalty).
        reg, _ = uft.laplacian(image.ndim, image.shape, is_real=is_real)
    if not np.iscomplexobj(reg):
        # An impulse response was given; convert to a transfer function.
        reg = uft.ir2tf(reg, image.shape, is_real=is_real)
    if psf.shape != reg.shape:
        # psf given as impulse response; convert to transfer function.
        trans_func = uft.ir2tf(psf, image.shape, is_real=is_real)
    else:
        trans_func = psf
    # Closed-form Wiener filter in Fourier space.
    wiener_filter = np.conj(trans_func) / (np.abs(trans_func) ** 2 +
                                           balance * np.abs(reg) ** 2)
    if is_real:
        deconv = uft.uirfft2(wiener_filter * uft.urfft2(image),
                             shape=image.shape)
    else:
        deconv = uft.uifft2(wiener_filter * uft.ufft2(image))
    if clip:
        deconv[deconv > 1] = 1
        deconv[deconv < -1] = -1
    return deconv
def unsupervised_wiener(image, psf, reg=None, user_params=None, is_real=True,
                        clip=True):
    """Unsupervised Wiener-Hunt deconvolution.

    Return the deconvolution with a Wiener-Hunt approach, where the
    hyperparameters are automatically estimated. The algorithm is a
    stochastic iterative process (Gibbs sampler) described in the
    reference below. See also ``wiener`` function.

    Parameters
    ----------
    image : (M, N) ndarray
       The input degraded image.
    psf : ndarray
       The impulse response (input image's space) or the transfer
       function (Fourier space). Both are accepted. The transfer
       function is automatically recognized as being complex
       (``np.iscomplexobj(psf)``).
    reg : ndarray, optional
       The regularisation operator. The Laplacian by default. It can
       be an impulse response or a transfer function, as for the psf.
    user_params : dict
       Dictionary of parameters for the Gibbs sampler. See below.
    is_real : boolean, optional
       True by default. Specify if ``psf`` and ``reg`` are provided
       with hermitian hypothesis, that is only half of the frequency
       plane is provided (see ``np.fft.rfftn``).
    clip : boolean, optional
       True by default. If true, pixel values of the result above 1 or
       under -1 are thresholded for skimage pipeline compatibility.

    Returns
    -------
    x_postmean : (M, N) ndarray
       The deconvolved image (the posterior mean).
    chains : dict
       The keys ``noise`` and ``prior`` contain the chain list of
       noise and prior precision respectively.

    Other parameters
    ----------------
    The keys of ``user_params`` are:

    threshold : float
       The stopping criterion: the norm of the difference between to
       successive approximated solution (empirical mean of object
       samples, see Notes section). 1e-4 by default.
    burnin : int
       The number of sample to ignore to start computation of the
       mean. 15 by default.
    min_iter : int
       The minimum number of iterations. 30 by default.
    max_iter : int
       The maximum number of iterations if ``threshold`` is not
       satisfied. 200 by default.
    callback : callable (None by default)
       A user provided callable to which is passed, if the function
       exists, the current image sample for whatever purpose. The user
       can store the sample, or compute other moments than the
       mean. It has no influence on the algorithm execution and is
       only for inspection.

    Examples
    --------
    >>> from skimage import color, data, restoration
    >>> img = color.rgb2gray(data.astronaut())
    >>> from scipy.signal import convolve2d
    >>> psf = np.ones((5, 5)) / 25
    >>> img = convolve2d(img, psf, 'same')
    >>> img += 0.1 * img.std() * np.random.standard_normal(img.shape)
    >>> deconvolved_img = restoration.unsupervised_wiener(img, psf)

    Notes
    -----
    The estimated image is design as the posterior mean of a
    probability law (from a Bayesian analysis). The mean is defined as
    a sum over all the possible images weighted by their respective
    probability. Given the size of the problem, the exact sum is not
    tractable. This algorithm use of MCMC to draw image under the
    posterior law. The practical idea is to only draw highly probable
    images since they have the biggest contribution to the mean. At the
    opposite, the less probable images are drawn less often since
    their contribution is low. Finally the empirical mean of these
    samples give us an estimation of the mean, and an exact
    computation with an infinite sample set.

    References
    ----------
    .. [1] François Orieux, Jean-François Giovannelli, and Thomas
           Rodet, "Bayesian estimation of regularization and point
           spread function parameters for Wiener-Hunt deconvolution",
           J. Opt. Soc. Am. A 27, 1593-1607 (2010)

           http://www.opticsinfobase.org/josaa/abstract.cfm?URI=josaa-27-7-1593

           http://research.orieux.fr/files/papers/OGR-JOSA10.pdf
    """
    # Defaults; the docstring above now matches these values (it
    # previously claimed burnin=100 and max_iter=150).
    params = {'threshold': 1e-4, 'max_iter': 200,
              'min_iter': 30, 'burnin': 15, 'callback': None}
    params.update(user_params or {})
    if reg is None:
        reg, _ = uft.laplacian(image.ndim, image.shape, is_real=is_real)
    if not np.iscomplexobj(reg):
        reg = uft.ir2tf(reg, image.shape, is_real=is_real)
    if psf.shape != reg.shape:
        trans_fct = uft.ir2tf(psf, image.shape, is_real=is_real)
    else:
        trans_fct = psf
    # The mean of the object
    x_postmean = np.zeros(trans_fct.shape)
    # The previous computed mean in the iterative loop
    prev_x_postmean = np.zeros(trans_fct.shape)
    # Difference between two successive mean
    # (np.nan replaces the deprecated np.NAN alias)
    delta = np.nan
    # Initial state of the chain
    gn_chain, gx_chain = [1], [1]
    # The correlation of the object in Fourier space (if size is big,
    # this can reduce computation time in the loop)
    areg2 = np.abs(reg) ** 2
    atf2 = np.abs(trans_fct) ** 2
    # The Fourier transfrom may change the image.size attribut, so we
    # store it.  ``float`` replaces ``np.float``, removed in NumPy 1.20.
    if is_real:
        data_spectrum = uft.urfft2(image.astype(float))
    else:
        data_spectrum = uft.ufft2(image.astype(float))
    # Gibbs sampling
    for iteration in range(params['max_iter']):
        # Sample of Eq. 27 p(circX^k | gn^k-1, gx^k-1, y).
        # weighting (correlation in direct space)
        precision = gn_chain[-1] * atf2 + gx_chain[-1] * areg2  # Eq. 29
        excursion = np.sqrt(0.5) / np.sqrt(precision) * (
            np.random.standard_normal(data_spectrum.shape) +
            1j * np.random.standard_normal(data_spectrum.shape))
        # mean Eq. 30 (RLS for fixed gn, gamma0 and gamma1 ...)
        wiener_filter = gn_chain[-1] * np.conj(trans_fct) / precision
        # sample of X in Fourier space
        x_sample = wiener_filter * data_spectrum + excursion
        if params['callback']:
            params['callback'](x_sample)
        # sample of Eq. 31 p(gn | x^k, gx^k, y)
        gn_chain.append(npr.gamma(image.size / 2,
                                  2 / uft.image_quad_norm(data_spectrum -
                                                          x_sample *
                                                          trans_fct)))
        # sample of Eq. 31 p(gx | x^k, gn^k-1, y)
        gx_chain.append(npr.gamma((image.size - 1) / 2,
                                  2 / uft.image_quad_norm(x_sample * reg)))
        # current empirical average
        if iteration > params['burnin']:
            x_postmean = prev_x_postmean + x_sample
        if iteration > (params['burnin'] + 1):
            current = x_postmean / (iteration - params['burnin'])
            previous = prev_x_postmean / (iteration - params['burnin'] - 1)
            delta = np.sum(np.abs(current - previous)) / \
                np.sum(np.abs(x_postmean)) / (iteration - params['burnin'])
        prev_x_postmean = x_postmean
        # stop of the algorithm
        if (iteration > params['min_iter']) and (delta < params['threshold']):
            break
    # Empirical average \approx POSTMEAN Eq. 44
    x_postmean = x_postmean / (iteration - params['burnin'])
    if is_real:
        x_postmean = uft.uirfft2(x_postmean, shape=image.shape)
    else:
        x_postmean = uft.uifft2(x_postmean)
    if clip:
        x_postmean[x_postmean > 1] = 1
        x_postmean[x_postmean < -1] = -1
    return (x_postmean, {'noise': gn_chain, 'prior': gx_chain})
def richardson_lucy(image, psf, iterations=50, clip=True):
    """Richardson-Lucy deconvolution.

    Parameters
    ----------
    image : ndarray
       Input degraded image.
    psf : ndarray
       The point spread function.
    iterations : int
       Number of iterations. This parameter plays the role of
       regularisation.
    clip : boolean, optional
       True by default. If true, pixel values of the result above 1 or
       under -1 are thresholded for skimage pipeline compatibility.

    Returns
    -------
    im_deconv : ndarray
       The deconvolved image.

    Examples
    --------
    >>> from skimage import color, data, restoration
    >>> camera = color.rgb2gray(data.camera())
    >>> from scipy.signal import convolve2d
    >>> psf = np.ones((5, 5)) / 25
    >>> camera = convolve2d(camera, psf, 'same')
    >>> camera += 0.1 * camera.std() * np.random.standard_normal(camera.shape)
    >>> deconvolved = restoration.richardson_lucy(camera, psf, 5)

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Richardson%E2%80%93Lucy_deconvolution
    """
    # ``np.float`` was a deprecated alias of the builtin ``float`` and was
    # removed in NumPy 1.24; use the builtin (float64) directly.
    image = image.astype(float)
    psf = psf.astype(float)
    # Start from a flat gray estimate; RL refines it multiplicatively.
    im_deconv = 0.5 * np.ones(image.shape)
    # The flipped PSF acts as the adjoint (correlation) kernel in the update.
    psf_mirror = psf[::-1, ::-1]
    for _ in range(iterations):
        # Ratio of the observed data to the re-blurred current estimate.
        # NOTE(review): divides by zero where the re-blur is exactly 0;
        # later upstream versions add an epsilon guard — behavior kept as-is.
        relative_blur = image / convolve2d(im_deconv, psf, 'same')
        im_deconv *= convolve2d(relative_blur, psf_mirror, 'same')
    if clip:
        # Threshold to [-1, 1] for skimage pipeline compatibility.
        im_deconv[im_deconv > 1] = 1
        im_deconv[im_deconv < -1] = -1
    return im_deconv
|
bsd-3-clause
|
mjg2203/edx-platform-seas
|
lms/djangoapps/dashboard/management/commands/git_add_course.py
|
9
|
1525
|
"""
Script for importing courseware from git/xml into a mongo modulestore
"""
import os
import re
import StringIO
import subprocess
import logging
from django.core import management
from django.core.management.base import BaseCommand, CommandError
from django.utils.translation import ugettext as _
import dashboard.git_import
from dashboard.git_import import GitImportError
from dashboard.models import CourseImportLog
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.xml import XMLModuleStore
log = logging.getLogger(__name__)
class Command(BaseCommand):
    """
    Pull a git repo and import into the mongo based content database.
    """
    help = _('Import the specified git repository into the '
             'modulestore and directory')

    def handle(self, *args, **options):
        """Check inputs and run the command"""
        # BUGFIX: ``modulestore`` is a factory function; the original code
        # did ``isinstance(modulestore, XMLModuleStore)``, which compares
        # the function object itself and is therefore always False. The
        # check must run against the store instance the factory returns.
        if isinstance(modulestore(), XMLModuleStore):
            raise CommandError('This script requires a mongo module store')
        if len(args) < 1:
            raise CommandError('This script requires at least one argument, '
                               'the git URL')
        if len(args) > 2:
            raise CommandError('This script requires no more than two '
                               'arguments')
        # Optional second positional argument: the repo directory name.
        rdir_arg = None
        if len(args) > 1:
            rdir_arg = args[1]
        try:
            dashboard.git_import.add_repo(args[0], rdir_arg)
        except GitImportError as ex:
            # Surface import failures as a management-command error.
            raise CommandError(str(ex))
|
agpl-3.0
|
ZHAW-INES/rioxo-uClinux-dist
|
user/python/python-2.4.4/Mac/scripts/buildpkg.py
|
5
|
15904
|
#!/usr/bin/env python
"""buildpkg.py -- Build OS X packages for Apple's Installer.app.
This is an experimental command-line tool for building packages to be
installed with the Mac OS X Installer.app application.
It is much inspired by Apple's GUI tool called PackageMaker.app, that
seems to be part of the OS X developer tools installed in the folder
/Developer/Applications. But apparently there are other free tools to
do the same thing which are also named PackageMaker like Brian Hill's
one:
http://personalpages.tds.net/~brian_hill/packagemaker.html
Beware of the multi-package features of Installer.app (which are not
yet supported here) that can potentially screw-up your installation
and are discussed in these articles on Stepwise:
http://www.stepwise.com/Articles/Technical/Packages/InstallerWoes.html
http://www.stepwise.com/Articles/Technical/Packages/InstallerOnX.html
Beside using the PackageMaker class directly, by importing it inside
another module, say, there are additional ways of using this module:
the top-level buildPackage() function provides a shortcut to the same
feature and is also called when using this module from the command-
line.
****************************************************************
NOTE: For now you should be able to run this even on a non-OS X
system and get something similar to a package, but without
the real archive (needs pax) and bom files (needs mkbom)
inside! This is only for providing a chance for testing to
folks without OS X.
****************************************************************
TODO:
- test pre-process and post-process scripts (Python ones?)
- handle multi-volume packages (?)
- integrate into distutils (?)
Dinu C. Gherman,
gherman@europemail.com
November 2001
!! USE AT YOUR OWN RISK !!
"""
__version__ = 0.2
__license__ = "FreeBSD"
import os, sys, glob, fnmatch, shutil, string, copy, getopt
from os.path import basename, dirname, join, islink, isdir, isfile
Error = "buildpkg.Error"
PKG_INFO_FIELDS = """\
Title
Version
Description
DefaultLocation
DeleteWarning
NeedsAuthorization
DisableStop
UseUserMask
Application
Relocatable
Required
InstallOnly
RequiresReboot
RootVolumeOnly
LongFilenames
LibrarySubdirectory
AllowBackRev
OverwritePermissions
InstallFat\
"""
######################################################################
# Helpers
######################################################################
# Convenience class, as suggested by /F.
class GlobDirectoryWalker:
    "A forward iterator that traverses files in a directory tree."

    def __init__(self, directory, pattern="*"):
        # Directories still to be visited (depth-first via pop()).
        self.stack = [directory]
        # fnmatch-style pattern that yielded filenames must satisfy.
        self.pattern = pattern
        # Entries of the directory currently being scanned.
        self.files = []
        # Index of the next entry in self.files to examine.
        self.index = 0

    def __getitem__(self, index):
        # Old-style iteration protocol: a for-loop calls __getitem__ with
        # 0, 1, 2, ... until IndexError escapes. The passed ``index`` is
        # deliberately ignored; self.index tracks progress instead.
        while 1:
            try:
                # NOTE: ``file`` shadows the (Python 2) builtin of the
                # same name; kept as-is for byte-identical behavior.
                file = self.files[self.index]
                self.index = self.index + 1
            except IndexError:
                # Current directory exhausted: pop the next one from the
                # stack. When the stack is empty, pop() itself raises
                # IndexError, which terminates the iteration.
                self.directory = self.stack.pop()
                self.files = os.listdir(self.directory)
                self.index = 0
            else:
                # got a filename
                fullname = join(self.directory, file)
                # Descend into subdirectories, but never follow symlinks
                # (avoids cycles).
                if isdir(fullname) and not islink(fullname):
                    self.stack.append(fullname)
                if fnmatch.fnmatch(file, self.pattern):
                    return fullname
######################################################################
# The real thing
######################################################################
class PackageMaker:
"""A class to generate packages for Mac OS X.
This is intended to create OS X packages (with extension .pkg)
containing archives of arbitrary files that the Installer.app
will be able to handle.
As of now, PackageMaker instances need to be created with the
title, version and description of the package to be built.
The package is built after calling the instance method
build(root, **options). It has the same name as the constructor's
title argument plus a '.pkg' extension and is located in the same
parent folder that contains the root folder.
E.g. this will create a package folder /my/space/distutils.pkg/:
pm = PackageMaker("distutils", "1.0.2", "Python distutils.")
pm.build("/my/space/distutils")
"""
packageInfoDefaults = {
'Title': None,
'Version': None,
'Description': '',
'DefaultLocation': '/',
'DeleteWarning': '',
'NeedsAuthorization': 'NO',
'DisableStop': 'NO',
'UseUserMask': 'YES',
'Application': 'NO',
'Relocatable': 'YES',
'Required': 'NO',
'InstallOnly': 'NO',
'RequiresReboot': 'NO',
'RootVolumeOnly' : 'NO',
'InstallFat': 'NO',
'LongFilenames': 'YES',
'LibrarySubdirectory': 'Standard',
'AllowBackRev': 'YES',
'OverwritePermissions': 'NO',
}
def __init__(self, title, version, desc):
"Init. with mandatory title/version/description arguments."
info = {"Title": title, "Version": version, "Description": desc}
self.packageInfo = copy.deepcopy(self.packageInfoDefaults)
self.packageInfo.update(info)
# variables set later
self.packageRootFolder = None
self.packageResourceFolder = None
self.sourceFolder = None
self.resourceFolder = None
def build(self, root, resources=None, **options):
"""Create a package for some given root folder.
With no 'resources' argument set it is assumed to be the same
as the root directory. Option items replace the default ones
in the package info.
"""
# set folder attributes
self.sourceFolder = root
if resources == None:
self.resourceFolder = root
else:
self.resourceFolder = resources
# replace default option settings with user ones if provided
fields = self. packageInfoDefaults.keys()
for k, v in options.items():
if k in fields:
self.packageInfo[k] = v
elif not k in ["OutputDir"]:
raise Error, "Unknown package option: %s" % k
# Check where we should leave the output. Default is current directory
outputdir = options.get("OutputDir", os.getcwd())
packageName = self.packageInfo["Title"]
self.PackageRootFolder = os.path.join(outputdir, packageName + ".pkg")
# do what needs to be done
self._makeFolders()
self._addInfo()
self._addBom()
self._addArchive()
self._addResources()
self._addSizes()
self._addLoc()
def _makeFolders(self):
"Create package folder structure."
# Not sure if the package name should contain the version or not...
# packageName = "%s-%s" % (self.packageInfo["Title"],
# self.packageInfo["Version"]) # ??
contFolder = join(self.PackageRootFolder, "Contents")
self.packageResourceFolder = join(contFolder, "Resources")
os.mkdir(self.PackageRootFolder)
os.mkdir(contFolder)
os.mkdir(self.packageResourceFolder)
def _addInfo(self):
"Write .info file containing installing options."
# Not sure if options in PKG_INFO_FIELDS are complete...
info = ""
for f in string.split(PKG_INFO_FIELDS, "\n"):
if self.packageInfo.has_key(f):
info = info + "%s %%(%s)s\n" % (f, f)
info = info % self.packageInfo
base = self.packageInfo["Title"] + ".info"
path = join(self.packageResourceFolder, base)
f = open(path, "w")
f.write(info)
def _addBom(self):
"Write .bom file containing 'Bill of Materials'."
# Currently ignores if the 'mkbom' tool is not available.
try:
base = self.packageInfo["Title"] + ".bom"
bomPath = join(self.packageResourceFolder, base)
cmd = "mkbom %s %s" % (self.sourceFolder, bomPath)
res = os.system(cmd)
except:
pass
def _addArchive(self):
"Write .pax.gz file, a compressed archive using pax/gzip."
# Currently ignores if the 'pax' tool is not available.
cwd = os.getcwd()
# create archive
os.chdir(self.sourceFolder)
base = basename(self.packageInfo["Title"]) + ".pax"
self.archPath = join(self.packageResourceFolder, base)
cmd = "pax -w -f %s %s" % (self.archPath, ".")
res = os.system(cmd)
# compress archive
cmd = "gzip %s" % self.archPath
res = os.system(cmd)
os.chdir(cwd)
def _addResources(self):
"Add Welcome/ReadMe/License files, .lproj folders and scripts."
# Currently we just copy everything that matches the allowed
# filenames. So, it's left to Installer.app to deal with the
# same file available in multiple formats...
if not self.resourceFolder:
return
# find candidate resource files (txt html rtf rtfd/ or lproj/)
allFiles = []
for pat in string.split("*.txt *.html *.rtf *.rtfd *.lproj", " "):
pattern = join(self.resourceFolder, pat)
allFiles = allFiles + glob.glob(pattern)
# find pre-process and post-process scripts
# naming convention: packageName.{pre,post}_{upgrade,install}
# Alternatively the filenames can be {pre,post}_{upgrade,install}
# in which case we prepend the package name
packageName = self.packageInfo["Title"]
for pat in ("*upgrade", "*install", "*flight"):
pattern = join(self.resourceFolder, packageName + pat)
pattern2 = join(self.resourceFolder, pat)
allFiles = allFiles + glob.glob(pattern)
allFiles = allFiles + glob.glob(pattern2)
# check name patterns
files = []
for f in allFiles:
for s in ("Welcome", "License", "ReadMe"):
if string.find(basename(f), s) == 0:
files.append((f, f))
if f[-6:] == ".lproj":
files.append((f, f))
elif basename(f) in ["pre_upgrade", "pre_install", "post_upgrade", "post_install"]:
files.append((f, packageName+"."+basename(f)))
elif basename(f) in ["preflight", "postflight"]:
files.append((f, f))
elif f[-8:] == "_upgrade":
files.append((f,f))
elif f[-8:] == "_install":
files.append((f,f))
# copy files
for src, dst in files:
src = basename(src)
dst = basename(dst)
f = join(self.resourceFolder, src)
if isfile(f):
shutil.copy(f, os.path.join(self.packageResourceFolder, dst))
elif isdir(f):
# special case for .rtfd and .lproj folders...
d = join(self.packageResourceFolder, dst)
os.mkdir(d)
files = GlobDirectoryWalker(f)
for file in files:
shutil.copy(file, d)
def _addSizes(self):
"Write .sizes file with info about number and size of files."
# Not sure if this is correct, but 'installedSize' and
# 'zippedSize' are now in Bytes. Maybe blocks are needed?
# Well, Installer.app doesn't seem to care anyway, saying
# the installation needs 100+ MB...
numFiles = 0
installedSize = 0
zippedSize = 0
files = GlobDirectoryWalker(self.sourceFolder)
for f in files:
numFiles = numFiles + 1
installedSize = installedSize + os.lstat(f)[6]
try:
zippedSize = os.stat(self.archPath+ ".gz")[6]
except OSError: # ignore error
pass
base = self.packageInfo["Title"] + ".sizes"
f = open(join(self.packageResourceFolder, base), "w")
format = "NumFiles %d\nInstalledSize %d\nCompressedSize %d\n"
f.write(format % (numFiles, installedSize, zippedSize))
def _addLoc(self):
"Write .loc file."
base = self.packageInfo["Title"] + ".loc"
f = open(join(self.packageResourceFolder, base), "w")
f.write('/')
# Shortcut function interface
def buildPackage(*args, **options):
    """A shortcut function for building a package.

    Positional arguments are passed through to PackageMaker.build()
    (root folder and optional resources folder). Keyword options must
    include Title, Version and Description for the constructor and may
    include any build() option.
    """
    o = options
    title, version, desc = o["Title"], o["Version"], o["Description"]
    pm = PackageMaker(title, version, desc)
    # Extended call syntax replaces the deprecated apply() builtin and is
    # exactly equivalent: apply(f, list(a), kw) == f(*a, **kw).
    pm.build(*args, **options)
######################################################################
# Tests
######################################################################
def test0():
    "Vanilla test for the distutils distribution."
    # NOTE(review): hard-coded to the original author's home directory;
    # adjust the path before running locally.
    pm = PackageMaker("distutils2", "1.0.2", "Python distutils package.")
    pm.build("/Users/dinu/Desktop/distutils2")
def test1():
    "Test for the reportlab distribution with modified options."
    # Exercises build() keyword options overriding packageInfoDefaults.
    # NOTE(review): hard-coded personal path; adjust before running.
    pm = PackageMaker("reportlab", "1.10",
                      "ReportLab's Open Source PDF toolkit.")
    pm.build(root="/Users/dinu/Desktop/reportlab",
             DefaultLocation="/Applications/ReportLab",
             Relocatable="YES")
def test2():
    "Shortcut test for the reportlab distribution with modified options."
    # Same as test1 but through the module-level buildPackage() shortcut.
    # NOTE(review): hard-coded personal path; adjust before running.
    buildPackage(
        "/Users/dinu/Desktop/reportlab",
        Title="reportlab",
        Version="1.10",
        Description="ReportLab's Open Source PDF toolkit.",
        DefaultLocation="/Applications/ReportLab",
        Relocatable="YES")
######################################################################
# Command-line interface
######################################################################
def printUsage():
    "Print usage message."
    # Python 2 print statements throughout (this file targets Python 2.x).
    format = "Usage: %s <opts1> [<opts2>] <root> [<resources>]"
    print format % basename(sys.argv[0])
    print
    print "       with arguments:"
    print "           (mandatory) root:         the package root folder"
    print "           (optional)  resources:    the package resources folder"
    print
    print "       and options:"
    print "           (mandatory) opts1:"
    # Mandatory options mirror the PackageMaker constructor arguments.
    mandatoryKeys = string.split("Title Version Description", " ")
    for k in mandatoryKeys:
        print "               --%s" % k
    print "           (optional) opts2: (with default values)"

    # Every remaining packageInfoDefaults key is an optional long option;
    # pad the column so defaults line up.
    pmDefaults = PackageMaker.packageInfoDefaults
    optionalKeys = pmDefaults.keys()
    for k in mandatoryKeys:
        optionalKeys.remove(k)
    optionalKeys.sort()
    maxKeyLen = max(map(len, optionalKeys))
    for k in optionalKeys:
        format = "               --%%s:%s %%s"
        format = format % (" " * (maxKeyLen-len(k)))
        print format % (k, repr(pmDefaults[k]))
def main():
    "Command-line interface."
    # Every packageInfoDefaults key becomes a long option taking a value
    # (getopt's trailing '=' convention); there are no short options.
    shortOpts = ""
    keys = PackageMaker.packageInfoDefaults.keys()
    longOpts = map(lambda k: k+"=", keys)

    try:
        opts, args = getopt.getopt(sys.argv[1:], shortOpts, longOpts)
    except getopt.GetoptError, details:
        # Unknown/malformed options land here (Python 2 comma syntax).
        print details
        printUsage()
        return

    # Strip the leading '--' from option names to get packageInfo keys.
    optsDict = {}
    for k, v in opts:
        optsDict[k[2:]] = v

    ok = optsDict.keys()
    if not (1 <= len(args) <= 2):
        print "No argument given!"
    elif not ("Title" in ok and \
              "Version" in ok and \
              "Description" in ok):
        print "Missing mandatory option!"
    else:
        # apply() is the Python 2 spelling of buildPackage(*args, **optsDict).
        apply(buildPackage, args, optsDict)
        return

    # Reached only on invalid invocation (both error branches above).
    printUsage()
# sample use:
# buildpkg.py --Title=distutils \
# --Version=1.0.2 \
# --Description="Python distutils package." \
# /Users/dinu/Desktop/distutils
# Run the command-line interface only when executed as a script.
if __name__ == "__main__":
    main()
|
gpl-2.0
|
euri10/zipline
|
tests/test_algorithm_gen.py
|
18
|
7339
|
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from nose.tools import (
timed,
nottest
)
from datetime import datetime
import pandas as pd
import pytz
from zipline.finance import trading
from zipline.algorithm import TradingAlgorithm
from zipline.finance import slippage
from zipline.utils import factory
from zipline.utils.factory import create_simulation_parameters
from zipline.utils.test_utils import (
setup_logger,
teardown_logger
)
from zipline.protocol import (
Event,
DATASOURCE_TYPE
)
DEFAULT_TIMEOUT = 15 # seconds
EXTENDED_TIMEOUT = 90
class RecordDateSlippage(slippage.FixedSlippage):
    """FixedSlippage variant that remembers the dt of the last event seen."""

    def __init__(self, spread):
        super(RecordDateSlippage, self).__init__(spread=spread)
        # Timestamp of the most recent simulated event; None until
        # simulate() has run at least once.
        self.latest_date = None

    def simulate(self, event, open_orders):
        # Record when this event occurred, then defer to the parent model.
        self.latest_date = event.dt
        return super(RecordDateSlippage, self).simulate(event, open_orders)
class TestAlgo(TradingAlgorithm):
    """Algorithm that checks its clock never lags the slippage model's."""

    def __init__(self, asserter, *args, **kwargs):
        super(TestAlgo, self).__init__(*args, **kwargs)
        # Test-case object supplying the assert* methods used below.
        self.asserter = asserter

    def initialize(self, window_length=100):
        self.latest_date = None
        self.set_slippage(RecordDateSlippage(spread=0.05))
        self.stocks = [self.sid(8229)]
        self.ordered = False
        self.num_bars = 0

    def handle_data(self, data):
        self.num_bars += 1
        self.latest_date = self.get_datetime()
        if self.ordered:
            # After the initial order, the algorithm's current datetime
            # must be at or past the slippage model's last event date.
            self.asserter.assertGreaterEqual(
                self.latest_date,
                self.slippage.latest_date
            )
        else:
            # First bar: place a single order per tracked stock.
            for stock in self.stocks:
                self.order(stock, 100)
            self.ordered = True
class AlgorithmGeneratorTestCase(TestCase):
    """Integration tests for the generator pipeline driving TestAlgo."""

    def setUp(self):
        setup_logger(self)

    def tearDown(self):
        teardown_logger(self)

    @nottest
    def test_lse_algorithm(self):
        # Exercise a non-US trading environment (London Stock Exchange).
        lse = trading.TradingEnvironment(
            bm_symbol='^FTSE',
            exchange_tz='Europe/London'
        )
        with lse:
            sim_params = factory.create_simulation_parameters(
                start=datetime(2012, 5, 1, tzinfo=pytz.utc),
                end=datetime(2012, 6, 30, tzinfo=pytz.utc)
            )
            algo = TestAlgo(self, identifiers=[8229], sim_params=sim_params)
            trade_source = factory.create_daily_trade_source(
                [8229],
                200,
                sim_params
            )
            algo.set_sources([trade_source])
            gen = algo.get_generator()
            results = list(gen)
            # 42 LSE trading days between 2012-05-01 and 2012-06-30.
            self.assertEqual(len(results), 42)
            # May 7, 2012 was an LSE holiday, confirm the 4th trading
            # day was May 8.
            self.assertEqual(results[4]['daily_perf']['period_open'],
                             datetime(2012, 5, 8, 8, 31, tzinfo=pytz.utc))

    @timed(DEFAULT_TIMEOUT)
    def test_generator_dates(self):
        """
        Ensure the pipeline of generators are in sync, at least as far as
        their current dates.
        """
        sim_params = factory.create_simulation_parameters(
            start=datetime(2011, 7, 30, tzinfo=pytz.utc),
            end=datetime(2012, 7, 30, tzinfo=pytz.utc)
        )
        algo = TestAlgo(self, identifiers=[8229], sim_params=sim_params)
        # NOTE(review): unlike test_lse_algorithm, no trade count is passed
        # here — confirm against factory.create_daily_trade_source signature.
        trade_source = factory.create_daily_trade_source(
            [8229],
            sim_params
        )
        algo.set_sources([trade_source])

        gen = algo.get_generator()
        # Consuming the generator drives handle_data; dates are checked
        # inside TestAlgo.handle_data via the asserter.
        self.assertTrue(list(gen))

        self.assertTrue(algo.slippage.latest_date)
        self.assertTrue(algo.latest_date)

    @timed(DEFAULT_TIMEOUT)
    def test_handle_data_on_market(self):
        """
        Ensure that handle_data is only called on market minutes.

        i.e. events that come in at midnight should be processed at market
        open.
        """
        from zipline.finance.trading import SimulationParameters
        sim_params = SimulationParameters(
            period_start=datetime(2012, 7, 30, tzinfo=pytz.utc),
            period_end=datetime(2012, 7, 30, tzinfo=pytz.utc),
            data_frequency='minute'
        )
        algo = TestAlgo(self, identifiers=[8229], sim_params=sim_params)

        # A custom (non-trade) event stamped at midnight UTC...
        midnight_custom_source = [Event({
            'custom_field': 42.0,
            'sid': 'custom_data',
            'source_id': 'TestMidnightSource',
            'dt': pd.Timestamp('2012-07-30', tz='UTC'),
            'type': DATASOURCE_TYPE.CUSTOM
        })]
        # ...and a regular trade event at the first market minute.
        minute_event_source = [Event({
            'volume': 100,
            'price': 200.0,
            'high': 210.0,
            'open_price': 190.0,
            'low': 180.0,
            'sid': 8229,
            'source_id': 'TestMinuteEventSource',
            'dt': pd.Timestamp('2012-07-30 9:31 AM', tz='US/Eastern').
            tz_convert('UTC'),
            'type': DATASOURCE_TYPE.TRADE
        })]

        algo.set_sources([midnight_custom_source, minute_event_source])

        gen = algo.get_generator()
        # Consume the generator
        list(gen)

        # Though the events had different time stamps, handle data should
        # have only been called once, at the market open.
        self.assertEqual(algo.num_bars, 1)

    @timed(DEFAULT_TIMEOUT)
    def test_progress(self):
        """
        Ensure the pipeline of generators are in sync, at least as far as
        their current dates.
        """
        sim_params = factory.create_simulation_parameters(
            start=datetime(2008, 1, 1, tzinfo=pytz.utc),
            end=datetime(2008, 1, 5, tzinfo=pytz.utc)
        )
        algo = TestAlgo(self, sim_params=sim_params)
        trade_source = factory.create_daily_trade_source(
            [8229],
            sim_params
        )
        algo.set_sources([trade_source])

        gen = algo.get_generator()
        results = list(gen)
        # The second-to-last message carries the final daily progress value.
        self.assertEqual(results[-2]['progress'], 1.0)

    def test_benchmark_times_match_market_close_for_minutely_data(self):
        """
        Benchmark dates should be adjusted so that benchmark events are
        emitted at the end of each trading day when working with minutely
        data.
        Verification relies on the fact that there are no trades so
        algo.datetime should be equal to the last benchmark time.
        See https://github.com/quantopian/zipline/issues/241
        """
        sim_params = create_simulation_parameters(num_days=1,
                                                  data_frequency='minute')
        algo = TestAlgo(self, sim_params=sim_params, identifiers=[8229])
        # Empty source: the only events the algo sees are benchmark events.
        algo.run(source=[], overwrite_sim_params=False)
        self.assertEqual(algo.datetime, sim_params.last_close)
|
apache-2.0
|
gechr/ansible-modules-extras
|
system/locale_gen.py
|
40
|
7666
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: locale_gen
short_description: Creates or removes locales.
description:
- Manages locales by editing /etc/locale.gen and invoking locale-gen.
version_added: "1.6"
author: "Augustus Kling (@AugustusKling)"
options:
name:
description:
- Name and encoding of the locale, such as "en_GB.UTF-8".
required: true
default: null
aliases: []
state:
description:
- Whether the locale shall be present.
required: false
choices: ["present", "absent"]
default: "present"
'''
EXAMPLES = '''
# Ensure a locale exists.
- locale_gen: name=de_CH.UTF-8 state=present
'''
import os
import os.path
from subprocess import Popen, PIPE, call
import re
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
# Map condensed/lowercase encoding suffixes (as printed by `locale -a`)
# to the canonical spellings used in /etc/locale.gen, so the two sources
# can be compared reliably (see fix_case()).
LOCALE_NORMALIZATION = {
    ".utf8": ".UTF-8",
    ".eucjp": ".EUC-JP",
    ".iso885915": ".ISO-8859-15",
    ".cp1251": ".CP1251",
    ".koi8r": ".KOI8-R",
    ".armscii8": ".ARMSCII-8",
    ".euckr": ".EUC-KR",
    ".gbk": ".GBK",
    ".gb18030": ".GB18030",
    ".euctw": ".EUC-TW",
}
# ===========================================
# location module specific support methods.
#
def is_available(name, ubuntuMode):
    """Check if the given locale is available on the system. This is done by
    checking either :
    * if the locale is present in /etc/locales.gen
    * or if the locale is present in /usr/share/i18n/SUPPORTED"""
    if ubuntuMode:
        # Ubuntu: list of supported locales (never commented out).
        __regexp = r'^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
        __locales_available = '/usr/share/i18n/SUPPORTED'
    else:
        # Debian & co.: entries may be commented out with a leading '#'.
        __regexp = r'^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
        __locales_available = '/etc/locale.gen'

    re_compiled = re.compile(__regexp)
    fd = open(__locales_available, 'r')
    try:
        # Stop at the first entry matching the requested locale name.
        for line in fd:
            result = re_compiled.match(line)
            if result and result.group('locale') == name:
                return True
    finally:
        # BUGFIX: the original called fd.close() after the early
        # ``return True``, leaking the handle whenever the locale was
        # found; try/finally closes it on every path.
        fd.close()
    return False
def is_present(name):
    """Checks if the given locale is currently installed."""
    # `locale -a` lists every installed locale, one per line.
    installed = Popen(["locale", "-a"], stdout=PIPE).communicate()[0]
    # Normalize once up front; fix_case is pure, so hoisting it out of the
    # comparison loop does not change the result.
    wanted = fix_case(name)
    return any(wanted == fix_case(line) for line in installed.splitlines())
def fix_case(name):
    """locale -a might return the encoding in either lower or upper case.
    Passing through this function makes them uniform for comparisons."""
    # BUGFIX/portability: dict.iteritems() exists only on Python 2 and
    # raises AttributeError on Python 3; dict.items() works on both.
    for s, r in LOCALE_NORMALIZATION.items():
        name = name.replace(s, r)
    return name
def replace_line(existing_line, new_line):
    """Replaces lines in /etc/locale.gen"""
    # ``with`` guarantees the handles are closed on every path. The
    # original ``try: f = open(...) ... finally: f.close()`` raised a
    # NameError on ``f`` in the finally block whenever open() itself failed.
    with open("/etc/locale.gen", "r") as f:
        lines = [line.replace(existing_line, new_line) for line in f]
    with open("/etc/locale.gen", "w") as f:
        f.write("".join(lines))
def set_locale(name, enabled=True):
    """ Sets the state of the locale. Defaults to enabled. """
    # Match an optionally commented-out /etc/locale.gen entry for ``name``.
    # Raw strings keep the regex escapes (\s, \g<...>) intact; the
    # originals were plain strings, which Python 3 flags as invalid
    # escape sequences.
    search_string = r'#{0,1}\s*%s (?P<charset>.+)' % name
    if enabled:
        new_string = r'%s \g<charset>' % (name)
    else:
        new_string = r'# %s \g<charset>' % (name)

    # ``with`` closes the handles on every path (the original try/finally
    # raised NameError on ``f`` if open() itself failed).
    with open("/etc/locale.gen", "r") as f:
        lines = [re.sub(search_string, new_string, line) for line in f]
    with open("/etc/locale.gen", "w") as f:
        f.write("".join(lines))
def apply_change(targetState, name):
    """Create or remove locale.

    Keyword arguments:
    targetState -- Desired state, either present or absent.
    name -- Name including encoding such as de_CH.UTF-8.
    """
    # Enabling the /etc/locale.gen entry creates the locale on the next
    # locale-gen run; disabling (commenting out) removes it.
    set_locale(name, enabled=(targetState == "present"))

    localeGenExitValue = call("locale-gen")
    if localeGenExitValue != 0:
        raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned "+str(localeGenExitValue))
def apply_change_ubuntu(targetState, name):
    """Create or remove locale.

    Keyword arguments:
    targetState -- Desired state, either present or absent.
    name -- Name including encoding such as de_CH.UTF-8.
    """
    if targetState=="present":
        # Create locale.
        # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local
        localeGenExitValue = call(["locale-gen", name])
    else:
        # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales.
        # NOTE(review): if open() itself fails, ``f`` is unbound and the
        # finally block raises NameError instead of the original error.
        try:
            f = open("/var/lib/locales/supported.d/local", "r")
            content = f.readlines()
        finally:
            f.close()
        try:
            # Rewrite the file, dropping every line for this locale.
            f = open("/var/lib/locales/supported.d/local", "w")
            for line in content:
                # assumes each line is "<locale> <charset>" with exactly one
                # space — TODO confirm; other shapes raise ValueError here.
                locale, charset = line.split(' ')
                if locale != name:
                    f.write(line)
        finally:
            f.close()
        # Purge locales and regenerate.
        # Please provide a patch if you know how to avoid regenerating the locales to keep!
        localeGenExitValue = call(["locale-gen", "--purge"])

    if localeGenExitValue!=0:
        raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned "+str(localeGenExitValue))
# ==============================================================
# main
def main():
    # Check mode is supported: the change can be predicted from the
    # present/absent comparison alone, without touching the system.
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True),
            state = dict(choices=['present','absent'], default='present'),
        ),
        supports_check_mode=True
    )

    name = module.params['name']
    state = module.params['state']

    # Decide which locale-management scheme this system uses.
    if not os.path.exists("/etc/locale.gen"):
        if os.path.exists("/var/lib/locales/supported.d/"):
            # Ubuntu created its own system to manage locales.
            ubuntuMode = True
        else:
            module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?")
    else:
        # We found the common way to manage locales.
        ubuntuMode = False

    if not is_available(name, ubuntuMode):
        module.fail_json(msg="The locales you've entered is not available "
                             "on your system.")

    # Compare current state with the requested one to detect a change.
    if is_present(name):
        prev_state = "present"
    else:
        prev_state = "absent"
    changed = (prev_state!=state)

    if module.check_mode:
        module.exit_json(changed=changed)
    else:
        if changed:
            try:
                if ubuntuMode==False:
                    apply_change(state, name)
                else:
                    apply_change_ubuntu(state, name)
            except EnvironmentError:
                # Raised by apply_change* when locale-gen exits non-zero.
                e = get_exception()
                module.fail_json(msg=e.strerror, exitValue=e.errno)

        module.exit_json(name=name, changed=changed, msg="OK")
# Ansible modules run as standalone scripts; invoke the entry point.
main()
|
gpl-3.0
|
mKeRix/home-assistant
|
homeassistant/components/html5/notify.py
|
7
|
18756
|
"""HTML5 Push Messaging notification service."""
from datetime import datetime, timedelta
from functools import partial
import json
import logging
import time
from urllib.parse import urlparse
import uuid
from aiohttp.hdrs import AUTHORIZATION
import jwt
from py_vapid import Vapid
from pywebpush import WebPusher
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.components import websocket_api
from homeassistant.components.frontend import add_manifest_json_key
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TARGET,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import (
HTTP_BAD_REQUEST,
HTTP_INTERNAL_SERVER_ERROR,
HTTP_UNAUTHORIZED,
URL_ROOT,
)
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.util import ensure_unique_string
from homeassistant.util.json import load_json, save_json
from .const import DOMAIN, SERVICE_DISMISS
_LOGGER = logging.getLogger(__name__)
REGISTRATIONS_FILE = "html5_push_registrations.conf"
ATTR_GCM_SENDER_ID = "gcm_sender_id"
ATTR_GCM_API_KEY = "gcm_api_key"
ATTR_VAPID_PUB_KEY = "vapid_pub_key"
ATTR_VAPID_PRV_KEY = "vapid_prv_key"
ATTR_VAPID_EMAIL = "vapid_email"
def gcm_api_deprecated(value):
    """Warn user that GCM API config is deprecated."""
    # Guard clause: nothing configured, nothing to warn about.
    if not value:
        return value
    _LOGGER.warning(
        "Configuring html5_push_notifications via the GCM api"
        " has been deprecated and will stop working after April 11,"
        " 2019. Use the VAPID configuration instead. For instructions,"
        " see https://www.home-assistant.io/integrations/html5/"
    )
    return value
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(ATTR_GCM_SENDER_ID): vol.All(cv.string, gcm_api_deprecated),
vol.Optional(ATTR_GCM_API_KEY): cv.string,
vol.Optional(ATTR_VAPID_PUB_KEY): cv.string,
vol.Optional(ATTR_VAPID_PRV_KEY): cv.string,
vol.Optional(ATTR_VAPID_EMAIL): cv.string,
}
)
ATTR_SUBSCRIPTION = "subscription"
ATTR_BROWSER = "browser"
ATTR_NAME = "name"
ATTR_ENDPOINT = "endpoint"
ATTR_KEYS = "keys"
ATTR_AUTH = "auth"
ATTR_P256DH = "p256dh"
ATTR_EXPIRATIONTIME = "expirationTime"
ATTR_TAG = "tag"
ATTR_ACTION = "action"
ATTR_ACTIONS = "actions"
ATTR_TYPE = "type"
ATTR_URL = "url"
ATTR_DISMISS = "dismiss"
ATTR_PRIORITY = "priority"
DEFAULT_PRIORITY = "normal"
ATTR_TTL = "ttl"
DEFAULT_TTL = 86400
ATTR_JWT = "jwt"
WS_TYPE_APPKEY = "notify/html5/appkey"
SCHEMA_WS_APPKEY = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_APPKEY}
)
# The number of days after the moment a notification is sent that a JWT
# is valid.
JWT_VALID_DAYS = 7
KEYS_SCHEMA = vol.All(
dict,
vol.Schema(
{vol.Required(ATTR_AUTH): cv.string, vol.Required(ATTR_P256DH): cv.string}
),
)
SUBSCRIPTION_SCHEMA = vol.All(
dict,
vol.Schema(
{
# pylint: disable=no-value-for-parameter
vol.Required(ATTR_ENDPOINT): vol.Url(),
vol.Required(ATTR_KEYS): KEYS_SCHEMA,
vol.Optional(ATTR_EXPIRATIONTIME): vol.Any(None, cv.positive_int),
}
),
)
DISMISS_SERVICE_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_TARGET): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_DATA): dict,
}
)
REGISTER_SCHEMA = vol.Schema(
{
vol.Required(ATTR_SUBSCRIPTION): SUBSCRIPTION_SCHEMA,
vol.Required(ATTR_BROWSER): vol.In(["chrome", "firefox"]),
vol.Optional(ATTR_NAME): cv.string,
}
)
CALLBACK_EVENT_PAYLOAD_SCHEMA = vol.Schema(
{
vol.Required(ATTR_TAG): cv.string,
vol.Required(ATTR_TYPE): vol.In(["received", "clicked", "closed"]),
vol.Required(ATTR_TARGET): cv.string,
vol.Optional(ATTR_ACTION): cv.string,
vol.Optional(ATTR_DATA): dict,
}
)
NOTIFY_CALLBACK_EVENT = "html5_notification"
# Badge and timestamp are Chrome specific (not in official spec)
HTML5_SHOWNOTIFICATION_PARAMETERS = (
"actions",
"badge",
"body",
"dir",
"icon",
"image",
"lang",
"renotify",
"requireInteraction",
"tag",
"timestamp",
"vibrate",
)
def get_service(hass, config, discovery_info=None):
    """Get the HTML5 push notification service."""
    registrations_path = hass.config.path(REGISTRATIONS_FILE)
    registrations = _load_config(registrations_path)
    if registrations is None:
        return None
    vapid_pub_key = config.get(ATTR_VAPID_PUB_KEY)
    vapid_prv_key = config.get(ATTR_VAPID_PRV_KEY)
    vapid_email = config.get(ATTR_VAPID_EMAIL)

    def websocket_appkey(hass, connection, msg):
        # Answer the frontend's appkey query with the VAPID public key.
        result = websocket_api.result_message(msg["id"], vapid_pub_key)
        connection.send_message(result)

    hass.components.websocket_api.async_register_command(
        WS_TYPE_APPKEY, websocket_appkey, SCHEMA_WS_APPKEY
    )
    # HTTP endpoints for browser (un)registration and notification callbacks.
    hass.http.register_view(
        HTML5PushRegistrationView(registrations, registrations_path)
    )
    hass.http.register_view(HTML5PushCallbackView(registrations))
    gcm_api_key = config.get(ATTR_GCM_API_KEY)
    gcm_sender_id = config.get(ATTR_GCM_SENDER_ID)
    if gcm_sender_id is not None:
        add_manifest_json_key(ATTR_GCM_SENDER_ID, config.get(ATTR_GCM_SENDER_ID))
    return HTML5NotificationService(
        hass, gcm_api_key, vapid_prv_key, vapid_email, registrations, registrations_path
    )
def _load_config(filename):
    """Load the registrations file, returning {} when it cannot be read."""
    try:
        data = load_json(filename)
    except HomeAssistantError:
        return {}
    return data
class HTML5PushRegistrationView(HomeAssistantView):
    """Accepts push registrations from a browser."""
    url = "/api/notify.html5"
    name = "api:notify.html5"
    def __init__(self, registrations, json_path):
        """Init HTML5PushRegistrationView."""
        # In-memory registration dict (shared with the notify service) and
        # the JSON file it is persisted to.
        self.registrations = registrations
        self.json_path = json_path
    async def post(self, request):
        """Accept the POST request for push registrations from a browser."""
        try:
            data = await request.json()
        except ValueError:
            return self.json_message("Invalid JSON", HTTP_BAD_REQUEST)
        try:
            data = REGISTER_SCHEMA(data)
        except vol.Invalid as ex:
            return self.json_message(humanize_error(data, ex), HTTP_BAD_REQUEST)
        # The device name is only used to derive the registration key; it is
        # removed from the stored registration itself.
        devname = data.get(ATTR_NAME)
        data.pop(ATTR_NAME, None)
        name = self.find_registration_name(data, devname)
        previous_registration = self.registrations.get(name)
        self.registrations[name] = data
        try:
            hass = request.app["hass"]
            await hass.async_add_job(save_json, self.json_path, self.registrations)
            return self.json_message("Push notification subscriber registered.")
        except HomeAssistantError:
            # Persisting failed: roll the in-memory dict back so memory and
            # disk stay consistent.
            if previous_registration is not None:
                self.registrations[name] = previous_registration
            else:
                self.registrations.pop(name)
            return self.json_message(
                "Error saving registration.", HTTP_INTERNAL_SERVER_ERROR
            )
    def find_registration_name(self, data, suggested=None):
        """Find a registration name matching data or generate a unique one."""
        # Re-registering an already-known endpoint reuses its existing key.
        endpoint = data.get(ATTR_SUBSCRIPTION).get(ATTR_ENDPOINT)
        for key, registration in self.registrations.items():
            subscription = registration.get(ATTR_SUBSCRIPTION)
            if subscription.get(ATTR_ENDPOINT) == endpoint:
                return key
        return ensure_unique_string(suggested or "unnamed device", self.registrations)
    async def delete(self, request):
        """Delete a registration."""
        try:
            data = await request.json()
        except ValueError:
            return self.json_message("Invalid JSON", HTTP_BAD_REQUEST)
        # Match on the full subscription object sent by the browser.
        subscription = data.get(ATTR_SUBSCRIPTION)
        found = None
        for key, registration in self.registrations.items():
            if registration.get(ATTR_SUBSCRIPTION) == subscription:
                found = key
                break
        if not found:
            # If not found, unregistering was already done. Return 200
            return self.json_message("Registration not found.")
        reg = self.registrations.pop(found)
        try:
            hass = request.app["hass"]
            await hass.async_add_job(save_json, self.json_path, self.registrations)
        except HomeAssistantError:
            # Persisting failed: restore the registration we just removed.
            self.registrations[found] = reg
            return self.json_message(
                "Error saving registration.", HTTP_INTERNAL_SERVER_ERROR
            )
        return self.json_message("Push notification subscriber unregistered.")
class HTML5PushCallbackView(HomeAssistantView):
    """Accepts push registrations from a browser."""
    # The service worker cannot attach an HA auth token, so HA auth is
    # disabled and a per-notification JWT (see decode_jwt) is used instead.
    requires_auth = False
    url = "/api/notify.html5/callback"
    name = "api:notify.html5/callback"
    def __init__(self, registrations):
        """Init HTML5PushCallbackView."""
        self.registrations = registrations
    def decode_jwt(self, token):
        """Find the registration that signed this JWT and return it."""
        # 1. Check claims w/o verifying to see if a target is in there.
        # 2. If target in claims, attempt to verify against the given name.
        # 2a. If decode is successful, return the payload.
        # 2b. If decode is unsuccessful, return a 401.
        # NOTE(review): jwt.decode(..., verify=False) is the PyJWT 1.x API;
        # PyJWT 2.x requires options={"verify_signature": False} -- confirm
        # the pinned PyJWT version before upgrading.
        target_check = jwt.decode(token, verify=False)
        if target_check.get(ATTR_TARGET) in self.registrations:
            possible_target = self.registrations[target_check[ATTR_TARGET]]
            # The registration's auth secret is used as the verification key.
            key = possible_target[ATTR_SUBSCRIPTION][ATTR_KEYS][ATTR_AUTH]
            try:
                return jwt.decode(token, key, algorithms=["ES256", "HS256"])
            except jwt.exceptions.DecodeError:
                pass
        return self.json_message(
            "No target found in JWT", status_code=HTTP_UNAUTHORIZED
        )
    # The following is based on code from Auth0
    # https://auth0.com/docs/quickstart/backend/python
    def check_authorization_header(self, request):
        """Check the authorization header.

        Returns the decoded JWT payload (a dict) on success, otherwise a
        JSON error response; callers distinguish via isinstance(dict).
        """
        auth = request.headers.get(AUTHORIZATION)
        if not auth:
            return self.json_message(
                "Authorization header is expected", status_code=HTTP_UNAUTHORIZED
            )
        parts = auth.split()
        if parts[0].lower() != "bearer":
            return self.json_message(
                "Authorization header must start with Bearer",
                status_code=HTTP_UNAUTHORIZED,
            )
        if len(parts) != 2:
            return self.json_message(
                "Authorization header must be Bearer token",
                status_code=HTTP_UNAUTHORIZED,
            )
        token = parts[1]
        try:
            payload = self.decode_jwt(token)
        except jwt.exceptions.InvalidTokenError:
            return self.json_message("token is invalid", status_code=HTTP_UNAUTHORIZED)
        return payload
    async def post(self, request):
        """Accept the POST request for push registrations event callback."""
        auth_check = self.check_authorization_header(request)
        if not isinstance(auth_check, dict):
            return auth_check
        try:
            data = await request.json()
        except ValueError:
            return self.json_message("Invalid JSON", HTTP_BAD_REQUEST)
        # Target comes from the verified JWT, not from the request body.
        event_payload = {
            ATTR_TAG: data.get(ATTR_TAG),
            ATTR_TYPE: data[ATTR_TYPE],
            ATTR_TARGET: auth_check[ATTR_TARGET],
        }
        if data.get(ATTR_ACTION) is not None:
            event_payload[ATTR_ACTION] = data.get(ATTR_ACTION)
        if data.get(ATTR_DATA) is not None:
            event_payload[ATTR_DATA] = data.get(ATTR_DATA)
        # Validation failures are logged but the event is fired anyway.
        try:
            event_payload = CALLBACK_EVENT_PAYLOAD_SCHEMA(event_payload)
        except vol.Invalid as ex:
            _LOGGER.warning(
                "Callback event payload is not valid: %s",
                humanize_error(event_payload, ex),
            )
        event_name = f"{NOTIFY_CALLBACK_EVENT}.{event_payload[ATTR_TYPE]}"
        request.app["hass"].bus.fire(event_name, event_payload)
        return self.json({"status": "ok", "event": event_payload[ATTR_TYPE]})
class HTML5NotificationService(BaseNotificationService):
    """Implement the notification service for HTML5."""
    def __init__(self, hass, gcm_key, vapid_prv, vapid_email, registrations, json_path):
        """Initialize the service."""
        self._gcm_key = gcm_key
        self._vapid_prv = vapid_prv
        self._vapid_email = vapid_email
        # Registration dict shared with the HTTP views, so new browser
        # registrations are visible immediately; json_path persists it.
        self.registrations = registrations
        self.registrations_json_path = json_path
        async def async_dismiss_message(service):
            """Handle dismissing notification message service calls."""
            kwargs = {}
            if self.targets is not None:
                kwargs[ATTR_TARGET] = self.targets
            elif service.data.get(ATTR_TARGET) is not None:
                kwargs[ATTR_TARGET] = service.data.get(ATTR_TARGET)
            kwargs[ATTR_DATA] = service.data.get(ATTR_DATA)
            await self.async_dismiss(**kwargs)
        hass.services.async_register(
            DOMAIN,
            SERVICE_DISMISS,
            async_dismiss_message,
            schema=DISMISS_SERVICE_SCHEMA,
        )
    @property
    def targets(self):
        """Return a dictionary of registered targets."""
        targets = {}
        for registration in self.registrations:
            targets[registration] = registration
        return targets
    def dismiss(self, **kwargs):
        """Dismisses a notification."""
        data = kwargs.get(ATTR_DATA)
        tag = data.get(ATTR_TAG) if data else ""
        # "dismiss": True tells the service worker to close the notification
        # matching this tag instead of showing a new one.
        payload = {ATTR_TAG: tag, ATTR_DISMISS: True, ATTR_DATA: {}}
        self._push_message(payload, **kwargs)
    async def async_dismiss(self, **kwargs):
        """Dismisses a notification.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(partial(self.dismiss, **kwargs))
    def send_message(self, message="", **kwargs):
        """Send a message to a user."""
        # Random tag so each message is a distinct notification that can be
        # referenced later by callback events or dismissal.
        tag = str(uuid.uuid4())
        payload = {
            "badge": "/static/images/notification-badge.png",
            "body": message,
            ATTR_DATA: {},
            "icon": "/static/icons/favicon-192x192.png",
            ATTR_TAG: tag,
            ATTR_TITLE: kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT),
        }
        data = kwargs.get(ATTR_DATA)
        if data:
            # Pick out fields that should go into the notification directly vs
            # into the notification data dictionary.
            data_tmp = {}
            for key, val in data.items():
                if key in HTML5_SHOWNOTIFICATION_PARAMETERS:
                    payload[key] = val
                else:
                    data_tmp[key] = val
            payload[ATTR_DATA] = data_tmp
        # Default click target when neither a url nor actions were supplied.
        if (
            payload[ATTR_DATA].get(ATTR_URL) is None
            and payload.get(ATTR_ACTIONS) is None
        ):
            payload[ATTR_DATA][ATTR_URL] = URL_ROOT
        self._push_message(payload, **kwargs)
    def _push_message(self, payload, **kwargs):
        """Send the message."""
        timestamp = int(time.time())
        ttl = int(kwargs.get(ATTR_TTL, DEFAULT_TTL))
        priority = kwargs.get(ATTR_PRIORITY, DEFAULT_PRIORITY)
        # Only "normal"/"high" are accepted; anything else falls back.
        if priority not in ["normal", "high"]:
            priority = DEFAULT_PRIORITY
        payload["timestamp"] = timestamp * 1000  # Javascript ms since epoch
        targets = kwargs.get(ATTR_TARGET)
        if not targets:
            targets = self.registrations.keys()
        # list() copy: expired registrations are popped during iteration.
        for target in list(targets):
            info = self.registrations.get(target)
            try:
                info = REGISTER_SCHEMA(info)
            except vol.Invalid:
                _LOGGER.error(
                    "%s is not a valid HTML5 push notification target", target
                )
                continue
            # Per-target JWT lets the callback view attribute and verify
            # events against this registration's auth secret.
            payload[ATTR_DATA][ATTR_JWT] = add_jwt(
                timestamp,
                target,
                payload[ATTR_TAG],
                info[ATTR_SUBSCRIPTION][ATTR_KEYS][ATTR_AUTH],
            )
            webpusher = WebPusher(info[ATTR_SUBSCRIPTION])
            if self._vapid_prv and self._vapid_email:
                vapid_headers = create_vapid_headers(
                    self._vapid_email, info[ATTR_SUBSCRIPTION], self._vapid_prv
                )
                vapid_headers.update({"urgency": priority, "priority": priority})
                response = webpusher.send(
                    data=json.dumps(payload), headers=vapid_headers, ttl=ttl
                )
            else:
                # Only pass the gcm key if we're actually using GCM
                # If we don't, notifications break on FireFox
                gcm_key = (
                    self._gcm_key
                    if "googleapis.com" in info[ATTR_SUBSCRIPTION][ATTR_ENDPOINT]
                    else None
                )
                response = webpusher.send(json.dumps(payload), gcm_key=gcm_key, ttl=ttl)
            # HTTP 410 Gone: the push subscription is no longer valid, so
            # drop it and persist the updated registrations file.
            if response.status_code == 410:
                _LOGGER.info("Notification channel has expired")
                reg = self.registrations.pop(target)
                # NOTE(review): save_json's return value is used as a
                # success flag here -- verify it actually returns truthy on
                # success in this HA version.
                if not save_json(self.registrations_json_path, self.registrations):
                    self.registrations[target] = reg
                    _LOGGER.error("Error saving registration")
                else:
                    _LOGGER.info("Configuration saved")
def add_jwt(timestamp, target, tag, jwt_secret):
    """Create JWT json to put into payload."""
    # Valid from `timestamp` until JWT_VALID_DAYS later; carries the
    # registration target and notification tag as custom claims.
    expires = datetime.fromtimestamp(timestamp) + timedelta(days=JWT_VALID_DAYS)
    claims = {
        "exp": expires,
        "nbf": timestamp,
        "iat": timestamp,
        ATTR_TARGET: target,
        ATTR_TAG: tag,
    }
    return jwt.encode(claims, jwt_secret).decode("utf-8")
def create_vapid_headers(vapid_email, subscription_info, vapid_private_key):
    """Create encrypted headers to send to WebPusher."""
    # All three pieces are required; otherwise signing is skipped.
    if vapid_email and vapid_private_key and ATTR_ENDPOINT in subscription_info:
        endpoint = urlparse(subscription_info.get(ATTR_ENDPOINT))
        claims = {
            "sub": f"mailto:{vapid_email}",
            "aud": f"{endpoint.scheme}://{endpoint.netloc}",
        }
        signer = Vapid.from_string(private_key=vapid_private_key)
        return signer.sign(claims)
    return None
|
mit
|
af1rst/bite-project
|
deps/gdata-python-client/samples/apps/marketplace_sample/gdata/tlslite/utils/Cryptlib_AES.py
|
359
|
1364
|
"""Cryptlib AES implementation."""
from cryptomath import *
from AES import *
if cryptlibpyLoaded:
    def new(key, mode, IV):
        # Factory matching the other tlslite cipher backends; only defined
        # when the cryptlib bindings loaded successfully.
        return Cryptlib_AES(key, mode, IV)
    class Cryptlib_AES(AES):
        # AES-CBC implementation delegating to the cryptlib_py bindings.
        def __init__(self, key, mode, IV):
            AES.__init__(self, key, mode, IV, "cryptlib")
            # Create a native AES context and load mode, key size, key, IV.
            self.context = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED, cryptlib_py.CRYPT_ALGO_AES)
            cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_MODE, cryptlib_py.CRYPT_MODE_CBC)
            cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_KEYSIZE, len(key))
            cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_KEY, key)
            cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_IV, IV)
        def __del__(self):
            # Release the native context when the Python object is collected.
            cryptlib_py.cryptDestroyContext(self.context)
        def encrypt(self, plaintext):
            # Base-class call first (presumably validation/bookkeeping --
            # see the AES base class), then encrypt in place via cryptlib.
            AES.encrypt(self, plaintext)
            # NOTE(review): local name 'bytes' shadows the builtin.
            bytes = stringToBytes(plaintext)
            cryptlib_py.cryptEncrypt(self.context, bytes)
            return bytesToString(bytes)
        def decrypt(self, ciphertext):
            AES.decrypt(self, ciphertext)
            bytes = stringToBytes(ciphertext)
            cryptlib_py.cryptDecrypt(self.context, bytes)
            return bytesToString(bytes)
|
apache-2.0
|
KohlsTechnology/ansible
|
lib/ansible/modules/packaging/os/svr4pkg.py
|
95
|
7684
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Boyd Adamson <boyd () boydadamson.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: svr4pkg
short_description: Manage Solaris SVR4 packages
description:
- Manages SVR4 packages on Solaris 10 and 11.
- These were the native packages on Solaris <= 10 and are available
as a legacy feature in Solaris 11.
- Note that this is a very basic packaging system. It will not enforce
dependencies on install or remove.
version_added: "0.9"
author: "Boyd Adamson (@brontitall)"
options:
name:
description:
- Package name, e.g. C(SUNWcsr)
required: true
state:
description:
- Whether to install (C(present)), or remove (C(absent)) a package.
- If the package is to be installed, then I(src) is required.
- The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package.
required: true
choices: ["present", "absent"]
src:
description:
- Specifies the location to install the package from. Required when C(state=present).
- "Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http:/server/mypkgs.pkg)."
- If using a file or directory, they must already be accessible by the host. See the M(copy) module for a way to get them there.
proxy:
description:
- HTTP[s] proxy to be used if C(src) is a URL.
response_file:
description:
- Specifies the location of a response file to be used if package expects input on install. (added in Ansible 1.4)
required: false
zone:
description:
- Whether to install the package only in the current zone, or install it into all zones.
- The installation into all zones works only if you are working with the global zone.
required: false
default: "all"
choices: ["current", "all"]
version_added: "1.6"
category:
description:
- Install/Remove category instead of a single package.
required: false
type: bool
version_added: "1.6"
'''
EXAMPLES = '''
# Install a package from an already copied file
- svr4pkg:
name: CSWcommon
src: /tmp/cswpkgs.pkg
state: present
# Install a package directly from an http site
- svr4pkg:
name: CSWpkgutil
src: 'http://get.opencsw.org/now'
state: present
zone: current
# Install a package with a response file
- svr4pkg:
name: CSWggrep
src: /tmp/third-party.pkg
response_file: /tmp/ggrep.response
state: present
# Ensure that a package is not installed.
- svr4pkg:
name: SUNWgnome-sound-recorder
state: absent
# Ensure that a category is not installed.
- svr4pkg:
name: FIREFOX
state: absent
category: true
'''
import os
import tempfile
from ansible.module_utils.basic import AnsibleModule
def package_installed(module, name, category):
    """Return True if the SVR4 package (or, with category, the category
    of packages) is currently installed, as reported by pkginfo(1).

    :param module: AnsibleModule instance (used for path lookup and exec)
    :param name: package or category name
    :param category: treat name as a category (pkginfo -c)
    """
    cmd = [module.get_bin_path('pkginfo', True), '-q']
    if category:
        cmd.append('-c')
    cmd.append(name)
    # Pass the argv list directly instead of ' '.join(cmd): run_command then
    # executes without shell-style re-splitting, which is safer and remains
    # correct for arguments containing whitespace.
    rc, out, err = module.run_command(cmd)
    # pkginfo -q is silent and signals presence purely via its exit code.
    return rc == 0
def create_admin_file():
    """Write a temporary pkgadd(1M)/pkgrm(1M) "admin" file that answers
    every interactive prompt non-interactively (fully automatic mode).

    Returns the filename; the caller is responsible for unlinking it.
    """
    (desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True)
    fullauto = '''
mail=
instance=unique
partial=nocheck
runlevel=quit
idepend=nocheck
rdepend=nocheck
space=quit
setuid=nocheck
conflict=nocheck
action=nocheck
networktimeout=60
networkretries=3
authentication=quit
keystore=/var/sadm/security
proxy=
basedir=default
'''
    # os.write() requires a bytes-like object; passing the raw str raised
    # TypeError on Python 3.
    os.write(desc, fullauto.encode())
    os.close(desc)
    return filename
def run_command(module, cmd):
    """Resolve cmd[0] to its full binary path, then execute the command."""
    binary = module.get_bin_path(cmd[0], True)
    cmd[0] = binary
    return module.run_command(cmd)
def package_install(module, name, src, proxy, response_file, zone, category):
    """Install a package (or category) with pkgadd; returns (rc, out, err)."""
    adminfile = create_admin_file()
    # Build the pkgadd argv: non-interactive, driven by the admin file.
    cmd = ['pkgadd', '-n']
    if zone == 'current':
        cmd.append('-G')
    cmd.extend(['-a', adminfile, '-d', src])
    if proxy is not None:
        cmd.extend(['-x', proxy])
    if response_file is not None:
        cmd.extend(['-r', response_file])
    if category:
        cmd.append('-Y')
    cmd.append(name)
    result = run_command(module, cmd)
    os.unlink(adminfile)
    return result
def package_uninstall(module, name, src, category):
    """Remove a package (or category) with pkgrm; returns (rc, out, err)."""
    adminfile = create_admin_file()
    cmd = ['pkgrm', '-na', adminfile]
    # -Y switches pkgrm from package-name to category mode.
    if category:
        cmd.append('-Y')
    cmd.append(name)
    result = run_command(module, cmd)
    os.unlink(adminfile)
    return result
def main():
    # Entry point: declare the module interface, dispatch to install or
    # uninstall, then map pkgadd/pkgrm exit codes onto changed/failed.
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            state=dict(required=True, choices=['present', 'absent']),
            src=dict(default=None),
            proxy=dict(default=None),
            response_file=dict(default=None),
            zone=dict(required=False, default='all', choices=['current', 'all']),
            category=dict(default=False, type='bool')
        ),
        supports_check_mode=True
    )
    state = module.params['state']
    name = module.params['name']
    src = module.params['src']
    proxy = module.params['proxy']
    response_file = module.params['response_file']
    zone = module.params['zone']
    category = module.params['category']
    # rc stays None when no action is taken (already in the desired state).
    rc = None
    out = ''
    err = ''
    result = {}
    result['name'] = name
    result['state'] = state
    if state == 'present':
        if src is None:
            module.fail_json(name=name,
                             msg="src is required when state=present")
        if not package_installed(module, name, category):
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category)
            # Stdout is normally empty but for some packages can be
            # very long and is not often useful
            if len(out) > 75:
                out = out[:75] + '...'
    elif state == 'absent':
        if package_installed(module, name, category):
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = package_uninstall(module, name, src, category)
            out = out[:75]
    # Returncodes as per pkgadd(1m)
    # 0 Successful completion
    # 1 Fatal error.
    # 2 Warning.
    # 3 Interruption.
    # 4 Administration.
    # 5 Administration. Interaction is required. Do not use pkgadd -n.
    # 10 Reboot after installation of all packages.
    # 20 Reboot after installation of this package.
    # 99 (observed) pkgadd: ERROR: could not process datastream from </tmp/pkgutil.pkg>
    if rc in (0, 2, 3, 10, 20):
        result['changed'] = True
    # no install nor uninstall, or failed
    else:
        result['changed'] = False
    # rc will be none when the package already was installed and no action took place
    # Only return failed=False when the returncode is known to be good as there may be more
    # undocumented failure return codes
    if rc not in (None, 0, 2, 10, 20):
        result['failed'] = True
    else:
        result['failed'] = False
    if out:
        result['stdout'] = out
    if err:
        result['stderr'] = err
    module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
holmes/intellij-community
|
python/helpers/docutils/parsers/rst/directives/parts.py
|
41
|
4241
|
# $Id: parts.py 6385 2010-08-13 12:17:01Z milde $
# Authors: David Goodger <goodger@python.org>; Dmitry Jemerov
# Copyright: This module has been placed in the public domain.
"""
Directives for document parts.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes, languages
from docutils.transforms import parts
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
class Contents(Directive):
    """
    Table of contents.
    The table of contents is generated in two passes: initial parse and
    transform. During the initial parse, a 'pending' element is generated
    which acts as a placeholder, storing the TOC title and any options
    internally. At a later stage in the processing, the 'pending' element is
    replaced by a 'topic' element, a title and the table of contents proper.
    """
    backlinks_values = ('top', 'entry', 'none')
    def backlinks(arg):
        # Option-conversion function referenced below in option_spec (it is
        # consumed at class-definition time, not called as a method):
        # validates the :backlinks: option; 'none' maps to None.
        value = directives.choice(arg, Contents.backlinks_values)
        if value == 'none':
            return None
        else:
            return value
    required_arguments = 0
    optional_arguments = 1
    final_argument_whitespace = True
    option_spec = {'depth': directives.nonnegative_int,
                   'local': directives.flag,
                   'backlinks': backlinks,
                   'class': directives.class_option}
    def run(self):
        # A ToC needs section titles in scope, so reject use inside body
        # elements; sidebars are explicitly allowed.
        if not (self.state_machine.match_titles
                or isinstance(self.state_machine.node, nodes.sidebar)):
            raise self.error('The "%s" directive may not be used within '
                             'topics or body elements.' % self.name)
        document = self.state_machine.document
        language = languages.get_language(document.settings.language_code)
        # Title precedence: explicit argument > no title for :local: >
        # language-specific default label.
        if self.arguments:
            title_text = self.arguments[0]
            text_nodes, messages = self.state.inline_text(title_text,
                                                          self.lineno)
            title = nodes.title(title_text, '', *text_nodes)
        else:
            messages = []
            if 'local' in self.options:
                title = None
            else:
                title = nodes.title('', language.labels['contents'])
        topic = nodes.topic(classes=['contents'])
        topic['classes'] += self.options.get('class', [])
        # the latex2e writer needs source and line for a warning:
        src, srcline = self.state_machine.get_source_and_line()
        topic.source = src
        topic.line = srcline - 1
        if 'local' in self.options:
            topic['classes'].append('local')
        if title:
            name = title.astext()
            topic += title
        else:
            name = language.labels['contents']
        name = nodes.fully_normalize_name(name)
        if not document.has_name(name):
            topic['names'].append(name)
        document.note_implicit_target(topic)
        # The 'pending' placeholder is resolved later by the parts.Contents
        # transform.
        pending = nodes.pending(parts.Contents, rawsource=self.block_text)
        pending.details.update(self.options)
        document.note_pending(pending)
        topic += pending
        return [topic] + messages
class Sectnum(Directive):
    """Automatic section numbering."""
    option_spec = {
        'depth': int,
        'start': int,
        'prefix': directives.unchanged_required,
        'suffix': directives.unchanged_required,
    }
    def run(self):
        """Insert a pending node resolved later by the SectNum transform."""
        placeholder = nodes.pending(parts.SectNum)
        placeholder.details.update(self.options)
        document = self.state_machine.document
        document.note_pending(placeholder)
        return [placeholder]
class Header(Directive):
    """Contents of document header."""
    has_content = True
    def run(self):
        """Parse the directive body into the document's header decoration."""
        self.assert_has_content()
        decoration = self.state_machine.document.get_decoration()
        self.state.nested_parse(
            self.content, self.content_offset, decoration.get_header()
        )
        return []
class Footer(Directive):
    """Contents of document footer."""
    has_content = True
    def run(self):
        """Parse the directive body into the document's footer decoration."""
        self.assert_has_content()
        decoration = self.state_machine.document.get_decoration()
        self.state.nested_parse(
            self.content, self.content_offset, decoration.get_footer()
        )
        return []
|
apache-2.0
|
TRESCLOUD/odoopub
|
addons/base_gengo/res_company.py
|
24
|
1784
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_company(osv.Model):
    # Extends res.company with Gengo translation-service credentials and
    # per-company Gengo behavior flags.
    _name = "res.company"
    _inherit = "res.company"
    _columns = {
        "gengo_private_key": fields.text("Gengo Private Key", copy=False),
        "gengo_public_key": fields.text("Gengo Public Key", copy=False),
        # Fixed grammar in the user-facing help text ("will be automatically
        # be enclosed in each an every request").
        "gengo_comment": fields.text("Comments", help="This comment will automatically be enclosed in each and every request sent to Gengo"),
        "gengo_auto_approve": fields.boolean("Auto Approve Translation ?", help="Jobs are Automatically Approved by Gengo."),
        "gengo_sandbox": fields.boolean("Sandbox Mode", help="Check this box if you're using the sandbox mode of Gengo, mainly used for testing purpose."),
    }
    _defaults = {
        "gengo_auto_approve": True,
    }
|
agpl-3.0
|
Hackplayers/Empire-mod-Hpys-tests
|
lib/modules/python/situational_awareness/network/port_scan.py
|
2
|
5624
|
from lib.common import helpers
class Module:
    # Empire module wrapper: metadata + options + a generate() method that
    # renders the Python agent payload as a string.
    def __init__(self, mainMenu, params=[]):
        # NOTE(review): mutable default argument `params=[]` is shared
        # across calls -- harmless here since it is only iterated, but
        # worth confirming against Empire's module-loader contract.
        # metadata info about the module, not modified during runtime
        self.info = {
            # name for the module that will appear in module menus
            'Name': 'Port Scanner.',
            # list of one or more authors for the module
            'Author': ['@424f424f'],
            # more verbose multi-line description of the module
            'Description': ('Simple Port Scanner.'),
            # True if the module needs to run in the background
            'Background' : True,
            # File extension to save the file as
            'OutputExtension' : None,
            # if the module needs administrative privileges
            'NeedsAdmin' : False,
            # True if the method doesn't touch disk/is reasonably opsec safe
            'OpsecSafe' : True,
            # the module language
            'Language' : 'python',
            # the minimum language version needed
            'MinLanguageVersion' : '2.6',
            # list of any references/other comments
            'Comments': ['CIDR Parser credits to http://bibing.us.es/proyectos/abreproy/12106/fichero/ARCHIVOS%252Fservidor_xmlrpc%252Fcidr.py']
        }
        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                # The 'Agent' option is the only one that MUST be in a module
                'Description'   :   'Agent to execute module on.',
                'Required'      :   True,
                'Value'         :   ''
            },
            'Target' : {
                # The 'Agent' option is the only one that MUST be in a module
                'Description'   :   'Targets to scan in single, range 0-255 or CIDR format.',
                'Required'      :   True,
                'Value'         :   ''
            },
            'Port' : {
                # The 'Agent' option is the only one that MUST be in a module
                'Description'   :   'The port to scan for.',
                'Required'      :   True,
                'Value'         :   '8080'
            }
        }
        # save off a copy of the mainMenu object to access external functionality
        #   like listeners/agent handlers/etc.
        self.mainMenu = mainMenu
        # During instantiation, any settable option parameters
        #   are passed as an object set to the module and the
        #   options dictionary is automatically set. This is mostly
        #   in case options are passed on the command line
        if params:
            for param in params:
                # parameter format is [Name, Value]
                option, value = param
                if option in self.options:
                    self.options[option]['Value'] = value
    def generate(self):
        # Render the agent-side scanner script, substituting the Target and
        # Port option values into the %s placeholders at the bottom of the
        # template.  The template itself is Python 2 source executed on the
        # agent -- it is runtime data and must not be modified here.
        target = self.options['Target']['Value']
        port = self.options['Port']['Value']
        script = """
import socket
iplist = []
def iprange(addressrange): # converts a ip range into a list
    first3octets = '.'.join(addressrange.split('-')[0].split('.')[:3]) + '.'
    for i in range(int(addressrange.split('-')[0].split('.')[3]),int(addressrange.split('-')[1])+1):
        iplist.append(first3octets+str(i))
    return iplist
def ip2bin(ip):
    b = ""
    inQuads = ip.split(".")
    outQuads = 4
    for q in inQuads:
        if q != "":
            b += dec2bin(int(q),8)
            outQuads -= 1
    while outQuads > 0:
        b += "00000000"
        outQuads -= 1
    return b
def dec2bin(n,d=None):
    s = ""
    while n>0:
        if n&1:
            s = "1"+s
        else:
            s = "0"+s
        n >>= 1
    if d is not None:
        while len(s)<d:
            s = "0"+s
    if s == "": s = "0"
    return s
def bin2ip(b):
    ip = ""
    for i in range(0,len(b),8):
        ip += str(int(b[i:i+8],2))+"."
    return ip[:-1]
def printCIDR(c):
    parts = c.split("/")
    baseIP = ip2bin(parts[0])
    subnet = int(parts[1])
    if subnet == 32:
        print bin2ip(baseIP)
    else:
        ipPrefix = baseIP[:-(32-subnet)]
        for i in range(2**(32-subnet)):
            iplist.append(bin2ip(ipPrefix+dec2bin(i, (32-subnet))))
    return
def validateCIDRBlock(b):
    p = re.compile("^([0-9]{1,3}\.){0,3}[0-9]{1,3}(/[0-9]{1,2}){1}$")
    if not p.match(b):
        print "Error: Invalid CIDR format!"
        return False
    prefix, subnet = b.split("/")
    quads = prefix.split(".")
    for q in quads:
        if (int(q) < 0) or (int(q) > 255):
            print "Error: quad "+str(q)+" wrong size."
            return False
    if (int(subnet) < 1) or (int(subnet) > 32):
        print "Error: subnet "+str(subnet)+" wrong size."
        return False
    return True
def portscan(target,port):
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(0.1)
        s.connect((target, port))
    except Exception:
        failvar = 0
        print "Host {} {}/tcp closed".format(target, port)
    else:
        print "Host {} {}/tcp open".format(target, port)
        s.close()
def main(target, port):
    if '/' in target:
        printCIDR(target)
        for ip in iplist:
            portscan(ip, port)
    elif '-' in target:
        iprange(target)
        for ip in iplist:
            portscan(ip, port)
    else:
        portscan(target, port)
target = "%s"
port = %s
main(target, port)
""" %(target, port)
        return script
|
bsd-3-clause
|
ecederstrand/django
|
tests/view_tests/tests/py3_test_debug.py
|
335
|
1849
|
"""
Since this file contains Python 3 specific syntax, it's named without a test_
prefix so the test runner won't try to import it. Instead, the test class is
imported in test_debug.py, but only on Python 3.
This filename is also in setup.cfg flake8 exclude since the Python 2 syntax
error (raise ... from ...) can't be silenced using NOQA.
"""
import sys
from django.test import RequestFactory, TestCase
from django.views.debug import ExceptionReporter
class Py3ExceptionReporterTests(TestCase):
    # Exercises ExceptionReporter's rendering of PEP 3134 exception chains.
    rf = RequestFactory()
    def test_reporting_of_nested_exceptions(self):
        """Both explicit (``raise .. from``) and implicit chains are shown."""
        request = self.rf.get('/test_view/')
        try:
            try:
                raise AttributeError('Top level')
            except AttributeError as explicit:
                try:
                    # Explicit chaining: sets __cause__.
                    raise ValueError('Second exception') from explicit
                except ValueError:
                    # Implicit chaining: sets __context__.
                    raise IndexError('Final exception')
        except Exception:
            # Custom exception handler, just pass it into ExceptionReporter
            exc_type, exc_value, tb = sys.exc_info()
        explicit_exc = 'The above exception ({0}) was the direct cause of the following exception:'
        implicit_exc = 'During handling of the above exception ({0}), another exception occurred:'
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        # Both messages are twice on page -- one rendered as html,
        # one as plain text (for pastebin)
        self.assertEqual(2, html.count(explicit_exc.format("Top level")))
        self.assertEqual(2, html.count(implicit_exc.format("Second exception")))
        text = reporter.get_traceback_text()
        self.assertIn(explicit_exc.format("Top level"), text)
        self.assertIn(implicit_exc.format("Second exception"), text)
|
bsd-3-clause
|
h8rift/android_kernel_htc_msm8960_evita-h8x
|
Documentation/target/tcm_mod_builder.py
|
3119
|
42754
|
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
# Shared generator state, filled in as the script runs.
tcm_dir = ""  # presumably the kernel source root -- set outside this chunk
fabric_ops = []  # collected fabric op names -- populated outside this chunk
fabric_mod_dir = ""  # output directory for the generated fabric module
fabric_mod_port = ""  # target-port struct prefix, e.g. "lport" (set by the *_build_*_include builders)
fabric_mod_init_port = ""  # initiator-port prefix, e.g. "nport" (set alongside fabric_mod_port)
def tcm_mod_err(msg):
    # Print the error and abort the generator (Python 2 print statement).
    print msg
    sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_base.h for a Fibre Channel fabric.

    Emits the _nacl, _tpg and _lport structure definitions and records
    the FC naming convention in the module globals
    (fabric_mod_port = "lport", fabric_mod_init_port = "nport").
    """
    global fabric_mod_port
    global fabric_mod_init_port
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print("Writing file: " + f)
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += "\t/* Binary World Wide unique Port Name for FC Initiator Nport */\n"
    buf += "\tu64 nport_wwpn;\n"
    buf += "\t/* ASCII formatted WWPN for FC Initiator Nport */\n"
    buf += "\tchar nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += "\tstruct se_node_acl se_node_acl;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += "\t/* FC lport target portal group tag for TCM */\n"
    buf += "\tu16 lport_tpgt;\n"
    buf += "\t/* Pointer back to " + fabric_mod_name + "_lport */\n"
    buf += "\tstruct " + fabric_mod_name + "_lport *lport;\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += "\tstruct se_portal_group se_tpg;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_lport {\n"
    buf += "\t/* SCSI protocol the lport is providing */\n"
    buf += "\tu8 lport_proto_id;\n"
    buf += "\t/* Binary World Wide unique Port Name for FC Target Lport */\n"
    buf += "\tu64 lport_wwpn;\n"
    buf += "\t/* ASCII formatted WWPN for FC Target Lport */\n"
    buf += "\tchar lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_lport() */\n"
    buf += "\tstruct se_wwn lport_wwn;\n"
    buf += "};\n"
    # open() raises on failure (the old "if not p" check was dead code) and
    # file.write() returning a count is not an error under Python 3, so just
    # write under a context manager that guarantees the file is closed.
    with open(f, 'w') as p:
        p.write(buf)
    fabric_mod_port = "lport"
    fabric_mod_init_port = "nport"
    return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_base.h for a SAS fabric.

    Emits the _nacl, _tpg and _tport structure definitions and records
    the SAS naming convention in the module globals
    (fabric_mod_port = "tport", fabric_mod_init_port = "iport").
    """
    global fabric_mod_port
    global fabric_mod_init_port
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print("Writing file: " + f)
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += "\t/* Binary World Wide unique Port Name for SAS Initiator port */\n"
    buf += "\tu64 iport_wwpn;\n"
    buf += "\t/* ASCII formatted WWPN for Sas Initiator port */\n"
    buf += "\tchar iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += "\tstruct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += "\t/* SAS port target portal group tag for TCM */\n"
    buf += "\tu16 tport_tpgt;\n"
    buf += "\t/* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += "\tstruct " + fabric_mod_name + "_tport *tport;\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += "\tstruct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += "\t/* SCSI protocol the tport is providing */\n"
    buf += "\tu8 tport_proto_id;\n"
    buf += "\t/* Binary World Wide unique Port Name for SAS Target port */\n"
    buf += "\tu64 tport_wwpn;\n"
    buf += "\t/* ASCII formatted WWPN for SAS Target port */\n"
    buf += "\tchar tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += "\tstruct se_wwn tport_wwn;\n"
    buf += "};\n"
    # open() raises on failure and write()'s return value is not an error
    # indicator; a context manager guarantees close() on all paths.
    with open(f, 'w') as p:
        p.write(buf)
    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_base.h for an iSCSI fabric.

    Emits the _nacl, _tpg and _tport structure definitions (name-based,
    no binary WWPN fields) and records the iSCSI naming convention in
    the module globals (fabric_mod_port = "tport",
    fabric_mod_init_port = "iport").
    """
    global fabric_mod_port
    global fabric_mod_init_port
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print("Writing file: " + f)
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += "\t/* ASCII formatted InitiatorName */\n"
    buf += "\tchar iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += "\tstruct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += "\t/* iSCSI target portal group tag for TCM */\n"
    buf += "\tu16 tport_tpgt;\n"
    buf += "\t/* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += "\tstruct " + fabric_mod_name + "_tport *tport;\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += "\tstruct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += "\t/* SCSI protocol the tport is providing */\n"
    buf += "\tu8 tport_proto_id;\n"
    buf += "\t/* ASCII formatted TargetName for IQN */\n"
    buf += "\tchar tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += "\tstruct se_wwn tport_wwn;\n"
    buf += "};\n"
    # open() raises on failure and write()'s return value is not an error
    # indicator; a context manager guarantees close() on all paths.
    with open(f, 'w') as p:
        p.write(buf)
    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
    """Dispatch to the protocol-specific _base.h generator.

    Supported proto_ident values are "FC", "SAS" and "iSCSI"; anything
    else aborts the script with exit status 1.
    """
    if proto_ident == "FC":
        tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "SAS":
        tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "iSCSI":
        tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
    else:
        print("Unsupported proto_ident: " + proto_ident)
        sys.exit(1)
    return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_configfs.c, the configfs glue code.

    Emits make/drop callbacks for node ACLs, TPGs and WWN ports, the
    target_core_fabric_ops table, and configfs register/deregister plus
    module init/exit boilerplate.  Reads the module globals
    fabric_mod_port / fabric_mod_init_port, so one of the
    tcm_mod_build_*_include() helpers must have run first.
    """
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
    print("Writing file: " + f)
    buf = "#include <linux/module.h>\n"
    buf += "#include <linux/moduleparam.h>\n"
    buf += "#include <linux/version.h>\n"
    buf += "#include <generated/utsrelease.h>\n"
    buf += "#include <linux/utsname.h>\n"
    buf += "#include <linux/init.h>\n"
    buf += "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/configfs.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_transport.h>\n"
    buf += "#include <target/target_core_fabric_ops.h>\n"
    buf += "#include <target/target_core_fabric_configfs.h>\n"
    buf += "#include <target/target_core_fabric_lib.h>\n"
    buf += "#include <target/target_core_device.h>\n"
    buf += "#include <target/target_core_tpg.h>\n"
    buf += "#include <target/target_core_configfs.h>\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/configfs_macros.h>\n\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
    buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
    buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
    buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
    buf += "\tstruct se_portal_group *se_tpg,\n"
    buf += "\tstruct config_group *group,\n"
    buf += "\tconst char *name)\n"
    buf += "{\n"
    buf += "\tstruct se_node_acl *se_nacl, *se_nacl_new;\n"
    buf += "\tstruct " + fabric_mod_name + "_nacl *nacl;\n"
    # WWPN parsing only applies to fabrics with binary port names.
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += "\tu64 wwpn = 0;\n"
    buf += "\tu32 nexus_depth;\n\n"
    buf += "\t/* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
    buf += "\t\treturn ERR_PTR(-EINVAL); */\n"
    buf += "\tse_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
    buf += "\tif (!(se_nacl_new))\n"
    buf += "\t\treturn ERR_PTR(-ENOMEM);\n"
    buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
    buf += "\tnexus_depth = 1;\n"
    buf += "\t/*\n"
    buf += "\t * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
    buf += "\t * when converting a NodeACL from demo mode -> explict\n"
    buf += "\t */\n"
    buf += "\tse_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
    buf += "\t\t\t\tname, nexus_depth);\n"
    buf += "\tif (IS_ERR(se_nacl)) {\n"
    buf += "\t\t" + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
    buf += "\t\treturn se_nacl;\n"
    buf += "\t}\n"
    buf += "\t/*\n"
    buf += "\t * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
    buf += "\t */\n"
    buf += "\tnacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += "\tnacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
        buf += "\t/* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
    buf += "\treturn se_nacl;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
    buf += "{\n"
    buf += "\tstruct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
    buf += "\t\t\t\tstruct " + fabric_mod_name + "_nacl, se_node_acl);\n"
    buf += "\tcore_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
    buf += "\tkfree(nacl);\n"
    buf += "}\n\n"
    buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
    buf += "\tstruct se_wwn *wwn,\n"
    buf += "\tstruct config_group *group,\n"
    buf += "\tconst char *name)\n"
    buf += "{\n"
    buf += "\tstruct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
    buf += "\t\t\tstruct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
    buf += "\tstruct " + fabric_mod_name + "_tpg *tpg;\n"
    buf += "\tunsigned long tpgt;\n"
    buf += "\tint ret;\n\n"
    buf += "\tif (strstr(name, \"tpgt_\") != name)\n"
    buf += "\t\treturn ERR_PTR(-EINVAL);\n"
    buf += "\tif (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
    buf += "\t\treturn ERR_PTR(-EINVAL);\n\n"
    buf += "\ttpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
    buf += "\tif (!(tpg)) {\n"
    buf += "\t\tprintk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
    buf += "\t\treturn ERR_PTR(-ENOMEM);\n"
    buf += "\t}\n"
    buf += "\ttpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
    buf += "\ttpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
    buf += "\tret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
    buf += "\t\t\t\t&tpg->se_tpg, (void *)tpg,\n"
    buf += "\t\t\t\tTRANSPORT_TPG_TYPE_NORMAL);\n"
    buf += "\tif (ret < 0) {\n"
    buf += "\t\tkfree(tpg);\n"
    buf += "\t\treturn NULL;\n"
    buf += "\t}\n"
    buf += "\treturn &tpg->se_tpg;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += "\tstruct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
    buf += "\t\t\t\tstruct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
    buf += "\tcore_tpg_deregister(se_tpg);\n"
    buf += "\tkfree(tpg);\n"
    buf += "}\n\n"
    buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
    buf += "\tstruct target_fabric_configfs *tf,\n"
    buf += "\tstruct config_group *group,\n"
    buf += "\tconst char *name)\n"
    buf += "{\n"
    buf += "\tstruct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += "\tu64 wwpn = 0;\n\n"
    buf += "\t/* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
    buf += "\t\treturn ERR_PTR(-EINVAL); */\n\n"
    buf += "\t" + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
    buf += "\tif (!(" + fabric_mod_port + ")) {\n"
    buf += "\t\tprintk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
    buf += "\t\treturn ERR_PTR(-ENOMEM);\n"
    buf += "\t}\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += "\t" + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
        # NOTE: the original emitted "__NAMELEN" (double underscore) here,
        # a macro the generated _base.h never defines; fixed to _NAMELEN.
        buf += "\t/* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
    buf += "\treturn &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
    buf += "{\n"
    buf += "\tstruct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
    buf += "\t\t\tstruct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
    buf += "\tkfree(" + fabric_mod_port + ");\n"
    buf += "}\n\n"
    buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
    buf += "\tstruct target_fabric_configfs *tf,\n"
    buf += "\tchar *page)\n"
    buf += "{\n"
    buf += "\treturn sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
    buf += "\t\t\"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
    buf += "\t\tutsname()->machine);\n"
    buf += "}\n\n"
    buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
    buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
    buf += "\t&" + fabric_mod_name + "_wwn_version.attr,\n"
    buf += "\tNULL,\n"
    buf += "};\n\n"
    buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
    buf += "\t.get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
    buf += "\t.get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
    buf += "\t.tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
    buf += "\t.tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
    buf += "\t.tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
    buf += "\t.tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
    buf += "\t.tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
    buf += "\t.tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
    buf += "\t.tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
    buf += "\t.tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
    buf += "\t.tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
    buf += "\t.tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
    buf += "\t.tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
    buf += "\t.tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
    buf += "\t.tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
    buf += "\t.release_cmd_to_pool = " + fabric_mod_name + "_release_cmd,\n"
    buf += "\t.release_cmd_direct = " + fabric_mod_name + "_release_cmd,\n"
    buf += "\t.shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
    buf += "\t.close_session = " + fabric_mod_name + "_close_session,\n"
    buf += "\t.stop_session = " + fabric_mod_name + "_stop_session,\n"
    buf += "\t.fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
    buf += "\t.sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
    buf += "\t.sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
    buf += "\t.sess_get_initiator_sid = NULL,\n"
    buf += "\t.write_pending = " + fabric_mod_name + "_write_pending,\n"
    buf += "\t.write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
    buf += "\t.set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
    buf += "\t.get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
    buf += "\t.get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
    buf += "\t.new_cmd_failure = " + fabric_mod_name + "_new_cmd_failure,\n"
    buf += "\t.queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
    buf += "\t.queue_status = " + fabric_mod_name + "_queue_status,\n"
    buf += "\t.queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
    buf += "\t.get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
    buf += "\t.set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
    buf += "\t.is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
    buf += "\t.pack_lun = " + fabric_mod_name + "_pack_lun,\n"
    buf += "\t/*\n"
    buf += "\t * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
    buf += "\t */\n"
    buf += "\t.fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
    buf += "\t.fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
    buf += "\t.fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
    buf += "\t.fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
    buf += "\t.fabric_post_link = NULL,\n"
    buf += "\t.fabric_pre_unlink = NULL,\n"
    buf += "\t.fabric_make_np = NULL,\n"
    buf += "\t.fabric_drop_np = NULL,\n"
    buf += "\t.fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
    buf += "\t.fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
    buf += "};\n\n"
    buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
    buf += "{\n"
    buf += "\tstruct target_fabric_configfs *fabric;\n"
    buf += "\tint ret;\n\n"
    buf += "\tprintk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
    buf += "\t\t\" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
    buf += "\t\tutsname()->machine);\n"
    buf += "\t/*\n"
    buf += "\t * Register the top level struct config_item_type with TCM core\n"
    buf += "\t */\n"
    # fabric_mod_name is expected to carry a "tcm_" prefix; strip it for
    # the configfs fabric name.
    buf += "\tfabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
    buf += "\tif (!(fabric)) {\n"
    buf += "\t\tprintk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
    buf += "\t\treturn -ENOMEM;\n"
    buf += "\t}\n"
    buf += "\t/*\n"
    buf += "\t * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
    buf += "\t */\n"
    buf += "\tfabric->tf_ops = " + fabric_mod_name + "_ops;\n"
    buf += "\t/*\n"
    buf += "\t * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
    buf += "\t */\n"
    buf += "\tTF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
    buf += "\tTF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
    buf += "\tTF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
    buf += "\tTF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
    buf += "\tTF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
    buf += "\tTF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
    buf += "\tTF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
    buf += "\tTF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
    buf += "\tTF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
    buf += "\t/*\n"
    buf += "\t * Register the fabric for use within TCM\n"
    buf += "\t */\n"
    buf += "\tret = target_fabric_configfs_register(fabric);\n"
    buf += "\tif (ret < 0) {\n"
    buf += "\t\tprintk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
    buf += "\t\t\t\" for " + fabric_mod_name.upper() + "\\n\");\n"
    buf += "\t\treturn ret;\n"
    buf += "\t}\n"
    buf += "\t/*\n"
    buf += "\t * Setup our local pointer to *fabric\n"
    buf += "\t */\n"
    buf += "\t" + fabric_mod_name + "_fabric_configfs = fabric;\n"
    buf += "\tprintk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
    buf += "\treturn 0;\n"
    buf += "};\n\n"
    buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
    buf += "{\n"
    buf += "\tif (!(" + fabric_mod_name + "_fabric_configfs))\n"
    buf += "\t\treturn;\n\n"
    buf += "\ttarget_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
    buf += "\t" + fabric_mod_name + "_fabric_configfs = NULL;\n"
    buf += "\tprintk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
    buf += "};\n\n"
    buf += "static int __init " + fabric_mod_name + "_init(void)\n"
    buf += "{\n"
    buf += "\tint ret;\n\n"
    buf += "\tret = " + fabric_mod_name + "_register_configfs();\n"
    buf += "\tif (ret < 0)\n"
    buf += "\t\treturn ret;\n\n"
    buf += "\treturn 0;\n"
    buf += "};\n\n"
    buf += "static void " + fabric_mod_name + "_exit(void)\n"
    buf += "{\n"
    buf += "\t" + fabric_mod_name + "_deregister_configfs();\n"
    buf += "};\n\n"
    buf += "#ifdef MODULE\n"
    buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
    buf += "MODULE_LICENSE(\"GPL\");\n"
    buf += "module_init(" + fabric_mod_name + "_init);\n"
    buf += "module_exit(" + fabric_mod_name + "_exit);\n"
    buf += "#endif\n"
    # open() raises on failure and write()'s return value is not an error
    # indicator; a context manager guarantees close() on all paths.
    with open(f, 'w') as p:
        p.write(buf)
    return
def tcm_mod_scan_fabric_ops(tcm_dir):
    """Scan target_core_fabric_ops.h under *tcm_dir* for function pointers.

    Appends every line that looks like a function-pointer member
    (contains "(*") to the module-level fabric_ops list.  The original
    two-phase control flow (skip until the struct declaration, then
    collect) is preserved exactly.
    """
    fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"
    print("Using tcm_mod_scan_fabric_ops: " + fabric_ops_api)
    process_fo = 0
    # "with" guarantees the header is closed even if parsing raises.
    with open(fabric_ops_api, 'r') as p:
        line = p.readline()
        while line:
            if process_fo == 0 and re.search(r'struct target_core_fabric_ops {', line):
                line = p.readline()
                continue
            if process_fo == 0:
                process_fo = 1
                line = p.readline()
                # Search for function pointer
                if not re.search(r'\(\*', line):
                    continue
                fabric_ops.append(line.rstrip())
                continue
            line = p.readline()
            # Search for function pointer
            if not re.search(r'\(\*', line):
                continue
            fabric_ops.append(line.rstrip())
    return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_transport.h>\n"
buf += "#include <target/target_core_fabric_ops.h>\n"
buf += "#include <target/target_core_fabric_lib.h>\n"
buf += "#include <target/target_core_device.h>\n"
buf += "#include <target/target_core_tpg.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!(nacl)) {\n"
buf += " printk(KERN_ERR \"Unable to alocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('release_cmd_to_pool', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('new_cmd_failure\)\(', fo):
buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
if re.search('pack_lun\)\(', fo):
buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
buf += "{\n"
buf += " WARN_ON(lun >= 256);\n"
buf += " /* Caller wants this byte-swapped */\n"
buf += " return cpu_to_le64((lun & 0xff) << 8);\n"
buf += "}\n\n"
bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
	# Append the new fabric module's subdirectory to the in-tree
	# drivers/target/Makefile so kbuild descends into it.
	makefile_path = tcm_dir + "/drivers/target/Makefile"
	entry = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
	makefile = open(makefile_path, 'a')
	makefile.write(entry)
	makefile.close()
	return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
	# Register the new fabric module's Kconfig with the top-level
	# drivers/target/Kconfig via a "source" directive.
	kconfig_path = tcm_dir + "/drivers/target/Kconfig"
	directive = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
	kconfig = open(kconfig_path, 'a')
	kconfig.write(directive)
	kconfig.close()
	return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
# Command-line interface: -m/--modulename and -p/--protoident are both
# mandatory; if either is missing we print usage and abort.
# NOTE(review): this parsing runs at import time, before the
# __main__ guard below.
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
		action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
		action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
	if not opts.__dict__[m]:
		print "mandatory option is missing\n"
		parser.print_help()
		exit(-1)
if __name__ == "__main__":
	main(str(opts.modname), opts.protoident)
|
gpl-2.0
|
safwanrahman/mozillians
|
mozillians/settings/base.py
|
1
|
10137
|
# -*- coding: utf-8 -*-
# Django settings for the mozillians project.
import logging
import os.path
import sys
from funfactory.manage import path
from funfactory.settings_base import * # noqa
from funfactory.settings_base import JINJA_CONFIG as funfactory_JINJA_CONFIG
from urlparse import urljoin
from django.utils.functional import lazy
# Log settings
SYSLOG_TAG = "http_app_mozillians"
LOGGING = {
    'loggers': {
        'landing': {'level': logging.INFO},
        'phonebook': {'level': logging.INFO},
    },
}
# Database settings
# NOTE(review): NAME/USER/PASSWORD are absent here — presumably supplied by
# a per-environment settings module that imports this base; confirm.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'HOST': '',
        'PORT': '',
        'OPTIONS': {
            'init_command': 'SET storage_engine=InnoDB',
            'charset': 'utf8',
            'use_unicode': True,
        },
        'TEST_CHARSET': 'utf8',
        'TEST_COLLATION': 'utf8_general_ci',
    },
}
# L10n
LOCALE_PATHS = [path('locale')]
# Tells the extract script what files to parse for strings and what functions to use.
DOMAIN_METHODS = {
    'messages': [
        ('mozillians/**.py',
            'tower.management.commands.extract.extract_tower_python'),
        ('mozillians/**/templates/**.html',
            'tower.management.commands.extract.extract_tower_template'),
        ('templates/**.html',
            'tower.management.commands.extract.extract_tower_template'),
    ],
}
# Accepted locales
LANGUAGE_CODE = 'en-US'
PROD_LANGUAGES = ('ca', 'cs', 'de', 'en-US', 'en-GB', 'es', 'hu', 'fr', 'it', 'ko',
                  'nl', 'pl', 'pt-BR', 'ro', 'ru', 'sk', 'sl', 'sq', 'sr', 'sv-SE', 'zh-TW',
                  'zh-CN', 'lt', 'ja', 'hsb', 'dsb', 'uk',)
# List of RTL locales known to this project. Subset of LANGUAGES.
RTL_LANGUAGES = ()  # ('ar', 'fa', 'fa-IR', 'he')
# For absolute urls
PROTOCOL = "https://"
PORT = 443
# Templates.
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'jingo.Loader',
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = get_template_context_processors(
    append=['mozillians.common.context_processors.current_year',
            'mozillians.common.context_processors.canonical_path'])
# Apps excluded from jingo/Jinja2 rendering (they use stock Django templates).
JINGO_EXCLUDE_APPS = [
    'admin',
    'autocomplete_light',
    'browserid',
    'rest_framework',
]
def JINJA_CONFIG():
    """Jinja2 config: funfactory's defaults plus the django-compressor extension."""
    jinja_settings = funfactory_JINJA_CONFIG()
    # Append in place — funfactory owns the dict, we only extend its list.
    jinja_settings['extensions'].append(
        'compressor.contrib.jinja2ext.CompressorExtension')
    return jinja_settings
def COMPRESS_JINJA2_GET_ENVIRONMENT():
    """Give django-compressor the jingo-managed Jinja2 environment."""
    # Imported lazily so jingo is only touched once Django is configured.
    from jingo import env
    return env
MIDDLEWARE_CLASSES = get_middleware(append=[
    'commonware.response.middleware.StrictTransportMiddleware',
    'csp.middleware.CSPMiddleware',
    'django_statsd.middleware.GraphiteMiddleware',
    'django_statsd.middleware.GraphiteRequestTimingMiddleware',
    'django_statsd.middleware.TastyPieRequestTimingMiddleware',
    'mozillians.common.middleware.StrongholdMiddleware',
    'mozillians.phonebook.middleware.RegisterMiddleware',
    'mozillians.phonebook.middleware.UsernameRedirectionMiddleware',
    'mozillians.groups.middleware.OldGroupRedirectionMiddleware',
    'waffle.middleware.WaffleMiddleware',
])
# StrictTransport
STS_SUBDOMAINS = True
# Not all URLs need locale.
SUPPORTED_NONLOCALES = list(SUPPORTED_NONLOCALES) + [
    'csp',
    'api',
    'browserid',
    'contribute.json',
    'admin',
    'autocomplete',
    'humans.txt'
]
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'mozillians.common.authbackend.MozilliansAuthBackend'
)
USERNAME_MAX_LENGTH = 30
# On Login, we redirect through register.
LOGIN_URL = '/'
LOGIN_REDIRECT_URL = '/login/'
INSTALLED_APPS = get_apps(append=[
    'csp',
    'mozillians',
    'mozillians.users',
    'mozillians.phonebook',
    'mozillians.groups',
    'mozillians.common',
    'mozillians.api',
    'mozillians.mozspaces',
    'mozillians.funfacts',
    'mozillians.announcements',
    'mozillians.humans',
    'mozillians.geo',
    'sorl.thumbnail',
    'autocomplete_light',
    'django.contrib.admin',
    'import_export',
    'waffle',
    'rest_framework',
])
# Auth
PWD_ALGORITHM = 'bcrypt'
HMAC_KEYS = {
    '2011-01-01': 'cheesecake',
}
SESSION_COOKIE_HTTPONLY = True
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
SESSION_COOKIE_NAME = 'mozillians_sessionid'
ANON_ALWAYS = True
# Email
# Console backend writes mail to stdout; production settings are expected
# to override this.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
FROM_NOREPLY = u'Mozillians.org <no-reply@mozillians.org>'
FROM_NOREPLY_VIA = '%s via Mozillians.org <noreply@mozillians.org>'
# Cache backends (the original comment said "Auth", which was wrong).
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
    }
}
# 8 MiB cap on uploaded profile photos.
MAX_PHOTO_UPLOAD_SIZE = 8 * (1024 ** 2)
AUTO_VOUCH_DOMAINS = ('mozilla.com', 'mozilla.org', 'mozillafoundation.org')
AUTO_VOUCH_REASON = 'An automatic vouch for being a Mozilla employee.'
# Django-CSP
CSP_DEFAULT_SRC = ("'self'",
                   'http://*.mapbox.com',
                   'https://*.mapbox.com')
CSP_FONT_SRC = ("'self'",
                'http://*.mozilla.net',
                'https://*.mozilla.net',
                'http://*.mozilla.org',
                'https://*.mozilla.org')
CSP_FRAME_SRC = ("'self'",
                 'https://login.persona.org',)
CSP_IMG_SRC = ("'self'",
               'data:',
               'http://*.mozilla.net',
               'https://*.mozilla.net',
               'http://*.mozilla.org',
               'https://*.mozilla.org',
               '*.google-analytics.com',
               '*.gravatar.com',
               '*.wp.com',
               'http://*.mapbox.com',
               'https://*.mapbox.com')
CSP_SCRIPT_SRC = ("'self'",
                  'http://www.mozilla.org',
                  'https://www.mozilla.org',
                  'http://*.mozilla.net',
                  'https://*.mozilla.net',
                  'https://*.google-analytics.com',
                  'https://login.persona.org',
                  'http://*.mapbox.com',
                  'https://*.mapbox.com')
CSP_STYLE_SRC = ("'self'",
                 "'unsafe-inline'",
                 'http://www.mozilla.org',
                 'https://www.mozilla.org',
                 'http://*.mozilla.net',
                 'https://*.mozilla.net',
                 'http://*.mapbox.com',
                 'https://*.mapbox.com')
# Elasticutils settings
ES_DISABLED = True
ES_URLS = ['http://127.0.0.1:9200']
ES_INDEXES = {'default': 'mozillians',
              'public': 'mozillians-public'}
ES_INDEXING_TIMEOUT = 10
# Sorl settings
THUMBNAIL_DUMMY = True
THUMBNAIL_PREFIX = 'uploads/sorl-cache/'
# Statsd Graphite
STATSD_CLIENT = 'django_statsd.clients.normal'
# Basket
# If we're running tests, don't hit the real basket server.
if 'test' in sys.argv:
    BASKET_URL = 'http://127.0.0.1'
else:
    # Basket requires SSL now for some calls
    BASKET_URL = 'https://basket.mozilla.com'
BASKET_NEWSLETTER = 'mozilla-phone'
USER_AVATAR_DIR = 'uploads/userprofile'
MOZSPACE_PHOTO_DIR = 'uploads/mozspaces'
ANNOUNCEMENTS_PHOTO_DIR = 'uploads/announcements'
# Google Analytics
GA_ACCOUNT_CODE = 'UA-35433268-19'
# Set ALLOWED_HOSTS based on SITE_URL.
def _allowed_hosts():
    """Compute ALLOWED_HOSTS from SITE_URL: hostname only, no scheme or port."""
    from django.conf import settings
    from urlparse import urlparse
    netloc = urlparse(settings.SITE_URL).netloc  # drop protocol and path
    hostname = netloc.rsplit(':', 1)[0]          # drop an optional :port
    return [hostname]
# lazy() defers evaluation until SITE_URL is finalized by env settings.
ALLOWED_HOSTS = lazy(_allowed_hosts, list)()
STRONGHOLD_EXCEPTIONS = ['^%s' % MEDIA_URL,
                         '^/csp/',
                         '^/admin/',
                         '^/browserid/',
                         '^/api/']
# Set default avatar for user profiles
DEFAULT_AVATAR = 'img/default_avatar.png'
DEFAULT_AVATAR_URL = urljoin(MEDIA_URL, DEFAULT_AVATAR)
DEFAULT_AVATAR_PATH = os.path.join(MEDIA_ROOT, DEFAULT_AVATAR)
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
# Intentionally empty in the base settings; each deployment must override
# it — Django refuses to start with an empty SECRET_KEY.
SECRET_KEY = ''
USE_TZ = True
# Pagination: Items per page.
ITEMS_PER_PAGE = 24
COMPRESS_OFFLINE = True
COMPRESS_ENABLED = True
HUMANSTXT_GITHUB_REPO = 'https://api.github.com/repos/mozilla/mozillians/contributors'
HUMANSTXT_LOCALE_REPO = 'https://svn.mozilla.org/projects/l10n-misc/trunk/mozillians/locales'
HUMANSTXT_FILE = os.path.join(STATIC_ROOT, 'humans.txt')
HUMANSTXT_URL = urljoin(STATIC_URL, 'humans.txt')
# These must both be set to a working mapbox token for the maps to work.
MAPBOX_MAP_ID = 'examples.map-zr0njcqy'
# This is the token for the edit profile page alone.
MAPBOX_PROFILE_ID = MAPBOX_MAP_ID
def _browserid_request_args():
    """Build the BrowserID dialog arguments; include a site logo only over HTTPS."""
    from django.conf import settings
    from tower import ugettext_lazy as _lazy
    request_args = {
        'siteName': _lazy('Mozillians'),
    }
    # Persona only accepts a siteLogo from an https origin.
    if settings.SITE_URL.startswith('https'):
        request_args['siteLogo'] = urljoin(STATIC_URL, "mozillians/img/apple-touch-icon-144.png")
    return request_args
def _browserid_audiences():
    """Return the valid BrowserID audiences (just SITE_URL); wrapped in lazy() so settings are loaded first."""
    from django.conf import settings
    return [settings.SITE_URL]
# BrowserID creates a user if one doesn't exist.
BROWSERID_CREATE_USER = True
BROWSERID_VERIFY_CLASS = 'mozillians.common.authbackend.BrowserIDVerify'
BROWSERID_REQUEST_ARGS = lazy(_browserid_request_args, dict)()
BROWSERID_AUDIENCES = lazy(_browserid_audiences, list)()
# All accounts limited to 6 vouches total. Bug 997400.
VOUCH_COUNT_LIMIT = 6
# Accounts need this many vouches before they can vouch for others.
# (The original comment said "1 vouches", contradicting the value of 3.)
CAN_VOUCH_THRESHOLD = 3
REST_FRAMEWORK = {
    'URL_FIELD_NAME': '_url',
    'PAGINATE_BY': 30,
    'MAX_PAGINATE_BY': 200,
    'DEFAULT_PERMISSION_CLASSES': (
        'mozillians.api.v2.permissions.MozilliansPermission',
    ),
    'DEFAULT_MODEL_SERIALIZER_CLASS':
        'rest_framework.serializers.HyperlinkedModelSerializer',
    'DEFAULT_FILTER_BACKENDS': (
        'rest_framework.filters.DjangoFilterBackend',
        'rest_framework.filters.OrderingFilter',
    ),
}
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
|
bsd-3-clause
|
madj4ck/ansible
|
lib/ansible/utils/hashing.py
|
202
|
3125
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError
# Note, sha1 is the only hash algorithm compatible with python2.4 and with
# FIPS-140 mode (as of 11-2014)
try:
from hashlib import sha1 as sha1
except ImportError:
from sha import sha as sha1
# Backwards compat only
try:
from hashlib import md5 as _md5
except ImportError:
try:
from md5 import md5 as _md5
except ImportError:
# Assume we're running in FIPS mode here
_md5 = None
def secure_hash_s(data, hash_func=sha1):
    ''' Return a secure hash hex digest of data. '''
    hasher = hash_func()
    try:
        # Non-string values are stringified first so anything hashable as
        # text can be checksummed.
        if not isinstance(data, basestring):
            data = "%s" % data
        hasher.update(data)
    except UnicodeEncodeError:
        # Unicode text that cannot be fed to the hash directly is
        # re-encoded as UTF-8 bytes.
        hasher.update(data.encode('utf-8'))
    return hasher.hexdigest()
def secure_hash(filename, hash_func=sha1):
    ''' Return a secure hash hex digest of local file, None if file is not present or a directory. '''
    if not os.path.exists(filename) or os.path.isdir(filename):
        return None
    digest = hash_func()
    blocksize = 64 * 1024
    try:
        infile = open(filename, 'rb')
        try:
            # Hash in fixed-size blocks so arbitrarily large files never
            # need to be read into memory at once.
            block = infile.read(blocksize)
            while block:
                digest.update(block)
                block = infile.read(blocksize)
        finally:
            # The original closed the file only on the success path,
            # leaking the handle whenever read() raised; always close it.
            infile.close()
    except IOError as e:
        raise AnsibleError("error while accessing the file %s, error was: %s" % (filename, e))
    return digest.hexdigest()
# The checksum algorithm must match the one used by the remote side's
# ShellModule.checksum() method.
checksum = secure_hash      # file checksum (sha1 by default)
checksum_s = secure_hash_s  # string checksum (sha1 by default)
# Backwards compat functions. Some modules include md5s in their return values
# Continue to support that for now. As of ansible-1.8, all of those modules
# should also return "checksum" (sha1 for now)
# Do not use md5 unless it is needed for:
# 1) Optional backwards compatibility
# 2) Compliance with a third party protocol
#
# MD5 will not work on systems which are FIPS-140-2 compliant.
def md5s(data):
    """MD5 hex digest of a string; raises ValueError when MD5 is unavailable (FIPS)."""
    if _md5 is None:
        raise ValueError('MD5 not available. Possibly running in FIPS mode')
    return secure_hash_s(data, _md5)
def md5(filename):
    """MD5 hex digest of a file; raises ValueError when MD5 is unavailable (FIPS)."""
    if _md5 is None:
        raise ValueError('MD5 not available. Possibly running in FIPS mode')
    return secure_hash(filename, _md5)
|
gpl-3.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.