from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('aggregator', '0031_dataset_order'),
]
operations = [
migrations.AddField(
model_name='dimension',
name='dataType',
field=models.CharField(max_length=256, null=True),
),
migrations.AddField(
model_name='dimension',
name='original_column_name',
field=models.CharField(max_length=256, null=True),
),
migrations.AddField(
model_name='dimension',
name='sameAs',
field=models.CharField(max_length=256, null=True),
),
migrations.AddField(
model_name='variable',
name='dataType',
field=models.CharField(max_length=256, null=True),
),
migrations.AddField(
model_name='variable',
name='original_column_name',
field=models.CharField(max_length=256, null=True),
),
migrations.AddField(
model_name='variable',
name='sameAs',
field=models.CharField(max_length=256, null=True),
),
]
|
import time
import sys
import _mysql
import random
import string
import re
import os
import traceback
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
import selenium.webdriver.chrome.service as service
service = service.Service(r'D:\ChromeDriver\chromedriver')
service.start()
capabilities = {'chrome.binary': r'C:\Program Files (x86)\Google\Chrome\Application\chrome'}  # Chrome path is different for everyone
driver = webdriver.Remote(service.service_url, capabilities)
try:
    # Insert a test cat with random attributes directly into the database
    db = _mysql.connect('localhost', 'root', 'root', 'paws_db')
rand_name=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
rand_color=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
rand_coat=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
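    # Note: _mysql.query() offers no parameter binding; the random values above
    # are alphanumeric so direct interpolation is safe in this test, but
    # untrusted input would need db.escape_string() first.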
db.query("INSERT INTO cats (cat_name, color, coat,is_kitten, dob, is_female, breed_id, bio, created,is_deleted) VALUES (\""+rand_name+"\",\""+rand_color+"\",\""+rand_coat+"\",1,'2001-03-20',1,1,\"Fast health regeneration, adamantium claws, aggressive...\",NOW(),false);")
db.store_result()
db.query("SELECT id,cat_name FROM cats where cat_name=\""+rand_name+"\"")
r=db.store_result()
k=r.fetch_row(1,1)
cat_id = k[0].get('id')
    driver.set_window_size(sys.argv[1], sys.argv[2])
    driver.get('http://localhost:8765')
driver.find_element_by_id('email').send_keys('theparrotsarecoming@gmail.com')
driver.find_element_by_id('password').send_keys('password')
driver.find_element_by_css_selector('input[type="submit"]').click()
    driver.get('http://localhost:8765/cats/view/' + cat_id)
upload_elem = driver.find_element_by_css_selector('a[data-ix="attachment-notification"]')
upload_elem.click()
driver.find_element_by_css_selector('a[data-ix="add-file-click-desktop"]').click()
browse = driver.find_element_by_id("uploaded-file")
browse.send_keys(os.getcwd()+"/doc/test_doc_1.pdf")
driver.find_element_by_id('file-note').send_keys(Keys.RETURN)
src = driver.page_source
if 'test_doc_1.pdf' in src:
print("pass")
else:
print('fail')
except Exception as e:
traceback.print_exc()
print(e)
print("fail")
finally:
driver.quit()
|
"""Add a new SSH key."""
from os import path
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
@click.command()
@click.argument('label')
@click.option('--in-file', '-f',
type=click.Path(exists=True),
help="The id_rsa.pub file to import for this key")
@click.option('--key', '-k', help="The actual SSH key")
@click.option('--note', help="Extra note that will be associated with key")
@environment.pass_env
def cli(env, label, in_file, key, note):
"""Add a new SSH key."""
if in_file is None and key is None:
raise exceptions.ArgumentError(
'Either [-f | --in-file] or [-k | --key] arguments are required to add a key'
)
if in_file and key:
raise exceptions.ArgumentError(
'[-f | --in-file] is not allowed with [-k | --key]'
)
if key:
key_text = key
else:
        with open(path.expanduser(in_file), encoding="utf-8") as key_file:
            key_text = key_file.read().strip()
mgr = SoftLayer.SshKeyManager(env.client)
result = mgr.add_key(key_text, label, note)
env.fout("SSH key added: %s" % result.get('fingerprint'))
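# Illustrative invocation, assuming this module is wired up as `sshkey add` in
# the SoftLayer CLI:
#   slcli sshkey add my-key -f ~/.ssh/id_rsa.pub --note "laptop key"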
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('inventory', '0021_license_view'),
]
operations = [
migrations.AddField(
model_name='collaborator',
name='license_id',
field=models.IntegerField(default=-1),
),
]
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/components/droid_interface/shared_ddi_seinar_interface_mk4.iff"
result.attribute_template_id = 8
result.stfName("space/space_item","ddi_seinar_interface_mk4")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
import random
def main():
"""Main"""
# Initialize
frequency1 = 0
frequency2 = 0
frequency3 = 0
frequency4 = 0
frequency5 = 0
frequency6 = 0
for roll in range(1, 6001): # Rolls a die 6000 times
        face = random.randrange(1, 7)  # Random integer from 1 to 6 inclusive
# Count frequencies
if face == 1:
frequency1 += 1
elif face == 2:
frequency2 += 1
elif face == 3:
frequency3 += 1
elif face == 4:
frequency4 += 1
elif face == 5:
frequency5 += 1
elif face == 6:
frequency6 += 1
else:
print "It should never get here!"
print "%4s %13s" % ("Face", "Frequency")
print "%4s %13s" % (1, frequency1)
print "%4s %13s" % (2, frequency2)
print "%4s %13s" % (3, frequency3)
print "%4s %13s" % (4, frequency4)
print "%4s %13s" % (5, frequency5)
print "%4s %13s" % (6, frequency6)
if __name__ == "__main__":
main()
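# A shorter, equivalent tally using a dict instead of six separate counters
# (illustrative sketch of the same logic as main() above):
# frequencies = dict((face, 0) for face in range(1, 7))
# for roll in range(6000):
#     frequencies[random.randrange(1, 7)] += 1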
|
from plugins.Plugin import Plugin
import os
import json
import time
import sqlite3
DB_PATH = "market_data/loan_history.sqlite3"
class Charts(Plugin):
def on_bot_init(self):
super(Charts, self).on_bot_init()
# If there's no history database, can't use this
if not os.path.isfile(DB_PATH):
self.log.log_error("DB Doesn't Exist. 'AccountStats' plugin must be enabled.")
return
self.log.addSectionLog("plugins", "charts", { 'navbar': True })
self.db = sqlite3.connect('market_data/loan_history.sqlite3')
self.last_dump = 0
self.dump_interval = int(self.config.get("CHARTS", "DumpInterval", 21600))
self.history_file = self.config.get("CHARTS", "HistoryFile", "www/history.json")
self.activeCurrencies = self.config.get_all_currencies()
def before_lending(self):
return
    def after_lending(self):
        # Use the time module directly; sqlite3.time only works because sqlite3
        # happens to re-export it.
        if self.get_db_version() > 0 and self.last_dump + self.dump_interval < time.time():
            self.log.log("Dumping Charts Data")
            self.dump_history()
            self.last_dump = time.time()
def get_db_version(self):
return self.db.execute("PRAGMA user_version").fetchone()[0]
def dump_history(self):
cursor = self.db.cursor()
data = { }
placeholder = '?'
placeholders = ', '.join(placeholder for unused in self.activeCurrencies)
# Get distinct coins
query = "SELECT DISTINCT currency FROM history WHERE currency IN (%s) ORDER BY currency DESC" % placeholders
cursor.execute(query, self.activeCurrencies)
for i in cursor:
data[i[0]] = []
# Loop over the coins and get data for each
for coin in data:
runningTotal = 0.0
cursor.execute("SELECT strftime('%%s', strftime('%%Y-%%m-%%d 00:00:00', close)) ts, round(SUM(earned), 8) i " \
"FROM history WHERE currency = '%s' GROUP BY ts ORDER BY ts" % (coin));
for row in cursor:
runningTotal += float(row[1])
data[coin].append([ int(row[0]), float(row[1]), float(runningTotal) ])
# Dump data to file
with open(self.history_file, "w") as hist:
hist.write(json.dumps(data))
self.log.log("Charts Plugin: History dumped. You can open charts.html.")
cursor.close()
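# Shape of the dumped history file (illustrative; currency codes are examples):
#   {"BTC": [[day_timestamp, earned_that_day, running_total], ...], ...}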
|
from datetime import datetime
from ..extensions import db
class Notification(db.Model):
__tablename__ = "notifications"
id = db.Column(db.Integer, primary_key=True)
created_at = db.Column(db.DateTime(), default=datetime.now)
message = db.Column(db.String(255), nullable=False)
category = db.Column(db.String(32))
action = db.Column(db.String(32))
seen = db.Column(db.Boolean(), default=False)
# Relations
user_id = db.Column(db.Integer(),
db.ForeignKey('users.id'))
def __init__(self, *args, **kwargs):
super(Notification, self).__init__(*args, **kwargs)
def __repr__(self):
return u"<Notification #{0}>".format(self.id)
|
import json
from aleph.tests.util import TestCase
class ReconcileApiTestCase(TestCase):
def setUp(self):
super(ReconcileApiTestCase, self).setUp()
def test_index(self):
res = self.client.get('/api/freebase/reconcile')
assert res.status_code == 200, res
assert 'schemaSpace' in res.json, res.json
def test_recon(self):
self.load_fixtures('docs.yaml')
res = self.client.get('/api/freebase/reconcile?query=kwazulu')
assert res.json['num'] == 1, res
assert res.json['result'][0]['name'] == 'KwaZulu', res.json
data = json.dumps({'query': 'KWazulu'})
res = self.client.get('/api/freebase/reconcile?query=%s' % data)
assert res.json['num'] == 1, res
assert res.json['result'][0]['name'] == 'KwaZulu', res.json
def test_suggest(self):
self.load_fixtures('docs.yaml')
res = self.client.get('/api/freebase/suggest')
assert res.status_code == 200, res
assert 'result' in res.json, res.json
assert not len(res.json['result']), res.json
res = self.client.get('/api/freebase/suggest?prefix=kwa')
assert res.status_code == 200, res
assert 1 == len(res.json['result']), res.json
res = self.client.get('/api/freebase/suggest?prefix=kwa&type=Person')
assert res.status_code == 200, res
assert 0 == len(res.json['result']), res.json
res = self.client.get('/api/freebase/suggest?prefix=kwa&type=Company')
assert res.status_code == 200, res
assert 1 == len(res.json['result']), res.json
def test_property(self):
res = self.client.get('/api/freebase/property')
assert res.status_code == 200, res
assert 'result' in res.json, res.json
assert 5 == len(res.json['result']), res.json
res = self.client.get('/api/freebase/property?prefix=email')
assert 1 == len(res.json['result']), res.json
res = self.client.get('/api/freebase/property?prefix=banana')
assert 0 == len(res.json['result']), res.json
def test_type(self):
res = self.client.get('/api/freebase/type')
assert res.status_code == 200, res
assert len(res.json['result']) > 3, res.json
        res = self.client.get('/api/freebase/type?prefix=Compa')
        assert 1 == len(res.json['result']), res.json
        res = self.client.get('/api/freebase/type?prefix=Banana')
        assert 0 == len(res.json['result']), res.json
|
import os
os.system("sudo python setup.py install")
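# A stricter sketch of the same step: subprocess raises CalledProcessError on
# failure, where os.system leaves the shell exit code unchecked.
# import subprocess
# subprocess.check_call(["sudo", "python", "setup.py", "install"])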
|
if __name__ == '__main__':
from types import MappingProxyType
d = {1: 'A'}
d_proxy = MappingProxyType(d)
print(repr(d_proxy))
print(d_proxy[1])
d[2] = 'B'
print(d_proxy[2])
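    # The proxy is a read-only view of d: item assignment such as
    # d_proxy[3] = 'C' raises "TypeError: 'mappingproxy' object does not
    # support item assignment", while changes made to d stay visible through it.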
|
class SecurionPayException(Exception):
def __init__(self, type, code, message, charge_id, blacklist_rule_id):
self.type = type
self.code = code
self.message = message
self.charge_id = charge_id
self.blacklist_rule_id = blacklist_rule_id
def __str__(self):
return 'SecurionPayException:\n\tType: %s\n\tCode: %s\n\tMessage: %s\n\tChargeId: %s\n\tBlacklistRuleId: %s' %\
tuple([str(v) for v in [self.type, self.code, self.message, self.charge_id, self.blacklist_rule_id]])
|
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
BASE_SCRIPTS = [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'wallet-hd.py',
'walletbackup.py',
# vv Tests less than 5m vv
'p2p-fullblocktest.py',
'fundrawtransaction.py',
'p2p-compactblocks.py',
'segwit.py',
# vv Tests less than 2m vv
'wallet.py',
'wallet-accounts.py',
'p2p-segwit.py',
'wallet-dump.py',
'listtransactions.py',
# vv Tests less than 60s vv
'sendheaders.py',
'zapwallettxes.py',
'importmulti.py',
'mempool_limit.py',
'merkle_blocks.py',
'receivedby.py',
'abandonconflict.py',
'bip68-112-113-p2p.py',
'rawtransactions.py',
'reindex.py',
# vv Tests less than 30s vv
'keypool-topup.py',
'zmq_test.py',
'bitcoin_cli.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_persist.py',
'multiwallet.py',
'httpbasics.py',
'multi_rpc.py',
'proxy_test.py',
'signrawtransactions.py',
'disconnect_ban.py',
'decodescript.py',
'blockchain.py',
'disablewallet.py',
'net.py',
'keypool.py',
'p2p-mempool.py',
'prioritise_transaction.py',
'invalidblockrequest.py',
'invalidtxrequest.py',
'p2p-versionbits-warning.py',
'preciousblock.py',
'importprunedfunds.py',
'signmessages.py',
'nulldummy.py',
'import-rescan.py',
'mining.py',
'bumpfee.py',
'rpcnamedargs.py',
'listsinceblock.py',
'p2p-leaktests.py',
'wallet-encryption.py',
'bipdersig-p2p.py',
'bip65-cltv-p2p.py',
'uptime.py',
'resendwallettransactions.py',
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'pruning.py',
# vv Tests less than 20m vv
'smartfees.py',
# vv Tests less than 5m vv
'maxuploadtarget.py',
'mempool_packages.py',
'dbcrash.py',
# vv Tests less than 2m vv
'bip68-sequence.py',
'getblocktemplate_longpoll.py',
'p2p-timeouts.py',
# vv Tests less than 60s vv
'bip9-softforks.py',
'p2p-feefilter.py',
'rpcbind_test.py',
# vv Tests less than 30s vv
'assumevalid.py',
'example_test.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'forknotify.py',
'invalidateblock.py',
'p2p-acceptblock.py',
'replace-by-fee.py',
]
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile))
passon_args.append("--configfile=%s" % configfile)
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/bitcoin_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not (enable_wallet and enable_utils and enable_bitcoind):
print("No functional tests to run. Wallet, utils, and bitcoind must all be enabled")
print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
sys.exit(0)
# Build list of tests
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
        tests = [re.sub(r"\.py$", "", t) + ".py" for t in tests]
test_list = []
for t in tests:
if t in ALL_SCRIPTS:
test_list.append(t)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], t))
else:
# No individual tests have been specified.
# Run all base tests, and optionally run extended tests.
test_list = BASE_SCRIPTS
if args.extended:
# place the EXTENDED_SCRIPTS first since the three longest ones
# are there and the list is shorter
test_list = EXTENDED_SCRIPTS + test_list
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
        tests_excl = [re.sub(r"\.py$", "", t) + ".py" for t in args.exclude.split(',')]
for exclude_test in tests_excl:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h'])
sys.exit(0)
check_script_list(config["environment"]["SRCDIR"])
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args)
def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=None):
    args = args or []
# Warn if bitcoind is already running (unix only)
try:
if subprocess.check_output(["pidof", "bitcoind"]) is not None:
print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
#Set env vars
if "BITCOIND" not in os.environ:
os.environ["BITCOIND"] = build_dir + '/src/bitcoind' + exeext
os.environ["BITCOINCLI"] = build_dir + '/src/bitcoin-cli' + exeext
tests_dir = src_dir + '/test/functional/'
flags = ["--srcdir={}/src".format(build_dir)] + args
flags.append("--cachedir=%s" % cache_dir)
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
subprocess.check_output([tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
#Run Tests
job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
time0 = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
for _ in range(len(test_list)):
test_result, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
if test_result.status == "Passed":
logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
elif test_result.status == "Skipped":
logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
else:
print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
print_results(test_results, max_len_name, (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=lambda result: result.name.lower())
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
portseed = len(self.test_list) + self.portseed_offset
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = t.split()
tmpdir = ["--tmpdir=%s/%s_%s" % (self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)]
self.jobs.append((t,
time.time(),
subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, log_out, log_err) = j
if os.getenv('TRAVIS') == 'true' and int(time.time() - time0) > 20 * 60:
                    # In travis, timeout individual tests after 20 minutes (to stop tests
                    # hanging and not providing useful output).
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(j)
return TestResult(name, status, int(time.time() - time0)), stdout, stderr
print('.', end='', flush=True)
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_list(src_dir):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
    not being run by test_runner.py."""
script_dir = src_dir + '/test/functional/'
python_files = set([t for t in os.listdir(script_dir) if t[-3:] == ".py"])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if os.getenv('TRAVIS') == 'true':
# On travis this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
class RPCCoverage(object):
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
        # This is shared from `test/functional/test_framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
main()
|
import skimage.io # bug. need to import this before tensorflow
import skimage.transform # bug. need to import this before tensorflow
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.training import moving_averages
from config import Config
import datetime
import numpy as np
import os
import time
MOVING_AVERAGE_DECAY = 0.9997
BN_DECAY = MOVING_AVERAGE_DECAY
BN_EPSILON = 0.001
CONV_WEIGHT_DECAY = 0.00004
CONV_WEIGHT_STDDEV = 0.1
FC_WEIGHT_DECAY = 0.00004
FC_WEIGHT_STDDEV = 0.01
RESNET_VARIABLES = 'resnet_variables'
UPDATE_OPS_COLLECTION = 'resnet_update_ops' # must be grouped with training op
IMAGENET_MEAN_BGR = [103.062623801, 115.902882574, 123.151630838, ]
tf.app.flags.DEFINE_integer('input_size', 224, "input image size")
activation = tf.nn.relu
def inference(x, is_training,
num_classes=1000,
num_blocks=[3, 4, 6, 3], # defaults to 50-layer network
use_bias=False, # defaults to using batch norm
bottleneck=True):
c = Config()
c['bottleneck'] = bottleneck
c['is_training'] = tf.convert_to_tensor(is_training,
dtype='bool',
name='is_training')
c['ksize'] = 3
c['stride'] = 1
c['use_bias'] = use_bias
c['fc_units_out'] = num_classes
c['num_blocks'] = num_blocks
c['stack_stride'] = 2
with tf.variable_scope('scale1'):
c['conv_filters_out'] = 64
c['ksize'] = 7
c['stride'] = 2
x = conv(x, c)
x = bn(x, c)
x = activation(x)
with tf.variable_scope('scale2'):
x = _max_pool(x, ksize=3, stride=2)
c['num_blocks'] = num_blocks[0]
c['stack_stride'] = 1
c['block_filters_internal'] = 64
x = stack(x, c)
with tf.variable_scope('scale3'):
c['num_blocks'] = num_blocks[1]
c['block_filters_internal'] = 128
assert c['stack_stride'] == 2
x = stack(x, c)
with tf.variable_scope('scale4'):
c['num_blocks'] = num_blocks[2]
c['block_filters_internal'] = 256
x = stack(x, c)
with tf.variable_scope('scale5'):
c['num_blocks'] = num_blocks[3]
c['block_filters_internal'] = 512
x = stack(x, c)
# post-net
x = tf.reduce_mean(x, axis=[1, 2], name="avg_pool")
    if num_classes is not None:
with tf.variable_scope('fc'):
x = fc(x, c)
return x
def inference_small(x,
is_training,
num_blocks=3, # 6n+2 total weight layers will be used.
use_bias=False, # defaults to using batch norm
num_classes=10):
c = Config()
c['is_training'] = tf.convert_to_tensor(is_training,
dtype='bool',
name='is_training')
c['use_bias'] = use_bias
c['fc_units_out'] = num_classes
c['num_blocks'] = num_blocks
c['num_classes'] = num_classes
    return inference_small_config(x, c)
def inference_small_config(x, c):
c['bottleneck'] = False
c['ksize'] = 3
c['stride'] = 1
with tf.variable_scope('scale1'):
c['conv_filters_out'] = 16
c['block_filters_internal'] = 16
c['stack_stride'] = 1
x = conv(x, c)
x = bn(x, c)
x = activation(x)
x = stack(x, c)
with tf.variable_scope('scale2'):
c['block_filters_internal'] = 32
c['stack_stride'] = 2
x = stack(x, c)
with tf.variable_scope('scale3'):
c['block_filters_internal'] = 64
c['stack_stride'] = 2
x = stack(x, c)
# post-net
x = tf.reduce_mean(x, axis=[1, 2], name="avg_pool")
    if c['num_classes'] is not None:
with tf.variable_scope('fc'):
x = fc(x, c)
return x
def _imagenet_preprocess(rgb):
"""Changes RGB [0,1] valued image to BGR [0,255] with mean subtracted."""
red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb * 255.0)
bgr = tf.concat(axis=3, values=[blue, green, red])
bgr -= IMAGENET_MEAN_BGR
return bgr
def loss(logits, labels):
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
cross_entropy_mean = tf.reduce_mean(cross_entropy)
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss_ = tf.add_n([cross_entropy_mean] + regularization_losses)
tf.summary.scalar('loss', loss_)
return loss_
def stack(x, c):
for n in range(c['num_blocks']):
s = c['stack_stride'] if n == 0 else 1
c['block_stride'] = s
with tf.variable_scope('block%d' % (n + 1)):
x = block(x, c)
return x
def block(x, c):
filters_in = x.get_shape()[-1]
    # Note: filters_internal is how many filters the 3x3 convs output
    # internally; it equals the number of filters the block outputs only when
    # bottleneck=False. When bottleneck=True the block outputs
    # filters_internal * 4 filters.
m = 4 if c['bottleneck'] else 1
filters_out = m * c['block_filters_internal']
shortcut = x # branch 1
c['conv_filters_out'] = c['block_filters_internal']
if c['bottleneck']:
with tf.variable_scope('a'):
c['ksize'] = 1
c['stride'] = c['block_stride']
x = conv(x, c)
x = bn(x, c)
x = activation(x)
with tf.variable_scope('b'):
c['ksize'] = 3
x = conv(x, c)
x = bn(x, c)
x = activation(x)
with tf.variable_scope('c'):
c['conv_filters_out'] = filters_out
c['ksize'] = 1
assert c['stride'] == 1
x = conv(x, c)
x = bn(x, c)
else:
with tf.variable_scope('A'):
c['stride'] = c['block_stride']
assert c['ksize'] == 3
x = conv(x, c)
x = bn(x, c)
x = activation(x)
with tf.variable_scope('B'):
c['conv_filters_out'] = filters_out
assert c['ksize'] == 3
assert c['stride'] == 1
x = conv(x, c)
x = bn(x, c)
with tf.variable_scope('shortcut'):
if filters_out != filters_in or c['block_stride'] != 1:
c['ksize'] = 1
c['stride'] = c['block_stride']
c['conv_filters_out'] = filters_out
shortcut = conv(shortcut, c)
shortcut = bn(shortcut, c)
return activation(x + shortcut)
def bn(x, c):
x_shape = x.get_shape()
params_shape = x_shape[-1:]
if c['use_bias']:
bias = _get_variable('bias', params_shape,
initializer=tf.zeros_initializer())
return x + bias
axis = list(range(len(x_shape) - 1))
beta = _get_variable('beta',
params_shape,
initializer=tf.zeros_initializer())
gamma = _get_variable('gamma',
params_shape,
initializer=tf.ones_initializer())
moving_mean = _get_variable('moving_mean',
params_shape,
initializer=tf.zeros_initializer(),
trainable=False)
moving_variance = _get_variable('moving_variance',
params_shape,
initializer=tf.ones_initializer(),
trainable=False)
    # These ops will only be performed when training.
mean, variance = tf.nn.moments(x, axis)
update_moving_mean = moving_averages.assign_moving_average(moving_mean,
mean, BN_DECAY)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, BN_DECAY)
tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
mean, variance = control_flow_ops.cond(
c['is_training'], lambda: (mean, variance),
lambda: (moving_mean, moving_variance))
x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)
#x.set_shape(inputs.get_shape()) ??
return x
def fc(x, c):
num_units_in = x.get_shape()[1]
num_units_out = c['fc_units_out']
weights_initializer = tf.truncated_normal_initializer(
stddev=FC_WEIGHT_STDDEV)
weights = _get_variable('weights',
shape=[num_units_in, num_units_out],
initializer=weights_initializer,
weight_decay=FC_WEIGHT_STDDEV)
biases = _get_variable('biases',
shape=[num_units_out],
initializer=tf.zeros_initializer())
x = tf.nn.xw_plus_b(x, weights, biases)
return x
def _get_variable(name,
shape,
initializer,
weight_decay=0.0,
dtype='float',
trainable=True):
"A little wrapper around tf.get_variable to do weight decay and add to"
"resnet collection"
if weight_decay > 0:
regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
else:
regularizer = None
collections = [tf.GraphKeys.GLOBAL_VARIABLES, RESNET_VARIABLES]
return tf.get_variable(name,
shape=shape,
initializer=initializer,
dtype=dtype,
regularizer=regularizer,
collections=collections,
trainable=trainable)
def conv(x, c):
ksize = c['ksize']
stride = c['stride']
filters_out = c['conv_filters_out']
filters_in = x.get_shape()[-1]
shape = [ksize, ksize, filters_in, filters_out]
initializer = tf.truncated_normal_initializer(stddev=CONV_WEIGHT_STDDEV)
weights = _get_variable('weights',
shape=shape,
dtype='float',
initializer=initializer,
weight_decay=CONV_WEIGHT_DECAY)
return tf.nn.conv2d(x, weights, [1, stride, stride, 1], padding='SAME')
def _max_pool(x, ksize=3, stride=2):
return tf.nn.max_pool(x,
ksize=[1, ksize, ksize, 1],
strides=[1, stride, stride, 1],
padding='SAME')
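# Illustrative graph construction with the functions above (TF 1.x style;
# `images` and `labels` are hypothetical tensors, not defined in this module):
# images = tf.placeholder(tf.float32, [None, 224, 224, 3])
# logits = inference(images, is_training=True)  # 50-layer network by default
# total_loss = loss(logits, labels)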
|
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import numpy as np
import time, os
import tvm
import tvm.contrib.graph_runtime as runtime
from tvm import relay
import tensorflow as tf
import tvm.relay.testing.tf as tf_testing
from tensorflow import nn
np.random.seed(0)
"""
Network parameters
"""
BATCH_SIZE = 8
N = 112
FIN = 3
FOUT = 32
K_Y = 3
K_X = 3
NB_TESTS = 101
"""
Target settings
"""
target = "llvm -mcpu=core-avx2"
target_host = "llvm"
layout = None
input_shape = (BATCH_SIZE, N + 2, N + 2, FIN)
dtype = "float32"
"""
Create the graph in TensorFlow
"""
def Convolution(X, weights, bias):
conv = nn.conv2d(X, weights, strides=[1, 1, 1, 1], padding="VALID", data_format="NHWC")
conv_bias = nn.bias_add(conv, bias, data_format="NHWC")
return conv_bias
weights = np.random.rand(K_Y, K_X, FIN, FOUT).astype(np.float32)  # conv2d needs the filter dtype to match the float32 input
bias = np.random.rand(FOUT).astype(np.float32)
X = tf.compat.v1.placeholder(tf.float32, [BATCH_SIZE, N + 2, N + 2, FIN], name="X")
activations = Convolution(X, weights, bias)
model_path = "tf_model.pb"
tf.io.write_graph(tf.compat.v1.get_default_graph(), "", model_path, as_text=False)
"""
Create the graph in TVM and compile it
"""
with tf.io.gfile.GFile(model_path, "rb") as f:
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
graph = tf.import_graph_def(graph_def, name="")
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
shape_dict = {"X": input_shape}
mod, parameters = relay.frontend.from_tensorflow(graph_def, layout=layout, shape=shape_dict)
with relay.build_config(opt_level=3):
graph, lib, params = relay.build_module.build(
mod, target=target, params=parameters)
""" Execute and evaluate the graph """
ctx = tvm.cpu()
data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
module = runtime.create(graph, lib, ctx)
module.set_input("X", data_tvm)
module.set_input(**params)
print("Evaluate inference time cost...")
ftimer = module.module.time_evaluator("run", ctx, number=NB_TESTS, repeat=1)
prof_res = np.array(ftimer().results) * 1000 # convert to millisecond
print("Network execution time : ", np.median(prof_res))
|
from util_graph import *
if __name__ == '__main__':
get_labeled_graph_for_CC('./data/graph/graph.dat', './data/graph/graph_labeled_CC.dat', './data/graph/nodes_hash_CC.dat')
|
from django.conf.urls import patterns, url
from tour import api
urlpatterns = patterns(
'',
url(r'^api/tour/$', api.TourApiView.as_view(), name='tour.tour_api'),
)
|
from pprint import pprint
from config_loader import try_load_from_file
from hpOneView.exceptions import HPOneViewException
from hpOneView.oneview_client import OneViewClient
config = {
"ip": "",
"credentials": {
"userName": "administrator",
"password": ""
}
}
fabric_id = ''
config = try_load_from_file(config)
oneview_client = OneViewClient(config)
print("Get all fabrics")
fabrics = oneview_client.fabrics.get_all()
pprint(fabrics)
print("\nGet all fabrics sorting by name")
fabrics_sorted = oneview_client.fabrics.get_all(sort='name:descending')
pprint(fabrics_sorted)
try:
print("\nGet a fabric by id")
fabrics_byid = oneview_client.fabrics.get(fabric_id)
pprint(fabrics_byid)
except HPOneViewException as e:
print(e.msg['message'])
print("\nGet a fabrics by name")
fabric_byname = oneview_client.fabrics.get_by('name', 'DefaultFabric')[0]
pprint(fabric_byname)
|
class Solution:
# @return an integer
    def numTrees(self, n):
        # Catalan recurrence: C(n) = sum over i of C(i) * C(n-1-i)
        solutions = [1, 1, 2]
        for i in range(3, n + 1):
            solutions.append(sum(solutions[j] * solutions[-1 - j] for j in range(len(solutions))))
        return solutions[n]
def main():
solver = Solution()
tests = list(range(5))
for test in tests:
print(test)
print(' ->')
result = solver.numTrees(test)
print(result)
        print('~' * 10)
if __name__ == '__main__':
main()
|
import gzip
from pathlib import Path
from typing import Mapping, Sequence
from urllib.request import urlretrieve
import os
from declarative_parser.parser import Argument, Parser, action
from models import Gene
from utils import jit
REMOTE = 'https://github.com/kn-bibs/pathways-data/raw/master/gsea/msigdb/'
DATA_DIR = Path('data')
def gzip_open_text(path, mode='r'):
return gzip.open(path, mode + 't')
class GeneSet:
def __init__(self, name, genes, url=None):
self.name = name
self.genes = {Gene(name) for name in genes}
self.url = url
self.enrichment = None
# Following is meant to enable fast __contains__ test:
# granted, one might want to simply use memory address
# retrieved with id(gene) but it is not guaranteed to
# (and will not) work with multiprocessing.
        # On the other hand a set of integers is easily picklable,
        # as is the integer-holding 'id' attribute of Gene(s).
# Trivia: set(list-comprehension) is faster than set(generator).
self.gene_ids = set([gene.id for gene in self.genes])
def restrict_to_genes(self, genes: Sequence[Gene]):
"""Clear itself of genes which are not in the the provided list.
Returns: set of genes removed from the dataset
"""
excessive_genes = self.genes.difference(genes)
for gene in excessive_genes:
self.genes.discard(gene)
self.gene_ids.discard(gene.id)
return excessive_genes
def __contains__(self, gene: Gene):
return gene.id in self.gene_ids
def __len__(self):
return len(self.genes)
def __lt__(self, other):
return self.enrichment < other.enrichment
def __repr__(self):
return f'<GeneSet: {self.name} with {len(self.genes)} genes>'
class MolecularSignatureDatabase:
def __init__(self, gene_sets: Mapping[str, GeneSet], label=None):
self.label = label
self.gene_sets = gene_sets
class GMTSignatureDatabase(MolecularSignatureDatabase):
def __init__(self, path, label=None):
self.path = DATA_DIR / path
gene_sets = self.load()
super().__init__(gene_sets, label)
def load(self, opener=open):
if self.path.suffix.endswith('.gz'):
opener = gzip_open_text
gene_sets = {}
with opener(self.path) as f:
for line in f:
name, url, *genes = line.strip().split('\t')
gene_sets[name] = GeneSet(name, genes, url)
return gene_sets
class RemoteDatabase(GMTSignatureDatabase):
def __init__(self, set_name, identifiers='symbols', version=6.1, remote=REMOTE, label=None):
path = f'{version}/{set_name}.v{version}.{identifiers}.gmt.gz'
self.raw_path = path
self.remote = remote
GMTSignatureDatabase.__init__(self, path, label)
def fetch(self):
url = self.remote + str(self.raw_path)
os.makedirs(self.path.parent, exist_ok=True)
urlretrieve(url, self.path)
def load(self, opener=open):
if not self.path.exists():
self.fetch()
return super().load(opener)
DATABASE_PRESETS = {
'H': (
'h.all',
'hallmark gene sets'
)
}
class DatabaseParser(Parser):
"""Help"""
__skip_if_absent__ = False
name_or_path = Argument(
default='H',
help='Name of Molecular Signature Database to use. '
'By default hallmark genes will be used. '
f'Use one of following: {", ".join(DATABASE_PRESETS)} '
'or provide a path to custom database (.gmt file). '
# http://software.broadinstitute.org/gsea/msigdb_license_terms.jsp
'Please note that due to licencing restrictions data '
'from following sources: '
'KEGG, BioCarta and AAAS/STKE Cell Signaling Database '
'are currently unavailable.'
,
)
version = Argument(
default=6.1
)
identifiers = Argument(
choices=['symbols', 'entrez'],
default='symbols'
)
remote = Argument(
        # we may want to create a mirror when things get serious, both to
        # avoid overusing the Broad Institute servers and to gain independence
default=REMOTE
)
@action
def show_gene_sets(namespace):
try:
parser = DatabaseParser(**vars(namespace))
db = parser.produce(None).database
except Exception as e:
print(e)
raise
for name, gene_set in db.gene_sets.items():
print(name, gene_set.url)
def produce(self, unknown_args):
n = self.namespace
# handle nested case
if hasattr(n, 'database'):
n = n.database
name = n.name_or_path
# try to get one of pre-defined database by name
if name in DATABASE_PRESETS:
set_name, label = DATABASE_PRESETS[name]
n.database = RemoteDatabase(
set_name, label=label,
version=n.version,
identifiers=n.identifiers,
remote=n.remote
)
else:
# or to load a database from path
n.database = GMTSignatureDatabase(name)
return n
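# Illustrative use of the preset above (fetches the hallmark set from the
# remote on first run):
# db = RemoteDatabase('h.all', label='hallmark gene sets')
# print(len(db.gene_sets), 'gene sets loaded')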
|
import inspect
import sys
from minimock import Mock
def mock_module(module_name):
"""
Replaces all of the functions of a module with mock versions of the same
functions that don't do anything. Doesn't modify classes or the methods
of classes, nor does it change any global data in the modules.
Example usage:
mock_module('seattlegeni.common.api.lockserver')
"""
__import__(module_name)
module = sys.modules[module_name]
for item_name in dir(module):
obj = getattr(module, item_name)
if inspect.isfunction(obj):
print "Mocking: " + module_name + "." + item_name
setattr(module, item_name, Mock(obj))
|
"""
Read a maf file and write out a new maf with only blocks having the
required species, after dropping any other species and removing
columns containing only gaps.
usage: %prog species,species2,... input_maf output_maf allow_partial min_species_per_block
"""
from galaxy import eggs
import pkg_resources; pkg_resources.require( "bx-python" )
import bx.align.maf
from galaxy.tools.util import maf_utilities
import sys
assert sys.version_info[:2] >= ( 2, 4 )
def main():
species = maf_utilities.parse_species_option( sys.argv[1] )
if species:
spec_len = len( species )
else:
spec_len = 0
try:
maf_reader = bx.align.maf.Reader( open( sys.argv[2],'r' ) )
maf_writer = bx.align.maf.Writer( open( sys.argv[3],'w' ) )
    except Exception:
        print >>sys.stderr, "Your MAF file appears to be malformed."
        sys.exit()
allow_partial = False
if int( sys.argv[4] ): allow_partial = True
min_species_per_block = int( sys.argv[5] )
maf_blocks_kept = 0
for m in maf_reader:
if species:
m = m.limit_to_species( species )
m.remove_all_gap_columns()
spec_in_block_len = len( maf_utilities.get_species_in_block( m ) )
if ( not species or allow_partial or spec_in_block_len == spec_len ) and spec_in_block_len > min_species_per_block:
maf_writer.write( m )
maf_blocks_kept += 1
maf_reader.close()
maf_writer.close()
print "Restricted to species: %s." % ", ".join( species )
print "%i MAF blocks have been kept." % maf_blocks_kept
if __name__ == "__main__":
main()
|
from __future__ import unicode_literals
from django.apps import AppConfig
class MainConfig(AppConfig):
name = 'main'
|
from google.appengine.ext import ndb
from consts.district_type import DistrictType
from models.team import Team
class DistrictTeam(ndb.Model):
"""
DistrictTeam represents the "home district" for a team in a year
key_name is like <year><district_short>_<team_key> (e.g. 2015ne_frc1124)
district_short is one of DistrictType.type_abbrevs
"""
team = ndb.KeyProperty(kind=Team)
year = ndb.IntegerProperty()
district = ndb.IntegerProperty() # One of DistrictType constants
created = ndb.DateTimeProperty(auto_now_add=True, indexed=False)
updated = ndb.DateTimeProperty(auto_now=True, indexed=False)
def __init__(self, *args, **kw):
        # store set of affected referenced keys for cache clearing
# keys must be model properties
self._affected_references = {
'district': set(),
'district_key': set(),
'team': set(),
'year': set(),
}
super(DistrictTeam, self).__init__(*args, **kw)
@property
def key_name(self):
return self.renderKeyName(self.year, self.district, self.team.id())
    @property
    def district_key(self):
        districtAbbrev = DistrictType.type_abbrevs[self.district]
        return '{}{}'.format(self.year, districtAbbrev)
    @classmethod
    def renderKeyName(cls, year, districtEnum, teamKey):
districtAbbrev = DistrictType.type_abbrevs[districtEnum]
return "{}{}_{}".format(year, districtAbbrev, teamKey)
|
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/structure/general/shared_poi_tato_farm_64x64_s02.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
class Allergies(object):
_allergies = [
"eggs",
"peanuts",
"shellfish",
"strawberries",
"tomatoes",
"chocolate",
"pollen",
"cats"
]
def __init__(self, score):
self.score = score
def is_allergic_to(self, allergy):
return bool(self.score & 1 << self._allergies.index(allergy))
@property
def lst(self):
return [allergy for allergy in self._allergies
if self.is_allergic_to(allergy)]
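# Illustrative scores: 34 = 2 + 32, i.e. the "peanuts" and "chocolate" bits.
# Allergies(34).is_allergic_to("peanuts")  # -> True
# Allergies(34).lst                        # -> ["peanuts", "chocolate"]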
|
from __future__ import absolute_import, division, print_function, unicode_literals
from os import path
import sys
import nib
default_config = path.join(nib.cwd, 'defaults.nib')
def merge(dest, source):
"""In-place, recursive merge of two dictionaries."""
for key in source:
if key in dest:
if isinstance(dest[key], dict) and isinstance(source[key], dict):
merge(dest[key], source[key])
continue
dest[key] = source[key]
return dest
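# Worked example of the recursive merge above:
#   merge({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3})
#   -> {'a': {'x': 1, 'y': 2}, 'b': 3}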
class Config(dict):
def __init__(self, filename=None):
values = nib.yaml.load(default_config)
if filename is not None:
if path.isfile(filename):
overrides = nib.yaml.load(filename)
merge(values, overrides)
else:
sys.stderr.write('Warning: no site config found at "{}"\n'.format(filename))
dict.__init__(self, values)
|
from sqlalchemy.testing import eq_
from sqlalchemy import *
from sqlalchemy import types as sqltypes, exc, schema
from sqlalchemy.sql import table, column
from sqlalchemy.testing import fixtures, AssertsExecutionResults, AssertsCompiledSQL
from sqlalchemy import testing
from sqlalchemy.util import u, b
from sqlalchemy import util
from sqlalchemy.testing import assert_raises, assert_raises_message
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.dialects.oracle import cx_oracle, base as oracle
from sqlalchemy.engine import default
import decimal
from sqlalchemy.testing.schema import Table, Column
import datetime
import os
from sqlalchemy import sql
class OutParamTest(fixtures.TestBase, AssertsExecutionResults):
__only_on__ = 'oracle+cx_oracle'
@classmethod
def setup_class(cls):
testing.db.execute("""
create or replace procedure foo(x_in IN number, x_out OUT number, y_out OUT number, z_out OUT varchar) IS
retval number;
begin
retval := 6;
x_out := 10;
y_out := x_in * 15;
z_out := NULL;
end;
""")
def test_out_params(self):
result = \
testing.db.execute(text('begin foo(:x_in, :x_out, :y_out, '
':z_out); end;',
bindparams=[bindparam('x_in', Float),
outparam('x_out', Integer),
outparam('y_out', Float),
outparam('z_out', String)]), x_in=5)
        eq_(result.out_parameters,
            {'x_out': 10, 'y_out': 75, 'z_out': None})
assert isinstance(result.out_parameters['x_out'], int)
@classmethod
def teardown_class(cls):
testing.db.execute("DROP PROCEDURE foo")
class CXOracleArgsTest(fixtures.TestBase):
__only_on__ = 'oracle+cx_oracle'
def test_autosetinputsizes(self):
dialect = cx_oracle.dialect()
assert dialect.auto_setinputsizes
dialect = cx_oracle.dialect(auto_setinputsizes=False)
assert not dialect.auto_setinputsizes
def test_exclude_inputsizes_none(self):
dialect = cx_oracle.dialect(exclude_setinputsizes=None)
eq_(dialect.exclude_setinputsizes, set())
def test_exclude_inputsizes_custom(self):
import cx_Oracle
dialect = cx_oracle.dialect(dbapi=cx_Oracle,
exclude_setinputsizes=('NCLOB',))
eq_(dialect.exclude_setinputsizes, set([cx_Oracle.NCLOB]))
class QuotedBindRoundTripTest(fixtures.TestBase):
__only_on__ = 'oracle'
@testing.provide_metadata
def test_table_round_trip(self):
oracle.RESERVED_WORDS.remove('UNION')
metadata = self.metadata
table = Table("t1", metadata,
Column("option", Integer),
Column("plain", Integer, quote=True),
# test that quote works for a reserved word
# that the dialect isn't aware of when quote
# is set
Column("union", Integer, quote=True)
)
metadata.create_all()
table.insert().execute(
{"option":1, "plain":1, "union":1}
)
eq_(
testing.db.execute(table.select()).first(),
(1, 1, 1)
)
table.update().values(option=2, plain=2, union=2).execute()
eq_(
testing.db.execute(table.select()).first(),
(2, 2, 2)
)
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = oracle.dialect()
def test_true_false(self):
self.assert_compile(
sql.false(), "0"
)
self.assert_compile(
sql.true(),
"1"
)
def test_owner(self):
meta = MetaData()
parent = Table('parent', meta, Column('id', Integer,
primary_key=True), Column('name', String(50)),
schema='ed')
child = Table('child', meta, Column('id', Integer,
primary_key=True), Column('parent_id', Integer,
ForeignKey('ed.parent.id')), schema='ed')
self.assert_compile(parent.join(child),
'ed.parent JOIN ed.child ON ed.parent.id = '
'ed.child.parent_id')
def test_subquery(self):
t = table('sometable', column('col1'), column('col2'))
s = select([t])
s = select([s.c.col1, s.c.col2])
self.assert_compile(s, "SELECT col1, col2 FROM (SELECT "
"sometable.col1 AS col1, sometable.col2 "
"AS col2 FROM sometable)")
def test_bindparam_quote(self):
"""test that bound parameters take on quoting for reserved words,
column names quote flag enabled."""
# note: this is only in cx_oracle at the moment. not sure
# what other hypothetical oracle dialects might need
self.assert_compile(
bindparam("option"), ':"option"'
)
self.assert_compile(
bindparam("plain"), ':plain'
)
t = Table("s", MetaData(), Column('plain', Integer, quote=True))
self.assert_compile(
t.insert().values(plain=5), 'INSERT INTO s ("plain") VALUES (:"plain")'
)
self.assert_compile(
t.update().values(plain=5), 'UPDATE s SET "plain"=:"plain"'
)
def test_limit(self):
t = table('sometable', column('col1'), column('col2'))
s = select([t])
c = s.compile(dialect=oracle.OracleDialect())
assert t.c.col1 in set(c.result_map['col1'][1])
s = select([t]).limit(10).offset(20)
self.assert_compile(s,
'SELECT col1, col2 FROM (SELECT col1, '
'col2, ROWNUM AS ora_rn FROM (SELECT '
'sometable.col1 AS col1, sometable.col2 AS '
'col2 FROM sometable) WHERE ROWNUM <= '
':ROWNUM_1) WHERE ora_rn > :ora_rn_1')
c = s.compile(dialect=oracle.OracleDialect())
assert t.c.col1 in set(c.result_map['col1'][1])
s = select([s.c.col1, s.c.col2])
self.assert_compile(s,
'SELECT col1, col2 FROM (SELECT col1, col2 '
'FROM (SELECT col1, col2, ROWNUM AS ora_rn '
'FROM (SELECT sometable.col1 AS col1, '
'sometable.col2 AS col2 FROM sometable) '
'WHERE ROWNUM <= :ROWNUM_1) WHERE ora_rn > '
':ora_rn_1)')
self.assert_compile(s,
'SELECT col1, col2 FROM (SELECT col1, col2 '
'FROM (SELECT col1, col2, ROWNUM AS ora_rn '
'FROM (SELECT sometable.col1 AS col1, '
'sometable.col2 AS col2 FROM sometable) '
'WHERE ROWNUM <= :ROWNUM_1) WHERE ora_rn > '
':ora_rn_1)')
s = select([t]).limit(10).offset(20).order_by(t.c.col2)
self.assert_compile(s,
'SELECT col1, col2 FROM (SELECT col1, '
'col2, ROWNUM AS ora_rn FROM (SELECT '
'sometable.col1 AS col1, sometable.col2 AS '
'col2 FROM sometable ORDER BY '
'sometable.col2) WHERE ROWNUM <= '
':ROWNUM_1) WHERE ora_rn > :ora_rn_1')
s = select([t], for_update=True).limit(10).order_by(t.c.col2)
self.assert_compile(s,
'SELECT col1, col2 FROM (SELECT '
'sometable.col1 AS col1, sometable.col2 AS '
'col2 FROM sometable ORDER BY '
'sometable.col2) WHERE ROWNUM <= :ROWNUM_1 '
'FOR UPDATE')
s = select([t],
for_update=True).limit(10).offset(20).order_by(t.c.col2)
self.assert_compile(s,
'SELECT col1, col2 FROM (SELECT col1, '
'col2, ROWNUM AS ora_rn FROM (SELECT '
'sometable.col1 AS col1, sometable.col2 AS '
'col2 FROM sometable ORDER BY '
'sometable.col2) WHERE ROWNUM <= '
':ROWNUM_1) WHERE ora_rn > :ora_rn_1 FOR '
'UPDATE')
def test_limit_preserves_typing_information(self):
class MyType(TypeDecorator):
impl = Integer
stmt = select([type_coerce(column('x'), MyType).label('foo')]).limit(1)
dialect = oracle.dialect()
compiled = stmt.compile(dialect=dialect)
assert isinstance(compiled.result_map['foo'][-1], MyType)
def test_use_binds_for_limits_disabled(self):
t = table('sometable', column('col1'), column('col2'))
dialect = oracle.OracleDialect(use_binds_for_limits=False)
self.assert_compile(select([t]).limit(10),
"SELECT col1, col2 FROM (SELECT sometable.col1 AS col1, "
"sometable.col2 AS col2 FROM sometable) WHERE ROWNUM <= 10",
dialect=dialect)
self.assert_compile(select([t]).offset(10),
"SELECT col1, col2 FROM (SELECT col1, col2, ROWNUM AS ora_rn "
"FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
"FROM sometable)) WHERE ora_rn > 10",
dialect=dialect)
self.assert_compile(select([t]).limit(10).offset(10),
"SELECT col1, col2 FROM (SELECT col1, col2, ROWNUM AS ora_rn "
"FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
"FROM sometable) WHERE ROWNUM <= 20) WHERE ora_rn > 10",
dialect=dialect)
def test_use_binds_for_limits_enabled(self):
t = table('sometable', column('col1'), column('col2'))
        dialect = oracle.OracleDialect(use_binds_for_limits=True)
self.assert_compile(select([t]).limit(10),
"SELECT col1, col2 FROM (SELECT sometable.col1 AS col1, "
"sometable.col2 AS col2 FROM sometable) WHERE ROWNUM "
"<= :ROWNUM_1",
dialect=dialect)
self.assert_compile(select([t]).offset(10),
"SELECT col1, col2 FROM (SELECT col1, col2, ROWNUM AS ora_rn "
"FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
"FROM sometable)) WHERE ora_rn > :ora_rn_1",
dialect=dialect)
self.assert_compile(select([t]).limit(10).offset(10),
"SELECT col1, col2 FROM (SELECT col1, col2, ROWNUM AS ora_rn "
"FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
"FROM sometable) WHERE ROWNUM <= :ROWNUM_1) WHERE ora_rn > "
":ora_rn_1",
dialect=dialect)
def test_long_labels(self):
dialect = default.DefaultDialect()
dialect.max_identifier_length = 30
ora_dialect = oracle.dialect()
m = MetaData()
a_table = Table(
'thirty_characters_table_xxxxxx',
m,
Column('id', Integer, primary_key=True)
)
other_table = Table(
'other_thirty_characters_table_',
m,
Column('id', Integer, primary_key=True),
Column('thirty_characters_table_id',
Integer,
ForeignKey('thirty_characters_table_xxxxxx.id'),
primary_key=True
)
)
anon = a_table.alias()
self.assert_compile(select([other_table,
anon]).
select_from(
other_table.outerjoin(anon)).apply_labels(),
'SELECT other_thirty_characters_table_.id '
'AS other_thirty_characters__1, '
'other_thirty_characters_table_.thirty_char'
'acters_table_id AS other_thirty_characters'
'__2, thirty_characters_table__1.id AS '
'thirty_characters_table__3 FROM '
'other_thirty_characters_table_ LEFT OUTER '
'JOIN thirty_characters_table_xxxxxx AS '
'thirty_characters_table__1 ON '
'thirty_characters_table__1.id = '
'other_thirty_characters_table_.thirty_char'
'acters_table_id', dialect=dialect)
self.assert_compile(select([other_table,
anon]).select_from(
other_table.outerjoin(anon)).apply_labels(),
'SELECT other_thirty_characters_table_.id '
'AS other_thirty_characters__1, '
'other_thirty_characters_table_.thirty_char'
'acters_table_id AS other_thirty_characters'
'__2, thirty_characters_table__1.id AS '
'thirty_characters_table__3 FROM '
'other_thirty_characters_table_ LEFT OUTER '
'JOIN thirty_characters_table_xxxxxx '
'thirty_characters_table__1 ON '
'thirty_characters_table__1.id = '
'other_thirty_characters_table_.thirty_char'
'acters_table_id', dialect=ora_dialect)
def test_outer_join(self):
table1 = table('mytable',
column('myid', Integer),
column('name', String),
column('description', String),
)
table2 = table(
'myothertable',
column('otherid', Integer),
column('othername', String),
)
table3 = table(
'thirdtable',
column('userid', Integer),
column('otherstuff', String),
)
query = select([table1, table2], or_(table1.c.name == 'fred',
table1.c.myid == 10, table2.c.othername != 'jack'
, 'EXISTS (select yay from foo where boo = lar)'
), from_obj=[outerjoin(table1, table2,
table1.c.myid == table2.c.otherid)])
self.assert_compile(query,
'SELECT mytable.myid, mytable.name, '
'mytable.description, myothertable.otherid,'
' myothertable.othername FROM mytable, '
'myothertable WHERE (mytable.name = '
':name_1 OR mytable.myid = :myid_1 OR '
'myothertable.othername != :othername_1 OR '
'EXISTS (select yay from foo where boo = '
'lar)) AND mytable.myid = '
'myothertable.otherid(+)',
dialect=oracle.OracleDialect(use_ansi=False))
query = table1.outerjoin(table2, table1.c.myid
== table2.c.otherid).outerjoin(table3,
table3.c.userid == table2.c.otherid)
self.assert_compile(query.select(),
'SELECT mytable.myid, mytable.name, '
'mytable.description, myothertable.otherid,'
' myothertable.othername, '
'thirdtable.userid, thirdtable.otherstuff '
'FROM mytable LEFT OUTER JOIN myothertable '
'ON mytable.myid = myothertable.otherid '
'LEFT OUTER JOIN thirdtable ON '
'thirdtable.userid = myothertable.otherid')
self.assert_compile(query.select(),
'SELECT mytable.myid, mytable.name, '
'mytable.description, myothertable.otherid,'
' myothertable.othername, '
'thirdtable.userid, thirdtable.otherstuff '
'FROM mytable, myothertable, thirdtable '
'WHERE thirdtable.userid(+) = '
'myothertable.otherid AND mytable.myid = '
'myothertable.otherid(+)',
dialect=oracle.dialect(use_ansi=False))
query = table1.join(table2, table1.c.myid
== table2.c.otherid).join(table3,
table3.c.userid == table2.c.otherid)
self.assert_compile(query.select(),
'SELECT mytable.myid, mytable.name, '
'mytable.description, myothertable.otherid,'
' myothertable.othername, '
'thirdtable.userid, thirdtable.otherstuff '
'FROM mytable, myothertable, thirdtable '
'WHERE thirdtable.userid = '
'myothertable.otherid AND mytable.myid = '
'myothertable.otherid',
dialect=oracle.dialect(use_ansi=False))
query = table1.join(table2, table1.c.myid
== table2.c.otherid).outerjoin(table3,
table3.c.userid == table2.c.otherid)
self.assert_compile(query.select().order_by(table1.c.name).
limit(10).offset(5),
'SELECT myid, name, description, otherid, '
'othername, userid, otherstuff FROM '
'(SELECT myid, name, description, otherid, '
'othername, userid, otherstuff, ROWNUM AS '
'ora_rn FROM (SELECT mytable.myid AS myid, '
'mytable.name AS name, mytable.description '
'AS description, myothertable.otherid AS '
'otherid, myothertable.othername AS '
'othername, thirdtable.userid AS userid, '
'thirdtable.otherstuff AS otherstuff FROM '
'mytable, myothertable, thirdtable WHERE '
'thirdtable.userid(+) = '
'myothertable.otherid AND mytable.myid = '
'myothertable.otherid ORDER BY '
'mytable.name) WHERE ROWNUM <= :ROWNUM_1) '
'WHERE ora_rn > :ora_rn_1',
dialect=oracle.dialect(use_ansi=False))
subq = select([table1]).select_from(table1.outerjoin(table2,
table1.c.myid == table2.c.otherid)).alias()
q = select([table3]).select_from(table3.outerjoin(subq,
table3.c.userid == subq.c.myid))
self.assert_compile(q,
'SELECT thirdtable.userid, '
'thirdtable.otherstuff FROM thirdtable '
'LEFT OUTER JOIN (SELECT mytable.myid AS '
'myid, mytable.name AS name, '
'mytable.description AS description FROM '
'mytable LEFT OUTER JOIN myothertable ON '
'mytable.myid = myothertable.otherid) '
'anon_1 ON thirdtable.userid = anon_1.myid'
, dialect=oracle.dialect(use_ansi=True))
self.assert_compile(q,
'SELECT thirdtable.userid, '
'thirdtable.otherstuff FROM thirdtable, '
'(SELECT mytable.myid AS myid, '
'mytable.name AS name, mytable.description '
'AS description FROM mytable, myothertable '
'WHERE mytable.myid = myothertable.otherid('
'+)) anon_1 WHERE thirdtable.userid = '
'anon_1.myid(+)',
dialect=oracle.dialect(use_ansi=False))
q = select([table1.c.name]).where(table1.c.name == 'foo')
self.assert_compile(q,
'SELECT mytable.name FROM mytable WHERE '
'mytable.name = :name_1',
dialect=oracle.dialect(use_ansi=False))
subq = select([table3.c.otherstuff]).where(table3.c.otherstuff
== table1.c.name).label('bar')
q = select([table1.c.name, subq])
self.assert_compile(q,
'SELECT mytable.name, (SELECT '
'thirdtable.otherstuff FROM thirdtable '
'WHERE thirdtable.otherstuff = '
'mytable.name) AS bar FROM mytable',
dialect=oracle.dialect(use_ansi=False))
def test_nonansi_nested_right_join(self):
a = table('a', column('a'))
b = table('b', column('b'))
c = table('c', column('c'))
j = a.join(b.join(c, b.c.b == c.c.c), a.c.a == b.c.b)
self.assert_compile(
select([j]),
"SELECT a.a, b.b, c.c FROM a, b, c "
"WHERE a.a = b.b AND b.b = c.c",
dialect=oracle.OracleDialect(use_ansi=False)
)
j = a.outerjoin(b.join(c, b.c.b == c.c.c), a.c.a == b.c.b)
self.assert_compile(
select([j]),
"SELECT a.a, b.b, c.c FROM a, b, c "
"WHERE a.a = b.b(+) AND b.b = c.c",
dialect=oracle.OracleDialect(use_ansi=False)
)
j = a.join(b.outerjoin(c, b.c.b == c.c.c), a.c.a == b.c.b)
self.assert_compile(
select([j]),
"SELECT a.a, b.b, c.c FROM a, b, c "
"WHERE a.a = b.b AND b.b = c.c(+)",
dialect=oracle.OracleDialect(use_ansi=False)
)
def test_alias_outer_join(self):
address_types = table('address_types', column('id'),
column('name'))
addresses = table('addresses', column('id'), column('user_id'),
column('address_type_id'),
column('email_address'))
at_alias = address_types.alias()
s = select([at_alias,
addresses]).select_from(addresses.outerjoin(at_alias,
addresses.c.address_type_id
== at_alias.c.id)).where(addresses.c.user_id
== 7).order_by(addresses.c.id, address_types.c.id)
self.assert_compile(s,
'SELECT address_types_1.id, '
'address_types_1.name, addresses.id, '
'addresses.user_id, addresses.address_type_'
'id, addresses.email_address FROM '
'addresses LEFT OUTER JOIN address_types '
'address_types_1 ON addresses.address_type_'
'id = address_types_1.id WHERE '
'addresses.user_id = :user_id_1 ORDER BY '
'addresses.id, address_types.id')
def test_returning_insert(self):
t1 = table('t1', column('c1'), column('c2'), column('c3'))
self.assert_compile(
t1.insert().values(c1=1).returning(t1.c.c2, t1.c.c3),
"INSERT INTO t1 (c1) VALUES (:c1) RETURNING "
"t1.c2, t1.c3 INTO :ret_0, :ret_1"
)
def test_returning_insert_functional(self):
t1 = table('t1', column('c1'), column('c2', String()), column('c3', String()))
fn = func.lower(t1.c.c2, type_=String())
stmt = t1.insert().values(c1=1).returning(fn, t1.c.c3)
compiled = stmt.compile(dialect=oracle.dialect())
eq_(
compiled.result_map,
{'ret_1': ('ret_1', (t1.c.c3, 'c3', 'c3'), t1.c.c3.type),
'ret_0': ('ret_0', (fn, 'lower', None), fn.type)}
)
self.assert_compile(
stmt,
"INSERT INTO t1 (c1) VALUES (:c1) RETURNING "
"lower(t1.c2), t1.c3 INTO :ret_0, :ret_1"
)
def test_returning_insert_labeled(self):
t1 = table('t1', column('c1'), column('c2'), column('c3'))
self.assert_compile(
t1.insert().values(c1=1).returning(t1.c.c2.label('c2_l'), t1.c.c3.label('c3_l')),
"INSERT INTO t1 (c1) VALUES (:c1) RETURNING "
"t1.c2, t1.c3 INTO :ret_0, :ret_1"
)
def test_compound(self):
t1 = table('t1', column('c1'), column('c2'), column('c3'))
t2 = table('t2', column('c1'), column('c2'), column('c3'))
self.assert_compile(union(t1.select(), t2.select()),
'SELECT t1.c1, t1.c2, t1.c3 FROM t1 UNION '
'SELECT t2.c1, t2.c2, t2.c3 FROM t2')
self.assert_compile(except_(t1.select(), t2.select()),
'SELECT t1.c1, t1.c2, t1.c3 FROM t1 MINUS '
'SELECT t2.c1, t2.c2, t2.c3 FROM t2')
def test_no_paren_fns(self):
for fn, expected in [
(func.uid(), "uid"),
(func.UID(), "UID"),
(func.sysdate(), "sysdate"),
(func.row_number(), "row_number()"),
(func.rank(), "rank()"),
(func.now(), "CURRENT_TIMESTAMP"),
(func.current_timestamp(), "CURRENT_TIMESTAMP"),
(func.user(), "USER"),
]:
self.assert_compile(fn, expected)
def test_create_index_alt_schema(self):
m = MetaData()
t1 = Table('foo', m,
Column('x', Integer),
schema="alt_schema"
)
self.assert_compile(
schema.CreateIndex(Index("bar", t1.c.x)),
"CREATE INDEX alt_schema.bar ON alt_schema.foo (x)"
)
class CompatFlagsTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = 'oracle'
def test_ora8_flags(self):
def server_version_info(self):
return (8, 2, 5)
dialect = oracle.dialect(dbapi=testing.db.dialect.dbapi)
dialect._get_server_version_info = server_version_info
# before connect, assume modern DB
assert dialect._supports_char_length
assert dialect._supports_nchar
assert dialect.use_ansi
dialect.initialize(testing.db.connect())
assert not dialect.implicit_returning
assert not dialect._supports_char_length
assert not dialect._supports_nchar
assert not dialect.use_ansi
        self.assert_compile(String(50), "VARCHAR2(50)", dialect=dialect)
        self.assert_compile(Unicode(50), "VARCHAR2(50)", dialect=dialect)
        self.assert_compile(UnicodeText(), "CLOB", dialect=dialect)
dialect = oracle.dialect(implicit_returning=True,
dbapi=testing.db.dialect.dbapi)
dialect._get_server_version_info = server_version_info
dialect.initialize(testing.db.connect())
assert dialect.implicit_returning
def test_default_flags(self):
"""test with no initialization or server version info"""
dialect = oracle.dialect(dbapi=testing.db.dialect.dbapi)
assert dialect._supports_char_length
assert dialect._supports_nchar
assert dialect.use_ansi
        self.assert_compile(String(50), "VARCHAR2(50 CHAR)", dialect=dialect)
        self.assert_compile(Unicode(50), "NVARCHAR2(50)", dialect=dialect)
        self.assert_compile(UnicodeText(), "NCLOB", dialect=dialect)
def test_ora10_flags(self):
def server_version_info(self):
return (10, 2, 5)
dialect = oracle.dialect(dbapi=testing.db.dialect.dbapi)
dialect._get_server_version_info = server_version_info
dialect.initialize(testing.db.connect())
assert dialect._supports_char_length
assert dialect._supports_nchar
assert dialect.use_ansi
        self.assert_compile(String(50), "VARCHAR2(50 CHAR)", dialect=dialect)
        self.assert_compile(Unicode(50), "NVARCHAR2(50)", dialect=dialect)
        self.assert_compile(UnicodeText(), "NCLOB", dialect=dialect)
class MultiSchemaTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = 'oracle'
@classmethod
def setup_class(cls):
# currently assuming full DBA privs for the user.
# don't really know how else to go here unless
# we connect as the other user.
for stmt in """
create table test_schema.parent(
id integer primary key,
data varchar2(50)
);
create table test_schema.child(
id integer primary key,
data varchar2(50),
parent_id integer references test_schema.parent(id)
);
create synonym test_schema.ptable for test_schema.parent;
create synonym test_schema.ctable for test_schema.child;
-- can't make a ref from local schema to the
-- remote schema's table without this,
        -- *and* can't give yourself a grant!
-- so we give it to public. ideas welcome.
grant references on test_schema.parent to public;
grant references on test_schema.child to public;
""".split(";"):
if stmt.strip():
testing.db.execute(stmt)
@classmethod
def teardown_class(cls):
for stmt in """
drop table test_schema.child;
drop table test_schema.parent;
drop synonym test_schema.ctable;
drop synonym test_schema.ptable;
""".split(";"):
if stmt.strip():
testing.db.execute(stmt)
def test_create_same_names_explicit_schema(self):
schema = testing.db.dialect.default_schema_name
meta = MetaData(testing.db)
parent = Table('parent', meta,
Column('pid', Integer, primary_key=True),
schema=schema
)
child = Table('child', meta,
Column('cid', Integer, primary_key=True),
Column('pid', Integer, ForeignKey('%s.parent.pid' % schema)),
schema=schema
)
meta.create_all()
try:
parent.insert().execute({'pid':1})
child.insert().execute({'cid':1, 'pid':1})
eq_(child.select().execute().fetchall(), [(1, 1)])
finally:
meta.drop_all()
def test_create_same_names_implicit_schema(self):
meta = MetaData(testing.db)
parent = Table('parent', meta,
Column('pid', Integer, primary_key=True),
)
child = Table('child', meta,
Column('cid', Integer, primary_key=True),
Column('pid', Integer, ForeignKey('parent.pid')),
)
meta.create_all()
try:
parent.insert().execute({'pid':1})
child.insert().execute({'cid':1, 'pid':1})
eq_(child.select().execute().fetchall(), [(1, 1)])
finally:
meta.drop_all()
def test_reflect_alt_owner_explicit(self):
meta = MetaData(testing.db)
parent = Table('parent', meta, autoload=True, schema='test_schema')
child = Table('child', meta, autoload=True, schema='test_schema')
self.assert_compile(parent.join(child),
"test_schema.parent JOIN test_schema.child ON "
"test_schema.parent.id = test_schema.child.parent_id")
select([parent, child]).\
select_from(parent.join(child)).\
execute().fetchall()
def test_reflect_local_to_remote(self):
testing.db.execute('CREATE TABLE localtable (id INTEGER '
'PRIMARY KEY, parent_id INTEGER REFERENCES '
'test_schema.parent(id))')
try:
meta = MetaData(testing.db)
lcl = Table('localtable', meta, autoload=True)
parent = meta.tables['test_schema.parent']
self.assert_compile(parent.join(lcl),
'test_schema.parent JOIN localtable ON '
'test_schema.parent.id = '
'localtable.parent_id')
select([parent,
lcl]).select_from(parent.join(lcl)).execute().fetchall()
finally:
testing.db.execute('DROP TABLE localtable')
def test_reflect_alt_owner_implicit(self):
meta = MetaData(testing.db)
parent = Table('parent', meta, autoload=True,
schema='test_schema')
child = Table('child', meta, autoload=True, schema='test_schema'
)
self.assert_compile(parent.join(child),
'test_schema.parent JOIN test_schema.child '
'ON test_schema.parent.id = '
'test_schema.child.parent_id')
select([parent,
child]).select_from(parent.join(child)).execute().fetchall()
def test_reflect_alt_owner_synonyms(self):
testing.db.execute('CREATE TABLE localtable (id INTEGER '
'PRIMARY KEY, parent_id INTEGER REFERENCES '
'test_schema.ptable(id))')
try:
meta = MetaData(testing.db)
lcl = Table('localtable', meta, autoload=True,
oracle_resolve_synonyms=True)
parent = meta.tables['test_schema.ptable']
self.assert_compile(parent.join(lcl),
'test_schema.ptable JOIN localtable ON '
'test_schema.ptable.id = '
'localtable.parent_id')
select([parent,
lcl]).select_from(parent.join(lcl)).execute().fetchall()
finally:
testing.db.execute('DROP TABLE localtable')
def test_reflect_remote_synonyms(self):
meta = MetaData(testing.db)
parent = Table('ptable', meta, autoload=True,
schema='test_schema',
oracle_resolve_synonyms=True)
child = Table('ctable', meta, autoload=True,
schema='test_schema',
oracle_resolve_synonyms=True)
self.assert_compile(parent.join(child),
'test_schema.ptable JOIN '
'test_schema.ctable ON test_schema.ptable.i'
'd = test_schema.ctable.parent_id')
select([parent,
child]).select_from(parent.join(child)).execute().fetchall()
class ConstraintTest(fixtures.TablesTest):
__only_on__ = 'oracle'
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table('foo', metadata, Column('id', Integer, primary_key=True))
def test_oracle_has_no_on_update_cascade(self):
bar = Table('bar', self.metadata,
Column('id', Integer, primary_key=True),
Column('foo_id', Integer,
ForeignKey('foo.id', onupdate='CASCADE')))
assert_raises(exc.SAWarning, bar.create)
bat = Table('bat', self.metadata,
Column('id', Integer, primary_key=True),
Column('foo_id', Integer),
ForeignKeyConstraint(['foo_id'], ['foo.id'],
onupdate='CASCADE'))
assert_raises(exc.SAWarning, bat.create)
class TwoPhaseTest(fixtures.TablesTest):
"""test cx_oracle two phase, which remains in a semi-broken state
so requires a carefully written test."""
__only_on__ = 'oracle+cx_oracle'
@classmethod
def define_tables(cls, metadata):
Table('datatable', metadata,
Column('id', Integer, primary_key=True),
Column('data', String(50))
)
def _connection(self):
conn = testing.db.connect()
conn.detach()
return conn
def _assert_data(self, rows):
eq_(
testing.db.scalar("select count(*) from datatable"),
rows
)
def test_twophase_prepare_false(self):
conn = self._connection()
for i in range(2):
trans = conn.begin_twophase()
conn.execute("select 1 from dual")
trans.prepare()
trans.commit()
conn.close()
self._assert_data(0)
def test_twophase_prepare_true(self):
conn = self._connection()
for i in range(2):
trans = conn.begin_twophase()
conn.execute("insert into datatable (id, data) "
"values (%s, 'somedata')" % i)
trans.prepare()
trans.commit()
conn.close()
self._assert_data(2)
def test_twophase_rollback(self):
conn = self._connection()
trans = conn.begin_twophase()
conn.execute("insert into datatable (id, data) "
"values (%s, 'somedata')" % 1)
trans.rollback()
trans = conn.begin_twophase()
conn.execute("insert into datatable (id, data) "
"values (%s, 'somedata')" % 1)
trans.prepare()
trans.commit()
conn.close()
self._assert_data(1)
def test_not_prepared(self):
conn = self._connection()
trans = conn.begin_twophase()
conn.execute("insert into datatable (id, data) "
"values (%s, 'somedata')" % 1)
trans.commit()
conn.close()
self._assert_data(1)
class DialectTypesTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = oracle.OracleDialect()
def test_no_clobs_for_string_params(self):
"""test that simple string params get a DBAPI type of
VARCHAR, not CLOB. This is to prevent setinputsizes
from setting up cx_oracle.CLOBs on
string-based bind params [ticket:793]."""
class FakeDBAPI(object):
def __getattr__(self, attr):
return attr
dialect = oracle.OracleDialect()
dbapi = FakeDBAPI()
b = bindparam("foo", "hello world!")
assert b.type.dialect_impl(dialect).get_dbapi_type(dbapi) == 'STRING'
b = bindparam("foo", "hello world!")
assert b.type.dialect_impl(dialect).get_dbapi_type(dbapi) == 'STRING'
def test_long(self):
self.assert_compile(oracle.LONG(), "LONG")
def test_type_adapt(self):
dialect = cx_oracle.dialect()
for start, test in [
(Date(), cx_oracle._OracleDate),
(oracle.OracleRaw(), cx_oracle._OracleRaw),
(String(), String),
(VARCHAR(), cx_oracle._OracleString),
(DATE(), DATE),
(String(50), cx_oracle._OracleString),
(Unicode(), cx_oracle._OracleNVarChar),
(Text(), cx_oracle._OracleText),
(UnicodeText(), cx_oracle._OracleUnicodeText),
(NCHAR(), cx_oracle._OracleNVarChar),
(oracle.RAW(50), cx_oracle._OracleRaw),
]:
assert isinstance(start.dialect_impl(dialect), test), \
"wanted %r got %r" % (test, start.dialect_impl(dialect))
def test_raw_compile(self):
self.assert_compile(oracle.RAW(), "RAW")
self.assert_compile(oracle.RAW(35), "RAW(35)")
def test_char_length(self):
        self.assert_compile(VARCHAR(50), "VARCHAR(50 CHAR)")
        oracle8dialect = oracle.dialect()
        oracle8dialect.server_version_info = (8, 0)
        self.assert_compile(VARCHAR(50), "VARCHAR(50)", dialect=oracle8dialect)
        self.assert_compile(NVARCHAR(50), "NVARCHAR2(50)")
        self.assert_compile(CHAR(50), "CHAR(50)")
def test_varchar_types(self):
dialect = oracle.dialect()
for typ, exp in [
(String(50), "VARCHAR2(50 CHAR)"),
(Unicode(50), "NVARCHAR2(50)"),
(NVARCHAR(50), "NVARCHAR2(50)"),
(VARCHAR(50), "VARCHAR(50 CHAR)"),
(oracle.NVARCHAR2(50), "NVARCHAR2(50)"),
(oracle.VARCHAR2(50), "VARCHAR2(50 CHAR)"),
]:
self.assert_compile(typ, exp, dialect=dialect)
def test_interval(self):
for type_, expected in [(oracle.INTERVAL(),
'INTERVAL DAY TO SECOND'),
(oracle.INTERVAL(day_precision=3),
'INTERVAL DAY(3) TO SECOND'),
(oracle.INTERVAL(second_precision=5),
'INTERVAL DAY TO SECOND(5)'),
(oracle.INTERVAL(day_precision=2,
second_precision=5),
'INTERVAL DAY(2) TO SECOND(5)')]:
self.assert_compile(type_, expected)
class TypesTest(fixtures.TestBase):
__only_on__ = 'oracle'
__dialect__ = oracle.OracleDialect()
@testing.fails_on('+zxjdbc', 'zxjdbc lacks the FIXED_CHAR dbapi type')
def test_fixed_char(self):
m = MetaData(testing.db)
t = Table('t1', m,
Column('id', Integer, primary_key=True),
Column('data', CHAR(30), nullable=False)
)
t.create()
try:
t.insert().execute(
dict(id=1, data="value 1"),
dict(id=2, data="value 2"),
dict(id=3, data="value 3")
)
eq_(t.select().where(t.c.data=='value 2').execute().fetchall(),
[(2, 'value 2 ')]
)
m2 = MetaData(testing.db)
t2 = Table('t1', m2, autoload=True)
assert type(t2.c.data.type) is CHAR
eq_(t2.select().where(t2.c.data=='value 2').execute().fetchall(),
[(2, 'value 2 ')]
)
finally:
t.drop()
@testing.requires.returning
def test_int_not_float(self):
m = MetaData(testing.db)
t1 = Table('t1', m, Column('foo', Integer))
t1.create()
try:
r = t1.insert().values(foo=5).returning(t1.c.foo).execute()
x = r.scalar()
assert x == 5
assert isinstance(x, int)
x = t1.select().scalar()
assert x == 5
assert isinstance(x, int)
finally:
t1.drop()
@testing.provide_metadata
def test_rowid(self):
metadata = self.metadata
t = Table('t1', metadata,
Column('x', Integer)
)
t.create()
t.insert().execute(x=5)
s1 = select([t])
s2 = select([column('rowid')]).select_from(s1)
rowid = s2.scalar()
# the ROWID type is not really needed here,
# as cx_oracle just treats it as a string,
# but we want to make sure the ROWID works...
        rowid_col = column('rowid', oracle.ROWID)
s3 = select([t.c.x, rowid_col]).\
where(rowid_col == cast(rowid, oracle.ROWID))
eq_(s3.select().execute().fetchall(),
[(5, rowid)]
)
@testing.fails_on('+zxjdbc',
'Not yet known how to pass values of the '
'INTERVAL type')
@testing.provide_metadata
def test_interval(self):
metadata = self.metadata
interval_table = Table('intervaltable', metadata, Column('id',
Integer, primary_key=True,
test_needs_autoincrement=True),
Column('day_interval',
oracle.INTERVAL(day_precision=3)))
metadata.create_all()
interval_table.insert().\
execute(day_interval=datetime.timedelta(days=35,
seconds=5743))
row = interval_table.select().execute().first()
eq_(row['day_interval'], datetime.timedelta(days=35,
seconds=5743))
def test_numerics(self):
m = MetaData(testing.db)
t1 = Table('t1', m,
Column('intcol', Integer),
Column('numericcol', Numeric(precision=9, scale=2)),
Column('floatcol1', Float()),
Column('floatcol2', FLOAT()),
Column('doubleprec', oracle.DOUBLE_PRECISION),
Column('numbercol1', oracle.NUMBER(9)),
Column('numbercol2', oracle.NUMBER(9, 3)),
Column('numbercol3', oracle.NUMBER),
)
t1.create()
try:
t1.insert().execute(
intcol=1,
numericcol=5.2,
floatcol1=6.5,
                floatcol2=8.5,
                doubleprec=9.5,
numbercol1=12,
numbercol2=14.85,
numbercol3=15.76
)
m2 = MetaData(testing.db)
t2 = Table('t1', m2, autoload=True)
for row in (
t1.select().execute().first(),
t2.select().execute().first()
):
for i, (val, type_) in enumerate((
(1, int),
(decimal.Decimal("5.2"), decimal.Decimal),
(6.5, float),
(8.5, float),
(9.5, float),
(12, int),
(decimal.Decimal("14.85"), decimal.Decimal),
(15.76, float),
)):
eq_(row[i], val)
assert isinstance(row[i], type_), '%r is not %r' \
% (row[i], type_)
finally:
t1.drop()
def test_numeric_no_decimal_mode(self):
engine = testing_engine(options=dict(coerce_to_decimal=False))
value = engine.scalar("SELECT 5.66 FROM DUAL")
assert isinstance(value, float)
value = testing.db.scalar("SELECT 5.66 FROM DUAL")
assert isinstance(value, decimal.Decimal)
@testing.provide_metadata
def test_numerics_broken_inspection(self):
"""Numeric scenarios where Oracle type info is 'broken',
returning us precision, scale of the form (0, 0) or (0, -127).
We convert to Decimal and let int()/float() processors take over.
"""
metadata = self.metadata
# this test requires cx_oracle 5
foo = Table('foo', metadata,
Column('idata', Integer),
Column('ndata', Numeric(20, 2)),
Column('ndata2', Numeric(20, 2)),
Column('nidata', Numeric(5, 0)),
Column('fdata', Float()),
)
foo.create()
foo.insert().execute(
{'idata':5, 'ndata':decimal.Decimal("45.6"),
'ndata2':decimal.Decimal("45.0"),
'nidata':decimal.Decimal('53'), 'fdata':45.68392},
)
stmt = """
SELECT
idata,
ndata,
ndata2,
nidata,
fdata
FROM foo
"""
row = testing.db.execute(stmt).fetchall()[0]
eq_([type(x) for x in row], [int, decimal.Decimal, decimal.Decimal, int, float])
eq_(
row,
(5, decimal.Decimal('45.6'), decimal.Decimal('45'), 53, 45.683920000000001)
)
# with a nested subquery,
# both Numeric values that don't have decimal places, regardless
# of their originating type, come back as ints with no useful
# typing information beyond "numeric". So native handler
# must convert to int.
# this means our Decimal converters need to run no matter what.
# totally sucks.
stmt = """
SELECT
(SELECT (SELECT idata FROM foo) FROM DUAL) AS idata,
(SELECT CAST((SELECT ndata FROM foo) AS NUMERIC(20, 2)) FROM DUAL)
AS ndata,
(SELECT CAST((SELECT ndata2 FROM foo) AS NUMERIC(20, 2)) FROM DUAL)
AS ndata2,
(SELECT CAST((SELECT nidata FROM foo) AS NUMERIC(5, 0)) FROM DUAL)
AS nidata,
(SELECT CAST((SELECT fdata FROM foo) AS FLOAT) FROM DUAL) AS fdata
FROM dual
"""
row = testing.db.execute(stmt).fetchall()[0]
eq_([type(x) for x in row], [int, decimal.Decimal, int, int, decimal.Decimal])
eq_(
row,
(5, decimal.Decimal('45.6'), 45, 53, decimal.Decimal('45.68392'))
)
row = testing.db.execute(text(stmt,
typemap={
'idata':Integer(),
'ndata':Numeric(20, 2),
'ndata2':Numeric(20, 2),
'nidata':Numeric(5, 0),
'fdata':Float()
})).fetchall()[0]
eq_([type(x) for x in row], [int, decimal.Decimal, decimal.Decimal, decimal.Decimal, float])
eq_(row,
(5, decimal.Decimal('45.6'), decimal.Decimal('45'), decimal.Decimal('53'), 45.683920000000001)
)
stmt = """
SELECT
anon_1.idata AS anon_1_idata,
anon_1.ndata AS anon_1_ndata,
anon_1.ndata2 AS anon_1_ndata2,
anon_1.nidata AS anon_1_nidata,
anon_1.fdata AS anon_1_fdata
FROM (SELECT idata, ndata, ndata2, nidata, fdata
FROM (
SELECT
(SELECT (SELECT idata FROM foo) FROM DUAL) AS idata,
(SELECT CAST((SELECT ndata FROM foo) AS NUMERIC(20, 2))
FROM DUAL) AS ndata,
(SELECT CAST((SELECT ndata2 FROM foo) AS NUMERIC(20, 2))
FROM DUAL) AS ndata2,
(SELECT CAST((SELECT nidata FROM foo) AS NUMERIC(5, 0))
FROM DUAL) AS nidata,
(SELECT CAST((SELECT fdata FROM foo) AS FLOAT) FROM DUAL)
AS fdata
FROM dual
)
WHERE ROWNUM >= 0) anon_1
"""
        row = testing.db.execute(stmt).fetchall()[0]
eq_([type(x) for x in row], [int, decimal.Decimal, int, int, decimal.Decimal])
eq_(row, (5, decimal.Decimal('45.6'), 45, 53, decimal.Decimal('45.68392')))
row = testing.db.execute(text(stmt,
typemap={
'anon_1_idata':Integer(),
'anon_1_ndata':Numeric(20, 2),
'anon_1_ndata2':Numeric(20, 2),
'anon_1_nidata':Numeric(5, 0),
'anon_1_fdata':Float()
})).fetchall()[0]
eq_([type(x) for x in row], [int, decimal.Decimal, decimal.Decimal, decimal.Decimal, float])
eq_(row,
(5, decimal.Decimal('45.6'), decimal.Decimal('45'), decimal.Decimal('53'), 45.683920000000001)
)
row = testing.db.execute(text(stmt,
typemap={
'anon_1_idata':Integer(),
'anon_1_ndata':Numeric(20, 2, asdecimal=False),
'anon_1_ndata2':Numeric(20, 2, asdecimal=False),
'anon_1_nidata':Numeric(5, 0, asdecimal=False),
'anon_1_fdata':Float(asdecimal=True)
})).fetchall()[0]
eq_([type(x) for x in row], [int, float, float, float, decimal.Decimal])
eq_(row,
(5, 45.6, 45, 53, decimal.Decimal('45.68392'))
)
def test_reflect_dates(self):
metadata = MetaData(testing.db)
Table(
"date_types", metadata,
Column('d1', DATE),
Column('d2', TIMESTAMP),
Column('d3', TIMESTAMP(timezone=True)),
Column('d4', oracle.INTERVAL(second_precision=5)),
)
metadata.create_all()
try:
m = MetaData(testing.db)
t1 = Table(
"date_types", m,
autoload=True)
assert isinstance(t1.c.d1.type, DATE)
assert isinstance(t1.c.d2.type, TIMESTAMP)
assert not t1.c.d2.type.timezone
assert isinstance(t1.c.d3.type, TIMESTAMP)
assert t1.c.d3.type.timezone
assert isinstance(t1.c.d4.type, oracle.INTERVAL)
finally:
metadata.drop_all()
def test_reflect_all_types_schema(self):
types_table = Table('all_types', MetaData(testing.db),
Column('owner', String(30), primary_key=True),
Column('type_name', String(30), primary_key=True),
autoload=True, oracle_resolve_synonyms=True
)
for row in types_table.select().execute().fetchall():
[row[k] for k in row.keys()]
@testing.provide_metadata
def test_raw_roundtrip(self):
metadata = self.metadata
raw_table = Table('raw', metadata,
Column('id', Integer, primary_key=True),
Column('data', oracle.RAW(35))
)
metadata.create_all()
testing.db.execute(raw_table.insert(), id=1, data=b("ABCDEF"))
eq_(
testing.db.execute(raw_table.select()).first(),
(1, b("ABCDEF"))
)
@testing.provide_metadata
def test_reflect_nvarchar(self):
metadata = self.metadata
t = Table('t', metadata,
Column('data', sqltypes.NVARCHAR(255))
)
metadata.create_all()
m2 = MetaData(testing.db)
t2 = Table('t', m2, autoload=True)
assert isinstance(t2.c.data.type, sqltypes.NVARCHAR)
if testing.against('oracle+cx_oracle'):
# nvarchar returns unicode natively. cx_oracle
# _OracleNVarChar type should be at play here.
assert isinstance(
t2.c.data.type.dialect_impl(testing.db.dialect),
cx_oracle._OracleNVarChar)
data = u('m’a réveillé.')
t2.insert().execute(data=data)
res = t2.select().execute().first()['data']
eq_(res, data)
assert isinstance(res, util.text_type)
def test_char_length(self):
metadata = MetaData(testing.db)
t1 = Table('t1', metadata,
Column("c1", VARCHAR(50)),
Column("c2", NVARCHAR(250)),
Column("c3", CHAR(200))
)
t1.create()
try:
m2 = MetaData(testing.db)
t2 = Table('t1', m2, autoload=True)
eq_(t2.c.c1.type.length, 50)
eq_(t2.c.c2.type.length, 250)
eq_(t2.c.c3.type.length, 200)
finally:
t1.drop()
@testing.provide_metadata
def test_long_type(self):
metadata = self.metadata
t = Table('t', metadata,
Column('data', oracle.LONG)
)
metadata.create_all(testing.db)
testing.db.execute(t.insert(), data='xyz')
eq_(
testing.db.scalar(select([t.c.data])),
"xyz"
)
def test_longstring(self):
metadata = MetaData(testing.db)
testing.db.execute("""
CREATE TABLE Z_TEST
(
ID NUMERIC(22) PRIMARY KEY,
ADD_USER VARCHAR2(20) NOT NULL
)
""")
try:
t = Table("z_test", metadata, autoload=True)
t.insert().execute(id=1.0, add_user='foobar')
assert t.select().execute().fetchall() == [(1, 'foobar')]
finally:
testing.db.execute("DROP TABLE Z_TEST")
@testing.fails_on('+zxjdbc', 'auto_convert_lobs not applicable')
def test_lobs_without_convert(self):
engine = testing_engine(options=dict(auto_convert_lobs=False))
metadata = MetaData()
t = Table("z_test", metadata, Column('id', Integer, primary_key=True),
Column('data', Text), Column('bindata', LargeBinary))
t.create(engine)
try:
engine.execute(t.insert(), id=1,
data='this is text',
bindata=b('this is binary'))
row = engine.execute(t.select()).first()
eq_(row['data'].read(), 'this is text')
eq_(row['bindata'].read(), b('this is binary'))
finally:
t.drop(engine)
class EuroNumericTest(fixtures.TestBase):
"""test the numeric output_type_handler when using non-US locale for NLS_LANG."""
__only_on__ = 'oracle+cx_oracle'
def setup(self):
self.old_nls_lang = os.environ.get('NLS_LANG', False)
os.environ['NLS_LANG'] = "GERMAN"
self.engine = testing_engine()
def teardown(self):
if self.old_nls_lang is not False:
os.environ['NLS_LANG'] = self.old_nls_lang
else:
del os.environ['NLS_LANG']
self.engine.dispose()
@testing.provide_metadata
def test_output_type_handler(self):
metadata = self.metadata
for stmt, exp, kw in [
("SELECT 0.1 FROM DUAL", decimal.Decimal("0.1"), {}),
("SELECT 15 FROM DUAL", 15, {}),
("SELECT CAST(15 AS NUMERIC(3, 1)) FROM DUAL", decimal.Decimal("15"), {}),
("SELECT CAST(0.1 AS NUMERIC(5, 2)) FROM DUAL", decimal.Decimal("0.1"), {}),
("SELECT :num FROM DUAL", decimal.Decimal("2.5"), {'num':decimal.Decimal("2.5")})
]:
test_exp = self.engine.scalar(stmt, **kw)
eq_(
test_exp,
exp
)
assert type(test_exp) is type(exp)
class DontReflectIOTTest(fixtures.TestBase):
"""test that index overflow tables aren't included in
table_names."""
__only_on__ = 'oracle'
def setup(self):
testing.db.execute("""
CREATE TABLE admin_docindex(
token char(20),
doc_id NUMBER,
token_frequency NUMBER,
token_offsets VARCHAR2(2000),
CONSTRAINT pk_admin_docindex PRIMARY KEY (token, doc_id))
ORGANIZATION INDEX
TABLESPACE users
PCTTHRESHOLD 20
OVERFLOW TABLESPACE users
""")
def teardown(self):
testing.db.execute("drop table admin_docindex")
def test_reflect_all(self):
m = MetaData(testing.db)
m.reflect()
eq_(
set(t.name for t in m.tables.values()),
set(['admin_docindex'])
)
class BufferedColumnTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = 'oracle'
@classmethod
def setup_class(cls):
global binary_table, stream, meta
meta = MetaData(testing.db)
binary_table = Table('binary_table', meta,
Column('id', Integer, primary_key=True),
Column('data', LargeBinary)
)
meta.create_all()
        data_path = os.path.join(
            os.path.dirname(__file__), "..",
            'binary_data_one.dat')
        with open(data_path, "rb") as file_:
            stream = file_.read(12000)
for i in range(1, 11):
binary_table.insert().execute(id=i, data=stream)
@classmethod
def teardown_class(cls):
meta.drop_all()
def test_fetch(self):
result = binary_table.select().order_by(binary_table.c.id).\
execute().fetchall()
eq_(result, [(i, stream) for i in range(1, 11)])
@testing.fails_on('+zxjdbc', 'FIXME: zxjdbc should support this')
def test_fetch_single_arraysize(self):
eng = testing_engine(options={'arraysize': 1})
result = eng.execute(binary_table.select().
order_by(binary_table.c.id)).fetchall()
eq_(result, [(i, stream) for i in range(1, 11)])
class UnsupportedIndexReflectTest(fixtures.TestBase):
__only_on__ = 'oracle'
def setup(self):
global metadata
metadata = MetaData(testing.db)
t1 = Table('test_index_reflect', metadata,
Column('data', String(20), primary_key=True)
)
metadata.create_all()
def teardown(self):
metadata.drop_all()
@testing.emits_warning("No column names")
def test_reflect_functional_index(self):
testing.db.execute('CREATE INDEX DATA_IDX ON '
'TEST_INDEX_REFLECT (UPPER(DATA))')
m2 = MetaData(testing.db)
t2 = Table('test_index_reflect', m2, autoload=True)
class RoundTripIndexTest(fixtures.TestBase):
__only_on__ = 'oracle'
def test_basic(self):
engine = testing.db
metadata = MetaData(engine)
table=Table("sometable", metadata,
Column("id_a", Unicode(255), primary_key=True),
Column("id_b", Unicode(255), primary_key=True, unique=True),
Column("group", Unicode(255), primary_key=True),
Column("col", Unicode(255)),
UniqueConstraint('col','group'),
)
# "group" is a keyword, so lower case
normalind = Index('tableind', table.c.id_b, table.c.group)
# create
metadata.create_all()
try:
# round trip, create from reflection
mirror = MetaData(engine)
mirror.reflect()
metadata.drop_all()
mirror.create_all()
# inspect the reflected creation
inspect = MetaData(engine)
inspect.reflect()
def obj_definition(obj):
return obj.__class__, tuple([c.name for c in
obj.columns]), getattr(obj, 'unique', None)
# find what the primary k constraint name should be
primaryconsname = engine.execute(
text("""SELECT constraint_name
FROM all_constraints
WHERE table_name = :table_name
AND owner = :owner
AND constraint_type = 'P' """),
table_name=table.name.upper(),
owner=engine.url.username.upper()).fetchall()[0][0]
reflectedtable = inspect.tables[table.name]
# make a dictionary of the reflected objects:
reflected = dict([(obj_definition(i), i) for i in
reflectedtable.indexes
| reflectedtable.constraints])
# assert we got primary key constraint and its name, Error
# if not in dict
assert reflected[(PrimaryKeyConstraint, ('id_a', 'id_b',
'group'), None)].name.upper() \
== primaryconsname.upper()
# Error if not in dict
assert reflected[(Index, ('id_b', 'group'), False)].name \
== normalind.name
assert (Index, ('id_b', ), True) in reflected
assert (Index, ('col', 'group'), True) in reflected
assert len(reflectedtable.constraints) == 1
assert len(reflectedtable.indexes) == 3
finally:
metadata.drop_all()
class SequenceTest(fixtures.TestBase, AssertsCompiledSQL):
def test_basic(self):
seq = Sequence('my_seq_no_schema')
dialect = oracle.OracleDialect()
assert dialect.identifier_preparer.format_sequence(seq) \
== 'my_seq_no_schema'
seq = Sequence('my_seq', schema='some_schema')
assert dialect.identifier_preparer.format_sequence(seq) \
== 'some_schema.my_seq'
seq = Sequence('My_Seq', schema='Some_Schema')
assert dialect.identifier_preparer.format_sequence(seq) \
== '"Some_Schema"."My_Seq"'
class ExecuteTest(fixtures.TestBase):
__only_on__ = 'oracle'
def test_basic(self):
eq_(testing.db.execute('/*+ this is a comment */ SELECT 1 FROM '
'DUAL').fetchall(), [(1, )])
def test_sequences_are_integers(self):
seq = Sequence('foo_seq')
seq.create(testing.db)
try:
val = testing.db.execute(seq)
eq_(val, 1)
assert type(val) is int
finally:
seq.drop(testing.db)
@testing.provide_metadata
def test_limit_offset_for_update(self):
metadata = self.metadata
# oracle can't actually do the ROWNUM thing with FOR UPDATE
# very well.
t = Table('t1', metadata, Column('id', Integer, primary_key=True),
Column('data', Integer)
)
metadata.create_all()
t.insert().execute(
{'id':1, 'data':1},
{'id':2, 'data':7},
{'id':3, 'data':12},
{'id':4, 'data':15},
{'id':5, 'data':32},
)
# here, we can't use ORDER BY.
eq_(
t.select(for_update=True).limit(2).execute().fetchall(),
[(1, 1),
(2, 7)]
)
        # here, it's impossible. But we'd prefer it to raise ORA-02014
        # instead of issuing a syntax error.
assert_raises_message(
exc.DatabaseError,
"ORA-02014",
t.select(for_update=True).limit(2).offset(3).execute
)
class UnicodeSchemaTest(fixtures.TestBase):
__only_on__ = 'oracle'
@testing.provide_metadata
def test_quoted_column_non_unicode(self):
metadata = self.metadata
table=Table("atable", metadata,
Column("_underscorecolumn", Unicode(255), primary_key=True),
)
metadata.create_all()
table.insert().execute(
{'_underscorecolumn': u('’é')},
)
result = testing.db.execute(
table.select().where(table.c._underscorecolumn==u('’é'))
).scalar()
eq_(result, u('’é'))
@testing.provide_metadata
def test_quoted_column_unicode(self):
metadata = self.metadata
table=Table("atable", metadata,
Column(u("méil"), Unicode(255), primary_key=True),
)
metadata.create_all()
table.insert().execute(
{u('méil'): u('’é')},
)
result = testing.db.execute(
table.select().where(table.c[u('méil')] == u('’é'))
).scalar()
eq_(result, u('’é'))
class DBLinkReflectionTest(fixtures.TestBase):
__requires__ = 'oracle_test_dblink',
__only_on__ = 'oracle'
@classmethod
def setup_class(cls):
from sqlalchemy.testing import config
cls.dblink = config.file_config.get('sqla_testing', 'oracle_db_link')
with testing.db.connect() as conn:
conn.execute(
"create table test_table "
"(id integer primary key, data varchar2(50))")
conn.execute("create synonym test_table_syn "
"for test_table@%s" % cls.dblink)
@classmethod
def teardown_class(cls):
with testing.db.connect() as conn:
conn.execute("drop synonym test_table_syn")
conn.execute("drop table test_table")
def test_hello_world(self):
"""test that the synonym/dblink is functional."""
testing.db.execute("insert into test_table_syn (id, data) "
"values (1, 'some data')")
eq_(
testing.db.execute("select * from test_table_syn").first(),
(1, 'some data')
)
def test_reflection(self):
"""test the resolution of the synonym/dblink. """
m = MetaData()
t = Table('test_table_syn', m, autoload=True,
autoload_with=testing.db, oracle_resolve_synonyms=True)
eq_(list(t.c.keys()), ['id', 'data'])
eq_(list(t.primary_key), [t.c.id])
|
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_space_comm_chiss_assassin_m.iff"
result.attribute_template_id = 9
result.stfName("npc_name","chiss_patron")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('open_humans', '0005_userevent'),
]
operations = [
migrations.AddField(
model_name='userevent',
name='event_type',
field=models.CharField(default='', max_length=32),
preserve_default=False,
),
]
|
import re
from pkg_resources import get_distribution, DistributionNotFound
from setuptools import setup, find_packages
long_description = """A library for image augmentation in machine learning experiments, particularly convolutional
neural networks. Supports the augmentation of images, keypoints/landmarks, bounding boxes, heatmaps and segmentation
maps in a variety of different ways."""
INSTALL_REQUIRES = [
"six",
"numpy>=1.15",
"scipy",
"Pillow",
"matplotlib",
"scikit-image>=0.14.2",
"opencv-python-headless",
"imageio<=2.6.1; python_version<'3.5'",
"imageio; python_version>='3.5'",
"Shapely"
]
ALT_INSTALL_REQUIRES = {
"opencv-python-headless": ["opencv-python", "opencv-contrib-python", "opencv-contrib-python-headless"],
}
def check_alternative_installation(install_require, alternative_install_requires):
"""If some version version of alternative requirement installed, return alternative,
else return main.
"""
for alternative_install_require in alternative_install_requires:
try:
alternative_pkg_name = re.split(r"[!<>=]", alternative_install_require)[0]
get_distribution(alternative_pkg_name)
return str(alternative_install_require)
except DistributionNotFound:
continue
return str(install_require)
def get_install_requirements(main_requires, alternative_requires):
"""Iterates over all install requires
If an install require has an alternative option, check if this option is installed
If that is the case, replace the install require by the alternative to not install dual package"""
install_requires = []
for main_require in main_requires:
if main_require in alternative_requires:
main_require = check_alternative_installation(main_require, alternative_requires.get(main_require))
install_requires.append(main_require)
return install_requires
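# Worked example (hypothetical environment): if "opencv-contrib-python" is
# already present, check_alternative_installation() finds it via
# get_distribution() and get_install_requirements() swaps the default
# "opencv-python-headless" entry for it, so pip does not pull in a second,
# conflicting OpenCV binding:
#
#     get_install_requirements(["six", "opencv-python-headless"],
#                              ALT_INSTALL_REQUIRES)
#     # -> ["six", "opencv-contrib-python"]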
INSTALL_REQUIRES = get_install_requirements(INSTALL_REQUIRES, ALT_INSTALL_REQUIRES)
setup(
name="imgaug",
version="0.4.0",
author="Alexander Jung",
author_email="kontakt@ajung.name",
url="https://github.com/aleju/imgaug",
download_url="https://github.com/aleju/imgaug/archive/0.4.0.tar.gz",
install_requires=INSTALL_REQUIRES,
packages=find_packages(),
include_package_data=True,
package_data={
"": ["LICENSE", "README.md", "requirements.txt"],
"imgaug": ["DejaVuSans.ttf", "quokka.jpg", "quokka_annotations.json", "quokka_depth_map_halfres.png"],
"imgaug.checks": ["README.md"]
},
license="MIT",
description="Image augmentation library for deep neural networks",
long_description=long_description,
keywords=["augmentation", "image", "deep learning", "neural network", "CNN", "machine learning",
"computer vision", "overfitting"],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Image Recognition",
"Topic :: Software Development :: Libraries :: Python Modules"
]
)
|
import datetime
import logging
from apscheduler.schedulers.background import BackgroundScheduler
from pajbot import utils
log = logging.getLogger(__name__)
class ScheduledJob:
def __init__(self, job):
self.job = job
def pause(self, *args, **kwargs):
if self.job:
self.job.pause(*args, **kwargs)
def resume(self, *args, **kwargs):
if self.job:
self.job.resume(*args, **kwargs)
def remove(self, *args, **kwargs):
if self.job:
self.job.remove(*args, **kwargs)
class ScheduleManager:
base_scheduler = None
@staticmethod
def init():
if not ScheduleManager.base_scheduler:
ScheduleManager.base_scheduler = BackgroundScheduler(daemon=True)
ScheduleManager.base_scheduler.start()
@staticmethod
    def execute_now(method, args=None, kwargs=None, scheduler=None):
if scheduler is None:
scheduler = ScheduleManager.base_scheduler
if scheduler is None:
raise ValueError("No scheduler available")
job = scheduler.add_job(method, "date", run_date=utils.now(), args=args, kwargs=kwargs)
return ScheduledJob(job)
@staticmethod
    def execute_delayed(delay, method, args=None, kwargs=None, scheduler=None):
if scheduler is None:
scheduler = ScheduleManager.base_scheduler
if scheduler is None:
raise ValueError("No scheduler available")
job = scheduler.add_job(
method, "date", run_date=utils.now() + datetime.timedelta(seconds=delay), args=args, kwargs=kwargs
)
return ScheduledJob(job)
@staticmethod
    def execute_every(interval, method, args=None, kwargs=None, scheduler=None, jitter=None):
if scheduler is None:
scheduler = ScheduleManager.base_scheduler
if scheduler is None:
raise ValueError("No scheduler available")
job = scheduler.add_job(method, "interval", seconds=interval, args=args, kwargs=kwargs, jitter=jitter)
return ScheduledJob(job)
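# Illustrative usage (a sketch; `poll` is a hypothetical callable):
#
#     ScheduleManager.init()
#     ScheduleManager.execute_delayed(5, lambda: log.info("5 seconds later"))
#     job = ScheduleManager.execute_every(60, poll, kwargs={"retry": True})
#     job.pause()
#
# execute_* returns a ScheduledJob wrapper, so callers can pause/resume/remove
# the job without touching apscheduler directly.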
|
import datetime
from order.models.order import Order, OrderProduct, OrderShop
class OrderService():
    @staticmethod
    def list():
        result = Order.all()
        return dict(result)
    @staticmethod
    def create(shop, price, total):
        result = Order.create(
            price=price,
            total=total,
            created_at=datetime.datetime.now(),
            updated_at=datetime.datetime.now(),
            shop=shop
        )
        return dict(result)
    @staticmethod
    def update(id, shop, price, total):
        result = Order.get(id).update(
            price=price,
            total=total,
            updated_at=datetime.datetime.now(),
            shop=shop
        )
        return dict(result)
    @staticmethod
    def delete(id):
        result = Order.get(id).delete()
        return dict(result)
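# Illustrative usage (a sketch; assumes an Order model exposing .all(),
# .create(), .get().update() and .get().delete() as called above, and that
# `some_shop` is an existing OrderShop instance):
#
#     OrderService.create(shop=some_shop, price=100, total=2)
#     OrderService.update(1, shop=some_shop, price=120, total=2)
#     OrderService.delete(1)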
|
import question_template
game_type = 'find_the_failure'
source_language = 'python'
parameter_list = [
['$x1','int'],['$x2','int']
]
tuple_list = [
['py_min_ff_', [1,None],[None,None],[None,7]]
]
global_code_template = '''\
xX import sys
xX
d \'\'\'
d purpose
d return the smaller of a and b
d precondition
d none
d examples
d min(1,2) returns 1
d min(1,1) returns 1
d min(1,0) returns 0
d \'\'\'
dxX def min(a,b):
dxX if a < b:
dxX return a
dxX else:
x return b
dX return a
dxX
'''
main_code_template = '''\
dxX print min($x1,$x2)
'''
argv_template = ''
stdin_template = ''
stdout_template = ''
question = question_template.Question_template(game_type,source_language,
parameter_list,tuple_list,global_code_template,main_code_template,
argv_template,stdin_template,stdout_template)
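# Note on the templates (inferred from the prefixes, not from a documented
# spec): each template line begins with a tag such as "d", "x" or "xX" that
# appears to select which generated program variant includes that line, so a
# single template expands to several versions of min() that differ only in
# what the else branch returns -- the difference the "find_the_failure" game
# asks the player to spot.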
|
def pytest_addoption(parser):
group = parser.getgroup("JIT options")
group.addoption('--slow', action="store_true",
default=False, dest="run_slow_tests",
help="run all the compiled tests (instead of just a few)")
|
from .resource import Resource
class LoadBalancer(Resource):
"""LoadBalancer resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict
:param frontend_ip_configurations: Object representing the frontend IPs to
be used for the load balancer
:type frontend_ip_configurations: list of :class:`FrontendIPConfiguration
<azure.mgmt.network.v2016_09_01.models.FrontendIPConfiguration>`
:param backend_address_pools: Collection of backend address pools used by
a load balancer
:type backend_address_pools: list of :class:`BackendAddressPool
<azure.mgmt.network.v2016_09_01.models.BackendAddressPool>`
    :param load_balancing_rules: Object collection representing the load
     balancing rules used by the load balancer
:type load_balancing_rules: list of :class:`LoadBalancingRule
<azure.mgmt.network.v2016_09_01.models.LoadBalancingRule>`
:param probes: Collection of probe objects used in the load balancer
:type probes: list of :class:`Probe
<azure.mgmt.network.v2016_09_01.models.Probe>`
:param inbound_nat_rules: Collection of inbound NAT Rules used by a load
balancer. Defining inbound NAT rules on your load balancer is mutually
exclusive with defining an inbound NAT pool. Inbound NAT pools are
referenced from virtual machine scale sets. NICs that are associated with
individual virtual machines cannot reference an Inbound NAT pool. They
have to reference individual inbound NAT rules.
:type inbound_nat_rules: list of :class:`InboundNatRule
<azure.mgmt.network.v2016_09_01.models.InboundNatRule>`
:param inbound_nat_pools: Defines an external port range for inbound NAT
to a single backend port on NICs associated with a load balancer. Inbound
NAT rules are created automatically for each NIC associated with the Load
Balancer using an external port from this range. Defining an Inbound NAT
pool on your Load Balancer is mutually exclusive with defining inbound Nat
rules. Inbound NAT pools are referenced from virtual machine scale sets.
NICs that are associated with individual virtual machines cannot reference
an inbound NAT pool. They have to reference individual inbound NAT rules.
:type inbound_nat_pools: list of :class:`InboundNatPool
<azure.mgmt.network.v2016_09_01.models.InboundNatPool>`
:param outbound_nat_rules: The outbound NAT rules.
:type outbound_nat_rules: list of :class:`OutboundNatRule
<azure.mgmt.network.v2016_09_01.models.OutboundNatRule>`
:param resource_guid: The resource GUID property of the load balancer
resource.
:type resource_guid: str
    :param provisioning_state: Gets the provisioning state of the load
     balancer resource. Possible values are: 'Updating', 'Deleting', and
     'Failed'.
:type provisioning_state: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'frontend_ip_configurations': {'key': 'properties.frontendIPConfigurations', 'type': '[FrontendIPConfiguration]'},
'backend_address_pools': {'key': 'properties.backendAddressPools', 'type': '[BackendAddressPool]'},
'load_balancing_rules': {'key': 'properties.loadBalancingRules', 'type': '[LoadBalancingRule]'},
'probes': {'key': 'properties.probes', 'type': '[Probe]'},
'inbound_nat_rules': {'key': 'properties.inboundNatRules', 'type': '[InboundNatRule]'},
'inbound_nat_pools': {'key': 'properties.inboundNatPools', 'type': '[InboundNatPool]'},
'outbound_nat_rules': {'key': 'properties.outboundNatRules', 'type': '[OutboundNatRule]'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, location=None, tags=None, frontend_ip_configurations=None, backend_address_pools=None, load_balancing_rules=None, probes=None, inbound_nat_rules=None, inbound_nat_pools=None, outbound_nat_rules=None, resource_guid=None, provisioning_state=None, etag=None):
super(LoadBalancer, self).__init__(id=id, location=location, tags=tags)
self.frontend_ip_configurations = frontend_ip_configurations
self.backend_address_pools = backend_address_pools
self.load_balancing_rules = load_balancing_rules
self.probes = probes
self.inbound_nat_rules = inbound_nat_rules
self.inbound_nat_pools = inbound_nat_pools
self.outbound_nat_rules = outbound_nat_rules
self.resource_guid = resource_guid
self.provisioning_state = provisioning_state
self.etag = etag
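# Illustrative construction (a sketch; values are placeholders and the nested
# model classes come from the same v2016_09_01 models package):
#
#     lb = LoadBalancer(
#         location='westus',
#         tags={'env': 'dev'},
#         frontend_ip_configurations=[FrontendIPConfiguration(...)],
#     )
#
# `name` and `type` are read-only (see _validation above) and are populated
# by the server, never by the caller.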
|
from django.utils.text import slugify
from rest_framework import serializers
from fragments.models import Post, Fragment
class FragmentSerializer(serializers.ModelSerializer):
"""
Serializer for Fragment instances
"""
class Meta:
model = Fragment
fields = (
'post',
'fragment_type',
'order',
'content',
'is_sanitized',
'credit',
'caption',
'language',
'embed_type',
'created',
'updated',
)
        read_only_fields = (
'created',
'updated',
)
class PostSerializer(serializers.ModelSerializer):
"""
Serializer for Post instances
"""
fragments = FragmentSerializer(many=True, required=False)
class Meta:
model = Post
fields = (
'title',
'slug',
'tldr',
'author',
'fragments',
'org',
'created',
'updated',
)
        read_only_fields = (
'created',
'updated',
)
extra_kwargs = {
'slug': {'required': False}
}
def validate(self, attrs):
author = attrs.get('author')
org = attrs.get('org')
if author and org:
if not org.is_member(author):
raise serializers.ValidationError({
'organization': 'Author is not part of organization: %s' % (
org.name)
})
title = attrs.get('title')
slug = attrs.get('slug')
if title and not slug:
            attrs['slug'] = slugify(title)  # title is already a text string
return attrs
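# Illustrative behavior of PostSerializer.validate (a sketch; values are
# hypothetical): given attrs with title="Hello World" and no slug, validate()
# fills attrs['slug'] = "hello-world"; if author is set but
# org.is_member(author) is False, a ValidationError is raised under the
# 'organization' key.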
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('constituencies', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='constituency',
name='count',
field=models.IntegerField(),
),
]
|
from haystack import indexes
from dictionary.models import Word
class WordIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
finnish = indexes.CharField(model_attr='finnish')
def get_model(self):
return Word
def index_queryset(self, using=None):
"""Used when the entire index for model is updated."""
return self.get_model().objects.all()
|
import time
import bs4
import requests
def get_news(page, entrant=1):
while True:
try:
headers = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5)',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'accept-charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'accept-encoding': 'gzip,deflate,sdch',
'accept-language': 'it-IT',
}
request = requests.get(page.format(entrant), headers=headers)
if request.status_code == 200:
return bs4.BeautifulSoup(request.text, 'html.parser')
        except requests.RequestException:
            time.sleep(15)
prefix_disim = 'http://www.disim.univaq.it/main/{}'
elenco_prof = get_news("http://www.disim.univaq.it/main/people.php").find_all('li')[44:165]
string_to_print = ""
for element in elenco_prof:
entry = element.a['href']
soup = get_news(prefix_disim, entry)
    courses = soup.find_all('div', 'ten columns')[-1:]  # keep only the last matching div
courses_to_print = ""
if len(courses) == 1:
for course in courses[0].find_all('a')[:-1]:
courses_to_print += """{\n
"nome": " """ + course.string + """ ",
"link": " """ + course['href'] + """ "
},"""
string_to_print += """{\n
"nome": " """ + soup.find('h1').string + """ ",
"email": " """ + (soup.find('div', 'icon_mail').get_text() or 'Non disponibile') + """ ",
"telefono": " """ + (soup.find('div', 'icon_phone').get_text() or 'Non disponibile') + """ ",
"stanza": " """ + ('Non disponibile' if (soup.find('div', 'icon_loc').get_text() == ' , Room ') else soup.find('div', 'icon_loc').get_text()) + """ ",
"CV": " """ + (soup.find('div', 'icon_cv').a['href'] or '<i>Non disponibile</i>') + """ ",
"corsi": [""" + courses_to_print + """]
}, """
print(string_to_print)
|
import socket
import os
def autodetect():
    """
    Returns
    -------
    bool
        True if the current platform matches, otherwise False
    """
    fqdn = socket.getfqdn()
    if ".cheyenne" not in fqdn:
        return False
    dirs = os.path.abspath(__file__).split('/')
    sweet_src_dirname = dirs[-5]
    # Autodetect based on the source folder name of the MULE source tree.
    # This helps to utilize different versions of MULE on cheyenne.
    if sweet_src_dirname == "sweet_intel":
        return True
    return False
if __name__ == "__main__":
print("Autodetect: "+str(autodetect()))
|
"""
Created on Fri Jul 11 11:07:07 2014
@author: Mark
"""
for x in range(56):
    max_a = x/6+1
    max_b = x/9+1
    max_c = x/20+1
    for c in range(max_c):
        for b in range(max_b):
            for a in range(max_a):
if 6*a+9*b+20*c-x == 0:
print x,'=6*',a,'+9*',b,'+20*',c
|
def foo(a):
if a:
print("hit!")
else:
print("nothing!")
def generator():
x = yield 42
print(x)
x = yield
print(x)
x = 12 + (yield 42)
print(x)
x = 12 + (yield)
print(x)
foo((yield 42))
foo((yield))
gen = generator()
a = next(gen)
print(a) # 42
print("+++++++++++++++++++++++++++++++++++++++++++")
gen.send(100)
print("+++++++++++++++++++++++++++++++++++++++++++")
b = gen.send(1)
print(b) # 42
gen.send(2)
gen.send(3)
c = gen.send(4)
print(c) # None: the final bare (yield) in foo((yield)) yields None
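# Hand-traced expected output (values flow both ways through send()):
#   42    <- a: the first `yield 42`
#   (separator)
#   100   <- send(100) resumes the generator, which prints it
#   (separator)
#   1     <- printed inside the generator by send(1)
#   42    <- b: the yield inside `12 + (yield 42)`
#   14    <- 12 + 2
#   15    <- 12 + 3
#   hit!  <- foo(4): the value sent by send(4)
#   None  <- c: the final bare `(yield)` yields None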
|
from check import *
import os
import bitarray
def opencode(dict):
f=open("kodai","r")
lines = f.readlines()
praeita = ''
for x in lines:
if x!='\n':
x = x.rstrip('\n')
if "++" in x:
dict["+"] = x[2:]
elif praeita != '':
dict[praeita]= x[1:]
praeita = ''
else:
x = x.split("+")
try:
dict[x[0]] = x[1]
except IndexError:
praeita = x[0]
return dict
def kodas(blist,dict,mystr):
    # Convert a bit list to a '0'/'1' string and map complete codes back to symbols.
    tmp = ""
for x in blist:
if x == True:
tmp+='1'
else:
tmp+='0'
for x in dict:
if tmp == dict.get(x):
mystr += x
tmp = ""
return mystr
def getcode(myfile,code,dict):
tmp = ''
with open('filelength','r') as f:
tmp += f.readline()
length = int(tmp)
bitarr = bitarray.bitarray()
with open(myfile,'rb') as f:
bitarr.fromfile(f)
tmp = ''
allcode = ''
keyval(dict,'00')
os.system('rm -f 2kodas')
with open('2kodas','w') as fw:
for x in bitarr.to01():
allcode += str(x)
tmp+=str(x)
if str(tmp) in dict.values():
for key, value in dict.iteritems():
if value == tmp:
#if key != '\n':
# key = key.rstrip('\n')
fw.write(key)
code += len(key)
tmp = ''
                    if code == length:
                        print 'stopped at: ', code
                        #print 'code ', code, 'length: ', length
                        return code  # the with block closes fw on return
    return code
def decode():
arr = {}
myfile = 'binfile.bin'
mycode = 0
arr = opencode(arr)
check(arr)
mycode = getcode(myfile,mycode,arr)
|
from neuron import Neuron
from PIL import Image
import os
import sys
import glob
import pickle
threshold = 1
width = 35
l_rate = 0.005
err_margin = 0.1 # does nothing so far
a_func = "step"
f_stretch = 1
def main():
print "=============="
print "TRAINING PHASE"
print "=============="
neurons = list()
try: # if first argument is provided, load as neuron data
f = open(sys.argv[1], "r")
neurons = pickle.load(f)
print "Loaded " + sys.argv[1] + " as neuron data."
except IndexError: # if no neuron data file is provided, ask to train anew
yn = raw_input("No neuron data specified: train anew? (y/n) ")
if yn.lower()[0] == "y":
for x in range(10):
neurons.append(Neuron(width, a_func, f_stretch, threshold, l_rate, err_margin))
train(neurons)
out_name = raw_input("Save name: ")
out_file = open("data/" + out_name, "w")
pickle.dump(neurons, out_file)
print "Neuron data file saved in 'data/" + out_name + "'."
print "Proceeding with current training."
else:
print "USAGE: to train anew : python digit_recog.py"
print " to load data : python digit_recog.py data/your_data_file"
print "Exiting."
exit(1)
except IOError: # if neuron data file is unreadable, print error msg and exit
print "ERROR: neuron data file cannot be read."
print "USAGE: to train anew : python digit_recog.py"
print " to load data : python digit_recog.py data/your_data_file"
exit(1)
print "================="
print "DIGIT RECOGNITION"
print "================="
# list compatible images
compat = glob.glob("*.png")
if len(compat) > 0:
print "Compatible images found:"
print "---"
for png in compat:
print png
print "---"
else:
print "No compatible images found in current directory."
print "---"
# prompt to input image name
while True:
try:
img_name = raw_input("Input image filename (Ctrl+C to exit): ")
except KeyboardInterrupt:
print
print "Exiting."
exit(0)
# process image
img = Image.open(img_name)
counter = 0
ans = None
for x in range(len(neurons)):
n = neurons[x]
feed(img, n)
n.activate()
if n.get_output() == 1.0:
print "Neuron %i is responding." %x
ans = str(x)
counter += 1
if counter == 1: # if one neuron responded
print img_name + " has been recognized as a " + ans + "."
else: # if multiple or no neurons responded
print img_name + " was unrecognizable."
def old_main():
# neuron attributes
threshold = 1
width = 35
l_rate = 0.005
err_margin = 0.1 # does nothing so far
a_func = "step"
f_stretch = 1
# create list of neurons, one to be trained for each number
neurons = list()
for x in range(10):
neurons.append(Neuron(width, a_func, f_stretch, threshold, l_rate, err_margin))
# train or load from file if save file exists
train(neurons)
# trial
test_img = Image.open(sys.argv[1])
for x in range(len(neurons)):
n = neurons[x]
feed(test_img, n)
n.activate()
if n.get_output() == 1.0:
print "NEURON %i IS RESPONDING" %x
def train(neurons):
"""
Use data from training-sets directory to train.
"""
for digit in range(len(neurons)):
print "TRAINING FOR %i" %digit
n = neurons[digit]
ls1 = os.listdir("training-sets/")
ls1 = sorted(ls1)
errors = 1
counter = 0
while errors > 0:
errors = 0
for i in ls1: # for every directory in training sets
### SET EXPECTED OUTPUT FOR CURRENT DIRECTORY
try:
exp_out = 0 # for other digits, training as not recognized (0)
if int(i) == digit:
exp_out = 1 # corresponding digit, training as recognized (1)
                except ValueError:
                    continue # skip directories that are not named as an integer
### FOR EACH IMAGE, FEED PIXEL SET AS INPUT AND USE exp_out TO CHECK
dir1 = "training-sets/%s/" %i
ls2 = os.listdir(dir1)
ls2 = sorted(ls2)
for j in ls2: # for every image
img = Image.open(dir1 + j)
feed(img, n)
counter += 1
errors += n.train_step(exp_out) # train with inputs
# end of j-loop
# end of i-loop
print "Errors: %i" %errors
# end of while loop
print "Images processed: %i" %counter
# end of for loop
def feed(img, n):
"""
Sets the inputs of neuron according to pixel data provided of image
PARAMS:
img : Image to take pixel data from
n : neuron
"""
pixels = img.load()
width = img.size[0]
height = img.size[1]
# distill pixel values into input nodes
for w in range(width):
for h in range(height):
n.set_input(w * height + h, pixels[w,h][0]) # takes just the red value
if __name__ == "__main__":
main()
|
"""Page model for Automation/Anisble/Credentials"""
import attr
from navmazing import NavigateToAttribute
from navmazing import NavigateToSibling
from widgetastic.exceptions import NoSuchElementException
from widgetastic.utils import ParametrizedLocator
from widgetastic.widget import ConditionalSwitchableView
from widgetastic.widget import ParametrizedView
from widgetastic.widget import Text
from widgetastic.widget import TextInput
from widgetastic.widget import View
from widgetastic_patternfly import BootstrapSelect
from widgetastic_patternfly import Button
from widgetastic_patternfly import Dropdown
from widgetastic_patternfly import Input
from cfme.base import Server
from cfme.base.login import BaseLoggedInPage
from cfme.common import Taggable
from cfme.common import TagPageView
from cfme.exceptions import ItemNotFound
from cfme.modeling.base import BaseCollection
from cfme.modeling.base import BaseEntity
from cfme.utils.appliance.implementations.ui import CFMENavigateStep
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.appliance.implementations.ui import navigator
from cfme.utils.wait import wait_for
from widgetastic_manageiq import PaginationPane
from widgetastic_manageiq import ParametrizedSummaryTable
from widgetastic_manageiq import Table
class CredentialsBaseView(BaseLoggedInPage):
title = Text(locator=".//div[@id='main-content']//h1")
@property
def in_ansible_credentials(self):
return (
self.logged_in_as_current_user and
self.navigation.currently_selected == ["Automation", "Ansible", "Credentials"]
)
class CredentialsListView(CredentialsBaseView):
@View.nested
class toolbar(View): # noqa
configuration = Dropdown("Configuration")
policy = Dropdown(text='Policy')
credentials = Table(".//div[@id='miq-gtl-view']//table")
paginator = PaginationPane()
@property
def is_displayed(self):
return self.in_ansible_credentials and self.title.text == "Credentials"
class CredentialDetailsView(CredentialsBaseView):
@View.nested
class toolbar(View): # noqa
configuration = Dropdown("Configuration")
download = Button(title="Print or export summary")
policy = Dropdown(text='Policy')
@View.nested
class entities(View): # noqa
summary = ParametrizedView.nested(ParametrizedSummaryTable)
@property
def is_displayed(self):
return (
self.in_ansible_credentials and
self.title.text == self.context["object"].expected_details_title
)
class CredentialFormView(CredentialsBaseView):
name = Input(name="name")
credential_form = ConditionalSwitchableView(reference="credential_type")
@credential_form.register("<Choose>", default=True)
class CredentialFormDefaultView(View):
pass
@credential_form.register("Machine")
class CredentialFormMachineView(View):
username = Input(locator='.//input[@title="Username for this credential"]')
        password = Input(
            locator='.//input[@title="Password for this credential" and not(@disabled)]'
        )
        private_key = TextInput(
            locator='.//textarea[@title="RSA or DSA private key to be used instead of password"'
                    ' and not(@disabled)]'
        )
        private_key_phrase = Input(
            locator='.//input[@title="Passphrase to unlock SSH private key if encrypted"'
                    ' and not(@disabled)]'
        )
privilage_escalation = BootstrapSelect("{{name}}")
privilage_escalation_username = Input(
locator='.//input[@title="Privilege escalation username"]'
)
        privilage_escalation_password = Input(
            locator='.//input[@title="Password for privilege escalation method"'
                    ' and not(@disabled)]'
        )
@credential_form.register("Scm")
class CredentialFormScmView(View):
username = Input(locator='.//input[@title="Username for this credential"]')
        password = Input(
            locator='.//input[@title="Password for this credential" and not(@disabled)]'
        )
        private_key = TextInput(
            locator='.//textarea[@title="RSA or DSA private key to be used instead of password"'
                    ' and not(@disabled)]'
        )
        private_key_phrase = Input(
            locator='.//input[@title="Passphrase to unlock SSH private key if encrypted"'
                    ' and not(@disabled)]'
        )
@credential_form.register("Vault")
class CredentialFormVaultView(View):
        vault_password = Input(
            locator='.//input[@title="Vault password" and not(@disabled)]'
        )
@credential_form.register("Amazon")
class CredentialFormAmazonView(View):
access_key = Input(
locator='.//input[@title="AWS Access Key for this credential"]'
)
        secret_key = Input(
            locator='.//input[@title="AWS Secret Key for this credential" and not(@disabled)]'
        )
        sts_token = Input(
            locator='.//input[@title="Security Token Service(STS) Token for this credential"'
                    ' and not(@disabled)]'
        )
@credential_form.register("VMware")
class CredentialFormVMwareView(View):
username = Input(locator='.//input[@title="Username for this credential"]')
        password = Input(
            locator='.//input[@title="Password for this credential" and not(@disabled)]'
        )
vcenter_host = Input(
locator='.//input[@title="The hostname or IP address of the vCenter Host"]'
)
@credential_form.register("OpenStack")
class CredentialFormOpenStackView(View):
username = Input(
locator='.//input[@title="The username to use to connect to OpenStack"]'
)
        password = Input(
            locator='.//input[@title="The password or API'
                    ' key to use to connect to OpenStack" and not(@disabled)]'
        )
authentication_url = Input(
locator='.//input[@title="The host to authenticate with. '
'For example, https://openstack.business.com/v2.0"]'
)
project = Input(
locator='.//input[@title="This is the tenant name. This value '
'is usually the same as the username"]'
)
domain = Input(
locator='.//input[@title="OpenStack domains define administrative '
'boundaries. It is only needed for Keystone v3 authentication URLs"]'
)
@credential_form.register("Red Hat Virtualization")
class CredentialFormRHVView(View):
username = Input(locator='.//input[@title="Username for this credential"]')
        password = Input(
            locator='.//input[@title="Password for this credential" and not(@disabled)]'
        )
host = Input(locator='.//input[@title="The host to authenticate with"]')
@credential_form.register("Google Compute Engine")
class CredentialFormGCEView(View):
service_account = Input(
locator='.//input[@title="The email address assigned to '
'the Google Compute Engine service account"]'
)
priv_key = TextInput(
locator='.//textarea[@title="Contents of the PEM file associated with '
'the service account email"]'
)
project = Input(
locator='.//input[@title="The GCE assigned identification. It is '
"constructed as two words followed by a three digit number, such as: "
'squeamish-ossifrage-123"]'
)
@credential_form.register("Azure")
class CredentialFormAzureView(View):
username = Input(
locator='.//input[@title="The username to use to connect to the '
'Microsoft Azure account"]'
)
password = Input(
locator='.//input[@title="The password to use to connect to the '
'Microsoft Azure account"]'
)
subscription_id = Input(
locator='.//input[@title="The Subscription UUID for the Microsoft Azure account"]'
)
tenant_id = Input(
locator='.//input[@title="The Tenant ID for the Microsoft Azure account"]'
)
client_secret = Input(
locator='.//input[@title="The Client Secret for the Microsoft Azure account"]'
)
client_id = Input(
locator='.//input[@title="The Client ID for the Microsoft Azure account"]'
)
@credential_form.register("Network")
class CredentialFormNetworkView(View):
username = Input(locator='.//input[@title="Username for this credential"]')
password = Input(locator='.//input[@title="Password for this credential"]')
authorize = Input(
locator='.//input[@title="Whether to use the authorize mechanism"]'
)
authorize_password = Input(
locator='.//input[@title="Password used by the authorize mechanism"]'
)
        ssh_key = TextInput(
            locator='.//textarea[@title="RSA or DSA private key to be used instead of password"'
                    ' and not(@disabled)]'
        )
        private_key_phrase = Input(
            locator='.//input[@title="Passphrase to unlock SSH private key if encrypted"'
                    ' and not(@disabled)]'
        )
cancel_button = Button("Cancel")
class CredentialAddView(CredentialFormView):
credential_type = BootstrapSelect("type")
add_button = Button("Add")
@property
def is_displayed(self):
return (
self.in_ansible_credentials and
self.title.text == "Add a new Credential"
)
class CredentialEditView(CredentialFormView):
@ParametrizedView.nested
class input(ParametrizedView): # noqa
PARAMETERS = ("title", )
field_enable = Text(ParametrizedLocator(
".//*[(self::input or self::textarea) and "
"@title={title|quote}]/../../a[text()='Update']"))
field_disable = Text(ParametrizedLocator(
".//*[(self::input or self::textarea) and "
"@title={title|quote}]/../../a[text()='Cancel']"))
def toggle(self):
if self.field_enable.is_displayed:
self.field_enable.click()
elif self.field_disable.is_displayed:
self.field_disable.click()
credential_type = Text(locator=".//label[normalize-space(.)='Credential type']/../div")
save_button = Button("Save")
reset_button = Button("Reset")
@property
def is_displayed(self):
return (
self.in_ansible_credentials and
self.title.text == 'Edit a Credential "{}"'.format(self.context["object"].name)
)
def before_fill(self, values):
for name in self.widget_names:
if name not in values or values[name] is None:
continue
widget = getattr(self, name)
title = self.browser.get_attribute("title", widget)
try:
self.input(title).toggle()
except NoSuchElementException:
continue
def machine_credentials(credentials):
return {
"username": credentials.get("username"),
"password": credentials.get("password"),
"private_key": credentials.get("private_key"),
"private_key_phrase": credentials.get("private_key_phrase"),
"privilage_escalation": credentials.get("privilage_escalation"),
"privilage_escalation_username": credentials.get("privilage_escalation_username"),
"privilage_escalation_password": credentials.get("privilage_escalation_password"),
}
def scm_credentials(credentials):
return {
"username": credentials.get("username"),
"password": credentials.get("password"),
"private_key": credentials.get("private_key"),
"private_key_phrase": credentials.get("private_key_phrase")
}
def vault_credentials(credentials):
return {
"vault_password": credentials.get("vault_password")
}
def amazon_credentials(credentials):
return {
"access_key": credentials.get("access_key"),
"secret_key": credentials.get("secret_key"),
"sts_token": credentials.get("sts_token"),
}
def azure_credentials(credentials):
return {
"username": credentials.get("username"),
"password": credentials.get("password"),
"subscription_id": credentials.get("subscription_id"),
"tenant_id": credentials.get("tenant_id"),
"client_secret": credentials.get("client_secret"),
"client_id": credentials.get("client_id"),
}
def network_credentials(credentials):
return {
"username": credentials.get("username"),
"password": credentials.get("password"),
"authorize": credentials.get("authorize"),
"authorize_password": credentials.get("authorize_password"),
"ssh_key": credentials.get("ssh_key"),
"private_key_phrase": credentials.get("private_key_phrase"),
}
def vmware_credentials(credentials):
return {
"username": credentials.get("username"),
"password": credentials.get("password"),
"vcenter_host": credentials.get("vcenter_host")
}
def openstack_credentials(credentials):
return {
"username": credentials.get("username"),
"password": credentials.get("password"),
"authentication_url": credentials.get("authentication_url"),
"project": credentials.get("project"),
"domain": credentials.get("domain")
}
def gce_credentials(credentials):
return {
"service_account": credentials.get("service_account"),
"priv_key": credentials.get("priv_key"),
"project": credentials.get("project")
}
def rhv_credentials(credentials):
return {
"username": credentials.get("username"),
"password": credentials.get("password"),
"host": credentials.get("host")
}
class Credential(BaseEntity, Taggable):
"""A class representing one Embedded Ansible credential in the UI."""
# TODO - This is one of the only classes that hasn't been converted to attrs
# The class needs to be reworked and split into multiple subtypes. The kwargs
# is also problematic for attrs
def __init__(self, collection, name, credential_type, **credentials):
super(Credential, self).__init__(collection)
self.name = name
self.credential_type = credential_type
for key, value in credentials.items():
setattr(self, key, value)
__repr__ = object.__repr__
def update(self, updates):
credential_type_map = {
"Machine": machine_credentials(updates),
"Scm": scm_credentials(updates),
"Vault": vault_credentials(updates),
"Amazon": amazon_credentials(updates),
"Azure": azure_credentials(updates),
"Network": network_credentials(updates),
"VMware": vmware_credentials(updates),
"OpenStack": openstack_credentials(updates),
"Red Hat Virtualization": rhv_credentials(updates),
"Google Compute Engine": gce_credentials(updates)
}
edit_page = navigate_to(self, "Edit")
changed = edit_page.fill({"name": updates.get("name")})
form_changed = edit_page.credential_form.fill(credential_type_map[self.credential_type])
if changed or form_changed:
edit_page.save_button.click()
else:
edit_page.cancel_button.click()
view = self.create_view(CredentialsListView)
        # Fixed settle-time wait (always times out); without it a
        # StaleElementReferenceException can be raised.
        wait_for(lambda: False, silent_failure=True, timeout=5)
assert view.is_displayed
view.flash.assert_no_error()
if changed or form_changed:
view.flash.assert_message(
'Modification of Credential "{}" has been successfully queued.'.format(
updates.get("name", self.name)))
else:
view.flash.assert_message(
'Edit of Credential "{}" was canceled by the user.'.format(self.name))
def delete(self):
view = navigate_to(self, "Details")
view.toolbar.configuration.item_select("Remove this Credential from Inventory",
handle_alert=True)
credentials_list_page = self.create_view(CredentialsListView)
        # Fixed settle-time wait (always times out); without it a
        # StaleElementReferenceException can be raised.
        wait_for(lambda: False, silent_failure=True, timeout=5)
assert credentials_list_page.is_displayed
credentials_list_page.flash.assert_success_message(
'Deletion of Credential "{}" was successfully initiated.'.format(self.name))
wait_for(
lambda: not self.exists,
delay=10,
fail_func=credentials_list_page.browser.selenium.refresh,
timeout=300
)
@attr.s
class CredentialsCollection(BaseCollection):
"""Collection object for the :py:class:`Credential`."""
ENTITY = Credential
def create(self, name, credential_type, **credentials):
add_page = navigate_to(self, "Add")
credential_type_map = {
"Machine": machine_credentials(credentials),
"Scm": scm_credentials(credentials),
"Vault": vault_credentials(credentials),
"Amazon": amazon_credentials(credentials),
"Azure": azure_credentials(credentials),
"Network": network_credentials(credentials),
"VMware": vmware_credentials(credentials),
"OpenStack": openstack_credentials(credentials),
"Red Hat Virtualization": rhv_credentials(credentials),
"Google Compute Engine": gce_credentials(credentials)
}
add_page.fill({"name": name, "credential_type": credential_type})
add_page.credential_form.fill(credential_type_map[credential_type])
add_page.add_button.click()
credentials_list_page = self.create_view(CredentialsListView)
# Without this StaleElementReferenceException can be raised
wait_for(lambda: False, silent_failure=True, timeout=5)
assert credentials_list_page.is_displayed
credentials_list_page.flash.assert_success_message(
'Add of Credential "{}" has been successfully queued.'.format(name))
credential = self.instantiate(name, credential_type, **credentials)
wait_for(
lambda: credential.exists,
fail_func=credentials_list_page.browser.selenium.refresh,
delay=5,
timeout=300)
return credential
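# Hypothetical usage sketch (the collection lookup path is an assumption, not
# taken from this file):
# credentials = appliance.collections.ansible_credentials
# cred = credentials.create("dev_machine", "Machine",
#                           username="root", password="secret")
# cred.update({"name": "dev_machine_renamed"})
# cred.delete()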
@navigator.register(Server)
class AnsibleCredentials(CFMENavigateStep):
VIEW = CredentialsListView
prerequisite = NavigateToSibling("LoggedIn")
def step(self, *args, **kwargs):
self.view.navigation.select("Automation", "Ansible", "Credentials")
@navigator.register(Credential)
class Details(CFMENavigateStep):
VIEW = CredentialDetailsView
prerequisite = NavigateToAttribute("appliance.server", "AnsibleCredentials")
def step(self, *args, **kwargs):
credentials = self.prerequisite_view.credentials
try:
for row in credentials:
if row["Name"].text == self.obj.name:
row["Name"].click()
break
else:
raise ItemNotFound
except NoSuchElementException:
raise ItemNotFound
@navigator.register(CredentialsCollection)
class Add(CFMENavigateStep):
VIEW = CredentialAddView
prerequisite = NavigateToAttribute("appliance.server", "AnsibleCredentials")
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.configuration.item_select("Add New Credential")
@navigator.register(Credential)
class Edit(CFMENavigateStep):
VIEW = CredentialEditView
prerequisite = NavigateToSibling("Details")
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.configuration.item_select("Edit this Credential")
@navigator.register(Credential, 'EditTags')
class EditTagsFromListCollection(CFMENavigateStep):
VIEW = TagPageView
prerequisite = NavigateToAttribute("appliance.server", "AnsibleCredentials")
def step(self, *args, **kwargs):
try:
row = self.prerequisite_view.paginator.find_row_on_pages(
table=self.prerequisite_view.credentials,
name=self.obj.name)
row[0].check()
except NoSuchElementException:
raise ItemNotFound('Could not locate ansible credential table row with name {}'
.format(self.obj.name))
self.prerequisite_view.toolbar.policy.item_select('Edit Tags')
|
"""Integration tests for the pull_request event."""
from __future__ import absolute_import, unicode_literals
import httpretty
from flask import json
from kwalitee.models import CommitStatus, Repository
from kwalitee.tasks import push
from hamcrest import (assert_that, equal_to, contains_string, has_length,
has_item)
from utils import MyQueue
def test_push(app, repository):
"""POST /payload (push) performs the checks"""
queue = MyQueue()
# Replace the default Redis queue
app.config["queue"] = queue
push_event = {
"commits": [{
"id": "1",
"url": "https://github.com/commits/1"
}, {
"id": "2",
"url": "https://github.com/commits/2"
}],
"repository": {
"name": "test",
"owner": {
"name": "invenio"
}
}
}
tester = app.test_client()
response = tester.post("/payload", content_type="application/json",
headers=(("X-GitHub-Event", "push"),
("X-GitHub-Delivery", "1")),
data=json.dumps(push_event))
assert_that(response.status_code, equal_to(200))
body = json.loads(response.data)
assert_that(body["payload"]["state"], equal_to("pending"))
cs = CommitStatus.query.filter_by(repository_id=repository.id,
sha="1").first()
(fn, commit_id, commit_url, status_url, config) = queue.dequeue()
assert_that(fn, equal_to(push))
assert_that(commit_id, equal_to(cs.id))
assert_that(commit_url, equal_to("https://api.github.com"
"/repos/invenio/test/commits/1"))
assert_that(status_url, contains_string("/invenio/test/commits/1"))
cs = CommitStatus.query.filter_by(repository_id=repository.id,
sha="2").first()
(fn, commit_id, commit_url, status_url, config) = queue.dequeue()
assert_that(fn, equal_to(push))
assert_that(commit_id, equal_to(cs.id))
assert_that(commit_url, equal_to("https://api.github.com"
"/repos/invenio/test/commits/2"))
assert_that(status_url, contains_string("/invenio/test/commits/2"))
def test_push_with_auto_create(app, repository):
"""POST /payload (push) performs the checks"""
queue = MyQueue()
# Replace the default Redis queue
config = dict(app.config)
app.config["queue"] = queue
app.config["AUTO_CREATE"] = True
push_event = {
"commits": [{
"id": "1",
"url": "https://github.com/commits/1"
}],
"repository": {
"name": "doe",
"owner": {
"name": "john"
}
}
}
tester = app.test_client()
response = tester.post("/payload", content_type="application/json",
headers=(("X-GitHub-Event", "push"),
("X-GitHub-Delivery", "1")),
data=json.dumps(push_event))
assert_that(response.status_code, equal_to(200))
body = json.loads(response.data)
assert_that(body["payload"]["state"], equal_to("pending"))
repo = Repository.query.filter_by(name="doe").first()
assert_that(repo)
assert_that(repo.owner.name, equal_to("john"))
app.config = config
def test_push_to_unknown_repository(app):
"""POST /payload (push) with unknown repository should fails."""
queue = MyQueue()
config = dict(app.config)
app.config["queue"] = queue
push_event = {
"commits": [{
"id": "1",
"url": "https://github.com/commits/1"
}],
"repository": {
"name": "eggs",
"owner": {
"name": "spam"
}
}
}
tester = app.test_client()
response = tester.post("/payload", content_type="application/json",
headers=(("X-GitHub-Event", "push"),
("X-GitHub-Delivery", "1")),
data=json.dumps(push_event))
assert_that(response.status_code, equal_to(200))
body = json.loads(response.data)
assert_that(body["payload"]["state"], equal_to("error"))
assert_that(body["payload"]["description"],
contains_string("spam/eggs"))
assert_that(body["payload"]["context"],
equal_to(app.config.get("CONTEXT")))
app.config = config
def test_push_valid_commit(app, repository):
"""Worker push /commits/1 is valid"""
httpretty.reset()
commit = {
"sha": 1,
"url": "https://api.github.com/commits/1",
"html_url": "https://github.com/commits/1",
"comments_url": "https://api.github.com/commits/1/comments",
"commit": {
"message": "comp: that\n\nBy: John Doe <john.doe@example.org>",
},
"files": [{
"filename": "spam/__init__.py",
"status": "added",
"raw_url": "https://github.com/raw/1/spam/__init__.py"
}, {
"filename": "spam/deleted_files.py",
"status": "removed",
"raw_url": "https://github.com/raw/1/spam/deleted_files.py"
}]
}
httpretty.register_uri(httpretty.GET,
"https://api.github.com/commits/1",
body=json.dumps(commit),
content_type="application/json")
init_py = '"""Test module."""\n'
httpretty.register_uri(httpretty.GET,
"https://github.com/raw/1/spam/__init__.py",
body=init_py,
content_type="text/plain")
status = {"id": 1, "state": "success"}
httpretty.register_uri(httpretty.POST,
"https://api.github.com/statuses/1",
status=201,
body=json.dumps(status),
content_type="application/json")
cs = CommitStatus.find_or_create(repository,
commit["sha"],
commit["url"])
assert_that(cs.is_pending())
httpretty.enable()
push(cs.id,
"https://api.github.com/commits/1",
"https://api.github.com/statuses/1",
{"COMPONENTS": ["comp"],
"SIGNATURES": ["By"],
"TRUSTED_DEVELOPERS": ["john.doe@example.org"],
"CHECK_LICENSE": False,
"repository": repository.id})
httpretty.disable()
latest_requests = httpretty.HTTPretty.latest_requests
assert_that(len(latest_requests), equal_to(3), "2x GET, 1x POST")
expected_requests = [
"",
"",
"success"
]
for expected, request in zip(expected_requests, latest_requests):
assert_that(str(request.parsed_body), contains_string(expected))
cs = CommitStatus.query.filter_by(repository_id=repository.id,
sha=commit["sha"]).first()
assert_that(cs)
assert_that(cs.state, equal_to("success"))
assert_that(cs.errors, equal_to(0))
assert_that(cs.content["files"]["spam/__init__.py"]["errors"],
has_length(0))
def test_push_wip_commit(app, repository):
"""Worker push /commits/1 has wip as a component and is ignored"""
httpretty.reset()
commit = {
"sha": 1,
"url": "https://api.github.com/commits/1",
"html_url": "https://github.com/commits/1",
"comments_url": "https://api.github.com/commits/1/comments",
"commit": {
"message": "wip: herp derp\n\nBy: John Doe <john.doe@example.org>",
},
"files": [{
"filename": "spam/__init__.py",
"status": "added",
"raw_url": "https://github.com/raw/1/spam/__init__.py"
}]
}
httpretty.register_uri(httpretty.GET,
"https://api.github.com/commits/1",
body=json.dumps(commit),
content_type="application/json")
status = {"id": 1, "state": "success"}
httpretty.register_uri(httpretty.POST,
"https://api.github.com/statuses/1",
status=201,
body=json.dumps(status),
content_type="application/json")
cs = CommitStatus.find_or_create(repository,
commit["sha"],
commit["url"])
assert_that(cs.is_pending())
httpretty.enable()
push(cs.id,
"https://api.github.com/commits/1",
"https://api.github.com/statuses/1",
{"COMPONENTS": ["comp"],
"SIGNATURES": ["By"],
"TRUSTED_DEVELOPERS": ["john.doe@example.org"],
"CHECK_LICENSE": False,
"repository": repository.id})
httpretty.disable()
latest_requests = httpretty.HTTPretty.latest_requests
assert_that(len(latest_requests), equal_to(2), "1x GET, 1x POST")
expected_requests = [
"",
"success"
]
for expected, request in zip(expected_requests, latest_requests):
assert_that(str(request.parsed_body), contains_string(expected))
cs = CommitStatus.query.filter_by(repository_id=repository.id,
sha=commit["sha"]).first()
assert_that(cs)
assert_that(cs.state, equal_to("success"))
assert_that(cs.errors, equal_to(0))
def test_push_broken_commit_message(app, repository):
"""Worker push /commits/1 is invalid (message)"""
httpretty.reset()
commit = {
"sha": 1,
"url": "https://api.github.com/commits/1",
"html_url": "https://github.com/commits/1",
"comments_url": "https://api.github.com/commits/1/comments",
"commit": {
"message": "Fix all the bugs!"
},
"files": [{
"filename": "spam/eggs.py",
"status": "modified",
"raw_url": "https://github.com/raw/1/spam/eggs.py"
}]
}
httpretty.register_uri(httpretty.GET,
"https://api.github.com/commits/1",
body=json.dumps(commit),
content_type="application/json")
eggs_py = '"""Eggs are boiled."""\n'
httpretty.register_uri(httpretty.GET,
"https://github.com/raw/1/spam/eggs.py",
body=eggs_py,
content_type="text/plain")
httpretty.register_uri(httpretty.POST,
"https://api.github.com/commits/1/comments",
status=201,
body=json.dumps({"id": 1}),
content_type="application/json")
status = {"id": 1, "state": "success"}
httpretty.register_uri(httpretty.POST,
"https://api.github.com/statuses/1",
status=201,
body=json.dumps(status),
content_type="application/json")
cs = CommitStatus.find_or_create(repository,
commit["sha"],
commit["url"])
assert_that(cs.is_pending())
httpretty.enable()
push(cs.id,
"https://api.github.com/commits/1",
"https://api.github.com/statuses/1",
{"CHECK_LICENSE": False,
"repository": repository.id})
httpretty.disable()
latest_requests = httpretty.HTTPretty.latest_requests
assert_that(len(latest_requests), equal_to(4), "2x GET, 2x POST")
expected_requests = [
"",
"needs more reviewers",
"",
"error"
]
for expected, request in zip(expected_requests, latest_requests):
assert_that(str(request.parsed_body),
contains_string(expected))
def test_push_broken_files(repository):
"""Worker push /commits/1 is invalid (files)"""
httpretty.reset()
commit = {
"sha": 1,
"url": "https://api.github.com/commits/1",
"html_url": "https://github.com/commits/1",
"comments_url": "https://api.github.com/commits/1/comments",
"commit": {
"message": "comp: bob\n\nBy: John <john.doe@example.org>"
},
"files": [{
"filename": "spam/eggs.py",
"status": "modified",
"raw_url": "https://github.com/raw/1/spam/eggs.py"
}]
}
httpretty.register_uri(httpretty.GET,
"https://api.github.com/commits/1",
body=json.dumps(commit),
content_type="application/json")
eggs_py = "if foo == bar:\n print('derp')\n"
httpretty.register_uri(httpretty.GET,
"https://github.com/raw/1/spam/eggs.py",
body=eggs_py,
content_type="text/plain")
httpretty.register_uri(httpretty.POST,
"https://api.github.com/commits/1/comments",
status=201,
body=json.dumps({"id": 1}),
content_type="application/json")
status = {"id": 1, "state": "success"}
httpretty.register_uri(httpretty.POST,
"https://api.github.com/statuses/1",
status=201,
body=json.dumps(status),
content_type="application/json")
cs = CommitStatus.find_or_create(repository,
commit["sha"],
commit["url"])
assert_that(cs.is_pending())
httpretty.enable()
push(cs.id,
"https://api.github.com/commits/1",
"https://api.github.com/statuses/1",
{"COMPONENTS": ["comp"],
"SIGNATURES": ["By"],
"TRUSTED_DEVELOPERS": ["john.doe@example.org"],
"repository": repository.id})
httpretty.disable()
latest_requests = httpretty.HTTPretty.latest_requests
assert_that(len(latest_requests), equal_to(4), "2x GET, 2x POST")
expected_requests = [
"",
"",
"F821 undefined name 'foo'",
"error"
]
for expected, request in zip(expected_requests, latest_requests):
assert_that(str(request.parsed_body),
contains_string(expected))
def test_push_known_commit(repository, session):
"""Worker push /commits/1 is not rechecked if known"""
httpretty.reset()
commit = {
"sha": 1,
"url": "https://api.github.com/commits/1",
"html_url": "https://github.com/commits/1",
"comments_url": "https://api.github.com/commits/1/comments",
"commit": {
"message": "Fix all the bugs!"
},
"files": [{
"filename": "spam/eggs.py",
"status": "modified",
"raw_url": "https://github.com/raw/1/spam/eggs.py"
}]
}
httpretty.register_uri(httpretty.GET,
"https://api.github.com/commits/1",
body=json.dumps(commit),
content_type="application/json")
cs = CommitStatus(repository,
"1",
"https://github.com/commits/1",
{"message": ["error 1", "error 2"], "files": {}})
session.add(cs)
session.commit()
assert_that(cs.is_pending(), equal_to(False))
httpretty.enable()
body = push(cs.id,
"https://api.github.com/commits/1",
"https://api.github.com/statuses/1",
{"repository": repository.id})
httpretty.disable()
latest_requests = httpretty.HTTPretty.latest_requests
assert_that(len(latest_requests), equal_to(1), "1x GET")
assert_that(body["description"],
contains_string("[error] 2 errors"))
def test_push_half_known_commit(repository, session):
"""Worker push /commits/1 checks the files if none"""
httpretty.reset()
commit = {
"sha": "1",
"url": "https://api.github.com/commits/1",
"html_url": "https://github.com/commits/1",
"comments_url": "https://api.github.com/commits/1/comments",
"commit": {
"message": "Fix all the bugs!"
},
"files": [{
"filename": "spam/eggs.py",
"status": "modified",
"raw_url": "https://github.com/raw/1/spam/eggs.py"
}]
}
httpretty.register_uri(httpretty.GET,
"https://api.github.com/commits/1",
body=json.dumps(commit),
content_type="application/json")
eggs_py = "if foo == bar:\n print('derp')\n"
httpretty.register_uri(httpretty.GET,
"https://github.com/raw/1/spam/eggs.py",
body=eggs_py,
content_type="text/plain")
httpretty.register_uri(httpretty.POST,
"https://api.github.com/commits/1/comments",
status=201,
body=json.dumps({"id": 1}),
content_type="application/json")
status = {"id": 1, "state": "success"}
httpretty.register_uri(httpretty.POST,
"https://api.github.com/statuses/1",
status=201,
body=json.dumps(status),
content_type="application/json")
cs = CommitStatus(repository,
"1",
"https://github.com/commits/1",
{"message": [], "files": None})
session.add(cs)
session.commit()
assert_that(cs.is_pending(), equal_to(False))
httpretty.enable()
push(cs.id,
"https://api.github.com/commits/1",
"https://api.github.com/statuses/1",
{"repository": repository.id})
httpretty.disable()
latest_requests = httpretty.HTTPretty.latest_requests
assert_that(len(latest_requests), equal_to(4), "2x GET, 2x POST")
expected_requests = [
"",
"",
"F821 undefined name 'foo'",
"error"
]
for expected, request in zip(expected_requests, latest_requests):
assert_that(str(request.parsed_body),
contains_string(expected))
cs = CommitStatus.query.filter_by(id=cs.id).first()
assert_that(cs)
assert_that(cs.is_pending(), equal_to(False))
assert_that(cs.content["files"]["spam/eggs.py"]["errors"],
has_item("1: D100 Missing docstring in public module"))
|
"""
Plotting results of variable ring buffer experiment.
Copyright (C) Sarah Mount, 2009.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
"""
from scipy import *
from pylab import *
__author__ = 'Sarah Mount <s.mount@wlv.ac.uk>'
__date__ = 'November 2009'
FILENAME = 'token_ring.png'
subplots_adjust(hspace=0.4, wspace=0.6)
t = array([2, 4, 8, 16, 32, 64, 128, 256, 512, 1024])
yvals = {1:{'procs':array([314.430736, 157.215372, 78.607687, 39.303844,
                           19.651922, 9.825962, 4.912981, 2.456491, None, None]),
'threads':array([314.430851, 157.215429, 78.607716, 39.303859,
19.651930, 9.825965, 4.912983, 2.456492, None, None]),
'jython':array([314.431448, 157.215729, 78.607867, 39.303935,
19.651969, 9.825985, 4.912993, 2.456502, 1.228249, 0.614125])}
}
subplot(111)
title('Variable sized ring buffer \nwith one token')
plot(t, yvals[1]['procs'], 'g^-')
plot(t, yvals[1]['threads'], 'k*--')
plot(t, yvals[1]['jython'], 'rx-.')
legend(['Processes reified as OS processes',
'Processes reified as OS threads',
'Processes reified as Java threads'],
loc='upper left')
xlabel('Number of nodes in token ring')
ylabel(r'Time $(\mu{}s)$')
grid(True)
savefig(FILENAME, format='png')
show()
|
import logging
import sys
class Utils(object):
logpath = '/var/log/libvirt/libvirtwakeonlan.log'
@staticmethod
def SetupLogging(logpath=None):
returnValue = True
logformat = "%(asctime)s|%(levelname)s|%(message)s"
dateformat = "%Y-%m-%d %H:%M:%S"
if logpath is None:
logpath = Utils.logpath
try:
logging.basicConfig(
filename=logpath,
level=logging.INFO,
format=logformat,
datefmt=dateformat)
except Exception:
logging.basicConfig(
level=logging.INFO,
format=logformat,
datefmt=dateformat)
logging.error("Unable to write to log file " + logpath)
returnValue = False
return returnValue
@staticmethod
def ShowVersion(Version):
if len(sys.argv) == 2 and sys.argv[1] == 'version':
print "LibVirt Wake-On-Lan Version " + Version
sys.exit(0)
return False
|
__author__ = 'Aram Kananov <arcanan@flashmail.com>, Petr Vanek <petr@yarpen.cz>'
class OracleTypeSource(OraclePLSQLSource):
"""Source code of type"""
pass
|
import os
from jinja2 import Environment, PackageLoader
environment = Environment(loader = PackageLoader('megaphysics', 'templates'))
if 'MEGPHYS_ROOT' in os.environ:
environment.globals['ROOT'] = os.environ['MEGPHYS_ROOT']
else:
environment.globals['ROOT'] = "/"
def generate_page(template, filepath, **kwargs):
"""Renders a page to a file, given a template and the path of the file."""
page = environment.get_template(template)
    with open(filepath, 'wb') as f:  # binary mode: render() output is encoded to bytes
        f.write(page.render(**kwargs).encode('utf-8'))
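# Hypothetical usage (template name and keyword arguments are illustrative only):
# generate_page('index.html', 'build/index.html', title='Megaphysics')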
|
import ping, socket
import os, time, json
hosts = "/etc/hosts"
timeout = 1500 #timeout in ms
interval = 200 #ping interval in ms
attempts = 10
tld = ".czf"
domain = ".brevnov.czf"
smokeping_prefix = "Klienti"
smpater_prefix = "Backbone"
smokeping_babble_length = 3
smpater_babble_length = 2
smokeping_html = "/var/www/html/web/sites/sysifos/hosts-ping/index.html"
smpater_html = "/var/www/html/web/sites/sysifos/hosts-ping/backbone.html"
smokeping_avg_html = "/var/www/html/web/sites/sysifos/hosts-ping/avg.html"
smpater_avg_html = "/var/www/html/web/sites/sysifos/hosts-ping/backbone-avg.html"
smokeping_avg_json = "/var/www/html/web/sites/sysifos/hosts-ping/smokeping.json"
smpater_avg_json = "/var/www/html/web/sites/sysifos/hosts-ping/smpater.json"
smokeping_url = "http://sisyfos.brevnov.czf/cgi-bin/smokeping.cgi?filter=%s&target=%s"
smpater_url = "http://tartarus.brevnov.czf/cgi-bin/smokeping.cgi?filter=%s&target=%s"
table_head = """
<table class="decorated last">
<caption>hosts ping (%s)</caption><thead><tr>
<th style="text-align: right;">#</th>
<th>hostname</th>
<th style="text-align: right;">loss</th>
<th style="text-align: right;">avg</th>
<th style="text-align: right;">best</th>
<th style="text-align: right;">worst</th>
</tr></thead><tbody>
"""
table_end = """
</tbody></table>
<br />
<p>Page generated by (G)2013 xChaos hosts-ping version 0.2-a</p>
"""
def try_to_ping(host):
sum = 0.0
best = None
worst = None
loss = 0
for i in range(0, attempts):
try:
delay = ping.Ping(host, timeout = timeout).do() #timeout in ms
            time.sleep(interval/1000.0) # float division; plain / would give 0 in Python 2
if delay:
sum += delay
if not best or best > delay:
best = delay
if not worst or worst < delay:
worst = delay
else:
loss += 1
except socket.error, e:
loss += 1
return (sum/attempts, best, worst, loss)
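# Note: the returned tuple is (average over *all* attempts including losses,
# best, worst, number of lost pings), so losses drag the average toward zero.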
def smokenam_style(hostname, prefix, babble_length):
if not tld in hostname:
hostname += domain
babble = hostname.split('.')
return '.'.join([prefix,] + [a_tooth for a_tooth in reversed(babble)][1:babble_length] + ['-'.join(babble),])
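# Hand-traced example with the settings above (Backbone prefix, babble_length=2):
#   smokenam_style('router1.brevnov.czf', 'Backbone', 2)
#   -> 'Backbone.brevnov.router1-brevnov-czf'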
def append_host(html, host, base_url, counter, red_treshold, green_treshold):
style = {'right': 'text-align: right;'}
columns = ('loss','avg','best','worst')
for kolikaty, column in enumerate(columns):
style[column] = style['right']
if not host[column]:
host[column] = 0 #don't want it to be "None" type
if host[column] > red_treshold[kolikaty]:
style[column] += ' color: red;'
elif host[column] < green_treshold[kolikaty]:
style[column] += ' color: green;'
loss = float(100*host['loss'])/host['attempts']
html.write( ('<tr class="%s"><td style="%s">%d</td><td><a href="%s" target="_blank" class="blue">%s</a></td><td style="%s">%.1f%%</td>' + "\n")
% (('even', 'odd')[counter % 2], style['right'], counter, base_url % (host['name'], host['smokename']), host['name'], style['loss'], loss))
if host['avg'] and host['best'] and host['worst']:
html.write( ('<td style="%s">%.1f</td><td style="%s">%.1f</td><td style="%s">%.1f</td></tr>' + "\n")
% (style['avg'], host['avg'], style['best'], host['best'], style['worst'], host['worst']))
else:
html.write(3*('<td style="%s">-</td>' % style['loss']) + "\n")
def merge_json_avgs(filename, smoke_array):
try:
avg_dict = json.load(open(filename))
for host in smoke_array:
avg = avg_dict.get(host['ip'])
if avg:
if host['avg'] and avg['avg'] and avg['attempts']+host['attempts']-avg['loss']-host['loss'] > 0:
host['avg'] = ((avg['attempts']-avg['loss'])*avg['avg']+(host['attempts']-host['loss'])*host['avg'])/(avg['attempts']+host['attempts']-avg['loss']-host['loss'])
else:
host['avg'] = avg['avg']
if not host['best'] or host['best'] > avg['best']:
host['best'] = avg['best']
if not host['worst'] or host['worst'] < avg['worst']:
host['worst'] = avg['worst']
host['attempts'] += avg['attempts']
host['loss'] += avg['loss']
    except Exception:
        pass # start from scratch...
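# The merge is a success-weighted running average: e.g. 8 earlier successful
# pings averaging 10 ms merged with 9 new ones averaging 20 ms give
# (8*10 + 9*20) / 17 = ~15.3 ms.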
def save_json(filename, smoke_array):
smoke_dict = {}
for host in smoke_array:
smoke_dict[host['ip']] = host
json.dump(smoke_dict, open(filename, 'w'))
smokeping = []
smpater = []
for radek in open(hosts):
if radek[0] != '#':
is_smokeping = 'smokeping' in radek and not 'hidden' in radek
is_smpater = 'smpater' in radek
if is_smokeping or is_smpater:
slovo = radek.split("\t")
host = { 'ip': slovo[0], 'name': slovo[1].split(' ')[0], 'attempts': attempts }
(host['avg'], host['best'], host['worst'], host['loss']) = try_to_ping(host['ip'])
if is_smokeping:
host['smokename'] = smokenam_style(host['name'], smokeping_prefix, smokeping_babble_length)
smokeping.append(host)
else:
host['smokename'] = smokenam_style(host['name'], smpater_prefix, smpater_babble_length)
smpater.append(host)
red_treshold = (0, 100, 50, 200)
green_treshold = (0, 7, 5, 20)
html = open(smokeping_html, 'w')
html.write("<h1>Aktuální odezva klientských zařízení</h1>");
html.write(table_head % time.ctime());
for kolikaty, host in enumerate(sorted(smokeping, key = lambda host: -host['loss']*host['attempts']*timeout-host['avg'])):
append_host(html, host, smokeping_url, kolikaty+1, red_treshold, green_treshold)
html.write(table_end)
html.close()
red_treshold = (0, 50, 20, 100)
green_treshold = (0, 5, 2, 10)
html = open(smpater_html, 'w')
html.write("<h1>Aktuální odezva páteřních routerů</h1>");
html.write(table_head % time.ctime());
for kolikaty, host in enumerate(sorted(smpater, key = lambda host: -host['loss']*host['attempts']*timeout-host['avg'])):
append_host(html, host, smpater_url, kolikaty+1, red_treshold, green_treshold)
html.write(table_end)
html.close()
red_treshold = (1000, 100, 20, 500)
green_treshold = (0, 7, 5, 20)
merge_json_avgs(smokeping_avg_json, smokeping)
html = open(smokeping_avg_html, 'w')
html.write("<h1>Průměrná odezva klientských zařízení</h1>");
html.write(table_head % time.ctime());
for kolikaty, host in enumerate(sorted(smokeping, key = lambda host: -host['loss']*host['attempts']*timeout-host['avg'])):
append_host(html, host, smokeping_url, kolikaty+1, red_treshold, green_treshold)
html.write(table_end)
html.close()
save_json(smokeping_avg_json, smokeping)
red_treshold = (100, 50, 10, 200)
green_treshold = (0, 5, 2, 10)
merge_json_avgs(smpater_avg_json, smpater)
html = open(smpater_avg_html, 'w')
html.write("<h1>Průměrná odezva páteřních routerů</h1>");
html.write(table_head % time.ctime());
for kolikaty, host in enumerate(sorted(smpater, key = lambda host: -host['loss']*host['attempts']*timeout-host['avg'])):
append_host(html, host, smpater_url, kolikaty+1, red_treshold, green_treshold)
html.write(table_end)
html.close()
save_json(smpater_avg_json, smpater)
|
import sys
import os
import argparse
import shutil
DESCRIPTION = "\n" \
"Update pyqt4-visual-graph with the most recent version of the library\n"
EPILOG = "\n" \
"Examples:\n" + \
"\n" + \
"update_visual_graph.py\n" + \
"\tupdates the xilinx build with the files in ~/Projects/visual_graph\n" + \
"\n" + \
"update_visual_graph.py <your path here>\n" + \
"\tupdates the xilinx build with the path specified"
debug = False
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=DESCRIPTION,
epilog=EPILOG
)
#Add an argument to the parser
home = os.path.expanduser("~")
default_path = os.path.join(home, "Projects", "pyqt4-visual-graph", "visual_graph")
parser.add_argument("path",
type = str,
nargs='?',
default=default_path,
help="Specify the path to visual_grapher (leave blank for %s" % default_path)
    arg = parser.parse_args()
    if not os.path.exists(arg.path):
        print "Path: %s doesn't exist!" % arg.path
        sys.exit(1)
    out_dir = os.path.join(os.path.dirname(__file__), "visual_graph")
#Remove any local version of the files
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
#Copy over site_scons
shutil.copytree(arg.path, out_dir)
|
from enum import Enum
from .tools import matrix_to_list
class DataType(Enum):
float = "float"
float2 = "float2"
float3 = "float3"
float4 = "float4"
float16 = "float4x4"
int = "int"
int4 = "int4"
bool = "bool"
texture = "texture"
data = "data"
class DataEntry:
name = ""
type = None,
key = None
value = []
class_name = None
def __init__(self, name, _type, value, key=None, class_name=None):
self.name = name
self.type = _type
self.value = value
self.key = key
self.class_name = class_name
def __str__(self):
return self.type.name + ": " + self.name
def __eq__(self, other):
return self.__dict__ == other.__dict__
@staticmethod
def create_from_matrix(name, matrix):
return DataEntry(name, DataType.float16, matrix_to_list(matrix))
class TextureEntry(DataEntry):
wrap_type = None
src = ""
def __init__(self, name, src, wrap_type=None, key=None):
super().__init__(name, DataType.texture, None, key)
self.src = src
self.wrap_type = wrap_type
class DataReference(DataEntry):
src = ""
def __init__(self, src):
super().__init__(None, DataType.data, None)
self.src = src
def __str__(self):
return self.type.name + ": " + self.src
def write_generic_entry(doc, entry):
entry_type = entry.type
entry_element = doc.createElement(entry_type.value)
if entry_type == DataType.data:
entry_element.setAttribute("src", entry.src)
return entry_element
entry_element.setAttribute("name", entry.name)
if entry.key:
entry_element.setAttribute("key", entry.key)
value = entry.value
value_str = None
if entry_type in {DataType.int, DataType.int4}:
value_str = ""
for t in value:
length = len(t) if isinstance(t, tuple) else 1
fs = length * "%.d "
value_str += fs % t
elif entry_type == DataType.texture:
if entry.wrap_type is not None:
entry_element.setAttribute("wrapS", entry.wrap_type)
entry_element.setAttribute("wrapT", entry.wrap_type)
img_element = doc.createElement("img")
img_element.setAttribute("src", entry.src)
entry_element.appendChild(img_element)
else:
if not isinstance(value, list):
value_str = str(value)
else:
value_str = " ".join(str(v) for v in value)
if value_str:
text_node = doc.createTextNode(value_str)
entry_element.appendChild(text_node)
return entry_element
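# Minimal usage sketch (assumed, not part of the original module):
# from xml.dom.minidom import Document
# doc = Document()
# entry = DataEntry("color", DataType.float4, [1.0, 0.0, 0.0, 1.0])
# doc.appendChild(write_generic_entry(doc, entry))
# doc.toxml()  # -> '...<float4 name="color">1.0 0.0 0.0 1.0</float4>'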
def write_generic_entry_html(writer, entry):
element_name = entry.type.value
writer.start_element(element_name, name=entry.name)
if entry.class_name:
writer.attribute("class", entry.class_name)
writer.content(str(entry.value))
writer.end_element(element_name)
|
from tkinter import mainloop
from tkinter.messagebox import showinfo
from tkinter102 import MyGui
class CustomGui(MyGui): # inherit init
def reply(self): # replace reply
showinfo(title='popup', message='Ouch!')
if __name__ == '__main__':
CustomGui().pack()
mainloop()
|
title = 'Pmw.Balloon demonstration'
import sys
sys.path[:0] = ['../../..']
import tkinter
import Pmw
class Demo:
def __init__(self, parent):
# Create the Balloon.
self.balloon = Pmw.Balloon(parent)
# Create some widgets and megawidgets with balloon help.
frame = tkinter.Frame(parent)
frame.pack(padx = 10, pady = 5)
field = Pmw.EntryField(frame,
labelpos = 'nw',
label_text = 'Command:')
field.setentry('mycommand -name foo')
field.pack(side = 'left', padx = 10)
self.balloon.bind(field, 'Command to\nstart/stop',
'Enter the shell command to control')
start = tkinter.Button(frame, text='Start')
start.pack(side='left', padx = 10)
self.balloon.bind(start, 'Start the command')
stop = tkinter.Button(frame, text='Stop')
stop.pack(side='left', padx = 10)
self.balloon.bind(stop, 'Stop the command')
self.suicide = tkinter.Button(frame, text='Kill me soon!',
command = self.killButton)
self.suicide.pack(side='left', padx = 10)
self.balloon.bind(self.suicide, 'Watch this button disappear!')
scrolledCanvas = Pmw.ScrolledCanvas(parent,
canvas_width = 300,
canvas_height = 115,
)
scrolledCanvas.pack()
canvas = scrolledCanvas.component('canvas')
self.canvas = canvas
# Create some canvas items and individual help.
item = canvas.create_arc(5, 5, 35, 35, fill = 'red', extent = 315)
self.balloon.tagbind(canvas, item, 'This is help for\nan arc item')
item = canvas.create_bitmap(20, 150, bitmap = 'question')
self.balloon.tagbind(canvas, item, 'This is help for\na bitmap')
item = canvas.create_line(50, 60, 70, 80, 85, 20, width = 5)
self.balloon.tagbind(canvas, item, 'This is help for\na line item')
item = canvas.create_text(10, 90, text = 'Canvas items with balloons',
anchor = 'nw', font = field.cget('entry_font'))
self.balloon.tagbind(canvas, item, 'This is help for\na text item')
# Create two canvas items which have the same tag and which use
# the same help.
canvas.create_rectangle(100, 10, 170, 50, fill = 'aliceblue',
tags = 'TAG1')
self.bluecircle = canvas.create_oval(110, 30, 160, 80, fill = 'blue',
tags = 'TAG1')
self.balloon.tagbind(canvas, 'TAG1',
'This is help for the two blue items' + '\n' * 10 +
'It is very, very big.',
'This is help for the two blue items')
item = canvas.create_text(180, 10, text = 'Delete',
anchor = 'nw', font = field.cget('entry_font'))
self.balloon.tagbind(canvas, item,
'After 2 seconds,\ndelete the blue circle')
canvas.tag_bind(item, '<ButtonPress>', self._canvasButtonpress)
scrolledCanvas.resizescrollregion()
scrolledText = Pmw.ScrolledText(parent,
text_width = 32,
text_height = 4,
text_wrap = 'none',
)
scrolledText.pack(pady = 5)
text = scrolledText.component('text')
self.text = text
text.insert('end',
'This is a text widget with ', '',
' balloon', 'TAG1',
'\nhelp. Find the ', '',
' text ', 'TAG1',
' tagged with', '',
' help.', 'TAG2',
'\n', '',
'Remove tag 1.', 'TAG3',
'\nAnother line.\nAnd another', '',
)
text.tag_configure('TAG1', borderwidth = 2, relief = 'sunken')
text.tag_configure('TAG3', borderwidth = 2, relief = 'raised')
self.balloon.tagbind(text, 'TAG1',
'There is one secret\nballoon help.\nCan you find it?')
self.balloon.tagbind(text, 'TAG2',
'Well done!\nYou found it!')
self.balloon.tagbind(text, 'TAG3',
'After 2 seconds\ndelete the tag')
text.tag_bind('TAG3', '<ButtonPress>', self._textButtonpress)
frame = tkinter.Frame(parent)
frame.pack(padx = 10)
self.toggleBalloonVar = tkinter.IntVar()
self.toggleBalloonVar.set(1)
toggle = tkinter.Checkbutton(frame,
variable = self.toggleBalloonVar,
text = 'Balloon help', command = self.toggle)
toggle.pack(side = 'left', padx = 10)
self.balloon.bind(toggle, 'Toggle balloon help\non and off')
self.toggleStatusVar = tkinter.IntVar()
self.toggleStatusVar.set(1)
toggle = tkinter.Checkbutton(frame,
variable = self.toggleStatusVar,
text = 'Status help', command = self.toggle)
toggle.pack(side = 'left', padx = 10)
self.balloon.bind(toggle,
'Toggle status help on and off, on and off' + '\n' * 10 +
'It is very, very big, too.',
'Toggle status help on and off')
# Create and pack the MessageBar.
messageBar = Pmw.MessageBar(parent,
entry_width = 40,
entry_relief='groove',
labelpos = 'w',
label_text = 'Status:')
messageBar.pack(fill = 'x', expand = 1, padx = 10, pady = 5)
# Configure the balloon to display its status messages in the
# message bar.
self.balloon.configure(statuscommand = messageBar.helpmessage)
def toggle(self):
if self.toggleBalloonVar.get():
if self.toggleStatusVar.get():
self.balloon.configure(state = 'both')
else:
self.balloon.configure(state = 'balloon')
else:
if self.toggleStatusVar.get():
self.balloon.configure(state = 'status')
else:
self.balloon.configure(state = 'none')
def killButton(self):
# Test for old bug when destroying widgets 1) while the
# balloon was up and 2) during the initwait period.
print('Destroying button in 2 seconds')
self.suicide.after(2000, self.suicide.destroy)
def _canvasButtonpress(self, event):
print('Destroying blue circle in 2 seconds')
self.canvas.after(2000, self.deleteBlueCircle)
def deleteBlueCircle(self):
self.balloon.tagunbind(self.canvas, self.bluecircle)
self.canvas.delete(self.bluecircle)
def _textButtonpress(self, event):
print('Deleting the text tag in 2 seconds')
self.text.after(2000, self.deleteTextTag)
def deleteTextTag(self):
self.balloon.tagunbind(self.text, 'TAG1')
self.text.tag_delete('TAG1')
if __name__ == '__main__':
root = tkinter.Tk()
Pmw.initialise(root, 12, fontScheme = 'default')
root.title(title)
exitButton = tkinter.Button(root, text = 'Exit', command = root.destroy)
exitButton.pack(side = 'bottom')
widget = Demo(root)
root.mainloop()
|
from . import historyentry
from . import msgarea
from . import statusbar
import meld.linkmap
import meld.diffmap
import meld.util.sourceviewer
|
"""
Created on Mon Dec 16 08:28:25 2013
@author: mel
"""
from beatle.model import TComponent
from beatle import tran
class Data(TComponent):
"""Implements data"""
context_container = True
# visual methods
@tran.TransactionalMethod('move python variable {0}')
def drop(self, to):
"""Drops datamember inside project or another folder """
target = to.inner_variable_container
if not target:
return False # avoid move classes between projects
index = 0
tran.TransactionalMoveObject(
object=self, origin=self.parent, target=target, index=index)
return True
def __init__(self, **kwargs):
"Initialization"
self._value = kwargs.get('value', "'None'")
super(Data, self).__init__(**kwargs)
k = self.inner_module or self.inner_package
if k:
k.ExportPythonCodeFiles()
def Delete(self):
"""Handle delete"""
k = self.inner_module or self.inner_package
super(Data, self).Delete()
if k:
k.ExportPythonCodeFiles()
def get_kwargs(self):
"""Returns the kwargs needed for this object"""
kwargs = {}
kwargs['value'] = self._value
kwargs.update(super(Data, self).get_kwargs())
return kwargs
def WriteCode(self, pf):
"""wtite data definition"""
pass
def GetInitializer(self):
"""Return the initializer sequence"""
if len(self._value) > 0:
return self._value
return self._name
@property
def bitmap_index(self):
"""Index of tree image"""
from beatle.app import resources as rc
return rc.GetBitmapIndex("py_variable")
def OnUndoRedoAdd(self):
"""Restore object from undo"""
super(Data, self).OnUndoRedoAdd()
def OnUndoRedoChanged(self):
"""Make changes in the model as result of change"""
super(Data, self).OnUndoRedoChanged()
def ExportPythonCode(self, wf):
"""Write code"""
if len(self._value):
wf.writeln('{self._name} = {self._value}'.format(self=self))
else:
wf.writeln('{self._name} = None'.format(self=self))
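        # Illustrative output (not part of the original file): a data
        # member with _name 'timeout' and _value '30' is exported as the
        # line "timeout = 30"; with an empty _value it becomes
        # "timeout = None".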
|
specification = {
-2: #submodel_id
[
"ln(urbansim.zone.average_income)",
#"urbansim.household_x_zone.cost_to_income_ratio",
#"ln(urbansim.zone.residential_units)",
"urbansim.household_x_zone.income_and_ln_improvement_value_per_unit",
]
}
|
import os, sys
import string
import glob
debug=0
ignored_files = {
"trio": "too many non standard macros",
"trio.c": "too many non standard macros",
"trionan.c": "too many non standard macros",
"triostr.c": "too many non standard macros",
"acconfig.h": "generated portability layer",
"config.h": "generated portability layer",
"libxml.h": "internal only",
"testOOM.c": "out of memory tester",
"testOOMlib.h": "out of memory tester",
"testOOMlib.c": "out of memory tester",
}
ignored_words = {
"WINAPI": (0, "Windows keyword"),
"LIBXML_DLL_IMPORT": (0, "Special macro to flag external keywords"),
"XMLPUBVAR": (0, "Special macro for extern vars for win32"),
"XSLTPUBVAR": (0, "Special macro for extern vars for win32"),
"EXSLTPUBVAR": (0, "Special macro for extern vars for win32"),
"XMLPUBFUN": (0, "Special macro for extern funcs for win32"),
"XSLTPUBFUN": (0, "Special macro for extern funcs for win32"),
"EXSLTPUBFUN": (0, "Special macro for extern funcs for win32"),
"XMLCALL": (0, "Special macro for win32 calls"),
"XSLTCALL": (0, "Special macro for win32 calls"),
"EXSLTCALL": (0, "Special macro for win32 calls"),
"__declspec": (3, "Windows keyword"),
"ATTRIBUTE_UNUSED": (0, "macro keyword"),
"LIBEXSLT_PUBLIC": (0, "macro keyword"),
"X_IN_Y": (5, "macro function builder"),
"XSLT_ITEM_COMMON_FIELDS": (0, "Special macro")
}
def escape(raw):
raw = string.replace(raw, '&', '&')
raw = string.replace(raw, '<', '<')
raw = string.replace(raw, '>', '>')
raw = string.replace(raw, "'", ''')
raw = string.replace(raw, '"', '"')
return raw
def uniq(items):
d = {}
for item in items:
d[item]=1
return d.keys()
class identifier:
def __init__(self, name, module=None, type=None, lineno = 0,
info=None, extra=None):
self.name = name
self.module = module
self.type = type
self.info = info
self.extra = extra
self.lineno = lineno
self.static = 0
def __repr__(self):
r = "%s %s:" % (self.type, self.name)
if self.static:
r = r + " static"
if self.module != None:
r = r + " from %s" % (self.module)
if self.info != None:
r = r + " " + `self.info`
if self.extra != None:
r = r + " " + `self.extra`
return r
def set_module(self, module):
self.module = module
def set_type(self, type):
self.type = type
def set_info(self, info):
self.info = info
def set_extra(self, extra):
self.extra = extra
def set_lineno(self, lineno):
self.lineno = lineno
def set_static(self, static):
self.static = static
def get_name(self):
return self.name
def get_module(self):
return self.module
def get_type(self):
return self.type
def get_info(self):
return self.info
def get_lineno(self):
return self.lineno
def get_extra(self):
return self.extra
def get_static(self):
return self.static
def update(self, module, type = None, info = None, extra=None):
if module != None and self.module == None:
self.set_module(module)
if type != None and self.type == None:
self.set_type(type)
if info != None:
self.set_info(info)
if extra != None:
self.set_extra(extra)
class index:
def __init__(self, name = "noname"):
self.name = name
self.identifiers = {}
self.functions = {}
self.variables = {}
self.includes = {}
self.structs = {}
self.enums = {}
self.typedefs = {}
self.macros = {}
self.references = {}
self.info = {}
def add_ref(self, name, module, static, type, lineno, info=None, extra=None):
if name[0:2] == '__':
return None
d = None
try:
d = self.identifiers[name]
            d.update(module, type, info, extra)
except:
d = identifier(name, module, type, lineno, info, extra)
self.identifiers[name] = d
if d != None and static == 1:
d.set_static(1)
if d != None and name != None and type != None:
self.references[name] = d
def add(self, name, module, static, type, lineno, info=None, extra=None):
if name[0:2] == '__':
return None
d = None
try:
d = self.identifiers[name]
            d.update(module, type, info, extra)
except:
d = identifier(name, module, type, lineno, info, extra)
self.identifiers[name] = d
if d != None and static == 1:
d.set_static(1)
if d != None and name != None and type != None:
if type == "function":
self.functions[name] = d
elif type == "functype":
self.functions[name] = d
elif type == "variable":
self.variables[name] = d
elif type == "include":
self.includes[name] = d
elif type == "struct":
self.structs[name] = d
elif type == "enum":
self.enums[name] = d
elif type == "typedef":
self.typedefs[name] = d
elif type == "macro":
self.macros[name] = d
else:
print "Unable to register type ", type
return d
def merge(self, idx):
for id in idx.functions.keys():
#
# macro might be used to override functions or variables
# definitions
#
if self.macros.has_key(id):
del self.macros[id]
if self.functions.has_key(id):
print "function %s from %s redeclared in %s" % (
id, self.functions[id].module, idx.functions[id].module)
else:
self.functions[id] = idx.functions[id]
self.identifiers[id] = idx.functions[id]
for id in idx.variables.keys():
#
# macro might be used to override functions or variables
# definitions
#
if self.macros.has_key(id):
del self.macros[id]
if self.variables.has_key(id):
print "variable %s from %s redeclared in %s" % (
id, self.variables[id].module, idx.variables[id].module)
else:
self.variables[id] = idx.variables[id]
self.identifiers[id] = idx.variables[id]
for id in idx.structs.keys():
if self.structs.has_key(id):
print "struct %s from %s redeclared in %s" % (
id, self.structs[id].module, idx.structs[id].module)
else:
self.structs[id] = idx.structs[id]
self.identifiers[id] = idx.structs[id]
for id in idx.typedefs.keys():
if self.typedefs.has_key(id):
print "typedef %s from %s redeclared in %s" % (
id, self.typedefs[id].module, idx.typedefs[id].module)
else:
self.typedefs[id] = idx.typedefs[id]
self.identifiers[id] = idx.typedefs[id]
for id in idx.macros.keys():
#
# macro might be used to override functions or variables
# definitions
#
if self.variables.has_key(id):
continue
if self.functions.has_key(id):
continue
if self.enums.has_key(id):
continue
if self.macros.has_key(id):
print "macro %s from %s redeclared in %s" % (
id, self.macros[id].module, idx.macros[id].module)
else:
self.macros[id] = idx.macros[id]
self.identifiers[id] = idx.macros[id]
for id in idx.enums.keys():
if self.enums.has_key(id):
print "enum %s from %s redeclared in %s" % (
id, self.enums[id].module, idx.enums[id].module)
else:
self.enums[id] = idx.enums[id]
self.identifiers[id] = idx.enums[id]
def merge_public(self, idx):
for id in idx.functions.keys():
if self.functions.has_key(id):
up = idx.functions[id]
self.functions[id].update(None, up.type, up.info, up.extra)
# else:
# print "Function %s from %s is not declared in headers" % (
# id, idx.functions[id].module)
# TODO: do the same for variables.
def analyze_dict(self, type, dict):
count = 0
public = 0
for name in dict.keys():
id = dict[name]
count = count + 1
if id.static == 0:
public = public + 1
if count != public:
print " %d %s , %d public" % (count, type, public)
elif count != 0:
print " %d public %s" % (count, type)
def analyze(self):
self.analyze_dict("functions", self.functions)
self.analyze_dict("variables", self.variables)
self.analyze_dict("structs", self.structs)
self.analyze_dict("typedefs", self.typedefs)
self.analyze_dict("macros", self.macros)
class CLexer:
"""A lexer for the C language, tokenize the input by reading and
analyzing it line by line"""
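    # Tokens are (kind, text) tuples where kind is one of 'name', 'sep',
    # 'op', 'string', 'comment' or 'preproc'. For example (illustrative),
    # lexing the line "int x = 1;" yields:
    #   ('name', 'int'), ('name', 'x'), ('op', '='), ('name', '1'), ('sep', ';')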
def __init__(self, input):
self.input = input
self.tokens = []
self.line = ""
self.lineno = 0
def getline(self):
line = ''
while line == '':
line = self.input.readline()
if not line:
return None
self.lineno = self.lineno + 1
line = string.lstrip(line)
line = string.rstrip(line)
if line == '':
continue
while line[-1] == '\\':
line = line[:-1]
n = self.input.readline()
self.lineno = self.lineno + 1
n = string.lstrip(n)
n = string.rstrip(n)
if not n:
break
else:
line = line + n
return line
def getlineno(self):
return self.lineno
def push(self, token):
self.tokens.insert(0, token);
def debug(self):
print "Last token: ", self.last
print "Token queue: ", self.tokens
print "Line %d end: " % (self.lineno), self.line
def token(self):
while self.tokens == []:
if self.line == "":
line = self.getline()
else:
line = self.line
self.line = ""
if line == None:
return None
if line[0] == '#':
self.tokens = map((lambda x: ('preproc', x)),
string.split(line))
break;
l = len(line)
if line[0] == '"' or line[0] == "'":
end = line[0]
line = line[1:]
found = 0
tok = ""
while found == 0:
i = 0
l = len(line)
while i < l:
if line[i] == end:
self.line = line[i+1:]
line = line[:i]
l = i
found = 1
break
if line[i] == '\\':
i = i + 1
i = i + 1
tok = tok + line
if found == 0:
line = self.getline()
if line == None:
return None
self.last = ('string', tok)
return self.last
if l >= 2 and line[0] == '/' and line[1] == '*':
line = line[2:]
found = 0
tok = ""
while found == 0:
i = 0
l = len(line)
while i < l:
if line[i] == '*' and i+1 < l and line[i+1] == '/':
self.line = line[i+2:]
line = line[:i-1]
l = i
found = 1
break
i = i + 1
if tok != "":
tok = tok + "\n"
tok = tok + line
if found == 0:
line = self.getline()
if line == None:
return None
self.last = ('comment', tok)
return self.last
if l >= 2 and line[0] == '/' and line[1] == '/':
line = line[2:]
self.last = ('comment', line)
return self.last
i = 0
while i < l:
if line[i] == '/' and i+1 < l and line[i+1] == '/':
self.line = line[i:]
line = line[:i]
break
if line[i] == '/' and i+1 < l and line[i+1] == '*':
self.line = line[i:]
line = line[:i]
break
if line[i] == '"' or line[i] == "'":
self.line = line[i:]
line = line[:i]
break
i = i + 1
l = len(line)
i = 0
while i < l:
if line[i] == ' ' or line[i] == '\t':
i = i + 1
continue
o = ord(line[i])
if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
(o >= 48 and o <= 57):
s = i
while i < l:
o = ord(line[i])
if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
(o >= 48 and o <= 57) or string.find(
" \t(){}:;,+-*/%&!|[]=><", line[i]) == -1:
i = i + 1
else:
break
self.tokens.append(('name', line[s:i]))
continue
if string.find("(){}:;,[]", line[i]) != -1:
self.tokens.append(('sep', line[i]))
i = i + 1
continue
if string.find("+-*><=/%&!|.", line[i]) != -1:
if line[i] == '.' and i + 2 < l and \
line[i+1] == '.' and line[i+2] == '.':
self.tokens.append(('name', '...'))
i = i + 3
continue
j = i + 1
if j < l and (
string.find("+-*><=/%&!|", line[j]) != -1):
self.tokens.append(('op', line[i:j+1]))
i = j + 1
else:
self.tokens.append(('op', line[i]))
i = i + 1
continue
s = i
while i < l:
o = ord(line[i])
if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
(o >= 48 and o <= 57) or (
string.find(" \t(){}:;,+-*/%&!|[]=><", line[i]) == -1):
i = i + 1
else:
break
self.tokens.append(('name', line[s:i]))
tok = self.tokens[0]
self.tokens = self.tokens[1:]
self.last = tok
return tok
class CParser:
"""The C module parser"""
def __init__(self, filename, idx = None):
self.filename = filename
if len(filename) > 2 and filename[-2:] == '.h':
self.is_header = 1
else:
self.is_header = 0
self.input = open(filename)
self.lexer = CLexer(self.input)
if idx == None:
self.index = index()
else:
self.index = idx
self.top_comment = ""
self.last_comment = ""
self.comment = None
self.collect_ref = 0
self.no_error = 0
def collect_references(self):
self.collect_ref = 1
def stop_error(self):
self.no_error = 1
def start_error(self):
self.no_error = 0
def lineno(self):
return self.lexer.getlineno()
def index_add(self, name, module, static, type, info=None, extra = None):
self.index.add(name, module, static, type, self.lineno(),
info, extra)
def index_add_ref(self, name, module, static, type, info=None,
extra = None):
self.index.add_ref(name, module, static, type, self.lineno(),
info, extra)
def warning(self, msg):
if self.no_error:
return
print msg
def error(self, msg, token=-1):
if self.no_error:
return
print "Parse Error: " + msg
if token != -1:
print "Got token ", token
self.lexer.debug()
sys.exit(1)
def debug(self, msg, token=-1):
print "Debug: " + msg
if token != -1:
print "Got token ", token
self.lexer.debug()
def parseTopComment(self, comment):
res = {}
lines = string.split(comment, "\n")
item = None
for line in lines:
while line != "" and (line[0] == ' ' or line[0] == '\t'):
line = line[1:]
while line != "" and line[0] == '*':
line = line[1:]
while line != "" and (line[0] == ' ' or line[0] == '\t'):
line = line[1:]
try:
(it, line) = string.split(line, ":", 1)
item = it
while line != "" and (line[0] == ' ' or line[0] == '\t'):
line = line[1:]
if res.has_key(item):
res[item] = res[item] + " " + line
else:
res[item] = line
except:
if item != None:
if res.has_key(item):
res[item] = res[item] + " " + line
else:
res[item] = line
self.index.info = res
def parseComment(self, token):
if self.top_comment == "":
self.top_comment = token[1]
if self.comment == None or token[1][0] == '*':
self.comment = token[1];
else:
self.comment = self.comment + token[1]
token = self.lexer.token()
if string.find(self.comment, "DOC_DISABLE") != -1:
self.stop_error()
if string.find(self.comment, "DOC_ENABLE") != -1:
self.start_error()
return token
    #
    # Parse a comment block associated with a macro
    #
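    # An illustrative example of the comment layout handled here
    # (following the usual libxml2 conventions; the names are only an
    # example):
    #
    #     /**
    #      * XML_GET_LINE:
    #      * @n: a node pointer
    #      *
    #      * Macro to extract the line number of a node.
    #      */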
def parseMacroComment(self, name, quiet = 0):
if name[0:2] == '__':
quiet = 1
args = []
desc = ""
if self.comment == None:
if not quiet:
self.warning("Missing comment for macro %s" % (name))
return((args, desc))
if self.comment[0] != '*':
if not quiet:
self.warning("Missing * in macro comment for %s" % (name))
return((args, desc))
lines = string.split(self.comment, '\n')
if lines[0] == '*':
del lines[0]
if lines[0] != "* %s:" % (name):
if not quiet:
self.warning("Misformatted macro comment for %s" % (name))
self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
return((args, desc))
del lines[0]
while lines[0] == '*':
del lines[0]
while len(lines) > 0 and lines[0][0:3] == '* @':
l = lines[0][3:]
try:
(arg, desc) = string.split(l, ':', 1)
desc=string.strip(desc)
arg=string.strip(arg)
except:
if not quiet:
self.warning("Misformatted macro comment for %s" % (name))
self.warning(" problem with '%s'" % (lines[0]))
del lines[0]
continue
del lines[0]
l = string.strip(lines[0])
while len(l) > 2 and l[0:3] != '* @':
while l[0] == '*':
l = l[1:]
desc = desc + ' ' + string.strip(l)
del lines[0]
if len(lines) == 0:
break
l = lines[0]
args.append((arg, desc))
while len(lines) > 0 and lines[0] == '*':
del lines[0]
desc = ""
while len(lines) > 0:
l = lines[0]
while len(l) > 0 and l[0] == '*':
l = l[1:]
l = string.strip(l)
desc = desc + " " + l
del lines[0]
desc = string.strip(desc)
if quiet == 0:
if desc == "":
self.warning("Macro comment for %s lack description of the macro" % (name))
return((args, desc))
    #
    # Parse a comment block and merge the information found in the
    # parameter descriptions; finally returns a block as complete
    # as possible
    #
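    # An illustrative example of a function comment in the expected
    # format (the names are hypothetical):
    #
    #     /**
    #      * xmlDoSomething:
    #      * @doc: the document
    #      * @name: the element name
    #      *
    #      * Do something with @doc.
    #      *
    #      * Returns 0 in case of success, -1 in case of error.
    #      */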
def mergeFunctionComment(self, name, description, quiet = 0):
if name == 'main':
quiet = 1
if name[0:2] == '__':
quiet = 1
(ret, args) = description
desc = ""
retdesc = ""
if self.comment == None:
if not quiet:
self.warning("Missing comment for function %s" % (name))
return(((ret[0], retdesc), args, desc))
if self.comment[0] != '*':
if not quiet:
self.warning("Missing * in function comment for %s" % (name))
return(((ret[0], retdesc), args, desc))
lines = string.split(self.comment, '\n')
if lines[0] == '*':
del lines[0]
if lines[0] != "* %s:" % (name):
if not quiet:
self.warning("Misformatted function comment for %s" % (name))
self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
return(((ret[0], retdesc), args, desc))
del lines[0]
while len(lines) > 0 and lines[0] == '*':
del lines[0]
nbargs = len(args)
while len(lines) > 0 and lines[0][0:3] == '* @':
l = lines[0][3:]
try:
(arg, desc) = string.split(l, ':', 1)
desc=string.strip(desc)
arg=string.strip(arg)
except:
if not quiet:
self.warning("Misformatted function comment for %s" % (name))
self.warning(" problem with '%s'" % (lines[0]))
del lines[0]
continue
del lines[0]
l = string.strip(lines[0])
while len(l) > 2 and l[0:3] != '* @':
while l[0] == '*':
l = l[1:]
desc = desc + ' ' + string.strip(l)
del lines[0]
if len(lines) == 0:
break
l = lines[0]
i = 0
while i < nbargs:
if args[i][1] == arg:
args[i] = (args[i][0], arg, desc)
break;
i = i + 1
if i >= nbargs:
if not quiet:
self.warning("Unable to find arg %s from function comment for %s" % (
arg, name))
while len(lines) > 0 and lines[0] == '*':
del lines[0]
desc = ""
while len(lines) > 0:
l = lines[0]
while len(l) > 0 and l[0] == '*':
l = l[1:]
l = string.strip(l)
if len(l) >= 6 and l[0:6] == "return" or l[0:6] == "Return":
try:
l = string.split(l, ' ', 1)[1]
except:
l = ""
retdesc = string.strip(l)
del lines[0]
while len(lines) > 0:
l = lines[0]
while len(l) > 0 and l[0] == '*':
l = l[1:]
l = string.strip(l)
retdesc = retdesc + " " + l
del lines[0]
else:
desc = desc + " " + l
del lines[0]
retdesc = string.strip(retdesc)
desc = string.strip(desc)
if quiet == 0:
#
# report missing comments
#
i = 0
while i < nbargs:
if args[i][2] == None and args[i][0] != "void" and args[i][1] != None:
self.warning("Function comment for %s lack description of arg %s" % (name, args[i][1]))
i = i + 1
if retdesc == "" and ret[0] != "void":
self.warning("Function comment for %s lack description of return value" % (name))
if desc == "":
self.warning("Function comment for %s lack description of the function" % (name))
return(((ret[0], retdesc), args, desc))
def parsePreproc(self, token):
name = token[1]
if name == "#include":
token = self.lexer.token()
if token == None:
return None
if token[0] == 'preproc':
self.index_add(token[1], self.filename, not self.is_header,
"include")
return self.lexer.token()
return token
if name == "#define":
token = self.lexer.token()
if token == None:
return None
if token[0] == 'preproc':
# TODO macros with arguments
name = token[1]
lst = []
token = self.lexer.token()
while token != None and token[0] == 'preproc' and \
token[1][0] != '#':
lst.append(token[1])
token = self.lexer.token()
try:
name = string.split(name, '(') [0]
except:
pass
info = self.parseMacroComment(name, not self.is_header)
self.index_add(name, self.filename, not self.is_header,
"macro", info)
return token
token = self.lexer.token()
while token != None and token[0] == 'preproc' and \
token[1][0] != '#':
token = self.lexer.token()
return token
    #
    # Token acquisition on top of the lexer; it internally handles
    # preprocessor directives and comments since they are logically not
    # part of the program structure.
    #
def token(self):
global ignored_words
token = self.lexer.token()
while token != None:
if token[0] == 'comment':
token = self.parseComment(token)
continue
elif token[0] == 'preproc':
token = self.parsePreproc(token)
continue
elif token[0] == "name" and ignored_words.has_key(token[1]):
(n, info) = ignored_words[token[1]]
i = 0
while i < n:
token = self.lexer.token()
i = i + 1
token = self.lexer.token()
continue
else:
if debug:
print "=> ", token
return token
return None
#
# Parse a typedef, it records the type and its name.
#
def parseTypedef(self, token):
if token == None:
return None
token = self.parseType(token)
if token == None:
self.error("parsing typedef")
return None
base_type = self.type
type = base_type
#self.debug("end typedef type", token)
while token != None:
if token[0] == "name":
name = token[1]
signature = self.signature
if signature != None:
type = string.split(type, '(')[0]
d = self.mergeFunctionComment(name,
((type, None), signature), 1)
self.index_add(name, self.filename, not self.is_header,
"functype", d)
else:
if base_type == "struct":
self.index_add(name, self.filename, not self.is_header,
"struct", type)
base_type = "struct " + name
else:
self.index_add(name, self.filename, not self.is_header,
"typedef", type)
token = self.token()
else:
self.error("parsing typedef: expecting a name")
return token
#self.debug("end typedef", token)
if token != None and token[0] == 'sep' and token[1] == ',':
type = base_type
token = self.token()
while token != None and token[0] == "op":
type = type + token[1]
token = self.token()
elif token != None and token[0] == 'sep' and token[1] == ';':
break;
elif token != None and token[0] == 'name':
type = base_type
continue;
else:
self.error("parsing typedef: expecting ';'", token)
return token
token = self.token()
return token
    #
    # Parse a C code block; used for function bodies, it parses up to
    # and including the balancing }
    #
def parseBlock(self, token):
while token != None:
if token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseBlock(token)
elif token[0] == "sep" and token[1] == "}":
self.comment = None
token = self.token()
return token
else:
if self.collect_ref == 1:
oldtok = token
token = self.token()
if oldtok[0] == "name" and oldtok[1][0:3] == "xml":
if token[0] == "sep" and token[1] == "(":
self.index_add_ref(oldtok[1], self.filename,
0, "function")
token = self.token()
elif token[0] == "name":
token = self.token()
if token[0] == "sep" and (token[1] == ";" or
token[1] == "," or token[1] == "="):
self.index_add_ref(oldtok[1], self.filename,
0, "type")
elif oldtok[0] == "name" and oldtok[1][0:4] == "XML_":
self.index_add_ref(oldtok[1], self.filename,
0, "typedef")
elif oldtok[0] == "name" and oldtok[1][0:7] == "LIBXML_":
self.index_add_ref(oldtok[1], self.filename,
0, "typedef")
else:
token = self.token()
return token
#
# Parse a C struct definition till the balancing }
#
def parseStruct(self, token):
fields = []
#self.debug("start parseStruct", token)
while token != None:
if token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseTypeBlock(token)
elif token[0] == "sep" and token[1] == "}":
self.struct_fields = fields
#self.debug("end parseStruct", token)
#print fields
token = self.token()
return token
else:
base_type = self.type
#self.debug("before parseType", token)
token = self.parseType(token)
#self.debug("after parseType", token)
if token != None and token[0] == "name":
fname = token[1]
token = self.token()
if token[0] == "sep" and token[1] == ";":
self.comment = None
token = self.token()
fields.append((self.type, fname, self.comment))
self.comment = None
else:
self.error("parseStruct: expecting ;", token)
elif token != None and token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseTypeBlock(token)
if token != None and token[0] == "name":
token = self.token()
if token != None and token[0] == "sep" and token[1] == ";":
token = self.token()
else:
self.error("parseStruct: expecting ;", token)
else:
self.error("parseStruct: name", token)
token = self.token()
self.type = base_type;
self.struct_fields = fields
#self.debug("end parseStruct", token)
#print fields
return token
    #
    # Parse a C enum block, up to the balancing }
    #
def parseEnumBlock(self, token):
self.enums = []
name = None
self.comment = None
comment = ""
value = "0"
while token != None:
if token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseTypeBlock(token)
elif token[0] == "sep" and token[1] == "}":
if name != None:
if self.comment != None:
comment = self.comment
self.comment = None
self.enums.append((name, value, comment))
token = self.token()
return token
elif token[0] == "name":
if name != None:
if self.comment != None:
comment = string.strip(self.comment)
self.comment = None
self.enums.append((name, value, comment))
name = token[1]
comment = ""
token = self.token()
if token[0] == "op" and token[1][0] == "=":
value = ""
if len(token[1]) > 1:
value = token[1][1:]
token = self.token()
while token[0] != "sep" or (token[1] != ',' and
token[1] != '}'):
value = value + token[1]
token = self.token()
else:
try:
value = "%d" % (int(value) + 1)
except:
self.warning("Failed to compute value of enum %s" % (name))
value=""
if token[0] == "sep" and token[1] == ",":
token = self.token()
else:
token = self.token()
return token
    #
    # Parse a C definition block; used for structs, it parses up to
    # the balancing }
    #
def parseTypeBlock(self, token):
while token != None:
if token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseTypeBlock(token)
elif token[0] == "sep" and token[1] == "}":
token = self.token()
return token
else:
token = self.token()
return token
    #
    # Parse a type: the fact that the type name can occur either after
    # or within the definition makes it a little harder; if inside,
    # the name token is pushed back before returning
    #
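    # For example (illustrative), in "int (*handler)(int code)" the name
    # 'handler' appears inside the type definition; it is pushed back so
    # that the caller still sees a name token next.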
def parseType(self, token):
self.type = ""
self.struct_fields = []
self.signature = None
if token == None:
return token
while token[0] == "name" and (
token[1] == "const" or token[1] == "unsigned" or
token[1] == "signed"):
if self.type == "":
self.type = token[1]
else:
self.type = self.type + " " + token[1]
token = self.token()
if token[0] == "name" and (token[1] == "long" or token[1] == "short"):
if self.type == "":
self.type = token[1]
else:
self.type = self.type + " " + token[1]
if token[0] == "name" and token[1] == "int":
if self.type == "":
self.type = tmp[1]
else:
self.type = self.type + " " + tmp[1]
elif token[0] == "name" and token[1] == "struct":
if self.type == "":
self.type = token[1]
else:
self.type = self.type + " " + token[1]
token = self.token()
nametok = None
if token[0] == "name":
nametok = token
token = self.token()
if token != None and token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseStruct(token)
elif token != None and token[0] == "op" and token[1] == "*":
self.type = self.type + " " + nametok[1] + " *"
token = self.token()
while token != None and token[0] == "op" and token[1] == "*":
self.type = self.type + " *"
token = self.token()
if token[0] == "name":
nametok = token
token = self.token()
else:
self.error("struct : expecting name", token)
return token
elif token != None and token[0] == "name" and nametok != None:
self.type = self.type + " " + nametok[1]
return token
if nametok != None:
self.lexer.push(token)
token = nametok
return token
elif token[0] == "name" and token[1] == "enum":
if self.type == "":
self.type = token[1]
else:
self.type = self.type + " " + token[1]
self.enums = []
token = self.token()
if token != None and token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseEnumBlock(token)
else:
self.error("parsing enum: expecting '{'", token)
enum_type = None
if token != None and token[0] != "name":
self.lexer.push(token)
token = ("name", "enum")
else:
enum_type = token[1]
for enum in self.enums:
self.index_add(enum[0], self.filename,
not self.is_header, "enum",
(enum[1], enum[2], enum_type))
return token
elif token[0] == "name":
if self.type == "":
self.type = token[1]
else:
self.type = self.type + " " + token[1]
else:
self.error("parsing type %s: expecting a name" % (self.type),
token)
return token
token = self.token()
while token != None and (token[0] == "op" or
token[0] == "name" and token[1] == "const"):
self.type = self.type + " " + token[1]
token = self.token()
#
# if there is a parenthesis here, this means a function type
#
if token != None and token[0] == "sep" and token[1] == '(':
self.type = self.type + token[1]
token = self.token()
while token != None and token[0] == "op" and token[1] == '*':
self.type = self.type + token[1]
token = self.token()
if token == None or token[0] != "name" :
self.error("parsing function type, name expected", token);
return token
self.type = self.type + token[1]
nametok = token
token = self.token()
if token != None and token[0] == "sep" and token[1] == ')':
self.type = self.type + token[1]
token = self.token()
if token != None and token[0] == "sep" and token[1] == '(':
token = self.token()
type = self.type;
token = self.parseSignature(token);
self.type = type;
else:
self.error("parsing function type, '(' expected", token);
return token
else:
self.error("parsing function type, ')' expected", token);
return token
self.lexer.push(token)
token = nametok
return token
#
# do some lookahead for arrays
#
if token != None and token[0] == "name":
nametok = token
token = self.token()
if token != None and token[0] == "sep" and token[1] == '[':
self.type = self.type + nametok[1]
while token != None and token[0] == "sep" and token[1] == '[':
self.type = self.type + token[1]
token = self.token()
while token != None and token[0] != 'sep' and \
token[1] != ']' and token[1] != ';':
self.type = self.type + token[1]
token = self.token()
if token != None and token[0] == 'sep' and token[1] == ']':
self.type = self.type + token[1]
token = self.token()
else:
self.error("parsing array type, ']' expected", token);
return token
elif token != None and token[0] == "sep" and token[1] == ':':
# remove :12 in case it's a limited int size
token = self.token()
token = self.token()
self.lexer.push(token)
token = nametok
return token
#
# Parse a signature: '(' has been parsed and we scan the type definition
    # up to and including the ')'
def parseSignature(self, token):
signature = []
if token != None and token[0] == "sep" and token[1] == ')':
self.signature = []
token = self.token()
return token
while token != None:
token = self.parseType(token)
if token != None and token[0] == "name":
signature.append((self.type, token[1], None))
token = self.token()
elif token != None and token[0] == "sep" and token[1] == ',':
token = self.token()
continue
elif token != None and token[0] == "sep" and token[1] == ')':
# only the type was provided
if self.type == "...":
signature.append((self.type, "...", None))
else:
signature.append((self.type, None, None))
if token != None and token[0] == "sep":
if token[1] == ',':
token = self.token()
continue
elif token[1] == ')':
token = self.token()
break
self.signature = signature
return token
#
# Parse a global definition, be it a type, variable or function
# the extern "C" blocks are a bit nasty and require it to recurse.
#
def parseGlobal(self, token):
static = 0
if token[1] == 'extern':
token = self.token()
if token == None:
return token
if token[0] == 'string':
if token[1] == 'C':
token = self.token()
if token == None:
return token
if token[0] == 'sep' and token[1] == "{":
token = self.token()
while token != None and (token[0] != 'sep' or
token[1] != "}"):
if token[0] == 'name':
token = self.parseGlobal(token)
else:
self.error(
"token %s %s unexpected at the top level" % (
token[0], token[1]))
token = self.parseGlobal(token)
token = self.token()
return token
else:
return token
elif token[1] == 'static':
static = 1
token = self.token()
if token == None or token[0] != 'name':
return token
if token[1] == 'typedef':
token = self.token()
return self.parseTypedef(token)
else:
token = self.parseType(token)
type_orig = self.type
if token == None or token[0] != "name":
return token
type = type_orig
self.name = token[1]
token = self.token()
while token != None and (token[0] == "sep" or token[0] == "op"):
if token[0] == "sep":
if token[1] == "[":
type = type + token[1]
token = self.token()
while token != None and (token[0] != "sep" or \
token[1] != ";"):
type = type + token[1]
token = self.token()
if token != None and token[0] == "op" and token[1] == "=":
#
# Skip the initialization of the variable
#
token = self.token()
if token[0] == 'sep' and token[1] == '{':
token = self.token()
token = self.parseBlock(token)
else:
self.comment = None
while token != None and (token[0] != "sep" or \
(token[1] != ';' and token[1] != ',')):
token = self.token()
self.comment = None
if token == None or token[0] != "sep" or (token[1] != ';' and
token[1] != ','):
self.error("missing ';' or ',' after value")
if token != None and token[0] == "sep":
if token[1] == ";":
self.comment = None
token = self.token()
if type == "struct":
self.index_add(self.name, self.filename,
not self.is_header, "struct", self.struct_fields)
else:
self.index_add(self.name, self.filename,
not self.is_header, "variable", type)
break
elif token[1] == "(":
token = self.token()
token = self.parseSignature(token)
if token == None:
return None
if token[0] == "sep" and token[1] == ";":
d = self.mergeFunctionComment(self.name,
((type, None), self.signature), 1)
self.index_add(self.name, self.filename, static,
"function", d)
token = self.token()
elif token[0] == "sep" and token[1] == "{":
d = self.mergeFunctionComment(self.name,
((type, None), self.signature), static)
self.index_add(self.name, self.filename, static,
"function", d)
token = self.token()
token = self.parseBlock(token);
elif token[1] == ',':
self.comment = None
self.index_add(self.name, self.filename, static,
"variable", type)
type = type_orig
token = self.token()
while token != None and token[0] == "sep":
type = type + token[1]
token = self.token()
if token != None and token[0] == "name":
self.name = token[1]
token = self.token()
else:
break
return token
def parse(self):
self.warning("Parsing %s" % (self.filename))
token = self.token()
while token != None:
if token[0] == 'name':
token = self.parseGlobal(token)
else:
self.error("token %s %s unexpected at the top level" % (
token[0], token[1]))
token = self.parseGlobal(token)
return
self.parseTopComment(self.top_comment)
return self.index
class docBuilder:
"""A documentation builder"""
def __init__(self, name, directories=['.'], excludes=[]):
self.name = name
self.directories = directories
self.excludes = excludes + ignored_files.keys()
self.modules = {}
self.headers = {}
self.idx = index()
self.xref = {}
self.index = {}
if name == 'libxml2':
self.basename = 'libxml'
else:
self.basename = name
def indexString(self, id, str):
if str == None:
return
str = string.replace(str, "'", ' ')
str = string.replace(str, '"', ' ')
str = string.replace(str, "/", ' ')
str = string.replace(str, '*', ' ')
str = string.replace(str, "[", ' ')
str = string.replace(str, "]", ' ')
str = string.replace(str, "(", ' ')
str = string.replace(str, ")", ' ')
str = string.replace(str, "<", ' ')
str = string.replace(str, '>', ' ')
str = string.replace(str, "&", ' ')
str = string.replace(str, '#', ' ')
str = string.replace(str, ",", ' ')
str = string.replace(str, '.', ' ')
str = string.replace(str, ';', ' ')
tokens = string.split(str)
for token in tokens:
try:
c = token[0]
if string.find(string.letters, c) < 0:
pass
elif len(token) < 3:
pass
else:
lower = string.lower(token)
# TODO: generalize this a bit
if lower == 'and' or lower == 'the':
pass
elif self.xref.has_key(token):
self.xref[token].append(id)
else:
self.xref[token] = [id]
except:
pass
def analyze(self):
print "Project %s : %d headers, %d modules" % (self.name, len(self.headers.keys()), len(self.modules.keys()))
self.idx.analyze()
def scanHeaders(self):
for header in self.headers.keys():
parser = CParser(header)
idx = parser.parse()
self.headers[header] = idx;
self.idx.merge(idx)
def scanModules(self):
for module in self.modules.keys():
parser = CParser(module)
idx = parser.parse()
# idx.analyze()
self.modules[module] = idx
self.idx.merge_public(idx)
def scan(self):
for directory in self.directories:
files = glob.glob(directory + "/*.c")
for file in files:
skip = 0
for excl in self.excludes:
if string.find(file, excl) != -1:
skip = 1;
break
if skip == 0:
self.modules[file] = None;
files = glob.glob(directory + "/*.h")
for file in files:
skip = 0
for excl in self.excludes:
if string.find(file, excl) != -1:
skip = 1;
break
if skip == 0:
self.headers[file] = None;
self.scanHeaders()
self.scanModules()
def modulename_file(self, file):
module = os.path.basename(file)
if module[-2:] == '.h':
module = module[:-2]
return module
def serialize_enum(self, output, name):
id = self.idx.enums[name]
output.write(" <enum name='%s' file='%s'" % (name,
self.modulename_file(id.module)))
if id.info != None:
info = id.info
if info[0] != None and info[0] != '':
try:
val = eval(info[0])
except:
val = info[0]
output.write(" value='%s'" % (val));
if info[2] != None and info[2] != '':
output.write(" type='%s'" % info[2]);
if info[1] != None and info[1] != '':
output.write(" info='%s'" % escape(info[1]));
output.write("/>\n")
def serialize_macro(self, output, name):
id = self.idx.macros[name]
output.write(" <macro name='%s' file='%s'>\n" % (name,
self.modulename_file(id.module)))
if id.info != None:
try:
(args, desc) = id.info
if desc != None and desc != "":
output.write(" <info>%s</info>\n" % (escape(desc)))
self.indexString(name, desc)
for arg in args:
(name, desc) = arg
if desc != None and desc != "":
output.write(" <arg name='%s' info='%s'/>\n" % (
name, escape(desc)))
self.indexString(name, desc)
else:
output.write(" <arg name='%s'/>\n" % (name))
except:
pass
output.write(" </macro>\n")
def serialize_typedef(self, output, name):
id = self.idx.typedefs[name]
if id.info[0:7] == 'struct ':
output.write(" <struct name='%s' file='%s' type='%s'" % (
name, self.modulename_file(id.module), id.info))
name = id.info[7:]
if self.idx.structs.has_key(name) and ( \
type(self.idx.structs[name].info) == type(()) or
type(self.idx.structs[name].info) == type([])):
output.write(">\n");
try:
for field in self.idx.structs[name].info:
desc = field[2]
self.indexString(name, desc)
if desc == None:
desc = ''
else:
desc = escape(desc)
output.write(" <field name='%s' type='%s' info='%s'/>\n" % (field[1] , field[0], desc))
except:
print "Failed to serialize struct %s" % (name)
output.write(" </struct>\n")
else:
output.write("/>\n");
else :
output.write(" <typedef name='%s' file='%s' type='%s'/>\n" % (
name, self.modulename_file(id.module), id.info))
def serialize_variable(self, output, name):
id = self.idx.variables[name]
if id.info != None:
output.write(" <variable name='%s' file='%s' type='%s'/>\n" % (
name, self.modulename_file(id.module), id.info))
else:
output.write(" <variable name='%s' file='%s'/>\n" % (
name, self.modulename_file(id.module)))
def serialize_function(self, output, name):
id = self.idx.functions[name]
output.write(" <%s name='%s' file='%s'>\n" % (id.type, name,
self.modulename_file(id.module)))
try:
(ret, params, desc) = id.info
output.write(" <info>%s</info>\n" % (escape(desc)))
self.indexString(name, desc)
if ret[0] != None:
if ret[0] == "void":
output.write(" <return type='void'/>\n")
else:
output.write(" <return type='%s' info='%s'/>\n" % (
ret[0], escape(ret[1])))
self.indexString(name, ret[1])
for param in params:
if param[0] == 'void':
continue
if param[2] == None:
output.write(" <arg name='%s' type='%s' info=''/>\n" % (param[1], param[0]))
else:
output.write(" <arg name='%s' type='%s' info='%s'/>\n" % (param[1], param[0], escape(param[2])))
self.indexString(name, param[2])
except:
print "Failed to save function %s info: " % name, `id.info`
output.write(" </%s>\n" % (id.type))
def serialize_exports(self, output, file):
module = self.modulename_file(file)
output.write(" <file name='%s'>\n" % (module))
dict = self.headers[file]
if dict.info != None:
for data in ('Summary', 'Description', 'Author'):
try:
output.write(" <%s>%s</%s>\n" % (
string.lower(data),
escape(dict.info[data]),
string.lower(data)))
except:
print "Header %s lacks a %s description" % (module, data)
if dict.info.has_key('Description'):
desc = dict.info['Description']
if string.find(desc, "DEPRECATED") != -1:
output.write(" <deprecated/>\n")
ids = dict.macros.keys()
ids.sort()
for id in uniq(ids):
# Macros are sometime used to masquerade other types.
if dict.functions.has_key(id):
continue
if dict.variables.has_key(id):
continue
if dict.typedefs.has_key(id):
continue
if dict.structs.has_key(id):
continue
if dict.enums.has_key(id):
continue
output.write(" <exports symbol='%s' type='macro'/>\n" % (id))
ids = dict.enums.keys()
ids.sort()
for id in uniq(ids):
output.write(" <exports symbol='%s' type='enum'/>\n" % (id))
ids = dict.typedefs.keys()
ids.sort()
for id in uniq(ids):
output.write(" <exports symbol='%s' type='typedef'/>\n" % (id))
ids = dict.structs.keys()
ids.sort()
for id in uniq(ids):
output.write(" <exports symbol='%s' type='struct'/>\n" % (id))
ids = dict.variables.keys()
ids.sort()
for id in uniq(ids):
output.write(" <exports symbol='%s' type='variable'/>\n" % (id))
ids = dict.functions.keys()
ids.sort()
for id in uniq(ids):
output.write(" <exports symbol='%s' type='function'/>\n" % (id))
output.write(" </file>\n")
def serialize_xrefs_files(self, output):
headers = self.headers.keys()
headers.sort()
for file in headers:
module = self.modulename_file(file)
output.write(" <file name='%s'>\n" % (module))
dict = self.headers[file]
ids = uniq(dict.functions.keys() + dict.variables.keys() + \
dict.macros.keys() + dict.typedefs.keys() + \
dict.structs.keys() + dict.enums.keys())
ids.sort()
for id in ids:
output.write(" <ref name='%s'/>\n" % (id))
output.write(" </file>\n")
pass
def serialize_xrefs_functions(self, output):
funcs = {}
for name in self.idx.functions.keys():
id = self.idx.functions[name]
try:
(ret, params, desc) = id.info
for param in params:
if param[0] == 'void':
continue
if funcs.has_key(param[0]):
funcs[param[0]].append(name)
else:
funcs[param[0]] = [name]
except:
pass
typ = funcs.keys()
typ.sort()
for type in typ:
if type == '' or type == 'void' or type == "int" or \
type == "char *" or type == "const char *" :
continue
output.write(" <type name='%s'>\n" % (type))
ids = funcs[type]
ids.sort()
pid = '' # not sure why we have dups, but get rid of them!
for id in ids:
if id != pid:
output.write(" <ref name='%s'/>\n" % (id))
pid = id
output.write(" </type>\n")
def serialize_xrefs_constructors(self, output):
funcs = {}
for name in self.idx.functions.keys():
id = self.idx.functions[name]
try:
(ret, params, desc) = id.info
if ret[0] == "void":
continue
if funcs.has_key(ret[0]):
funcs[ret[0]].append(name)
else:
funcs[ret[0]] = [name]
except:
pass
typ = funcs.keys()
typ.sort()
for type in typ:
if type == '' or type == 'void' or type == "int" or \
type == "char *" or type == "const char *" :
continue
output.write(" <type name='%s'>\n" % (type))
ids = funcs[type]
ids.sort()
for id in ids:
output.write(" <ref name='%s'/>\n" % (id))
output.write(" </type>\n")
def serialize_xrefs_alpha(self, output):
letter = None
ids = self.idx.identifiers.keys()
ids.sort()
for id in ids:
if id[0] != letter:
if letter != None:
output.write(" </letter>\n")
letter = id[0]
output.write(" <letter name='%s'>\n" % (letter))
output.write(" <ref name='%s'/>\n" % (id))
if letter != None:
output.write(" </letter>\n")
def serialize_xrefs_references(self, output):
typ = self.idx.identifiers.keys()
typ.sort()
for id in typ:
idf = self.idx.identifiers[id]
module = idf.module
output.write(" <reference name='%s' href='%s'/>\n" % (id,
'html/' + self.basename + '-' +
self.modulename_file(module) + '.html#' +
id))
def serialize_xrefs_index(self, output):
index = self.xref
typ = index.keys()
typ.sort()
letter = None
count = 0
chunk = 0
chunks = []
for id in typ:
if len(index[id]) > 30:
continue
if id[0] != letter:
if letter == None or count > 200:
if letter != None:
output.write(" </letter>\n")
output.write(" </chunk>\n")
count = 0
chunks.append(["chunk%s" % (chunk -1), first_letter, letter])
output.write(" <chunk name='chunk%s'>\n" % (chunk))
first_letter = id[0]
chunk = chunk + 1
elif letter != None:
output.write(" </letter>\n")
letter = id[0]
output.write(" <letter name='%s'>\n" % (letter))
output.write(" <word name='%s'>\n" % (id))
tokens = index[id];
tokens.sort()
tok = None
for token in tokens:
if tok == token:
continue
tok = token
output.write(" <ref name='%s'/>\n" % (token))
count = count + 1
output.write(" </word>\n")
if letter != None:
output.write(" </letter>\n")
output.write(" </chunk>\n")
if count != 0:
chunks.append(["chunk%s" % (chunk -1), first_letter, letter])
output.write(" <chunks>\n")
for ch in chunks:
output.write(" <chunk name='%s' start='%s' end='%s'/>\n" % (
ch[0], ch[1], ch[2]))
output.write(" </chunks>\n")
def serialize_xrefs(self, output):
output.write(" <references>\n")
self.serialize_xrefs_references(output)
output.write(" </references>\n")
output.write(" <alpha>\n")
self.serialize_xrefs_alpha(output)
output.write(" </alpha>\n")
output.write(" <constructors>\n")
self.serialize_xrefs_constructors(output)
output.write(" </constructors>\n")
output.write(" <functions>\n")
self.serialize_xrefs_functions(output)
output.write(" </functions>\n")
output.write(" <files>\n")
self.serialize_xrefs_files(output)
output.write(" </files>\n")
output.write(" <index>\n")
self.serialize_xrefs_index(output)
output.write(" </index>\n")
def serialize(self, outdir):
filename = outdir + "%s-api.xml" % self.name
print "Saving XML description %s" % (filename)
output = open(filename, "w")
output.write('<?xml version="1.0" encoding="ISO-8859-1"?>\n')
output.write("<api name='%s'>\n" % self.name)
output.write(" <files>\n")
headers = self.headers.keys()
headers.sort()
for file in headers:
self.serialize_exports(output, file)
output.write(" </files>\n")
output.write(" <symbols>\n")
macros = self.idx.macros.keys()
macros.sort()
for macro in macros:
self.serialize_macro(output, macro)
enums = self.idx.enums.keys()
enums.sort()
for enum in enums:
self.serialize_enum(output, enum)
typedefs = self.idx.typedefs.keys()
typedefs.sort()
for typedef in typedefs:
self.serialize_typedef(output, typedef)
variables = self.idx.variables.keys()
variables.sort()
for variable in variables:
self.serialize_variable(output, variable)
functions = self.idx.functions.keys()
functions.sort()
for function in functions:
self.serialize_function(output, function)
output.write(" </symbols>\n")
output.write("</api>\n")
output.close()
filename = outdir + "%s-refs.xml" % self.name
print "Saving XML Cross References %s" % (filename)
output = open(filename, "w")
output.write('<?xml version="1.0" encoding="ISO-8859-1"?>\n')
output.write("<apirefs name='%s'>\n" % self.name)
self.serialize_xrefs(output)
output.write("</apirefs>\n")
output.close()
def rebuild():
builder = None
if glob.glob("parser.c") != [] :
print "Rebuilding API description for libxml2"
builder = docBuilder("libxml2", [".", "."],
["xmlwin32version.h", "tst.c"])
elif glob.glob("../parser.c") != [] :
print "Rebuilding API description for libxml2"
builder = docBuilder("libxml2", ["..", "../include/libxml"],
["xmlwin32version.h", "tst.c"])
elif glob.glob("../libxslt/transform.c") != [] :
print "Rebuilding API description for libxslt"
builder = docBuilder("libxslt", ["../libxslt"],
["win32config.h", "libxslt.h", "tst.c"])
else:
print "rebuild() failed, unable to guess the module"
return None
builder.scan()
builder.analyze()
builder.serialize("./")
if glob.glob("../libexslt/exslt.c") != [] :
extra = docBuilder("libexslt", ["../libexslt"], ["libexslt.h"])
extra.scan()
extra.analyze()
extra.serialize("EXSLT/")
return builder
def parse(filename):
parser = CParser(filename)
idx = parser.parse()
return idx
if __name__ == "__main__":
rebuild()
|
NAME = "ZenPacks.OndrejJakubcik.OracleHwMonitoring"
VERSION = "1.1"
AUTHOR = "Ondrej Jakubcik"
LICENSE = "LGPL"
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.OndrejJakubcik']
PACKAGES = ['ZenPacks', 'ZenPacks.OndrejJakubcik', 'ZenPacks.OndrejJakubcik.OracleHwMonitoring']
INSTALL_REQUIRES = []
COMPAT_ZENOSS_VERS = ">= 3.0"
PREV_ZENPACK_NAME = "OracleHwMonitoring"
from setuptools import setup, find_packages
setup(
# This ZenPack metadata should usually be edited with the Zenoss
# ZenPack edit page. Whenever the edit page is submitted it will
# overwrite the values below (the ones it knows about) with new values.
name = NAME,
version = VERSION,
author = AUTHOR,
license = LICENSE,
# This is the version spec which indicates what versions of Zenoss
# this ZenPack is compatible with
compatZenossVers = COMPAT_ZENOSS_VERS,
# previousZenPackName is a facility for telling Zenoss that the name
    # of this ZenPack has changed. If no ZenPack with the current name is
    # installed, then an already-installed ZenPack with this previous name
    # will be upgraded.
prevZenPackName = PREV_ZENPACK_NAME,
# Indicate to setuptools which namespace packages the zenpack
# participates in
namespace_packages = NAMESPACE_PACKAGES,
# Tell setuptools what packages this zenpack provides.
packages = find_packages(),
# Tell setuptools to figure out for itself which files to include
# in the binary egg when it is built.
include_package_data = True,
# The MANIFEST.in file is the recommended way of including additional files
# in your ZenPack. package_data is another.
#package_data = {}
# Indicate dependencies on other python modules or ZenPacks. This line
# is modified by zenoss when the ZenPack edit page is submitted. Zenoss
    # tries to add/delete the names it manages at the beginning of this
    # list, so any manual additions should be added to the end. Things will
    # go poorly if this line is broken into multiple lines or modified too
    # dramatically.
install_requires = INSTALL_REQUIRES,
# Every ZenPack egg must define exactly one zenoss.zenpacks entry point
# of this form.
entry_points = {
'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
},
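    # For this pack the entry point above renders as (illustrative):
    #   'zenoss.zenpacks':
    #       'ZenPacks.OndrejJakubcik.OracleHwMonitoring = ZenPacks.OndrejJakubcik.OracleHwMonitoring'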
# All ZenPack eggs must be installed in unzipped form.
zip_safe = False,
)
|
import os
import logging
from virttest import virt_vm
from virttest import libvirt_xml
from virttest import virsh
from virttest import utils_misc
from virttest import utils_test
from virttest import utils_config
from virttest import utils_libvirtd
from autotest.client import utils
from autotest.client.shared import error
def run(test, params, env):
"""
Test numa tuning with memory
"""
numad_log = []
memory_status = []
def _logger(line):
"""
Callback function to log libvirtd output.
"""
numad_log.append(line)
def mem_compare(used_node, left_node):
"""
        Memory usage in used nodes should be greater than in left nodes
:param used_node: used node list
:param left_node: left node list
"""
used_mem_total = 0
left_node_mem_total = 0
for i in used_node:
used_mem_total += int(memory_status[i])
for i in left_node:
left_node_mem_total += int(memory_status[i])
if left_node_mem_total > used_mem_total:
raise error.TestFail("nodes memory usage not expected.")
def format_affinity_str(cpu_list):
"""
Format affinity str
:param cpu_list: list of cpu number
:return: cpu affinity string
"""
cmd = "lscpu | grep '^CPU(s):'"
cpu_num = int(utils.run(cmd).stdout.strip().split(':')[1].strip())
cpu_affinity_str = ""
for i in range(cpu_num):
if i in cpu_list:
cpu_affinity_str += "y"
else:
cpu_affinity_str += "-"
return cpu_affinity_str
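    # For example (illustrative): on a host with 4 CPUs, cpu_list=[0, 2]
    # yields the affinity string "y-y-".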
def cpu_affinity_check(cpuset=None, node=None):
"""
Check vcpuinfo cpu affinity
:param cpuset: cpuset list
:param node: node number list
"""
result = virsh.vcpuinfo(vm_name, debug=True)
output = result.stdout.strip().splitlines()[-1]
cpu_affinity = output.split(":")[-1].strip()
if node:
tmp_list = []
for node_num in node:
host_node = utils_misc.NumaNode(i=node_num+1)
logging.debug("node %s cpu list is %s" %
(node_num, host_node.cpus))
tmp_list += host_node.cpus
cpu_list = [int(i) for i in tmp_list]
if cpuset:
cpu_list = cpuset
ret = format_affinity_str(cpu_list)
logging.debug("expect cpu affinity is %s", ret)
if cpu_affinity != ret:
raise error.TestFail("vcpuinfo cpu affinity not expected")
vcpu_placement = params.get("vcpu_placement")
vcpu_cpuset = params.get("vcpu_cpuset")
bug_url = params.get("bug_url", "")
status_error = "yes" == params.get("status_error", "no")
vm_name = params.get("vms")
vm = env.get_vm(vm_name)
backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
# Prepare numatune memory parameter dict
mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
numa_memory = {}
for mem_param in mem_tuple:
value = params.get(mem_param)
if value:
numa_memory[mem_param.split('_')[1]] = value
# Prepare libvirtd session with log level as 1
config_path = "/var/tmp/virt-test.conf"
open(config_path, 'a').close()
config = utils_config.LibvirtdConfig(config_path)
config.log_level = 1
arg_str = "--config %s" % config_path
numad_reg = ".*numad"
libvirtd = utils_libvirtd.LibvirtdSession(logging_handler=_logger,
logging_pattern=numad_reg)
try:
libvirtd.start(arg_str=arg_str)
# Get host numa node list
host_numa_node = utils_misc.NumaInfo()
node_list = host_numa_node.online_nodes
logging.debug("host node list is %s", node_list)
if numa_memory.get('nodeset'):
used_node = utils_test.libvirt.cpus_parser(numa_memory['nodeset'])
logging.debug("set node list is %s", used_node)
if not status_error:
for i in used_node:
if i > max(node_list):
raise error.TestNAError("nodeset %s out of range" %
numa_memory['nodeset'])
vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
vmxml.numa_memory = numa_memory
vcpu_num = vmxml.vcpu
max_mem = vmxml.max_mem
if vcpu_placement:
vmxml.placement = vcpu_placement
if vcpu_cpuset:
vmxml.cpuset = vcpu_cpuset
pre_cpuset = utils_test.libvirt.cpus_parser(vcpu_cpuset)
logging.debug("Parsed cpuset list is %s", pre_cpuset)
logging.debug("vm xml is %s", vmxml)
vmxml.sync()
numad_cmd_opt = "-w %s:%s" % (vcpu_num, max_mem/1024)
try:
vm.start()
vm.wait_for_login()
vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
numa_memory_new = vmxml_new.numa_memory
logging.debug("Current memory config dict is %s" % numa_memory_new)
# Check xml config
if numa_memory.get('placement') == 'static':
pre_numa_memory = numa_memory.copy()
del pre_numa_memory['placement']
else:
pre_numa_memory = numa_memory
if pre_numa_memory != numa_memory_new:
raise error.TestFail("memory config %s not expected after "
"domain start" % numa_memory_new)
pos_vcpu_placement = vmxml_new.placement
logging.debug("vcpu placement after domain start is %s",
pos_vcpu_placement)
try:
pos_cpuset = vmxml_new.cpuset
logging.debug("vcpu cpuset after vm start is %s", pos_cpuset)
except libvirt_xml.xcepts.LibvirtXMLNotFoundError:
if vcpu_cpuset and vcpu_placement != 'auto':
raise error.TestFail("cpuset not found in domain xml.")
except virt_vm.VMStartError, e:
# Starting VM failed.
if status_error:
return
else:
raise error.TestFail("Test failed in positive case.\n error:"
" %s\n%s" % (e, bug_url))
# Check qemu process numa memory usage
host_numa_node = utils_misc.NumaInfo()
memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
host_numa_node,
vm.get_pid())
logging.debug("The memory status is %s", memory_status)
logging.debug("The cpu usage is %s", qemu_cpu)
if vcpu_cpuset:
total_cpu = []
for node_cpu in qemu_cpu:
total_cpu += node_cpu
for i in total_cpu:
if int(i) not in pre_cpuset:
raise error.TestFail("cpu %s is not expected" % i)
cpu_affinity_check(cpuset=pre_cpuset)
if numa_memory.get('nodeset'):
            # If the node numbers on the host are not consecutive, convert
            # them to sequence indexes so that they can be used in
            # mem_compare
left_node = [node_list.index(i) for i in node_list if i not in used_node]
used_node = [node_list.index(i) for i in used_node]
mem_compare(used_node, left_node)
logging.debug("numad log list is %s", numad_log)
if vcpu_placement == 'auto' or numa_memory.get('placement') == 'auto':
if not numad_log:
raise error.TestFail("numad usage not found in libvirtd log")
if numad_log[0].split("numad ")[-1] != numad_cmd_opt:
raise error.TestFail("numad command not expected in log")
numad_ret = numad_log[1].split("numad: ")[-1]
numad_node = utils_test.libvirt.cpus_parser(numad_ret)
left_node = [node_list.index(i) for i in node_list if i not in numad_node]
numad_node = [node_list.index(i) for i in numad_node]
logging.debug("numad nodes are %s", numad_node)
if numa_memory.get('placement') == 'auto':
mem_compare(numad_node, left_node)
if vcpu_placement == 'auto':
for i in left_node:
if qemu_cpu[i]:
raise error.TestFail("cpu usage in node %s is not "
"expected" % i)
cpu_affinity_check(node=numad_node)
finally:
libvirtd.exit()
if config_path:
config.restore()
if os.path.exists(config_path):
os.remove(config_path)
if vm.is_alive():
vm.destroy(gracefully=False)
backup_xml.sync()
|
"""
Utilities for working with the event loop.
"""
from __future__ import absolute_import
import logging
log = logging.getLogger("storage.asyncutils")
class LoopingCall(object):
"""
A simplified version of `twisted.internet.task.LoopingCall`.
This class implements the common pattern of running a call every
interval seconds.
The callback will be invoked exactly every interval seconds since
    the looping call was started, unless the event loop was delayed and
    missed some of the deadlines.
The callback must not block, or it will delay the entire event loop.
If you need to invoke a blocking operation, run the blocking
operation in another thread.
Example usage:
lc = LoopingCall(loop, callback, arg1, arg2)
lc.start(10)
To stop the calls:
lc.stop()
Note: This class is not thread safe and will never be. All calls
must be done from the event loop thread. If you want to call from
another thread, use EventLoop.call_soon_threadsafe().
"""
    # Threshold for warning about delayed callbacks. This can happen if the
    # event loop is too busy, or was blocked by a bad callback. Neither case
    # is expected, and we should easily detect them in the field.
warning_delay = 0.5
def __init__(self, loop, callback, *args):
"""
Initialize a LoopingCall
Arguments:
loop (`storage.asyncevent.EventLoop`): The event loop that
should run the looping call.
callback (callable): A callable object
*args: Arguments for the callback
Note: A LoopingCall may be created only from the event loop
thread.
"""
self._loop = loop
self._callback = callback
self._args = args
self._running = False
self._deadline = None
self._interval = None
self._timer = None
def start(self, interval):
"""
Start a LoopingCall
Arguments:
interval (float): Interval to call looping call callback.
The first call is performed immediately. To delay the first
call, use EventLoop.call_after():
loop.call_after(10, lc.start, 10)
Then, every interval seconds, looping call's callback will be
invoked.
"""
assert not self._running, "LoopingCall is already running"
self._running = True
self._interval = interval
self._deadline = self._loop.time()
self()
def is_running(self):
"""
        Return True if the looping call is running.
"""
return self._running
@property
def deadline(self):
"""
Return the next time the callback should be invoked.
The deadline is updated after the callback is invoked.
"""
return self._deadline
def stop(self):
"""
        Stop the looping call.
"""
if self._running:
self._running = False
if self._timer:
self._timer.cancel()
self._timer = None
def __call__(self):
if not self._running:
return
delay = self._loop.time() - self._deadline
if delay > self.warning_delay:
log.warning("Call %s delayed by %.2f seconds",
self._callback, delay)
try:
self._callback(*self._args)
finally:
# Schedule next call after callback, so we skip missed deadlines if
# callback was slow. For example, if you schedule callback to run
# every 1 second, but callback blocks for 1.1 seconds, it will be
# called every 2 seconds.
if self._running:
self._schedule_next_call()
def _schedule_next_call(self):
"""
Schedule the next call, skipping missed deadlines in the past.
"""
self._deadline += self._interval
now = self._loop.time()
if self._deadline <= now:
# We missed at least one deadline.
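            # Example: with interval=1, if the deadline passed 2.5 seconds
            # ago, missed = 2.5 // 1 + 1 = 3, so the next deadline is pushed
            # 3 intervals forward, 0.5 seconds from now.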
missed = (now - self._deadline) // self._interval + 1
self._deadline += missed * self._interval
log.warning("Call %s missed %d deadlines, scheduling next call "
"at %.2f",
self._callback, missed, self._deadline)
self._timer = self._loop.call_at(self._deadline, self)
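# ---------------------------------------------------------------------------
# A minimal sketch (not part of the original module) showing how LoopingCall
# drives its callback. "ToyLoop" is a hypothetical stand-in implementing only
# the two methods LoopingCall relies on, time() and call_at(); the real loop
# is expected to be storage.asyncevent.EventLoop.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import heapq
    import time as _time

    class _Timer(object):
        def __init__(self, deadline, callback):
            self.deadline = deadline
            self.callback = callback
            self.cancelled = False

        def cancel(self):
            self.cancelled = True

        def __lt__(self, other):
            return self.deadline < other.deadline

    class ToyLoop(object):
        """Hypothetical single-threaded loop running timers in deadline order."""

        def __init__(self):
            self._timers = []

        def time(self):
            return _time.time()

        def call_at(self, deadline, callback):
            timer = _Timer(deadline, callback)
            heapq.heappush(self._timers, timer)
            return timer

        def run(self, duration):
            end = self.time() + duration
            while self._timers:
                timer = heapq.heappop(self._timers)
                if timer.deadline > end:
                    break
                _time.sleep(max(0, timer.deadline - self.time()))
                if not timer.cancelled:
                    timer.callback()

    def tick():
        print("tick at %.2f" % _time.time())

    loop = ToyLoop()
    lc = LoopingCall(loop, tick)
    lc.start(0.1)   # first call happens immediately, then every 0.1s
    loop.run(0.35)  # expect about 4 ticks
    lc.stop()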
|
__doc__="""IBMNetworkAdapterMap
IBMNetworkAdapterMap maps the ibmSystemLogicalNetworkAdapterTable to cards
objects
$Id: IBMNetworkAdapterMap.py,v 1.0 2009/07/21 23:36:53 egor Exp $"""
__version__ = '$Revision: 1.0 $'[11:-2]
from Products.DataCollector.plugins.CollectorPlugin import SnmpPlugin, GetTableMap
from Products.DataCollector.plugins.DataMaps import MultiArgs
class IBMNetworkAdapterMap(SnmpPlugin):
"""Map IBM Director PCI table to model."""
maptype = "IBMNetworkAdapterMap"
modname = "ZenPacks.community.IBMMon.IBMNetworkAdapter"
relname = "cards"
compname = "hw"
snmpGetTableMaps = (
GetTableMap('networkAdapterTable',
'.1.3.6.1.4.1.2.6.159.1.1.110.1.1',
{
'.1': 'id',
'.3': 'model',
'.7': 'macaddress',
'.8': 'speed',
}
),
)
def process(self, device, results, log):
"""collect snmp information from this device"""
log.info('processing %s for device %s', self.name(), device.id)
rm = self.relMap()
getdata, tabledata = results
cardtable = tabledata.get('networkAdapterTable')
for oid, card in cardtable.iteritems():
try:
om = self.objectMap(card)
om.snmpindex = oid.strip('.')
om.id = self.prepId(om.id)
om.slot = 0
om.setProductKey = MultiArgs(om.model, om.model.split()[0])
if hasattr(om, 'macaddress'):
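                    # the agent reports the MAC as a bare 12-digit hex string
                    # (e.g. "0123456789AB"); reformat it as 01:23:45:67:89:AB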
mac = []
for i in range(6):
mac.append(om.macaddress[i*2:i*2+2])
om.macaddress = ':'.join(mac)
if hasattr(om, 'speed'):
om.speed = int(om.speed) * 1000000
om.status = 2
except AttributeError:
continue
rm.append(om)
return rm
|
import json
import datetime
class DatetimeEncoder(json.JSONEncoder):
def default(self, obj):
if (isinstance(obj, datetime.datetime) or
isinstance(obj, datetime.date) or
isinstance(obj, datetime.timedelta)):
return repr(obj)
else:
            return super(DatetimeEncoder, self).default(obj)
def datetime_parser(dct):
for k, v in dct.items():
if isinstance(v, str) and v.startswith("datetime.") and v.endswith(")"):
v = v[:-1]
items = v.split("(", maxsplit=1)[1]
args = []
for i in items.split(","):
try:
args.append(int(i.strip()))
except ValueError:
continue
if not args:
continue
if v.startswith("datetime.datetime("):
dct[k] = datetime.datetime(*args)
elif v.startswith("datetime.date("):
dct[k] = datetime.date(*args)
elif v.startswith("datetime.timedelta("):
dct[k] = datetime.timedelta(*args)
return dct
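# ---------------------------------------------------------------------------
# A minimal sketch (not part of the original module) of a round trip through
# the encoder/parser pair above. Note the parser only understands purely
# positional integer reprs such as datetime.datetime(2020, 1, 2, 3, 4, 5);
# reprs containing keyword arguments (e.g. datetime.timedelta(seconds=30)
# on Python >= 3.7) are skipped, since none of their pieces parse as int.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    record = {"when": datetime.datetime(2020, 1, 2, 3, 4, 5),
              "day": datetime.date(2020, 1, 2)}
    blob = json.dumps(record, cls=DatetimeEncoder)
    restored = json.loads(blob, object_hook=datetime_parser)
    assert restored == record, restored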
|
from openerp import api
from openerp.tests import HttpCase
from openerp.tests.common import TransactionCase
from openerp.addons.base.ir.ir_qweb import AssetsBundle
from openerp.modules.module import get_resource_path
from collections import Counter
from os import utime
import time
class TestJavascriptAssetsBundle(TransactionCase):
def setUp(self):
super(TestJavascriptAssetsBundle, self).setUp()
self.jsbundle_xmlid = 'test_assetsbundle.bundle1'
self.cssbundle_xmlid = 'test_assetsbundle.bundle2'
def _any_ira_for_bundle(self, type):
""" Returns all ir.attachments associated to a bundle, regardless of the verion.
"""
bundle = self.jsbundle_xmlid if type == 'js' else self.cssbundle_xmlid
return self.registry['ir.attachment'].search(self.cr, self.uid,[
('url', '=like', '/web/content/%-%/{0}%.{1}'.format(bundle, type))
])
def test_01_generation(self):
""" Checks that a bundle creates an ir.attachment record when its `js` method is called
for the first time.
"""
self.bundle = AssetsBundle(self.jsbundle_xmlid, env=self.env)
# there shouldn't be any attachment associated to this bundle
self.assertEquals(len(self._any_ira_for_bundle('js')), 0)
self.assertEquals(len(self.bundle.get_attachments('js')), 0)
# trigger the first generation and, thus, the first save in database
self.bundle.js()
# there should be one attachment associated to this bundle
self.assertEquals(len(self._any_ira_for_bundle('js')), 1)
self.assertEquals(len(self.bundle.get_attachments('js')), 1)
def test_02_access(self):
""" Checks that the bundle's cache is working, i.e. that the bundle creates only one
ir.attachment record when rendered multiple times.
"""
bundle0 = AssetsBundle(self.jsbundle_xmlid, env=self.env)
bundle0.js()
self.assertEquals(len(self._any_ira_for_bundle('js')), 1)
version0 = bundle0.version
ira0 = self.registry['ir.attachment'].browse(self.cr, self.uid, self._any_ira_for_bundle('js')[0])
date0 = ira0.create_date
bundle1 = AssetsBundle(self.jsbundle_xmlid, env=self.env)
bundle1.js()
self.assertEquals(len(self._any_ira_for_bundle('js')), 1)
version1 = bundle1.version
ira1 = self.registry['ir.attachment'].browse(self.cr, self.uid, self._any_ira_for_bundle('js')[0])
date1 = ira1.create_date
self.assertEquals(version0, version1)
self.assertEquals(date0, date1)
def test_03_date_invalidation(self):
""" Checks that a bundle is invalidated when one of its assets' modification date is changed.
"""
bundle0 = AssetsBundle(self.jsbundle_xmlid, env=self.env)
bundle0.js()
last_modified0 = bundle0.last_modified
version0 = bundle0.version
path = get_resource_path('test_assetsbundle', 'static', 'src', 'js', 'test_jsfile1.js')
utime(path, None) # touch
bundle1 = AssetsBundle(self.jsbundle_xmlid, env=self.env)
bundle1.js()
last_modified1 = bundle1.last_modified
version1 = bundle1.version
self.assertNotEquals(last_modified0, last_modified1)
self.assertNotEquals(version0, version1)
# check if the previous attachment is correctly cleaned
self.assertEquals(len(self._any_ira_for_bundle('js')), 1)
def test_04_content_invalidation(self):
""" Checks that a bundle is invalidated when its content is modified by adding a file to
source.
"""
bundle0 = AssetsBundle(self.jsbundle_xmlid, env=self.env)
bundle0.js()
html0 = bundle0.html
version0 = bundle0.version
self.assertEquals(len(self._any_ira_for_bundle('js')), 1)
view_arch = """
<data>
<xpath expr="." position="inside">
<script type="text/javascript" src="/test_assetsbundle/static/src/js/test_jsfile4.js"/>
</xpath>
</data>
"""
bundle_id = self.browse_ref(self.jsbundle_xmlid).id
newid = self.registry['ir.ui.view'].create(self.cr, self.uid, {
'name': 'test bundle inheritance',
'type': 'qweb',
'arch': view_arch,
'inherit_id': bundle_id,
})
bundle1 = AssetsBundle(self.jsbundle_xmlid, env=self.env(context={'check_view_ids': [newid]}))
bundle1.js()
html1 = bundle1.html
version1 = bundle1.version
self.assertNotEquals(html0, html1)
self.assertNotEquals(version0, version1)
# check if the previous attachment are correctly cleaned
self.assertEquals(len(self._any_ira_for_bundle('js')), 1)
def test_05_debug(self):
""" Checks that a bundle rendered in debug mode outputs non-minified assets.
"""
debug_bundle = AssetsBundle(self.jsbundle_xmlid, env=self.env)
content = debug_bundle.to_html(debug='assets')
# find back one of the original asset file
self.assertIn('/test_assetsbundle/static/src/js/test_jsfile1.js', content)
# there shouldn't be any assets created in debug mode
self.assertEquals(len(self._any_ira_for_bundle('js')), 0)
def test_06_paginated_css_generation1(self):
""" Checks that a bundle creates enough ir.attachment records when its `css` method is called
for the first time while the number of css rules exceed the limit.
"""
# note: changing the max_css_rules of a bundle does not invalidate its attachments
        # self.cssbundle_xmlid contains 3 rules
self.bundle = AssetsBundle(self.cssbundle_xmlid, env=self.env, max_css_rules=1)
self.bundle.css()
self.assertEquals(len(self._any_ira_for_bundle('css')), 3)
self.assertEquals(len(self.bundle.get_attachments('css')), 3)
def test_07_paginated_css_generation2(self):
        # self.cssbundle_xmlid contains 3 rules
self.bundle = AssetsBundle(self.cssbundle_xmlid, env=self.env, max_css_rules=2)
self.bundle.css()
self.assertEquals(len(self._any_ira_for_bundle('css')), 2)
self.assertEquals(len(self.bundle.get_attachments('css')), 2)
def test_08_paginated_css_generation3(self):
        # self.cssbundle_xmlid contains 3 rules
self.bundle = AssetsBundle(self.cssbundle_xmlid, env=self.env, max_css_rules=3)
self.bundle.css()
self.assertEquals(len(self._any_ira_for_bundle('css')), 1)
self.assertEquals(len(self.bundle.get_attachments('css')), 1)
def test_09_paginated_css_access(self):
""" Checks that the bundle's cache is working, i.e. that a bundle creates only enough
ir.attachment records when rendered multiple times.
"""
bundle0 = AssetsBundle(self.cssbundle_xmlid, env=self.env, max_css_rules=1)
bundle0.css()
self.assertEquals(len(self._any_ira_for_bundle('css')), 3)
version0 = bundle0.version
ira0 = self.registry['ir.attachment'].browse(self.cr, self.uid, self._any_ira_for_bundle('css')[0])
date0 = ira0.create_date
ira1 = self.registry['ir.attachment'].browse(self.cr, self.uid, self._any_ira_for_bundle('css')[1])
date1 = ira1.create_date
ira2 = self.registry['ir.attachment'].browse(self.cr, self.uid, self._any_ira_for_bundle('css')[2])
date2 = ira2.create_date
bundle1 = AssetsBundle(self.cssbundle_xmlid, env=self.env, max_css_rules=1)
bundle1.css()
self.assertEquals(len(self._any_ira_for_bundle('css')), 3)
version1 = bundle1.version
        ira3 = self.registry['ir.attachment'].browse(self.cr, self.uid, self._any_ira_for_bundle('css')[0])
        date3 = ira3.create_date
        ira4 = self.registry['ir.attachment'].browse(self.cr, self.uid, self._any_ira_for_bundle('css')[1])
        date4 = ira4.create_date
        ira5 = self.registry['ir.attachment'].browse(self.cr, self.uid, self._any_ira_for_bundle('css')[2])
        date5 = ira5.create_date
self.assertEquals(version0, version1)
self.assertEquals(date0, date3)
self.assertEquals(date1, date4)
self.assertEquals(date2, date5)
def test_10_paginated_css_date_invalidation(self):
""" Checks that a bundle is invalidated when one of its assets' modification date is changed.
"""
bundle0 = AssetsBundle(self.cssbundle_xmlid, env=self.env, max_css_rules=1)
bundle0.css()
last_modified0 = bundle0.last_modified
version0 = bundle0.version
path = get_resource_path('test_assetsbundle', 'static', 'src', 'css', 'test_cssfile1.css')
utime(path, None) # touch
bundle1 = AssetsBundle(self.cssbundle_xmlid, env=self.env, max_css_rules=1)
bundle1.css()
last_modified1 = bundle1.last_modified
version1 = bundle1.version
self.assertNotEquals(last_modified0, last_modified1)
self.assertNotEquals(version0, version1)
# check if the previous attachment is correctly cleaned
self.assertEquals(len(self._any_ira_for_bundle('css')), 3)
def test_11_paginated_css_content_invalidation(self):
""" Checks that a bundle is invalidated when its content is modified by adding a file to
source.
"""
bundle0 = AssetsBundle(self.cssbundle_xmlid, env=self.env, max_css_rules=1)
bundle0.css()
html0 = bundle0.html
version0 = bundle0.version
self.assertEquals(len(self._any_ira_for_bundle('css')), 3)
view_arch = """
<data>
<xpath expr="." position="inside">
<link rel="stylesheet" href="/test_assetsbundle/static/src/css/test_cssfile2.css"/>
</xpath>
</data>
"""
bundle_id = self.browse_ref(self.cssbundle_xmlid).id
newid = self.registry['ir.ui.view'].create(self.cr, self.uid, {
'name': 'test bundle inheritance',
'type': 'qweb',
'arch': view_arch,
'inherit_id': bundle_id,
})
bundle1 = AssetsBundle(self.cssbundle_xmlid, env=self.env(context={'check_view_ids': [newid]}), max_css_rules=1)
bundle1.css()
html1 = bundle1.html
version1 = bundle1.version
self.assertNotEquals(html0, html1)
self.assertNotEquals(version0, version1)
# check if the previous attachment are correctly cleaned
self.assertEquals(len(self._any_ira_for_bundle('css')), 4)
def test_12_paginated_css_debug(self):
""" Check that a bundle in debug mode outputs non-minified assets.
"""
debug_bundle = AssetsBundle(self.cssbundle_xmlid, env=self.env, max_css_rules=1)
content = debug_bundle.to_html(debug='assets')
# find back one of the original asset file
self.assertIn('/test_assetsbundle/static/src/css/test_cssfile1.css', content)
# there shouldn't be any assets created in debug mode
self.assertEquals(len(self._any_ira_for_bundle('css')), 0)
def test_13_paginated_css_order(self):
        # self.cssbundle_xmlid contains 3 rules
self.bundle = AssetsBundle(self.cssbundle_xmlid, env=self.env, max_css_rules=1)
stylesheets = self.bundle.css()
self.assertTrue(stylesheets[0].url.endswith('.0.css'))
self.assertTrue(stylesheets[1].url.endswith('.1.css'))
self.assertTrue(stylesheets[2].url.endswith('.2.css'))
class TestAssetsBundleInBrowser(HttpCase):
def test_01_js_interpretation(self):
""" Checks that the javascript of a bundle is correctly interpreted.
"""
self.phantom_js(
"/test_assetsbundle/js",
"a + b + c === 6 ? console.log('ok') : console.log('error')",
login="admin"
)
def test_02_js_interpretation_inline(self):
""" Checks that the javascript of a bundle is correctly interpretet when mixed with inline.
"""
with self.registry.cursor() as test_cursor:
view_arch = """
<data>
<xpath expr="." position="inside">
<script type="text/javascript">
var d = 4;
</script>
</xpath>
</data>
"""
self.registry['ir.ui.view'].create(test_cursor, self.uid, {
'name': 'test bundle inheritance inline js',
'type': 'qweb',
'arch': view_arch,
'inherit_id': self.browse_ref('test_assetsbundle.bundle1').id,
})
self.phantom_js(
"/test_assetsbundle/js",
"a + b + c + d === 10 ? console.log('ok') : console.log('error')",
login="admin",
)
class TestAssetsBundleWithIRAMock(TransactionCase):
def setUp(self):
super(TestAssetsBundleWithIRAMock, self).setUp()
self.lessbundle_xmlid = 'test_assetsbundle.bundle3'
self.counter = counter = Counter()
# patch methods 'create' and 'unlink' of model 'ir.attachment'
@api.model
def create(self, vals):
counter.update(['create'])
return create.origin(self, vals)
@api.multi
def unlink(self):
counter.update(['unlink'])
return unlink.origin(self)
self.env['ir.attachment']._patch_method('create', create)
self.addCleanup(self.env['ir.attachment']._revert_method, 'create')
self.env['ir.attachment']._patch_method('unlink', unlink)
self.addCleanup(self.env['ir.attachment']._revert_method, 'unlink')
def _bundle(self, should_create, should_unlink):
self.counter.clear()
AssetsBundle(self.lessbundle_xmlid, env=self.env).to_html(debug='assets')
self.assertEquals(self.counter['create'], int(should_create))
self.assertEquals(self.counter['unlink'], int(should_unlink))
def test_01_debug_mode_assets(self):
""" Checks that the ir.attachments records created for compiled less assets in debug mode
are correctly invalidated.
"""
# Compile for the first time
self._bundle(True, False)
# Compile a second time, without changes
self._bundle(False, False)
# Touch the file and compile a third time
path = get_resource_path('test_assetsbundle', 'static', 'src', 'less', 'test_lessfile1.less')
t = time.time() + 5
utime(path, (t, t)) # touch
self._bundle(True, True)
# Because we are in the same transaction since the beginning of the test, the first asset
# created and the second one have the same write_date, but the file's last modified date
# has really been modified. If we do not update the write_date to a posterior date, we are
# not able to reproduce the case where we compile this bundle again without changing
# anything.
self.cr.execute("update ir_attachment set write_date=clock_timestamp() + interval '10 seconds' where id = (select max(id) from ir_attachment)")
# Compile a fourth time, without changes
self._bundle(False, False)
|
import numpy as np
from sverchok.utils.testing import *
from sverchok.utils.logging import debug, info
from sverchok.utils.geom import PlaneEquation, LineEquation, linear_approximation
class PlaneTests(SverchokTestCase):
def test_plane_from_three_points(self):
p1 = (1, 0, 0)
p2 = (0, 1, 0)
p3 = (0, 0, 1)
plane = PlaneEquation.from_three_points(p1, p2, p3)
self.assertEquals(plane.a, 1)
self.assertEquals(plane.b, 1)
self.assertEquals(plane.c, 1)
self.assertEquals(plane.d, -1)
def test_nearest_to_origin(self):
p1 = (1, 0, 0)
p2 = (0, 1, 0)
p3 = (0, 0, 1)
plane = PlaneEquation.from_three_points(p1, p2, p3)
p = plane.nearest_point_to_origin()
self.assert_sverchok_data_equal(tuple(p), (0.3333, 0.3333, 0.3333), precision=4)
def test_check_yes(self):
plane = PlaneEquation.from_coordinate_plane('XY')
p = (7, 8, 0)
self.assertTrue(plane.check(p))
def test_check_no(self):
plane = PlaneEquation.from_coordinate_plane('XY')
p = (7, 8, 1)
self.assertFalse(plane.check(p))
def test_two_vectors(self):
p1 = (2, 0, 0)
p2 = (0, 1, 0)
p3 = (0, 0, 2)
plane = PlaneEquation.from_three_points(p1, p2, p3)
normal = plane.normal
v1, v2 = plane.two_vectors()
self.assertTrue(abs(normal.dot(v1)) < 1e-8)
self.assertTrue(abs(normal.dot(v2)) < 1e-8)
self.assertTrue(abs(v1.dot(v2)) < 1e-8)
def test_distance_to_point(self):
plane = PlaneEquation.from_coordinate_plane('XY')
point = (1, 2, 3)
distance = plane.distance_to_point(point)
self.assertEquals(distance, 3)
def test_distance_to_points(self):
plane = PlaneEquation.from_coordinate_plane('XY')
points = [(1, 2, 3), (4, 5, 6)]
distances = plane.distance_to_points(points)
self.assert_numpy_arrays_equal(distances, np.array([3, 6]))
def test_intersect_with_line(self):
plane = PlaneEquation.from_coordinate_plane('XY')
line = LineEquation.from_direction_and_point((1, 1, 1), (1, 1, 1))
point = plane.intersect_with_line(line)
self.assert_sverchok_data_equal(tuple(point), (0, 0, 0))
def test_intersect_with_plane(self):
plane1 = PlaneEquation.from_coordinate_plane('XY')
plane2 = PlaneEquation.from_coordinate_plane('XZ')
line = plane1.intersect_with_plane(plane2)
self.assert_sverchok_data_equal(tuple(line.direction.normalized()), (1, 0, 0))
self.assert_sverchok_data_equal(tuple(line.point), (-1, 0, 0))
class LineTests(SverchokTestCase):
def test_from_two_points(self):
p1 = (1, 1, 1)
p2 = (3, 3, 3)
line = LineEquation.from_two_points(p1, p2)
self.assert_sverchok_data_equal(tuple(line.direction), (2, 2, 2))
self.assert_sverchok_data_equal(tuple(line.point), p1)
def test_check_yes(self):
p1 = (1, 1, 1)
p2 = (3, 3, 3)
line = LineEquation.from_two_points(p1, p2)
p3 = (5, 5, 5)
self.assertTrue(line.check(p3))
def test_check_no(self):
p1 = (1, 1, 1)
p2 = (3, 3, 3)
line = LineEquation.from_two_points(p1, p2)
p3 = (5, 5, 6)
self.assertFalse(line.check(p3))
def test_distance_to_point(self):
line = LineEquation.from_coordinate_axis('Z')
point = (0, 2, 0)
self.assertEquals(line.distance_to_point(point), 2)
class LinearApproximationTests(SverchokTestCase):
def test_approximate_line_1(self):
p1 = (0, 0, 0)
p2 = (1, 0, 0)
p3 = (2, 0, 0)
p4 = (3, 0, 0)
line = linear_approximation([p1, p2, p3, p4]).most_similar_line()
self.assert_sverchok_data_equal(tuple(line.direction.normalized()), (1, 0, 0), precision=5)
def test_approximate_line_2(self):
p1 = (0, -1, 0)
p2 = (1, 1, 0)
p3 = (2, -1, 0)
p4 = (3, 1, 0)
line = linear_approximation([p1, p2, p3, p4]).most_similar_line()
self.assert_sverchok_data_equal(tuple(line.direction), (0.7882054448127747, 0.6154122352600098, 0.0), precision=5)
def test_approximate_plane(self):
p1 = (0, -1, 0)
p2 = (1, 1, 0)
p3 = (2, -1, 0)
p4 = (3, 1, 0)
plane = linear_approximation([p1, p2, p3, p4]).most_similar_plane()
self.assert_sverchok_data_equal(tuple(plane.normal.normalized()), (0, 0, 1), precision=5)
|
'''
This __init__ file is present to help with certain non-standard uses
of pubsub1:
1. so that sphinx's autodoc extension can find the pubsub1 documentation
2. so that py2exe can find the pub module when v1 API is used (pubsub1/pub.py
gets included in the library only if pubsub1 is a package)
    Otherwise, pubsub.pubsub1 is not a package but merely a folder that
    holds the pub.py module for the v1 API. Pubsub is designed so that,
    other than the two above cases, pubsub/pubsub1/pub.py will appear as
    pubsub.pub when pubsub.setupv1 is used (i.e. the v1 API).
:copyright: Copyright 2006-2009 by Oliver Schoenborn, all rights reserved.
:license: BSD, see LICENSE.txt for details.
'''
|
import bpy
import mathutils
def reset_transform(ob):
m = mathutils.Matrix()
ob.matrix_local = m
def func_add_corrective_pose_shape_fast(source, target):
result = ""
reset_transform(target)
    # If the target object doesn't have a Basis shape key yet, create it.
try:
num_keys = len( target.data.shape_keys.key_blocks )
    except AttributeError:  # shape_keys is None when no keys exist yet
basis = target.shape_key_add()
basis.name = "Basis"
target.data.update()
key_index = target.active_shape_key_index
if key_index == 0:
# Insert new shape key
new_shapekey = target.shape_key_add()
new_shapekey.name = "Shape_" + source.name
new_shapekey_name = new_shapekey.name
key_index = len(target.data.shape_keys.key_blocks)-1
target.active_shape_key_index = key_index
# else, the active shape will be used (updated)
target.show_only_shape_key = True
shape_key_verts = target.data.shape_keys.key_blocks[ key_index ].data
try:
vgroup = target.active_shape_key.vertex_group
target.active_shape_key.vertex_group = ''
    except AttributeError:
        print("***ERROR*** could not clear the shape key vertex group")
        result = "***ERROR*** could not clear the shape key vertex group"
# copy the local vertex positions to the new shape
verts = source.data.vertices
    try:
        for n in range( len(verts)):
            shape_key_verts[n].co = verts[n].co
    except IndexError:
        message = "***ERROR***, meshes have different number of vertices"
        result = message
    # go through all armature modifiers and unpose the shape
for n in target.modifiers:
if n.type == 'ARMATURE' and n.show_viewport:
#~ print("got one")
n.use_bone_envelopes = False
n.use_deform_preserve_volume = False
n.use_vertex_groups = True
armature = n.object
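            # unposeMesh() is defined elsewhere in the original add-on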
unposeMesh( shape_key_verts, target, armature)
break
# set the new shape key value to 1.0, so we see the result instantly
target.data.shape_keys.key_blocks[ target.active_shape_key_index].value = 1.0
try:
target.active_shape_key.vertex_group = vgroup
    except (AttributeError, NameError):
        # vgroup is undefined when clearing it failed above
        print("***ERROR*** could not restore the shape key vertex group")
        result = result + " ***ERROR*** could not restore the shape key vertex group"
target.show_only_shape_key = False
target.data.update()
return result
class add_corrective_pose_shape_fast(bpy.types.Operator):
'''Adds 1st object as shape to 2nd object as pose shape (only 1 armature)'''
bl_idname = "object.add_corrective_pose_shape_fast"
bl_label = "Add object as corrective shape faster"
@classmethod
def poll(cls, context):
        return context.active_object is not None
def execute(self, context):
        if len(context.selected_objects) != 2:
            print("Select exactly two objects: source and target")
            return {'FINISHED'}
selection = context.selected_objects
target = context.active_object
if context.active_object == selection[0]:
source = selection[1]
else:
source = selection[0]
print(source)
print(target)
func_add_corrective_pose_shape_fast( source, target)
return {'FINISHED'}
def register():
bpy.utils.register_module(__name__)
def unregister():
bpy.utils.unregister_module(__name__)
if __name__ == "__main__":
register()
|
import os
import json
import math
import time
import sys, io
import shutil
import stat
import requests
import re
from UserDict import UserDict
from urlparse import urlparse
try:
from html import escape # python 3.x
except ImportError:
from cgi import escape # python 2.x
scriptpath=os.path.dirname(os.path.realpath(__file__))
configfile=os.path.join(scriptpath,"config.py")
if not os.path.isfile(configfile) and not os.path.isfile(configfile+".template"):
    print('Missing template or config file: config.py')
    sys.exit()
if not os.path.isfile(configfile):
    shutil.copyfile(configfile+".template", configfile)
    print('Just created a config file with default values, which may not be suitable for you!!')
if not os.path.isfile(configfile):
    print('Failed to create a config file!')
    sys.exit()
execfile(configfile)
if web_dir.strip()=='':
web_dir=os.getcwd()
def query_kodi(url,payload):
headers = {'content-type': 'application/json'}
try:
r = requests.post(url, data=json.dumps(payload), headers=headers)
r = r.json()
return r['result']['movies']
except:
#raise
print('Connection error. Please check host and port.')
sys.exit()
def get_movies_from_kodi(host, port):
url = 'http://' + host + ':' + port + '/jsonrpc'
    #we will always limit the search result; some browsers may demand a lower setting
if limit <= 0:
maxlimit=1000
else:
maxlimit=limit
payload = ({"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies",
"params": {
"limits": { "start" : 0, "end": maxlimit },
"properties": ["rating", "imdbnumber", "playcount", "plot", "plotoutline",
"votes", "top250", "trailer", "year", "country", "studio",
"set", "genre", "mpaa", "tag", "tagline", "writer",
"originaltitle" ],
"sort": {"order": "ascending", "method": "label", "ignorearticle": True}},
"id": "libMovies"
})
return query_kodi(url,payload)
def get_new_movies_from_kodi(host,port):
url = 'http://' + host + ':' + port + '/jsonrpc'
payload = ({"jsonrpc": "2.0", "method": "VideoLibrary.GetRecentlyAddedMovies",
"params": {
"limits": { "start" : 0, "end": 50 },
"properties": ["imdbnumber"],
"sort": {"order": "ascending", "method": "label", "ignorearticle": True}},
"id": "libMovies"
})
return query_kodi(url,payload)
def get_poster_image_url(movie, tmdb_key, size, language):
base_url = 'http://api.themoviedb.org/3/movie/'
headers = {'content-type': 'application/json'}
    if 'imdbnumber' not in movie or len(movie['imdbnumber']) <= 0:
print('['+movie['label']+'] Cannot get poster: no imdbnumber found: will use default poster instead')
return "no-image"
if len(language)<=0:
url = (base_url + str(movie['imdbnumber']) + '/images' + '?api_key=' + tmdb_key + '&language=' + lang)
else:
url = (base_url + str(movie['imdbnumber']) + '/images' + '?api_key=' + tmdb_key + '&language=' + str(language[0]))
r = requests.get(url)
try:
        # we want aspect ratio 0.66667 but this way we are a bit more flexible; maybe look at the vote count as well
i=0
while r.json()['posters'][i]['aspect_ratio'] > poster_aspect_ratio+poster_aspect_ratio_offset or r.json()['posters'][i]['aspect_ratio'] < poster_aspect_ratio-poster_aspect_ratio_offset:
i += 1
image_url = r.json()['posters'][i]['file_path']
poster_url = 'http://image.tmdb.org/t/p/' + size + image_url
except LookupError:
if len(language)>=2:
#remove first element, and call recursive
language.pop(0)
poster_url = get_poster_image_url(movie, tmdb_key, size, language)
else:
print('['+movie['label']+'] No poster exists. Using default no poster image'+ url )
poster_url = "no-image"
return poster_url
def check_if_poster_exists(imdbid, size, web_dir):
folder_path = os.path.join(web_dir, 'posters', size)
if not os.path.exists(folder_path):
try:
os.makedirs(folder_path)
except OSError:
print "Failed to create path, check your permissions or settings: "+folder_path
if os.path.isfile(os.path.join(folder_path, imdbid + '.jpeg')):
return True
def save_poster_image(poster_url, imdbid, size, web_dir):
folder_path = os.path.join(web_dir, 'posters', size)
if poster_url == 'no-image':
no_image_path = os.path.join(web_dir, 'assets',
'no-image-' + size + '.jpeg')
filepath = os.path.join(folder_path, imdbid + '.jpeg')
shutil.copyfile(no_image_path, filepath)
else:
r = requests.get(poster_url)
filetype = r.headers['content-type'].split('/')[-1]
filepath = os.path.join(folder_path, imdbid + '.' + filetype)
f = open(filepath, "wb")
f.write(r.content)
f.close()
def movie_stars(stars=0,votes=0):
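    # map a 0-10 rating onto 5 stars: e.g. stars=7.4 rounds to 7, giving
    # 3 full stars (7 / 2 floored) plus a half star for the remaining 0.5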
full_stars = int(math.floor(round(stars) / 2))
    remaining_stars = round(stars) / 2 - full_stars
full_star_url = os.path.join('assets', 'star-full.svg')
half_star_url = os.path.join('assets', 'star-half.svg')
img_full_star_html = "<img src='" + full_star_url + "' alt='star full' title='"+str(round(stars,1))+" rating with "+str(votes)+" votes'>"
img_half_star_html = "<img src='" + half_star_url + "' alt='star half' title='"+str(round(stars,1))+" rating with "+str(votes)+" votes'>"
if remaining_stars >= 0.5:
html_stars = img_full_star_html * full_stars + img_half_star_html
else:
html_stars = img_full_star_html * full_stars
return html_stars
class tekstreplace(UserDict):
def _make_regex(self):
return re.compile("(%s)" % "|".join(map(re.escape, self.keys())))
def __call__(self, mo):
# Count substitutions
self.count += 1 # Look-up string
return self[mo.string[mo.start():mo.end()]]
def substitute (self, text):
# Reset substitution counter
self.count = 0
# Process text
return self._make_regex().sub(self, text)
    #make keys out of the dict: keys are surrounded by % and the values are escaped,
    #arrays will become comma-separated strings
def dict2keys(self, _keys):
keys={}
for k,v in _keys.iteritems():
            if not str(k).startswith('%'):
                #for some silly reason cgi.escape doesn't escape '
                if isinstance(v, (list, tuple)):
                    keys["%%%s%%"%(k)]=escape(', '.join(v)).replace("'","&#39;")
                elif isinstance(v, (int, long, float, complex)):
                    keys["%%%s%%"%(k)]=unicode(v)
                elif isinstance(v, basestring):
                    keys["%%%s%%"%(k)]=escape(unicode(v)).replace("'","&#39;")
                else:
                    keys["%%%s%%"%(k)]=escape(unicode(v)).replace("'","&#39;")
else:
keys[k]=v
return keys
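#----------------------------------------------------------------------------
# A minimal sketch (not part of the original script) of how tekstreplace is
# used below: dict2keys() wraps each movie field in %...% markers and escapes
# the value, substitute() then replaces every marker found in a template.
#----------------------------------------------------------------------------
def _tekstreplace_demo():
    movie = {'label': 'Alien', 'genre': ['Horror', 'Sci-Fi']}
    keys = tekstreplace().dict2keys(movie)
    # keys == {'%label%': u'Alien', '%genre%': u'Horror, Sci-Fi'}
    return tekstreplace(keys).substitute("<h1>%label%</h1><p>%genre%</p>")
    # -> '<h1>Alien</h1><p>Horror, Sci-Fi</p>'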
def inlist(mylist,idname ,idvalue):
if len([element for element in mylist if element[idname] == idvalue]) >=1 :
return True
return False
def mycopytree(src, dst, symlinks = False, ignore = None):
if not os.path.exists(dst):
os.makedirs(dst)
shutil.copystat(src, dst)
lst = os.listdir(src)
if ignore:
excl = ignore(src, lst)
lst = [x for x in lst if x not in excl]
for item in lst:
s = os.path.join(src, item)
d = os.path.join(dst, item)
if symlinks and os.path.islink(s):
if os.path.lexists(d):
os.remove(d)
os.symlink(os.readlink(s), d)
try:
st = os.lstat(s)
mode = stat.S_IMODE(st.st_mode)
os.lchmod(d, mode)
except:
pass # lchmod not available
elif os.path.isdir(s):
mycopytree(s, d, symlinks, ignore)
else:
#avoid chmod issues with copy2
#shutil.copy2(s, d)
shutil.copyfile(s, d)
def gettrailerid(params):
try:
murl=urlparse(params['trailer'])
if 'youtube' in murl.netloc:
for param in murl.query.split('&') :
paramlist=param.split('=')
if paramlist[0]=='videoid':
return paramlist[1]
    except (KeyError, IndexError):
print "no youtube trailer info found.."
return ""
def create_movie_html_block(movie):
if 'votes' not in movie:
movie['votes']=0
if 'rating' not in movie:
movie['rating']=0
#change movie info to keys
keys=tekstreplace().dict2keys(movie)
#add extra keys
if not '%watchedclass%' in keys:
keys['%watchedclass%'] = 'unwatched'
if movie['playcount'] > 0 :
keys['%watchedclass%'] = 'watched'
keys['%genreclasses%'] = escape(','.join(movie['genre']))
keys['%trailerid%'] = gettrailerid(movie)
keys['%trailerurl%'] = ''
if len(keys['%trailerid%']) >0:
keys['%trailerurl%'] = 'http://www.youtube.com/watch?v='+keys['%trailerid%']
keys['%trailerhtml%'] = ''
if len(keys['%trailerurl%']) > 0:
keys['%trailerhtml%'] = tekstreplace(keys).substitute(trailer_template)
keys['%imdbhtml%'] = ''
if '%imdbnumber%' in keys and len(keys['%imdbnumber%']) > 0:
keys['%imdburl%'] = 'http://www.imdb.com/title/'+keys['%imdbnumber%']+'/'
keys['%imdbhtml%'] = tekstreplace(keys).substitute(imdb_template)
else:
print ('['+movie['label']+'] imdbnumber not found, disable imdb button')
keys['%starshtml%'] = movie_stars(movie['rating'],keys['%votes%'])
#generate and return the html
return unicode(tekstreplace(keys).substitute(movie_template) )
def write_html(movies, genres, web_dir):
keys={}
keys["%cssfile%"]=cssfile
keys["%jsfile%"]=jsfile
if jsinline and os.path.exists(jsfile):
with open(jsfile) as f:
js_data ="<SCRIPT>\n"+f.read()+"</SCRIPT>"
f.close()
else:
js_data=tekstreplace(keys).substitute(jsfile_template)
if cssinline and os.path.exists(cssfile):
with open(cssfile) as f:
css_data ="<STYLE>\n"+f.read()+"</STYLE>"
f.close()
else:
css_data=tekstreplace(keys).substitute(cssfile_template)
genres.add("All")
filter_html=''
for genre in genres:
filter_html += "<div id='"+genre+"' class='filterbutton'><a href='#'>"+genre+"</a></div>"
watched_html='<input id="watchedbutton" type="checkbox" name="field" value="false"><label for="watchedbutton">Hide Watched</label> '
keys['%NAME%'] = gallery_name
keys['%GENREFILTER%'] = "<div id='genrefilter' active='' decorated=''>"+filter_html+"</div>"
keys['%WATCHEDFILTER%'] = "<div id='watchedfilter'>"+watched_html+"</div>"
keys['%GENRES%'] = ','.join(genres)
keys['%META%'] = meta_template
keys['%MADEBY%'] = made_template
keys['%PROJECTURL%'] = projectLink_template
keys['%TMDBLOGO%'] = tmdblogo_template
keys['%CSSFILE%'] = css_data
keys['%JSFILE%'] = js_data
keys['%MOVIE_HTML%'] = movies
keys['%CREATED%'] = time.strftime("%Y.%m.%d@%H:%M:%S")
f = io.open(os.path.join(web_dir, web_indexfile), 'w', encoding='utf-8')
f.write(unicode(tekstreplace(keys).substitute(html_template) ))
f.close()
def pushbullet_notification(apikey, movies, gallery, device):
string_to_push = ''
for movieid, movie in movies.iteritems():
string_to_push += movie['label'] + ', '
string_to_push = 'New movies added: ' + string_to_push[:-2] + '.'
url = 'https://api.pushbullet.com/v2/pushes'
headers = {'content-type': 'application/json',
'Authorization': 'Bearer ' + apikey}
payload = {'device_iden': device, 'type': 'note', 'title': gallery,
'body': string_to_push}
requests.post(url, data=json.dumps(payload), headers=headers)
if __name__ == "__main__":
posters_to_retrieve = {}
movies_html = ""
counter = 1
genres = set()
#load/set stuff when we want to verify the result
if htmllint:
try:
import tempfile
from tidylib import tidy_document
        except ImportError:
            htmllint=False
            out_dir=web_dir
            print "Failed to load the tempfile and tidylib python modules: htmllint is disabled"
        else:
out_dir=tempfile.mkdtemp()
print "htmllint enabled: building in: "+out_dir + " release to: " + web_dir
else:
out_dir=web_dir
#build a list with new movies
new_movies={}
print "Read new movies from KODI: in progress"+"\r",
    for i in get_new_movies_from_kodi(host,port):
new_movies[i['movieid']]=i
print "Read new movies from KODI: "+str(len(new_movies))+ " "*15
#build a list with previous run movies from the cache
print "Read movies from previous run: in progress"+"\r",
if not os.path.isfile(moviecache):
prev_movies={}
else:
prev_movies=eval(open(moviecache, 'r').read())
if not isinstance(prev_movies, dict): prev_movies={}
print "Read movies from previous run: "+str(len(prev_movies))+ " "*15
#query KODI
print "Read movies from KODI: this can take a while...."+ " "*15+"\r",
sys.stdout.flush()
movies=get_movies_from_kodi(host, port)
print "Read movies from KODI: "+str(len(movies))+ " "*30
processed_movies={}
for movie in movies:
#display progress
print "Movies processed: "+str(counter)+" / "+str(len(movies)) + " "*15+"\r",
sys.stdout.flush()
#check out_dir and web_dir for posters
        foundposter = check_if_poster_exists(movie['imdbnumber'], poster_size, out_dir)
        if not foundposter and htmllint:
            foundposter = check_if_poster_exists(movie['imdbnumber'], poster_size, web_dir)
#add extra info to the movie for html generation mainly
        movie['counter']=counter
movie['poster_url']="posters/"+poster_size+"/"+movie['imdbnumber']+'.jpeg'
movie['movieclass'] ='old'
if movie['movieid'] in new_movies:
movie['movieclass'] ='new' #set the background for new movies
movie['watchedclass'] ='newmovie' #add a new ribbon, overrides by watched
elif not movie['movieid'] in prev_movies:
movie['movieclass']='added'
#generate html from the movie and append to the mainlist
movies_html = (movies_html + create_movie_html_block(movie))
#if in previous run there was no poster: try again
if foundposter:
if movie['movieid'] in prev_movies:
if 'poster_url_source' in prev_movies[movie['movieid']]:
if prev_movies[movie['movieid']]['poster_url_source'] == 'no-image' :
foundposter = False
#generate a list of posters we need to download
if not foundposter or refreshposters:
posters_to_retrieve[movie['movieid']]=movie
#build unique list with genres
for genre in movie['genre']:
genres.add(genre)
#store movie in indexed list
processed_movies[movie['movieid']]=movie
#the counter
counter += 1
#we like to have an indexed list (dict)
movies=processed_movies
print ""
#retrieve the posters
counter = 0
print "Download posters: in progress"+"\r",
for movieid, movie in posters_to_retrieve.iteritems():
counter += 1
print "Download posters: "+ str(counter) + ' / ' + str(len(posters_to_retrieve)) +" "*20+"\r",
sys.stdout.flush()
url = get_poster_image_url(movie, tmdb_key, poster_size, language)
movies[movieid]['poster_url_source']=url
save_poster_image(url, movie['imdbnumber'], poster_size, out_dir)
print "\nDownload posters: done"
#write the actual HTML file: the result
print "Write result to: " + os.path.join(out_dir,web_indexfile)
write_html("<div id='movies'>"+movies_html+"</div>", genres, out_dir)
#verify result (if configured) and release to website
    assetsUpdate=True
if htmllint:
print "Check result (htmllint) .. "
with open(os.path.join(out_dir, web_indexfile)) as f:
document, errors = tidy_document(f.read(),options={'numeric-entities':1, "show-warnings": htmllint_warnings })
f.close()
if len(errors) > 0:
            assetsUpdate=False
            print "we got some errors and will not release: "
            print errors
#update assets
if assetsUpdate:
if web_dir=='':
web_dir=os.getcwd()
print "Copy assets to: "+web_dir
#copy the assets to the release dir
mycopytree(os.path.join(scriptpath,"assets") ,os.path.join(web_dir,"assets"))
        #in case we use the htmllint we need to copy the result as well from out_dir to the web_dir
#todo: when inline=true do not copy css and js files
if htmllint:
print "Copy posters to: "+web_dir
mycopytree(os.path.join(out_dir,"posters") ,os.path.join(web_dir,"posters"))
print "Release result to website: "+web_dir
            shutil.copyfile(os.path.join(out_dir,web_indexfile), os.path.join(web_dir,web_indexfile))
#write the movies to cache file
    target = open(moviecache, 'w')
    target.write(str(movies))
    target.close()
#send to pushbullet
if pushbullet_api_key != '':
if len(posters_to_retrieve) != 0:
pushbullet_notification(pushbullet_api_key,
posters_to_retrieve, gallery_name,
pushbullet_device_iden)
|
from optparse import OptionParser
from sys import exit
from unittest import TestLoader, TestSuite, TextTestRunner
from itools.core import get_abspath
from itools.fs import lfs
from ikaaro.server import create_server
import test_database
import test_metadata
import test_server
from junitxml import JUnitXmlResult
test_modules = [test_metadata, test_server, test_database]
loader = TestLoader()
if __name__ == '__main__':
usage = '%prog [OPTIONS]'
description = 'Run ikaaro tests'
parser = OptionParser(usage, description=description)
parser.add_option('-m', '--mode', default='standard', help='tests mode')
options, args = parser.parse_args()
# Remove old test database if exists
path = 'demo.hforge.org'
if lfs.exists(path):
lfs.remove(path)
# Create test server
create_server(path, 'test@hforge.org',
'password', 'ikaaro', website_languages=['en', 'fr'])
# Launch test
suite = TestSuite()
for module in test_modules:
suite.addTest(loader.loadTestsFromModule(module))
if options.mode == 'standard':
ret = TextTestRunner(verbosity=1).run(suite)
elif options.mode == 'junitxml':
path = get_abspath('./junit.xml')
print('Result is here: %s' % path)
        f = open(path, 'wb')
        result = JUnitXmlResult(f)
        result.startTestRun()
        ret = suite.run(result)
        result.stopTestRun()
        f.close()
exit_code = not ret.wasSuccessful()
exit(exit_code)
|
from __future__ import with_statement
import re
from base64 import b64encode
from pycurl import FORM_FILE, HTTPHEADER
from time import sleep
from module.common.json_layer import json_loads
from module.network.HTTPRequest import BadHeader
from module.network.RequestFactory import getRequest
from module.plugins.Hook import Hook, threaded
class DeathByCaptchaException(Exception):
DBC_ERRORS = {'not-logged-in': 'Access denied, check your credentials',
'invalid-credentials': 'Access denied, check your credentials',
'banned': 'Access denied, account is suspended',
'insufficient-funds': 'Insufficient account balance to decrypt CAPTCHA',
'invalid-captcha': 'CAPTCHA is not a valid image',
'service-overload': 'CAPTCHA was rejected due to service overload, try again later',
'invalid-request': 'Invalid request',
'timed-out': 'No CAPTCHA solution received in time'}
def __init__(self, err):
self.err = err
def getCode(self):
return self.err
def getDesc(self):
        if self.err in self.DBC_ERRORS:
return self.DBC_ERRORS[self.err]
else:
return self.err
def __str__(self):
return "<DeathByCaptchaException %s>" % self.err
def __repr__(self):
return "<DeathByCaptchaException %s>" % self.err
class DeathByCaptcha(Hook):
__name__ = "DeathByCaptcha"
__type__ = "hook"
__version__ = "0.06"
__config__ = [("username", "str", "Username", ""),
("passkey", "password", "Password", ""),
("force", "bool", "Force DBC even if client is connected", False)]
__description__ = """Send captchas to DeathByCaptcha.com"""
__license__ = "GPLv3"
__authors__ = [("RaNaN", "RaNaN@pyload.org"),
("zoidberg", "zoidberg@mujmail.cz")]
API_URL = "http://api.dbcapi.me/api/"
#@TODO: Remove in 0.4.10
def initPeriodical(self):
pass
def setup(self):
self.info = {} #@TODO: Remove in 0.4.10
def api_response(self, api="captcha", post=False, multipart=False):
req = getRequest()
req.c.setopt(HTTPHEADER, ["Accept: application/json", "User-Agent: pyLoad %s" % self.core.version])
if post:
if not isinstance(post, dict):
post = {}
post.update({"username": self.getConfig("username"),
"password": self.getConfig("passkey")})
res = None
try:
json = req.load("%s%s" % (self.API_URL, api),
post=post,
multipart=multipart)
self.logDebug(json)
res = json_loads(json)
if "error" in res:
raise DeathByCaptchaException(res['error'])
elif "status" not in res:
raise DeathByCaptchaException(str(res))
except BadHeader, e:
if 403 == e.code:
raise DeathByCaptchaException('not-logged-in')
elif 413 == e.code:
raise DeathByCaptchaException('invalid-captcha')
elif 503 == e.code:
raise DeathByCaptchaException('service-overload')
elif e.code in (400, 405):
raise DeathByCaptchaException('invalid-request')
else:
raise
finally:
req.close()
return res
def getCredits(self):
res = self.api_response("user", True)
if 'is_banned' in res and res['is_banned']:
raise DeathByCaptchaException('banned')
elif 'balance' in res and 'rate' in res:
self.info.update(res)
else:
raise DeathByCaptchaException(res)
def getStatus(self):
res = self.api_response("status", False)
if 'is_service_overloaded' in res and res['is_service_overloaded']:
raise DeathByCaptchaException('service-overload')
def submit(self, captcha, captchaType="file", match=None):
#@NOTE: Workaround multipart-post bug in HTTPRequest.py
if re.match("^\w*$", self.getConfig("passkey")):
multipart = True
data = (FORM_FILE, captcha)
else:
multipart = False
with open(captcha, 'rb') as f:
data = f.read()
data = "base64:" + b64encode(data)
res = self.api_response("captcha", {"captchafile": data}, multipart)
if "captcha" not in res:
raise DeathByCaptchaException(res)
ticket = res['captcha']
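        # poll the API for the solved text, at 5 second intervals for up to
        # 24 * 5 = 120 seconds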
for _i in xrange(24):
sleep(5)
res = self.api_response("captcha/%d" % ticket, False)
if res['text'] and res['is_correct']:
break
else:
raise DeathByCaptchaException('timed-out')
result = res['text']
self.logDebug("Result %s : %s" % (ticket, result))
return ticket, result
def newCaptchaTask(self, task):
if "service" in task.data:
return False
if not task.isTextual():
return False
if not self.getConfig("username") or not self.getConfig("passkey"):
return False
if self.core.isClientConnected() and not self.getConfig("force"):
return False
try:
self.getStatus()
self.getCredits()
except DeathByCaptchaException, e:
self.logError(e.getDesc())
return False
balance, rate = self.info['balance'], self.info['rate']
self.logInfo(_("Account balance"),
_("US$%.3f (%d captchas left at %.2f cents each)") % (balance / 100,
balance // rate, rate))
if balance > rate:
task.handler.append(self)
task.data['service'] = self.__name__
task.setWaiting(180)
self._processCaptcha(task)
def captchaInvalid(self, task):
if task.data['service'] == self.__name__ and "ticket" in task.data:
try:
res = self.api_response("captcha/%d/report" % task.data['ticket'], True)
except DeathByCaptchaException, e:
self.logError(e.getDesc())
except Exception, e:
self.logError(e)
@threaded
def _processCaptcha(self, task):
c = task.captchaFile
try:
ticket, result = self.submit(c)
except DeathByCaptchaException, e:
task.error = e.getCode()
self.logError(e.getDesc())
return
task.data['ticket'] = ticket
task.setResult(result)
|
from django.db import models
from xml.sax.saxutils import escape
class Journal(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=225)
def escaped_name(self):
return escape(self.name)
class Meta:
db_table = u't_journals'
class Ion(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(unique=True, max_length=30, blank=True)
symbol = models.CharField(unique=True, max_length=10)
ionization = models.CharField(max_length=6, null=True)
ionization_decimal = models.IntegerField(null=False)
ion_charge = models.IntegerField(null=False)
nuclear_charge = models.IntegerField()
inchi = models.CharField(max_length=100)
inchikey = models.CharField(max_length=27)
mass_number = models.IntegerField()
'''def ion_charge(self):
return self.ionization_decimal - 1'''
def species_id(self):
return self.id
class Meta:
db_table = u't_ions'
class Particle(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(unique=True, max_length=30, blank=True)
symbol = models.CharField(unique=True, max_length=10)
def species_id(self):
return 'P%s'%self.id
class Meta:
db_table = u't_particles'
class Species(models.Model):
id = models.IntegerField(primary_key=True)
ion = models.ForeignKey(Ion, db_column="id_ion", null=True, on_delete=models.DO_NOTHING)
particle = models.ForeignKey(Particle, db_column="id_particle", null=True, on_delete=models.DO_NOTHING)
def particle_ion_id(self):
if self.ion is not None :
if self.ion.name == "meanion":
return None
return self.ion.species_id()
elif self.particle is not None :
return self.particle.species_id()
def particle_ion_name(self):
if self.ion is not None :
return self.ion.name
elif self.particle is not None :
return self.particle.name
class Meta:
db_table = u't_species'
class Dataset(models.Model):
id = models.IntegerField(primary_key=True)
target = models.ForeignKey(Species, db_column='id_target', on_delete=models.DO_NOTHING)
has_proton = models.IntegerField()
filename = models.TextField()
creation_date = models.DateField()
description = models.CharField(max_length=300, blank=True)
class Meta:
db_table = u't_datasets'
class Article(models.Model):
id = models.IntegerField(primary_key=True)
authors = models.TextField()
publication_year = models.DecimalField(max_digits=5, decimal_places=0)
journal = models.ForeignKey(Journal, db_column='id_journal', on_delete=models.DO_NOTHING)
volume = models.IntegerField()
pages = models.CharField(max_length=120)
title = models.TextField()
method = models.CharField(max_length=30)
ads_reference = models.TextField(blank=True)
doi_reference = models.TextField(blank=True)
other_reference = models.TextField(blank=True)
def escaped_title(self):
return escape(self.title)
def escaped_ads_reference(self):
if self.ads_reference is not None:
return escape(self.ads_reference)
return ""
def escaped_doi_reference(self):
return escape(self.doi_reference)
def escaped_other_reference(self):
if self.other_reference is not None :
return escape(self.other_reference)
return ""
def authors_list(self):
return self.authors.rsplit(',')
def escaped_authors_list(self):
authors = self.authors.rsplit(',')
print(authors)
return authors
#return [escape(author) for author in authors ]
class Meta:
db_table = u't_articles'
class ArticleDataset(models.Model):
id = models.IntegerField(primary_key=True)
article = models.ForeignKey(Article, db_column='id_article', on_delete=models.DO_NOTHING)
dataset = models.ForeignKey(Dataset, db_column='id_dataset', on_delete=models.DO_NOTHING)
class Meta:
db_table = u't_articles_datasets'
class DatasetCollider(models.Model):
dataset = models.ForeignKey(Dataset, db_column='id_dataset', on_delete=models.DO_NOTHING)
species = models.ForeignKey(Species, db_column='id_species', on_delete=models.DO_NOTHING)
class Meta:
db_table = u't_datasets_colliders'
class Level(models.Model):
id = models.IntegerField(primary_key=True)
dataset = models.ForeignKey(Dataset, db_column='id_dataset', on_delete=models.DO_NOTHING)
config = models.CharField(unique=True, max_length=60)
term = models.CharField(unique=True, max_length=36)
parity = models.CharField(max_length=4)
coupling = models.CharField(max_length=4)
J = models.CharField(unique=True, max_length=15, blank=True)
LS_L = models.IntegerField(null=True)
LS_S = models.FloatField(null=True)
LS_multiplicity = models.IntegerField(null=True)
jK_K = models.FloatField(null=True)
jK_J1 = models.FloatField(null=True)
jK_S2 = models.FloatField(null=True)
jj_j1 = models.FloatField(null=True)
jj_j2 = models.FloatField(null=True)
    def j_asFloat(self):
        # parse values like "1/2" or "3/2" (or plain integers) into a float
        if self.J is not None:
            num, _, den = self.J.partition('/')
            return float(num) / float(den) if den else float(num)
        return None
    def getjj(self):
        # collect whichever jj coupling quantum numbers are present
        result = []
        if self.jj_j1 is not None :
            result.append(self.jj_j1)
        if self.jj_j2 is not None :
            result.append(self.jj_j2)
        return result
def get_int_parity(self):
if self.parity == 'odd' :
return 1
else:
return 2
    def encoded_config(self):
        """
        Some data files contain <> characters in this field, which are
        not valid when exporting to XML.
        """
        return self.config.replace("<", "&lt;").replace(">", "&gt;")
class Meta:
db_table = u't_levels'
class Transition(models.Model):
id = models.IntegerField(primary_key=True)
dataset = models.ForeignKey(Dataset, db_column='id_dataset', on_delete=models.DO_NOTHING)
target = models.ForeignKey(Species, db_column='id_species', on_delete=models.DO_NOTHING)
lower_level = models.ForeignKey(Level, db_column='lower_level', related_name='lower_level', on_delete=models.DO_NOTHING)
upper_level = models.ForeignKey(Level, db_column='upper_level', related_name='upper_level', on_delete=models.DO_NOTHING)
wavelength = models.FloatField()
temperature = models.FloatField()
temperatureid = models.IntegerField(db_column='id_temperature')
density = models.FloatField()
inchikey = models.CharField(max_length=27)
class Meta:
db_table = u'v_transitionsvamdc'
class Transitiondata(models.Model):
id = models.IntegerField(primary_key=True)
transition = models.ForeignKey(Transition, db_column='id_transition', on_delete=models.DO_NOTHING)
density = models.FloatField(unique=True)
c = models.FloatField(null=True, blank=True)
class Meta:
db_table = u't_transitiondata'
class FitCoefficient(models.Model):
id = models.IntegerField(primary_key=True)
transitiondata = models.ForeignKey(Transitiondata, db_column='id_transitiondata', on_delete=models.DO_NOTHING)
species = models.ForeignKey(Species, db_column='id_species', on_delete=models.DO_NOTHING)
a0 = models.FloatField(null=True, blank=True)
a1 = models.FloatField(null=True, blank=True)
a2 = models.FloatField(null=True, blank=True)
b0 = models.FloatField(null=True, blank=True)
b1 = models.FloatField(null=True, blank=True)
b2 = models.FloatField(null=True, blank=True)
class Meta:
db_table = u't_fitcoefficients'
class Temperature(models.Model):
id = models.IntegerField(primary_key=True)
transitiondata = models.ForeignKey(Transitiondata, db_column='id_transitiondata', on_delete=models.DO_NOTHING)
temperature = models.IntegerField(unique=True)
a = models.FloatField(null=True, blank=True)
class Meta:
db_table = u't_temperatures'
class TemperatureCollider(models.Model):
id = models.IntegerField(primary_key=True)
#~ temperature = models.ForeignKey(Temperature, db_column='id_temperature', unique=True)
temperature = models.OneToOneField(
Temperature,
db_column='id_temperature',
on_delete=models.DO_NOTHING
#~ related_name='temperature_id'
)
species = models.ForeignKey(Species, db_column='id_species', on_delete=models.DO_NOTHING)
n_w = models.CharField(max_length=24, blank=True)
w = models.FloatField(null=True, blank=True)
n_d = models.CharField(max_length=24, blank=True)
d = models.FloatField(null=True, blank=True)
class Meta:
db_table = u't_temperatures_colliders'
|
from mitmflib.impacket.dcerpc.v5.ndr import NDRSTRUCT, NDRUniConformantVaryingArray, NDRENUM
from mitmflib.impacket.dcerpc.v5.dcomrt import DCOMCALL, DCOMANSWER, IRemUnknown2, PMInterfacePointer, INTERFACE
from mitmflib.impacket.dcerpc.v5.dtypes import LPWSTR, ULONG, DWORD, SHORT, GUID
from mitmflib.impacket.dcerpc.v5.rpcrt import DCERPCException
from mitmflib.impacket.dcerpc.v5.enum import Enum
from mitmflib.impacket import hresult_errors
from mitmflib.impacket.uuid import string_to_bin
class DCERPCSessionError(DCERPCException):
def __init__(self, error_string=None, error_code=None, packet=None):
DCERPCException.__init__(self, error_string, error_code, packet)
def __str__( self ):
        if self.error_code in hresult_errors.ERROR_MESSAGES:
error_msg_short = hresult_errors.ERROR_MESSAGES[self.error_code][0]
error_msg_verbose = hresult_errors.ERROR_MESSAGES[self.error_code][1]
return 'VDS SessionError: code: 0x%x - %s - %s' % (self.error_code, error_msg_short, error_msg_verbose)
else:
return 'VDS SessionError: unknown error code: 0x%x' % (self.error_code)
CLSID_VirtualDiskService = string_to_bin('7D1933CB-86F6-4A98-8628-01BE94C9A575')
IID_IEnumVdsObject = string_to_bin('118610B7-8D94-4030-B5B8-500889788E4E')
IID_IVdsAdviseSink = string_to_bin('8326CD1D-CF59-4936-B786-5EFC08798E25')
IID_IVdsAsync = string_to_bin('D5D23B6D-5A55-4492-9889-397A3C2D2DBC')
IID_IVdsServiceInitialization = string_to_bin('4AFC3636-DB01-4052-80C3-03BBCB8D3C69')
IID_IVdsService = string_to_bin('0818A8EF-9BA9-40D8-A6F9-E22833CC771E')
IID_IVdsSwProvider = string_to_bin('9AA58360-CE33-4F92-B658-ED24B14425B8')
IID_IVdsProvider = string_to_bin('10C5E575-7984-4E81-A56B-431F5F92AE42')
error_status_t = ULONG
VDS_OBJECT_ID = GUID
class VDS_SERVICE_PROP(NDRSTRUCT):
structure = (
('pwszVersion',LPWSTR),
('ulFlags',ULONG),
)
class OBJECT_ARRAY(NDRUniConformantVaryingArray):
item = PMInterfacePointer
class VDS_PROVIDER_TYPE(NDRENUM):
class enumItems(Enum):
VDS_PT_UNKNOWN = 0
VDS_PT_SOFTWARE = 1
VDS_PT_HARDWARE = 2
VDS_PT_VIRTUALDISK = 3
VDS_PT_MAX = 4
class VDS_PROVIDER_PROP(NDRSTRUCT):
structure = (
('id',VDS_OBJECT_ID),
('pwszName',LPWSTR),
('guidVersionId',GUID),
('pwszVersion',LPWSTR),
('type',VDS_PROVIDER_TYPE),
('ulFlags',ULONG),
('ulStripeSizeFlags',ULONG),
('sRebuildPriority',SHORT),
)
class IVdsServiceInitialization_Initialize(DCOMCALL):
opnum = 3
structure = (
('pwszMachineName', LPWSTR),
)
class IVdsServiceInitialization_InitializeResponse(DCOMANSWER):
structure = (
('ErrorCode', error_status_t),
)
class IVdsService_IsServiceReady(DCOMCALL):
opnum = 3
structure = (
)
class IVdsService_IsServiceReadyResponse(DCOMANSWER):
structure = (
('ErrorCode', error_status_t),
)
class IVdsService_WaitForServiceReady(DCOMCALL):
opnum = 4
structure = (
)
class IVdsService_WaitForServiceReadyResponse(DCOMANSWER):
structure = (
('ErrorCode', error_status_t),
)
class IVdsService_GetProperties(DCOMCALL):
opnum = 5
structure = (
)
class IVdsService_GetPropertiesResponse(DCOMANSWER):
structure = (
('pServiceProp', VDS_SERVICE_PROP),
('ErrorCode', error_status_t),
)
class IVdsService_QueryProviders(DCOMCALL):
opnum = 6
structure = (
('masks', DWORD),
)
class IVdsService_QueryProvidersResponse(DCOMANSWER):
structure = (
('ppEnum', PMInterfacePointer),
('ErrorCode', error_status_t),
)
class IEnumVdsObject_Next(DCOMCALL):
opnum = 3
structure = (
('celt', ULONG),
)
class IEnumVdsObject_NextResponse(DCOMANSWER):
structure = (
('ppObjectArray', OBJECT_ARRAY),
('pcFetched', ULONG),
('ErrorCode', error_status_t),
)
class IVdsProvider_GetProperties(DCOMCALL):
opnum = 3
    structure = ()
class IVdsProvider_GetPropertiesResponse(DCOMANSWER):
structure = (
('pProviderProp', VDS_PROVIDER_PROP),
('ErrorCode', error_status_t),
)
OPNUMS = {}
class IEnumVdsObject(IRemUnknown2):
def Next(self, celt=0xffff):
request = IEnumVdsObject_Next()
request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
request['ORPCthis']['flags'] = 0
request['celt'] = celt
try:
resp = self.request(request, uuid = self.get_iPid())
except Exception, e:
resp = e.get_packet()
            # S_FALSE (1) means fewer items than requested were returned
if resp['ErrorCode'] != 1:
raise
interfaces = list()
        for interface in resp['ppObjectArray']:
            interfaces.append(IRemUnknown2(
                INTERFACE(self.get_cinstance(),
                          ''.join(interface['abData']),
                          self.get_ipidRemUnknown(),
                          target=self.get_target())))
return interfaces
class IVdsProvider(IRemUnknown2):
def GetProperties(self):
request = IVdsProvider_GetProperties()
request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
request['ORPCthis']['flags'] = 0
resp = self.request(request, uuid = self.get_iPid())
return resp
class IVdsServiceInitialization(IRemUnknown2):
def __init__(self, interface):
IRemUnknown2.__init__(self, interface)
def Initialize(self):
request = IVdsServiceInitialization_Initialize()
request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
request['ORPCthis']['flags'] = 0
request['pwszMachineName'] = '\x00'
resp = self.request(request, uuid = self.get_iPid())
return resp
class IVdsService(IRemUnknown2):
def __init__(self, interface):
IRemUnknown2.__init__(self, interface)
def IsServiceReady(self):
request = IVdsService_IsServiceReady()
request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
request['ORPCthis']['flags'] = 0
try:
resp = self.request(request, uuid = self.get_iPid())
except Exception, e:
resp = e.get_packet()
return resp
def WaitForServiceReady(self):
request = IVdsService_WaitForServiceReady()
request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
request['ORPCthis']['flags'] = 0
resp = self.request(request, uuid = self.get_iPid())
return resp
def GetProperties(self):
request = IVdsService_GetProperties()
request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
request['ORPCthis']['flags'] = 0
resp = self.request(request, uuid = self.get_iPid())
return resp
def QueryProviders(self, masks):
request = IVdsService_QueryProviders()
request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
request['ORPCthis']['flags'] = 0
request['masks'] = masks
resp = self.request(request, uuid = self.get_iPid())
        return IEnumVdsObject(INTERFACE(
            self.get_cinstance(), ''.join(resp['ppEnum']['abData']),
            self.get_ipidRemUnknown(), target=self.get_target()))
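# Hedged usage sketch (illustrative only, not part of the original module):
# enumerating software VDS providers on a remote host over DCOM. The
# DCOMConnection/CoCreateInstanceEx/RemQueryInterface calls are assumed to
# behave as in impacket's dcomrt module; the address and credentials below
# are placeholders.
if __name__ == '__main__':
    from mitmflib.impacket.dcerpc.v5.dcomrt import DCOMConnection
    dcom = DCOMConnection('192.168.0.10', 'user', 'password', 'domain')
    try:
        iInterface = dcom.CoCreateInstanceEx(CLSID_VirtualDiskService,
                                             IID_IVdsServiceInitialization)
        vdsInit = IVdsServiceInitialization(iInterface)
        vdsInit.Initialize()
        vdsService = IVdsService(
            vdsInit.RemQueryInterface(1, (IID_IVdsService,)))
        vdsService.WaitForServiceReady()
        # mask 0x1 is assumed to select software providers
        enum = vdsService.QueryProviders(1)
        for item in enum.Next():
            prop = IVdsProvider(item).GetProperties()
            print prop['pProviderProp']['pwszName']
    finally:
        dcom.disconnect()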
|
from django.core.checks import register, Warning
from django.conf import settings
@register()
def deprecated_settings(app_configs, **kwargs):
issues = []
    if 'payment_statement' in settings.HOST:
issues.append(
Warning(
"The setting HOST['payment_statement'] is no longer rendered.",
hint="Create a FrontpageMessage through the admin interface instead.",
obj='settings.HOST',
id='openbare.W001'
)
)
    if 'use_statement' in settings.HOST:
issues.append(
Warning(
"The setting HOST['use_statement'] is no longer rendered.",
hint="Create a FrontpageMessage through the admin interface instead.",
obj='settings.HOST',
id='openbare.W002'
)
)
return issues
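# Hedged usage sketch (assumes DJANGO_SETTINGS_MODULE points at a settings
# file defining a HOST dict): the registered check normally runs via
# `manage.py check`, but it can also be exercised directly.
if __name__ == '__main__':
    for issue in deprecated_settings(None):
        print('%s: %s' % (issue.id, issue.msg))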
|
import os
import sys
import shutil as module
import xml.sax as package
correct_mod = os.path.join(sys.prefix, 'shutil.pyc')
correct_pkg = os.path.join(sys.prefix, 'xml', 'sax', '__init__.pyc')
print '    mod.__file__: %s' % module.__file__
print '   expected mod: %s' % correct_mod
print '    pkg.__file__: %s' % package.__file__
print '   expected pkg: %s' % correct_pkg
if module.__file__ != correct_mod:
    raise SystemExit('MODULE.__file__ attribute is wrong.')
if package.__file__ != correct_pkg:
    raise SystemExit('PACKAGE.__file__ attribute is wrong.')
|
import time
from task import BackgroundTask
from settings import settings
from utils import log
class TaskQueue(BackgroundTask):
"""A queue of tasks.
A task queue is a queue of other tasks. If you need, for example, to
do simple tasks A, B, and C, you can create a TaskQueue and add the
simple tasks to it:
q = TaskQueue()
q.add_task(A)
q.add_task(B)
q.add_task(C)
q.start()
The task queue behaves as a single task. It will execute the
tasks in order and start the next one when the previous finishes."""
def __init__(self):
BackgroundTask.__init__(self)
self.waiting_tasks = []
self.running_tasks = []
self.finished_tasks = 0
self.start_time = None
self.count = 0
self.paused = False
def add_task(self, task):
"""Add a task to the queue."""
self.waiting_tasks.append(task)
        #if self.start_time and not self.running_tasks:
        if self.start_time:
            # A task was added to an already-started (possibly stalled)
            # queue; kick it so the new task gets scheduled.
            self.start_next_task()
def start_next_task(self):
if not self.waiting_tasks:
if not self.running_tasks:
self.done()
return
to_start = settings['jobs'] - len(self.running_tasks)
for i in range(to_start):
try:
task = self.waiting_tasks.pop(0)
except IndexError:
return
self.running_tasks.append(task)
task.add_listener('finished', self.task_finished)
task.start()
if self.paused:
task.toggle_pause(True)
self.count += 1
total = len(self.waiting_tasks) + self.finished_tasks
self.progress = float(self.finished_tasks) / total if total else 0
def started(self):
""" BackgroundTask setup callback """
log('Queue start: %d tasks, %d thread(s).' % (
len(self.waiting_tasks) + len(self.running_tasks),
settings['jobs']))
self.count = 0
self.paused = False
self.finished_tasks = 0
self.start_time = time.time()
self.start_next_task()
def finished(self):
""" BackgroundTask finish callback """
log('Queue done in %.3fs (%s tasks)' % (time.time() - self.start_time,
self.count))
self.queue_ended()
self.count = 0
self.start_time = None
self.running_tasks = []
self.waiting_tasks = []
self.running = False
def task_finished(self, task=None):
if not self.running_tasks:
return
if task in self.running_tasks:
self.running_tasks.remove(task)
self.finished_tasks += 1
self.start_next_task()
def abort(self):
for task in self.running_tasks:
task.abort()
BackgroundTask.abort(self)
self.running_tasks = []
self.waiting_tasks = []
self.running = False
self.start_time = None
def toggle_pause(self, paused):
self.paused = paused
for task in self.running_tasks:
task.toggle_pause(self.paused)
    # Called when the queue has finished all of its tasks
    def queue_ended(self):
        pass
    # Called whenever the queue's progress value changes
    def progress_hook(self, progress):
        pass
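# Hedged usage sketch: the two hooks above are intended to be overridden by
# subclasses. This minimal example only logs, and assumes the queue is fed
# BackgroundTask-compatible tasks as in the class docstring.
class LoggingTaskQueue(TaskQueue):
    def queue_ended(self):
        log('All queued tasks are done.')
    def progress_hook(self, progress):
        log('Queue progress: %d%%' % int(progress * 100))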
|
from openerp import models, fields, api, _
from openerp.exceptions import except_orm
from datetime import datetime
import ast
import logging
_logger = logging.getLogger(__name__)
class ClouderSaveRepository(models.Model):
"""
Define the save.repository object, which represent the repository where
the saves are stored.
"""
_name = 'clouder.save.repository'
name = fields.Char('Name', size=128, required=True)
    type = fields.Selection([('container', 'Container'), ('base', 'Base')],
                            'Type', required=True)
date_change = fields.Date('Change Date')
date_expiration = fields.Date('Expiration Date')
container_name = fields.Char('Container Name', size=64)
container_server = fields.Char('Container Server', size=128)
base_name = fields.Char('Base Name', size=64)
base_domain = fields.Char('Base Domain', size=128)
save_ids = fields.One2many('clouder.save.save', 'repo_id', 'Saves')
_order = 'create_date desc'
class ClouderSaveSave(models.Model):
"""
Define the save.save object, which represent the saves of containers/bases.
"""
_name = 'clouder.save.save'
_inherit = ['clouder.model']
name = fields.Char('Name', size=256, required=True)
type = fields.Selection([('container', 'Container'), ('base', 'Base')],
'Type', related='repo_id.type', readonly=True)
backup_id = fields.Many2one(
'clouder.container', 'Backup Server', required=True)
repo_id = fields.Many2one('clouder.save.repository', 'Repository',
ondelete='cascade', required=True)
date_expiration = fields.Date('Expiration Date')
comment = fields.Text('Comment')
now_bup = fields.Char('Now bup', size=64)
container_id = fields.Many2one('clouder.container', 'Container')
container_app = fields.Char('Application', size=64)
container_img = fields.Char('Image', size=64)
container_img_version = fields.Char('Image Version', size=64)
container_ports = fields.Text('Ports')
container_volumes = fields.Text('Volumes')
container_volumes_comma = fields.Text('Volumes comma')
container_options = fields.Text('Container Options')
container_links = fields.Text('Container Links')
service_id = fields.Many2one('clouder.service', 'Service')
service_name = fields.Char('Service Name', size=64)
service_app_version = fields.Char('Application Version', size=64)
service_options = fields.Text('Service Options')
service_links = fields.Text('Service Links')
base_id = fields.Many2one('clouder.base', 'Base')
base_title = fields.Char('Title', size=64)
base_container_name = fields.Char('Container', size=64)
base_container_server = fields.Char('Server', size=64)
base_admin_name = fields.Char('Admin name', size=64)
base_admin_password = fields.Char('Admin passwd', size=64)
base_admin_email = fields.Char('Admin email', size=64)
base_poweruser_name = fields.Char('Poweruser name', size=64)
base_poweruser_password = fields.Char('Poweruser Password', size=64)
base_poweruser_email = fields.Char('Poweruser email', size=64)
base_build = fields.Char('Build', size=64)
base_test = fields.Boolean('Test?')
base_lang = fields.Char('Lang', size=64)
base_nosave = fields.Boolean('No save?')
base_options = fields.Text('Base Options')
base_links = fields.Text('Base Links')
container_name = fields.Char(
'Container Name', related='repo_id.container_name',
size=64, readonly=True)
container_server = fields.Char(
'Container Server', related='repo_id.container_server',
type='char', size=64, readonly=True)
container_restore_to_name = fields.Char('Restore to (Name)', size=64)
container_restore_to_server_id = fields.Many2one(
'clouder.server', 'Restore to (Server)')
base_name = fields.Char(
'Base Name', related='repo_id.base_name',
type='char', size=64, readonly=True)
base_domain = fields.Char(
'Base Domain', related='repo_id.base_domain',
type='char', size=64, readonly=True)
base_restore_to_name = fields.Char('Restore to (Name)', size=64)
base_restore_to_domain_id = fields.Many2one(
'clouder.domain', 'Restore to (Domain)')
create_date = fields.Datetime('Create Date')
@property
def now_epoch(self):
"""
Property returning the actual time, at the epoch format.
"""
return (datetime.strptime(self.now_bup, "%Y-%m-%d-%H%M%S") -
datetime(1970, 1, 1)).total_seconds()
@property
def base_dumpfile(self):
"""
Property returning the dumpfile name.
"""
return \
self.repo_id.type == 'base' \
and self.container_app.replace('-', '_') + '_' + \
self.base_name.replace('-', '_') + '_' + \
self.base_domain.replace('-', '_').replace('.', '_') + '.dump'
@property
def computed_container_restore_to_name(self):
"""
Property returning the container name which will be restored.
"""
return self.container_restore_to_name or self.base_container_name \
or self.repo_id.container_name
@property
def computed_container_restore_to_server(self):
"""
Property returning the container server which will be restored.
"""
return self.container_restore_to_server_id.name \
or self.base_container_server or self.repo_id.container_server
@property
def computed_base_restore_to_name(self):
"""
Property returning the base name which will be restored.
"""
return self.base_restore_to_name or self.repo_id.base_name
@property
def computed_base_restore_to_domain(self):
"""
Property returning the base domain which will be restored.
"""
return self.base_restore_to_domain_id.name or self.repo_id.base_domain
_order = 'create_date desc'
    @api.model
def create(self, vals):
"""
Override create method to add the data in container and base in the
save record, so we can restore it if the container/service/base are
deleted.
:param vals: The values we need to create the record.
"""
if 'container_id' in vals:
container = self.env['clouder.container'] \
.browse(vals['container_id'])
container_ports = {}
for port in container.port_ids:
container_ports[port.name] = {
'name': port.name, 'localport': port.localport,
'expose': port.expose, 'udp': port.udp}
container_volumes = {}
for volume in container.volume_ids:
container_volumes[volume.id] = {
'name': volume.name, 'hostpath': volume.hostpath,
'user': volume.user, 'readonly': volume.readonly,
'nosave': volume.nosave}
container_links = {}
for link in container.link_ids:
container_links[link.name.name.code] = {
'name': link.name.id,
'code': link.name.name.code,
'target': link.target and link.target.id or False
}
vals.update({
'container_volumes_comma': container.volumes_save,
'container_app': container.application_id.code,
'container_img': container.image_id.name,
'container_img_version': container.image_version_id.name,
'container_ports': str(container_ports),
'container_volumes': str(container_volumes),
'container_options': str(container.options),
'container_links': str(container_links),
})
if 'service_id' in vals:
service = self.env['clouder.service'].browse(vals['service_id'])
service_links = {}
for link in service.link_ids:
service_links[link.name.name.code] = {
'name': link.name.id,
'code': link.name.name.code,
'target': link.target and link.target.id or False
}
vals.update({
'service_name': service.name,
'service_app_version': service.application_version_id.name,
'service_options': str(service.options),
'service_links': str(service_links),
})
if 'base_id' in vals:
base = self.env['clouder.base'].browse(vals['base_id'])
base_links = {}
for link in base.link_ids:
base_links[link.name.name.code] = {
'name': link.name.id,
'code': link.name.name.code,
'target': link.target and link.target.id or False
}
vals.update({
'base_title': base.title,
'base_container_name': base.service_id.container_id.name,
'base_container_server':
base.service_id.container_id.server_id.name,
'base_admin_name': base.admin_name,
'base_admin_password': base.admin_password,
'base_admin_email': base.admin_email,
'base_poweruser_name': base.poweruser_name,
'base_poweruser_password': base.poweruser_password,
'base_poweruser_email': base.poweruser_email,
'base_build': base.build,
'base_test': base.test,
'base_lang': base.lang,
'base_nosave': base.nosave,
'base_options': str(base.options),
'base_links': str(base_links),
})
return super(ClouderSaveSave, self).create(vals)
@api.multi
def purge(self):
"""
Remove the save from the backup container.
"""
ssh = self.connect(self.backup_id.fullname)
self.execute(ssh, ['rm', '-rf', '/opt/backup/simple/' +
self.repo_id.name + '/' + self.name])
        if self.search([('repo_id', '=', self.repo_id.id)]) == self:
self.execute(ssh, ['rm', '-rf', '/opt/backup/simple/' +
self.repo_id.name])
self.execute(ssh, ['git', '--git-dir=/opt/backup/bup',
'branch', '-D', self.repo_id.name])
ssh.close()
return
@api.multi
def restore_base(self):
"""
Hook which can be called by submodules to execute commands after we
restored a base.
"""
return
@api.multi
def restore(self):
"""
Restore a save to a container or a base. If container/service/base
aren't specified, they will be created.
"""
container_obj = self.env['clouder.container']
base_obj = self.env['clouder.base']
server_obj = self.env['clouder.server']
domain_obj = self.env['clouder.domain']
application_obj = self.env['clouder.application']
application_version_obj = self.env['clouder.application.version']
application_link_obj = self.env['clouder.application.link']
image_obj = self.env['clouder.image']
image_version_obj = self.env['clouder.image.version']
service_obj = self.env['clouder.service']
self = self.with_context(self.create_log('restore'))
apps = application_obj.search([('code', '=', self.container_app)])
if not apps:
raise except_orm(
_('Error!'),
_("Couldn't find application " + self.container_app +
", aborting restoration."))
imgs = image_obj.search([('name', '=', self.container_img)])
if not imgs:
raise except_orm(
_('Error!'),
_("Couldn't find image " + self.container_img +
", aborting restoration."))
img_versions = image_version_obj.search(
[('name', '=', self.container_img_version)])
# upgrade = True
if not img_versions:
self.log("Warning, couldn't find the image version, using latest")
# We do not want to force the upgrade if we had to use latest
# upgrade = False
versions = imgs[0].version_ids
if not versions:
raise except_orm(
_('Error!'),
_("Couldn't find versions for image " +
self.container_img + ", aborting restoration."))
img_versions = [versions[0]]
if self.container_restore_to_name or not self.container_id:
containers = container_obj.search([
('name', '=', self.computed_container_restore_to_name),
('server_id.name', '=',
self.computed_container_restore_to_server)
])
if not containers:
self.log("Can't find any corresponding container, "
"creating a new one")
servers = server_obj.search([
('name', '=', self.computed_container_restore_to_server)])
if not servers:
raise except_orm(
_('Error!'),
_("Couldn't find server " +
self.computed_container_restore_to_server +
", aborting restoration."))
ports = []
for port, port_vals \
in ast.literal_eval(self.container_ports).iteritems():
ports.append((0, 0, port_vals))
volumes = []
for volume, volume_vals in \
ast.literal_eval(self.container_volumes).iteritems():
volumes.append((0, 0, volume_vals))
options = []
for option, option_vals in \
ast.literal_eval(self.container_options).iteritems():
del option_vals['id']
options.append((0, 0, option_vals))
links = []
for link, link_vals in ast.literal_eval(
self.container_links).iteritems():
if not link_vals['name']:
                        link_apps = application_link_obj.search([
                            ('name.code', '=', link_vals['code']),
                            ('application_id', '=', apps[0].id)])
if link_apps:
link_vals['name'] = link_apps[0].id
else:
continue
del link_vals['code']
links.append((0, 0, link_vals))
                container_vals = {
                    'name': self.computed_container_restore_to_name,
                    'server_id': servers[0].id,
                    'application_id': apps[0].id,
                    'image_id': imgs[0].id,
                    'image_version_id': img_versions[0].id,
                    'port_ids': ports,
                    'volume_ids': volumes,
                    'option_ids': options,
                    'link_ids': links
                }
container = container_obj.create(container_vals)
else:
self.log("A corresponding container was found")
container = containers[0]
else:
self.log("A container_id was linked in the save")
container = self.container_id
if self.repo_id.type == 'container':
if container.image_version_id != img_versions[0]:
# if upgrade:
container.image_version_id = img_versions[0]
self = self.with_context(forcesave=False)
self = self.with_context(nosave=True)
self = self.with_context(
save_comment='Before restore ' + self.name)
container.save()
ssh = self.connect(container.fullname)
self.execute(ssh, ['supervisorctl', 'stop', 'all'])
self.execute(ssh, ['supervisorctl', 'start', 'sshd'])
self.restore_action(container)
for volume in container.volume_ids:
if volume.user:
self.execute(ssh, ['chown', '-R',
volume.user + ':' + volume.user,
volume.name])
ssh.close()
container.start()
container.deploy_links()
self.end_log()
res = container
else:
# upgrade = False
app_versions = application_version_obj.search(
[('name', '=', self.service_app_version),
('application_id', '=', apps[0].id)])
if not app_versions:
self.log(
"Warning, couldn't find the application version, "
"using latest")
# We do not want to force the upgrade if we had to use latest
# upgrade = False
                versions = apps[0].version_ids
if not versions:
raise except_orm(
_('Error!'),
_("Couldn't find versions for application " +
self.container_app + ", aborting restoration."))
app_versions = [versions[0]]
if not self.service_id \
or self.service_id.container_id != container:
services = service_obj.search(
[('name', '=', self.service_name),
('container_id', '=', container.id)])
if not services:
self.log("Can't find any corresponding service, "
"creating a new one")
options = []
for option, option_vals in ast.literal_eval(
self.service_options).iteritems():
del option_vals['id']
options.append((0, 0, option_vals))
links = []
for link, link_vals in ast.literal_eval(
self.service_links).iteritems():
if not link_vals['name']:
                            link_apps = application_link_obj.search(
                                [('name.code', '=', link_vals['code']),
                                 ('application_id', '=', apps[0].id)])
                            if link_apps:
                                link_vals['name'] = link_apps[0].id
else:
continue
del link_vals['code']
links.append((0, 0, link_vals))
                    service_vals = {
                        'name': self.service_name,
                        'container_id': container.id,
                        'database_container_id': self.service_database_id.id,
                        'application_version_id': app_versions[0].id,
                        'option_ids': options,
                        'link_ids': links
                    }
service = service_obj.create(service_vals)
else:
self.log("A corresponding service was found")
service = services[0]
else:
self.log("A service_id was linked in the save")
service = self.service_id
if self.base_restore_to_name or not self.base_id:
bases = base_obj.search(
[('name', '=', self.computed_base_restore_to_name), (
'domain_id.name', '=',
self.computed_base_restore_to_domain)])
if not bases:
self.log(
"Can't find any corresponding base, "
"creating a new one")
domains = domain_obj.search(
[('name', '=', self.computed_base_restore_to_domain)])
if not domains:
raise except_orm(
_('Error!'),
_("Couldn't find domain " +
self.computed_base_restore_to_domain +
", aborting restoration."))
options = []
for option, option_vals in ast.literal_eval(
self.base_options).iteritems():
del option_vals['id']
options.append((0, 0, option_vals))
links = []
for link, link_vals in ast.literal_eval(
self.base_links).iteritems():
if not link_vals['name']:
                            link_apps = application_link_obj.search(
                                [('name.code', '=', link_vals['code']),
                                 ('application_id', '=', apps[0].id)])
                            if link_apps:
                                link_vals['name'] = link_apps[0].id
else:
continue
del link_vals['code']
links.append((0, 0, link_vals))
base_vals = {
'name': self.computed_base_restore_to_name,
'service_id': service.id,
'application_id': apps[0].id,
'domain_id': domains[0].id,
'title': self.base_title,
'admin_name': self.base_admin_name,
'admin_password': self.base_admin_password,
'admin_email': self.base_admin_email,
'poweruser_name': self.base_poweruser_name,
'poweruser_password': self.base_poweruser_password,
'poweruser_email': self.base_poweruser_email,
'build': self.base_build,
'test': self.base_test,
'lang': self.base_lang,
'nosave': self.base_nosave,
'option_ids': options,
'link_ids': links,
'backup_ids': [(6, 0, [self.backup_id.id])]
}
self = self.with_context(base_restoration=True)
base = self.env['clouder.base'].create(base_vals)
else:
self.log("A corresponding base was found")
base = bases[0]
else:
self.log("A base_id was linked in the save")
base = self.base_id
if base.service_id.application_version_id != app_versions[0]:
# if upgrade:
base.application_version_id = app_versions[0]
self = self.with_context(
save_comment='Before restore ' + self.name)
base.save()
self.restore_action(base)
base.purge_db()
ssh = self.connect(
base.service_id.container_id.fullname,
username=base.application_id.type_id.system_user)
for key, database in base.databases.iteritems():
if base.service_id.database_type != 'mysql':
self.execute(ssh, ['createdb', '-h',
base.service_id.database_server, '-U',
base.service_id.db_user,
base.fullname_])
self.execute(ssh, ['cat',
'/base-backup/' + self.repo_id.name
+ '/' + self.base_dumpfile,
'|', 'psql', '-q', '-h',
base.service_id.database_server, '-U',
base.service_id.db_user,
base.fullname_])
else:
ssh_mysql, sftp_mysql = self.connect(
base.service_id.database.fullname)
self.execute(ssh_mysql, [
"mysql -u root -p'" +
base.service_id.database.root_password +
"' -se \"create database " + database + ";\""])
self.execute(ssh_mysql, [
"mysql -u root -p'" +
base.service_id.database.root_password +
"' -se \"grant all on " + database + ".* to '" +
base.service_id.db_user + "';\""])
                    ssh_mysql.close()
                    sftp_mysql.close()
self.execute(ssh, [
'mysql', '-h', base.service_id.database_server, '-u',
base.service_id.db_user,
'-p' + base.service_id.database_password, database,
'<', '/base-backup/' + self.repo_id.name + '/' +
database + '.dump'])
self.restore_base()
base_obj.deploy_links()
self.execute(ssh,
['rm', '-rf', '/base-backup/' + self.repo_id.name])
ssh.close()
self.end_log()
res = base
self.write({'container_restore_to_name': False,
'container_restore_to_server_id': False,
'base_restore_to_name': False,
'base_restore_to_domain_id': False})
return res
@api.multi
def restore_action(self, obj):
"""
Execute the command on the backup container et destination container
to get the save and restore it.
:param obj: The object which will be restored.
"""
if obj._name == 'clouder.base':
container = obj.service_id.container_id
else:
container = obj
directory = '/tmp/restore-' + self.repo_id.name
ssh = self.connect(self.backup_id.fullname, username='backup')
self.send(ssh, self.home_directory + '/.ssh/config',
'/home/backup/.ssh/config')
self.send(ssh,
self.home_directory + '/.ssh/keys/' +
container.fullname + '.pub',
'/home/backup/.ssh/keys/' + container.fullname + '.pub')
self.send(ssh,
self.home_directory + '/.ssh/keys/' + container.fullname,
'/home/backup/.ssh/keys/' + container.fullname)
self.execute(ssh, ['chmod', '-R', '700', '/home/backup/.ssh'])
self.execute(ssh, ['rm', '-rf', directory + '*'])
self.execute(ssh, ['mkdir', '-p', directory])
if self.backup_id.backup_method == 'simple':
self.execute(ssh, [
'cp', '-R', '/opt/backup/simple/' + self.repo_id.name +
'/' + self.name + '/*', directory])
if self.backup_id.backup_method == 'bup':
self.execute(ssh, [
'export BUP_DIR=/opt/backup/bup;',
'bup restore -C ' + directory + ' ' + self.repo_id.name +
'/' + self.now_bup])
self.execute(ssh, [
'mv', directory + '/' + self.now_bup + '/*', directory])
self.execute(ssh, ['rm -rf', directory + '/' + self.now_bup])
self.execute(ssh, [
'rsync', "-e 'ssh -o StrictHostKeyChecking=no'", '-ra',
directory + '/', container.fullname + ':' + directory])
self.execute(ssh, ['rm', '-rf', directory + '*'])
self.execute(ssh, ['rm', '/home/backup/.ssh/keys/*'])
ssh.close()
ssh = self.connect(container.fullname)
if self.repo_id.type == 'container':
for volume in self.container_volumes_comma.split(','):
self.execute(ssh, ['rm', '-rf', volume + '/*'])
else:
self.execute(ssh,
['rm', '-rf', '/base-backup/' + self.repo_id.name])
self.execute(ssh, ['rm', '-rf', directory + '/backup-date'])
if self.repo_id.type == 'container':
self.execute(ssh, ['cp', '-R', directory + '/*', '/'])
else:
self.execute(ssh, ['cp', '-R', directory,
'/base-backup/' + self.repo_id.name])
self.execute(ssh, ['chmod', '-R', '777',
'/base-backup/' + self.repo_id.name])
self.execute(ssh, ['rm', '-rf', directory + '*'])
ssh.close()
@api.multi
def deploy_base(self):
"""
Hook which can be called by submodules to execute commands after we
restored a base.
"""
return
@api.multi
def deploy(self):
"""
Build the save and move it into the backup container.
"""
self.log('Saving ' + self.name)
self.log('Comment: ' + self.comment)
if self.repo_id.type == 'base' and self.base_id:
base = self.base_id
ssh = self.connect(
base.service_id.container_id.fullname,
username=base.application_id.type_id.system_user)
self.execute(ssh,
['mkdir', '-p', '/base-backup/' + self.repo_id.name])
for key, database in base.databases.iteritems():
if base.service_id.database_type != 'mysql':
                    self.execute(ssh, [
                        'pg_dump', '-O',
                        '-h', base.service_id.database_server,
                        '-U', base.service_id.db_user, database,
                        '>', '/base-backup/' + self.repo_id.name + '/' +
                        database + '.dump'])
else:
self.execute(ssh, [
'mysqldump',
'-h', base.service_id.database_server,
'-u', base.service_id.db_user,
'-p' + base.service_id.database_password,
database, '>', '/base-backup/' + self.repo_id.name +
'/' + database + '.dump'])
self.deploy_base()
self.execute(ssh, ['chmod', '-R', '777',
'/base-backup/' + self.repo_id.name])
ssh.close()
directory = '/tmp/' + self.repo_id.name
ssh = self.connect(self.container_id.fullname)
self.execute(ssh, ['rm', '-rf', directory + '*'])
self.execute(ssh, ['mkdir', directory])
if self.repo_id.type == 'container':
for volume in self.container_volumes_comma.split(','):
self.execute(ssh, ['cp', '-R', '--parents', volume, directory])
else:
self.execute(ssh, ['cp', '-R',
'/base-backup/' + self.repo_id.name + '/*',
directory])
self.execute(ssh, [
'echo "' + self.now_date + '" > ' + directory + '/backup-date'])
self.execute(ssh, ['chmod', '-R', '777', directory + '*'])
ssh.close()
ssh = self.connect(self.backup_id.fullname, username='backup')
if self.repo_id.type == 'container':
name = self.container_id.fullname
else:
name = self.base_id.fullname_
self.execute(ssh, ['rm', '-rf', '/opt/backup/list/' + name])
self.execute(ssh, ['mkdir', '-p', '/opt/backup/list/' + name])
self.execute(ssh, [
'echo "' + self.repo_id.name +
'" > /opt/backup/list/' + name + '/repo'])
self.send(ssh, self.home_directory + '/.ssh/config',
'/home/backup/.ssh/config')
self.send(ssh,
self.home_directory + '/.ssh/keys/' +
self.container_id.fullname + '.pub',
'/home/backup/.ssh/keys/' +
self.container_id.fullname + '.pub')
self.send(ssh,
self.home_directory + '/.ssh/keys/' +
self.container_id.fullname,
'/home/backup/.ssh/keys/' +
self.container_id.fullname)
self.execute(ssh, ['chmod', '-R', '700', '/home/backup/.ssh'])
self.execute(ssh, ['rm', '-rf', directory])
self.execute(ssh, ['mkdir', directory])
self.execute(ssh, [
'rsync', "-e 'ssh -o StrictHostKeyChecking=no'", '-ra',
self.container_id.fullname + ':' + directory + '/', directory])
if self.backup_id.backup_method == 'simple':
self.execute(ssh, [
'mkdir', '-p', '/opt/backup/simple/' +
self.repo_id.name + '/' + self.name])
self.execute(ssh, [
'cp', '-R', directory + '/*',
'/opt/backup/simple/' + self.repo_id.name + '/' + self.name])
            self.execute(ssh, [
                'rm', '/opt/backup/simple/' + self.repo_id.name + '/latest'])
            self.execute(ssh, [
                'ln', '-s',
                '/opt/backup/simple/' + self.repo_id.name + '/' + self.name,
                '/opt/backup/simple/' + self.repo_id.name + '/latest'])
if self.backup_id.backup_method == 'bup':
self.execute(ssh, ['export BUP_DIR=/opt/backup/bup;',
'bup index ' + directory])
self.execute(ssh, [
'export BUP_DIR=/opt/backup/bup;',
'bup save -n ' + self.repo_id.name + ' -d ' +
str(int(self.now_epoch)) + ' --strip ' + directory])
self.execute(ssh, ['rm', '-rf', directory + '*'])
self.execute(ssh, ['rm', '/home/backup/.ssh/keys/*'])
ssh.close()
ssh = self.connect(self.container_id.fullname)
self.execute(ssh, ['rm', '-rf', directory + '*'])
ssh.close()
if self.repo_id.type == 'base':
ssh = self.connect(
self.base_id.service_id.container_id.fullname,
username=self.base_id.application_id.type_id.system_user)
self.execute(ssh,
['rm', '-rf', '/base-backup/' + self.repo_id.name])
ssh.close()
return
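# Hedged usage sketch (assumes an Odoo/OpenERP environment `env` plus
# existing backup-container, repository and container records; all names
# below are illustrative, not part of this module):
# save = env['clouder.save.save'].create({
#     'name': '2015-05-01-120000',
#     'backup_id': backup_container.id,
#     'repo_id': repository.id,
#     'container_id': container.id,
# })
# save.restore()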
|
"""Navigation (back/forward) indicator displayed in the statusbar."""
from qutebrowser.mainwindow.statusbar import textbase
class Backforward(textbase.TextBase):
"""Shows navigation indicator (if you can go backward and/or forward)."""
def __init__(self, parent=None):
super().__init__(parent)
self.enabled = False
def on_tab_cur_url_changed(self, tabs):
"""Called on URL changes."""
tab = tabs.widget.currentWidget()
if tab is None: # pragma: no cover
self.setText('')
self.hide()
return
self.on_tab_changed(tab)
def on_tab_changed(self, tab):
"""Update the text based on the given tab."""
text = ''
if tab.history.can_go_back():
text += '<'
if tab.history.can_go_forward():
text += '>'
if text:
text = '[' + text + ']'
self.setText(text)
self.setVisible(bool(text) and self.enabled)
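# Minimal illustration of the text-building rule above, independent of Qt
# (this helper is not part of qutebrowser):
def _indicator_text(can_go_back, can_go_forward):
    """Return '[<]', '[>]', '[<>]' or '' for the given history state."""
    text = ('<' if can_go_back else '') + ('>' if can_go_forward else '')
    return '[' + text + ']' if text else ''
assert _indicator_text(True, False) == '[<]'
assert _indicator_text(True, True) == '[<>]'
assert _indicator_text(False, False) == ''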
|
import analysis_params
from cellsim16popsParams_modified_spontan import multicompartment_params
from plot_methods import plot_signal_sum, plot_signal_sum_colorplot
import plotting_helpers as phlp
import h5py
import matplotlib.pyplot as plt
import os
import numpy as np
import matplotlib.style
matplotlib.style.use('classic')
def fig_exc_inh_contrib(
fig, axes, params, savefolders, T=[
800, 1000], transient=200, panel_labels='FGHIJ', show_xlabels=True):
    '''
    Plot time-series LFPs and CSDs with signal variances as a function of
    depth for the cases with all synapses intact, or with excitatory or
    inhibitory input to the postsynaptic target region knocked out.
    args:
    ::
        fig : matplotlib.figure.Figure object
        axes : flattened array of matplotlib.axes.Axes objects (5 panels)
        params : multicompartment_params object
        savefolders : list of simulation output folders
        T : list of ints, first and last time sample
        transient : int, duration of transient period
        panel_labels : string, one subplot label per panel
        show_xlabels : bool, whether to draw x-axis labels
    '''
# params = multicompartment_params()
# ana_params = analysis_params.params()
# file name types
file_names = [
'LaminarCurrentSourceDensity_sum.h5',
'RecExtElectrode_sum.h5']
    # CSD unit conversion nA um^-3 -> muA mm^-3; LFP (mV) left unscaled
    scaling_factors = [1E6, 1.]
# panel titles
panel_titles = [
'LFP&CSD\nexc. syn.',
'LFP&CSD\ninh. syn.',
'LFP&CSD\ncompound',
'CSD variance',
'LFP variance', ]
# labels
labels = [
'exc. syn.',
'inh. syn.',
'SUM']
# some colors for traces
if analysis_params.bw:
colors = ['k', 'gray', 'k']
# lws = [0.75, 0.75, 1.5]
lws = [1.25, 1.25, 1.25]
else:
colors = [analysis_params.colorE, analysis_params.colorI, 'k']
# colors = 'rbk'
# lws = [0.75, 0.75, 1.5]
lws = [1.25, 1.25, 1.25]
# scalebar labels
units = ['$\\mu$A mm$^{-3}$', 'mV']
# depth of each contact site
depth = params.electrodeParams['z']
# #set up figure
# #figure aspect
# ana_params.set_PLOS_2column_fig_style(ratio=0.5)
# fig, axes = plt.subplots(1,5)
# fig.subplots_adjust(left=0.06, right=0.96, wspace=0.4, hspace=0.2)
# clean up
for ax in axes.flatten():
phlp.remove_axis_junk(ax)
for i, (scaling_factor, file_name) in enumerate(
zip(scaling_factors, file_names)):
        # get the global data scaling bar range for use in later plots
        # TODO: find a nicer solution that avoids creating a throwaway figure
dum_fig, dum_ax = plt.subplots(1)
vlim_LFP = 0
vlim_CSD = 0
for savefolder in savefolders:
vlimround0 = plot_signal_sum(
dum_ax,
params,
os.path.join(
os.path.split(
params.savefolder)[0],
savefolder,
file_name),
rasterized=False,
scaling_factor=scaling_factor)
if vlimround0 > vlim_LFP:
vlim_LFP = vlimround0
im = plot_signal_sum_colorplot(
dum_ax, params, os.path.join(
os.path.split(
params.savefolder)[0], savefolder, file_name), cmap=plt.get_cmap(
'gray', 21) if analysis_params.bw else plt.get_cmap(
'bwr_r', 21), rasterized=False, scaling_factor=scaling_factor)
if abs(im.get_array()).max() > vlim_CSD:
vlim_CSD = abs(im.get_array()).max()
plt.close(dum_fig)
for j, savefolder in enumerate(savefolders):
ax = axes[j]
if i == 1:
plot_signal_sum(ax, params, os.path.join(os.path.split(params.savefolder)[0], savefolder, file_name),
unit=units[i],
scaling_factor=scaling_factor,
T=T,
color='k',
# color='k' if analysis_params.bw else colors[j],
vlimround=vlim_LFP, rasterized=False)
elif i == 0:
im = plot_signal_sum_colorplot(
ax,
params,
os.path.join(
os.path.split(
params.savefolder)[0],
savefolder,
file_name),
unit=r'($\mu$Amm$^{-3}$)',
T=T,
ylabels=True,
colorbar=False,
fancy=False,
cmap=plt.get_cmap(
'gray',
21) if analysis_params.bw else plt.get_cmap(
'bwr_r',
21),
absmax=vlim_CSD,
rasterized=False,
scaling_factor=scaling_factor)
ax.axis((T[0], T[1], -1550, 50))
ax.set_title(panel_titles[j], va='baseline')
if i == 0:
phlp.annotate_subplot(
ax, ncols=1, nrows=1, letter=panel_labels[j])
if j != 0:
ax.set_yticklabels([])
if i == 0: # and j == 2:
cb = phlp.colorbar(fig, ax, im,
width=0.05, height=0.5,
hoffset=-0.05, voffset=0.5)
cb.set_label('($\\mu$Amm$^{-3}$)', labelpad=0.)
ax.xaxis.set_major_locator(plt.MaxNLocator(3))
if show_xlabels:
ax.set_xlabel(r'$t$ (ms)', labelpad=0.)
else:
ax.set_xlabel('')
    # variance ("power") of the CSD as a function of depth
    ax = axes[3]
datas = []
for j, savefolder in enumerate(savefolders):
f = h5py.File(os.path.join(os.path.split(params.savefolder)[0],
savefolder,
'LaminarCurrentSourceDensity_sum.h5'),
'r')
data = f['data'][()] * 1E6 # unit nA um^-3 -> muA mm-3
var = data[:, transient:].var(axis=1)
ax.semilogx(var, depth,
color=colors[j], label=labels[j], lw=lws[j], clip_on=False)
datas.append(f['data'][()][:, transient:])
f.close()
    # consistency check: var(a+b) = var(a) + var(b) + 2*cov(a, b)
vardiff = datas[0].var(axis=1) + datas[1].var(axis=1) + np.array([2 * np.cov(x, y)[0, 1]
for (x, y) in zip(datas[0], datas[1])]) - datas[2].var(axis=1)
#ax.semilogx(abs(vardiff), depth, color='gray', lw=1, label='control')
ax.axis(ax.axis('tight'))
ax.set_ylim(-1550, 50)
ax.set_yticks(-np.arange(16) * 100)
if show_xlabels:
ax.set_xlabel(r'$\sigma^2$ ($(\mu$Amm$^{-3})^2$)', labelpad=0.)
ax.set_title(panel_titles[3], va='baseline')
phlp.annotate_subplot(ax, ncols=1, nrows=1, letter=panel_labels[3])
ax.set_yticklabels([])
    # variance ("power") of the LFP as a function of depth
    ax = axes[4]
datas = []
for j, savefolder in enumerate(savefolders):
f = h5py.File(os.path.join(os.path.split(params.savefolder)[0],
savefolder,
'RecExtElectrode_sum.h5'),
'r')
var = f['data'][()][:, transient:].var(axis=1)
ax.semilogx(var, depth,
color=colors[j], label=labels[j], lw=lws[j], clip_on=False)
datas.append(f['data'][()][:, transient:])
f.close()
    # consistency check: var(a+b) = var(a) + var(b) + 2*cov(a, b)
vardiff = datas[0].var(axis=1) + datas[1].var(axis=1) + np.array([2 * np.cov(x, y)[0, 1]
for (x, y) in zip(datas[0], datas[1])]) - datas[2].var(axis=1)
ax.axis(ax.axis('tight'))
ax.set_ylim(-1550, 50)
ax.set_yticks(-np.arange(16) * 100)
if show_xlabels:
ax.set_xlabel(r'$\sigma^2$ (mV$^2$)', labelpad=0.)
ax.set_title(panel_titles[4], va='baseline')
phlp.annotate_subplot(ax, ncols=1, nrows=1, letter=panel_labels[4])
ax.legend(bbox_to_anchor=(1.3, 1.0), frameon=False)
ax.set_yticklabels([])
# return fig
if __name__ == '__main__':
plt.close('all')
params = multicompartment_params()
ana_params = analysis_params.params()
ana_params.set_PLOS_2column_fig_style(ratio=1)
fig, axes = plt.subplots(2, 5)
fig.subplots_adjust(
left=0.06,
right=0.96,
wspace=0.4,
hspace=0.16,
bottom=0.05,
top=0.95)
fig_exc_inh_contrib(fig, axes[0], params,
savefolders=['simulation_output_modified_ac_exc',
'simulation_output_modified_ac_inh',
'simulation_output_modified_ac_input'],
T=[800, 1000], transient=200, panel_labels='ABCDE',
show_xlabels=False)
fig_exc_inh_contrib(fig,
axes[1],
params,
savefolders=['simulation_output_modified_regular_exc',
'simulation_output_modified_regular_inh',
'simulation_output_modified_regular_input'],
                        T=[890, 920],
transient=200,
panel_labels='FGHIJ')
fig.savefig('figure_10.pdf', dpi=300, bbox_inches='tight', pad_inches=0)
fig.savefig('figure_10.eps', bbox_inches='tight', pad_inches=0.01)
plt.show()
|
from cloudscape.portal.ui.core.template import PortalTemplate
class AppController(PortalTemplate):
"""
Portal formulas application controller class.
"""
def __init__(self, parent):
super(AppController, self).__init__(parent)
# Construct the request map
self.map = self._construct_map()
def _construct_map(self):
"""
Construct the request map.
"""
return {
'panels': {
'overview': {
'data': self._overview
},
'details': {
'data': self._details
},
'run': {
'data': self._run
}
},
'default': 'overview'
}
def _overview(self):
"""
Construct template data needed to render the formulas overview page.
"""
fd = self.api_call('formula', 'get')
# Split formulas into service/utility and group formulas
su_formula = []
gr_formula = []
for f in fd:
if f['type'] == 'group':
gr_formula.append(f)
else:
su_formula.append(f)
# Return the template data
return {
'groups': gr_formula,
'srv_util': su_formula,
'page': {
'title': 'CloudScape Formulas',
'header': 'Formulas',
'css': [
'formula/overview.css'
],
'contents': [
'app/formula/tables/all.html'
],
'popups': [
'app/formula/popups/create.html',
'app/formula/popups/delete.html'
]
}
}
def _details(self):
"""
Construct the template data needed to render the formula details page.
"""
# Make sure a formula ID parameter is supplied
if not self.request_contains(self.portal.request.get, 'formula'):
return self.set_redirect('/portal/formula?panel=%s' % self.map['default'])
# Check if formula details are retrievable
ed = self.api_call('editor', 'get', {'uuid': self.portal.request.get.formula})
if not ed:
return self.set_redirect('/portal/formula?panel=overview')
# If editing the formula
formula_edit = 'no'
if self.request_contains(self.portal.request.get, 'edit', ['yes']):
formula_edit = 'yes'
# Formula details template data
base_data = {
'formula': {
'state': self.json.dumps({
'locked': ed['locked'],
'locked_by': ed['locked_by']
}),
'uuid': self.portal.request.get.formula,
'edit': formula_edit,
'manifest': self.json.dumps(ed['manifest']),
'templates': {},
'all': self.api_call('formula', 'get'),
'name': ed['name'],
'label': ed['label'],
'desc': ed['desc']
},
'edit': {
'manifest': self.json.dumps(ed['manifest']),
'templates': {}
},
'template': {},
'page': {
'title': 'Formula - \'%s\'' % ed['name'],
'css': [
'formula/details.css',
'css/vendor/chrome.css'
],
'contents': [
'app/formula/tables/details.html'
],
'popups': [
'app/formula/popups/editor/close.html',
'app/formula/popups/editor/add_template.html'
]
}
}
# Construct a list of available template variables
t_vars = {}
if 'fieldsets' in ed['manifest']:
for fieldset in ed['manifest']['fieldsets']:
for field in fieldset['fields']:
t_vars[field['name']] = field['desc']
# Available template variables
base_data['template']['vars'] = t_vars
# Construct a list of templates
t_names = []
t_contents = {}
for name, encoded in ed['templates'].iteritems():
t_names.append(name)
t_contents[name] = encoded
# Set template names / contents / encoded contents
base_data['formula']['templates'] = '["' + '","'.join(t_names) + '"]'
base_data['edit']['templates'] = t_names
base_data['template']['contents'] = t_contents
base_data['template']['encoded'] = self.json.dumps(t_contents)
# Return the template data
return base_data
def _run(self):
"""
Construct and return the template data required to render the formula run page.
"""
# Make sure a formula ID parameter is supplied
if not self.request_contains(self.portal.request.get, 'formula'):
return self.set_redirect('/portal/formula?panel=%s' % self.map['default'])
# Get the formula details
fd = self.api_call('formula', 'get', {'uuid': self.portal.request.get.formula})
if not fd:
return self.set_redirect('/portal/formula?panel=%s' % self.map['default'])
# Get a list of all managed hosts
mh = self.api_call('host', 'get')
# Construct the hosts template element
th = {}
for key, host in enumerate(mh):
th[host['uuid']] = {
'type': host['os_type'],
'label': '%s: %s %s %s - %s' % (host['name'], host['sys']['os']['distro'], host['sys']['os']['version'], host['sys']['os']['arch'], host['ip'])
}
        # Set the formula requirements and template files
        fr = '' if 'requires' not in fd['manifest']['formula'] else ', '.join(fd['manifest']['formula']['requires'])
        ft = ', '.join(fd['templates'])
        # Build a comma-separated list of the supported operating systems
        fs = ', '.join(fd['manifest']['formula']['supports'])
# Formula information
fi = self.OrderedDict([
('UUID', self.portal.request.get.formula),
('Name', fd['name']),
('Label', fd['label']),
('Created', fd['created']),
('Modified', fd['modified']),
('Templates', ft),
('Requires', fr),
('Supports', fs)])
# Base template data
base_data = {
'formula': {
'uuid': self.portal.request.get.formula,
'name': fd['name'],
'label': fd['label'],
'desc': fd['desc'],
'info': fi,
'type': fd['type'],
                'setgroup': bool(fd['manifest']['formula'].get('setgroup')),
                'actions': fd['manifest'].get('actions', {}),
                'fields': fd['manifest'].get('fieldsets', {}),
},
'managed_hosts': th,
'page': {
'title': 'Run Formula - \'%s\'' % fd['name'],
'css': [
'formula/run.css'
],
'contents': [
'app/formula/tables/run.html'
]
}
}
# Look for any group select menus
formula_groups = []
if 'fieldsets' in fd['manifest']:
for fieldset in fd['manifest']['fieldsets']:
for field in fieldset['fields']:
if (field['type'] == 'select_group') and ('group' in field):
kps = field['group'].split(';')
for kp in kps:
kv = kp.split('=')
# Only support formula filters
if kv[0] == 'formula':
# Get all groups for that formula
_groups = self.api_call('host', 'get_group', {'formula_id': kv[1]})
if _groups:
for _group in _groups:
formula_groups.append({
'name': _group['name'],
'uuid': _group['uuid']
})
# If any formula groups found
if formula_groups:
base_data['formula_groups'] = formula_groups
# Return the template data
return base_data
def construct(self):
"""
Construct and return the template object.
"""
        # If the panel is not supported
        if self.panel not in self.map['panels']:
            return self.redirect('portal/formula?panel=%s' % self.map['default'])
# Set the template file
t_file = 'app/formula/%s.html' % self.panel
# Set the template attributes
self.set_template(self.map['panels'][self.panel]['data']())
# Construct and return the template response
return self.response()
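# Hedged usage sketch (the request/response plumbing is provided by
# PortalTemplate; `portal` is an illustrative stand-in): the portal
# dispatches e.g. /portal/formula?panel=details by constructing the
# controller and letting it render the mapped panel.
# controller = AppController(portal)
# response = controller.construct()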
|
import unittest, test.test_support
import sys, cStringIO, os
import struct
class SysModuleTest(unittest.TestCase):
def test_original_displayhook(self):
import __builtin__
savestdout = sys.stdout
out = cStringIO.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(__builtin__, "_"):
del __builtin__._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assert_(not hasattr(__builtin__, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(__builtin__._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
sys.stdout = savestdout
def test_lost_displayhook(self):
olddisplayhook = sys.displayhook
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
sys.displayhook = olddisplayhook
def test_custom_displayhook(self):
olddisplayhook = sys.displayhook
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
sys.displayhook = olddisplayhook
def test_original_excepthook(self):
savestderr = sys.stderr
err = cStringIO.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError, exc:
eh(*sys.exc_info())
sys.stderr = savestderr
self.assert_(err.getvalue().endswith("ValueError: 42\n"))
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exc_clear(self):
self.assertRaises(TypeError, sys.exc_clear, 42)
# Verify that exc_info is present and matches exc, then clear it, and
# check that it worked.
def clear_check(exc):
typ, value, traceback = sys.exc_info()
self.assert_(typ is not None)
self.assert_(value is exc)
self.assert_(traceback is not None)
with test.test_support._check_py3k_warnings():
sys.exc_clear()
typ, value, traceback = sys.exc_info()
self.assert_(typ is None)
self.assert_(value is None)
self.assert_(traceback is None)
def clear():
try:
raise ValueError, 42
except ValueError, exc:
clear_check(exc)
# Raise an exception and check that it can be cleared
clear()
# Verify that a frame currently handling an exception is
# unaffected by calling exc_clear in a nested frame.
try:
raise ValueError, 13
except ValueError, exc:
typ1, value1, traceback1 = sys.exc_info()
clear()
typ2, value2, traceback2 = sys.exc_info()
self.assert_(typ1 is typ2)
self.assert_(value1 is exc)
self.assert_(value1 is value2)
self.assert_(traceback1 is traceback2)
# Check that an exception can be cleared outside of an except block
clear_check(exc)
def test_exit(self):
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
try:
sys.exit(0)
except SystemExit, exc:
self.assertEquals(exc.code, 0)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
        # call with integer argument
        try:
            sys.exit(42)
        except SystemExit, exc:
            self.assertEquals(exc.code, 42)
        except:
            self.fail("wrong exception")
        else:
            self.fail("no exception")
        # call with tuple argument with one entry
        # (the entry is unpacked)
        try:
            sys.exit((42,))
except SystemExit, exc:
self.assertEquals(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with string argument
try:
sys.exit("exit")
except SystemExit, exc:
self.assertEquals(exc.code, "exit")
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with two entries
try:
sys.exit((17, 23))
except SystemExit, exc:
self.assertEquals(exc.code, (17, 23))
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# test that the exit machinery handles SystemExits properly
import subprocess
# both unnormalized...
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit, 46"])
self.assertEqual(rc, 46)
# ... and normalized
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit(47)"])
self.assertEqual(rc, 47)
def check_exit_message(code, expected, env=None):
process = subprocess.Popen([sys.executable, "-c", code],
stderr=subprocess.PIPE, env=env)
stdout, stderr = process.communicate()
self.assertEqual(process.returncode, 1)
self.assertTrue(stderr.startswith(expected),
"%s doesn't start with %s" % (repr(stderr), repr(expected)))
        # test that the stderr buffer is flushed before the exit message
        # is written into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the unicode message is encoded to the stderr encoding
env = os.environ.copy()
env['PYTHONIOENCODING'] = 'latin-1'
check_exit_message(
r'import sys; sys.exit(u"h\xe9")',
b"h\xe9", env=env)
def test_getdefaultencoding(self):
if test.test_support.have_unicode:
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assert_(isinstance(sys.getdefaultencoding(), str))
# testing sys.settrace() is done in test_trace.py
# testing sys.setprofile() is done in test_profile.py
def test_setcheckinterval(self):
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEquals(sys.getcheckinterval(), n)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_getwindowsversion(self):
if hasattr(sys, "getwindowsversion"):
v = sys.getwindowsversion()
self.assert_(isinstance(v, tuple))
self.assertEqual(len(v), 5)
self.assert_(isinstance(v[0], int))
self.assert_(isinstance(v[1], int))
self.assert_(isinstance(v[2], int))
self.assert_(isinstance(v[3], int))
self.assert_(isinstance(v[4], str))
def test_dlopenflags(self):
if hasattr(sys, "setdlopenflags"):
self.assert_(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assert_(isinstance(sys.gettotalrefcount(), int))
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assert_(
SysModuleTest.test_getframe.im_func.func_code \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
def current_frames_with_threads(self):
import threading, thread
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(thread.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = thread.get_ident()
self.assert_(main_id in d)
self.assert_(thread_id in d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assert_(frame is sys._getframe())
        # Verify that the captured thread frame is blocked in g456, called
        # from f123. This is a little tricky, since various bits of
        # threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assert_(sourceline in ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assert_(0 in d)
self.assert_(d[0] is sys._getframe())
def test_attributes(self):
self.assert_(isinstance(sys.api_version, int))
self.assert_(isinstance(sys.argv, list))
self.assert_(sys.byteorder in ("little", "big"))
self.assert_(isinstance(sys.builtin_module_names, tuple))
self.assert_(isinstance(sys.copyright, basestring))
self.assert_(isinstance(sys.exec_prefix, basestring))
self.assert_(isinstance(sys.executable, basestring))
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assert_(isinstance(sys.hexversion, int))
self.assert_(isinstance(sys.maxint, int))
if test.test_support.have_unicode:
self.assert_(isinstance(sys.maxunicode, int))
self.assert_(isinstance(sys.platform, basestring))
self.assert_(isinstance(sys.prefix, basestring))
self.assert_(isinstance(sys.version, basestring))
vi = sys.version_info
self.assert_(isinstance(vi, tuple))
self.assertEqual(len(vi), 5)
self.assert_(isinstance(vi[0], int))
self.assert_(isinstance(vi[1], int))
self.assert_(isinstance(vi[2], int))
self.assert_(vi[3] in ("alpha", "beta", "candidate", "final"))
self.assert_(isinstance(vi[4], int))
def test_43581(self):
# Can't use sys.stdout, as this is a cStringIO object when
# the test runs under regrtest.
self.assert_(sys.__stdout__.encoding == sys.__stderr__.encoding)
def test_sys_flags(self):
self.failUnless(sys.flags)
attrs = ("debug", "py3k_warning", "division_warning", "division_new",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_site", "ignore_environment", "tabcheck", "verbose",
"unicode", "bytes_warning")
for attr in attrs:
self.assert_(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assert_(repr(sys.flags))
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
        import subprocess, os
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'],
stdout = subprocess.PIPE, env=env)
out = p.stdout.read().strip()
self.assertEqual(out, unichr(0xa2).encode("cp424"))
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'],
stdout = subprocess.PIPE, env=env)
out = p.stdout.read().strip()
self.assertEqual(out, '?')
def test_call_tracing(self):
self.assertEqual(sys.call_tracing(str, (2,)), "2")
self.assertRaises(TypeError, sys.call_tracing, str, 2)
def test_executable(self):
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
        # has been set to a nonexistent program name and Python is unable to
# retrieve the real program name
import subprocess
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c", 'import sys; print repr(sys.executable)'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
executable = p.communicate()[0].strip()
p.wait()
self.assert_(executable in ["''", repr(sys.executable)], executable)
class SizeofTest(unittest.TestCase):
TPFLAGS_HAVE_GC = 1<<14
TPFLAGS_HEAPTYPE = 1L<<9
def setUp(self):
self.c = len(struct.pack('c', ' '))
self.H = len(struct.pack('H', 0))
self.i = len(struct.pack('i', 0))
self.l = len(struct.pack('l', 0))
self.P = len(struct.pack('P', 0))
# due to missing size_t information from struct, it is assumed that
# sizeof(Py_ssize_t) = sizeof(void*)
self.header = 'PP'
self.vheader = self.header + 'P'
if hasattr(sys, "gettotalrefcount"):
self.header += '2P'
self.vheader += '2P'
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
self.file = open(test.test_support.TESTFN, 'wb')
def tearDown(self):
self.file.close()
test.test_support.unlink(test.test_support.TESTFN)
def check_sizeof(self, o, size):
result = sys.getsizeof(o)
if ((type(o) == type) and (o.__flags__ & self.TPFLAGS_HEAPTYPE) or\
((type(o) != type) and (type(o).__flags__ & self.TPFLAGS_HAVE_GC))):
size += self.gc_headsize
msg = 'wrong size for %s: got %d, expected %d' \
% (type(o), result, size)
self.assertEqual(result, size, msg)
def calcsize(self, fmt):
"""Wrapper around struct.calcsize which enforces the alignment of the
end of a structure to the alignment requirement of pointer.
Note: This wrapper should only be used if a pointer member is included
and no member with a size larger than a pointer exists.
"""
return struct.calcsize(fmt + '0P')
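    # For example, on a typical 64-bit (LP64) build struct.calcsize('i') is 4,
    # while self.calcsize('i') returns struct.calcsize('i0P') == 8, because
    # the trailing '0P' pads the struct out to pointer alignment.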
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
h = self.header
size = self.calcsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), size(h + 'l'))
# but lists are
self.assertEqual(sys.getsizeof([]), size(h + 'P PP') + gc_header_size)
def test_default(self):
h = self.header
size = self.calcsize
self.assertEqual(sys.getsizeof(True, -1), size(h + 'l'))
def test_objecttypes(self):
# check all types defined in Objects/
h = self.header
vh = self.vheader
size = self.calcsize
check = self.check_sizeof
# bool
check(True, size(h + 'l'))
# buffer
with test.test_support._check_py3k_warnings():
check(buffer(''), size(h + '2P2Pil'))
# builtin_function_or_method
check(len, size(h + '3P'))
# bytearray
samples = ['', 'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, size(vh + 'iPP') + x.__alloc__() * self.c)
# bytearray_iterator
check(iter(bytearray()), size(h + 'PP'))
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().func_closure[0], size(h + 'P'))
# classobj (old-style class)
class class_oldstyle():
def method():
pass
check(class_oldstyle, size(h + '6P'))
# instance (old-style class)
check(class_oldstyle(), size(h + '3P'))
# instancemethod (old-style class)
check(class_oldstyle().method, size(h + '4P'))
# complex
check(complex(0,1), size(h + '2d'))
# code
check(get_cell().func_code, size(h + '4i8Pi2P'))
# BaseException
check(BaseException(), size(h + '3P'))
# UnicodeEncodeError
check(UnicodeEncodeError("", u"", 0, 0, ""), size(h + '5P2PP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", "", 0, 0, ""), size(h + '5P2PP'))
# UnicodeTranslateError
check(UnicodeTranslateError(u"", 0, 1, ""), size(h + '5P2PP'))
# method_descriptor (descriptor object)
check(str.lower, size(h + '2PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size(h + '2PP'))
# getset_descriptor (descriptor object)
import __builtin__
check(__builtin__.file.closed, size(h + '2PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size(h + '2P2P'))
# dictproxy
class C(object): pass
check(C.__dict__, size(h + 'P'))
# method-wrapper (descriptor object)
check({}.__iter__, size(h + '2P'))
# dict
check({}, size(h + '3P2P' + 8*'P2P'))
x = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(x, size(h + '3P2P' + 8*'P2P') + 16*size('P2P'))
# dictionary-keyiterator
check({}.iterkeys(), size(h + 'P2PPP'))
# dictionary-valueiterator
check({}.itervalues(), size(h + 'P2PPP'))
# dictionary-itemiterator
check({}.iteritems(), size(h + 'P2PPP'))
# ellipses
check(Ellipsis, size(h + ''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size(h + '32B2iB'))
# enumerate
check(enumerate([]), size(h + 'l3P'))
# file
check(self.file, size(h + '4P2i4P3i3P3i'))
# float
check(float(0), size(h + 'd'))
# sys.floatinfo
check(sys.float_info, size(vh) + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, size(vh + '12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size(h + '9P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size(h + 'P'))
# classmethod
check(bar, size(h + 'P'))
# generator
def get_gen(): yield 1
check(get_gen(), size(h + 'Pi2P'))
# integer
check(1, size(h + 'l'))
check(100, size(h + 'l'))
# iterator
check(iter('abc'), size(h + 'lP'))
# callable-iterator
import re
check(re.finditer('',''), size(h + '2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, size(vh + 'PP') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size(h + 'lP'))
# listreverseiterator (list)
check(reversed([]), size(h + 'lP'))
# long
check(0L, size(vh + 'H') - self.H)
check(1L, size(vh + 'H'))
check(-1L, size(vh + 'H'))
check(32768L, size(vh + 'H') + self.H)
check(32768L*32768L-1, size(vh + 'H') + self.H)
check(32768L*32768L, size(vh + 'H') + 2*self.H)
# module
check(unittest, size(h + 'P'))
# None
check(None, size(h + ''))
# object
check(object(), size(h + ''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size(h + '4Pi'))
# PyCObject
# XXX
# rangeiterator
check(iter(xrange(1)), size(h + '4l'))
# reverse
check(reversed(''), size(h + 'PP'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size(h + '3P2P' + PySet_MINSIZE*'lP' + 'lP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*struct.calcsize('lP'))
check(frozenset(sample), s + newsize*struct.calcsize('lP'))
# setiterator
check(iter(set()), size(h + 'P3P'))
# slice
check(slice(1), size(h + '3P'))
# str
check('', size(vh + 'lic'))
check('abc', size(vh + 'lic') + 3*self.c)
# super
check(super(int), size(h + '3P'))
# tuple
check((), size(vh))
check((1,2,3), size(vh) + 3*self.P)
# tupleiterator
check(iter(()), size(h + 'lP'))
# type
# (PyTypeObject + PyNumberMethods + PyMappingMethods +
# PySequenceMethods + PyBufferProcs)
s = size(vh + 'P2P15Pl4PP9PP11PI') + size('41P 10P 3P 6P')
class newstyleclass(object):
pass
check(newstyleclass, s)
# builtin type
check(int, s)
# NotImplementedType
import types
check(types.NotImplementedType, s)
# unicode
usize = len(u'\0'.encode('unicode-internal'))
samples = [u'', u'1'*100]
# we need to test for both sizes, because we don't know if the string
# has been cached
for s in samples:
check(s, size(h + 'PPlP') + usize * (len(s) + 1))
# weakref
import weakref
check(weakref.ref(int), size(h + '2Pl2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size(h + '2Pl2P'))
# xrange
check(xrange(1), size(h + '3l'))
check(xrange(66000), size(h + '3l'))
def test_pythontypes(self):
# check all types defined in Python/
h = self.header
vh = self.vheader
size = self.calcsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size(h + ''))
# imp.NullImporter
import imp
check(imp.NullImporter(self.file.name), size(h + ''))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
        if tb is not None:
check(tb, size(h + '2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, size(vh) + self.P * len(sys.flags))
def test_main():
test_classes = (SysModuleTest, SizeofTest)
test.test_support.run_unittest(*test_classes)
if __name__ == "__main__":
test_main()
|
"""
The :mod:`peregrine.iqgen.bits.doppler_factory` module contains classes and
functions related to the object factory for Doppler control objects.
"""
from peregrine.iqgen.bits.doppler_poly import Doppler as PolyDoppler
from peregrine.iqgen.bits.doppler_sine import Doppler as SineDoppler
class ObjectFactory(object):
'''
Object factory for doppler control objects.
'''
def __init__(self):
super(ObjectFactory, self).__init__()
def toMapForm(self, obj):
t = type(obj)
if t is PolyDoppler:
return self.__PolyDoppler_ToMap(obj)
elif t is SineDoppler:
return self.__SineDoppler_ToMap(obj)
else:
raise ValueError("Invalid object type")
def fromMapForm(self, data):
t = data['type']
if t == 'PolyDoppler':
return self.__MapTo_PolyDoppler(data)
elif t == 'SineDoppler':
return self.__MapTo_SineDoppler(data)
else:
raise ValueError("Invalid object type")
def __PolyDoppler_ToMap(self, obj):
data = {'type': 'PolyDoppler',
'distance0_m': obj.distance0_m,
'tec_epm2': obj.tec_epm2,
'coeffs': obj.coeffs}
return data
def __SineDoppler_ToMap(self, obj):
data = {'type': 'SineDoppler',
'distance0_m': obj.distance0_m,
'tec_epm2': obj.tec_epm2,
'speed0_mps': obj.speed0_mps,
            'amplutude_mps': obj.amplutude_mps,  # (sic) matches the attribute spelling
'period_s': obj.period_s}
return data
def __MapTo_PolyDoppler(self, data):
distance0_m = data['distance0_m']
tec_epm2 = data['tec_epm2']
coeffs = data['coeffs']
return PolyDoppler(distance0_m=distance0_m,
tec_epm2=tec_epm2,
coeffs=coeffs)
def __MapTo_SineDoppler(self, data):
distance0_m = data['distance0_m']
tec_epm2 = data['tec_epm2']
speed0_mps = data['speed0_mps']
amplutude_mps = data['amplutude_mps']
period_s = data['period_s']
return SineDoppler(distance0_m=distance0_m,
tec_epm2=tec_epm2,
speed0_mps=speed0_mps,
amplutude_mps=amplutude_mps,
period_s=period_s)
factoryObject = ObjectFactory()
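# A minimal round-trip sketch (illustrative only; the PolyDoppler keyword
# arguments are assumed from the keys handled by __MapTo_PolyDoppler above):
if __name__ == '__main__':
  doppler = PolyDoppler(distance0_m=0., tec_epm2=50., coeffs=[0., 10.])
  data = factoryObject.toMapForm(doppler)     # -> {'type': 'PolyDoppler', ...}
  restored = factoryObject.fromMapForm(data)  # back to a PolyDoppler instance
  print(type(restored).__name__)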
|
__all__ = ['convert_language', 'list_languages', 'LANGUAGES']
def convert_language(language, to_iso, from_iso=None):
"""Convert a language into another format
:param string language: language
:param int to_iso: convert language to ISO-639-x
:param int from_iso: convert language from ISO-639-x
:return: converted language
:rtype: string
"""
    if from_iso is None:  # if no from_iso is given, try to guess it
        if language.startswith(language[:1].upper()):
            from_iso = 0  # capitalized, so assume a full language name
        elif len(language) == 2:
            from_iso = 1  # two letters: assume an ISO-639-1 code
        elif len(language) == 3:
            from_iso = 2  # three letters: assume an ISO-639-2 code
        else:
            raise ValueError('Invalid input language format')
if isinstance(language, unicode):
language = language.encode('utf-8')
converted_language = None
for language_tuple in LANGUAGES:
if language_tuple[from_iso] == language and language_tuple[to_iso]:
converted_language = language_tuple[to_iso]
break
return converted_language
def list_languages(iso):
"""List languages in the given ISO-639-x format
    :param int iso: format index to list (0 = name, 1 = ISO-639-1, 2 = ISO-639-2)
:return: languages in the requested format
:rtype: list
"""
return [l[iso] for l in LANGUAGES if l[iso]]
LANGUAGES = [('Afar', 'aa', 'aar'),
('Abkhazian', 'ab', 'abk'),
('Achinese', '', 'ace'),
('Acoli', '', 'ach'),
('Adangme', '', 'ada'),
('Adyghe; Adygei', '', 'ady'),
('Afro-Asiatic languages', '', 'afa'),
('Afrihili', '', 'afh'),
('Afrikaans', 'af', 'afr'),
('Ainu', '', 'ain'),
('Akan', 'ak', 'aka'),
('Akkadian', '', 'akk'),
('Albanian', 'sq', 'alb'),
('Aleut', '', 'ale'),
('Algonquian languages', '', 'alg'),
('Southern Altai', '', 'alt'),
('Amharic', 'am', 'amh'),
('English, Old (ca.450-1100)', '', 'ang'),
('Angika', '', 'anp'),
('Apache languages', '', 'apa'),
('Arabic', 'ar', 'ara'),
('Official Aramaic (700-300 BCE); Imperial Aramaic (700-300 BCE)', '', 'arc'),
('Aragonese', 'an', 'arg'),
('Armenian', 'hy', 'arm'),
('Mapudungun; Mapuche', '', 'arn'),
('Arapaho', '', 'arp'),
('Artificial languages', '', 'art'),
('Arawak', '', 'arw'),
('Assamese', 'as', 'asm'),
('Asturian; Bable; Leonese; Asturleonese', '', 'ast'),
('Athapascan languages', '', 'ath'),
('Australian languages', '', 'aus'),
('Avaric', 'av', 'ava'),
('Avestan', 'ae', 'ave'),
('Awadhi', '', 'awa'),
('Aymara', 'ay', 'aym'),
('Azerbaijani', 'az', 'aze'),
('Banda languages', '', 'bad'),
('Bamileke languages', '', 'bai'),
('Bashkir', 'ba', 'bak'),
('Baluchi', '', 'bal'),
('Bambara', 'bm', 'bam'),
('Balinese', '', 'ban'),
('Basque', 'eu', 'baq'),
('Basa', '', 'bas'),
('Baltic languages', '', 'bat'),
('Beja; Bedawiyet', '', 'bej'),
('Belarusian', 'be', 'bel'),
('Bemba', '', 'bem'),
('Bengali', 'bn', 'ben'),
('Berber languages', '', 'ber'),
('Bhojpuri', '', 'bho'),
('Bihari languages', 'bh', 'bih'),
('Bikol', '', 'bik'),
('Bini; Edo', '', 'bin'),
('Bislama', 'bi', 'bis'),
('Siksika', '', 'bla'),
('Bantu (Other)', '', 'bnt'),
('Bosnian', 'bs', 'bos'),
('Braj', '', 'bra'),
('Breton', 'br', 'bre'),
('Batak languages', '', 'btk'),
('Buriat', '', 'bua'),
('Buginese', '', 'bug'),
('Bulgarian', 'bg', 'bul'),
('Burmese', 'my', 'bur'),
('Blin; Bilin', '', 'byn'),
('Caddo', '', 'cad'),
('Central American Indian languages', '', 'cai'),
('Galibi Carib', '', 'car'),
('Catalan; Valencian', 'ca', 'cat'),
('Caucasian languages', '', 'cau'),
('Cebuano', '', 'ceb'),
('Celtic languages', '', 'cel'),
('Chamorro', 'ch', 'cha'),
('Chibcha', '', 'chb'),
('Chechen', 'ce', 'che'),
('Chagatai', '', 'chg'),
('Chinese', 'zh', 'chi'),
('Chuukese', '', 'chk'),
('Mari', '', 'chm'),
('Chinook jargon', '', 'chn'),
('Choctaw', '', 'cho'),
('Chipewyan; Dene Suline', '', 'chp'),
('Cherokee', '', 'chr'),
('Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic', 'cu', 'chu'),
('Chuvash', 'cv', 'chv'),
('Cheyenne', '', 'chy'),
('Chamic languages', '', 'cmc'),
('Coptic', '', 'cop'),
('Cornish', 'kw', 'cor'),
('Corsican', 'co', 'cos'),
('Creoles and pidgins, English based', '', 'cpe'),
('Creoles and pidgins, French-based ', '', 'cpf'),
('Creoles and pidgins, Portuguese-based ', '', 'cpp'),
('Cree', 'cr', 'cre'),
('Crimean Tatar; Crimean Turkish', '', 'crh'),
('Creoles and pidgins ', '', 'crp'),
('Kashubian', '', 'csb'),
('Cushitic languages', '', 'cus'),
('Czech', 'cs', 'cze'),
('Dakota', '', 'dak'),
('Danish', 'da', 'dan'),
('Dargwa', '', 'dar'),
('Land Dayak languages', '', 'day'),
('Delaware', '', 'del'),
('Slave (Athapascan)', '', 'den'),
('Dogrib', '', 'dgr'),
('Dinka', '', 'din'),
('Divehi; Dhivehi; Maldivian', 'dv', 'div'),
('Dogri', '', 'doi'),
('Dravidian languages', '', 'dra'),
('Lower Sorbian', '', 'dsb'),
('Duala', '', 'dua'),
('Dutch, Middle (ca.1050-1350)', '', 'dum'),
('Dutch; Flemish', 'nl', 'dut'),
('Dyula', '', 'dyu'),
('Dzongkha', 'dz', 'dzo'),
('Efik', '', 'efi'),
('Egyptian (Ancient)', '', 'egy'),
('Ekajuk', '', 'eka'),
('Elamite', '', 'elx'),
('English', 'en', 'eng'),
('English, Middle (1100-1500)', '', 'enm'),
('Esperanto', 'eo', 'epo'),
('Estonian', 'et', 'est'),
('Ewe', 'ee', 'ewe'),
('Ewondo', '', 'ewo'),
('Fang', '', 'fan'),
('Faroese', 'fo', 'fao'),
('Fanti', '', 'fat'),
('Fijian', 'fj', 'fij'),
('Filipino; Pilipino', '', 'fil'),
('Finnish', 'fi', 'fin'),
('Finno-Ugrian languages', '', 'fiu'),
('Fon', '', 'fon'),
('French', 'fr', 'fre'),
('French, Middle (ca.1400-1600)', '', 'frm'),
('French, Old (842-ca.1400)', '', 'fro'),
('Northern Frisian', '', 'frr'),
('Eastern Frisian', '', 'frs'),
('Western Frisian', 'fy', 'fry'),
('Fulah', 'ff', 'ful'),
('Friulian', '', 'fur'),
('Ga', '', 'gaa'),
('Gayo', '', 'gay'),
('Gbaya', '', 'gba'),
('Germanic languages', '', 'gem'),
('Georgian', 'ka', 'geo'),
('German', 'de', 'ger'),
('Geez', '', 'gez'),
('Gilbertese', '', 'gil'),
('Gaelic; Scottish Gaelic', 'gd', 'gla'),
('Irish', 'ga', 'gle'),
('Galician', 'gl', 'glg'),
('Manx', 'gv', 'glv'),
('German, Middle High (ca.1050-1500)', '', 'gmh'),
('German, Old High (ca.750-1050)', '', 'goh'),
('Gondi', '', 'gon'),
('Gorontalo', '', 'gor'),
('Gothic', '', 'got'),
('Grebo', '', 'grb'),
('Greek, Ancient (to 1453)', '', 'grc'),
('Greek, Modern (1453-)', 'el', 'gre'),
('Guarani', 'gn', 'grn'),
('Swiss German; Alemannic; Alsatian', '', 'gsw'),
('Gujarati', 'gu', 'guj'),
('Gwich\'in', '', 'gwi'),
('Haida', '', 'hai'),
('Haitian; Haitian Creole', 'ht', 'hat'),
('Hausa', 'ha', 'hau'),
('Hawaiian', '', 'haw'),
('Hebrew', 'he', 'heb'),
('Herero', 'hz', 'her'),
('Hiligaynon', '', 'hil'),
('Himachali languages; Western Pahari languages', '', 'him'),
('Hindi', 'hi', 'hin'),
('Hittite', '', 'hit'),
('Hmong; Mong', '', 'hmn'),
('Hiri Motu', 'ho', 'hmo'),
('Croatian', 'hr', 'hrv'),
('Upper Sorbian', '', 'hsb'),
('Hungarian', 'hu', 'hun'),
('Hupa', '', 'hup'),
('Iban', '', 'iba'),
('Igbo', 'ig', 'ibo'),
('Icelandic', 'is', 'ice'),
('Ido', 'io', 'ido'),
('Sichuan Yi; Nuosu', 'ii', 'iii'),
('Ijo languages', '', 'ijo'),
('Inuktitut', 'iu', 'iku'),
('Interlingue; Occidental', 'ie', 'ile'),
('Iloko', '', 'ilo'),
('Interlingua (International Auxiliary Language Association)', 'ia', 'ina'),
('Indic languages', '', 'inc'),
('Indonesian', 'id', 'ind'),
('Indo-European languages', '', 'ine'),
('Ingush', '', 'inh'),
('Inupiaq', 'ik', 'ipk'),
('Iranian languages', '', 'ira'),
('Iroquoian languages', '', 'iro'),
('Italian', 'it', 'ita'),
('Javanese', 'jv', 'jav'),
('Lojban', '', 'jbo'),
('Japanese', 'ja', 'jpn'),
('Judeo-Persian', '', 'jpr'),
('Judeo-Arabic', '', 'jrb'),
('Kara-Kalpak', '', 'kaa'),
('Kabyle', '', 'kab'),
('Kachin; Jingpho', '', 'kac'),
('Kalaallisut; Greenlandic', 'kl', 'kal'),
('Kamba', '', 'kam'),
('Kannada', 'kn', 'kan'),
('Karen languages', '', 'kar'),
('Kashmiri', 'ks', 'kas'),
('Kanuri', 'kr', 'kau'),
('Kawi', '', 'kaw'),
('Kazakh', 'kk', 'kaz'),
('Kabardian', '', 'kbd'),
('Khasi', '', 'kha'),
('Khoisan languages', '', 'khi'),
('Central Khmer', 'km', 'khm'),
('Khotanese; Sakan', '', 'kho'),
('Kikuyu; Gikuyu', 'ki', 'kik'),
('Kinyarwanda', 'rw', 'kin'),
('Kirghiz; Kyrgyz', 'ky', 'kir'),
('Kimbundu', '', 'kmb'),
('Konkani', '', 'kok'),
('Komi', 'kv', 'kom'),
('Kongo', 'kg', 'kon'),
('Korean', 'ko', 'kor'),
('Kosraean', '', 'kos'),
('Kpelle', '', 'kpe'),
('Karachay-Balkar', '', 'krc'),
('Karelian', '', 'krl'),
('Kru languages', '', 'kro'),
('Kurukh', '', 'kru'),
('Kuanyama; Kwanyama', 'kj', 'kua'),
('Kumyk', '', 'kum'),
('Kurdish', 'ku', 'kur'),
('Kutenai', '', 'kut'),
('Ladino', '', 'lad'),
('Lahnda', '', 'lah'),
('Lamba', '', 'lam'),
('Lao', 'lo', 'lao'),
('Latin', 'la', 'lat'),
('Latvian', 'lv', 'lav'),
('Lezghian', '', 'lez'),
('Limburgan; Limburger; Limburgish', 'li', 'lim'),
('Lingala', 'ln', 'lin'),
('Lithuanian', 'lt', 'lit'),
('Mongo', '', 'lol'),
('Lozi', '', 'loz'),
('Luxembourgish; Letzeburgesch', 'lb', 'ltz'),
('Luba-Lulua', '', 'lua'),
('Luba-Katanga', 'lu', 'lub'),
('Ganda', 'lg', 'lug'),
('Luiseno', '', 'lui'),
('Lunda', '', 'lun'),
('Luo (Kenya and Tanzania)', '', 'luo'),
('Lushai', '', 'lus'),
('Macedonian', 'mk', 'mac'),
('Madurese', '', 'mad'),
('Magahi', '', 'mag'),
('Marshallese', 'mh', 'mah'),
('Maithili', '', 'mai'),
('Makasar', '', 'mak'),
('Malayalam', 'ml', 'mal'),
('Mandingo', '', 'man'),
('Maori', 'mi', 'mao'),
('Austronesian languages', '', 'map'),
('Marathi', 'mr', 'mar'),
('Masai', '', 'mas'),
('Malay', 'ms', 'may'),
('Moksha', '', 'mdf'),
('Mandar', '', 'mdr'),
('Mende', '', 'men'),
('Irish, Middle (900-1200)', '', 'mga'),
('Mi\'kmaq; Micmac', '', 'mic'),
('Minangkabau', '', 'min'),
('Uncoded languages', '', 'mis'),
('Mon-Khmer languages', '', 'mkh'),
('Malagasy', 'mg', 'mlg'),
('Maltese', 'mt', 'mlt'),
('Manchu', '', 'mnc'),
('Manipuri', '', 'mni'),
('Manobo languages', '', 'mno'),
('Mohawk', '', 'moh'),
('Mongolian', 'mn', 'mon'),
('Mossi', '', 'mos'),
('Multiple languages', '', 'mul'),
('Munda languages', '', 'mun'),
('Creek', '', 'mus'),
('Mirandese', '', 'mwl'),
('Marwari', '', 'mwr'),
('Mayan languages', '', 'myn'),
('Erzya', '', 'myv'),
('Nahuatl languages', '', 'nah'),
('North American Indian languages', '', 'nai'),
('Neapolitan', '', 'nap'),
('Nauru', 'na', 'nau'),
('Navajo; Navaho', 'nv', 'nav'),
('Ndebele, South; South Ndebele', 'nr', 'nbl'),
('Ndebele, North; North Ndebele', 'nd', 'nde'),
('Ndonga', 'ng', 'ndo'),
('Low German; Low Saxon; German, Low; Saxon, Low', '', 'nds'),
('Nepali', 'ne', 'nep'),
('Nepal Bhasa; Newari', '', 'new'),
('Nias', '', 'nia'),
('Niger-Kordofanian languages', '', 'nic'),
('Niuean', '', 'niu'),
('Norwegian Nynorsk; Nynorsk, Norwegian', 'nn', 'nno'),
('Bokmål, Norwegian; Norwegian Bokmål', 'nb', 'nob'),
('Nogai', '', 'nog'),
('Norse, Old', '', 'non'),
('Norwegian', 'no', 'nor'),
('N\'Ko', '', 'nqo'),
('Pedi; Sepedi; Northern Sotho', '', 'nso'),
('Nubian languages', '', 'nub'),
('Classical Newari; Old Newari; Classical Nepal Bhasa', '', 'nwc'),
('Chichewa; Chewa; Nyanja', 'ny', 'nya'),
('Nyamwezi', '', 'nym'),
('Nyankole', '', 'nyn'),
('Nyoro', '', 'nyo'),
('Nzima', '', 'nzi'),
('Occitan (post 1500); Provençal', 'oc', 'oci'),
('Ojibwa', 'oj', 'oji'),
('Oriya', 'or', 'ori'),
('Oromo', 'om', 'orm'),
('Osage', '', 'osa'),
('Ossetian; Ossetic', 'os', 'oss'),
('Turkish, Ottoman (1500-1928)', '', 'ota'),
('Otomian languages', '', 'oto'),
('Papuan languages', '', 'paa'),
('Pangasinan', '', 'pag'),
('Pahlavi', '', 'pal'),
('Pampanga; Kapampangan', '', 'pam'),
('Panjabi; Punjabi', 'pa', 'pan'),
('Papiamento', '', 'pap'),
('Palauan', '', 'pau'),
('Persian, Old (ca.600-400 B.C.)', '', 'peo'),
('Persian', 'fa', 'per'),
('Philippine languages', '', 'phi'),
('Phoenician', '', 'phn'),
('Pali', 'pi', 'pli'),
('Polish', 'pl', 'pol'),
('Pohnpeian', '', 'pon'),
('Portuguese', 'pt', 'por'),
('Prakrit languages', '', 'pra'),
('Provençal, Old (to 1500)', '', 'pro'),
('Pushto; Pashto', 'ps', 'pus'),
('Reserved for local use', '', 'qaa-qtz'),
('Quechua', 'qu', 'que'),
('Rajasthani', '', 'raj'),
('Rapanui', '', 'rap'),
('Rarotongan; Cook Islands Maori', '', 'rar'),
('Romance languages', '', 'roa'),
('Romansh', 'rm', 'roh'),
('Romany', '', 'rom'),
('Romanian; Moldavian; Moldovan', 'ro', 'rum'),
('Rundi', 'rn', 'run'),
('Aromanian; Arumanian; Macedo-Romanian', '', 'rup'),
('Russian', 'ru', 'rus'),
('Sandawe', '', 'sad'),
('Sango', 'sg', 'sag'),
('Yakut', '', 'sah'),
('South American Indian (Other)', '', 'sai'),
('Salishan languages', '', 'sal'),
('Samaritan Aramaic', '', 'sam'),
('Sanskrit', 'sa', 'san'),
('Sasak', '', 'sas'),
('Santali', '', 'sat'),
('Sicilian', '', 'scn'),
('Scots', '', 'sco'),
('Selkup', '', 'sel'),
('Semitic languages', '', 'sem'),
('Irish, Old (to 900)', '', 'sga'),
('Sign Languages', '', 'sgn'),
('Shan', '', 'shn'),
('Sidamo', '', 'sid'),
('Sinhala; Sinhalese', 'si', 'sin'),
('Siouan languages', '', 'sio'),
('Sino-Tibetan languages', '', 'sit'),
('Slavic languages', '', 'sla'),
('Slovak', 'sk', 'slo'),
('Slovenian', 'sl', 'slv'),
('Southern Sami', '', 'sma'),
('Northern Sami', 'se', 'sme'),
('Sami languages', '', 'smi'),
('Lule Sami', '', 'smj'),
('Inari Sami', '', 'smn'),
('Samoan', 'sm', 'smo'),
('Skolt Sami', '', 'sms'),
('Shona', 'sn', 'sna'),
('Sindhi', 'sd', 'snd'),
('Soninke', '', 'snk'),
('Sogdian', '', 'sog'),
('Somali', 'so', 'som'),
('Songhai languages', '', 'son'),
('Sotho, Southern', 'st', 'sot'),
('Spanish; Castilian', 'es', 'spa'),
('Sardinian', 'sc', 'srd'),
('Sranan Tongo', '', 'srn'),
('Serbian', 'sr', 'srp'),
('Serer', '', 'srr'),
('Nilo-Saharan languages', '', 'ssa'),
('Swati', 'ss', 'ssw'),
('Sukuma', '', 'suk'),
('Sundanese', 'su', 'sun'),
('Susu', '', 'sus'),
('Sumerian', '', 'sux'),
('Swahili', 'sw', 'swa'),
('Swedish', 'sv', 'swe'),
('Classical Syriac', '', 'syc'),
('Syriac', '', 'syr'),
('Tahitian', 'ty', 'tah'),
('Tai languages', '', 'tai'),
('Tamil', 'ta', 'tam'),
('Tatar', 'tt', 'tat'),
('Telugu', 'te', 'tel'),
('Timne', '', 'tem'),
('Tereno', '', 'ter'),
('Tetum', '', 'tet'),
('Tajik', 'tg', 'tgk'),
('Tagalog', 'tl', 'tgl'),
('Thai', 'th', 'tha'),
('Tibetan', 'bo', 'tib'),
('Tigre', '', 'tig'),
('Tigrinya', 'ti', 'tir'),
('Tiv', '', 'tiv'),
('Tokelau', '', 'tkl'),
('Klingon; tlhIngan-Hol', '', 'tlh'),
('Tlingit', '', 'tli'),
('Tamashek', '', 'tmh'),
('Tonga (Nyasa)', '', 'tog'),
('Tonga (Tonga Islands)', 'to', 'ton'),
('Tok Pisin', '', 'tpi'),
('Tsimshian', '', 'tsi'),
('Tswana', 'tn', 'tsn'),
('Tsonga', 'ts', 'tso'),
('Turkmen', 'tk', 'tuk'),
('Tumbuka', '', 'tum'),
('Tupi languages', '', 'tup'),
('Turkish', 'tr', 'tur'),
('Altaic languages', '', 'tut'),
('Tuvalu', '', 'tvl'),
('Twi', 'tw', 'twi'),
('Tuvinian', '', 'tyv'),
('Udmurt', '', 'udm'),
('Ugaritic', '', 'uga'),
('Uighur; Uyghur', 'ug', 'uig'),
('Ukrainian', 'uk', 'ukr'),
('Umbundu', '', 'umb'),
('Undetermined', '', 'und'),
('Urdu', 'ur', 'urd'),
('Uzbek', 'uz', 'uzb'),
('Vai', '', 'vai'),
('Venda', 've', 'ven'),
('Vietnamese', 'vi', 'vie'),
('Volapük', 'vo', 'vol'),
('Votic', '', 'vot'),
('Wakashan languages', '', 'wak'),
('Walamo', '', 'wal'),
('Waray', '', 'war'),
('Washo', '', 'was'),
('Welsh', 'cy', 'wel'),
('Sorbian languages', '', 'wen'),
('Walloon', 'wa', 'wln'),
('Wolof', 'wo', 'wol'),
('Kalmyk; Oirat', '', 'xal'),
('Xhosa', 'xh', 'xho'),
('Yao', '', 'yao'),
('Yapese', '', 'yap'),
('Yiddish', 'yi', 'yid'),
('Yoruba', 'yo', 'yor'),
('Yupik languages', '', 'ypk'),
('Zapotec', '', 'zap'),
('Blissymbols; Blissymbolics; Bliss', '', 'zbl'),
('Zenaga', '', 'zen'),
('Zhuang; Chuang', 'za', 'zha'),
('Zande languages', '', 'znd'),
('Zulu', 'zu', 'zul'),
('Zuni', '', 'zun'),
('No linguistic content; Not applicable', '', 'zxx'),
('Zaza; Dimili; Dimli; Kirdki; Kirmanjki; Zazaki', '', 'zza'),
('Brazilian', 'po', 'pob')]
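# Minimal usage sketch (illustrative only):
if __name__ == '__main__':
    print(convert_language('English', 1))          # -> 'en' (format guessed)
    print(convert_language('fre', 0, from_iso=2))  # -> 'French'
    print(len(list_languages(2)))                  # count of ISO-639-2 codes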
|
import sys
BLOCK_SIZE = 8000
number = "{0}: ".format(sys.argv[1]) if len(sys.argv) == 2 else ""
stdin = sys.stdin.buffer.read()
lines = stdin.decode("utf8", "ignore").splitlines()
word = lines[0].rstrip()
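# Usage sketch (the protocol is inferred from the reads above; the script
# name below is hypothetical): the first stdin line is the search word, each
# remaining line names a file to search, and the optional argv[1] is an
# identifier echoed before each match, e.g.
#
#   $ printf 'needle\na.txt\nb.txt\n' | python grepword.py 3
#   3: a.txt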
for filename in lines[1:]:
filename = filename.rstrip()
previous = ""
try:
with open(filename, "rb") as fh:
while True:
                data = fh.read(BLOCK_SIZE)
                if not data:
                    break
                current = data.decode("utf8", "ignore")
                # check this block, plus the seam where the word may
                # straddle the boundary with the previous block
                if (word in current or
                        word in previous[-len(word):] +
                        current[:len(word)]):
                    print("{0}{1}".format(number, filename))
                    break
                if len(data) != BLOCK_SIZE:  # compare bytes read, not the
                    break                    # (possibly shorter) decoded text
                previous = current
except EnvironmentError as err:
print("{0}{1}".format(number, err))
|
import logging
import argparse
import os
import random
import sys
import traceback
import yaml
import numpy as np
from numpy.linalg import norm
from lerot.query import load_queries
from lerot.utils import get_class
def run(run_id, experimenter, args):
logging.info("run %d starts" % run_id)
# initialize log file
log_file = os.path.join(args["output_dir"], "%s-%d.json" % (
args["output_prefix"], run_id))
log_fh = open(log_file, "w")
# Pass the run_id number such that we know which pair of rankers
# we should be interleaving (model data).
if args.get("num_random_draws") is not None:
args["ranker_pair_idx"] = random.choice(xrange(args["num_runs"]))
else:
args["ranker_pair_idx"] = run_id
try:
# initialize experiment
experiment = experimenter(log_fh, args)
# run experiment
experiment.run()
except Exception as e:
traceback.print_exc()
        logging.error('Error occurred %s: %s' % (type(e), e))
os.remove(log_file)
if __name__ == "__main__":
# parse arguments
parser = argparse.ArgumentParser(
prog="python synthetic-comparison-experiment.py",
description="""
Construct and run a comparison experiment with synthetic data.
Provide either the name of a config file from which the experiment
configuration is read, or provide all arguments listed under Command
line. If both are provided the config file is ignored.""",
usage="%(prog)s FILE | DETAILS")
# option 1: use a config file
file_group = parser.add_argument_group("FILE")
file_group.add_argument("-f", "--file", help="Filename of the config file "
"from which the experiment details should be read.")
# option 2: specify all experiment details as arguments
detail_group = parser.add_argument_group("DETAILS")
detail_group.add_argument("--verbose", type=bool, default=False,
help="If set, print more debug information to stderr.")
detail_group.add_argument("--num_runs", type=int,
help="Number of runs (how many times to repeat the experiment).")
detail_group.add_argument("--run_start_id", type=int, default=0,
help="Starting id for numbering run files.")
detail_group.add_argument("--processes", type=int,
help="Number of processes if the experiments are to be run in parallel.")
detail_group.add_argument("--num_repeat_interleaving", type=int, default=1,
help="Number of times we repeat the interleaving procedure, "
"compute outcomes and click metrics.")
detail_group.add_argument("--num_random_draws",
type=int, default=None,
help="If set, rankers and queries (one query per ranker pair) are drawn randomly. "
"This option only makes sense for model data.")
detail_group.add_argument("--system_comparison", default="none",
help="Can be 'none', 'pareto' or 'evaluation.MetricName'. "
"This is used to identify which system is better in the pair. "
"If this flag is set to 'pareto', "
"we will only interleave pair of rankers when "
"one ranker parate-dominates another "
"(according to --pareto_um_class).")
detail_group.add_argument("--user_model",
help="Class implementing a user model.")
detail_group.add_argument("--user_model_args",
help="Arguments for initializing the user model.")
detail_group.add_argument("--pareto_um_class",
help="Class implementing a user model for pareto dominance "
"(only examination probabilities are used).")
detail_group.add_argument("--pareto_um_args",
help="Arguments for initializing the user model for pareto dominance.")
detail_group.add_argument("--evaluation_methods", nargs="*",
help="List of zero or more evaluation methods to run.")
detail_group.add_argument("--evaluation_methods_args", nargs="*",
help="Arguments for the evaluation methods (one entry per method,"
" in the same order).")
detail_group.add_argument("--offline_metrics",
type=str, default="",
help="Comma-separated list of classes implementing offline metrics. "
"The class has to have a get_value(self, labels, cutoff) method. "
"The value of the corresponding offline metric will be computed "
"for the original A and B rankings and the interleaved list L "
"and added to the result JSON.")
detail_group.add_argument("--compute_online_metrics",
type=bool, default=False,
help="If set, the online metrics will be computed.")
detail_group.add_argument("--compute_interleaved_metrics",
type=bool, default=False,
help="If set, the metrics of the interleaved system will be computed "
"and compared to the original systems A and B. The degradation is "
"aslso computed as measured by offline or online quality metrics.")
# The retrieval system maintains ranking functions, accepts queries and
# generates result lists, and in return receives user clicks to learn from.
detail_group.add_argument("-o", "--output_dir",
help="(Empty) directory for storing output generated by this"
" experiment. Subdirectory for different folds will be generated"
"automatically.")
detail_group.add_argument("--output_prefix",
help="Prefix to be added to output filenames, e.g., the name of the "
"data set, fold, etc. Output files will be stored as OUTPUT_DIR/"
"PREFIX-RUN_ID.json")
detail_group.add_argument("--output_dir_overwrite",
type=bool, default=False,
help="Set to true to overwrite existing output directories. False by "
"default to prevent accidentally deleting previous results.")
detail_group.add_argument("--experimenter", help="Experimenter class name.")
detail_group.add_argument("--rankings",
help="Method to generate input rankings. E.g., 'model' or 'synthetic'.")
# Settings for model rankings.
# These args are only looked at if '--rankings' is set to 'model'.
detail_group.add_argument("--run_dir",
help="Directory from which to load the runs (TREC style).")
detail_group.add_argument("--qrel_file",
help="File from which to load the qrels (TREC style).")
detail_group.add_argument("--Iprob_file",
help="File with vertical orientation (intent probabilites) values.")
detail_group.add_argument("--vert_map_file",
help="Mapping from the vertical names to their ids.")
detail_group.add_argument("--ideal_page_as_rbp",
help="Path to a file containing ideal aggregated search page (for AS_RBP)")
# Synthetic rankings settings (only looked at if --rankings is set to 'synthetic')
detail_group.add_argument("--result_length", type=int,
help="Length of the result lists to show to users for each query. "
"Excludes vertical documents when using 'fixed' vertical placement.")
detail_group.add_argument("--num_relevant", # default="random",
help="Number of relevant documents in the synthetic document lists.")
detail_group.add_argument("--verticals")
detail_group.add_argument("--vertical_posmethod")
detail_group.add_argument("--vertical_vertrel")
detail_group.add_argument("--vertical_blocksize", type=int)
detail_group.add_argument("--vertical_placement")
# run the parser
args = vars(parser.parse_args())
# Overwrite arguments from the config file.
if args["file"]:
with open(args["file"]) as config_file:
            file_args = yaml.safe_load(config_file)  # configs need only plain YAML types
for arg, value in file_args.iteritems():
if arg not in args:
raise ValueError(
"Unknown argument: %s, run with -h for details." % arg)
args[arg] = value
#logging.basicConfig(filename=os.path.join(args["output_dir"],
#'experiment.log'), level=logging.INFO)
logging.basicConfig(format='%(asctime)s %(module)s: %(message)s',
level=logging.INFO)
#logging.info("Arguments: %s" % args)
# locate or create directory for the current fold
if not os.path.exists(args["output_dir"]):
os.makedirs(args["output_dir"])
elif not(args["output_dir_overwrite"]) and \
os.listdir(args["output_dir"]):
# make sure the output directory is empty
raise Exception("Output dir %s is not an empty directory. "
"Please use a different directory, or move contents out of the way." %
args["output_dir"])
config_bk = os.path.join(args["output_dir"], "config_bk.yml")
logging.info("Backing up configuration to: %s" % config_bk)
config_bk_file = open(config_bk, "w")
yaml.dump(args, config_bk_file, default_flow_style=False)
config_bk_file.close()
# initialize and run the experiment num_run times
run_start_id = args["run_start_id"]
num_runs = args["num_runs"]
if args.get("num_random_draws") is not None:
# Redefine num_runs, and use args["num_runs"] only when drawing
# pair of rankers in the run() function above.
num_runs = args["num_random_draws"]
assert run_start_id == 0, "Conflicting options"
experimenter = get_class(args["experimenter"])
# set the random seed
random.seed(42)
if "processes" in args and args["processes"] > 1:
from multiprocessing import Pool
pool = Pool(processes=args["processes"])
for run_id in range(run_start_id, run_start_id + num_runs):
pool.apply_async(run, (run_id, experimenter, args,))
pool.close()
pool.join()
else:
for run_id in range(run_start_id, run_start_id + num_runs):
run(run_id, experimenter, args)
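# A hypothetical minimal YAML config for the --file option; the keys mirror
# the DETAILS arguments above, and all class names and values below are
# illustrative only:
#
#   num_runs: 10
#   output_dir: outdir
#   output_prefix: synthetic
#   experimenter: experiment.SyntheticComparisonExperiment
#   rankings: synthetic
#   result_length: 10
#   user_model: environment.CascadeUserModel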
|
from __future__ import absolute_import, division, print_function
import getopt
import os.path
import inspect
import six
import configman as cm
from configman import ConfigurationManager, Namespace
from configman import ConfigFileFutureProxy, environment, command_line
from configman.converters import class_converter
def main(app_object=None):
if isinstance(app_object, six.string_types):
app_object = class_converter(app_object)
# the only config parameter is a special one that refers to a class or
# module that defines an application. In order to qualify, a class must
# have a constructor that accepts a DotDict derivative as the sole
# input parameter. It must also have a 'main' function that accepts no
# parameters. For a module to be acceptable, it must have a main
# function that accepts a DotDict derivative as its input parameter.
app_definition = Namespace()
app_definition.add_option('application',
doc='the fully qualified module or class of the '
'application',
default=app_object,
from_string_converter=class_converter
)
app_name = getattr(app_object, 'app_name', 'unknown')
app_version = getattr(app_object, 'app_version', '0.0')
app_description = getattr(app_object, 'app_description', 'no idea')
# create an iterable collection of value sources
    # the order is important as these will supply values for the options
    # defined in app_definition. The values will be overlain in turn.
# First the os.environ values will be applied. Then any values from an ini
# file parsed by getopt. Finally any values supplied on the command line
# will be applied.
value_sources = (ConfigFileFutureProxy, # alias for allowing the user
# to specify a config file on
# the command line
environment, # alias for os.environ
command_line) # alias for getopt
# set up the manager with the definitions and values
# it isn't necessary to provide the app_name because the
    # app_object passed in or loaded by the ConfigurationManager will already
# have that information.
config_manager = ConfigurationManager(app_definition,
value_sources,
app_name=app_name,
app_version=app_version,
app_description=app_description,
)
config = config_manager.get_config()
app_object = config.admin.application
if isinstance(app_object, type):
# invocation of the app if the app_object was a class
instance = app_object(config)
instance.main()
elif inspect.ismodule(app_object):
# invocation of the app if the app_object was a module
app_object.main(config)
elif inspect.isfunction(app_object):
# invocation of the app if the app_object was a function
app_object(config)
if __name__ == '__main__':
main()
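# Illustrative sketch of a conforming application (all names hypothetical):
# a class qualifies when its constructor accepts the configuration DotDict
# as its sole argument and it exposes a no-argument main().
#
#   class MyApp(object):
#       app_name = 'myapp'
#       app_version = '1.0'
#       app_description = 'demo application'
#
#       def __init__(self, config):
#           self.config = config
#
#       def main(self):
#           print(self.config.admin.application)
#
#   main(MyApp)  # or main('mypackage.MyApp') to load by dotted name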
|
{
'name': 'Migration for purchase payment',
'version': '8.0.1.0.0',
'category': 'Tools',
'author': 'Serv. Tecnol. Avanzados - Pedro M. Baeza, '
'Antiun Ingeniería S.L.',
'website': 'http://www.antiun.com',
'depends': [
'account_payment_extension',
'purchase',
],
'data': [
],
'installable': True,
}
|
"""
Unit tests for instructor_dashboard.py.
"""
import ddt
import datetime
from mock import patch
from nose.plugins.attrib import attr
from pytz import UTC
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from django.test.utils import override_settings
from edxmako.shortcuts import render_to_response
from courseware.tabs import get_course_tab_list
from courseware.tests.factories import UserFactory, StudentModuleFactory
from courseware.tests.helpers import LoginEnrollmentTestCase
from instructor.views.gradebook_api import calculate_page_info
from common.test.utils import XssTestMixin
from student.tests.factories import AdminFactory, CourseEnrollmentFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, TEST_DATA_SPLIT_MODULESTORE
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls
from shoppingcart.models import PaidCourseRegistration, Order, CourseRegCodeItem
from course_modes.models import CourseMode
from student.roles import CourseFinanceAdminRole
from student.models import CourseEnrollment
def intercept_renderer(path, context):
"""
Intercept calls to `render_to_response` and attach the context dict to the
response for examination in unit tests.
"""
# I think Django already does this for you in their TestClient, except
# we're bypassing that by using edxmako. Probably edxmako should be
# integrated better with Django's rendering and event system.
response = render_to_response(path, context)
response.mako_context = context
response.mako_template = path
return response
@attr('shard_3')
@ddt.ddt
class TestInstructorDashboard(ModuleStoreTestCase, LoginEnrollmentTestCase, XssTestMixin):
"""
Tests for the instructor dashboard (not legacy).
"""
def setUp(self):
"""
Set up tests
"""
super(TestInstructorDashboard, self).setUp()
self.course = CourseFactory.create(
grading_policy={"GRADE_CUTOFFS": {"A": 0.75, "B": 0.63, "C": 0.57, "D": 0.5}},
display_name='<script>alert("XSS")</script>'
)
self.course_mode = CourseMode(
course_id=self.course.id,
mode_slug=CourseMode.DEFAULT_MODE_SLUG,
mode_display_name=CourseMode.DEFAULT_MODE.name,
min_price=40
)
self.course_mode.save()
# Create instructor account
self.instructor = AdminFactory.create()
self.client.login(username=self.instructor.username, password="test")
# URL for instructor dash
self.url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
def get_dashboard_enrollment_message(self):
"""
Returns expected dashboard enrollment message with link to Insights.
"""
return 'Enrollment data is now available in <a href="http://example.com/courses/{}" ' \
'target="_blank">Example</a>.'.format(unicode(self.course.id))
def get_dashboard_analytics_message(self):
"""
Returns expected dashboard demographic message with link to Insights.
"""
return 'For analytics about your course, go to <a href="http://example.com/courses/{}" ' \
'target="_blank">Example</a>.'.format(unicode(self.course.id))
def test_instructor_tab(self):
"""
Verify that the instructor tab appears for staff only.
"""
def has_instructor_tab(user, course):
"""Returns true if the "Instructor" tab is shown."""
request = RequestFactory().request()
request.user = user
tabs = get_course_tab_list(request, course)
return len([tab for tab in tabs if tab.name == 'Instructor']) == 1
self.assertTrue(has_instructor_tab(self.instructor, self.course))
student = UserFactory.create()
self.assertFalse(has_instructor_tab(student, self.course))
def test_default_currency_in_the_html_response(self):
"""
Test that checks the default currency_symbol ($) in the response
"""
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course.id)
response = self.client.get(self.url)
self.assertTrue('${amount}'.format(amount=total_amount) in response.content)
def test_course_name_xss(self):
"""Test that the instructor dashboard correctly escapes course names
with script tags.
"""
response = self.client.get(self.url)
self.assert_no_xss(response, '<script>alert("XSS")</script>')
@override_settings(PAID_COURSE_REGISTRATION_CURRENCY=['PKR', 'Rs'])
def test_override_currency_settings_in_the_html_response(self):
"""
        Test that the overridden currency symbol (Rs) appears in the response
"""
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course.id)
response = self.client.get(self.url)
self.assertIn('{currency}{amount}'.format(currency='Rs', amount=total_amount), response.content)
@patch.dict(settings.FEATURES, {'DISPLAY_ANALYTICS_ENROLLMENTS': False})
@override_settings(ANALYTICS_DASHBOARD_URL='')
def test_no_enrollments(self):
"""
Test enrollment section is hidden.
"""
response = self.client.get(self.url)
# no enrollment information should be visible
self.assertFalse('<h2>Enrollment Information</h2>' in response.content)
@patch.dict(settings.FEATURES, {'DISPLAY_ANALYTICS_ENROLLMENTS': True})
@override_settings(ANALYTICS_DASHBOARD_URL='')
def test_show_enrollments_data(self):
"""
Test enrollment data is shown.
"""
response = self.client.get(self.url)
# enrollment information visible
self.assertTrue('<h2>Enrollment Information</h2>' in response.content)
self.assertTrue('<td>Verified</td>' in response.content)
self.assertTrue('<td>Audit</td>' in response.content)
self.assertTrue('<td>Honor</td>' in response.content)
self.assertTrue('<td>Professional</td>' in response.content)
# dashboard link hidden
self.assertFalse(self.get_dashboard_enrollment_message() in response.content)
@patch.dict(settings.FEATURES, {'DISPLAY_ANALYTICS_ENROLLMENTS': True})
@override_settings(ANALYTICS_DASHBOARD_URL='')
def test_show_enrollment_data_for_prof_ed(self):
# Create both "professional" (meaning professional + verification)
# and "no-id-professional" (meaning professional without verification)
# These should be aggregated for display purposes.
users = [UserFactory() for _ in range(2)]
CourseEnrollment.enroll(users[0], self.course.id, mode="professional")
CourseEnrollment.enroll(users[1], self.course.id, mode="no-id-professional")
response = self.client.get(self.url)
# Check that the number of professional enrollments is two
self.assertContains(response, "<td>Professional</td><td>2</td>")
@patch.dict(settings.FEATURES, {'DISPLAY_ANALYTICS_ENROLLMENTS': False})
@override_settings(ANALYTICS_DASHBOARD_URL='http://example.com')
@override_settings(ANALYTICS_DASHBOARD_NAME='Example')
def test_show_dashboard_enrollment_message(self):
"""
Test enrollment dashboard message is shown and data is hidden.
"""
response = self.client.get(self.url)
# enrollment information hidden
self.assertFalse('<td>Verified</td>' in response.content)
self.assertFalse('<td>Audit</td>' in response.content)
self.assertFalse('<td>Honor</td>' in response.content)
self.assertFalse('<td>Professional</td>' in response.content)
# link to dashboard shown
expected_message = self.get_dashboard_enrollment_message()
self.assertTrue(expected_message in response.content)
@override_settings(ANALYTICS_DASHBOARD_URL='')
@override_settings(ANALYTICS_DASHBOARD_NAME='')
def test_dashboard_analytics_tab_not_shown(self):
"""
Test dashboard analytics tab isn't shown if insights isn't configured.
"""
response = self.client.get(self.url)
analytics_section = '<li class="nav-item"><a href="" data-section="instructor_analytics">Analytics</a></li>'
self.assertFalse(analytics_section in response.content)
@override_settings(ANALYTICS_DASHBOARD_URL='http://example.com')
@override_settings(ANALYTICS_DASHBOARD_NAME='Example')
def test_dashboard_analytics_points_at_insights(self):
"""
Test analytics dashboard message is shown
"""
response = self.client.get(self.url)
analytics_section = '<li class="nav-item"><a href="" data-section="instructor_analytics">Analytics</a></li>'
self.assertTrue(analytics_section in response.content)
# link to dashboard shown
expected_message = self.get_dashboard_analytics_message()
self.assertTrue(expected_message in response.content)
def add_course_to_user_cart(self, cart, course_key):
"""
        Add the course to the user's cart and return the registration item.
"""
reg_item = PaidCourseRegistration.add_to_order(cart, course_key)
return reg_item
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
def test_total_credit_cart_sales_amount(self):
"""
Test to check the total amount for all the credit card purchases.
"""
student = UserFactory.create()
self.client.login(username=student.username, password="test")
student_cart = Order.get_cart_for_user(student)
item = self.add_course_to_user_cart(student_cart, self.course.id)
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': 4})
self.assertEqual(resp.status_code, 200)
student_cart.purchase()
self.client.login(username=self.instructor.username, password="test")
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course.id)
bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(self.course.id)
total_amount = single_purchase_total + bulk_purchase_total
response = self.client.get(self.url)
self.assertIn('{currency}{amount}'.format(currency='$', amount=total_amount), response.content)
@ddt.data(
(True, True, True),
(True, False, False),
(True, None, False),
(False, True, False),
(False, False, False),
(False, None, False),
)
@ddt.unpack
def test_ccx_coaches_option_on_admin_list_management_instructor(
self, ccx_feature_flag, enable_ccx, expected_result
):
"""
Test whether the "CCX Coaches" option is visible or hidden depending on the value of course.enable_ccx.
"""
with patch.dict(settings.FEATURES, {'CUSTOM_COURSES_EDX': ccx_feature_flag}):
self.course.enable_ccx = enable_ccx
self.store.update_item(self.course, self.instructor.id)
response = self.client.get(self.url)
            self.assertEqual(
expected_result,
'CCX Coaches are able to create their own Custom Courses based on this course' in response.content
)
def test_grade_cutoffs(self):
"""
Verify that grade cutoffs are displayed in the correct order.
"""
response = self.client.get(self.url)
self.assertIn('D: 0.5, C: 0.57, B: 0.63, A: 0.75', response.content)
@patch('instructor.views.gradebook_api.MAX_STUDENTS_PER_PAGE_GRADE_BOOK', 2)
def test_calculate_page_info(self):
page = calculate_page_info(offset=0, total_students=2)
self.assertEqual(page["offset"], 0)
self.assertEqual(page["page_num"], 1)
self.assertEqual(page["next_offset"], None)
self.assertEqual(page["previous_offset"], None)
self.assertEqual(page["total_pages"], 1)
@patch('instructor.views.gradebook_api.render_to_response', intercept_renderer)
@patch('instructor.views.gradebook_api.MAX_STUDENTS_PER_PAGE_GRADE_BOOK', 1)
def test_spoc_gradebook_pages(self):
for i in xrange(2):
username = "user_%d" % i
student = UserFactory.create(username=username)
CourseEnrollmentFactory.create(user=student, course_id=self.course.id)
url = reverse(
'spoc_gradebook',
kwargs={'course_id': self.course.id}
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# Max number of student per page is one. Patched setting MAX_STUDENTS_PER_PAGE_GRADE_BOOK = 1
self.assertEqual(len(response.mako_context['students']), 1) # pylint: disable=no-member
@ddt.ddt
class TestInstructorDashboardPerformance(ModuleStoreTestCase, LoginEnrollmentTestCase, XssTestMixin):
"""
Tests for the instructor dashboard from the performance point of view.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
def setUp(self):
"""
Set up tests
"""
super(TestInstructorDashboardPerformance, self).setUp()
self.course = CourseFactory.create(
grading_policy={"GRADE_CUTOFFS": {"A": 0.75, "B": 0.63, "C": 0.57, "D": 0.5}},
display_name='<script>alert("XSS")</script>',
default_store=ModuleStoreEnum.Type.split
)
self.course_mode = CourseMode(
course_id=self.course.id,
mode_slug=CourseMode.DEFAULT_MODE_SLUG,
mode_display_name=CourseMode.DEFAULT_MODE.name,
min_price=40
)
self.course_mode.save()
# Create instructor account
self.instructor = AdminFactory.create()
self.client.login(username=self.instructor.username, password="test")
def test_spoc_gradebook_mongo_calls(self):
"""
Test that the MongoDB cache is used in API to return grades
"""
# prepare course structure
course = ItemFactory.create(
parent_location=self.course.location,
category="course",
display_name="Test course",
)
students = []
for i in xrange(20):
username = "user_%d" % i
student = UserFactory.create(username=username)
CourseEnrollmentFactory.create(user=student, course_id=self.course.id)
students.append(student)
chapter = ItemFactory.create(
parent=course,
category='chapter',
display_name="Chapter",
publish_item=True,
start=datetime.datetime(2015, 3, 1, tzinfo=UTC),
)
sequential = ItemFactory.create(
parent=chapter,
category='sequential',
display_name="Lesson",
publish_item=True,
start=datetime.datetime(2015, 3, 1, tzinfo=UTC),
metadata={'graded': True, 'format': 'Homework'},
)
vertical = ItemFactory.create(
parent=sequential,
category='vertical',
display_name='Subsection',
publish_item=True,
start=datetime.datetime(2015, 4, 1, tzinfo=UTC),
)
for i in xrange(10):
problem = ItemFactory.create(
category="problem",
parent=vertical,
display_name="A Problem Block %d" % i,
weight=1,
publish_item=False,
metadata={'rerandomize': 'always'},
)
for j in students:
grade = i % 2
StudentModuleFactory.create(
grade=grade,
max_grade=1,
student=j,
course_id=self.course.id,
module_state_key=problem.location
)
# check MongoDB calls count
url = reverse('spoc_gradebook', kwargs={'course_id': self.course.id})
with check_mongo_calls(8):
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
|
"""
The Masked Ball
"""
from ..utils import *
class TB_Pilot1:
"Mystery Pilot"
deathrattle = Summon(CONTROLLER, RandomMinion(cost=COST(SELF)))
tags = {GameTag.DEATHRATTLE: True}
|
"""
Template tags for displaying prices correctly.
Prefer these filters for rendering prices of products and basket lines
or the total price of a basket, since they take the price display options
of the current template context into account (see `PriceDisplayOptions`).
In particular, they convert prices to the correct taxness.
There is also a global context function `show_prices` which can be used
to render certain price container elements conditionally.
"""
import django_jinja
import jinja2
from shuup.core.utils.price_display import (
PriceDisplayFilter,
PriceDisplayOptions,
PricePercentPropertyFilter,
PricePropertyFilter,
PriceRangeDisplayFilter,
TotalPriceDisplayFilter,
)
price = PriceDisplayFilter("price")
base_price = PriceDisplayFilter("base_price")
base_unit_price = PriceDisplayFilter("base_unit_price")
discount_amount = PriceDisplayFilter("discount_amount")
discounted_unit_price = PriceDisplayFilter("discounted_unit_price")
unit_discount_amount = PriceDisplayFilter("unit_discount_amount")
is_discounted = PricePropertyFilter("is_discounted")
discount_percent = PricePercentPropertyFilter("discount_percent", "discount_rate")
tax_percent = PricePercentPropertyFilter("tax_percent", "tax_rate")
discount_rate = PricePropertyFilter("discount_rate")
tax_rate = PricePropertyFilter("tax_rate")
price_range = PriceRangeDisplayFilter("price_range")
total_price = TotalPriceDisplayFilter("total_price")
@django_jinja.library.global_function
@jinja2.contextfunction
def show_prices(context):
"""
Return true if price display options has show prices enabled.
:type context: jinja2.runtime.Context
"""
options = PriceDisplayOptions.from_context(context)
return options.show_prices
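# Illustrative Jinja2 usage (a sketch, not taken from the shuup docs): once
# the filters above are registered with the template engine under the names
# given to their constructors, a template could gate price markup on the
# display options:
#
#   {% if show_prices() %}
#     <span class="price">{{ product|price }}</span>
#   {% endif %}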
|
"""
This file imports the classes defined in the model folder.
"""
from . import model
|
"""
Support executing map reduce tasks.
"""
from __future__ import absolute_import
import gzip
import logging
import logging.config
import os
import StringIO
from hashlib import md5
import luigi
import luigi.contrib.hadoop
import luigi.task
from luigi import configuration
from edx.analytics.tasks.util.manifest import convert_to_manifest_input_if_necessary, remove_manifest_target_if_exists
from edx.analytics.tasks.util.url import get_target_from_url, url_path_join
log = logging.getLogger(__name__)
class MapReduceJobTaskMixin(object):
"""Defines arguments used by downstream tasks to pass to upstream MapReduceJobTask."""
mapreduce_engine = luigi.Parameter(
config_path={'section': 'map-reduce', 'name': 'engine'},
significant=False,
description='Name of the map reduce job engine to use. Use `hadoop` (the default) or `local`.',
)
# TODO: remove these parameters
input_format = luigi.Parameter(
default=None,
significant=False,
description='The input_format for Hadoop job to use. For example, when '
'running with manifest file, specify "oddjob.ManifestTextInputFormat" for input_format.',
)
lib_jar = luigi.ListParameter(
default=[],
significant=False,
description='A list of library jars that the Hadoop job can make use of.',
)
n_reduce_tasks = luigi.Parameter(
default=25,
significant=False,
description='Number of reducer tasks to use in upstream tasks. Scale this to your cluster size.',
)
remote_log_level = luigi.Parameter(
config_path={'section': 'map-reduce', 'name': 'remote_log_level'},
significant=False,
description='Level of logging for the map reduce tasks.',
)
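# Example luigi client configuration consumed by the parameters above (the
# section and option names come from the config_path declarations; the
# values are illustrative):
#
#   [map-reduce]
#   engine = hadoop
#   remote_log_level = INFO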
class MapReduceJobTask(MapReduceJobTaskMixin, luigi.contrib.hadoop.JobTask):
"""
Execute a map reduce job. Typically using Hadoop, but can execute the
job in process as well.
"""
def init_hadoop(self):
log_format = '%(asctime)s %(levelname)s %(process)d [%(name)s] %(filename)s:%(lineno)d - %(message)s'
logging.config.dictConfig(
{
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
'format': log_format,
},
},
'handlers': {
'stderr': {
'formatter': 'default',
'class': 'logging.StreamHandler',
},
},
'root': {
'handlers': ['stderr'],
'level': self.remote_log_level.upper(), # pylint: disable=no-member
},
}
)
return super(MapReduceJobTask, self).init_hadoop()
def job_runner(self):
        # Import this lazily: this module is also loaded on Hadoop worker
        # nodes, where stevedore will not be available.
from stevedore import ExtensionManager
extension_manager = ExtensionManager('mapreduce.engine')
try:
engine_class = extension_manager[self.mapreduce_engine].plugin
except KeyError:
raise KeyError('A map reduce engine must be specified in order to run MapReduceJobTasks')
if issubclass(engine_class, MapReduceJobRunner):
engine_kwargs = self._get_engine_parameters_from_targets()
return engine_class(**engine_kwargs)
else:
return engine_class()
def _get_engine_parameters_from_targets(self):
"""
Determine the set of job parameters that should be used to process the input.
        Some types of input may not be simple files that Hadoop can process natively out of the box; they might require
special handling by custom input formats. Allow dynamic loading of input formats and the jars that contain them
by setting attributes on the input target.
"""
lib_jar = list(self.lib_jar)
input_format = self.input_format
for input_target in luigi.task.flatten(self.input_hadoop()):
if hasattr(input_target, 'lib_jar'):
lib_jar.extend(input_target.lib_jar)
if hasattr(input_target, 'input_format') and input_target.input_format is not None:
if input_format is not None and input_target.input_format != input_format:
raise RuntimeError('Multiple distinct input formats specified on input targets.')
input_format = input_target.input_format
return {
'libjars_in_hdfs': lib_jar,
'input_format': input_format,
}
@property
def manifest_id(self):
return str(hash(self)).replace('-', 'n')
def input_hadoop(self):
return convert_to_manifest_input_if_necessary(self.manifest_id, super(MapReduceJobTask, self).input_hadoop())
def remove_manifest_target_if_exists(self):
return remove_manifest_target_if_exists(self.manifest_id)
class MapReduceJobRunner(luigi.contrib.hadoop.HadoopJobRunner):
"""
Support more customization of the streaming command.
Args:
libjars_in_hdfs (list): An optional list of library jars that the hadoop job can make use of.
input_format (str): An optional full class name of a hadoop input format to use.
"""
def __init__(self, libjars_in_hdfs=None, input_format=None):
libjars_in_hdfs = libjars_in_hdfs or []
config = configuration.get_config()
streaming_jar = config.get('hadoop', 'streaming-jar', '/tmp/hadoop-streaming.jar')
if config.has_section('job-conf'):
job_confs = dict(config.items('job-conf'))
else:
job_confs = {}
super(MapReduceJobRunner, self).__init__(
streaming_jar,
input_format=input_format,
libjars_in_hdfs=libjars_in_hdfs,
jobconfs=job_confs,
)
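# For reference, the luigi configuration consumed above might look like this
# (the section and option names follow the code; the values are hypothetical):
#
#   [hadoop]
#   streaming-jar = /usr/lib/hadoop-mapreduce/hadoop-streaming.jar
#
#   [job-conf]
#   mapreduce.job.queuename = analytics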
class EmulatedMapReduceJobRunner(luigi.contrib.hadoop.JobRunner):
"""
Execute map reduce tasks in process on the machine that is running luigi.
This is a modified version of luigi.contrib.hadoop.LocalJobRunner. The key differences are:
* It gracefully handles .gz input files, decompressing them and streaming them directly to the mapper. This mirrors
the behavior of hadoop's default file input format. Note this only works for files that support `tell()` and
`seek()` since those methods are used by the gzip decompression library.
* It detects ".manifest" files and assumes that they are in fact just a file that contains paths to the real files
that should be processed by the task. It makes use of this information to "do the right thing". This mirrors the
behavior of a manifest input format in hadoop.
* It sets the "map_input_file" environment variable when running the mapper just like the hadoop streaming library.
Other than that it should behave identically to LocalJobRunner.
"""
def group(self, input):
output = StringIO.StringIO()
lines = []
for i, line in enumerate(input):
parts = line.rstrip('\n').split('\t')
blob = md5(str(i)).hexdigest() # pseudo-random blob to make sure the input isn't sorted
lines.append((parts[:-1], blob, line))
for _, _, line in sorted(lines):
output.write(line)
output.seek(0)
return output
def run_job(self, job):
job.init_hadoop()
job.init_mapper()
map_output = StringIO.StringIO()
input_targets = luigi.task.flatten(job.input_hadoop())
for input_target in input_targets:
# if file is a directory, then assume that it's Hadoop output,
# and actually loop through its contents:
if os.path.isdir(input_target.path):
filenames = os.listdir(input_target.path)
for filename in filenames:
url = url_path_join(input_target.path, filename)
input_targets.append(get_target_from_url(url.strip()))
continue
with input_target.open('r') as input_file:
# S3 files not yet supported since they don't support tell() and seek()
if input_target.path.endswith('.gz'):
input_file = gzip.GzipFile(fileobj=input_file)
elif input_target.path.endswith('.manifest'):
for url in input_file:
input_targets.append(get_target_from_url(url.strip()))
continue
os.environ['map_input_file'] = input_target.path
try:
outputs = job._map_input((line[:-1] for line in input_file))
job.internal_writer(outputs, map_output)
finally:
del os.environ['map_input_file']
map_output.seek(0)
reduce_input = self.group(map_output)
try:
reduce_output = job.output().open('w')
except Exception:
reduce_output = StringIO.StringIO()
try:
job._run_reducer(reduce_input, reduce_output)
finally:
try:
reduce_output.close()
except Exception:
pass
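# For reference, a ".manifest" input handled above is assumed to be a plain
# text file whose lines are URLs of the real inputs (the paths are hypothetical):
#
#   hdfs://namenode/data/input/part-00000.gz
#   s3://bucket/data/input/part-00001.gz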
class MultiOutputMapReduceJobTask(MapReduceJobTask):
"""
Produces multiple output files from a map reduce job.
The mapper output tuple key is used to determine the name of the file that reducer results are written to. Different
reduce tasks must not write to the same file. Since all values for a given mapper output key are guaranteed to be
processed by the same reduce task, we only allow a single file to be output per key for safety. In the future, the
reducer output key could be used to determine the output file name, however.
"""
output_root = luigi.Parameter(
description='A URL location where the split files will be stored.',
)
delete_output_root = luigi.BoolParameter(
default=False,
significant=False,
description='If True, recursively deletes the `output_root` at task creation.',
)
marker = luigi.Parameter(
config_path={'section': 'map-reduce', 'name': 'marker'},
significant=False,
description='A URL location to a directory where a marker file will be written on task completion.',
)
def output(self):
marker_url = url_path_join(self.marker, str(hash(self)))
return get_target_from_url(marker_url)
def reducer(self, key, values):
"""
Write out values from each key into different output files.
"""
output_path = self.output_path_for_key(key)
if output_path:
log.info('Writing output file: %s', output_path)
output_file_target = get_target_from_url(output_path)
with output_file_target.open('w') as output_file:
self.multi_output_reducer(key, values, output_file)
# Luigi requires the reducer to return an iterable
return iter(tuple())
def multi_output_reducer(self, _key, _values, _output_file):
"""Returns an iterable of strings that are written out to the appropriate output file for this key."""
return iter(tuple())
def output_path_for_key(self, _key):
"""
Returns a URL that is unique to the given key.
All values returned from the reducer for the given key will be output to the file specified by the URL returned
from this function.
"""
return None
def __init__(self, *args, **kwargs):
super(MultiOutputMapReduceJobTask, self).__init__(*args, **kwargs)
if self.delete_output_root:
# If requested, make sure that the output directory is empty. This gets rid
# of any generated data files from a previous run (that might not get
# regenerated in this run). It also makes sure that the marker file
# (i.e. the output target) will be removed, so that external functionality
# will know that the generation of data files is not complete.
output_dir_target = get_target_from_url(self.output_root)
for target in [self.output(), output_dir_target]:
if target.exists():
target.remove()
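# A minimal subclass sketch (the task and file names are hypothetical): route
# the values for each mapper output key to a separate file under output_root.
#
# class SplitByKeyTask(MultiOutputMapReduceJobTask):
#     def output_path_for_key(self, key):
#         return url_path_join(self.output_root, '{0}.tsv'.format(key))
#     def multi_output_reducer(self, key, values, output_file):
#         for value in values:
#             output_file.write(value + '\n')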
|
import unittest2
import test_term_count
suite = [
test_term_count
]
|
from weboob.tools.backend import BaseBackend
from weboob.capabilities.calendar import ICapCalendarEvent, BaseCalendarEvent, CATEGORIES, TRANSP, STATUS
from datetime import datetime, time
from .browser import ParisKiwiBrowser
__all__ = ['ParisKiwiBackend']
class ParisKiwiBackend(BaseBackend, ICapCalendarEvent):
NAME = 'pariskiwi'
DESCRIPTION = u'ParisKiwi website'
MAINTAINER = u'Vincent A'
EMAIL = 'dev@indigo.re'
LICENSE = 'AGPLv3+'
VERSION = '0.i'
BROWSER = ParisKiwiBrowser
ASSOCIATED_CATEGORIES = [CATEGORIES.CONCERT]
def search_events(self, query):
if self.has_matching_categories(query):
return self.list_events(query.start_date, query.end_date or None)
def list_events(self, date_from, date_to=None):
for d in self.browser.list_events_all():
if self.matches_date(d, date_from, date_to):
event = self.get_event(d['id'])
if event is not None:
yield event
def get_event(self, _id):
d = self.browser.get_event(_id)
if not d:
return None
return self._make_event(d)
def _make_event(self, d):
event = BaseCalendarEvent(d['id'])
event.city = u'Paris'
event.url = d['url']
event.start_date = d['datetime']
event.end_date = datetime.combine(d['datetime'].date(), time.max)
event.summary = d['summary']
event.category = CATEGORIES.CONCERT
event.description = d['description']
event.status = STATUS.CONFIRMED
event.transp = TRANSP.OPAQUE
if 'price' in d:
event.price = d['price']
if 'address' in d:
event.location = d['address']
return event
def _make_false_event(self):
event = BaseCalendarEvent('0')
event.start_date = event.end_date = datetime.utcfromtimestamp(0)
event.summary = u'NON EXISTING EVENT'
event.status = STATUS.CANCELLED
event.category = CATEGORIES.CONCERT
event.transp = TRANSP.OPAQUE
return event
def matches_date(self, d, date_from, date_to):
if date_from and d['date'] < date_from:
return False
        if date_to and d['date'] > date_to:
return False
return True
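# A minimal usage sketch (how the backend is constructed depends on the weboob
# core; the argument-less constructor below is an assumption):
#
#   backend = ParisKiwiBackend()
#   for event in backend.list_events(datetime.now()):
#       print event.summary, event.start_date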
|
import subprocess
import json
from time import sleep
import urllib
import base64
import zlib
import cPickle
from alignak_test import unittest
from alignak.http.generic_interface import GenericInterface
from alignak.http.receiver_interface import ReceiverInterface
from alignak.http.arbiter_interface import ArbiterInterface
from alignak.http.scheduler_interface import SchedulerInterface
from alignak.http.broker_interface import BrokerInterface
from alignak.check import Check
class fullTest(unittest.TestCase):
def _get_subproc_data(self, name):
try:
print("Try to end %s" % name)
self.procs[name].send_signal(2)
self.procs[name].send_signal(15)
self.procs[name].wait()
except Exception as err:
print("prob on terminate and wait subproc %s: %s" % (name, err))
data = {}
data['out'] = self.procs[name].stdout.read()
data['err'] = self.procs[name].stderr.read()
data['rc'] = self.procs[name].returncode
return data
def tearDown(self):
for name, proc in self.procs.items():
if proc:
                self._get_subproc_data(name)  # terminate and wait for it
def test_daemons_outputs(self):
self.procs = {}
satellite_map = {'arbiter': '7770',
'scheduler': '7768',
'broker': '7772',
'poller': '7771',
'reactionner': '7769',
'receiver': '7773'
}
for daemon in ['scheduler', 'broker', 'poller', 'reactionner', 'receiver']:
args = ["../alignak/bin/alignak_%s.py" %daemon, "-c", "etc/full_test/%sd.ini" % daemon]
self.procs[daemon] = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
args = ["../alignak/bin/alignak_arbiter.py", "-c", "etc/full_test/alignak.cfg"]
self.procs['arbiter'] = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
sleep(3)
print("Testing start")
for name, proc in self.procs.items():
ret = proc.poll()
if ret is not None:
print(proc.stdout.read())
print(proc.stderr.read())
self.assertIsNone(ret, "Daemon %s not started!" % name)
print("Testing sat list")
data = urllib.urlopen("http://127.0.0.1:%s/get_satellite_list" % satellite_map['arbiter']).read()
self.assertEqual(data, '{"reactionner": ["reactionner-master"], '
'"broker": ["broker-master"], '
'"arbiter": ["arbiter-master"], '
'"scheduler": ["scheduler-master"], '
'"receiver": ["receiver-1"], '
'"poller": ["poller-fail", "poller-master"]}')
print("Testing have_conf")
for daemon in ['scheduler', 'broker', 'poller', 'reactionner', 'receiver']:
data = urllib.urlopen("http://127.0.0.1:%s/have_conf" % satellite_map[daemon]).read()
self.assertEqual(data, "true", "Daemon %s has no conf!" % daemon)
print("Testing ping")
for name, port in satellite_map.items():
data = urllib.urlopen("http://127.0.0.1:%s/ping" % port).read()
self.assertEqual(data, '"pong"', "Daemon %s did not ping back!" % name)
print("Testing API")
for name, port in satellite_map.items():
data = urllib.urlopen("http://127.0.0.1:%s/api" % port).read()
name_to_interface = {'arbiter': ArbiterInterface,
'scheduler': SchedulerInterface,
'broker': BrokerInterface,
'poller': GenericInterface,
'reactionner': GenericInterface,
'receiver': ReceiverInterface}
expected_data = set(name_to_interface[name](None).api())
self.assertEqual(set(json.loads(data)), expected_data, "Daemon %s has a bad API!" % name)
print("Test get check on scheduler")
        # Sleep long enough for the first check to be launched (check_interval = 5)
sleep(4)
raw_data = urllib.urlopen("http://127.0.0.1:%s/get_checks?do_checks=True&poller_tags=['TestPollerTag']" % satellite_map['scheduler']).read()
data = cPickle.loads(zlib.decompress(base64.b64decode(raw_data)))
self.assertIsInstance(data, list, "Data is not a list!")
self.assertNotEqual(len(data), 0, "List is empty!")
for elem in data:
self.assertIsInstance(elem, Check, "One elem of the list is not a Check!")
print("Done testing")
        #os.kill(self.arb_proc.pid, signal.SIGHUP)  # This should log the Reload Conf at debug level
#os.kill(self.arb_proc.pid, signal.SIGINT) # This should kill the proc
#data = self._get_subproc_data()
#self.assertRegexpMatches(data['out'], "Reloading configuration")
if __name__ == '__main__':
unittest.main()
|
import pytz
from openerp import SUPERUSER_ID, workflow
from datetime import datetime
from dateutil.relativedelta import relativedelta
from operator import attrgetter
from openerp.tools.safe_eval import safe_eval as eval
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.osv.orm import browse_record_list, browse_record, browse_null
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
from openerp.tools.float_utils import float_compare
class purchase_order(osv.osv):
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
res = {}
cur_obj=self.pool.get('res.currency')
line_obj = self.pool['purchase.order.line']
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0,
}
val = val1 = 0.0
cur = order.pricelist_id.currency_id
for line in order.order_line:
val1 += line.price_subtotal
line_price = line_obj._calc_line_base_price(cr, uid, line,
context=context)
line_qty = line_obj._calc_line_quantity(cr, uid, line,
context=context)
for c in self.pool['account.tax'].compute_all(
cr, uid, line.taxes_id, line_price, line_qty,
line.product_id, order.partner_id)['taxes']:
val += c.get('amount', 0.0)
res[order.id]['amount_tax']=cur_obj.round(cr, uid, cur, val)
res[order.id]['amount_untaxed']=cur_obj.round(cr, uid, cur, val1)
res[order.id]['amount_total']=res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
return res
def _set_minimum_planned_date(self, cr, uid, ids, name, value, arg, context=None):
if not value: return False
if type(ids)!=type([]):
ids=[ids]
pol_obj = self.pool.get('purchase.order.line')
for po in self.browse(cr, uid, ids, context=context):
if po.order_line:
pol_ids = pol_obj.search(cr, uid, [
('order_id', '=', po.id), '|', ('date_planned', '=', po.minimum_planned_date), ('date_planned', '<', value)
], context=context)
pol_obj.write(cr, uid, pol_ids, {'date_planned': value}, context=context)
self.invalidate_cache(cr, uid, context=context)
return True
def _minimum_planned_date(self, cr, uid, ids, field_name, arg, context=None):
res={}
purchase_obj=self.browse(cr, uid, ids, context=context)
for purchase in purchase_obj:
res[purchase.id] = False
if purchase.order_line:
min_date=purchase.order_line[0].date_planned
for line in purchase.order_line:
if line.state == 'cancel':
continue
if line.date_planned < min_date:
min_date=line.date_planned
res[purchase.id]=min_date
return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
tot = 0.0
for invoice in purchase.invoice_ids:
if invoice.state not in ('draft','cancel'):
tot += invoice.amount_untaxed
if purchase.amount_untaxed:
res[purchase.id] = tot * 100.0 / purchase.amount_untaxed
else:
res[purchase.id] = 0.0
return res
def _shipped_rate(self, cr, uid, ids, name, arg, context=None):
if not ids: return {}
res = {}
for id in ids:
res[id] = [0.0,0.0]
cr.execute('''SELECT
p.order_id, sum(m.product_qty), m.state
FROM
stock_move m
LEFT JOIN
purchase_order_line p on (p.id=m.purchase_line_id)
WHERE
p.order_id IN %s GROUP BY m.state, p.order_id''',(tuple(ids),))
for oid,nbr,state in cr.fetchall():
if state=='cancel':
continue
if state=='done':
res[oid][0] += nbr or 0.0
res[oid][1] += nbr or 0.0
else:
res[oid][1] += nbr or 0.0
for r in res:
if not res[r][1]:
res[r] = 0.0
else:
res[r] = 100.0 * res[r][0] / res[r][1]
return res
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('purchase.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
def _get_purchase_order(self, cr, uid, ids, context=None):
result = {}
for order in self.browse(cr, uid, ids, context=context):
result[order.id] = True
return result.keys()
def _invoiced(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
res[purchase.id] = all(line.invoiced for line in purchase.order_line if line.state != 'cancel')
return res
def _get_journal(self, cr, uid, context=None):
if context is None:
context = {}
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = context.get('company_id', user.company_id.id)
journal_obj = self.pool.get('account.journal')
res = journal_obj.search(cr, uid, [('type', '=', 'purchase'),
('company_id', '=', company_id)],
limit=1)
return res and res[0] or False
def _get_picking_in(self, cr, uid, context=None):
obj_data = self.pool.get('ir.model.data')
type_obj = self.pool.get('stock.picking.type')
user_obj = self.pool.get('res.users')
company_id = user_obj.browse(cr, uid, uid, context=context).company_id.id
types = type_obj.search(cr, uid, [('code', '=', 'incoming'), ('warehouse_id.company_id', '=', company_id)], context=context)
if not types:
types = type_obj.search(cr, uid, [('code', '=', 'incoming'), ('warehouse_id', '=', False)], context=context)
if not types:
raise osv.except_osv(_('Error!'), _("Make sure you have at least an incoming picking type defined"))
return types[0]
def _get_picking_ids(self, cr, uid, ids, field_names, args, context=None):
res = {}
for po_id in ids:
res[po_id] = []
query = """
SELECT picking_id, po.id FROM stock_picking p, stock_move m, purchase_order_line pol, purchase_order po
WHERE po.id in %s and po.id = pol.order_id and pol.id = m.purchase_line_id and m.picking_id = p.id
GROUP BY picking_id, po.id
"""
cr.execute(query, (tuple(ids), ))
picks = cr.fetchall()
for pick_id, po_id in picks:
res[po_id].append(pick_id)
return res
def _count_all(self, cr, uid, ids, field_name, arg, context=None):
return {
purchase.id: {
'shipment_count': len(purchase.picking_ids),
'invoice_count': len(purchase.invoice_ids),
}
for purchase in self.browse(cr, uid, ids, context=context)
}
STATE_SELECTION = [
('draft', 'Draft PO'),
('sent', 'RFQ'),
('bid', 'Bid Received'),
('confirmed', 'Waiting Approval'),
('approved', 'Purchase Confirmed'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')
]
READONLY_STATES = {
'confirmed': [('readonly', True)],
'approved': [('readonly', True)],
'done': [('readonly', True)]
}
_track = {
'state': {
'purchase.mt_rfq_confirmed': lambda self, cr, uid, obj, ctx=None: obj.state == 'confirmed',
'purchase.mt_rfq_approved': lambda self, cr, uid, obj, ctx=None: obj.state == 'approved',
'purchase.mt_rfq_done': lambda self, cr, uid, obj, ctx=None: obj.state == 'done',
},
}
_columns = {
'name': fields.char('Order Reference', required=True, select=True, copy=False,
help="Unique number of the purchase order, "
"computed automatically when the purchase order is created."),
'origin': fields.char('Source Document', copy=False,
help="Reference of the document that generated this purchase order "
"request; a sales order or an internal procurement request."),
'partner_ref': fields.char('Supplier Reference', states={'confirmed':[('readonly',True)],
'approved':[('readonly',True)],
'done':[('readonly',True)]},
copy=False,
help="Reference of the sales order or bid sent by your supplier. "
"It's mainly used to do the matching when you receive the "
"products as this reference is usually written on the "
"delivery order sent by your supplier."),
'date_order':fields.datetime('Order Date', required=True, states={'confirmed':[('readonly',True)],
'approved':[('readonly',True)]},
                                     select=True, help="Depicts the date on which the Quotation should be validated and converted into a Purchase Order; by default it is the creation date.",
copy=False),
'date_approve':fields.date('Date Approved', readonly=1, select=True, copy=False,
help="Date on which purchase order has been approved"),
'partner_id':fields.many2one('res.partner', 'Supplier', required=True, states=READONLY_STATES,
change_default=True, track_visibility='always'),
'dest_address_id':fields.many2one('res.partner', 'Customer Address (Direct Delivery)',
states=READONLY_STATES,
help="Put an address if you want to deliver directly from the supplier to the customer. " \
"Otherwise, keep empty to deliver to your own company."
),
'location_id': fields.many2one('stock.location', 'Destination', required=True, domain=[('usage','<>','view')], states=READONLY_STATES),
'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', required=True, states=READONLY_STATES, help="The pricelist sets the currency used for this purchase order. It also computes the supplier price for the selected products/quantities."),
'currency_id': fields.many2one('res.currency','Currency', required=True, states=READONLY_STATES),
'state': fields.selection(STATE_SELECTION, 'Status', readonly=True,
help="The status of the purchase order or the quotation request. "
"A request for quotation is a purchase order in a 'Draft' status. "
"Then the order has to be confirmed by the user, the status switch "
"to 'Confirmed'. Then the supplier must confirm the order to change "
"the status to 'Approved'. When the purchase order is paid and "
"received, the status becomes 'Done'. If a cancel action occurs in "
"the invoice or in the receipt of goods, the status becomes "
"in exception.",
select=True, copy=False),
'order_line': fields.one2many('purchase.order.line', 'order_id', 'Order Lines',
states={'approved':[('readonly',True)],
'done':[('readonly',True)]},
copy=True),
'validator' : fields.many2one('res.users', 'Validated by', readonly=True, copy=False),
'notes': fields.text('Terms and Conditions'),
'invoice_ids': fields.many2many('account.invoice', 'purchase_invoice_rel', 'purchase_id',
'invoice_id', 'Invoices', copy=False,
help="Invoices generated for a purchase order"),
'picking_ids': fields.function(_get_picking_ids, method=True, type='one2many', relation='stock.picking', string='Picking List', help="This is the list of receipts that have been generated for this purchase order."),
'shipped':fields.boolean('Received', readonly=True, select=True, copy=False,
help="It indicates that a picking has been done"),
'shipped_rate': fields.function(_shipped_rate, string='Received Ratio', type='float'),
'invoiced': fields.function(_invoiced, string='Invoice Received', type='boolean', copy=False,
help="It indicates that an invoice has been validated"),
'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced', type='float'),
'invoice_method': fields.selection([('manual','Based on Purchase Order lines'),('order','Based on generated draft invoice'),('picking','Based on incoming shipments')], 'Invoicing Control', required=True,
readonly=True, states={'draft':[('readonly',False)], 'sent':[('readonly',False)]},
help="Based on Purchase Order lines: place individual lines in 'Invoice Control / On Purchase Order lines' from where you can selectively create an invoice.\n" \
"Based on generated invoice: create a draft invoice you can validate later.\n" \
"Based on incoming shipments: let you create an invoice when receipts are validated."
),
'minimum_planned_date':fields.function(_minimum_planned_date, fnct_inv=_set_minimum_planned_date, string='Expected Date', type='date', select=True, help="This is computed as the minimum scheduled date of all purchase order lines' products.",
store = {
'purchase.order.line': (_get_order, ['date_planned'], 10),
'purchase.order': (_get_purchase_order, ['order_line'], 10),
}
),
'amount_untaxed': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Untaxed Amount',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The amount without tax", track_visibility='always'),
'amount_tax': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Taxes',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The tax amount"),
'amount_total': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Total',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The total amount"),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position'),
'payment_term_id': fields.many2one('account.payment.term', 'Payment Term'),
'incoterm_id': fields.many2one('stock.incoterms', 'Incoterm', help="International Commercial Terms are a series of predefined commercial terms used in international transactions."),
'product_id': fields.related('order_line', 'product_id', type='many2one', relation='product.product', string='Product'),
'create_uid': fields.many2one('res.users', 'Responsible'),
'company_id': fields.many2one('res.company', 'Company', required=True, select=1, states={'confirmed': [('readonly', True)], 'approved': [('readonly', True)]}),
'journal_id': fields.many2one('account.journal', 'Journal'),
'bid_date': fields.date('Bid Received On', readonly=True, help="Date on which the bid was received"),
'bid_validity': fields.date('Bid Valid Until', help="Date on which the bid expired"),
'picking_type_id': fields.many2one('stock.picking.type', 'Deliver To', help="This will determine picking type of incoming shipment", required=True,
states={'confirmed': [('readonly', True)], 'approved': [('readonly', True)], 'done': [('readonly', True)]}),
'related_location_id': fields.related('picking_type_id', 'default_location_dest_id', type='many2one', relation='stock.location', string="Related location", store=True),
'related_usage': fields.related('location_id', 'usage', type='char'),
'shipment_count': fields.function(_count_all, type='integer', string='Incoming Shipments', multi=True),
'invoice_count': fields.function(_count_all, type='integer', string='Invoices', multi=True)
}
_defaults = {
'date_order': fields.datetime.now,
'state': 'draft',
'name': lambda obj, cr, uid, context: '/',
'shipped': 0,
'invoice_method': 'order',
'invoiced': 0,
'pricelist_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').browse(cr, uid, context['partner_id']).property_product_pricelist_purchase.id,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.order', context=c),
'journal_id': _get_journal,
'currency_id': lambda self, cr, uid, context: self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id,
'picking_type_id': _get_picking_in,
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
_name = "purchase.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Purchase Order"
_order = 'date_order desc, id desc'
def create(self, cr, uid, vals, context=None):
if vals.get('name','/')=='/':
vals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'purchase.order', context=context) or '/'
context = dict(context or {}, mail_create_nolog=True)
order = super(purchase_order, self).create(cr, uid, vals, context=context)
self.message_post(cr, uid, [order], body=_("RFQ created"), context=context)
return order
def unlink(self, cr, uid, ids, context=None):
purchase_orders = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in purchase_orders:
if s['state'] in ['draft','cancel']:
unlink_ids.append(s['id'])
else:
raise osv.except_osv(_('Invalid Action!'), _('In order to delete a purchase order, you must cancel it first.'))
# automatically sending subflow.delete upon deletion
self.signal_workflow(cr, uid, unlink_ids, 'purchase_cancel')
return super(purchase_order, self).unlink(cr, uid, unlink_ids, context=context)
def copy(self, cr, uid, id, default=None, context=None):
# FORWARDPORT UP TO SAAS-6
        new_id = super(purchase_order, self).copy(cr, uid, id, default=default, context=context)
for po in self.browse(cr, uid, [new_id], context=context):
for line in po.order_line:
vals = self.pool.get('purchase.order.line').onchange_product_id(
cr, uid, line.id, po.pricelist_id.id, line.product_id.id, line.product_qty,
line.product_uom.id, po.partner_id.id, date_order=po.date_order, context=context
)
line.write({'date_planned': vals['value']['date_planned']})
return new_id
def set_order_line_status(self, cr, uid, ids, status, context=None):
line = self.pool.get('purchase.order.line')
order_line_ids = []
proc_obj = self.pool.get('procurement.order')
for order in self.browse(cr, uid, ids, context=context):
if status in ('draft', 'cancel'):
order_line_ids += [po_line.id for po_line in order.order_line]
else: # Do not change the status of already cancelled lines
order_line_ids += [po_line.id for po_line in order.order_line if po_line.state != 'cancel']
if order_line_ids:
line.write(cr, uid, order_line_ids, {'state': status}, context=context)
if order_line_ids and status == 'cancel':
procs = proc_obj.search(cr, uid, [('purchase_line_id', 'in', order_line_ids)], context=context)
if procs:
proc_obj.write(cr, uid, procs, {'state': 'exception'}, context=context)
return True
def button_dummy(self, cr, uid, ids, context=None):
return True
def onchange_pricelist(self, cr, uid, ids, pricelist_id, context=None):
if not pricelist_id:
return {}
return {'value': {'currency_id': self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context).currency_id.id}}
#Destination address is used when dropshipping
def onchange_dest_address_id(self, cr, uid, ids, address_id, context=None):
if not address_id:
return {}
address = self.pool.get('res.partner')
values = {}
supplier = address.browse(cr, uid, address_id, context=context)
if supplier:
location_id = supplier.property_stock_customer.id
values.update({'location_id': location_id})
return {'value':values}
def onchange_picking_type_id(self, cr, uid, ids, picking_type_id, context=None):
value = {}
if picking_type_id:
picktype = self.pool.get("stock.picking.type").browse(cr, uid, picking_type_id, context=context)
if picktype.default_location_dest_id:
value.update({'location_id': picktype.default_location_dest_id.id, 'related_usage': picktype.default_location_dest_id.usage})
value.update({'related_location_id': picktype.default_location_dest_id.id})
return {'value': value}
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
partner = self.pool.get('res.partner')
if not partner_id:
return {'value': {
'fiscal_position': False,
'payment_term_id': False,
}}
company_id = context.get('company_id') or self.pool.get('res.users')._get_company(cr, uid, context=context)
if not company_id:
raise osv.except_osv(_('Error!'), _('There is no default company for the current user!'))
fp = self.pool['account.fiscal.position'].get_fiscal_position(cr, uid, company_id, partner_id, context=context)
supplier_address = partner.address_get(cr, uid, [partner_id], ['default'], context=context)
supplier = partner.browse(cr, uid, partner_id, context=context)
return {'value': {
'pricelist_id': supplier.property_product_pricelist_purchase.id,
'fiscal_position': fp or supplier.property_account_position and supplier.property_account_position.id,
'payment_term_id': supplier.property_supplier_payment_term.id or False,
}}
def invoice_open(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree2')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
if not inv_ids:
raise osv.except_osv(_('Error!'), _('Please create Invoices.'))
#choose the view_mode accordingly
if len(inv_ids)>1:
result['domain'] = "[('id','in',["+','.join(map(str, inv_ids))+"])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
result['views'] = [(res and res[1] or False, 'form')]
result['res_id'] = inv_ids and inv_ids[0] or False
return result
def view_invoice(self, cr, uid, ids, context=None):
'''
        This function returns an action that displays the existing invoices of the given purchase order ids. It can either be in a list or in a form view, if there is only one invoice to show.
'''
context = dict(context or {})
mod_obj = self.pool.get('ir.model.data')
wizard_obj = self.pool.get('purchase.order.line_invoice')
#compute the number of invoices to display
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
if po.invoice_method == 'manual':
if not po.invoice_ids:
context.update({'active_ids' : [line.id for line in po.order_line if line.state != 'cancel']})
wizard_obj.makeInvoices(cr, uid, [], context=context)
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
res_id = res and res[1] or False
return {
'name': _('Supplier Invoices'),
'view_type': 'form',
'view_mode': 'form',
'view_id': [res_id],
'res_model': 'account.invoice',
'context': "{'type':'in_invoice', 'journal_type': 'purchase'}",
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': inv_ids and inv_ids[0] or False,
}
def view_picking(self, cr, uid, ids, context=None):
'''
This function returns an action that display existing picking orders of given purchase order ids.
'''
if context is None:
context = {}
mod_obj = self.pool.get('ir.model.data')
dummy, action_id = tuple(mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree'))
action = self.pool.get('ir.actions.act_window').read(cr, uid, action_id, context=context)
pick_ids = []
for po in self.browse(cr, uid, ids, context=context):
pick_ids += [picking.id for picking in po.picking_ids]
#override the context to get rid of the default filtering on picking type
action['context'] = {}
#choose the view_mode accordingly
if len(pick_ids) > 1:
action['domain'] = "[('id','in',[" + ','.join(map(str, pick_ids)) + "])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_form')
action['views'] = [(res and res[1] or False, 'form')]
action['res_id'] = pick_ids and pick_ids[0] or False
return action
def wkf_approve_order(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'approved', 'date_approve': fields.date.context_today(self,cr,uid,context=context)})
return True
def wkf_bid_received(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state':'bid', 'bid_date': fields.date.context_today(self,cr,uid,context=context)})
def wkf_send_rfq(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi purchase template message loaded by default
'''
if not context:
context= {}
ir_model_data = self.pool.get('ir.model.data')
try:
if context.get('send_rfq', False):
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase')[1]
else:
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase_done')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'purchase.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
})
return {
'name': _('Compose Email'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
def print_quotation(self, cr, uid, ids, context=None):
'''
        This function prints the request for quotation and marks it as sent, so that the next step of the workflow can be seen more easily
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time'
self.signal_workflow(cr, uid, ids, 'send_rfq')
return self.pool['report'].get_action(cr, uid, ids, 'purchase.report_purchasequotation', context=context)
def wkf_confirm_order(self, cr, uid, ids, context=None):
todo = []
for po in self.browse(cr, uid, ids, context=context):
if not any(line.state != 'cancel' for line in po.order_line):
raise osv.except_osv(_('Error!'),_('You cannot confirm a purchase order without any purchase order line.'))
if po.invoice_method == 'picking' and not any([l.product_id and l.product_id.type in ('product', 'consu') and l.state != 'cancel' for l in po.order_line]):
raise osv.except_osv(
_('Error!'),
_("You cannot confirm a purchase order with Invoice Control Method 'Based on incoming shipments' that doesn't contain any stockable item."))
for line in po.order_line:
if line.state=='draft':
todo.append(line.id)
self.pool.get('purchase.order.line').action_confirm(cr, uid, todo, context)
for id in ids:
self.write(cr, uid, [id], {'state' : 'confirmed', 'validator' : uid}, context=context)
return True
def _choose_account_from_po_line(self, cr, uid, po_line, context=None):
fiscal_obj = self.pool.get('account.fiscal.position')
property_obj = self.pool.get('ir.property')
if po_line.product_id:
acc_id = po_line.product_id.property_account_expense.id
if not acc_id:
acc_id = po_line.product_id.categ_id.property_account_expense_categ.id
if not acc_id:
raise osv.except_osv(_('Error!'), _('Define an expense account for this product: "%s" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))
else:
acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category', context=context).id
fpos = po_line.order_id.fiscal_position or False
return fiscal_obj.map_account(cr, uid, fpos, acc_id)
def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):
"""Collects require data from purchase order line that is used to create invoice line
for that purchase order line
:param account_id: Expense account of the product of PO line if any.
:param browse_record order_line: Purchase order line browse record
:return: Value for fields of invoice lines.
:rtype: dict
"""
return {
'name': order_line.name,
'account_id': account_id,
'price_unit': order_line.price_unit or 0.0,
'quantity': order_line.product_qty,
'product_id': order_line.product_id.id or False,
'uos_id': order_line.product_uom.id or False,
'invoice_line_tax_id': [(6, 0, [x.id for x in order_line.taxes_id])],
'account_analytic_id': order_line.account_analytic_id.id or False,
'purchase_line_id': order_line.id,
}
def _prepare_invoice(self, cr, uid, order, line_ids, context=None):
"""Prepare the dict of values to create the new invoice for a
purchase order. This method may be overridden to implement custom
invoice generation (making sure to call super() to establish
a clean extension chain).
:param browse_record order: purchase.order record to invoice
:param list(int) line_ids: list of invoice line IDs that must be
attached to the invoice
:return: dict of value to create() the invoice
"""
journal_ids = self.pool['account.journal'].search(
cr, uid, [('type', '=', 'purchase'),
('company_id', '=', order.company_id.id)],
limit=1)
if not journal_ids:
raise osv.except_osv(
_('Error!'),
_('Define purchase journal for this company: "%s" (id:%d).') % \
(order.company_id.name, order.company_id.id))
return {
'name': order.partner_ref or order.name,
'reference': order.partner_ref or order.name,
'account_id': order.partner_id.property_account_payable.id,
'type': 'in_invoice',
'partner_id': order.partner_id.id,
'currency_id': order.currency_id.id,
'journal_id': len(journal_ids) and journal_ids[0] or False,
'invoice_line': [(6, 0, line_ids)],
'origin': order.name,
'fiscal_position': order.fiscal_position.id or False,
'payment_term': order.payment_term_id.id or False,
'company_id': order.company_id.id,
}
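    # A minimal override sketch for a custom module (the class below is
    # hypothetical); per the docstring above, overriders should call super():
    #
    # class purchase_order_custom(osv.osv):
    #     _inherit = 'purchase.order'
    #     def _prepare_invoice(self, cr, uid, order, line_ids, context=None):
    #         res = super(purchase_order_custom, self)._prepare_invoice(
    #             cr, uid, order, line_ids, context=context)
    #         res['comment'] = 'Generated from %s' % order.name
    #         return res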
def action_cancel_draft(self, cr, uid, ids, context=None):
if not len(ids):
return False
self.write(cr, uid, ids, {'state':'draft','shipped':0})
self.set_order_line_status(cr, uid, ids, 'draft', context=context)
for po in self.browse(cr, SUPERUSER_ID, ids, context=context):
for picking in po.picking_ids:
picking.move_lines.write({'purchase_line_id': False})
for invoice in po.invoice_ids:
                po.write({'invoice_ids': [(3, invoice.id)]})
for po_line in po.order_line:
for invoice_line in po_line.invoice_lines:
                    po_line.write({'invoice_lines': [(3, invoice_line.id)]})
for p_id in ids:
# Deleting the existing instance of workflow for PO
self.delete_workflow(cr, uid, [p_id]) # TODO is it necessary to interleave the calls?
self.create_workflow(cr, uid, [p_id])
return True
def wkf_po_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'done'}, context=context)
self.set_order_line_status(cr, uid, ids, 'done', context=context)
def action_invoice_create(self, cr, uid, ids, context=None):
"""Generates invoice for given ids of purchase orders and links that invoice ID to purchase order.
:param ids: list of ids of purchase orders.
:return: ID of created invoice.
:rtype: int
"""
context = dict(context or {})
inv_obj = self.pool.get('account.invoice')
inv_line_obj = self.pool.get('account.invoice.line')
res = False
uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
for order in self.browse(cr, uid, ids, context=context):
context.pop('force_company', None)
if order.company_id.id != uid_company_id:
#if the company of the document is different than the current user company, force the company in the context
#then re-do a browse to read the property fields for the good company.
context['force_company'] = order.company_id.id
order = self.browse(cr, uid, order.id, context=context)
# generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line
inv_lines = []
for po_line in order.order_line:
if po_line.state == 'cancel':
continue
acc_id = self._choose_account_from_po_line(cr, uid, po_line, context=context)
inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)
inv_lines.append(inv_line_id)
po_line.write({'invoice_lines': [(4, inv_line_id)]})
# get invoice data and create invoice
inv_data = self._prepare_invoice(cr, uid, order, inv_lines, context=context)
inv_id = inv_obj.create(cr, uid, inv_data, context=context)
# compute the invoice
inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)
# Link this new invoice to related purchase order
order.write({'invoice_ids': [(4, inv_id)]})
res = inv_id
return res
def invoice_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'approved'}, context=context)
return True
def has_stockable_product(self, cr, uid, ids, *args):
for order in self.browse(cr, uid, ids):
for order_line in order.order_line:
if order_line.state == 'cancel':
continue
if order_line.product_id and order_line.product_id.type in ('product', 'consu'):
return True
return False
def wkf_action_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
self.set_order_line_status(cr, uid, ids, 'cancel', context=context)
def action_cancel(self, cr, uid, ids, context=None):
for purchase in self.browse(cr, uid, ids, context=context):
for pick in purchase.picking_ids:
for move in pick.move_lines:
if pick.state == 'done':
raise osv.except_osv(
_('Unable to cancel the purchase order %s.') % (purchase.name),
_('You have already received some goods for it. '))
self.pool.get('stock.picking').action_cancel(cr, uid, [x.id for x in purchase.picking_ids if x.state != 'cancel'], context=context)
for inv in purchase.invoice_ids:
if inv and inv.state not in ('cancel', 'draft'):
raise osv.except_osv(
_('Unable to cancel this purchase order.'),
_('You must first cancel all invoices related to this purchase order.'))
self.pool.get('account.invoice') \
.signal_workflow(cr, uid, map(attrgetter('id'), purchase.invoice_ids), 'invoice_cancel')
self.signal_workflow(cr, uid, ids, 'purchase_cancel')
return True
def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, group_id, context=None):
''' prepare the stock move data from the PO line. This function returns a list of dictionary ready to be used in stock.move's create()'''
product_uom = self.pool.get('product.uom')
price_unit = order_line.price_unit
if order_line.taxes_id:
taxes = self.pool['account.tax'].compute_all(cr, uid, order_line.taxes_id, price_unit, 1.0,
order_line.product_id, order.partner_id)
price_unit = taxes['total']
if order_line.product_uom.id != order_line.product_id.uom_id.id:
price_unit *= order_line.product_uom.factor / order_line.product_id.uom_id.factor
if order.currency_id.id != order.company_id.currency_id.id:
#we don't round the price_unit, as we may want to store the standard price with more digits than allowed by the currency
price_unit = self.pool.get('res.currency').compute(cr, uid, order.currency_id.id, order.company_id.currency_id.id, price_unit, round=False, context=context)
res = []
if order.location_id.usage == 'customer':
name = order_line.product_id.with_context(dict(context or {}, lang=order.dest_address_id.lang)).name
else:
name = order_line.name or ''
move_template = {
'name': name,
'product_id': order_line.product_id.id,
'product_uom': order_line.product_uom.id,
'product_uos': order_line.product_uom.id,
'date': order.date_order,
'date_expected': fields.date.date_to_datetime(self, cr, uid, order_line.date_planned, context),
'location_id': order.partner_id.property_stock_supplier.id,
'location_dest_id': order.location_id.id,
'picking_id': picking_id,
'partner_id': order.dest_address_id.id,
'move_dest_id': False,
'state': 'draft',
'purchase_line_id': order_line.id,
'company_id': order.company_id.id,
'price_unit': price_unit,
'picking_type_id': order.picking_type_id.id,
'group_id': group_id,
'procurement_id': False,
'origin': order.name,
'route_ids': order.picking_type_id.warehouse_id and [(6, 0, [x.id for x in order.picking_type_id.warehouse_id.route_ids])] or [],
'warehouse_id':order.picking_type_id.warehouse_id.id,
'invoice_state': order.invoice_method == 'picking' and '2binvoiced' or 'none',
}
diff_quantity = order_line.product_qty
for procurement in order_line.procurement_ids:
procurement_qty = product_uom._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, to_uom_id=order_line.product_uom.id)
tmp = move_template.copy()
tmp.update({
'product_uom_qty': min(procurement_qty, diff_quantity),
'product_uos_qty': min(procurement_qty, diff_quantity),
'move_dest_id': procurement.move_dest_id.id, #move destination is same as procurement destination
'group_id': procurement.group_id.id or group_id, #move group is same as group of procurements if it exists, otherwise take another group
'procurement_id': procurement.id,
'invoice_state': procurement.rule_id.invoice_state or (procurement.location_id and procurement.location_id.usage == 'customer' and procurement.invoice_state=='2binvoiced' and '2binvoiced') or (order.invoice_method == 'picking' and '2binvoiced') or 'none', #dropship case takes from sale
'propagate': procurement.rule_id.propagate,
})
diff_quantity -= min(procurement_qty, diff_quantity)
res.append(tmp)
#if the order line has a bigger quantity than the procurement it was for (manually changed or minimal quantity), then
#split the future stock move in two because the route followed may be different.
if float_compare(diff_quantity, 0.0, precision_rounding=order_line.product_uom.rounding) > 0:
move_template['product_uom_qty'] = diff_quantity
move_template['product_uos_qty'] = diff_quantity
res.append(move_template)
return res
def _create_stock_moves(self, cr, uid, order, order_lines, picking_id=False, context=None):
"""Creates appropriate stock moves for given order lines, whose can optionally create a
picking if none is given or no suitable is found, then confirms the moves, makes them
available, and confirms the pickings.
If ``picking_id`` is provided, the stock moves will be added to it, otherwise a standard
incoming picking will be created to wrap the stock moves (default behavior of the stock.move)
Modules that wish to customize the procurements or partition the stock moves over
multiple stock pickings may override this method and call ``super()`` with
different subsets of ``order_lines`` and/or preset ``picking_id`` values.
:param browse_record order: purchase order to which the order lines belong
:param list(browse_record) order_lines: purchase order line records for which picking
and moves should be created.
:param int picking_id: optional ID of a stock picking to which the created stock moves
will be added. A new picking will be created if omitted.
:return: None
"""
stock_move = self.pool.get('stock.move')
todo_moves = []
new_group = self.pool.get("procurement.group").create(cr, uid, {'name': order.name, 'partner_id': order.partner_id.id}, context=context)
for order_line in order_lines:
if order_line.state == 'cancel':
continue
if not order_line.product_id:
continue
if order_line.product_id.type in ('product', 'consu'):
for vals in self._prepare_order_line_move(cr, uid, order, order_line, picking_id, new_group, context=context):
move = stock_move.create(cr, uid, vals, context=context)
todo_moves.append(move)
todo_moves = stock_move.action_confirm(cr, uid, todo_moves)
stock_move.force_assign(cr, uid, todo_moves)
def test_moves_done(self, cr, uid, ids, context=None):
'''PO is done at the delivery side if all the incoming shipments are done'''
for purchase in self.browse(cr, uid, ids, context=context):
for picking in purchase.picking_ids:
if picking.state != 'done':
return False
return True
def test_moves_except(self, cr, uid, ids, context=None):
        ''' PO is in exception at the delivery side if one of the pickings is canceled
            and the other pickings are completed (done or canceled)
'''
at_least_one_canceled = False
alldoneorcancel = True
for purchase in self.browse(cr, uid, ids, context=context):
for picking in purchase.picking_ids:
if picking.state == 'cancel':
at_least_one_canceled = True
if picking.state not in ['done', 'cancel']:
alldoneorcancel = False
return at_least_one_canceled and alldoneorcancel
def move_lines_get(self, cr, uid, ids, *args):
res = []
for order in self.browse(cr, uid, ids, context={}):
for line in order.order_line:
res += [x.id for x in line.move_ids]
return res
def action_picking_create(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids):
picking_vals = {
'picking_type_id': order.picking_type_id.id,
'partner_id': order.partner_id.id,
'date': order.date_order,
'origin': order.name
}
picking_id = self.pool.get('stock.picking').create(cr, uid, picking_vals, context=context)
self._create_stock_moves(cr, uid, order, order.order_line, picking_id, context=context)
return picking_id
def picking_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'shipped':1,'state':'approved'}, context=context)
# Do check on related procurements:
proc_obj = self.pool.get("procurement.order")
po_lines = []
for po in self.browse(cr, uid, ids, context=context):
po_lines += [x.id for x in po.order_line if x.state != 'cancel']
if po_lines:
procs = proc_obj.search(cr, uid, [('purchase_line_id', 'in', po_lines)], context=context)
if procs:
proc_obj.check(cr, uid, procs, context=context)
self.message_post(cr, uid, ids, body=_("Products received"), context=context)
return True
def do_merge(self, cr, uid, ids, context=None):
"""
To merge similar type of purchase orders.
Orders will only be merged if:
* Purchase Orders are in draft
* Purchase Orders belong to the same partner
        * Purchase Orders have the same stock location, the same pricelist and the same currency
Lines will only be merged if:
* Order lines are exactly the same except for the quantity and unit
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: the ID or list of IDs
@param context: A standard dictionary
@return: new purchase order id
"""
        #TOFIX: merged order lines should be unlinked
def make_key(br, fields):
list_key = []
for field in fields:
field_val = getattr(br, field)
if field in ('product_id', 'account_analytic_id'):
if not field_val:
field_val = False
if isinstance(field_val, browse_record):
field_val = field_val.id
elif isinstance(field_val, browse_null):
field_val = False
elif isinstance(field_val, browse_record_list):
field_val = ((6, 0, tuple([v.id for v in field_val])),)
list_key.append((field, field_val))
list_key.sort()
return tuple(list_key)
context = dict(context or {})
# Compute what the new orders should contain
new_orders = {}
order_lines_to_move = {}
for porder in [order for order in self.browse(cr, uid, ids, context=context) if order.state == 'draft']:
order_key = make_key(porder, ('partner_id', 'location_id', 'pricelist_id', 'currency_id'))
new_order = new_orders.setdefault(order_key, ({}, []))
new_order[1].append(porder.id)
order_infos = new_order[0]
order_lines_to_move.setdefault(order_key, [])
if not order_infos:
order_infos.update({
'origin': porder.origin,
'date_order': porder.date_order,
'partner_id': porder.partner_id.id,
'dest_address_id': porder.dest_address_id.id,
'picking_type_id': porder.picking_type_id.id,
'location_id': porder.location_id.id,
'pricelist_id': porder.pricelist_id.id,
'currency_id': porder.currency_id.id,
'state': 'draft',
'order_line': {},
'notes': '%s' % (porder.notes or '',),
'fiscal_position': porder.fiscal_position and porder.fiscal_position.id or False,
})
else:
if porder.date_order < order_infos['date_order']:
order_infos['date_order'] = porder.date_order
if porder.notes:
order_infos['notes'] = (order_infos['notes'] or '') + ('\n%s' % (porder.notes,))
if porder.origin:
order_infos['origin'] = (order_infos['origin'] or '') + ' ' + porder.origin
order_lines_to_move[order_key] += [order_line.id for order_line in porder.order_line
if order_line.state != 'cancel']
allorders = []
orders_info = {}
for order_key, (order_data, old_ids) in new_orders.iteritems():
# skip merges with only one order
if len(old_ids) < 2:
allorders += (old_ids or [])
continue
# cleanup order line data
for key, value in order_data['order_line'].iteritems():
del value['uom_factor']
value.update(dict(key))
order_data['order_line'] = [(6, 0, order_lines_to_move[order_key])]
# create the new order
context.update({'mail_create_nolog': True})
neworder_id = self.create(cr, uid, order_data)
self.message_post(cr, uid, [neworder_id], body=_("RFQ created"), context=context)
orders_info.update({neworder_id: old_ids})
allorders.append(neworder_id)
# make triggers pointing to the old orders point to the new order
for old_id in old_ids:
self.redirect_workflow(cr, uid, [(old_id, neworder_id)])
self.signal_workflow(cr, uid, [old_id], 'purchase_cancel')
return orders_info
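    # A minimal usage sketch (the ids below are hypothetical draft order ids);
    # the returned dict maps each new order id to the old ids it replaced:
    #
    #   merged = self.pool.get('purchase.order').do_merge(cr, uid, [7, 8], context=context)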
def _set_po_lines_invoiced(self, cr, uid, ids, context=None):
for po in self.browse(cr, uid, ids, context=context):
is_invoiced = []
if po.invoice_method == 'picking':
# We determine the invoiced state of the PO line based on the invoiced state
# of the associated moves. This should cover all possible cases:
# - all moves are done and invoiced
# - a PO line is split into multiple moves (e.g. if multiple pickings): some
# pickings are done, some are in progress, some are cancelled
for po_line in po.order_line:
if (po_line.move_ids and
all(move.state in ('done', 'cancel') for move in po_line.move_ids) and
not all(move.state == 'cancel' for move in po_line.move_ids) and
all(move.invoice_state == 'invoiced' for move in po_line.move_ids if move.state == 'done')
and po_line.invoice_lines and all(line.invoice_id.state not in ['draft', 'cancel'] for line in po_line.invoice_lines)):
is_invoiced.append(po_line.id)
elif po_line.product_id.type == 'service':
is_invoiced.append(po_line.id)
else:
for po_line in po.order_line:
if (po_line.invoice_lines and
all(line.invoice_id.state not in ['draft', 'cancel'] for line in po_line.invoice_lines)):
is_invoiced.append(po_line.id)
if is_invoiced:
self.pool['purchase.order.line'].write(cr, uid, is_invoiced, {'invoiced': True})
workflow.trg_write(uid, 'purchase.order', po.id, cr)
class purchase_order_line(osv.osv):
def _calc_line_base_price(self, cr, uid, line, context=None):
"""Return the base price of the line to be used for tax calculation.
This function can be extended by other modules to modify this base
price (adding a discount, for example).
"""
return line.price_unit
def _calc_line_quantity(self, cr, uid, line, context=None):
"""Return the base quantity of the line to be used for the subtotal.
This function can be extended by other modules to modify this base
quantity (adding for example offers 3x2 and so on).
"""
return line.product_qty
def _amount_line(self, cr, uid, ids, prop, arg, context=None):
res = {}
cur_obj=self.pool.get('res.currency')
tax_obj = self.pool.get('account.tax')
for line in self.browse(cr, uid, ids, context=context):
line_price = self._calc_line_base_price(cr, uid, line,
context=context)
line_qty = self._calc_line_quantity(cr, uid, line,
context=context)
taxes = tax_obj.compute_all(cr, uid, line.taxes_id, line_price,
line_qty, line.product_id,
line.order_id.partner_id)
cur = line.order_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
return res
def _get_uom_id(self, cr, uid, context=None):
try:
proxy = self.pool.get('ir.model.data')
result = proxy.get_object_reference(cr, uid, 'product', 'product_uom_unit')
return result[1]
except Exception, ex:
return False
_columns = {
'name': fields.text('Description', required=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'date_planned': fields.date('Scheduled Date', required=True, select=True),
'taxes_id': fields.many2many('account.tax', 'purchase_order_taxe', 'ord_id', 'tax_id', 'Taxes'),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_id': fields.many2one('product.product', 'Product', domain=[('purchase_ok','=',True)], change_default=True),
'move_ids': fields.one2many('stock.move', 'purchase_line_id', 'Reservation', readonly=True, ondelete='set null'),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute= dp.get_precision('Account')),
'order_id': fields.many2one('purchase.order', 'Order Reference', select=True, required=True, ondelete='cascade'),
'account_analytic_id':fields.many2one('account.analytic.account', 'Analytic Account',),
'company_id': fields.related('order_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
'state': fields.selection([('draft', 'Draft'), ('confirmed', 'Confirmed'), ('done', 'Done'), ('cancel', 'Cancelled')],
'Status', required=True, readonly=True, copy=False,
                                      help=' * The \'Draft\' status is set automatically when the purchase order is in the draft state. \
                                      \n* The \'Confirmed\' status is set automatically when the purchase order is confirmed. \
                                      \n* The \'Done\' status is set automatically when the purchase order is done. \
                                      \n* The \'Cancelled\' status is set automatically when the user cancels the purchase order.'),
'invoice_lines': fields.many2many('account.invoice.line', 'purchase_order_line_invoice_rel',
'order_line_id', 'invoice_id', 'Invoice Lines',
readonly=True, copy=False),
'invoiced': fields.boolean('Invoiced', readonly=True, copy=False),
'partner_id': fields.related('order_id', 'partner_id', string='Partner', readonly=True, type="many2one", relation="res.partner", store=True),
'date_order': fields.related('order_id', 'date_order', string='Order Date', readonly=True, type="datetime"),
'procurement_ids': fields.one2many('procurement.order', 'purchase_line_id', string='Associated procurements'),
}
_defaults = {
'product_uom' : _get_uom_id,
'product_qty': lambda *a: 1.0,
'state': lambda *args: 'draft',
'invoiced': lambda *a: 0,
}
_table = 'purchase_order_line'
_name = 'purchase.order.line'
_description = 'Purchase Order Line'
def unlink(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
if line.order_id.state in ['approved', 'done'] and line.state not in ['draft', 'cancel']:
raise osv.except_osv(_('Invalid Action!'), _('Cannot delete a purchase order line which is in state \'%s\'.') %(line.state,))
procurement_obj = self.pool.get('procurement.order')
procurement_ids_to_except = procurement_obj.search(cr, uid, [('purchase_line_id', 'in', ids)], context=context)
if procurement_ids_to_except:
for po_id in procurement_ids_to_except:
procurement_obj.message_post(cr, uid, po_id, body=_('Purchase order line deleted.'), context=context)
procurement_obj.write(cr, uid, procurement_ids_to_except, {'state': 'exception'}, context=context)
return super(purchase_order_line, self).unlink(cr, uid, ids, context=context)
def onchange_product_uom(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, state='draft', context=None):
"""
onchange handler of product_uom.
"""
if context is None:
context = {}
if not uom_id:
return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
context = dict(context, purchase_uom_check=True)
return self.onchange_product_id(cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=date_order, fiscal_position_id=fiscal_position_id, date_planned=date_planned,
name=name, price_unit=price_unit, state=state, context=context)
def _get_date_planned(self, cr, uid, supplier_info, date_order_str, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for
PO Lines that correspond to the given product.supplierinfo,
when ordered at `date_order_str`.
:param browse_record | False supplier_info: product.supplierinfo, used to
determine delivery delay (if False, default delay = 0)
:param str date_order_str: date of order field, as a string in
DEFAULT_SERVER_DATETIME_FORMAT
:rtype: datetime
:return: desired Schedule Date for the PO line
"""
supplier_delay = int(supplier_info.delay) if supplier_info else 0
return datetime.strptime(date_order_str, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta(days=supplier_delay)
def action_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
# We will group by PO first, so we do the check only once for each PO
purchase_orders = list(set([x.order_id for x in self.browse(cr, uid, ids, context=context)]))
for purchase in purchase_orders:
if all([l.state == 'cancel' for l in purchase.order_line]):
self.pool.get('purchase.order').action_cancel(cr, uid, [purchase.id], context=context)
def _check_product_uom_group(self, cr, uid, context=None):
group_uom = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'group_uom')
res = [user for user in group_uom.users if user.id == uid]
        return bool(res)
def onchange_product_id(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, state='draft', context=None):
"""
onchange handler of product_id.
"""
if context is None:
context = {}
res = {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
if not product_id:
if not uom_id:
uom_id = self.default_get(cr, uid, ['product_uom'], context=context).get('product_uom', False)
res['value']['product_uom'] = uom_id
return res
product_product = self.pool.get('product.product')
product_uom = self.pool.get('product.uom')
res_partner = self.pool.get('res.partner')
product_pricelist = self.pool.get('product.pricelist')
account_fiscal_position = self.pool.get('account.fiscal.position')
account_tax = self.pool.get('account.tax')
# - check for the presence of partner_id and pricelist_id
#if not partner_id:
# raise osv.except_osv(_('No Partner!'), _('Select a partner in purchase order to choose a product.'))
#if not pricelist_id:
# raise osv.except_osv(_('No Pricelist !'), _('Select a price list in the purchase order form before choosing a product.'))
# - determine name and notes based on product in partner lang.
context_partner = context.copy()
if partner_id:
lang = res_partner.browse(cr, uid, partner_id).lang
context_partner.update( {'lang': lang, 'partner_id': partner_id} )
product = product_product.browse(cr, uid, product_id, context=context_partner)
        # call name_get() with the partner in the context so the name and description from the matching seller_ids entry can be used when available
if not name or not uom_id:
# The 'or not uom_id' part of the above condition can be removed in master. See commit message of the rev. introducing this line.
dummy, name = product_product.name_get(cr, uid, product_id, context=context_partner)[0]
if product.description_purchase:
name += '\n' + product.description_purchase
res['value'].update({'name': name})
# - set a domain on product_uom
res['domain'] = {'product_uom': [('category_id','=',product.uom_id.category_id.id)]}
# - check that uom and product uom belong to the same category
product_uom_po_id = product.uom_po_id.id
if not uom_id:
uom_id = product_uom_po_id
if product.uom_id.category_id.id != product_uom.browse(cr, uid, uom_id, context=context).category_id.id:
if context.get('purchase_uom_check') and self._check_product_uom_group(cr, uid, context=context):
res['warning'] = {'title': _('Warning!'), 'message': _('Selected Unit of Measure does not belong to the same category as the product Unit of Measure.')}
uom_id = product_uom_po_id
res['value'].update({'product_uom': uom_id})
# - determine product_qty and date_planned based on seller info
if not date_order:
date_order = fields.datetime.now()
supplierinfo = False
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Product Unit of Measure')
for supplier in product.seller_ids:
if partner_id and (supplier.name.id == partner_id):
supplierinfo = supplier
if supplierinfo.product_uom.id != uom_id:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier only sells this product by %s') % supplierinfo.product_uom.name }
min_qty = product_uom._compute_qty(cr, uid, supplierinfo.product_uom.id, supplierinfo.min_qty, to_uom_id=uom_id)
                if float_compare(min_qty, qty, precision_digits=precision) == 1: # If the supplier's minimal quantity is greater than the quantity requested, fall back to the minimal quantity.
if qty:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier has a minimal quantity set to %s %s, you should not purchase less.') % (supplierinfo.min_qty, supplierinfo.product_uom.name)}
qty = min_qty
dt = self._get_date_planned(cr, uid, supplierinfo, date_order, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
qty = qty or 1.0
res['value'].update({'date_planned': date_planned or dt})
if qty:
res['value'].update({'product_qty': qty})
price = price_unit
if price_unit is False or price_unit is None:
# - determine price_unit and taxes_id
if pricelist_id:
date_order_str = datetime.strptime(date_order, DEFAULT_SERVER_DATETIME_FORMAT).strftime(DEFAULT_SERVER_DATE_FORMAT)
price = product_pricelist.price_get(cr, uid, [pricelist_id],
product.id, qty or 1.0, partner_id or False, {'uom': uom_id, 'date': date_order_str})[pricelist_id]
else:
price = product.standard_price
if uid == SUPERUSER_ID:
company_id = self.pool['res.users'].browse(cr, uid, [uid]).company_id.id
taxes = product.supplier_taxes_id.filtered(lambda r: r.company_id.id == company_id)
else:
taxes = product.supplier_taxes_id
fpos = fiscal_position_id and account_fiscal_position.browse(cr, uid, fiscal_position_id, context=context) or False
taxes_ids = account_fiscal_position.map_tax(cr, uid, fpos, taxes, context=context)
price = self.pool['account.tax']._fix_tax_included_price(cr, uid, price, product.supplier_taxes_id, taxes_ids)
res['value'].update({'price_unit': price, 'taxes_id': taxes_ids})
return res
product_id_change = onchange_product_id
product_uom_change = onchange_product_uom
def action_confirm(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'confirmed'}, context=context)
return True
class procurement_rule(osv.osv):
_inherit = 'procurement.rule'
def _get_action(self, cr, uid, context=None):
return [('buy', _('Buy'))] + super(procurement_rule, self)._get_action(cr, uid, context=context)
class procurement_order(osv.osv):
_inherit = 'procurement.order'
_columns = {
'purchase_line_id': fields.many2one('purchase.order.line', 'Purchase Order Line'),
'purchase_id': fields.related('purchase_line_id', 'order_id', type='many2one', relation='purchase.order', string='Purchase Order'),
}
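    # When a 'buy' procurement is cancelled, shrink, cancel or even delete the purchase
    # order line it generated, depending on the remaining quantity.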
def propagate_cancel(self, cr, uid, procurement, context=None):
if procurement.rule_id.action == 'buy' and procurement.purchase_line_id:
purchase_line_obj = self.pool.get('purchase.order.line')
if procurement.purchase_line_id.state not in ('draft', 'cancel'):
raise osv.except_osv(_('Error!'),
                    _('Cannot cancel this procurement as the related purchase order has already been confirmed. Please cancel the purchase order first.'))
new_qty, new_price = self._calc_new_qty_price(cr, uid, procurement, cancel=True, context=context)
if new_qty != procurement.purchase_line_id.product_qty:
purchase_line_obj.write(cr, uid, [procurement.purchase_line_id.id], {'product_qty': new_qty, 'price_unit': new_price}, context=context)
if float_compare(new_qty, 0.0, precision_rounding=procurement.product_uom.rounding) != 1:
purchase_line_obj.action_cancel(cr, uid, [procurement.purchase_line_id.id], context=context)
purchase_line_obj.unlink(cr, uid, [procurement.purchase_line_id.id], context=context)
return super(procurement_order, self).propagate_cancel(cr, uid, procurement, context=context)
def _run(self, cr, uid, procurement, context=None):
if procurement.rule_id and procurement.rule_id.action == 'buy':
#make a purchase order for the procurement
return self.make_po(cr, uid, [procurement.id], context=context)[procurement.id]
return super(procurement_order, self)._run(cr, uid, procurement, context=context)
def _check(self, cr, uid, procurement, context=None):
if procurement.purchase_line_id:
if procurement.purchase_line_id.order_id.shipped:
return True
elif procurement.move_ids:
moves = self.pool.get('stock.move').browse(cr, uid, [x.id for x in procurement.move_ids], context=context)
return all(move.state == 'done' for move in moves)
return super(procurement_order, self)._check(cr, uid, procurement, context=context)
def _check_supplier_info(self, cr, uid, ids, context=None):
''' Check the supplier info field of a product and write an error message on the procurement if needed.
Returns True if all needed information is there, False if some configuration mistake is detected.
'''
partner_obj = self.pool.get('res.partner')
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
for procurement in self.browse(cr, uid, ids, context=context):
message = ''
            partner = procurement.product_id.seller_id  # Take the main supplier of the procurement's product.
if not procurement.product_id.seller_ids:
                message = _('No supplier defined for this product!')
elif not partner:
message = _('No default supplier defined for this product')
elif not partner_obj.address_get(cr, uid, [partner.id], ['delivery'])['delivery']:
message = _('No address defined for the supplier')
if message:
if procurement.message != message:
cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
return False
if user.company_id and user.company_id.partner_id:
if partner.id == user.company_id.partner_id.id:
                    raise osv.except_osv(_('Configuration Error!'), _('The product "%s" has been defined with your company as reseller which seems to be a configuration error!') % procurement.product_id.name)
return True
def create_procurement_purchase_order(self, cr, uid, procurement, po_vals, line_vals, context=None):
"""Create the purchase order from the procurement, using
the provided field values, after adding the given purchase
order line in the purchase order.
:params procurement: the procurement object generating the purchase order
:params dict po_vals: field values for the new purchase order (the
``order_line`` field will be overwritten with one
single line, as passed in ``line_vals``).
:params dict line_vals: field values of the single purchase order line that
the purchase order will contain.
:return: id of the newly created purchase order
:rtype: int
"""
po_vals.update({'order_line': [(0,0,line_vals)]})
return self.pool.get('purchase.order').create(cr, uid, po_vals, context=context)
def _get_purchase_schedule_date(self, cr, uid, procurement, company, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for the
Purchase Order Lines created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
        :param browse_record company: the company to which the new PO will belong.
:rtype: datetime
:return: the desired Schedule Date for the PO lines
"""
procurement_date_planned = datetime.strptime(procurement.date_planned, DEFAULT_SERVER_DATETIME_FORMAT)
schedule_date = (procurement_date_planned - relativedelta(days=company.po_lead))
return schedule_date
def _get_purchase_order_date(self, cr, uid, procurement, company, schedule_date, context=None):
"""Return the datetime value to use as Order Date (``date_order``) for the
Purchase Order created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
        :param browse_record company: the company to which the new PO will belong.
:param datetime schedule_date: desired Scheduled Date for the Purchase Order lines.
:rtype: datetime
:return: the desired Order Date for the PO
"""
seller_delay = int(procurement.product_id.seller_delay)
return schedule_date - relativedelta(days=seller_delay)
def _get_product_supplier(self, cr, uid, procurement, context=None):
''' returns the main supplier of the procurement's product given as argument'''
supplierinfo = self.pool['product.supplierinfo']
company_supplier = supplierinfo.search(cr, uid,
[('product_tmpl_id', '=', procurement.product_id.product_tmpl_id.id), ('company_id', '=', procurement.company_id.id)], limit=1, context=context)
if company_supplier:
return supplierinfo.browse(cr, uid, company_supplier[0], context=context).name
return procurement.product_id.seller_id
def _get_po_line_values_from_proc(self, cr, uid, procurement, partner, company, schedule_date, context=None):
if context is None:
context = {}
uom_obj = self.pool.get('product.uom')
pricelist_obj = self.pool.get('product.pricelist')
prod_obj = self.pool.get('product.product')
acc_pos_obj = self.pool.get('account.fiscal.position')
po_obj = self.pool.get('purchase.order')
seller_qty = procurement.product_id.seller_qty if procurement.location_id.usage != 'customer' else 0.0
pricelist_id = partner.property_product_pricelist_purchase.id
uom_id = procurement.product_id.uom_po_id.id
qty = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, uom_id)
if seller_qty:
qty = max(qty, seller_qty)
price = pricelist_obj.price_get(cr, uid, [pricelist_id], procurement.product_id.id, qty, partner.id, dict(context, uom=uom_id))[pricelist_id]
#Passing partner_id to context for purchase order line integrity of Line name
new_context = context.copy()
new_context.update({'lang': partner.lang, 'partner_id': partner.id})
product = prod_obj.browse(cr, uid, procurement.product_id.id, context=new_context)
taxes_ids = procurement.product_id.supplier_taxes_id
taxes_ids = taxes_ids.filtered(lambda x: x.company_id.id == procurement.company_id.id)
# It is necessary to have the appropriate fiscal position to get the right tax mapping
fiscal_position = False
fiscal_position_id = po_obj.onchange_partner_id(cr, uid, None, partner.id, context=dict(context, company_id=procurement.company_id.id))['value']['fiscal_position']
if fiscal_position_id:
fiscal_position = acc_pos_obj.browse(cr, uid, fiscal_position_id, context=context)
taxes = acc_pos_obj.map_tax(cr, uid, fiscal_position, taxes_ids, context=context)
name = product.display_name
if product.description_purchase:
name += '\n' + product.description_purchase
return {
'name': name,
'product_qty': qty,
'product_id': procurement.product_id.id,
'product_uom': uom_id,
'price_unit': price or 0.0,
'date_planned': schedule_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'taxes_id': [(6, 0, taxes)],
}
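    # Compute the quantity and price a PO line should have after adding the given procurement
    # (or removing it, with cancel=True), honouring the supplier's minimal quantity except for dropshipping.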
def _calc_new_qty_price(self, cr, uid, procurement, po_line=None, cancel=False, context=None):
if not po_line:
po_line = procurement.purchase_line_id
uom_obj = self.pool.get('product.uom')
qty = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty,
procurement.product_id.uom_po_id.id)
if cancel:
qty = -qty
# Make sure we use the minimum quantity of the partner corresponding to the PO
# This does not apply in case of dropshipping
supplierinfo_min_qty = 0.0
if po_line.order_id.location_id.usage != 'customer':
if po_line.product_id.seller_id.id == po_line.order_id.partner_id.id:
supplierinfo_min_qty = po_line.product_id.seller_qty
else:
supplierinfo_obj = self.pool.get('product.supplierinfo')
supplierinfo_ids = supplierinfo_obj.search(cr, uid, [('name', '=', po_line.order_id.partner_id.id), ('product_tmpl_id', '=', po_line.product_id.product_tmpl_id.id)])
supplierinfo_min_qty = supplierinfo_obj.browse(cr, uid, supplierinfo_ids).min_qty
if supplierinfo_min_qty == 0.0:
qty += po_line.product_qty
else:
# Recompute quantity by adding existing running procurements.
for proc in po_line.procurement_ids:
qty += uom_obj._compute_qty(cr, uid, proc.product_uom.id, proc.product_qty,
proc.product_id.uom_po_id.id) if proc.state == 'running' else 0.0
qty = max(qty, supplierinfo_min_qty) if qty > 0.0 else 0.0
price = po_line.price_unit
if qty != po_line.product_qty:
pricelist_obj = self.pool.get('product.pricelist')
pricelist_id = po_line.order_id.partner_id.property_product_pricelist_purchase.id
price = pricelist_obj.price_get(cr, uid, [pricelist_id], procurement.product_id.id, qty, po_line.order_id.partner_id.id, {'uom': procurement.product_id.uom_po_id.id})[pricelist_id]
return qty, price
def update_origin_po(self, cr, uid, po, proc, context=None):
pass
def make_po(self, cr, uid, ids, context=None):
""" Resolve the purchase from procurement, which may result in a new PO creation, a new PO line creation or a quantity change on existing PO line.
        Note that some operations (such as the PO creation) are made as SUPERUSER because the current user may not have the rights to do them (e.g. an MTO product triggered by a sale order).
@return: dictionary giving for each procurement its related resolving PO line.
"""
res = {}
company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
po_obj = self.pool.get('purchase.order')
po_line_obj = self.pool.get('purchase.order.line')
seq_obj = self.pool.get('ir.sequence')
pass_ids = []
linked_po_ids = []
sum_po_line_ids = []
for procurement in self.browse(cr, uid, ids, context=context):
ctx_company = dict(context or {}, force_company=procurement.company_id.id)
partner = self._get_product_supplier(cr, uid, procurement, context=ctx_company)
if not partner:
                self.message_post(cr, uid, [procurement.id], _('There is no supplier associated with product %s') % (procurement.product_id.name))
res[procurement.id] = False
else:
schedule_date = self._get_purchase_schedule_date(cr, uid, procurement, company, context=context)
purchase_date = self._get_purchase_order_date(cr, uid, procurement, company, schedule_date, context=context)
line_vals = self._get_po_line_values_from_proc(cr, uid, procurement, partner, company, schedule_date, context=ctx_company)
#look for any other draft PO for the same supplier, to attach the new line on instead of creating a new draft one
available_draft_po_ids = po_obj.search(cr, uid, [
('partner_id', '=', partner.id), ('state', '=', 'draft'), ('picking_type_id', '=', procurement.rule_id.picking_type_id.id),
('location_id', '=', procurement.location_id.id), ('company_id', '=', procurement.company_id.id), ('dest_address_id', '=', procurement.partner_dest_id.id)], context=context)
if available_draft_po_ids:
po_id = available_draft_po_ids[0]
po_rec = po_obj.browse(cr, uid, po_id, context=context)
po_to_update = {'origin': ', '.join(filter(None, set([po_rec.origin, procurement.origin])))}
                    # if the product has to be ordered earlier than those in the existing PO, we move the order date forward to avoid ordering it too late
if datetime.strptime(po_rec.date_order, DEFAULT_SERVER_DATETIME_FORMAT) > purchase_date:
po_to_update.update({'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})
po_obj.write(cr, uid, [po_id], po_to_update, context=context)
#look for any other PO line in the selected PO with same product and UoM to sum quantities instead of creating a new po line
available_po_line_ids = po_line_obj.search(cr, uid, [('order_id', '=', po_id), ('product_id', '=', line_vals['product_id']), ('product_uom', '=', line_vals['product_uom'])], context=context)
if available_po_line_ids:
po_line = po_line_obj.browse(cr, uid, available_po_line_ids[0], context=context)
po_line_id = po_line.id
new_qty, new_price = self._calc_new_qty_price(cr, uid, procurement, po_line=po_line, context=context)
if new_qty > po_line.product_qty:
po_line_obj.write(cr, SUPERUSER_ID, po_line.id, {'product_qty': new_qty, 'price_unit': new_price}, context=context)
self.update_origin_po(cr, uid, po_rec, procurement, context=context)
sum_po_line_ids.append(procurement.id)
else:
line_vals.update(order_id=po_id)
po_line_id = po_line_obj.create(cr, SUPERUSER_ID, line_vals, context=context)
linked_po_ids.append(procurement.id)
else:
name = seq_obj.get(cr, uid, 'purchase.order', context=context) or _('PO: %s') % procurement.name
po_vals = {
'name': name,
'origin': procurement.origin,
'partner_id': partner.id,
'location_id': procurement.location_id.id,
'picking_type_id': procurement.rule_id.picking_type_id.id,
'pricelist_id': partner.property_product_pricelist_purchase.id,
'currency_id': partner.property_product_pricelist_purchase and partner.property_product_pricelist_purchase.currency_id.id or procurement.company_id.currency_id.id,
'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'company_id': procurement.company_id.id,
'fiscal_position': po_obj.onchange_partner_id(cr, uid, None, partner.id, context=dict(context, company_id=procurement.company_id.id))['value']['fiscal_position'],
'payment_term_id': partner.property_supplier_payment_term.id or False,
'dest_address_id': procurement.partner_dest_id.id,
}
po_id = self.create_procurement_purchase_order(cr, SUPERUSER_ID, procurement, po_vals, line_vals, context=dict(context, company_id=po_vals['company_id']))
po_line_id = po_obj.browse(cr, uid, po_id, context=context).order_line[0].id
pass_ids.append(procurement.id)
res[procurement.id] = po_line_id
self.write(cr, uid, [procurement.id], {'purchase_line_id': po_line_id}, context=context)
if pass_ids:
self.message_post(cr, uid, pass_ids, body=_("Draft Purchase Order created"), context=context)
if linked_po_ids:
self.message_post(cr, uid, linked_po_ids, body=_("Purchase line created and linked to an existing Purchase Order"), context=context)
if sum_po_line_ids:
self.message_post(cr, uid, sum_po_line_ids, body=_("Quantity added in existing Purchase Order Line"), context=context)
return res
class mail_mail(osv.Model):
_name = 'mail.mail'
_inherit = 'mail.mail'
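    # Once the RFQ e-mail of a draft purchase order has actually been sent, fire the
    # 'send_rfq' workflow signal so the order leaves the draft state.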
def _postprocess_sent_message(self, cr, uid, mail, context=None, mail_sent=True):
if mail_sent and mail.model == 'purchase.order':
obj = self.pool.get('purchase.order').browse(cr, uid, mail.res_id, context=context)
if obj.state == 'draft':
self.pool.get('purchase.order').signal_workflow(cr, uid, [mail.res_id], 'send_rfq')
return super(mail_mail, self)._postprocess_sent_message(cr, uid, mail=mail, context=context, mail_sent=mail_sent)
class product_template(osv.Model):
_name = 'product.template'
_inherit = 'product.template'
def _get_buy_route(self, cr, uid, context=None):
buy_route = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'purchase.route_warehouse0_buy')
if buy_route:
return [buy_route]
return []
def _purchase_count(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, 0)
for template in self.browse(cr, uid, ids, context=context):
res[template.id] = sum([p.purchase_count for p in template.product_variant_ids])
return res
_columns = {
'purchase_ok': fields.boolean('Can be Purchased', help="Specify if the product can be selected in a purchase order line."),
'purchase_count': fields.function(_purchase_count, string='# Purchases', type='integer'),
}
_defaults = {
'purchase_ok': 1,
'route_ids': _get_buy_route,
}
def action_view_purchases(self, cr, uid, ids, context=None):
products = self._get_products(cr, uid, ids, context=context)
result = self._get_act_window_dict(cr, uid, 'purchase.action_purchase_line_product_tree', context=context)
result['domain'] = "[('product_id','in',[" + ','.join(map(str, products)) + "])]"
return result
class product_product(osv.Model):
_name = 'product.product'
_inherit = 'product.product'
def _purchase_count(self, cr, uid, ids, field_name, arg, context=None):
Purchase = self.pool['purchase.order']
return {
product_id: Purchase.search_count(cr,uid, [('order_line.product_id', '=', product_id)], context=context)
for product_id in ids
}
def action_view_purchases(self, cr, uid, ids, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
result = self.pool['product.template']._get_act_window_dict(cr, uid, 'purchase.action_purchase_line_product_tree', context=context)
result['domain'] = "[('product_id','in',[" + ','.join(map(str, ids)) + "])]"
return result
_columns = {
'purchase_count': fields.function(_purchase_count, string='# Purchases', type='integer'),
}
class mail_compose_message(osv.Model):
_inherit = 'mail.compose.message'
def send_mail(self, cr, uid, ids, context=None):
context = context or {}
if context.get('default_model') == 'purchase.order' and context.get('default_res_id'):
context = dict(context, mail_post_autofollow=True)
self.pool.get('purchase.order').signal_workflow(cr, uid, [context['default_res_id']], 'send_rfq')
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
class account_invoice(osv.Model):
""" Override account_invoice to add Chatter messages on the related purchase
orders, logging the invoice receipt or payment. """
_inherit = 'account.invoice'
def invoice_validate(self, cr, uid, ids, context=None):
res = super(account_invoice, self).invoice_validate(cr, uid, ids, context=context)
purchase_order_obj = self.pool.get('purchase.order')
# read access on purchase.order object is not required
if not purchase_order_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
user_id = SUPERUSER_ID
else:
user_id = uid
po_ids = purchase_order_obj.search(cr, user_id, [('invoice_ids', 'in', ids)], context=context)
for po_id in po_ids:
purchase_order_obj.message_post(cr, user_id, po_id, body=_("Invoice received"), context=context)
purchase_order_obj._set_po_lines_invoiced(cr, user_id, [po_id], context=context)
return res
def confirm_paid(self, cr, uid, ids, context=None):
res = super(account_invoice, self).confirm_paid(cr, uid, ids, context=context)
purchase_order_obj = self.pool.get('purchase.order')
# read access on purchase.order object is not required
if not purchase_order_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
user_id = SUPERUSER_ID
else:
user_id = uid
po_ids = purchase_order_obj.search(cr, user_id, [('invoice_ids', 'in', ids)], context=context)
for po_id in po_ids:
purchase_order_obj.message_post(cr, user_id, po_id, body=_("Invoice paid"), context=context)
return res
class account_invoice_line(osv.Model):
""" Override account_invoice_line to add the link to the purchase order line it is related to"""
_inherit = 'account.invoice.line'
_columns = {
'purchase_line_id': fields.many2one('purchase.order.line',
'Purchase Order Line', ondelete='set null', select=True,
readonly=True),
}
|
from django.db import models
from django.db.models import Count
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.core.exceptions import ValidationError
from django.utils.functional import cached_property
from taiga.base.utils.slug import slugify_uniquely
from taiga.base.utils.dicts import dict_sum
from taiga.projects.notifications.mixins import WatchedModelMixin
import itertools
import datetime
class Milestone(WatchedModelMixin, models.Model):
name = models.CharField(max_length=200, db_index=True, null=False, blank=False,
verbose_name=_("name"))
# TODO: Change the unique restriction to a unique together with the project id
slug = models.SlugField(max_length=250, db_index=True, null=False, blank=True,
verbose_name=_("slug"))
owner = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True,
related_name="owned_milestones", verbose_name=_("owner"))
project = models.ForeignKey("projects.Project", null=False, blank=False,
related_name="milestones", verbose_name=_("project"))
estimated_start = models.DateField(verbose_name=_("estimated start date"))
estimated_finish = models.DateField(verbose_name=_("estimated finish date"))
created_date = models.DateTimeField(null=False, blank=False,
verbose_name=_("created date"),
default=timezone.now)
modified_date = models.DateTimeField(null=False, blank=False,
verbose_name=_("modified date"))
closed = models.BooleanField(default=False, null=False, blank=True,
verbose_name=_("is closed"))
disponibility = models.FloatField(default=0.0, null=True, blank=True,
verbose_name=_("disponibility"))
order = models.PositiveSmallIntegerField(default=1, null=False, blank=False,
verbose_name=_("order"))
_importing = None
_total_closed_points_by_date = None
class Meta:
verbose_name = "milestone"
verbose_name_plural = "milestones"
ordering = ["project", "created_date"]
unique_together = [("name", "project"), ("slug", "project")]
permissions = (
("view_milestone", "Can view milestone"),
)
def __str__(self):
return self.name
def __repr__(self):
return "<Milestone {0}>".format(self.id)
def clean(self):
        # Ensure the estimated start date is not later than the estimated finish date.
if self.estimated_start and self.estimated_finish and self.estimated_start > self.estimated_finish:
raise ValidationError(_('The estimated start must be previous to the estimated finish.'))
def save(self, *args, **kwargs):
if not self._importing or not self.modified_date:
self.modified_date = timezone.now()
if not self.slug:
self.slug = slugify_uniquely(self.name, self.__class__)
super().save(*args, **kwargs)
@cached_property
def cached_user_stories(self):
return (self.user_stories.prefetch_related("role_points", "role_points__points")
.annotate(num_tasks=Count("tasks")))
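    # Sum the story points of the given user stories per role: flatten every role_points
    # entry into {role_id: value} dicts and merge them with dict_sum.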
def _get_user_stories_points(self, user_stories):
role_points = [us.role_points.all() for us in user_stories]
flat_role_points = itertools.chain(*role_points)
flat_role_dicts = map(lambda x: {x.role_id: x.points.value if x.points.value else 0}, flat_role_points)
return dict_sum(*flat_role_dicts)
@property
def total_points(self):
return self._get_user_stories_points(
[us for us in self.cached_user_stories]
)
@property
def closed_points(self):
return self._get_user_stories_points(
[us for us in self.cached_user_stories if us.is_closed]
)
def total_closed_points_by_date(self, date):
# Milestone instance will keep a cache of the total closed points by date
if self._total_closed_points_by_date is None:
self._total_closed_points_by_date = {}
# We need to keep the milestone user stories indexed by id in a dict
user_stories = {}
for us in self.cached_user_stories:
us._total_us_points = sum(self._get_user_stories_points([us]).values())
user_stories[us.id] = us
            tasks = (self.tasks
                     .select_related("user_story")
                     .exclude(finished_date__isnull=True)
                     .exclude(user_story__isnull=True))
            # For each finished task we try to compute the proportional part of points
            # it represents from its user story and add it to the closed points
            # for that date.
            # This calculation is the total user story points divided by the story's number of tasks.
for task in tasks:
user_story = user_stories.get(task.user_story.id, None)
if user_story is None:
total_us_points = 0
us_tasks_counter = 0
else:
total_us_points = user_story._total_us_points
us_tasks_counter = user_story.num_tasks
# If the task was finished before starting the sprint it needs
# to be included
finished_date = task.finished_date.date()
if finished_date < self.estimated_start:
finished_date = self.estimated_start
points_by_date = self._total_closed_points_by_date.get(finished_date, 0)
if us_tasks_counter != 0:
points_by_date += total_us_points / us_tasks_counter
self._total_closed_points_by_date[finished_date] = points_by_date
            # At this point self._total_closed_points_by_date holds a dict where the
            # finished date of a task is the key and the value is the increment of points.
            # We transform this dict of increments into an accumulated one covering
            # all the dates of the sprint.
            accumulated_date_points = 0
            current_date = self.estimated_start
            while current_date <= self.estimated_finish:
                accumulated_date_points += self._total_closed_points_by_date.get(current_date, 0)
                self._total_closed_points_by_date[current_date] = accumulated_date_points
                current_date = current_date + datetime.timedelta(days=1)
return self._total_closed_points_by_date.get(date, 0)
|
from braces.views import CsrfExemptMixin
from braces.views import JSONResponseMixin
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from django.utils.decorators import method_decorator
from django.views import View
from core.localization.messages import PASSWORDS_UNEQUAL, SETTINGS_SAVED, OFM_PASSWORDS_UNEQUAL, NOT_LOGGED_IN
from core.models import ParsingSetting
from users.models import OFMUser
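# Update the user's e-mail address and/or password; the e-mail must not belong to another
# account and both password fields must match.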
def _handle_account_data_change(request, email, password, password2):
if email:
if OFMUser.objects.filter(email=email).exclude(id=request.user.id).exists():
messages.error(request, "Ein anderer Account existiert bereits mit dieser E-Mail-Adresse.")
return
request.user.email = email
if password and password2:
if password != password2:
messages.error(request, PASSWORDS_UNEQUAL)
return
request.user.set_password(password)
request.user.save()
messages.success(request, SETTINGS_SAVED)
def _handle_ofm_data_change(request, ofm_password, ofm_password2):
if ofm_password != ofm_password2:
messages.error(request, OFM_PASSWORDS_UNEQUAL)
return redirect('core:account:settings')
request.user.ofm_password = ofm_password
request.user.save()
messages.success(request, SETTINGS_SAVED)
def settings_view(request):
if request.user.is_authenticated():
if request.POST:
email = request.POST.get('email')
password = request.POST.get('password')
password2 = request.POST.get('password2')
ofm_password = request.POST.get('ofm_password')
ofm_password2 = request.POST.get('ofm_password2')
if email or (password and password2):
_handle_account_data_change(request, email, password, password2)
elif ofm_password and ofm_password2:
_handle_ofm_data_change(request, ofm_password, ofm_password2)
else:
messages.error(request, "Die Daten waren nicht vollständig. Bitte überprüfe die Eingabe.")
return render(request, 'core/account/settings.html')
else:
messages.error(request, NOT_LOGGED_IN)
return redirect('core:account:login')
@method_decorator(login_required, name='dispatch')
class GetParsingSettingsView(JSONResponseMixin, View):
def get(self, request):
parsing_setting, _ = ParsingSetting.objects.get_or_create(user=request.user)
settings_dict = dict()
settings_dict['parsing_player_statistics'] = parsing_setting.parsing_chain_includes_player_statistics
settings_dict['parsing_awp_boundaries'] = parsing_setting.parsing_chain_includes_awp_boundaries
settings_dict['parsing_finances'] = parsing_setting.parsing_chain_includes_finances
settings_dict['parsing_matches'] = parsing_setting.parsing_chain_includes_matches
settings_dict['parsing_match_details'] = parsing_setting.parsing_chain_includes_match_details
settings_dict['parsing_match_details_only_for_current_matchday'] = \
parsing_setting.parsing_chain_includes_match_details_only_for_current_matchday
settings_dict['parsing_stadium_details'] = parsing_setting.parsing_chain_includes_stadium_details
settings_dict['parsing_transfers'] = parsing_setting.parsing_chain_includes_transfers
return self.render_json_response(settings_dict)
@method_decorator(login_required, name='dispatch')
class UpdateParsingSettingItemStatusView(CsrfExemptMixin, JSONResponseMixin, View):
def post(self, request):
parsing_setting, _ = ParsingSetting.objects.get_or_create(user=request.user)
parsing_player_statistics = self._validate_boolean(
request.POST.get('parsing_player_statistics',
default=parsing_setting.parsing_chain_includes_player_statistics))
parsing_awp_boundaries = self._validate_boolean(
request.POST.get('parsing_awp_boundaries',
default=parsing_setting.parsing_chain_includes_awp_boundaries))
parsing_finances = self._validate_boolean(
request.POST.get('parsing_finances',
default=parsing_setting.parsing_chain_includes_finances))
parsing_matches = self._validate_boolean(
request.POST.get('parsing_matches',
default=parsing_setting.parsing_chain_includes_matches))
parsing_match_details = self._validate_boolean(
request.POST.get('parsing_match_details',
default=parsing_setting.parsing_chain_includes_match_details))
parsing_match_details_only_for_current_matchday = self._validate_boolean(
request.POST.get('parsing_match_details_only_for_current_matchday',
default=parsing_setting.parsing_chain_includes_match_details_only_for_current_matchday)
)
parsing_stadium_details = self._validate_boolean(
request.POST.get('parsing_stadium_details',
default=parsing_setting.parsing_chain_includes_stadium_details))
parsing_transfers = self._validate_boolean(
request.POST.get('parsing_transfers',
default=parsing_setting.parsing_chain_includes_transfers))
if not parsing_matches:
parsing_match_details = False
parsing_stadium_details = False
if not parsing_match_details:
parsing_match_details_only_for_current_matchday = False
parsing_stadium_details = False
parsing_setting.parsing_chain_includes_player_statistics = parsing_player_statistics
parsing_setting.parsing_chain_includes_awp_boundaries = parsing_awp_boundaries
parsing_setting.parsing_chain_includes_finances = parsing_finances
parsing_setting.parsing_chain_includes_matches = parsing_matches
parsing_setting.parsing_chain_includes_match_details = parsing_match_details
parsing_setting.parsing_chain_includes_match_details_only_for_current_matchday = \
parsing_match_details_only_for_current_matchday
parsing_setting.parsing_chain_includes_stadium_details = parsing_stadium_details
parsing_setting.parsing_chain_includes_transfers = parsing_transfers
parsing_setting.save()
return self.render_json_response({'success': True})
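    # Checkbox values arrive as the strings 'true'/'false'; anything else (e.g. the stored
    # boolean used as fallback default) is passed through unchanged.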
@staticmethod
def _validate_boolean(value):
if value == 'true':
return True
elif value == 'false':
return False
else:
return value
|
import sys, unittest, math, os
import xml.etree.ElementTree as et
from JSBSim_utils import CreateFDM, SandBox, CopyAircraftDef
class TestAccelerometer(unittest.TestCase):
def setUp(self):
self.sandbox = SandBox()
def tearDown(self):
self.sandbox.erase()
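    # Copy the aircraft definition used by the script into the sandbox and attach the
    # 'accelerometers' system file to it.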
def AddAccelerometersToAircraft(self, script_path):
tree, aircraft_name, b = CopyAircraftDef(script_path, self.sandbox)
system_tag = et.SubElement(tree.getroot(), 'system')
system_tag.attrib['file'] = 'accelerometers'
tree.write(self.sandbox('aircraft', aircraft_name, aircraft_name+'.xml'))
def testOrbit(self):
script_name = 'ball_orbit.xml'
script_path = self.sandbox.path_to_jsbsim_file('scripts', script_name)
self.AddAccelerometersToAircraft(script_path)
# The time step is too small in ball_orbit so let's increase it to 0.1s
# for a quicker run
tree = et.parse(self.sandbox.elude(script_path))
run_tag = tree.getroot().find('./run')
run_tag.attrib['dt'] = '0.1'
tree.write(self.sandbox(script_name))
fdm = CreateFDM(self.sandbox)
fdm.set_aircraft_path('aircraft')
fdm.load_script(script_name)
# Switch the accel on
fdm.set_property_value('fcs/accelerometer/on', 1.0)
fdm.run_ic()
while fdm.run():
self.assertAlmostEqual(fdm.get_property_value('fcs/accelerometer/X'),
0.0, delta=1E-8)
self.assertAlmostEqual(fdm.get_property_value('fcs/accelerometer/Y'),
0.0, delta=1E-8)
self.assertAlmostEqual(fdm.get_property_value('fcs/accelerometer/Z'),
0.0, delta=1E-8)
self.assertAlmostEqual(fdm.get_property_value('accelerations/a-pilot-x-ft_sec2'),
0.0, delta=1E-8)
self.assertAlmostEqual(fdm.get_property_value('accelerations/a-pilot-y-ft_sec2'),
0.0, delta=1E-8)
self.assertAlmostEqual(fdm.get_property_value('accelerations/a-pilot-z-ft_sec2'),
0.0, delta=1E-8)
del fdm
def testOnGround(self):
script_name = 'c1721.xml'
script_path = self.sandbox.path_to_jsbsim_file('scripts', script_name)
self.AddAccelerometersToAircraft(script_path)
fdm = CreateFDM(self.sandbox)
fdm.set_aircraft_path('aircraft')
fdm.load_script(script_path)
# Switch the accel on
fdm.set_property_value('fcs/accelerometer/on', 1.0)
# Use the standard gravity (i.e. GM/r^2)
fdm.set_property_value('simulation/gravity-model', 0)
# Simplifies the transformation to compare the accelerometer with the
# gravity
fdm.set_property_value('ic/psi-true-rad', 0.0)
fdm.run_ic()
for i in xrange(500):
fdm.run()
ax = fdm.get_property_value('accelerations/udot-ft_sec2')
ay = fdm.get_property_value('accelerations/vdot-ft_sec2')
az = fdm.get_property_value('accelerations/wdot-ft_sec2')
g = fdm.get_property_value('accelerations/gravity-ft_sec2')
theta = fdm.get_property_value('attitude/theta-rad')
# There is a lag of one time step between the computations of the
# accelerations and the update of the accelerometer
fdm.run()
fax = fdm.get_property_value('fcs/accelerometer/X')
fay = fdm.get_property_value('fcs/accelerometer/Y')
faz = fdm.get_property_value('fcs/accelerometer/Z')
fax -= ax
faz -= az
self.assertAlmostEqual(fay, 0.0, delta=1E-6)
self.assertAlmostEqual(fax / (g * math.sin(theta)), 1.0, delta=1E-5)
self.assertAlmostEqual(faz / (g * math.cos(theta)), -1.0, delta=1E-7)
del fdm
def testSteadyFlight(self):
script_name = 'c1722.xml'
script_path = self.sandbox.path_to_jsbsim_file('scripts', script_name)
self.AddAccelerometersToAircraft(script_path)
fdm = CreateFDM(self.sandbox)
fdm.set_aircraft_path('aircraft')
fdm.load_script(script_path)
# Switch the accel on
fdm.set_property_value('fcs/accelerometer/on', 1.0)
# Use the standard gravity (i.e. GM/r^2)
fdm.set_property_value('simulation/gravity-model', 0)
# Simplifies the transformation to compare the accelerometer with the
# gravity
fdm.set_property_value('ic/psi-true-rad', 0.0)
fdm.run_ic()
while fdm.get_property_value('simulation/sim-time-sec') <= 0.5:
fdm.run()
fdm.set_property_value('simulation/do_simple_trim', 1)
ax = fdm.get_property_value('accelerations/udot-ft_sec2')
ay = fdm.get_property_value('accelerations/vdot-ft_sec2')
az = fdm.get_property_value('accelerations/wdot-ft_sec2')
g = fdm.get_property_value('accelerations/gravity-ft_sec2')
theta = fdm.get_property_value('attitude/theta-rad')
# There is a lag of one time step between the computations of the
# accelerations and the update of the accelerometer
fdm.run()
fax = fdm.get_property_value('fcs/accelerometer/X')
fay = fdm.get_property_value('fcs/accelerometer/Y')
faz = fdm.get_property_value('fcs/accelerometer/Z')
fax -= ax
fay -= ay
faz -= az
# Deltas are relaxed because the tolerances of the trimming algorithm
# are quite relaxed themselves.
self.assertAlmostEqual(faz / (g * math.cos(theta)), -1.0, delta=1E-5)
self.assertAlmostEqual(fax / (g * math.sin(theta)), 1.0, delta=1E-5)
self.assertAlmostEqual(math.sqrt(fax*fax+fay*fay+faz*faz)/g, 1.0, delta=1E-6)
del fdm
def testSpinningBodyOnOrbit(self):
script_name = 'ball_orbit.xml'
script_path = self.sandbox.path_to_jsbsim_file('scripts', script_name)
self.AddAccelerometersToAircraft(script_path)
fdm = CreateFDM(self.sandbox)
fdm.set_aircraft_path('aircraft')
fdm.load_model('ball')
# Offset the CG along Y (by 30")
fdm.set_property_value('inertia/pointmass-weight-lbs[1]', 50.0)
aircraft_path = self.sandbox.elude(self.sandbox.path_to_jsbsim_file('aircraft', 'ball'))
fdm.load_ic(os.path.join(aircraft_path, 'reset00.xml'), False)
# Switch the accel on
fdm.set_property_value('fcs/accelerometer/on', 1.0)
# Set the orientation such that the spinning axis is Z.
fdm.set_property_value('ic/phi-rad', 0.5*math.pi)
# Set the angular velocities to 0.0 in the ECEF frame. The angular
# velocity R_{inertial} will therefore be equal to the Earth rotation
# rate 7.292115E-5 rad/sec
fdm.set_property_value('ic/p-rad_sec', 0.0)
fdm.set_property_value('ic/q-rad_sec', 0.0)
fdm.set_property_value('ic/r-rad_sec', 0.0)
fdm.run_ic()
fax = fdm.get_property_value('fcs/accelerometer/X')
fay = fdm.get_property_value('fcs/accelerometer/Y')
faz = fdm.get_property_value('fcs/accelerometer/Z')
cgy_ft = fdm.get_property_value('inertia/cg-y-in') / 12.
omega = 0.00007292115 # Earth rotation rate in rad/sec
self.assertAlmostEqual(fdm.get_property_value('accelerations/a-pilot-x-ft_sec2'),
fax, delta=1E-8)
self.assertAlmostEqual(fdm.get_property_value('accelerations/a-pilot-y-ft_sec2'),
fay, delta=1E-8)
self.assertAlmostEqual(fdm.get_property_value('accelerations/a-pilot-z-ft_sec2'),
faz, delta=1E-8)
# Acceleration along X should be zero
self.assertAlmostEqual(fax, 0.0, delta=1E-8)
# Acceleration along Y should be equal to r*omega^2
self.assertAlmostEqual(fay / (cgy_ft * omega * omega), 1.0, delta=1E-7)
# Acceleration along Z should be zero
self.assertAlmostEqual(faz, 0.0, delta=1E-8)
suite = unittest.TestLoader().loadTestsFromTestCase(TestAccelerometer)
test_result = unittest.TextTestRunner(verbosity=2).run(suite)
if test_result.failures or test_result.errors:
sys.exit(-1) # 'make test' will report the test failed.
|