repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
HydrelioxGitHub/PiDDL | ZTPAGE.py | 1 | 2847 | # coding: utf-8
from urllib2 import urlopen
import urllib2
import bs4 as BeautifulSoup
class ZTPage:
    """Scrapes a download page and exposes its title, metadata and host links.

    The page type ("Show" vs "Movie") is inferred from the URL, and the
    "maincont" <div> of the fetched page is parsed for the title, season,
    quality, language, current episode and per-host download links.
    """
    def __init__(self, url):
        # url -- address of the page to scrape; parsing happens immediately.
        self.url = url
        self.update()
    def update(self):
        """Fetch the page and (re)parse every piece of information."""
        self.update_content()
        self.parse_type()
        self.parse_infos()
        self.parse_links()
    def update_content(self):
        """Download the page and keep only its main content <div>."""
        # A browser-like User-Agent is sent; presumably the site rejects the
        # default urllib2 agent -- TODO confirm.
        req = urllib2.Request(self.url, headers={'User-Agent': "Magic Browser"})
        html = urlopen(req).read()
        soup = BeautifulSoup.BeautifulSoup(html, "html5lib")
        self.content = soup.find('div', class_="maincont")
    def parse_type(self):
        """Derive self.type ("Show" or "Movie") from the URL."""
        # NOTE(review): if the URL contains neither token, self.type is never
        # set and parse_infos() will raise AttributeError.
        if "series" in self.url:
            self.type = "Show"
        if "films" in self.url:
            self.type = "Movie"
    def parse_links(self):
        """Build self.links: {host_name: {episode_number: url}}."""
        liste = {}
        host = 'error'
        html = self.content.find('div', class_="contentl").find_all(["span", "a"])
        for elem in html:
            # A non-empty <span> introduces a new host section...
            if ('span' == elem.name) and (unicode(elem.string) != 'None'):
                host = elem.string
                liste[host] = {}
            # ...and every following <a> is an episode link for that host.
            if elem.name == 'a':
                elem.string = elem.string.replace("Episode", '').replace('Final', '').strip()
                episode_number = int(elem.string)
                liste[host][episode_number] = elem.attrs['href']
        self.links = liste
    def parse_infos(self):
        """Parse title, season, quality, language and current episode."""
        # Retrieve title
        title = self.content.find('div', class_="titrearticles").h1.string
        if self.type == "Show":
            # Show titles look like "Name - Saison N [Complete]".
            title = title.split("-")
            self.title = title[0].strip()
            # Retrieve season number for TV shows
            self.season = int(title[1].replace("Saison", "").replace('[Complete]', '').strip())
        if self.type == "Movie":
            self.title = title.strip()
        # Retrieve language, format, codec... from an info line shaped like
        # "[current/total] LANG | Qualit<mangled> QUALITY".
        info = self.content.find('div', class_="corps").div.span.span.b.strong.string
        first_part = info.split('|')[0]
        second_part = info.split('|')[1]
        self.language = first_part.split(' ')[1].strip()
        self.currentEpisode = first_part.split(' ')[0].strip()
        self.currentEpisode = self.currentEpisode.replace('[', '')
        self.currentEpisode = int(self.currentEpisode.split('/')[0])
        # Encoding workaround: the accented character after "Qualit" is
        # mangled, so strip "Qualit" then drop the leftover byte.
        quality = second_part.replace("Qualit", '').strip()
        quality = quality[1:]
        # ...
        self.quality = quality.strip()
    def get_available_hosts(self):
        """Return the host names for which links were found."""
        return self.links.keys()
    def get_tvshow_link(self, host, episodenumber):
        """Return the download link for `episodenumber` on `host`."""
        alllinks = self.links[host]
        link = alllinks[episodenumber]
        return link
    def print_report(self):
        """Dump every parsed attribute to stdout (debug helper)."""
        print self.url
        print self.title
        print self.season
        print self.quality
        print self.language
        print self.currentEpisode
        print self.links
| gpl-2.0 |
Deepomatic/DIGITS | examples/classification/use_archive.py | 5 | 3009 | #!/usr/bin/env python2
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
"""
Classify an image using a model archive file
"""
import argparse
import os
import tarfile
import tempfile
import time
import zipfile
from example import classify
def _safe_extract_tar(tf, dest):
    # Guard against "tar-slip" path traversal: a malicious archive can carry
    # member names like "../../etc/passwd". Refuse any member that would be
    # written outside of the destination directory, then extract.
    dest_root = os.path.abspath(dest)
    for member in tf.getmembers():
        target = os.path.abspath(os.path.join(dest_root, member.name))
        if target != dest_root and not target.startswith(dest_root + os.sep):
            raise ValueError('Unsafe path in archive: %s' % member.name)
    tf.extractall(path=dest)
def unzip_archive(archive):
    """
    Unzips an archive into a temporary directory
    Returns a link to that directory

    Arguments:
    archive -- the path to an archive file (tar or zip)
    """
    assert os.path.exists(archive), 'File not found - %s' % archive
    # Extract under the system temp dir using the archive's own name, so a
    # repeated run can reuse the already-extracted copy.
    tmpdir = os.path.join(tempfile.gettempdir(), os.path.basename(archive))
    assert tmpdir != archive  # That wouldn't work out
    if os.path.exists(tmpdir):
        # files are already extracted
        pass
    else:
        if tarfile.is_tarfile(archive):
            # print() with a single argument emits the same text on
            # Python 2 and Python 3.
            print('Extracting tarfile ...')
            with tarfile.open(archive) as tf:
                _safe_extract_tar(tf, tmpdir)
        elif zipfile.is_zipfile(archive):
            # ZipFile.extractall already sanitizes member paths itself.
            print('Extracting zipfile ...')
            with zipfile.ZipFile(archive) as zf:
                zf.extractall(path=tmpdir)
        else:
            raise ValueError('Unknown file type for %s' % os.path.basename(archive))
    return tmpdir
def classify_with_archive(archive, image_files, batch_size=None, use_gpu=True):
    """
    Classify `image_files` with the model contained in a DIGITS archive.

    Arguments:
    archive -- path to a DIGITS model archive (tar or zip)
    image_files -- list of image paths to classify

    Keyword arguments:
    batch_size -- forwarded to classify(); None lets it choose a default
    use_gpu -- whether inference should run on the GPU
    """
    tmpdir = unzip_archive(archive)
    caffemodel = None
    deploy_file = None
    mean_file = None
    labels_file = None
    # Locate the model pieces inside the archive by filename convention.
    for filename in os.listdir(tmpdir):
        full_path = os.path.join(tmpdir, filename)
        if filename.endswith('.caffemodel'):
            caffemodel = full_path
        elif filename == 'deploy.prototxt':
            deploy_file = full_path
        elif filename.endswith('.binaryproto'):
            mean_file = full_path
        elif filename == 'labels.txt':
            labels_file = full_path
        else:
            print 'Unknown file:', filename
    # mean_file and labels_file are optional; the model and deploy file are not.
    assert caffemodel is not None, 'Caffe model file not found'
    assert deploy_file is not None, 'Deploy file not found'
    classify(caffemodel, deploy_file, image_files,
             mean_file=mean_file, labels_file=labels_file,
             batch_size=batch_size, use_gpu=use_gpu)
if __name__ == '__main__':
    # Time the whole run, including model loading and inference.
    script_start_time = time.time()
    parser = argparse.ArgumentParser(description='Classification example using an archive - DIGITS')
    # Positional arguments
    parser.add_argument('archive', help='Path to a DIGITS model archive')
    parser.add_argument('image_file', nargs='+', help='Path[s] to an image')
    # Optional arguments
    parser.add_argument('--batch-size', type=int)
    parser.add_argument('--nogpu', action='store_true', help="Don't use the GPU")
    args = vars(parser.parse_args())
    classify_with_archive(args['archive'], args['image_file'],
                          batch_size=args['batch_size'],
                          use_gpu=(not args['nogpu']),
                          )
    print 'Script took %f seconds.' % (time.time() - script_start_time,)
| bsd-3-clause |
wbsavage/shinken | shinken/autoslots.py | 2 | 2375 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2012:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""The AutoSlots Class is a MetaClass: it manages how other classes
are created (Classes, not instances of theses classes).
Here it's role is to create the __slots__ list of the class with
all properties of Class.properties and Class.running_properties
so we do not have to add manually all properties to the __slots__
list when we add a new entry"""
class AutoSlots(type):
    """Metaclass that auto-fills __slots__ from a class's property tables.

    Any class built with this metaclass gets a __slots__ tuple assembled
    from the keys of its 'properties' and 'running_properties' dicts,
    skipping entries flagged with no_slots (some names, e.g. Host's
    2d_coords, are not allowed in __slots__).
    """
    # __new__ runs when the *class* itself is created:
    #   cls   -- AutoSlots
    #   name  -- name of the class being built (e.g. 'Service')
    #   bases -- its base classes (e.g. SchedulingItem)
    #   dct   -- its namespace dict (methods, properties, ...)
    def __new__(cls, name, bases, dct):
        # Thanks to Bertrand Mathieu for the set idea
        collected = dct.get('__slots__', set())
        # Merge the slot-eligible keys of both property tables.
        for table_name in ('properties', 'running_properties'):
            if table_name in dct:
                table = dct[table_name]
                collected.update(key for key in table if not table[key].no_slots)
        dct['__slots__'] = tuple(collected)
        return type.__new__(cls, name, bases, dct)
| agpl-3.0 |
kbrebanov/ansible-modules-extras | database/vertica/vertica_role.py | 15 | 8414 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: vertica_role
version_added: '2.0'
short_description: Adds or removes Vertica database roles and assigns roles to them.
description:
- Adds or removes Vertica database role and, optionally, assign other roles.
options:
name:
description:
- Name of the role to add or remove.
required: true
assigned_roles:
description:
- Comma separated list of roles to assign to the role.
aliases: ['assigned_role']
required: false
default: null
state:
description:
- Whether to create C(present) or drop C(absent) a role.
required: false
choices: ['present', 'absent']
default: present
db:
description:
- Name of the Vertica database.
required: false
default: null
cluster:
description:
- Name of the Vertica cluster.
required: false
default: localhost
port:
description:
- Vertica cluster port to connect to.
required: false
default: 5433
login_user:
description:
- The username used to authenticate with.
required: false
default: dbadmin
login_password:
description:
- The password used to authenticate with.
required: false
default: null
notes:
- The default authentication assumes that you are either logging in as or sudo'ing
to the C(dbadmin) account on the host.
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
that C(unixODBC) and C(pyodbc) is installed on the host and properly configured.
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: "Dariusz Owczarek (@dareko)"
"""
EXAMPLES = """
- name: creating a new vertica role
vertica_role: name=role_name db=db_name state=present
- name: creating a new vertica role with other role assigned
vertica_role: name=role_name assigned_role=other_role_name state=present
"""
# pyodbc is an optional dependency: remember whether it imported so main()
# can fail with a helpful message instead of a bare ImportError at load time.
try:
    import pyodbc
except ImportError:
    pyodbc_found = False
else:
    pyodbc_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
class NotSupportedError(Exception):
    """Signals an operation this module does not support."""
    pass
class CannotDropError(Exception):
    """Signals a role that could not be dropped."""
    pass
# module specific functions
def get_role_facts(cursor, role=''):
    """Query Vertica for role definitions.

    With the default empty `role` every role is returned; otherwise only
    roles whose name matches case-insensitively. Returns a dict mapping
    lower-cased role names to {'name': ..., 'assigned_roles': [...]}.
    """
    cursor.execute("""
        select r.name, r.assigned_roles
        from roles r
        where (? = '' or r.name ilike ?)
    """, role, role)
    facts = {}
    # Drain the cursor in batches of 100 rows.
    batch = cursor.fetchmany(100)
    while batch:
        for row in batch:
            assigned = []
            if row.assigned_roles:
                # assigned_roles comes back as "a, b, c" -> ['a', 'b', 'c']
                assigned = row.assigned_roles.replace(' ', '').split(',')
            facts[row.name.lower()] = {
                'name': row.name,
                'assigned_roles': assigned}
        batch = cursor.fetchmany(100)
    return facts
def update_roles(role_facts, cursor, role,
                 existing, required):
    """Reconcile `role`'s assignments: revoke roles no longer required and
    grant newly required ones.

    `role_facts` is accepted for signature parity with the other helpers
    but is not used here.
    """
    existing_set = set(existing)
    required_set = set(required)
    for surplus in existing_set.difference(required_set):
        cursor.execute("revoke {0} from {1}".format(surplus, role))
    for missing in required_set.difference(existing_set):
        cursor.execute("grant {0} to {1}".format(missing, role))
def check(role_facts, role, assigned_roles):
    """Return True if `role` already exists with exactly the given
    assigned roles (order-insensitively).

    Used in check mode to predict whether a change would be made.
    """
    role_key = role.lower()
    if role_key not in role_facts:
        return False
    # sorted(a) != sorted(b) replaces the Python-2-only cmp(...) != 0 idiom
    # with identical behavior that also works on Python 3.
    if assigned_roles and sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles']):
        return False
    return True
def present(role_facts, cursor, role, assigned_roles):
    """Ensure `role` exists with the given assigned roles.

    Creates the role if it is missing, otherwise reconciles its role
    assignments. `role_facts` is refreshed in place for the changed role.
    Returns True when a change was made.
    """
    role_key = role.lower()
    if role_key not in role_facts:
        cursor.execute("create role {0}".format(role))
        update_roles(role_facts, cursor, role, [], assigned_roles)
        role_facts.update(get_role_facts(cursor, role))
        return True
    else:
        changed = False
        # sorted(a) != sorted(b) replaces the Python-2-only cmp() idiom with
        # identical behavior; ordering differences are not a change.
        if assigned_roles and sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles']):
            update_roles(role_facts, cursor, role,
                         role_facts[role_key]['assigned_roles'], assigned_roles)
            changed = True
        if changed:
            role_facts.update(get_role_facts(cursor, role))
        return changed
def absent(role_facts, cursor, role, assigned_roles):
    """Ensure `role` does not exist; return True when it was dropped.

    `assigned_roles` is unused but kept for a signature parallel to
    present().
    """
    role_key = role.lower()
    if role_key not in role_facts:
        return False
    # Revoke every assignment first, then drop the role itself.
    update_roles(role_facts, cursor, role,
                 role_facts[role_key]['assigned_roles'], [])
    cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name']))
    del role_facts[role_key]
    return True
# module logic
def main():
    """Entry point: parse the module arguments, connect to Vertica over
    ODBC and converge the role to the requested state."""
    module = AnsibleModule(
        argument_spec=dict(
            role=dict(required=True, aliases=['name']),
            assigned_roles=dict(default=None, aliases=['assigned_role']),
            state=dict(default='present', choices=['absent', 'present']),
            db=dict(default=None),
            cluster=dict(default='localhost'),
            port=dict(default='5433'),
            login_user=dict(default='dbadmin'),
            login_password=dict(default=None),
        ), supports_check_mode = True)
    if not pyodbc_found:
        module.fail_json(msg="The python pyodbc module is required.")
    role = module.params['role']
    # assigned_roles arrives as one comma-separated string; normalize it to
    # a list and drop empty entries.
    assigned_roles = []
    if module.params['assigned_roles']:
        assigned_roles = module.params['assigned_roles'].split(',')
        assigned_roles = filter(None, assigned_roles)
    state = module.params['state']
    db = ''
    if module.params['db']:
        db = module.params['db']
    changed = False
    try:
        # Build the ODBC connection string; autocommit so role DDL takes
        # effect immediately.
        dsn = (
            "Driver=Vertica;"
            "Server={0};"
            "Port={1};"
            "Database={2};"
            "User={3};"
            "Password={4};"
            "ConnectionLoadBalance={5}"
        ).format(module.params['cluster'], module.params['port'], db,
                 module.params['login_user'], module.params['login_password'], 'true')
        db_conn = pyodbc.connect(dsn, autocommit=True)
        cursor = db_conn.cursor()
    except Exception:
        e = get_exception()
        module.fail_json(msg="Unable to connect to database: {0}.".format(e))
    try:
        role_facts = get_role_facts(cursor)
        if module.check_mode:
            # Check mode: only report whether anything *would* change.
            changed = not check(role_facts, role, assigned_roles)
        elif state == 'absent':
            try:
                changed = absent(role_facts, cursor, role, assigned_roles)
            except pyodbc.Error:
                e = get_exception()
                module.fail_json(msg=str(e))
        elif state == 'present':
            try:
                changed = present(role_facts, cursor, role, assigned_roles)
            except pyodbc.Error:
                e = get_exception()
                module.fail_json(msg=str(e))
    except NotSupportedError:
        e = get_exception()
        module.fail_json(msg=str(e), ansible_facts={'vertica_roles': role_facts})
    except CannotDropError:
        e = get_exception()
        module.fail_json(msg=str(e), ansible_facts={'vertica_roles': role_facts})
    except SystemExit:
        # avoid catching this on python 2.4
        raise
    except Exception:
        e = get_exception()
        module.fail_json(msg=e)
    module.exit_json(changed=changed, role=role, ansible_facts={'vertica_roles': role_facts})
if __name__ == '__main__':
    main()
| gpl-3.0 |
jmptrader/dirigible-spreadsheet | dirigible/sheet/tests/test_importer.py | 2 | 9144 | # Copyright (c) 2010 Resolver Systems Ltd, PythonAnywhere LLP
# See LICENSE.md
#
from __future__ import with_statement
try:
import unittest2 as unittest
except ImportError:
import unittest
from mock import Mock, patch
from StringIO import StringIO
import xlrd
from dirigible.test_utils import ResolverTestCase
from sheet.importer import (
DirigibleImportError, worksheet_from_csv, worksheet_from_excel
)
from sheet.worksheet import Worksheet
class WorksheetFromCSVTest(ResolverTestCase):
    """Tests for worksheet_from_csv(): importing CSV data into a Worksheet,
    both in forced Excel (windows-1252) mode and in encoding-autodetect
    mode."""
    def test_should_put_data_into_existing_worksheet_with_offset_for_excel_and_auto(self):
        # Imported cells must land at the given (column, row) offset and
        # overwrite only the cells they cover, in both encoding modes.
        for excel_encoding in [True, False]:
            csv = StringIO()
            csv.write('abc,123\n')
            csv.write('def, \n')
            csv.size = 10
            csv.seek(0)
            existing_worksheet = Worksheet()
            for row in range(1, 6):
                for col in range(1, 5):
                    existing_worksheet[col, row].formula = 'old'
            worksheet = worksheet_from_csv(existing_worksheet, csv, 2, 3, excel_encoding)
            self.assertEquals(worksheet.A1.formula, 'old')
            self.assertEquals(worksheet.B1.formula, 'old')
            self.assertEquals(worksheet.A2.formula, 'old')
            self.assertEquals(worksheet.B3.formula, 'abc')
            self.assertEquals(worksheet.C3.formula, '123')
            self.assertEquals(worksheet.B4.formula, 'def')
            self.assertEquals(worksheet.C4.formula, ' ')
            self.assertEquals(worksheet.C5.formula, 'old')
            self.assertEquals(worksheet.D3.formula, 'old')
            self.assertEquals(worksheet.B5.formula, 'old')
    def test_excel_csv_import_recognises_accents_and_currency_symbols(self):
        # windows-1252 bytes (e-acute, pound, euro) must decode correctly.
        excel_csv = StringIO()
        excel_csv.write(u"\xe9".encode('windows-1252'))
        excel_csv.write(u"\xa3".encode('windows-1252'))
        excel_csv.write(u"\u20ac".encode('windows-1252'))
        excel_csv.name = 'filename'
        excel_csv.size = 10
        excel_csv.seek(0)
        worksheet = worksheet_from_csv(Worksheet(), excel_csv, 3, 4, True)
        self.assertEquals(worksheet.C4.formula, u"\xe9\xa3\u20ac")
    def test_excel_csv_import_handles_carriage_returns_in_cells(self):
        # A quoted embedded newline must stay inside a single cell.
        excel_csv = StringIO()
        excel_csv.write(u'"carriage\nreturn!"\r\n'.encode('windows-1252'))
        excel_csv.write(u"normal line\r\n".encode('windows-1252'))
        excel_csv.name = 'filename'
        excel_csv.size = 10
        excel_csv.seek(0)
        worksheet = worksheet_from_csv(Worksheet(), excel_csv, 2, 3, True)
        self.assertEquals(worksheet.B3.formula, "carriage\nreturn!")
        self.assertEquals(worksheet.B4.formula, "normal line")
    def test_autodetect_csv_import_handles_carriage_returns_in_cells(self):
        # Same embedded-newline behaviour when the encoding is autodetected.
        excel_csv = StringIO()
        excel_csv.write(u'"carriage\nreturn!"\r\n'.encode('utf-8'))
        excel_csv.write(u"normal line\r\n".encode('utf-8'))
        excel_csv.name = 'filename'
        excel_csv.size = 10
        excel_csv.seek(0)
        worksheet = worksheet_from_csv(Worksheet(), excel_csv, 2, 3, False)
        self.assertEquals(worksheet.B3.formula, "carriage\nreturn!")
        self.assertEquals(worksheet.B4.formula, "normal line")
    def test_autodetect_can_handle_japanese_utf8(self):
        # UTF-8 encoded kanji must round-trip when autodetecting.
        some_kanji = u'\u65b0\u4e16\u7d00\u30a8\u30f4\u30a1\u30f3\u30b2\u30ea\u30aa\u30f3'
        japanese_file = StringIO()
        japanese_file.write(some_kanji.encode('utf-8'))
        japanese_file.name = 'filename'
        japanese_file.size = 10
        japanese_file.seek(0)
        worksheet = worksheet_from_csv(Worksheet(), japanese_file, 1, 1, False)
        self.assertEquals(worksheet.A1.formula, some_kanji)
    def test_excel_csv_import_survives_japanes_utf8(self):
        # Forced Excel mode on UTF-8 kanji mis-decodes but must not raise.
        some_kanji = u'\u65b0\u4e16\u7d00\u30a8\u30f4\u30a1\u30f3\u30b2\u30ea\u30aa\u30f3'
        japanese_file = StringIO()
        japanese_file.write(some_kanji.encode('utf-8'))
        japanese_file.name = 'filename'
        japanese_file.size = 10
        japanese_file.seek(0)
        worksheet_from_csv(Worksheet(), japanese_file, 1, 1, True)
        #should not raise
    def test_import_excel_csv_raises_on_null_bytes(self):
        # Binary junk (NUL bytes) must be rejected with DirigibleImportError.
        bin_file = StringIO()
        bin_file.write("\xFF\x00\xFF")
        bin_file.name = 'filename'
        bin_file.size = 10
        bin_file.seek(0)
        self.assertRaises(DirigibleImportError,
                          lambda : worksheet_from_csv(Worksheet(), bin_file, 2, 1, True)
                          )
    def test_autodetect_import_csv_raises_on_null_bytes(self):
        # Same rejection in autodetect mode.
        bin_file = StringIO()
        bin_file.write("\xFF\x00\xFF")
        bin_file.name = 'filename'
        bin_file.size = 10
        bin_file.seek(0)
        self.assertRaises(DirigibleImportError,
                          lambda : worksheet_from_csv(Worksheet(), bin_file, 1, 1, False)
                          )
    @patch('sheet.importer.UniversalDetector')
    def test_autodetect_import_csv_raises_on_failure_to_detect_encoding(
        self, mock_UniversalDetector
    ):
        # When chardet reports no encoding, the import must fail cleanly.
        mock_detector = Mock()
        mock_UniversalDetector.return_value = mock_detector
        mock_detector.result = {'encoding':None}
        mock_file = StringIO()
        mock_file.write("\xFF\x00\xFF")
        mock_file.name = 'filename'
        mock_file.size = 10
        self.assertRaises(DirigibleImportError,
                          lambda : worksheet_from_csv(Worksheet(), mock_file, 1, 1, False)
                          )
class WorksheetFromExcelTest(ResolverTestCase):
    """Tests for worksheet_from_excel(): converting an xlrd worksheet's
    cells (text, floats, dates, error codes) into Worksheet formulae."""
    def test_populates_worksheet_formulae_from_excel_values(self):
        # Every Excel cell value becomes the formula of the cell one
        # (column, row) over -- xlrd is 0-based, Worksheet is 1-based.
        mock_excel_worksheet = Mock()
        def mock_cell(row, col):
            mock_cell = Mock()
            mock_cell.value = '%s, %s' % (col, row)
            return mock_cell
        mock_excel_worksheet.cell.side_effect = mock_cell
        mock_excel_worksheet.nrows = 4
        mock_excel_worksheet.ncols = 3
        worksheet = worksheet_from_excel(mock_excel_worksheet)
        for col in range(mock_excel_worksheet.ncols):
            for row in range(mock_excel_worksheet.nrows):
                self.assertEquals(worksheet[col + 1, row + 1].formula, '%s, %s' % (col, row))
    def test_populates_worksheet_handles_float_source_values(self):
        # Float cell values must be stringified into the formula.
        mock_excel_worksheet = Mock()
        def mock_cell(row, col):
            mock_cell = Mock()
            mock_cell.value = col + row + 0.1
            return mock_cell
        mock_excel_worksheet.cell.side_effect = mock_cell
        mock_excel_worksheet.nrows = 4
        mock_excel_worksheet.ncols = 3
        worksheet = worksheet_from_excel(mock_excel_worksheet)
        for col in range(mock_excel_worksheet.ncols):
            for row in range(mock_excel_worksheet.nrows):
                self.assertEquals(worksheet[col + 1, row + 1].formula, '%s' % (col + row + 0.1, ))
    @patch('sheet.importer.xldate_as_tuple')
    def test_converts_excel_dates_to_python_datetime(self, mock_xlrd_date_as_tuple):
        # Date cells go through xldate_as_tuple (with the book's datemode)
        # and come out as =DateTime(...) formulae.
        mock_excel_worksheet = Mock()
        def mock_cell(row, col):
            mock_cell = Mock()
            mock_cell.ctype = xlrd.XL_CELL_DATE
            mock_cell.value = (row, col)
            return mock_cell
        mock_excel_worksheet.cell.side_effect = mock_cell
        mock_excel_worksheet.nrows = 4
        mock_excel_worksheet.ncols = 3
        def mock_xlrd_date_as_tuple_function(cell_value, datemode):
            row, col = cell_value
            self.assertEquals(datemode, mock_excel_worksheet.book.datemode)
            return (2011, row, col, 1, 2, 3)
        mock_xlrd_date_as_tuple.side_effect = mock_xlrd_date_as_tuple_function
        worksheet = worksheet_from_excel(mock_excel_worksheet)
        for col in range(mock_excel_worksheet.ncols):
            for row in range(mock_excel_worksheet.nrows):
                self.assertEquals(
                    worksheet[col + 1, row + 1].formula,
                    '=DateTime(2011, %s, %s, 1, 2, 3)' % (row, col)
                )
    @patch('sheet.importer.xldate_as_tuple')
    def test_handles_excel_errors(self, mock_xlrd_date_as_tuple):
        # Each xlrd error code maps to the corresponding Excel error
        # formula text.
        mock_excel_worksheet = Mock()
        errors = {
            (0,0) : (0x0, '=#NULL!'),
            (1,0) : (0x7, '=#DIV/0!'),
            (2,0) : (0xf, '=#VALUE!'),
            (3,0) : (0x17, '=#REF!'),
            (4,0) : (0x1d, '=#NAME?'),
            (5,0) : (0x24, '=#NUM!'),
            (6,0) : (0x2a, '=#N/A'),
        }
        def mock_cell(row, col):
            mock_cell = Mock()
            mock_cell.ctype = xlrd.XL_CELL_ERROR
            mock_cell.value = errors[row, col][0]
            return mock_cell
        mock_excel_worksheet.cell.side_effect = mock_cell
        mock_excel_worksheet.nrows = 7
        mock_excel_worksheet.ncols = 1
        worksheet = worksheet_from_excel(mock_excel_worksheet)
        for col in range(mock_excel_worksheet.ncols):
            for row in range(mock_excel_worksheet.nrows):
                self.assertEquals(
                    worksheet[col + 1, row + 1].formula,
                    errors[row, col][1]
                )
| mit |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python2.7/_abcoll.py | 14 | 18415 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
DON'T USE THIS MODULE DIRECTLY! The classes here should be imported
via collections; they are defined here only to alleviate certain
bootstrapping issues. Unit tests are in test_collections.
"""
from abc import ABCMeta, abstractmethod
import sys
__all__ = ["Hashable", "Iterable", "Iterator",
"Sized", "Container", "Callable",
"Set", "MutableSet",
"Mapping", "MutableMapping",
"MappingView", "KeysView", "ItemsView", "ValuesView",
"Sequence", "MutableSequence",
]
### ONE-TRICK PONIES ###
def _hasattr(C, attr):
try:
return any(attr in B.__dict__ for B in C.__mro__)
except AttributeError:
# Old-style class
return hasattr(C, attr)
class Hashable:
    """ABC for objects that can be dict keys / set members (__hash__)."""
    __metaclass__ = ABCMeta
    @abstractmethod
    def __hash__(self):
        return 0
    @classmethod
    def __subclasshook__(cls, C):
        # A class counts as Hashable only if some class along its MRO
        # defines a *truthy* __hash__ -- classes disable hashing by setting
        # __hash__ = None, which must not count.
        if cls is Hashable:
            try:
                for B in C.__mro__:
                    if "__hash__" in B.__dict__:
                        if B.__dict__["__hash__"]:
                            return True
                        break
            except AttributeError:
                # Old-style class
                if getattr(C, "__hash__", None):
                    return True
        return NotImplemented
class Iterable:
    """ABC for objects that support iteration via __iter__."""
    __metaclass__ = ABCMeta
    @abstractmethod
    def __iter__(self):
        # 'while False: yield None' makes this abstract method a generator,
        # so a subclass calling super().__iter__() gets an empty iterator.
        while False:
            yield None
    @classmethod
    def __subclasshook__(cls, C):
        if cls is Iterable:
            if _hasattr(C, "__iter__"):
                return True
        return NotImplemented
# Python 2 str iterates via __getitem__, not __iter__, so register it
# explicitly rather than relying on the subclass hook.
Iterable.register(str)
class Iterator(Iterable):
    """ABC for iterators: a next() method plus being one's own iterable."""
    @abstractmethod
    def next(self):
        'Return the next item from the iterator. When exhausted, raise StopIteration'
        raise StopIteration
    def __iter__(self):
        return self
    @classmethod
    def __subclasshook__(cls, C):
        if cls is Iterator:
            # Python 2 iterators expose next() (not __next__) and __iter__.
            if _hasattr(C, "next") and _hasattr(C, "__iter__"):
                return True
        return NotImplemented
class Sized:
    """ABC for objects with a length (__len__)."""
    __metaclass__ = ABCMeta
    @abstractmethod
    def __len__(self):
        return 0
    @classmethod
    def __subclasshook__(cls, C):
        if cls is Sized:
            if _hasattr(C, "__len__"):
                return True
        return NotImplemented
class Container:
    """ABC for objects supporting membership tests (__contains__)."""
    __metaclass__ = ABCMeta
    @abstractmethod
    def __contains__(self, x):
        return False
    @classmethod
    def __subclasshook__(cls, C):
        if cls is Container:
            if _hasattr(C, "__contains__"):
                return True
        return NotImplemented
class Callable:
    """ABC for callable objects (__call__)."""
    __metaclass__ = ABCMeta
    @abstractmethod
    def __call__(self, *args, **kwds):
        return False
    @classmethod
    def __subclasshook__(cls, C):
        if cls is Callable:
            if _hasattr(C, "__call__"):
                return True
        return NotImplemented
### SETS ###
class Set(Sized, Iterable, Container):
    """A set is a finite, iterable container.

    This class provides concrete generic implementations of all
    methods except for __contains__, __iter__ and __len__.

    To override the comparisons (presumably for speed, as the
    semantics are fixed), redefine __le__ and __ge__,
    then the other operations will automatically follow suit.
    """
    def __le__(self, other):
        # Subset test: every element of self must be in other.
        if not isinstance(other, Set):
            return NotImplemented
        if len(self) > len(other):
            return False
        for elem in self:
            if elem not in other:
                return False
        return True
    def __lt__(self, other):
        # Proper subset: strictly smaller AND a subset.
        if not isinstance(other, Set):
            return NotImplemented
        return len(self) < len(other) and self.__le__(other)
    def __gt__(self, other):
        # Proper superset: strictly larger AND a superset.
        if not isinstance(other, Set):
            return NotImplemented
        return len(self) > len(other) and self.__ge__(other)
    def __ge__(self, other):
        # Superset test: every element of other must be in self.
        if not isinstance(other, Set):
            return NotImplemented
        if len(self) < len(other):
            return False
        for elem in other:
            if elem not in self:
                return False
        return True
    def __eq__(self, other):
        # Equal length plus subset implies equality.
        if not isinstance(other, Set):
            return NotImplemented
        return len(self) == len(other) and self.__le__(other)
    def __ne__(self, other):
        return not (self == other)
    @classmethod
    def _from_iterable(cls, it):
        '''Construct an instance of the class from any iterable input.

        Must override this method if the class constructor signature
        does not accept an iterable for an input.
        '''
        return cls(it)
    def __and__(self, other):
        # Intersection: keep values of other that are also in self.
        if not isinstance(other, Iterable):
            return NotImplemented
        return self._from_iterable(value for value in other if value in self)
    __rand__ = __and__
    def isdisjoint(self, other):
        'Return True if two sets have a null intersection.'
        for value in other:
            if value in self:
                return False
        return True
    def __or__(self, other):
        # Union: chain both iterables; duplicates collapse in the result set.
        if not isinstance(other, Iterable):
            return NotImplemented
        chain = (e for s in (self, other) for e in s)
        return self._from_iterable(chain)
    __ror__ = __or__
    def __sub__(self, other):
        # Difference; a plain iterable is first converted so membership
        # tests against it are well-defined.
        if not isinstance(other, Set):
            if not isinstance(other, Iterable):
                return NotImplemented
            other = self._from_iterable(other)
        return self._from_iterable(value for value in self
                                   if value not in other)
    def __rsub__(self, other):
        # Reflected difference: other - self.
        if not isinstance(other, Set):
            if not isinstance(other, Iterable):
                return NotImplemented
            other = self._from_iterable(other)
        return self._from_iterable(value for value in other
                                   if value not in self)
    def __xor__(self, other):
        # Symmetric difference built from the two one-sided differences.
        if not isinstance(other, Set):
            if not isinstance(other, Iterable):
                return NotImplemented
            other = self._from_iterable(other)
        return (self - other) | (other - self)
    __rxor__ = __xor__
    # Sets are not hashable by default, but subclasses can change this
    __hash__ = None
    def _hash(self):
        """Compute the hash value of a set.

        Note that we don't define __hash__: not all sets are hashable.
        But if you define a hashable set type, its __hash__ should
        call this function.

        This must be compatible __eq__.

        All sets ought to compare equal if they contain the same
        elements, regardless of how they are implemented, and
        regardless of the order of the elements; so there's not much
        freedom for __eq__ or __hash__.  We match the algorithm used
        by the built-in frozenset type.
        """
        # MASK keeps intermediate results within the platform word size.
        MAX = sys.maxint
        MASK = 2 * MAX + 1
        n = len(self)
        h = 1927868237 * (n + 1)
        h &= MASK
        # XOR-fold each element's hash so the result is order-independent.
        for x in self:
            hx = hash(x)
            h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
            h &= MASK
        h = h * 69069 + 907133923
        h &= MASK
        if h > MAX:
            h -= MASK + 1
        if h == -1:
            # -1 is reserved as an error indicator for C-level hash slots.
            h = 590923713
        return h
Set.register(frozenset)
class MutableSet(Set):
    """A mutable set is a finite, iterable container.

    This class provides concrete generic implementations of all
    methods except for __contains__, __iter__, __len__,
    add(), and discard().

    To override the comparisons (presumably for speed, as the
    semantics are fixed), all you have to do is redefine __le__ and
    then the other operations will automatically follow suit.
    """
    @abstractmethod
    def add(self, value):
        """Add an element."""
        raise NotImplementedError
    @abstractmethod
    def discard(self, value):
        """Remove an element. Do not raise an exception if absent."""
        raise NotImplementedError
    def remove(self, value):
        """Remove an element. If not a member, raise a KeyError."""
        if value not in self:
            raise KeyError(value)
        self.discard(value)
    def pop(self):
        """Return the popped value. Raise KeyError if empty."""
        it = iter(self)
        try:
            value = next(it)
        except StopIteration:
            raise KeyError
        self.discard(value)
        return value
    def clear(self):
        """This is slow (creates N new iterators!) but effective."""
        try:
            while True:
                self.pop()
        except KeyError:
            pass
    def __ior__(self, it):
        # In-place union: add every element of it.
        for value in it:
            self.add(value)
        return self
    def __iand__(self, it):
        # In-place intersection: drop everything not in it.
        for value in (self - it):
            self.discard(value)
        return self
    def __ixor__(self, it):
        # In-place symmetric difference; x ^= x empties the set.
        if it is self:
            self.clear()
        else:
            if not isinstance(it, Set):
                it = self._from_iterable(it)
            for value in it:
                if value in self:
                    self.discard(value)
                else:
                    self.add(value)
        return self
    def __isub__(self, it):
        # In-place difference; x -= x empties the set.
        if it is self:
            self.clear()
        else:
            for value in it:
                self.discard(value)
        return self
MutableSet.register(set)
### MAPPINGS ###
class Mapping(Sized, Iterable, Container):
    """A Mapping is a generic container for associating key/value
    pairs.

    This class provides concrete generic implementations of all
    methods except for __getitem__, __iter__, and __len__.
    """
    @abstractmethod
    def __getitem__(self, key):
        raise KeyError
    def get(self, key, default=None):
        'D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.'
        try:
            return self[key]
        except KeyError:
            return default
    def __contains__(self, key):
        # EAFP: a successful lookup means membership.
        try:
            self[key]
        except KeyError:
            return False
        else:
            return True
    def iterkeys(self):
        'D.iterkeys() -> an iterator over the keys of D'
        return iter(self)
    def itervalues(self):
        'D.itervalues() -> an iterator over the values of D'
        for key in self:
            yield self[key]
    def iteritems(self):
        'D.iteritems() -> an iterator over the (key, value) items of D'
        for key in self:
            yield (key, self[key])
    def keys(self):
        "D.keys() -> list of D's keys"
        return list(self)
    def items(self):
        "D.items() -> list of D's (key, value) pairs, as 2-tuples"
        return [(key, self[key]) for key in self]
    def values(self):
        "D.values() -> list of D's values"
        return [self[key] for key in self]
    # Mappings are not hashable by default, but subclasses can change this
    __hash__ = None
    def __eq__(self, other):
        # Equality is order-insensitive: compare as plain dicts.
        if not isinstance(other, Mapping):
            return NotImplemented
        return dict(self.items()) == dict(other.items())
    def __ne__(self, other):
        return not (self == other)
class MappingView(Sized):
    """Base class for the dynamic views over a mapping's keys/values/items."""
    def __init__(self, mapping):
        # The view holds a reference to the mapping, so it reflects changes.
        self._mapping = mapping
    def __len__(self):
        return len(self._mapping)
    def __repr__(self):
        return '{0.__class__.__name__}({0._mapping!r})'.format(self)
class KeysView(MappingView, Set):
    """Set-like view over a mapping's keys."""
    @classmethod
    def _from_iterable(self, it):
        # Set operations on views yield plain sets, not new views.
        return set(it)
    def __contains__(self, key):
        return key in self._mapping
    def __iter__(self):
        for key in self._mapping:
            yield key
class ItemsView(MappingView, Set):
    """Set-like view over a mapping's (key, value) pairs."""
    @classmethod
    def _from_iterable(self, it):
        # Set operations on views yield plain sets, not new views.
        return set(it)
    def __contains__(self, item):
        # An item is present when its key exists AND maps to an equal value.
        key, value = item
        try:
            v = self._mapping[key]
        except KeyError:
            return False
        else:
            return v == value
    def __iter__(self):
        for key in self._mapping:
            yield (key, self._mapping[key])
class ValuesView(MappingView):
    """View over a mapping's values (not set-like: values may repeat)."""
    def __contains__(self, value):
        # Linear scan: values are not hashed or indexed.
        for key in self._mapping:
            if value == self._mapping[key]:
                return True
        return False
    def __iter__(self):
        for key in self._mapping:
            yield self._mapping[key]
class MutableMapping(Mapping):
    """A MutableMapping is a generic container for associating
    key/value pairs.

    This class provides concrete generic implementations of all
    methods except for __getitem__, __setitem__, __delitem__,
    __iter__, and __len__.
    """
    @abstractmethod
    def __setitem__(self, key, value):
        raise KeyError
    @abstractmethod
    def __delitem__(self, key):
        raise KeyError
    # Private sentinel so pop() can distinguish "no default given" from
    # "default=None" (name-mangled to avoid clashes in subclasses).
    __marker = object()
    def pop(self, key, default=__marker):
        '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        try:
            value = self[key]
        except KeyError:
            if default is self.__marker:
                raise
            return default
        else:
            del self[key]
            return value
    def popitem(self):
        '''D.popitem() -> (k, v), remove and return some (key, value) pair
        as a 2-tuple; but raise KeyError if D is empty.
        '''
        try:
            key = next(iter(self))
        except StopIteration:
            raise KeyError
        value = self[key]
        del self[key]
        return key, value
    def clear(self):
        'D.clear() -> None. Remove all items from D.'
        try:
            while True:
                self.popitem()
        except KeyError:
            pass
    def update(*args, **kwds):
        ''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F.
        If E present and has a .keys() method, does:     for k in E: D[k] = E[k]
        If E present and lacks .keys() method, does:     for (k, v) in E: D[k] = v
        In either case, this is followed by: for k, v in F.items(): D[k] = v
        '''
        # self is taken from *args so that a key named 'self' in **kwds
        # still works.
        if len(args) > 2:
            raise TypeError("update() takes at most 2 positional "
                            "arguments ({} given)".format(len(args)))
        elif not args:
            raise TypeError("update() takes at least 1 argument (0 given)")
        self = args[0]
        other = args[1] if len(args) >= 2 else ()
        if isinstance(other, Mapping):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, "keys"):
            # Duck-typed mapping: anything exposing keys().
            for key in other.keys():
                self[key] = other[key]
        else:
            # Iterable of (key, value) pairs.
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value
    def setdefault(self, key, default=None):
        'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D'
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default
MutableMapping.register(dict)
### SEQUENCES ###
class Sequence(Sized, Iterable, Container):
    """All the operations on a read-only sequence.

    Concrete subclasses must override __new__ or __init__,
    __getitem__, and __len__.
    """

    @abstractmethod
    def __getitem__(self, index):
        raise IndexError

    def __iter__(self):
        # Probe successive indexes until __getitem__ raises IndexError.
        position = 0
        try:
            while True:
                yield self[position]
                position += 1
        except IndexError:
            return

    def __contains__(self, value):
        return any(item == value for item in self)

    def __reversed__(self):
        for i in range(len(self) - 1, -1, -1):
            yield self[i]

    def index(self, value):
        '''S.index(value) -> integer -- return first index of value.
        Raises ValueError if the value is not present.
        '''
        for position, item in enumerate(self):
            if item == value:
                return position
        raise ValueError

    def count(self, value):
        'S.count(value) -> integer -- return number of occurrences of value'
        total = 0
        for item in self:
            if item == value:
                total += 1
        return total
# Register built-in sequence types as virtual subclasses.
# basestring, buffer and xrange exist only on Python 2.
Sequence.register(tuple)
Sequence.register(basestring)
Sequence.register(buffer)
Sequence.register(xrange)
class MutableSequence(Sequence):
    """All the operations on a read-only sequence.

    Concrete subclasses must provide __new__ or __init__,
    __getitem__, __setitem__, __delitem__, __len__, and insert().
    """

    @abstractmethod
    def __setitem__(self, index, value):
        raise IndexError

    @abstractmethod
    def __delitem__(self, index):
        raise IndexError

    @abstractmethod
    def insert(self, index, value):
        'S.insert(index, object) -- insert object before index'
        raise IndexError

    def append(self, value):
        'S.append(object) -- append object to the end of the sequence'
        self.insert(len(self), value)

    def reverse(self):
        'S.reverse() -- reverse *IN PLACE*'
        # Swap symmetric pairs; the middle element (odd length) stays put.
        last = len(self) - 1
        for i in range(len(self) // 2):
            self[i], self[last - i] = self[last - i], self[i]

    def extend(self, values):
        'S.extend(iterable) -- extend sequence by appending elements from the iterable'
        for item in values:
            self.append(item)

    def pop(self, index=-1):
        '''S.pop([index]) -> item -- remove and return item at index (default last).
        Raise IndexError if list is empty or index is out of range.
        '''
        item = self[index]
        del self[index]
        return item

    def remove(self, value):
        '''S.remove(value) -- remove first occurrence of value.
        Raise ValueError if the value is not present.
        '''
        del self[self.index(value)]

    def __iadd__(self, values):
        self.extend(values)
        return self

MutableSequence.register(list)
| gpl-2.0 |
darknao/piOClock | ssd1351.py | 1 | 13500 | #!/bin/env python
# -*- coding: UTF-8 -*-
# ----------------------------------------------------------------------
# ssd1351.py from https://github.com/guyc/py-gaugette
# ported by Jason Porritt,
# and reworked by darknao,
# based on original work by Guy Carpenter for display.py
#
# This library works with
# Adafruit's 128x96 SPI color OLED http://www.adafruit.com/products/1673
#
# The code is based heavily on Adafruit's Arduino library
# https://github.com/adafruit/Adafruit_SSD1351
# written by Limor Fried/Ladyada for Adafruit Industries.
#
# It has the following dependencies:
# wiringpi2 for GPIO
# spidev for SPI
# PIL for easy drawing capabilities
# numpy for fast RGB888 to RGB565 convertion
# ----------------------------------------------------------------------
# NEED HEAVY CLEANING !
import wiringpi2
import spidev
import time
import sys
from PIL import Image, ImageDraw, ImageFont
import logging
import numpy as np
import tools
class SSD1351:
    """SPI driver for SSD1351-based colour OLED modules.

    Drawing happens off-screen on a PIL RGB image (self.im / self.draw);
    call display() to convert the buffer to RGB565 and push it over SPI.
    GPIO is driven through wiringpi2 (reset line and data/command line).
    """
    # SSD1351 Commands
    EXTERNAL_VCC = 0x1
    SWITCH_CAP_VCC = 0x2
    MEMORY_MODE_HORIZ = 0x00
    MEMORY_MODE_VERT = 0x01
    CMD_SETCOLUMN = 0x15
    CMD_SETROW = 0x75
    CMD_WRITERAM = 0x5C
    CMD_READRAM = 0x5D
    CMD_SETREMAP = 0xA0
    CMD_STARTLINE = 0xA1
    CMD_DISPLAYOFFSET = 0xA2
    CMD_DISPLAYALLOFF = 0xA4
    CMD_DISPLAYALLON = 0xA5
    CMD_NORMALDISPLAY = 0xA6
    CMD_INVERTDISPLAY = 0xA7
    CMD_FUNCTIONSELECT = 0xAB
    CMD_DISPLAYOFF = 0xAE
    CMD_DISPLAYON = 0xAF
    CMD_PRECHARGE = 0xB1
    CMD_DISPLAYENHANCE = 0xB2
    CMD_CLOCKDIV = 0xB3
    CMD_SETVSL = 0xB4
    CMD_SETGPIO = 0xB5
    CMD_PRECHARGE2 = 0xB6
    CMD_SETGRAY = 0xB8
    CMD_USELUT = 0xB9
    CMD_PRECHARGELEVEL = 0xBB
    CMD_VCOMH = 0xBE
    CMD_CONTRASTABC = 0xC1
    CMD_CONTRASTMASTER = 0xC7
    CMD_MUXRATIO = 0xCA
    CMD_COMMANDLOCK = 0xFD
    CMD_HORIZSCROLL = 0x96
    CMD_STOPSCROLL = 0x9E
    CMD_STARTSCROLL = 0x9F
    # Device name will be /dev/spidev-{bus}.{device}
    # dc_pin is the data/command pin. This line is HIGH for data, LOW for command.
    # We will keep d/c low and bump it high only for commands with data
    # reset is normally HIGH, and pulled LOW to reset the display
    def __init__(self, bus=0, device=0, dc_pin="P9_15", reset_pin="P9_13", rows=128, cols=128):
        # NOTE(review): the pin defaults are BeagleBone-style strings, but the
        # __main__ demo passes wiringpi pin numbers (15/16) -- confirm targets.
        self.cols = cols
        self.rows = rows
        self.dc_pin = dc_pin
        self.reset_pin = reset_pin
        # SPI
        self.spi = spidev.SpiDev(bus, device)
        self.spi.max_speed_hz = 16000000 # 16Mhz
        # GPIO
        self.gpio = wiringpi2.GPIO(wiringpi2.GPIO.WPI_MODE_PINS)
        self.gpio.pinMode(self.reset_pin, self.gpio.OUTPUT)
        self.gpio.digitalWrite(self.reset_pin, self.gpio.HIGH)
        self.gpio.pinMode(self.dc_pin, self.gpio.OUTPUT)
        self.gpio.digitalWrite(self.dc_pin, self.gpio.LOW)
        # Drawing tools
        self.im = Image.new("RGB", (cols, rows), 'black')
        self.draw = ImageDraw.Draw(self.im)
        # logging
        self.log = logging.getLogger(self.__class__.__name__)
        self.log.setLevel(logging.INFO)
        self.contrast = 15
    def reset(self):
        """Hardware-reset the controller by pulsing the reset line LOW."""
        self.gpio.digitalWrite(self.reset_pin, self.gpio.LOW)
        time.sleep(0.010) # 10ms
        self.gpio.digitalWrite(self.reset_pin, self.gpio.HIGH)
    def command(self, cmd, cmddata=None):
        """Send a command byte (or list), optionally followed by data bytes."""
        # already low
        # self.gpio.digitalWrite(self.dc_pin, self.gpio.LOW)
        if type(cmd) == list:
            self.spi.writebytes(cmd)
        else:
            self.spi.writebytes([cmd])
        if cmddata is not None:
            if type(cmddata) == list:
                self.data(cmddata)
            else:
                self.data([cmddata])
    def data(self, bytes):
        """Send data bytes with D/C held HIGH, chunked to the SPI xfer limit."""
        # NOTE(review): the parameter shadows the built-in `bytes`.
        self.gpio.digitalWrite(self.dc_pin, self.gpio.HIGH)
        max_xfer = 1024
        start = 0
        remaining = len(bytes)
        while remaining>0:
            count = remaining if remaining <= max_xfer else max_xfer
            remaining -= count
            self.spi.writebytes(bytes[start:start+count])
            start += count
        # Restore command mode (D/C LOW) when done.
        self.gpio.digitalWrite(self.dc_pin, self.gpio.LOW)
    def begin(self, vcc_state = SWITCH_CAP_VCC):
        """Initialize the controller: unlock, configure and switch it on."""
        time.sleep(0.001) # 1ms
        self.reset()
        self.command(self.CMD_COMMANDLOCK, 0x12)
        self.command(self.CMD_COMMANDLOCK, 0xB1)
        self.command(self.CMD_DISPLAYOFF)
        self.command(self.CMD_CLOCKDIV, 0xF1)
        # support for 128x128 line mode
        self.command(self.CMD_MUXRATIO, 127)
        self.command(self.CMD_SETREMAP, 0x74)
        self.command(self.CMD_SETCOLUMN, [0x00, self.cols-1])
        self.command(self.CMD_SETROW, [0x00, self.rows-1])
        # TODO Support 96-row display
        self.command(self.CMD_STARTLINE, 96)
        self.command(self.CMD_DISPLAYOFFSET, 0x00)
        self.command(self.CMD_SETGPIO, 0x00)
        self.command(self.CMD_FUNCTIONSELECT, 0x01)
        self.command(self.CMD_PRECHARGE, 0x32)
        self.command(self.CMD_VCOMH, 0x05)
        self.command(self.CMD_NORMALDISPLAY)
        self.set_contrast(200) # c8 -> 200
        self.set_master_contrast(10)
        self.command(self.CMD_SETVSL, [0xA0, 0xB5, 0x55])
        self.command(self.CMD_PRECHARGE2, 0x01)
        self.command(self.CMD_DISPLAYON)
    def set_master_contrast(self, level):
        """Set the global contrast attenuation (0..15)."""
        # 0 to 15
        level &= 0x0F
        self.command(self.CMD_CONTRASTMASTER, level)
    def set_contrast(self, level):
        """Set the per-channel (R, G, B) contrast, same level for all (0..255)."""
        # 0 to 255
        level &= 0xFF
        self.command(self.CMD_CONTRASTABC, [level, level, level])
        self.contrast = level
    def invert_display(self):
        """Switch the panel to inverted-colour mode."""
        self.command(self.CMD_INVERTDISPLAY)
    def normal_display(self):
        """Switch the panel back to normal (non-inverted) mode."""
        self.command(self.CMD_NORMALDISPLAY)
    def scale(self, x, inLow, inHigh, outLow, outHigh):
        """Linearly rescale x from [inLow, inHigh] to [outLow, outHigh].

        NOTE(review): the denominator should be (inHigh - inLow); the formula
        is only correct because every caller passes inLow == 0.
        """
        return ((x - inLow) / float(inHigh) * outHigh) + outLow
    def encode_color(self, color):
        """Convert a 24-bit 0xRRGGBB integer to a 16-bit RGB565 value."""
        red = (color >> 16) & 0xFF
        green = (color >> 8) & 0xFF
        blue = color & 0xFF
        redScaled = int(self.scale(red, 0, 0xFF, 0, 0x1F))
        greenScaled = int(self.scale(green, 0, 0xFF, 0, 0x3F))
        blueScaled = int(self.scale(blue, 0, 0xFF, 0, 0x1F))
        return (((redScaled << 6) | greenScaled) << 5) | blueScaled
    def color565(self, r, g, b):
        """Convert separate 8-bit r, g, b components to a 16-bit RGB565 value.

        NOTE(review): duplicates encode_color() except for the argument form.
        """
        # 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
        # r r r r r g g g g g g b b b b b
        # r = 31 g = 63 b = 31
        redScaled = int(self.scale(r, 0, 0xFF, 0, 0x1F))
        greenScaled = int(self.scale(g, 0, 0xFF, 0, 0x3F))
        blueScaled = int(self.scale(b, 0, 0xFF, 0, 0x1F))
        return (((redScaled << 6) | greenScaled) << 5) | blueScaled
    def goTo(self, x, y):
        """Point the controller's RAM write window at (x, y)."""
        if x >= self.cols or y >= self.rows:
            return
        # set x and y coordinate
        self.command(self.CMD_SETCOLUMN, [x, self.cols-1])
        self.command(self.CMD_SETROW, [y, self.rows-1])
        self.command(self.CMD_WRITERAM)
    def drawPixel(self, x, y, color):
        """Write one pixel straight to the panel (bypasses the PIL buffer)."""
        if x >= self.cols or y >= self.rows:
            return
        if x < 0 or y < 0:
            return
        color = self.encode_color(color)
        # set location
        self.goTo(x, y)
        self.data([color >> 8, color & 0xFF])
    def clear(self):
        """Clear display buffer"""
        self.im = Image.new("RGB", (self.cols, self.rows), 'black')
        self.draw = ImageDraw.Draw(self.im)
    def text_center(self, string, color, font=None, size=10):
        """Draw string centred both ways in the buffer; return its (x, y)."""
        if font is None:
            font = ImageFont.truetype("/usr/share/fonts/truetype/droid/DroidSansMono.ttf", size)
        text_size = self.draw.textsize(string, font=font)
        text_x = max((self.cols-text_size[0])/2, 0)
        text_y = max((self.rows-text_size[1])/2, 0)
        self.draw_text(text_x, text_y, string, color, font=font, size=size)
        return text_x, text_y
    def text_center_y(self, text_y, string, color, font=None, size=10):
        """Draw string horizontally centred at row text_y; return its (x, y)."""
        if font is None:
            font = ImageFont.truetype("/usr/share/fonts/truetype/droid/DroidSansMono.ttf", size)
        text_size = self.draw.textsize(string, font=font)
        text_x = max((self.cols-text_size[0])/2, 0)
        self.draw_text(text_x, text_y, string, color, font=font, size=size)
        return text_x, text_y
    def draw_text(self, x, y, string, color, font=None, size=10):
        """Draw string at (x, y) in the buffer; return its rendered size."""
        if font is None:
            font = ImageFont.truetype("/usr/share/fonts/truetype/droid/DroidSansMono.ttf", size)
        self.draw.text((x, y), string, font=font, fill=color)
        return self.draw.textsize(string, font=font)
    def fillScreen(self, fillcolor):
        """Fill the whole panel with fillcolor (direct write, not the buffer)."""
        self.rawFillRect(0, 0, self.cols, self.rows, fillcolor)
    def rawFillRect(self, x, y, w, h, fillcolor):
        """Fill a w x h rectangle at (x, y) directly on the panel."""
        self.log.debug("fillScreen start")
        # Bounds check
        if (x >= self.cols) or (y >= self.rows):
            return
        # Y bounds check
        if y+h > self.rows:
            h = self.rows - y - 1
        # X bounds check
        if x+w > self.cols:
            w = self.cols - x - 1
        self.setDisplay(x, y, x+(w-1), y+(h-1))
        color = self.encode_color(fillcolor)
        # Two bytes (big-endian RGB565) per pixel.
        self.data([color >> 8, color & 0xFF] * w*h)
        self.log.debug("fillScreen end")
    def setDisplay(self, startx, starty, endx, endy):
        """Set the RAM write window to the inclusive rectangle given."""
        if startx >= self.cols or starty >= self.rows:
            return
        # Y bounds check
        if endx > self.cols - 1:
            endx = self.cols - 1
        # X bounds check
        if endy > self.rows - 1:
            endy = self.rows - 1
        # set x and y coordinate
        # print "x:%d y:%d endx:%d endy:%d" % (startx, starty, endx, endy)
        self.command(self.CMD_SETCOLUMN, [startx, endx])
        self.command(self.CMD_SETROW, [starty, endy])
        self.command(self.CMD_WRITERAM)
    def im2list(self):
        """Convert PIL RGB888 Image to SSD1351 RAM buffer"""
        # NOTE(review): in-place float multiply on an integer numpy array;
        # relies on old numpy truncating-cast behaviour (newer numpy raises).
        # Factors approximate 31/255, 63/255 and 31/255 (RGB888 -> RGB565).
        image = np.array(self.im).reshape(-1, 3)
        image[:,0] *= 0.121
        image[:,1] *= 0.247
        image[:,2] *= 0.121
        # Pack into 16-bit RGB565 then split into big-endian byte pairs.
        d = np.left_shift(image, [11, 5, 0]).sum(axis=1)
        data =np.dstack(((d>>8)&0xff, d&0xff)).flatten()
        return data.tolist()
    def display(self, x=0, y=0, w=None, h=None):
        """Send display buffer to the device"""
        # NOTE(review): start/end are computed but unused and the full frame
        # buffer is always transmitted -- the x/y/w/h clipping only shrinks
        # the write window, so partial updates draw the wrong data.
        self.log.debug("disp in")
        if h is None:
            h = self.rows
        if w is None:
            w = self.cols
        x = max(x, 0)
        y = max(y, 0)
        w = min(w, self.cols)
        h = min(h, self.rows)
        if w-x < 0:
            return
        self.log.debug("set display")
        self.setDisplay(x, y, w-1, h-1)
        self.log.debug("set display end")
        data = []
        start = y * self.cols + x
        end = h * self.cols + w
        self.log.debug("get data")
        self.data(self.im2list())
        self.log.debug("disp out")
    @tools.timed
    def dump_disp(self):
        """Dump display buffer on screen,
        for debugging purpose"""
        # Two pixel rows per text line, using half-block characters.
        image = np.array(self.im).reshape(-1, 3)
        for r in range(0, self.rows,2):
            txt = [None,] * self.cols
            start = r*self.cols
            end = start + self.cols * 2
            line = image[start:end]
            for c in range(len(line)):
                idx = c % self.cols
                if line[c].sum() > 0:
                    # First (top) row sets the upper half block; the second
                    # pass upgrades to a full block or sets the lower half.
                    if txt[idx] is None:
                        txt[idx] = '▀'
                    elif txt[idx] == '▀':
                        txt[idx] = '█'
                    else:
                        txt[idx] = '▄'
                else:
                    if txt[idx] is None:
                        txt[idx] = ' '
            print ''.join(txt) + '║'
    @tools.timed
    def dump_disp2(self):
        """Alternative buffer dump iterating the 2-D numpy array directly."""
        #image = list(self.im.convert("I").getdata())
        image = np.array(self.im)
        for row, r in enumerate(image):
            if row % 2 == 0:
                txt = [None,] * self.cols
            for idx, c in enumerate(r):
                if c.sum() > 0:
                    if txt[idx] is None:
                        txt[idx] = '▀'
                    elif txt[idx] == '▀':
                        txt[idx] = '█'
                    else:
                        txt[idx] = '▄'
                else:
                    if txt[idx] is None:
                        txt[idx] = ' '
            if row % 2 == 1:
                print ''.join(txt) + '║'
if __name__ == '__main__':
    # Demo: draw horizontal colour-gradient bands (R | G | B columns).
    import datetime
    import time
    import ssd1351
    import random
    from PIL import ImageFont
    import psutil
    import logging
    import os
    log = logging.getLogger("clock")
    logging.basicConfig(
        format='%(asctime)-23s - %(levelname)-7s - %(name)s - %(message)s')
    log.setLevel(logging.INFO)
    # NOTE(review): these constants are defined but the literals 15/16 are
    # passed below instead of RESET_PIN / DC_PIN.
    RESET_PIN = 15
    DC_PIN = 16
    led = ssd1351.SSD1351(reset_pin=15, dc_pin=16, rows=96)
    led.begin()
    led.fillScreen(0)
    color = 0x000000
    bands = 10
    color_step = 0xFF / bands  # Python 2 integer division
    color_width = led.cols / 3
    for x in range(0, led.rows, led.rows/bands):
        # NOTE(review): rawFillRect expects (x, y, w, h) but is given end
        # coordinates (color_width*2, color_width*3) as widths, and `bands`
        # as the height; the overflow is only masked by the bounds clipping.
        led.rawFillRect(0, x, color_width, bands, color&0xff0000)
        led.rawFillRect(color_width, x, color_width*2, bands, color&0xff00)
        led.rawFillRect(color_width*2, x, color_width*3, bands, color&0xff)
        color = (color + (color_step << 16) + (color_step << 8) + (color_step)) & 0xFFFFFF
| gpl-3.0 |
austinvernsonger/metagoofil | hachoir_parser/video/asf.py | 86 | 12869 | """
Advanced Streaming Format (ASF) parser, format used by Windows Media Video
(WMF) and Windows Media Audio (WMA).
Informations:
- http://www.microsoft.com/windows/windowsmedia/forpros/format/asfspec.aspx
- http://swpat.ffii.org/pikta/xrani/asf/index.fr.html
Author: Victor Stinner
Creation: 5 august 2006
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, ParserError,
UInt16, UInt32, UInt64,
TimestampWin64, TimedeltaWin64,
String, PascalString16, Enum,
Bit, Bits, PaddingBits,
PaddingBytes, NullBytes, RawBytes)
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_core.text_handler import (
displayHandler, filesizeHandler)
from hachoir_core.tools import humanBitRate
from itertools import izip
from hachoir_parser.video.fourcc import audio_codec_name, video_fourcc_name
from hachoir_parser.common.win32 import BitmapInfoHeader, GUID
# Sanity bound used by AsfFile.validate() to reject absurd header sizes.
MAX_HEADER_SIZE = 100 * 1024 # bytes
class AudioHeader(FieldSet):
    """ASF audio stream properties (WAVEFORMATEX-like layout)."""
    guid = "F8699E40-5B4D-11CF-A8FD-00805F5C442B"
    def createFields(self):
        # Field order matches the on-disk layout; do not reorder the yields.
        yield Enum(UInt16(self, "twocc"), audio_codec_name)
        yield UInt16(self, "channels")
        yield UInt32(self, "sample_rate")
        yield UInt32(self, "bit_rate")
        yield UInt16(self, "block_align")
        yield UInt16(self, "bits_per_sample")
        yield UInt16(self, "codec_specific_size")
        size = self["codec_specific_size"].value
        if size:
            yield RawBytes(self, "codec_specific", size)
class BitrateMutualExclusion(FieldSet):
    """Bitrate mutual exclusion object: mutually exclusive stream numbers."""
    guid = "D6E229DC-35DA-11D1-9034-00A0C90349BE"
    # Known exclusion-type GUIDs.
    mutex_name = {
        "D6E22A00-35DA-11D1-9034-00A0C90349BE": "Language",
        "D6E22A01-35DA-11D1-9034-00A0C90349BE": "Bitrate",
        "D6E22A02-35DA-11D1-9034-00A0C90349BE": "Unknown",
    }
    def createFields(self):
        yield Enum(GUID(self, "exclusion_type"), self.mutex_name)
        yield UInt16(self, "nb_stream")
        for index in xrange(self["nb_stream"].value):
            yield UInt16(self, "stream[]")
class VideoHeader(FieldSet):
    """ASF video stream properties: frame size plus a BITMAPINFOHEADER."""
    guid = "BC19EFC0-5B4D-11CF-A8FD-00805F5C442B"

    def createFields(self):
        """Yield the video stream property fields in on-disk order.

        The previous implementation carried a dead ``if False:`` branch with
        an alternative field layout; it was unreachable and has been removed.
        """
        yield UInt32(self, "width")
        yield UInt32(self, "height")
        yield PaddingBytes(self, "reserved[]", 1)
        yield UInt16(self, "format_data_size")
        if self["format_data_size"].value < 40:
            # 40 bytes is the minimum size of a BITMAPINFOHEADER.
            raise ParserError("Unknown format data size")
        yield BitmapInfoHeader(self, "bmp_info", use_fourcc=True)
class FileProperty(FieldSet):
    """ASF file properties: global size, timing and packet information."""
    guid = "8CABDCA1-A947-11CF-8EE4-00C00C205365"
    def createFields(self):
        yield GUID(self, "guid")
        yield filesizeHandler(UInt64(self, "file_size"))
        yield TimestampWin64(self, "creation_date")
        yield UInt64(self, "pckt_count")
        yield TimedeltaWin64(self, "play_duration")
        yield TimedeltaWin64(self, "send_duration")
        yield UInt64(self, "preroll")
        yield Bit(self, "broadcast", "Is broadcast?")
        yield Bit(self, "seekable", "Seekable stream?")
        yield PaddingBits(self, "reserved[]", 30)
        yield filesizeHandler(UInt32(self, "min_pckt_size"))
        yield filesizeHandler(UInt32(self, "max_pckt_size"))
        yield displayHandler(UInt32(self, "max_bitrate"), humanBitRate)
class HeaderExtension(FieldSet):
    """Header extension object: opaque blob of extended header data."""
    guid = "5FBF03B5-A92E-11CF-8EE3-00C00C205365"
    def createFields(self):
        yield GUID(self, "reserved[]")
        yield UInt16(self, "reserved[]")
        yield UInt32(self, "size")
        if self["size"].value:
            yield RawBytes(self, "data", self["size"].value)
class Header(FieldSet):
    """Top-level ASF header: a counted sequence of sub-objects."""
    guid = "75B22630-668E-11CF-A6D9-00AA0062CE6C"
    def createFields(self):
        yield UInt32(self, "obj_count")
        yield PaddingBytes(self, "reserved[]", 2)
        for index in xrange(self["obj_count"].value):
            yield Object(self, "object[]")
class Metadata(FieldSet):
    """Content description: five length-prefixed UTF-16 strings."""
    guid = "75B22633-668E-11CF-A6D9-00AA0062CE6C"
    # Field names for the five strings, in on-disk order.
    names = ("title", "author", "copyright", "xxx", "yyy")
    def createFields(self):
        # All five UInt16 sizes come first, then the matching strings.
        for index in xrange(5):
            yield UInt16(self, "size[]")
        for name, size in izip(self.names, self.array("size")):
            if size.value:
                yield String(self, name, size.value, charset="UTF-16-LE", strip=" \0")
class Descriptor(FieldSet):
    """
    One name/type/value attribute entry.
    See ExtendedContentDescription class.
    """
    TYPE_BYTE_ARRAY = 1
    TYPE_NAME = {
        0: "Unicode",
        1: "Byte array",
        2: "BOOL (32 bits)",
        3: "DWORD (32 bits)",
        4: "QWORD (64 bits)",
        5: "WORD (16 bits)"
    }
    def createFields(self):
        yield PascalString16(self, "name", "Name", charset="UTF-16-LE", strip="\0")
        yield Enum(UInt16(self, "type"), self.TYPE_NAME)
        yield UInt16(self, "value_length")
        # NOTE: local `type` shadows the built-in for the rest of this method.
        type = self["type"].value
        size = self["value_length"].value
        name = "value"
        if type == 0 and (size % 2) == 0:
            yield String(self, name, size, charset="UTF-16-LE", strip="\0")
        elif type in (2, 3):
            yield UInt32(self, name)
        elif type == 4:
            yield UInt64(self, name)
        else:
            # Byte arrays (1), WORDs (5) and odd-length unicode fall back to
            # raw bytes.
            yield RawBytes(self, name, size)
class ExtendedContentDescription(FieldSet):
    """Extended content description: a counted list of Descriptor entries."""
    guid = "D2D0A440-E307-11D2-97F0-00A0C95EA850"
    def createFields(self):
        yield UInt16(self, "count")
        for index in xrange(self["count"].value):
            yield Descriptor(self, "descriptor[]")
class Codec(FieldSet):
    """
    One codec entry (type, name, description, opaque info).
    See CodecList class.
    """
    type_name = {
        1: "video",
        2: "audio"
    }
    def createFields(self):
        yield Enum(UInt16(self, "type"), self.type_name)
        # String lengths are counted in UTF-16 characters (2 bytes each).
        yield UInt16(self, "name_len", "Name length in character (byte=len*2)")
        if self["name_len"].value:
            yield String(self, "name", self["name_len"].value*2, "Name", charset="UTF-16-LE", strip=" \0")
        yield UInt16(self, "desc_len", "Description length in character (byte=len*2)")
        if self["desc_len"].value:
            yield String(self, "desc", self["desc_len"].value*2, "Description", charset="UTF-16-LE", strip=" \0")
        yield UInt16(self, "info_len")
        if self["info_len"].value:
            yield RawBytes(self, "info", self["info_len"].value)
class CodecList(FieldSet):
    """Codec list object: a counted list of Codec entries."""
    guid = "86D15240-311D-11D0-A3A4-00A0C90348F6"
    def createFields(self):
        yield GUID(self, "reserved[]")
        yield UInt32(self, "count")
        for index in xrange(self["count"].value):
            yield Codec(self, "codec[]")
class SimpleIndexEntry(FieldSet):
    """
    One (packet number, packet count) index entry.
    See SimpleIndex class.
    """
    def createFields(self):
        yield UInt32(self, "pckt_number")
        yield UInt16(self, "pckt_count")
class SimpleIndex(FieldSet):
    """Simple index object: time-interval based seek table."""
    guid = "33000890-E5B1-11CF-89F4-00A0C90349CB"
    def createFields(self):
        yield GUID(self, "file_id")
        yield TimedeltaWin64(self, "entry_interval")
        yield UInt32(self, "max_pckt_count")
        yield UInt32(self, "entry_count")
        for index in xrange(self["entry_count"].value):
            yield SimpleIndexEntry(self, "entry[]")
class BitRate(FieldSet):
    """
    Average bitrate record for one stream.
    See BitRateList class.
    """
    def createFields(self):
        yield Bits(self, "stream_index", 7)
        yield PaddingBits(self, "reserved", 9)
        yield displayHandler(UInt32(self, "avg_bitrate"), humanBitRate)
class BitRateList(FieldSet):
    """Stream bitrate properties object: a counted list of BitRate records."""
    guid = "7BF875CE-468D-11D1-8D82-006097C9A2B2"
    def createFields(self):
        yield UInt16(self, "count")
        for index in xrange(self["count"].value):
            yield BitRate(self, "bit_rate[]")
class Data(FieldSet):
    """Data object: the media packets themselves, kept as raw bytes."""
    guid = "75B22636-668E-11CF-A6D9-00AA0062CE6C"
    def createFields(self):
        yield GUID(self, "file_id")
        yield UInt64(self, "packet_count")
        yield PaddingBytes(self, "reserved", 2)
        # Remaining size in bytes (hachoir sizes are in bits; Python 2
        # integer division).
        size = (self.size - self.current_size) / 8
        yield RawBytes(self, "data", size)
class StreamProperty(FieldSet):
    """Stream properties: per-stream type GUID plus type-specific data."""
    guid = "B7DC0791-A9B7-11CF-8EE6-00C00C205365"
    def createFields(self):
        yield GUID(self, "type")
        yield GUID(self, "error_correction")
        yield UInt64(self, "time_offset")
        yield UInt32(self, "data_len")
        yield UInt32(self, "error_correct_len")
        yield Bits(self, "stream_index", 7)
        yield Bits(self, "reserved[]", 8)
        yield Bit(self, "encrypted", "Content is encrypted?")
        yield UInt32(self, "reserved[]")
        size = self["data_len"].value
        if size:
            tag = self["type"].value
            # Dispatch on the stream-type GUID via Object.TAG_INFO (already
            # converted to a dict keyed by GUID at module load).
            if tag in Object.TAG_INFO:
                name, parser = Object.TAG_INFO[tag][0:2]
                yield parser(self, name, size=size*8)
            else:
                yield RawBytes(self, "data", size)
        size = self["error_correct_len"].value
        if size:
            yield RawBytes(self, "error_correct", size)
class Object(FieldSet):
    """Generic ASF top-level object: GUID + size + type-specific content."""
    # This list is converted to a dictionnary later where the key is the GUID
    TAG_INFO = (
        ("header", Header, "Header object"),
        ("file_prop", FileProperty, "File property"),
        ("header_ext", HeaderExtension, "Header extension"),
        ("codec_list", CodecList, "Codec list"),
        ("simple_index", SimpleIndex, "Simple index"),
        ("data", Data, "Data object"),
        ("stream_prop[]", StreamProperty, "Stream properties"),
        ("bit_rates", BitRateList, "Bit rate list"),
        ("ext_desc", ExtendedContentDescription, "Extended content description"),
        ("metadata", Metadata, "Metadata"),
        ("video_header", VideoHeader, "Video"),
        ("audio_header", AudioHeader, "Audio"),
        ("bitrate_mutex", BitrateMutualExclusion, "Bitrate mutual exclusion"),
    )
    def __init__(self, *args, **kw):
        FieldSet.__init__(self, *args, **kw)
        tag = self["guid"].value
        # By the time instances are created, TAG_INFO has been rebuilt as a
        # dict keyed by GUID (see the module-level code after this class).
        if tag not in self.TAG_INFO:
            # Unknown GUID: content is kept as raw bytes.
            self.handler = None
            return
        info = self.TAG_INFO[tag]
        self._name = info[0]
        self.handler = info[1]
    def createFields(self):
        yield GUID(self, "guid")
        yield filesizeHandler(UInt64(self, "size"))
        # "size" covers the whole object including the 24-byte GUID+size pair
        # already parsed; current_size is in bits.
        size = self["size"].value - self.current_size/8
        if 0 < size:
            if self.handler:
                yield self.handler(self, "content", size=size*8)
            else:
                yield RawBytes(self, "content", size)
# Rebuild TAG_INFO as a dict keyed by each parser class's GUID, for O(1)
# lookup in Object.__init__ and StreamProperty.createFields.
tag_info_list = Object.TAG_INFO
Object.TAG_INFO = dict( (parser[1].guid, parser) for parser in tag_info_list )
class AsfFile(Parser):
    """Top-level parser for ASF containers (WMV video / WMA audio)."""
    # 16-byte GUID of the ASF header object, used as the file magic.
    MAGIC = "\x30\x26\xB2\x75\x8E\x66\xCF\x11\xA6\xD9\x00\xAA\x00\x62\xCE\x6C"
    PARSER_TAGS = {
        "id": "asf",
        "category": "video",
        "file_ext": ("wmv", "wma", "asf"),
        "mime": (u"video/x-ms-asf", u"video/x-ms-wmv", u"audio/x-ms-wma"),
        "min_size": 24*8,
        "description": "Advanced Streaming Format (ASF), used for WMV (video) and WMA (audio)",
        "magic": ((MAGIC, 0),),
    }
    # MIME type -> (filename suffix, human description).
    FILE_TYPE = {
        "video/x-ms-wmv": (".wmv", u"Window Media Video (wmv)"),
        "video/x-ms-asf": (".asf", u"ASF container"),
        "audio/x-ms-wma": (".wma", u"Window Media Audio (wma)"),
    }
    endian = LITTLE_ENDIAN
    def validate(self):
        """Check the magic bytes and that the header size is plausible."""
        magic = self.MAGIC
        if self.stream.readBytes(0, len(magic)) != magic:
            return "Invalid magic"
        header = self[0]
        if not(30 <= header["size"].value <= MAX_HEADER_SIZE):
            return "Invalid header size (%u)" % header["size"].value
        return True
    def createMimeType(self):
        """Pick a MIME type: wmv if any video stream, else wma, else asf."""
        audio = False
        for prop in self.array("header/content/stream_prop"):
            guid = prop["content/type"].value
            if guid == VideoHeader.guid:
                return u"video/x-ms-wmv"
            if guid == AudioHeader.guid:
                audio = True
        if audio:
            return u"audio/x-ms-wma"
        else:
            return u"video/x-ms-asf"
    def createFields(self):
        # A file is a flat sequence of top-level Objects.
        while not self.eof:
            yield Object(self, "object[]")
    def createDescription(self):
        return self.FILE_TYPE[self.mime_type][1]
    def createFilenameSuffix(self):
        return self.FILE_TYPE[self.mime_type][0]
    def createContentSize(self):
        """Total size in bits, derived from the file property header."""
        if self[0].name != "header":
            return None
        return self["header/content/file_prop/content/file_size"].value * 8
| gpl-2.0 |
GbalsaC/bitnamiP | lms/djangoapps/teams/views.py | 18 | 35363 | """HTTP endpoints for the Teams API."""
from django.shortcuts import render_to_response
from courseware.courses import get_course_with_access, has_access
from django.http import Http404
from django.conf import settings
from django.core.paginator import Paginator
from django.views.generic.base import View
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
from rest_framework.authentication import (
SessionAuthentication,
OAuth2Authentication
)
from rest_framework import status
from rest_framework import permissions
from django.db.models import Count
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from student.models import CourseEnrollment, CourseAccessRole
from student.roles import CourseStaffRole
from openedx.core.lib.api.parsers import MergePatchParser
from openedx.core.lib.api.permissions import IsStaffOrReadOnly
from openedx.core.lib.api.view_utils import (
RetrievePatchAPIView,
add_serializer_errors,
build_api_error,
ExpandableFieldViewMixin
)
from openedx.core.lib.api.serializers import PaginationSerializer
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from .models import CourseTeam, CourseTeamMembership
from .serializers import CourseTeamSerializer, CourseTeamCreationSerializer, TopicSerializer, MembershipSerializer
from .errors import AlreadyOnTeamInCourse, NotEnrolledInCourseForTeam
# Constants
TOPICS_PER_PAGE = 12  # number of team topics rendered per dashboard page
class TeamsDashboardView(View):
    """
    View methods related to the teams dashboard.
    """
    def get(self, request, course_id):
        """
        Renders the teams dashboard, which is shown on the "Teams" tab.
        Raises a 404 if the course specified by course_id does not exist, the
        user is not registered for the course, or the teams feature is not enabled.
        """
        course_key = CourseKey.from_string(course_id)
        course = get_course_with_access(request.user, "load", course_key)
        if not is_feature_enabled(course):
            raise Http404
        # Enrollment (or staff access) is required to see the dashboard.
        if not CourseEnrollment.is_enrolled(request.user, course.id) and \
                not has_access(request.user, 'staff', course, course.id):
            raise Http404
        sort_order = 'name'
        # NOTE(review): get_ordered_topics is neither defined nor imported in
        # the visible portion of this module -- confirm it exists elsewhere.
        topics = get_ordered_topics(course, sort_order)
        # Only the first page of topics is rendered server-side; the client
        # fetches further pages through topics_url.
        topics_page = Paginator(topics, TOPICS_PER_PAGE).page(1)
        topics_serializer = PaginationSerializer(instance=topics_page, context={'sort_order': sort_order})
        context = {
            "course": course, "topics": topics_serializer.data, "topics_url": reverse('topics_list', request=request)
        }
        return render_to_response("teams/teams.html", context)
def is_feature_enabled(course):
    """
    Returns True if the teams feature is enabled.

    Both the platform-wide ENABLE_TEAMS flag and the per-course
    teams_enabled setting must be on.
    """
    enabled_globally = settings.FEATURES.get('ENABLE_TEAMS', False)
    return enabled_globally and course.teams_enabled
def has_team_api_access(user, course_key, access_username=None):
    """Returns True if the user has access to the Team API for the course
    given by `course_key`. The user must either be enrolled in the course,
    be course staff, or be global staff.
    Args:
        user (User): The user to check access for.
        course_key (CourseKey): The key to the course which we are checking access to.
        access_username (string): If provided, access_username must match user.username for non staff access.
    Returns:
        bool: True if the user has access, False otherwise.
    """
    # Global staff and course staff always have access.
    if user.is_staff or CourseStaffRole(course_key).has_user(user):
        return True
    # Non-staff acting on behalf of another username are denied outright.
    if access_username and access_username != user.username:
        return False
    # Otherwise access hinges on enrollment in the course.
    return CourseEnrollment.is_enrolled(user, course_key)
class TeamsListView(ExpandableFieldViewMixin, GenericAPIView):
"""
**Use Cases**
Get or create a course team.
**Example Requests**:
GET /api/team/v0/teams
POST /api/team/v0/teams
**Query Parameters for GET**
* course_id: Filters the result to teams belonging to the given
course. Required.
* topic_id: Filters the result to teams associated with the given
topic.
* text_search: Currently not supported.
* order_by: Must be one of the following:
* name: Orders results by case insensitive team name (default).
* open_slots: Orders results by most open slots.
* last_activity: Currently not supported.
* page_size: Number of results to return per page.
* page: Page number to retrieve.
* include_inactive: If true, inactive teams will be returned. The
default is to not include inactive teams.
* expand: Comma separated list of types for which to return
expanded representations. Supports "user" and "team".
**Response Values for GET**
If the user is logged in and enrolled, the response contains:
* count: The total number of teams matching the request.
* next: The URL to the next page of results, or null if this is the
last page.
* previous: The URL to the previous page of results, or null if this
is the first page.
* num_pages: The total number of pages in the result.
* results: A list of the teams matching the request.
* id: The team's unique identifier.
* name: The name of the team.
* is_active: True if the team is currently active. If false, the
team is considered "soft deleted" and will not be included by
default in results.
* course_id: The identifier for the course this team belongs to.
* topic_id: Optionally specifies which topic the team is associated
with.
* date_created: Date and time when the team was created.
* description: A description of the team.
* country: Optionally specifies which country the team is
associated with.
* language: Optionally specifies which language the team is
associated with.
* membership: A list of the users that are members of the team.
See membership endpoint for more detail.
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If the user is not logged in, a 401 error is returned.
If the user is not enrolled in the course specified by course_id or
is not course or global staff, a 403 error is returned.
If the specified course_id is not valid or the user attempts to
use an unsupported query parameter, a 400 error is returned.
If the response does not exist, a 404 error is returned. For
example, the course_id may not reference a real course or the page
number may be beyond the last page.
**Response Values for POST**
Any logged in user who has verified their email address can create
a team. The format mirrors that of a GET for an individual team,
but does not include the id, is_active, date_created, or membership
fields. id is automatically computed based on name.
If the user is not logged in, a 401 error is returned.
If the user is not enrolled in the course, or is not course or
global staff, a 403 error is returned.
If the course_id is not valid or extra fields are included in the
request, a 400 error is returned.
If the specified course does not exist, a 404 error is returned.
"""
# OAuth2Authentication must come first to return a 401 for unauthenticated users
authentication_classes = (OAuth2Authentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
paginate_by = 10
paginate_by_param = 'page_size'
pagination_serializer_class = PaginationSerializer
serializer_class = CourseTeamSerializer
def get(self, request):
"""GET /api/team/v0/teams/"""
result_filter = {
'is_active': True
}
if 'course_id' in request.QUERY_PARAMS:
course_id_string = request.QUERY_PARAMS['course_id']
try:
course_key = CourseKey.from_string(course_id_string)
# Ensure the course exists
if not modulestore().has_course(course_key):
return Response(status=status.HTTP_404_NOT_FOUND)
result_filter.update({'course_id': course_key})
except InvalidKeyError:
error = build_api_error(
ugettext_noop("The supplied course id {course_id} is not valid."),
course_id=course_id_string,
)
return Response(error, status=status.HTTP_400_BAD_REQUEST)
if not has_team_api_access(request.user, course_key):
return Response(status=status.HTTP_403_FORBIDDEN)
else:
return Response(
build_api_error(ugettext_noop("course_id must be provided")),
status=status.HTTP_400_BAD_REQUEST
)
if 'topic_id' in request.QUERY_PARAMS:
result_filter.update({'topic_id': request.QUERY_PARAMS['topic_id']})
if 'include_inactive' in request.QUERY_PARAMS and request.QUERY_PARAMS['include_inactive'].lower() == 'true':
del result_filter['is_active']
if 'text_search' in request.QUERY_PARAMS:
return Response(
build_api_error(ugettext_noop("text_search is not yet supported.")),
status=status.HTTP_400_BAD_REQUEST
)
queryset = CourseTeam.objects.filter(**result_filter)
order_by_input = request.QUERY_PARAMS.get('order_by', 'name')
if order_by_input == 'name':
queryset = queryset.extra(select={'lower_name': "lower(name)"})
order_by_field = 'lower_name'
elif order_by_input == 'open_slots':
queryset = queryset.annotate(team_size=Count('users'))
order_by_field = 'team_size'
elif order_by_input == 'last_activity':
return Response(
build_api_error(ugettext_noop("last_activity is not yet supported")),
status=status.HTTP_400_BAD_REQUEST
)
queryset = queryset.order_by(order_by_field)
if not queryset:
return Response(status=status.HTTP_404_NOT_FOUND)
page = self.paginate_queryset(queryset)
serializer = self.get_pagination_serializer(page)
return Response(serializer.data) # pylint: disable=maybe-no-member
def post(self, request):
"""POST /api/team/v0/teams/"""
field_errors = {}
course_key = None
course_id = request.DATA.get('course_id')
try:
course_key = CourseKey.from_string(course_id)
# Ensure the course exists
if not modulestore().has_course(course_key):
return Response(status=status.HTTP_404_NOT_FOUND)
except InvalidKeyError:
field_errors['course_id'] = build_api_error(
ugettext_noop('The supplied course_id {course_id} is not valid.'),
course_id=course_id
)
if course_key and not has_team_api_access(request.user, course_key):
return Response(status=status.HTTP_403_FORBIDDEN)
data = request.DATA.copy()
data['course_id'] = course_key
serializer = CourseTeamCreationSerializer(data=data)
add_serializer_errors(serializer, data, field_errors)
if field_errors:
return Response({
'field_errors': field_errors,
}, status=status.HTTP_400_BAD_REQUEST)
else:
team = serializer.save()
return Response(CourseTeamSerializer(team).data)
class TeamsDetailView(ExpandableFieldViewMixin, RetrievePatchAPIView):
    """
    **Use Cases**

        Get or update a course team's information. Updates are supported
        only through merge patch.

    **Example Requests**:

        GET /api/team/v0/teams/{team_id}

        PATCH /api/team/v0/teams/{team_id} "application/merge-patch+json"

    **Query Parameters for GET**

        * expand: Comma separated list of types for which to return
          expanded representations. Supports "user" and "team".

    **Response Values for GET**

        If the user is logged in, the response contains the following fields:

            * id: The team's unique identifier.
            * name: The name of the team.
            * is_active: True if the team is currently active. If false, the team
              is considered "soft deleted" and will not be included by default in
              results.
            * course_id: The identifier for the course this team belongs to.
            * topic_id: Optionally specifies which topic the team is
              associated with.
            * date_created: Date and time when the team was created.
            * description: A description of the team.
            * country: Optionally specifies which country the team is
              associated with.
            * language: Optionally specifies which language the team is
              associated with.
            * membership: A list of the users that are members of the team. See
              membership endpoint for more detail.

        For all text fields, clients rendering the values should take care
        to HTML escape them to avoid script injections, as the data is
        stored exactly as specified. The intention is that plain text is
        supported, not HTML.

        If the user is not logged in, a 401 error is returned.

        If the user is not course or global staff, a 403 error is returned.

        If the specified team does not exist, a 404 error is returned.

    **Response Values for PATCH**

        Only staff can patch teams.

        If the user is anonymous or inactive, a 401 is returned.

        If the user is logged in and the team does not exist, a 404 is returned.

        If the user is not course or global staff and the team does exist,
        a 403 is returned.

        If "application/merge-patch+json" is not the specified content type,
        a 415 error is returned.

        If the update could not be completed due to validation errors, this
        method returns a 400 error with all error messages in the
        "field_errors" field of the returned JSON.
    """

    class IsEnrolledOrIsStaff(permissions.BasePermission):
        """Permission that checks to see if the user is enrolled in the course or is staff."""

        def has_object_permission(self, request, view, obj):
            """Returns true if the user is enrolled or is staff."""
            return has_team_api_access(request.user, obj.course_id)

    # OAuth2Authentication must come first so unauthenticated API clients
    # receive a 401 rather than a session-auth redirect.
    authentication_classes = (OAuth2Authentication, SessionAuthentication)
    # IsStaffOrReadOnly limits writes (PATCH) to staff; IsEnrolledOrIsStaff
    # additionally gates per-object access to enrolled users or staff.
    permission_classes = (permissions.IsAuthenticated, IsStaffOrReadOnly, IsEnrolledOrIsStaff,)
    # Teams are looked up by their string team_id, not their numeric pk.
    lookup_field = 'team_id'
    serializer_class = CourseTeamSerializer
    # Only "application/merge-patch+json" request bodies are accepted for PATCH.
    parser_classes = (MergePatchParser,)

    def get_queryset(self):
        """Returns the queryset used to access the given team."""
        return CourseTeam.objects.all()
class TopicListView(GenericAPIView):
    """
    **Use Cases**

        Retrieve a list of topics associated with a single course.

    **Example Requests**

        GET /api/team/v0/topics/?course_id={course_id}

    **Query Parameters for GET**

        * course_id: Filters the result to topics belonging to the given
          course (required).
        * order_by: Orders the results. Currently only 'name' is supported,
          and is also the default value.
        * page_size: Number of results to return per page.
        * page: Page number to retrieve.

    **Response Values for GET**

        If the user is not logged in, a 401 error is returned.

        If the course_id is not given or an unsupported value is passed for
        order_by, returns a 400 error.

        If the user is not logged in, is not enrolled in the course, or is
        not course or global staff, returns a 403 error.

        If the course does not exist, returns a 404 error.

        Otherwise, a 200 response is returned containing the following
        fields:

        * count: The total number of topics matching the request.
        * next: The URL to the next page of results, or null if this is the
          last page.
        * previous: The URL to the previous page of results, or null if this
          is the first page.
        * num_pages: The total number of pages in the result.
        * results: A list of the topics matching the request.

            * id: The topic's unique identifier.
            * name: The name of the topic.
            * description: A description of the topic.
    """

    # OAuth2Authentication first so API clients get a 401 when unauthenticated.
    authentication_classes = (OAuth2Authentication, SessionAuthentication)
    permission_classes = (permissions.IsAuthenticated,)
    paginate_by = TOPICS_PER_PAGE
    paginate_by_param = 'page_size'
    pagination_serializer_class = PaginationSerializer
    serializer_class = TopicSerializer

    def get(self, request):
        """GET /api/team/v0/topics/?course_id={course_id}"""
        course_id_string = request.QUERY_PARAMS.get('course_id', None)
        if course_id_string is None:
            # NOTE(review): this message says "not valid" even though the
            # actual problem is that course_id is missing — consider a more
            # specific "required" message; kept as-is for translation parity.
            return Response({
                'field_errors': {
                    'course_id': build_api_error(
                        ugettext_noop("The supplied course id {course_id} is not valid."),
                        course_id=course_id_string
                    )
                }
            }, status=status.HTTP_400_BAD_REQUEST)

        try:
            course_id = CourseKey.from_string(course_id_string)
        except InvalidKeyError:
            # Malformed keys are treated the same as non-existent courses.
            return Response(status=status.HTTP_404_NOT_FOUND)

        # Ensure the course exists
        course_module = modulestore().get_course(course_id)
        if course_module is None:  # course is None if not found
            return Response(status=status.HTTP_404_NOT_FOUND)

        if not has_team_api_access(request.user, course_id):
            return Response(status=status.HTTP_403_FORBIDDEN)

        ordering = request.QUERY_PARAMS.get('order_by', 'name')
        if ordering == 'name':
            topics = get_ordered_topics(course_module, ordering)
        else:
            # Only 'name' ordering is implemented; anything else is a 400.
            return Response({
                'developer_message': "unsupported order_by value {}".format(ordering),
                'user_message': _(u"The ordering {} is not supported").format(ordering),
            }, status=status.HTTP_400_BAD_REQUEST)

        page = self.paginate_queryset(topics)
        serializer = self.get_pagination_serializer(page)
        # The serializer needs the sort order to echo it back in the payload.
        serializer.context = {'sort_order': ordering}
        return Response(serializer.data)  # pylint: disable=maybe-no-member
def get_ordered_topics(course_module, ordering):
    """Return the course's team topics sorted by the given key.

    Arguments:
        course_module (xmodule): the course which owns the team topics
        ordering (str): the key belonging to topic dicts by which we sort

    Returns:
        list: a new list of the team topics, sorted case-insensitively by
        the value stored under ``ordering``.
    """
    def sort_key(topic):
        # Compare case-insensitively on the requested field.
        return topic[ordering].lower()

    return sorted(course_module.teams_topics, key=sort_key)
class TopicDetailView(APIView):
    """
    **Use Cases**

        Retrieve a single topic from a course.

    **Example Requests**

        GET /api/team/v0/topics/{topic_id},{course_id}

    **Query Parameters for GET**

        * topic_id: The ID of the topic to retrieve (required).
        * course_id: The ID of the course to retrieve the topic from
          (required).

    **Response Values for GET**

        If the user is not logged in, a 401 error is returned.

        If the topic_id course_id are not given or an unsupported value is
        passed for order_by, returns a 400 error.

        If the user is not enrolled in the course, or is not course or
        global staff, returns a 403 error.

        If the course does not exist, returns a 404 error.

        Otherwise, a 200 response is returned containing the following fields:

        * id: The topic's unique identifier.
        * name: The name of the topic.
        * description: A description of the topic.
    """

    authentication_classes = (OAuth2Authentication, SessionAuthentication)
    permission_classes = (permissions.IsAuthenticated,)

    def get(self, request, topic_id, course_id):
        """GET /api/team/v0/topics/{topic_id},{course_id}/"""
        try:
            course_key = CourseKey.from_string(course_id)
        except InvalidKeyError:
            # Unparseable course keys look like missing courses.
            return Response(status=status.HTTP_404_NOT_FOUND)

        # Ensure the course exists before checking access.
        course_module = modulestore().get_course(course_key)
        if course_module is None:
            return Response(status=status.HTTP_404_NOT_FOUND)

        if not has_team_api_access(request.user, course_key):
            return Response(status=status.HTTP_403_FORBIDDEN)

        # Find the first (and only) topic with the requested id, if any.
        matching_topic = next(
            (topic for topic in course_module.teams_topics
             if topic['id'] == topic_id),
            None,
        )
        if matching_topic is None:
            return Response(status=status.HTTP_404_NOT_FOUND)

        return Response(TopicSerializer(matching_topic).data)
class MembershipListView(ExpandableFieldViewMixin, GenericAPIView):
    """
    **Use Cases**

        List course team memberships or add a user to a course team.

    **Example Requests**:

        GET /api/team/v0/team_membership

        POST /api/team/v0/team_membership

    **Query Parameters for GET**

        At least one of username and team_id must be provided.

        * username: Returns membership records only for the specified user.
          If the requesting user is not staff then only memberships for
          teams associated with courses in which the requesting user is
          enrolled are returned.
        * team_id: Returns only membership records associated with the
          specified team. The requesting user must be staff or enrolled in
          the course associated with the team.
        * page_size: Number of results to return per page.
        * page: Page number to retrieve.
        * expand: Comma separated list of types for which to return
          expanded representations. Supports "user" and "team".

    **Response Values for GET**

        If the user is logged in and enrolled, the response contains:

        * count: The total number of memberships matching the request.
        * next: The URL to the next page of results, or null if this is the
          last page.
        * previous: The URL to the previous page of results, or null if this
          is the first page.
        * num_pages: The total number of pages in the result.
        * results: A list of the memberships matching the request.

            * user: The user associated with the membership. This field may
              contain an expanded or collapsed representation.
            * team: The team associated with the membership. This field may
              contain an expanded or collapsed representation.
            * date_joined: The date and time the membership was created.

        For all text fields, clients rendering the values should take care
        to HTML escape them to avoid script injections, as the data is
        stored exactly as specified. The intention is that plain text is
        supported, not HTML.

        If the user is not logged in and active, a 401 error is returned.

        If neither team_id nor username are provided, a 400 error is
        returned.

        If team_id is provided but the team does not exist, a 404 error is
        returned.

        This endpoint uses 404 error codes to avoid leaking information
        about team or user existence. Specifically, a 404 error will be
        returned if a logged in user specifies a team_id for a course
        they are not enrolled in.

        Additionally, when username is specified the list of returned
        memberships will be filtered to memberships in teams associated
        with courses that the requesting user is enrolled in.

    **Response Values for POST**

        Any logged in user enrolled in a course can enroll themselves in a
        team in the course. Course and global staff can enroll any user in
        a team, with a few exceptions noted below.

        If the user is not logged in and active, a 401 error is returned.

        If username and team are not provided in the posted JSON, a 400
        error is returned describing the missing fields.

        If the specified team does not exist, a 404 error is returned.

        If the user is not staff and is not enrolled in the course
        associated with the team they are trying to join, or if they are
        trying to add a user other than themselves to a team, a 404 error
        is returned. This is to prevent leaking information about the
        existence of teams and users.

        If the specified user does not exist, a 404 error is returned.

        If the user is already a member of a team in the course associated
        with the team they are trying to join, a 400 error is returned.
        This applies to both staff and students.

        If the user is not enrolled in the course associated with the team
        they are trying to join, a 400 error is returned. This can occur
        when a staff user posts a request adding another user to a team.
    """

    # OAuth2Authentication first so API clients get a 401 when unauthenticated.
    authentication_classes = (OAuth2Authentication, SessionAuthentication)
    permission_classes = (permissions.IsAuthenticated,)
    serializer_class = MembershipSerializer
    paginate_by = 10
    paginate_by_param = 'page_size'
    pagination_serializer_class = PaginationSerializer

    def get(self, request):
        """GET /api/team/v0/team_membership"""
        queryset = CourseTeamMembership.objects.all()

        # At least one of the two filters below must be applied; track that.
        specified_username_or_team = False

        if 'team_id' in request.QUERY_PARAMS:
            specified_username_or_team = True
            team_id = request.QUERY_PARAMS['team_id']
            try:
                team = CourseTeam.objects.get(team_id=team_id)
            except CourseTeam.DoesNotExist:
                return Response(status=status.HTTP_404_NOT_FOUND)
            # 404 (not 403) on purpose: don't leak team existence to users
            # who lack access to its course.
            if not has_team_api_access(request.user, team.course_id):
                return Response(status=status.HTTP_404_NOT_FOUND)
            queryset = queryset.filter(team__team_id=team_id)

        if 'username' in request.QUERY_PARAMS:
            specified_username_or_team = True
            if not request.user.is_staff:
                # Non-staff callers only see memberships in courses they are
                # enrolled in or have a staff role for.
                enrolled_courses = (
                    CourseEnrollment.enrollments_for_user(request.user).values_list('course_id', flat=True)
                )
                staff_courses = (
                    CourseAccessRole.objects.filter(user=request.user, role='staff').values_list('course_id', flat=True)
                )
                valid_courses = [
                    CourseKey.from_string(course_key_string)
                    for course_list in [enrolled_courses, staff_courses]
                    for course_key_string in course_list
                ]
                queryset = queryset.filter(team__course_id__in=valid_courses)
            queryset = queryset.filter(user__username=request.QUERY_PARAMS['username'])

        if not specified_username_or_team:
            return Response(
                build_api_error(ugettext_noop("username or team_id must be specified.")),
                status=status.HTTP_400_BAD_REQUEST
            )

        page = self.paginate_queryset(queryset)
        serializer = self.get_pagination_serializer(page)
        return Response(serializer.data)  # pylint: disable=maybe-no-member

    def post(self, request):
        """POST /api/team/v0/team_membership"""
        field_errors = {}

        # Validate both required fields up front so the response can report
        # every missing field at once.
        if 'username' not in request.DATA:
            field_errors['username'] = build_api_error(ugettext_noop("Username is required."))

        if 'team_id' not in request.DATA:
            field_errors['team_id'] = build_api_error(ugettext_noop("Team id is required."))

        if field_errors:
            return Response({
                'field_errors': field_errors,
            }, status=status.HTTP_400_BAD_REQUEST)

        try:
            team = CourseTeam.objects.get(team_id=request.DATA['team_id'])
        except CourseTeam.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)

        username = request.DATA['username']
        # 404 (not 403) to avoid leaking team/user existence to callers
        # without access.
        if not has_team_api_access(request.user, team.course_id, access_username=username):
            return Response(status=status.HTTP_404_NOT_FOUND)

        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)

        try:
            membership = team.add_user(user)
        except AlreadyOnTeamInCourse:
            # Users may belong to at most one team per course.
            return Response(
                build_api_error(
                    ugettext_noop("The user {username} is already a member of a team in this course."),
                    username=username
                ),
                status=status.HTTP_400_BAD_REQUEST
            )
        except NotEnrolledInCourseForTeam:
            return Response(
                build_api_error(
                    ugettext_noop("The user {username} is not enrolled in the course associated with this team."),
                    username=username
                ),
                status=status.HTTP_400_BAD_REQUEST
            )

        serializer = self.get_serializer(instance=membership)
        return Response(serializer.data)
class MembershipDetailView(ExpandableFieldViewMixin, GenericAPIView):
    """
    **Use Cases**

        Gets individual course team memberships or removes a user from a course team.

    **Example Requests**:

        GET /api/team/v0/team_membership/{team_id},{username}

        DELETE /api/team/v0/team_membership/{team_id},{username}

    **Query Parameters for GET**

        * expand: Comma separated list of types for which to return
          expanded representations. Supports "user" and "team".

    **Response Values for GET**

        If the user is logged in and enrolled, or is course or global staff
        the response contains:

        * user: The user associated with the membership. This field may
          contain an expanded or collapsed representation.
        * team: The team associated with the membership. This field may
          contain an expanded or collapsed representation.
        * date_joined: The date and time the membership was created.

        For all text fields, clients rendering the values should take care
        to HTML escape them to avoid script injections, as the data is
        stored exactly as specified. The intention is that plain text is
        supported, not HTML.

        If the user is not logged in and active, a 401 error is returned.

        If specified team does not exist, a 404 error is returned.

        If the user is logged in but is not enrolled in the course
        associated with the specified team, or is not staff, a 404 error is
        returned. This avoids leaking information about course or team
        existence.

        If the membership does not exist, a 404 error is returned.

    **Response Values for DELETE**

        Any logged in user enrolled in a course can remove themselves from
        a team in the course. Course and global staff can remove any user
        from a team. Successfully deleting a membership will return a 204
        response with no content.

        If the user is not logged in and active, a 401 error is returned.

        If the specified team or username does not exist, a 404 error is
        returned.

        If the user is not staff and is attempting to remove another user
        from a team, a 404 error is returned. This prevents leaking
        information about team and user existence.

        If the membership does not exist, a 404 error is returned.
    """

    # OAuth2Authentication first so API clients get a 401 when unauthenticated.
    authentication_classes = (OAuth2Authentication, SessionAuthentication)
    permission_classes = (permissions.IsAuthenticated,)
    serializer_class = MembershipSerializer

    def get_team(self, team_id):
        """Returns the team with team_id, or throws Http404 if it does not exist."""
        try:
            return CourseTeam.objects.get(team_id=team_id)
        except CourseTeam.DoesNotExist:
            raise Http404

    def get_membership(self, username, team):
        """Returns the membership for the given user and team, or throws Http404 if it does not exist."""
        try:
            return CourseTeamMembership.objects.get(user__username=username, team=team)
        except CourseTeamMembership.DoesNotExist:
            raise Http404

    def get(self, request, team_id, username):
        """GET /api/team/v0/team_membership/{team_id},{username}"""
        team = self.get_team(team_id)
        # 404 (not 403) on purpose: avoid leaking team/course existence.
        if not has_team_api_access(request.user, team.course_id):
            return Response(status=status.HTTP_404_NOT_FOUND)

        membership = self.get_membership(username, team)

        serializer = self.get_serializer(instance=membership)
        return Response(serializer.data)

    def delete(self, request, team_id, username):
        """DELETE /api/team/v0/team_membership/{team_id},{username}"""
        team = self.get_team(team_id)
        if has_team_api_access(request.user, team.course_id, access_username=username):
            membership = self.get_membership(username, team)
            membership.delete()
            return Response(status=status.HTTP_204_NO_CONTENT)
        else:
            # 404 (not 403) on purpose: avoid leaking team/user existence.
            return Response(status=status.HTTP_404_NOT_FOUND)
"""Tests related to the ``devhub.addons.owner`` view."""
from django.core import mail
import six
from pyquery import PyQuery as pq
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.addons.models import Addon, AddonUser
from olympia.amo.tests import TestCase, formset
from olympia.devhub.forms import LicenseForm
from olympia.versions.models import License, Version
class TestOwnership(TestCase):
    """Shared fixtures and helpers for the devhub ownership view tests."""
    fixtures = ['base/users', 'base/addon_3615']

    def setUp(self):
        super(TestOwnership, self).setUp()
        self.addon = Addon.objects.get(id=3615)
        self.version = self.addon.current_version
        self.url = self.addon.get_dev_url('owner')
        assert self.client.login(email='del@icio.us')

    def formset(self, *args, **kw):
        # Merge caller overrides on top of the standard license defaults.
        params = {'builtin': License.OTHER, 'text': 'filler'}
        params.update(kw)
        return formset(*args, **params)

    def get_version(self):
        # Re-fetch from the DB so we see changes committed by the view.
        return Version.objects.get(id=self.version.id)

    def get_addon(self):
        # Re-fetch from the DB so we see changes committed by the view.
        return Addon.objects.get(id=self.addon.id)
class TestEditPolicy(TestOwnership):
    """Tests for editing the EULA/policy section of the ownership page."""

    def formset(self, *args, **kw):
        # Append the existing author rows so the combined form post validates.
        initial_forms = self.client.get(self.url).context['user_form'].initial_forms
        all_args = args + tuple(form.initial for form in initial_forms)
        return super(TestEditPolicy, self).formset(*all_args, **kw)

    def test_edit_eula(self):
        original_eula = self.addon.eula
        response = self.client.post(
            self.url, self.formset(eula='new eula', has_eula=True))
        assert response.status_code == 302
        addon = self.get_addon()
        # The EULA text is updated in place (same translation row).
        assert six.text_type(addon.eula) == 'new eula'
        assert addon.eula.id == original_eula.id

    def test_delete_eula(self):
        assert self.addon.eula
        response = self.client.post(self.url, self.formset(has_eula=False))
        assert response.status_code == 302
        # Unchecking the box clears the EULA entirely.
        assert self.get_addon().eula is None

    def test_edit_eula_locale(self):
        # A EULA that exists only in a non-default locale still checks the box.
        self.addon.eula = {'de': 'some eula', 'en-US': ''}
        self.addon.save()
        response = self.client.get(self.url.replace('en-US', 'it'))
        doc = pq(response.content)
        assert doc('#id_has_eula').attr('checked') == 'checked'

    def test_no_policy_form_for_static_themes(self):
        self.addon.update(type=amo.ADDON_STATICTHEME)
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert 'policy_form' not in response.context
class TestEditLicense(TestOwnership):
    # NOTE: these tests mutate shared DB state across sequential client.post
    # calls (each POST saves a license on the version), so statement order
    # within each test matters.

    def setUp(self):
        super(TestEditLicense, self).setUp()
        # Start from a version with no license so each test controls it fully.
        self.version.license = None
        self.version.save()
        # A built-in open-source license offered on the form.
        self.license = License.objects.create(builtin=1, name='bsd',
                                              url='license.url', on_form=True)
        # A Creative Commons license (only offered for static themes).
        self.cc_license = License.objects.create(
            builtin=11, name='copyright', url='license.url',
            creative_commons=True, on_form=True)

    def formset(self, *args, **kw):
        # Include the existing author rows so the combined form post validates.
        init = self.client.get(self.url).context['user_form'].initial_forms
        args = args + tuple(f.initial for f in init)
        kw['initial_count'] = len(init)
        data = super(TestEditLicense, self).formset(*args, **kw)
        if 'text' not in kw:
            # Only send custom license text when the test explicitly asks.
            del data['text']
        return data

    def test_no_license(self):
        # Omitting the builtin choice entirely is a validation error.
        data = self.formset(builtin='')
        response = self.client.post(self.url, data)
        assert response.status_code == 200
        license_form = response.context['license_form']
        assert license_form.errors == {'builtin': [u'This field is required.']}

    def test_no_license_required_for_unlisted(self):
        # Unlisted add-ons can be saved without choosing a license.
        self.make_addon_unlisted(self.addon)
        data = self.formset(builtin='')
        response = self.client.post(self.url, data)
        assert response.status_code == 302

    def test_success_add_builtin(self):
        data = self.formset(builtin=1)
        response = self.client.post(self.url, data)
        assert response.status_code == 302
        assert self.license == self.get_version().license
        # Picking a license logs a CHANGE_LICENSE activity.
        assert ActivityLog.objects.filter(
            action=amo.LOG.CHANGE_LICENSE.id).count() == 1

    def test_success_add_builtin_creative_commons(self):
        self.addon.update(type=amo.ADDON_STATICTHEME)  # cc licenses for themes
        data = self.formset(builtin=11)
        response = self.client.post(self.url, data)
        assert response.status_code == 302
        assert self.cc_license == self.get_version().license
        assert ActivityLog.objects.filter(
            action=amo.LOG.CHANGE_LICENSE.id).count() == 1

    def test_success_add_custom(self):
        # "Other" plus text/name creates a brand-new custom license.
        data = self.formset(builtin=License.OTHER, text='text', name='name')
        response = self.client.post(self.url, data)
        assert response.status_code == 302
        license = self.get_version().license
        assert six.text_type(license.text) == 'text'
        assert six.text_type(license.name) == 'name'
        assert license.builtin == License.OTHER

    def test_success_edit_custom(self):
        data = self.formset(builtin=License.OTHER, text='text', name='name')
        response = self.client.post(self.url, data)
        license_one = self.get_version().license
        # Posting new text re-uses (edits) the same custom license row.
        data = self.formset(builtin=License.OTHER, text='woo', name='name')
        response = self.client.post(self.url, data)
        assert response.status_code == 302
        license_two = self.get_version().license
        assert six.text_type(license_two.text) == 'woo'
        assert six.text_type(license_two.name) == 'name'
        assert license_two.builtin == License.OTHER
        assert license_two.id == license_one.id

    def test_success_switch_license(self):
        data = self.formset(builtin=1)
        response = self.client.post(self.url, data)
        license_one = self.get_version().license
        # Switching builtin -> custom creates a distinct license object.
        data = self.formset(builtin=License.OTHER, text='text', name='name')
        response = self.client.post(self.url, data)
        assert response.status_code == 302
        license_two = self.get_version().license
        assert six.text_type(license_two.text) == 'text'
        assert six.text_type(license_two.name) == 'name'
        assert license_two.builtin == License.OTHER
        assert license_one != license_two
        # Make sure the old license wasn't edited.
        license = License.objects.get(builtin=1)
        assert six.text_type(license.name) == 'bsd'
        # Switching back re-uses the original builtin license.
        data = self.formset(builtin=1)
        response = self.client.post(self.url, data)
        assert response.status_code == 302
        license_three = self.get_version().license
        assert license_three == license_one

    def test_custom_has_text(self):
        # "Other" without license text is rejected.
        data = self.formset(builtin=License.OTHER, name='name')
        response = self.client.post(self.url, data)
        assert response.status_code == 200
        self.assertFormError(response, 'license_form', None,
                             'License text is required when choosing Other.')

    def test_custom_has_name(self):
        # "Other" without a name falls back to the default display name.
        data = self.formset(builtin=License.OTHER, text='text')
        response = self.client.post(self.url, data)
        assert response.status_code == 302
        license = self.get_version().license
        assert six.text_type(license.text) == 'text'
        assert six.text_type(license.name) == 'Custom License'
        assert license.builtin == License.OTHER

    def test_no_version(self):
        # Make sure nothing bad happens if there's no version.
        self.addon.update(_current_version=None)
        Version.objects.all().delete()
        data = self.formset(builtin=License.OTHER, text='text')
        response = self.client.post(self.url, data)
        assert response.status_code == 302

    def test_license_details_links(self):
        # Check that builtin licenses get details links.
        doc = pq(six.text_type(LicenseForm(version=self.version)))
        for license in License.objects.builtins():
            radio = 'input.license[value="%s"]' % license.builtin
            assert doc(radio).parent().text() == (
                six.text_type(license.name) + ' Details')
            assert doc(radio + '+ a').attr('href') == license.url
        assert doc('input[name=builtin]:last-child').parent().text() == 'Other'

    def test_license_logs(self):
        # Saving the same custom license twice logs one entry per save.
        data = self.formset(builtin=License.OTHER, text='text')
        self.version.addon.update(status=amo.STATUS_APPROVED)
        self.client.post(self.url, data)
        assert ActivityLog.objects.all().count() == 1
        self.version.license = License.objects.all()[1]
        self.version.save()
        data = self.formset(builtin=License.OTHER, text='text')
        self.client.post(self.url, data)
        assert ActivityLog.objects.all().count() == 2
class TestEditAuthor(TestOwnership):
    def test_reorder_authors(self):
        """
        Re-ordering authors should not generate role changes in the
        ActivityLog.
        """
        # flip form-0-position
        form = self.client.get(self.url).context['user_form'].initial_forms[0]
        user_data = {
            'user': 'regular@mozilla.com',
            'listed': True,
            'role': amo.AUTHOR_ROLE_DEV,
            'position': 0
        }
        # Add a second author so there is something to reorder.
        data = self.formset(form.initial, user_data, initial_count=1)
        response = self.client.post(self.url, data)
        assert response.status_code == 302
        # Swap the positions of the two authors.
        form = self.client.get(self.url).context['user_form'].initial_forms[0]
        u1 = form.initial
        u1['position'] = 1
        form = self.client.get(self.url).context['user_form'].initial_forms[1]
        u2 = form.initial
        data = self.formset(u1, u2)
        orig = ActivityLog.objects.all().count()
        response = self.client.post(self.url, data)
        self.assert3xx(response, self.url, 302)
        # No new log entries: a pure reorder is not a role change.
        assert ActivityLog.objects.all().count() == orig
    def test_success_add_user(self):
        """Adding an author updates the DB and notifies all authors."""
        qs = (AddonUser.objects.filter(addon=3615)
              .values_list('user', flat=True))
        # Only the original author to start with.
        assert list(qs.all()) == [55021]

        form = self.client.get(self.url).context['user_form'].initial_forms[0]
        user_data = {
            'user': 'regular@mozilla.com',
            'listed': True,
            'role': amo.AUTHOR_ROLE_DEV,
            'position': 0
        }
        data = self.formset(form.initial, user_data, initial_count=1)
        response = self.client.post(self.url, data)
        self.assert3xx(response, self.url, 302)
        # The new author (id 999) is now attached to the add-on.
        assert list(qs.all()) == [55021, 999]

        # An email has been sent to the authors to warn them.
        author_added = mail.outbox[0]
        assert author_added.subject == ('An author has been added to your '
                                        'add-on')
        # Make sure all the authors are aware of the addition.
        assert 'del@icio.us' in author_added.to  # The original author.
        assert 'regular@mozilla.com' in author_added.to  # The new one.
    def test_success_edit_user(self):
        """Editing an existing author's 'listed' flag is persisted."""
        # Add an author b/c we can't edit anything about the current one.
        form = self.client.get(self.url).context['user_form'].initial_forms[0]
        user_data = {
            'user': 'regular@mozilla.com',
            'listed': True,
            'role': amo.AUTHOR_ROLE_DEV,
            'position': 1
        }
        data = self.formset(form.initial, user_data, initial_count=1)
        self.client.post(self.url, data)
        assert AddonUser.objects.get(addon=3615, user=999).listed

        # Edit the user we just added.
        user_form = self.client.get(self.url).context['user_form']
        one, two = user_form.initial_forms
        # Dropping 'listed' from the posted data unchecks the checkbox.
        del two.initial['listed']
        # An extra empty row, as rendered by the formset's blank form.
        empty = {
            'user': '',
            'listed': True,
            'role': 5,
            'position': 0
        }
        data = self.formset(one.initial, two.initial, empty, initial_count=2)
        response = self.client.post(self.url, data)
        self.assert3xx(response, self.url, 302)
        assert not AddonUser.objects.get(addon=3615, user=999).listed
    def test_change_user_role(self):
        """Changing an author's role emits a warning email to all authors."""
        # Add an author b/c we can't edit anything about the current one.
        form = self.client.get(self.url).context['user_form'].initial_forms[0]
        user_data = {
            'user': 'regular@mozilla.com',
            'listed': True,
            'role': amo.AUTHOR_ROLE_DEV,
            'position': 1
        }
        data = self.formset(form.initial, user_data, initial_count=1)
        self.client.post(self.url, data)
        assert AddonUser.objects.get(addon=3615, user=999).listed
        # Edit the user we just added.
        user_form = self.client.get(self.url).context['user_form']
        one, two = user_form.initial_forms
        two.initial['role'] = amo.AUTHOR_ROLE_OWNER
        # Extra blank form so the formset still validates.
        empty = {
            'user': '',
            'listed': True,
            'role': amo.AUTHOR_ROLE_OWNER,
            'position': 0
        }
        data = self.formset(one.initial, two.initial, empty, initial_count=2)
        response = self.client.post(self.url, data)
        self.assert3xx(response, self.url, 302)
        # An email has been sent to the authors to warn them.
        author_edit = mail.outbox[1]  # First mail was for the addition.
        assert author_edit.subject == ('An author has a role changed on your '
                                       'add-on')
        # Make sure all the authors are aware of the addition.
        assert 'del@icio.us' in author_edit.to  # The original author.
        assert 'regular@mozilla.com' in author_edit.to  # The edited one.
def test_add_user_twice(self):
form = self.client.get(self.url).context['user_form'].initial_forms[0]
user_data = {
'user': 'regular@mozilla.com',
'listed': True,
'role': amo.AUTHOR_ROLE_DEV,
'position': 1
}
data = self.formset(
form.initial, user_data, user_data, initial_count=1)
response = self.client.post(self.url, data)
assert response.status_code == 200
assert response.context['user_form'].non_form_errors() == (
['An author can only be listed once.'])
    def test_success_delete_user(self):
        """Removing an author succeeds and warns the remaining authors."""
        # Add a new user so we have one to delete.
        user_data = {
            'user': 'regular@mozilla.com',
            'listed': True,
            'role': amo.AUTHOR_ROLE_OWNER,
            'position': 1
        }
        data = self.formset(user_data, initial_count=0)
        self.client.post(self.url, data)
        one, two = self.client.get(self.url).context['user_form'].initial_forms
        # Mark the first author's form for deletion.
        one.initial['DELETE'] = True
        data = self.formset(one.initial, two.initial, initial_count=2)
        response = self.client.post(self.url, data)
        assert response.status_code == 302
        # Only the newly added user (id 999) remains on the add-on.
        assert 999 == AddonUser.objects.get(addon=3615).user_id
        # An email has been sent to the authors to warn them.
        author_delete = mail.outbox[1]  # First mail was for the addition.
        assert author_delete.subject == ('An author has been removed from your'
                                         ' add-on')
        # Make sure all the authors are aware of the addition.
        assert 'del@icio.us' in author_delete.to  # The original author.
        assert 'regular@mozilla.com' in author_delete.to  # The removed one.
def test_switch_owner(self):
# See if we can transfer ownership in one POST.
form = self.client.get(self.url).context['user_form'].initial_forms[0]
form.initial['user'] = 'regular@mozilla.com'
data = self.formset(form.initial, initial_count=1)
response = self.client.post(self.url, data)
assert response.status_code == 302
assert 999 == AddonUser.objects.get(addon=3615).user_id
assert ActivityLog.objects.filter(
action=amo.LOG.ADD_USER_WITH_ROLE.id).count() == 1
assert ActivityLog.objects.filter(
action=amo.LOG.REMOVE_USER_WITH_ROLE.id).count() == 1
    def test_only_owner_can_edit(self):
        """A developer-role author may not edit or delete other authors."""
        form = self.client.get(self.url).context['user_form'].initial_forms[0]
        user_data = {
            'user': 'regular@mozilla.com',
            'listed': True,
            'role': amo.AUTHOR_ROLE_DEV,
            'position': 0
        }
        data = self.formset(form.initial, user_data, initial_count=1)
        self.client.post(self.url, data)
        # Re-log in as the developer we just added and retry the same POST.
        self.client.login(email='regular@mozilla.com')
        self.client.post(self.url, data, follow=True)
        # Try deleting the other AddonUser
        one, two = self.client.get(self.url).context['user_form'].initial_forms
        one.initial['DELETE'] = True
        data = self.formset(one.initial, two.initial, initial_count=2)
        response = self.client.post(self.url, data, follow=True)
        assert response.status_code == 403
        # Nothing was deleted: both authors are still attached.
        assert AddonUser.objects.filter(addon=3615).count() == 2
def test_must_have_listed(self):
form = self.client.get(self.url).context['user_form'].initial_forms[0]
form.initial['listed'] = False
data = self.formset(form.initial, initial_count=1)
response = self.client.post(self.url, data)
assert response.context['user_form'].non_form_errors() == (
['At least one author must be listed.'])
def test_must_have_owner(self):
form = self.client.get(self.url).context['user_form'].initial_forms[0]
form.initial['role'] = amo.AUTHOR_ROLE_DEV
data = self.formset(form.initial, initial_count=1)
response = self.client.post(self.url, data)
assert response.context['user_form'].non_form_errors() == (
['Must have at least one owner.'])
def test_must_have_owner_delete(self):
form = self.client.get(self.url).context['user_form'].initial_forms[0]
form.initial['DELETE'] = True
data = self.formset(form.initial, initial_count=1)
response = self.client.post(self.url, data)
assert response.context['user_form'].non_form_errors() == (
['Must have at least one owner.'])
class TestEditAuthorStaticTheme(TestEditAuthor):
    """Re-run the author-editing tests against a static theme add-on.

    Static themes use a Creative Commons license picker, so every formset
    POST needs a 'builtin' license value as well.
    """

    def setUp(self):
        super(TestEditAuthorStaticTheme, self).setUp()
        self.addon.update(type=amo.ADDON_STATICTHEME)
        self.cc_license = License.objects.create(
            builtin=11, url='license.url',
            creative_commons=True, on_form=True)

    def formset(self, *args, **kw):
        # Inject the license choice expected by the static theme form.
        defaults = {'builtin': 11}
        defaults.update(kw)
        return formset(*args, **defaults)

    def test_reorder_authors(self):
        # The version needs a license before the parent test can reorder.
        self.get_version().update(license=self.cc_license)
        super(TestEditAuthorStaticTheme, self).test_reorder_authors()
| bsd-3-clause |
bnx05/pytest-selenium | test_parameters.py | 1 | 2603 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import time
from selenium import webdriver
# Shared fixtures for the login-page tests below.
sample_email_address = "demo@engagespark.com"
sample_password = "Password123"
# Deliberately malformed addresses for negative parametrization.
email_addresses = ["invalid_email", "another_invalid_email@", "not_another_invalid_email@blah"]
passwords = ["weak_password", "generic_password", "bleep_password"]
# NOTE(review): starting the browser at import time is a module-level side
# effect and the browser is never quit; a pytest fixture with teardown
# would be cleaner — TODO confirm before changing.
browser = webdriver.Firefox()
browser.maximize_window()
# this test checks the maxlength attribute of the login and password fields
@pytest.mark.parametrize("field_name, maxlength", [
    ("login", "75"),
    ("password", "128"),
])
def test_assert_field_maxlength(field_name, maxlength):
    """Each credential field advertises the expected maxlength attribute.

    Bug fix: the comparison result was previously computed and discarded
    (no ``assert``), so the test could never fail; it is now asserted.
    """
    browser.get("https://start.engagespark.com/sign-in/")
    time.sleep(5)
    field = browser.find_element_by_name(field_name)
    assert field.get_attribute("maxlength") == maxlength
# this test asserts the string length of values entered in the login and
# password fields
@pytest.mark.parametrize("field_name, sample_string, string_length", [
    ("login", sample_email_address, 20),
    ("password", sample_password, 11),
])
def test_assert_email_and_password_length(field_name, sample_string, string_length):
    """Typing a sample value into a field stores exactly that many chars."""
    browser.get("https://start.engagespark.com/sign-in/")
    time.sleep(5)
    field = browser.find_element_by_name(field_name)
    field.click()
    field.send_keys(sample_string)
    typed_value = field.get_attribute("value")
    assert len(typed_value) == string_length
# this test checks if the login button is enabled after entering different
# combinations of invalid values in the email and password fields
@pytest.mark.parametrize("email", email_addresses)
@pytest.mark.parametrize("password", passwords)
def test_assert_login_button_enabled(email, password):
    """The Login button stays enabled for every invalid credential combo."""
    browser.get("https://start.engagespark.com/sign-in/")
    time.sleep(5)
    for name, value in (("login", email), ("password", password)):
        field = browser.find_element_by_name(name)
        field.click()
        field.send_keys(value)
    login_button = browser.find_element_by_xpath(
        "//button[contains(text(), 'Login')]")
    assert login_button.is_enabled()
# this test checks if the values entered into the email field contain '@'
@pytest.mark.parametrize("email", [
    "123@abc.org",
    "info@engagespark.com",
    # NOTE(review): "blah" contains no '@', so this case will fail the
    # assertion below — presumably an intentional failing example; confirm.
    "blah",
])
def test_assert_valid_email_entry(email):
    """The login field echoes back the typed address, '@' included."""
    browser.get("https://start.engagespark.com/sign-in/")
    time.sleep(5)
    browser.find_element_by_name("login").click()
    browser.find_element_by_name("login").send_keys(email)
    assert "@" in browser.find_element_by_name("login").get_attribute("value")
| mit |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/spyderplugins/widgets/condapackagesgui.py | 2 | 69644 | # -*- coding: utf-8 -*-
#
# Copyright © 2014 Gonzalo Peña (@goanpeca)
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""Conda Packager Manager Widget
Maybe this package manager should be shipped as a different module
in pipy? spyder_package_manager? so that spyder updates could
be handled easily?
"""
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
from __future__ import with_statement, print_function
import sys
import platform
import os
import os.path as osp
import json
import shutil
from spyderlib.qt.QtGui import (QGridLayout, QVBoxLayout, QHBoxLayout, QFont,
QDialogButtonBox, QToolButton, QLineEdit,
QComboBox, QProgressBar, QSpacerItem, QMenu,
QPushButton, QPixmap, QIcon, QCheckBox, QLabel,
QWidget, QSortFilterProxyModel, QTableView,
QAbstractItemView, QDialog, QPalette,
QDesktopServices)
from spyderlib.qt.QtCore import (QSize, Qt, QAbstractTableModel, QModelIndex,
QPoint, QUrl, QObject, Signal, QThread,
QByteArray)
from spyderlib.qt.QtNetwork import QNetworkRequest, QNetworkAccessManager
from spyderlib.qt.compat import to_qvariant
from spyderlib.utils import programs
from spyderlib.utils.qthelpers import get_icon, create_action, add_actions
from spyderlib.baseconfig import (get_conf_path, get_translation,
get_image_path, get_module_data_path)
from spyderlib.py3compat import to_text_string, u, is_unicode
from spyderlib.py3compat import configparser as cp
#import conda_api_q
_ = get_translation("p_condapackages", dirname="spyderplugins")
CONDA_PATH = programs.find_program('conda')
def sort_versions(versions=(), reverse=False, sep=u'.'):
    """Sort a list of version number strings in natural (version) order.

    Handles alphanumeric components such as 'alpha', 'dev', 'rc1' by
    mapping them to negative numbers, so e.g. '1.0rc1' sorts before '1.0'.

    versions : iterable of str
        Version strings to sort (e.g. ['1.10', '1.2', '1.0rc1']).
    reverse : bool
        Sort in descending order when True.
    sep : unicode
        Separator between version components.
    """
    if versions == []:
        return []
    digits = u'0123456789'

    def toint(x):
        # Convert to int when possible, otherwise return the value as-is.
        # Fix: narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit.
        try:
            n = int(x)
        except (TypeError, ValueError):
            n = x
        return n

    versions = list(versions)
    new_versions, alpha, sizes = [], set(), set()
    for item in versions:
        it = item.split(sep)
        temp = []
        for i in it:
            x = toint(i)
            if not isinstance(x, int):
                # Mixed component like '0rc1': split into numeric head,
                # alphabetic middle and numeric tail.
                x = u(x)
                middle = x.lstrip(digits).rstrip(digits)
                tail = toint(x.lstrip(digits).replace(middle, u''))
                head = toint(x.rstrip(digits).replace(middle, u''))
                middle = toint(middle)
                res = [head, middle, tail]
                while u'' in res:
                    res.remove(u'')
                for r in res:
                    if is_unicode(r):
                        alpha.add(r)
            else:
                res = [x]
            temp += res
        sizes.add(len(temp))
        new_versions.append(temp)
    # replace letters found by a negative number
    replace_dic = {}
    alpha = sorted(alpha, reverse=True)
    if len(alpha):
        replace_dic = dict(zip(alpha, list(range(-1, -(len(alpha)+1), -1))))
    # Complete with zeros based on longest item and replace alphas with number
    nmax = max(sizes)
    for i in range(len(new_versions)):
        item = []
        for z in new_versions[i]:
            if z in replace_dic:
                item.append(replace_dic[z])
            else:
                item.append(z)
        nzeros = nmax - len(item)
        item += [0]*nzeros
        item += [versions[i]]  # keep the original string for the result
        new_versions[i] = item
    new_versions = sorted(new_versions, reverse=reverse)
    # The original string rides along in the last slot of each key.
    return [n[-1] for n in new_versions]
# Constants
# Column indices for the package table model; ENDCOL is a trailing spacer.
COLUMNS = (NAME, DESCRIPTION, VERSION, STATUS, URL, LICENSE, INSTALL,
           REMOVE, UPGRADE, DOWNGRADE, ENDCOL) = list(range(11))
# Columns that render clickable action icons.
ACTION_COLUMNS = [INSTALL, REMOVE, UPGRADE, DOWNGRADE]
# Package/environment status codes used throughout the widget.
TYPES = (INSTALLED, NOT_INSTALLED, UPGRADABLE, DOWNGRADABLE, ALL_INSTALLABLE,
         ALL, NOT_INSTALLABLE, MIXGRADABLE, CREATE, CLONE,
         REMOVE_ENV) = list(range(11))
# Display order and text->status mapping for the filter combobox.
COMBOBOX_VALUES_ORDERED = [_(u'Installed'), _(u'Not installed'),
                           _(u'Upgradable'), _(u'Downgradable'),
                           _(u'All instalable'), _(u'All')]
COMBOBOX_VALUES = dict(zip(COMBOBOX_VALUES_ORDERED, TYPES))
# Data-only columns kept in the model but hidden from the view.
HIDE_COLUMNS = [STATUS, URL, LICENSE]
ROOT = 'root'  # name of the base conda environment
class CondaPackagesModel(QAbstractTableModel):
    """Abstract Model to handle the packages in a conda environment.

    Each row is a list indexed by the COLUMNS constants; the action
    columns (INSTALL/REMOVE/UPGRADE/DOWNGRADE) hold booleans that track
    the pressed state of the corresponding icon.
    """

    def __init__(self, parent, packages_names, packages_versions, row_data):
        super(CondaPackagesModel, self).__init__(parent)
        self._parent = parent
        self._packages_names = packages_names      # list of package names
        self._packages_versions = packages_versions  # name -> versions map
        self._rows = row_data                      # per-row column values
        # Icon set for the four action columns, in their three states.
        self._icons = {
            'upgrade.active': get_icon('conda_upgrade_active.png'),
            'upgrade.inactive': get_icon('conda_upgrade_inactive.png'),
            'upgrade.pressed': get_icon('conda_upgrade_pressed.png'),
            'downgrade.active': get_icon('conda_downgrade_active.png'),
            'downgrade.inactive': get_icon('conda_downgrade_inactive.png'),
            'downgrade.pressed': get_icon('conda_downgrade_pressed.png'),
            'add.active': get_icon('conda_add_active.png'),
            'add.inactive': get_icon('conda_add_inactive.png'),
            'add.pressed': get_icon('conda_add_pressed.png'),
            'remove.active': get_icon('conda_remove_active.png'),
            'remove.inactive': get_icon('conda_remove_inactive.png'),
            'remove.pressed': get_icon('conda_remove_pressed.png')}

    def _update_cell(self, row, column):
        # Emit dataChanged for a single cell so the view repaints it.
        start = self.index(row, column)
        end = self.index(row, column)
        self.dataChanged.emit(start, end)

    def flags(self, index):
        """Override Qt method"""
        if not index.isValid():
            return Qt.ItemIsEnabled
        column = index.column()
        if column in (NAME, DESCRIPTION, VERSION):
            return Qt.ItemFlags(Qt.ItemIsEnabled)
        elif column in ACTION_COLUMNS:
            return Qt.ItemFlags(Qt.ItemIsEnabled)
        elif column == ENDCOL:
            # Trailing spacer column is completely inert.
            return Qt.ItemFlags(Qt.NoItemFlags)
        else:
            return Qt.ItemFlags(Qt.ItemIsEnabled)

    def data(self, index, role=Qt.DisplayRole):
        """Override Qt method"""
        if not index.isValid() or not 0 <= index.row() < len(self._rows):
            return to_qvariant()
        row = index.row()
        column = index.column()
        # Carefull here with the order, this has to be adjusted manually
        # NOTE(review): rows equal to their own index appear to be
        # placeholders not yet filled with package data — confirm.
        if self._rows[row] == row:
            [name, description, version, status, url, license_, i, r, u, d] =\
                [u'', u'', '-', -1, u'', u'', False, False, False, False]
        else:
            [name, description, version, status, url, license_, i, r, u,
             d] = self._rows[row]
        if role == Qt.DisplayRole:
            if column == NAME:
                return to_qvariant(name)
            elif column == VERSION:
                return to_qvariant(version)
            elif column == STATUS:
                return to_qvariant(status)
            elif column == DESCRIPTION:
                return to_qvariant(description)
        elif role == Qt.TextAlignmentRole:
            if column in [NAME, DESCRIPTION]:
                return to_qvariant(int(Qt.AlignLeft | Qt.AlignVCenter))
            else:
                return to_qvariant(int(Qt.AlignHCenter | Qt.AlignVCenter))
        elif role == Qt.DecorationRole:
            # Pick the icon variant (pressed/active/inactive) from the row's
            # status and the action column's boolean pressed flag.
            if column == INSTALL:
                if status == NOT_INSTALLED:
                    if i:
                        return to_qvariant(self._icons['add.pressed'])
                    else:
                        return to_qvariant(self._icons['add.active'])
                else:
                    return to_qvariant(self._icons['add.inactive'])
            elif column == REMOVE:
                if (status == INSTALLED or status == UPGRADABLE or
                        status == DOWNGRADABLE or status == MIXGRADABLE):
                    if r:
                        return to_qvariant(self._icons['remove.pressed'])
                    else:
                        return to_qvariant(self._icons['remove.active'])
                else:
                    return to_qvariant(self._icons['remove.inactive'])
            elif column == UPGRADE:
                if status == UPGRADABLE or status == MIXGRADABLE:
                    if u:
                        return to_qvariant(self._icons['upgrade.pressed'])
                    else:
                        return to_qvariant(self._icons['upgrade.active'])
                else:
                    return to_qvariant(self._icons['upgrade.inactive'])
            elif column == DOWNGRADE:
                if status == DOWNGRADABLE or status == MIXGRADABLE:
                    if d:
                        return to_qvariant(self._icons['downgrade.pressed'])
                    else:
                        return to_qvariant(self._icons['downgrade.active'])
                else:
                    return to_qvariant(self._icons['downgrade.inactive'])
        elif role == Qt.ToolTipRole:
            if column == INSTALL and status == NOT_INSTALLED:
                return to_qvariant(_('Install package'))
            elif column == REMOVE and (status == INSTALLED or
                                       status == UPGRADABLE or
                                       status == DOWNGRADABLE or
                                       status == MIXGRADABLE):
                return to_qvariant(_('Remove package'))
            elif column == UPGRADE and (status == INSTALLED or
                                        status == UPGRADABLE or
                                        status == MIXGRADABLE):
                return to_qvariant(_('Upgrade package'))
            elif column == DOWNGRADE and (status == INSTALLED or
                                          status == DOWNGRADABLE or
                                          status == MIXGRADABLE):
                return to_qvariant(_('Downgrade package'))
        elif role == Qt.ForegroundRole:
            # Grey out packages that cannot be installed.
            palette = QPalette()
            if column in [NAME, DESCRIPTION, VERSION]:
                if status in [INSTALLED, UPGRADABLE, DOWNGRADABLE,
                              MIXGRADABLE]:
                    color = palette.color(QPalette.WindowText)
                    return to_qvariant(color)
                elif status in [NOT_INSTALLED, NOT_INSTALLABLE]:
                    color = palette.color(QPalette.Mid)
                    return to_qvariant(color)
        return to_qvariant()

    def headerData(self, section, orientation, role=Qt.DisplayRole):
        """Override Qt method"""
        if role == Qt.TextAlignmentRole:
            if orientation == Qt.Horizontal:
                return to_qvariant(int(Qt.AlignHCenter | Qt.AlignVCenter))
            return to_qvariant(int(Qt.AlignRight | Qt.AlignVCenter))
        elif role == Qt.ToolTipRole:
            column = section
            if column == INSTALL:
                return to_qvariant(_('Install package'))
            elif column == REMOVE:
                return to_qvariant(_('Remove package'))
            elif column == UPGRADE:
                return to_qvariant(_('Upgrade package'))
            elif column == DOWNGRADE:
                return to_qvariant(_('Downgrade package'))
        if orientation == Qt.Horizontal:
            if section == NAME:
                return to_qvariant(_("Name"))
            elif section == VERSION:
                return to_qvariant(_("Version"))
            elif section == DESCRIPTION:
                return to_qvariant(_("Description"))
            elif section == STATUS:
                return to_qvariant(_("Status"))
            elif section == INSTALL:
                return to_qvariant(_("I"))
            elif section == REMOVE:
                return to_qvariant(_("R"))
            elif section == UPGRADE:
                return to_qvariant(_("U"))
            elif section == DOWNGRADE:
                return to_qvariant(_("D"))
            else:
                return to_qvariant()

    def rowCount(self, index=QModelIndex()):
        """Override Qt method"""
        return len(self._packages_names)

    def columnCount(self, index=QModelIndex()):
        """Override Qt method"""
        return len(COLUMNS)

    def row(self, rownum):
        """Return the raw row data list for *rownum*."""
        return self._rows[rownum]

    def first_index(self):
        """Return the model index of the top-left cell."""
        return self.index(0, 0)

    def last_index(self):
        """Return the model index of the bottom-right cell."""
        return self.index(self.rowCount() - 1, self.columnCount() - 1)

    def update_row_icon(self, row, column):
        """Toggle the pressed flag of an action cell and repaint it."""
        if column in ACTION_COLUMNS:
            r = self._rows[row]
            actual_state = r[column]
            r[column] = not actual_state
            self._rows[row] = r
            self._update_cell(row, column)

    def is_installable(self, model_index):
        """Return True if the package at *model_index* can be installed."""
        row = model_index.row()
        status = self._rows[row][STATUS]
        return status == NOT_INSTALLED

    def is_removable(self, model_index):
        """Return True if the package at *model_index* can be removed."""
        row = model_index.row()
        status = self._rows[row][STATUS]
        return status in [UPGRADABLE, DOWNGRADABLE, INSTALLED, MIXGRADABLE]

    def is_upgradable(self, model_index):
        """Return True if the package at *model_index* can be upgraded."""
        row = model_index.row()
        status = self._rows[row][STATUS]
        return status == UPGRADABLE or status == MIXGRADABLE

    def is_downgradable(self, model_index):
        """Return True if the package at *model_index* can be downgraded."""
        row = model_index.row()
        status = self._rows[row][STATUS]
        return status == DOWNGRADABLE or status == MIXGRADABLE

    def get_package_versions(self, name, versiononly=True):
        """ Gives all the compatible package canonical name
            name : str
                name of the package
            versiononly : bool
                if True, returns version number only, otherwise canonical name
        """
        versions = self._packages_versions
        if name in versions:
            if versiononly:
                ver = versions[name]
                temp = []
                for ve in ver:
                    n, v, b = conda_api_q.split_canonical_name(ve)
                    temp.append(v)
                return temp
            else:
                return versions[name]
        else:
            return []

    def get_package_version(self, name):
        """Return the displayed version string for *name* ('' if unknown)."""
        packages = self._packages_names
        if name in packages:
            rownum = packages.index(name)
            return self.row(rownum)[VERSION]
        else:
            return u''
class MultiColumnSortFilterProxy(QSortFilterProxyModel):
    """Implements a QSortFilterProxyModel that allows for custom filtering.

    Add new filter functions using add_filter_function(). New functions should
    accept two arguments, the column to be filtered and the currently set
    filter string, and should return True to accept the row, False otherwise.
    Filter functions are stored in a dictionary for easy removal by key. Use
    the add_filter_function() and remove_filter_function() methods for access.
    The filterString is used as the main pattern matching string for filter
    functions. This could easily be expanded to handle regular expressions if
    needed.
    Copyright https://gist.github.com/dbridges/4732790
    """

    def __init__(self, parent=None):
        super(MultiColumnSortFilterProxy, self).__init__(parent)
        # if parent is stored as self.parent then PySide gives the following
        # TypeError: 'CondaPackagesTable' object is not callable
        self._parent = parent
        self._filter_string = ''      # current search text (lowercased)
        self._filter_status = ALL     # current status filter code
        self._filter_functions = {}   # name -> predicate(row, text, status)

    def set_filter(self, text, status):
        """
        text : string
            The string to be used for pattern matching.
        status : int
            Status filter code (see the TYPES module constants).
        """
        self._filter_string = text.lower()
        self._filter_status = status
        self.invalidateFilter()

    def add_filter_function(self, name, new_function):
        """
        name : hashable object
            The object to be used as the key for
            this filter function. Use this object
            to remove the filter function in the future.
            Typically this is a self descriptive string.
        new_function : function
            A new function which must take two arguments,
            the row to be tested and the ProxyModel's current
            filterString. The function should return True if
            the filter accepts the row, False otherwise.
            ex:
            model.add_filter_function(
                'test_columns_1_and_2',
                lambda r,s: (s in r[1] and s in r[2]))
        """
        self._filter_functions[name] = new_function
        self.invalidateFilter()

    def remove_filter_function(self, name):
        """Removes the filter function associated with name, if it exists
        name : hashable object
        """
        if name in self._filter_functions.keys():
            del self._filter_functions[name]
            self.invalidateFilter()

    def filterAcceptsRow(self, row_num, parent):
        """Qt override
        Reimplemented from base class to allow the use of custom filtering
        """
        model = self.sourceModel()
        # The source model should have a method called row()
        # which returns the table row as a python list.
        tests = [func(model.row(row_num), self._filter_string,
                      self._filter_status) for func in
                 self._filter_functions.values()]
        # A row is accepted only when every registered filter accepts it.
        return False not in tests  # Changes this to any or all!
class CondaPackagesTable(QTableView):
    """Table view listing conda packages with per-row action icons.

    Wraps a CondaPackagesModel behind a MultiColumnSortFilterProxy so the
    rows can be filtered by search text and install status.
    """
    WIDTH_NAME = 120
    WIDTH_ACTIONS = 24
    WIDTH_VERSION = 70

    def __init__(self, parent):
        super(CondaPackagesTable, self).__init__(parent)
        self._parent = parent
        self._searchbox = u''        # current search text
        self._filterbox = ALL        # current status filter code
        self.row_count = None
        # To manage icon states
        self._model_index_clicked = None
        self.valid = False
        self.column_ = None
        self.current_index = None
        # To prevent triggering the keyrelease after closing a dialog
        # but hititng enter on it
        self.pressed_here = False
        self.source_model = None
        self.proxy_model = None
        self.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.setSelectionMode(QAbstractItemView.SingleSelection)
        self.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.verticalHeader().hide()
        self.setAlternatingRowColors(True)
        self.setShowGrid(False)
        self.setWordWrap(True)
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self._palette = QPalette()
        # Header setup
        self._hheader = self.horizontalHeader()
        self._hheader.setResizeMode(self._hheader.Fixed)
        self._hheader.setStyleSheet("""QHeaderView {border: 0px;
                                       border-radius: 0px;};""")
        self.setPalette(self._palette)
        self.sortByColumn(NAME, Qt.AscendingOrder)
        self.setContextMenuPolicy(Qt.CustomContextMenu)

    def setup_model(self, packages_names, packages_versions, row_data):
        """Create the source model and proxy and wire the text/status filters."""
        self.proxy_model = MultiColumnSortFilterProxy(self)
        self.setModel(self.proxy_model)
        self.source_model = CondaPackagesModel(self, packages_names,
                                               packages_versions, row_data)
        self.proxy_model.setSourceModel(self.source_model)
        self.hide_columns()
        # Custom Proxy Model setup
        self.proxy_model.setDynamicSortFilter(True)
        # Accept a row when every search token occurs in the name, or every
        # token occurs in the description.
        filter_text = \
            (lambda row, text, status: (
                all([t in row[NAME].lower() for t in
                     to_text_string(text).lower().split()]) or
                all([t in row[DESCRIPTION].lower() for t in
                     to_text_string(text).split()])))
        filter_status = (lambda row, text, status: to_text_string(row[STATUS])
                         in to_text_string(status))
        self.model().add_filter_function('status-search', filter_status)
        self.model().add_filter_function('text-search', filter_text)
        # signals and slots
        self.verticalScrollBar().valueChanged.connect(self.resize_rows)

    def resize_rows(self):
        """Resize visible rows to their contents plus a little padding."""
        delta_y = 10
        height = self.height()
        y = 0
        while y < height:
            row = self.rowAt(y)
            self.resizeRowToContents(row)
            row_height = self.rowHeight(row)
            self.setRowHeight(row, row_height + delta_y)
            y += self.rowHeight(row) + delta_y

    def hide_columns(self):
        """Hide the data-only columns (status, url, license)."""
        for col in HIDE_COLUMNS:
            self.hideColumn(col)

    def filter_changed(self):
        """Trigger the filter"""
        group = self._filterbox
        text = self._searchbox
        # Each status filter expands to the set of status codes it covers,
        # concatenated as a string the proxy's status predicate matches on.
        if group in [ALL]:
            group = ''.join([to_text_string(INSTALLED),
                             to_text_string(UPGRADABLE),
                             to_text_string(NOT_INSTALLED),
                             to_text_string(DOWNGRADABLE),
                             to_text_string(MIXGRADABLE),
                             to_text_string(NOT_INSTALLABLE)])
        elif group in [INSTALLED]:
            group = ''.join([to_text_string(INSTALLED),
                             to_text_string(UPGRADABLE),
                             to_text_string(DOWNGRADABLE),
                             to_text_string(MIXGRADABLE)])
        elif group in [UPGRADABLE]:
            group = ''.join([to_text_string(UPGRADABLE),
                             to_text_string(MIXGRADABLE)])
        elif group in [DOWNGRADABLE]:
            group = ''.join([to_text_string(DOWNGRADABLE),
                             to_text_string(MIXGRADABLE)])
        elif group in [ALL_INSTALLABLE]:
            group = ''.join([to_text_string(INSTALLED),
                             to_text_string(UPGRADABLE),
                             to_text_string(NOT_INSTALLED),
                             to_text_string(DOWNGRADABLE),
                             to_text_string(MIXGRADABLE)])
        else:
            group = to_text_string(group)
        if self.proxy_model is not None:
            self.proxy_model.set_filter(text, group)
        self.resize_rows()
        # update label count
        count = self.verticalHeader().count()
        if count == 0:
            count_text = _("0 packages available ")
        elif count == 1:
            count_text = _("1 package available ")
        elif count > 1:
            count_text = str(count) + _(" packages available ")
        if text != '':
            count_text = count_text + _('matching "{0}"').format(text)
        self._parent._update_status(status=count_text, hide=False)

    def search_string_changed(self, text):
        """Slot: store the new search text and re-filter."""
        text = to_text_string(text)
        self._searchbox = text
        self.filter_changed()

    def filter_status_changed(self, text):
        """Slot: map the combobox text back to a status code and re-filter."""
        # Fix: keep the current filter as a fallback so an unmatched text no
        # longer raises NameError, and use items() instead of the Python 2
        # only iteritems() (this module targets both via py3compat).
        group = self._filterbox
        for key, val in COMBOBOX_VALUES.items():
            if str(val) == to_text_string(text):
                group = val
                break
        self._filterbox = group
        self.filter_changed()

    def resizeEvent(self, event):
        """Override Qt method"""
        w = self.width()
        self.setColumnWidth(NAME, self.WIDTH_NAME)
        self.setColumnWidth(VERSION, self.WIDTH_VERSION)
        # Description takes whatever width is left over.
        w_new = w - (self.WIDTH_NAME + self.WIDTH_VERSION +
                     (len(ACTION_COLUMNS) + 1)*self.WIDTH_ACTIONS)
        self.setColumnWidth(DESCRIPTION, w_new)
        for col in ACTION_COLUMNS:
            self.setColumnWidth(col, self.WIDTH_ACTIONS)
        QTableView.resizeEvent(self, event)
        self.resize_rows()

    def keyPressEvent(self, event):
        """Override Qt method"""
        QTableView.keyPressEvent(self, event)
        if event.key() in [Qt.Key_Enter, Qt.Key_Return]:
            index = self.currentIndex()
            self.action_pressed(index)
            self.pressed_here = True

    def keyReleaseEvent(self, event):
        """Override Qt method"""
        QTableView.keyReleaseEvent(self, event)
        if event.key() in [Qt.Key_Enter, Qt.Key_Return] and self.pressed_here:
            self.action_released()
        self.pressed_here = False

    def mousePressEvent(self, event):
        """Override Qt method"""
        QTableView.mousePressEvent(self, event)
        self.current_index = self.currentIndex()
        if event.button() == Qt.LeftButton:
            pos = QPoint(event.x(), event.y())
            index = self.indexAt(pos)
            self.action_pressed(index)
        elif event.button() == Qt.RightButton:
            self.context_menu_requested(event)

    def mouseReleaseEvent(self, event):
        """Override Qt method"""
        if event.button() == Qt.LeftButton:
            self.action_released()

    def action_pressed(self, index):
        """Record a press on an action cell and show its 'pressed' icon."""
        column = index.column()
        if self.proxy_model is not None:
            model_index = self.proxy_model.mapToSource(index)
            model = self.source_model
            self._model_index_clicked = model_index
            self.valid = False
            if ((column == INSTALL and model.is_installable(model_index)) or
                    (column == REMOVE and model.is_removable(model_index)) or
                    (column == UPGRADE and model.is_upgradable(model_index)) or
                    (column == DOWNGRADE and model.is_downgradable(model_index))):
                model.update_row_icon(model_index.row(), model_index.column())
                self.valid = True
                self.column_ = column
            else:
                self._model_index_clicked = None
                self.valid = False

    def action_released(self):
        """Restore the icon and run the action recorded by action_pressed."""
        model_index = self._model_index_clicked
        if model_index:
            self.source_model.update_row_icon(model_index.row(),
                                              model_index.column())
            if self.valid:
                name = self.source_model.row(model_index.row())[NAME]
                versions = self.source_model.get_package_versions(name)
                version = self.source_model.get_package_version(name)
                action = self.column_
                self._parent._run_action(name, action, version, versions)

    def context_menu_requested(self, event):
        """ Custom context menu"""
        index = self.current_index
        model_index = self.proxy_model.mapToSource(index)
        row = self.source_model.row(model_index.row())
        name, license_ = row[NAME], row[LICENSE]
        pos = QPoint(event.x(), event.y())
        self._menu = QMenu(self)
        metadata = self._parent.get_package_metadata(name)
        pypi = metadata['pypi']
        home = metadata['home']
        dev = metadata['dev']
        docs = metadata['docs']
        q_pypi = QIcon(get_image_path('python.png'))
        q_home = QIcon(get_image_path('home.png'))
        q_docs = QIcon(get_image_path('conda_docs.png'))
        if 'git' in dev:
            q_dev = QIcon(get_image_path('conda_github.png'))
        elif 'bitbucket' in dev:
            q_dev = QIcon(get_image_path('conda_bitbucket.png'))
        else:
            q_dev = QIcon()
        # Fix: use substring matching for BSD as is already done for MIT;
        # an exact '==' missed licenses such as 'BSD 3-Clause'.
        if 'mit' in license_.lower():
            lic = 'http://opensource.org/licenses/MIT'
        elif 'bsd' in license_.lower():
            lic = 'http://opensource.org/licenses/BSD-3-Clause'
        else:
            lic = None
        actions = []
        if license_ != '':
            actions.append(create_action(self, _('License: ' + license_),
                                         icon=QIcon(), triggered=lambda:
                                         self.open_url(lic)))
            actions.append(None)
        if pypi != '':
            actions.append(create_action(self, _('Python Package Index'),
                                         icon=q_pypi, triggered=lambda:
                                         self.open_url(pypi)))
        if home != '':
            actions.append(create_action(self, _('Homepage'),
                                         icon=q_home, triggered=lambda:
                                         self.open_url(home)))
        if docs != '':
            actions.append(create_action(self, _('Documentation'),
                                         icon=q_docs, triggered=lambda:
                                         self.open_url(docs)))
        if dev != '':
            actions.append(create_action(self, _('Development'),
                                         icon=q_dev, triggered=lambda:
                                         self.open_url(dev)))
        if len(actions):
            add_actions(self._menu, actions)
            self._menu.popup(self.viewport().mapToGlobal(pos))

    def open_url(self, url):
        """Open link from action in default operating system browser"""
        if url is None:
            return
        QDesktopServices.openUrl(QUrl(url))
class DownloadManager(QObject):
    """Synchronous download manager
    used http://qt-project.org/doc/qt-4.8/
    network-downloadmanager-downloadmanager-cpp.html
    as inspiration

    Downloads a queue of [filename, url] pairs one at a time; already
    downloaded files are skipped when their size matches the server's
    Content-Length header.
    """

    def __init__(self, parent, on_finished_func, on_progress_func, save_path):
        super(DownloadManager, self).__init__(parent)
        self._parent = parent
        self._on_finished_func = on_finished_func  # called when queue empties
        self._on_progress_func = on_progress_func  # called with [done, total]
        self._manager = QNetworkAccessManager(self)
        self._request = None
        self._reply = None
        self._queue = None     # [['filename', 'uri'], ...]
        self._url = None       # current url in process
        self._filename = None  # current filename in process
        self._save_path = None  # current defined save path
        self._error = None     # error number
        self._free = True      # lock process flag
        self.set_save_path(save_path)

    def _start_next_download(self):
        """Pop the next queue entry and start (or skip) its download."""
        if self._free:
            if len(self._queue) != 0:
                self._free = False
                self._filename, self._url = self._queue.pop(0)
                full_path = osp.join(self._save_path, self._filename)
                if osp.isfile(full_path):
                    # compare file versions by getting headers first
                    self._get(header_only=True)
                else:
                    # file does not exists, first download
                    self._get()
                # print(full_path)
            else:
                # Queue drained: notify the caller.
                self._on_finished_func()

    def _get(self, header_only=False):
        """Download file specified by uri"""
        self._request = QNetworkRequest(QUrl(self._url))
        self._reply = None
        self._error = None
        if header_only:
            # HEAD request: only fetch headers to compare file sizes.
            self._reply = self._manager.head(self._request)
            self._reply.finished.connect(self._on_downloaded_headers)
        else:
            self._reply = self._manager.get(self._request)
            self._reply.finished.connect(self._on_downloaded)
        self._reply.downloadProgress.connect(self._on_progress)

    def _on_downloaded_headers(self):
        """On header from uri downloaded"""
        # handle error for headers...
        error_code = self._reply.error()
        if error_code > 0:
            self._on_errors(error_code)
            return None
        fullpath = osp.join(self._save_path, self._filename)
        headers = {}
        data = self._reply.rawHeaderPairs()
        for d in data:
            if isinstance(d[0], QByteArray):
                d = [d[0].data(), d[1].data()]
            key = to_text_string(d[0], encoding='ascii')
            value = to_text_string(d[1], encoding='ascii')
            headers[key.lower()] = value
        if len(headers) != 0:
            # Same size as the remote file -> assume up to date and skip.
            header_filesize = int(headers['content-length'])
            local_filesize = int(osp.getsize(fullpath))
            if header_filesize == local_filesize:
                self._free = True
                self._start_next_download()
            else:
                self._get()

    def _on_downloaded(self):
        """On file downloaded"""
        # check if errors
        error_code = self._reply.error()
        if error_code > 0:
            self._on_errors(error_code)
            return None
        # process data if no errors
        data = self._reply.readAll()
        self._save_file(data)

    def _on_errors(self, e):
        """On download errors"""
        self._free = True  # otherwise update button cannot work!
        self._error = e
        self._on_finished_func()

    def _on_progress(self, downloaded_size, total_size):
        """On Partial progress"""
        self._on_progress_func([downloaded_size, total_size])

    def _save_file(self, data):
        """Write the downloaded bytes to disk and continue with the queue."""
        if not osp.isdir(self._save_path):
            os.mkdir(self._save_path)
        fullpath = osp.join(self._save_path, self._filename)
        if isinstance(data, QByteArray):
            data = data.data()
        with open(fullpath, 'wb') as f:
            f.write(data)
        self._free = True
        self._start_next_download()

    # public api
    # ----------
    def set_save_path(self, path):
        """Set the directory downloaded files are written to."""
        self._save_path = path

    def set_queue(self, queue):
        """[['filename', 'uri'], ['filename', 'uri'], ...]"""
        self._queue = queue

    def get_errors(self):
        """Return the last network error code (None if none occurred)."""
        return self._error

    def start_download(self):
        """Begin processing the queue set with set_queue()."""
        self._start_next_download()

    def stop_download(self):
        """Not implemented: downloads cannot currently be cancelled."""
        pass
class SearchLineEdit(QLineEdit):
    """Line edit search widget with icon and remove all button"""
    def __init__(self, parent, icon=True):
        """Build the widget; *icon* toggles the leading search glyph."""
        super(SearchLineEdit, self).__init__(parent)
        # right margin leaves room for the clear ("remove all") button
        self.setTextMargins(1, 0, 20, 0)
        if icon:
            # extra left margin leaves room for the search icon
            self.setTextMargins(18, 0, 20, 0)
            self._label = QLabel(self)
            self._pixmap_icon = QPixmap(get_image_path('conda_search.png',
                                                       'png'))
            self._label.setPixmap(self._pixmap_icon)
            self._label.setStyleSheet('''border: 0px; padding-bottom: 2px;
                                      padding-left: 1px;''')
        self._pixmap = QPixmap(get_image_path(('conda_del.png')))
        self.button_clear = QToolButton(self)
        self.button_clear.setIcon(QIcon(self._pixmap))
        self.button_clear.setIconSize(QSize(18, 18))
        self.button_clear.setCursor(Qt.ArrowCursor)
        self.button_clear.setStyleSheet("""QToolButton
            {background: transparent;
            padding: 0px; border: none; margin:0px; }""")
        # the clear button only appears once there is text to clear
        self.button_clear.setVisible(False)
        # signals and slots
        self.button_clear.clicked.connect(self.clear_text)
        self.textChanged.connect(self._toggle_visibility)
        self.textEdited.connect(self._toggle_visibility)
        # layout
        self._layout = QHBoxLayout(self)
        self._layout.addWidget(self.button_clear, 0, Qt.AlignRight)
        self._layout.setSpacing(0)
        self._layout.setContentsMargins(0, 2, 2, 0)
    def _toggle_visibility(self):
        """Show the clear button only while the line edit holds text."""
        if len(self.text()) == 0:
            self.button_clear.setVisible(False)
        else:
            self.button_clear.setVisible(True)
    # public api
    # ----------
    def clear_text(self):
        """Empty the line edit and give it back keyboard focus."""
        self.setText('')
        self.setFocus()
class CondaDependenciesModel(QAbstractTableModel):
    """Two-column (name, version) table model for conda dependency lists.

    *dic* is the dictionary produced by a conda dry run; an empty dict
    renders a single placeholder row while dependencies are being
    resolved.  Section-title rows (FETCH/EXTRACT/LINK/UNLINK headings)
    are rendered in bold.
    """
    def __init__(self, parent, dic):
        super(CondaDependenciesModel, self).__init__(parent)
        self._parent = parent
        self._packages = dic
        self._rows = []
        self._bold_rows = []  # indexes of section-title rows
        if len(dic) == 0:
            # placeholder shown while the dry run is still in flight
            self._rows = [[_(u'Updating dependency list...'), u'']]
            self._bold_rows.append(0)
        else:
            if 'actions' in dic:
                dic = dic['actions']
            titles = {'FETCH': _('Packages to download'),
                      'UNLINK': _('Packages to unlink'),
                      'LINK': _('Packages to link'),
                      'EXTRACT': _('Packages to extract')
                      }
            order = ['FETCH', 'EXTRACT', 'LINK', 'UNLINK']
            row = 0
            for key in order:
                if key in dic:
                    # section heading, then one row per canonical name
                    self._rows.append([u(titles[key]), ''])
                    self._bold_rows.append(row)
                    row += 1
                    for item in dic[key]:
                        name, version, build = \
                            conda_api_q.split_canonical_name(item)
                        self._rows.append([name, version])
                        row += 1
    def flags(self, index):
        """Override Qt method"""
        if not index.isValid():
            return Qt.ItemIsEnabled
        column = index.column()
        if column in [0, 1]:
            return Qt.ItemFlags(Qt.ItemIsEnabled)
        else:
            return Qt.ItemFlags(Qt.NoItemFlags)
    def data(self, index, role=Qt.DisplayRole):
        """Override Qt method"""
        if not index.isValid() or not 0 <= index.row() < len(self._rows):
            return to_qvariant()
        row = index.row()
        column = index.column()
        # FIX: the original guard compared a list against an int
        # (`self._rows[row] == row`), which is always False; use a real
        # malformed-row check instead so the unpacking cannot raise.
        if len(self._rows[row]) == 2:
            name, size = self._rows[row]
        else:
            name, size = u'', u''
        if role == Qt.DisplayRole:
            if column == 0:
                return to_qvariant(name)
            elif column == 1:
                return to_qvariant(size)
        elif role == Qt.TextAlignmentRole:
            if column in [0]:
                return to_qvariant(int(Qt.AlignLeft | Qt.AlignVCenter))
            elif column in [1]:
                return to_qvariant(int(Qt.AlignHCenter | Qt.AlignVCenter))
        elif role == Qt.ForegroundRole:
            return to_qvariant()
        elif role == Qt.FontRole:
            font = QFont()
            if row in self._bold_rows:
                font.setBold(True)
                return to_qvariant(font)
            else:
                font.setBold(False)
                return to_qvariant(font)
        return to_qvariant()
    def rowCount(self, index=QModelIndex()):
        """Override Qt method"""
        return len(self._rows)
    def columnCount(self, index=QModelIndex()):
        """Override Qt method"""
        return 2
    def row(self, rownum):
        """Return the [name, version] list stored at *rownum*."""
        return self._rows[rownum]
class CondaPackageActionDialog(QDialog):
    """Modal confirmation dialog for a conda package action.

    Depending on *action* (INSTALL/UPGRADE/DOWNGRADE/REMOVE) it offers
    the applicable versions and shows a dependency table filled in the
    background by a conda dry run.
    """
    def __init__(self, parent, env, name, action, version, versions):
        # env: target environment name; version: currently linked version
        # of the package; versions: all known versions (may contain
        # duplicates coming from different builds)
        super(CondaPackageActionDialog, self).__init__(parent)
        self._parent = parent
        self._env = env
        self._version_text = None
        self._name = name
        self._dependencies_dic = {}
        self._conda_process = \
            conda_api_q.CondaProcess(self, self._on_process_finished)
        # widgets
        self.label = QLabel(self)
        self.combobox_version = QComboBox()
        self.label_version = QLabel(self)
        self.widget_version = None
        self.table_dependencies = None
        self.checkbox = QCheckBox(_('Install dependencies (recommended)'))
        self.bbox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel,
                                     Qt.Horizontal, self)
        self.button_ok = self.bbox.button(QDialogButtonBox.Ok)
        self.button_cancel = self.bbox.button(QDialogButtonBox.Cancel)
        self.button_cancel.setDefault(True)
        self.button_cancel.setAutoDefault(True)
        dialog_size = QSize(300, 90)
        # helper variable values
        action_title = {UPGRADE: _("Upgrade package"),
                        DOWNGRADE: _("Downgrade package"),
                        REMOVE: _("Remove package"),
                        INSTALL: _("Install package")}
        # Versions might have duplicates from different builds
        versions = sort_versions(list(set(versions)), reverse=True)
        # FIXME: There is a bug, a package installed by anaconda has version
        # astropy 0.4 and the linked list 0.4 but the available versions
        # in the json file do not include 0.4 but 0.4rc1... so...
        # temporal fix is to check if inside list otherwise show full list
        if action == UPGRADE:
            # offer only versions newer than the installed one
            if version in versions:
                index = versions.index(version)
                versions = versions[:index]
            else:
                versions = versions
        elif action == DOWNGRADE:
            # offer only versions older than the installed one
            if version in versions:
                index = versions.index(version)
                versions = versions[index+1:]
            else:
                versions = versions
        elif action == REMOVE:
            # removal applies only to the currently installed version
            versions = [version]
            self.combobox_version.setEnabled(False)
        if len(versions) == 1:
            # single candidate: show a plain label instead of a combobox
            if action == REMOVE:
                labeltext = _('Package version to remove:')
            else:
                labeltext = _('Package version available:')
            self.label_version.setText(versions[0])
            self.widget_version = self.label_version
        else:
            labeltext = _("Select package version:")
            self.combobox_version.addItems(versions)
            self.widget_version = self.combobox_version
        self.label.setText(labeltext)
        self.label_version.setAlignment(Qt.AlignLeft)
        self.table_dependencies = QWidget(self)
        self._layout = QGridLayout()
        self._layout.addWidget(self.label, 0, 0, Qt.AlignVCenter | Qt.AlignLeft)
        self._layout.addWidget(self.widget_version, 0, 1, Qt.AlignVCenter |
                               Qt.AlignRight)
        self.widgets = [self.checkbox, self.button_ok, self.widget_version,
                        self.table_dependencies]
        row_index = 1
        # Create a Table
        if action in [INSTALL, UPGRADE, DOWNGRADE]:
            table = QTableView(self)
            dialog_size = QSize(dialog_size.width() + 40, 300)
            self.table_dependencies = table
            row_index = 1
            self._layout.addItem(QSpacerItem(10, 5), row_index, 0)
            self._layout.addWidget(self.checkbox, row_index + 1, 0, 1, 2)
            self.checkbox.setChecked(True)
            # kick off the initial (asynchronous) dependency resolution
            self._changed_version(versions[0])
            table.setSelectionBehavior(QAbstractItemView.SelectRows)
            table.verticalHeader().hide()
            table.horizontalHeader().hide()
            table.setAlternatingRowColors(True)
            table.setShowGrid(False)
            table.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
            table.horizontalHeader().setStretchLastSection(True)
            self._layout.addWidget(self.table_dependencies, row_index + 2, 0, 1, 2,
                                   Qt.AlignHCenter)
            self._layout.addItem(QSpacerItem(10, 5), row_index + 3, 0)
        self._layout.addWidget(self.bbox, row_index + 6, 0, 1, 2, Qt.AlignHCenter)
        title = "{0}: {1}".format(action_title[action], name)
        self.setLayout(self._layout)
        self.setMinimumSize(dialog_size)
        self.setFixedSize(dialog_size)
        self.setWindowTitle(title)
        self.setModal(True)
        # signals and slots
        self.bbox.accepted.connect(self.accept)
        self.bbox.rejected.connect(self.close)
        self.combobox_version.currentIndexChanged.connect(
            self._changed_version)
        self.checkbox.stateChanged.connect(self._changed_checkbox)
    def _changed_version(self, version, dependencies=True):
        """Disable the GUI and recompute dependencies for *version*.

        NOTE(review): the *dependencies* parameter is never used — the
        checkbox state is read directly instead; confirm whether the
        parameter should take precedence.
        """
        self._set_gui_disabled(True)
        install_dependencies = (self.checkbox.checkState() == 2)
        self._version_text = to_text_string(version)
        self._get_dependencies(install_dependencies)
    def _get_dependencies(self, dependencies=True):
        """Ask the conda process (async) for 'name=version' dependencies."""
        name = [self._name + '=' + self._version_text]
        self._conda_process.dependencies(name=self._env, pkgs=name,
                                         dep=dependencies)
    def _changed_checkbox(self, state):
        """Recompute the dependency list when the checkbox is toggled."""
        if state:
            self._changed_version(self._version_text)
        else:
            self._changed_version(self._version_text, dependencies=False)
    def _on_process_finished(self, boo):
        """Conda-process callback: fill the table and re-enable the GUI."""
        # NOTE(review): stores into 'dependencies_dic' (no underscore), not
        # the '_dependencies_dic' created in __init__ — verify intent.
        if self.isVisible():
            dic = self._conda_process.output
            self.dependencies_dic = dic
            self._set_dependencies_table()
            self._set_gui_disabled(False)
    def _set_dependencies_table(self):
        """Populate the dependency table from the last dry-run output."""
        table = self.table_dependencies
        dic = self.dependencies_dic
        table.setModel(CondaDependenciesModel(self, dic))
        table.resizeColumnsToContents()
        table.resizeColumnToContents(1)
    def _set_gui_disabled(self, value):
        """Dis/enable all interactive widgets (True disables them)."""
        if value:
            # show the 'Updating dependency list...' placeholder meanwhile
            table = self.table_dependencies
            table.setModel(CondaDependenciesModel(self, {}))
            table.resizeColumnsToContents()
            table.setDisabled(True)
        else:
            table = self.table_dependencies
            table.setDisabled(False)
        for widget in self.widgets:
            widget.setDisabled(value)
class CondaPackagesWidget(QWidget):
    """Conda Packages Widget.

    Ties together the filter combobox, the update button, the search box,
    the packages table, a status bar and a progress bar.  Repodata files
    are fetched through a DownloadManager and the heavy repodata parsing
    is delegated to a Worker running in a QThread.
    """
    VERSION = '1.0.0'
    # Location of updated repo.json files from continuum/binstar
    CONDA_CONF_PATH = get_conf_path('conda')
    # Location of continuum/anaconda default repos shipped with spyder
    DATA_PATH = get_module_data_path('spyderplugins', 'data')
    # file inside DATA_PATH with metadata for conda packages
    DATABASE_FILE = 'packages.ini'
    def __init__(self, parent):
        super(CondaPackagesWidget, self).__init__(parent)
        self._parent = parent
        self._status = ''  # Statusbar message
        self._conda_process = \
            conda_api_q.CondaProcess(self, self._on_conda_process_ready,
                                     self._on_conda_process_partial)
        self._prefix = conda_api_q.ROOT_PREFIX
        self._download_manager = DownloadManager(self,
                                                 self._on_download_finished,
                                                 self._on_download_progress,
                                                 self.CONDA_CONF_PATH)
        self._thread = QThread(self)
        self._worker = None
        self._db_metadata = cp.ConfigParser()
        self._db_file = CondaPackagesWidget.DATABASE_FILE
        # FIX: close the metadata file after reading; the previous
        # `readfp(open(...))` leaked the file handle.
        with open(osp.join(self.DATA_PATH, self._db_file)) as db_fh:
            self._db_metadata.readfp(db_fh)
        self._packages_names = None
        self._row_data = None
        # Hardcoded channels for the moment
        self._default_channels = [
            ['_free_', 'http://repo.continuum.io/pkgs/free'],
            ['_pro_', 'http://repo.continuum.io/pkgs/pro']
            ]
        self._extra_channels = []
        # pyqt not working with ssl some bug here on the anaconda compilation
        # [['binstar_goanpeca_', 'https://conda.binstar.org/goanpeca']]
        self._repo_name = None   # linux-64, win-32, etc...
        self._channels = None    # [['filename', 'channel url'], ...]
        self._repo_files = None  # [filepath, filepath, ...]
        self._packages = {}
        self._download_error = None
        self._error = None
        # defined in self._setup() if None or in self.set_env method
        self._selected_env = None
        # widgets
        self.combobox_filter = QComboBox(self)
        self.button_update = QPushButton(_('Update package index'))
        self.textbox_search = SearchLineEdit(self)
        self.table = CondaPackagesTable(self)
        self.status_bar = QLabel(self)
        self.progress_bar = QProgressBar(self)
        self.widgets = [self.button_update, self.combobox_filter,
                        self.textbox_search, self.table]
        # setup widgets
        self.combobox_filter.addItems([k for k in COMBOBOX_VALUES_ORDERED])
        self.combobox_filter.setCurrentIndex(ALL)
        self.combobox_filter.setMinimumWidth(120)
        self.progress_bar.setVisible(False)
        self.progress_bar.setTextVisible(False)
        self.progress_bar.setMaximumHeight(16)
        self.progress_bar.setMaximumWidth(130)
        self.setWindowTitle(_("Conda Packages"))
        self.setMinimumSize(QSize(480, 300))
        # signals and slots
        self.combobox_filter.currentIndexChanged.connect(self.filter_package)
        self.button_update.clicked.connect(self.update_package_index)
        self.textbox_search.textChanged.connect(self.search_package)
        # NOTE: do not try to save the QSpacerItems in a variable for reuse
        # it will crash python on exit if you do!
        # layout setup
        self._spacer_w = 250
        self._spacer_h = 5
        self._top_layout = QHBoxLayout()
        self._top_layout.addWidget(self.combobox_filter)
        self._top_layout.addWidget(self.button_update)
        self._top_layout.addWidget(self.textbox_search)
        self._middle_layout = QVBoxLayout()
        self._middle_layout.addWidget(self.table)
        self._bottom_layout = QHBoxLayout()
        self._bottom_layout.addWidget(self.status_bar, Qt.AlignLeft)
        self._bottom_layout.addWidget(self.progress_bar, Qt.AlignRight)
        self._layout = QVBoxLayout(self)
        self._layout.addItem(QSpacerItem(self._spacer_w, self._spacer_h))
        self._layout.addLayout(self._top_layout)
        self._layout.addLayout(self._middle_layout)
        self._layout.addItem(QSpacerItem(self._spacer_w, self._spacer_h))
        self._layout.addLayout(self._bottom_layout)
        self._layout.addItem(QSpacerItem(self._spacer_w, self._spacer_h))
        self.setLayout(self._layout)
        # setup
        if self._supports_architecture():
            self.update_package_index()
        else:
            status = _('no packages supported for this architecture!')
            self._update_status(progress=[0, 0], hide=True, status=status)
    def _supports_architecture(self):
        """Return True when a repo name exists for this platform/bitness."""
        self._set_repo_name()
        if self._repo_name is None:
            return False
        else:
            return True
    def _set_repo_name(self):
        """Get python system and bitness, and return default repo name"""
        system = sys.platform.lower()
        bitness = 64 if sys.maxsize > 2**32 else 32
        machine = platform.machine()
        fname = [None, None]
        if 'win' in system:
            fname[0] = 'win'
        elif 'lin' in system:
            fname[0] = 'linux'
        elif 'osx' in system or 'darwin' in system:  # TODO: is this correct?
            fname[0] = 'osx'
        else:
            # unsupported platform: leave self._repo_name as None
            return None
        if bitness == 32:
            fname[1] = '32'
        elif bitness == 64:
            fname[1] = '64'
        else:
            return None
        # armv6l
        if machine.startswith('armv6'):
            fname[1] = 'armv6l'
        self._repo_name = '-'.join(fname)
    def _set_channels(self):
        """Build the [filename, url] channel list and local file paths."""
        default = self._default_channels
        extra = self._extra_channels
        body = self._repo_name
        tail = '/repodata.json'
        channels = []
        files = []
        for channel in default + extra:
            prefix = channel[0]
            url = '{0}/{1}{2}'.format(channel[1], body, tail)
            name = '{0}{1}.json'.format(prefix, body)
            channels.append([name, url])
            files.append(osp.join(self.CONDA_CONF_PATH, name))
        self._repo_files = files
        self._channels = channels
    def _download_repodata(self):
        """download the latest version available of the repo(s)"""
        status = _('Updating package index...')
        self._update_status(hide=True, progress=[0, 0], status=status)
        self._download_manager.set_queue(self._channels)
        self._download_manager.start_download()
    # --- callback download manager
    # ------------------------------------------------------------------------
    def _on_download_progress(self, progress):
        """function called by download manager when receiving data
        progress : [int, int]
            A two item list of integers with relating [downloaded, total]
        """
        self._update_status(hide=True, progress=progress, status=None)
    def _on_download_finished(self):
        """function called by download manager when finished all downloads
        this will be called even if errors were encountered, so error handling
        is done here as well
        """
        error = self._download_manager.get_errors()
        if error is not None:
            self._update_status(hide=False)
        if not osp.isdir(self.CONDA_CONF_PATH):
            os.mkdir(self.CONDA_CONF_PATH)
        # FIX: iterate over a copy; removing items from the list being
        # iterated skipped the element right after each removed file.
        for repo_file in list(self._repo_files):
            # if a file does not exists, look for one in DATA_PATH
            if not osp.isfile(repo_file):
                filename = osp.basename(repo_file)
                bck_repo_file = osp.join(self.DATA_PATH, filename)
                # if available copy to CONDA_CONF_PATH
                if osp.isfile(bck_repo_file):
                    shutil.copy(bck_repo_file, repo_file)
                # otherwise remove from the repo_files list
                else:
                    self._repo_files.remove(repo_file)
        self._error = None
        self._setup_widget()
    # ------------------------------------------------------------------------
    def _setup_widget(self):
        """(Re)start the background Worker that parses the repodata."""
        if self._selected_env is None:
            self._selected_env = ROOT
        self._thread.terminate()
        self._thread = QThread(self)
        self._worker = Worker(self, self._repo_files, self._selected_env,
                              self._prefix)
        self._worker.sig_status_updated.connect(self._update_status)
        self._worker.sig_ready.connect(self._worker_ready)
        self._worker.sig_ready.connect(self._thread.quit)
        self._worker.moveToThread(self._thread)
        self._thread.started.connect(self._worker._prepare_model)
        self._thread.start()
    def _worker_ready(self):
        """Collect the Worker results and refresh the table model."""
        self._packages_names = self._worker.packages_names
        self._packages_versions = self._worker.packages_versions
        self._row_data = self._worker.row_data
        # depending on the size of table this might lock the gui for a moment
        self.table.setup_model(self._packages_names, self._packages_versions,
                               self._row_data)
        self.table.filter_changed()
        self._update_status(hide=False)
    def _update_status(self, status=None, hide=True, progress=None):
        """Update status bar, progress bar display and widget visibility
        status : str or None
            New status bar message; None keeps the current one.
        hide : bool
            True disables the interactive widgets and shows the progress
            bar; False re-enables them and hides it.
        progress : [int, int] or None
            [current, total] values for the progress bar.
        """
        for widget in self.widgets:
            widget.setDisabled(hide)
        self.progress_bar.setVisible(hide)
        if status is not None:
            self._status = status
        self.status_bar.setText(self._status)
        if progress is not None:
            self.progress_bar.setMinimum(0)
            self.progress_bar.setMaximum(progress[1])
            self.progress_bar.setValue(progress[0])
    def _run_action(self, name, action, version, versions):
        """Show the confirmation dialog and launch the conda action."""
        env = self._selected_env
        dlg = CondaPackageActionDialog(self, env, name, action, version,
                                       versions)
        if dlg.exec_():
            dic = {}
            # FIX: 'self.status = ...' was a dead attribute assignment that
            # never reached the status bar; route the text through
            # _update_status instead.
            self._update_status(hide=True, status=_('Processing'))
            self.repaint()
            env = self._selected_env
            ver1 = dlg.label_version.text()
            ver2 = dlg.combobox_version.currentText()
            # only one of ver1/ver2 is non-empty (label vs combobox mode)
            pkg = u'{0}={1}{2}'.format(name, ver1, ver2)
            dep = dlg.checkbox.checkState()
            state = dlg.checkbox.isEnabled()
            dlg.close()
            dic['name'] = env
            dic['pkg'] = pkg
            dic['dep'] = not (dep == 0 and state)
            self._run_conda_process(action, dic)
    def _run_conda_process(self, action, dic):
        """Dispatch *action* to the conda process and set the status text."""
        # FIX: renamed the local from 'cp' so it no longer shadows the
        # module-level ConfigParser alias, and initialize 'status' so an
        # unknown action cannot raise NameError below.
        conda_process = self._conda_process
        status = None
        name = dic['name']
        if 'pkg' in dic and 'dep' in dic:
            pkgs = dic['pkg']
            dep = dic['dep']
        if action == INSTALL or action == UPGRADE or action == DOWNGRADE:
            status = _('Installing <b>') + dic['pkg'] + '</b>'
            status = status + _(' into <i>') + dic['name'] + '</i>'
            conda_process.install(name=name, pkgs=[pkgs], dep=dep)
        elif action == REMOVE:
            status = (_('Removing <b>') + dic['pkg'] + '</b>' + _(' from <i>')
                      + dic['name'] + '</i>')
            conda_process.remove(pkgs, name=name)
        # --- actions to be implemented in case of environment needs
        elif action == CREATE:
            status = _('Creating environment <b>') + dic['name'] + '</b>'
        elif action == CLONE:
            status = (_('Cloning ') + '<i>' + dic['cloned from']
                      + _('</i> into <b>') + dic['name'] + '</b>')
        elif action == REMOVE_ENV:
            status = _('Removing environment <b>') + dic['name'] + '</b>'
        self._update_status(hide=True, status=status, progress=[0, 0])
    def _on_conda_process_ready(self):
        """Conda process finished: report errors or refresh the model."""
        error = self._conda_process.error
        # FIX: the condition was inverted ('error is None' displayed the
        # error message on success); report only when an error occurred.
        if error is not None:
            status = _('there was an error')
            self._update_status(hide=False, status=status)
        else:
            self._update_status(hide=True)
        self._setup_widget()
    def _on_conda_process_partial(self):
        """Parse a partial (json) progress line emitted by conda."""
        try:
            partial = self._conda_process.partial.split('\n')[0]
            partial = json.loads(partial)
        except Exception:
            # malformed or absent partial output: show no progress
            partial = {'progress': 0, 'maxval': 0}
        progress = partial['progress']
        maxval = partial['maxval']
        if 'fetch' in partial:
            status = _('Downloading <b>') + partial['fetch'] + '</b>'
        elif 'name' in partial:
            status = _('Installing and linking <b>') + partial['name'] + '</b>'
        else:
            progress = 0
            maxval = 0
            status = None
        self._update_status(status=status, progress=[progress, maxval])
    # public api
    # ----------
    def update_package_index(self):
        """Download the repodata and rebuild the package index."""
        self._set_channels()
        self._download_repodata()
    def search_package(self, text):
        """Forward the search text to the table filter."""
        self.table.search_string_changed(text)
    def filter_package(self, value):
        """Forward the status-filter selection to the table."""
        self.table.filter_status_changed(value)
    def get_package_metadata(self, name):
        """Return {description, url, pypi, home, docs, dev} for *name*.

        Missing entries in the packages.ini database are left as ''.
        """
        db = self._db_metadata
        metadata = dict(description='', url='', pypi='', home='', docs='',
                        dev='')
        for key in metadata:
            name_lower = name.lower()
            # also try the base name of 'name-subpackage' style packages
            for name_key in (name_lower, name_lower.split('-')[0]):
                try:
                    metadata[key] = db.get(name_key, key)
                    break
                except (cp.NoSectionError, cp.NoOptionError):
                    pass
        return metadata
    def set_environment(self, env):
        """Reset environment to reflect this environment in the package model"""
        # TODO: check if env exists!
        self._selected_env = env
        self._setup_widget()
class Worker(QObject):
    """Helper class to preprocess the repodata.json file(s) information into
    a useful format for the CondaPackagesModel class without blocking the GUI
    in case the number of packages or channels grows too large.
    """
    # emitted when row_data/packages_names/packages_versions are ready
    sig_ready = Signal()
    # emitted with (status_text, hide_widgets, [progress, total])
    sig_status_updated = Signal(str, bool, list)
    def __init__(self, parent, repo_files, env, prefix):
        # parent: CondaPackagesWidget; repo_files: local repodata.json
        # paths; env: selected environment name; prefix: conda prefix of
        # the environment whose linked packages are inspected
        QObject.__init__(self)
        self._parent = parent
        self._repo_files = repo_files
        self._env = env
        self._prefix = prefix
        self.packages_names = None
        self.row_data = None
        self.packages_versions = None
        # define helper function locally
        self._get_package_metadata = parent.get_package_metadata
    def _prepare_model(self):
        """Entry point (run in the worker thread): load, then reshape."""
        self._load_packages()
        self._setup_data()
    def _load_packages(self):
        """Group every package entry of every repodata file by lowercase name."""
        self.sig_status_updated.emit(_('Loading conda packages...'), True,
                                     [0, 0])
        grouped_usable_packages = {}
        packages_all = []
        for repo_file in self._repo_files:
            with open(repo_file, 'r') as f:
                data = json.load(f)
            # info = data['info']
            packages = data['packages']
            if packages is not None:
                packages_all.append(packages)
                # first pass: create one empty bucket per package name
                for key in packages:
                    val = packages[key]
                    name = val['name'].lower()
                    grouped_usable_packages[name] = list()
        # second pass: fill buckets with [canonical_name, metadata] pairs
        for packages in packages_all:
            for key in packages:
                val = packages[key]
                name = val['name'].lower()
                grouped_usable_packages[name].append([key, val])
        self._packages = grouped_usable_packages
    def _setup_data(self):
        """Derive the table rows (name, description, version, status, ...)
        from the grouped packages and the environment's linked packages."""
        self._packages_names = []
        self._rows = []
        self._packages_versions = {}  # the canonical name of versions compat
        self._packages_linked = {}
        self._packages_versions_number = {}
        self._packages_versions_all = {}  # the canonical name of all versions
        self._packages_upgradable = {}
        self._packages_downgradable = {}
        self._packages_installable = {}
        self._packages_licenses_all = {}
        self._conda_api = conda_api_q
        cp = self._conda_api
        # TODO: Do we want to exclude some packages? If we plan to continue
        # with the projects in spyder idea, we might as well hide spyder
        # from the possible instalable apps...
        # exclude_names = ['emptydummypackage']  # FIXME: packages to exclude?
        # First do the list of linked packages so in case there is no json
        # We can show at least that
        self._packages_linked = {}
        canonical_names = sorted(list(cp.linked(self._prefix)))
        # This has to do with the versions of the selected environment, NOT
        # with the python version running!
        pyver, numpyver, pybuild, numpybuild = None, None, None, None
        for canonical_name in canonical_names:
            n, v, b = cp.split_canonical_name(canonical_name)
            self._packages_linked[n] = [n, v, b, canonical_name]
            if n == 'python':
                pyver = v
                pybuild = b
            elif n == 'numpy':
                numpyver = v
                numpybuild = b
        if self._packages == {}:
            # no repodata available: fall back to the linked packages only
            self._packages_names = sorted([l for l in self._packages_linked])
            self._rows = list(range(len(self._packages_names)))
            for n in self._packages_linked:
                val = self._packages_linked[n]
                v = val[-1]
                self._packages[n] = [[v, v]]
        else:
            self._packages_names = sorted([key for key in
                                           self._packages])
            self._rows = list(range(len(self._packages_names)))
            for n in self._packages:
                self._packages_licenses_all[n] = {}
        # NOTE(review): assumes a 'python' package is linked in the prefix;
        # if pyver is None the next line raises AttributeError — TODO
        # confirm this invariant.
        pybuild = 'py' + ''.join(pyver.split('.'))[:-1] + '_'  # + pybuild
        if numpyver is None and numpybuild is None:
            numpybuild = ''
        else:
            numpybuild = 'np' + ''.join(numpyver.split('.'))[:-1]
        for n in self._packages_names:
            self._packages_versions_all[n] = \
                sort_versions([s[0] for s in self._packages[n]],
                              reverse=True)
            # collect the license declared for each version (when present)
            for s in self._packages[n]:
                val = s[1]
                if 'version' in val:
                    ver = val['version']
                    if 'license' in val:
                        lic = val['license']
                        self._packages_licenses_all[n][ver] = lic
        # Now clean versions depending on the build version of python and numpy
        # FIXME: there is an issue here... at this moment a package with same
        # version but only differing in the build number will get added
        # Now it assumes that there is a python installed in the root
        for name in self._packages_versions_all:
            tempver_cano = []
            tempver_num = []
            for ver in self._packages_versions_all[name]:
                n, v, b = cp.split_canonical_name(ver)
                # keep only builds compatible with the env's python/numpy
                if 'np' in b and 'py' in b:
                    if numpybuild + pybuild in b:
                        tempver_cano.append(ver)
                        tempver_num.append(v)
                elif 'py' in b:
                    if pybuild in b:
                        tempver_cano.append(ver)
                        tempver_num.append(v)
                elif 'np' in b:
                    if numpybuild in b:
                        tempver_cano.append(ver)
                        tempver_num.append(v)
                else:
                    tempver_cano.append(ver)
                    tempver_num.append(v)
            self._packages_versions[name] = sort_versions(tempver_cano,
                                                          reverse=True)
            self._packages_versions_number[name] = sort_versions(tempver_num,
                                                                 reverse=True)
        # FIXME: Check what to do with different builds??
        # For the moment here a set is used to remove duplicate versions
        for n in self._packages_linked:
            vals = self._packages_linked[n]
            canonical_name = vals[-1]
            current_ver = vals[1]
            # fix error when package installed from other channels besides
            # the standard ones
            if n in self._packages_versions_number:
                vers = self._packages_versions_number[n]
                vers = sort_versions(list(set(vers)), reverse=True)
                self._packages_upgradable[n] = not current_ver == vers[0]
                self._packages_downgradable[n] = not current_ver == vers[-1]
        for row, name in enumerate(self._packages_names):
            if name in self._packages_linked:
                version = self._packages_linked[name][1]
                # NOTE(review): a linked package absent from
                # _packages_versions_number was never added to the
                # upgradable/downgradable dicts, so this may raise
                # KeyError — TODO verify.
                if (self._packages_upgradable[name] and
                        self._packages_downgradable[name]):
                    status = MIXGRADABLE
                elif self._packages_upgradable[name]:
                    status = UPGRADABLE
                elif self._packages_downgradable[name]:
                    status = DOWNGRADABLE
                else:
                    status = INSTALLED
            else:
                vers = self._packages_versions_number[name]
                vers = sort_versions(list(set(vers)), reverse=True)
                version = '-'
                if len(vers) == 0:
                    status = NOT_INSTALLABLE
                else:
                    status = NOT_INSTALLED
            metadata = self._get_package_metadata(name)
            description = metadata['description']
            url = metadata['url']
            if version in self._packages_licenses_all[name]:
                if self._packages_licenses_all[name][version]:
                    license_ = self._packages_licenses_all[name][version]
                else:
                    license_ = u''
            else:
                license_ = u''
            self._rows[row] = [name, description, version, status, url,
                               license_, False, False, False, False]
        self.row_data = self._rows
        self.packages_names = self._packages_names
        self.packages_versions = self._packages_versions
        self.sig_ready.emit()
# TODO: update packages.ini file
# TODO: Define some automatic tests that can include the following:
# Test 1
# Find out if all the urls in the packages.ini file lead to a webpage
# or if they produce a 404 error
# Test 2
# Test installation of custom packages
# Test 3
# nothing is loaded on the package listing but clicking on it will produce a
# NoneType error
def test():
    """Run conda packages widget test"""
    from spyderlib.utils.qthelpers import qapplication
    application = qapplication()
    packages_widget = CondaPackagesWidget(None)
    packages_widget.show()
    sys.exit(application.exec_())
if __name__ == '__main__':
    test()
| gpl-3.0 |
TangXT/edx-platform | lms/djangoapps/shoppingcart/migrations/0002_auto__add_field_paidcourseregistration_mode.py | 182 | 8687 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Add the 'mode' slug column (default 'honor') to
        shoppingcart_paidcourseregistration."""
        # Adding field 'PaidCourseRegistration.mode'
        db.add_column('shoppingcart_paidcourseregistration', 'mode',
                      self.gf('django.db.models.fields.SlugField')(default='honor', max_length=50),
                      keep_default=False)
def backwards(self, orm):
# Deleting field 'PaidCourseRegistration.mode'
db.delete_column('shoppingcart_paidcourseregistration', 'mode')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart']
| agpl-3.0 |
servo/servo | tests/wpt/web-platform-tests/tools/wpt/tests/test_wpt.py | 3 | 14627 | import errno
import os
import shutil
import socket
import subprocess
import sys
import tempfile
import time
try:
from urllib.request import urlopen
from urllib.error import URLError
except ImportError:
from urllib2 import urlopen, URLError
import pytest
here = os.path.abspath(os.path.dirname(__file__))
from tools.wpt import utils, wpt
def is_port_8000_in_use():
    """Return True if something is already bound to 127.0.0.1:8000."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind(("127.0.0.1", 8000))
    except socket.error as e:
        if e.errno == errno.EADDRINUSE:
            return True
        else:
            # Bare `raise` re-raises with the original traceback intact;
            # `raise e` would reset the traceback to this line.
            raise
    finally:
        s.close()
    return False


def get_persistent_manifest_path():
    """Return the path of the MANIFEST.json shared by this test module.

    On Travis (TRAVIS=true) the manifest lives under ~/meta so it can be
    cached between jobs; locally it sits in the repo root.
    """
    directory = ("~/meta" if os.environ.get('TRAVIS') == "true"
                 else wpt.localpaths.repo_root)
    return os.path.join(directory, "MANIFEST.json")
@pytest.fixture(scope="module", autouse=True)
def init_manifest():
    """Build the shared manifest once per module (`wpt manifest` exits via SystemExit)."""
    with pytest.raises(SystemExit) as excinfo:
        wpt.main(argv=["manifest", "--no-download",
                       "--path", get_persistent_manifest_path()])
    assert excinfo.value.code == 0


@pytest.fixture
def manifest_dir():
    """Yield a temp directory seeded with a copy of the shared MANIFEST.json."""
    try:
        path = tempfile.mkdtemp()
        shutil.copyfile(get_persistent_manifest_path(),
                        os.path.join(path, "MANIFEST.json"))
        yield path
    finally:
        utils.rmtree(path)


@pytest.fixture
def temp_test():
    """Yield a factory that writes throwaway testharness tests under ../../.tools-tests."""
    test_dir = "../../.tools-tests"
    # A previous crashed run may have left the directory behind; clear it so
    # os.makedirs() does not fail and poison every subsequent run.
    if os.path.exists(test_dir):
        utils.rmtree(test_dir)
    os.makedirs(test_dir)
    test_count = {"value": 0}

    def make_test(body):
        # Each generated test gets a unique, monotonically increasing name.
        test_count["value"] += 1
        test_name = ".tools-tests/%s.html" % test_count["value"]
        test_path = "../../%s" % test_name

        with open(test_path, "w") as handle:
            handle.write("""
            <!DOCTYPE html>
            <script src="/resources/testharness.js"></script>
            <script src="/resources/testharnessreport.js"></script>
            <script>%s</script>
            """ % body)

        return test_name

    # try/finally guarantees cleanup even if the consuming test errors out.
    try:
        yield make_test
    finally:
        utils.rmtree(test_dir)
def test_missing():
    """An unknown subcommand must abort via SystemExit."""
    with pytest.raises(SystemExit):
        wpt.main(argv=["#missing-command"])


def test_help():
    """`wpt --help` exits cleanly with status 0."""
    # TODO: It seems like there's a bug in argparse that makes this argument
    # order required; should try to work around that.
    with pytest.raises(SystemExit) as exit_info:
        wpt.main(argv=["--help"])
    assert exit_info.value.code == 0


@pytest.mark.slow
def test_list_tests(manifest_dir):
    """The `--list-tests` option should not produce an error under normal
    conditions."""
    argv = ["run", "--metadata", manifest_dir, "--list-tests",
            "--channel", "dev", "--yes", "chrome",
            "/dom/nodes/Element-tagName.html"]
    with pytest.raises(SystemExit) as exit_info:
        wpt.main(argv=argv)
    assert exit_info.value.code == 0
@pytest.mark.slow
def test_list_tests_missing_manifest(manifest_dir):
    """`--list-tests` should cope with an absent test manifest file."""
    os.remove(os.path.join(manifest_dir, "MANIFEST.json"))

    # This run re-creates the manifest file, which is not needed for a
    # successful exit; pointing --tests at this directory avoids scanning
    # the whole test tree and keeps the test fast.
    argv = ["run",
            "--tests", here,
            "--metadata", manifest_dir,
            "--list-tests",
            "--yes",
            "firefox", "/dom/nodes/Element-tagName.html"]
    with pytest.raises(SystemExit) as exit_info:
        wpt.main(argv=argv)
    assert exit_info.value.code == 0


@pytest.mark.slow
def test_list_tests_invalid_manifest(manifest_dir):
    """`--list-tests` should cope with a corrupted test manifest file."""
    manifest_filename = os.path.join(manifest_dir, "MANIFEST.json")
    assert os.path.isfile(manifest_filename)
    with open(manifest_filename, "a+") as handle:
        handle.write("extra text which invalidates the file")

    # Same --tests trick as above: avoid re-scanning the full test tree.
    argv = ["run",
            "--tests", here,
            "--metadata", manifest_dir,
            "--list-tests",
            "--yes",
            "firefox", "/dom/nodes/Element-tagName.html"]
    with pytest.raises(SystemExit) as exit_info:
        wpt.main(argv=argv)
    assert exit_info.value.code == 0
@pytest.mark.slow
@pytest.mark.remote_network
def test_run_zero_tests():
    """A test execution describing zero tests should be reported as an error
    even in the presence of the `--no-fail-on-unexpected` option."""
    if is_port_8000_in_use():
        pytest.skip("port 8000 already in use")

    missing = "/non-existent-dir/non-existent-file.html"
    with pytest.raises(SystemExit) as exit_info:
        wpt.main(argv=["run", "--yes", "--no-pause", "--channel", "dev",
                       "chrome", missing])
    assert exit_info.value.code != 0

    with pytest.raises(SystemExit) as exit_info:
        wpt.main(argv=["run", "--yes", "--no-pause", "--no-fail-on-unexpected",
                       "--channel", "dev", "chrome", missing])
    assert exit_info.value.code != 0


@pytest.mark.slow
@pytest.mark.remote_network
def test_run_failing_test():
    """A failing test exits non-zero unless `--no-fail-on-unexpected` is given."""
    if is_port_8000_in_use():
        pytest.skip("port 8000 already in use")
    failing_test = "/infrastructure/expected-fail/failing-test.html"
    assert os.path.isfile("../../%s" % failing_test)

    with pytest.raises(SystemExit) as exit_info:
        wpt.main(argv=["run", "--yes", "--no-pause", "--channel", "dev",
                       "chrome", failing_test])
    assert exit_info.value.code != 0

    with pytest.raises(SystemExit) as exit_info:
        wpt.main(argv=["run", "--yes", "--no-pause", "--no-fail-on-unexpected",
                       "--channel", "dev", "chrome", failing_test])
    assert exit_info.value.code == 0


@pytest.mark.slow
@pytest.mark.remote_network
def test_run_verify_unstable(temp_test):
    """`--verify` exits non-zero for flaky tests and zero for stable ones."""
    if is_port_8000_in_use():
        pytest.skip("port 8000 already in use")
    # This test flips a localStorage flag so it fails on every second run.
    flaky_test = temp_test("""
    test(function() {
      if (localStorage.getItem('wpt-unstable-test-flag')) {
        throw new Error();
      }
      localStorage.setItem('wpt-unstable-test-flag', 'x');
    }, 'my test');
    """)

    with pytest.raises(SystemExit) as exit_info:
        wpt.main(argv=["run", "--yes", "--verify", "--channel", "dev",
                       "chrome", flaky_test])
    assert exit_info.value.code != 0

    reliable_test = temp_test("test(function() {}, 'my test');")

    with pytest.raises(SystemExit) as exit_info:
        wpt.main(argv=["run", "--yes", "--verify", "--channel", "dev",
                       "chrome", reliable_test])
    assert exit_info.value.code == 0
# Files known to have changed in the commit used by the two tests below.
_APPCACHE_FILES = [
    "html/browsers/offline/appcache/workers/appcache-worker.html",
    "html/browsers/offline/appcache/workers/resources/appcache-dedicated-worker-not-in-cache.js",
    "html/browsers/offline/appcache/workers/resources/appcache-shared-worker-not-in-cache.js",
    "html/browsers/offline/appcache/workers/resources/appcache-worker-data.py",
    "html/browsers/offline/appcache/workers/resources/appcache-worker-import.py",
    "html/browsers/offline/appcache/workers/resources/appcache-worker.manifest",
    "html/browsers/offline/appcache/workers/resources/appcache-worker.py",
]


def test_files_changed(capsys):
    commit = "9047ac1d9f51b1e9faa4f9fad9c47d109609ab09"
    with pytest.raises(SystemExit) as exit_info:
        wpt.main(argv=["files-changed", "%s~..%s" % (commit, commit)])
    assert exit_info.value.code == 0
    out, err = capsys.readouterr()
    expected = ("\n".join(_APPCACHE_FILES) + "\n").replace("/", os.path.sep)
    assert out == expected
    assert err == ""


def test_files_changed_null(capsys):
    commit = "9047ac1d9f51b1e9faa4f9fad9c47d109609ab09"
    with pytest.raises(SystemExit) as exit_info:
        wpt.main(argv=["files-changed", "--null", "%s~..%s" % (commit, commit)])
    assert exit_info.value.code == 0
    out, err = capsys.readouterr()
    expected = "\0".join(_APPCACHE_FILES + [""]).replace("/", os.path.sep)
    assert out == expected
    assert err == ""


def test_files_changed_ignore():
    from tools.wpt.testfiles import exclude_ignored
    candidates = ["resources/testharness.js",
                  "resources/webidl2/index.js",
                  "test/test.js"]
    changed, ignored = exclude_ignored(candidates,
                                       ignore_rules=["resources/testharness*"])
    assert changed == [os.path.join(wpt.wpt_root, item)
                       for item in ("resources/webidl2/index.js", "test/test.js")]
    assert ignored == [os.path.join(wpt.wpt_root, "resources/testharness.js")]


def test_files_changed_ignore_rules():
    from tools.wpt.testfiles import compile_ignore_rule
    cases = {
        "foo*bar*/baz": r"^foo\*bar[^/]*/baz$",
        "foo**bar**/baz": r"^foo\*\*bar.*/baz$",
        "foobar/baz/*": "^foobar/baz/[^/]*$",
        "foobar/baz/**": "^foobar/baz/.*$",
    }
    for rule, pattern in cases.items():
        assert compile_ignore_rule(rule).pattern == pattern
def _tests_affected_output(capsys, manifest_dir, commit, extra_argv=()):
    """Run `wpt tests-affected` over commit~..commit and return captured stdout."""
    argv = (["tests-affected"] + list(extra_argv) +
            ["--metadata", manifest_dir, "%s~..%s" % (commit, commit)])
    with pytest.raises(SystemExit) as exit_info:
        wpt.main(argv=argv)
    assert exit_info.value.code == 0
    out, _ = capsys.readouterr()
    return out


@pytest.mark.slow  # this updates the manifest
@pytest.mark.xfail(sys.platform == "win32",
                   reason="Tests currently don't work on Windows for path reasons")
@pytest.mark.skipif(sys.platform == "win32",
                    reason="https://github.com/web-platform-tests/wpt/issues/12934")
def test_tests_affected(capsys, manifest_dir):
    # Affected-test detection inspects the files in the working directory,
    # not the ones at the named commit, so only check that a stable,
    # unlikely-to-be-renamed file shows up.
    out = _tests_affected_output(capsys, manifest_dir,
                                 "3a055e818218f548db240c316654f3cc1aeeb733")
    assert "infrastructure/reftest-wait.html" in out


@pytest.mark.slow  # this updates the manifest
@pytest.mark.xfail(sys.platform == "win32",
                   reason="Tests currently don't work on Windows for path reasons")
@pytest.mark.skipif(sys.platform == "win32",
                    reason="https://github.com/web-platform-tests/wpt/issues/12934")
def test_tests_affected_idlharness(capsys, manifest_dir):
    out = _tests_affected_output(capsys, manifest_dir,
                                 "47cea8c38b88c0ddd3854e4edec0c5b6f2697e62")
    expected = ("webrtc-identity/idlharness.https.window.js\n"
                "webrtc-insertable-streams/idlharness.https.window.js\n"
                "webrtc-stats/idlharness.window.js\n"
                "webrtc-stats/supported-stats.html\n"
                "webrtc/idlharness.https.window.js\n")
    assert out == expected


@pytest.mark.slow  # this updates the manifest
@pytest.mark.xfail(sys.platform == "win32",
                   reason="Tests currently don't work on Windows for path reasons")
@pytest.mark.skipif(sys.platform == "win32",
                    reason="https://github.com/web-platform-tests/wpt/issues/12934")
def test_tests_affected_null(capsys, manifest_dir):
    # As above: detection runs against the working tree, so only sanity-check
    # a couple of stable entries in the NUL-separated output.
    out = _tests_affected_output(capsys, manifest_dir,
                                 "2614e3316f1d3d1a744ed3af088d19516552a5de",
                                 extra_argv=["--null"])
    tests = out.split("\0")
    assert "dom/idlharness.any.js" in tests
    assert "xhr/idlharness.any.js" in tests
@pytest.mark.slow
@pytest.mark.skipif(sys.platform == "win32",
                    reason="no os.setsid/killpg to easily cleanup the process tree")
def test_serve():
    """`wpt serve` should start answering HTTP on port 8000 within 60s."""
    if is_port_8000_in_use():
        pytest.skip("port 8000 already in use")

    # Run the server in its own session so the whole process tree can be
    # torn down with a single killpg().
    server = subprocess.Popen([os.path.join(wpt.localpaths.repo_root, "wpt"),
                               "serve"],
                              preexec_fn=os.setsid)
    start = time.time()
    try:
        while True:
            if server.poll() is not None:
                assert False, "server not running"
            if time.time() - start > 60:
                assert False, "server did not start responding within 60s"
            try:
                resp = urlopen("http://web-platform.test:8000")
                print(resp)
            except URLError:
                # Not up yet; poll again in a second.
                print("URLError")
                time.sleep(1)
            else:
                assert resp.code == 200
                break
    finally:
        os.killpg(server.pid, 15)  # 15 == SIGTERM
# The following commands are slow running and used implicitly in other CI
# jobs, so we skip them here:
# wpt manifest
# wpt lint
| mpl-2.0 |
okfn/pdftables | test/test_linesegments.py | 3 | 1793 | import pdftables.line_segments as line_segments
from nose.tools import assert_equals, raises
from pdftables.line_segments import LineSegment
def segments(pairs):
    """Build LineSegment objects from (start, end) pairs.

    The parameter was previously also named ``segments``, shadowing the
    function's own name; all call sites in this module pass it positionally.
    """
    return [line_segments.LineSegment.make(a, b) for a, b in pairs]


def test_segments_generator():
    # Events are emitted in coordinate order: opens (False) then closes (True).
    seg1, seg2 = segs = segments([(1, 4), (2, 3)])
    values = list(line_segments.segments_generator(segs))
    assert_equals(
        [(1, seg1, False),
         (2, seg2, False),
         (3, seg2, True),
         (4, seg1, True)],
        values
    )


def test_histogram_segments():
    # Overlap of (1,4) and (2,3) gives coverage counts 1, 2, 1.
    segs = segments([(1, 4), (2, 3)])
    values = list(line_segments.histogram_segments(segs))
    assert_equals([((1, 2), 1), ((2, 3), 2), ((3, 4), 1)], values)


def test_segment_histogram():
    # Same histogram, expressed as (bin edges, counts).
    segs = segments([(1, 4), (2, 3)])
    values = list(line_segments.segment_histogram(segs))
    assert_equals([(1, 2, 3, 4), (1, 2, 1)], values)


@raises(RuntimeError)
def test_malformed_input_segments_generator():
    # A segment that ends before it starts must be rejected.
    segs = segments([(1, -1)])
    list(line_segments.segments_generator(segs))
def test_hat_point_generator():
    """Each emitted point carries the set of segments covering it."""
    segs = segments([(1, 4), (2, 3)])
    result = list(line_segments.hat_point_generator(segs))
    outer = LineSegment(start=1, end=4, object=None)
    inner = LineSegment(start=2, end=3, object=None)
    expected = [(1, set()),
                (2, {outer}),
                (2.5, {outer, inner}),
                (3, {outer}),
                (4, set())]
    assert_equals(expected, result)


def test_hat_generator():
    """Hat heights rise and fall symmetrically over nested segments."""
    segs = segments([(0, 4), (1, 3)])
    result = list(line_segments.hat_generator(segs))
    assert_equals([(0, 0), (1, 0.75), (2.0, 2.0), (3, 0.75), (4, 0)], result)
| bsd-2-clause |
pyatil/jenkins-job-builder | tests/cmd/test_cmd.py | 15 | 1229 | import os
from six.moves import configparser, StringIO
import testtools
from jenkins_jobs import cmd
from tests.base import mock
class CmdTestsBase(testtools.TestCase):
    """Shared setup for jenkins-jobs cmd tests."""

    fixtures_path = os.path.join(os.path.dirname(__file__), 'fixtures')
    parser = cmd.create_parser()

    def setUp(self):
        super(CmdTestsBase, self).setUp()

        # Tests run in parallel can race to create the cache directory, so
        # replace CacheStorage with an autospec'd mock for each test; the
        # patch is undone automatically via addCleanup.
        patcher = mock.patch('jenkins_jobs.builder.CacheStorage',
                             autospec=True)
        self.cache_mock = patcher.start()
        self.addCleanup(patcher.stop)

        self.config = configparser.ConfigParser()
        self.config.readfp(StringIO(cmd.DEFAULT_CONF))
class CmdTests(CmdTestsBase):

    def test_with_empty_args(self):
        """Calling main() with no arguments must fail with SystemExit."""
        with mock.patch('sys.stderr'):
            self.assertRaises(SystemExit, cmd.main, [])
| apache-2.0 |
ThomasFeher/audacity | lib-src/lv2/sord/waflib/Tools/tex.py | 177 | 8492 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,re
from waflib import Utils,Task,Errors,Logs
from waflib.TaskGen import feature,before_method
re_bibunit=re.compile(r'\\(?P<type>putbib)\[(?P<file>[^\[\]]*)\]',re.M)
def bibunitscan(self):
    """Scan the task's first input for ``\\putbib[...]`` references.

    Returns the list of nodes resolved next to the source file; names that
    cannot be resolved are only logged, not reported.
    """
    node = self.inputs[0]
    found = []
    if not node:
        return found
    for match in re_bibunit.finditer(node.read()):
        path = match.group('file')
        if not path:
            continue
        # Try the name verbatim and with a .bib suffix.
        for suffix in ('', '.bib'):
            Logs.debug('tex: trying %s%s' % (path, suffix))
            fi = node.parent.find_resource(path + suffix)
            if fi:
                found.append(fi)
            else:
                Logs.debug('tex: could not find %s' % path)
    Logs.debug("tex: found the following bibunit files: %s" % found)
    return found
exts_deps_tex=['','.ltx','.tex','.bib','.pdf','.png','.eps','.ps']
exts_tex=['.ltx','.tex']
re_tex=re.compile(r'\\(?P<type>include|bibliography|putbib|includegraphics|input|import|bringin|lstinputlisting)(\[[^\[\]]*\])?{(?P<file>[^{}]*)}',re.M)
g_bibtex_re=re.compile('bibdata',re.M)
class tex(Task.Task):
    """Base class for the LaTeX build tasks (latex/pdflatex/xelatex).

    Runs the compiler, then bibtex/makeindex as needed, then re-runs the
    compiler until the .aux files stop changing (at most 10 passes).
    """
    bibtex_fun, _ = Task.compile_fun('${BIBTEX} ${BIBTEXFLAGS} ${SRCFILE}', shell=False)
    bibtex_fun.__doc__ = """
Execute the program **bibtex**
"""
    makeindex_fun, _ = Task.compile_fun('${MAKEINDEX} ${MAKEINDEXFLAGS} ${SRCFILE}', shell=False)
    makeindex_fun.__doc__ = """
Execute the program **makeindex**
"""

    def exec_command(self, cmd, **kw):
        """Run *cmd*, defaulting the working directory to the variant dir."""
        bld = self.generator.bld
        try:
            if not kw.get('cwd', None):
                kw['cwd'] = bld.cwd
        except AttributeError:
            # bld.cwd not set yet: initialise it from the variant directory.
            bld.cwd = kw['cwd'] = bld.variant_dir
        return Utils.subprocess.Popen(cmd, **kw).wait()

    def scan_aux(self, node):
        """Recursively collect the .aux nodes referenced via \\@input{...}."""
        nodes = [node]
        re_aux = re.compile(r'\\@input{(?P<file>[^{}]*)}', re.M)

        def parse_node(node):
            code = node.read()
            for match in re_aux.finditer(code):
                path = match.group('file')
                found = node.parent.find_or_declare(path)
                if found and found not in nodes:
                    Logs.debug('tex: found aux node ' + found.abspath())
                    nodes.append(found)
                    parse_node(found)

        parse_node(node)
        return nodes

    def scan(self):
        """Dependency scanner for \\include, \\input, graphics, etc.

        Returns ``(nodes, names)``: resolved dependency nodes, and the raw
        names that could not be resolved to a node.
        """
        node = self.inputs[0]
        nodes = []
        names = []
        seen = []
        if not node:
            return (nodes, names)

        def parse_node(node):
            if node in seen:
                return
            seen.append(node)
            code = node.read()
            global re_tex
            for match in re_tex.finditer(code):
                for path in match.group('file').split(','):
                    if path:
                        add_name = True
                        found = None
                        for k in exts_deps_tex:
                            Logs.debug('tex: trying %s%s' % (path, k))
                            found = node.parent.find_resource(path + k)
                            # Skip nodes that are outputs of other tasks of
                            # this generator (they will exist at build time).
                            for tsk in self.generator.tasks:
                                if not found or found in tsk.outputs:
                                    break
                            else:
                                nodes.append(found)
                                add_name = False
                                # Recurse into other TeX sources.
                                for ext in exts_tex:
                                    if found.name.endswith(ext):
                                        parse_node(found)
                                        break
                        if add_name:
                            names.append(path)

        parse_node(node)
        for x in nodes:
            x.parent.get_bld().mkdir()
        Logs.debug("tex: found the following : %s and names %s" % (nodes, names))
        return (nodes, names)

    def check_status(self, msg, retcode):
        """Raise a WafError carrying *msg* if *retcode* is non-zero."""
        if retcode != 0:
            raise Errors.WafError("%r command exit status %r" % (msg, retcode))

    def bibfile(self):
        """Run bibtex for every .aux file that mentions \\bibdata."""
        for aux_node in self.aux_nodes:
            try:
                ct = aux_node.read()
            except (OSError, IOError):
                # Fixed: the previous format string had two placeholders
                # ('%s: %r') but only one argument, raising TypeError when
                # this error path was actually taken.
                Logs.error('Error reading %s' % aux_node.abspath())
                continue
            if g_bibtex_re.findall(ct):
                Logs.warn('calling bibtex')
                self.env.env = {}
                self.env.env.update(os.environ)
                self.env.env.update({'BIBINPUTS': self.TEXINPUTS,
                                     'BSTINPUTS': self.TEXINPUTS})
                self.env.SRCFILE = aux_node.name[:-4]
                self.check_status('error when calling bibtex', self.bibtex_fun())

    def bibunits(self):
        """Run bibtex on each bu<N> file produced by the bibunits package."""
        try:
            bibunits = bibunitscan(self)
        except OSError:
            Logs.error('error bibunitscan')
        else:
            if bibunits:
                fn = ['bu' + str(i) for i in xrange(1, len(bibunits) + 1)]
                if fn:
                    Logs.warn('calling bibtex on bibunits')
                    for f in fn:
                        self.env.env = {'BIBINPUTS': self.TEXINPUTS,
                                        'BSTINPUTS': self.TEXINPUTS}
                        self.env.SRCFILE = f
                        self.check_status('error when calling bibtex',
                                          self.bibtex_fun())

    def makeindex(self):
        """Run makeindex if the .idx file exists."""
        # Compute the path before the try block so the except branch cannot
        # hit an unbound local if abspath() itself were to fail.
        idx_path = self.idx_node.abspath()
        try:
            os.stat(idx_path)
        except OSError:
            Logs.warn('index file %s absent, not calling makeindex' % idx_path)
        else:
            Logs.warn('calling makeindex')
            self.env.SRCFILE = self.idx_node.name
            self.env.env = {}
            self.check_status('error when calling makeindex %s' % idx_path,
                              self.makeindex_fun())

    def bibtopic(self):
        """Pick up the extra per-topic .aux files created by bibtopic."""
        p = self.inputs[0].parent.get_bld()
        if os.path.exists(os.path.join(p.abspath(), 'btaux.aux')):
            self.aux_nodes += p.ant_glob('*[0-9].aux')

    def run(self):
        """Compile, run the bibliography/index tools, then iterate to a fixpoint."""
        env = self.env
        if not env['PROMPT_LATEX']:
            env.append_value('LATEXFLAGS', '-interaction=batchmode')
            env.append_value('PDFLATEXFLAGS', '-interaction=batchmode')
            env.append_value('XELATEXFLAGS', '-interaction=batchmode')
        fun = self.texfun
        node = self.inputs[0]
        srcfile = node.abspath()
        texinputs = self.env.TEXINPUTS or ''
        self.TEXINPUTS = (node.parent.get_bld().abspath() + os.pathsep +
                          node.parent.get_src().abspath() + os.pathsep +
                          texinputs + os.pathsep)
        self.cwd = self.inputs[0].parent.get_bld().abspath()

        Logs.warn('first pass on %s' % self.__class__.__name__)
        self.env.env = {}
        self.env.env.update(os.environ)
        self.env.env.update({'TEXINPUTS': self.TEXINPUTS})
        self.env.SRCFILE = srcfile
        self.check_status('error when calling latex', fun())

        self.aux_nodes = self.scan_aux(node.change_ext('.aux'))
        self.idx_node = node.change_ext('.idx')
        self.bibtopic()
        self.bibfile()
        self.bibunits()
        self.makeindex()

        # Re-run the compiler until the .aux files stabilise (max 10 passes).
        # 'aux_hash' renamed from 'hash', which shadowed the builtin.
        aux_hash = ''
        for i in range(10):
            prev_hash = aux_hash
            try:
                hashes = [Utils.h_file(x.abspath()) for x in self.aux_nodes]
                aux_hash = Utils.h_list(hashes)
            except (OSError, IOError):
                Logs.error('could not read aux.h')
            if aux_hash and aux_hash == prev_hash:
                break
            Logs.warn('calling %s' % self.__class__.__name__)
            self.env.env = {}
            self.env.env.update(os.environ)
            self.env.env.update({'TEXINPUTS': self.TEXINPUTS})
            self.env.SRCFILE = srcfile
            self.check_status('error when calling %s' % self.__class__.__name__,
                              fun())
class latex(tex):
    """Produce a .dvi from LaTeX source."""
    texfun, vars = Task.compile_fun('${LATEX} ${LATEXFLAGS} ${SRCFILE}', shell=False)


class pdflatex(tex):
    """Produce a .pdf from LaTeX source with pdflatex."""
    texfun, vars = Task.compile_fun('${PDFLATEX} ${PDFLATEXFLAGS} ${SRCFILE}', shell=False)


class xelatex(tex):
    """Produce a .pdf from LaTeX source with xelatex."""
    texfun, vars = Task.compile_fun('${XELATEX} ${XELATEXFLAGS} ${SRCFILE}', shell=False)


class dvips(Task.Task):
    """Convert .dvi output to PostScript."""
    color = 'BLUE'
    run_str = '${DVIPS} ${DVIPSFLAGS} ${SRC} -o ${TGT}'
    after = ['latex', 'pdflatex', 'xelatex']


class dvipdf(Task.Task):
    """Convert .dvi output to PDF."""
    color = 'BLUE'
    run_str = '${DVIPDF} ${DVIPDFFLAGS} ${SRC} ${TGT}'
    after = ['latex', 'pdflatex', 'xelatex']


class pdf2ps(Task.Task):
    """Convert .pdf output to PostScript."""
    color = 'BLUE'
    run_str = '${PDF2PS} ${PDF2PSFLAGS} ${SRC} ${TGT}'
    after = ['latex', 'pdflatex', 'xelatex']
@feature('tex')
@before_method('process_source')
def apply_tex(self):
    """Task-generator method: create one tex task per source file, plus the
    optional dvi/pdf/ps conversion tasks requested via the 'outs' attribute."""
    if not getattr(self, 'type', None) in ['latex', 'pdflatex', 'xelatex']:
        self.type = 'pdflatex'

    tree = self.bld
    outs = Utils.to_list(getattr(self, 'outs', []))
    self.env['PROMPT_LATEX'] = getattr(self, 'prompt', 1)

    # Resolve the explicit extra dependencies declared via 'deps'.
    deps_lst = []
    if getattr(self, 'deps', None):
        deps = self.to_list(self.deps)
        for filename in deps:
            n = self.path.find_resource(filename)
            if not n:
                self.bld.fatal('Could not find %r for %r' % (filename, self))
            if not n in deps_lst:
                deps_lst.append(n)

    for node in self.to_nodes(self.source):
        # One compilation task per source, of the configured flavour.
        if self.type == 'latex':
            task = self.create_task('latex', node, node.change_ext('.dvi'))
        elif self.type == 'pdflatex':
            task = self.create_task('pdflatex', node, node.change_ext('.pdf'))
        elif self.type == 'xelatex':
            task = self.create_task('xelatex', node, node.change_ext('.pdf'))
        task.env = self.env

        if deps_lst:
            for n in deps_lst:
                if not n in task.dep_nodes:
                    task.dep_nodes.append(n)

        # Conversion tasks inherit an environment with an extended TEXINPUTS.
        v = dict(os.environ)
        p = (node.parent.abspath() + os.pathsep +
             self.path.abspath() + os.pathsep +
             self.path.get_bld().abspath() + os.pathsep +
             v.get('TEXINPUTS', '') + os.pathsep)
        v['TEXINPUTS'] = p

        if self.type == 'latex':
            if 'ps' in outs:
                tsk = self.create_task('dvips', task.outputs, node.change_ext('.ps'))
                tsk.env.env = dict(v)
            if 'pdf' in outs:
                tsk = self.create_task('dvipdf', task.outputs, node.change_ext('.pdf'))
                tsk.env.env = dict(v)
        elif self.type == 'pdflatex':
            if 'ps' in outs:
                self.create_task('pdf2ps', task.outputs, node.change_ext('.ps'))

    # All sources have been consumed by the tasks above.
    self.source = []
def configure(self):
    """Configuration step: locate the TeX tool binaries.

    A missing program is not fatal; the corresponding task type simply
    cannot be used.
    """
    v = self.env
    programs = 'tex latex pdflatex xelatex bibtex dvips dvipdf ps2pdf makeindex pdf2ps'
    for prog in programs.split():
        try:
            self.find_program(prog, var=prog.upper())
        except self.errors.ConfigurationError:
            pass
    v['DVIPSFLAGS'] = '-Ppdf'
ToontownUprising/src | toontown/hood/GZHoodAI.py | 2 | 2411 | from pandac.PandaModules import *
from toontown.dna.DNAParser import DNAGroup, DNAVisGroup
from toontown.hood import HoodAI
from toontown.hood import ZoneUtil
from toontown.safezone.DistributedGolfKartAI import DistributedGolfKartAI
from toontown.toonbase import ToontownGlobals
class GZHoodAI(HoodAI.HoodAI):
    """AI-side controller for the Golf Zone hood.

    On top of the standard HoodAI startup it spawns the distributed golf
    karts, placing them at the 'starting_block' positions found in the
    zone's DNA data.
    """
    def __init__(self, air):
        # The Golf Zone is both the hood id and the canonical safezone id.
        HoodAI.HoodAI.__init__(self, air,
                               ToontownGlobals.GolfZone,
                               ToontownGlobals.GolfZone)
        self.golfKarts = []
        self.startup()
    def startup(self):
        HoodAI.HoodAI.startup(self)
        self.createGolfKarts()
    def findGolfKarts(self, dnaGroup, zoneId, area, overrideDNAZone=False):
        """Walk the DNA tree, spawning a DistributedGolfKartAI per starting
        block found under 'golf_kart' groups; returns the created karts.

        When overrideDNAZone is True, DNA vis-group zone ids are ignored and
        everything is generated in the passed-in zoneId.
        """
        golfKarts = []
        if isinstance(dnaGroup, DNAGroup) and ('golf_kart' in dnaGroup.getName()):
            # Group names look like 'golf_kart_<course>_...'; the third token
            # selects which golf course this kart serves.
            nameInfo = dnaGroup.getName().split('_')
            golfCourse = int(nameInfo[2])
            for i in xrange(dnaGroup.getNumChildren()):
                childDnaGroup = dnaGroup.at(i)
                if 'starting_block' in childDnaGroup.getName():
                    # Spawn the kart at the starting block's DNA transform.
                    pos = childDnaGroup.getPos()
                    hpr = childDnaGroup.getHpr()
                    golfKart = DistributedGolfKartAI(
                        self.air, golfCourse,
                        pos[0], pos[1], pos[2], hpr[0], hpr[1], hpr[2])
                    golfKart.generateWithRequired(zoneId)
                    golfKarts.append(golfKart)
        elif isinstance(dnaGroup, DNAVisGroup) and (not overrideDNAZone):
            # Vis-group names are '<zoneId>:...'; descend using that zone.
            zoneId = ZoneUtil.getTrueZoneId(int(dnaGroup.getName().split(':')[0]), zoneId)
        # Recurse into children regardless of the branch taken above.
        for i in xrange(dnaGroup.getNumChildren()):
            foundGolfKarts = self.findGolfKarts(dnaGroup.at(i), zoneId, area, overrideDNAZone=overrideDNAZone)
            golfKarts.extend(foundGolfKarts)
        return golfKarts
    def createGolfKarts(self):
        """Scan every zone's DNA data for golf karts, then start them all."""
        self.golfKarts = []
        for zoneId in self.getZoneTable():
            dnaData = self.air.dnaDataMap.get(zoneId, None)
            zoneId = ZoneUtil.getTrueZoneId(zoneId, self.zoneId)
            if dnaData.getName() == 'root':
                area = ZoneUtil.getCanonicalZoneId(zoneId)
                foundGolfKarts = self.findGolfKarts(dnaData, zoneId, area, overrideDNAZone=True)
                self.golfKarts.extend(foundGolfKarts)
        for golfKart in self.golfKarts:
            golfKart.start()
| mit |
iuliat/nova | nova/db/sqlalchemy/migrate_repo/versions/216_havana.py | 44 | 64425 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.changeset import UniqueConstraint
from migrate import ForeignKeyConstraint
from oslo_log import log as logging
from sqlalchemy import Boolean, BigInteger, Column, DateTime, Enum, Float
from sqlalchemy import dialects
from sqlalchemy import ForeignKey, Index, Integer, MetaData, String, Table
from sqlalchemy import Text
from sqlalchemy.types import NullType
from nova.i18n import _LE
LOG = logging.getLogger(__name__)
# Note on the autoincrement flag: this is defaulted for primary key columns
# of integral type, so is no longer set explicitly in such cases.
# NOTE(dprince): This wrapper allows us to easily match the Folsom MySQL
# Schema. In Folsom we created tables as latin1 and converted them to utf8
# later. This conversion causes some of the Text columns on MySQL to get
# created as mediumtext instead of just text.
def MediumText():
    """Return a Text column type that becomes MEDIUMTEXT on MySQL.

    On every other backend the plain TEXT type is used unchanged.
    """
    base_type = Text()
    return base_type.with_variant(dialects.mysql.MEDIUMTEXT(), 'mysql')
def Inet():
    """Return a 43-char string type that maps to native INET on PostgreSQL."""
    pg_variant = dialects.postgresql.INET()
    return String(length=43).with_variant(pg_variant, 'postgresql')
def InetSmall():
    """Return a 39-char string type that maps to native INET on PostgreSQL."""
    pg_variant = dialects.postgresql.INET()
    return String(length=39).with_variant(pg_variant, 'postgresql')
def _create_shadow_tables(migrate_engine):
    """Create a 'shadow_<name>' InnoDB copy of every table already in the DB.

    Reflects the current schema and, for each table, builds a shadow table
    with copies of all columns. Failures during creation are logged and
    re-raised so the migration aborts loudly.
    """
    meta = MetaData(migrate_engine)
    meta.reflect(migrate_engine)
    table_names = list(meta.tables.keys())
    meta.bind = migrate_engine
    for table_name in table_names:
        table = Table(table_name, meta, autoload=True)
        columns = []
        for column in table.columns:
            column_copy = None
            # NOTE(boris-42): BigInteger is not supported by sqlite, so
            #                 after copy it will have NullType, other
            #                 types that are used in Nova are supported by
            #                 sqlite.
            if isinstance(column.type, NullType):
                column_copy = Column(column.name, BigInteger(), default=0)
            if table_name == 'instances' and column.name == 'locked_by':
                # The shadow enum gets its own type name so it cannot clash
                # with the 'instances0locked_by' enum on backends that
                # create a named type (e.g. PostgreSQL).
                enum = Enum('owner', 'admin',
                            name='shadow_instances0locked_by')
                column_copy = Column(column.name, enum)
            else:
                # NOTE(review): this else belongs to the locked_by check, so
                # for a NullType column that is not locked_by it appears to
                # overwrite the BigInteger substitution made above — confirm
                # whether that is intended.
                column_copy = column.copy()
            columns.append(column_copy)
        shadow_table_name = 'shadow_' + table_name
        shadow_table = Table(shadow_table_name, meta, *columns,
                             mysql_engine='InnoDB')
        try:
            shadow_table.create()
        except Exception:
            # Log the table definition to aid debugging, then abort.
            LOG.info(repr(shadow_table))
            LOG.exception(_LE('Exception while creating table.'))
            raise
def _populate_instance_types(instance_types_table):
    """Seed the default m1.* flavors into the instance_types table.

    Any failure is logged together with the table definition and
    re-raised so the migration fails visibly.
    """
    default_inst_types = {
        'm1.tiny': dict(mem=512, vcpus=1, root_gb=1, eph_gb=0, flavid=1),
        'm1.small': dict(mem=2048, vcpus=1, root_gb=20, eph_gb=0, flavid=2),
        'm1.medium': dict(mem=4096, vcpus=2, root_gb=40, eph_gb=0, flavid=3),
        'm1.large': dict(mem=8192, vcpus=4, root_gb=80, eph_gb=0, flavid=4),
        'm1.xlarge': dict(mem=16384, vcpus=8, root_gb=160, eph_gb=0, flavid=5)
    }
    try:
        insert_stmt = instance_types_table.insert()
        for name, spec in default_inst_types.items():
            row = {
                'name': name,
                'memory_mb': spec["mem"],
                'vcpus': spec["vcpus"],
                'deleted': 0,
                'root_gb': spec["root_gb"],
                'ephemeral_gb': spec["eph_gb"],
                'rxtx_factor': 1,
                'swap': 0,
                'flavorid': spec["flavid"],
                'disabled': False,
                'is_public': True,
            }
            insert_stmt.execute(row)
    except Exception:
        LOG.info(repr(instance_types_table))
        LOG.exception(_LE('Exception while seeding instance_types table'))
        raise
# NOTE(dprince): we add these here so our schema contains dump tables
# which were added in migration 209 (in Havana). We can drop these in
# Icehouse: https://bugs.launchpad.net/nova/+bug/1266538
def _create_dump_tables(migrate_engine):
    """Create the dump_* tables introduced by migration 209 (Havana).

    Each dump table is a column-for-column copy of its source table,
    minus 'pci_stats', so schema diffs against Havana stay identical.
    """
    meta = MetaData(migrate_engine)
    meta.reflect(migrate_engine)
    source_names = ['compute_node_stats', 'compute_nodes', 'instance_actions',
                    'instance_actions_events', 'instance_faults', 'migrations']
    for source_name in source_names:
        source_table = Table(source_name, meta, autoload=True)
        # NOTE(dprince): The dump_ tables were originally created from an
        # earlier schema version, so the pci_stats column is deliberately
        # excluded to keep schema diffs exactly the same.
        copied_columns = [col.copy() for col in source_table.columns
                          if col.name != 'pci_stats']
        dump_table = Table('dump_' + source_table.name, meta, *copied_columns,
                           mysql_engine='InnoDB')
        dump_table.create()
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
agent_builds = Table('agent_builds', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('hypervisor', String(length=255)),
Column('os', String(length=255)),
Column('architecture', String(length=255)),
Column('version', String(length=255)),
Column('url', String(length=255)),
Column('md5hash', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
aggregate_hosts = Table('aggregate_hosts', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('host', String(length=255)),
Column('aggregate_id', Integer, ForeignKey('aggregates.id'),
nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
aggregate_metadata = Table('aggregate_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('aggregate_id', Integer, ForeignKey('aggregates.id'),
nullable=False),
Column('key', String(length=255), nullable=False),
Column('value', String(length=255), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
aggregates = Table('aggregates', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
block_device_mapping = Table('block_device_mapping', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('device_name', String(length=255), nullable=True),
Column('delete_on_termination', Boolean),
Column('snapshot_id', String(length=36), nullable=True),
Column('volume_id', String(length=36), nullable=True),
Column('volume_size', Integer),
Column('no_device', Boolean),
Column('connection_info', MediumText()),
Column('instance_uuid', String(length=36)),
Column('deleted', Integer),
Column('source_type', String(length=255), nullable=True),
Column('destination_type', String(length=255), nullable=True),
Column('guest_format', String(length=255), nullable=True),
Column('device_type', String(length=255), nullable=True),
Column('disk_bus', String(length=255), nullable=True),
Column('boot_index', Integer),
Column('image_id', String(length=36), nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
bw_usage_cache = Table('bw_usage_cache', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('start_period', DateTime, nullable=False),
Column('last_refreshed', DateTime),
Column('bw_in', BigInteger),
Column('bw_out', BigInteger),
Column('mac', String(length=255)),
Column('uuid', String(length=36)),
Column('last_ctr_in', BigInteger()),
Column('last_ctr_out', BigInteger()),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
cells = Table('cells', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('api_url', String(length=255)),
Column('weight_offset', Float),
Column('weight_scale', Float),
Column('name', String(length=255)),
Column('is_parent', Boolean),
Column('deleted', Integer),
Column('transport_url', String(length=255), nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
certificates = Table('certificates', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('file_name', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
compute_node_stats = Table('compute_node_stats', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('compute_node_id', Integer, nullable=False),
Column('key', String(length=255), nullable=False),
Column('value', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
compute_nodes = Table('compute_nodes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('service_id', Integer, nullable=False),
Column('vcpus', Integer, nullable=False),
Column('memory_mb', Integer, nullable=False),
Column('local_gb', Integer, nullable=False),
Column('vcpus_used', Integer, nullable=False),
Column('memory_mb_used', Integer, nullable=False),
Column('local_gb_used', Integer, nullable=False),
Column('hypervisor_type', MediumText(), nullable=False),
Column('hypervisor_version', Integer, nullable=False),
Column('cpu_info', MediumText(), nullable=False),
Column('disk_available_least', Integer),
Column('free_ram_mb', Integer),
Column('free_disk_gb', Integer),
Column('current_workload', Integer),
Column('running_vms', Integer),
Column('hypervisor_hostname', String(length=255)),
Column('deleted', Integer),
Column('host_ip', InetSmall()),
Column('supported_instances', Text),
Column('pci_stats', Text, nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
console_pools = Table('console_pools', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', InetSmall()),
Column('username', String(length=255)),
Column('password', String(length=255)),
Column('console_type', String(length=255)),
Column('public_hostname', String(length=255)),
Column('host', String(length=255)),
Column('compute_host', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
# NOTE(mriedem): DB2 can't create the FK since we don't have the unique
# constraint on instances.uuid because it's nullable (so a unique
# constraint isn't created for instances.uuid, only a unique index).
consoles_instance_uuid_column_args = ['instance_uuid', String(length=36)]
if migrate_engine.name != 'ibm_db_sa':
consoles_instance_uuid_column_args.append(
ForeignKey('instances.uuid', name='consoles_instance_uuid_fkey'))
consoles = Table('consoles', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_name', String(length=255)),
Column('password', String(length=255)),
Column('port', Integer),
Column('pool_id', Integer, ForeignKey('console_pools.id')),
Column(*consoles_instance_uuid_column_args),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
dns_domains = Table('dns_domains', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('domain', String(length=255), primary_key=True, nullable=False),
Column('scope', String(length=255)),
Column('availability_zone', String(length=255)),
Column('project_id', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
fixed_ips = Table('fixed_ips', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', InetSmall()),
Column('network_id', Integer),
Column('allocated', Boolean),
Column('leased', Boolean),
Column('reserved', Boolean),
Column('virtual_interface_id', Integer),
Column('host', String(length=255)),
Column('instance_uuid', String(length=36)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
floating_ips = Table('floating_ips', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', InetSmall()),
Column('fixed_ip_id', Integer),
Column('project_id', String(length=255)),
Column('host', String(length=255)),
Column('auto_assigned', Boolean),
Column('pool', String(length=255)),
Column('interface', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_faults = Table('instance_faults', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_uuid', String(length=36)),
Column('code', Integer, nullable=False),
Column('message', String(length=255)),
Column('details', MediumText()),
Column('host', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_id_mappings = Table('instance_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_info_caches = Table('instance_info_caches', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('network_info', MediumText()),
Column('instance_uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
groups = Table('instance_groups', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('id', Integer, primary_key=True, nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('uuid', String(length=36), nullable=False),
Column('name', String(length=255)),
UniqueConstraint('uuid', 'deleted',
name='uniq_instance_groups0uuid0deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_metadata = Table('instance_group_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('id', Integer, primary_key=True, nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
Column('group_id', Integer, ForeignKey('instance_groups.id'),
nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_policy = Table('instance_group_policy', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('id', Integer, primary_key=True, nullable=False),
Column('policy', String(length=255)),
Column('group_id', Integer, ForeignKey('instance_groups.id'),
nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_member = Table('instance_group_member', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_id', String(length=255)),
Column('group_id', Integer, ForeignKey('instance_groups.id'),
nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
instance_metadata = Table('instance_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
Column('instance_uuid', String(length=36), nullable=True),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_system_metadata = Table('instance_system_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_uuid', String(length=36), nullable=False),
Column('key', String(length=255), nullable=False),
Column('value', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_type_extra_specs = Table('instance_type_extra_specs', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_type_id', Integer, ForeignKey('instance_types.id'),
nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_type_projects = Table('instance_type_projects', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_type_id', Integer, nullable=False),
Column('project_id', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_types = Table('instance_types', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('name', String(length=255)),
Column('id', Integer, primary_key=True, nullable=False),
Column('memory_mb', Integer, nullable=False),
Column('vcpus', Integer, nullable=False),
Column('swap', Integer, nullable=False),
Column('vcpu_weight', Integer),
Column('flavorid', String(length=255)),
Column('rxtx_factor', Float),
Column('root_gb', Integer),
Column('ephemeral_gb', Integer),
Column('disabled', Boolean),
Column('is_public', Boolean),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
inst_lock_enum = Enum('owner', 'admin', name='instances0locked_by')
instances = Table('instances', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('internal_id', Integer),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('image_ref', String(length=255)),
Column('kernel_id', String(length=255)),
Column('ramdisk_id', String(length=255)),
Column('launch_index', Integer),
Column('key_name', String(length=255)),
Column('key_data', MediumText()),
Column('power_state', Integer),
Column('vm_state', String(length=255)),
Column('memory_mb', Integer),
Column('vcpus', Integer),
Column('hostname', String(length=255)),
Column('host', String(length=255)),
Column('user_data', MediumText()),
Column('reservation_id', String(length=255)),
Column('scheduled_at', DateTime),
Column('launched_at', DateTime),
Column('terminated_at', DateTime),
Column('display_name', String(length=255)),
Column('display_description', String(length=255)),
Column('availability_zone', String(length=255)),
Column('locked', Boolean),
Column('os_type', String(length=255)),
Column('launched_on', MediumText()),
Column('instance_type_id', Integer),
Column('vm_mode', String(length=255)),
Column('uuid', String(length=36)),
Column('architecture', String(length=255)),
Column('root_device_name', String(length=255)),
Column('access_ip_v4', InetSmall()),
Column('access_ip_v6', InetSmall()),
Column('config_drive', String(length=255)),
Column('task_state', String(length=255)),
Column('default_ephemeral_device', String(length=255)),
Column('default_swap_device', String(length=255)),
Column('progress', Integer),
Column('auto_disk_config', Boolean),
Column('shutdown_terminate', Boolean),
Column('disable_terminate', Boolean),
Column('root_gb', Integer),
Column('ephemeral_gb', Integer),
Column('cell_name', String(length=255)),
Column('node', String(length=255)),
Column('deleted', Integer),
Column('locked_by', inst_lock_enum),
Column('cleaned', Integer, default=0),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_actions = Table('instance_actions', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('action', String(length=255)),
Column('instance_uuid', String(length=36)),
Column('request_id', String(length=255)),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('start_time', DateTime),
Column('finish_time', DateTime),
Column('message', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
instance_actions_events = Table('instance_actions_events', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('event', String(length=255)),
Column('action_id', Integer, ForeignKey('instance_actions.id')),
Column('start_time', DateTime),
Column('finish_time', DateTime),
Column('result', String(length=255)),
Column('traceback', Text),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
iscsi_targets = Table('iscsi_targets', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('target_num', Integer),
Column('host', String(length=255)),
Column('volume_id', String(length=36), nullable=True),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
key_pairs = Table('key_pairs', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(length=255)),
Column('user_id', String(length=255)),
Column('fingerprint', String(length=255)),
Column('public_key', MediumText()),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
migrations = Table('migrations', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('source_compute', String(length=255)),
Column('dest_compute', String(length=255)),
Column('dest_host', String(length=255)),
Column('status', String(length=255)),
Column('instance_uuid', String(length=36)),
Column('old_instance_type_id', Integer),
Column('new_instance_type_id', Integer),
Column('source_node', String(length=255)),
Column('dest_node', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
networks = Table('networks', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('injected', Boolean),
Column('cidr', Inet()),
Column('netmask', InetSmall()),
Column('bridge', String(length=255)),
Column('gateway', InetSmall()),
Column('broadcast', InetSmall()),
Column('dns1', InetSmall()),
Column('vlan', Integer),
Column('vpn_public_address', InetSmall()),
Column('vpn_public_port', Integer),
Column('vpn_private_address', InetSmall()),
Column('dhcp_start', InetSmall()),
Column('project_id', String(length=255)),
Column('host', String(length=255)),
Column('cidr_v6', Inet()),
Column('gateway_v6', InetSmall()),
Column('label', String(length=255)),
Column('netmask_v6', InetSmall()),
Column('bridge_interface', String(length=255)),
Column('multi_host', Boolean),
Column('dns2', InetSmall()),
Column('uuid', String(length=36)),
Column('priority', Integer),
Column('rxtx_base', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
pci_devices_uc_name = 'uniq_pci_devices0compute_node_id0address0deleted'
pci_devices = Table('pci_devices', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Integer, default=0, nullable=False),
Column('id', Integer, primary_key=True),
Column('compute_node_id', Integer, nullable=False),
Column('address', String(12), nullable=False),
Column('product_id', String(4)),
Column('vendor_id', String(4)),
Column('dev_type', String(8)),
Column('dev_id', String(255)),
Column('label', String(255), nullable=False),
Column('status', String(36), nullable=False),
Column('extra_info', Text, nullable=True),
Column('instance_uuid', String(36), nullable=True),
Index('ix_pci_devices_compute_node_id_deleted',
'compute_node_id', 'deleted'),
Index('ix_pci_devices_instance_uuid_deleted',
'instance_uuid', 'deleted'),
UniqueConstraint('compute_node_id',
'address', 'deleted',
name=pci_devices_uc_name),
mysql_engine='InnoDB',
mysql_charset='utf8')
provider_fw_rules = Table('provider_fw_rules', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('protocol', String(length=5)),
Column('from_port', Integer),
Column('to_port', Integer),
Column('cidr', Inet()),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quota_classes = Table('quota_classes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('class_name', String(length=255)),
Column('resource', String(length=255)),
Column('hard_limit', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quota_usages = Table('quota_usages', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('project_id', String(length=255)),
Column('resource', String(length=255)),
Column('in_use', Integer, nullable=False),
Column('reserved', Integer, nullable=False),
Column('until_refresh', Integer),
Column('deleted', Integer),
Column('user_id', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quotas = Table('quotas', meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('project_id', String(length=255)),
Column('resource', String(length=255), nullable=False),
Column('hard_limit', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
uniq_name = "uniq_project_user_quotas0user_id0project_id0resource0deleted"
project_user_quotas = Table('project_user_quotas', meta,
Column('id', Integer, primary_key=True,
nullable=False),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('user_id',
String(length=255),
nullable=False),
Column('project_id',
String(length=255),
nullable=False),
Column('resource',
String(length=255),
nullable=False),
Column('hard_limit', Integer, nullable=True),
UniqueConstraint('user_id', 'project_id', 'resource',
'deleted', name=uniq_name),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
reservations = Table('reservations', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('usage_id', Integer, nullable=False),
Column('project_id', String(length=255)),
Column('resource', String(length=255)),
Column('delta', Integer, nullable=False),
Column('expire', DateTime),
Column('deleted', Integer),
Column('user_id', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
s3_images = Table('s3_images', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_group_instance_association = \
Table('security_group_instance_association', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('security_group_id', Integer),
Column('instance_uuid', String(length=36)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_group_rules = Table('security_group_rules', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('parent_group_id', Integer, ForeignKey('security_groups.id')),
Column('protocol', String(length=255)),
Column('from_port', Integer),
Column('to_port', Integer),
Column('cidr', Inet()),
Column('group_id', Integer, ForeignKey('security_groups.id')),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_groups = Table('security_groups', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(length=255)),
Column('description', String(length=255)),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_group_default_rules = Table('security_group_default_rules', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer, default=0),
Column('id', Integer, primary_key=True, nullable=False),
Column('protocol', String(length=5)),
Column('from_port', Integer),
Column('to_port', Integer),
Column('cidr', Inet()),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
services = Table('services', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('host', String(length=255)),
Column('binary', String(length=255)),
Column('topic', String(length=255)),
Column('report_count', Integer, nullable=False),
Column('disabled', Boolean),
Column('deleted', Integer),
Column('disabled_reason', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
snapshot_id_mappings = Table('snapshot_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
snapshots = Table('snapshots', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', String(length=36), primary_key=True, nullable=False),
Column('volume_id', String(length=36), nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('status', String(length=255)),
Column('progress', String(length=255)),
Column('volume_size', Integer),
Column('scheduled_at', DateTime),
Column('display_name', String(length=255)),
Column('display_description', String(length=255)),
Column('deleted', String(length=36)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
task_log = Table('task_log', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('task_name', String(length=255), nullable=False),
Column('state', String(length=255), nullable=False),
Column('host', String(length=255), nullable=False),
Column('period_beginning', DateTime, nullable=False),
Column('period_ending', DateTime, nullable=False),
Column('message', String(length=255), nullable=False),
Column('task_items', Integer),
Column('errors', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
virtual_interfaces = Table('virtual_interfaces', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', String(length=255)),
Column('network_id', Integer),
Column('uuid', String(length=36)),
Column('instance_uuid', String(length=36), nullable=True),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_id_mappings = Table('volume_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volumes = Table('volumes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', String(length=36), primary_key=True, nullable=False),
Column('ec2_id', String(length=255)),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('host', String(length=255)),
Column('size', Integer),
Column('availability_zone', String(length=255)),
Column('mountpoint', String(length=255)),
Column('status', String(length=255)),
Column('attach_status', String(length=255)),
Column('scheduled_at', DateTime),
Column('launched_at', DateTime),
Column('terminated_at', DateTime),
Column('display_name', String(length=255)),
Column('display_description', String(length=255)),
Column('provider_location', String(length=256)),
Column('provider_auth', String(length=256)),
Column('snapshot_id', String(length=36)),
Column('volume_type_id', Integer),
Column('instance_uuid', String(length=36)),
Column('attach_time', DateTime),
Column('deleted', String(length=36)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_usage_cache = Table('volume_usage_cache', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('volume_id', String(36), nullable=False),
Column('tot_last_refreshed', DateTime(timezone=False)),
Column('tot_reads', BigInteger(), default=0),
Column('tot_read_bytes', BigInteger(), default=0),
Column('tot_writes', BigInteger(), default=0),
Column('tot_write_bytes', BigInteger(), default=0),
Column('curr_last_refreshed', DateTime(timezone=False)),
Column('curr_reads', BigInteger(), default=0),
Column('curr_read_bytes', BigInteger(), default=0),
Column('curr_writes', BigInteger(), default=0),
Column('curr_write_bytes', BigInteger(), default=0),
Column('deleted', Integer),
Column("instance_uuid", String(length=36)),
Column("project_id", String(length=36)),
Column("user_id", String(length=36)),
Column("availability_zone", String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instances.create()
Index('project_id', instances.c.project_id).create()
Index('uuid', instances.c.uuid, unique=True).create()
# create all tables
tables = [aggregates, console_pools, instance_types,
security_groups, snapshots, volumes,
# those that are children and others later
agent_builds, aggregate_hosts, aggregate_metadata,
block_device_mapping, bw_usage_cache, cells,
certificates, compute_node_stats, compute_nodes, consoles,
dns_domains, fixed_ips, floating_ips,
instance_faults, instance_id_mappings, instance_info_caches,
instance_metadata, instance_system_metadata,
instance_type_extra_specs, instance_type_projects,
instance_actions, instance_actions_events,
groups, group_metadata, group_policy, group_member,
iscsi_targets, key_pairs, migrations, networks,
pci_devices, provider_fw_rules, quota_classes, quota_usages,
quotas, project_user_quotas,
reservations, s3_images, security_group_instance_association,
security_group_rules, security_group_default_rules,
services, snapshot_id_mappings, task_log,
virtual_interfaces,
volume_id_mappings,
volume_usage_cache]
for table in tables:
try:
table.create()
except Exception:
LOG.info(repr(table))
LOG.exception(_LE('Exception while creating table.'))
raise
# task log unique constraint
task_log_uc = "uniq_task_log0task_name0host0period_beginning0period_ending"
task_log_cols = ('task_name', 'host', 'period_beginning', 'period_ending')
uc = UniqueConstraint(*task_log_cols, table=task_log, name=task_log_uc)
uc.create()
# networks unique constraint
UniqueConstraint('vlan', 'deleted', table=networks,
name='uniq_networks0vlan0deleted').create()
# instance_type_name constraint
UniqueConstraint('name', 'deleted', table=instance_types,
name='uniq_instance_types0name0deleted').create()
# flavorid unique constraint
UniqueConstraint('flavorid', 'deleted', table=instance_types,
name='uniq_instance_types0flavorid0deleted').create()
# keypair contraint
UniqueConstraint('user_id', 'name', 'deleted', table=key_pairs,
name='uniq_key_pairs0user_id0name0deleted').create()
# instance_type_projects constraint
inst_type_uc_name = 'uniq_instance_type_projects0instance_type_id0' + \
'project_id0deleted'
UniqueConstraint('instance_type_id', 'project_id', 'deleted',
table=instance_type_projects,
name=inst_type_uc_name).create()
# floating_ips unique constraint
UniqueConstraint('address', 'deleted',
table=floating_ips,
name='uniq_floating_ips0address0deleted').create()
# instance_info_caches
UniqueConstraint('instance_uuid',
table=instance_info_caches,
name='uniq_instance_info_caches0instance_uuid').create()
UniqueConstraint('address', 'deleted',
table=virtual_interfaces,
name='uniq_virtual_interfaces0address0deleted').create()
# cells
UniqueConstraint('name', 'deleted',
table=cells,
name='uniq_cells0name0deleted').create()
# security_groups
uc = UniqueConstraint('project_id', 'name', 'deleted',
table=security_groups,
name='uniq_security_groups0project_id0name0deleted')
uc.create()
# quotas
UniqueConstraint('project_id', 'resource', 'deleted',
table=quotas,
name='uniq_quotas0project_id0resource0deleted').create()
# fixed_ips
UniqueConstraint('address', 'deleted',
table=fixed_ips,
name='uniq_fixed_ips0address0deleted').create()
# services
UniqueConstraint('host', 'topic', 'deleted',
table=services,
name='uniq_services0host0topic0deleted').create()
UniqueConstraint('host', 'binary', 'deleted',
table=services,
name='uniq_services0host0binary0deleted').create()
# agent_builds
uc_name = 'uniq_agent_builds0hypervisor0os0architecture0deleted'
UniqueConstraint('hypervisor', 'os', 'architecture', 'deleted',
table=agent_builds,
name=uc_name).create()
uc_name = 'uniq_console_pools0host0console_type0compute_host0deleted'
UniqueConstraint('host', 'console_type', 'compute_host', 'deleted',
table=console_pools,
name=uc_name).create()
uc_name = 'uniq_aggregate_hosts0host0aggregate_id0deleted'
UniqueConstraint('host', 'aggregate_id', 'deleted',
table=aggregate_hosts,
name=uc_name).create()
uc_name = 'uniq_aggregate_metadata0aggregate_id0key0deleted'
UniqueConstraint('aggregate_id', 'key', 'deleted',
table=aggregate_metadata,
name=uc_name).create()
uc_name = 'uniq_instance_type_extra_specs0instance_type_id0key0deleted'
UniqueConstraint('instance_type_id', 'key', 'deleted',
table=instance_type_extra_specs,
name=uc_name).create()
# created first (to preserve ordering for schema diffs)
mysql_pre_indexes = [
Index('instance_type_id', instance_type_projects.c.instance_type_id),
Index('project_id', dns_domains.c.project_id),
Index('fixed_ip_id', floating_ips.c.fixed_ip_id),
Index('network_id', virtual_interfaces.c.network_id),
Index('network_id', fixed_ips.c.network_id),
Index('fixed_ips_virtual_interface_id_fkey',
fixed_ips.c.virtual_interface_id),
Index('address', fixed_ips.c.address),
Index('fixed_ips_instance_uuid_fkey', fixed_ips.c.instance_uuid),
Index('instance_uuid', instance_system_metadata.c.instance_uuid),
Index('iscsi_targets_volume_id_fkey', iscsi_targets.c.volume_id),
Index('snapshot_id', block_device_mapping.c.snapshot_id),
Index('usage_id', reservations.c.usage_id),
Index('virtual_interfaces_instance_uuid_fkey',
virtual_interfaces.c.instance_uuid),
Index('volume_id', block_device_mapping.c.volume_id),
Index('security_group_id',
security_group_instance_association.c.security_group_id),
]
# Common indexes (indexes we apply to all databases)
# NOTE: order specific for MySQL diff support
common_indexes = [
# aggregate_metadata
Index('aggregate_metadata_key_idx', aggregate_metadata.c.key),
# agent_builds
Index('agent_builds_hypervisor_os_arch_idx',
agent_builds.c.hypervisor,
agent_builds.c.os,
agent_builds.c.architecture),
# block_device_mapping
Index('block_device_mapping_instance_uuid_idx',
block_device_mapping.c.instance_uuid),
Index('block_device_mapping_instance_uuid_device_name_idx',
block_device_mapping.c.instance_uuid,
block_device_mapping.c.device_name),
# NOTE(dprince): This is now a duplicate index on MySQL and needs to
# be removed there. We leave it here so the Index ordering
# matches on schema diffs (for MySQL).
# See Havana migration 186_new_bdm_format where we dropped the
# virtual_name column.
# IceHouse fix is here: https://bugs.launchpad.net/nova/+bug/1265839
Index(
'block_device_mapping_instance_uuid_virtual_name_device_name_idx',
block_device_mapping.c.instance_uuid,
block_device_mapping.c.device_name),
Index('block_device_mapping_instance_uuid_volume_id_idx',
block_device_mapping.c.instance_uuid,
block_device_mapping.c.volume_id),
# bw_usage_cache
Index('bw_usage_cache_uuid_start_period_idx',
bw_usage_cache.c.uuid, bw_usage_cache.c.start_period),
Index('certificates_project_id_deleted_idx',
certificates.c.project_id, certificates.c.deleted),
Index('certificates_user_id_deleted_idx', certificates.c.user_id,
certificates.c.deleted),
# compute_node_stats
Index('ix_compute_node_stats_compute_node_id',
compute_node_stats.c.compute_node_id),
Index('compute_node_stats_node_id_and_deleted_idx',
compute_node_stats.c.compute_node_id,
compute_node_stats.c.deleted),
# consoles
Index('consoles_instance_uuid_idx', consoles.c.instance_uuid),
# dns_domains
Index('dns_domains_domain_deleted_idx',
dns_domains.c.domain, dns_domains.c.deleted),
# fixed_ips
Index('fixed_ips_host_idx', fixed_ips.c.host),
Index('fixed_ips_network_id_host_deleted_idx', fixed_ips.c.network_id,
fixed_ips.c.host, fixed_ips.c.deleted),
Index('fixed_ips_address_reserved_network_id_deleted_idx',
fixed_ips.c.address, fixed_ips.c.reserved,
fixed_ips.c.network_id, fixed_ips.c.deleted),
Index('fixed_ips_deleted_allocated_idx', fixed_ips.c.address,
fixed_ips.c.deleted, fixed_ips.c.allocated),
# floating_ips
Index('floating_ips_host_idx', floating_ips.c.host),
Index('floating_ips_project_id_idx', floating_ips.c.project_id),
Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
floating_ips.c.pool, floating_ips.c.deleted,
floating_ips.c.fixed_ip_id, floating_ips.c.project_id),
# group_member
Index('instance_group_member_instance_idx',
group_member.c.instance_id),
# group_metadata
Index('instance_group_metadata_key_idx', group_metadata.c.key),
# group_policy
Index('instance_group_policy_policy_idx', group_policy.c.policy),
# instances
Index('instances_reservation_id_idx',
instances.c.reservation_id),
Index('instances_terminated_at_launched_at_idx',
instances.c.terminated_at,
instances.c.launched_at),
Index('instances_task_state_updated_at_idx',
instances.c.task_state,
instances.c.updated_at),
Index('instances_host_deleted_idx', instances.c.host,
instances.c.deleted),
Index('instances_uuid_deleted_idx', instances.c.uuid,
instances.c.deleted),
Index('instances_host_node_deleted_idx', instances.c.host,
instances.c.node, instances.c.deleted),
Index('instances_host_deleted_cleaned_idx',
instances.c.host, instances.c.deleted,
instances.c.cleaned),
# instance_actions
Index('instance_uuid_idx', instance_actions.c.instance_uuid),
Index('request_id_idx', instance_actions.c.request_id),
# instance_faults
Index('instance_faults_host_idx', instance_faults.c.host),
Index('instance_faults_instance_uuid_deleted_created_at_idx',
instance_faults.c.instance_uuid, instance_faults.c.deleted,
instance_faults.c.created_at),
# instance_id_mappings
Index('ix_instance_id_mappings_uuid', instance_id_mappings.c.uuid),
# instance_metadata
Index('instance_metadata_instance_uuid_idx',
instance_metadata.c.instance_uuid),
# instance_type_extra_specs
Index('instance_type_extra_specs_instance_type_id_key_idx',
instance_type_extra_specs.c.instance_type_id,
instance_type_extra_specs.c.key),
# iscsi_targets
Index('iscsi_targets_host_idx', iscsi_targets.c.host),
Index('iscsi_targets_host_volume_id_deleted_idx',
iscsi_targets.c.host, iscsi_targets.c.volume_id,
iscsi_targets.c.deleted),
# migrations
Index('migrations_by_host_nodes_and_status_idx',
migrations.c.deleted, migrations.c.source_compute,
migrations.c.dest_compute, migrations.c.source_node,
migrations.c.dest_node, migrations.c.status),
Index('migrations_instance_uuid_and_status_idx',
migrations.c.deleted, migrations.c.instance_uuid,
migrations.c.status),
# networks
Index('networks_host_idx', networks.c.host),
Index('networks_cidr_v6_idx', networks.c.cidr_v6),
Index('networks_bridge_deleted_idx', networks.c.bridge,
networks.c.deleted),
Index('networks_project_id_deleted_idx', networks.c.project_id,
networks.c.deleted),
Index('networks_uuid_project_id_deleted_idx',
networks.c.uuid, networks.c.project_id, networks.c.deleted),
Index('networks_vlan_deleted_idx', networks.c.vlan,
networks.c.deleted),
# project_user_quotas
Index('project_user_quotas_project_id_deleted_idx',
project_user_quotas.c.project_id,
project_user_quotas.c.deleted),
Index('project_user_quotas_user_id_deleted_idx',
project_user_quotas.c.user_id, project_user_quotas.c.deleted),
# reservations
Index('ix_reservations_project_id', reservations.c.project_id),
Index('ix_reservations_user_id_deleted',
reservations.c.user_id, reservations.c.deleted),
Index('reservations_uuid_idx', reservations.c.uuid),
# security_group_instance_association
Index('security_group_instance_association_instance_uuid_idx',
security_group_instance_association.c.instance_uuid),
# task_log
Index('ix_task_log_period_beginning', task_log.c.period_beginning),
Index('ix_task_log_host', task_log.c.host),
Index('ix_task_log_period_ending', task_log.c.period_ending),
# quota_classes
Index('ix_quota_classes_class_name', quota_classes.c.class_name),
# quota_usages
Index('ix_quota_usages_project_id', quota_usages.c.project_id),
Index('ix_quota_usages_user_id_deleted',
quota_usages.c.user_id, quota_usages.c.deleted),
# volumes
Index('volumes_instance_uuid_idx', volumes.c.instance_uuid),
]
# MySQL specific indexes
if migrate_engine.name == 'mysql':
for index in mysql_pre_indexes:
index.create(migrate_engine)
# mysql-specific index by leftmost 100 chars. (mysql gets angry if the
# index key length is too long.)
sql = ("create index migrations_by_host_nodes_and_status_idx ON "
"migrations (deleted, source_compute(100), dest_compute(100), "
"source_node(100), dest_node(100), status)")
migrate_engine.execute(sql)
# PostgreSQL specific indexes
if migrate_engine.name == 'postgresql':
Index('address', fixed_ips.c.address).create()
# NOTE(dprince): PostgreSQL doesn't allow duplicate indexes
# so we skip creation of select indexes (so schemas match exactly).
POSTGRES_INDEX_SKIPS = [
# See Havana migration 186_new_bdm_format where we dropped the
# virtual_name column.
# IceHouse fix is here: https://bugs.launchpad.net/nova/+bug/1265839
'block_device_mapping_instance_uuid_virtual_name_device_name_idx'
]
# NOTE(mriedem): DB2 doesn't allow duplicate indexes either.
DB2_INDEX_SKIPS = POSTGRES_INDEX_SKIPS
MYSQL_INDEX_SKIPS = [
# we create this one manually for MySQL above
'migrations_by_host_nodes_and_status_idx'
]
for index in common_indexes:
if ((migrate_engine.name == 'postgresql' and
index.name in POSTGRES_INDEX_SKIPS) or
(migrate_engine.name == 'mysql' and
index.name in MYSQL_INDEX_SKIPS) or
(migrate_engine.name == 'ibm_db_sa' and
index.name in DB2_INDEX_SKIPS)):
continue
else:
index.create(migrate_engine)
Index('project_id', dns_domains.c.project_id).drop
# Common foreign keys
fkeys = [
[[instance_type_projects.c.instance_type_id],
[instance_types.c.id],
'instance_type_projects_ibfk_1'],
[[iscsi_targets.c.volume_id],
[volumes.c.id],
'iscsi_targets_volume_id_fkey'],
[[reservations.c.usage_id],
[quota_usages.c.id],
'reservations_ibfk_1'],
[[security_group_instance_association.c.security_group_id],
[security_groups.c.id],
'security_group_instance_association_ibfk_1'],
[[compute_node_stats.c.compute_node_id],
[compute_nodes.c.id],
'fk_compute_node_stats_compute_node_id'],
[[compute_nodes.c.service_id],
[services.c.id],
'fk_compute_nodes_service_id'],
]
# NOTE(mriedem): DB2 doesn't support unique constraints on columns that
# are nullable so we can only create foreign keys on unique constraints
# that actually exist, which excludes any FK on instances.uuid.
if migrate_engine.name != 'ibm_db_sa':
secgroup_instance_association_instance_uuid_fkey = (
'security_group_instance_association_instance_uuid_fkey')
fkeys.extend(
[
[[fixed_ips.c.instance_uuid],
[instances.c.uuid],
'fixed_ips_instance_uuid_fkey'],
[[block_device_mapping.c.instance_uuid],
[instances.c.uuid],
'block_device_mapping_instance_uuid_fkey'],
[[instance_info_caches.c.instance_uuid],
[instances.c.uuid],
'instance_info_caches_instance_uuid_fkey'],
[[instance_metadata.c.instance_uuid],
[instances.c.uuid],
'instance_metadata_instance_uuid_fkey'],
[[instance_system_metadata.c.instance_uuid],
[instances.c.uuid],
'instance_system_metadata_ibfk_1'],
[[security_group_instance_association.c.instance_uuid],
[instances.c.uuid],
secgroup_instance_association_instance_uuid_fkey],
[[virtual_interfaces.c.instance_uuid],
[instances.c.uuid],
'virtual_interfaces_instance_uuid_fkey'],
[[instance_actions.c.instance_uuid],
[instances.c.uuid],
'fk_instance_actions_instance_uuid'],
[[instance_faults.c.instance_uuid],
[instances.c.uuid],
'fk_instance_faults_instance_uuid'],
[[migrations.c.instance_uuid],
[instances.c.uuid],
'fk_migrations_instance_uuid']
])
for fkey_pair in fkeys:
if migrate_engine.name in ('mysql', 'ibm_db_sa'):
# For MySQL and DB2 we name our fkeys explicitly
# so they match Havana
fkey = ForeignKeyConstraint(columns=fkey_pair[0],
refcolumns=fkey_pair[1],
name=fkey_pair[2])
fkey.create()
elif migrate_engine.name == 'postgresql':
# PostgreSQL names things like it wants (correct and compatible!)
fkey = ForeignKeyConstraint(columns=fkey_pair[0],
refcolumns=fkey_pair[1])
fkey.create()
if migrate_engine.name == 'mysql':
# In Folsom we explicitly converted migrate_version to UTF8.
migrate_engine.execute(
'ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8')
# Set default DB charset to UTF8.
migrate_engine.execute(
'ALTER DATABASE %s DEFAULT CHARACTER SET utf8' %
migrate_engine.url.database)
_create_shadow_tables(migrate_engine)
# populate initial instance types
_populate_instance_types(instance_types)
_create_dump_tables(migrate_engine)
| apache-2.0 |
DonaldTrumpHasTinyHands/tiny_hands_pac | documents_gallery/models.py | 1 | 4091 | from django.db import models
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailcore.models import Page
from wagtail.wagtailadmin.edit_handlers import FieldPanel, MultiFieldPanel
from wagtail.wagtaildocs.models import Document
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailsearch import index
from modelcluster.fields import ParentalKey
from modelcluster.tags import ClusterTaggableManager
from taggit.models import TaggedItemBase, Tag
class DocumentsIndexPage(Page):
    """Index page for the documents gallery.

    Acts as the parent for ``DocumentsPage`` instances and exposes the
    live descendant gallery pages to its template via ``get_context``.
    """
    intro = RichTextField(blank=True)
    search_fields = Page.search_fields + (
        index.SearchField('intro'),
    )
    feed_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True, blank=True,
        on_delete=models.SET_NULL, related_name='+')

    @property
    def children(self):
        """Live child pages of this index."""
        return self.get_children().live()

    def get_context(self, request):
        """Add the live descendant ``DocumentsPage`` objects to the context."""
        context = super(DocumentsIndexPage, self).get_context(request)
        context['pages'] = DocumentsPage.objects.live().descendant_of(self)
        return context

    class Meta:
        verbose_name = "Documents Index Page"
# Wagtail editing interface for DocumentsIndexPage: main content tab.
DocumentsIndexPage.content_panels = [
    FieldPanel('title', classname="full title"),
    FieldPanel('intro', classname="full")
]
# Promote tab: standard SEO/metadata fields plus the feed image chooser.
DocumentsIndexPage.promote_panels = [
    MultiFieldPanel(Page.promote_panels, "SEO and metadata fields"),
    ImageChooserPanel('feed_image'),
]
# Through-model linking DocumentsPage to taggit Tag objects, as required
# by ClusterTaggableManager.
class DocumentsPageTag(TaggedItemBase):
    content_object = ParentalKey('documents_gallery.DocumentsPage', related_name='tagged_items')
class DocumentsPage(Page):
    """Gallery page that lists Wagtail documents matching this page's tags.

    Every ``Document`` carrying at least one of the tags assigned to this
    page is collected and paginated (25 per page) into the template context.
    """
    tags = ClusterTaggableManager(through=DocumentsPageTag, blank=True)
    feed_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )

    @property
    def gallery_index(self):
        # Find the closest ancestor which is a documents index page.
        # BUG FIX: the original referenced ``GalleryIndexPage``, a name not
        # defined anywhere in this module, so accessing this property raised
        # NameError. The intended class is this app's DocumentsIndexPage.
        return self.get_ancestors().type(DocumentsIndexPage).last()

    def get_context(self, request):
        """Collect documents for this page's tags and paginate them."""
        # Tag names assigned to this page.
        tags = self.tags.values_list('name', flat=True)
        # Start from an empty queryset and OR in the documents of each tag.
        # NOTE(review): a document with several matching tags appears once
        # per tag, as in the original; add .distinct() if that is unwanted.
        documents = Document.objects.none()
        for tag in tags:
            documents = documents | Document.objects.filter(tags__name=tag)
        # Pagination: show 25 documents per page.
        page = request.GET.get('page')
        paginator = Paginator(documents, 25)
        try:
            documents = paginator.page(page)
        except PageNotAnInteger:
            # Non-numeric page parameter: fall back to the first page.
            documents = paginator.page(1)
        except EmptyPage:
            # Out-of-range page parameter: fall back to the last page.
            documents = paginator.page(paginator.num_pages)
        context = super(DocumentsPage, self).get_context(request)
        context['documents'] = documents
        return context

    class Meta:
        verbose_name = "Documents Page"
# Wagtail editing interface for DocumentsPage: title and tag chooser.
DocumentsPage.content_panels = [
    FieldPanel('title', classname="full title"),
    FieldPanel('tags'),
]
# Promote tab: standard SEO/metadata fields plus the feed image chooser.
DocumentsPage.promote_panels = [
    MultiFieldPanel(Page.promote_panels, "SEO and metadata fields"),
    ImageChooserPanel('feed_image'),
]
SomethingExplosive/android_external_chromium_org | tools/telemetry/telemetry/core/browser_finder.py | 23 | 3817 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds browsers that can be controlled by telemetry."""
import logging
from telemetry.core.chrome import android_browser_finder
from telemetry.core.chrome import cros_browser_finder
from telemetry.core.chrome import desktop_browser_finder
# Browser finders, in priority order: FindBrowser keeps the first default
# returned by a finder's SelectDefaultBrowser().
BROWSER_FINDERS = [
    desktop_browser_finder,
    android_browser_finder,
    cros_browser_finder
]
# Comma-separated union of every browser type the finders can locate.
ALL_BROWSER_TYPES = ','.join([bf.ALL_BROWSER_TYPES for bf in BROWSER_FINDERS])
class BrowserTypeRequiredException(Exception):
    """Raised when --browser is omitted and no default browser can be found."""
    pass
class BrowserFinderException(Exception):
    """Raised for invalid BrowserOptions or errors while locating a browser."""
    pass
def FindBrowser(options):
    """Finds the best PossibleBrowser object to run given the provided
    BrowserOptions object.

    The returned possibility object can then be used to connect to and
    control the located browser.

    Raises:
      BrowserFinderException: if the BrowserOptions argument is improperly
        set or if an error occurs when finding a browser.
      BrowserTypeRequiredException: if no browser type was specified and no
        default browser could be selected.
    """
    # Validate mutually-dependent option pairs up front.
    # (is None / is not None replace the original == None / != None,
    # which violate PEP 8 E711.)
    if options.browser_type == 'exact' and options.browser_executable is None:
        raise BrowserFinderException(
            '--browser=exact requires --browser-executable to be set.')
    if options.browser_type != 'exact' and options.browser_executable is not None:
        raise BrowserFinderException(
            '--browser-executable requires --browser=exact.')
    if options.browser_type == 'cros-chrome' and options.cros_remote is None:
        raise BrowserFinderException(
            'browser_type=cros-chrome requires cros_remote be set.')
    if (options.browser_type != 'cros-chrome' and
        options.browser_type != 'cros-chrome-guest' and
        options.cros_remote is not None):
        raise BrowserFinderException(
            'cros_remote requires browser_type=cros-chrome or cros-chrome-guest.')

    # Gather every available browser; keep the first default any finder picks.
    browsers = []
    default_browser = None
    for finder in BROWSER_FINDERS:
        curr_browsers = finder.FindAllAvailableBrowsers(options)
        if not default_browser:
            default_browser = finder.SelectDefaultBrowser(curr_browsers)
        browsers.extend(curr_browsers)

    if options.browser_type is None:
        if default_browser:
            logging.warning('--browser omitted. Using most recent local build: %s' %
                            default_browser.browser_type)
            options.browser_type = default_browser.browser_type
            return default_browser
        raise BrowserTypeRequiredException(
            '--browser must be specified. Available browsers:\n%s' %
            '\n'.join(sorted(set([b.browser_type for b in browsers]))))

    if options.browser_type == 'any':
        # Prefer browsers in the priority order defined by ALL_BROWSER_TYPES.
        # A key function replaces the original Python 2-only cmp-style
        # comparator passed to list.sort().
        types = ALL_BROWSER_TYPES.split(',')
        browsers.sort(key=lambda b: types.index(b.browser_type))
        if len(browsers) >= 1:
            return browsers[0]
        else:
            return None

    matching_browsers = [b for b in browsers
                         if b.browser_type == options.browser_type and
                         b.SupportsOptions(options)]
    if len(matching_browsers) == 1:
        return matching_browsers[0]
    elif len(matching_browsers) > 1:
        logging.warning('Multiple browsers of the same type found: %s' % (
            repr(matching_browsers)))
        return matching_browsers[0]
    else:
        return None
def GetAllAvailableBrowserTypes(options):
    """Returns a sorted list of browser types supported on this system.

    A BrowserFinderException will be raised if the BrowserOptions argument is
    improperly set or if an error occurs when finding a browser.
    """
    browsers = []
    for finder in BROWSER_FINDERS:
        browsers.extend(finder.FindAllAvailableBrowsers(options))
    # Deduplicate and sort in one step instead of the original
    # set -> list -> in-place sort three-step.
    return sorted(set(browser.browser_type for browser in browsers))
| bsd-3-clause |
SnabbCo/neutron | neutron/tests/unit/ml2/test_rpcapi.py | 16 | 4775 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for ml2 rpc
"""
import mock
from neutron.agent import rpc as agent_rpc
from neutron.common import topics
from neutron.openstack.common import context
from neutron.openstack.common import rpc
from neutron.plugins.ml2.drivers import type_tunnel
from neutron.plugins.ml2 import rpc as plugin_rpc
from neutron.tests import base
class RpcApiTestCase(base.BaseTestCase):
    """Verify that the ml2 plugin/agent RPC APIs emit the expected messages."""

    def _test_rpc_api(self, rpcapi, topic, method, rpc_method, **kwargs):
        """Invoke ``method`` on ``rpcapi`` and check the resulting RPC call.

        BUG FIX: the original used ``setattr(rpc, rpc_method, mock)``, which
        permanently replaced the module-level RPC function and leaked the
        mock into every subsequent test. ``mock.patch.object`` restores the
        real attribute when the context manager exits.
        """
        ctxt = context.RequestContext('fake_user', 'fake_project')
        expected_retval = 'foo' if method == 'call' else None
        expected_msg = rpcapi.make_msg(method, **kwargs)
        expected_msg['version'] = rpcapi.BASE_RPC_API_VERSION
        if rpc_method == 'cast' and method == 'run_instance':
            kwargs['call'] = False
        rpc_method_mock = mock.Mock(return_value=expected_retval)
        # Patch only for the duration of the RPC invocation.
        with mock.patch.object(rpc, rpc_method, rpc_method_mock):
            retval = getattr(rpcapi, method)(ctxt, **kwargs)
        self.assertEqual(retval, expected_retval)
        expected_args = [ctxt, topic, expected_msg]
        for arg, expected_arg in zip(rpc_method_mock.call_args[0],
                                     expected_args):
            self.assertEqual(arg, expected_arg)

    def test_delete_network(self):
        rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT)
        self._test_rpc_api(rpcapi,
                           topics.get_topic_name(topics.AGENT,
                                                 topics.NETWORK,
                                                 topics.DELETE),
                           'network_delete', rpc_method='fanout_cast',
                           network_id='fake_request_spec')

    def test_port_update(self):
        rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT)
        self._test_rpc_api(rpcapi,
                           topics.get_topic_name(topics.AGENT,
                                                 topics.PORT,
                                                 topics.UPDATE),
                           'port_update', rpc_method='fanout_cast',
                           port='fake_port',
                           network_type='fake_network_type',
                           segmentation_id='fake_segmentation_id',
                           physical_network='fake_physical_network')

    def test_tunnel_update(self):
        rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT)
        self._test_rpc_api(rpcapi,
                           topics.get_topic_name(topics.AGENT,
                                                 type_tunnel.TUNNEL,
                                                 topics.UPDATE),
                           'tunnel_update', rpc_method='fanout_cast',
                           tunnel_ip='fake_ip', tunnel_type='gre')

    def test_device_details(self):
        rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
        self._test_rpc_api(rpcapi, topics.PLUGIN,
                           'get_device_details', rpc_method='call',
                           device='fake_device',
                           agent_id='fake_agent_id')

    def test_update_device_down(self):
        rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
        self._test_rpc_api(rpcapi, topics.PLUGIN,
                           'update_device_down', rpc_method='call',
                           device='fake_device',
                           agent_id='fake_agent_id',
                           host='fake_host')

    def test_tunnel_sync(self):
        rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
        self._test_rpc_api(rpcapi, topics.PLUGIN,
                           'tunnel_sync', rpc_method='call',
                           tunnel_ip='fake_tunnel_ip',
                           tunnel_type=None)

    def test_update_device_up(self):
        rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
        self._test_rpc_api(rpcapi, topics.PLUGIN,
                           'update_device_up', rpc_method='call',
                           device='fake_device',
                           agent_id='fake_agent_id',
                           host='fake_host')
| apache-2.0 |
potatolondon/django-nonrel-1-4 | tests/modeltests/many_to_many/tests.py | 25 | 17889 | from __future__ import absolute_import, with_statement
from django.test import TestCase
from .models import Article, Publication
class ManyToManyTests(TestCase):
    def setUp(self):
        """Build the fixture graph shared by every test.

        Four publications and four articles: a1 belongs to p1 only, a2 to
        all four publications, a3 and a4 to p2 only.
        """
        # Create a couple of Publications.
        self.p1 = Publication.objects.create(id=None, title='The Python Journal')
        self.p2 = Publication.objects.create(id=None, title='Science News')
        self.p3 = Publication.objects.create(id=None, title='Science Weekly')
        self.p4 = Publication.objects.create(title='Highlights for Children')
        # Articles and their publication memberships.
        self.a1 = Article.objects.create(id=None, headline='Django lets you build Web apps easily')
        self.a1.publications.add(self.p1)
        self.a2 = Article.objects.create(id=None, headline='NASA uses Python')
        self.a2.publications.add(self.p1, self.p2, self.p3, self.p4)
        self.a3 = Article.objects.create(headline='NASA finds intelligent life on Earth')
        self.a3.publications.add(self.p2)
        self.a4 = Article.objects.create(headline='Oxygen-free diet works wonders')
        self.a4.publications.add(self.p2)
def test_add(self):
# Create an Article.
a5 = Article(id=None, headline='Django lets you reate Web apps easily')
# You can't associate it with a Publication until it's been saved.
self.assertRaises(ValueError, getattr, a5, 'publications')
# Save it!
a5.save()
# Associate the Article with a Publication.
a5.publications.add(self.p1)
self.assertQuerysetEqual(a5.publications.all(),
['<Publication: The Python Journal>'])
# Create another Article, and set it to appear in both Publications.
a6 = Article(id=None, headline='ESA uses Python')
a6.save()
a6.publications.add(self.p1, self.p2)
a6.publications.add(self.p3)
# Adding a second time is OK
a6.publications.add(self.p3)
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
# Adding an object of the wrong type raises TypeError
with self.assertRaisesRegexp(TypeError, "'Publication' instance expected, got <Article.*"):
a6.publications.add(a5)
# Add a Publication directly via publications.add by using keyword arguments.
p4 = a6.publications.create(title='Highlights for Adults')
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Highlights for Adults>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_reverse_add(self):
# Adding via the 'other' end of an m2m
a5 = Article(headline='NASA finds intelligent life on Mars')
a5.save()
self.p2.article_set.add(a5)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA finds intelligent life on Mars>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(a5.publications.all(),
['<Publication: Science News>'])
# Adding via the other end using keywords
new_article = self.p2.article_set.create(headline='Carbon-free diet works wonders')
self.assertQuerysetEqual(
self.p2.article_set.all(),
[
'<Article: Carbon-free diet works wonders>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA finds intelligent life on Mars>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
a6 = self.p2.article_set.all()[3]
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_related_sets(self):
# Article objects have access to their related Publication objects.
self.assertQuerysetEqual(self.a1.publications.all(),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(self.a2.publications.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
# Publication objects have access to their related Article objects.
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.p1.article_set.all(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(Publication.objects.get(id=self.p4.id).article_set.all(),
['<Article: NASA uses Python>'])
def test_selects(self):
# We can perform kwarg queries across m2m relationships
self.assertQuerysetEqual(
Article.objects.filter(publications__id__exact=self.p1.id),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__pk=self.p1.id),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications=self.p1.id),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications=self.p1),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__title__startswith="Science"),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__title__startswith="Science").distinct(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
# The count() function respects distinct() as well.
self.assertEqual(Article.objects.filter(publications__title__startswith="Science").count(), 4)
self.assertEqual(Article.objects.filter(publications__title__startswith="Science").distinct().count(), 3)
self.assertQuerysetEqual(
Article.objects.filter(publications__in=[self.p1.id,self.p2.id]).distinct(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__in=[self.p1.id,self.p2]).distinct(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__in=[self.p1,self.p2]).distinct(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
# Excluding a related item works as you would expect, too (although the SQL
# involved is a little complex).
self.assertQuerysetEqual(Article.objects.exclude(publications=self.p2),
['<Article: Django lets you build Web apps easily>'])
def test_reverse_selects(self):
# Reverse m2m queries are supported (i.e., starting at the table that
# doesn't have a ManyToManyField).
self.assertQuerysetEqual(Publication.objects.filter(id__exact=self.p1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(pk=self.p1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(
Publication.objects.filter(article__headline__startswith="NASA"),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(Publication.objects.filter(article__id__exact=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article__pk=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article=self.a1),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1.id,self.a2.id]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1.id,self.a2]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1,self.a2]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_delete(self):
# If we delete a Publication, its Articles won't be able to access it.
self.p1.delete()
self.assertQuerysetEqual(Publication.objects.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
])
self.assertQuerysetEqual(self.a1.publications.all(), [])
# If we delete an Article, its Publications won't be able to access it.
self.a2.delete()
self.assertQuerysetEqual(Article.objects.all(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
def test_bulk_delete(self):
# Bulk delete some Publications - references to deleted publications should go
Publication.objects.filter(title__startswith='Science').delete()
self.assertQuerysetEqual(Publication.objects.all(),
[
'<Publication: Highlights for Children>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(Article.objects.all(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a2.publications.all(),
[
'<Publication: Highlights for Children>',
'<Publication: The Python Journal>',
])
# Bulk delete some articles - references to deleted objects should go
q = Article.objects.filter(headline__startswith='Django')
self.assertQuerysetEqual(q, ['<Article: Django lets you build Web apps easily>'])
q.delete()
# After the delete, the QuerySet cache needs to be cleared,
# and the referenced objects should be gone
self.assertQuerysetEqual(q, [])
self.assertQuerysetEqual(self.p1.article_set.all(),
['<Article: NASA uses Python>'])
def test_remove(self):
# Removing publication from an article:
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.a4.publications.remove(self.p2)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(self.a4.publications.all(), [])
# And from the other end
self.p2.article_set.remove(self.a3)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(self.a3.publications.all(), [])
def test_assign(self):
# Relation sets can be assigned. Assignment clears any existing set members
self.p2.article_set = [self.a4, self.a3]
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science News>'])
self.a4.publications = [self.p3.id]
self.assertQuerysetEqual(self.p2.article_set.all(),
['<Article: NASA finds intelligent life on Earth>'])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science Weekly>'])
# An alternate to calling clear() is to assign the empty set
self.p2.article_set = []
self.assertQuerysetEqual(self.p2.article_set.all(), [])
self.a4.publications = []
self.assertQuerysetEqual(self.a4.publications.all(), [])
def test_assign_ids(self):
# Relation sets can also be set using primary key values
self.p2.article_set = [self.a4.id, self.a3.id]
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science News>'])
self.a4.publications = [self.p3.id]
self.assertQuerysetEqual(self.p2.article_set.all(),
['<Article: NASA finds intelligent life on Earth>'])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science Weekly>'])
def test_clear(self):
# Relation sets can be cleared:
self.p2.article_set.clear()
self.assertQuerysetEqual(self.p2.article_set.all(), [])
self.assertQuerysetEqual(self.a4.publications.all(), [])
# And you can clear from the other end
self.p2.article_set.add(self.a3, self.a4)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a4.publications.all(),
[
'<Publication: Science News>',
])
self.a4.publications.clear()
self.assertQuerysetEqual(self.a4.publications.all(), [])
self.assertQuerysetEqual(self.p2.article_set.all(),
['<Article: NASA finds intelligent life on Earth>'])
| bsd-3-clause |
tinchoss/Python_Android | python/src/Lib/SimpleXMLRPCServer.py | 51 | 21906 | """Simple XML-RPC Server.
This module can be used to create simple XML-RPC servers
by creating a server and either installing functions, a
class instance, or by extending the SimpleXMLRPCServer
class.
It can also be used to handle XML-RPC requests in a CGI
environment using CGIXMLRPCRequestHandler.
A list of possible usage patterns follows:
1. Install functions:
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()
2. Install an instance:
class MyFuncs:
def __init__(self):
# make all of the string functions available through
# string.func_name
import string
self.string = string
def _listMethods(self):
# implement this method so that system.listMethods
# knows to advertise the strings methods
return list_public_methods(self) + \
['string.' + method for method in list_public_methods(self.string)]
def pow(self, x, y): return pow(x, y)
def add(self, x, y) : return x + y
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(MyFuncs())
server.serve_forever()
3. Install an instance with custom dispatch method:
class Math:
def _listMethods(self):
# this method must be present for system.listMethods
# to work
return ['add', 'pow']
def _methodHelp(self, method):
# this method must be present for system.methodHelp
# to work
if method == 'add':
return "add(2,3) => 5"
elif method == 'pow':
return "pow(x, y[, z]) => number"
else:
# By convention, return empty
# string if no help is available
return ""
def _dispatch(self, method, params):
if method == 'pow':
return pow(*params)
elif method == 'add':
return params[0] + params[1]
else:
raise 'bad method'
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(Math())
server.serve_forever()
4. Subclass SimpleXMLRPCServer:
class MathServer(SimpleXMLRPCServer):
def _dispatch(self, method, params):
try:
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC to prevent potential security
# problems
func = getattr(self, 'export_' + method)
except AttributeError:
raise Exception('method "%s" is not supported' % method)
else:
return func(*params)
def export_add(self, x, y):
return x + y
server = MathServer(("localhost", 8000))
server.serve_forever()
5. CGI script:
server = CGIXMLRPCRequestHandler()
server.register_function(pow)
server.handle_request()
"""
# Written by Brian Quinlan (brian@sweetapp.com).
# Based on code written by Fredrik Lundh.
import xmlrpclib
from xmlrpclib import Fault
import SocketServer
import BaseHTTPServer
import sys
import os
import traceback
try:
import fcntl
except ImportError:
fcntl = None
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
    """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d

    Resolves a dotted attribute name to an object. Raises
    an AttributeError if any attribute in the chain starts with a '_'.

    If the optional allow_dotted_names argument is false, dots are not
    supported and this function operates similar to getattr(obj, attr).
    """
    names = attr.split('.') if allow_dotted_names else [attr]
    for name in names:
        # Refuse to traverse private/internal attributes anywhere in
        # the chain -- this is the module's security boundary.
        if name.startswith('_'):
            raise AttributeError(
                'attempt to access private attribute "%s"' % name
                )
        obj = getattr(obj, name)
    return obj
def list_public_methods(obj):
    """Returns a list of attribute strings, found in the specified
    object, which represent callable attributes"""
    public = []
    for name in dir(obj):
        if name.startswith('_'):
            # Private/dunder names are never advertised.
            continue
        if hasattr(getattr(obj, name), '__call__'):
            public.append(name)
    return public
def remove_duplicates(lst):
    """remove_duplicates([2,2,2,1,3,3]) => [3,1,2]

    Returns a copy of a list without duplicates. Every list
    item must be hashable and the order of the items in the
    resulting list is not defined.
    """
    # A set provides exactly the documented "unique, unordered" semantics
    # in one step, and list() guarantees the result is a real list --
    # the previous dict-based version returned u.keys(), which is a view
    # (not a list) on Python 3.
    return list(set(lst))
class SimpleXMLRPCDispatcher:
"""Mix-in class that dispatches XML-RPC requests.
This class is used to register XML-RPC method handlers
and then to dispatch them. There should never be any
reason to instantiate this class directly.
"""
def __init__(self, allow_none=False, encoding=None):
self.funcs = {}
self.instance = None
self.allow_none = allow_none
self.encoding = encoding
def register_instance(self, instance, allow_dotted_names=False):
"""Registers an instance to respond to XML-RPC requests.
Only one instance can be installed at a time.
If the registered instance has a _dispatch method then that
method will be called with the name of the XML-RPC method and
its parameters as a tuple
e.g. instance._dispatch('add',(2,3))
If the registered instance does not have a _dispatch method
then the instance will be searched to find a matching method
and, if found, will be called. Methods beginning with an '_'
are considered private and will not be called by
SimpleXMLRPCServer.
If a registered function matches a XML-RPC request, then it
will be called instead of the registered instance.
If the optional allow_dotted_names argument is true and the
instance does not have a _dispatch method, method names
containing dots are supported and resolved, as long as none of
the name segments start with an '_'.
*** SECURITY WARNING: ***
Enabling the allow_dotted_names options allows intruders
to access your module's global variables and may allow
intruders to execute arbitrary code on your machine. Only
use this option on a secure, closed network.
"""
self.instance = instance
self.allow_dotted_names = allow_dotted_names
def register_function(self, function, name = None):
"""Registers a function to respond to XML-RPC requests.
The optional name argument can be used to set a Unicode name
for the function.
"""
if name is None:
name = function.__name__
self.funcs[name] = function
def register_introspection_functions(self):
"""Registers the XML-RPC introspection methods in the system
namespace.
see http://xmlrpc.usefulinc.com/doc/reserved.html
"""
self.funcs.update({'system.listMethods' : self.system_listMethods,
'system.methodSignature' : self.system_methodSignature,
'system.methodHelp' : self.system_methodHelp})
def register_multicall_functions(self):
"""Registers the XML-RPC multicall method in the system
namespace.
see http://www.xmlrpc.com/discuss/msgReader$1208"""
self.funcs.update({'system.multicall' : self.system_multicall})
def _marshaled_dispatch(self, data, dispatch_method = None):
"""Dispatches an XML-RPC method from marshalled (XML) data.
XML-RPC methods are dispatched from the marshalled (XML) data
using the _dispatch method and the result is returned as
marshalled data. For backwards compatibility, a dispatch
function can be provided as an argument (see comment in
SimpleXMLRPCRequestHandler.do_POST) but overriding the
existing method through subclassing is the prefered means
of changing method dispatch behavior.
"""
try:
params, method = xmlrpclib.loads(data)
# generate response
if dispatch_method is not None:
response = dispatch_method(method, params)
else:
response = self._dispatch(method, params)
# wrap response in a singleton tuple
response = (response,)
response = xmlrpclib.dumps(response, methodresponse=1,
allow_none=self.allow_none, encoding=self.encoding)
except Fault, fault:
response = xmlrpclib.dumps(fault, allow_none=self.allow_none,
encoding=self.encoding)
except:
# report exception back to server
exc_type, exc_value, exc_tb = sys.exc_info()
response = xmlrpclib.dumps(
xmlrpclib.Fault(1, "%s:%s" % (exc_type, exc_value)),
encoding=self.encoding, allow_none=self.allow_none,
)
return response
def system_listMethods(self):
"""system.listMethods() => ['add', 'subtract', 'multiple']
Returns a list of the methods supported by the server."""
methods = self.funcs.keys()
if self.instance is not None:
# Instance can implement _listMethod to return a list of
# methods
if hasattr(self.instance, '_listMethods'):
methods = remove_duplicates(
methods + self.instance._listMethods()
)
# if the instance has a _dispatch method then we
# don't have enough information to provide a list
# of methods
elif not hasattr(self.instance, '_dispatch'):
methods = remove_duplicates(
methods + list_public_methods(self.instance)
)
methods.sort()
return methods
def system_methodSignature(self, method_name):
"""system.methodSignature('add') => [double, int, int]
Returns a list describing the signature of the method. In the
above example, the add method takes two integers as arguments
and returns a double result.
This server does NOT support system.methodSignature."""
# See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html
return 'signatures not supported'
def system_methodHelp(self, method_name):
"""system.methodHelp('add') => "Adds two integers together"
Returns a string containing documentation for the specified method."""
method = None
if method_name in self.funcs:
method = self.funcs[method_name]
elif self.instance is not None:
# Instance can implement _methodHelp to return help for a method
if hasattr(self.instance, '_methodHelp'):
return self.instance._methodHelp(method_name)
# if the instance has a _dispatch method then we
# don't have enough information to provide help
elif not hasattr(self.instance, '_dispatch'):
try:
method = resolve_dotted_attribute(
self.instance,
method_name,
self.allow_dotted_names
)
except AttributeError:
pass
# Note that we aren't checking that the method actually
# be a callable object of some kind
if method is None:
return ""
else:
import pydoc
return pydoc.getdoc(method)
def system_multicall(self, call_list):
"""system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \
[[4], ...]
Allows the caller to package multiple XML-RPC calls into a single
request.
See http://www.xmlrpc.com/discuss/msgReader$1208
"""
results = []
for call in call_list:
method_name = call['methodName']
params = call['params']
try:
# XXX A marshalling error in any response will fail the entire
# multicall. If someone cares they should fix this.
results.append([self._dispatch(method_name, params)])
except Fault, fault:
results.append(
{'faultCode' : fault.faultCode,
'faultString' : fault.faultString}
)
except:
exc_type, exc_value, exc_tb = sys.exc_info()
results.append(
{'faultCode' : 1,
'faultString' : "%s:%s" % (exc_type, exc_value)}
)
return results
def _dispatch(self, method, params):
"""Dispatches the XML-RPC method.
XML-RPC calls are forwarded to a registered function that
matches the called XML-RPC method name. If no such function
exists then the call is forwarded to the registered instance,
if available.
If the registered instance has a _dispatch method then that
method will be called with the name of the XML-RPC method and
its parameters as a tuple
e.g. instance._dispatch('add',(2,3))
If the registered instance does not have a _dispatch method
then the instance will be searched to find a matching method
and, if found, will be called.
Methods beginning with an '_' are considered private and will
not be called.
"""
func = None
try:
# check to see if a matching function has been registered
func = self.funcs[method]
except KeyError:
if self.instance is not None:
# check for a _dispatch method
if hasattr(self.instance, '_dispatch'):
return self.instance._dispatch(method, params)
else:
# call instance method directly
try:
func = resolve_dotted_attribute(
self.instance,
method,
self.allow_dotted_names
)
except AttributeError:
pass
if func is not None:
return func(*params)
else:
raise Exception('method "%s" is not supported' % method)
class SimpleXMLRPCRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Simple XML-RPC request handler class.
    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.
    """
    # Class attribute listing the accessible path components;
    # paths not on this list will result in a 404 error.
    rpc_paths = ('/', '/RPC2')
    def is_rpc_path_valid(self):
        # True if self.path is one of the configured XML-RPC endpoints.
        if self.rpc_paths:
            return self.path in self.rpc_paths
        else:
            # If .rpc_paths is empty, just assume all paths are legal
            return True
    def do_POST(self):
        """Handles the HTTP POST request.
        Attempts to interpret all HTTP POST requests as XML-RPC calls,
        which are forwarded to the server's _dispatch method for handling.
        """
        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return
        try:
            # Get arguments by reading body of request.
            # We read this in chunks to avoid straining
            # socket.read(); around the 10 or 15Mb mark, some platforms
            # begin to have problems (bug #792570).
            max_chunk_size = 10*1024*1024
            size_remaining = int(self.headers["content-length"])
            L = []
            while size_remaining:
                chunk_size = min(size_remaining, max_chunk_size)
                L.append(self.rfile.read(chunk_size))
                size_remaining -= len(L[-1])
            data = ''.join(L)
            # In previous versions of SimpleXMLRPCServer, _dispatch
            # could be overridden in this class, instead of in
            # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
            # check to see if a subclass implements _dispatch and dispatch
            # using that method if present.
            response = self.server._marshaled_dispatch(
                    data, getattr(self, '_dispatch', None)
                )
        except Exception, e: # This should only happen if the module is buggy
            # internal error, report as HTTP server error
            self.send_response(500)
            # Send information about the exception if requested
            # (only when the server opted in via _send_traceback_header;
            # see the SECURITY warning on SimpleXMLRPCServer).
            if hasattr(self.server, '_send_traceback_header') and \
                    self.server._send_traceback_header:
                self.send_header("X-exception", str(e))
                self.send_header("X-traceback", traceback.format_exc())
            self.end_headers()
        else:
            # got a valid XML RPC response
            self.send_response(200)
            self.send_header("Content-type", "text/xml")
            self.send_header("Content-length", str(len(response)))
            self.end_headers()
            self.wfile.write(response)
            # shut down the connection
            self.wfile.flush()
            self.connection.shutdown(1)
    def report_404 (self):
        # Report a 404 error with a short plain-text body.
        self.send_response(404)
        response = 'No such page'
        self.send_header("Content-type", "text/plain")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)
        # shut down the connection
        self.wfile.flush()
        self.connection.shutdown(1)
    def log_request(self, code='-', size='-'):
        """Selectively log an accepted request."""
        # Honors the server's logRequests flag instead of always logging.
        if self.server.logRequests:
            BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)
class SimpleXMLRPCServer(SocketServer.TCPServer,
                         SimpleXMLRPCDispatcher):
    """Simple XML-RPC server.
    Simple XML-RPC server that allows functions and a single instance
    to be installed to handle requests. The default implementation
    attempts to dispatch XML-RPC calls to the functions or instance
    installed in the server. Override the _dispatch method inhereted
    from SimpleXMLRPCDispatcher to change this behavior.
    """
    # Allow quick rebinding of the listening address after a restart.
    allow_reuse_address = True
    # Warning: this is for debugging purposes only! Never set this to True in
    # production code, as will be sending out sensitive information (exception
    # and stack trace details) when exceptions are raised inside
    # SimpleXMLRPCRequestHandler.do_POST
    _send_traceback_header = False
    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None, bind_and_activate=True):
        # Initialize both bases: the dispatcher (method registry) and the
        # TCP server (socket bind/listen, controlled by bind_and_activate).
        self.logRequests = logRequests
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)
        SocketServer.TCPServer.__init__(self, addr, requestHandler, bind_and_activate)
        # [Bug #1222790] If possible, set close-on-exec flag; if a
        # method spawns a subprocess, the subprocess shouldn't have
        # the listening socket open.
        if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
            flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
            flags |= fcntl.FD_CLOEXEC
            fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
    """Simple handler for XML-RPC data passed through CGI."""
    def __init__(self, allow_none=False, encoding=None):
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)
    def handle_xmlrpc(self, request_text):
        """Handle a single XML-RPC request"""
        response = self._marshaled_dispatch(request_text)
        # Emit a CGI response: headers, blank separator line, then the
        # marshalled XML payload on stdout.
        print 'Content-Type: text/xml'
        print 'Content-Length: %d' % len(response)
        print
        sys.stdout.write(response)
    def handle_get(self):
        """Handle a single HTTP GET request.
        Default implementation indicates an error because
        XML-RPC uses the POST method.
        """
        # 400 Bad Request; reuse the stock message/explanation text and
        # error page template from BaseHTTPServer.
        code = 400
        message, explain = \
            BaseHTTPServer.BaseHTTPRequestHandler.responses[code]
        response = BaseHTTPServer.DEFAULT_ERROR_MESSAGE % \
            {
             'code' : code,
             'message' : message,
             'explain' : explain
            }
        print 'Status: %d %s' % (code, message)
        print 'Content-Type: text/html'
        print 'Content-Length: %d' % len(response)
        print
        sys.stdout.write(response)
    def handle_request(self, request_text = None):
        """Handle a single XML-RPC request passed through a CGI post method.
        If no XML data is given then it is read from stdin. The resulting
        XML-RPC response is printed to stdout along with the correct HTTP
        headers.
        """
        if request_text is None and \
            os.environ.get('REQUEST_METHOD', None) == 'GET':
            self.handle_get()
        else:
            # POST data is normally available through stdin
            try:
                # CONTENT_LENGTH may be missing or malformed; fall back to
                # -1, which makes read() consume stdin to EOF.
                length = int(os.environ.get('CONTENT_LENGTH', None))
            except (TypeError, ValueError):
                length = -1
            if request_text is None:
                request_text = sys.stdin.read(length)
            self.handle_xmlrpc(request_text)
if __name__ == '__main__':
    # Demo: serve the built-in pow() and an 'add' lambda on localhost:8000.
    print 'Running XML-RPC server on port 8000'
    server = SimpleXMLRPCServer(("localhost", 8000))
    server.register_function(pow)
    server.register_function(lambda x,y: x+y, 'add')
    server.serve_forever()
| apache-2.0 |
c-amr/camr | stanfordnlp/unidecode/x0c0.py | 253 | 4856 | data = (
'bbweok', # 0x00
'bbweot', # 0x01
'bbweop', # 0x02
'bbweoh', # 0x03
'bbwe', # 0x04
'bbweg', # 0x05
'bbwegg', # 0x06
'bbwegs', # 0x07
'bbwen', # 0x08
'bbwenj', # 0x09
'bbwenh', # 0x0a
'bbwed', # 0x0b
'bbwel', # 0x0c
'bbwelg', # 0x0d
'bbwelm', # 0x0e
'bbwelb', # 0x0f
'bbwels', # 0x10
'bbwelt', # 0x11
'bbwelp', # 0x12
'bbwelh', # 0x13
'bbwem', # 0x14
'bbweb', # 0x15
'bbwebs', # 0x16
'bbwes', # 0x17
'bbwess', # 0x18
'bbweng', # 0x19
'bbwej', # 0x1a
'bbwec', # 0x1b
'bbwek', # 0x1c
'bbwet', # 0x1d
'bbwep', # 0x1e
'bbweh', # 0x1f
'bbwi', # 0x20
'bbwig', # 0x21
'bbwigg', # 0x22
'bbwigs', # 0x23
'bbwin', # 0x24
'bbwinj', # 0x25
'bbwinh', # 0x26
'bbwid', # 0x27
'bbwil', # 0x28
'bbwilg', # 0x29
'bbwilm', # 0x2a
'bbwilb', # 0x2b
'bbwils', # 0x2c
'bbwilt', # 0x2d
'bbwilp', # 0x2e
'bbwilh', # 0x2f
'bbwim', # 0x30
'bbwib', # 0x31
'bbwibs', # 0x32
'bbwis', # 0x33
'bbwiss', # 0x34
'bbwing', # 0x35
'bbwij', # 0x36
'bbwic', # 0x37
'bbwik', # 0x38
'bbwit', # 0x39
'bbwip', # 0x3a
'bbwih', # 0x3b
'bbyu', # 0x3c
'bbyug', # 0x3d
'bbyugg', # 0x3e
'bbyugs', # 0x3f
'bbyun', # 0x40
'bbyunj', # 0x41
'bbyunh', # 0x42
'bbyud', # 0x43
'bbyul', # 0x44
'bbyulg', # 0x45
'bbyulm', # 0x46
'bbyulb', # 0x47
'bbyuls', # 0x48
'bbyult', # 0x49
'bbyulp', # 0x4a
'bbyulh', # 0x4b
'bbyum', # 0x4c
'bbyub', # 0x4d
'bbyubs', # 0x4e
'bbyus', # 0x4f
'bbyuss', # 0x50
'bbyung', # 0x51
'bbyuj', # 0x52
'bbyuc', # 0x53
'bbyuk', # 0x54
'bbyut', # 0x55
'bbyup', # 0x56
'bbyuh', # 0x57
'bbeu', # 0x58
'bbeug', # 0x59
'bbeugg', # 0x5a
'bbeugs', # 0x5b
'bbeun', # 0x5c
'bbeunj', # 0x5d
'bbeunh', # 0x5e
'bbeud', # 0x5f
'bbeul', # 0x60
'bbeulg', # 0x61
'bbeulm', # 0x62
'bbeulb', # 0x63
'bbeuls', # 0x64
'bbeult', # 0x65
'bbeulp', # 0x66
'bbeulh', # 0x67
'bbeum', # 0x68
'bbeub', # 0x69
'bbeubs', # 0x6a
'bbeus', # 0x6b
'bbeuss', # 0x6c
'bbeung', # 0x6d
'bbeuj', # 0x6e
'bbeuc', # 0x6f
'bbeuk', # 0x70
'bbeut', # 0x71
'bbeup', # 0x72
'bbeuh', # 0x73
'bbyi', # 0x74
'bbyig', # 0x75
'bbyigg', # 0x76
'bbyigs', # 0x77
'bbyin', # 0x78
'bbyinj', # 0x79
'bbyinh', # 0x7a
'bbyid', # 0x7b
'bbyil', # 0x7c
'bbyilg', # 0x7d
'bbyilm', # 0x7e
'bbyilb', # 0x7f
'bbyils', # 0x80
'bbyilt', # 0x81
'bbyilp', # 0x82
'bbyilh', # 0x83
'bbyim', # 0x84
'bbyib', # 0x85
'bbyibs', # 0x86
'bbyis', # 0x87
'bbyiss', # 0x88
'bbying', # 0x89
'bbyij', # 0x8a
'bbyic', # 0x8b
'bbyik', # 0x8c
'bbyit', # 0x8d
'bbyip', # 0x8e
'bbyih', # 0x8f
'bbi', # 0x90
'bbig', # 0x91
'bbigg', # 0x92
'bbigs', # 0x93
'bbin', # 0x94
'bbinj', # 0x95
'bbinh', # 0x96
'bbid', # 0x97
'bbil', # 0x98
'bbilg', # 0x99
'bbilm', # 0x9a
'bbilb', # 0x9b
'bbils', # 0x9c
'bbilt', # 0x9d
'bbilp', # 0x9e
'bbilh', # 0x9f
'bbim', # 0xa0
'bbib', # 0xa1
'bbibs', # 0xa2
'bbis', # 0xa3
'bbiss', # 0xa4
'bbing', # 0xa5
'bbij', # 0xa6
'bbic', # 0xa7
'bbik', # 0xa8
'bbit', # 0xa9
'bbip', # 0xaa
'bbih', # 0xab
'sa', # 0xac
'sag', # 0xad
'sagg', # 0xae
'sags', # 0xaf
'san', # 0xb0
'sanj', # 0xb1
'sanh', # 0xb2
'sad', # 0xb3
'sal', # 0xb4
'salg', # 0xb5
'salm', # 0xb6
'salb', # 0xb7
'sals', # 0xb8
'salt', # 0xb9
'salp', # 0xba
'salh', # 0xbb
'sam', # 0xbc
'sab', # 0xbd
'sabs', # 0xbe
'sas', # 0xbf
'sass', # 0xc0
'sang', # 0xc1
'saj', # 0xc2
'sac', # 0xc3
'sak', # 0xc4
'sat', # 0xc5
'sap', # 0xc6
'sah', # 0xc7
'sae', # 0xc8
'saeg', # 0xc9
'saegg', # 0xca
'saegs', # 0xcb
'saen', # 0xcc
'saenj', # 0xcd
'saenh', # 0xce
'saed', # 0xcf
'sael', # 0xd0
'saelg', # 0xd1
'saelm', # 0xd2
'saelb', # 0xd3
'saels', # 0xd4
'saelt', # 0xd5
'saelp', # 0xd6
'saelh', # 0xd7
'saem', # 0xd8
'saeb', # 0xd9
'saebs', # 0xda
'saes', # 0xdb
'saess', # 0xdc
'saeng', # 0xdd
'saej', # 0xde
'saec', # 0xdf
'saek', # 0xe0
'saet', # 0xe1
'saep', # 0xe2
'saeh', # 0xe3
'sya', # 0xe4
'syag', # 0xe5
'syagg', # 0xe6
'syags', # 0xe7
'syan', # 0xe8
'syanj', # 0xe9
'syanh', # 0xea
'syad', # 0xeb
'syal', # 0xec
'syalg', # 0xed
'syalm', # 0xee
'syalb', # 0xef
'syals', # 0xf0
'syalt', # 0xf1
'syalp', # 0xf2
'syalh', # 0xf3
'syam', # 0xf4
'syab', # 0xf5
'syabs', # 0xf6
'syas', # 0xf7
'syass', # 0xf8
'syang', # 0xf9
'syaj', # 0xfa
'syac', # 0xfb
'syak', # 0xfc
'syat', # 0xfd
'syap', # 0xfe
'syah', # 0xff
)
| gpl-2.0 |
XueqingLin/tensorflow | tensorflow/tensorboard/lib/python/http_test.py | 27 | 6269 | # -*- coding: utf-8 -*-
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests HTTP utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import gzip
import io

import six

from tensorflow.python.platform import test
from tensorflow.tensorboard.lib.python import http
class RespondTest(test.TestCase):
  """Tests for the http.Respond helper."""

  def testHelloWorld(self):
    """A simple HTML body is written verbatim with a 200 status."""
    handler = _create_mocked_handler()
    http.Respond(handler, '<b>hello world</b>', 'text/html')
    handler.send_response.assert_called_with(200)
    handler.wfile.write.assert_called_with(b'<b>hello world</b>')
    handler.wfile.flush.assert_called_with()

  def testHeadRequest_doesNotWrite(self):
    """HEAD requests get status and headers but no body bytes."""
    handler = _create_mocked_handler()
    handler.command = 'HEAD'
    http.Respond(handler, '<b>hello world</b>', 'text/html')
    handler.send_response.assert_called_with(200)
    handler.wfile.write.assert_not_called()
    handler.wfile.flush.assert_called_with()

  def testPlainText_appendsUtf8ToContentType(self):
    """text/plain without an explicit charset gets utf-8 appended."""
    handler = _create_mocked_handler()
    http.Respond(handler, 'hello', 'text/plain')
    handler.send_header.assert_any_call(
        'Content-Type', 'text/plain; charset=utf-8')

  def testContentLength_isInBytes(self):
    """Content-Length counts encoded bytes, not characters."""
    # '爱' is a single character but three bytes once utf-8 encoded.
    handler = _create_mocked_handler()
    http.Respond(handler, '爱', 'text/plain')
    handler.send_header.assert_any_call('Content-Length', '3')
    handler = _create_mocked_handler()
    http.Respond(handler, '爱'.encode('utf-8'), 'text/plain')
    handler.send_header.assert_any_call('Content-Length', '3')

  def testResponseCharsetTranscoding(self):
    """Bodies are transcoded into the charset named in the content type."""
    proverb = '要依法治国是赞美那些谁是公义的和惩罚恶人。 - 韩非'
    # input is unicode string, output is gbk string
    handler = _create_mocked_handler()
    http.Respond(handler, proverb, 'text/plain; charset=gbk')
    handler.wfile.write.assert_called_with(proverb.encode('gbk'))
    # input is utf-8 string, output is gbk string
    handler = _create_mocked_handler()
    http.Respond(handler, proverb.encode('utf-8'), 'text/plain; charset=gbk')
    handler.wfile.write.assert_called_with(proverb.encode('gbk'))
    # input is object with unicode strings, output is gbk json
    handler = _create_mocked_handler()
    http.Respond(handler, {'red': proverb}, 'application/json; charset=gbk')
    handler.wfile.write.assert_called_with(
        b'{"red": "' + proverb.encode('gbk') + b'"}')
    # input is object with utf-8 strings, output is gbk json
    handler = _create_mocked_handler()
    http.Respond(
        handler, {'red': proverb.encode('utf-8')},
        'application/json; charset=gbk')
    handler.wfile.write.assert_called_with(
        b'{"red": "' + proverb.encode('gbk') + b'"}')
    # input is object with gbk strings, output is gbk json
    handler = _create_mocked_handler()
    http.Respond(
        handler, {'red': proverb.encode('gbk')},
        'application/json; charset=gbk', encoding='gbk')
    handler.wfile.write.assert_called_with(
        b'{"red": "' + proverb.encode('gbk') + b'"}')

  def testAcceptGzip_compressesResponse(self):
    """Text bodies are gzipped when the client accepts it; images are not."""
    poem = "\n".join([
        "Fanatics have their dreams, wherewith they weave",
        "A paradise for a sect; the savage too",
        "From forth the loftiest fashion of his sleep",
        "Guesses at Heaven; pity these have not",
        "Trac'd upon vellum or wild Indian leaf",
        "The shadows of melodious utterance.",
        "But bare of laurel they live, dream, and die;",
        "For Poesy alone can tell her dreams,",
        "With the fine spell of words alone can save",
        "Imagination from the sable charm",
        "And dumb enchantment. Who alive can say,",
        "'Thou art no Poet may'st not tell thy dreams?'",
        "Since every man whose soul is not a clod",
        "Hath visions, and would speak, if he had loved",
        "And been well nurtured in his mother tongue.",
        "Whether the dream now purpos'd to rehearse",
        "Be poet's or fanatic's will be known",
        "When this warm scribe my hand is in the grave.",
    ])
    handler = _create_mocked_handler(headers={'Accept-Encoding': '*'})
    http.Respond(handler, poem, 'text/plain')
    handler.send_header.assert_any_call('Content-Encoding', 'gzip')
    self.assertEqual(_gunzip(handler.wfile.write.call_args[0][0]),
                     poem.encode('utf-8'))
    handler = _create_mocked_handler(headers={'Accept-Encoding': 'gzip'})
    http.Respond(handler, poem, 'text/plain')
    handler.send_header.assert_any_call('Content-Encoding', 'gzip')
    self.assertEqual(_gunzip(handler.wfile.write.call_args[0][0]),
                     poem.encode('utf-8'))
    # Non-text content types are written uncompressed.
    handler = _create_mocked_handler(headers={'Accept-Encoding': '*'})
    http.Respond(handler, poem, 'image/png')
    handler.wfile.write.assert_any_call(poem.encode('utf-8'))

  def testJson_getsAutoSerialized(self):
    """Python objects are serialized for application/json responses."""
    handler = _create_mocked_handler()
    http.Respond(handler, [1, 2, 3], 'application/json')
    handler.wfile.write.assert_called_with(b'[1, 2, 3]')

  def testExpires_setsCruiseControl(self):
    """expires= turns into a private max-age Cache-Control header."""
    handler = _create_mocked_handler()
    http.Respond(handler, '<b>hello world</b>', 'text/html', expires=60)
    handler.send_header.assert_any_call('Cache-Control', 'private, max-age=60')
def _create_mocked_handler(path='', headers=None):
  """Returns a mock HTTP request handler with the given path and headers."""
  handler = test.mock.Mock()
  handler.wfile = test.mock.Mock()
  handler.path = path
  handler.headers = headers or {}
  return handler
def _gunzip(bs):
  """Decompresses a gzip-encoded byte string.

  Args:
    bs: gzip-compressed bytes.

  Returns:
    The decompressed bytes.
  """
  # io.BytesIO behaves identically on Python 2 and 3, so the six
  # compatibility shim is unnecessary here.
  return gzip.GzipFile(fileobj=io.BytesIO(bs), mode='rb').read()
if __name__ == '__main__':
  # Run the test cases above via the tf test runner.
  test.main()
| apache-2.0 |
sdpython/cvxpy | examples/branch_and_bound.py | 8 | 1488 | """
Branch and bound to solve minimum cardinality problem.
minimize ||A*x - b||^2_2
subject to x in {0, 1}^n
"""
from cvxpy import *
import numpy
try:
from Queue import PriorityQueue
except:
from queue import PriorityQueue
# Problem data.
m = 25
n = 20
numpy.random.seed(1)  # fixed seed for a reproducible instance
A = numpy.matrix(numpy.random.randn(m, n))
b = numpy.matrix(numpy.random.randn(m, 1))
#b = A*numpy.random.uniform(-1, 1, size=(n, 1))
# Construct the problem.
# L and U are per-coordinate lower/upper bounds on the relaxation;
# setting L[i] == U[i] pins x[i] to that value within a branch.
x = Variable(n)
L = Parameter(n)
U = Parameter(n)
f = lambda x: sum_squares(A*x - b)
prob = Problem(Minimize(f(x)),
               [L <= x, x <= U])
visited = 0
best_solution = numpy.inf
best_x = 0
nodes = PriorityQueue()
# Each queue entry is (lower bound, tie-breaker, L, U, next index to fix).
# NOTE(review): the module docstring says x in {0, 1}^n, but the branching
# below fixes coordinates to -1/+1 — confirm the intended domain.
nodes.put((numpy.inf, 0, -numpy.ones(n), numpy.ones(n), 0))
while not nodes.empty():
    visited += 1
    # Evaluate the node with the lowest lower bound.
    _, _, L_val, U_val, idx = nodes.get()
    L.value = L_val
    U.value = U_val
    lower_bound = prob.solve()
    # numpy.sign rounds the relaxed solution to a candidate integer point
    # (0 only if a coordinate is exactly zero), giving an upper bound.
    upper_bound = f(numpy.sign(x.value)).value
    best_solution = min(best_solution, upper_bound)
    if upper_bound == best_solution:
        best_x = numpy.sign(x.value)
    # Add new nodes if not at a leaf and the branch cannot be pruned.
    if idx < n and lower_bound < best_solution:
        for i in [-1, 1]:
            # Fix coordinate idx to i; copies keep the two children independent.
            L_val[idx] = U_val[idx] = i
            nodes.put((lower_bound, i, L_val.copy(), U_val.copy(), idx + 1))
print("Nodes visited: %s out of %s" % (visited, 2**(n+1)-1))
print("Optimal solution:", best_solution)
print(best_x)
| gpl-3.0 |
837468220/python-for-android | python3-alpha/python3-src/Lib/test/test_gettext.py | 102 | 18333 | import os
import base64
import shutil
import gettext
import unittest
from test import support
# TODO:
# - Add new tests, for example for "dgettext"
# - Remove dummy tests, for example testing for single and double quotes
# has no sense, it would have if we were testing a parser (i.e. pygettext)
# - Tests should have only one assert.
GNU_MO_DATA = b'''\
3hIElQAAAAAGAAAAHAAAAEwAAAALAAAAfAAAAAAAAACoAAAAFQAAAKkAAAAjAAAAvwAAAKEAAADj
AAAABwAAAIUBAAALAAAAjQEAAEUBAACZAQAAFgAAAN8CAAAeAAAA9gIAAKEAAAAVAwAABQAAALcD
AAAJAAAAvQMAAAEAAAADAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABQAAAAYAAAACAAAAAFJh
eW1vbmQgTHV4dXJ5IFlhY2gtdABUaGVyZSBpcyAlcyBmaWxlAFRoZXJlIGFyZSAlcyBmaWxlcwBU
aGlzIG1vZHVsZSBwcm92aWRlcyBpbnRlcm5hdGlvbmFsaXphdGlvbiBhbmQgbG9jYWxpemF0aW9u
CnN1cHBvcnQgZm9yIHlvdXIgUHl0aG9uIHByb2dyYW1zIGJ5IHByb3ZpZGluZyBhbiBpbnRlcmZh
Y2UgdG8gdGhlIEdOVQpnZXR0ZXh0IG1lc3NhZ2UgY2F0YWxvZyBsaWJyYXJ5LgBtdWxsdXNrAG51
ZGdlIG51ZGdlAFByb2plY3QtSWQtVmVyc2lvbjogMi4wClBPLVJldmlzaW9uLURhdGU6IDIwMDAt
MDgtMjkgMTI6MTktMDQ6MDAKTGFzdC1UcmFuc2xhdG9yOiBKLiBEYXZpZCBJYsOhw7FleiA8ai1k
YXZpZEBub29zLmZyPgpMYW5ndWFnZS1UZWFtOiBYWCA8cHl0aG9uLWRldkBweXRob24ub3JnPgpN
SU1FLVZlcnNpb246IDEuMApDb250ZW50LVR5cGU6IHRleHQvcGxhaW47IGNoYXJzZXQ9aXNvLTg4
NTktMQpDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiBub25lCkdlbmVyYXRlZC1CeTogcHlnZXR0
ZXh0LnB5IDEuMQpQbHVyYWwtRm9ybXM6IG5wbHVyYWxzPTI7IHBsdXJhbD1uIT0xOwoAVGhyb2F0
d29iYmxlciBNYW5ncm92ZQBIYXkgJXMgZmljaGVybwBIYXkgJXMgZmljaGVyb3MAR3V2ZiB6YnFo
eXIgY2ViaXZxcmYgdmFncmVhbmd2YmFueXZtbmd2YmEgbmFxIHlicG55dm1uZ3ZiYQpmaGNjYmVn
IHNiZSBsYmhlIENsZ3ViYSBjZWJ0ZW56ZiBvbCBjZWJpdnF2YXQgbmEgdmFncmVzbnByIGdiIGd1
ciBUQUgKdHJnZ3JrZyB6cmZmbnRyIHBuZ255YnQgeXZvZW5lbC4AYmFjb24Ad2luayB3aW5rAA==
'''
UMO_DATA = b'''\
3hIElQAAAAACAAAAHAAAACwAAAAFAAAAPAAAAAAAAABQAAAABAAAAFEAAAAPAQAAVgAAAAQAAABm
AQAAAQAAAAIAAAAAAAAAAAAAAAAAAAAAYWLDngBQcm9qZWN0LUlkLVZlcnNpb246IDIuMApQTy1S
ZXZpc2lvbi1EYXRlOiAyMDAzLTA0LTExIDEyOjQyLTA0MDAKTGFzdC1UcmFuc2xhdG9yOiBCYXJy
eSBBLiBXQXJzYXcgPGJhcnJ5QHB5dGhvbi5vcmc+Ckxhbmd1YWdlLVRlYW06IFhYIDxweXRob24t
ZGV2QHB5dGhvbi5vcmc+Ck1JTUUtVmVyc2lvbjogMS4wCkNvbnRlbnQtVHlwZTogdGV4dC9wbGFp
bjsgY2hhcnNldD11dGYtOApDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiA3Yml0CkdlbmVyYXRl
ZC1CeTogbWFudWFsbHkKAMKkeXoA
'''
MMO_DATA = b'''\
3hIElQAAAAABAAAAHAAAACQAAAADAAAALAAAAAAAAAA4AAAAeAEAADkAAAABAAAAAAAAAAAAAAAA
UHJvamVjdC1JZC1WZXJzaW9uOiBObyBQcm9qZWN0IDAuMApQT1QtQ3JlYXRpb24tRGF0ZTogV2Vk
IERlYyAxMSAwNzo0NDoxNSAyMDAyClBPLVJldmlzaW9uLURhdGU6IDIwMDItMDgtMTQgMDE6MTg6
NTgrMDA6MDAKTGFzdC1UcmFuc2xhdG9yOiBKb2huIERvZSA8amRvZUBleGFtcGxlLmNvbT4KSmFu
ZSBGb29iYXIgPGpmb29iYXJAZXhhbXBsZS5jb20+Ckxhbmd1YWdlLVRlYW06IHh4IDx4eEBleGFt
cGxlLmNvbT4KTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UeXBlOiB0ZXh0L3BsYWluOyBjaGFy
c2V0PWlzby04ODU5LTE1CkNvbnRlbnQtVHJhbnNmZXItRW5jb2Rpbmc6IHF1b3RlZC1wcmludGFi
bGUKR2VuZXJhdGVkLUJ5OiBweWdldHRleHQucHkgMS4zCgA=
'''
# On-disk layout for the test locale: xx/LC_MESSAGES/*.mo, created in setUp().
LOCALEDIR = os.path.join('xx', 'LC_MESSAGES')
MOFILE = os.path.join(LOCALEDIR, 'gettext.mo')    # decoded from GNU_MO_DATA
UMOFILE = os.path.join(LOCALEDIR, 'ugettext.mo')  # decoded from UMO_DATA
MMOFILE = os.path.join(LOCALEDIR, 'metadata.mo')  # decoded from MMO_DATA
class GettextBaseTest(unittest.TestCase):
    """Common fixture: writes the .mo catalogs to disk and forces LANGUAGE=xx."""

    def setUp(self):
        if not os.path.isdir(LOCALEDIR):
            os.makedirs(LOCALEDIR)
        # Materialize each base64-encoded catalog as a binary .mo file.
        for path, encoded in ((MOFILE, GNU_MO_DATA),
                              (UMOFILE, UMO_DATA),
                              (MMOFILE, MMO_DATA)):
            with open(path, 'wb') as catalog:
                catalog.write(base64.decodebytes(encoded))
        self.env = support.EnvironmentVarGuard()
        self.env['LANGUAGE'] = 'xx'
        gettext._translations.clear()

    def tearDown(self):
        self.env.__exit__()
        del self.env
        shutil.rmtree(os.path.split(LOCALEDIR)[0])
class GettextTestCase1(GettextBaseTest):
    """Tests the gettext.install() API, which binds _() into builtins."""

    def setUp(self):
        GettextBaseTest.setUp(self)
        self.localedir = os.curdir
        self.mofile = MOFILE
        # Installs _ into the builtins namespace for the tests below.
        gettext.install('gettext', self.localedir)

    def test_some_translations(self):
        eq = self.assertEqual
        # test some translations
        eq(_('albatross'), 'albatross')
        eq(_('mullusk'), 'bacon')
        eq(_(r'Raymond Luxury Yach-t'), 'Throatwobbler Mangrove')
        eq(_(r'nudge nudge'), 'wink wink')

    def test_double_quotes(self):
        eq = self.assertEqual
        # double quotes
        eq(_("albatross"), 'albatross')
        eq(_("mullusk"), 'bacon')
        eq(_(r"Raymond Luxury Yach-t"), 'Throatwobbler Mangrove')
        eq(_(r"nudge nudge"), 'wink wink')

    def test_triple_single_quotes(self):
        eq = self.assertEqual
        # triple single quotes
        eq(_('''albatross'''), 'albatross')
        eq(_('''mullusk'''), 'bacon')
        eq(_(r'''Raymond Luxury Yach-t'''), 'Throatwobbler Mangrove')
        eq(_(r'''nudge nudge'''), 'wink wink')

    def test_triple_double_quotes(self):
        eq = self.assertEqual
        # triple double quotes
        eq(_("""albatross"""), 'albatross')
        eq(_("""mullusk"""), 'bacon')
        eq(_(r"""Raymond Luxury Yach-t"""), 'Throatwobbler Mangrove')
        eq(_(r"""nudge nudge"""), 'wink wink')

    def test_multiline_strings(self):
        eq = self.assertEqual
        # multiline strings
        eq(_('''This module provides internationalization and localization
support for your Python programs by providing an interface to the GNU
gettext message catalog library.'''),
           '''Guvf zbqhyr cebivqrf vagreangvbanyvmngvba naq ybpnyvmngvba
fhccbeg sbe lbhe Clguba cebtenzf ol cebivqvat na vagresnpr gb gur TAH
trggrkg zrffntr pngnybt yvoenel.''')

    def test_the_alternative_interface(self):
        """Installing a GNUTranslations object directly, plus extra names."""
        eq = self.assertEqual
        # test the alternative interface
        with open(self.mofile, 'rb') as fp:
            t = gettext.GNUTranslations(fp)
        # Install the translation object
        t.install()
        eq(_('nudge nudge'), 'wink wink')
        # Try unicode return type
        t.install()
        eq(_('mullusk'), 'bacon')
        # Test installation of other methods
        import builtins
        t.install(names=["gettext", "lgettext"])
        eq(_, t.gettext)
        eq(builtins.gettext, t.gettext)
        eq(lgettext, t.lgettext)
        del builtins.gettext
        del builtins.lgettext
class GettextTestCase2(GettextBaseTest):
    """Tests the classic bindtextdomain()/textdomain() module-level API."""

    def setUp(self):
        GettextBaseTest.setUp(self)
        self.localedir = os.curdir
        # Set up the bindings
        gettext.bindtextdomain('gettext', self.localedir)
        gettext.textdomain('gettext')
        # For convenience
        self._ = gettext.gettext

    def test_bindtextdomain(self):
        # With no dirname, bindtextdomain() returns the current binding.
        self.assertEqual(gettext.bindtextdomain('gettext'), self.localedir)

    def test_textdomain(self):
        # With no argument, textdomain() returns the current domain.
        self.assertEqual(gettext.textdomain(), 'gettext')

    def test_some_translations(self):
        eq = self.assertEqual
        # test some translations
        eq(self._('albatross'), 'albatross')
        eq(self._('mullusk'), 'bacon')
        eq(self._(r'Raymond Luxury Yach-t'), 'Throatwobbler Mangrove')
        eq(self._(r'nudge nudge'), 'wink wink')

    def test_double_quotes(self):
        eq = self.assertEqual
        # double quotes
        eq(self._("albatross"), 'albatross')
        eq(self._("mullusk"), 'bacon')
        eq(self._(r"Raymond Luxury Yach-t"), 'Throatwobbler Mangrove')
        eq(self._(r"nudge nudge"), 'wink wink')

    def test_triple_single_quotes(self):
        eq = self.assertEqual
        # triple single quotes
        eq(self._('''albatross'''), 'albatross')
        eq(self._('''mullusk'''), 'bacon')
        eq(self._(r'''Raymond Luxury Yach-t'''), 'Throatwobbler Mangrove')
        eq(self._(r'''nudge nudge'''), 'wink wink')

    def test_triple_double_quotes(self):
        eq = self.assertEqual
        # triple double quotes
        eq(self._("""albatross"""), 'albatross')
        eq(self._("""mullusk"""), 'bacon')
        eq(self._(r"""Raymond Luxury Yach-t"""), 'Throatwobbler Mangrove')
        eq(self._(r"""nudge nudge"""), 'wink wink')

    def test_multiline_strings(self):
        eq = self.assertEqual
        # multiline strings
        eq(self._('''This module provides internationalization and localization
support for your Python programs by providing an interface to the GNU
gettext message catalog library.'''),
           '''Guvf zbqhyr cebivqrf vagreangvbanyvmngvba naq ybpnyvmngvba
fhccbeg sbe lbhe Clguba cebtenzf ol cebivqvat na vagresnpr gb gur TAH
trggrkg zrffntr pngnybt yvoenel.''')
class PluralFormsTestCase(GettextBaseTest):
    """Exercises ngettext() and the c2py() plural-expression compiler."""

    def setUp(self):
        GettextBaseTest.setUp(self)
        self.mofile = MOFILE

    def test_plural_forms1(self):
        # Module-level ngettext() uses the installed 'xx' catalog.
        eq = self.assertEqual
        x = gettext.ngettext('There is %s file', 'There are %s files', 1)
        eq(x, 'Hay %s fichero')
        x = gettext.ngettext('There is %s file', 'There are %s files', 2)
        eq(x, 'Hay %s ficheros')

    def test_plural_forms2(self):
        # Same checks via an explicit GNUTranslations instance.
        eq = self.assertEqual
        with open(self.mofile, 'rb') as fp:
            t = gettext.GNUTranslations(fp)
        x = t.ngettext('There is %s file', 'There are %s files', 1)
        eq(x, 'Hay %s fichero')
        x = t.ngettext('There is %s file', 'There are %s files', 2)
        eq(x, 'Hay %s ficheros')

    # The tests below compile C-style Plural-Forms expressions with
    # gettext.c2py() and compare the plural index for n in range(200)
    # against a precomputed digit string, one digit per n.

    def test_hu(self):
        eq = self.assertEqual
        f = gettext.c2py('0')
        s = ''.join([ str(f(x)) for x in range(200) ])
        eq(s, "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")

    def test_de(self):
        eq = self.assertEqual
        f = gettext.c2py('n != 1')
        s = ''.join([ str(f(x)) for x in range(200) ])
        eq(s, "10111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")

    def test_fr(self):
        eq = self.assertEqual
        f = gettext.c2py('n>1')
        s = ''.join([ str(f(x)) for x in range(200) ])
        eq(s, "00111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")

    def test_gd(self):
        eq = self.assertEqual
        f = gettext.c2py('n==1 ? 0 : n==2 ? 1 : 2')
        s = ''.join([ str(f(x)) for x in range(200) ])
        eq(s, "20122222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222")

    def test_gd2(self):
        eq = self.assertEqual
        # Tests the combination of parentheses and "?:"
        f = gettext.c2py('n==1 ? 0 : (n==2 ? 1 : 2)')
        s = ''.join([ str(f(x)) for x in range(200) ])
        eq(s, "20122222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222")

    def test_lt(self):
        eq = self.assertEqual
        f = gettext.c2py('n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2')
        s = ''.join([ str(f(x)) for x in range(200) ])
        eq(s, "20111111112222222222201111111120111111112011111111201111111120111111112011111111201111111120111111112011111111222222222220111111112011111111201111111120111111112011111111201111111120111111112011111111")

    def test_ru(self):
        eq = self.assertEqual
        f = gettext.c2py('n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2')
        s = ''.join([ str(f(x)) for x in range(200) ])
        eq(s, "20111222222222222222201112222220111222222011122222201112222220111222222011122222201112222220111222222011122222222222222220111222222011122222201112222220111222222011122222201112222220111222222011122222")

    def test_pl(self):
        eq = self.assertEqual
        f = gettext.c2py('n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2')
        s = ''.join([ str(f(x)) for x in range(200) ])
        eq(s, "20111222222222222222221112222222111222222211122222221112222222111222222211122222221112222222111222222211122222222222222222111222222211122222221112222222111222222211122222221112222222111222222211122222")

    def test_sl(self):
        eq = self.assertEqual
        f = gettext.c2py('n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3')
        s = ''.join([ str(f(x)) for x in range(200) ])
        eq(s, "30122333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333012233333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333")

    def test_security(self):
        raises = self.assertRaises
        # Test for a dangerous expression
        raises(ValueError, gettext.c2py, "os.chmod('/etc/passwd',0777)")
class UnicodeTranslationsTest(GettextBaseTest):
    """Checks that msgids and msgstrs come back as unicode (str) objects."""

    def setUp(self):
        GettextBaseTest.setUp(self)
        with open(UMOFILE, 'rb') as fp:
            self.t = gettext.GNUTranslations(fp)
        self._ = self.t.gettext

    def test_unicode_msgid(self):
        # The empty msgid maps to the catalog metadata; its translation
        # must still be a str.  (The former duplicated assertion was a
        # leftover from the Python 2 str/unicode pair.)
        self.assertIsInstance(self._(''), str)

    def test_unicode_msgstr(self):
        # 'ab\xde' translates to '\xa4yz' in the utf-8 test catalog.
        self.assertEqual(self._('ab\xde'), '\xa4yz')
class WeirdMetadataTest(GettextBaseTest):
    """Metadata with a continuation line in Last-Translator parses correctly."""

    def setUp(self):
        GettextBaseTest.setUp(self)
        with open(MMOFILE, 'rb') as catalog:
            try:
                self.t = gettext.GNUTranslations(catalog)
            except:
                # Make sure the fixture is cleaned up even when parsing fails.
                self.tearDown()
                raise

    def test_weird_metadata(self):
        info = self.t.info()
        self.assertEqual(9, len(info))
        expected = ('John Doe <jdoe@example.com>\n'
                    'Jane Foobar <jfoobar@example.com>')
        self.assertEqual(expected, info['last-translator'])
class DummyGNUTranslations(gettext.GNUTranslations):
    """GNUTranslations subclass used to exercise class_= keying in the cache."""

    def foo(self):
        # Marker method proving which class a translation object has.
        return 'foo'
class GettextCacheTestCase(GettextBaseTest):
    """translation() caches catalogs keyed on class and .mo file path."""

    def test_cache(self):
        self.localedir = os.curdir
        self.mofile = MOFILE
        # Cache starts empty (cleared in GettextBaseTest.setUp).
        self.assertEqual(len(gettext._translations), 0)
        t = gettext.translation('gettext', self.localedir)
        self.assertEqual(len(gettext._translations), 1)
        # A different class_ for the same catalog is a distinct cache entry.
        t = gettext.translation('gettext', self.localedir,
                                class_=DummyGNUTranslations)
        self.assertEqual(len(gettext._translations), 2)
        self.assertEqual(t.__class__, DummyGNUTranslations)
        # Calling it again doesn't add to the cache
        t = gettext.translation('gettext', self.localedir,
                                class_=DummyGNUTranslations)
        self.assertEqual(len(gettext._translations), 2)
        self.assertEqual(t.__class__, DummyGNUTranslations)
def test_main():
    """Entry point used by regrtest to run this module's test classes."""
    support.run_unittest(__name__)


if __name__ == '__main__':
    test_main()
# For reference, here's the .po file used to created the GNU_MO_DATA above.
#
# The original version was automatically generated from the sources with
# pygettext. Later it was manually modified to add plural forms support.
'''
# Dummy translation for the Python test_gettext.py module.
# Copyright (C) 2001 Python Software Foundation
# Barry Warsaw <barry@python.org>, 2000.
#
msgid ""
msgstr ""
"Project-Id-Version: 2.0\n"
"PO-Revision-Date: 2003-04-11 14:32-0400\n"
"Last-Translator: J. David Ibanez <j-david@noos.fr>\n"
"Language-Team: XX <python-dev@python.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=iso-8859-1\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: pygettext.py 1.1\n"
"Plural-Forms: nplurals=2; plural=n!=1;\n"
#: test_gettext.py:19 test_gettext.py:25 test_gettext.py:31 test_gettext.py:37
#: test_gettext.py:51 test_gettext.py:80 test_gettext.py:86 test_gettext.py:92
#: test_gettext.py:98
msgid "nudge nudge"
msgstr "wink wink"
#: test_gettext.py:16 test_gettext.py:22 test_gettext.py:28 test_gettext.py:34
#: test_gettext.py:77 test_gettext.py:83 test_gettext.py:89 test_gettext.py:95
msgid "albatross"
msgstr ""
#: test_gettext.py:18 test_gettext.py:24 test_gettext.py:30 test_gettext.py:36
#: test_gettext.py:79 test_gettext.py:85 test_gettext.py:91 test_gettext.py:97
msgid "Raymond Luxury Yach-t"
msgstr "Throatwobbler Mangrove"
#: test_gettext.py:17 test_gettext.py:23 test_gettext.py:29 test_gettext.py:35
#: test_gettext.py:56 test_gettext.py:78 test_gettext.py:84 test_gettext.py:90
#: test_gettext.py:96
msgid "mullusk"
msgstr "bacon"
#: test_gettext.py:40 test_gettext.py:101
msgid ""
"This module provides internationalization and localization\n"
"support for your Python programs by providing an interface to the GNU\n"
"gettext message catalog library."
msgstr ""
"Guvf zbqhyr cebivqrf vagreangvbanyvmngvba naq ybpnyvmngvba\n"
"fhccbeg sbe lbhe Clguba cebtenzf ol cebivqvat na vagresnpr gb gur TAH\n"
"trggrkg zrffntr pngnybt yvoenel."
# Manually added, as neither pygettext nor xgettext support plural forms
# in Python.
msgid "There is %s file"
msgid_plural "There are %s files"
msgstr[0] "Hay %s fichero"
msgstr[1] "Hay %s ficheros"
'''
# Here's the second example po file example, used to generate the UMO_DATA
# containing utf-8 encoded Unicode strings
'''
# Dummy translation for the Python test_gettext.py module.
# Copyright (C) 2001 Python Software Foundation
# Barry Warsaw <barry@python.org>, 2000.
#
msgid ""
msgstr ""
"Project-Id-Version: 2.0\n"
"PO-Revision-Date: 2003-04-11 12:42-0400\n"
"Last-Translator: Barry A. WArsaw <barry@python.org>\n"
"Language-Team: XX <python-dev@python.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 7bit\n"
"Generated-By: manually\n"
#: nofile:0
msgid "ab\xc3\x9e"
msgstr "\xc2\xa4yz"
'''
# Here's the third example po file, used to generate MMO_DATA
'''
msgid ""
msgstr ""
"Project-Id-Version: No Project 0.0\n"
"POT-Creation-Date: Wed Dec 11 07:44:15 2002\n"
"PO-Revision-Date: 2002-08-14 01:18:58+00:00\n"
"Last-Translator: John Doe <jdoe@example.com>\n"
"Jane Foobar <jfoobar@example.com>\n"
"Language-Team: xx <xx@example.com>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=iso-8859-15\n"
"Content-Transfer-Encoding: quoted-printable\n"
"Generated-By: pygettext.py 1.3\n"
'''
| apache-2.0 |
Workday/OpenFrame | third_party/WebKit/Tools/Scripts/webkitpy/style/checkers/common.py | 203 | 3229 | # Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Supports style checking not specific to any one file type."""
# FIXME: Test this list in the same way that the list of CppChecker
# categories is tested, for example by checking that all of its
# elements appear in the unit tests. This should probably be done
# after moving the relevant cpp_unittest.ErrorCollector code
# into a shared location and refactoring appropriately.
# Style-violation categories reported by the checkers in this module.
categories = {
    "whitespace/carriage_return",
    "whitespace/tab",
}
class CarriageReturnChecker(object):
    """Supports checking for and handling carriage returns."""

    def __init__(self, handle_style_error):
        # handle_style_error(line_number, category, confidence, message)
        self._handle_style_error = handle_style_error

    def check(self, lines):
        """Check for and strip trailing carriage returns from lines.

        Reports a "whitespace/carriage_return" error for each offending
        line and returns the lines with all trailing \\r's removed.
        """
        # enumerate() instead of range(len(...)): same indices, clearer.
        for line_number, line in enumerate(lines):
            if not line.endswith("\r"):
                continue
            self._handle_style_error(line_number + 1,  # Correct for offset.
                                     "whitespace/carriage_return",
                                     1,
                                     "One or more unexpected \\r (^M) found; "
                                     "better to use only a \\n")
            lines[line_number] = line.rstrip("\r")
        return lines
class TabChecker(object):
    """Supports checking for and handling tabs."""

    def __init__(self, file_path, handle_style_error):
        self.file_path = file_path
        self.handle_style_error = handle_style_error

    def check(self, lines):
        """Report a style error for every line containing a tab character."""
        # FIXME: share with cpp_style.
        for line_number, line in enumerate(lines, start=1):
            if "\t" not in line:
                continue
            self.handle_style_error(line_number,
                                    "whitespace/tab", 5,
                                    "Line contains tab character.")
| bsd-3-clause |
kfwang/Glance-OVA-OVF | glance/tests/unit/test_domain_proxy.py | 15 | 11529 | # Copyright 2013 OpenStack Foundation.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from glance.domain import proxy
import glance.tests.utils as test_utils
# Well-known identifiers shared by the test cases in this module.
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'
class FakeProxy(object):
    """Minimal proxy stand-in that records how it was constructed."""

    def __init__(self, base, *args, **kwargs):
        self.base, self.args, self.kwargs = base, args, kwargs
class FakeRepo(object):
    """Repo double: every operation records its arguments and returns a
    canned result."""

    def __init__(self, result=None):
        self.args = None
        self.kwargs = None
        self.result = result

    def fake_method(self, *args, **kwargs):
        # Remember the call so tests can assert on forwarding behavior.
        self.args, self.kwargs = args, kwargs
        return self.result

    # All repo operations share the same recording implementation.
    get = fake_method
    list = fake_method
    add = fake_method
    save = fake_method
    remove = fake_method
class TestProxyRepoPlain(test_utils.BaseTestCase):
    """proxy.Repo without an item proxy class: calls pass straight through."""

    def setUp(self):
        super(TestProxyRepoPlain, self).setUp()
        self.fake_repo = FakeRepo()
        self.proxy_repo = proxy.Repo(self.fake_repo)

    def _test_method(self, name, base_result, *args, **kwargs):
        # Calls proxy_repo.<name>(*args, **kwargs) and checks that both the
        # result and the arguments are forwarded unchanged.
        self.fake_repo.result = base_result
        method = getattr(self.proxy_repo, name)
        proxy_result = method(*args, **kwargs)
        self.assertEqual(base_result, proxy_result)
        self.assertEqual(args, self.fake_repo.args)
        self.assertEqual(kwargs, self.fake_repo.kwargs)

    def test_get(self):
        self._test_method('get', 'snarf', 'abcd')

    def test_list(self):
        self._test_method('list', ['sniff', 'snarf'], 2, filter='^sn')

    def test_add(self):
        self._test_method('add', 'snuff', 'enough')

    def test_save(self):
        self._test_method('save', 'snuff', 'enough', from_state=None)

    def test_remove(self):
        self._test_method('add', None, 'flying')
class TestProxyRepoWrapping(test_utils.BaseTestCase):
    """proxy.Repo with an item proxy class: results get wrapped in FakeProxy,
    and proxied arguments get unwrapped before reaching the base repo."""

    def setUp(self):
        super(TestProxyRepoWrapping, self).setUp()
        self.fake_repo = FakeRepo()
        self.proxy_repo = proxy.Repo(self.fake_repo,
                                     item_proxy_class=FakeProxy,
                                     item_proxy_kwargs={'a': 1})

    def _test_method(self, name, base_result, *args, **kwargs):
        # The result from the base repo must come back wrapped in FakeProxy
        # carrying the configured item_proxy_kwargs.
        self.fake_repo.result = base_result
        method = getattr(self.proxy_repo, name)
        proxy_result = method(*args, **kwargs)
        self.assertIsInstance(proxy_result, FakeProxy)
        self.assertEqual(base_result, proxy_result.base)
        self.assertEqual(0, len(proxy_result.args))
        self.assertEqual({'a': 1}, proxy_result.kwargs)
        self.assertEqual(args, self.fake_repo.args)
        self.assertEqual(kwargs, self.fake_repo.kwargs)

    def test_get(self):
        self.fake_repo.result = 'snarf'
        result = self.proxy_repo.get('some-id')
        self.assertIsInstance(result, FakeProxy)
        self.assertEqual(('some-id',), self.fake_repo.args)
        self.assertEqual({}, self.fake_repo.kwargs)
        self.assertEqual('snarf', result.base)
        self.assertEqual(tuple(), result.args)
        self.assertEqual({'a': 1}, result.kwargs)

    def test_list(self):
        # Each element of a list result is wrapped individually.
        self.fake_repo.result = ['scratch', 'sniff']
        results = self.proxy_repo.list(2, prefix='s')
        self.assertEqual((2,), self.fake_repo.args)
        self.assertEqual({'prefix': 's'}, self.fake_repo.kwargs)
        self.assertEqual(2, len(results))
        for i in range(2):
            self.assertIsInstance(results[i], FakeProxy)
            self.assertEqual(self.fake_repo.result[i], results[i].base)
            self.assertEqual(tuple(), results[i].args)
            self.assertEqual({'a': 1}, results[i].kwargs)

    def _test_method_with_proxied_argument(self, name, result, **kwargs):
        # Passing a proxied item in: the base repo must receive the
        # unwrapped .base value, and a non-None result is re-wrapped.
        self.fake_repo.result = result
        item = FakeProxy('snoop')
        method = getattr(self.proxy_repo, name)
        proxy_result = method(item)
        self.assertEqual(('snoop',), self.fake_repo.args)
        self.assertEqual(kwargs, self.fake_repo.kwargs)
        if result is None:
            self.assertIsNone(proxy_result)
        else:
            self.assertIsInstance(proxy_result, FakeProxy)
            self.assertEqual(result, proxy_result.base)
            self.assertEqual(tuple(), proxy_result.args)
            self.assertEqual({'a': 1}, proxy_result.kwargs)

    def test_add(self):
        self._test_method_with_proxied_argument('add', 'dog')

    def test_add_with_no_result(self):
        self._test_method_with_proxied_argument('add', None)

    def test_save(self):
        self._test_method_with_proxied_argument('save', 'dog',
                                                from_state=None)

    def test_save_with_no_result(self):
        self._test_method_with_proxied_argument('save', None,
                                                from_state=None)

    def test_remove(self):
        self._test_method_with_proxied_argument('remove', 'dog')

    def test_remove_with_no_result(self):
        self._test_method_with_proxied_argument('remove', None)
class FakeImageFactory(object):
    """Factory double that records new_image() kwargs and returns a canned
    result."""

    def __init__(self, result=None):
        # Bug fix: the constructor previously discarded its argument
        # (self.result = None), unlike the sibling FakeRepo.
        self.result = result
        self.kwargs = None

    def new_image(self, **kwargs):
        self.kwargs = kwargs
        return self.result
class TestImageFactory(test_utils.BaseTestCase):
    """proxy.ImageFactory: kwargs pass through; results may be wrapped."""

    def setUp(self):
        super(TestImageFactory, self).setUp()
        self.factory = FakeImageFactory()

    def test_proxy_plain(self):
        # No proxy_class: the factory result comes back unwrapped.
        proxy_factory = proxy.ImageFactory(self.factory)
        self.factory.result = 'eddard'
        image = proxy_factory.new_image(a=1, b='two')
        self.assertEqual('eddard', image)
        self.assertEqual({'a': 1, 'b': 'two'}, self.factory.kwargs)

    def test_proxy_wrapping(self):
        # With proxy_class: the result is wrapped with the given kwargs.
        proxy_factory = proxy.ImageFactory(self.factory,
                                           proxy_class=FakeProxy,
                                           proxy_kwargs={'dog': 'bark'})
        self.factory.result = 'stark'
        image = proxy_factory.new_image(a=1, b='two')
        self.assertIsInstance(image, FakeProxy)
        self.assertEqual('stark', image.base)
        self.assertEqual({'a': 1, 'b': 'two'}, self.factory.kwargs)
class FakeImageMembershipFactory(object):
    """Membership-factory double recording new_image_member() arguments."""

    def __init__(self, result=None):
        # Bug fix: the constructor previously discarded its argument
        # (self.result = None), unlike the sibling FakeRepo.
        self.result = result
        self.image = None
        self.member_id = None

    def new_image_member(self, image, member_id):
        self.image = image
        self.member_id = member_id
        return self.result
class TestImageMembershipFactory(test_utils.BaseTestCase):
    """proxy.ImageMembershipFactory with the various proxy-class combos."""

    def setUp(self):
        super(TestImageMembershipFactory, self).setUp()
        self.factory = FakeImageMembershipFactory()

    def test_proxy_plain(self):
        # No proxy classes: arguments and result pass through untouched.
        proxy_factory = proxy.ImageMembershipFactory(self.factory)
        self.factory.result = 'tyrion'
        membership = proxy_factory.new_image_member('jaime', 'cersei')
        self.assertEqual('tyrion', membership)
        self.assertEqual('jaime', self.factory.image)
        self.assertEqual('cersei', self.factory.member_id)

    def test_proxy_wrapped_membership(self):
        # The returned membership gets wrapped in member_proxy_class.
        proxy_factory = proxy.ImageMembershipFactory(
            self.factory, member_proxy_class=FakeProxy,
            member_proxy_kwargs={'a': 1})
        self.factory.result = 'tyrion'
        membership = proxy_factory.new_image_member('jaime', 'cersei')
        self.assertIsInstance(membership, FakeProxy)
        self.assertEqual('tyrion', membership.base)
        self.assertEqual({'a': 1}, membership.kwargs)
        self.assertEqual('jaime', self.factory.image)
        self.assertEqual('cersei', self.factory.member_id)

    def test_proxy_wrapped_image(self):
        # A proxied image argument is unwrapped before reaching the factory.
        proxy_factory = proxy.ImageMembershipFactory(
            self.factory, image_proxy_class=FakeProxy)
        self.factory.result = 'tyrion'
        image = FakeProxy('jaime')
        membership = proxy_factory.new_image_member(image, 'cersei')
        self.assertEqual('tyrion', membership)
        self.assertEqual('jaime', self.factory.image)
        self.assertEqual('cersei', self.factory.member_id)

    def test_proxy_both_wrapped(self):
        # Image unwrapping and membership wrapping compose.
        class FakeProxy2(FakeProxy):
            pass

        proxy_factory = proxy.ImageMembershipFactory(
            self.factory,
            member_proxy_class=FakeProxy,
            member_proxy_kwargs={'b': 2},
            image_proxy_class=FakeProxy2)
        self.factory.result = 'tyrion'
        image = FakeProxy2('jaime')
        membership = proxy_factory.new_image_member(image, 'cersei')
        self.assertIsInstance(membership, FakeProxy)
        self.assertEqual('tyrion', membership.base)
        self.assertEqual({'b': 2}, membership.kwargs)
        self.assertEqual('jaime', self.factory.image)
        self.assertEqual('cersei', self.factory.member_id)
class FakeImage(object):
    """Stand-in image whose member repo is whatever *result* it was given."""

    def __init__(self, result=None):
        # Canned value handed back by get_member_repo().
        self.result = result

    def get_member_repo(self):
        """Return the canned member repository object."""
        return self.result
class TestImage(test_utils.BaseTestCase):
    """Exercises proxy.Image member-repo wrapping."""

    def setUp(self):
        super(TestImage, self).setUp()
        self.image = FakeImage()

    def test_normal_member_repo(self):
        # Without a proxy class the member repo is returned unwrapped.
        proxied = proxy.Image(self.image)
        self.image.result = 'mormont'
        self.assertEqual('mormont', proxied.get_member_repo())

    def test_proxied_member_repo(self):
        # With member_repo_proxy_class the repo comes back wrapped.
        proxied = proxy.Image(self.image,
                              member_repo_proxy_class=FakeProxy,
                              member_repo_proxy_kwargs={'a': 10})
        self.image.result = 'corn'
        repo = proxied.get_member_repo()
        self.assertIsInstance(repo, FakeProxy)
        self.assertEqual('corn', repo.base)
class TestTaskFactory(test_utils.BaseTestCase):
    """Exercises proxy.TaskFactory pass-through and wrapping behaviour."""

    def setUp(self):
        super(TestTaskFactory, self).setUp()
        self.factory = mock.Mock()
        self.fake_type = 'import'
        self.fake_owner = "owner"

    def test_proxy_plain(self):
        # Keyword arguments must be forwarded untouched to the wrapped
        # factory's new_task().
        task_factory = proxy.TaskFactory(self.factory)
        task_factory.new_task(
            type=self.fake_type,
            owner=self.fake_owner
        )
        self.factory.new_task.assert_called_once_with(
            type=self.fake_type,
            owner=self.fake_owner
        )

    def test_proxy_wrapping(self):
        # With task_proxy_class the returned task is wrapped in FakeProxy.
        task_factory = proxy.TaskFactory(
            self.factory,
            task_proxy_class=FakeProxy,
            task_proxy_kwargs={'dog': 'bark'})
        self.factory.new_task.return_value = 'fake_task'
        wrapped_task = task_factory.new_task(
            type=self.fake_type,
            owner=self.fake_owner
        )
        self.factory.new_task.assert_called_once_with(
            type=self.fake_type,
            owner=self.fake_owner
        )
        self.assertIsInstance(wrapped_task, FakeProxy)
        self.assertEqual('fake_task', wrapped_task.base)
| apache-2.0 |
papados/ordersys | Lib/site-packages/django/contrib/auth/models.py | 49 | 18339 | from __future__ import unicode_literals
import re
import warnings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import send_mail
from django.core import validators
from django.db import models
from django.db.models.manager import EmptyManager
from django.utils.crypto import get_random_string
from django.utils.http import urlquote
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib import auth
from django.contrib.auth.hashers import (
check_password, make_password, is_password_usable)
from django.contrib.auth.signals import user_logged_in
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import python_2_unicode_compatible
def update_last_login(sender, user, **kwargs):
    """
    A signal receiver which updates the last_login date for
    the user logging in.
    """
    user.last_login = timezone.now()
    # update_fields limits the UPDATE statement to the single changed column.
    user.save(update_fields=['last_login'])


# Keep last_login fresh on every successful authentication.
user_logged_in.connect(update_last_login)
class SiteProfileNotAvailable(Exception):
    # Raised by AbstractUser.get_profile() when AUTH_PROFILE_MODULE is
    # unset or cannot be resolved to a model (deprecated profile mechanism).
    pass
class PermissionManager(models.Manager):
    def get_by_natural_key(self, codename, app_label, model):
        # Natural key is (codename, app_label, model): lets fixtures and
        # serializers reference permissions without relying on auto PKs.
        return self.get(
            codename=codename,
            content_type=ContentType.objects.get_by_natural_key(app_label,
                                                                model),
        )
@python_2_unicode_compatible
class Permission(models.Model):
    """
    The permissions system provides a way to assign permissions to specific
    users and groups of users.

    The permission system is used by the Django admin site, but may also be
    useful in your own code. The Django admin site uses permissions as follows:

        - The "add" permission limits the user's ability to view the "add" form
          and add an object.
        - The "change" permission limits a user's ability to view the change
          list, view the "change" form and change an object.
        - The "delete" permission limits the ability to delete an object.

    Permissions are set globally per type of object, not per specific object
    instance. It is possible to say "Mary may change news stories," but it's
    not currently possible to say "Mary may change news stories, but only the
    ones she created herself" or "Mary may only change news stories that have a
    certain status or publication date."

    Three basic permissions -- add, change and delete -- are automatically
    created for each Django model.
    """
    name = models.CharField(_('name'), max_length=50)
    content_type = models.ForeignKey(ContentType)
    codename = models.CharField(_('codename'), max_length=100)
    objects = PermissionManager()

    class Meta:
        verbose_name = _('permission')
        verbose_name_plural = _('permissions')
        # A codename is unique per model (content type), not globally.
        unique_together = (('content_type', 'codename'),)
        ordering = ('content_type__app_label', 'content_type__model',
                    'codename')

    def __str__(self):
        return "%s | %s | %s" % (
            six.text_type(self.content_type.app_label),
            six.text_type(self.content_type),
            six.text_type(self.name))

    def natural_key(self):
        return (self.codename,) + self.content_type.natural_key()
    # Content types must be deserialized before permissions that refer to them.
    natural_key.dependencies = ['contenttypes.contenttype']
class GroupManager(models.Manager):
    """
    The manager for the auth's Group model.
    """
    def get_by_natural_key(self, name):
        # Group names are unique, so the name alone is the natural key.
        return self.get(name=name)
@python_2_unicode_compatible
class Group(models.Model):
    """
    Groups are a generic way of categorizing users to apply permissions, or
    some other label, to those users. A user can belong to any number of
    groups.

    A user in a group automatically has all the permissions granted to that
    group. For example, if the group Site editors has the permission
    can_edit_home_page, any user in that group will have that permission.

    Beyond permissions, groups are a convenient way to categorize users to
    apply some label, or extended functionality, to them. For example, you
    could create a group 'Special users', and you could write code that would
    do special things to those users -- such as giving them access to a
    members-only portion of your site, or sending them members-only email
    messages.
    """
    name = models.CharField(_('name'), max_length=80, unique=True)
    permissions = models.ManyToManyField(Permission,
        verbose_name=_('permissions'), blank=True)

    objects = GroupManager()

    class Meta:
        verbose_name = _('group')
        verbose_name_plural = _('groups')

    def __str__(self):
        return self.name

    def natural_key(self):
        return (self.name,)
class BaseUserManager(models.Manager):
    # Base manager shared by concrete and custom user models.

    @classmethod
    def normalize_email(cls, email):
        """
        Normalize the address by lowercasing the domain part of the email
        address.
        """
        email = email or ''
        try:
            email_name, domain_part = email.strip().rsplit('@', 1)
        except ValueError:
            # No '@' present: leave the value as-is rather than guessing.
            pass
        else:
            email = '@'.join([email_name, domain_part.lower()])
        return email

    def make_random_password(self, length=10,
                             allowed_chars='abcdefghjkmnpqrstuvwxyz'
                                           'ABCDEFGHJKLMNPQRSTUVWXYZ'
                                           '23456789'):
        """
        Generates a random password with the given length and given
        allowed_chars. Note that the default value of allowed_chars does not
        have "I" or "O" or letters and digits that look similar -- just to
        avoid confusion.
        """
        return get_random_string(length, allowed_chars)

    def get_by_natural_key(self, username):
        # USERNAME_FIELD may be customized, so look it up dynamically.
        return self.get(**{self.model.USERNAME_FIELD: username})
class UserManager(BaseUserManager):

    def _create_user(self, username, email, password,
                     is_staff, is_superuser, **extra_fields):
        """
        Creates and saves a User with the given username, email and password.
        """
        now = timezone.now()
        if not username:
            raise ValueError('The given username must be set')
        email = self.normalize_email(email)
        user = self.model(username=username, email=email,
                          is_staff=is_staff, is_active=True,
                          is_superuser=is_superuser, last_login=now,
                          date_joined=now, **extra_fields)
        # set_password hashes; password=None yields an unusable password.
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_user(self, username, email=None, password=None, **extra_fields):
        # Regular user: neither staff nor superuser.
        return self._create_user(username, email, password, False, False,
                                 **extra_fields)

    def create_superuser(self, username, email, password, **extra_fields):
        # Superuser: both is_staff and is_superuser forced True.
        return self._create_user(username, email, password, True, True,
                                 **extra_fields)
@python_2_unicode_compatible
class AbstractBaseUser(models.Model):
    # Minimal user base: password storage/checking and identity plumbing.
    password = models.CharField(_('password'), max_length=128)
    last_login = models.DateTimeField(_('last login'), default=timezone.now)

    # Plain class attribute; concrete subclasses (see AbstractUser below)
    # override this with a real database field.
    is_active = True

    REQUIRED_FIELDS = []

    class Meta:
        abstract = True

    def get_username(self):
        "Return the identifying username for this User"
        return getattr(self, self.USERNAME_FIELD)

    def __str__(self):
        return self.get_username()

    def natural_key(self):
        return (self.get_username(),)

    def is_anonymous(self):
        """
        Always returns False. This is a way of comparing User objects to
        anonymous users.
        """
        return False

    def is_authenticated(self):
        """
        Always return True. This is a way to tell if the user has been
        authenticated in templates.
        """
        return True

    def set_password(self, raw_password):
        self.password = make_password(raw_password)

    def check_password(self, raw_password):
        """
        Returns a boolean of whether the raw_password was correct. Handles
        hashing formats behind the scenes.
        """
        def setter(raw_password):
            # Invoked by check_password() to upgrade the stored hash to the
            # preferred algorithm after a successful check.
            self.set_password(raw_password)
            self.save(update_fields=["password"])
        return check_password(raw_password, self.password, setter)

    def set_unusable_password(self):
        # Sets a value that will never be a valid hash
        self.password = make_password(None)

    def has_usable_password(self):
        return is_password_usable(self.password)

    def get_full_name(self):
        # Contract only; concrete subclasses must implement.
        raise NotImplementedError()

    def get_short_name(self):
        # Contract only; concrete subclasses must implement.
        raise NotImplementedError()
# A few helper functions for common logic between User and AnonymousUser.
def _user_get_all_permissions(user, obj):
    # Union of permissions reported by every backend that supports the hook.
    permissions = set()
    for backend in auth.get_backends():
        if hasattr(backend, "get_all_permissions"):
            permissions.update(backend.get_all_permissions(user, obj))
    return permissions
def _user_has_perm(user, perm, obj):
    # First backend that grants the permission wins; absence of the hook
    # on a backend simply skips it.
    for backend in auth.get_backends():
        if hasattr(backend, "has_perm"):
            if backend.has_perm(user, perm, obj):
                return True
    return False
def _user_has_module_perms(user, app_label):
    # True as soon as any backend reports a permission in the app.
    for backend in auth.get_backends():
        if hasattr(backend, "has_module_perms"):
            if backend.has_module_perms(user, app_label):
                return True
    return False
class PermissionsMixin(models.Model):
    """
    A mixin class that adds the fields and methods necessary to support
    Django's Group and Permission model using the ModelBackend.
    """
    is_superuser = models.BooleanField(_('superuser status'), default=False,
        help_text=_('Designates that this user has all permissions without '
                    'explicitly assigning them.'))
    groups = models.ManyToManyField(Group, verbose_name=_('groups'),
        blank=True, help_text=_('The groups this user belongs to. A user will '
                                'get all permissions granted to each of '
                                'his/her group.'),
        related_name="user_set", related_query_name="user")
    user_permissions = models.ManyToManyField(Permission,
        verbose_name=_('user permissions'), blank=True,
        help_text=_('Specific permissions for this user.'),
        related_name="user_set", related_query_name="user")

    class Meta:
        abstract = True

    def get_group_permissions(self, obj=None):
        """
        Returns a list of permission strings that this user has through his/her
        groups. This method queries all available auth backends. If an object
        is passed in, only permissions matching this object are returned.
        """
        permissions = set()
        for backend in auth.get_backends():
            if hasattr(backend, "get_group_permissions"):
                permissions.update(backend.get_group_permissions(self, obj))
        return permissions

    def get_all_permissions(self, obj=None):
        return _user_get_all_permissions(self, obj)

    def has_perm(self, perm, obj=None):
        """
        Returns True if the user has the specified permission. This method
        queries all available auth backends, but returns immediately if any
        backend returns True. Thus, a user who has permission from a single
        auth backend is assumed to have permission in general. If an object is
        provided, permissions for this specific object are checked.
        """
        # Active superusers have all permissions.
        if self.is_active and self.is_superuser:
            return True

        # Otherwise we need to check the backends.
        return _user_has_perm(self, perm, obj)

    def has_perms(self, perm_list, obj=None):
        """
        Returns True if the user has each of the specified permissions. If
        object is passed, it checks if the user has all required perms for this
        object.
        """
        for perm in perm_list:
            if not self.has_perm(perm, obj):
                return False
        return True

    def has_module_perms(self, app_label):
        """
        Returns True if the user has any permissions in the given app label.
        Uses pretty much the same logic as has_perm, above.
        """
        # Active superusers have all permissions.
        if self.is_active and self.is_superuser:
            return True

        return _user_has_module_perms(self, app_label)
class AbstractUser(AbstractBaseUser, PermissionsMixin):
    """
    An abstract base class implementing a fully featured User model with
    admin-compliant permissions.

    Username, password and email are required. Other fields are optional.
    """
    username = models.CharField(_('username'), max_length=30, unique=True,
        help_text=_('Required. 30 characters or fewer. Letters, numbers and '
                    '@/./+/-/_ characters'),
        validators=[
            # Word characters plus @ . + - ; mirrors the help_text above.
            validators.RegexValidator(re.compile('^[\w.@+-]+$'), _('Enter a valid username.'), 'invalid')
        ])
    first_name = models.CharField(_('first name'), max_length=30, blank=True)
    last_name = models.CharField(_('last name'), max_length=30, blank=True)
    email = models.EmailField(_('email address'), blank=True)
    is_staff = models.BooleanField(_('staff status'), default=False,
        help_text=_('Designates whether the user can log into this admin '
                    'site.'))
    # Overrides the plain is_active attribute on AbstractBaseUser with a
    # real database field.
    is_active = models.BooleanField(_('active'), default=True,
        help_text=_('Designates whether this user should be treated as '
                    'active. Unselect this instead of deleting accounts.'))
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)

    objects = UserManager()

    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ['email']

    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')
        abstract = True

    def get_absolute_url(self):
        return "/users/%s/" % urlquote(self.username)

    def get_full_name(self):
        """
        Returns the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()

    def get_short_name(self):
        "Returns the short name for the user."
        return self.first_name

    def email_user(self, subject, message, from_email=None):
        """
        Sends an email to this User.
        """
        send_mail(subject, message, from_email, [self.email])

    def get_profile(self):
        """
        Returns site-specific profile for this user. Raises
        SiteProfileNotAvailable if this site does not allow profiles.
        """
        # Deprecated profile mechanism; result is cached per instance.
        warnings.warn("The use of AUTH_PROFILE_MODULE to define user profiles has been deprecated.",
            DeprecationWarning, stacklevel=2)
        if not hasattr(self, '_profile_cache'):
            from django.conf import settings
            if not getattr(settings, 'AUTH_PROFILE_MODULE', False):
                raise SiteProfileNotAvailable(
                    'You need to set AUTH_PROFILE_MODULE in your project '
                    'settings')
            try:
                app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
            except ValueError:
                raise SiteProfileNotAvailable(
                    'app_label and model_name should be separated by a dot in '
                    'the AUTH_PROFILE_MODULE setting')
            try:
                model = models.get_model(app_label, model_name)
                if model is None:
                    raise SiteProfileNotAvailable(
                        'Unable to load the profile model, check '
                        'AUTH_PROFILE_MODULE in your project settings')
                # Query on the same database this user instance came from.
                self._profile_cache = model._default_manager.using(
                                   self._state.db).get(user__id__exact=self.id)
                self._profile_cache.user = self
            except (ImportError, ImproperlyConfigured):
                raise SiteProfileNotAvailable
        return self._profile_cache
class User(AbstractUser):
    """
    Users within the Django authentication system are represented by this
    model.

    Username, password and email are required. Other fields are optional.
    """
    class Meta(AbstractUser.Meta):
        # Allows projects to substitute a custom model via AUTH_USER_MODEL.
        swappable = 'AUTH_USER_MODEL'
@python_2_unicode_compatible
class AnonymousUser(object):
    # Stand-in for User on unauthenticated requests: mirrors the User API
    # but is never persisted (save/delete raise) and is never authenticated.
    id = None
    pk = None
    username = ''
    is_staff = False
    is_active = False
    is_superuser = False
    _groups = EmptyManager(Group)
    _user_permissions = EmptyManager(Permission)

    def __init__(self):
        pass

    def __str__(self):
        return 'AnonymousUser'

    def __eq__(self, other):
        # All AnonymousUser instances compare equal to each other.
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return 1  # instances always return the same hash value

    def save(self):
        raise NotImplementedError

    def delete(self):
        raise NotImplementedError

    def set_password(self, raw_password):
        raise NotImplementedError

    def check_password(self, raw_password):
        raise NotImplementedError

    def _get_groups(self):
        return self._groups
    groups = property(_get_groups)

    def _get_user_permissions(self):
        return self._user_permissions
    user_permissions = property(_get_user_permissions)

    def get_group_permissions(self, obj=None):
        return set()

    def get_all_permissions(self, obj=None):
        # Backends may still grant permissions to anonymous users.
        return _user_get_all_permissions(self, obj=obj)

    def has_perm(self, perm, obj=None):
        return _user_has_perm(self, perm, obj=obj)

    def has_perms(self, perm_list, obj=None):
        for perm in perm_list:
            if not self.has_perm(perm, obj):
                return False
        return True

    def has_module_perms(self, module):
        return _user_has_module_perms(self, module)

    def is_anonymous(self):
        return True

    def is_authenticated(self):
        return False
| unlicense |
harisibrahimkv/django | django/contrib/gis/db/backends/spatialite/schema.py | 33 | 6791 | from django.db.backends.sqlite3.schema import DatabaseSchemaEditor
from django.db.utils import DatabaseError
class SpatialiteSchemaEditor(DatabaseSchemaEditor):
    # Schema editor for SpatiaLite: geometry columns cannot be created with
    # plain DDL; they are added/removed through SpatiaLite SQL functions
    # (AddGeometryColumn & co.) after the table itself exists.
    sql_add_geometry_column = (
        "SELECT AddGeometryColumn(%(table)s, %(column)s, %(srid)s, "
        "%(geom_type)s, %(dim)s, %(null)s)"
    )
    sql_add_spatial_index = "SELECT CreateSpatialIndex(%(table)s, %(column)s)"
    sql_drop_spatial_index = "DROP TABLE idx_%(table)s_%(column)s"
    sql_recover_geometry_metadata = (
        "SELECT RecoverGeometryColumn(%(table)s, %(column)s, %(srid)s, "
        "%(geom_type)s, %(dim)s)"
    )
    sql_remove_geometry_metadata = "SELECT DiscardGeometryColumn(%(table)s, %(column)s)"
    sql_discard_geometry_columns = "DELETE FROM %(geom_table)s WHERE f_table_name = %(table)s"
    sql_update_geometry_columns = (
        "UPDATE %(geom_table)s SET f_table_name = %(new_table)s "
        "WHERE f_table_name = %(old_table)s"
    )

    # SpatiaLite metadata tables that may hold rows for a dropped/renamed table.
    geometry_tables = [
        "geometry_columns",
        "geometry_columns_auth",
        "geometry_columns_time",
        "geometry_columns_statistics",
    ]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Deferred AddGeometryColumn/CreateSpatialIndex statements, executed
        # by create_model()/add_field() once the table exists.
        self.geometry_sql = []

    def geo_quote_name(self, name):
        return self.connection.ops.geo_quote_name(name)

    def column_sql(self, model, field, include_default=False):
        from django.contrib.gis.db.models.fields import GeometryField
        if not isinstance(field, GeometryField):
            return super().column_sql(model, field, include_default)

        # Geometry columns are created by the `AddGeometryColumn` function
        self.geometry_sql.append(
            self.sql_add_geometry_column % {
                "table": self.geo_quote_name(model._meta.db_table),
                "column": self.geo_quote_name(field.column),
                "srid": field.srid,
                "geom_type": self.geo_quote_name(field.geom_type),
                "dim": field.dim,
                "null": int(not field.null),
            }
        )

        if field.spatial_index:
            self.geometry_sql.append(
                self.sql_add_spatial_index % {
                    "table": self.quote_name(model._meta.db_table),
                    "column": self.quote_name(field.column),
                }
            )
        # (None, None) tells the base editor to omit the column from the
        # CREATE TABLE statement; it is added afterwards via geometry_sql.
        return None, None

    def remove_geometry_metadata(self, model, field):
        self.execute(
            self.sql_remove_geometry_metadata % {
                "table": self.quote_name(model._meta.db_table),
                "column": self.quote_name(field.column),
            }
        )
        self.execute(
            self.sql_drop_spatial_index % {
                "table": model._meta.db_table,
                "column": field.column,
            }
        )

    def create_model(self, model):
        super().create_model(model)
        # Create geometry columns
        for sql in self.geometry_sql:
            self.execute(sql)
        self.geometry_sql = []

    def delete_model(self, model, **kwargs):
        from django.contrib.gis.db.models.fields import GeometryField
        # Drop spatial metadata (dropping the table does not automatically remove them)
        for field in model._meta.local_fields:
            if isinstance(field, GeometryField):
                self.remove_geometry_metadata(model, field)
        # Make sure all geom stuff is gone
        for geom_table in self.geometry_tables:
            try:
                self.execute(
                    self.sql_discard_geometry_columns % {
                        "geom_table": geom_table,
                        "table": self.quote_name(model._meta.db_table),
                    }
                )
            except DatabaseError:
                # Metadata table may not exist on this SpatiaLite version.
                pass
        super().delete_model(model, **kwargs)

    def add_field(self, model, field):
        from django.contrib.gis.db.models.fields import GeometryField
        if isinstance(field, GeometryField):
            # Populate self.geometry_sql
            self.column_sql(model, field)
            for sql in self.geometry_sql:
                self.execute(sql)
            self.geometry_sql = []
        else:
            super().add_field(model, field)

    def remove_field(self, model, field):
        from django.contrib.gis.db.models.fields import GeometryField
        # NOTE: If the field is a geometry field, the table is just recreated,
        # the parent's remove_field can't be used cause it will skip the
        # recreation if the field does not have a database type. Geometry fields
        # do not have a db type cause they are added and removed via stored
        # procedures.
        if isinstance(field, GeometryField):
            self._remake_table(model, delete_field=field)
        else:
            super().remove_field(model, field)

    def alter_db_table(self, model, old_db_table, new_db_table):
        from django.contrib.gis.db.models.fields import GeometryField
        # Remove geometry-ness from temp table
        for field in model._meta.local_fields:
            if isinstance(field, GeometryField):
                self.execute(
                    self.sql_remove_geometry_metadata % {
                        "table": self.quote_name(old_db_table),
                        "column": self.quote_name(field.column),
                    }
                )
        # Alter table
        super().alter_db_table(model, old_db_table, new_db_table)
        # Repoint any straggler names
        for geom_table in self.geometry_tables:
            try:
                self.execute(
                    self.sql_update_geometry_columns % {
                        "geom_table": geom_table,
                        "old_table": self.quote_name(old_db_table),
                        "new_table": self.quote_name(new_db_table),
                    }
                )
            except DatabaseError:
                pass
        # Re-add geometry-ness and rename spatial index tables
        for field in model._meta.local_fields:
            if isinstance(field, GeometryField):
                self.execute(self.sql_recover_geometry_metadata % {
                    "table": self.geo_quote_name(new_db_table),
                    "column": self.geo_quote_name(field.column),
                    "srid": field.srid,
                    "geom_type": self.geo_quote_name(field.geom_type),
                    "dim": field.dim,
                })
            if getattr(field, 'spatial_index', False):
                # sql_rename_table is inherited from the base schema editor.
                self.execute(self.sql_rename_table % {
                    "old_table": self.quote_name("idx_%s_%s" % (old_db_table, field.column)),
                    "new_table": self.quote_name("idx_%s_%s" % (new_db_table, field.column)),
                })
| bsd-3-clause |
rustogi/yang-explorer | server/explorer/utils/yang.py | 2 | 11173 | """
Copyright 2015, Cisco Systems, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: Pravin Gohite, Cisco Systems, Inc.
"""
import os
import re
import glob
import logging
import subprocess
from sets import Set
import lxml.etree as ET
from django.conf import settings
from explorer.utils.dygraph import DYGraph
from explorer.utils.misc import ServerSettings
class Parser(object):
    """
    Minimal line-oriented YANG source parser.

    Extracts the module (or submodule) name, the first revision date and the
    names of imported / included modules from a .yang file.  Scanning stops
    at the first revision statement, which in a well-formed module follows
    the linkage (import/include) section.  If the file does not exist or no
    module statement is found, ``self.module`` stays None.
    """
    def __init__(self, filename):
        self.module = None
        self.revision = None
        self.imports = []
        self.includes = []

        if not os.path.exists(filename):
            return

        # Bug fix: the previous identifier pattern used a malformed
        # character class ("[\w+[\-\w+]+") that also matched '[' and '+',
        # and "[sub]*module" accepted garbage such as "ssumodule".
        # YANG identifiers consist of word characters, '-' and '.'.
        module_re = re.compile(r"""^\s*(?:sub)?module\s+['"]?\s*([\w.-]+)\s*['"]?""")
        revision_re = re.compile(r"""^\s*revision\s+['"]?\s*(\w+-\w+-\w+)\s*['"]?""")
        import_re = re.compile(r"""^\s*import\s+['"]?\s*([\w.-]+)\s*['"]?""")
        include_re = re.compile(r"""^\s*include\s+['"]?\s*([\w.-]+)\s*['"]?""")

        with open(filename, 'r') as f:
            for line in f:
                if self.module is None:
                    res = module_re.match(line)
                    if res is not None:
                        self.module = res.group(1).strip()
                        continue

                imp = import_re.match(line)
                if imp is not None:
                    self.imports.append(imp.group(1).strip())
                    continue

                inc = include_re.match(line)
                if inc is not None:
                    self.includes.append(inc.group(1).strip())
                    continue

                res = revision_re.match(line)
                if res is not None:
                    self.revision = res.group(1).strip()
                    # Revision statements follow the linkage section; no
                    # further header data is expected, so stop scanning.
                    break

        if self.module is None:
            logging.error('Could not parse modulename, uploaded file may be corrupted !!')

    def get_filename(self):
        """
        Returns: yang file name with version suffix.
        """
        if self.revision:
            return self.module + '@' + self.revision + '.yang'
        return self.module + '.yang'

    def get_dependency(self):
        """
        Returns: List of dependency (yang imports and includes)
        """
        return self.imports + self.includes

    def __str__(self):
        return self.get_filename() + ' -> ' + str(self.get_dependency())
class Compiler(object):
    """
    Compile yang models into cxml
    """
    @staticmethod
    def compile_cxml(username, session, filename):
        """
        Compile yang model and return tuple (boolean, list-of-errors)
        """
        logging.debug('Compiling %s .. !!' % filename)

        # The 'cxml' output format is provided by a custom pyang plugin
        # shipped in explorer/plugins; bail out early if it is missing.
        plugins = os.path.join(settings.BASE_DIR, 'explorer', 'plugins')
        if not os.path.exists(plugins):
            logging.error('CXML Plugin directory is missing .. !!')
            return False, None

        if subprocess.call(['which', 'pyang']) != 0:
            logging.error('Could not find pyang compiler, please install pyang .. !!')
            return False, None

        basename = os.path.basename(filename)
        modulename = basename.split('.')[0].strip()

        session_dir = ''
        if session is not None:
            # Session uploads compile in the per-session scratch directory.
            session_dir = ServerSettings.session_path(session)
            if not os.path.exists(session_dir):
                logging.error('compile_cxml: Session directory %s not found !!', session_dir)
                return False, ["Session error !!"]
            yangfile = os.path.join(session_dir, modulename + '.yang')
            cxmlfile = os.path.join(session_dir, modulename + '.xml')
        else:
            # Otherwise compile from the user's yang dir into the cxml dir.
            yangfile = os.path.join(ServerSettings.yang_path(username), modulename + '.yang')
            cxmlfile = os.path.join(ServerSettings.cxml_path(username), modulename + '.xml')

        # Verify if yang file exists
        if not os.path.exists(yangfile):
            logging.debug("compile_cxml: " + yangfile + ' not found !!')
            return False, ["Yang module %s not found on server !!" % modulename]

        command = ['pyang', '-f', 'cxml', '--plugindir', 'explorer/plugins', '-p']

        # include path for pyang compilation
        includes = ServerSettings.yang_path(username)
        if session_dir:
            includes += ':' + session_dir
        command.append(includes)

        # include dependent models
        command += Compiler.get_dependencies(username, [filename], session)

        # finally add target module
        command.append(yangfile)

        # create a callback to handle empty output
        def empty_callback(outfile):
            # Write a minimal one-node cxml document so downstream readers
            # always find a parsable file for the module.
            module = os.path.basename(outfile)
            module = module.split('.')[0]
            module = module.split('@')[0]
            node = ET.Element('node')
            node.set('name', module)
            node.set('type', 'module')
            with open(outfile, 'w') as fd:
                fd.write(ET.tostring(node))
            logging.debug('compile_cxml: Empty output from pyang, created default cxml!!')

        return Compiler.invoke_compile(command, cxmlfile, empty_callback)

    @staticmethod
    def compile_pyimport(username, session=None):
        """
        Compile yang model and return tuple (boolean, list-of-errors)
        """
        plugins = os.path.join(settings.BASE_DIR, 'explorer', 'plugins')
        if not os.path.exists(plugins):
            logging.error('CXML Plugin directory is missing .. !!')
            return False, None

        if subprocess.call(['which', 'pyang']) != 0:
            logging.error('Could not find pyang compiler, please install pyang .. !!')
            return False, None

        logging.debug('Rebuilding dependencies for user %s' % username)

        # build include path
        includes = [ServerSettings.yang_path(username)]
        if session is not None:
            session_dir = ServerSettings.session_path(session)
            if not os.path.exists(session_dir):
                logging.error('compile_pyimport: Session directory %s not found !!', session_dir)
                return False, ["Session error !!"]
            includes.append(session_dir)
            depfile = os.path.join(session_dir, 'dependencies.xml')
        else:
            depfile = os.path.join(includes[0], 'dependencies.xml')

        # Every yang file in scope becomes a target of the 'pyimport'
        # plugin, which emits the dependency graph consumed by DYGraph.
        target_yangs = []
        for yang_dir in includes:
            for _file in glob.glob(os.path.join(yang_dir, '*.yang')):
                target_yangs.append(_file)

        if not target_yangs:
            logging.debug('compile_pyimport: No yang file found !!')
            return True, ET.Element('messages')

        command = ['pyang', '-f', 'pyimport', '--plugindir', 'explorer/plugins', '-p']
        command += [':'.join(includes)]
        command += target_yangs
        return Compiler.invoke_compile(command, depfile)

    @staticmethod
    def get_dependencies(username, modules, session):
        """
        return dependencies for given yang models
        """
        session_dir = ''
        logging.debug("get_dependencies: Target Modules " + str(modules))
        if session is not None:
            session_dir = ServerSettings.session_path(session)
            dfile = os.path.join(session_dir, 'dependencies.xml')
        else:
            dfile = os.path.join(ServerSettings.yang_path(username), 'dependencies.xml')

        if not os.path.exists(dfile):
            logging.error('get_dependencies: dependency file %s missing!!', dfile)
            return []

        if session_dir:
            session_files = [os.path.basename(_file) for _file in glob.glob(os.path.join(session_dir, '*.yang'))]

        yang_path = ServerSettings.yang_path(username)
        yang_files = [os.path.basename(_file) for _file in glob.glob(os.path.join(yang_path, '*.yang'))]

        # Collect the union of imports/includes/depends recorded in the
        # dependency graph for each requested module.
        dmodules = Set([])
        dgraph = DYGraph(dfile)
        for m in modules:
            module = dgraph.dependency_module(m)
            if module is None:
                continue
            for name in module.imports:
                dmodules.add(name)
            for name in module.includes:
                dmodules.add(name)
            for name in module.depends:
                dmodules.add(name)

        dmodules_list = list(dmodules)
        deplist = []
        for _file in dmodules_list:
            # prefer freshly uploaded files
            # NOTE(review): with a session, only the session directory is
            # searched (no fallback to the user yang dir) — confirm intended.
            if session_dir:
                depfile = _find_matching(_file, session_dir, session_files)
            else:
                depfile = _find_matching(_file, yang_path, yang_files)

            if depfile is not None:
                deplist.append(depfile)
            else:
                logging.warning("get_dependencies: Dependency (%s) not satisfied, compilation may fail !!" % _file)

        logging.debug("get_dependencies: Computed " + str(deplist))
        return deplist

    @staticmethod
    def invoke_compile(command, outfile, empty_callback=None):
        """
        Invoke pyang compilation and return result
        """
        logging.debug("invoke_compile: CMD: " + str(command))
        p = subprocess.Popen(command,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()

        rc = True
        lines = []
        if stderr:
            # pyang reports both errors and warnings on stderr.
            lines = stderr.split('\n')

        if p.returncode != 0:
            logging.error('invoke_compile: Compile Errors: ' + str(lines))
            # Remove any stale output so failures are not masked later.
            if os.path.exists(outfile):
                os.remove(outfile)
            rc = False
        elif stdout:
            with open(outfile, 'w') as fd:
                fd.write(stdout)
            logging.debug('invoke_compile: %s -> done', outfile)
            logging.debug('invoke_compile: Compile Warning: ' + str(lines))
        else:
            logging.warning('invoke_compile: empty pyang output !!')
            if empty_callback is not None:
                empty_callback(outfile)

        # Wrap the stderr lines into a <messages> element for the caller.
        messages = ET.Element('messages')
        for line in lines:
            msg = ET.Element('message')
            msg.text = line
            messages.append(msg)

        return rc, messages
def _find_matching(target, directory, modules):
logging.debug('Searching target %s in %s' % (target, directory))
if not modules:
modules = [os.path.basename(_file) for _file in glob.glob(os.path.join(directory, '*.yang'))]
for module in modules:
if module == target + '.yang':
return os.path.join(directory, module)
if module.startswith(target + '@'):
return os.path.join(directory, module)
return None
| apache-2.0 |
BaladiDogGames/baladidoggames.github.io | mingw/bin/lib/encodings/cp1254.py | 593 | 13758 | """ Python Character Mapping Codec cp1254 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1254.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp1254 codec: one-shot charmap encode/decode."""

    def encode(self,input,errors='strict'):
        # Maps each character through the module-level ``encoding_table``.
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        # Maps each byte through the module-level ``decoding_table``.
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap encoding needs no cross-call state."""

    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; charmap decoding needs no cross-call state."""

    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer; inherits encode() from Codec and buffering from codecs."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader; inherits decode() from Codec and buffering from codecs."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo the ``encodings`` package uses to register
    this codec under the name 'cp1254'."""
    return codecs.CodecInfo(
        name='cp1254',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\u02dc' # 0x98 -> SMALL TILDE
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u011e' # 0xD0 -> LATIN CAPITAL LETTER G WITH BREVE
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u0130' # 0xDD -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u011f' # 0xF0 -> LATIN SMALL LETTER G WITH BREVE
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u0131' # 0xFD -> LATIN SMALL LETTER DOTLESS I
u'\u015f' # 0xFE -> LATIN SMALL LETTER S WITH CEDILLA
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
# Inverse of ``decoding_table``: maps each Unicode character back to its
# cp1254 byte value, built once at import time.
encoding_table=codecs.charmap_build(decoding_table)
| mit |
styxit/CouchPotatoServer | libs/git/remotes.py | 110 | 2614 | # Copyright (c) 2009, Rotem Yaari <vmalloc@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Rotem Yaari ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Rotem Yaari BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from . import branch
from . import ref_container
class Remote(ref_container.RefContainer):
    """A named remote (e.g. ``origin``) of a git repository."""

    def __init__(self, repo, name, url):
        super(Remote, self).__init__()
        self.repo = repo
        self.name = name
        self.url = url

    def fetch(self):
        """Fetch all refs from this remote."""
        self.repo._executeGitCommandAssertSuccess("fetch %s" % self.name)

    def prune(self):
        """Remove stale remote-tracking branches for this remote."""
        self.repo._executeGitCommandAssertSuccess("remote prune %s" % self.name)

    def __eq__(self, other):
        return (type(self) is type(other)) and (self.name == other.name)

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__; without this,
        # ``remote_a != remote_b`` silently fell back to identity comparison.
        return not self.__eq__(other)

    ###################### For compatibility with RefContainer #####################
    def getBranches(self):
        """Return RegisteredRemoteBranch objects for every branch on this remote."""
        prefix = "%s/" % self.name
        returned = []
        for line in self.repo._getOutputAssertSuccess("branch -r").splitlines():
            # git >= 1.6.3 lists symbolic refs like "origin/HEAD -> origin/master";
            # those are not real branches, so skip them.
            if self.repo.getGitVersion() >= '1.6.3' and ' -> ' in line:
                continue
            line = line.strip()
            if line.startswith(prefix):
                returned.append(branch.RegisteredRemoteBranch(self.repo, self, line[len(prefix):]))
        return returned
| gpl-3.0 |
mrocklin/into | into/backends/sql_csv.py | 1 | 2811 |
from ..regex import RegexDispatcher
from ..append import append
from .csv import CSV
import os
import datashape
import sqlalchemy
import subprocess
copy_command = RegexDispatcher('copy_command')
execute_copy = RegexDispatcher('execute_copy')
@copy_command.register('.*sqlite')
def copy_sqlite(dialect, tbl, csv):
    """Build the shell pipeline that bulk-loads *csv* into *tbl* via the
    sqlite3 command-line tool (its ``.import`` dot-command)."""
    template = """
(echo '.mode csv'; echo '.import {abspath} {tblname}';) | sqlite3 {dbpath}
"""
    return template.format(
        abspath=os.path.abspath(csv.path),
        tblname=tbl.name,
        # SQLAlchemy URLs look like sqlite:///path/to/file.db
        dbpath=str(tbl.bind.url).split('///')[-1],
    )
@execute_copy.register('sqlite')
def execute_copy_sqlite(dialect, engine, statement):
    """Run the sqlite3 shell pipeline built by ``copy_sqlite`` and return
    its stdout (the import happens in the CLI, not through *engine*)."""
    # NOTE(review): shell=True executes a string containing interpolated
    # file/db paths; assumes those paths are trusted local names -- confirm.
    ps = subprocess.Popen(statement, shell=True, stdout=subprocess.PIPE)
    return ps.stdout.read()
@copy_command.register('postgresql')
def copy_postgres(dialect, tbl, csv):
    """Build a PostgreSQL ``COPY ... FROM`` statement that bulk-loads *csv*
    into the table *tbl*.

    CSV options (delimiter, quote, escape, header, encoding) come from the
    CSV descriptor's dialect, with PostgreSQL-friendly defaults.
    """
    abspath = os.path.abspath(csv.path)
    tblname = tbl.name
    format_str = 'csv'
    delimiter = csv.dialect.get('delimiter', ',')
    na_value = ''
    quotechar = csv.dialect.get('quotechar', '"')
    escapechar = csv.dialect.get('escapechar', '\\')
    # bool() renders as TRUE/FALSE in the statement, same as the old
    # ``not not csv.has_header`` but idiomatic.
    header = bool(csv.has_header)
    encoding = csv.encoding or 'utf-8'

    statement = """
        COPY {tblname} FROM '{abspath}'
            (FORMAT {format_str},
             DELIMITER E'{delimiter}',
             NULL '{na_value}',
             QUOTE '{quotechar}',
             ESCAPE '{escapechar}',
             HEADER {header},
             ENCODING '{encoding}');"""
    return statement.format(**locals())
@copy_command.register('mysql.*')
def copy_mysql(dialect, tbl, csv):
    """Build a MySQL ``LOAD DATA INFILE`` statement that bulk-loads *csv*
    into the table *tbl*.

    CSV options (delimiter, quote, escape, line terminator, header,
    encoding) come from the CSV descriptor's dialect.
    """
    mysql_local = ''
    abspath = os.path.abspath(csv.path)
    tblname = tbl.name
    delimiter = csv.dialect.get('delimiter', ',')
    quotechar = csv.dialect.get('quotechar', '"')
    escapechar = csv.dialect.get('escapechar', '\\')
    lineterminator = csv.dialect.get('lineterminator', r'\n\r')
    skiprows = 1 if csv.has_header else 0
    # MySQL charset names never contain a hyphen: the Python spelling
    # 'utf-8' is a syntax error in CHARACTER SET, so normalize to 'utf8'.
    encoding = (csv.encoding or 'utf8').replace('-', '')

    statement = u"""
            LOAD DATA {mysql_local} INFILE '{abspath}'
            INTO TABLE {tblname}
            CHARACTER SET {encoding}
            FIELDS
                TERMINATED BY '{delimiter}'
                ENCLOSED BY '{quotechar}'
                ESCAPED BY '{escapechar}'
            LINES TERMINATED by '{lineterminator}'
            IGNORE {skiprows} LINES;
            """
    return statement.format(**locals())
@execute_copy.register('.*', priority=9)
def execute_copy_all(dialect, engine, statement):
    """Fallback loader: execute *statement* on a raw DBAPI connection and
    commit.  Used when no dialect-specific ``execute_copy`` matches."""
    conn = engine.raw_connection()
    try:
        cursor = conn.cursor()
        try:
            cursor.execute(statement)
            conn.commit()
        finally:
            # Previously leaked: always release the cursor.
            cursor.close()
    finally:
        # Previously leaked: return the pooled connection.
        conn.close()
@append.register(sqlalchemy.Table, CSV)
def append_csv_to_sql_table(tbl, csv, **kwargs):
    """Bulk-load a CSV file into a SQL table using the database's native
    COPY/LOAD mechanism, dispatched on the engine's dialect name."""
    statement = copy_command(tbl.bind.dialect.name, tbl, csv)
    execute_copy(tbl.bind.dialect.name, tbl.bind, statement)
    return tbl
| bsd-3-clause |
emidln/django_roa | env/lib/python2.7/site-packages/django/contrib/gis/geos/prototypes/predicates.py | 623 | 1777 | """
This module houses the GEOS ctypes prototype functions for the
unary and binary predicate operations on geometries.
"""
from ctypes import c_char, c_char_p, c_double
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import check_predicate
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
## Binary & unary predicate functions ##
def binary_predicate(func, *args):
    "For GEOS binary predicate functions."
    # Two geometry pointers, followed by any extra argument types
    # (e.g. a tolerance double for GEOSEqualsExact).
    func.argtypes = [GEOM_PTR, GEOM_PTR] + list(args)
    func.restype = c_char
    func.errcheck = check_predicate
    return func
def unary_predicate(func):
    "For GEOS unary predicate functions."
    # Configure the ctypes prototype: one geometry pointer in, a C char
    # out, validated by the shared predicate error-checker.
    for attr, value in (('argtypes', [GEOM_PTR]),
                        ('restype', c_char),
                        ('errcheck', check_predicate)):
        setattr(func, attr, value)
    return func
## Unary Predicates ##
# Each binding below is a thread-safe GEOS C function wrapped with the
# ctypes prototype configured by unary_predicate/binary_predicate above.
geos_hasz = unary_predicate(GEOSFunc('GEOSHasZ'))
geos_isempty = unary_predicate(GEOSFunc('GEOSisEmpty'))
geos_isring = unary_predicate(GEOSFunc('GEOSisRing'))
geos_issimple = unary_predicate(GEOSFunc('GEOSisSimple'))
geos_isvalid = unary_predicate(GEOSFunc('GEOSisValid'))

## Binary Predicates ##
geos_contains = binary_predicate(GEOSFunc('GEOSContains'))
geos_crosses = binary_predicate(GEOSFunc('GEOSCrosses'))
geos_disjoint = binary_predicate(GEOSFunc('GEOSDisjoint'))
geos_equals = binary_predicate(GEOSFunc('GEOSEquals'))
# GEOSEqualsExact takes an extra double tolerance argument.
geos_equalsexact = binary_predicate(GEOSFunc('GEOSEqualsExact'), c_double)
geos_intersects = binary_predicate(GEOSFunc('GEOSIntersects'))
geos_overlaps = binary_predicate(GEOSFunc('GEOSOverlaps'))
# GEOSRelatePattern takes an extra DE-9IM pattern string argument.
geos_relatepattern = binary_predicate(GEOSFunc('GEOSRelatePattern'), c_char_p)
geos_touches = binary_predicate(GEOSFunc('GEOSTouches'))
geos_within = binary_predicate(GEOSFunc('GEOSWithin'))
| bsd-3-clause |
abdoosh00/edx-platform | common/djangoapps/student/tests/test_email.py | 10 | 13509 | import json
import django.db
import unittest
from student.tests.factories import UserFactory, RegistrationFactory, PendingEmailChangeFactory
from student.views import reactivation_email_for_user, change_email_request, confirm_email_change
from student.models import UserProfile, PendingEmailChange
from django.contrib.auth.models import User, AnonymousUser
from django.test import TestCase, TransactionTestCase
from django.test.client import RequestFactory
from mock import Mock, patch
from django.http import Http404, HttpResponse
from django.conf import settings
from edxmako.shortcuts import render_to_string
from util.request import safe_get_host
from textwrap import dedent
class TestException(Exception):
    """Exception used for testing that nothing will catch explicitly"""
    # Raised from mocks so tests can assert that it propagates uncaught.
    pass
def mock_render_to_string(template_name, context):
    """Return a string that encodes template_name and context.

    The context items are sorted so the result is deterministic regardless
    of dict ordering, letting tests compare rendered output by equality.
    """
    # items() gives the same sorted list as the old Python-2-only
    # iteritems() while also working on Python 3.
    return str((template_name, sorted(context.items())))
def mock_render_to_response(template_name, context):
    """Return an HttpResponse with content that encodes template_name and context"""
    # View confirm_email_change uses @transaction.commit_manually.
    # This simulates any db access in the templates.
    UserProfile.objects.exists()
    return HttpResponse(mock_render_to_string(template_name, context))
class EmailTestMixin(object):
    """Adds useful assertions for testing `email_user`"""

    def assertEmailUser(self, email_user, subject_template, subject_context, body_template, body_context):
        """Assert that `email_user` was used to send an email with the supplied subject and body

        `email_user`: The mock `django.contrib.auth.models.User.email_user` function
            to verify
        `subject_template`: The template to have been used for the subject
        `subject_context`: The context to have been used for the subject
        `body_template`: The template to have been used for the body
        `body_context`: The context to have been used for the body
        """
        # The expected arguments mirror what the view renders, because the
        # tests patch render_to_string with mock_render_to_string.
        email_user.assert_called_with(
            mock_render_to_string(subject_template, subject_context),
            mock_render_to_string(body_template, body_context),
            settings.DEFAULT_FROM_EMAIL
        )

    def append_allowed_hosts(self, hostname):
        """ Append hostname to settings.ALLOWED_HOSTS """
        settings.ALLOWED_HOSTS.append(hostname)
        # Undo the append when the test finishes so settings stay clean.
        self.addCleanup(settings.ALLOWED_HOSTS.pop)
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
@patch('django.contrib.auth.models.User.email_user')
class ReactivationEmailTests(EmailTestMixin, TestCase):
    """Test sending a reactivation email to a user"""

    def setUp(self):
        self.user = UserFactory.create()
        # Deliberately has no Registration row, so reactivation must fail.
        self.unregisteredUser = UserFactory.create()
        self.registration = RegistrationFactory.create(user=self.user)

    def reactivation_email(self, user):
        """
        Send the reactivation email to the specified user,
        and return the response as json data.
        """
        return json.loads(reactivation_email_for_user(user).content)

    def assertReactivateEmailSent(self, email_user):
        """Assert that the correct reactivation email has been sent"""
        context = {
            'name': self.user.profile.name,
            'key': self.registration.activation_key
        }
        self.assertEmailUser(
            email_user,
            'emails/activation_email_subject.txt',
            context,
            'emails/activation_email.txt',
            context
        )

        # Thorough tests for safe_get_host are elsewhere; here we just want a quick URL sanity check
        request = RequestFactory().post('unused_url')
        request.META['HTTP_HOST'] = "aGenericValidHostName"
        self.append_allowed_hosts("aGenericValidHostName")

        body = render_to_string('emails/activation_email.txt', context)
        host = safe_get_host(request)

        self.assertIn(host, body)

    def test_reactivation_email_failure(self, email_user):
        """A send failure should still report failure in the JSON response."""
        self.user.email_user.side_effect = Exception
        response_data = self.reactivation_email(self.user)

        self.assertReactivateEmailSent(email_user)
        self.assertFalse(response_data['success'])

    def test_reactivation_for_unregistered_user(self, email_user):
        """
        Test that trying to send a reactivation email to an unregistered
        user fails without throwing a 500 error.
        """
        response_data = self.reactivation_email(self.unregisteredUser)
        self.assertFalse(response_data['success'])

    def test_reactivation_email_success(self, email_user):
        """Happy path: email sent and success reported."""
        response_data = self.reactivation_email(self.user)
        self.assertReactivateEmailSent(email_user)
        self.assertTrue(response_data['success'])
class EmailChangeRequestTests(TestCase):
    """Test changing a user's email address"""

    def setUp(self):
        self.user = UserFactory.create()
        self.new_email = 'new.email@edx.org'
        self.req_factory = RequestFactory()
        self.request = self.req_factory.post('unused_url', data={
            'password': 'test',
            'new_email': self.new_email
        })
        self.request.user = self.user
        self.user.email_user = Mock()

    def run_request(self, request=None):
        """Execute request and return result parsed as json

        If request isn't passed in, use self.request instead
        """
        if request is None:
            request = self.request
        # BUG FIX: previously this always passed self.request, silently
        # ignoring the `request` argument and the docstring's contract, so
        # callers like check_duplicate_email never exercised their own request.
        response = change_email_request(request)
        return json.loads(response.content)

    def assertFailedRequest(self, response_data, expected_error):
        """Assert that `response_data` indicates a failed request that returns `expected_error`"""
        self.assertFalse(response_data['success'])
        self.assertEquals(expected_error, response_data['error'])
        self.assertFalse(self.user.email_user.called)

    def test_unauthenticated(self):
        """Anonymous users get a 404 and no email is sent."""
        self.request.user = AnonymousUser()
        self.request.user.email_user = Mock()
        with self.assertRaises(Http404):
            change_email_request(self.request)
        self.assertFalse(self.request.user.email_user.called)

    def test_invalid_password(self):
        """A wrong password fails the request."""
        self.request.POST['password'] = 'wrong'
        self.assertFailedRequest(self.run_request(), 'Invalid password')

    def test_invalid_emails(self):
        """Malformed addresses are rejected with a validation error."""
        for email in ('bad_email', 'bad_email@', '@bad_email'):
            self.request.POST['new_email'] = email
            self.assertFailedRequest(self.run_request(), 'Valid e-mail address required.')

    def check_duplicate_email(self, email):
        """Test that a request to change a users email to `email` fails"""
        request = self.req_factory.post('unused_url', data={
            'new_email': email,
            'password': 'test',
        })
        request.user = self.user
        self.assertFailedRequest(self.run_request(request), 'An account with this e-mail already exists.')

    def test_duplicate_email(self):
        """Changing to an address another account already uses fails."""
        UserFactory.create(email=self.new_email)
        self.check_duplicate_email(self.new_email)

    def test_capitalized_duplicate_email(self):
        """Test that we check for email addresses in a case insensitive way"""
        UserFactory.create(email=self.new_email)
        self.check_duplicate_email(self.new_email.capitalize())

    # TODO: Finish testing the rest of change_email_request
@patch('django.contrib.auth.models.User.email_user')
@patch('student.views.render_to_response', Mock(side_effect=mock_render_to_response, autospec=True))
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
class EmailChangeConfirmationTests(EmailTestMixin, TransactionTestCase):
    """Test that confirmation of email change requests function even in the face of exceptions thrown while sending email"""

    def setUp(self):
        self.user = UserFactory.create()
        self.profile = UserProfile.objects.get(user=self.user)
        self.req_factory = RequestFactory()
        self.request = self.req_factory.get('unused_url')
        self.request.user = self.user
        self.user.email_user = Mock()
        self.pending_change_request = PendingEmailChangeFactory.create(user=self.user)
        self.key = self.pending_change_request.activation_key

    def assertRolledBack(self):
        """Assert that no changes to user, profile, or pending email have been made to the db"""
        self.assertEquals(self.user.email, User.objects.get(username=self.user.username).email)
        self.assertEquals(self.profile.meta, UserProfile.objects.get(user=self.user).meta)
        self.assertEquals(1, PendingEmailChange.objects.count())

    def assertFailedBeforeEmailing(self, email_user):
        """Assert that the function failed before emailing a user"""
        self.assertRolledBack()
        self.assertFalse(email_user.called)

    def check_confirm_email_change(self, expected_template, expected_context):
        """Call `confirm_email_change` and assert that the content was generated as expected

        `expected_template`: The name of the template that should have been used
            to generate the content
        `expected_context`: The context dictionary that should have been used to
            generate the content
        """
        response = confirm_email_change(self.request, self.key)
        self.assertEquals(
            mock_render_to_response(expected_template, expected_context).content,
            response.content
        )

    def assertChangeEmailSent(self, email_user):
        """Assert that the correct email was sent to confirm an email change"""
        context = {
            'old_email': self.user.email,
            'new_email': self.pending_change_request.new_email,
        }
        self.assertEmailUser(
            email_user,
            'emails/email_change_subject.txt',
            context,
            'emails/confirm_email_change.txt',
            context
        )

        # Thorough tests for safe_get_host are elsewhere; here we just want a quick URL sanity check
        request = RequestFactory().post('unused_url')
        request.META['HTTP_HOST'] = "aGenericValidHostName"
        self.append_allowed_hosts("aGenericValidHostName")

        body = render_to_string('emails/confirm_email_change.txt', context)
        url = safe_get_host(request)

        self.assertIn(url, body)

    def test_not_pending(self, email_user):
        # An unknown activation key renders the invalid-key page untouched.
        self.key = 'not_a_key'
        self.check_confirm_email_change('invalid_email_key.html', {})
        self.assertFailedBeforeEmailing(email_user)

    def test_duplicate_email(self, email_user):
        # The pending address was taken by another account in the meantime.
        UserFactory.create(email=self.pending_change_request.new_email)
        self.check_confirm_email_change('email_exists.html', {})
        self.assertFailedBeforeEmailing(email_user)

    @unittest.skipIf(settings.FEATURES.get('DISABLE_RESET_EMAIL_TEST', False),
                     dedent("""Skipping Test because CMS has not provided necessary templates for email reset.
                        If LMS tests print this message, that needs to be fixed."""))
    def test_old_email_fails(self, email_user):
        # First send (to the old address) raises, second would succeed:
        # the whole change must roll back.
        email_user.side_effect = [Exception, None]
        self.check_confirm_email_change('email_change_failed.html', {
            'email': self.user.email,
        })
        self.assertRolledBack()
        self.assertChangeEmailSent(email_user)

    @unittest.skipIf(settings.FEATURES.get('DISABLE_RESET_EMAIL_TEST', False),
                     dedent("""Skipping Test because CMS has not provided necessary templates for email reset.
                        If LMS tests print this message, that needs to be fixed."""))
    def test_new_email_fails(self, email_user):
        # First send succeeds, second (to the new address) raises:
        # the whole change must still roll back.
        email_user.side_effect = [None, Exception]
        self.check_confirm_email_change('email_change_failed.html', {
            'email': self.pending_change_request.new_email
        })
        self.assertRolledBack()
        self.assertChangeEmailSent(email_user)

    @unittest.skipIf(settings.FEATURES.get('DISABLE_RESET_EMAIL_TEST', False),
                     dedent("""Skipping Test because CMS has not provided necessary templates for email reset.
                        If LMS tests print this message, that needs to be fixed."""))
    def test_successful_email_change(self, email_user):
        self.check_confirm_email_change('email_change_successful.html', {
            'old_email': self.user.email,
            'new_email': self.pending_change_request.new_email
        })
        self.assertChangeEmailSent(email_user)
        # The old address is archived in the profile's meta JSON ...
        meta = json.loads(UserProfile.objects.get(user=self.user).meta)
        self.assertIn('old_emails', meta)
        self.assertEquals(self.user.email, meta['old_emails'][0][0])
        # ... the account now carries the new address ...
        self.assertEquals(
            self.pending_change_request.new_email,
            User.objects.get(username=self.user.username).email
        )
        # ... and the pending request has been consumed.
        self.assertEquals(0, PendingEmailChange.objects.count())

    @patch('student.views.PendingEmailChange.objects.get', Mock(side_effect=TestException))
    @patch('student.views.transaction.rollback', wraps=django.db.transaction.rollback)
    def test_always_rollback(self, rollback, _email_user):
        # Even an unexpected exception inside the view must trigger an
        # explicit transaction rollback (the view commits manually).
        with self.assertRaises(TestException):
            confirm_email_change(self.request, self.key)

        rollback.assert_called_with()
| agpl-3.0 |
qenter/vlc-android | toolchains/arm/lib/python2.7/lib-tk/FixTk.py | 96 | 2938 | import sys, os
# Delay import _tkinter until we have set TCL_LIBRARY,
# so that Tcl_FindExecutable has a chance to locate its
# encoding directory.
# Unfortunately, we cannot know the TCL_LIBRARY directory
# if we don't know the tcl version, which we cannot find out
# without import Tcl. Fortunately, Tcl will itself look in
# <TCL_LIBRARY>\..\tcl<TCL_VERSION>, so anything close to
# the real Tcl library will do.
# Expand symbolic links on Vista
try:
import ctypes
ctypes.windll.kernel32.GetFinalPathNameByHandleW
except (ImportError, AttributeError):
def convert_path(s):
return s
else:
def convert_path(s):
assert isinstance(s, str) # sys.prefix contains only bytes
udir = s.decode("mbcs")
hdir = ctypes.windll.kernel32.\
CreateFileW(udir, 0x80, # FILE_READ_ATTRIBUTES
1, # FILE_SHARE_READ
None, 3, # OPEN_EXISTING
0x02000000, # FILE_FLAG_BACKUP_SEMANTICS
None)
if hdir == -1:
# Cannot open directory, give up
return s
buf = ctypes.create_unicode_buffer(u"", 32768)
res = ctypes.windll.kernel32.\
GetFinalPathNameByHandleW(hdir, buf, len(buf),
0) # VOLUME_NAME_DOS
ctypes.windll.kernel32.CloseHandle(hdir)
if res == 0:
# Conversion failed (e.g. network location)
return s
s = buf[:res].encode("mbcs")
# Ignore leading \\?\
if s.startswith("\\\\?\\"):
s = s[4:]
if s.startswith("UNC"):
s = "\\" + s[3:]
return s
prefix = os.path.join(sys.prefix,"tcl")
if not os.path.exists(prefix):
# devdir/../tcltk/lib
prefix = os.path.join(sys.prefix, os.path.pardir, "tcltk", "lib")
prefix = os.path.abspath(prefix)
# if this does not exist, no further search is needed
if os.path.exists(prefix):
prefix = convert_path(prefix)
if "TCL_LIBRARY" not in os.environ:
for name in os.listdir(prefix):
if name.startswith("tcl"):
tcldir = os.path.join(prefix,name)
if os.path.isdir(tcldir):
os.environ["TCL_LIBRARY"] = tcldir
# Compute TK_LIBRARY, knowing that it has the same version
# as Tcl
import _tkinter
ver = str(_tkinter.TCL_VERSION)
if "TK_LIBRARY" not in os.environ:
v = os.path.join(prefix, 'tk'+ver)
if os.path.exists(os.path.join(v, "tclIndex")):
os.environ['TK_LIBRARY'] = v
# We don't know the Tix version, so we must search the entire
# directory
if "TIX_LIBRARY" not in os.environ:
for name in os.listdir(prefix):
if name.startswith("tix"):
tixdir = os.path.join(prefix,name)
if os.path.isdir(tixdir):
os.environ["TIX_LIBRARY"] = tixdir
| gpl-2.0 |
BiznetGIO/horizon | openstack_dashboard/dashboards/admin/networks/ports/tests.py | 2 | 27602 | # Copyright 2012 NEC Corporation
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA
from horizon.workflows import views
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
# URL-pattern names / resolved URLs shared by all tests below.
DETAIL_URL = 'horizon:admin:networks:ports:detail'
NETWORKS_INDEX_URL = reverse('horizon:admin:networks:index')
NETWORKS_DETAIL_URL = 'horizon:admin:networks:detail'
class NetworkPortTests(test.BaseAdminViewTests):
    """Tests for the admin network-ports views (detail/create/update/delete).

    Each public ``test_*`` method delegates to a ``_test_*`` helper that
    records the expected neutron API calls with mox, replays them, drives
    the view through the test client, and asserts on the response.
    """
    @test.create_stubs({api.neutron: ('network_get',
                                      'port_get',
                                      'is_extension_supported',)})
    def test_port_detail(self):
        self._test_port_detail()
    @test.create_stubs({api.neutron: ('network_get',
                                      'port_get',
                                      'is_extension_supported',)})
    def test_port_detail_with_mac_learning(self):
        self._test_port_detail(mac_learning=True)
    def _test_port_detail(self, mac_learning=False):
        """Record port/network lookups and verify the detail view renders."""
        port = self.ports.first()
        network_id = self.networks.first().id
        api.neutron.port_get(IsA(http.HttpRequest), port.id)\
            .AndReturn(self.ports.first())
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .MultipleTimes().AndReturn(mac_learning)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'allowed-address-pairs') \
            .MultipleTimes().AndReturn(False)
        api.neutron.network_get(IsA(http.HttpRequest), network_id)\
            .AndReturn(self.networks.first())
        self.mox.ReplayAll()
        res = self.client.get(reverse(DETAIL_URL, args=[port.id]))
        self.assertTemplateUsed(res, 'horizon/common/_detail.html')
        self.assertEqual(res.context['port'].id, port.id)
    @test.create_stubs({api.neutron: ('port_get',)})
    def test_port_detail_exception(self):
        # A neutron error on port_get must redirect back to the index page.
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest), port.id)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        res = self.client.get(reverse(DETAIL_URL, args=[port.id]))
        redir_url = NETWORKS_INDEX_URL
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_get',
                                      'is_extension_supported',)})
    def test_port_create_get(self):
        self._test_port_create_get()
    @test.create_stubs({api.neutron: ('network_get',
                                      'is_extension_supported',)})
    def test_port_create_get_with_mac_learning(self):
        self._test_port_create_get(mac_learning=True)
    @test.create_stubs({api.neutron: ('network_get',
                                      'is_extension_supported',)})
    def test_port_create_get_with_port_security(self):
        self._test_port_create_get(port_security=True)
    def _test_port_create_get(self, mac_learning=False, binding=False,
                              port_security=False):
        """Record extension checks and verify the add-port form renders."""
        network = self.networks.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'binding')\
            .AndReturn(binding)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'port-security')\
            .AndReturn(port_security)
        self.mox.ReplayAll()
        url = reverse('horizon:admin:networks:addport',
                      args=[network.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'admin/networks/ports/create.html')
    @test.create_stubs({api.neutron: ('network_get',
                                      'is_extension_supported',
                                      'port_create',)})
    def test_port_create_post(self):
        self._test_port_create_post()
    @test.create_stubs({api.neutron: ('network_get',
                                      'is_extension_supported',
                                      'port_create',)})
    def test_port_create_post_with_mac_learning(self):
        self._test_port_create_post(mac_learning=True, binding=False)
    @test.create_stubs({api.neutron: ('network_get',
                                      'is_extension_supported',
                                      'port_create',)})
    def test_port_create_post_with_port_security(self):
        self._test_port_create_post(port_security=True)
    def _test_port_create_post(self, mac_learning=False, binding=False,
                               port_security=False):
        """Verify a successful create POST redirects to the network detail.

        Extension kwargs (vnic type, mac learning, port security) are only
        passed to port_create when the corresponding extension is enabled.
        """
        network = self.networks.first()
        port = self.ports.first()
        # network_get is called three times during form setup/validation.
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'binding') \
            .AndReturn(binding)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'port-security')\
            .AndReturn(port_security)
        extension_kwargs = {}
        if binding:
            extension_kwargs['binding__vnic_type'] = \
                port.binding__vnic_type
        if mac_learning:
            extension_kwargs['mac_learning_enabled'] = True
        if port_security:
            extension_kwargs['port_security_enabled'] = True
        api.neutron.port_create(IsA(http.HttpRequest),
                                tenant_id=network.tenant_id,
                                network_id=network.id,
                                name=port.name,
                                admin_state_up=port.admin_state_up,
                                device_id=port.device_id,
                                device_owner=port.device_owner,
                                binding__host_id=port.binding__host_id,
                                mac_address=port.mac_address,
                                **extension_kwargs)\
            .AndReturn(port)
        self.mox.ReplayAll()
        form_data = {'network_id': port.network_id,
                     'network_name': network.name,
                     'name': port.name,
                     'admin_state': port.admin_state_up,
                     'device_id': port.device_id,
                     'device_owner': port.device_owner,
                     'binding__host_id': port.binding__host_id,
                     'mac_address': port.mac_address}
        if binding:
            form_data['binding__vnic_type'] = port.binding__vnic_type
        if mac_learning:
            form_data['mac_state'] = True
        if port_security:
            form_data['port_security_enabled'] = True
        url = reverse('horizon:admin:networks:addport',
                      args=[port.network_id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        redir_url = reverse(NETWORKS_DETAIL_URL, args=[port.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_get',
                                      'is_extension_supported',
                                      'port_create',)})
    def test_port_create_post_with_fixed_ip(self):
        """Create a port with an explicitly specified fixed IP/subnet."""
        network = self.networks.first()
        port = self.ports.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(True)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'port-security')\
            .AndReturn(True)
        extension_kwargs = {}
        extension_kwargs['binding__vnic_type'] = \
            port.binding__vnic_type
        api.neutron.port_create(IsA(http.HttpRequest),
                                tenant_id=network.tenant_id,
                                network_id=network.id,
                                name=port.name,
                                admin_state_up=port.admin_state_up,
                                device_id=port.device_id,
                                device_owner=port.device_owner,
                                binding__host_id=port.binding__host_id,
                                mac_address=port.mac_address,
                                fixed_ips=port.fixed_ips,
                                **extension_kwargs)\
            .AndReturn(port)
        self.mox.ReplayAll()
        form_data = {'network_id': port.network_id,
                     'network_name': network.name,
                     'name': port.name,
                     'admin_state': port.admin_state_up,
                     'device_id': port.device_id,
                     'device_owner': port.device_owner,
                     'binding__host_id': port.binding__host_id,
                     'mac_address': port.mac_address,
                     'specify_ip': 'fixed_ip',
                     'fixed_ip': port.fixed_ips[0]['ip_address'],
                     'subnet_id': port.fixed_ips[0]['subnet_id']}
        form_data['binding__vnic_type'] = port.binding__vnic_type
        form_data['mac_state'] = True
        url = reverse('horizon:admin:networks:addport',
                      args=[port.network_id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        redir_url = reverse(NETWORKS_DETAIL_URL, args=[port.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_get',
                                      'port_create',
                                      'is_extension_supported',)})
    def test_port_create_post_exception(self):
        self._test_port_create_post_exception()
    @test.create_stubs({api.neutron: ('network_get',
                                      'port_create',
                                      'is_extension_supported',)})
    def test_port_create_post_exception_with_mac_learning(self):
        self._test_port_create_post_exception(mac_learning=True)
    @test.create_stubs({api.neutron: ('network_get',
                                      'port_create',
                                      'is_extension_supported',)})
    def test_port_create_post_exception_with_port_security(self):
        self._test_port_create_post_exception(port_security=True)
    def _test_port_create_post_exception(self, mac_learning=False,
                                         binding=False,
                                         port_security=False):
        """Verify a failing port_create is handled and still redirects."""
        network = self.networks.first()
        port = self.ports.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'binding') \
            .AndReturn(binding)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'port-security')\
            .AndReturn(port_security)
        extension_kwargs = {}
        if binding:
            extension_kwargs['binding__vnic_type'] = port.binding__vnic_type
        if mac_learning:
            extension_kwargs['mac_learning_enabled'] = True
        if port_security:
            extension_kwargs['port_security_enabled'] = True
        api.neutron.port_create(IsA(http.HttpRequest),
                                tenant_id=network.tenant_id,
                                network_id=network.id,
                                name=port.name,
                                admin_state_up=port.admin_state_up,
                                device_id=port.device_id,
                                device_owner=port.device_owner,
                                binding__host_id=port.binding__host_id,
                                mac_address=port.mac_address,
                                **extension_kwargs)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        form_data = {'network_id': port.network_id,
                     'network_name': network.name,
                     'name': port.name,
                     'admin_state': port.admin_state_up,
                     'mac_state': True,
                     'device_id': port.device_id,
                     'device_owner': port.device_owner,
                     'binding__host_id': port.binding__host_id,
                     'mac_address': port.mac_address}
        if binding:
            form_data['binding__vnic_type'] = port.binding__vnic_type
        if mac_learning:
            form_data['mac_learning_enabled'] = True
        if port_security:
            form_data['port_security_enabled'] = True
        url = reverse('horizon:admin:networks:addport',
                      args=[port.network_id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        redir_url = reverse(NETWORKS_DETAIL_URL, args=[port.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',)})
    def test_port_update_get(self):
        self._test_port_update_get()
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',)})
    def test_port_update_get_with_mac_learning(self):
        self._test_port_update_get(mac_learning=True)
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',)})
    def test_port_update_get_with_port_security(self):
        self._test_port_update_get(port_security=True)
    def _test_port_update_get(self, mac_learning=False, binding=False,
                              port_security=False):
        """Record lookups and verify the edit-port workflow view renders."""
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest),
                             port.id)\
            .AndReturn(port)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'binding') \
            .AndReturn(binding)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'port-security')\
            .AndReturn(port_security)
        self.mox.ReplayAll()
        url = reverse('horizon:admin:networks:editport',
                      args=[port.network_id, port.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',
                                      'port_update')})
    def test_port_update_post(self):
        self._test_port_update_post()
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',
                                      'port_update')})
    def test_port_update_post_with_mac_learning(self):
        self._test_port_update_post(mac_learning=True)
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',
                                      'port_update')})
    def test_port_update_post_with_port_security(self):
        self._test_port_update_post(port_security=True)
    def _test_port_update_post(self, mac_learning=False, binding=False,
                               port_security=False):
        """Verify a successful update POST redirects to the network detail."""
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest), port.id)\
            .AndReturn(port)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'binding')\
            .MultipleTimes().AndReturn(binding)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .MultipleTimes().AndReturn(mac_learning)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'port-security')\
            .MultipleTimes().AndReturn(port_security)
        extension_kwargs = {}
        if binding:
            extension_kwargs['binding__vnic_type'] = port.binding__vnic_type
        if mac_learning:
            extension_kwargs['mac_learning_enabled'] = True
        if port_security:
            extension_kwargs['port_security_enabled'] = True
        api.neutron.port_update(IsA(http.HttpRequest), port.id,
                                name=port.name,
                                admin_state_up=port.admin_state_up,
                                device_id=port.device_id,
                                device_owner=port.device_owner,
                                binding__host_id=port.binding__host_id,
                                mac_address=port.mac_address,
                                **extension_kwargs)\
            .AndReturn(port)
        self.mox.ReplayAll()
        form_data = {'network_id': port.network_id,
                     'port_id': port.id,
                     'name': port.name,
                     'admin_state': port.admin_state_up,
                     'device_id': port.device_id,
                     'device_owner': port.device_owner,
                     'binding__host_id': port.binding__host_id,
                     'mac_address': port.mac_address}
        if binding:
            form_data['binding__vnic_type'] = port.binding__vnic_type
        if mac_learning:
            form_data['mac_state'] = True
        if port_security:
            form_data['port_security_enabled'] = True
        url = reverse('horizon:admin:networks:editport',
                      args=[port.network_id, port.id])
        res = self.client.post(url, form_data)
        redir_url = reverse(NETWORKS_DETAIL_URL, args=[port.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',
                                      'port_update')})
    def test_port_update_post_exception(self):
        self._test_port_update_post_exception()
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',
                                      'port_update')})
    def test_port_update_post_exception_with_mac_learning(self):
        self._test_port_update_post_exception(mac_learning=True, binding=False)
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',
                                      'port_update')})
    def test_port_update_post_exception_with_port_security(self):
        self._test_port_update_post_exception(port_security=True)
    def _test_port_update_post_exception(self, mac_learning=False,
                                         binding=False,
                                         port_security=False):
        """Verify a failing port_update is handled and still redirects."""
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest), port.id)\
            .AndReturn(port)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'binding')\
            .MultipleTimes().AndReturn(binding)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .MultipleTimes().AndReturn(mac_learning)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'port-security')\
            .MultipleTimes().AndReturn(port_security)
        extension_kwargs = {}
        if binding:
            extension_kwargs['binding__vnic_type'] = port.binding__vnic_type
        if mac_learning:
            extension_kwargs['mac_learning_enabled'] = True
        if port_security:
            extension_kwargs['port_security_enabled'] = True
        api.neutron.port_update(IsA(http.HttpRequest), port.id,
                                name=port.name,
                                admin_state_up=port.admin_state_up,
                                device_id=port.device_id,
                                device_owner=port.device_owner,
                                binding__host_id=port.binding__host_id,
                                mac_address=port.mac_address,
                                **extension_kwargs)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        form_data = {'network_id': port.network_id,
                     'port_id': port.id,
                     'name': port.name,
                     'admin_state': port.admin_state_up,
                     'device_id': port.device_id,
                     'device_owner': port.device_owner,
                     'binding__host_id': port.binding__host_id,
                     'mac_address': port.mac_address}
        if binding:
            form_data['binding__vnic_type'] = port.binding__vnic_type
        if mac_learning:
            form_data['mac_state'] = True
        if port_security:
            form_data['port_security_enabled'] = True
        url = reverse('horizon:admin:networks:editport',
                      args=[port.network_id, port.id])
        res = self.client.post(url, form_data)
        redir_url = reverse(NETWORKS_DETAIL_URL, args=[port.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('port_delete',
                                      'subnet_list',
                                      'port_list',
                                      'show_network_ip_availability',
                                      'is_extension_supported',
                                      'list_dhcp_agent_hosting_networks',)})
    def test_port_delete(self):
        self._test_port_delete()
    @test.create_stubs({api.neutron: ('port_delete',
                                      'subnet_list',
                                      'port_list',
                                      'show_network_ip_availability',
                                      'is_extension_supported',
                                      'list_dhcp_agent_hosting_networks',)})
    def test_port_delete_with_mac_learning(self):
        self._test_port_delete(mac_learning=True)
    def _test_port_delete(self, mac_learning=False):
        """Delete a port via the table action and verify the redirect."""
        port = self.ports.first()
        network_id = port.network_id
        api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.ports.first()])
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest),
            'network-ip-availability').AndReturn(True)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        self.mox.ReplayAll()
        form_data = {'action': 'ports__delete__%s' % port.id}
        url = reverse(NETWORKS_DETAIL_URL, args=[network_id])
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, url)
    @test.create_stubs({api.neutron: ('port_delete',
                                      'subnet_list',
                                      'port_list',
                                      'show_network_ip_availability',
                                      'is_extension_supported',
                                      'list_dhcp_agent_hosting_networks',)})
    def test_port_delete_exception(self):
        self._test_port_delete_exception()
    @test.create_stubs({api.neutron: ('port_delete',
                                      'subnet_list',
                                      'port_list',
                                      'show_network_ip_availability',
                                      'is_extension_supported',
                                      'list_dhcp_agent_hosting_networks')})
    def test_port_delete_exception_with_mac_learning(self):
        self._test_port_delete_exception(mac_learning=True)
    def _test_port_delete_exception(self, mac_learning=False):
        """Verify a failing port_delete is handled and still redirects."""
        port = self.ports.first()
        network_id = port.network_id
        api.neutron.port_delete(IsA(http.HttpRequest), port.id)\
            .AndRaise(self.exceptions.neutron)
        api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.ports.first()])
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest),
            'network-ip-availability').AndReturn(True)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        self.mox.ReplayAll()
        form_data = {'action': 'ports__delete__%s' % port.id}
        url = reverse(NETWORKS_DETAIL_URL, args=[network_id])
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, url)
| apache-2.0 |
Frenzie/youtube-dl | youtube_dl/extractor/youporn.py | 3 | 6918 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urllib_request
from ..utils import (
int_or_none,
str_to_int,
unescapeHTML,
unified_strdate,
)
from ..aes import aes_decrypt_text
class YouPornIE(InfoExtractor):
    """youtube-dl extractor for youporn.com watch pages.

    Extraction strategy: try the JS ``sources`` object first, then three
    progressively weaker fallbacks (inline player variables, download
    links, AES-encrypted quality URLs), then scrape metadata from the page.
    """
    _VALID_URL = r'https?://(?:www\.)?youporn\.com/watch/(?P<id>\d+)/(?P<display_id>[^/?#&]+)'
    _TESTS = [{
        'url': 'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/',
        'md5': '71ec5fcfddacf80f495efa8b6a8d9a89',
        'info_dict': {
            'id': '505835',
            'display_id': 'sex-ed-is-it-safe-to-masturbate-daily',
            'ext': 'mp4',
            'title': 'Sex Ed: Is It Safe To Masturbate Daily?',
            'description': 'Love & Sex Answers: http://bit.ly/DanAndJenn -- Is It Unhealthy To Masturbate Daily?',
            'thumbnail': 're:^https?://.*\.jpg$',
            'uploader': 'Ask Dan And Jennifer',
            'upload_date': '20101221',
            'average_rating': int,
            'view_count': int,
            'comment_count': int,
            'categories': list,
            'tags': list,
            'age_limit': 18,
        },
    }, {
        # Anonymous User uploader
        'url': 'http://www.youporn.com/watch/561726/big-tits-awesome-brunette-on-amazing-webcam-show/?from=related3&al=2&from_id=561726&pos=4',
        'info_dict': {
            'id': '561726',
            'display_id': 'big-tits-awesome-brunette-on-amazing-webcam-show',
            'ext': 'mp4',
            'title': 'Big Tits Awesome Brunette On amazing webcam show',
            'description': 'http://sweetlivegirls.com Big Tits Awesome Brunette On amazing webcam show.mp4',
            'thumbnail': 're:^https?://.*\.jpg$',
            'uploader': 'Anonymous User',
            'upload_date': '20111125',
            'average_rating': int,
            'view_count': int,
            'comment_count': int,
            'categories': list,
            'tags': list,
            'age_limit': 18,
        },
        'params': {
            'skip_download': True,
        },
    }]
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id')
        # The site gates content behind an age check; pre-set the cookie.
        request = compat_urllib_request.Request(url)
        request.add_header('Cookie', 'age_verified=1')
        webpage = self._download_webpage(request, display_id)
        title = self._search_regex(
            [r'(?:video_titles|videoTitle)\s*[:=]\s*(["\'])(?P<title>.+?)\1',
             r'<h1[^>]+class=["\']heading\d?["\'][^>]*>([^<])<'],
            webpage, 'title', group='title')
        links = []
        sources = self._search_regex(
            r'sources\s*:\s*({.+?})', webpage, 'sources', default=None)
        if sources:
            for _, link in re.findall(r'[^:]+\s*:\s*(["\'])(http.+?)\1', sources):
                links.append(link)
        # Fallback #1
        for _, link in re.findall(
                r'(?:videoUrl|videoSrc|videoIpadUrl|html5PlayerSrc)\s*[:=]\s*(["\'])(http.+?)\1', webpage):
            links.append(link)
        # Fallback #2, this also contains extra low quality 180p format
        for _, link in re.findall(r'<a[^>]+href=(["\'])(http.+?)\1[^>]+title=["\']Download [Vv]ideo', webpage):
            links.append(link)
        # Fallback #3, encrypted links
        for _, encrypted_link in re.findall(
                r'encryptedQuality\d{3,4}URL\s*=\s*(["\'])([\da-zA-Z+/=]+)\1', webpage):
            links.append(aes_decrypt_text(encrypted_link, title, 32).decode('utf-8'))
        formats = []
        # set() deduplicates links collected by the overlapping fallbacks.
        for video_url in set(unescapeHTML(link) for link in links):
            f = {
                'url': video_url,
            }
            # Video URL's path looks like this:
            #  /201012/17/505835/720p_1500k_505835/YouPorn%20-%20Sex%20Ed%20Is%20It%20Safe%20To%20Masturbate%20Daily.mp4
            # We will benefit from it by extracting some metadata
            mobj = re.search(r'/(?P<height>\d{3,4})[pP]_(?P<bitrate>\d+)[kK]_\d+/', video_url)
            if mobj:
                height = int(mobj.group('height'))
                bitrate = int(mobj.group('bitrate'))
                f.update({
                    'format_id': '%dp-%dk' % (height, bitrate),
                    'height': height,
                    'tbr': bitrate,
                })
            formats.append(f)
        self._sort_formats(formats)
        # Remaining fields are best-effort (fatal=False / default=None).
        description = self._html_search_regex(
            r'(?s)<div[^>]+class=["\']video-description["\'][^>]*>(.+?)</div>',
            webpage, 'description', default=None)
        thumbnail = self._search_regex(
            r'(?:imageurl\s*=|poster\s*:)\s*(["\'])(?P<thumbnail>.+?)\1',
            webpage, 'thumbnail', fatal=False, group='thumbnail')
        uploader = self._html_search_regex(
            r'(?s)<div[^>]+class=["\']videoInfoBy["\'][^>]*>\s*By:\s*</div>(.+?)</(?:a|div)>',
            webpage, 'uploader', fatal=False)
        upload_date = unified_strdate(self._html_search_regex(
            r'(?s)<div[^>]+class=["\']videoInfoTime["\'][^>]*>(.+?)</div>',
            webpage, 'upload date', fatal=False))
        age_limit = self._rta_search(webpage)
        average_rating = int_or_none(self._search_regex(
            r'<div[^>]+class=["\']videoInfoRating["\'][^>]*>\s*<div[^>]+class=["\']videoRatingPercentage["\'][^>]*>(\d+)%</div>',
            webpage, 'average rating', fatal=False))
        view_count = str_to_int(self._search_regex(
            r'(?s)<div[^>]+class=["\']videoInfoViews["\'][^>]*>.*?([\d,.]+)\s*</div>',
            webpage, 'view count', fatal=False))
        comment_count = str_to_int(self._search_regex(
            r'>All [Cc]omments? \(([\d,.]+)\)',
            webpage, 'comment count', fatal=False))
        def extract_tag_box(title):
            # Pull the link texts out of the tag box labelled *title*.
            tag_box = self._search_regex(
                (r'<div[^>]+class=["\']tagBoxTitle["\'][^>]*>\s*%s\b.*?</div>\s*'
                 '<div[^>]+class=["\']tagBoxContent["\']>(.+?)</div>') % re.escape(title),
                webpage, '%s tag box' % title, default=None)
            if not tag_box:
                return []
            return re.findall(r'<a[^>]+href=[^>]+>([^<]+)', tag_box)
        categories = extract_tag_box('Category')
        tags = extract_tag_box('Tags')
        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'upload_date': upload_date,
            'average_rating': average_rating,
            'view_count': view_count,
            'comment_count': comment_count,
            'categories': categories,
            'tags': tags,
            'age_limit': age_limit,
            'formats': formats,
        }
| unlicense |
blenderben/lolstatbot | lolstatbot.py | 1 | 22559 | # Leauge of Legends Statistics Chat Bot
# A chat bot written in Python that provides match statistics right to your Twitch chat.
# 2015 Benjamin Chu - https://github.com/blenderben
import socket # imports module allowing connection to IRC
import threading # imports module allowing timing functions
import requests # imports module allowing requests
import json
import time
import calendar # imports module allowing epoch time
import ConfigParser # imports module allowing reading of .ini files
import os # for relative pathing
import string # for string manipulation
# from routes import API_ROUTES
class API_ROUTES:
    """URL templates for the Riot API and third-party stat sites.

    All values are ``str.format`` templates; callers fill in the
    region/summoner/key placeholders.
    """
    # --- Riot REST API: summoner & league endpoints ---
    # summoner-v1.4 - get summoner id data
    summoner_url = 'https://{region}.api.pvp.net/api/lol/{region}/v1.4/summoner/by-name/{summonername}?api_key={key}'
    # summoner-v1.4 - summoner mastery data
    summonermastery_url = 'https://{region}.api.pvp.net/api/lol/{region}/v1.4/summoner/{summonerid}/masteries?api_key={key}'
    # league-v2.5 - summoner league data
    summonerleague_url = 'https://{region}.api.pvp.net/api/lol/{region}/v2.5/league/by-summoner/{summonerid}/entry?api_key={key}'
    # --- Riot REST API: game endpoints ---
    # current-game-v1.0 - current game data
    current_url = 'https://{region}.api.pvp.net/observer-mode/rest/consumer/getSpectatorGameInfo/{region_upper}1/{summonerid}?api_key={key}'
    # game-v1.3 - historic game data
    last_url = 'https://{region}.api.pvp.net/api/lol/{region}/v1.3/game/by-summoner/{summonerid}/recent?api_key={key}'
    # --- Riot REST API: static data (champions, runes, masteries, spells) ---
    championstaticdata_url = 'https://global.api.pvp.net/api/lol/static-data/{region}/v1.2/champion/{championid}?champData=all&api_key={key}'
    runestaticdata_url = 'https://global.api.pvp.net/api/lol/static-data/{region}/v1.2/rune/{runeid}?runeData=all&api_key={key}'
    masterystaticdata_url = 'https://global.api.pvp.net/api/lol/static-data/{region}/v1.2/mastery/{masteryid}?masteryData=all&api_key={key}'
    spellstaticdata_url = 'https://global.api.pvp.net/api/lol/static-data/{region}/v1.2/summoner-spell/{spellid}?api_key={key}'
    # --- Third-party profile pages: op.gg ---
    opgg_url = 'http://{region}.op.gg/summoner/userName={summonername}'
    opgg_masteries_url = 'http://{region}.op.gg/summoner/mastery/userName={summonername}'
    opgg_runes_url = 'http://{region}.op.gg/summoner/rune/userName={summonername}'
    opgg_matches_url = 'http://{region}.op.gg/summoner/matches/userName={summonername}'
    opgg_leagues_url = 'http://{region}.op.gg/summoner/league/userName={summonername}'
    opgg_champions_url = 'http://{region}.op.gg/summoner/champions/userName={summonername}'
    # --- Third-party profile pages: LoLNexus / LoLKing / LoLSkill ---
    lolnexus_url = 'http://www.lolnexus.com/{region}/search?name={summonername}&server={region}'
    lolking_url = 'http://www.lolking.net/summoner/{region}/{summonerid}'
    lolskill_url = 'http://www.lolskill.net/summoner/{region}/{summonername}'
# ====== READ CONFIG ======
# config.ini is expected to live next to this script.
Config = ConfigParser.ConfigParser()
Config.read(os.path.dirname(os.path.abspath(__file__)) + '/config.ini')
def ConfigSectionMap(section):
    """Return all options of *section* in config.ini as an option->value dict.

    Options whose lookup raises are stored as ``None`` so callers can
    detect missing/broken settings instead of crashing.
    """
    temp_dict = {}
    options = Config.options(section)
    for option in options:
        try:
            temp_dict[option] = Config.get(section, option)
            if temp_dict[option] == -1:
                # NOTE(review): dead branch in practice -- Config.get returns
                # strings, never -1, and DebugPrint is not defined anywhere
                # in this file; kept for parity with the original intent.
                DebugPrint('skip: %s' % option)
        except Exception:  # was a bare except:; let SystemExit et al. propagate
            print('exception on %s!' % option)
            temp_dict[option] = None
    return temp_dict
# ====== CONNECTION INFO ======
# Set variables for connection
botOwner = ConfigSectionMap('settings')['botowner']
nick = ConfigSectionMap('settings')['nick']
channel = '#' + ConfigSectionMap('settings')['channel']
server = ConfigSectionMap('settings')['server']
port = int(ConfigSectionMap('settings')['port'])
password = ConfigSectionMap('settings')['oauth']
# ====== RIOT API PRELIM DATA ======
api_key = ConfigSectionMap('settings')['api']
# Riot API Information
# Summoner names are normalized (lowercase, no spaces) to match API keys.
summonerName = ConfigSectionMap('settings')['summonername'].lower()
summonerName = summonerName.replace(" ", "")
region = ConfigSectionMap('settings')['region']
summoner_url = API_ROUTES.summoner_url.format(region=region, summonername=summonerName, key=api_key)
# Initial Data Load // Get Summoner ID and Level
# NOTE: this performs a network request at import time; a failure here
# (Riot API down, bad key) aborts the whole script.
summonerName_dict = requests.get(summoner_url).json()
summonerID = str(summonerName_dict[summonerName]['id'])
summonerLevel = str(summonerName_dict[summonerName]['summonerLevel'])
# ====== RIOT API FUNCTIONS ======
def about(ircname):
    """Return the bot's self-introduction line, greeting *ircname*."""
    return ('Hello {0}! I am a League of Legends statistics chat bot. '
            'My creator is blenderben [ https://github.com/blenderben/LoLStatBot ]. '
            'I am currently assigned to summoner {1} [ID:{2}].'
            .format(ircname, summonerName.upper(), getSummonerID()))
def getCommands():
    """Return the help line listing every chat command the bot answers to."""
    command_names = ['!about', '!summoner', '!league', '!last', '!current',
                     '!runes', '!mastery', '!opgg', '!lolnexus', '!lolking',
                     '!lolskill']
    return 'Available commands: [ ' + ', '.join(command_names) + ' ]'
def getSummonerInfo():
    """Return a one-line summary of the tracked summoner plus their op.gg link."""
    return '{0} is summoner level {1}, playing in Region: {2} // {3}'.format(
        summonerName.upper(), getSummonerLevel(), region.upper(), opgg(''))
def opgg(details):
    """Return the op.gg URL for the tracked summoner.

    *details* selects a sub-page ('runes', 'masteries', 'matches',
    'leagues', 'champions'); any other value yields the profile page.
    """
    templates = {
        'runes': API_ROUTES.opgg_runes_url,
        'masteries': API_ROUTES.opgg_masteries_url,
        'matches': API_ROUTES.opgg_matches_url,
        'leagues': API_ROUTES.opgg_leagues_url,
        'champions': API_ROUTES.opgg_champions_url,
    }
    template = templates.get(details, API_ROUTES.opgg_url)
    return template.format(region=region, summonername=summonerName)
def lolnexus():
    """Return the LoLNexus live-game search URL for the tracked summoner."""
    template = API_ROUTES.lolnexus_url
    return template.format(region=region, summonername=summonerName)
def lolking(details):
    """Return the LoLKing URL for the tracked summoner.

    *details* selects a page anchor ('runes', 'masteries', 'matches',
    'rankedstats', 'leagues'); any other value yields the plain profile.
    """
    anchors = {
        'runes': '#runes',
        'masteries': '#masteries',
        'matches': '#matches',
        'rankedstats': '#ranked-stats',
        'leagues': '#leagues',
    }
    base = API_ROUTES.lolking_url.format(region=region, summonerid=summonerID)
    return base + anchors.get(details, '')
def lolskill(details):
    """Return the LoLSkill URL for the tracked summoner.

    *details* selects a sub-path ('runes', 'masteries', 'matches',
    'stats', 'champions'); any other value yields the profile page.
    """
    paths = {
        'runes': '/runes',
        'masteries': '/masteries',
        'matches': '/matches',
        'stats': '/stats',
        'champions': '/champions',
    }
    base = API_ROUTES.lolskill_url.format(region=region.upper(),
                                          summonername=summonerName)
    return base + paths.get(details, '')
def getTeamColor(teamid):
    """Map a Riot team id (100 = blue side, 200 = purple side) to its name."""
    names = {100: 'Blue Team', 200: 'Purple Team'}
    return names.get(teamid, 'No Team')
def getWinLoss(win):
    """Translate the API's boolean win flag into 'WON'/'LOST'; anything else is 'TIED'."""
    outcomes = {True: 'WON', False: 'LOST'}
    return outcomes.get(win, 'TIED')
def getTimePlayed(time):
    """Format a duration in whole seconds as 'H hours & M minutes & S seconds'.

    Fixes:
    - uses floor division (``//``) so the output is identical under Python 2
      and Python 3 (plain ``/`` would produce floats like '1.5 hours' on 3.x);
    - inclusive boundaries: exactly 3600s is reported as '1 hour ...'
      (previously '60 minutes & 0 seconds') and exactly 60s as minutes.
    """
    if time >= 3600:
        hours = time // 3600
        minutes = time % 3600 // 60
        seconds = time % 3600 % 60
        # Only the hour unit was singular/plural-aware in the original output
        # format; keep that convention.
        hour_word = ' hour & ' if hours == 1 else ' hours & '
        return str(hours) + hour_word + str(minutes) + ' minutes & ' + str(seconds) + ' seconds'
    elif time >= 60:
        minutes = time // 60
        seconds = time % 60
        return str(minutes) + ' minutes & ' + str(seconds) + ' seconds'
    else:
        return str(time) + ' seconds'
def getKDA(kills, deaths, assists):
    """Return the KDA ratio '(kills + assists) / deaths' as 'X.Y:1'.

    Returns 'PERFECT' when there are no deaths.

    Fixes:
    - operator precedence bug: the original computed
      ``kills + (assists / deaths)`` instead of ``(kills + assists) / deaths``;
    - callers (e.g. getLast) pass string counts, so ``deaths`` is converted
      to float *before* the ``< 1`` comparison (a string there silently
      compared wrong on Python 2 and raises TypeError on Python 3).
    """
    deaths = float(deaths)
    if deaths < 1:
        return 'PERFECT'
    kda = (float(kills) + float(assists)) / deaths
    kda = round(kda, 2)
    return str(kda) + ':1'
def getChampionbyID(championid):
    """Look up a champion's display name ('Name Title') via the static-data API."""
    url = API_ROUTES.championstaticdata_url.format(
        region=region, championid=int(championid), key=api_key)
    data = requests.get(url).json()
    return data['name'] + " " + data['title']
def getSpellbyID(spellid):
    """Look up a summoner spell's display name via the static-data API."""
    url = API_ROUTES.spellstaticdata_url.format(
        region=region, spellid=int(spellid), key=api_key)
    data = requests.get(url).json()
    return data['name']
# Refresh / Get Summoner ID
def getSummonerID():
    """Fetch the summoner's numeric id from the Riot API and cache it in the
    module-level ``summonerID`` global; returns 1 on any failure."""
    global summonerID
    try:
        tempDict = requests.get(summoner_url).json()
        summonerID = str(tempDict[summonerName]['id'])
        return summonerID
    except:
        # Bare except: any network/JSON/key failure is reported as an outage.
        # NOTE(review): the int sentinel 1 differs in type from the str
        # success value — confirm callers handle that.
        print 'Riot API Down'
        return 1
# Refresh / Get Summoner Level
def getSummonerLevel():
    """Refresh the module-level ``summonerLevel`` from the Riot API and return it as a string."""
    global summonerLevel
    data = requests.get(summoner_url).json()
    summonerLevel = str(data[summonerName]['summonerLevel'])
    return summonerLevel
def getWinRatio(win, loss):
    """Return wins as a percentage of total games, e.g. '66.7%'."""
    total_games = float(win) + float(loss)
    percent = round(win / total_games * 100, 1)
    return str(percent) + '%'
def getStats():
    """Link to the LoLSkill stats page.

    Placeholder until real statistics (avg kills, etc.) are computed locally.
    """
    return lolskill('stats')
def getSummonerMastery():
    """Return the active mastery page's distribution string plus its page name.

    Walks the mastery pages from the API, finds the one flagged 'current',
    and tallies it via getCurrentMastery().
    """
    tempDict = requests.get(API_ROUTES.summonermastery_url.format(region=region, summonerid=summonerID, key=api_key)).json()
    i = 0
    masteryIDList = []
    masteryRank = []
    for pages in tempDict[summonerID]['pages']:
        if bool(pages.get('current')) == True:
            # Active page found: record its name and collect id/rank pairs.
            pageName = tempDict[summonerID]['pages'][i]['name']
            for mastery in tempDict[summonerID]['pages'][i]['masteries']:
                masteryIDList.append(mastery.get('id'))
                masteryRank.append(mastery.get('rank'))
        else:
            # 'i' is advanced only for non-current pages so it tracks the
            # index of the page being inspected.
            # NOTE(review): if no page is flagged current, pageName is never
            # bound and the return below raises NameError — confirm upstream.
            i += 1
    return getCurrentMastery(masteryIDList, masteryRank) + ' // Mastery Name: ' + pageName
def getLeagueInfo():
    """Summarize the summoner's ranked standing: tier, division, queue, W/L, LP."""
    try:
        tempDict = requests.get(API_ROUTES.summonerleague_url.format(region=region, summonerid=summonerID, key=api_key)).json()
        LEAGUE_TIER = string.capwords(tempDict[summonerID][0]['tier'])
        LEAGUE_QUEUE = tempDict[summonerID][0]['queue'].replace('_', ' ')
        LEAGUE_DIVISION = tempDict[summonerID][0]['entries'][0]['division']
        LEAGUE_WINS = tempDict[summonerID][0]['entries'][0]['wins']
        LEAGUE_LOSSES = tempDict[summonerID][0]['entries'][0]['losses']
        LEAGUE_POINTS = tempDict[summonerID][0]['entries'][0]['leaguePoints']
        # LEAGUE_ISVETERAN = tempDict[summonerID][0]['entries'][0]['isHotStreak']
        # LEAGUE_ISHOTSTREAK = tempDict[summonerID][0]['entries'][0]['isVeteran']
        # LEAGUE_ISFRESHBLOOD = tempDict[summonerID][0]['entries'][0]['isFreshBlood']
        # LEAGUE_ISINACTIVE = tempDict[summonerID][0]['entries'][0]['isInactive']
        return summonerName.upper() + ' is ' + LEAGUE_TIER + ' ' + LEAGUE_DIVISION + ' in ' + LEAGUE_QUEUE\
            + ' // ' + str(LEAGUE_WINS) + 'W / ' + str(LEAGUE_LOSSES) + 'L (Win Ratio ' + getWinRatio(LEAGUE_WINS, LEAGUE_LOSSES) + ')'\
            + ' // LP: ' + str(LEAGUE_POINTS)\
            + ' // ' + lolking('leagues')
    except:
        # Bare except: unranked summoners (missing keys) and API errors both
        # fall through to this message.
        return 'Summoner ' + summonerName.upper() + ' has not played any Ranked Solo 5x5 matches'\
            + ' // ' + lolking('leagues')
# Get Current Match Stats
def getCurrent(details):
    """Describe the summoner's in-progress game.

    ``details`` selects the output: 'runes' for rune counts/bonuses,
    'masteries' for the mastery distribution, anything else for a full
    game summary. Falls back to informative messages when the summoner
    is not in a game (the spectator API call raises).
    """
    try:
        current_api_url = API_ROUTES.current_url.format(region=region, region_upper=region.upper(), summonerid=summonerID, key=api_key)
        tempDict = requests.get(current_api_url).json()
        CURRENT_GAMEMODE = tempDict['gameMode']
        CURRENT_GAMELENGTH = tempDict['gameLength']
        CURRENT_GAMETYPE = tempDict['gameType'].replace('_', ' ')
        # Elapsed time = now (epoch UTC) minus the game start (ms -> s),
        # clamped to zero for games that have not registered a start time.
        CURRENT_TIME = calendar.timegm(time.gmtime())
        CURRENT_EPOCHTIME = tempDict['gameStartTime'] / 1000
        if CURRENT_EPOCHTIME <= 0:
            CURRENT_TIMEDIFF = 0
        else:
            CURRENT_TIMEDIFF = CURRENT_TIME - CURRENT_EPOCHTIME
        if CURRENT_TIMEDIFF < 0:
            CURRENT_TIMEDIFF = 0
        runeIDList = []
        runeCount = []
        masteryIDList = []
        masteryRank = []
        i = 0
        # Find this summoner among the participants and collect their
        # team/champion/spells plus rune and mastery selections.
        for participant in tempDict['participants']:
            if int(summonerID) == int(participant.get('summonerId')):
                CURRENT_TEAM = participant.get('teamId')
                CURRENT_CHAMPION = participant.get('championId')
                CURRENT_SPELL1 = participant.get('spell1Id')
                CURRENT_SPELL2 = participant.get('spell2Id')
                for rune in tempDict['participants'][i]['runes']:
                    runeIDList.append(rune.get('runeId'))
                    runeCount.append(rune.get('count'))
                for mastery in tempDict['participants'][i]['masteries']:
                    masteryIDList.append(mastery.get('masteryId'))
                    masteryRank.append(mastery.get('rank'))
            else:
                # 'i' only advances on non-matches so it indexes the
                # participant currently being inspected.
                i += 1
        runeCountOutput = ''
        runeBonusOutput = ''
        for x in range(len(runeIDList)):
            runeCountOutput += ' [' + getCurrentRuneTotal(runeIDList[x], runeCount[x]) + '] '
            runeBonusOutput += ' [' + getCurrentRuneBonusTotal(runeIDList[x], runeCount[x]) + '] '
        masteryOutput = getCurrentMastery(masteryIDList, masteryRank)
        if details == 'runes':
            return 'Current Runes: ' + runeCountOutput\
                + ' // Rune Bonuses: ' + runeBonusOutput\
                + ' // ' + lolskill('runes')
        elif details == 'masteries':
            return 'Current Mastery Distribution: ' + masteryOutput\
                + ' // ' + lolskill('masteries')
        else:
            return summonerName.upper()\
                + ' is currently playing ' + CURRENT_GAMEMODE + ' ' + CURRENT_GAMETYPE\
                + ' with ' + getChampionbyID(CURRENT_CHAMPION)\
                + ' on the ' + getTeamColor(CURRENT_TEAM)\
                + ' // Elapsed Time: ' + getTimePlayed(CURRENT_TIMEDIFF)\
                + ' // Spells Chosen: ' + getSpellbyID(CURRENT_SPELL1) + ' & ' + getSpellbyID(CURRENT_SPELL2)\
                + ' // Mastery Distribution: ' + masteryOutput\
                + ' // Rune Bonuses: ' + runeBonusOutput\
                + ' // ' + lolnexus()
    except:
        # Bare except: no live game (spectator API 404) ends up here.
        if details == 'runes':
            return 'Summoner ' + summonerName.upper() + ' needs to currently be in a game for current Rune data to display'\
                + ' // ' + lolking('runes')
        elif details == 'masteries':
            return 'Current Mastery Distribution: ' + getSummonerMastery() + ' // ' + lolskill('masteries')
        else:
            return 'The summoner ' + summonerName.upper() + ' is not currently in a game.'
def getCurrentMastery(masteryidlist, masteryrank):
    """Tally mastery ranks per tree and return '(offense/defense/utility)'.

    masteryidlist and masteryrank are parallel lists of mastery ids and the
    points invested in each.
    """
    offense = 0
    defense = 0
    utility = 0
    for x in range(len(masteryidlist)):
        masteryID = masteryidlist[x]
        # NOTE(review): one static-data HTTP request per mastery — slow for
        # a full 30-point page; consider caching.
        tempDict = requests.get(API_ROUTES.masterystaticdata_url.format(region=region, masteryid=masteryID, key=api_key)).json()
        masteryTree = tempDict['masteryTree']
        ranks = int(masteryrank[x])
        if masteryTree == 'Offense':
            offense += ranks
        elif masteryTree == 'Defense':
            defense += ranks
        else:
            utility += ranks
    return '(' + str(offense) + '/' + str(defense) + '/' + str(utility) + ')'
def getCurrentRuneTotal(runeid, count):
    """Return '<count>x <rune name>' for a rune id via the static-data API."""
    url = API_ROUTES.runestaticdata_url.format(region=region, runeid=runeid, key=api_key)
    data = requests.get(url).json()
    return str(count) + 'x ' + data['name']
def getCurrentRuneBonusTotal(runeid, count):
    """Return a rune's total stat bonus for ``count`` copies, as display text.

    Parses the rune's free-text description: the leading number is the
    per-rune bonus, which is multiplied by ``count``. Descriptions with a
    '/' describe hybrid (two-part) bonuses; each part is scaled separately,
    except a trailing 'per 5 sec.' clause which is kept verbatim.
    """
    tempDict = requests.get(API_ROUTES.runestaticdata_url.format(region=region, runeid=runeid, key=api_key)).json()
    runeBonus = tempDict['description']
    try:
        # Probe for a '/'-separated second clause; IndexError means a
        # single-bonus rune.
        runeBonus.split('/')[1]
    except IndexError:
        # Single Bonus
        value = runeBonus.split()[0]
        value = value.replace('+', '').replace('%', '').replace('-', '')
        valueCount = float(value) * float(count)
        valueCount = round(valueCount, 2)
        # Drop any parenthesized suffix, title-case, and substitute the
        # scaled total for the per-rune number.
        description = tempDict['description'].split(' (', 1)[0]
        description = string.capwords(description)
        description = description.replace(value, str(valueCount))
        return description
    else:
        # Hybrid Bonus
        value = runeBonus.split()[0]
        value = value.replace('+', '').replace('%', '').replace('-', '')
        valueCount = float(value) * float(count)
        valueCount = round(valueCount, 2)
        firstDescription = runeBonus.split('/')[0].strip()
        firstDescription = firstDescription.split(' (', 1)[0]
        firstDescription = string.capwords(firstDescription)
        firstDescription = firstDescription.replace(value, str(valueCount))
        value = runeBonus.split('/')[1].strip()
        if value.split()[1] == 'sec.':
            # Regeneration runes read '... / 5 sec.' — not a second bonus.
            return firstDescription + ' / 5 Sec.'
        else:
            value = value.split()[0]
            value = value.replace('+', '').replace('%', '').replace('-', '')
            valueCount = float(value) * float(count)
            valueCount = round(valueCount, 2)
            secondDescription = runeBonus.split('/')[1].strip()
            secondDescription = secondDescription.split(' (', 1)[0]
            secondDescription = string.capwords(secondDescription)
            secondDescription = secondDescription.replace(value, str(valueCount))
            return firstDescription + ' / ' + secondDescription
# Get Last Match Stats
def getLast():
    """Summarize the summoner's most recent game from the match-history API.

    Missing optional stats (kills/deaths/assists/wards) default to 0 since
    the API omits keys for zero values.
    """
    tempDict = requests.get(API_ROUTES.last_url.format(region=region, summonerid=summonerID, key=api_key)).json()
    LAST_GAMEID = tempDict['games'][0]['gameId']
    # LAST_GAMEMODE = tempDict['games'][0]['gameMode']
    LAST_SUBTYPE = tempDict['games'][0]['subType'].replace('_', ' ')
    LAST_GAMETYPE = tempDict['games'][0]['gameType'].replace('_GAME', '')
    LAST_TIMEPLAYED = tempDict['games'][0]['stats']['timePlayed']
    LAST_WIN = tempDict['games'][0]['stats']['win']
    LAST_GOLDSPENT = tempDict['games'][0]['stats']['goldSpent']
    LAST_GOLDEARNED = tempDict['games'][0]['stats']['goldEarned']
    LAST_CHAMPION_ID = str(tempDict['games'][0]['championId'])
    LAST_IPEARNED = str(tempDict['games'][0]['ipEarned'])
    LAST_LEVEL = str(tempDict['games'][0]['stats']['level'])
    LAST_SPELL1 = tempDict['games'][0]['spell1']
    LAST_SPELL2 = tempDict['games'][0]['spell2']
    LAST_CHAMPIONSKILLED = str(tempDict['games'][0]['stats'].get('championsKilled', 0))
    LAST_NUMDEATHS = str(tempDict['games'][0]['stats'].get('numDeaths' , 0))
    LAST_ASSISTS = str(tempDict['games'][0]['stats'].get('assists', 0))
    LAST_TOTALDAMAGECHAMPIONS = str(tempDict['games'][0]['stats']['totalDamageDealtToChampions'])
    LAST_MINIONSKILLED = str(tempDict['games'][0]['stats']['minionsKilled'])
    LAST_WARDSPLACED = str(tempDict['games'][0]['stats'].get('wardPlaced', 0))
    output = summonerName.upper() + ' ' + getWinLoss(LAST_WIN)\
        + ' the last ' + LAST_GAMETYPE + ' ' + LAST_SUBTYPE\
        + ' GAME using ' + getChampionbyID(LAST_CHAMPION_ID)\
        + ' // The game took ' + getTimePlayed(LAST_TIMEPLAYED)\
        + ' // ' + getKDA(LAST_CHAMPIONSKILLED, LAST_NUMDEATHS, LAST_ASSISTS) + ' KDA (' + LAST_CHAMPIONSKILLED + '/' + LAST_NUMDEATHS + '/' + LAST_ASSISTS + ')'\
        + ' // ' + getSpellbyID(LAST_SPELL1) + ' & ' + getSpellbyID(LAST_SPELL2) + ' spells were chosen'\
        + ' // ' + LAST_TOTALDAMAGECHAMPIONS + ' damage was dealt to champions'\
        + ' // ' + LAST_MINIONSKILLED + ' minions were killed'\
        + ' // ' + LAST_WARDSPLACED + ' wards were placed'\
        + ' // Spent ' + str(round(float(LAST_GOLDSPENT) / float(LAST_GOLDEARNED)*100, 1)) + '% of Gold earned [' + str(LAST_GOLDSPENT) + '/' + str(LAST_GOLDEARNED) + ']'\
        + ' // ' + LAST_IPEARNED + ' IP was earned'
    # add Official League Match history here
    return output
# ====== IRC FUNCTIONS ======
# Extract Nickname
def getNick(data):
    """Extract the sender's nickname from a raw ':nick!user@host ...' IRC line."""
    nick = data.split('!')[0]
    # Strip the leading ':' (via space then removal, as in the original),
    # then any surrounding whitespace.
    nick = nick.replace(':', ' ').replace(' ', '')
    return nick.strip(' \t\n\r')
def getMessage(data):
    """Return the chat text of a PRIVMSG line, or a status string otherwise.

    Bug fix: ``str.find`` returns -1 (truthy) when the substring is absent,
    so the original ``if data.find('PRIVMSG'):`` treated *every* line —
    including PINGs — as a potential message. Compare against -1 explicitly.
    """
    if data.find('PRIVMSG') != -1:
        try:
            # The text follows '<channel> :'; slice off the ' :' separator.
            message = data.split(channel, 1)[1][2:]
            return message
        except IndexError:
            return 'Index Error'
        except:
            return 'No message'
    else:
        return 'Not a message'
# ====== TIMER FUNCTIONS ======
def printit():
    """Print a heartbeat message and reschedule itself every 60 seconds."""
    # NOTE(review): each call starts a new Timer thread that is never
    # cancelled, so the process cannot exit cleanly — confirm intended.
    threading.Timer(60.0, printit).start()
    print "Hello World"
# ===============================
# queue = 13 #sets variable for anti-spam queue functionality
# Connect to server
print '\nConnecting to: ' + server + ' over port ' + str(port)
irc = socket.socket()
irc.connect((server, port))
# Send variables for connection to Twitch chat
# (Twitch IRC requires PASS before NICK; USER is largely ignored.)
irc.send('PASS ' + password + '\r\n')
irc.send('USER ' + nick + ' 0 * :' + botOwner + '\r\n')
irc.send('NICK ' + nick + '\r\n')
irc.send('JOIN ' + channel + '\r\n')
printit()
# Main Program Loop
while True:
    ircdata = irc.recv(4096) # gets output from IRC server
    ircuser = ircdata.split(':')[1]
    ircuser = ircuser.split('!')[0] # determines the sender of the messages
    # Check messages for any banned words against banned.txt list
    # NOTE(review): banned.txt is re-read on every received line and the
    # file handle is never closed — confirm acceptable for this bot.
    f = open(os.path.dirname(os.path.abspath(__file__)) + '/banned.txt', 'r')
    banned = f.readlines()
    message = getMessage(ircdata).lower().strip(' \t\n\r')
    for i in range(len(banned)):
        if message.find(banned[i].strip(' \t\n\r')) != -1:
            irc.send('PRIVMSG ' + channel + ' :' + getNick(ircdata) + ', banned words are not allowed. A timeout has been issued.' + '\r\n')
            # irc.send('PRIVMSG ' + channel + ' :\/timeout ' + getNick(ircdata) + ' 5\r\n')
            break
        else:
            pass
    print 'DEBUG: ' + ircdata.strip(' \t\n\r')
    print 'USER: ' + getNick(ircdata).strip(' \t\n\r')
    print 'MESSAGE: ' + getMessage(ircdata).strip(' \t\n\r')
    print '======================='
    # Chat-command dispatch: each '!command' maps to one helper above.
    # About
    if ircdata.find(':!about') != -1:
        irc.send('PRIVMSG ' + channel + ' :' + about(getNick(ircdata)) + '\r\n')
    # Commands
    if ircdata.find(':!commands') != -1:
        irc.send('PRIVMSG ' + channel + ' :' + getCommands() + '\r\n')
    # Last
    if ircdata.find(':!last') != -1:
        irc.send('PRIVMSG ' + channel + ' :' + getLast() + '\r\n')
    # Current
    if ircdata.find(':!current') != -1:
        irc.send('PRIVMSG ' + channel + ' :' + getCurrent('games') + '\r\n')
    # Current Runes
    if ircdata.find(':!runes') != -1 or ircdata.find(':!rune') != -1:
        irc.send('PRIVMSG ' + channel + ' :' + getCurrent('runes') + '\r\n')
    # Current Mastery
    if ircdata.find(':!mastery') != -1 or ircdata.find(':!masteries') != -1:
        irc.send('PRIVMSG ' + channel + ' :' + getCurrent('masteries') + '\r\n')
    # Basic Summoner Data
    if ircdata.find(':!summoner') != -1:
        irc.send('PRIVMSG ' + channel + ' :' + getSummonerInfo() + '\r\n')
    # Seaonal League Rank Data
    if ircdata.find(':!league') != -1:
        irc.send('PRIVMSG ' + channel + ' :' + getLeagueInfo() + '\r\n')
    # Stats
    if ircdata.find(':!stats') != -1:
        irc.send('PRIVMSG ' + channel + ' :' + getStats() + '\r\n')
    # Return op.gg
    if ircdata.find(':!opgg') != -1:
        irc.send('PRIVMSG ' + channel + ' :' + opgg('') + '\r\n')
    # Return lolnexus
    if ircdata.find(':!lolnexus') != -1:
        irc.send('PRIVMSG ' + channel + ' :' + lolnexus() + '\r\n')
    # Return lolking
    if ircdata.find(':!lolking') != -1:
        irc.send('PRIVMSG ' + channel + ' :' + lolking('') + '\r\n')
    # Return lolskill
    if ircdata.find(':!lolskill') != -1:
        irc.send('PRIVMSG ' + channel + ' :' + lolskill('') + '\r\n')
    # Keep Alive
    if ircdata.find('PING') != -1:
        irc.send('PONG ' + ircdata.split()[1] + '\r\n')
| mit |
massot/odoo | addons/base_gengo/__init__.py | 377 | 1122 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Openerp sa (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_company
import ir_translation
import wizard
import controller
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
leppa/home-assistant | tests/components/izone/test_config_flow.py | 3 | 2677 | """Tests for iZone."""
from unittest.mock import Mock, patch
import pytest
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.izone.const import DISPATCH_CONTROLLER_DISCOVERED, IZONE
from tests.common import mock_coro
@pytest.fixture
def mock_disco():
    """Mock discovery service with no controllers registered.

    Tests that need a controller present add entries to
    ``pi_disco.controllers`` themselves.
    """
    disco = Mock()
    disco.pi_disco = Mock()
    disco.pi_disco.controllers = {}
    yield disco
def _mock_start_discovery(hass, mock_disco):
    """Build a stand-in for async_start_discovery_service.

    The returned callable fires the controller-discovered dispatcher signal
    immediately and hands back the mocked discovery service.
    """
    from homeassistant.helpers.dispatcher import async_dispatcher_send

    def do_disovered(*args):
        async_dispatcher_send(hass, DISPATCH_CONTROLLER_DISCOVERED, True)
        return mock_coro(mock_disco)

    return do_disovered
async def test_not_found(hass, mock_disco):
    """Test not finding iZone controller: the config flow aborts."""
    with patch(
        "homeassistant.components.izone.config_flow.async_start_discovery_service"
    ) as start_disco, patch(
        "homeassistant.components.izone.config_flow.async_stop_discovery_service",
        return_value=mock_coro(),
    ) as stop_disco:
        start_disco.side_effect = _mock_start_discovery(hass, mock_disco)
        result = await hass.config_entries.flow.async_init(
            IZONE, context={"source": config_entries.SOURCE_USER}
        )

        # Confirmation form
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM

        # No controllers were discovered, so confirming the form aborts.
        result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT

        await hass.async_block_till_done()

        # Discovery must be shut down again after the aborted flow.
        stop_disco.assert_called_once()
async def test_found(hass, mock_disco):
    """Test finding an iZone controller: the config flow creates an entry."""
    # Register one discovered controller so the flow can succeed.
    mock_disco.pi_disco.controllers["blah"] = object()
    with patch(
        "homeassistant.components.izone.climate.async_setup_entry",
        return_value=mock_coro(True),
    ) as mock_setup, patch(
        "homeassistant.components.izone.config_flow.async_start_discovery_service"
    ) as start_disco, patch(
        "homeassistant.components.izone.async_start_discovery_service",
        return_value=mock_coro(),
    ):
        start_disco.side_effect = _mock_start_discovery(hass, mock_disco)
        result = await hass.config_entries.flow.async_init(
            IZONE, context={"source": config_entries.SOURCE_USER}
        )

        # Confirmation form
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM

        result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY

        await hass.async_block_till_done()

        # The created entry must have set up the climate platform.
        mock_setup.assert_called_once()
| apache-2.0 |
FOSSRIT/PyCut | game/events/togglable.py | 4 | 2030 | import pygame
from game.objects import STATE
class Togglable():
    """Mixin giving a rectangular widget click-to-toggle behaviour.

    A toggle completes when a mouse press and its matching release both land
    inside the widget's bounds; the widget then flips between STATE.NORMAL
    and STATE.ACTIVE, invoking the optional onSelect/onDeselect callbacks.

    TODO: modify to handle clicked vs released.
    """
    def __init__(self):
        self.setState(STATE.NORMAL)
        self.location = (0,0)
        self.width = 0
        self.height = 0
        self.onSelect = None #name of function to call when left clicking
        self.onDeselect = None #name of function to call when left clicking
        self.initiated = False #mouse pressed
        self.engaged = False #mouse released
        self.dirty = True;

    def isClicked(self, event):
        """Feed a pygame mouse event; toggles state on a press+release inside bounds."""
        if self.inRange(event.pos[0], event.pos[1]):
            if event.type == pygame.MOUSEBUTTONDOWN:
                self.initiated = True
            if self.initiated and (event.type == pygame.MOUSEBUTTONUP):
                self.engaged = True
        else: #when click or release is detected outside of range make sure this is still not initiated
            self.initiated = False
        if self.engaged:
            # A full press/release cycle inside bounds: flip the state.
            if self.state is STATE.ACTIVE:
                self.deselect()
            else:
                self.select()
            self.initiated = False
            self.engaged = False

    def inRange(self, x, y):
        """Return True if (x, y) lies within this widget's bounding box."""
        # NOTE(review): reads self.x/self.y, which are not set in __init__
        # (only self.location is) — presumably provided by the subclass;
        # confirm.
        if ((self.x <= x <= (self.x + self.width)) and
            (self.y <= y <= (self.y + self.height))):
            return True
        else:
            return False

    def select(self):
        """Move NORMAL -> ACTIVE and fire the onSelect callback, if any."""
        if self.state is STATE.NORMAL:
            self.setState(STATE.ACTIVE)
            if self.onSelect:
                self.onSelect()

    def deselect(self):
        """Move ACTIVE -> NORMAL and fire the onDeselect callback, if any."""
        if self.state is STATE.ACTIVE:
            self.setState(STATE.NORMAL)
            if self.onDeselect:
                self.onDeselect()

    def setOnSelect(self, func):
        self.onSelect = func

    def setOnDeselect(self, func):
        self.onDeselect = func

    def setState(self, state):
        """Set the widget state and mark it dirty for redraw."""
        self.state = state
        self.dirty = True
minhphung171093/GreenERP_V9 | openerp/addons/base/module/wizard/base_export_language.py | 43 | 2692 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import contextlib
import cStringIO
from openerp import tools
from openerp.osv import fields,osv
from openerp.tools.translate import _
from openerp.tools.misc import get_iso_codes
NEW_LANG_KEY = '__new__'
class base_language_export(osv.osv_memory):
    """Transient wizard that exports translation terms to CSV/PO/TGZ.

    Two-step flow: the user picks language/format/modules ('choose' state),
    then downloads the generated file ('get' state).
    """
    _name = "base.language.export"

    def _get_languages(self, cr, uid, context):
        """Return selectable languages: the 'new language' template plus all
        installed translatable languages."""
        lang_obj = self.pool.get('res.lang')
        ids = lang_obj.search(cr, uid, [('translatable', '=', True)])
        langs = lang_obj.browse(cr, uid, ids)
        return [(NEW_LANG_KEY, _('New Language (Empty translation template)'))] + [(lang.code, lang.name) for lang in langs]

    _columns = {
        'name': fields.char('File Name', readonly=True),
        'lang': fields.selection(_get_languages, 'Language', required=True),
        'format': fields.selection([('csv','CSV File'),
                                    ('po','PO File'),
                                    ('tgz', 'TGZ Archive')], 'File Format', required=True),
        'modules': fields.many2many('ir.module.module', 'rel_modules_langexport', 'wiz_id', 'module_id', 'Apps To Export', domain=[('state','=','installed')]),
        'data': fields.binary('File', readonly=True),
        'state': fields.selection([('choose', 'choose'), # choose language
                                   ('get', 'get')]) # get the file
    }
    _defaults = {
        'state': 'choose',
        'lang': NEW_LANG_KEY,
        'format': 'csv',
    }

    def act_getfile(self, cr, uid, ids, context=None):
        """Generate the export file, store it base64-encoded on the wizard,
        and re-open the same form in the 'get' (download) state."""
        this = self.browse(cr, uid, ids, context=context)[0]
        lang = this.lang if this.lang != NEW_LANG_KEY else False
        mods = sorted(map(lambda m: m.name, this.modules)) or ['all']
        with contextlib.closing(cStringIO.StringIO()) as buf:
            tools.trans_export(lang, mods, buf, this.format, cr)
            out = base64.encodestring(buf.getvalue())
        # File name: ISO code for a real language, module name when exactly
        # one module is exported, otherwise 'new'.
        filename = 'new'
        if lang:
            filename = get_iso_codes(lang)
        elif len(mods) == 1:
            filename = mods[0]
        extension = this.format
        # Templates (no language) in PO format use the .pot extension.
        if not lang and extension == 'po':
            extension = 'pot'
        name = "%s.%s" % (filename, extension)
        this.write({ 'state': 'get', 'data': out, 'name': name })
        return {
            'type': 'ir.actions.act_window',
            'res_model': 'base.language.export',
            'view_mode': 'form',
            'view_type': 'form',
            'res_id': this.id,
            'views': [(False, 'form')],
            'target': 'new',
        }
| gpl-3.0 |
pacpac1992/mymockup | src/widgets/tab.py | 1 | 3754 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import wx
import wx.lib.ogl as ogl
class Tab_dialog(wx.Dialog):
    """Dialog for building a tab widget: enter tab names, order them, pick
    the active one, then hand the result to the parent's draw_tab()."""
    def __init__(self, parent, title):
        super(Tab_dialog, self).__init__(parent, title=title,size=(410,220))
        self.parent = parent
        # Text field: pressing Enter appends its content to the list box.
        self.nombre = wx.TextCtrl(self,-1, pos=(10,10), size=(200,30),style=wx.TE_PROCESS_ENTER)
        wx.StaticText(self,-1,'Activo: ',pos=(10,55))
        self.lbl_selection = wx.StaticText(self,-1,'',(60, 55),(150, -1))
        btn = wx.Button(self,-1,'Aceptar',pos=(10,100))
        self.listBox = wx.ListBox(self, -1, (220, 10), (90, 170), [], wx.LB_SINGLE)
        up = wx.Button(self,-1,'Arriba',pos=(320,10))
        down = wx.Button(self,-1,'Abajo',pos=(320,50))
        delete = wx.Button(self,-1,'Eliminar',pos=(320,90))
        btn.Bind(wx.EVT_BUTTON,self.crear_tabs)
        up.Bind(wx.EVT_BUTTON,self.up)
        down.Bind(wx.EVT_BUTTON,self.down)
        delete.Bind(wx.EVT_BUTTON,self.delete)
        self.nombre.Bind(wx.EVT_TEXT_ENTER, self.add_list)
        self.Bind(wx.EVT_LISTBOX, self.onListBox, self.listBox)

    def crear_tabs(self,evt):
        """Confirm: collect tab names as {index: name} and draw the tab."""
        if self.lbl_selection.GetLabel() != '':
            lista = {}
            for i in range(0,self.listBox.GetCount()):
                lista[i] = self.listBox.GetString(i)
            self.parent.draw_tab(None,self.lbl_selection.GetLabel(),lista,False)
            self.Destroy()
        else:
            # No active tab chosen yet.
            wx.MessageBox("Seleccione un item", "Message" ,wx.OK | wx.ICON_ERROR)

    def add_list(self,evt):
        """Append the typed name to the list box and clear the field."""
        n = self.nombre.GetValue()
        self.listBox.Append(n)
        self.nombre.SetValue('')

    def up(self,evt):
        """Move the selected item one position up (by swapping labels)."""
        n = self.listBox.GetCount()
        r = 0
        # Locate the selected item's index by label comparison.
        for i in range(0,n):
            if self.listBox.GetString(i) == self.listBox.GetStringSelection():
                r = i
        dato = self.listBox.GetStringSelection()
        if r != 0:
            r = r - 1
            d = self.listBox.GetString(r)
            self.listBox.SetString(r,dato)
            self.listBox.SetString(r+1,d)

    def down(self,evt):
        """Move the selected item one position down (by swapping labels)."""
        try:
            n = self.listBox.GetCount()
            r = 0
            for i in range(0,n):
                if self.listBox.GetString(i) == self.listBox.GetStringSelection():
                    r = i
            dato = self.listBox.GetStringSelection()
            # NOTE(review): for the last item r+1 is out of range — the
            # try/except below relies on GetString raising; confirm.
            if r <= (n-1):
                r = r + 1
                d = self.listBox.GetString(r)
                self.listBox.SetString(r,dato)
                self.listBox.SetString(r-1,d)
        except Exception as e:
            print(e)

    def delete(self,evt):
        """Remove the selected item from the list box."""
        n = self.listBox.GetCount()
        r = 0
        for i in range(0,n):
            if self.listBox.GetString(i) == self.listBox.GetStringSelection():
                r = i
        self.listBox.Delete(r)

    def onListBox(self,evt):
        """Show the clicked item as the 'active' tab label."""
        self.lbl_selection.SetLabel(evt.GetEventObject().GetStringSelection())
class Tab(ogl.DrawnShape):
    """OGL shape that draws a tab strip: a body rectangle plus one tab per
    entry of ``lista`` ({index: label}), highlighting the ``active`` label.

    Note: uses Python 2 constructs (``xrange``, integer ``/``).
    """
    def __init__(self,lista,active):
        ogl.DrawnShape.__init__(self)
        n = len(lista)
        self.diccionario = lista
        i = self.buscarElemento(lista,active)
        # Total width: 70px per tab + 4px gaps + 50px margin.
        r = (int(n) * 70 + ((int(n)-1))*4)+50
        self.calculate_size(r)
        self.tabs(n,r,i)
        self.labels(n,r)
        self.CalculateSize()

    def calculate_size(self,r):
        """Draw the tab body rectangle, centered, r pixels wide."""
        w = r/2
        self.SetDrawnPen(wx.BLACK_PEN)
        self.SetDrawnBrush(wx.WHITE_BRUSH)
        return self.DrawPolygon([(w, 100), (-w,100),(-w,-70),(w,-70),(w,100)])

    def tabs(self,n,r,i):
        """Draw n tab headers; index i (the active tab) gets the light fill."""
        w = r / 2
        cp4 = 0
        for x in range(0,n):
            sp = 70
            self.SetDrawnPen(wx.BLACK_PEN)
            if x == i:
                self.SetDrawnBrush(wx.Brush(wx.Colour(240, 240, 240)))
            else:
                self.SetDrawnBrush(wx.Brush(wx.Colour(155, 155, 155)))
            self.DrawPolygon([((-w + cp4),-70),((-w + cp4),-100),(((-w+cp4)+sp),-100),(((-w+cp4)+sp),-70)])
            cp4 = cp4 + 74

    def labels(self,items,r):
        """Draw each tab's label text inside its header."""
        w = r / 2
        ran = 0
        for x in xrange(0,items):
            self.SetDrawnTextColour(wx.BLACK)
            self.SetDrawnFont(wx.Font(10, wx.SWISS, wx.NORMAL, wx.NORMAL))
            name = self.diccionario[x]
            self.DrawText(str(name), (-w+ran+10, -90))
            ran = ran + 74

    def buscarElemento(self,lista, elemento):
        """Return the key whose value equals ``elemento`` (None if absent)."""
        for i in range(0,len(lista)):
            if(lista[i] == elemento):
                return i
FedericoCeratto/debian-pymongo | test/version.py | 16 | 2074 | # Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some tools for running tests based on MongoDB server version."""
class Version(tuple):
    """A MongoDB server version as a comparable 4-tuple.

    Elements are (major, minor, micro, modifier); missing parts are padded
    with 0. The modifier encodes pre-release status: -1 for pre-release /
    release-candidate builds, 0 for a plain release, 1 for a '+' build.

    Improvements over the original: the private ``_padded`` helper no longer
    shadows the builtin ``iter`` or uses the ambiguous name ``l``, and pads
    with ``list.extend`` instead of a manual loop.
    """

    def __new__(cls, *version):
        padded_version = cls._padded(version, 4)
        return super(Version, cls).__new__(cls, tuple(padded_version))

    @classmethod
    def _padded(cls, parts, length, padding=0):
        """Return ``parts`` as a list right-padded with ``padding`` to ``length``."""
        padded = list(parts)
        if len(padded) < length:
            padded.extend([padding] * (length - len(padded)))
        return padded

    @classmethod
    def from_string(cls, version_string):
        """Parse strings like '3.0.1', '2.6.0-rc1', '2.4.9-pre-' or '2.4.9+'."""
        mod = 0
        if version_string.endswith("+"):
            version_string = version_string[0:-1]
            mod = 1
        elif version_string.endswith("-pre-"):
            version_string = version_string[0:-5]
            mod = -1
        elif version_string.endswith("-"):
            version_string = version_string[0:-1]
            mod = -1
        # Deal with '-rcX' substrings: a release candidate sorts before the
        # corresponding final release.
        if version_string.find('-rc') != -1:
            version_string = version_string[0:version_string.find('-rc')]
            mod = -1

        version = [int(part) for part in version_string.split(".")]
        version = cls._padded(version, 3)
        version.append(mod)

        return Version(*version)

    @classmethod
    def from_client(cls, client):
        """Build a Version from a live client's server_info() response."""
        return cls.from_string(client.server_info()['version'])

    def at_least(self, *other_version):
        """True if this version is >= the given version components."""
        return self >= Version(*other_version)

    def __str__(self):
        return ".".join(map(str, self))
| apache-2.0 |
ronyfadel/iTerm2 | tests/esctest/tests/sm_title.py | 31 | 2670 | from esc import NUL
import escargs
import esccmd
from esccmd import SET_HEX, QUERY_HEX, SET_UTF8, QUERY_UTF8
import escio
from esctypes import Point, Rect
from escutil import AssertEQ, AssertScreenCharsInRectEqual, GetCursorPosition, GetIconTitle, GetScreenSize, GetWindowTitle, knownBug, optionRequired
class SMTitleTests(object):
    """Tests for the xterm title-mode controls (SM/RM with title params).

    Each test fixes the title *set* encoding (hex or UTF-8) and the title
    *query* encoding independently, then verifies round-trips through the
    window and icon titles.
    """
    @optionRequired(terminal="xterm", option=escargs.XTERM_WINOPS_ENABLED,
                    allowPassWithoutOption=True)
    @knownBug(terminal="iTerm2", reason="SM_Title not implemented.")
    def test_SMTitle_SetHexQueryUTF8(self):
        """Set titles as hex-encoded strings; query them back as UTF-8."""
        esccmd.RM_Title(SET_UTF8, QUERY_HEX)
        esccmd.SM_Title(SET_HEX, QUERY_UTF8)

        esccmd.ChangeWindowTitle("6162")
        AssertEQ(GetWindowTitle(), "ab")
        esccmd.ChangeWindowTitle("61")
        AssertEQ(GetWindowTitle(), "a")

        esccmd.ChangeIconTitle("6162")
        AssertEQ(GetIconTitle(), "ab")
        esccmd.ChangeIconTitle("61")
        AssertEQ(GetIconTitle(), "a")

    @optionRequired(terminal="xterm", option=escargs.XTERM_WINOPS_ENABLED,
                    allowPassWithoutOption=True)
    @knownBug(terminal="iTerm2", reason="SM_Title not implemented.")
    def test_SMTitle_SetUTF8QueryUTF8(self):
        """Set and query titles as plain UTF-8 strings."""
        esccmd.RM_Title(SET_HEX, QUERY_HEX)
        esccmd.SM_Title(SET_UTF8, QUERY_UTF8)

        esccmd.ChangeWindowTitle("ab")
        AssertEQ(GetWindowTitle(), "ab")
        esccmd.ChangeWindowTitle("a")
        AssertEQ(GetWindowTitle(), "a")

        esccmd.ChangeIconTitle("ab")
        AssertEQ(GetIconTitle(), "ab")
        esccmd.ChangeIconTitle("a")
        AssertEQ(GetIconTitle(), "a")

    @optionRequired(terminal="xterm", option=escargs.XTERM_WINOPS_ENABLED,
                    allowPassWithoutOption=True)
    @knownBug(terminal="iTerm2", reason="SM_Title not implemented.")
    def test_SMTitle_SetUTF8QueryHex(self):
        """Set titles as UTF-8; query them back hex-encoded."""
        esccmd.RM_Title(SET_HEX, QUERY_UTF8)
        esccmd.SM_Title(SET_UTF8, QUERY_HEX)

        esccmd.ChangeWindowTitle("ab")
        AssertEQ(GetWindowTitle(), "6162")
        esccmd.ChangeWindowTitle("a")
        AssertEQ(GetWindowTitle(), "61")

        esccmd.ChangeIconTitle("ab")
        AssertEQ(GetIconTitle(), "6162")
        esccmd.ChangeIconTitle("a")
        AssertEQ(GetIconTitle(), "61")

    @optionRequired(terminal="xterm", option=escargs.XTERM_WINOPS_ENABLED,
                    allowPassWithoutOption=True)
    @knownBug(terminal="iTerm2", reason="SM_Title not implemented.")
    def test_SMTitle_SetHexQueryHex(self):
        """Set and query titles hex-encoded end to end."""
        esccmd.RM_Title(SET_UTF8, QUERY_UTF8)
        esccmd.SM_Title(SET_HEX, QUERY_HEX)

        esccmd.ChangeWindowTitle("6162")
        AssertEQ(GetWindowTitle(), "6162")
        esccmd.ChangeWindowTitle("61")
        AssertEQ(GetWindowTitle(), "61")

        esccmd.ChangeIconTitle("6162")
        AssertEQ(GetIconTitle(), "6162")
        esccmd.ChangeIconTitle("61")
        AssertEQ(GetIconTitle(), "61")
| gpl-2.0 |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/operations/_vpn_site_link_connections_operations.py | 1 | 5309 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VpnSiteLinkConnectionsOperations(object):
"""VpnSiteLinkConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Wire up the pipeline client, configuration and (de)serializers
        # supplied by the generated service client; this operations class is
        # not meant to be instantiated directly.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
def get(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
link_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VpnSiteLinkConnection"
"""Retrieves the details of a vpn site link connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the vpn connection.
:type connection_name: str
:param link_connection_name: The name of the vpn connection.
:type link_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnSiteLinkConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.VpnSiteLinkConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnSiteLinkConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'linkConnectionName': self._serialize.url("link_connection_name", link_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnSiteLinkConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}/vpnLinkConnections/{linkConnectionName}'} # type: ignore
| mit |
louietsai/python-for-android | python3-alpha/python3-src/Lib/test/test_dummy_thread.py | 57 | 7159 | """Generic thread tests.
Meant to be used by dummy_thread and thread. To allow for different modules
to be used, test_main() can be called with the module to use as the thread
implementation as its sole argument.
"""
import _dummy_thread as _thread
import time
import queue
import random
import unittest
from test import support
# Delay (in seconds) used by the timing-sensitive tests below.
DELAY = 0 # Set > 0 when testing a module other than _dummy_thread, such as
          # the '_thread' module.
class LockTests(unittest.TestCase):
    """Test lock objects."""

    def setUp(self):
        # Create a fresh lock for each test so no state leaks between tests.
        self.lock = _thread.allocate_lock()

    def test_initlock(self):
        # Locks must start out unlocked.
        self.assertFalse(self.lock.locked(),
                         "Lock object is not initialized unlocked.")

    def test_release(self):
        # Acquiring and then releasing must leave the lock unlocked.
        self.lock.acquire()
        self.lock.release()
        self.assertFalse(self.lock.locked(),
                         "Lock object did not release properly.")

    def test_improper_release(self):
        # Releasing an unlocked lock must raise _thread.error.
        self.assertRaises(_thread.error, self.lock.release)

    def test_cond_acquire_success(self):
        # Non-blocking acquire of an unlocked lock must succeed.
        self.assertTrue(self.lock.acquire(0),
                        "Conditional acquiring of the lock failed.")

    def test_cond_acquire_fail(self):
        # Non-blocking acquire of an already-held lock must return False.
        self.lock.acquire(0)
        self.assertFalse(self.lock.acquire(0),
                         "Conditional acquiring of a locked lock incorrectly "
                         "succeeded.")

    def test_uncond_acquire_success(self):
        # Blocking acquire of an unlocked lock must leave it locked.
        self.lock.acquire()
        self.assertTrue(self.lock.locked(),
                        "Unconditional locking failed.")

    def test_uncond_acquire_return_val(self):
        # A blocking acquire must return the True singleton, not just a truthy value.
        self.assertIs(self.lock.acquire(1), True,
                      "Unconditional locking did not return True.")
        self.assertIs(self.lock.acquire(), True)

    def test_uncond_acquire_blocking(self):
        # A blocking acquire on a held lock must wait until the holder releases.
        def delay_unlock(to_unlock, delay):
            """Hold on to lock for a set amount of time before unlocking."""
            time.sleep(delay)
            to_unlock.release()

        self.lock.acquire()
        start_time = int(time.time())
        _thread.start_new_thread(delay_unlock, (self.lock, DELAY))
        if support.verbose:
            print()
            print("*** Waiting for thread to release the lock "
                  "(approx. %s sec.) ***" % DELAY)
        self.lock.acquire()
        end_time = int(time.time())
        if support.verbose:
            print("done")
        self.assertTrue((end_time - start_time) >= DELAY,
                        "Blocking by unconditional acquiring failed.")
class MiscTests(unittest.TestCase):
    """Miscellaneous tests."""

    def test_exit(self):
        # _thread.exit() must raise SystemExit to terminate the calling thread.
        self.assertRaises(SystemExit, _thread.exit)

    def test_ident(self):
        # Sanity-check _thread.get_ident(): must be a non-zero integer.
        self.assertIsInstance(_thread.get_ident(), int,
                              "_thread.get_ident() returned a non-integer")
        self.assertTrue(_thread.get_ident() != 0,
                        "_thread.get_ident() returned 0")

    def test_LockType(self):
        # Make sure _thread.LockType is the same type as _thread.allocate_lock()
        # returns.
        self.assertIsInstance(_thread.allocate_lock(), _thread.LockType,
                              "_thread.LockType is not an instance of what "
                              "is returned by _thread.allocate_lock()")

    def test_interrupt_main(self):
        # Calling start_new_thread with a function that executes interrupt_main
        # should raise KeyboardInterrupt upon completion (the dummy module runs
        # the "thread" synchronously in the caller).
        def call_interrupt():
            _thread.interrupt_main()

        self.assertRaises(KeyboardInterrupt, _thread.start_new_thread,
                          call_interrupt, tuple())

    def test_interrupt_in_main(self):
        # Make sure that if interrupt_main is called in the main thread,
        # KeyboardInterrupt is raised instantly.
        self.assertRaises(KeyboardInterrupt, _thread.interrupt_main)
class ThreadTests(unittest.TestCase):
    """Test thread creation."""

    def test_arg_passing(self):
        # Make sure that parameter passing works for positional args, kwargs,
        # and a mixture of both.
        def arg_tester(result_queue, arg1=False, arg2=False):
            """Use to test _thread.start_new_thread() passes args properly."""
            # 'result_queue' (not 'queue') avoids shadowing the queue module.
            result_queue.put((arg1, arg2))

        testing_queue = queue.Queue(1)
        _thread.start_new_thread(arg_tester, (testing_queue, True, True))
        result = testing_queue.get()
        self.assertTrue(result[0] and result[1],
                        "Argument passing for thread creation using tuple failed")
        _thread.start_new_thread(arg_tester, tuple(),
                                 {'result_queue': testing_queue,
                                  'arg1': True, 'arg2': True})
        result = testing_queue.get()
        self.assertTrue(result[0] and result[1],
                        "Argument passing for thread creation using kwargs failed")
        _thread.start_new_thread(arg_tester, (testing_queue, True), {'arg2': True})
        result = testing_queue.get()
        self.assertTrue(result[0] and result[1],
                        "Argument passing for thread creation using both tuple"
                        " and kwargs failed")

    def test_multi_creation(self):
        # Make sure multiple threads can be created.
        def queue_mark(result_queue, delay):
            """Wait ``delay`` seconds, then put an ident into ``result_queue``."""
            time.sleep(delay)
            result_queue.put(_thread.get_ident())

        thread_count = 5
        testing_queue = queue.Queue(thread_count)
        if support.verbose:
            print()
            print("*** Testing multiple thread creation "
                  "(will take approx. %s to %s sec.) ***" % (DELAY, thread_count))
        for count in range(thread_count):
            if DELAY:
                local_delay = round(random.random(), 1)
            else:
                local_delay = 0
            _thread.start_new_thread(queue_mark,
                                     (testing_queue, local_delay))
        time.sleep(DELAY)
        if support.verbose:
            print('done')
        self.assertTrue(testing_queue.qsize() == thread_count,
                        "Not all %s threads executed properly after %s sec." %
                        (thread_count, DELAY))
def test_main(imported_module=None):
    """Run LockTests, MiscTests and ThreadTests.

    If *imported_module* is given, it replaces the module-global ``_thread``
    implementation under test, and ``DELAY`` is raised to 2 seconds so the
    timing-sensitive tests have real threads to wait on.
    """
    global _thread, DELAY
    if imported_module:
        _thread = imported_module
        DELAY = 2
    if support.verbose:
        print()
        print("*** Using %s as _thread module ***" % _thread)
    test_cases = (LockTests, MiscTests, ThreadTests)
    support.run_unittest(*test_cases)
if __name__ == '__main__':
    # Running this file directly exercises the default _dummy_thread module.
    test_main()
| apache-2.0 |
caesar2164/edx-platform | lms/djangoapps/lti_provider/tests/test_tasks.py | 1 | 4437 | """
Tests for the LTI outcome service handlers, both in outcomes.py and in tasks.py
"""
import unittest
import ddt
from django.test import TestCase
from mock import patch, MagicMock
from student.tests.factories import UserFactory
from lti_provider.models import GradedAssignment, LtiConsumer, OutcomeService
import lti_provider.tasks as tasks
from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator
class BaseOutcomeTest(TestCase):
    """
    Super type for tests of both the leaf and composite outcome celery tasks.
    """
    def setUp(self):
        super(BaseOutcomeTest, self).setUp()
        # Synthetic course/problem identifiers shared by every fixture below.
        self.course_key = CourseLocator(
            org='some_org',
            course='some_course',
            run='some_run'
        )
        self.usage_key = BlockUsageLocator(
            course_key=self.course_key,
            block_type='problem',
            block_id='block_id'
        )
        self.user = UserFactory.create()
        # LTI consumer / outcome service pair the graded assignment reports to.
        self.consumer = LtiConsumer(
            consumer_name='Lti Consumer Name',
            consumer_key='consumer_key',
            consumer_secret='consumer_secret',
            instance_guid='tool_instance_guid'
        )
        self.consumer.save()
        outcome = OutcomeService(
            lis_outcome_service_url='http://example.com/service_url',
            lti_consumer=self.consumer
        )
        outcome.save()
        self.assignment = GradedAssignment(
            user=self.user,
            course_key=self.course_key,
            usage_key=self.usage_key,
            outcome_service=outcome,
            lis_result_sourcedid='sourcedid',
            version_number=1,
        )
        self.assignment.save()
        # Patch out the network call so tests only verify what would be sent.
        self.send_score_update_mock = self.setup_patch(
            'lti_provider.outcomes.send_score_update', None
        )

    def setup_patch(self, function_name, return_value):
        """
        Patch a method with a given return value, and return the mock
        """
        mock = MagicMock(return_value=return_value)
        new_patch = patch(function_name, new=mock)
        new_patch.start()
        # Undo the patch even if the test fails.
        self.addCleanup(new_patch.stop)
        return mock
@ddt.ddt
class SendLeafOutcomeTest(BaseOutcomeTest):
    """
    Tests for the send_leaf_outcome method in tasks.py
    """

    @ddt.data(
        # (points earned, points possible, expected normalized score)
        (2.0, 2.0, 1.0),
        (2.0, 0.0, 0.0),
        (1, 2, 0.5),
    )
    @ddt.unpack
    def test_outcome_with_score(self, earned, possible, expected):
        tasks.send_leaf_outcome(
            self.assignment.id,
            earned,
            possible
        )
        # The task should forward the normalized score to the outcome service.
        self.send_score_update_mock.assert_called_once_with(self.assignment, expected)
@ddt.ddt
class SendCompositeOutcomeTest(BaseOutcomeTest):
    """
    Tests for the send_composite_outcome method in tasks.py
    """

    def setUp(self):
        super(SendCompositeOutcomeTest, self).setUp()
        # Fake descriptor for the graded subsection the composite score covers.
        self.descriptor = MagicMock()
        self.descriptor.location = BlockUsageLocator(
            course_key=self.course_key,
            block_type='problem',
            block_id='problem',
        )
        # Stub the course-grade factory and the modulestore used by the task.
        self.course_grade = MagicMock()
        self.course_grade_mock = self.setup_patch(
            'lti_provider.tasks.CourseGradeFactory.create', self.course_grade
        )
        self.module_store = MagicMock()
        self.module_store.get_item = MagicMock(return_value=self.descriptor)
        self.check_result_mock = self.setup_patch(
            'lti_provider.tasks.modulestore',
            self.module_store
        )

    @ddt.data(
        # (points earned, points possible, expected normalized score)
        (2.0, 2.0, 1.0),
        (2.0, 0.0, 0.0),
        (1, 2, 0.5),
    )
    @ddt.unpack
    @unittest.skip('until it always passes on Jenkins')
    def test_outcome_with_score_score(self, earned, possible, expected):
        """
        TODO: Figure out why this was failing on Jenkins
        """
        self.course_grade.score_for_module = MagicMock(return_value=(earned, possible))
        tasks.send_composite_outcome(
            self.user.id, unicode(self.course_key), self.assignment.id, 1
        )
        self.send_score_update_mock.assert_called_once_with(self.assignment, expected)

    def test_outcome_with_outdated_version(self):
        # A task carrying a stale version number must not send any update.
        self.assignment.version_number = 2
        self.assignment.save()
        tasks.send_composite_outcome(
            self.user.id, unicode(self.course_key), self.assignment.id, 1
        )
        self.assertEqual(self.course_grade_mock.call_count, 0)
| agpl-3.0 |
vegeclic/django-regularcom | blog/migrations/0001_initial.py | 1 | 14127 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration 0001: create the initial blog app tables.

    NOTE(review): South migrations are largely auto-generated (schemamigration
    --auto); avoid hand-editing the ORM freeze dict below.
    """

    def forwards(self, orm):
        # Adding model 'TaggedItem'
        db.create_table('blog_taggeditem', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('tag', self.gf('django.db.models.fields.SlugField')(max_length=50)),
            ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'], related_name='blog_tags')),
            ('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
        ))
        db.send_create_signal('blog', ['TaggedItem'])

        # Adding model 'CategoryTranslation'
        db.create_table('blog_category_translation', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('language_code', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)),
            ('master', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['blog.Category'], related_name='translations', null=True)),
        ))
        db.send_create_signal('blog', ['CategoryTranslation'])

        # Adding unique constraint on 'CategoryTranslation', fields ['language_code', 'master']
        db.create_unique('blog_category_translation', ['language_code', 'master_id'])

        # Adding model 'Category'
        db.create_table('blog_category', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('date_created', self.gf('django.db.models.fields.DateTimeField')(blank=True, auto_now_add=True)),
            ('date_last_modified', self.gf('django.db.models.fields.DateTimeField')(blank=True, auto_now=True)),
        ))
        db.send_create_signal('blog', ['Category'])

        # Adding model 'ArticleTranslation'
        db.create_table('blog_article_translation', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('body', self.gf('django.db.models.fields.TextField')()),
            ('language_code', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)),
            ('master', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['blog.Article'], related_name='translations', null=True)),
        ))
        db.send_create_signal('blog', ['ArticleTranslation'])

        # Adding unique constraint on 'ArticleTranslation', fields ['language_code', 'master']
        db.create_unique('blog_article_translation', ['language_code', 'master_id'])

        # Adding model 'Article'
        db.create_table('blog_article', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=200)),
            ('enabled', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('date_created', self.gf('django.db.models.fields.DateTimeField')(blank=True, auto_now_add=True)),
            ('date_last_modified', self.gf('django.db.models.fields.DateTimeField')(blank=True, auto_now=True)),
            ('main_image', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, to=orm['common.Image'], related_name='blog_article_main_image', unique=True, null=True)),
            ('title_image', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, to=orm['common.Image'], related_name='blog_article_title_image', unique=True, null=True)),
            ('thumb_image', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, to=orm['common.Image'], related_name='blog_article_thumb_image', unique=True, null=True)),
        ))
        db.send_create_signal('blog', ['Article'])

        # Adding M2M table for field authors on 'Article'
        m2m_table_name = db.shorten_name('blog_article_authors')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('article', models.ForeignKey(orm['blog.article'], null=False)),
            ('author', models.ForeignKey(orm['accounts.author'], null=False))
        ))
        db.create_unique(m2m_table_name, ['article_id', 'author_id'])

        # Adding M2M table for field categories on 'Article'
        m2m_table_name = db.shorten_name('blog_article_categories')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('article', models.ForeignKey(orm['blog.article'], null=False)),
            ('category', models.ForeignKey(orm['blog.category'], null=False))
        ))
        db.create_unique(m2m_table_name, ['article_id', 'category_id'])

        # Adding model 'Comment'
        db.create_table('blog_comment', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('participant', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, to=orm['accounts.Account'], null=True)),
            ('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['blog.Article'])),
            ('body', self.gf('django.db.models.fields.TextField')()),
            ('date_created', self.gf('django.db.models.fields.DateTimeField')(blank=True, auto_now_add=True)),
            ('date_last_modified', self.gf('django.db.models.fields.DateTimeField')(blank=True, auto_now=True)),
        ))
        db.send_create_signal('blog', ['Comment'])

    def backwards(self, orm):
        # Constraints must be dropped before the tables that carry them.
        # Removing unique constraint on 'ArticleTranslation', fields ['language_code', 'master']
        db.delete_unique('blog_article_translation', ['language_code', 'master_id'])

        # Removing unique constraint on 'CategoryTranslation', fields ['language_code', 'master']
        db.delete_unique('blog_category_translation', ['language_code', 'master_id'])

        # Deleting model 'TaggedItem'
        db.delete_table('blog_taggeditem')

        # Deleting model 'CategoryTranslation'
        db.delete_table('blog_category_translation')

        # Deleting model 'Category'
        db.delete_table('blog_category')

        # Deleting model 'ArticleTranslation'
        db.delete_table('blog_article_translation')

        # Deleting model 'Article'
        db.delete_table('blog_article')

        # Removing M2M table for field authors on 'Article'
        db.delete_table(db.shorten_name('blog_article_authors'))

        # Removing M2M table for field categories on 'Article'
        db.delete_table(db.shorten_name('blog_article_categories'))

        # Deleting model 'Comment'
        db.delete_table('blog_comment')

    # Frozen ORM state used by South to materialize 'orm' in forwards/backwards.
    models = {
        'accounts.account': {
            'Meta': {'object_name': 'Account'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now_add': 'True'}),
            'date_last_modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'unique': 'True', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'accounts.author': {
            'Meta': {'object_name': 'Author'},
            'account': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['accounts.Account']", 'unique': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'main_image': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'to': "orm['common.Image']", 'related_name': "'+'", 'unique': 'True', 'null': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '30'})
        },
        'blog.article': {
            'Meta': {'object_name': 'Article'},
            'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['accounts.Author']", 'symmetrical': 'False', 'related_name': "'blog_article_authors'"}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'to': "orm['blog.Category']", 'symmetrical': 'False', 'related_name': "'blog_article_categories'", 'null': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now_add': 'True'}),
            'date_last_modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now': 'True'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'main_image': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'to': "orm['common.Image']", 'related_name': "'blog_article_main_image'", 'unique': 'True', 'null': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200'}),
            'thumb_image': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'to': "orm['common.Image']", 'related_name': "'blog_article_thumb_image'", 'unique': 'True', 'null': 'True'}),
            'title_image': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'to': "orm['common.Image']", 'related_name': "'blog_article_title_image'", 'unique': 'True', 'null': 'True'})
        },
        'blog.articletranslation': {
            'Meta': {'object_name': 'ArticleTranslation', 'unique_together': "[('language_code', 'master')]", 'db_table': "'blog_article_translation'"},
            'body': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'master': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blog.Article']", 'related_name': "'translations'", 'null': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'blog.category': {
            'Meta': {'object_name': 'Category'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now_add': 'True'}),
            'date_last_modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'blog.categorytranslation': {
            'Meta': {'object_name': 'CategoryTranslation', 'unique_together': "[('language_code', 'master')]", 'db_table': "'blog_category_translation'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'master': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blog.Category']", 'related_name': "'translations'", 'null': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'blog.comment': {
            'Meta': {'object_name': 'Comment'},
            'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blog.Article']"}),
            'body': ('django.db.models.fields.TextField', [], {}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now_add': 'True'}),
            'date_last_modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'participant': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'to': "orm['accounts.Account']", 'null': 'True'})
        },
        'blog.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'related_name': "'blog_tags'"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'tag': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
        },
        'common.image': {
            'Meta': {'object_name': 'Image'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'related_name': "'+'"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '200'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'", 'object_name': 'ContentType'},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['blog']
roadmapper/ansible | test/units/modules/network/f5/test_bigip_firewall_dos_vector.py | 22 | 3140 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_firewall_dos_vector import ModuleParameters
from library.modules.bigip_firewall_dos_vector import ModuleManager
from library.modules.bigip_firewall_dos_vector import ArgumentSpec
from library.modules.bigip_firewall_dos_vector import ProtocolDnsManager
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_firewall_dos_vector import ModuleParameters
from ansible.modules.network.f5.bigip_firewall_dos_vector import ModuleManager
from ansible.modules.network.f5.bigip_firewall_dos_vector import ArgumentSpec
from ansible.modules.network.f5.bigip_firewall_dos_vector import ProtocolDnsManager
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
# Directory holding JSON fixtures, plus a module-level parse cache keyed by path.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load the fixture file *name* from ``fixture_path``, caching the result.

    If the file contents parse as JSON, the decoded object is cached and
    returned; otherwise the raw text is used as-is.
    """
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]
    # Context manager guarantees the handle is closed even if read() raises.
    with open(path) as f:
        data = f.read()
    try:
        data = json.loads(data)
    except ValueError:
        # Not JSON (json.JSONDecodeError is a ValueError) -- keep the raw text.
        pass
    fixture_data[path] = data
    return data
class TestParameters(unittest.TestCase):
    """Unit tests for ModuleParameters attribute mapping."""

    def test_module_parameters(self):
        # Raw module arguments should surface unchanged as properties.
        args = dict(
            name='foo',
            state='mitigate'
        )

        p = ModuleParameters(params=args)
        assert p.name == 'foo'
        assert p.state == 'mitigate'
class TestManager(unittest.TestCase):
    """Tests for ModuleManager dispatch to the protocol-specific managers."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create_dns(self, *args):
        # Simulate the Ansible invocation for mitigating a DNS 'aaaa' vector.
        set_module_args(dict(
            name='aaaa',
            state='mitigate',
            profile='foo',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Stub out all device I/O on the protocol-specific manager.
        m1 = ProtocolDnsManager(module=module)
        m1.read_current_from_device = Mock(return_value=[])
        m1.update_on_device = Mock(return_value=True)

        # Route the top-level manager to the stubbed DNS manager; first exists()
        # call reports absent (triggers create), second reports present.
        mm = ModuleManager(module=module)
        mm.get_manager = Mock(return_value=m1)
        mm.exists = Mock(side_effect=[False, True])
        mm.create_on_device = Mock(return_value=True)

        results = mm.exec_module()

        assert results['changed'] is True
azunite/wireshark_clon | tools/make-tap-reg.py | 42 | 5525 | #!/usr/bin/env python
#
# Looks for registration routines in the taps,
# and assembles C code to call all the routines.
#
# This is a Python version of the make-reg-dotc shell script.
# Running the shell script on Win32 is very very slow because of
# all the process-launching that goes on --- multiple greps and
# seds for each input file. I wrote this python version so that
# less processes would have to be started.
#
# Copyright 2010 Anders Broman <anders.broman@ericsson.com>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <gerald@wireshark.org>
# Copyright 1998 Gerald Combs
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import sys
import re
import pickle
from stat import *
#
# The first argument is the directory in which the source files live.
#
srcdir = sys.argv[1]

#
# The second argument is "taps".
#
registertype = sys.argv[2]
if registertype == "taps":
    tmp_filename = "wireshark-tap-register.c-tmp"
    final_filename = "wireshark-tap-register.c"
    cache_filename = "wireshark-tap-register-cache.pkl"
elif registertype == "tshark-taps":
    tmp_filename = "tshark-tap-register.c-tmp"
    final_filename = "tshark-tap-register.c"
    cache_filename = "tshark-tap-register-cache.pkl"
else:
    print("Unknown output type '%s'" % registertype)
    sys.exit(1)

#
# All subsequent arguments are the files to scan.
#
files = sys.argv[3:]

# Create the proper list of filenames: arguments that are not existing paths
# are assumed to be relative to the source directory.
filenames = []
for name in files:
    if os.path.isfile(name):
        filenames.append(name)
    else:
        filenames.append("%s/%s" % (srcdir, name))

if len(filenames) < 1:
    print("No files found")
    sys.exit(1)

# Look through all files, applying the regex to each line.
# If the pattern matches, save the "symbol" section to the
# appropriate array.
regs = {
    'tap_reg': [],
}

# For those that don't know Python, r"" indicates a raw string,
# devoid of Python escapes.
tap_regex0 = r"^(?P<symbol>register_tap_listener_[_A-Za-z0-9]+)\s*\([^;]+$"
tap_regex1 = r"void\s+(?P<symbol>register_tap_listener_[_A-Za-z0-9]+)\s*\([^;]+$"

# This table drives the pattern-matching and symbol-harvesting
patterns = [
    ('tap_reg', re.compile(tap_regex0)),
    ('tap_reg', re.compile(tap_regex1)),
]

# Open our registration symbol cache. A missing or corrupt cache is not an
# error; we simply start from an empty one. 'except Exception' (rather than a
# bare 'except:') keeps KeyboardInterrupt/SystemExit propagating.
cache = None
if cache_filename:
    try:
        with open(cache_filename, 'rb') as cache_file:
            cache = pickle.load(cache_file)
    except Exception:
        cache = {}

# Grep each source file for registration routines, reusing cached results for
# files whose modification time is unchanged.
for filename in filenames:
    # 'src' (not 'file') avoids shadowing the builtin; the context manager
    # guarantees the handle is closed.
    with open(filename) as src:
        cur_mtime = os.fstat(src.fileno())[ST_MTIME]
        if cache and filename in cache:
            cdict = cache[filename]
            if cur_mtime == cdict['mtime']:
                # Unchanged since last run: reuse the cached symbols.
                regs['tap_reg'].extend(cdict['tap_reg'])
                continue
        # We don't have a usable cache entry; start a fresh one.
        if cache is not None:
            cache[filename] = {
                'mtime': cur_mtime,
                'tap_reg': [],
            }
        # Iterate the file lazily instead of materializing it via readlines().
        for line in src:
            for sym_type, regex in patterns:
                match = regex.search(line)
                if match:
                    symbol = match.group("symbol")
                    regs[sym_type].append(symbol)
                    if cache is not None:
                        cache[filename][sym_type].append(symbol)

if cache is not None and cache_filename is not None:
    with open(cache_filename, 'wb') as cache_file:
        pickle.dump(cache, cache_file)

# Make sure we actually processed something
if len(regs['tap_reg']) < 1:
    print("No protocol registrations found")
    sys.exit(1)

# Sort the lists to make them pretty
regs['tap_reg'].sort()

# Emit the generated C file to a temporary name first so a failed run never
# leaves a half-written final file behind.
with open(tmp_filename, "w") as reg_code:
    reg_code.write("/* Do not modify this file. Changes will be overwritten. */\n")
    reg_code.write("/* Generated automatically from %s */\n" % (sys.argv[0]))

    # Make the routine to register all taps
    reg_code.write("""
#include "register.h"

void register_all_tap_listeners(void) {
""")

    for symbol in regs['tap_reg']:
        line = "    {extern void %s (void); %s ();}\n" % (symbol, symbol)
        reg_code.write(line)

    reg_code.write("}\n")

# Remove the old final_file if it exists.
try:
    os.stat(final_filename)
    os.remove(final_filename)
except OSError:
    pass

# Move from tmp file to final file
os.rename(tmp_filename, final_filename)

#
# Editor modelines - http://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 expandtab:
# :indentSize=4:noTabs=true:
#
CiscoSystems/vespa | neutron/plugins/nec/ofc_driver_base.py | 5 | 4998 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
# @author: Akihiro MOTOKI
from abc import ABCMeta, abstractmethod
import six
@six.add_metaclass(ABCMeta)
class OFCDriverBase(object):
    """OpenFlow Controller (OFC) Driver Specification.

    OFCDriverBase defines the minimum set of methods required by this plugin.
    It would be better that other methods like update_* are implemented.
    """

    @abstractmethod
    def create_tenant(self, description, tenant_id=None):
        """Create a new tenant at OpenFlow Controller.

        :param description: A description of this tenant.
        :param tenant_id: A hint of OFC tenant ID.
            A driver could use this id as an OFC id or ignore it.
        :returns: ID of the tenant created at OpenFlow Controller.
        :raises: neutron.plugin.nec.common.exceptions.OFCException
        """
        pass

    @abstractmethod
    def delete_tenant(self, ofc_tenant_id):
        """Delete a tenant at OpenFlow Controller.

        :raises: neutron.plugin.nec.common.exceptions.OFCException
        """
        pass

    @abstractmethod
    def create_network(self, ofc_tenant_id, description, network_id=None):
        """Create a new network on specified OFC tenant at OpenFlow Controller.

        :param ofc_tenant_id: an OFC tenant ID in which a new network belongs.
        :param description: A description of this network.
        :param network_id: A hint of an ID of OFC network.
        :returns: ID of the network created at OpenFlow Controller.
            ID returned must be unique in the OpenFlow Controller.
            If a network is identified in conjunction with other information
            such as a tenant ID, such information should be included in the ID.
        :raises: neutron.plugin.nec.common.exceptions.OFCException
        """
        pass

    @abstractmethod
    def delete_network(self, ofc_network_id):
        """Delete a network at OpenFlow Controller.

        :raises: neutron.plugin.nec.common.exceptions.OFCException
        """
        pass

    @abstractmethod
    def create_port(self, ofc_network_id, portinfo,
                    port_id=None):
        """Create a new port on specified network at OFC.

        :param ofc_network_id: an OFC network ID to which a new port belongs.
        :param portinfo: An OpenFlow information of this port.
            {'datapath_id': Switch ID that a port connected.
             'port_no': Port Number that a port connected on a Switch.
             'vlan_id': VLAN ID that a port tagging.
             'mac': Mac address.
            }
        :param port_id: A hint of an ID of OFC port.
            ID returned must be unique in the OpenFlow Controller.
            If a port is identified in combination with a network or
            a tenant, such information should be included in the ID.
        :returns: ID of the port created at OpenFlow Controller.
        :raises: neutron.plugin.nec.common.exceptions.OFCException
        """
        pass

    @abstractmethod
    def delete_port(self, ofc_port_id):
        """Delete a port at OpenFlow Controller.

        :raises: neutron.plugin.nec.common.exceptions.OFCException
        """
        pass

    @abstractmethod
    def convert_ofc_tenant_id(self, context, ofc_tenant_id):
        """Convert old-style ofc tenant id to new-style one.

        :param context: neutron context object
        :param ofc_tenant_id: ofc_tenant_id to be converted
        """
        pass

    @abstractmethod
    def convert_ofc_network_id(self, context, ofc_network_id,
                               tenant_id):
        """Convert old-style ofc network id to new-style one.

        :param context: neutron context object
        :param ofc_network_id: ofc_network_id to be converted
        :param tenant_id: neutron tenant_id of the network
        """
        pass

    @abstractmethod
    def convert_ofc_port_id(self, context, ofc_port_id,
                            tenant_id, network_id):
        """Convert old-style ofc port id to new-style one.

        :param context: neutron context object
        :param ofc_port_id: ofc_port_id to be converted
        :param tenant_id: neutron tenant_id of the port
        :param network_id: neutron network_id of the port
        """
        pass
| apache-2.0 |
aam-at/tensorflow | tensorflow/python/data/experimental/ops/threading_options.py | 27 | 1868 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for controlling threading in `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import options
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.ThreadingOptions")
class ThreadingOptions(options.OptionsBase):
  """Represents options for dataset threading.

  You can set the threading options of a dataset through the
  `experimental_threading` property of `tf.data.Options`; the property is
  an instance of `tf.data.experimental.ThreadingOptions`.

  ```python
  options = tf.data.Options()
  options.experimental_threading.private_threadpool_size = 10
  dataset = dataset.with_options(options)
  ```
  """

  # NOTE(review): unset options are presumably resolved to runtime defaults
  # by the tf.data runtime — confirm in options.OptionsBase / create_option.
  max_intra_op_parallelism = options.create_option(
      name="max_intra_op_parallelism",
      ty=int,
      docstring=
      "If set, it overrides the maximum degree of intra-op parallelism.")

  private_threadpool_size = options.create_option(
      name="private_threadpool_size",
      ty=int,
      docstring=
      "If set, the dataset will use a private threadpool of the given size.")
| apache-2.0 |
haroldl/homeworklog | django/contrib/admin/models.py | 228 | 2207 | from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
from django.contrib.admin.util import quote
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode
from django.utils.safestring import mark_safe
# Values stored in LogEntry.action_flag to record which kind of admin
# action the log entry describes.
ADDITION = 1
CHANGE = 2
DELETION = 3
class LogEntryManager(models.Manager):
    """Manager providing a shortcut for recording admin log entries."""

    def log_action(self, user_id, content_type_id, object_id, object_repr, action_flag, change_message=''):
        """Create and persist a LogEntry describing a single admin action.

        The first two positional ``None`` values let the model fill in the
        auto-generated primary key and the auto-now ``action_time``. The
        object representation is truncated to the field's 200-char limit.
        """
        entry = self.model(
            None,
            None,
            user_id,
            content_type_id,
            smart_unicode(object_id),
            object_repr[:200],
            action_flag,
            change_message,
        )
        entry.save()
class LogEntry(models.Model):
    """Audit record of a single action performed in the Django admin
    (an addition, change or deletion of some object)."""
    action_time = models.DateTimeField(_('action time'), auto_now=True)
    user = models.ForeignKey(User)
    # content_type/object_id form a manual generic reference to the edited
    # object; both are nullable so entries survive model deletion.
    content_type = models.ForeignKey(ContentType, blank=True, null=True)
    object_id = models.TextField(_('object id'), blank=True, null=True)
    object_repr = models.CharField(_('object repr'), max_length=200)
    # One of the module-level ADDITION / CHANGE / DELETION constants.
    action_flag = models.PositiveSmallIntegerField(_('action flag'))
    change_message = models.TextField(_('change message'), blank=True)
    objects = LogEntryManager()
    class Meta:
        verbose_name = _('log entry')
        verbose_name_plural = _('log entries')
        db_table = 'django_admin_log'
        # Most recent actions first.
        ordering = ('-action_time',)
    def __repr__(self):
        return smart_unicode(self.action_time)
    def is_addition(self):
        return self.action_flag == ADDITION
    def is_change(self):
        return self.action_flag == CHANGE
    def is_deletion(self):
        return self.action_flag == DELETION
    def get_edited_object(self):
        "Returns the edited object represented by this log entry"
        return self.content_type.get_object_for_this_type(pk=self.object_id)
    def get_admin_url(self):
        """
        Returns the admin URL to edit the object represented by this log entry.
        This is relative to the Django admin index page.
        """
        if self.content_type and self.object_id:
            return mark_safe(u"%s/%s/%s/" % (self.content_type.app_label, self.content_type.model, quote(self.object_id)))
        return None
neoz/zer0m0n | signatures/network_irc.py | 6 | 1128 | # Copyright (C) 2013 Claudio "nex" Guarnieri (@botherder)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class NetworkIRC(Signature):
    """Flags analyses whose captured network traffic contains IRC activity."""

    name = "network_irc"
    description = "Connects to an IRC server, possibly part of a botnet"
    severity = 3
    categories = ["irc"]
    authors = ["nex"]
    minimum = "0.6"

    def run(self):
        # Fire only when the capture recorded at least one IRC session.
        network = self.results["network"]
        if "irc" not in network:
            return False
        return len(network["irc"]) > 0
| gpl-3.0 |
maxwward/SCOPEBak | askbot/migrations/0059_auto__add_field_thread_view_count.py | 2 | 26945 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the denormalized ``view_count`` counter
    column to the ``askbot_thread`` table."""

    def forwards(self, orm):
        """Apply the migration (add the column with a default of 0)."""
        # Adding field 'Thread.view_count'
        db.add_column('askbot_thread', 'view_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0), keep_default=False)

    def backwards(self, orm):
        """Revert the migration (drop the column)."""
        # Deleting field 'Thread.view_count'
        db.delete_column('askbot_thread', 'view_count')

    # Frozen ORM state auto-generated by South. Do not edit by hand.
    models = {
        'askbot.activity': {
            'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
            'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'exercise': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Exercise']", 'null': 'True'}),
            'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
            'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
            'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'askbot.activityauditstatus': {
            'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
            'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'askbot.anonymousproblem': {
            'Meta': {'object_name': 'AnonymousProblem'},
            'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_problems'", 'to': "orm['askbot.Exercise']"}),
            'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
            'text': ('django.db.models.fields.TextField', [], {}),
            'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'askbot.anonymousexercise': {
            'Meta': {'object_name': 'AnonymousExercise'},
            'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
            'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
            'text': ('django.db.models.fields.TextField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'askbot.problem': {
            'Meta': {'object_name': 'Problem', 'db_table': "u'problem'"},
            'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'problems'", 'to': "orm['auth.User']"}),
            'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_problems'", 'null': 'True', 'to': "orm['auth.User']"}),
            'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_problems'", 'null': 'True', 'to': "orm['auth.User']"}),
            'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_problems'", 'null': 'True', 'to': "orm['auth.User']"}),
            'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'problems'", 'to': "orm['askbot.Exercise']"}),
            'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        },
        'askbot.award': {
            'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
            'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
        },
        'askbot.badgedata': {
            'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
            'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
        },
        'askbot.comment': {
            'Meta': {'ordering': "('-added_at',)", 'object_name': 'Comment', 'db_table': "u'comment'"},
            'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'html': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'offensive_flag_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
        },
        'askbot.emailfeedsetting': {
            'Meta': {'object_name': 'EmailFeedSetting'},
            'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
            'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
        },
        'askbot.favoriteexercise': {
            'Meta': {'object_name': 'FavoriteExercise', 'db_table': "u'favorite_exercise'"},
            'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'exercise': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Exercise']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_exercises'", 'to': "orm['auth.User']"})
        },
        'askbot.markedtag': {
            'Meta': {'object_name': 'MarkedTag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
        },
        'askbot.postrevision': {
            'Meta': {'ordering': "('-revision',)", 'unique_together': "(('problem', 'revision'), ('exercise', 'revision'))", 'object_name': 'PostRevision'},
            'problem': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Problem']"}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'postrevisions'", 'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'exercise': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Exercise']"}),
            'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
            'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'revision_type': ('django.db.models.fields.SmallIntegerField', [], {}),
            'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            'tagnames': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '125', 'blank': 'True'}),
            'text': ('django.db.models.fields.TextField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
        },
        'askbot.exercise': {
            'Meta': {'object_name': 'Exercise', 'db_table': "u'exercise'"},
            'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'problem_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exercises'", 'to': "orm['auth.User']"}),
            'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'closed_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
            'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
            'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'favorite_exercises'", 'symmetrical': 'False', 'through': "orm['askbot.FavoriteExercise']", 'to': "orm['auth.User']"}),
            'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_exercises'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
            'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_active_in_exercises'", 'to': "orm['auth.User']"}),
            'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
            'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
            'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
            'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
            'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'exercises'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
            'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exercises'", 'unique': 'True', 'to': "orm['askbot.Thread']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        },
        'askbot.exerciseview': {
            'Meta': {'object_name': 'ExerciseView'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Exercise']"}),
            'when': ('django.db.models.fields.DateTimeField', [], {}),
            'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exercise_views'", 'to': "orm['auth.User']"})
        },
        'askbot.repute': {
            'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'exercise': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Exercise']", 'null': 'True', 'blank': 'True'}),
            'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
            'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'askbot.tag': {
            'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        'askbot.thread': {
            'Meta': {'object_name': 'Thread'},
            'problem_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        'askbot.vote': {
            'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
            'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
            'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
        },
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
            'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
            'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'exercises_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
            'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['askbot']
| gpl-3.0 |
Ultimaker/Cura | cura/XRayPass.py | 1 | 1577 | # Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import os.path
from UM.Resources import Resources
from UM.Application import Application
from UM.PluginRegistry import PluginRegistry
from UM.View.RenderPass import RenderPass
from UM.View.RenderBatch import RenderBatch
from UM.View.GL.OpenGL import OpenGL
from cura.Scene.CuraSceneNode import CuraSceneNode
from UM.Scene.Iterator.DepthFirstIterator import DepthFirstIterator
class XRayPass(RenderPass):
    """Render pass that additively blends every printable mesh for x-ray view."""

    def __init__(self, width, height):
        super().__init__("xray", width, height)
        # The shader is compiled lazily, the first time render() runs.
        self._shader = None
        self._gl = OpenGL.getInstance().getBindingsObject()
        self._scene = Application.getInstance().getController().getScene()

    def render(self):
        if not self._shader:
            shader_path = Resources.getPath(Resources.Shaders, "xray.shader")
            self._shader = OpenGL.getInstance().createShaderProgram(shader_path)

        render_batch = RenderBatch(self._shader,
                                   type = RenderBatch.RenderType.NoType,
                                   backface_cull = False,
                                   blend_mode = RenderBatch.BlendMode.Additive)
        for scene_node in DepthFirstIterator(self._scene.getRoot()):
            is_printable = isinstance(scene_node, CuraSceneNode)
            if is_printable and scene_node.getMeshData() and scene_node.isVisible():
                render_batch.addItem(scene_node.getWorldTransformation(copy = False),
                                     scene_node.getMeshData(),
                                     normal_transformation = scene_node.getCachedNormalMatrix())

        self.bind()
        # Depth testing is switched off so occluded geometry still contributes
        # to the additive blend (that is what makes the view an "x-ray").
        self._gl.glDisable(self._gl.GL_DEPTH_TEST)
        render_batch.render(self._scene.getActiveCamera())
        self._gl.glEnable(self._gl.GL_DEPTH_TEST)
        self.release()
| lgpl-3.0 |
the-nick-of-time/DnD | DnD/modules/resourceModule.py | 1 | 2471 | import tkinter as tk
from typing import Union
import lib.components as gui
import lib.resourceLib as res
import lib.settingsLib as settings
class ResourceDisplay(gui.Section):
    """Displays a resource like sorcery points or Hit Dice.

    Shows "current / max" entries, the per-use value, and buttons to spend,
    regain, or reset the underlying resource.
    """

    def __init__(self, container: Union[tk.BaseWidget, tk.Tk], resource: res.Resource,
                 lockMax=False, **kwargs):
        super().__init__(container, **kwargs)
        self.resource = resource
        self.numbers = tk.Frame(self.f)
        self.current = gui.NumericEntry(self.numbers, self.resource.number, self.set_current,
                                        width=5)
        self.max = gui.NumericEntry(self.numbers, self.resource.maxnumber, self.set_max,
                                    width=5)
        if lockMax:
            self.max.disable()
        self.value = tk.Label(self.numbers, text='*' + str(self.resource.value))
        self.buttonFrame = tk.Frame(self.f)
        # BUG FIX: the callbacks were cross-wired; the '-' (use) button called
        # increment() (which regains a use) and the '+' (regain) button called
        # decrement() (which spends one).  Now '-' spends and '+' regains.
        self.use = tk.Button(self.buttonFrame, text='-', command=self.decrement)
        self.regain = tk.Button(self.buttonFrame, text='+', command=self.increment)
        # Shows the value produced by the most recent use (e.g. a die roll).
        self.display = tk.Label(self.buttonFrame, width=3)
        self.reset_ = tk.Button(self.buttonFrame, text='Reset', command=self.reset)
        self._draw()

    def _draw(self):
        """Lay out all widgets; called once from __init__."""
        tk.Label(self.f, text=self.resource.name).grid(row=0, column=0)
        self.numbers.grid(row=1, column=0)
        self.current.grid(1, 0)
        tk.Label(self.numbers, text='/').grid(row=1, column=1)
        self.max.grid(1, 2)
        self.value.grid(row=1, column=4)
        self.buttonFrame.grid(row=2, column=0, columnspan=3)
        self.display.grid(row=0, column=0)
        self.regain.grid(row=0, column=1)
        self.use.grid(row=0, column=2)
        self.reset_.grid(row=0, column=3)

    def update_view(self):
        """Sync the entry widgets with the resource's current state."""
        self.max.set(self.resource.maxnumber)
        self.current.set(self.resource.number)

    def set_current(self, value):
        # Callback from the "current" NumericEntry.
        self.resource.number = value

    def set_max(self, value):
        # Callback from the "max" NumericEntry.
        self.resource.maxnumber = value

    def increment(self):
        """Regain one use of the resource."""
        self.resource.regain(1)
        self.update_view()

    def decrement(self):
        """Spend one use and show the resulting value in the display label."""
        val = self.resource.use(1)
        self.display.config(text=str(val))
        self.update_view()

    def reset(self):
        """Restore the resource to full."""
        self.resource.reset()
        self.update_view()

    def rest(self, which: settings.RestLength):
        """Apply the effects of a rest of the given length to the resource."""
        self.resource.rest(which)
        self.update_view()
| gpl-2.0 |
zacps/zulip | api/integrations/svn/zulip_svn_config.py | 33 | 2345 | # -*- coding: utf-8 -*-
#
# Copyright © 2014 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Change these values to configure authentication for the plugin
ZULIP_USER = "svn-bot@example.com"
ZULIP_API_KEY = "0123456789abcdef0123456789abcdef"
# commit_notice_destination() lets you customize where commit notices
# are sent to with the full power of a Python function.
#
# It takes the following arguments:
# * path = the path to the svn repository on the server
# * commit = the commit id
#
# Returns a dictionary encoding the stream and subject to send the
# notification to (or None to send no notification).
#
# The default code below will send every commit except for the "evil-master-plan"
# and "my-super-secret-repository" repos to
# * stream "commits"
# * topic "branch_name"
def commit_notice_destination(path, commit):
    """Decide which Zulip stream/subject an svn commit notice goes to.

    `path` is the repository path on the server and `commit` the commit id.
    Returns a dict with 'stream' and 'subject' keys, or None when no
    notification should be sent.
    """
    secret_repos = ("evil-master-plan", "my-super-secret-repository")
    repo = path.split('/')[-1]
    if repo in secret_repos:
        # Return None for cases where you don't want a notice sent
        return None
    return {"stream": "commits", "subject": u"%s" % (repo,)}
## If properly installed, the Zulip API should be in your import
## path, but if not, set a custom path below
ZULIP_API_PATH = None
# Set this to your Zulip server's API URI
ZULIP_SITE = "https://zulip.example.com"
| apache-2.0 |
tiagofrepereira2012/tensorflow | tensorflow/tools/docs/generate_lib.py | 20 | 17656 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate docs for the TensorFlow Python API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import six
from tensorflow.python.util import tf_inspect
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.docs import doc_generator_visitor
from tensorflow.tools.docs import parser
from tensorflow.tools.docs import pretty_docs
from tensorflow.tools.docs import py_guide_parser
def _is_free_function(py_object, full_name, index):
  """Check if input is a free function (and not a class- or static method)."""
  if not tf_inspect.isfunction(py_object):
    return False

  # A name with no dot has no parent at all, so it is not a function either.
  if '.' not in full_name:
    return False

  # Static methods look like plain functions to tf_inspect (in 2.7); they are
  # identified here by having a class as their parent.
  parent = index[full_name.rsplit('.', 1)[0]]
  return not tf_inspect.isclass(parent)
def write_docs(output_dir, parser_config, yaml_toc):
  """Write previously extracted docs to disk.

  Write a docs page for each symbol included in the indices of parser_config to
  a tree of docs at `output_dir`.

  Symbols with multiple aliases will have only one page written about
  them, which is referenced for all aliases.

  Args:
    output_dir: Directory to write documentation markdown files to. Will be
      created if it doesn't exist.
    parser_config: A `parser.ParserConfig` object, containing all the necessary
      indices.
    yaml_toc: Set to `True` to generate a "_toc.yaml" file.

  Raises:
    ValueError: if `output_dir` is not an absolute path
  """
  # Make output_dir.
  if not os.path.isabs(output_dir):
    raise ValueError(
        "'output_dir' must be an absolute path.\n"
        "    output_dir='%s'" % output_dir)

  try:
    if not os.path.exists(output_dir):
      os.makedirs(output_dir)
  except OSError as e:
    print('Creating output dir "%s" failed: %s' % (output_dir, e))
    raise

  # These dictionaries are used for table-of-contents generation below
  # They will contain, after the for-loop below::
  #  - module name(string):classes and functions the module contains(list)
  module_children = {}
  #  - symbol name(string):pathname (string)
  symbol_to_file = {}

  # Parse and write Markdown pages, resolving cross-links (@{symbol}).
  for full_name, py_object in six.iteritems(parser_config.index):
    parser_config.reference_resolver.current_doc_full_name = full_name

    # Aliases are skipped: only the canonical name gets a page.
    if full_name in parser_config.duplicate_of:
      continue

    # Methods and some routines are documented only as part of their class.
    if not (tf_inspect.ismodule(py_object) or tf_inspect.isclass(py_object) or
            _is_free_function(py_object, full_name, parser_config.index)):
      continue

    sitepath = os.path.join('api_docs/python',
                            parser.documentation_path(full_name)[:-3])

    # For TOC, we need to store a mapping from full_name to the file
    # we're generating
    symbol_to_file[full_name] = sitepath

    # For a module, remember the module for the table-of-contents
    if tf_inspect.ismodule(py_object):
      if full_name in parser_config.tree:
        module_children.setdefault(full_name, [])

    # For something else that's documented,
    # figure out what module it lives in
    else:
      subname = str(full_name)
      # Walk up the dotted name until a containing module is found.
      while True:
        subname = subname[:subname.rindex('.')]
        if tf_inspect.ismodule(parser_config.index[subname]):
          module_children.setdefault(subname, []).append(full_name)
          break

    print('Writing docs for %s (%r).' % (full_name, py_object))

    # Generate docs for `py_object`, resolving references.
    page_info = parser.docs_for_object(full_name, py_object, parser_config)

    path = os.path.join(output_dir, parser.documentation_path(full_name))
    directory = os.path.dirname(path)
    try:
      if not os.path.exists(directory):
        os.makedirs(directory)
      with open(path, 'w') as f:
        f.write(pretty_docs.build_md_page(page_info))
    except OSError as e:
      print('Cannot write documentation for %s to %s: %s' % (full_name,
                                                             directory, e))
      raise

  if yaml_toc:
    # Generate table of contents

    # Put modules in alphabetical order, case-insensitive
    modules = sorted(module_children.keys(), key=lambda a: a.upper())

    leftnav_path = os.path.join(output_dir, '_toc.yaml')
    with open(leftnav_path, 'w') as f:

      # Generate header
      f.write('# Automatically generated file; please do not edit\ntoc:\n')
      for module in modules:
        f.write('  - title: ' + module + '\n'
                '    section:\n' + '  - title: Overview\n' +
                '      path: /TARGET_DOC_ROOT/VERSION/' + symbol_to_file[module]
                + '\n')

        symbols_in_module = module_children.get(module, [])
        # Sort case-insensitive, if equal sort case sensitive (upper first)
        symbols_in_module.sort(key=lambda a: (a.upper(), a))

        for full_name in symbols_in_module:
          f.write('    - title: ' + full_name[len(module) + 1:] + '\n'
                  '      path: /TARGET_DOC_ROOT/VERSION/' +
                  symbol_to_file[full_name] + '\n')

  # Write a global index containing all full names with links.
  with open(os.path.join(output_dir, 'index.md'), 'w') as f:
    f.write(
        parser.generate_global_index('TensorFlow', parser_config.index,
                                     parser_config.reference_resolver))
def add_dict_to_dict(add_from, add_to):
  """Merge `add_from` into `add_to` in place, extending lists on shared keys."""
  for key, values in add_from.items():
    if key in add_to:
      add_to[key].extend(values)
    else:
      # Stores a reference to (not a copy of) the source list, matching the
      # original behavior.
      add_to[key] = values
# Exclude some libraries in contrib from the documentation altogether.
def _get_default_private_map():
  """Return the default mapping of module name -> members to keep private."""
  # tf.test.mock is an implementation detail that must never be documented.
  return {'tf.test': ['mock']}
# Exclude members of some libraries.
def _get_default_do_not_descend_map():
  """Return the default map of module name -> children to skip during doc
  traversal (their members are excluded from the generated docs)."""
  # TODO(wicke): Shrink this list once the modules get sealed.
  return {
      'tf': ['cli', 'lib', 'wrappers'],
      'tf.contrib': [
          'compiler',
          'factorization',
          'grid_rnn',
          'labeled_tensor',
          'ndlstm',
          'quantization',
          'session_bundle',
          'slim',
          'solvers',
          'specs',
          'tensor_forest',
          'tensorboard',
          'testing',
          'tfprof',
      ],
      'tf.contrib.bayesflow': [
          'special_math', 'stochastic_gradient_estimators',
          'stochastic_variables'
      ],
      'tf.contrib.ffmpeg': ['ffmpeg_ops'],
      'tf.contrib.graph_editor': [
          'edit', 'match', 'reroute', 'subgraph', 'transform', 'select', 'util'
      ],
      'tf.contrib.keras': ['api', 'python'],
      'tf.contrib.layers': ['feature_column', 'summaries'],
      'tf.contrib.learn': [
          'datasets',
          'head',
          'graph_actions',
          'io',
          'models',
          'monitors',
          'ops',
          'preprocessing',
          'utils',
      ],
      'tf.contrib.util': ['loader'],
  }
def extract(py_modules, private_map, do_not_descend_map):
  """Extract docs from tf namespace and write them to disk."""
  first_name, first_module = py_modules[0]

  # Traverse the first module.  Both visitors are created once, rooted there.
  visitor = doc_generator_visitor.DocGeneratorVisitor(first_name)
  api_visitor = public_api.PublicAPIVisitor(visitor)
  api_visitor.set_root_name(first_name)
  add_dict_to_dict(private_map, api_visitor.private_map)
  add_dict_to_dict(do_not_descend_map, api_visitor.do_not_descend_map)
  traverse.traverse(first_module, api_visitor)

  # Every later module re-roots the same visitor pair before its traversal.
  for module_name, module in py_modules[1:]:
    visitor.set_root_name(module_name)
    api_visitor.set_root_name(module_name)
    traverse.traverse(module, api_visitor)

  return visitor
class _GetMarkdownTitle(py_guide_parser.PyGuideParser):
  """Extract the title from a .md file."""

  def __init__(self):
    # Filled in by the first process_title() callback; stays None when the
    # file contains no title heading.
    self.title = None
    py_guide_parser.PyGuideParser.__init__(self)

  def process_title(self, _, title):
    if self.title is None:  # only use the first title
      self.title = title
class _DocInfo(object):
  """A simple struct for holding a doc's url and title."""

  def __init__(self, url, title):
    # url: doc file path relative to the documentation source root.
    self.url = url
    # title: the first Markdown title found in that file (may be None).
    self.title = title
def build_doc_index(src_dir):
  """Build an index from a keyword designating a doc to _DocInfo objects."""
  doc_index = {}
  if not os.path.isabs(src_dir):
    raise ValueError("'src_dir' must be an absolute path.\n"
                     "    src_dir='%s'" % src_dir)

  if not os.path.exists(src_dir):
    raise ValueError("'src_dir' path must exist.\n"
                     "    src_dir='%s'" % src_dir)

  for dirpath, _, filenames in os.walk(src_dir):
    suffix = os.path.relpath(path=dirpath, start=src_dir)
    for base_name in filenames:
      if not base_name.endswith('.md'):
        continue
      title_parser = _GetMarkdownTitle()
      title_parser.process(os.path.join(dirpath, base_name))
      # Drop the '.md' suffix and split the relative path into parts.
      key_parts = os.path.join(suffix, base_name[:-3]).split('/')
      if key_parts[-1] == 'index':
        # 'foo/index.md' is keyed by its directory name 'foo'.
        key_parts = key_parts[:-1]
      doc_info = _DocInfo(os.path.join(suffix, base_name), title_parser.title)
      doc_index[key_parts[-1]] = doc_info
      if len(key_parts) > 1:
        # Also index by 'parent/name' so duplicate basenames stay reachable.
        doc_index['/'.join(key_parts[-2:])] = doc_info

  return doc_index
class _GuideRef(object):
  """A link from an API symbol back to the guide section that mentions it."""

  def __init__(self, base_name, title, section_title, section_tag):
    anchor = '%s#%s' % (base_name, section_tag) if section_tag else base_name
    self.url = 'api_guides/python/' + anchor
    if section_title:
      self.link_text = '%s > %s' % (title, section_title)
    else:
      self.link_text = title

  def make_md_link(self, url_prefix):
    """Render this reference as a Markdown link rooted at `url_prefix`."""
    return '[%s](%s%s)' % (self.link_text, url_prefix, self.url)
class _GenerateGuideIndex(py_guide_parser.PyGuideParser):
  """Turn guide files into an index from symbol name to a list of _GuideRefs."""

  def __init__(self):
    # Maps symbol name -> list of _GuideRef, accumulated across files.
    self.index = {}
    py_guide_parser.PyGuideParser.__init__(self)

  def process(self, full_path, base_name):
    """Index a file, reading from `full_path`, with `base_name` as the link."""
    # Reset the per-file state before the parser callbacks fire.
    self.full_path = full_path
    self.base_name = base_name
    self.title = None
    self.section_title = None
    self.section_tag = None
    py_guide_parser.PyGuideParser.process(self, full_path)

  def process_title(self, _, title):
    if self.title is None:  # only use the first title
      self.title = title

  def process_section(self, _, section_title, tag):
    # Track the section the following lines belong to.
    self.section_title = section_title
    self.section_tag = tag

  def process_line(self, _, line):
    """Index @{symbol} references as in the current file & section."""
    for match in parser.SYMBOL_REFERENCE_RE.finditer(line):
      val = self.index.get(match.group(1), [])
      val.append(
          _GuideRef(self.base_name, self.title, self.section_title,
                    self.section_tag))
      self.index[match.group(1)] = val
def _build_guide_index(guide_src_dir):
  """Return dict: symbol name -> _GuideRef from the files in `guide_src_dir`."""
  generator = _GenerateGuideIndex()
  # A missing guide directory simply yields an empty index.
  if not os.path.exists(guide_src_dir):
    return generator.index
  for full_path, base_name in py_guide_parser.md_files_in_dir(guide_src_dir):
    generator.process(full_path, base_name)
  return generator.index
class _UpdateTags(py_guide_parser.PyGuideParser):
  """Rewrites a Python guide so that each section has an explicit tag."""

  def process_section(self, line_number, section_title, tag):
    # Replace the Markdown heading with an HTML <h2> carrying a stable anchor
    # id, so section links resolve consistently across renderers.
    self.replace_line(line_number, '<h2 id="%s">%s</h2>' % (tag, section_title))
# File names that _other_docs() skips entirely (neither copied nor processed).
EXCLUDED = set(['__init__.py', 'OWNERS', 'README.txt'])
def _other_docs(src_dir, output_dir, reference_resolver):
  """Convert all the files in `src_dir` and write results to `output_dir`.

  Markdown files get @{symbol} references resolved and a "do not edit" header
  prepended; Python guides additionally get explicit section tags; all other
  files are copied through unchanged.
  """
  header = '<!-- DO NOT EDIT! Automatically generated file. -->\n'

  # Iterate through all the source files and process them.
  tag_updater = _UpdateTags()
  for dirpath, _, filenames in os.walk(src_dir):
    # How to get from `dirpath` to api_docs/python/
    relative_path_to_root = os.path.relpath(
        path=os.path.join(src_dir, 'api_docs/python'), start=dirpath)

    # Make the directory under output_dir.
    new_dir = os.path.join(output_dir,
                           os.path.relpath(path=dirpath, start=src_dir))
    try:
      if not os.path.exists(new_dir):
        os.makedirs(new_dir)
    except OSError as e:
      print('Creating output dir "%s" failed: %s' % (new_dir, e))
      raise

    for base_name in filenames:
      if base_name in EXCLUDED:
        print('Skipping excluded file %s...' % base_name)
        continue
      full_in_path = os.path.join(dirpath, base_name)
      reference_resolver.current_doc_full_name = full_in_path
      suffix = os.path.relpath(path=full_in_path, start=src_dir)
      full_out_path = os.path.join(output_dir, suffix)
      if not base_name.endswith('.md'):
        print('Copying non-md file %s...' % suffix)
        # BUG FIX: the previous one-liner opened both files without closing
        # them; context managers close the handles promptly.
        with open(full_in_path) as f_in:
          content = f_in.read()
        with open(full_out_path, 'w') as f_out:
          f_out.write(content)
        continue
      if dirpath.endswith('/api_guides/python'):
        print('Processing Python guide %s...' % base_name)
        md_string = tag_updater.process(full_in_path)
      else:
        print('Processing doc %s...' % suffix)
        with open(full_in_path) as f_in:
          md_string = f_in.read()

      output = reference_resolver.replace_references(md_string,
                                                     relative_path_to_root)
      with open(full_out_path, 'w') as f:
        f.write(header + output)

  print('Done.')
class DocGenerator(object):
  """Main entry point for generating docs."""

  def __init__(self):
    # The doc pipeline relies on Python-2-only inspection behavior.
    if sys.version_info >= (3, 0):
      sys.exit('Doc generation is not supported from python3.')
    self.argument_parser = argparse.ArgumentParser()
    self._py_modules = None
    self._private_map = _get_default_private_map()
    self._do_not_descend_map = _get_default_do_not_descend_map()
    self.yaml_toc = True

  def add_output_dir_argument(self):
    """Register the required --output_dir flag."""
    self.argument_parser.add_argument(
        '--output_dir',
        type=str,
        default=None,
        required=True,
        help='Directory to write docs to.')

  def add_src_dir_argument(self):
    """Register the required --src_dir flag."""
    self.argument_parser.add_argument(
        '--src_dir',
        type=str,
        default=None,
        required=True,
        help='Directory with the source docs.')

  def add_base_dir_argument(self, default_base_dir):
    """Register the optional --base_dir flag with the given default."""
    self.argument_parser.add_argument(
        '--base_dir',
        type=str,
        default=default_base_dir,
        help='Base directory to strip from file names referenced in docs.')

  def parse_known_args(self):
    """Parse known command line flags; unknown flags are ignored."""
    flags, _ = self.argument_parser.parse_known_args()
    return flags

  def add_to_private_map(self, d):
    add_dict_to_dict(d, self._private_map)

  def add_to_do_not_descend_map(self, d):
    add_dict_to_dict(d, self._do_not_descend_map)

  def set_private_map(self, d):
    self._private_map = d

  def set_do_not_descend_map(self, d):
    self._do_not_descend_map = d

  def set_py_modules(self, py_modules):
    # py_modules is a list of (name, module) pairs; the first is the root.
    self._py_modules = py_modules

  def py_module_names(self):
    """Return the names of the registered modules; requires set_py_modules()."""
    if self._py_modules is None:
      raise RuntimeError(
          'Must call set_py_modules() before running py_module_names().')
    return [name for (name, _) in self._py_modules]

  def make_reference_resolver(self, visitor, doc_index):
    return parser.ReferenceResolver.from_visitor(
        visitor, doc_index, py_module_names=self.py_module_names())

  def make_parser_config(self, visitor, reference_resolver, guide_index,
                         base_dir):
    return parser.ParserConfig(
        reference_resolver=reference_resolver,
        duplicates=visitor.duplicates,
        duplicate_of=visitor.duplicate_of,
        tree=visitor.tree,
        index=visitor.index,
        reverse_index=visitor.reverse_index,
        guide_index=guide_index,
        base_dir=base_dir)

  def run_extraction(self):
    """Extract the doc index from the registered modules."""
    return extract(
        self._py_modules, self._private_map, self._do_not_descend_map)

  def build(self, flags):
    """Actually build the docs."""
    doc_index = build_doc_index(flags.src_dir)
    visitor = self.run_extraction()
    reference_resolver = self.make_reference_resolver(visitor, doc_index)

    guide_index = _build_guide_index(
        os.path.join(flags.src_dir, 'api_guides/python'))

    parser_config = self.make_parser_config(visitor, reference_resolver,
                                            guide_index, flags.base_dir)
    output_dir = os.path.join(flags.output_dir, 'api_docs/python')

    write_docs(output_dir, parser_config, yaml_toc=self.yaml_toc)
    _other_docs(flags.src_dir, flags.output_dir, reference_resolver)
    parser_config.reference_resolver.log_errors()

    # The number of unresolved references is the process exit status.
    return parser_config.reference_resolver.num_errors()
| apache-2.0 |
lptorres/noah-inasafe | web_api/third_party/simplejson/decoder.py | 1 | 14670 | """Implementation of JSONDecoder
"""
from __future__ import absolute_import
import re
import sys
import struct
from .compat import fromhex, b, u, text_type, binary_type, PY3, unichr
from .scanner import make_scanner, JSONDecodeError
def _import_c_scanstring():
    """Return the C-accelerated scanstring, or None when it is unavailable."""
    try:
        from ._speedups import scanstring
    except ImportError:
        return None
    return scanstring
# C-accelerated scanstring, or None when the _speedups extension is absent.
c_scanstring = _import_c_scanstring()

# NOTE (3.1.0): JSONDecodeError may still be imported from this module for
# compatibility, but it was never in the __all__
__all__ = ['JSONDecoder']

# Flags shared by every regular expression compiled in this module.
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
    """Return the float constants ``(nan, inf, -inf)``.

    ``float('nan')`` and ``float('inf')`` are reliable and portable on every
    Python this module supports (2.6+/3.x), so the historical packed-bytes
    workaround — which had to reverse byte order to dodge a struct bug in
    Python 2.4 — is no longer needed.
    """
    nan = float('nan')
    inf = float('inf')
    return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()

# Non-standard literal names accepted by the decoder, mapped to their floats.
_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
}

# Matches a run of ordinary characters followed by '"', '\' or a control char.
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
# Single-character escape sequences (everything except \uXXXX).
BACKSLASH = {
    '"': u('"'), '\\': u('\u005c'), '/': u('/'),
    'b': u('\b'), 'f': u('\f'), 'n': u('\n'), 'r': u('\r'), 't': u('\t'),
}

DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True,
        _b=BACKSLASH, _m=STRINGCHUNK.match, _join=u('').join,
        _PY3=PY3, _maxunicode=sys.maxunicode):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.

    Returns a tuple of the decoded string and the index of the character in s
    after the end quote."""
    # The _-prefixed default arguments bind module globals as locals purely
    # for lookup speed; callers never pass them.
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            if not _PY3 and not isinstance(content, text_type):
                content = text_type(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                msg = "Invalid control character %r at"
                raise JSONDecodeError(msg, s, end)
            else:
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\X escape sequence %r"
                raise JSONDecodeError(msg, s, end)
            end += 1
        else:
            # Unicode escape sequence
            msg = "Invalid \\uXXXX escape sequence"
            esc = s[end + 1:end + 5]
            escX = esc[1:2]
            if len(esc) != 4 or escX == 'x' or escX == 'X':
                raise JSONDecodeError(msg, s, end - 1)
            try:
                uni = int(esc, 16)
            except ValueError:
                raise JSONDecodeError(msg, s, end - 1)
            end += 5
            # Check for surrogate pair on UCS-4 systems
            # Note that this will join high/low surrogate pairs
            # but will also pass unpaired surrogates through
            if (_maxunicode > 65535 and
                uni & 0xfc00 == 0xd800 and
                s[end:end + 2] == '\\u'):
                esc2 = s[end + 2:end + 6]
                escX = esc2[1:2]
                if len(esc2) == 4 and not (escX == 'x' or escX == 'X'):
                    try:
                        uni2 = int(esc2, 16)
                    except ValueError:
                        raise JSONDecodeError(msg, s, end)
                    if uni2 & 0xfc00 == 0xdc00:
                        uni = 0x10000 + (((uni - 0xd800) << 10) |
                                         (uni2 - 0xdc00))
                        end += 6
            char = unichr(uni)
        # Append the unescaped character
        _append(char)
    return _join(chunks), end
# Use speedup if available, falling back to the pure-Python implementation.
scanstring = c_scanstring or py_scanstring

# JSON insignificant whitespace allowed between tokens.
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject(state, encoding, strict, scan_once, object_hook,
        object_pairs_hook, memo=None,
        _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON object from ``state = (s, end)`` where ``end`` is the
    index just after the opening '{'.  Returns ``(obj, end)`` with ``end``
    positioned after the closing '}'."""
    (s, end) = state
    # Backwards compatibility
    if memo is None:
        memo = {}
    # memo interns repeated key strings so equal keys share one object.
    memo_get = memo.setdefault
    pairs = []
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end:end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end:end + 1]
        # Trivial empty object
        if nextchar == '}':
            if object_pairs_hook is not None:
                result = object_pairs_hook(pairs)
                return result, end + 1
            pairs = {}
            if object_hook is not None:
                pairs = object_hook(pairs)
            return pairs, end + 1
        elif nextchar != '"':
            raise JSONDecodeError(
                "Expecting property name enclosed in double quotes",
                s, end)
    end += 1
    while True:
        key, end = scanstring(s, end, encoding, strict)
        key = memo_get(key, key)

        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end:end + 1] != ':':
            end = _w(s, end).end()
            if s[end:end + 1] != ':':
                raise JSONDecodeError("Expecting ':' delimiter", s, end)

        end += 1

        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass

        value, end = scan_once(s, end)
        pairs.append((key, value))

        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1

        if nextchar == '}':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting ',' delimiter or '}'", s, end - 1)

        try:
            nextchar = s[end]
            if nextchar in _ws:
                end += 1
                nextchar = s[end]
                if nextchar in _ws:
                    end = _w(s, end + 1).end()
                    nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar != '"':
            raise JSONDecodeError(
                "Expecting property name enclosed in double quotes",
                s, end - 1)

    if object_pairs_hook is not None:
        result = object_pairs_hook(pairs)
        return result, end
    pairs = dict(pairs)
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
def JSONArray(state, scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON array from ``state = (s, end)`` where ``end`` is the
    index just after the opening '['.  Returns ``(values, end)`` with ``end``
    positioned after the closing ']'."""
    (s, end) = state
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    elif nextchar == '':
        raise JSONDecodeError("Expecting value or ']'", s, end)
    _append = values.append
    while True:
        value, end = scan_once(s, end)
        _append(value)
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting ',' delimiter or ']'", s, end - 1)

        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass

    return values, end
class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder

    Performs the following translations in decoding by default:

    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | unicode           |
    +---------------+-------------------+
    | number (int)  | int, long         |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+

    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.

    """

    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True,
            object_pairs_hook=None):
        """
        *encoding* determines the encoding used to interpret any
        :class:`str` objects decoded by this instance (``'utf-8'`` by
        default). It has no effect when decoding :class:`unicode` objects.

        Note that currently only encodings that are a superset of ASCII work,
        strings of other encodings should be passed in as :class:`unicode`.

        *object_hook*, if specified, will be called with the result of every
        JSON object decoded and its return value will be used in place of the
        given :class:`dict`. This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).

        *object_pairs_hook* is an optional function that will be called with
        the result of any object literal decode with an ordered list of pairs.
        The return value of *object_pairs_hook* will be used instead of the
        :class:`dict`. This feature can be used to implement custom decoders
        that rely on the order that the key and value pairs are decoded (for
        example, :func:`collections.OrderedDict` will remember the order of
        insertion). If *object_hook* is also defined, the *object_pairs_hook*
        takes priority.

        *parse_float*, if specified, will be called with the string of every
        JSON float to be decoded. By default, this is equivalent to
        ``float(num_str)``. This can be used to use another datatype or parser
        for JSON floats (e.g. :class:`decimal.Decimal`).

        *parse_int*, if specified, will be called with the string of every
        JSON int to be decoded. By default, this is equivalent to
        ``int(num_str)``. This can be used to use another datatype or parser
        for JSON integers (e.g. :class:`float`).

        *parse_constant*, if specified, will be called with one of the
        following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
        can be used to raise an exception if invalid JSON numbers are
        encountered.

        *strict* controls the parser's behavior when it encounters an
        invalid control character in a string. The default setting of
        ``True`` means that unescaped control characters are parse errors, if
        ``False`` then control characters will be allowed in strings.

        """
        if encoding is None:
            encoding = DEFAULT_ENCODING
        self.encoding = encoding
        self.object_hook = object_hook
        self.object_pairs_hook = object_pairs_hook
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        # Shared memo lets JSONObject intern repeated key strings.
        self.memo = {}
        self.scan_once = make_scanner(self)

    def decode(self, s, _w=WHITESPACE.match, _PY3=PY3):
        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
        instance containing a JSON document)

        """
        if _PY3 and isinstance(s, binary_type):
            s = s.decode(self.encoding)
        obj, end = self.raw_decode(s)
        # Trailing content after the document is an error.
        end = _w(s, end).end()
        if end != len(s):
            raise JSONDecodeError("Extra data", s, end, len(s))
        return obj

    def raw_decode(self, s, idx=0, _w=WHITESPACE.match, _PY3=PY3):
        """Decode a JSON document from ``s`` (a ``str`` or ``unicode``
        beginning with a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.
        Optionally, ``idx`` can be used to specify an offset in ``s`` where
        the JSON document begins.

        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.

        """
        if _PY3 and not isinstance(s, text_type):
            raise TypeError("Input string must be text, not bytes")
        return self.scan_once(s, idx=_w(s, idx).end())
| gpl-3.0 |
miaoski/bsideslv-plc-home | hmi.py | 1 | 1699 | # -*- coding: utf8 -*-
# This trivial HMI is decoupled from ModBus server
import gevent
from flask import Flask, render_template
from flask_sockets import Sockets
from pymodbus.client.sync import ModbusTcpClient
from time import sleep
import sys
# Flask application plus the flask-sockets websocket extension bound to it.
app = Flask(__name__)
sockets = Sockets(app)
# The ModBus server address must be given as the first CLI argument.
try:
    myip = sys.argv[1]
except IndexError:
    print 'Usage python hmi.py 192.168.42.1'
    sys.exit(1)
# Shared ModBus/TCP client used by all of the read_* helpers below.
client = ModbusTcpClient(myip)
def read_di(num=20):
    # Poll `num` discrete inputs (starting at address 1) and render each
    # bit as the character '1' or '0'.
    bits = client.read_discrete_inputs(1, num).bits[:num]
    return ['1' if bit else '0' for bit in bits]
def read_co(num=20):
    # Poll `num` coil outputs (starting at address 1) and render each bit
    # as the character '1' or '0'.
    bits = client.read_coils(1, num).bits[:num]
    return ['1' if bit else '0' for bit in bits]
def read_ir(num=5):
    # Poll `num` input registers (starting at address 1) as decimal strings.
    regs = client.read_input_registers(1, num).registers[:num]
    return map(str, regs)
def read_hr(num=5):
    # Poll `num` holding registers (starting at address 1) as decimal strings.
    regs = client.read_holding_registers(1, num).registers[:num]
    return map(str, regs)
@sockets.route('/data')
def read_data(ws):
while not ws.closed:
try:
di = read_di()
co = read_co()
ir = read_ir()
hr = read_hr()
except:
print 'Exception. Wait for next run.'
gevent.sleep(1)
continue
ws.send('\n'.join((','.join(di), ','.join(co), ','.join(ir), ','.join(hr))))
gevent.sleep(0.3)
print "Connection Closed!!!", reason
@app.route('/')
def homepage():
    # Serve the static HMI page; live data arrives over the /data websocket.
    template_name = 'hmi.html'
    return render_template(template_name)
# main
if __name__ == "__main__":
    from gevent import pywsgi
    from geventwebsocket.handler import WebSocketHandler
    # gevent WSGI server with websocket support, listening on port 8000 of
    # the address supplied on the command line; blocks forever.
    server = pywsgi.WSGIServer((myip, 8000), app, handler_class=WebSocketHandler)
    server.serve_forever()
| gpl-2.0 |
vicky2135/lucious | lucious/lib/python2.7/site-packages/pip/_vendor/distlib/version.py | 335 | 23711 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2016 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""
Implementation of a flexible versioning scheme providing support for PEP-440,
setuptools-compatible and semantic versioning.
"""
import logging
import re
from .compat import string_types
# Public API of this module.
__all__ = ['NormalizedVersion', 'NormalizedMatcher',
           'LegacyVersion', 'LegacyMatcher',
           'SemanticVersion', 'SemanticMatcher',
           'UnsupportedVersionError', 'get_scheme']
logger = logging.getLogger(__name__)
class UnsupportedVersionError(ValueError):
    """Raised when a version string cannot be parsed by the active scheme."""
class Version(object):
    """Abstract base class for comparable version objects.

    Subclasses supply :meth:`parse`, which converts the version string into
    a non-empty tuple; every rich comparison then reduces to comparing
    those tuples.  Comparing instances of different subclasses raises
    ``TypeError``.
    """
    def __init__(self, s):
        s = s.strip()
        self._string = s
        self._parts = self.parse(s)
        assert isinstance(self._parts, tuple)
        assert len(self._parts) > 0
    def parse(self, s):
        # Subclass responsibility: return the sort-key tuple for `s`.
        raise NotImplementedError('please implement in a subclass')
    def _check_compatible(self, other):
        # Only versions of the exact same scheme are comparable.
        if type(self) != type(other):
            raise TypeError('cannot compare %r and %r' % (self, other))
    def __eq__(self, other):
        self._check_compatible(other)
        return self._parts == other._parts
    def __ne__(self, other):
        return not self.__eq__(other)
    def __lt__(self, other):
        self._check_compatible(other)
        return self._parts < other._parts
    def __gt__(self, other):
        # De Morgan form of "not (lt or eq)".
        return not self.__lt__(other) and not self.__eq__(other)
    def __le__(self, other):
        return self.__lt__(other) or self.__eq__(other)
    def __ge__(self, other):
        return self.__gt__(other) or self.__eq__(other)
    # See http://docs.python.org/reference/datamodel#object.__hash__
    def __hash__(self):
        return hash(self._parts)
    def __repr__(self):
        return "%s('%s')" % (self.__class__.__name__, self._string)
    def __str__(self):
        return self._string
    @property
    def is_prerelease(self):
        raise NotImplementedError('Please implement in subclasses.')
class Matcher(object):
    """Parse and evaluate a requirement string such as
    ``'name (>=1.0, <2.0)'`` against :class:`Version` instances.

    Subclasses set :attr:`version_class` to the Version subclass of the
    scheme they implement, and may override :attr:`_operators`.
    """
    version_class = None
    # 'name (constraint, constraint, ...)' -- group 3 is the raw constraint list.
    dist_re = re.compile(r"^(\w[\s\w'.-]*)(\((.*)\))?")
    # One constraint: optional comparison operator followed by a version.
    comp_re = re.compile(r'^(<=|>=|<|>|!=|={2,3}|~=)?\s*([^\s,]+)$')
    num_re = re.compile(r'^\d+(\.\d+)*$')
    # value is either a callable or the name of a method
    _operators = {
        '<': lambda v, c, p: v < c,
        '>': lambda v, c, p: v > c,
        '<=': lambda v, c, p: v == c or v < c,
        '>=': lambda v, c, p: v == c or v > c,
        '==': lambda v, c, p: v == c,
        '===': lambda v, c, p: v == c,
        # by default, compatible => >=.
        '~=': lambda v, c, p: v == c or v > c,
        '!=': lambda v, c, p: v != c,
    }
    def __init__(self, s):
        """Parse requirement string ``s``; raises ValueError if malformed."""
        if self.version_class is None:
            raise ValueError('Please specify a version class')
        self._string = s = s.strip()
        m = self.dist_re.match(s)
        if not m:
            raise ValueError('Not valid: %r' % s)
        groups = m.groups('')
        self.name = groups[0].strip()
        self.key = self.name.lower()    # for case-insensitive comparisons
        clist = []
        if groups[2]:
            constraints = [c.strip() for c in groups[2].split(',')]
            for c in constraints:
                m = self.comp_re.match(c)
                if not m:
                    raise ValueError('Invalid %r in %r' % (c, s))
                groups = m.groups()
                # A missing operator defaults to 'compatible release' (~=).
                op = groups[0] or '~='
                s = groups[1]
                if s.endswith('.*'):
                    if op not in ('==', '!='):
                        raise ValueError('\'.*\' not allowed for '
                                         '%r constraints' % op)
                    # Could be a partial version (e.g. for '2.*') which
                    # won't parse as a version, so keep it as a string
                    vn, prefix = s[:-2], True
                    if not self.num_re.match(vn):
                        # Just to check that vn is a valid version
                        self.version_class(vn)
                else:
                    # Should parse as a version, so we can create an
                    # instance for the comparison
                    vn, prefix = self.version_class(s), False
                clist.append((op, vn, prefix))
        # Each part is (operator, version-or-prefix-string, is_prefix_match).
        self._parts = tuple(clist)
    def match(self, version):
        """
        Check if the provided version matches the constraints.
        :param version: The version to match against this instance.
        :type version: String or :class:`Version` instance.
        """
        if isinstance(version, string_types):
            version = self.version_class(version)
        # All constraints must hold (logical AND).
        for operator, constraint, prefix in self._parts:
            f = self._operators.get(operator)
            if isinstance(f, string_types):
                # Operator table may name a method rather than a callable.
                f = getattr(self, f)
            if not f:
                msg = ('%r not implemented '
                       'for %s' % (operator, self.__class__.__name__))
                raise NotImplementedError(msg)
            if not f(version, constraint, prefix):
                return False
        return True
    @property
    def exact_version(self):
        # The pinned version when this matcher is a single '==' / '==='
        # constraint; None otherwise.
        result = None
        if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='):
            result = self._parts[0][1]
        return result
    def _check_compatible(self, other):
        if type(self) != type(other) or self.name != other.name:
            raise TypeError('cannot compare %s and %s' % (self, other))
    def __eq__(self, other):
        self._check_compatible(other)
        return self.key == other.key and self._parts == other._parts
    def __ne__(self, other):
        return not self.__eq__(other)
    # See http://docs.python.org/reference/datamodel#object.__hash__
    def __hash__(self):
        return hash(self.key) + hash(self._parts)
    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self._string)
    def __str__(self):
        return self._string
# Loose PEP 440 matcher: optional 'v' prefix, optional epoch ('N!'),
# release digits, pre ('a'/'b'/'c'/'rc'), post and dev segments, and an
# optional '+local' version label.
PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?'
                               r'(\.(post)(\d+))?(\.(dev)(\d+))?'
                               r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$')
def _pep_440_key(s):
    """Return a PEP 440 sort key for version string ``s``.

    The key is the tuple ``(epoch, release, pre, post, dev, local)``;
    sentinel strings ('a', 'z', '_', 'final', ...) are chosen so that
    plain tuple comparison yields PEP 440 ordering.  Raises
    :class:`UnsupportedVersionError` when ``s`` does not match
    ``PEP440_VERSION_RE``.
    """
    s = s.strip()
    m = PEP440_VERSION_RE.match(s)
    if not m:
        raise UnsupportedVersionError('Not a valid version: %s' % s)
    groups = m.groups()
    nums = tuple(int(v) for v in groups[1].split('.'))
    # X.Y == X.Y.0 == X.Y.0.0: strip trailing zeroes from the release tuple.
    while len(nums) > 1 and nums[-1] == 0:
        nums = nums[:-1]
    if not groups[0]:
        epoch = 0
    else:
        # BUG FIX: group 0 captures the trailing '!' separator (pattern is
        # '(\d+!)?'), so it must be stripped before conversion --
        # int('1!') raised ValueError for every epoch-bearing version.
        epoch = int(groups[0][:-1])
    pre = groups[4:6]
    post = groups[7:9]
    dev = groups[10:12]
    local = groups[13]
    if pre == (None, None):
        pre = ()
    else:
        pre = pre[0], int(pre[1])
    if post == (None, None):
        post = ()
    else:
        post = post[0], int(post[1])
    if dev == (None, None):
        dev = ()
    else:
        dev = dev[0], int(dev[1])
    if local is None:
        local = ()
    else:
        parts = []
        for part in local.split('.'):
            # to ensure that numeric compares as > lexicographic, avoid
            # comparing them directly, but encode a tuple which ensures
            # correct sorting
            if part.isdigit():
                part = (1, int(part))
            else:
                part = (0, part)
            parts.append(part)
        local = tuple(parts)
    if not pre:
        # either before pre-release, or final release and after
        if not post and dev:
            # before pre-release
            pre = ('a', -1)     # to sort before a0
        else:
            pre = ('z',)        # to sort after all pre-releases
    # now look at the state of post and dev.
    if not post:
        post = ('_',)   # sort before 'a'
    if not dev:
        dev = ('final',)
    return epoch, nums, pre, post, dev, local
_normalized_key = _pep_440_key
class NormalizedVersion(Version):
    """A rational (PEP 440 / 'normalized') version.

    Accepted examples::

        1.2         # equivalent to "1.2.0"
        1.2.0
        1.2a1
        1.2.3a2
        1.2.3b1
        1.2.3c1
        1.2.3.4

    Rejected examples::

        1           # minimum two numbers
        1.2a        # release level must have a release serial
        1.2.3b
    """
    def parse(self, s):
        # Raises UnsupportedVersionError on invalid input.
        key = _normalized_key(s)
        # _normalized_key drops trailing zeroes from the release clause
        # (so 1.2 == 1.2.0), but PEP 440 prefix matching needs the exact
        # clause: '~= 1.4.5.0' matches differently from '~= 1.4.5.0.0'.
        # Re-extract it from the raw string.
        m = PEP440_VERSION_RE.match(s)  # cannot fail: key parse succeeded
        release = m.groups()[1]
        self._release_clause = tuple(int(part) for part in release.split('.'))
        return key
    PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev'])
    @property
    def is_prerelease(self):
        # Any key component starting with a pre-release tag marks the
        # version as a pre-release.
        return any(t[0] in self.PREREL_TAGS for t in self._parts if t)
def _match_prefix(x, y):
x = str(x)
y = str(y)
if x == y:
return True
if not x.startswith(y):
return False
n = len(y)
return x[n] == '.'
class NormalizedMatcher(Matcher):
    """Matcher implementing PEP 440 operator semantics, including prefix
    ('X.*') matching and local-version handling."""
    version_class = NormalizedVersion
    # value is either a callable or the name of a method
    _operators = {
        '~=': '_match_compatible',
        '<': '_match_lt',
        '>': '_match_gt',
        '<=': '_match_le',
        '>=': '_match_ge',
        '==': '_match_eq',
        '===': '_match_arbitrary',
        '!=': '_match_ne',
    }
    def _adjust_local(self, version, constraint, prefix):
        """Strip the '+local' component from ``version`` when the
        constraint has none, so comparisons ignore it (per PEP 440)."""
        if prefix:
            # Prefix constraints are plain strings at this point.
            strip_local = '+' not in constraint and version._parts[-1]
        else:
            # both constraint and version are
            # NormalizedVersion instances.
            # If constraint does not have a local component,
            # ensure the version doesn't, either.
            strip_local = not constraint._parts[-1] and version._parts[-1]
        if strip_local:
            s = version._string.split('+', 1)[0]
            version = self.version_class(s)
        return version, constraint
    def _match_lt(self, version, constraint, prefix):
        # '<' excludes later candidates within the constraint's own
        # release series (e.g. 1.7.post1 does not satisfy '< 1.7').
        version, constraint = self._adjust_local(version, constraint, prefix)
        if version >= constraint:
            return False
        release_clause = constraint._release_clause
        pfx = '.'.join([str(i) for i in release_clause])
        return not _match_prefix(version, pfx)
    def _match_gt(self, version, constraint, prefix):
        # '>' excludes later candidates within the constraint's own
        # release series (e.g. 1.7.post1 does not satisfy '> 1.7').
        version, constraint = self._adjust_local(version, constraint, prefix)
        if version <= constraint:
            return False
        release_clause = constraint._release_clause
        pfx = '.'.join([str(i) for i in release_clause])
        return not _match_prefix(version, pfx)
    def _match_le(self, version, constraint, prefix):
        version, constraint = self._adjust_local(version, constraint, prefix)
        return version <= constraint
    def _match_ge(self, version, constraint, prefix):
        version, constraint = self._adjust_local(version, constraint, prefix)
        return version >= constraint
    def _match_eq(self, version, constraint, prefix):
        # '== X.*' does a release-series prefix match; plain '==' is exact.
        version, constraint = self._adjust_local(version, constraint, prefix)
        if not prefix:
            result = (version == constraint)
        else:
            result = _match_prefix(version, constraint)
        return result
    def _match_arbitrary(self, version, constraint, prefix):
        # '===': simple string equality, no version semantics at all.
        return str(version) == str(constraint)
    def _match_ne(self, version, constraint, prefix):
        version, constraint = self._adjust_local(version, constraint, prefix)
        if not prefix:
            result = (version != constraint)
        else:
            result = not _match_prefix(version, constraint)
        return result
    def _match_compatible(self, version, constraint, prefix):
        # '~= X.Y.Z' means '>= X.Y.Z' and within the 'X.Y' series.
        version, constraint = self._adjust_local(version, constraint, prefix)
        if version == constraint:
            return True
        if version < constraint:
            return False
#        if not prefix:
#            return True
        release_clause = constraint._release_clause
        if len(release_clause) > 1:
            release_clause = release_clause[:-1]
        pfx = '.'.join([str(i) for i in release_clause])
        return _match_prefix(version, pfx)
# (pattern, replacement) pairs applied to a whole candidate version string
# before _suggest_semantic_version() splits off a numeric prefix.
_REPLACEMENTS = (
    (re.compile('[.+-]$'), ''),                     # remove trailing puncts
    (re.compile(r'^[.](\d)'), r'0.\1'),             # .N -> 0.N at start
    (re.compile('^[.-]'), ''),                      # remove leading puncts
    (re.compile(r'^\((.*)\)$'), r'\1'),             # remove parentheses
    (re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'),    # remove leading v(ersion)
    (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'),        # remove leading v(ersion)
    (re.compile('[.]{2,}'), '.'),                   # multiple runs of '.'
    (re.compile(r'\b(alfa|apha)\b'), 'alpha'),      # misspelt alpha
    (re.compile(r'\b(pre-alpha|prealpha)\b'),
                'pre.alpha'),                       # standardise
    (re.compile(r'\(beta\)$'), 'beta'),             # remove parentheses
)
# (pattern, replacement) pairs used to clean the non-numeric suffix left
# over after the numeric prefix has been split off.
_SUFFIX_REPLACEMENTS = (
    (re.compile('^[:~._+-]+'), ''),                 # remove leading puncts
    (re.compile('[,*")([\]]'), ''),                 # remove unwanted chars
    (re.compile('[~:+_ -]'), '.'),                  # replace illegal chars
    (re.compile('[.]{2,}'), '.'),                   # multiple runs of '.'
    (re.compile(r'\.$'), ''),                       # trailing '.'
)
# Leading dotted-integer run, e.g. '1.2.3' in '1.2.3-rc1'.
_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')
def _suggest_semantic_version(s):
    """
    Try to suggest a semantic form for a version for which
    _suggest_normalized_version couldn't come up with anything.

    Returns a semver-valid string, or None if no suggestion could be made.
    """
    result = s.strip().lower()
    # Normalise obvious junk (stray punctuation, 'v'/'rev' prefixes, ...).
    for pat, repl in _REPLACEMENTS:
        result = pat.sub(repl, result)
    if not result:
        result = '0.0.0'
    # Now look for numeric prefix, and separate it out from
    # the rest.
    m = _NUMERIC_PREFIX.match(result)
    if not m:
        prefix = '0.0.0'
        suffix = result
    else:
        prefix = m.groups()[0].split('.')
        prefix = [int(i) for i in prefix]
        # Pad to exactly MAJOR.MINOR.PATCH; extra components spill into
        # the suffix.
        while len(prefix) < 3:
            prefix.append(0)
        if len(prefix) == 3:
            suffix = result[m.end():]
        else:
            suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
            prefix = prefix[:3]
    prefix = '.'.join([str(i) for i in prefix])
    suffix = suffix.strip()
    if suffix:
        # massage the suffix.
        for pat, repl in _SUFFIX_REPLACEMENTS:
            suffix = pat.sub(repl, suffix)
    if not suffix:
        result = prefix
    else:
        # 'dev' suffixes become pre-release ('-'), anything else build
        # metadata ('+').
        sep = '-' if 'dev' in suffix else '+'
        result = prefix + sep + suffix
    if not is_semver(result):
        result = None
    return result
def _suggest_normalized_version(s):
    """Suggest a normalized version close to the given version string.

    If you have a version string that isn't rational (i.e. NormalizedVersion
    doesn't like it) then you might be able to get an equivalent (or close)
    rational version from this function.

    This does a number of simple normalizations to the given string, based
    on observation of versions currently in use on PyPI. Given a dump of
    those version during PyCon 2009, 4287 of them:
    - 2312 (53.93%) match NormalizedVersion without change
      with the automatic suggestion
    - 3474 (81.04%) match when using this suggestion method

    @param s {str} An irrational version string.
    @returns A rational version string, or None, if couldn't determine one.

    NOTE: the substitution order below is load-bearing; later rules assume
    the rewrites performed by earlier ones.
    """
    try:
        _normalized_key(s)
        return s   # already rational
    except UnsupportedVersionError:
        pass
    rs = s.lower()
    # part of this could use maketrans
    for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
                       ('beta', 'b'), ('rc', 'c'), ('-final', ''),
                       ('-pre', 'c'),
                       ('-release', ''), ('.release', ''), ('-stable', ''),
                       ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
                       ('final', '')):
        rs = rs.replace(orig, repl)
    # if something ends with dev or pre, we add a 0
    rs = re.sub(r"pre$", r"pre0", rs)
    rs = re.sub(r"dev$", r"dev0", rs)
    # if we have something like "b-2" or "a.2" at the end of the
    # version, that is probably beta, alpha, etc
    # let's remove the dash or dot
    rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)
    # 1.0-dev-r371 -> 1.0.dev371
    # 0.1-dev-r79 -> 0.1.dev79
    rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)
    # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
    rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)
    # Clean: v0.3, v1.0
    if rs.startswith('v'):
        rs = rs[1:]
    # Clean leading '0's on numbers.
    #TODO: unintended side-effect on, e.g., "2003.05.09"
    # PyPI stats: 77 (~2%) better
    rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)
    # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
    # zero.
    # PyPI stats: 245 (7.56%) better
    rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)
    # the 'dev-rNNN' tag is a dev tag
    rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)
    # clean the - when used as a pre delimiter
    rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)
    # a terminal "dev" or "devel" can be changed into ".dev0"
    rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)
    # a terminal "dev" can be changed into ".dev0"
    rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)
    # a terminal "final" or "stable" can be removed
    rs = re.sub(r"(final|stable)$", "", rs)
    # The 'r' and the '-' tags are post release tags
    #   0.4a1.r10       ->  0.4a1.post10
    #   0.9.33-17222    ->  0.9.33.post17222
    #   0.9.33-r17222   ->  0.9.33.post17222
    rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)
    # Clean 'r' instead of 'dev' usage:
    #   0.9.33+r17222   ->  0.9.33.dev17222
    #   1.0dev123       ->  1.0.dev123
    #   1.0.git123      ->  1.0.dev123
    #   1.0.bzr123      ->  1.0.dev123
    #   0.1a0dev.123    ->  0.1a0.dev123
    # PyPI stats:  ~150 (~4%) better
    rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)
    # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
    #   0.2.pre1        ->  0.2c1
    #   0.2-c1         ->  0.2c1
    #   1.0preview123   ->  1.0c123
    # PyPI stats: ~21 (0.62%) better
    rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)
    # Tcl/Tk uses "px" for their post release markers
    rs = re.sub(r"p(\d+)$", r".post\1", rs)
    # Only return the rewrite if it now parses cleanly.
    try:
        _normalized_key(rs)
    except UnsupportedVersionError:
        rs = None
    return rs
#
# Legacy version processing (distribute-compatible)
#
# Tokenizer: splits a version string into runs of letters, digits and the
# separators '.' / '-'.
_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I)
# Token canonicalisation used by _legacy_key(): pre/preview/rc become 'c',
# 'dev' becomes '@' (sorts before any letter), separators map to None and
# are dropped.
_VERSION_REPLACE = {
    'pre': 'c',
    'preview': 'c',
    '-': 'final-',
    'rc': 'c',
    'dev': '@',
    '': None,
    '.': None,
}
def _legacy_key(s):
    """Return a setuptools/distribute-compatible sort key tuple for ``s``.

    Numeric parts are zero-padded to width 8 so string comparison matches
    numeric comparison; alphabetic parts get a '*' prefix so they sort
    before numbers.  A '*final' sentinel terminates the key.
    """
    def get_parts(s):
        result = []
        for p in _VERSION_PART.split(s.lower()):
            p = _VERSION_REPLACE.get(p, p)
            if p:
                if '0' <= p[:1] <= '9':
                    p = p.zfill(8)
                else:
                    p = '*' + p
                result.append(p)
        result.append('*final')
        return result
    result = []
    for p in get_parts(s):
        if p.startswith('*'):
            # A pre-release tag cancels any preceding '*final-' markers and
            # trailing zero components ('1.0a1' sorts before '1.0').
            if p < '*final':
                while result and result[-1] == '*final-':
                    result.pop()
                while result and result[-1] == '00000000':
                    result.pop()
        result.append(p)
    return tuple(result)
class LegacyVersion(Version):
    """Version parsed with the setuptools/distribute-compatible scheme."""
    def parse(self, s):
        return _legacy_key(s)
    @property
    def is_prerelease(self):
        # Pre-release markers are '*'-prefixed string parts that sort
        # before the '*final' sentinel (see _legacy_key).
        return any(isinstance(part, string_types) and
                   part.startswith('*') and part < '*final'
                   for part in self._parts)
class LegacyMatcher(Matcher):
    """Matcher for the legacy (setuptools-compatible) version scheme."""
    version_class = LegacyVersion
    _operators = dict(Matcher._operators)
    _operators['~='] = '_match_compatible'
    # FIX: raw string literal -- '\d' in a plain str is an invalid escape
    # sequence (DeprecationWarning since Python 3.6); the compiled pattern
    # is byte-for-byte unchanged.
    numeric_re = re.compile(r'^(\d+(\.\d+)*)')
    def _match_compatible(self, version, constraint, prefix):
        """Implement '~=': at least ``constraint``, and within the release
        series obtained by dropping its last numeric component."""
        if version < constraint:
            return False
        m = self.numeric_re.match(str(constraint))
        if not m:
            # No leading numeric part to anchor a series on; accept.
            logger.warning('Cannot compute compatible match for version %s '
                           ' and constraint %s', version, constraint)
            return True
        s = m.groups()[0]
        if '.' in s:
            s = s.rsplit('.', 1)[0]
        return _match_prefix(version, s)
#
# Semantic versioning
#
# Semantic versioning 2.0.0: MAJOR.MINOR.PATCH with optional '-prerelease'
# and '+build' dotted suffixes (matched case-insensitively here).
_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)'
                        r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'
                        r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
def is_semver(s):
    # Returns the match object (truthy) rather than a bool; callers rely on
    # both the truthiness and the captured groups (see _semantic_key).
    return _SEMVER_RE.match(s)
def _semantic_key(s):
    """Return a sort key ((major, minor, patch), pre, build) for a semver
    string, raising UnsupportedVersionError if ``s`` is not valid semver.

    Sentinels: a missing pre-release part becomes ('|',) which sorts after
    any real pre-release, and a missing build part becomes ('*',) which
    sorts before any real build metadata.
    """
    def make_tuple(s, absent):
        if s is None:
            result = (absent,)
        else:
            parts = s[1:].split('.')
            # We can't compare ints and strings on Python 3, so fudge it
            # by zero-filling numeric values so simulate a numeric comparison
            result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
        return result
    m = is_semver(s)
    if not m:
        raise UnsupportedVersionError(s)
    groups = m.groups()
    major, minor, patch = [int(i) for i in groups[:3]]
    # choose the '|' and '*' so that versions sort correctly
    pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
    return (major, minor, patch), pre, build
class SemanticVersion(Version):
    """Version following the semantic-versioning (semver 2.0.0) scheme."""
    def parse(self, s):
        return _semantic_key(s)
    @property
    def is_prerelease(self):
        # _semantic_key encodes "no pre-release part" as the sentinel ('|',).
        pre = self._parts[1]
        return pre[0] != '|'
class SemanticMatcher(Matcher):
    # Inherits all operator semantics from Matcher; only the version
    # parser differs.
    version_class = SemanticVersion
class VersionScheme(object):
    """Bundle describing one versioning scheme: a sort-key function, a
    Matcher class, and an optional suggester callable."""
    def __init__(self, key, matcher, suggester=None):
        self.key = key
        self.matcher = matcher
        self.suggester = suggester
    def is_valid_version(self, s):
        """True if ``s`` parses under this scheme's version class."""
        try:
            self.matcher.version_class(s)
        except UnsupportedVersionError:
            return False
        return True
    def is_valid_matcher(self, s):
        """True if ``s`` parses as a requirement/constraint string."""
        try:
            self.matcher(s)
        except UnsupportedVersionError:
            return False
        return True
    def is_valid_constraint_list(self, s):
        """
        Used for processing some metadata fields
        """
        # Wrap the bare constraint list in a dummy requirement so the
        # matcher's full grammar can validate it.
        return self.is_valid_matcher('dummy_name (%s)' % s)
    def suggest(self, s):
        """Return a suggested valid form of ``s``, or None when no
        suggester was configured."""
        if self.suggester is None:
            return None
        return self.suggester(s)
# Registry of the supported versioning schemes, keyed by name.
# 'default' is an alias for the PEP 440 ('normalized') scheme.
_SCHEMES = {
    'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
                                _suggest_normalized_version),
    # BUG FIX: VersionScheme.suggest invokes the suggester as suggester(s)
    # with a single argument, so the identity suggester must take exactly
    # one parameter; the previous 'lambda self, s: s' raised TypeError on
    # every call.
    'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda s: s),
    'semantic': VersionScheme(_semantic_key, SemanticMatcher,
                              _suggest_semantic_version),
}
_SCHEMES['default'] = _SCHEMES['normalized']
def get_scheme(name):
    """Return the :class:`VersionScheme` registered under ``name``.

    Raises ``ValueError`` for unknown scheme names.
    """
    if name in _SCHEMES:
        return _SCHEMES[name]
    raise ValueError('unknown scheme name: %r' % name)
| bsd-3-clause |
danialbehzadi/Nokia-RM-1013-2.0.0.11 | webkit/Tools/Scripts/webkitpy/layout_tests/port/chromium.py | 15 | 22631 | #!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Chromium implementations of the Port interface."""
import errno
import logging
import re
import signal
import subprocess
import sys
import time
import webbrowser
from webkitpy.common.system import executive
from webkitpy.common.system.path import cygpath
from webkitpy.layout_tests.layout_package import test_expectations
from webkitpy.layout_tests.port import base
from webkitpy.layout_tests.port import http_server
from webkitpy.layout_tests.port import websocket_server
_log = logging.getLogger("webkitpy.layout_tests.port.chromium")
# FIXME: This function doesn't belong in this package.
class ChromiumPort(base.Port):
    """Abstract base class for Chromium implementations of the Port class."""
    # Every baseline directory that rebaselining tools must consider.
    ALL_BASELINE_VARIANTS = [
        'chromium-mac-snowleopard', 'chromium-mac-leopard',
        'chromium-win-win7', 'chromium-win-vista', 'chromium-win-xp',
        'chromium-linux-x86', 'chromium-linux-x86_64',
        'chromium-gpu-mac-snowleopard', 'chromium-gpu-win-win7', 'chromium-gpu-linux-x86_64',
    ]
    def __init__(self, **kwargs):
        base.Port.__init__(self, **kwargs)
        # Lazily computed root of the Chromium checkout; see
        # path_from_chromium_base().
        self._chromium_base_dir = None
    def _check_file_exists(self, path_to_file, file_description,
                           override_step=None, logging=True):
        """Verify the file is present where expected or log an error.
        Args:
            path_to_file: absolute path of the file to check for.
            file_description: The (human friendly) name or description of the file
            you're looking for (e.g., "HTTP Server"). Used for error logging.
        override_step: An optional string to be logged if the check fails.
        logging: Whether or not log the error messages."""
        if not self._filesystem.exists(path_to_file):
            if logging:
                _log.error('Unable to find %s' % file_description)
                _log.error(' at %s' % path_to_file)
            if override_step:
                _log.error(' %s' % override_step)
            _log.error('')
            return False
        return True
    def baseline_path(self):
        # Baselines for this port live under LayoutTests/platform/<name>.
        return self._webkit_baseline_path(self._name)
    def check_build(self, needs_http):
        """Check that the driver, helper and image-diff binaries exist and
        are up to date; returns False (after logging) when anything is
        missing."""
        result = True
        dump_render_tree_binary_path = self._path_to_driver()
        result = self._check_file_exists(dump_render_tree_binary_path,
                                         'test driver') and result
        if result and self.get_option('build'):
            result = self._check_driver_build_up_to_date(
                self.get_option('configuration'))
        else:
            _log.error('')
        helper_path = self._path_to_helper()
        if helper_path:
            result = self._check_file_exists(helper_path,
                                             'layout test helper') and result
        if self.get_option('pixel_tests'):
            result = self.check_image_diff(
                'To override, invoke with --no-pixel-tests') and result
        # It's okay if pretty patch isn't available, but we will at
        # least log a message.
        self._pretty_patch_available = self.check_pretty_patch()
        return result
    def check_sys_deps(self, needs_http):
        """Run the driver's own '--check-layout-test-sys-deps' self-test and
        report failure (with its output) when it exits non-zero."""
        cmd = [self._path_to_driver(), '--check-layout-test-sys-deps']
        local_error = executive.ScriptError()
        def error_handler(script_error):
            # Capture the exit code without aborting run_command.
            local_error.exit_code = script_error.exit_code
        output = self._executive.run_command(cmd, error_handler=error_handler)
        if local_error.exit_code:
            _log.error('System dependencies check failed.')
            _log.error('To override, invoke with --nocheck-sys-deps')
            _log.error('')
            _log.error(output)
            return False
        return True
    def check_image_diff(self, override_step=None, logging=True):
        # The ImageDiff helper binary must have been built alongside DRT.
        image_diff_path = self._path_to_image_diff()
        return self._check_file_exists(image_diff_path, 'image diff exe',
                                       override_step, logging)
    def diff_image(self, expected_contents, actual_contents,
                   diff_filename=None):
        """Return True when the two PNG byte strings differ, shelling out to
        the ImageDiff binary; optionally writes a visual diff to
        diff_filename."""
        # FIXME: need unit tests for this.
        if not actual_contents and not expected_contents:
            return False
        if not actual_contents or not expected_contents:
            return True
        # ImageDiff works on files, so spill both images to a temp dir.
        tempdir = self._filesystem.mkdtemp()
        expected_filename = self._filesystem.join(str(tempdir), "expected.png")
        self._filesystem.write_binary_file(expected_filename, expected_contents)
        actual_filename = self._filesystem.join(str(tempdir), "actual.png")
        self._filesystem.write_binary_file(actual_filename, actual_contents)
        executable = self._path_to_image_diff()
        if diff_filename:
            cmd = [executable, '--diff', expected_filename,
                   actual_filename, diff_filename]
        else:
            cmd = [executable, expected_filename, actual_filename]
        result = True
        try:
            exit_code = self._executive.run_command(cmd, return_exit_code=True)
            if exit_code == 0:
                # The images are the same.
                result = False
            elif exit_code != 1:
                _log.error("image diff returned an exit code of "
                           + str(exit_code))
                # Returning False here causes the script to think that we
                # successfully created the diff even though we didn't. If
                # we return True, we think that the images match but the hashes
                # don't match.
                # FIXME: Figure out why image_diff returns other values.
                result = False
        except OSError, e:
            if e.errno == errno.ENOENT or e.errno == errno.EACCES:
                # NOTE(review): this assigns a local that is never read;
                # it looks like it was meant to set a module/instance-level
                # availability flag -- confirm intent.
                _compare_available = False
            else:
                raise e
        finally:
            self._filesystem.rmtree(str(tempdir))
        return result
    def driver_name(self):
        return "DumpRenderTree"
    def path_from_chromium_base(self, *comps):
        """Returns the full path to path made by joining the top of the
        Chromium source tree and the list of path components in |*comps|."""
        if not self._chromium_base_dir:
            # Locate the checkout root relative to this file: inside a full
            # Chromium tree we sit under third_party/; in a WebKit-only
            # checkout fall back to Source/WebKit/chromium.
            abspath = self._filesystem.abspath(__file__)
            offset = abspath.find('third_party')
            if offset == -1:
                self._chromium_base_dir = self._filesystem.join(
                    abspath[0:abspath.find('Tools')],
                    'Source', 'WebKit', 'chromium')
            else:
                self._chromium_base_dir = abspath[0:offset]
        return self._filesystem.join(self._chromium_base_dir, *comps)
    def path_to_test_expectations_file(self):
        return self.path_from_webkit_base('LayoutTests', 'platform',
                                          'chromium', 'test_expectations.txt')
    def default_results_directory(self):
        # Prefer the Chromium-tree output directory; fall back to the
        # WebKit-only build path when no Chromium base dir can be found.
        try:
            return self.path_from_chromium_base('webkit',
                self.get_option('configuration'),
                'layout-test-results')
        except AssertionError:
            return self._build_path(self.get_option('configuration'),
                                    'layout-test-results')
    def setup_test_run(self):
        # Delete the disk cache if any to ensure a clean test run.
        dump_render_tree_binary_path = self._path_to_driver()
        cachedir = self._filesystem.dirname(dump_render_tree_binary_path)
        cachedir = self._filesystem.join(cachedir, "cache")
        if self._filesystem.exists(cachedir):
            self._filesystem.rmtree(cachedir)
    def create_driver(self, worker_number):
        """Starts a new Driver and returns a handle to it."""
        return ChromiumDriver(self, worker_number)
    def start_helper(self):
        # The helper process pins display/color settings for pixel tests;
        # it prints 'ready' on stdout once initialised.
        helper_path = self._path_to_helper()
        if helper_path:
            _log.debug("Starting layout helper %s" % helper_path)
            # Note: Not thread safe: http://bugs.python.org/issue2320
            self._helper = subprocess.Popen([helper_path],
                stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None)
            is_ready = self._helper.stdout.readline()
            if not is_ready.startswith('ready'):
                _log.error("layout_test_helper failed to be ready")
    def stop_helper(self):
        # Ask the helper to exit by writing 'x' to its stdin.
        if self._helper:
            _log.debug("Stopping layout test helper")
            self._helper.stdin.write("x\n")
            self._helper.stdin.close()
            # wait() is not threadsafe and can throw OSError due to:
            # http://bugs.python.org/issue1731717
            self._helper.wait()
    def all_baseline_variants(self):
        return self.ALL_BASELINE_VARIANTS
    def test_expectations(self):
        """Returns the test expectations for this port.
        Basically this string should contain the equivalent of a
        test_expectations file. See test_expectations.py for more details."""
        expectations_path = self.path_to_test_expectations_file()
        return self._filesystem.read_text_file(expectations_path)
    def test_expectations_overrides(self):
        # Chromium-side overrides file; None when running outside a full
        # Chromium checkout or when the file is absent.
        try:
            overrides_path = self.path_from_chromium_base('webkit', 'tools',
                'layout_tests', 'test_expectations.txt')
        except AssertionError:
            return None
        if not self._filesystem.exists(overrides_path):
            return None
        return self._filesystem.read_text_file(overrides_path)
    def skipped_layout_tests(self, extra_test_files=None):
        """Return the (relative) paths of all tests marked SKIP by the
        expectations plus overrides."""
        expectations_str = self.test_expectations()
        overrides_str = self.test_expectations_overrides()
        # NOTE(review): is_debug_mode is never used below -- confirm whether
        # it was meant to feed test_configuration().
        is_debug_mode = False
        all_test_files = self.tests([])
        if extra_test_files:
            all_test_files.update(extra_test_files)
        expectations = test_expectations.TestExpectations(
            self, all_test_files, expectations_str, self.test_configuration(),
            is_lint_mode=False, overrides=overrides_str)
        tests_dir = self.layout_tests_dir()
        return [self.relative_test_filename(test)
                for test in expectations.get_tests_with_result_type(test_expectations.SKIP)]
    def test_repository_paths(self):
        # Note: for JSON file's backward-compatibility we use 'chrome' rather
        # than 'chromium' here.
        repos = super(ChromiumPort, self).test_repository_paths()
        repos.append(('chrome', self.path_from_chromium_base()))
        return repos
    #
    # PROTECTED METHODS
    #
    # These routines should only be called by other methods in this file
    # or any subclasses.
    #
    def _check_driver_build_up_to_date(self, configuration):
        """Warn when the binary for the other configuration is newer than the
        one about to run; always returns True (it is only advisory)."""
        if configuration in ('Debug', 'Release'):
            try:
                debug_path = self._path_to_driver('Debug')
                release_path = self._path_to_driver('Release')
                debug_mtime = self._filesystem.mtime(debug_path)
                release_mtime = self._filesystem.mtime(release_path)
                if (debug_mtime > release_mtime and configuration == 'Release' or
                    release_mtime > debug_mtime and configuration == 'Debug'):
                    _log.warning('You are not running the most '
                                 'recent DumpRenderTree binary. You need to '
                                 'pass --debug or not to select between '
                                 'Debug and Release.')
                    _log.warning('')
            # This will fail if we don't have both a debug and release binary.
            # That's fine because, in this case, we must already be running the
            # most up-to-date one.
            except OSError:
                pass
        return True
    def _chromium_baseline_path(self, platform):
        if platform is None:
            platform = self.name()
        return self.path_from_webkit_base('LayoutTests', 'platform', platform)
    def _convert_path(self, path):
        """Handles filename conversion for subprocess command line args."""
        # See note above in diff_image() for why we need this.
        if sys.platform == 'cygwin':
            return cygpath(path)
        return path
    def _path_to_image_diff(self):
        binary_name = 'ImageDiff'
        return self._build_path(self.get_option('configuration'), binary_name)
class ChromiumDriver(base.Driver):
    """Abstract interface for DRT.

    Drives a single DumpRenderTree (test-shell mode) subprocess: builds its
    command line, feeds it test URIs over stdin, and parses the '#URL:' /
    '#MD5:' / '#EOF' framed output it writes back on stdout.
    """
    def __init__(self, port, worker_number):
        self._port = port
        self._worker_number = worker_number
        self._image_path = None
        # Seconds stop() waits for DRT to exit before force-killing it.
        self.KILL_TIMEOUT = 3.0
        if self._port.get_option('pixel_tests'):
            # Each worker gets its own PNG scratch file for pixel results.
            self._image_path = self._port._filesystem.join(self._port.results_directory(),
                'png_result%s.png' % self._worker_number)
    def cmd_line(self):
        """Assemble the DumpRenderTree command line from the port's options."""
        cmd = self._command_wrapper(self._port.get_option('wrapper'))
        cmd.append(self._port._path_to_driver())
        if self._port.get_option('pixel_tests'):
            # See note above in diff_image() for why we need _convert_path().
            cmd.append("--pixel-tests=" +
                       self._port._convert_path(self._image_path))
        cmd.append('--test-shell')
        if self._port.get_option('startup_dialog'):
            cmd.append('--testshell-startup-dialog')
        if self._port.get_option('gp_fault_error_box'):
            cmd.append('--gp-fault-error-box')
        if self._port.get_option('js_flags') is not None:
            cmd.append('--js-flags="' + self._port.get_option('js_flags') + '"')
        if self._port.get_option('stress_opt'):
            cmd.append('--stress-opt')
        if self._port.get_option('stress_deopt'):
            cmd.append('--stress-deopt')
        if self._port.get_option('accelerated_compositing'):
            cmd.append('--enable-accelerated-compositing')
        if self._port.get_option('accelerated_2d_canvas'):
            cmd.append('--enable-accelerated-2d-canvas')
        if self._port.get_option('enable_hardware_gpu'):
            cmd.append('--enable-hardware-gpu')
        cmd.extend(self._port.get_option('additional_drt_flag', []))
        return cmd
    def start(self):
        """Launch the DRT subprocess with piped stdin/stdout (stderr merged)."""
        # FIXME: Should be an error to call this method twice.
        cmd = self.cmd_line()
        # We need to pass close_fds=True to work around Python bug #2320
        # (otherwise we can hang when we kill DumpRenderTree when we are running
        # multiple threads). See http://bugs.python.org/issue2320 .
        # Note that close_fds isn't supported on Windows, but this bug only
        # shows up on Mac and Linux.
        close_flag = sys.platform not in ('win32', 'cygwin')
        self._proc = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.STDOUT,
                                      close_fds=close_flag)
    def poll(self):
        """Return the subprocess's exit code, or None if still running."""
        # poll() is not threadsafe and can throw OSError due to:
        # http://bugs.python.org/issue1731717
        return self._proc.poll()
    def _write_command_and_read_line(self, input=None):
        """Returns a tuple: (line, did_crash)"""
        try:
            if input:
                if isinstance(input, unicode):
                    # DRT expects utf-8
                    input = input.encode("utf-8")
                self._proc.stdin.write(input)
            # DumpRenderTree text output is always UTF-8.  However some tests
            # (e.g. webarchive) may spit out binary data instead of text so we
            # don't bother to decode the output.
            line = self._proc.stdout.readline()
            # We could assert() here that line correctly decodes as UTF-8.
            return (line, False)
        except IOError, e:
            # A broken pipe here means DRT went away; report it as a crash.
            _log.error("IOError communicating w/ DRT: " + str(e))
            return (None, True)
    def _test_shell_command(self, uri, timeoutms, checksum):
        """Format one test-shell stdin command: 'uri [timeoutms] [checksum]\\n'."""
        cmd = uri
        if timeoutms:
            cmd += ' ' + str(timeoutms)
        if checksum:
            cmd += ' ' + checksum
        cmd += "\n"
        return cmd
    def _output_image(self):
        """Returns the image output which driver generated."""
        png_path = self._image_path
        if png_path and self._port._filesystem.exists(png_path):
            return self._port._filesystem.read_binary_file(png_path)
        else:
            return None
    def _output_image_with_retry(self):
        """Read the result PNG, retrying transient EACCES errors on Windows."""
        # Retry a few more times because open() sometimes fails on Windows,
        # raising "IOError: [Errno 13] Permission denied:"
        retry_num = 50
        timeout_seconds = 5.0
        for i in range(retry_num):
            try:
                return self._output_image()
            except IOError, e:
                if e.errno == errno.EACCES:
                    time.sleep(timeout_seconds / retry_num)
                else:
                    raise e
        # Last attempt; let any remaining IOError propagate.
        return self._output_image()
    def _clear_output_image(self):
        """Delete any stale PNG result left over from a previous test."""
        png_path = self._image_path
        if png_path and self._port._filesystem.exists(png_path):
            self._port._filesystem.remove(png_path)
    def run_test(self, driver_input):
        """Run one test through DRT and collect its output.

        Writes the test command to DRT's stdin, then reads stdout line by
        line until '#EOF', classifying lines by their '#URL:'/'#MD5:'/
        '#TEST_TIMED_OUT' prefixes.  Returns a base.DriverOutput.
        """
        output = []
        error = []
        crash = False
        timeout = False
        actual_uri = None
        actual_checksum = None
        self._clear_output_image()
        start_time = time.time()
        uri = self._port.filename_to_uri(driver_input.filename)
        cmd = self._test_shell_command(uri, driver_input.timeout,
                                       driver_input.image_hash)
        (line, crash) = self._write_command_and_read_line(input=cmd)
        while not crash and line.rstrip() != "#EOF":
            # Make sure we haven't crashed.
            if line == '' and self.poll() is not None:
                # This is hex code 0xc000001d, which is used for abrupt
                # termination. This happens if we hit ctrl+c from the prompt
                # and we happen to be waiting on DRT.
                # sdoyon: Not sure for which OS and in what circumstances the
                # above code is valid. What works for me under Linux to detect
                # ctrl+c is for the subprocess returncode to be negative
                # SIGINT. And that agrees with the subprocess documentation.
                if (-1073741510 == self._proc.returncode or
                    - signal.SIGINT == self._proc.returncode):
                    raise KeyboardInterrupt
                crash = True
                break
            # Don't include #URL lines in our output
            if line.startswith("#URL:"):
                actual_uri = line.rstrip()[5:]
                if uri != actual_uri:
                    # GURL capitalizes the drive letter of a file URL.
                    if (not re.search("^file:///[a-z]:", uri) or
                        uri.lower() != actual_uri.lower()):
                        _log.fatal("Test got out of sync:\n|%s|\n|%s|" %
                                   (uri, actual_uri))
                        raise AssertionError("test out of sync")
            elif line.startswith("#MD5:"):
                actual_checksum = line.rstrip()[5:]
            elif line.startswith("#TEST_TIMED_OUT"):
                timeout = True
                # Test timed out, but we still need to read until #EOF.
            elif actual_uri:
                # Everything after the #URL echo is test output.
                output.append(line)
            else:
                # Anything before the #URL echo is treated as error/log text.
                error.append(line)
            (line, crash) = self._write_command_and_read_line(input=None)
        # FIXME: Add support for audio when we're ready.
        run_time = time.time() - start_time
        output_image = self._output_image_with_retry()
        text = ''.join(output)
        if not text:
            text = None
        return base.DriverOutput(text, output_image, actual_checksum, audio=None,
            crash=crash, test_time=run_time, timeout=timeout, error=''.join(error))
    def stop(self):
        """Shut DRT down: close its pipes, wait briefly, then kill if needed."""
        if self._proc:
            self._proc.stdin.close()
            self._proc.stdout.close()
            if self._proc.stderr:
                self._proc.stderr.close()
            # Closing stdin/stdout/stderr hangs sometimes on OS X,
            # (see __init__(), above), and anyway we don't want to hang
            # the harness if DRT is buggy, so we wait a couple
            # seconds to give DRT a chance to clean up, but then
            # force-kill the process if necessary.
            timeout = time.time() + self.KILL_TIMEOUT
            while self._proc.poll() is None and time.time() < timeout:
                time.sleep(0.1)
            if self._proc.poll() is None:
                _log.warning('stopping test driver timed out, '
                             'killing it')
                self._port._executive.kill_process(self._proc.pid)
            # FIXME: This is sometime none. What is wrong? assert self._proc.poll() is not None
            if self._proc.poll() is not None:
                self._proc.wait()
            self._proc = None
| gpl-3.0 |
dfunckt/django | tests/forms_tests/widget_tests/test_passwordinput.py | 247 | 1052 | from django.forms import PasswordInput
from .base import WidgetTest
class PasswordInputTest(WidgetTest):
    """Rendering tests for django.forms.PasswordInput."""
    widget = PasswordInput()
    def test_render(self):
        """An empty value renders as a bare password input."""
        self.check_html(self.widget, 'password', '', html='<input type="password" name="password" />')
    def test_render_ignore_value(self):
        """By default the widget never echoes a submitted value back."""
        self.check_html(self.widget, 'password', 'secret', html='<input type="password" name="password" />')
    def test_render_value_true(self):
        """
        The render_value argument lets you specify whether the widget should
        render its value. For security reasons, this is off by default.
        """
        widget = PasswordInput(render_value=True)
        self.check_html(widget, 'password', '', html='<input type="password" name="password" />')
        self.check_html(widget, 'password', None, html='<input type="password" name="password" />')
        self.check_html(
            widget, 'password', 'test@example.com',
            html='<input type="password" name="password" value="test@example.com" />',
        )
| bsd-3-clause |
ning/collector | src/utils/py/thrift/TSCons.py | 145 | 1266 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from os import path
from SCons.Builder import Builder
def scons_env(env, add=''):
  """Register a 'ThriftCpp' builder on the given SCons environment.

  The builder runs the thrift compiler with C++ generation, writing output
  next to the target; extra thrift flags may be supplied via `add`.
  """
  output_dir = path.dirname(path.abspath('$TARGET'))
  action_str = 'thrift --gen cpp -o ' + output_dir + ' ' + add + ' $SOURCE'
  env.Append(BUILDERS = {'ThriftCpp' : Builder(action = action_str)})
def gen_cpp(env, dir, file):
  """Invoke the ThriftCpp builder for `dir`/`file`.thrift.

  Registers the builder (via scons_env) and returns the SCons nodes for the
  generated gen-cpp/<file>_types.h and gen-cpp/<file>_types.cpp targets.
  (`dir` and `file` shadow builtins, but renaming them would change the
  keyword interface for existing callers, so they are kept.)
  """
  scons_env(env)
  suffixes = ['_types.h', '_types.cpp']
  # List comprehension instead of map(lambda ...): clearer, and still a real
  # list on Python 3 where map() returns a lazy iterator.
  targets = ['gen-cpp/' + file + s for s in suffixes]
  return env.ThriftCpp(targets, dir + file + '.thrift')
| apache-2.0 |
tailorian/Sick-Beard | lib/unidecode/x09a.py | 253 | 4623 | data = (
'E ', # 0x00
'Cheng ', # 0x01
'Xin ', # 0x02
'Ai ', # 0x03
'Lu ', # 0x04
'Zhui ', # 0x05
'Zhou ', # 0x06
'She ', # 0x07
'Pian ', # 0x08
'Kun ', # 0x09
'Tao ', # 0x0a
'Lai ', # 0x0b
'Zong ', # 0x0c
'Ke ', # 0x0d
'Qi ', # 0x0e
'Qi ', # 0x0f
'Yan ', # 0x10
'Fei ', # 0x11
'Sao ', # 0x12
'Yan ', # 0x13
'Jie ', # 0x14
'Yao ', # 0x15
'Wu ', # 0x16
'Pian ', # 0x17
'Cong ', # 0x18
'Pian ', # 0x19
'Qian ', # 0x1a
'Fei ', # 0x1b
'Huang ', # 0x1c
'Jian ', # 0x1d
'Huo ', # 0x1e
'Yu ', # 0x1f
'Ti ', # 0x20
'Quan ', # 0x21
'Xia ', # 0x22
'Zong ', # 0x23
'Kui ', # 0x24
'Rou ', # 0x25
'Si ', # 0x26
'Gua ', # 0x27
'Tuo ', # 0x28
'Kui ', # 0x29
'Sou ', # 0x2a
'Qian ', # 0x2b
'Cheng ', # 0x2c
'Zhi ', # 0x2d
'Liu ', # 0x2e
'Pang ', # 0x2f
'Teng ', # 0x30
'Xi ', # 0x31
'Cao ', # 0x32
'Du ', # 0x33
'Yan ', # 0x34
'Yuan ', # 0x35
'Zou ', # 0x36
'Sao ', # 0x37
'Shan ', # 0x38
'Li ', # 0x39
'Zhi ', # 0x3a
'Shuang ', # 0x3b
'Lu ', # 0x3c
'Xi ', # 0x3d
'Luo ', # 0x3e
'Zhang ', # 0x3f
'Mo ', # 0x40
'Ao ', # 0x41
'Can ', # 0x42
'Piao ', # 0x43
'Cong ', # 0x44
'Qu ', # 0x45
'Bi ', # 0x46
'Zhi ', # 0x47
'Yu ', # 0x48
'Xu ', # 0x49
'Hua ', # 0x4a
'Bo ', # 0x4b
'Su ', # 0x4c
'Xiao ', # 0x4d
'Lin ', # 0x4e
'Chan ', # 0x4f
'Dun ', # 0x50
'Liu ', # 0x51
'Tuo ', # 0x52
'Zeng ', # 0x53
'Tan ', # 0x54
'Jiao ', # 0x55
'Tie ', # 0x56
'Yan ', # 0x57
'Luo ', # 0x58
'Zhan ', # 0x59
'Jing ', # 0x5a
'Yi ', # 0x5b
'Ye ', # 0x5c
'Tuo ', # 0x5d
'Bin ', # 0x5e
'Zou ', # 0x5f
'Yan ', # 0x60
'Peng ', # 0x61
'Lu ', # 0x62
'Teng ', # 0x63
'Xiang ', # 0x64
'Ji ', # 0x65
'Shuang ', # 0x66
'Ju ', # 0x67
'Xi ', # 0x68
'Huan ', # 0x69
'Li ', # 0x6a
'Biao ', # 0x6b
'Ma ', # 0x6c
'Yu ', # 0x6d
'Tuo ', # 0x6e
'Xun ', # 0x6f
'Chi ', # 0x70
'Qu ', # 0x71
'Ri ', # 0x72
'Bo ', # 0x73
'Lu ', # 0x74
'Zang ', # 0x75
'Shi ', # 0x76
'Si ', # 0x77
'Fu ', # 0x78
'Ju ', # 0x79
'Zou ', # 0x7a
'Zhu ', # 0x7b
'Tuo ', # 0x7c
'Nu ', # 0x7d
'Jia ', # 0x7e
'Yi ', # 0x7f
'Tai ', # 0x80
'Xiao ', # 0x81
'Ma ', # 0x82
'Yin ', # 0x83
'Jiao ', # 0x84
'Hua ', # 0x85
'Luo ', # 0x86
'Hai ', # 0x87
'Pian ', # 0x88
'Biao ', # 0x89
'Li ', # 0x8a
'Cheng ', # 0x8b
'Yan ', # 0x8c
'Xin ', # 0x8d
'Qin ', # 0x8e
'Jun ', # 0x8f
'Qi ', # 0x90
'Qi ', # 0x91
'Ke ', # 0x92
'Zhui ', # 0x93
'Zong ', # 0x94
'Su ', # 0x95
'Can ', # 0x96
'Pian ', # 0x97
'Zhi ', # 0x98
'Kui ', # 0x99
'Sao ', # 0x9a
'Wu ', # 0x9b
'Ao ', # 0x9c
'Liu ', # 0x9d
'Qian ', # 0x9e
'Shan ', # 0x9f
'Piao ', # 0xa0
'Luo ', # 0xa1
'Cong ', # 0xa2
'Chan ', # 0xa3
'Zou ', # 0xa4
'Ji ', # 0xa5
'Shuang ', # 0xa6
'Xiang ', # 0xa7
'Gu ', # 0xa8
'Wei ', # 0xa9
'Wei ', # 0xaa
'Wei ', # 0xab
'Yu ', # 0xac
'Gan ', # 0xad
'Yi ', # 0xae
'Ang ', # 0xaf
'Tou ', # 0xb0
'Xie ', # 0xb1
'Bao ', # 0xb2
'Bi ', # 0xb3
'Chi ', # 0xb4
'Ti ', # 0xb5
'Di ', # 0xb6
'Ku ', # 0xb7
'Hai ', # 0xb8
'Qiao ', # 0xb9
'Gou ', # 0xba
'Kua ', # 0xbb
'Ge ', # 0xbc
'Tui ', # 0xbd
'Geng ', # 0xbe
'Pian ', # 0xbf
'Bi ', # 0xc0
'Ke ', # 0xc1
'Ka ', # 0xc2
'Yu ', # 0xc3
'Sui ', # 0xc4
'Lou ', # 0xc5
'Bo ', # 0xc6
'Xiao ', # 0xc7
'Pang ', # 0xc8
'Bo ', # 0xc9
'Ci ', # 0xca
'Kuan ', # 0xcb
'Bin ', # 0xcc
'Mo ', # 0xcd
'Liao ', # 0xce
'Lou ', # 0xcf
'Nao ', # 0xd0
'Du ', # 0xd1
'Zang ', # 0xd2
'Sui ', # 0xd3
'Ti ', # 0xd4
'Bin ', # 0xd5
'Kuan ', # 0xd6
'Lu ', # 0xd7
'Gao ', # 0xd8
'Gao ', # 0xd9
'Qiao ', # 0xda
'Kao ', # 0xdb
'Qiao ', # 0xdc
'Lao ', # 0xdd
'Zao ', # 0xde
'Biao ', # 0xdf
'Kun ', # 0xe0
'Kun ', # 0xe1
'Ti ', # 0xe2
'Fang ', # 0xe3
'Xiu ', # 0xe4
'Ran ', # 0xe5
'Mao ', # 0xe6
'Dan ', # 0xe7
'Kun ', # 0xe8
'Bin ', # 0xe9
'Fa ', # 0xea
'Tiao ', # 0xeb
'Peng ', # 0xec
'Zi ', # 0xed
'Fa ', # 0xee
'Ran ', # 0xef
'Ti ', # 0xf0
'Pao ', # 0xf1
'Pi ', # 0xf2
'Mao ', # 0xf3
'Fu ', # 0xf4
'Er ', # 0xf5
'Rong ', # 0xf6
'Qu ', # 0xf7
'Gong ', # 0xf8
'Xiu ', # 0xf9
'Gua ', # 0xfa
'Ji ', # 0xfb
'Peng ', # 0xfc
'Zhua ', # 0xfd
'Shao ', # 0xfe
'Sha ', # 0xff
)
| gpl-3.0 |
Rhombik/rhombik-object-repository | importer/thingiverse.py | 2 | 6930 | # -*- coding: utf-8 -*-
import scrapy
from scrapy.contrib.spiders import CrawlSpider, Rule
from scraper.spider.items import ProjectItem, fileObjectItem
from scrapy.contrib.linkextractors import LinkExtractor
import re
import urlparse
from twisted.internet import reactor
from scrapy.crawler import Crawler
from scrapy import log, signals
from scrapy.utils.project import get_project_settings
from django.contrib.auth.models import User
import os
os.environ.setdefault("SCRAPY_SETTINGS_MODULE", "scraper.spider.settings")
def runScraper(urls, user):
    """Crawl the given thingiverse URLs on behalf of `user`, blocking until done.

    Builds a single scrapy Crawler around a ThingiverseSpider and runs it in
    this process's twisted reactor; reactor.run() blocks the calling thread
    until the spider-closed signal stops the reactor.
    """
    # NOTE: the original computed an unused local (userID = user.pk); the
    # spider already extracts user.pk itself, so the dead code was removed.
    spider = ThingiverseSpider(urls, user=user)
    settings = get_project_settings()
    crawler = Crawler(settings)
    crawler.signals.connect(reactor.stop, signal=signals.spider_closed)
    crawler.configure()
    crawler.crawl(spider)
    crawler.start()
    reactor.run(installSignalHandlers=0)
class ThingiverseSpider(CrawlSpider):
    """Scrapy spider that imports thingiverse projects into the local models.

    Entry URLs may be either individual 'thing' pages or user/profile pages;
    things are scraped into ProjectItem/fileObjectItem instances owned by the
    user passed to __init__.
    """
    name = "thingiverse"
    allowed_domains = ["thingiverse.com"]
    # Throttle requests to be polite to the remote site.
    download_delay = 0.4
    ##Find the links.
    start_urls = None
    def __init__(self, start_urls, user=None, *args, **kwargs):
        self.start_urls = start_urls
        # Fall back to the first user (pk=1) when no owner is supplied.
        if not user:
            user = User.objects.filter(pk=1)[0]
        self.user_id=user.pk
        super(ThingiverseSpider, self).__init__(*args, **kwargs)
    def start_requests(self):
        """Seed the crawl with one non-deduplicated request per start URL."""
        requests=[]
        for i in self.start_urls:
            requests.append(scrapy.http.Request(url=i, callback=self.parse, dont_filter=True))
        return requests
    def parse(self, response):
        """Route a response: thing pages go to project(), others to projectGet()."""
        ## if it's a thing it's not a profile.
        if re.search('thing:\d\d+',response.url):
            yield scrapy.http.Request(url=response.url, callback=self.project)
        else:
            ## sometimes thing pages link to other things with the 'design' tag. I haven't seen this on a user page.
            design = LinkExtractor(allow=('design')).extract_links(response)
            if design:
                yield scrapy.http.Request(url=design[0].url, callback=self.projectGet)
    def projectGet(self, response):
        """Walk a listing page: follow its pagination and every thing link."""
        ##Get next pages. We can be really lazy due to the scrapy dedupe
        paginatorlinks=response.selector.xpath('//*[contains(@class,\'pagination\')]/ul/li/a/@href').extract()
        #:/ I guess this makes sense.
        from exceptions import IndexError
        try:
            # Drop the first paginator entry (the current/previous link).
            paginatorlinks.pop(0)
        except IndexError as e:
            # e.message is dep, I guess using str(e) returning the message now is the thing.
            if not str(e) == "pop from empty list":
                raise
        for i in paginatorlinks:
            yield scrapy.http.Request(url=urlparse.urljoin(response.url, i), callback=self.projectGet)
        objects = LinkExtractor(allow=('thing:\d\d+')).extract_links(response)
        for i in objects:
            # Teh hax! scrapy's dupefilter sees "foo.bar" and "foo.bar/" as different sites. This is bad. Maybe this should be pushed to scrapy proper...
            if i.url[-1] == '/':
                i.url=i.url[:-1]
            yield scrapy.http.Request(url=i.url, callback=self.project)
    ###This is where we get items. Everything else is just URL handling.
    def project(self,response):
        """Scrape one thing page into a ProjectItem plus its file/image items."""
        ## Get the project info proper.
        projectObject=ProjectItem()
        projectObject['author']=User.objects.get(pk=self.user_id)
        projectObject['title']=response.selector.xpath('//*[contains(@class,\'thing-header-data\')]/h1/text()').extract()[0].strip()
        projectObject['tags'] = response.selector.xpath("//*[contains(@class,\'thing-info-content thing-detail-tags-container\')]/div/a/text()").extract()
        yield projectObject
        ## get special text files. (readme, instructions, license)
        import html2text
        h2t = html2text.HTML2Text()
        #Get the reame file, do stuff to it.
        readme = h2t.handle(response.selector.xpath("//*[@id = 'description']").extract()[0].strip())
        import unicodedata
        readmeItem=fileObjectItem()
        readmeItem["name"]="README.md"
        readmeItem["parent"]=projectObject['SID']
        readmeItem["filename"]=u""+unicodedata.normalize('NFKD',readme).encode('ascii','ignore')
        readmeItem['isReadme'] = True
        yield readmeItem
        #projectObject['readme'] = u""+unicodedata.normalize('NFKD',readme).encode('ascii','ignore')
        #also a markdown file I guess we'd want.
        try:
            instructions = u""+h2t.handle(response.selector.xpath("//*[@id = 'instructions']").extract()[0].strip()).encode('ascii','ignore')
            instructionItem=fileObjectItem()
            instructionItem["name"]="Instructions.md"
            instructionItem["parent"]=projectObject['SID']
            instructionItem["filename"]=instructions
            yield instructionItem
        except IndexError:
            # Some things simply have no instructions section; skip silently.
            pass
            #print("xpath to get the instructions IndexError'd")
        ## now, because the format of the license on thingi is always the same, we can pull this off.
        ## but I expect it is rather fragile.
        licenseurl =response.selector.xpath("//*[contains(@class,\'license-text\')]/a/@href")[2].extract().strip()
        licensetext = response.selector.xpath("//*[contains(@class,\'license-text\')]/a/text()")[1].extract().strip()
        licenceItem=fileObjectItem()
        licenceItem["name"]="License.md"
        licenceItem["parent"]=projectObject['SID']
        licenceItem["filename"]="["+licensetext+"]("+licenseurl+")"
        yield licenceItem
        ## get all the projects image and file objects
        filelist = response.selector.xpath('//*[contains(@class,\'thing-file\')]/a/@href')
        for i in filelist:
            yield scrapy.http.Request(url=urlparse.urljoin(response.url, i.extract()), callback=self.item, meta={'parent':projectObject['SID']})
        #Grab only raw images.
        imagelist = response.selector.xpath('//*[contains(@class,\'thing-gallery-thumbs\')]/div[@data-track-action="viewThumb"][@data-thingiview-url=""]/@data-large-url')
        for i in imagelist:
            yield scrapy.http.Request(dont_filter=True, url=urlparse.urljoin(response.url, i.extract()), callback=self.item, meta={'parent':projectObject['SID']})
    def closed(self, *args, **kwargs):
        """Spider-closed hook: re-save every imported Project with validation."""
        from scraper.spider import djangoAutoItem
        from project.models import Project
        from exceptions import KeyError
        for key in djangoAutoItem.SIDmap:
            project=Project.objects.get(pk=djangoAutoItem.SIDmap[key]['pk'])
            project.save(enf_valid=True)
    def item(self,response):
        """Build a fileObjectItem for one downloaded file or image body."""
        item=fileObjectItem()
        ## warning stupid preasent here.
        # splitting and grabing from urlparse for filename may not be best.
        item['name']=urlparse.urlparse(response.url)[2].split("/")[-1]
        item['name']=item['name'].replace("_display_large","")
        item['parent'] = response.meta['parent']
        item['filename']=response.body
        yield(item)
| agpl-3.0 |
fafaman/django | django/contrib/sites/managers.py | 472 | 2132 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.core import checks
from django.core.exceptions import FieldDoesNotExist
from django.db import models
class CurrentSiteManager(models.Manager):
    "Use this to limit objects to those associated with the current site."
    use_in_migrations = True
    def __init__(self, field_name=None):
        # field_name: name of the FK/M2M pointing at Site; autodetected when None.
        super(CurrentSiteManager, self).__init__()
        self.__field_name = field_name
    def check(self, **kwargs):
        """Extend the manager system checks with the site-field validation."""
        errors = super(CurrentSiteManager, self).check(**kwargs)
        errors.extend(self._check_field_name())
        return errors
    def _check_field_name(self):
        """Validate that the site field exists and is a ForeignKey/M2M.

        Returns a list of checks.Error objects (empty when valid).
        """
        field_name = self._get_field_name()
        try:
            field = self.model._meta.get_field(field_name)
        except FieldDoesNotExist:
            return [
                checks.Error(
                    "CurrentSiteManager could not find a field named '%s'." % field_name,
                    hint=None,
                    obj=self,
                    id='sites.E001',
                )
            ]
        if not isinstance(field, (models.ForeignKey, models.ManyToManyField)):
            return [
                checks.Error(
                    "CurrentSiteManager cannot use '%s.%s' as it is not a ForeignKey or ManyToManyField." % (
                        self.model._meta.object_name, field_name
                    ),
                    hint=None,
                    obj=self,
                    id='sites.E002',
                )
            ]
        return []
    def _get_field_name(self):
        """ Return self.__field_name or 'site' or 'sites'. """
        # Lazily autodetect: prefer a field named 'site', fall back to 'sites',
        # and cache the result for subsequent queryset filtering.
        if not self.__field_name:
            try:
                self.model._meta.get_field('site')
            except FieldDoesNotExist:
                self.__field_name = 'sites'
            else:
                self.__field_name = 'site'
        return self.__field_name
    def get_queryset(self):
        """Filter the default queryset down to the current SITE_ID."""
        return super(CurrentSiteManager, self).get_queryset().filter(
            **{self._get_field_name() + '__id': settings.SITE_ID})
| bsd-3-clause |
adminneyk/codificacionproyectando | application/views/Generacion/Generacion/lib/openoffice/openoffice.org/basis3.4/program/python-core-2.6.1/lib/repr.py | 417 | 4296 | """Redo the builtin repr() (representation) but with limits on most sizes."""
__all__ = ["Repr","repr"]
import __builtin__
from itertools import islice
class Repr:
    """Produce repr()-like strings with per-type size limits.

    Each max* attribute caps how much of the corresponding type is shown;
    over-long output is elided with '...'.
    """
    def __init__(self):
        # Maximum nesting depth before collapsing to '...'.
        self.maxlevel = 6
        # Per-container element limits.
        self.maxtuple = 6
        self.maxlist = 6
        self.maxarray = 5
        self.maxdict = 4
        self.maxset = 6
        self.maxfrozenset = 6
        self.maxdeque = 6
        # Character limits for strings, longs, and everything else.
        self.maxstring = 30
        self.maxlong = 40
        self.maxother = 20
    def repr(self, x):
        """Return the size-limited representation of x."""
        return self.repr1(x, self.maxlevel)
    def repr1(self, x, level):
        """Dispatch on type(x) to a repr_<typename> method, if one exists."""
        typename = type(x).__name__
        if ' ' in typename:
            # e.g. 'instance method' -> 'instance_method' for attribute lookup.
            parts = typename.split()
            typename = '_'.join(parts)
        if hasattr(self, 'repr_' + typename):
            return getattr(self, 'repr_' + typename)(x, level)
        else:
            # No specialized handler: truncate the builtin repr in the middle.
            s = __builtin__.repr(x)
            if len(s) > self.maxother:
                i = max(0, (self.maxother-3)//2)
                j = max(0, self.maxother-3-i)
                s = s[:i] + '...' + s[len(s)-j:]
            return s
    def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
        """Shared helper: format up to maxiter elements of x between left/right.

        trail is appended for one-element containers (e.g. the ',' in '(x,)').
        """
        n = len(x)
        if level <= 0 and n:
            s = '...'
        else:
            newlevel = level - 1
            repr1 = self.repr1
            pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
            if n > maxiter: pieces.append('...')
            s = ', '.join(pieces)
            if n == 1 and trail: right = trail + right
        return '%s%s%s' % (left, s, right)
    def repr_tuple(self, x, level):
        return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')
    def repr_list(self, x, level):
        return self._repr_iterable(x, level, '[', ']', self.maxlist)
    def repr_array(self, x, level):
        header = "array('%s', [" % x.typecode
        return self._repr_iterable(x, level, header, '])', self.maxarray)
    def repr_set(self, x, level):
        # Sort when possible so output is deterministic.
        x = _possibly_sorted(x)
        return self._repr_iterable(x, level, 'set([', '])', self.maxset)
    def repr_frozenset(self, x, level):
        x = _possibly_sorted(x)
        return self._repr_iterable(x, level, 'frozenset([', '])',
                                   self.maxfrozenset)
    def repr_deque(self, x, level):
        return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)
    def repr_dict(self, x, level):
        """Format up to maxdict key/value pairs, recursing with reduced level."""
        n = len(x)
        if n == 0: return '{}'
        if level <= 0: return '{...}'
        newlevel = level - 1
        repr1 = self.repr1
        pieces = []
        for key in islice(_possibly_sorted(x), self.maxdict):
            keyrepr = repr1(key, newlevel)
            valrepr = repr1(x[key], newlevel)
            pieces.append('%s: %s' % (keyrepr, valrepr))
        if n > self.maxdict: pieces.append('...')
        s = ', '.join(pieces)
        return '{%s}' % (s,)
    def repr_str(self, x, level):
        s = __builtin__.repr(x[:self.maxstring])
        if len(s) > self.maxstring:
            # Re-repr a head+tail slice so quoting/escaping stays correct.
            i = max(0, (self.maxstring-3)//2)
            j = max(0, self.maxstring-3-i)
            s = __builtin__.repr(x[:i] + x[len(x)-j:])
            s = s[:i] + '...' + s[len(s)-j:]
        return s
    def repr_long(self, x, level):
        s = __builtin__.repr(x) # XXX Hope this isn't too slow...
        if len(s) > self.maxlong:
            i = max(0, (self.maxlong-3)//2)
            j = max(0, self.maxlong-3-i)
            s = s[:i] + '...' + s[len(s)-j:]
        return s
    def repr_instance(self, x, level):
        try:
            s = __builtin__.repr(x)
            # Bugs in x.__repr__() can cause arbitrary
            # exceptions -- then make up something
        except Exception:
            return '<%s instance at %x>' % (x.__class__.__name__, id(x))
        if len(s) > self.maxstring:
            i = max(0, (self.maxstring-3)//2)
            j = max(0, self.maxstring-3-i)
            s = s[:i] + '...' + s[len(s)-j:]
        return s
def _possibly_sorted(x):
# Since not all sequences of items can be sorted and comparison
# functions may raise arbitrary exceptions, return an unsorted
# sequence in that case.
try:
return sorted(x)
except Exception:
return list(x)
# Shared module-level instance plus a bound convenience function mirroring
# the builtin repr() call signature.
aRepr = Repr()
repr = aRepr.repr
| mit |
cevaris/pants | src/python/pants/goal/goal.py | 7 | 8981 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.goal.error import GoalError
from pants.option.optionable import Optionable
class Goal(object):
  """Factory for objects representing goals.

  Ensures that we have exactly one instance per goal name.

  :API: public
  """
  # Registry mapping goal name -> its singleton _Goal instance.
  _goal_by_name = dict()
  def __new__(cls, *args, **kwargs):
    # Direct construction is forbidden to preserve the one-instance-per-name
    # invariant; by_name() is the only entry point.
    raise TypeError('Do not instantiate {0}. Call by_name() instead.'.format(cls))
  @classmethod
  def register(cls, name, description):
    """Register a goal description.

    Otherwise the description must be set when registering some task on the goal,
    which is clunky, and dependent on things like registration order of tasks in the goal.

    A goal that isn't explicitly registered with a description will fall back to the description
    of the task in that goal with the same name (if any). So singleton goals (e.g., 'clean-all')
    need not be registered explicitly. This method is primarily useful for setting a
    description on a generic goal like 'compile' or 'test', that multiple backends will
    register tasks on.

    :API: public

    :param string name: The name of the goal; ie: the way to specify it on the command line.
    :param string description: A description of the tasks in the goal do.
    :return: The freshly registered goal.
    :rtype: :class:`_Goal`
    """
    goal = cls.by_name(name)
    goal._description = description
    return goal
  @classmethod
  def by_name(cls, name):
    """Returns the unique object representing the goal of the specified name.

    :API: public
    """
    # Lazily create and memoize the singleton for this name.
    if name not in cls._goal_by_name:
      cls._goal_by_name[name] = _Goal(name)
    return cls._goal_by_name[name]
  @classmethod
  def clear(cls):
    """Remove all goals and tasks.

    This method is EXCLUSIVELY for use in tests and during pantsd startup.

    :API: public
    """
    cls._goal_by_name.clear()
  @staticmethod
  def scope(goal_name, task_name):
    """Returns options scope for specified task in specified goal.

    :API: public
    """
    # A task named after its goal shortens to just the goal name.
    return goal_name if goal_name == task_name else '{0}.{1}'.format(goal_name, task_name)
  @staticmethod
  def all():
    """Returns all active registered goals, sorted alphabetically by name.

    :API: public
    """
    return [goal for _, goal in sorted(Goal._goal_by_name.items()) if goal.active]
  @classmethod
  def subsystems(cls):
    """Returns all subsystem types used by all tasks, in no particular order.

    :API: public
    """
    ret = set()
    for goal in cls.all():
      ret.update(goal.subsystems())
    return ret
class _Goal(object):
def __init__(self, name):
"""Don't call this directly.
Create goals only through the Goal.by_name() factory.
"""
Optionable.validate_scope_name_component(name)
self.name = name
self._description = ''
self.serialize = False
self._task_type_by_name = {} # name -> Task subclass.
self._ordered_task_names = [] # The task names, in the order imposed by registration.
@property
def description(self):
if self._description:
return self._description
# Return the docstring for the Task registered under the same name as this goal, if any.
# This is a very common case, and therefore a useful idiom.
namesake_task = self._task_type_by_name.get(self.name)
if namesake_task and namesake_task.__doc__:
# First line of docstring.
# TODO: This is repetitive of Optionable.get_description(). We should probably just
# make Goal an Optionable, for uniformity.
return namesake_task.__doc__.partition('\n')[0].strip()
return ''
def register_options(self, options):
for task_type in sorted(self.task_types(), key=lambda cls: cls.options_scope):
task_type.register_options_on_scope(options)
def install(self, task_registrar, first=False, replace=False, before=None, after=None):
"""Installs the given task in this goal.
The placement of the task in this goal's execution list defaults to the end but its position
can be influenced by specifying exactly one of the following arguments:
first: Places the task 1st in the execution list.
replace: Removes all existing tasks in this goal and installs this task.
before: Places the task before the named task in the execution list.
after: Places the task after the named task in the execution list.
:API: public
"""
if [bool(place) for place in [first, replace, before, after]].count(True) > 1:
raise GoalError('Can only specify one of first, replace, before or after')
task_name = task_registrar.name
Optionable.validate_scope_name_component(task_name)
options_scope = Goal.scope(self.name, task_name)
# Currently we need to support registering the same task type multiple times in different
# scopes. However we still want to have each task class know the options scope it was
# registered in. So we create a synthetic subclass here.
# TODO(benjy): Revisit this when we revisit the task lifecycle. We probably want to have
# a task *instance* know its scope, but this means converting option registration from
# a class method to an instance method, and instantiating the task much sooner in the
# lifecycle.
superclass = task_registrar.task_type
subclass_name = b'{0}_{1}'.format(superclass.__name__,
options_scope.replace('.', '_').replace('-', '_'))
task_type = type(subclass_name, (superclass,), {
'__doc__': superclass.__doc__,
'__module__': superclass.__module__,
'options_scope': options_scope,
'_stable_name': superclass.stable_name()
})
otn = self._ordered_task_names
if replace:
for tt in self.task_types():
tt.options_scope = None
del otn[:]
self._task_type_by_name = {}
if first:
otn.insert(0, task_name)
elif before in otn:
otn.insert(otn.index(before), task_name)
elif after in otn:
otn.insert(otn.index(after) + 1, task_name)
else:
otn.append(task_name)
self._task_type_by_name[task_name] = task_type
if task_registrar.serialize:
self.serialize = True
return self
def uninstall_task(self, name):
"""Removes the named task from this goal.
Allows external plugins to modify the execution plan. Use with caution.
Note: Does not relax a serialization requirement that originated
from the uninstalled task's install() call.
:API: public
"""
if name in self._task_type_by_name:
self._task_type_by_name[name].options_scope = None
del self._task_type_by_name[name]
self._ordered_task_names = [x for x in self._ordered_task_names if x != name]
else:
raise GoalError('Cannot uninstall unknown task: {0}'.format(name))
def known_scope_infos(self):
"""Yields ScopeInfos for all known scopes under this goal."""
# Note that we don't yield the goal's own scope. We don't need it (as we don't register
# options on it), and it's needlessly confusing when a task has the same name as its goal,
# in which case we shorten its scope to the goal's scope (e.g., idea.idea -> idea).
for task_type in self.task_types():
for scope_info in task_type.known_scope_infos():
yield scope_info
def subsystems(self):
"""Returns all subsystem types used by tasks in this goal, in no particular order."""
ret = set()
for task_type in self.task_types():
ret.update([dep.subsystem_cls for dep in task_type.subsystem_dependencies_iter()])
return ret
def ordered_task_names(self):
"""The task names in this goal, in registration order."""
return self._ordered_task_names
def task_type_by_name(self, name):
"""The task type registered under the given name."""
return self._task_type_by_name[name]
def task_types(self):
"""Returns the task types in this goal, unordered."""
return self._task_type_by_name.values()
def task_items(self):
for name, task_type in self._task_type_by_name.items():
yield name, task_type
def has_task_of_type(self, typ):
"""Returns True if this goal has a task of the given type (or a subtype of it)."""
for task_type in self.task_types():
if issubclass(task_type, typ):
return True
return False
@property
def active(self):
"""Return `True` if this goal has tasks installed.
Some goals are installed in pants core without associated tasks in anticipation of plugins
providing tasks that implement the goal being installed. If no such plugins are installed, the
goal may be inactive in the repo.
"""
return len(self._task_type_by_name) > 0
  def __repr__(self):
    # Goals render as their bare name, e.g. in logs and error messages.
    return self.name
| apache-2.0 |
etsinko/Pentaho-reports-for-OpenERP | openerp_addon/pentaho_report_selection_sets/report_selections.py | 6 | 12597 | # -*- encoding: utf-8 -*-
from datetime import date, datetime
from dateutil import parser
import pytz
import json
from lxml import etree
from openerp import models, fields, api, _
from openerp.exceptions import except_orm
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.misc import frozendict
from openerp.addons.pentaho_reports.java_oe import *
from openerp.addons.pentaho_reports.core import VALID_OUTPUT_TYPES
from report_formulae import *
class selection_set_header(models.Model):
    # A named, reusable set of prompt selections for one Pentaho report
    # action, with optional default users/groups for whom it auto-loads.
    _name = 'ir.actions.report.set.header'
    _description = 'Pentaho Report Selection Set Header'

    name = fields.Char(string='Selection Set Description', size=64)
    report_action_id = fields.Many2one('ir.actions.report.xml', string='Report Name', readonly=True)
    output_type = fields.Selection(VALID_OUTPUT_TYPES, string='Report format', help='Choose the format for the output')
    parameters_dictionary = fields.Text(string='parameter dictionary')  # Not needed, but helpful if we build a parameter set master view...
    detail_ids = fields.One2many('ir.actions.report.set.detail', 'header_id', string='Selection Details')
    def_user_ids = fields.Many2many('res.users', 'ir_actions_report_set_def_user_rel', 'header_id', 'user_id', string='Users (Default)')
    def_group_ids = fields.Many2many('res.groups', 'ir_actions_report_set_def_group_rel', 'header_id', 'group_id', string='Groups (Default)')

    def selections_to_dictionary(self, cr, uid, id, parameters, x2m_unique_id, context=None):
        """Build a prompt-wizard values dictionary from this stored selection set.

        Iterates the stored details, evaluating formulae where possible.  Because a
        formula may depend on other variables, evaluation runs as a fixed-point loop:
        each pass calculates every detail whose dependencies are already known.  If a
        pass makes no progress, one outstanding variable is arbitrarily forced to use
        its stored value (ignoring its formula) to break dependency deadlocks.

        :param parameters: list of report parameter dictionaries (name/type/etc.).
        :param x2m_unique_id: wizard instance key used when encoding x2m values.
        :return: dict of wizard field values, including 'output_type'.
        """
        detail_obj = self.pool.get('ir.actions.report.set.detail')
        formula_obj = self.pool.get('ir.actions.report.set.formula')
        selections_to_load = self.browse(cr, uid, id, context=context)
        result = {'output_type': selections_to_load.output_type}
        arbitrary_force_calc = None
        # Track, per variable: its type, whether it is multi-valued, and whether a
        # value has been resolved yet this run.
        known_variables = {}
        for index in range(0, len(parameters)):
            known_variables[parameters[index]['variable']] = {'type': parameters[index]['type'],
                                                              'x2m': parameter_can_2m(parameters, index),
                                                              'calculated': False,
                                                              }
        while True:
            any_calculated_this_time = False
            still_needed_dependent_values = []
            for index in range(0, len(parameters)):
                if not known_variables[parameters[index]['variable']]['calculated']:
                    for detail in selections_to_load.detail_ids:
                        if detail.variable == parameters[index]['variable']:
                            expected_type = parameters[index]['type']
                            expected_2m = parameter_can_2m(parameters, index)
                            # check expected_type as TYPE_DATE / TYPE_TIME, etc... and validate display_value is compatible with it
                            calculate_formula_this_time = False
                            use_value_this_time = True
                            if detail.calc_formula:
                                formula = formula_obj.validate_formula(cr, uid, detail.calc_formula, expected_type, expected_2m, known_variables, context=context)
                                #
                                # if there is an error, we want to ignore the formula and use standard processing of the value...
                                # if we are arbitrarily forcing a value, then also use standard processing of the value...
                                # if no error, then try to evaluate the formula
                                if formula['error'] or detail.variable == arbitrary_force_calc:
                                    pass
                                else:
                                    calculate_formula_this_time = True
                                    for dv in formula['dependent_values']:
                                        if not known_variables[dv]['calculated']:
                                            # A dependency is unresolved: defer this
                                            # detail to a later pass.
                                            calculate_formula_this_time = False
                                            use_value_this_time = False
                                            still_needed_dependent_values.append(dv)
                            if calculate_formula_this_time or use_value_this_time:
                                if calculate_formula_this_time:
                                    display_value = json.dumps(formula_obj.evaluate_formula(cr, uid, formula, expected_type, expected_2m, known_variables, context=context))
                                else:
                                    display_value = detail.display_value
                                result[parameter_resolve_column_name(parameters, index)] = detail_obj.display_value_to_wizard(cr, uid, display_value, parameters, index, x2m_unique_id, context=context)
                                result[parameter_resolve_formula_column_name(parameters, index)] = detail.calc_formula
                                known_variables[parameters[index]['variable']].update({'calculated': True,
                                                                                      'calced_value': detail_obj.wizard_value_to_display(cr, uid,
                                                                                                                                         result[parameter_resolve_column_name(parameters, index)],
                                                                                                                                         parameters, index, context=context),
                                                                                      })
                                any_calculated_this_time = True
                            break
            # if there are no outstanding calculations, then break
            if not still_needed_dependent_values:
                break
            # if some were calculated, and there are outstanding calculations, then loop again
            # if none were calculated, then force a calculation to break potential deadlocks of dependent values
            if any_calculated_this_time:
                arbitrary_force_calc = None
            else:
                arbitrary_force_calc = still_needed_dependent_values[0]
        return result
class selection_set_detail(models.Model):
    # One stored value (or formula) for a single report parameter belonging to
    # a selection set header.
    _name = 'ir.actions.report.set.detail'
    _description = 'Pentaho Report Selection Set Detail'

    header_id = fields.Many2one('ir.actions.report.set.header', string='Selection Set', ondelete='cascade', readonly=True)
    variable = fields.Char(string='Variable Name', size=64, readonly=True)
    label = fields.Char(string='Label', size=64, readonly=True)
    counter = fields.Integer(string='Parameter Number', readonly=True)
    type = fields.Selection(OPENERP_DATA_TYPES, string='Data Type', readonly=True)
    x2m = fields.Boolean(string='Data List Type')
    display_value = fields.Text(string='Value')
    calc_formula = fields.Char(string='Formula')

    _order = 'counter'

    def wizard_value_to_display(self, cr, uid, wizard_value, parameters_dictionary, index, context=None):
        """Decode a wizard field value and serialize it to JSON for storage."""
        result = self.pool.get('ir.actions.report.promptwizard').decode_wizard_value(cr, uid, parameters_dictionary, index, wizard_value, context=context)
        result = json.dumps(result)
        return result

    def display_value_to_wizard(self, cr, uid, selection_value, parameters_dictionary, index, x2m_unique_id, context=None):
        """Deserialize a stored JSON value and encode it for the prompt wizard."""
        # NOTE: `value and json.loads(value) or False` maps empty/missing values to
        # False (the ORM's conventional "no value"), not None.
        result = selection_value and json.loads(selection_value) or False
        result = self.pool.get('ir.actions.report.promptwizard').encode_wizard_value(cr, uid, parameters_dictionary, index, x2m_unique_id, result, context=context)
        return result
def formula_parameters(cls):
    """Class decorator: attach one `Char` formula field per possible parameter slot.

    Adds attributes named by PARAM_XXX_FORMULA % n for each n in [0, MAX_PARAMS).
    """
    for slot in range(MAX_PARAMS):
        setattr(cls, PARAM_XXX_FORMULA % slot, fields.Char(string="Formula"))
    return cls
@formula_parameters
class report_prompt_with_selection_set(models.TransientModel):
    # Extends the Pentaho prompt wizard with the ability to load stored
    # selection sets; the decorator adds one formula field per parameter slot.
    _inherit = 'ir.actions.report.promptwizard'

    has_selns = fields.Boolean(string='Has Selection Sets...')
    selectionset_id = fields.Many2one('ir.actions.report.set.header', string='Stored Selections', ondelete='set null')

    def default_get(self, cr, uid, fields, context=None):
        """Compute wizard defaults, pre-selecting a stored selection set.

        The set is taken from context['populate_selectionset_id'] when present;
        otherwise the first set defaulted for this user (explicit user defaults
        take precedence over group defaults) is used.
        """
        if context is None:
            context = {}
        set_header_obj = self.pool.get('ir.actions.report.set.header')
        result = super(report_prompt_with_selection_set, self).default_get(cr, uid, fields, context=context)
        set_header_ids = set_header_obj.search(cr, uid, [('report_action_id', '=', result['report_action_id'])], context=context)
        result['has_selns'] = len(set_header_ids) > 0
        parameters = json.loads(result.get('parameters_dictionary', []))
        # Blank out every formula column; loading a set repopulates them.
        for index in range(0, len(parameters)):
            result[parameter_resolve_formula_column_name(parameters, index)] = ''
        if context.get('populate_selectionset_id'):
            selectionset = set_header_obj.browse(cr, uid, context['populate_selectionset_id'], context=context)
            if selectionset.report_action_id.id != result['report_action_id']:
                raise except_orm(_('Error'), _('Report selections do not match service name called.'))
            # set this and let onchange be triggered and initialise correct values
            if type(context) != frozendict:
                result['selectionset_id'] = context.pop('populate_selectionset_id')
            else:
                result['selectionset_id'] = context['populate_selectionset_id']
            #TODO:
            # Really, we are finished with the value in context, and should pop it, but the new API seems to not respect the first "popping", and even more bizarrely,
            # when it calls this routine in "add_missing_values" passes in a frozen dict, and it can't be popped (although it should have been removed the first time!!)
        else:
            default_selset_id = False
            for sel_set in set_header_obj.browse(cr, uid, set_header_ids, context=context):
                if uid in [u.id for u in sel_set.def_user_ids]:
                    default_selset_id = sel_set.id
                    break  # This will break out of the main loop, which is correct - we have an explicit default
                for g in sel_set.def_group_ids:
                    if uid in [u.id for u in g.users]:
                        default_selset_id = sel_set.id
                        break  # This will break out of the inner loop, which is correct - we want to repeat the outer loop in case there is an explicit overriding default
            if default_selset_id:
                result['selectionset_id'] = default_selset_id
        return result

    def fvg_add_one_parameter(self, cr, uid, result, selection_groups, parameters, index, first_parameter, context=None):
        """Extend the generated view with this parameter's (hidden) formula field."""
        def add_subelement(element, type, **kwargs):
            # Append a child element, setting only the non-None attributes.
            sf = etree.SubElement(element, type)
            for k, v in kwargs.iteritems():
                if v is not None:
                    sf.set(k, v)

        super(report_prompt_with_selection_set, self).fvg_add_one_parameter(cr, uid, result, selection_groups, parameters, index, first_parameter, context=context)

        field_name = parameter_resolve_formula_column_name(parameters, index)
        result['fields'][field_name] = {'selectable': self._columns[field_name].selectable,
                                        'type': self._columns[field_name]._type,
                                        'size': self._columns[field_name].size,
                                        'string': self._columns[field_name].string,
                                        'views': {}
                                        }
        for sel_group in selection_groups:
            add_subelement(sel_group,
                           'field',
                           name = field_name,
                           modifiers = '{"invisible": true}',
                           )

    @api.onchange('selectionset_id')
    def _onchange_selectionset_id(self):
        """Copy the chosen selection set's stored values onto the wizard record."""
        if self.selectionset_id:
            parameters = json.loads(self.parameters_dictionary)
            values_dict = self.pool.get('ir.actions.report.set.header').selections_to_dictionary(self.env.cr, self.env.uid, self.selectionset_id.id, parameters, self.x2m_unique_id, context=self.env.context)
            for k, v in values_dict.iteritems():
                self.__setattr__(k, v)
zhenzhai/edx-platform | lms/envs/test_static_optimized.py | 26 | 2169 | """
Settings used when generating static assets for use in tests.
For example, Bok Choy uses two different settings files:
1. test_static_optimized is used when invoking collectstatic
2. bok_choy is used when running CMS and LMS
Note: it isn't possible to have a single settings file, because Django doesn't
support both generating static assets to a directory and also serving static
from the same directory.
"""
# Start with the common settings
from .common import *  # pylint: disable=wildcard-import, unused-wildcard-import

# Use an in-memory database since this settings file is only used for updating assets
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'ATOMIC_REQUESTS': True,
    },
    'student_module_history': {
        'ENGINE': 'django.db.backends.sqlite3',
    },
}

# Provide a dummy XQUEUE_INTERFACE setting as LMS expects it to exist on start up.
# These are placeholder values only; nothing connects to xqueue while
# collectstatic runs.
XQUEUE_INTERFACE = {
    "url": "https://sandbox-xqueue.edx.org",
    "django_auth": {
        "username": "lms",
        "password": "***REMOVED***"
    },
    "basic_auth": ('anant', 'agarwal'),
}

######################### PIPELINE ####################################

# Use RequireJS optimized storage
STATICFILES_STORAGE = 'openedx.core.lib.django_require.staticstorage.OptimizedCachedRequireJsStorage'

# Revert to the default set of finders as we don't want to dynamically pick up files from the pipeline
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'openedx.core.lib.xblock_pipeline.finder.XBlockPipelineFinder',
]

# Redirect to the test_root folder within the repo
TEST_ROOT = REPO_ROOT / "test_root"
LOG_DIR = (TEST_ROOT / "log").abspath()

# Store the static files under test root so that they don't overwrite existing static assets
STATIC_ROOT = (TEST_ROOT / "staticfiles" / "lms").abspath()

# Disable uglify when tests are running (used by build.js).
#   1. Uglify is by far the slowest part of the build process
#   2. Having full source code makes debugging tests easier for developers
os.environ['REQUIRE_BUILD_PROFILE_OPTIMIZE'] = 'none'
houshengbo/nova_vmware_compute_driver | nova/virt/hyperv/vmops.py | 1 | 25971 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for basic VM operations.
"""
import os
import uuid
from nova.api.metadata import base as instance_metadata
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import configdrive
from nova.virt.hyperv import baseops
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
LOG = logging.getLogger(__name__)
# Configuration options specific to the Hyper-V compute driver.
hyperv_opts = [
    cfg.StrOpt('vswitch_name',
               default=None,
               help='Default vSwitch Name, '
                    'if none provided first external is used'),
    cfg.BoolOpt('limit_cpu_features',
                default=False,
                help='Required for live migration among '
                     'hosts with different CPU features'),
    cfg.BoolOpt('config_drive_inject_password',
                default=False,
                help='Sets the admin password in the config drive image'),
    cfg.StrOpt('qemu_img_cmd',
               default="qemu-img.exe",
               help='qemu-img is used to convert between '
                    'different image types'),
    cfg.BoolOpt('config_drive_cdrom',
                default=False,
                help='Attaches the Config Drive image as a cdrom drive '
                     'instead of a disk drive')
]

CONF = cfg.CONF
CONF.register_opts(hyperv_opts)
# use_cow_images is defined elsewhere (nova.config) but read in this module.
CONF.import_opt('use_cow_images', 'nova.config')
class VMOps(baseops.BaseOps):
    """Management class for basic Hyper-V VM operations.

    Creates/destroys VMs, changes power state, attaches drives and NICs, and
    caches base images, all via the Hyper-V WMI virtualization provider.
    """

    def __init__(self, volumeops):
        super(VMOps, self).__init__()
        self._vmutils = vmutils.VMUtils()
        self._volumeops = volumeops

    def list_instances(self):
        """ Return the names of all the instances known to Hyper-V. """
        vms = [v.ElementName
                for v in self._conn.Msvm_ComputerSystem(['ElementName'],
                    Caption="Virtual Machine")]
        return vms

    def get_info(self, instance):
        """Get information about the VM"""
        LOG.debug(_("get_info called for instance"), instance=instance)
        return self._get_info(instance['name'])

    def _get_info(self, instance_name):
        """Return state/memory/cpu info for the named VM.

        Raises InstanceNotFound if Hyper-V does not know the VM.
        """
        vm = self._vmutils.lookup(self._conn, instance_name)
        if vm is None:
            raise exception.InstanceNotFound(instance=instance_name)
        vm = self._conn.Msvm_ComputerSystem(
            ElementName=instance_name)[0]
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        vmsettings = vm.associators(
            wmi_association_class='Msvm_SettingsDefineState',
            wmi_result_class='Msvm_VirtualSystemSettingData')
        settings_paths = [v.path_() for v in vmsettings]
        #See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
        summary_info = vs_man_svc.GetSummaryInformation(
            [constants.VM_SUMMARY_NUM_PROCS,
             constants.VM_SUMMARY_ENABLED_STATE,
             constants.VM_SUMMARY_MEMORY_USAGE,
             constants.VM_SUMMARY_UPTIME],
            settings_paths)[1]
        info = summary_info[0]
        LOG.debug(_("hyperv vm state: %s"), info.EnabledState)
        state = constants.HYPERV_POWER_STATE[info.EnabledState]
        memusage = str(info.MemoryUsage)
        numprocs = str(info.NumberOfProcessors)
        uptime = str(info.UpTime)

        LOG.debug(_("Got Info for vm %(instance_name)s: state=%(state)d,"
                    " mem=%(memusage)s, num_cpu=%(numprocs)s,"
                    " uptime=%(uptime)s"), locals())

        return {'state': state,
                'max_mem': info.MemoryUsage,
                'mem': info.MemoryUsage,
                'num_cpu': info.NumberOfProcessors,
                'cpu_time': info.UpTime}

    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info, block_device_info=None):
        """ Create a new VM and start it."""
        vm = self._vmutils.lookup(self._conn, instance['name'])
        if vm is not None:
            raise exception.InstanceExists(name=instance['name'])

        ebs_root = self._volumeops.volume_in_mapping(
            self._volumeops.get_default_root_device(),
            block_device_info)

        #If is not a boot from volume spawn
        if not (ebs_root):
            #Fetch the file, assume it is a VHD file.
            vhdfile = self._vmutils.get_vhd_path(instance['name'])
            try:
                self._cache_image(fn=self._vmutils.fetch_image,
                                  context=context,
                                  target=vhdfile,
                                  fname=instance['image_ref'],
                                  image_id=instance['image_ref'],
                                  user=instance['user_id'],
                                  project=instance['project_id'],
                                  cow=CONF.use_cow_images)
            except Exception as exn:
                LOG.exception(_('cache image failed: %s'), exn)
                self.destroy(instance)
                # FIX: abort the spawn. Previously the failure was swallowed
                # and we went on to create a VM with no root disk.
                raise

        try:
            self._create_vm(instance)

            if not ebs_root:
                self._attach_ide_drive(instance['name'], vhdfile, 0, 0,
                                       constants.IDE_DISK)
            else:
                self._volumeops.attach_boot_volume(block_device_info,
                                                   instance['name'])

            #A SCSI controller for volumes connection is created
            self._create_scsi_controller(instance['name'])

            for vif in network_info:
                mac_address = vif['address'].replace(':', '')
                self._create_nic(instance['name'], mac_address)

            if configdrive.required_by(instance):
                self._create_config_drive(instance, injected_files,
                                          admin_password)

            LOG.debug(_('Starting VM %s '), instance['name'])
            self._set_vm_state(instance['name'], 'Enabled')
            LOG.info(_('Started VM %s '), instance['name'])
        except Exception as exn:
            LOG.exception(_('spawn vm failed: %s'), exn)
            self.destroy(instance)
            raise exn

    def _create_config_drive(self, instance, injected_files, admin_password):
        """Build a config drive ISO (optionally converted to VHD) and attach it."""
        if CONF.config_drive_format != 'iso9660':
            # FIX: the exception was previously constructed but never raised,
            # silently accepting an unsupported config drive format.
            raise vmutils.HyperVException(_('Invalid config_drive_format "%s"') %
                                          CONF.config_drive_format)

        LOG.info(_('Using config drive'), instance=instance)
        extra_md = {}
        if admin_password and CONF.config_drive_inject_password:
            extra_md['admin_pass'] = admin_password

        inst_md = instance_metadata.InstanceMetadata(instance,
            content=injected_files, extra_md=extra_md)

        instance_path = self._vmutils.get_instance_path(
            instance['name'])
        configdrive_path_iso = os.path.join(instance_path, 'configdrive.iso')
        LOG.info(_('Creating config drive at %(path)s'),
                 {'path': configdrive_path_iso}, instance=instance)

        with configdrive.config_drive_helper(instance_md=inst_md) as cdb:
            try:
                cdb.make_drive(configdrive_path_iso)
            except exception.ProcessExecutionError as e:
                # FIX: use py2.6+/py3-compatible `as` exception syntax.
                LOG.error(_('Creating config drive failed with error: %s'),
                          e, instance=instance)
                raise

        if not CONF.config_drive_cdrom:
            # Hyper-V cannot attach a raw ISO as a disk; convert it to VHD.
            drive_type = constants.IDE_DISK
            configdrive_path = os.path.join(instance_path,
                                            'configdrive.vhd')
            utils.execute(CONF.qemu_img_cmd,
                          'convert',
                          '-f',
                          'raw',
                          '-O',
                          'vpc',
                          configdrive_path_iso,
                          configdrive_path,
                          attempts=1)
            os.remove(configdrive_path_iso)
        else:
            drive_type = constants.IDE_DVD
            configdrive_path = configdrive_path_iso

        # Attach on the second IDE controller, first slot.
        self._attach_ide_drive(instance['name'], configdrive_path, 1, 0,
                               drive_type)

    def _create_vm(self, instance):
        """Create a VM but don't start it. """
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]

        vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
        vs_gs_data.ElementName = instance["name"]
        (job, ret_val) = vs_man_svc.DefineVirtualSystem(
            [], None, vs_gs_data.GetText_(1))[1:]
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            success = self._vmutils.check_job_status(job)
        else:
            success = (ret_val == 0)

        if not success:
            raise vmutils.HyperVException(_('Failed to create VM %s') %
                                          instance["name"])

        LOG.debug(_('Created VM %s...'), instance["name"])
        vm = self._conn.Msvm_ComputerSystem(ElementName=instance["name"])[0]

        vmsettings = vm.associators(
            wmi_result_class='Msvm_VirtualSystemSettingData')
        vmsetting = [s for s in vmsettings
                     if s.SettingType == 3][0]  # avoid snapshots
        memsetting = vmsetting.associators(
            wmi_result_class='Msvm_MemorySettingData')[0]
        #No Dynamic Memory, so reservation, limit and quantity are identical.
        mem = long(str(instance['memory_mb']))
        memsetting.VirtualQuantity = mem
        memsetting.Reservation = mem
        memsetting.Limit = mem

        (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
            vm.path_(), [memsetting.GetText_(1)])
        LOG.debug(_('Set memory for vm %s...'), instance["name"])
        procsetting = vmsetting.associators(
            wmi_result_class='Msvm_ProcessorSettingData')[0]
        vcpus = long(instance['vcpus'])
        procsetting.VirtualQuantity = vcpus
        procsetting.Reservation = vcpus
        procsetting.Limit = 100000  # static assignment to 100%
        if CONF.limit_cpu_features:
            procsetting.LimitProcessorFeatures = True

        (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
            vm.path_(), [procsetting.GetText_(1)])
        LOG.debug(_('Set vcpus for vm %s...'), instance["name"])

    def _create_scsi_controller(self, vm_name):
        """ Create an iscsi controller ready to mount volumes """
        LOG.debug(_('Creating a scsi controller for %(vm_name)s for volume '
                    'attaching') % locals())
        vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
        vm = vms[0]
        scsicontrldefault = self._conn.query(
            "SELECT * FROM Msvm_ResourceAllocationSettingData \
            WHERE ResourceSubType = 'Microsoft Synthetic SCSI Controller'\
            AND InstanceID LIKE '%Default%'")[0]
        if scsicontrldefault is None:
            raise vmutils.HyperVException(_('Controller not found'))
        scsicontrl = self._vmutils.clone_wmi_obj(self._conn,
            'Msvm_ResourceAllocationSettingData', scsicontrldefault)
        scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
        scsiresource = self._vmutils.add_virt_resource(self._conn,
            scsicontrl, vm)
        if scsiresource is None:
            raise vmutils.HyperVException(
                _('Failed to add scsi controller to VM %s') %
                vm_name)

    def _get_ide_controller(self, vm, ctrller_addr):
        """Return the emulated IDE controller at the given address for `vm`."""
        vmsettings = vm.associators(
            wmi_result_class='Msvm_VirtualSystemSettingData')
        rasds = vmsettings[0].associators(
            wmi_result_class='MSVM_ResourceAllocationSettingData')
        ctrller = [r for r in rasds
                   if r.ResourceSubType == 'Microsoft Emulated IDE Controller'
                   and r.Address == str(ctrller_addr)]
        return ctrller

    def _attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
                          drive_type=constants.IDE_DISK):
        """Create an IDE drive and attach it to the vm"""
        LOG.debug(_('Creating disk for %(vm_name)s by attaching'
                    ' disk file %(path)s') % locals())

        vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
        vm = vms[0]

        ctrller = self._get_ide_controller(vm, ctrller_addr)

        if drive_type == constants.IDE_DISK:
            resSubType = 'Microsoft Synthetic Disk Drive'
        elif drive_type == constants.IDE_DVD:
            resSubType = 'Microsoft Synthetic DVD Drive'

        #Find the default disk drive object for the vm and clone it.
        drivedflt = self._conn.query(
            "SELECT * FROM Msvm_ResourceAllocationSettingData \
            WHERE ResourceSubType LIKE '%(resSubType)s'\
            AND InstanceID LIKE '%%Default%%'" % locals())[0]
        drive = self._vmutils.clone_wmi_obj(self._conn,
            'Msvm_ResourceAllocationSettingData', drivedflt)
        #Set the IDE ctrller as parent.
        drive.Parent = ctrller[0].path_()
        drive.Address = drive_addr
        #Add the cloned disk drive object to the vm.
        new_resources = self._vmutils.add_virt_resource(self._conn,
            drive, vm)
        if new_resources is None:
            raise vmutils.HyperVException(
                _('Failed to add drive to VM %s') %
                vm_name)
        drive_path = new_resources[0]
        LOG.debug(_('New %(drive_type)s drive path is %(drive_path)s') %
                  locals())

        if drive_type == constants.IDE_DISK:
            resSubType = 'Microsoft Virtual Hard Disk'
        elif drive_type == constants.IDE_DVD:
            resSubType = 'Microsoft Virtual CD/DVD Disk'

        #Find the default VHD disk object.
        drivedefault = self._conn.query(
            "SELECT * FROM Msvm_ResourceAllocationSettingData \
            WHERE ResourceSubType LIKE '%(resSubType)s' AND \
            InstanceID LIKE '%%Default%%' " % locals())[0]

        #Clone the default and point it to the image file.
        res = self._vmutils.clone_wmi_obj(self._conn,
            'Msvm_ResourceAllocationSettingData', drivedefault)
        #Set the new drive as the parent.
        res.Parent = drive_path
        res.Connection = [path]

        #Add the new vhd object as a virtual hard disk to the vm.
        new_resources = self._vmutils.add_virt_resource(self._conn, res, vm)
        if new_resources is None:
            raise vmutils.HyperVException(
                _('Failed to add %(drive_type)s image to VM %(vm_name)s') %
                locals())
        LOG.info(_('Created drive type %(drive_type)s for %(vm_name)s') %
                 locals())

    def _create_nic(self, vm_name, mac):
        """Create a (synthetic) nic and attach it to the vm"""
        LOG.debug(_('Creating nic for %s '), vm_name)
        #Find the vswitch that is connected to the physical nic.
        vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
        extswitch = self._find_external_network()
        if extswitch is None:
            raise vmutils.HyperVException(_('Cannot find vSwitch'))

        vm = vms[0]
        switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
        #Find the default nic and clone it to create a new nic for the vm.
        #Use Msvm_SyntheticEthernetPortSettingData for Windows or Linux with
        #Linux Integration Components installed.
        syntheticnics_data = self._conn.Msvm_SyntheticEthernetPortSettingData()
        default_nic_data = [n for n in syntheticnics_data
                            if n.InstanceID.rfind('Default') > 0]
        new_nic_data = self._vmutils.clone_wmi_obj(self._conn,
            'Msvm_SyntheticEthernetPortSettingData',
            default_nic_data[0])

        #Create a port on the vswitch.
        (new_port, ret_val) = switch_svc.CreateSwitchPort(
            Name=str(uuid.uuid4()),
            FriendlyName=vm_name,
            ScopeOfResidence="",
            VirtualSwitch=extswitch.path_())
        if ret_val != 0:
            LOG.error(_('Failed creating a port on the external vswitch'))
            raise vmutils.HyperVException(_('Failed creating port for %s') %
                                          vm_name)
        ext_path = extswitch.path_()
        LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s")
                  % locals())

        #Connect the new nic to the new port.
        new_nic_data.Connection = [new_port]
        new_nic_data.ElementName = vm_name + ' nic'
        new_nic_data.Address = mac
        new_nic_data.StaticMacAddress = 'True'
        new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']

        #Add the new nic to the vm.
        new_resources = self._vmutils.add_virt_resource(self._conn,
            new_nic_data, vm)
        if new_resources is None:
            raise vmutils.HyperVException(_('Failed to add nic to VM %s') %
                                          vm_name)
        LOG.info(_("Created nic for %s "), vm_name)

    def _find_external_network(self):
        """Find the vswitch that is connected to the physical nic.

        Assumes only one physical nic on the host
        """
        #If there are no physical nics connected to networks, return.
        if CONF.vswitch_name:
            LOG.debug(_("Attempting to bind NIC to %s ")
                      % CONF.vswitch_name)
            bound = self._conn.Msvm_VirtualSwitch(
                ElementName=CONF.vswitch_name)
        else:
            LOG.debug(_("No vSwitch specified, attaching to default"))
            # FIX: the query result was previously discarded, leaving `bound`
            # undefined and raising NameError on the length check below.
            bound = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')
        if len(bound) == 0:
            return None
        if CONF.vswitch_name:
            return self._conn.Msvm_VirtualSwitch(
                ElementName=CONF.vswitch_name)[0]\
                .associators(wmi_result_class='Msvm_SwitchPort')[0]\
                .associators(wmi_result_class='Msvm_VirtualSwitch')[0]
        else:
            # FIX: index the WMI query result; the returned list has no
            # .associators() method.
            return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')[0]\
                .associators(wmi_result_class='Msvm_SwitchPort')[0]\
                .associators(wmi_result_class='Msvm_VirtualSwitch')[0]

    def reboot(self, instance, network_info, reboot_type):
        """Reboot the specified instance."""
        vm = self._vmutils.lookup(self._conn, instance['name'])
        if vm is None:
            raise exception.InstanceNotFound(instance_id=instance["id"])
        self._set_vm_state(instance['name'], 'Reboot')

    def destroy(self, instance, network_info=None, cleanup=True):
        """Destroy the VM. Also destroy the associated VHD disk files"""
        LOG.debug(_("Got request to destroy vm %s"), instance['name'])
        vm = self._vmutils.lookup(self._conn, instance['name'])
        if vm is None:
            return
        vm = self._conn.Msvm_ComputerSystem(ElementName=instance['name'])[0]
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        #Stop the VM first.
        self._set_vm_state(instance['name'], 'Disabled')
        vmsettings = vm.associators(
            wmi_result_class='Msvm_VirtualSystemSettingData')
        rasds = vmsettings[0].associators(
            wmi_result_class='MSVM_ResourceAllocationSettingData')
        disks = [r for r in rasds
                 if r.ResourceSubType == 'Microsoft Virtual Hard Disk']
        disk_files = []
        volumes = [r for r in rasds
                   if r.ResourceSubType == 'Microsoft Physical Disk Drive']
        volumes_drives_list = []
        #collect the volumes information before destroying the VM.
        for volume in volumes:
            hostResources = volume.HostResource
            drive_path = hostResources[0]
            #Appending the Msvm_Disk path
            volumes_drives_list.append(drive_path)
        #Collect disk file information before destroying the VM.
        for disk in disks:
            disk_files.extend([c for c in disk.Connection])
        #Nuke the VM. Does not destroy disks.
        (job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            success = self._vmutils.check_job_status(job)
        else:
            # FIX: previously `success` stayed undefined for unexpected return
            # codes; treat any non-zero immediate return as failure (matches
            # the pattern used by _create_vm and _set_vm_state).
            success = (ret_val == 0)
        if not success:
            raise vmutils.HyperVException(_('Failed to destroy vm %s') %
                                          instance['name'])
        #Disconnect volumes
        for volume_drive in volumes_drives_list:
            self._volumeops.disconnect_volume(volume_drive)
        #Delete associated vhd disk files.
        for disk in disk_files:
            vhdfile = self._conn_cimv2.query(
                "Select * from CIM_DataFile where Name = '" +
                disk.replace("'", "''") + "'")[0]
            LOG.debug(_("Del: disk %(vhdfile)s vm %(name)s")
                      % {'vhdfile': vhdfile, 'name': instance['name']})
            vhdfile.Delete()

    def pause(self, instance):
        """Pause VM instance."""
        LOG.debug(_("Pause instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Paused')

    def unpause(self, instance):
        """Unpause paused VM instance."""
        LOG.debug(_("Unpause instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Enabled')

    def suspend(self, instance):
        """Suspend the specified instance."""
        # FIX: removed a stray debug `print instance` statement.
        LOG.debug(_("Suspend instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Suspended')

    def resume(self, instance):
        """Resume the suspended VM instance."""
        LOG.debug(_("Resume instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Enabled')

    def power_off(self, instance):
        """Power off the specified instance."""
        LOG.debug(_("Power off instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Disabled')

    def power_on(self, instance):
        """Power on the specified instance"""
        LOG.debug(_("Power on instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Enabled')

    def _set_vm_state(self, vm_name, req_state):
        """Set the desired state of the VM"""
        vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
        if len(vms) == 0:
            return False
        (job, ret_val) = vms[0].RequestStateChange(
            constants.REQ_POWER_STATE[req_state])
        success = False
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            success = self._vmutils.check_job_status(job)
        elif ret_val == 0:
            success = True
        elif ret_val == 32775:
            #Invalid state for current operation. Typically means it is
            #already in the state requested
            success = True
        if success:
            LOG.info(_("Successfully changed vm state of %(vm_name)s"
                       " to %(req_state)s") % locals())
        else:
            msg = _("Failed to change vm state of %(vm_name)s"
                    " to %(req_state)s") % locals()
            LOG.error(msg)
            raise vmutils.HyperVException(msg)

    def _cache_image(self, fn, target, fname, cow=False, Size=None,
                     *args, **kwargs):
        """Wrapper for a method that creates an image that caches the image.

        This wrapper will save the image into a common store and create a
        copy for use by the hypervisor.

        The underlying method should specify a kwarg of target representing
        where the image will be saved.

        fname is used as the filename of the base image.  The filename needs
        to be unique to a given image.

        If cow is True, it will make a CoW image instead of a copy.
        """
        @lockutils.synchronized(fname, 'nova-')
        def call_if_not_exists(path, fn, *args, **kwargs):
            # Serialize on the image name so concurrent spawns don't fetch the
            # same base image twice.
            if not os.path.exists(path):
                fn(target=path, *args, **kwargs)

        if not os.path.exists(target):
            LOG.debug(_("use_cow_image:%s"), cow)
            if cow:
                base = self._vmutils.get_base_vhd_path(fname)
                call_if_not_exists(base, fn, *args, **kwargs)

                image_service = self._conn.query(
                    "Select * from Msvm_ImageManagementService")[0]
                (job, ret_val) = \
                    image_service.CreateDifferencingVirtualHardDisk(
                        Path=target, ParentPath=base)
                LOG.debug(
                    "Creating difference disk: JobID=%s, Source=%s, Target=%s",
                    job, base, target)
                if ret_val == constants.WMI_JOB_STATUS_STARTED:
                    success = self._vmutils.check_job_status(job)
                else:
                    success = (ret_val == 0)

                if not success:
                    raise vmutils.HyperVException(
                        _('Failed to create Difference Disk from '
                          '%(base)s to %(target)s') % locals())

            else:
                call_if_not_exists(target, fn, *args, **kwargs)
| apache-2.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/networkx/algorithms/shortest_paths/dense.py | 4 | 6750 | # -*- coding: utf-8 -*-
"""Floyd-Warshall algorithm for shortest paths.
"""
# Copyright (C) 2004-2018 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Authors: Aric Hagberg <aric.hagberg@gmail.com>
# Miguel Sozinho Ramalho <m.ramalho@fe.up.pt>
import networkx as nx
__all__ = ['floyd_warshall',
'floyd_warshall_predecessor_and_distance',
'reconstruct_path',
'floyd_warshall_numpy']
def floyd_warshall_numpy(G, nodelist=None, weight='weight'):
    """Find all-pairs shortest path lengths using Floyd's algorithm.

    Parameters
    ----------
    G : NetworkX graph

    nodelist : list, optional
       The rows and columns are ordered by the nodes in nodelist.
       If nodelist is None then the ordering is produced by G.nodes().

    weight: string, optional (default= 'weight')
       Edge data key corresponding to the edge weight.

    Returns
    -------
    distance : NumPy matrix
        A matrix of shortest path distances between nodes.
        If there is no path between two nodes the corresponding matrix entry
        will be Inf.

    Notes
    ------
    Floyd's algorithm is appropriate for finding shortest paths in
    dense graphs or graphs with negative weights when Dijkstra's
    algorithm fails. This algorithm can still fail if there are
    negative cycles. It has running time $O(n^3)$ with running space of
    $O(n^2)$.
    """
    try:
        import numpy as np
    except ImportError:
        # BUG FIX: the original message blamed to_numpy_matrix(), a
        # copy-paste error; name the function the caller actually invoked.
        raise ImportError(
            "floyd_warshall_numpy() requires numpy: http://scipy.org/ ")

    # To handle cases when an edge has weight=0, we must make sure that
    # nonedges are not given the value 0 as well.
    A = nx.to_numpy_matrix(G, nodelist=nodelist, multigraph_weight=min,
                           weight=weight, nonedge=np.inf)
    n, m = A.shape
    I = np.identity(n)
    A[I == 1] = 0  # diagonal elements should be zero
    for i in range(n):
        # Row/column broadcast relaxes every (u, v) pair through the
        # intermediate node i in one vectorized step.
        A = np.minimum(A, A[i, :] + A[:, i])
    return A
def floyd_warshall_predecessor_and_distance(G, weight='weight'):
    """Find all-pairs shortest path lengths using Floyd's algorithm.

    Parameters
    ----------
    G : NetworkX graph

    weight: string, optional (default= 'weight')
       Edge data key corresponding to the edge weight.

    Returns
    -------
    predecessor,distance : dictionaries
       Dictionaries, keyed by source and target, of predecessors and
       distances in the shortest path.

    Notes
    ------
    Floyd's algorithm is appropriate for finding shortest paths
    in dense graphs or graphs with negative weights when Dijkstra's
    algorithm fails. This algorithm can still fail if there are negative
    cycles. It has running time $O(n^3)$ with running space of $O(n^2)$.

    See Also
    --------
    floyd_warshall
    floyd_warshall_numpy
    all_pairs_shortest_path
    all_pairs_shortest_path_length
    """
    from collections import defaultdict

    # Distances default to +inf for unknown pairs; inner dicts appear on
    # demand, which the relaxation loop below relies on.
    dist = defaultdict(lambda: defaultdict(lambda: float('inf')))
    pred = defaultdict(dict)

    # Every node reaches itself at zero cost (zero diagonal).
    for node in G:
        dist[node][node] = 0

    # Seed with the direct edges; min() keeps the lightest parallel edge in
    # multigraphs, and undirected graphs get both orientations.
    bidirectional = not G.is_directed()
    for u, v, edge_data in G.edges(data=True):
        cost = edge_data.get(weight, 1.0)
        dist[u][v] = min(cost, dist[u][v])
        pred[u][v] = u
        if bidirectional:
            dist[v][u] = min(cost, dist[v][u])
            pred[v][u] = v

    # Classic Floyd-Warshall relaxation: allow each node in turn to act as
    # an intermediate hop.
    for via in G:
        dist_via = dist[via]
        for src in G:
            dist_src = dist[src]
            for dst in G:
                candidate = dist_src[via] + dist_via[dst]
                if candidate < dist_src[dst]:
                    dist_src[dst] = candidate
                    pred[src][dst] = pred[via][dst]
    return dict(pred), dict(dist)
def reconstruct_path(source, target, predecessors):
    """Reconstruct a path from source to target using the predecessors
    dict as returned by floyd_warshall_predecessor_and_distance.

    Parameters
    ----------
    source : node
       Starting node for path

    target : node
       Ending node for path

    predecessors: dictionary
       Dictionary, keyed by source and target, of predecessors in the
       shortest path, as returned by floyd_warshall_predecessor_and_distance

    Returns
    -------
    path : list
       A list of nodes containing the shortest path from source to target.
       If source and target are the same, an empty list is returned.

    See Also
    --------
    floyd_warshall_predecessor_and_distance
    """
    if source == target:
        return []
    hops = predecessors[source]
    # Walk backwards from target until we reach source, then reverse.
    path = [target]
    step = hops[target]
    while step != source:
        path.append(step)
        step = hops[step]
    path.append(step)  # step == source here
    return path[::-1]
def floyd_warshall(G, weight='weight'):
    """Find all-pairs shortest path lengths using Floyd's algorithm.

    Parameters
    ----------
    G : NetworkX graph

    weight: string, optional (default= 'weight')
       Edge data key corresponding to the edge weight.

    Returns
    -------
    distance : dict
       A dictionary, keyed by source and target, of shortest paths distances
       between nodes.

    Notes
    ------
    Floyd's algorithm is appropriate for finding shortest paths
    in dense graphs or graphs with negative weights when Dijkstra's
    algorithm fails. This algorithm can still fail if there are negative
    cycles. It has running time $O(n^3)$ with running space of $O(n^2)$.

    See Also
    --------
    floyd_warshall_predecessor_and_distance
    floyd_warshall_numpy
    all_pairs_shortest_path
    all_pairs_shortest_path_length
    """
    # Delegates to the full implementation and discards the predecessor
    # table; a dedicated distance-only routine would reduce memory costs.
    _, distance = floyd_warshall_predecessor_and_distance(G, weight=weight)
    return distance
# fixture for nose tests
def setup_module(module):
    """Nose fixture: skip this module's tests when NumPy is unavailable."""
    from nose import SkipTest
    try:
        import numpy  # noqa: F401
    except ImportError:
        # BUG FIX: was a bare ``except:``, which also swallows
        # KeyboardInterrupt/SystemExit and masks unrelated errors raised
        # during import.
        raise SkipTest("NumPy not available")
| gpl-3.0 |
B-MOOC/edx-platform | common/lib/xmodule/xmodule/tests/test_content.py | 62 | 7973 | """Tests for contents"""
import os
import unittest
import ddt
from path import path
from xmodule.contentstore.content import StaticContent, StaticContentStream
from xmodule.contentstore.content import ContentStore
from opaque_keys.edx.locations import SlashSeparatedCourseKey, AssetLocation
from xmodule.static_content import _write_js, _list_descriptors
# Fixture text deliberately longer than 1024 bytes (the default
# STREAM_DATA_CHUNK_SIZE) so that streaming tests below exercise multiple
# chunks.  The content (including its typos) is runtime data and is kept
# byte-identical.
SAMPLE_STRING = """
This is a sample string with more than 1024 bytes, the default STREAM_DATA_CHUNK_SIZE
Lorem Ipsum is simply dummy text of the printing and typesetting industry.
Lorem Ipsum has been the industry's standard dummy text ever since the 1500s,
when an unknown printer took a galley of type and scrambled it to make a type
specimen book. It has survived not only five centuries, but also the leap into
electronic typesetting, remaining essentially unchanged. It was popularised in
the 1960s with the release of Letraset sheets containing Lorem Ipsum passages,
nd more recently with desktop publishing software like Aldus PageMaker including
versions of Lorem Ipsum.
It is a long established fact that a reader will be distracted by the readable
content of a page when looking at its layout. The point of using Lorem Ipsum is
that it has a more-or-less normal distribution of letters, as opposed to using
'Content here, content here', making it look like readable English. Many desktop
ublishing packages and web page editors now use Lorem Ipsum as their default model
text, and a search for 'lorem ipsum' will uncover many web sites still in their infancy.
Various versions have evolved over the years, sometimes by accident, sometimes on purpose
injected humour and the like).
Lorem Ipsum is simply dummy text of the printing and typesetting industry.
Lorem Ipsum has been the industry's standard dummy text ever since the 1500s,
when an unknown printer took a galley of type and scrambled it to make a type
specimen book. It has survived not only five centuries, but also the leap into
electronic typesetting, remaining essentially unchanged. It was popularised in
the 1960s with the release of Letraset sheets containing Lorem Ipsum passages,
nd more recently with desktop publishing software like Aldus PageMaker including
versions of Lorem Ipsum.
It is a long established fact that a reader will be distracted by the readable
content of a page when looking at its layout. The point of using Lorem Ipsum is
that it has a more-or-less normal distribution of letters, as opposed to using
'Content here, content here', making it look like readable English. Many desktop
ublishing packages and web page editors now use Lorem Ipsum as their default model
text, and a search for 'lorem ipsum' will uncover many web sites still in their infancy.
Various versions have evolved over the years, sometimes by accident, sometimes on purpose
injected humour and the like).
"""
class Content(object):
    """Minimal stand-in exposing the ``location`` and ``content_type``
    attributes that the content-store helpers under test expect.
    """

    def __init__(self, location, content_type):
        self.location, self.content_type = location, content_type
class FakeGridFsItem(object):
    """In-memory fake of a GridFS item: seekable, chunk-readable string data."""

    def __init__(self, string_data):
        self.data = string_data
        self.length = len(string_data)
        self.cursor = 0

    def seek(self, position):
        """Move the read cursor to ``position``."""
        self.cursor = position

    def read(self, chunk_size):
        """Return up to ``chunk_size`` characters from the cursor onward,
        advancing the cursor past the requested range.
        """
        start = self.cursor
        self.cursor = start + chunk_size
        return self.data[start:self.cursor]
@ddt.ddt
class ContentTest(unittest.TestCase):
    """Tests for StaticContent / StaticContentStream / ContentStore helpers."""

    def test_thumbnail_none(self):
        # We had a bug where a thumbnail location of None was getting transformed into a Location tuple, with
        # all elements being None. It is important that the location be just None for rendering.
        content = StaticContent('loc', 'name', 'content_type', 'data', None, None, None)
        self.assertIsNone(content.thumbnail_location)

        content = StaticContent('loc', 'name', 'content_type', 'data')
        self.assertIsNone(content.thumbnail_location)

    def test_static_url_generation_from_courseid(self):
        # Legacy /c4x/ URL is derived from the course key parts.
        course_key = SlashSeparatedCourseKey('foo', 'bar', 'bz')
        url = StaticContent.convert_legacy_static_url_with_course_id('images_course_image.jpg', course_key)
        self.assertEqual(url, '/c4x/foo/bar/asset/images_course_image.jpg')

    # Each tuple is (original asset filename, expected thumbnail filename):
    # non-jpg extensions get folded into the name as "-<ext>.jpg".
    @ddt.data(
        (u"monsters__.jpg", u"monsters__.jpg"),
        (u"monsters__.png", u"monsters__-png.jpg"),
        (u"dots.in.name.jpg", u"dots.in.name.jpg"),
        (u"dots.in.name.png", u"dots.in.name-png.jpg"),
    )
    @ddt.unpack
    def test_generate_thumbnail_image(self, original_filename, thumbnail_filename):
        contentStore = ContentStore()
        content = Content(AssetLocation(u'mitX', u'800', u'ignore_run', u'asset', original_filename), None)
        # No image data is attached, so no thumbnail content is produced --
        # only the computed thumbnail location is of interest here.
        (thumbnail_content, thumbnail_file_location) = contentStore.generate_thumbnail(content)
        self.assertIsNone(thumbnail_content)
        self.assertEqual(AssetLocation(u'mitX', u'800', u'ignore_run', u'thumbnail', thumbnail_filename), thumbnail_file_location)

    def test_compute_location(self):
        # We had a bug that __ got converted into a single _. Make sure that substitution of INVALID_CHARS (like space)
        # still happen.
        asset_location = StaticContent.compute_location(
            SlashSeparatedCourseKey('mitX', '400', 'ignore'), 'subs__1eo_jXvZnE .srt.sjson'
        )
        self.assertEqual(AssetLocation(u'mitX', u'400', u'ignore', u'asset', u'subs__1eo_jXvZnE_.srt.sjson', None), asset_location)

    def test_get_location_from_path(self):
        asset_location = StaticContent.get_location_from_path(u'/c4x/foo/bar/asset/images_course_image.jpg')
        self.assertEqual(
            AssetLocation(u'foo', u'bar', None, u'asset', u'images_course_image.jpg', None),
            asset_location
        )

    def test_static_content_stream_stream_data(self):
        """
        Test StaticContentStream stream_data function, asserts that we get all the bytes
        """
        data = SAMPLE_STRING
        item = FakeGridFsItem(data)
        static_content_stream = StaticContentStream('loc', 'name', 'type', item, length=item.length)

        total_length = 0
        stream = static_content_stream.stream_data()
        for chunck in stream:
            total_length += len(chunck)

        self.assertEqual(total_length, static_content_stream.length)

    def test_static_content_stream_stream_data_in_range(self):
        """
        Test StaticContentStream stream_data_in_range function,
        asserts that we get the requested number of bytes

        first_byte and last_byte are chosen to be simple but non trivial values
        and to have total_length > STREAM_DATA_CHUNK_SIZE (1024)
        """
        data = SAMPLE_STRING
        item = FakeGridFsItem(data)
        static_content_stream = StaticContentStream('loc', 'name', 'type', item, length=item.length)
        first_byte = 100
        last_byte = 1500

        total_length = 0
        stream = static_content_stream.stream_data_in_range(first_byte, last_byte)
        for chunck in stream:
            total_length += len(chunck)

        # Ranges are inclusive at both ends, hence the +1.
        self.assertEqual(total_length, last_byte - first_byte + 1)

    def test_static_content_write_js(self):
        """
        Test that only one filename starts with 000.
        """
        output_root = path(u'common/static/xmodule/descriptors/js')
        js_file_paths = _write_js(output_root, _list_descriptors())
        js_file_paths = [file_path for file_path in js_file_paths if os.path.basename(file_path).startswith('000-')]
        self.assertEqual(len(js_file_paths), 1)
        self.assertIn("XModule.Descriptor = (function () {", open(js_file_paths[0]).read())
| agpl-3.0 |
zhouzhenghui/python-for-android | python3-alpha/python3-src/Lib/plat-linux3/TYPES.py | 171 | 3416 | # Generated by h2py from /usr/include/sys/types.h
# NOTE(review): this module is machine-generated by h2py (see the header
# comment above); the values mirror glibc <sys/types.h> macros at generation
# time.  Do not hand-edit -- regenerate from the C header instead.  Several
# ``def ...: return \`` lines below appear to have lost their continuation
# text in transit; they are preserved verbatim rather than guessed at.
_SYS_TYPES_H = 1

# Included from features.h
_FEATURES_H = 1
__USE_ANSI = 1
__FAVOR_BSD = 1
_ISOC99_SOURCE = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 199506
_XOPEN_SOURCE = 600
_XOPEN_SOURCE_EXTENDED = 1
_LARGEFILE64_SOURCE = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
__USE_ISOC99 = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 2
_POSIX_C_SOURCE = 199506
__USE_POSIX = 1
__USE_POSIX2 = 1
__USE_POSIX199309 = 1
__USE_POSIX199506 = 1
__USE_XOPEN = 1
__USE_XOPEN_EXTENDED = 1
__USE_UNIX98 = 1
_LARGEFILE_SOURCE = 1
__USE_XOPEN2K = 1
__USE_ISOC99 = 1
__USE_XOPEN_EXTENDED = 1
__USE_LARGEFILE = 1
__USE_LARGEFILE64 = 1
__USE_FILE_OFFSET64 = 1
__USE_MISC = 1
__USE_BSD = 1
__USE_SVID = 1
__USE_GNU = 1
__USE_REENTRANT = 1
__STDC_IEC_559__ = 1
__STDC_IEC_559_COMPLEX__ = 1
__STDC_ISO_10646__ = 200009
__GNU_LIBRARY__ = 6
__GLIBC__ = 2
__GLIBC_MINOR__ = 2

# Included from sys/cdefs.h
_SYS_CDEFS_H = 1
def __PMT(args): return args
def __P(args): return args
def __PMT(args): return args
def __STRING(x): return #x
__flexarr = []
__flexarr = [0]
__flexarr = []
__flexarr = [1]
def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
def __attribute__(xyz): return
def __attribute_format_arg__(x): return __attribute__ ((__format_arg__ (x)))
def __attribute_format_arg__(x): return
__USE_LARGEFILE = 1
__USE_LARGEFILE64 = 1
__USE_EXTERN_INLINES = 1

# Included from gnu/stubs.h

# Included from bits/types.h
_BITS_TYPES_H = 1
__FD_SETSIZE = 1024

# Included from bits/pthreadtypes.h
_BITS_PTHREADTYPES_H = 1

# Included from bits/sched.h
SCHED_OTHER = 0
SCHED_FIFO = 1
SCHED_RR = 2
CSIGNAL = 0x000000ff
CLONE_VM = 0x00000100
CLONE_FS = 0x00000200
CLONE_FILES = 0x00000400
CLONE_SIGHAND = 0x00000800
CLONE_PID = 0x00001000
CLONE_PTRACE = 0x00002000
CLONE_VFORK = 0x00004000
__defined_schedparam = 1

# Included from time.h
_TIME_H = 1

# Included from bits/time.h
_BITS_TIME_H = 1
CLOCKS_PER_SEC = 1000000
CLOCK_REALTIME = 0
CLOCK_PROCESS_CPUTIME_ID = 2
CLOCK_THREAD_CPUTIME_ID = 3
TIMER_ABSTIME = 1
_STRUCT_TIMEVAL = 1
CLK_TCK = CLOCKS_PER_SEC
__clock_t_defined = 1
__time_t_defined = 1
__clockid_t_defined = 1
__timer_t_defined = 1
__timespec_defined = 1
def __isleap(year): return \

__BIT_TYPES_DEFINED__ = 1

# Included from endian.h
_ENDIAN_H = 1
__LITTLE_ENDIAN = 1234
__BIG_ENDIAN = 4321
__PDP_ENDIAN = 3412

# Included from bits/endian.h
__BYTE_ORDER = __LITTLE_ENDIAN
__FLOAT_WORD_ORDER = __BYTE_ORDER
LITTLE_ENDIAN = __LITTLE_ENDIAN
BIG_ENDIAN = __BIG_ENDIAN
PDP_ENDIAN = __PDP_ENDIAN
BYTE_ORDER = __BYTE_ORDER

# Included from sys/select.h
_SYS_SELECT_H = 1

# Included from bits/select.h
def __FD_ZERO(fdsp): return \

def __FD_ZERO(set): return \

# Included from bits/sigset.h
_SIGSET_H_types = 1
_SIGSET_H_fns = 1
def __sigmask(sig): return \

def __sigemptyset(set): return \

def __sigfillset(set): return \

def __sigisemptyset(set): return \

def __FDELT(d): return ((d) / __NFDBITS)
FD_SETSIZE = __FD_SETSIZE
def FD_ZERO(fdsetp): return __FD_ZERO (fdsetp)

# Included from sys/sysmacros.h
_SYS_SYSMACROS_H = 1
def major(dev): return ((int)(((dev) >> 8) & 0xff))
def minor(dev): return ((int)((dev) & 0xff))
def major(dev): return (((dev).__val[1] >> 8) & 0xff)
def minor(dev): return ((dev).__val[1] & 0xff)
def major(dev): return (((dev).__val[0] >> 8) & 0xff)
def minor(dev): return ((dev).__val[0] & 0xff)
| apache-2.0 |
edx/course-discovery | course_discovery/apps/course_metadata/tests/test_emails.py | 1 | 14576 | import datetime
import re
from django.conf import settings
from django.contrib.auth.models import Group
from django.core import mail
from django.test import TestCase
from opaque_keys.edx.keys import CourseKey
from testfixtures import LogCapture, StringComparison
from course_discovery.apps.core.tests.factories import UserFactory
from course_discovery.apps.course_metadata import emails
from course_discovery.apps.course_metadata.models import CourseEditor
from course_discovery.apps.course_metadata.tests.factories import (
CourseEditorFactory, CourseRunFactory, OrganizationFactory
)
from course_discovery.apps.publisher.choices import InternalUserRole
from course_discovery.apps.publisher.constants import LEGAL_TEAM_GROUP_NAME
from course_discovery.apps.publisher.tests.factories import (
GroupFactory, OrganizationExtensionFactory, OrganizationUserRoleFactory, UserAttributeFactory
)
class EmailTests(TestCase):
    """Tests for the course_metadata notification-email helpers in emails.py."""

    def setUp(self):
        super().setUp()
        # One org/course/run fixture plus the cast of users involved in the
        # review workflow: project coordinator, two editors, a non-editor
        # org member, and a legal-team member.
        self.org = OrganizationFactory(name='MyOrg', key='myorg')
        self.course_run = CourseRunFactory(draft=True, title_override='MyCourse')
        self.course = self.course_run.course
        self.course.authoring_organizations.add(self.org)
        self.partner = self.course.partner
        self.group = GroupFactory()
        self.pc = self.make_user(email='pc@example.com')
        self.editor = self.make_user(groups=[self.group])
        self.editor2 = self.make_user(groups=[self.group])
        self.non_editor = self.make_user(groups=[self.group])
        self.legal = self.make_user(groups=[Group.objects.get(name=LEGAL_TEAM_GROUP_NAME)])

        CourseEditorFactory(user=self.editor, course=self.course)
        CourseEditorFactory(user=self.editor2, course=self.course)
        OrganizationExtensionFactory(group=self.group, organization=self.org)
        OrganizationUserRoleFactory(user=self.pc, organization=self.org, role=InternalUserRole.ProjectCoordinator)

        # URLs the email bodies are expected to reference.
        self.publisher_url = f'{self.partner.publisher_url}courses/{self.course_run.course.uuid}'
        self.studio_url = f'{self.partner.studio_url}course/{self.course_run.key}'
        self.admin_url = 'https://{}/admin/course_metadata/courserun/{}/change/'.format(
            self.partner.site.domain, self.course_run.id
        )
        self.run_num = CourseKey.from_string(self.course_run.key).run

    @staticmethod
    def make_user(groups=None, **kwargs):
        # All test users opt in to email notifications.
        user = UserFactory(**kwargs)
        UserAttributeFactory(user=user, enable_email_notification=True)

        if groups:
            user.groups.set(groups)

        return user

    def assertEmailContains(self, subject=None, to_users=None, both_regexes=None, text_regexes=None,
                            html_regexes=None, index=0):
        # Inspect the email at ``index`` in Django's test outbox; regexes in
        # both_regexes must match the plain-text AND the HTML alternative.
        email = mail.outbox[index]
        if to_users is not None:
            assert set(email.to) == {u.email for u in to_users}
        if subject is not None:
            self.assertRegex(str(email.subject), subject)

        assert len(email.alternatives) == 1
        assert email.alternatives[0][1] == 'text/html'
        text = email.body
        html = email.alternatives[0][0]

        for regex in both_regexes or []:
            self.assertRegex(text, regex)
            self.assertRegex(html, regex)
        for regex in text_regexes or []:
            self.assertRegex(text, regex)
        for regex in html_regexes or []:
            self.assertRegex(html, regex)

    def assertEmailDoesNotContain(self, both_regexes=None, text_regexes=None, html_regexes=None, index=0):
        # Negative counterpart of assertEmailContains.
        email = mail.outbox[index]
        text = email.body
        html = email.alternatives[0][0]

        for regex in both_regexes or []:
            self.assertNotRegex(text, regex)
            self.assertNotRegex(html, regex)
        for regex in text_regexes or []:
            self.assertNotRegex(text, regex)
        for regex in html_regexes or []:
            self.assertNotRegex(html, regex)

    def assertEmailSent(self, function, subject=None, to_users=None, both_regexes=None, text_regexes=None,
                        html_regexes=None, index=0, total=1):
        # Invoke the email helper under test with the fixture course run,
        # then assert on the resulting outbox.
        function(self.course_run)

        assert len(mail.outbox) == total
        self.assertEmailContains(subject=subject, to_users=to_users, both_regexes=both_regexes,
                                 text_regexes=text_regexes, html_regexes=html_regexes, index=index)

    def assertEmailNotSent(self, function, reason):
        # The helper should log an INFO explaining why it skipped sending.
        with LogCapture(emails.logger.name) as log_capture:
            function(self.course_run)

        assert len(mail.outbox) == 0

        if reason:
            log_capture.check(
                (
                    emails.logger.name,
                    'INFO',
                    StringComparison('Not sending notification email for template course_metadata/email/.* because ' +
                                     reason),
                )
            )

    def test_send_email_for_legal_review(self):
        """
        Verify that send_email_for_legal_review's happy path works as expected
        """
        self.assertEmailSent(
            emails.send_email_for_legal_review,
            f'^Legal review requested: {self.course_run.title}$',
            [self.legal],
            both_regexes=[
                'Dear legal team,',
                'MyOrg has submitted MyCourse for review.',
                'Note: This email address is unable to receive replies.',
            ],
            html_regexes=[
                '<a href="%s">View this course run in Publisher</a> to determine OFAC status.' % self.publisher_url,
                'For questions or comments, please contact '
                '<a href="mailto:pc@example.com">the Project Coordinator</a>.',
            ],
            text_regexes=[
                '%s\nView this course run in Publisher above to determine OFAC status.' % self.publisher_url,
                'For questions or comments, please contact the Project Coordinator at pc@example.com.',
            ],
        )

    def test_send_email_for_internal_review(self):
        """
        Verify that send_email_for_internal_review's happy path works as expected
        """
        restricted_url = self.partner.lms_admin_url.rstrip('/') + '/embargo/restrictedcourse/'
        self.assertEmailSent(
            emails.send_email_for_internal_review,
            f'^Review requested: {self.course_run.key} - {self.course_run.title}$',
            [self.pc],
            both_regexes=[
                'Dear %s,' % self.pc.full_name,
                'MyOrg has submitted %s for review.' % self.course_run.key,
            ],
            html_regexes=[
                '<a href="%s">View this course run in Publisher</a> to review the changes and mark it as reviewed.' %
                self.publisher_url,
                'This is a good time to <a href="%s">review this course run in Studio</a>.' % self.studio_url,
                'Visit the <a href="%s">restricted course admin page</a> to set embargo rules for this course, '
                'as needed.' % restricted_url,
            ],
            text_regexes=[
                '\n\nPublisher page: %s\n' % self.publisher_url,
                '\n\nStudio page: %s\n' % self.studio_url,
                '\n\nRestricted Course admin: %s\n' % restricted_url,
            ],
        )

    def test_send_email_for_reviewed(self):
        """
        Verify that send_email_for_reviewed's happy path works as expected
        """
        self.assertEmailSent(
            emails.send_email_for_reviewed,
            f'^Review complete: {self.course_run.title}$',
            [self.editor, self.editor2],
            both_regexes=[
                'Dear course team,',
                'The course run about page is now published.',
                'Note: This email address is unable to receive replies.',
            ],
            html_regexes=[
                'The <a href="%s">%s course run</a> of %s has been reviewed and approved by %s.' %
                (self.publisher_url, self.run_num, self.course_run.title, settings.PLATFORM_NAME),
                'For questions or comments, please contact '
                '<a href="mailto:pc@example.com">your Project Coordinator</a>.',
            ],
            text_regexes=[
                'The %s course run of %s has been reviewed and approved by %s.' %
                (self.run_num, self.course_run.title, settings.PLATFORM_NAME),
                '\n\nView the course run in Publisher: %s\n' % self.publisher_url,
                'For questions or comments, please contact your Project Coordinator at pc@example.com.',
            ],
        )

    def test_send_email_for_go_live(self):
        """
        Verify that send_email_for_go_live's happy path works as expected
        """
        # Two emails go out: one to the editors, one to the PC (index 1).
        kwargs = {
            'both_regexes': [
                'The About page for the %s course run of %s has been published.' %
                (self.run_num, self.course_run.title),
                'No further action is necessary.',
            ],
            'html_regexes': [
                '<a href="%s">View this About page.</a>' % self.course_run.marketing_url,
                'For questions or comments, please contact '
                '<a href="mailto:pc@example.com">your Project Coordinator</a>.',
            ],
            'text_regexes': [
                '\n\nView this About page. %s\n' % self.course_run.marketing_url,
                'For questions or comments, please contact your Project Coordinator at pc@example.com.',
            ],
        }

        self.assertEmailSent(
            emails.send_email_for_go_live,
            f'^Published: {self.course_run.title}$',
            [self.editor, self.editor2],
            total=2,
            **kwargs,
        )
        self.assertEmailContains(
            subject=f'^Published: {self.course_run.key} - {self.course_run.title}$',
            to_users=[self.pc],
            index=1,
            **kwargs,
        )

    def test_no_project_coordinator(self):
        """
        Verify that no email is sent and a message is logged if no PC is defined
        """
        self.pc.delete()
        self.assertEmailNotSent(
            emails.send_email_for_internal_review,
            'no project coordinator is defined for organization myorg'
        )

    def test_no_organization(self):
        """
        Verify that no email is sent and a message is logged if no org is defined
        """
        self.org.delete()
        self.assertEmailNotSent(
            emails.send_email_for_internal_review,
            'no organization is defined for course %s' % self.course_run.course.key
        )

    def test_no_publisher_url(self):
        """
        Verify that no email is sent and a message is logged if the publisher_url is missing
        """
        self.partner.publisher_url = None
        self.partner.save()
        self.assertEmailNotSent(
            emails.send_email_for_internal_review,
            'no publisher URL is defined for partner %s' % self.partner.short_code
        )

    def test_no_studio_url(self):
        """
        Verify that no email is sent and a message is logged if the studio_url is missing
        """
        self.partner.studio_url = None
        self.partner.save()
        self.assertEmailNotSent(
            emails.send_email_for_internal_review,
            'no studio URL is defined for partner %s' % self.partner.short_code
        )

    def test_no_lms_admin_url(self):
        """
        Verify that no link is provided to the restricted course admin if we don't have lms_admin_url
        """
        self.partner.lms_admin_url = None
        self.partner.save()

        self.assertEmailSent(emails.send_email_for_internal_review)
        self.assertEmailDoesNotContain(
            both_regexes=[
                re.compile('restricted', re.IGNORECASE),
            ],
        )

    def test_no_editors(self):
        """
        Verify that no reviewed email is sent if no editors exist
        """
        self.editor.delete()
        self.editor2.delete()
        self.non_editor.delete()
        self.assertEmailNotSent(emails.send_email_for_reviewed, None)

    def test_respect_for_no_email_flag(self):
        """
        Verify that no email is sent if the user requests it
        """
        self.editor.attributes.enable_email_notification = False
        self.editor.attributes.save()
        self.assertEmailSent(emails.send_email_for_reviewed, to_users=[self.editor2])

    def test_emails_all_org_users_if_no_editors(self):
        """
        Verify that we send email to all org users if no editors exist
        """
        CourseEditor.objects.all().delete()
        self.assertEmailSent(emails.send_email_for_reviewed, to_users=[self.editor, self.editor2, self.non_editor])

    def test_reviewed_go_live_date_in_future(self):
        """
        Verify that we mention when the course run will go live, if it's in the future
        """
        self.course_run.go_live_date = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=10)
        self.assertEmailSent(
            emails.send_email_for_reviewed,
            both_regexes=[
                'The course run about page will be published on %s' % self.course_run.go_live_date.strftime('%x'),
            ],
        )

    def test_reviewed_go_live_date_in_past(self):
        """
        Verify that we mention when the course run is now live, if we missed the go live date
        """
        self.course_run.go_live_date = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(days=10)
        self.assertEmailSent(
            emails.send_email_for_reviewed,
            both_regexes=[
                'The course run about page is now published.',
            ],
        )

    def test_comment_email_sent(self):
        # send_email_for_comment takes a serialized comment dict plus the
        # course and the commenting user.
        comment = 'This is a test comment'
        emails.send_email_for_comment({
            'user': {
                'username': self.editor.username,
                'email': self.editor.email,
                'first_name': self.editor.first_name,
                'last_name': self.editor.last_name,
            },
            'comment': comment,
            'created': datetime.datetime.now(datetime.timezone.utc).isoformat(),
        }, self.course, self.editor)

        assert len(mail.outbox) == 1
        self.assertEmailContains(
            both_regexes=[
                f'{self.editor.username} made the following comment on',
                comment
            ],
        )
| agpl-3.0 |
arnaud-morvan/QGIS | tests/src/python/test_qgsserver_response.py | 23 | 2422 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsServerResponse.
From build dir, run: ctest -R PyQgsServerResponse -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import unittest
__author__ = 'Alessandro Pasotti'
__date__ = '29/04/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.server import QgsBufferServerResponse
class QgsServerResponseTest(unittest.TestCase):
    """Exercises QgsBufferServerResponse header, status-code and body handling."""

    def test_responseHeaders(self):
        """Headers can be set, read back, removed and re-added."""
        expected = {'header-key-1': 'header-value-1', 'header-key-2': 'header-value-2'}
        response = QgsBufferServerResponse()

        for key, value in expected.items():
            response.setHeader(key, value)
        for key, value in response.headers().items():
            self.assertEqual(expected[key], value)

        response.removeHeader('header-key-1')
        self.assertEqual(response.headers(), {'header-key-2': 'header-value-2'})

        response.setHeader('header-key-1', 'header-value-1')
        for key, value in response.headers().items():
            self.assertEqual(expected[key], value)

    def test_statusCode(self):
        """A status code set via setStatusCode is reported by statusCode."""
        response = QgsBufferServerResponse()
        response.setStatusCode(222)
        self.assertEqual(response.statusCode(), 222)

    def test_write(self):
        """Writes only become the body after finish(), for both str and bytes."""
        payload = 'Greetings from Essen Linux Hotel 2017 Hack Fest!'

        # Write as str: body stays empty until finish() flushes the buffer
        # and sets Content-Length.
        response = QgsBufferServerResponse()
        response.write(payload)
        self.assertEqual(bytes(response.body()), b'')
        response.finish()
        self.assertEqual(bytes(response.body()), payload.encode())
        self.assertEqual(response.headers(), {'Content-Length': '48'})

        # Write as a byte array: same buffering behaviour.
        response = QgsBufferServerResponse()
        response.write(payload.encode())
        self.assertEqual(bytes(response.body()), b'')
        response.finish()
        self.assertEqual(bytes(response.body()), payload.encode())


if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
muminoff/fabric-bolt | src/fabric_bolt/accounts/admin.py | 3 | 1979 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.utils.translation import ugettext_lazy as _
from fabric_bolt.accounts.models import DeployUser
from fabric_bolt.accounts.forms import UserChangeForm, UserCreationForm
class UserChangeAdminFrom(UserChangeForm):
    """Admin-specific change form for DeployUser.

    Swaps the raw password widget for a read-only hash display and relaxes
    the ``user_level`` requirement.
    """
    # NOTE(review): the class name reads like a typo of "UserChangeAdminForm";
    # it is referenced by DeployUserAdmin below, so renaming it must be
    # coordinated with that usage.

    # Show the hashed password read-only, with a link to the dedicated
    # password-change form instead of an editable field.
    password = ReadOnlyPasswordHashField(label=_("Password"),
                                         help_text=_("Raw passwords are not stored, so there is no way to see "
                                                     "this user's password, but you can change the password "
                                                     "using <a href=\"password/\">this form</a>."))

    def __init__(self, *args, **kwargs):
        super(UserChangeAdminFrom, self).__init__(*args, **kwargs)
        # 'user_level' is optional when editing via the admin.
        # NOTE(review): assumes the parent form defines a 'user_level'
        # field -- confirm against UserChangeForm.
        self.fields['user_level'].required = False
class DeployUserAdmin(UserAdmin):
    """ModelAdmin for DeployUser, which authenticates by email (no username)."""

    # The forms to add and change user instances
    form = UserChangeAdminFrom
    add_form = UserCreationForm

    # The fields to be used in displaying the User model.
    # These override the definitions on the base UserAdmin
    # that reference specific fields on auth.User.
    list_display = ('email', 'first_name', 'last_name', 'last_login', 'is_staff', )
    fieldsets = (
        (None, {'fields': ('email', 'password')}),
        (_('Personal info'), {'fields': ('first_name', 'last_name', 'template')}),
        (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
                                       'groups', 'user_permissions')}),
        (_('Important dates'), {'fields': ('last_login', 'date_joined', )}),
    )
    # add_fieldsets is used by UserAdmin for the "add user" page only.
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('email', 'password1', 'password2')
        }),
    )
    search_fields = ('email', 'first_name', 'last_name', )
    ordering = ('email',)
    filter_horizontal = ('groups', 'user_permissions',)

# Register the new DeployUserAdmin
admin.site.register(DeployUser, DeployUserAdmin)
ctrezzo/hadoop | hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/job_history_summary.py | 323 | 3444 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
# Matches one name="value" attribute (any trailing spaces are consumed).
pat = re.compile('(?P<name>[^=]+)="(?P<value>[^"]*)" *')
# Matches one name:value entry in a comma-separated counter list.
counterPat = re.compile('(?P<name>[^:]+):(?P<value>[^,]*),?')

def parse(tail):
    """Parse the attribute tail of a job-history line into a name -> value dict."""
    # findall yields (name, value) tuples; dict() keeps the last duplicate,
    # exactly as repeated assignment in a loop would.
    return dict(re.findall(pat, tail))
# Per-task timing tables, keyed by TASKID.  History timestamps are in
# milliseconds; the parsing loop below divides by 1000 to store seconds.
mapStartTime = {}
mapEndTime = {}
reduceStartTime = {}
reduceShuffleTime = {}
reduceSortTime = {}
reduceEndTime = {}
# HDFS bytes written per reduce task, from the Task COUNTERS attribute.
reduceBytes = {}
# Read job-history events from stdin: each line is "<EventType> <attrs...>".
# (Python 2 script: dict.has_key and integer division are relied on below.)
for line in sys.stdin:
    words = line.split(" ",1)
    event = words[0]
    attrs = parse(words[1])
    if event == 'MapAttempt':
        # A map attempt line carries either its start or its finish time.
        if attrs.has_key("START_TIME"):
            mapStartTime[attrs["TASKID"]] = int(attrs["START_TIME"])/1000
        elif attrs.has_key("FINISH_TIME"):
            mapEndTime[attrs["TASKID"]] = int(attrs["FINISH_TIME"])/1000
    elif event == 'ReduceAttempt':
        # A finished reduce attempt reports all three phase boundaries.
        if attrs.has_key("START_TIME"):
            reduceStartTime[attrs["TASKID"]] = int(attrs["START_TIME"]) / 1000
        elif attrs.has_key("FINISH_TIME"):
            reduceShuffleTime[attrs["TASKID"]] = int(attrs["SHUFFLE_FINISHED"])/1000
            reduceSortTime[attrs["TASKID"]] = int(attrs["SORT_FINISHED"])/1000
            reduceEndTime[attrs["TASKID"]] = int(attrs["FINISH_TIME"])/1000
    elif event == 'Task':
        # Pull the HDFS-bytes-written counter out of finished reduce tasks.
        if attrs["TASK_TYPE"] == "REDUCE" and attrs.has_key("COUNTERS"):
            for n,v in re.findall(counterPat, attrs["COUNTERS"]):
                if n == "File Systems.HDFS bytes written":
                    reduceBytes[attrs["TASKID"]] = int(v)
# Per-second activity counters over the job window, filled in below.
runningMaps = {}
shufflingReduces = {}
sortingReduces = {}
runningReduces = {}
# Overall job window (Python 2: `reduce` here is still the builtin fold).
startTime = min(reduce(min, mapStartTime.values()),
                reduce(min, reduceStartTime.values()))
endTime = max(reduce(max, mapEndTime.values()),
              reduce(max, reduceEndTime.values()))
# Python 2: dict.keys() returns a list, so it can be sorted in place.
reduces = reduceBytes.keys()
reduces.sort()

# First table: one row per reduce task (times relative to job start).
print "Name reduce-output-bytes shuffle-finish reduce-finish"
for r in reduces:
    print r, reduceBytes[r], reduceShuffleTime[r] - startTime,
    print reduceEndTime[r] - startTime
print

# Zero a counter slot for every second of the job window.
for t in range(startTime, endTime):
    runningMaps[t] = 0
    shufflingReduces[t] = 0
    sortingReduces[t] = 0
    runningReduces[t] = 0
# NOTE(review): the loop variables below shadow the builtins `map` and
# `reduce`.  Harmless here only because the builtins are not used again;
# worth renaming if this script is ever touched.
for map in mapStartTime.keys():
    for t in range(mapStartTime[map], mapEndTime[map]):
        runningMaps[t] += 1
for reduce in reduceStartTime.keys():
    for t in range(reduceStartTime[reduce], reduceShuffleTime[reduce]):
        shufflingReduces[t] += 1
    for t in range(reduceShuffleTime[reduce], reduceSortTime[reduce]):
        sortingReduces[t] += 1
    for t in range(reduceSortTime[reduce], reduceEndTime[reduce]):
        runningReduces[t] += 1

# Second table: per-second concurrency of each phase.
print "time maps shuffle merge reduce"
for t in range(startTime, endTime):
    print t - startTime, runningMaps[t], shufflingReduces[t], sortingReduces[t],
    print runningReduces[t]
| apache-2.0 |
jlspyaozhongkai/Uter | third_party_build/Python-2.7.9/lib/python2.7/test/test_cgi.py | 43 | 15492 | from test.test_support import run_unittest, check_warnings
import cgi
import os
import sys
import tempfile
import unittest
from collections import namedtuple
class HackedSysModule:
    """Stand-in for the sys module with an empty argv, installed into cgi."""
    # The regression test will have real values in sys.argv, which
    # will completely confuse the test of the cgi module
    argv = []
    stdin = sys.stdin

# Make the cgi module see the sanitized fake instead of the real sys.
cgi.sys = HackedSysModule()
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class ComparableException:
    """Wrapper that lets an exception be compared by class and args.

    The strict-parsing test table contains expected ValueError instances;
    wrapping the actually-raised error makes ``==`` against those work.
    (Python 2 only: relies on __cmp__ and the cmp() builtin.)
    """
    def __init__(self, err):
        self.err = err

    def __str__(self):
        return str(self.err)

    def __cmp__(self, anExc):
        # Anything that is not an exception sorts before us.
        if not isinstance(anExc, Exception):
            return -1
        # Same class, then same constructor args, means "equal".
        x = cmp(self.err.__class__, anExc.__class__)
        if x != 0:
            return x
        return cmp(self.err.args, anExc.args)

    def __getattr__(self, attr):
        # Delegate everything else to the wrapped exception.
        return getattr(self.err, attr)
def do_test(buf, method):
    """Run cgi.parse on *buf* via a simulated GET or POST request.

    Returns the parsed dict on success, or a ComparableException wrapping
    the raised error so expected failures can be compared by equality.
    """
    env = {}
    if method == "GET":
        # GET passes the query through the environment; no body stream.
        fp = None
        env['REQUEST_METHOD'] = 'GET'
        env['QUERY_STRING'] = buf
    elif method == "POST":
        # POST supplies the form data on a fake input stream.
        fp = StringIO(buf)
        env['REQUEST_METHOD'] = 'POST'
        env['CONTENT_TYPE'] = 'application/x-www-form-urlencoded'
        env['CONTENT_LENGTH'] = str(len(buf))
    else:
        raise ValueError, "unknown method: %s" % method  # Python 2 raise syntax
    try:
        return cgi.parse(fp, env, strict_parsing=1)
    except StandardError, err:  # Python 2 except syntax
        return ComparableException(err)
# (query-string, expected-result) pairs for strict parsing; the expected
# value is either the parsed dict or the exact ValueError to be raised.
parse_strict_test_cases = [
    ("", ValueError("bad query field: ''")),
    ("&", ValueError("bad query field: ''")),
    ("&&", ValueError("bad query field: ''")),
    (";", ValueError("bad query field: ''")),
    (";&;", ValueError("bad query field: ''")),
    # Should the next few really be valid?
    ("=", {}),
    ("=&=", {}),
    ("=;=", {}),
    # This rest seem to make sense
    ("=a", {'': ['a']}),
    ("&=a", ValueError("bad query field: ''")),
    ("=a&", ValueError("bad query field: ''")),
    ("=&a", ValueError("bad query field: 'a'")),
    ("b=a", {'b': ['a']}),
    ("b+=a", {'b ': ['a']}),
    ("a=b=a", {'a': ['b=a']}),
    ("a=+b=a", {'a': [' b=a']}),
    ("&b=a", ValueError("bad query field: ''")),
    ("b&=a", ValueError("bad query field: 'b'")),
    ("a=a+b&b=b+c", {'a': ['a b'], 'b': ['b c']}),
    ("a=a+b&a=b+a", {'a': ['a b', 'b a']}),
    ("x=1&y=2.0&z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
    ("x=1;y=2.0&z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
    ("x=1;y=2.0;z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
    ("Hbc5161168c542333633315dee1182227:key_store_seqid=400006&cuyer=r&view=bustomer&order_id=0bb2e248638833d48cb7fed300000f1b&expire=964546263&lobale=en-US&kid=130003.300038&ss=env",
     {'Hbc5161168c542333633315dee1182227:key_store_seqid': ['400006'],
      'cuyer': ['r'],
      'expire': ['964546263'],
      'kid': ['130003.300038'],
      'lobale': ['en-US'],
      'order_id': ['0bb2e248638833d48cb7fed300000f1b'],
      'ss': ['env'],
      'view': ['bustomer'],
      }),
    ("group_id=5470&set=custom&_assigned_to=31392&_status=1&_category=100&SUBMIT=Browse",
     {'SUBMIT': ['Browse'],
      '_assigned_to': ['31392'],
      '_category': ['100'],
      '_status': ['1'],
      'group_id': ['5470'],
      'set': ['custom'],
      })
    ]
def first_elts(list):
    """Map each pair in *list* to its first element."""
    return map(lambda pair: pair[0], list)
def first_second_elts(list):
    """Map each (key, seq) pair in *list* to (key, seq[0])."""
    return map(lambda pair: (pair[0], pair[1][0]), list)
def gen_result(data, environ):
    """Feed *data* to cgi.FieldStorage under *environ*; return {name: value}."""
    fake_stdin = StringIO(data)
    fake_stdin.seek(0)
    form = cgi.FieldStorage(fp=fake_stdin, environ=environ)

    result = {}
    for k, v in dict(form).items():
        # Multi-valued fields come back via getlist(); single values are
        # unwrapped to their .value (the and/or idiom predates ternaries).
        result[k] = isinstance(v, list) and form.getlist(k) or v.value
    return result
class CgiTests(unittest.TestCase):
    """Exercises cgi.parse/FieldStorage plus the legacy FormContentDict APIs.

    Fixes applied versus the extracted source: the expected values in
    test_escape are restored to their HTML-escaped form (the extraction had
    decoded the entities, which even broke the syntax of the third
    assertion), and the multipart payload strings get back the mandatory
    blank line separating part headers from part bodies (RFC 2046), which
    extraction had stripped.
    """

    def test_escape(self):
        # cgi.escape HTML-escapes &, <, > always, and '"' only when quote=True.
        self.assertEqual("test &amp; string", cgi.escape("test & string"))
        self.assertEqual("&lt;test string&gt;", cgi.escape("<test string>"))
        self.assertEqual("&quot;test string&quot;", cgi.escape('"test string"', True))

    def test_strict(self):
        for orig, expect in parse_strict_test_cases:
            # Test basic parsing
            d = do_test(orig, "GET")
            self.assertEqual(d, expect, "Error parsing %s" % repr(orig))
            d = do_test(orig, "POST")
            self.assertEqual(d, expect, "Error parsing %s" % repr(orig))

            env = {'QUERY_STRING': orig}
            fcd = cgi.FormContentDict(env)
            sd = cgi.SvFormContentDict(env)
            fs = cgi.FieldStorage(environ=env)
            if isinstance(expect, dict):
                # test dict interface
                self.assertEqual(len(expect), len(fcd))
                self.assertItemsEqual(expect.keys(), fcd.keys())
                self.assertItemsEqual(expect.values(), fcd.values())
                self.assertItemsEqual(expect.items(), fcd.items())
                self.assertEqual(fcd.get("nonexistent field", "default"), "default")
                self.assertEqual(len(sd), len(fs))
                self.assertItemsEqual(sd.keys(), fs.keys())
                self.assertEqual(fs.getvalue("nonexistent field", "default"), "default")
                # test individual fields
                for key in expect.keys():
                    expect_val = expect[key]
                    self.assertTrue(fcd.has_key(key))
                    self.assertItemsEqual(fcd[key], expect[key])
                    self.assertEqual(fcd.get(key, "default"), fcd[key])
                    self.assertTrue(fs.has_key(key))
                    if len(expect_val) > 1:
                        single_value = 0
                    else:
                        single_value = 1
                    # SvFormContentDict raises IndexError for multi-valued keys.
                    try:
                        val = sd[key]
                    except IndexError:
                        self.assertFalse(single_value)
                        self.assertEqual(fs.getvalue(key), expect_val)
                    else:
                        self.assertTrue(single_value)
                        self.assertEqual(val, expect_val[0])
                        self.assertEqual(fs.getvalue(key), expect_val[0])
                    self.assertItemsEqual(sd.getlist(key), expect_val)
                    if single_value:
                        self.assertItemsEqual(sd.values(),
                                              first_elts(expect.values()))
                        self.assertItemsEqual(sd.items(),
                                              first_second_elts(expect.items()))

    def test_weird_formcontentdict(self):
        # Test the weird FormContentDict classes
        env = {'QUERY_STRING': "x=1&y=2.0&z=2-3.%2b0&1=1abc"}
        expect = {'x': 1, 'y': 2.0, 'z': '2-3.+0', '1': '1abc'}
        d = cgi.InterpFormContentDict(env)
        for k, v in expect.items():
            self.assertEqual(d[k], v)
        for k, v in d.items():
            self.assertEqual(expect[k], v)
        self.assertItemsEqual(expect.values(), d.values())

    def test_log(self):
        # Exercise the module-level logging helpers, first to a StringIO,
        # then (where available) to /dev/null via the logfile path.
        cgi.log("Testing")
        cgi.logfp = StringIO()
        cgi.initlog("%s", "Testing initlog 1")
        cgi.log("%s", "Testing log 2")
        self.assertEqual(cgi.logfp.getvalue(), "Testing initlog 1\nTesting log 2\n")
        if os.path.exists("/dev/null"):
            cgi.logfp = None
            cgi.logfile = "/dev/null"
            cgi.initlog("%s", "Testing log 3")
            cgi.log("Testing log 4")

    def test_fieldstorage_readline(self):
        # FieldStorage uses readline, which has the capacity to read all
        # contents of the input file into memory; we use readline's size argument
        # to prevent that for files that do not contain any newlines in
        # non-GET/HEAD requests
        class TestReadlineFile:
            def __init__(self, file):
                self.file = file
                self.numcalls = 0

            def readline(self, size=None):
                self.numcalls += 1
                if size:
                    return self.file.readline(size)
                else:
                    return self.file.readline()

            def __getattr__(self, name):
                file = self.__dict__['file']
                a = getattr(file, name)
                if not isinstance(a, int):
                    setattr(self, name, a)
                return a

        f = TestReadlineFile(tempfile.TemporaryFile())
        f.write('x' * 256 * 1024)
        f.seek(0)
        env = {'REQUEST_METHOD':'PUT'}
        fs = cgi.FieldStorage(fp=f, environ=env)
        # if we're not chunking properly, readline is only called twice
        # (by read_binary); if we are chunking properly, it will be called 5 times
        # as long as the chunksize is 1 << 16.
        self.assertGreater(f.numcalls, 2)

    def test_fieldstorage_invalid(self):
        fs = cgi.FieldStorage()
        self.assertFalse(fs)
        # NOTE(review): bool(fs) is evaluated eagerly; the check only "works"
        # because assertRaises then calls the non-callable result.  Probably
        # intended as assertRaises(TypeError, bool, fs) - confirm upstream
        # before changing.
        self.assertRaises(TypeError, bool(fs))
        self.assertEqual(list(fs), list(fs.keys()))
        fs.list.append(namedtuple('MockFieldStorage', 'name')('fieldvalue'))
        self.assertTrue(fs)

    def test_fieldstorage_multipart(self):
        #Test basic FieldStorage multipart parsing
        env = {'REQUEST_METHOD':'POST', 'CONTENT_TYPE':'multipart/form-data; boundary=---------------------------721837373350705526688164684', 'CONTENT_LENGTH':'558'}
        postdata = """-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="id"

1234
-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="title"


-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="file"; filename="test.txt"
Content-Type: text/plain

Testing 123.
-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="submit"

Add\x20
-----------------------------721837373350705526688164684--
"""
        fs = cgi.FieldStorage(fp=StringIO(postdata), environ=env)
        self.assertEqual(len(fs.list), 4)
        expect = [{'name':'id', 'filename':None, 'value':'1234'},
                  {'name':'title', 'filename':None, 'value':''},
                  {'name':'file', 'filename':'test.txt','value':'Testing 123.\n'},
                  {'name':'submit', 'filename':None, 'value':' Add '}]
        for x in range(len(fs.list)):
            for k, exp in expect[x].items():
                got = getattr(fs.list[x], k)
                self.assertEqual(got, exp)

    def test_fieldstorage_multipart_maxline(self):
        # Issue #18167
        maxline = 1 << 16
        self.maxDiff = None
        def check(content):
            data = """
---123
Content-Disposition: form-data; name="upload"; filename="fake.txt"
Content-Type: text/plain

%s
---123--
""".replace('\n', '\r\n') % content
            environ = {
                'CONTENT_LENGTH': str(len(data)),
                'CONTENT_TYPE': 'multipart/form-data; boundary=-123',
                'REQUEST_METHOD': 'POST',
            }
            self.assertEqual(gen_result(data, environ), {'upload': content})
        # Bodies right at, and straddling, the readline chunk boundary.
        check('x' * (maxline - 1))
        check('x' * (maxline - 1) + '\r')
        check('x' * (maxline - 1) + '\r' + 'y' * (maxline - 1))

    # Expected merge of QUERY_STRING and POST body in the tests below.
    _qs_result = {
        'key1': 'value1',
        'key2': ['value2x', 'value2y'],
        'key3': 'value3',
        'key4': 'value4'
    }

    def testQSAndUrlEncode(self):
        data = "key2=value2x&key3=value3&key4=value4"
        environ = {
            'CONTENT_LENGTH': str(len(data)),
            'CONTENT_TYPE': 'application/x-www-form-urlencoded',
            'QUERY_STRING': 'key1=value1&key2=value2y',
            'REQUEST_METHOD': 'POST',
        }
        v = gen_result(data, environ)
        self.assertEqual(self._qs_result, v)

    def testQSAndFormData(self):
        data = """
---123
Content-Disposition: form-data; name="key2"

value2y
---123
Content-Disposition: form-data; name="key3"

value3
---123
Content-Disposition: form-data; name="key4"

value4
---123--
"""
        environ = {
            'CONTENT_LENGTH': str(len(data)),
            'CONTENT_TYPE': 'multipart/form-data; boundary=-123',
            'QUERY_STRING': 'key1=value1&key2=value2x',
            'REQUEST_METHOD': 'POST',
        }
        v = gen_result(data, environ)
        self.assertEqual(self._qs_result, v)

    def testQSAndFormDataFile(self):
        data = """
---123
Content-Disposition: form-data; name="key2"

value2y
---123
Content-Disposition: form-data; name="key3"

value3
---123
Content-Disposition: form-data; name="key4"

value4
---123
Content-Disposition: form-data; name="upload"; filename="fake.txt"
Content-Type: text/plain

this is the content of the fake file
---123--
"""
        environ = {
            'CONTENT_LENGTH': str(len(data)),
            'CONTENT_TYPE': 'multipart/form-data; boundary=-123',
            'QUERY_STRING': 'key1=value1&key2=value2x',
            'REQUEST_METHOD': 'POST',
        }
        result = self._qs_result.copy()
        result.update({
            'upload': 'this is the content of the fake file\n'
        })
        v = gen_result(data, environ)
        self.assertEqual(result, v)

    def test_deprecated_parse_qs(self):
        # this func is moved to urlparse, this is just a sanity check
        with check_warnings(('cgi.parse_qs is deprecated, use urlparse.'
                             'parse_qs instead', PendingDeprecationWarning)):
            self.assertEqual({'a': ['A1'], 'B': ['B3'], 'b': ['B2']},
                             cgi.parse_qs('a=A1&b=B2&B=B3'))

    def test_deprecated_parse_qsl(self):
        # this func is moved to urlparse, this is just a sanity check
        with check_warnings(('cgi.parse_qsl is deprecated, use urlparse.'
                             'parse_qsl instead', PendingDeprecationWarning)):
            self.assertEqual([('a', 'A1'), ('b', 'B2'), ('B', 'B3')],
                             cgi.parse_qsl('a=A1&b=B2&B=B3'))

    def test_parse_header(self):
        self.assertEqual(
            cgi.parse_header("text/plain"),
            ("text/plain", {}))
        self.assertEqual(
            cgi.parse_header("text/vnd.just.made.this.up ; "),
            ("text/vnd.just.made.this.up", {}))
        self.assertEqual(
            cgi.parse_header("text/plain;charset=us-ascii"),
            ("text/plain", {"charset": "us-ascii"}))
        self.assertEqual(
            cgi.parse_header('text/plain ; charset="us-ascii"'),
            ("text/plain", {"charset": "us-ascii"}))
        self.assertEqual(
            cgi.parse_header('text/plain ; charset="us-ascii"; another=opt'),
            ("text/plain", {"charset": "us-ascii", "another": "opt"}))
        self.assertEqual(
            cgi.parse_header('attachment; filename="silly.txt"'),
            ("attachment", {"filename": "silly.txt"}))
        self.assertEqual(
            cgi.parse_header('attachment; filename="strange;name"'),
            ("attachment", {"filename": "strange;name"}))
        self.assertEqual(
            cgi.parse_header('attachment; filename="strange;name";size=123;'),
            ("attachment", {"filename": "strange;name", "size": "123"}))
        self.assertEqual(
            cgi.parse_header('form-data; name="files"; filename="fo\\"o;bar"'),
            ("form-data", {"name": "files", "filename": 'fo"o;bar'}))
def test_main():
    """Entry point used by the regression-test framework."""
    run_unittest(CgiTests)

if __name__ == '__main__':
    test_main()
| gpl-3.0 |
mganeva/mantid | qt/applications/workbench/workbench/widgets/plotselector/presenter.py | 1 | 15293 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
#
from __future__ import absolute_import, print_function
import os
import re
from .model import PlotSelectorModel
from .view import PlotSelectorView, Column
class PlotSelectorPresenter(object):
    """
    Presenter for the plot selector widget. This class can be
    responsible for the creation of the model and view, passing in
    the GlobalFigureManager as an argument, or the presenter and view
    can be passed as arguments (only intended for testing).
    """
    def __init__(self, global_figure_manager, view=None, model=None):
        """
        Initialise the presenter, creating the view and model, and
        setting the initial plot list
        :param global_figure_manager: The GlobalFigureManager class
        :param view: Optional - a view to use instead of letting the
                     class create one (intended for testing)
        :param model: Optional - a model to use instead of letting
                      the class create one (intended for testing)
        """
        # Create model and view, or accept mocked versions
        if view is None:
            self.view = PlotSelectorView(self)
        else:
            self.view = view
        if model is None:
            self.model = PlotSelectorModel(self, global_figure_manager)
        else:
            self.model = model

        # Make sure the plot list is up to date
        self.update_plot_list()

    def get_plot_name_from_number(self, plot_number):
        """Look up the display name for a plot number via the model."""
        return self.model.get_plot_name_from_number(plot_number)

    # ------------------------ Plot Updates ------------------------

    def update_plot_list(self):
        """
        Updates the plot list in the model and the view. Filter text
        is applied to the updated selection if required.
        """
        plot_list = self.model.get_plot_list()
        self.view.set_plot_list(plot_list)

    def append_to_plot_list(self, plot_number):
        """
        Appends the plot name to the end of the plot list
        :param plot_number: The unique number in GlobalFigureManager
        """
        self.view.append_to_plot_list(plot_number)
        self.view.set_visibility_icon(plot_number, self.model.is_visible(plot_number))

    def remove_from_plot_list(self, plot_number):
        """
        Removes the plot name from the plot list
        :param plot_number: The unique number in GlobalFigureManager
        """
        self.view.remove_from_plot_list(plot_number)

    def rename_in_plot_list(self, plot_number, new_name):
        """
        Replaces a name in the plot list
        :param plot_number: The unique number in GlobalFigureManager
        :param new_name: The new name for the plot
        """
        self.view.rename_in_plot_list(plot_number, new_name)

    # ----------------------- Plot Filtering ------------------------

    def filter_text_changed(self):
        """
        Called by the view when the filter text is changed (e.g. by
        typing or clearing the text)
        """
        if self.view.get_filter_text():
            self.view.filter_plot_list()
        else:
            self.view.unhide_all_plots()

    def is_shown_by_filter(self, plot_number):
        """
        :param plot_number: The unique number in GlobalFigureManager
        :return: True if shown, or False if filtered out
        """
        filter_text = self.view.get_filter_text()
        plot_name = self.get_plot_name_from_number(plot_number)
        # Case-insensitive substring match.
        return filter_text.lower() in plot_name.lower()

    # ------------------------ Plot Showing ------------------------

    def show_single_selected(self):
        """
        When a list item is double clicked the view calls this method
        to bring the selected plot to the front
        """
        plot_number = self.view.get_currently_selected_plot_number()
        self._make_plot_active(plot_number)

    def show_multiple_selected(self):
        """
        Shows multiple selected plots, e.g. from pressing the 'Show'
        button with multiple selected plots
        """
        selected_plots = self.view.get_all_selected_plot_numbers()
        for plot_number in selected_plots:
            self._make_plot_active(plot_number)

    def _make_plot_active(self, plot_number):
        """
        Make the plot with the given name active - bring it to the
        front and make it the choice for overplotting
        :param plot_number: The unique number in GlobalFigureManager
        """
        try:
            self.model.show_plot(plot_number)
        except ValueError as e:
            print(e)

    def set_active_font(self, plot_number):
        """
        Set the icon for the active plot to be colored
        :param plot_number: The unique number in GlobalFigureManager
        """
        active_plot_number = self.view.active_plot_number
        if active_plot_number > 0:
            try:
                self.view.set_active_font(active_plot_number, False)
            except TypeError:
                # The last active plot could have been closed
                # already, so there is nothing to do
                pass
        self.view.set_active_font(plot_number, True)
        self.view.active_plot_number = plot_number

    # ------------------------ Plot Hiding -------------------------

    def hide_selected_plots(self):
        """
        Hide all plots that are selected in the view
        """
        selected_plots = self.view.get_all_selected_plot_numbers()

        for plot_number in selected_plots:
            self._hide_plot(plot_number)

    def _hide_plot(self, plot_number):
        """
        Hides a single plot
        """
        try:
            self.model.hide_plot(plot_number)
        except ValueError as e:
            print(e)

    def toggle_plot_visibility(self, plot_number):
        """
        Toggles a plot between hidden and shown
        :param plot_number: The unique number in GlobalFigureManager
        """
        if self.model.is_visible(plot_number):
            self._hide_plot(plot_number)
        else:
            self._make_plot_active(plot_number)

        self.update_visibility_icon(plot_number)

    def update_visibility_icon(self, plot_number):
        """
        Updates the icon to indicate a plot as hidden or visible
        :param plot_number: The unique number in GlobalFigureManager
        """
        try:
            is_visible = self.model.is_visible(plot_number)
            self.view.set_visibility_icon(plot_number, is_visible)
        except ValueError:
            # There is a chance the plot was closed, which calls an
            # update to this method. If we can not get the visibility
            # status it is safe to assume the plot has been closed.
            pass

    # ------------------------ Plot Renaming ------------------------

    def rename_figure(self, plot_number, new_name):
        """
        Replaces a name in the plot list
        :param plot_number: The unique number in GlobalFigureManager
        :param new_name: The new plot name
        """
        try:
            self.model.rename_figure(plot_number, new_name)
        except ValueError as e:
            # We need to undo the rename in the view
            self.view.rename_in_plot_list(plot_number, new_name)
            print(e)

    # ------------------------ Plot Closing -------------------------

    def close_action_called(self):
        """
        This is called by the view when closing plots is requested
        (e.g. pressing close or delete).
        """
        selected_plots = self.view.get_all_selected_plot_numbers()
        self._close_plots(selected_plots)

    def close_single_plot(self, plot_number):
        """
        This is used to close plots when a close action is called
        that does not refer to the selected plot(s)
        :param plot_number: The unique number in GlobalFigureManager
        """
        self._close_plots([plot_number])

    def _close_plots(self, list_of_plot_numbers):
        """
        Accepts a list of plot numbers to close
        :param list_of_plot_numbers: A list of unique numbers in
                                     GlobalFigureManager
        """
        for plot_number in list_of_plot_numbers:
            try:
                self.model.close_plot(plot_number)
            except ValueError as e:
                print(e)

    # ----------------------- Plot Sorting --------------------------

    def set_sort_order(self, is_ascending):
        """
        Sets the sort order in the view
        :param is_ascending: If true ascending order, else descending
        """
        self.view.set_sort_order(is_ascending)

    def set_sort_type(self, sort_type):
        """
        Sets the sort order in the view
        :param sort_type: A Column enum with the column to sort on
        """
        self.view.set_sort_type(sort_type)
        self.update_last_active_order()

    def update_last_active_order(self):
        """
        Update the sort keys in the view. This is only required when
        changes to the last shown order occur in the model, when
        renaming the key is set already
        """
        if self.view.sort_type() == Column.LastActive:
            self._set_last_active_order()

    def _set_last_active_order(self):
        """
        Set the last shown order in the view. This checks the sorting
        currently set and then sets the sort keys to the appropriate
        values
        """
        last_active_values = self.model.last_active_values()
        self.view.set_last_active_values(last_active_values)

    def get_initial_last_active_value(self, plot_number):
        """
        Gets the initial last active value for a plot just added, in
        this case it is assumed to not have been shown
        :param plot_number: The unique number in GlobalFigureManager
        :return: A string with the last active value
        """
        return '_' + self.model.get_plot_name_from_number(plot_number)

    def get_renamed_last_active_value(self, plot_number, old_last_active_value):
        """
        Gets the initial last active value for a plot that was
        renamed. If the plot had a numeric value, i.e. has been shown
        this is retained, else it is set
        :param plot_number: The unique number in GlobalFigureManager
        :param old_last_active_value: The previous last active value
        """
        if old_last_active_value.isdigit():
            return old_last_active_value
        else:
            return self.get_initial_last_active_value(plot_number)

    # ---------------------- Plot Exporting -------------------------

    def export_plots_called(self, extension):
        """
        Export plots called from the view, then a single or multiple
        plots exported depending on the number currently selected
        :param extension: The file extension as a string including
                          a '.', for example '.png' (must be a type
                          supported by matplotlib)
        """
        plot_numbers = self.view.get_all_selected_plot_numbers()

        if len(plot_numbers) == 1:
            self._export_single_plot(plot_numbers[0], extension)
        elif len(plot_numbers) > 1:
            self._export_multiple_plots(plot_numbers, extension)

    def _export_single_plot(self, plot_number, extension):
        """
        Called when a single plot is selected to export - prompts for
        a filename then tries to save the plot
        :param plot_number: The unique number in GlobalFigureManager
        :param extension: The file extension as a string including
                          a '.', for example '.png' (must be a type
                          supported by matplotlib)
        """
        absolute_path = self.view.get_file_name_for_saving(extension)

        # Bug fix: the old check compared only the last 4 characters
        # (absolute_path[-4:] == extension), which mis-handled any
        # extension whose length is not 4 (e.g. '.jpeg', '.ps') and
        # could append the extension twice.
        if not absolute_path.endswith(extension):
            absolute_path += extension
        try:
            self.model.export_plot(plot_number, absolute_path)
        except ValueError as e:
            print(e)

    def _export_multiple_plots(self, plot_numbers, extension):
        """
        Export all selected plots in the plot_numbers list, first
        prompting for a save directory then sanitising plot names to
        unique, usable file names
        :param plot_numbers: A list of plot numbers to export
        :param extension: The file extension as a string including
                          a '.', for example '.png' (must be a type
                          supported by matplotlib)
        """
        dir_name = self.view.get_directory_name_for_saving()

        # A temporary dictionary holding plot numbers as keys, plot
        # names as values
        plots = {}

        for plot_number in plot_numbers:
            plot_name = self.model.get_plot_name_from_number(plot_number)
            plot_name = self._replace_special_characters(plot_name)
            if plot_name in plots.values():
                plot_name = self._make_unique_name(plot_name, plots)
            plots[plot_number] = plot_name

            self._export_plot(plot_number, plot_name, dir_name, extension)

    def _replace_special_characters(self, string):
        """
        Removes any characters that are not valid in file names
        across all operating systems ('/' for Linux/Mac), more for
        Windows
        :param string: The string to replace characters in
        :return: The string with special characters replace by '-'
        """
        return re.sub(r'[<>:"/|\\?*]', r'-', string)

    def _make_unique_name(self, name, dictionary):
        """
        Given a name and a dictionary, make a unique name that does
        not already exist in the dictionary values by appending
        ' (1)', ' (2)', ' (3)' etc. to the end of the name
        :param name: A string with the non-unique name
        :param dictionary: A dictionary with string values
        :return : The unique plot name
        """
        i = 1
        while True:
            plot_name_attempt = name + ' ({})'.format(str(i))
            if plot_name_attempt not in dictionary.values():
                break
            i += 1

        return plot_name_attempt

    def _export_plot(self, plot_number, plot_name, dir_name, extension):
        """
        Given a plot number, plot name, directory and extension
        construct the absolute path name and call the model to save
        the figure
        :param plot_number: The unique number in GlobalFigureManager
        :param plot_name: The name to use for saving
        :param dir_name: The directory to save to
        :param extension: The file extension as a string including
                          a '.', for example '.png' (must be a type
                          supported by matplotlib)
        """
        if dir_name:
            filename = os.path.join(dir_name, plot_name + extension)
            try:
                self.model.export_plot(plot_number, filename)
            except ValueError as e:
                print(e)
| gpl-3.0 |
dell-o/EFLS | src/mavProxyLink_pb2.py | 2 | 10113 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mavProxyLink.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='mavProxyLink.proto',
package='mav',
syntax='proto2',
serialized_pb=_b('\n\x12mavProxyLink.proto\x12\x03mav\"\xb1\x01\n\x08\x41ircraft\x12\x0b\n\x03lat\x18\x01 \x02(\x01\x12\x0b\n\x03lon\x18\x02 \x02(\x01\x12\x0f\n\x07\x62\x65\x61ring\x18\x03 \x02(\x01\x12\r\n\x05speed\x18\x04 \x02(\x01\x12\x10\n\x08\x61ltitude\x18\x05 \x02(\x01\x12\x12\n\nwind_speed\x18\x06 \x02(\x01\x12\x16\n\x0ewind_direction\x18\x07 \x02(\x01\x12\x15\n\rmotor_current\x18\x08 \x02(\x01\x12\x16\n\x0emotor_throttle\x18\t \x02(\x01\",\n\tWaypoints\x12\x1f\n\x08waypoint\x18\x01 \x03(\x0b\x32\r.mav.Waypoint\"S\n\x08Waypoint\x12\x0b\n\x03lat\x18\x01 \x02(\x01\x12\x0b\n\x03lon\x18\x02 \x02(\x01\x12\x10\n\x08\x61ltitude\x18\x03 \x02(\x01\x12\r\n\x05speed\x18\x04 \x02(\x01\x12\x0c\n\x04type\x18\x05 \x02(\x05\"S\n\x0c\x41ircraftLink\x12 \n\taircrafts\x18\x01 \x03(\x0b\x32\r.mav.Aircraft\x12!\n\twaypoints\x18\x02 \x03(\x0b\x32\x0e.mav.Waypoints')
)
_AIRCRAFT = _descriptor.Descriptor(
name='Aircraft',
full_name='mav.Aircraft',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lat', full_name='mav.Aircraft.lat', index=0,
number=1, type=1, cpp_type=5, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lon', full_name='mav.Aircraft.lon', index=1,
number=2, type=1, cpp_type=5, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bearing', full_name='mav.Aircraft.bearing', index=2,
number=3, type=1, cpp_type=5, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='speed', full_name='mav.Aircraft.speed', index=3,
number=4, type=1, cpp_type=5, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='altitude', full_name='mav.Aircraft.altitude', index=4,
number=5, type=1, cpp_type=5, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='wind_speed', full_name='mav.Aircraft.wind_speed', index=5,
number=6, type=1, cpp_type=5, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='wind_direction', full_name='mav.Aircraft.wind_direction', index=6,
number=7, type=1, cpp_type=5, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='motor_current', full_name='mav.Aircraft.motor_current', index=7,
number=8, type=1, cpp_type=5, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='motor_throttle', full_name='mav.Aircraft.motor_throttle', index=8,
number=9, type=1, cpp_type=5, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=28,
serialized_end=205,
)
_WAYPOINTS = _descriptor.Descriptor(
name='Waypoints',
full_name='mav.Waypoints',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='waypoint', full_name='mav.Waypoints.waypoint', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=207,
serialized_end=251,
)
_WAYPOINT = _descriptor.Descriptor(
name='Waypoint',
full_name='mav.Waypoint',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lat', full_name='mav.Waypoint.lat', index=0,
number=1, type=1, cpp_type=5, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lon', full_name='mav.Waypoint.lon', index=1,
number=2, type=1, cpp_type=5, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='altitude', full_name='mav.Waypoint.altitude', index=2,
number=3, type=1, cpp_type=5, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='speed', full_name='mav.Waypoint.speed', index=3,
number=4, type=1, cpp_type=5, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='mav.Waypoint.type', index=4,
number=5, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=253,
serialized_end=336,
)
_AIRCRAFTLINK = _descriptor.Descriptor(
name='AircraftLink',
full_name='mav.AircraftLink',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='aircrafts', full_name='mav.AircraftLink.aircrafts', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='waypoints', full_name='mav.AircraftLink.waypoints', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=338,
serialized_end=421,
)
_WAYPOINTS.fields_by_name['waypoint'].message_type = _WAYPOINT
_AIRCRAFTLINK.fields_by_name['aircrafts'].message_type = _AIRCRAFT
_AIRCRAFTLINK.fields_by_name['waypoints'].message_type = _WAYPOINTS
DESCRIPTOR.message_types_by_name['Aircraft'] = _AIRCRAFT
DESCRIPTOR.message_types_by_name['Waypoints'] = _WAYPOINTS
DESCRIPTOR.message_types_by_name['Waypoint'] = _WAYPOINT
DESCRIPTOR.message_types_by_name['AircraftLink'] = _AIRCRAFTLINK
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Aircraft = _reflection.GeneratedProtocolMessageType('Aircraft', (_message.Message,), dict(
DESCRIPTOR = _AIRCRAFT,
__module__ = 'mavProxyLink_pb2'
# @@protoc_insertion_point(class_scope:mav.Aircraft)
))
_sym_db.RegisterMessage(Aircraft)
Waypoints = _reflection.GeneratedProtocolMessageType('Waypoints', (_message.Message,), dict(
DESCRIPTOR = _WAYPOINTS,
__module__ = 'mavProxyLink_pb2'
# @@protoc_insertion_point(class_scope:mav.Waypoints)
))
_sym_db.RegisterMessage(Waypoints)
Waypoint = _reflection.GeneratedProtocolMessageType('Waypoint', (_message.Message,), dict(
DESCRIPTOR = _WAYPOINT,
__module__ = 'mavProxyLink_pb2'
# @@protoc_insertion_point(class_scope:mav.Waypoint)
))
_sym_db.RegisterMessage(Waypoint)
AircraftLink = _reflection.GeneratedProtocolMessageType('AircraftLink', (_message.Message,), dict(
DESCRIPTOR = _AIRCRAFTLINK,
__module__ = 'mavProxyLink_pb2'
# @@protoc_insertion_point(class_scope:mav.AircraftLink)
))
_sym_db.RegisterMessage(AircraftLink)
# @@protoc_insertion_point(module_scope)
| gpl-3.0 |
DailyActie/Surrogate-Model | surrogate/sampling/samLatinHypercube.py | 1 | 8477 | # MIT License
#
# Copyright (c) 2016 Daily Actie
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Quan Pan <quanpan302@hotmail.com>
# License: MIT License
# Create: 2016-12-02
import numpy as np
def samLatinHypercube(n, samples=None, criterion=None, iterations=None):
    """Generate a latin-hypercube design.

    :param n: The number of factors to generate samples for
    :param samples: The number of samples to generate for each factor
        (Default: n)
    :param criterion: Allowable values are "center" or "c", "maximin" or
        "m", "centermaximin" or "cm", and "correlation"/"correlate" or
        "corr".  If no value is given, the design is simply randomized.
    :param iterations: The number of iterations in the maximin and
        correlation algorithms (Default: 5).
    :returns: An n-by-samples design matrix that has been normalized so
        factor values are uniformly spaced between zero and one.

    This code was originally published by the following individuals for use
    with Scilab:
        - Copyright (C) 2012 - 2013 - Michael Baudin
        - Copyright (C) 2012 - Maria Christopoulou
        - Copyright (C) 2010 - 2011 - INRIA - Michael Baudin
        - Copyright (C) 2009 - Yann Collette
        - Copyright (C) 2009 - CEA - Jean-Marc Martinez
        web: forge.scilab.org/index.php/p/scidoe/sourcetree/master/macros
    Much thanks goes to these individuals. It has been converted to Python
    by Abraham Lee.

    :Example:

    A 2-factor design with 5 centered samples:

    >>> samLatinHypercube(2, samples=5, criterion='center')
    array([[ 0.3, 0.5],
           [ 0.7, 0.9],
           [ 0.1, 0.3],
           [ 0.9, 0.1],
           [ 0.5, 0.7]])
    """
    if samples is None:
        samples = n

    # No criterion requested: plain randomized LHS (iterations unused on
    # this path, exactly as before).
    if criterion is None:
        return _lhsclassic(n, samples)

    # BUG FIX: the original validated "correlation" but dispatched on
    # "correlate", so criterion='correlation' passed the assert yet
    # matched no branch (silently returning None), while the docstring's
    # own 'correlate' example failed the assert.  Both spellings are now
    # accepted and dispatched.
    key = criterion.lower()
    assert key in ('center', 'c', 'maximin', 'm', 'centermaximin', 'cm',
                   'correlation', 'correlate',
                   'corr'), 'Invalid value for "criterion": {}'.format(criterion)

    if iterations is None:
        iterations = 5

    if key in ('center', 'c'):
        return _lhscentered(n, samples)
    if key in ('maximin', 'm'):
        return _lhsmaximin(n, samples, iterations, 'maximin')
    if key in ('centermaximin', 'cm'):
        return _lhsmaximin(n, samples, iterations, 'centermaximin')
    # Remaining accepted values: 'correlation', 'correlate', 'corr'.
    return _lhscorrelate(n, samples, iterations)
################################################################################
def _lhsclassic(n, samples):
# Generate the intervals
cut = np.linspace(0, 1, samples + 1)
# Fill points uniformly in each interval
u = np.random.rand(samples, n)
a = cut[:samples]
b = cut[1:samples + 1]
rdpoints = np.zeros_like(u)
for j in range(n):
rdpoints[:, j] = u[:, j] * (b - a) + a
# Make the random pairings
H = np.zeros_like(rdpoints)
for j in range(n):
order = np.random.permutation(range(samples))
H[:, j] = rdpoints[order, j]
return H
################################################################################
def _lhscentered(n, samples):
# Generate the intervals
cut = np.linspace(0, 1, samples + 1)
# Fill points uniformly in each interval
u = np.random.rand(samples, n)
a = cut[:samples]
b = cut[1:samples + 1]
_center = (a + b) / 2
# Make the random pairings
H = np.zeros_like(u)
for j in range(n):
H[:, j] = np.random.permutation(_center)
return H
################################################################################
def _lhsmaximin(n, samples, iterations, lhstype):
    """Best-of-`iterations` LHS under the maximin-distance criterion.

    `lhstype` selects the candidate generator: 'maximin' uses the
    randomized design, anything else the centered one.
    """
    generate = _lhsclassic if lhstype == 'maximin' else _lhscentered
    maxdist = 0
    for _ in range(iterations):
        candidate = generate(n, samples)
        mindist = np.min(_pdist(candidate))
        if maxdist < mindist:
            # Keep the candidate whose closest pair is farthest apart.
            maxdist = mindist
            H = candidate.copy()
    # NOTE(review): H is unbound if iterations < 1 (same as the original).
    return H
################################################################################
def _lhscorrelate(n, samples, iterations):
    """Best-of-`iterations` randomized LHS, minimising the maximum
    absolute off-diagonal correlation coefficient."""
    mincorr = np.inf
    for _ in range(iterations):
        candidate = _lhsclassic(n, samples)
        # NOTE(review): np.corrcoef treats *rows* as variables, so with a
        # (samples x n) input this correlates sample rows rather than the
        # n factors — confirm intent (the pyDOE upstream does the same).
        R = np.corrcoef(candidate)
        if np.max(np.abs(R[R != 1])) < mincorr:
            mincorr = np.max(np.abs(R - np.eye(R.shape[0])))
            print('new candidate solution found with max,abs corrcoef = {}'.format(mincorr))
            H = candidate.copy()
    return H
################################################################################
def _pdist(x):
"""Calculate the pair-wise point distances of a matrix
:param x: An m-by-n array of scalars, where there are m points in n dimensions.
:type x: 2d-array
:returns: d array
A 1-by-b array of scalars, where b = m*(m - 1)/2. This array contains
all the pair-wise point distances, arranged in the order (1, 0),
(2, 0), ..., (m-1, 0), (2, 1), ..., (m-1, 1), ..., (m-1, m-2).
:Example:
>>> x = np.array([[0.1629447, 0.8616334],
... [0.5811584, 0.3826752],
... [0.2270954, 0.4442068],
... [0.7670017, 0.7264718],
... [0.8253975, 0.1937736]])
>>> _pdist(x)
array([ 0.6358488, 0.4223272, 0.6189940, 0.9406808, 0.3593699,
0.3908118, 0.3087661, 0.6092392, 0.6486001, 0.5358894])
"""
x = np.atleast_2d(x)
assert len(x.shape) == 2, 'Input array must be 2d-dimensional'
m, n = x.shape
if m < 2:
return []
d = []
for i in range(m - 1):
for j in range(i + 1, m):
d.append((sum((x[j, :] - x[i, :]) ** 2)) ** 0.5)
return np.array(d)
| mit |
pyannote/pyannote-parser | tests/test_repere.py | 1 | 2075 | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014-2015 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
from __future__ import print_function
import pytest
from pyannote.core import Segment
from pyannote.parser import REPEREParser
import tempfile
import os
SAMPLE_ANNOTATION = """uri1 1.0 3.5 speech alice
uri1 3.0 7.5 speech barbara
uri1 6.0 9.0 speech chris
"""
@pytest.fixture
def sample_annotation(request):
    """Write SAMPLE_ANNOTATION to a temporary file, removed on teardown.

    :param request: pytest fixture request, used to register the cleanup
    :return: path of the temporary annotation file
    """
    # BUG FIX: mkstemp() returns an *open* OS-level file descriptor along
    # with the path; the original dropped it, leaking one fd per test.
    fd, filename = tempfile.mkstemp()
    os.close(fd)
    with open(filename, 'w') as f:
        f.write(SAMPLE_ANNOTATION)

    def delete():
        os.remove(filename)
    request.addfinalizer(delete)

    return filename
def test_load_annotation(sample_annotation):
    """Parsing the sample file yields the three expected speech turns."""
    annotations = REPEREParser().read(sample_annotation)
    speech = annotations(uri="uri1", modality="speech")
    expected = [
        (Segment(1, 3.5), 0, 'alice'),
        (Segment(3, 7.5), 1, 'barbara'),
        (Segment(6, 9), 2, 'chris'),
    ]
    assert list(speech.itertracks(label=True)) == expected
| mit |
turiphro/dockerfiles | devbox/etc/.vim/bundle/jedi-vim/conftest.py | 4 | 1687 | import os
import urllib
import zipfile
import subprocess
CACHE_FOLDER = '.cache'
VSPEC_FOLDER = os.path.join(CACHE_FOLDER, 'vim-vspec-master')
VSPEC_RUNNER = os.path.join(VSPEC_FOLDER, 'bin/vspec')
TEST_DIR = 'test'
class IntegrationTestFile(object):
    """A single vim-vspec test file that can be executed via vspec."""

    def __init__(self, path):
        self.path = path

    def run(self):
        """Run the file through vspec; fail on any 'not ok'/'Error' line."""
        cmd = [VSPEC_RUNNER, '.', VSPEC_FOLDER, self.path]
        output = subprocess.check_output(cmd)
        bad_prefixes = (b'not ok', b'Error')
        for line in output.splitlines():
            if line.startswith(bad_prefixes):
                # Dump the whole vspec output for diagnosis, then fail.
                print(output)
                assert False

    def __repr__(self):
        return "<%s: %s>" % (type(self), self.path)
def pytest_configure(config):
    """Download and unpack vim-vspec into .cache/ before the test session.

    :param config: pytest's Config object (unused here)
    """
    if not os.path.isdir(CACHE_FOLDER):
        os.mkdir(CACHE_FOLDER)
    if not os.path.exists(VSPEC_FOLDER):
        url = 'https://github.com/kana/vim-vspec/archive/master.zip'
        # NOTE(review): urllib.urlretrieve is the Python 2 API (moved to
        # urllib.request in Python 3), so this file targets Python 2.
        name, hdrs = urllib.urlretrieve(url)
        z = zipfile.ZipFile(name)
        for n in z.namelist():
            dest = os.path.join(CACHE_FOLDER, n)
            destdir = os.path.dirname(dest)
            if not os.path.isdir(destdir):
                os.makedirs(destdir)
            data = z.read(n)
            # Skip entries whose path already exists as a directory
            # (e.g. folder entries in the archive); write the rest.
            if not os.path.isdir(dest):
                with open(dest, 'w') as f:
                    f.write(data)
        z.close()
        # Make the vspec runner script executable for the test runs.
        os.chmod(VSPEC_RUNNER, 0o777)
def pytest_generate_tests(metafunc):
    """Parametrize each test with every '*.vim' file found in TEST_DIR.

    :type metafunc: _pytest.python.Metafunc
    """
    cases = [
        IntegrationTestFile(os.path.join(TEST_DIR, name))
        for name in os.listdir(TEST_DIR)
        if name.endswith('.vim')
    ]
    metafunc.parametrize('case', cases)
| mit |
Rdbaker/Mealbound | tests/models/test_transactions.py | 1 | 4543 | """Test the Transaction models."""
from unittest.mock import patch
import pytest
from ceraon.models.transactions import Transaction
@pytest.mark.usefixtures('db')
class TestTransaction:
    """Transaction tests.

    The ``stripe`` module is patched where it is *used*
    (``ceraon.models.transactions.stripe``), so no test here talks to the
    real Stripe API.  ``meal``/``host``/``guest``/``user``/``transaction``
    are pytest fixtures supplied by the suite's conftest.
    """

    def test_get_by_id(self, meal, host, guest):
        """Get Transaction by id."""
        transaction = Transaction(payer=guest, amount=meal.price, payee=host,
                                  meal=meal)
        transaction.save()

        retrieved = Transaction.find(transaction.id)
        assert retrieved == transaction

    @patch('ceraon.models.transactions.stripe')
    def test_charge_returns_true_without_error(self, stripe_mock, transaction):
        """Test that charge() returns True if no stripe error is raised."""
        assert transaction.charge() is True

    @patch('ceraon.models.transactions.stripe')
    def test_successful_charge_sets_property(self, stripe_mock, transaction):
        """Test that charge() sets transaction_went_through to True."""
        transaction.charge()
        assert transaction.transaction_went_through is True

    @patch('ceraon.models.transactions.stripe')
    def test_failed_charge_returns_false(self, stripe_mock, transaction):
        """Test that charge() returns false if stripe throws an error."""
        # Any exception from the stripe client should be swallowed by
        # charge() and reported as a plain False.
        stripe_mock.Charge.create.side_effect = RuntimeError('failed charge')
        assert transaction.charge() is False

    @patch('ceraon.models.transactions.stripe')
    def test_failed_charge_doesnt_set_attribute(self, stripe_mock, transaction):
        """Test that a failed charge() doesn't set transaction_went_through."""
        stripe_mock.Charge.create.side_effect = RuntimeError('failed charge')
        transaction.charge()
        assert transaction.transaction_went_through is False

    def test_cancel_sets_canceled(self, transaction):
        """Test that calling cancel() sets the canceled property."""
        transaction.cancel()
        assert transaction.canceled is True

    @patch('ceraon.models.transactions.stripe')
    def test_set_stripe_source_on_user_no_stripe_id(self, stripe_mock, user):
        """Test that setting the stripe customer ID works."""
        customer_id = 'this is the stripe customer id'
        stripe_mock.Customer.create.return_value.id = customer_id
        Transaction.set_stripe_source_on_user(user=user, token='some token')
        assert user.stripe_customer_id == customer_id

    @patch('ceraon.models.transactions.stripe')
    def test_set_stripe_source_on_user_returns_true(self, stripe_mock, user):
        """Test that setting the stripe customer ID returns True."""
        customer_id = 'this is the stripe customer id'
        stripe_mock.Customer.create.return_value.id = customer_id
        assert Transaction.set_stripe_source_on_user(
            user=user, token='some token') is True

    @patch('ceraon.models.transactions.stripe')
    def test_set_stripe_source_on_user_existing_id(self, stripe_mock, user):
        """Test that resetting the stripe customer ID works."""
        customer_id = 'this is the stripe customer id'
        assert user.stripe_customer_id is None
        user.stripe_customer_id = customer_id
        assert Transaction.set_stripe_source_on_user(
            user=user, token='some token') is True
        # An existing customer must be fetched, not created from scratch.
        stripe_mock.Customer.retrieve.assert_called_once()

    @patch('ceraon.models.transactions.stripe')
    def test_set_stripe_source_on_user_fail(self, stripe_mock, user):
        """Test that a stripe failure returns false."""
        stripe_mock.Customer.create.side_effect = RuntimeError('stripe error')
        assert Transaction.set_stripe_source_on_user(
            user=user, token='some token') is False

    # Cases suggest cut = max(10% of amount, 0.50) — inferred from the
    # pairs below; confirm against the model property.
    @pytest.mark.parametrize('amount,expected', [
        (5.00, 0.5),
        (5.05, 0.505),
        (4.00, 0.5),
        (90.00, 9),
        (42.10, 4.21),
        (2.50, 0.5)
    ])
    def test_operational_overhead_cut(self, transaction, amount, expected):
        """Test that the operational_overhead_cut is as expected."""
        transaction.amount = amount
        assert transaction.operational_overhead_cut == expected

    # Cases suggest takehome = amount - overhead cut (see pairs below).
    @pytest.mark.parametrize('amount,expected', [
        (5.00, 4.5),
        (5.05, 4.545),
        (4.00, 3.5),
        (90.00, 81),
        (42.10, 37.89),
        (2.50, 2)
    ])
    def test_takehome_amount(self, transaction, amount, expected):
        """Test that the takehome_amount is as expected."""
        transaction.amount = amount
        assert transaction.takehome_amount == expected
| bsd-3-clause |
JamieFBousfield/heekscnc | nc/num_reader.py | 30 | 1811 | import nc_read as nc
import sys
import math
# a base class for hpgl parsers, and maybe others
class NumReader(nc.Parser):
    """Base class for numeric NC parsers (hpgl and maybe others).

    Scans ``self.line`` one character at a time, accumulating the raw
    text in ``self.parse_word`` so it can be echoed back to the writer.
    """

    def __init__(self, writer):
        nc.Parser.__init__(self, writer)

    def get_number(self):
        """Consume and return the next number in the current line.

        Leading spaces/commas are skipped (but still recorded in
        ``parse_word``); returns '' when no numeric characters follow.
        """
        # Skip the run of separators in front of the number.
        while self.line_index < self.line_length:
            c = self.line[self.line_index]
            if c not in ' ,':
                break
            self.parse_word += c
            self.line_index += 1

        # Collect sign, digits and decimal point.
        digits = []
        while self.line_index < self.line_length:
            c = self.line[self.line_index]
            if c not in '.0123456789-':
                break
            digits.append(c)
            self.parse_word += c
            self.line_index += 1
        return ''.join(digits)

    def add_word(self, color):
        """Flush the accumulated text to the writer with the given color."""
        self.writer.add_text(self.parse_word, color, None)
        self.parse_word = ""

    def Parse(self, name):
        """Parse the file *name* line by line, dispatching per character."""
        self.file_in = open(name, 'r')
        while self.readline():
            self.writer.begin_ncblock()
            self.parse_word = ""
            self.line_index = 0
            self.line_length = len(self.line)
            while self.line_index < self.line_length:
                c = self.line[self.line_index]
                self.parse_word += c
                self.ParseFromFirstLetter(c)
                self.line_index += 1
            self.writer.add_text(self.parse_word, None, None)
            self.writer.end_ncblock()
        self.file_in.close()
| bsd-3-clause |
xkmato/casepro | casepro/urls.py | 1 | 1472 | from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.views import static
from casepro.backend import get_backend
from casepro.utils.views import PartialTemplate
# Application routes; empty prefixes rely on each app's own patterns.
urlpatterns = [
    url(r'', include('casepro.cases.urls')),
    url(r'', include('casepro.contacts.urls')),
    url(r'', include('casepro.msg_board.urls')),
    url(r'', include('casepro.msgs.urls')),
    url(r'', include('casepro.rules.urls')),
    url(r'', include('casepro.profiles.urls')),
    url(r'', include('casepro.orgs_ext.urls')),
    url(r'^pods/', include('casepro.pods.urls')),
    url(r'^stats/', include('casepro.statistics.urls')),
    url(r'^users/', include('dash.users.urls')),
    url(r'^i18n/', include('django.conf.urls.i18n')),
    url(r'^comments/', include('django_comments.urls')),
    url(r'^partials/(?P<template>[a-z0-9\-_]+)\.html$', PartialTemplate.as_view(), name='utils.partial_template')
]

# The configured backend may contribute extra URL patterns (or None).
backend_urls = get_backend().get_url_patterns() or []
urlpatterns += backend_urls

if settings.DEBUG:  # pragma: no cover
    try:
        # debug_toolbar is an optional, development-only dependency.
        import debug_toolbar
        urlpatterns.append(url(r'^__debug__/', include(debug_toolbar.urls)))
    except ImportError:
        pass

# Prepend media/static handlers so they match before the app patterns.
urlpatterns = [
    url(r'^media/(?P<path>.*)$', static.serve, {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
    url(r'', include('django.contrib.staticfiles.urls'))
] + urlpatterns
| bsd-3-clause |
cblecker/test-infra | scenarios/kubernetes_build.py | 11 | 6144 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Need to figure out why this only fails on travis
# pylint: disable=bad-continuation
"""Builds kubernetes with specified config"""
import argparse
import os
import re
import subprocess
import sys
def check(*cmd):
    """Log and run the command, raising on errors.

    :param cmd: argv tuple passed directly (no shell) to subprocess.
    """
    # Python 2 "print chevron" syntax: log the command to stderr.
    print >>sys.stderr, 'Run:', cmd
    subprocess.check_call(cmd)
def check_no_stdout(*cmd):
    """Log and run the command, suppress stdout & stderr, raising on errors.

    :param cmd: argv tuple passed directly (no shell) to subprocess.
    """
    print >>sys.stderr, 'Run:', cmd
    # BUG FIX: use a context manager so the /dev/null handle is closed
    # even when check_call raises (the original leaked one fd per call).
    with open(os.devnull, 'w') as null:
        subprocess.check_call(cmd, stdout=null, stderr=null)
def check_output(*cmd):
    """Log and run the command, raising on errors, return output.

    stderr is folded into the returned stdout so callers see one stream.
    """
    print >>sys.stderr, 'Run:', cmd
    return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def check_build_exists(gcs, suffix, fast):
    """Check if a k8s build with the same version already exists remotely.

    :param gcs: GCS bucket name; defaults to kubernetes-release-dev
    :param suffix: optional suffix appended to the ci/ upload path
    :param fast: True when checking the quick-release ("fast") layout
    :return: True only when the version dir and its key artifacts exist
    """
    if not os.path.exists('hack/print-workspace-status.sh'):
        print >>sys.stderr, 'hack/print-workspace-status.sh not found, continue'
        return False

    version = ''
    try:
        # The workspace-status output contains a "gitVersion <ver>" line.
        match = re.search(
            r'gitVersion ([^\n]+)',
            check_output('hack/print-workspace-status.sh')
        )
        if match:
            version = match.group(1)
    except subprocess.CalledProcessError as exc:
        # fallback with doing a real build
        print >>sys.stderr, 'Failed to get k8s version, continue: %s' % exc
        return False

    if version:
        if not gcs:
            gcs = 'kubernetes-release-dev'
        gcs = 'gs://' + gcs
        mode = 'ci'
        if fast:
            mode += '/fast'
        if suffix:
            mode += suffix
        gcs = os.path.join(gcs, mode, version)

        try:
            # A build "exists" only if the version dir, the tarball and
            # the bin/ dir are all present in GCS.
            check_no_stdout('gsutil', 'ls', gcs)
            check_no_stdout('gsutil', 'ls', gcs + "/kubernetes.tar.gz")
            check_no_stdout('gsutil', 'ls', gcs + "/bin")
            return True
        except subprocess.CalledProcessError as exc:
            print >>sys.stderr, (
                'gcs path %s (or some files under it) does not exist yet, continue' % gcs)

    return False
def main(args):
    # pylint: disable=too-many-branches
    """Build and push kubernetes.

    This is a python port of the kubernetes/hack/jenkins/build.sh script.

    :param args: parsed argparse namespace; see the parser definition at
        the bottom of this file for the available flags.
    """
    if os.path.split(os.getcwd())[-1] != 'kubernetes':
        print >>sys.stderr, (
            'Scenario should only run from either kubernetes directory!')
        sys.exit(1)
    # pre-check if target build exists in gcs bucket or not
    # if so, don't make duplicated builds
    if check_build_exists(args.release, args.suffix, args.fast):
        print >>sys.stderr, 'build already exists, exit'
        sys.exit(0)
    env = {
        # Skip gcloud update checking; do we still need this?
        'CLOUDSDK_COMPONENT_MANAGER_DISABLE_UPDATE_CHECK': 'true',
        # Don't run any unit/integration tests when building
        'KUBE_RELEASE_RUN_TESTS': 'n',
    }
    # Translate the parsed flags into push-build.sh arguments.
    push_build_args = ['--nomock', '--verbose', '--ci']
    if args.suffix:
        push_build_args.append('--gcs-suffix=%s' % args.suffix)
    if args.release:
        push_build_args.append('--bucket=%s' % args.release)
    if args.registry:
        push_build_args.append('--docker-registry=%s' % args.registry)
    if args.extra_publish_file:
        push_build_args.append('--extra-publish-file=%s' % args.extra_publish_file)
    if args.extra_version_markers:
        push_build_args.append('--extra-version-markers=%s' % args.extra_version_markers)
    if args.fast:
        push_build_args.append('--fast')
    if args.allow_dup:
        push_build_args.append('--allow-dup')
    if args.skip_update_latest:
        push_build_args.append('--noupdatelatest')
    if args.register_gcloud_helper:
        # Configure docker client for gcr.io authentication to allow communication
        # with non-public registries.
        check_no_stdout('gcloud', 'auth', 'configure-docker')
    # Export the build environment before invoking make.
    for key, value in env.items():
        os.environ[key] = value
    check('make', 'clean')
    if args.fast:
        check('make', 'quick-release')
    else:
        check('make', 'release')
    output = check_output(args.push_build_script, *push_build_args)
    print >>sys.stderr, 'Push build result: ', output
if __name__ == '__main__':
    # Flags mirror the knobs of release/push-build.sh; consumed by main().
    PARSER = argparse.ArgumentParser(
        'Build and push.')
    PARSER.add_argument(
        '--release', help='Upload binaries to the specified gs:// path')
    PARSER.add_argument(
        '--suffix', help='Append suffix to the upload path if set')
    PARSER.add_argument(
        '--registry', help='Push images to the specified docker registry')
    PARSER.add_argument(
        '--extra-publish-file', help='Additional version file uploads to')
    PARSER.add_argument(
        '--extra-version-markers', help='Additional version file uploads to')
    PARSER.add_argument(
        '--fast', action='store_true', help='Specifies a fast build')
    PARSER.add_argument(
        '--allow-dup', action='store_true', help='Allow overwriting if the build exists on gcs')
    PARSER.add_argument(
        '--skip-update-latest', action='store_true', help='Do not update the latest file')
    PARSER.add_argument(
        '--push-build-script', default='../release/push-build.sh', help='location of push-build.sh')
    PARSER.add_argument(
        '--register-gcloud-helper', action='store_true',
        help='Register gcloud as docker credentials helper')
    ARGS = PARSER.parse_args()
    main(ARGS)
| apache-2.0 |
shabab12/edx-platform | openedx/core/djangoapps/course_groups/partition_scheme.py | 105 | 4510 | """
Provides a UserPartition driver for cohorts.
"""
import logging
from courseware.masquerade import ( # pylint: disable=import-error
get_course_masquerade,
get_masquerading_group_info,
is_masquerading_as_specific_student,
)
from xmodule.partitions.partitions import NoSuchUserPartitionGroupError
from .cohorts import get_cohort, get_group_info_for_cohort
log = logging.getLogger(__name__)
class CohortPartitionScheme(object):
    """
    This scheme uses lms cohorts (CourseUserGroups) and cohort-partition
    mappings (CourseUserGroupPartitionGroup) to map lms users into Partition
    Groups.
    """

    # pylint: disable=unused-argument
    @classmethod
    def get_group_for_user(cls, course_key, user, user_partition, track_function=None, use_cached=True):
        """
        Returns the Group from the specified user partition to which the user
        is assigned, via their cohort membership and any mappings from cohorts
        to partitions / groups that might exist.

        If the user has not yet been assigned to a cohort, an assignment *might*
        be created on-the-fly, as determined by the course's cohort config.
        Any such side-effects will be triggered inside the call to
        cohorts.get_cohort().

        If the user has no cohort mapping, or there is no (valid) cohort ->
        partition group mapping found, the function returns None.

        Arguments:
            course_key: key of the course being queried.
            user: the user whose group is looked up.
            user_partition: the UserPartition whose Group should be returned.
            track_function: unused; kept for scheme-interface compatibility.
            use_cached: passed through to cohort lookups to allow cache bypass.
        """
        # First, check if we have to deal with masquerading.
        # If the current user is masquerading as a specific student, use the
        # same logic as normal to return that student's group. If the current
        # user is masquerading as a generic student in a specific group, then
        # return that group.
        if get_course_masquerade(user, course_key) and not is_masquerading_as_specific_student(user, course_key):
            group_id, user_partition_id = get_masquerading_group_info(user, course_key)
            if user_partition_id == user_partition.id and group_id is not None:
                try:
                    return user_partition.get_group(group_id)
                except NoSuchUserPartitionGroupError:
                    return None
            # The user is masquerading as a generic student. We can't show any particular group.
            return None

        cohort = get_cohort(user, course_key, use_cached=use_cached)
        if cohort is None:
            # student doesn't have a cohort
            return None

        group_id, partition_id = get_group_info_for_cohort(cohort, use_cached=use_cached)
        if partition_id is None:
            # cohort isn't mapped to any partition group.
            return None

        if partition_id != user_partition.id:
            # if we have a match but the partition doesn't match the requested
            # one it means the mapping is invalid. the previous state of the
            # partition configuration may have been modified.
            # (log.warning replaces the deprecated log.warn alias.)
            log.warning(
                "partition mismatch in CohortPartitionScheme: %r",
                {
                    "requested_partition_id": user_partition.id,
                    "found_partition_id": partition_id,
                    "found_group_id": group_id,
                    "cohort_id": cohort.id,
                }
            )
            # fail silently
            return None

        try:
            return user_partition.get_group(group_id)
        except NoSuchUserPartitionGroupError:
            # if we have a match but the group doesn't exist in the partition,
            # it means the mapping is invalid. the previous state of the
            # partition configuration may have been modified.
            log.warning(
                "group not found in CohortPartitionScheme: %r",
                {
                    "requested_partition_id": user_partition.id,
                    "requested_group_id": group_id,
                    "cohort_id": cohort.id,
                },
                exc_info=True
            )
            # fail silently
            return None
def get_cohorted_user_partition(course):
    """
    Returns the first user partition from the specified course which uses the CohortPartitionScheme,
    or None if one is not found. Note that it is currently recommended that each course have only
    one cohorted user partition.
    """
    cohorted = (partition for partition in course.user_partitions
                if partition.scheme == CohortPartitionScheme)
    return next(cohorted, None)
| agpl-3.0 |
wangkua1/sportvu | sportvu/detection_from_raw_pred.py | 1 | 3391 | """detection_from_raw_pred.py
* not super useful, a simple script that plots a) raw pred, b) gt pnr, c) detector output
at 1 single setting
Usage:
detection_from_raw_pred.py <fold_index> <f_data_config> <f_model_config> <f_detect_config> --train
Arguments:
Example:
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sys
import os
from tqdm import tqdm
from docopt import docopt
import yaml
import gc
import matplotlib.pylab as plt
import cPickle as pkl
##
from sportvu.data.dataset import BaseDataset
from sportvu.detect.running_window_p import RunWindowP
from sportvu.detect.nms import NMS
from sportvu.detect.utils import smooth_1D_array
# Parse the CLI according to the usage spec in the module docstring.
arguments = docopt(__doc__)
print ("...Docopt... ")
print(arguments)
print ("............\n")

# Paths to the three YAML config files driving this run.
f_data_config = arguments['<f_data_config>']
f_model_config = arguments['<f_model_config>']
f_detect_config = arguments['<f_detect_config>']

if arguments['--train']:
    # load_raw=True so that dataset.games[...] event metadata is available
    # later when recording hard-negative annotations.
    dataset = BaseDataset(f_data_config, fold_index=int(arguments['<fold_index>']), load_raw=True)
# pre_trained = arguments['<pre_trained>']
# NOTE(review): yaml.load without an explicit Loader= executes arbitrary tags —
# fine only for trusted, local config files.
data_config = yaml.load(open(f_data_config, 'rb'))
model_config = yaml.load(open(f_model_config, 'rb'))
# Experiment name combines the model and data config basenames, e.g. "m-X-d".
model_name = os.path.basename(f_model_config).split('.')[0]
data_name = os.path.basename(f_data_config).split('.')[0]
exp_name = '%s-X-%s' % (model_name, data_name)
detect_config = yaml.load(open(f_detect_config, 'rb'))
# Instantiate the detector class named in the config (e.g. RunWindowP or NMS,
# imported above). NOTE(review): eval() on a config-supplied string — trusted
# config files only.
detector = eval(detect_config['class'])(detect_config)

# Raw predictions must already exist under ./plots/<exp_name> (written by test.py).
plot_folder = os.path.join('./plots', exp_name)
if not os.path.exists(plot_folder):
    raise Exception('Run test.py first to get raw predictions')
def label_in_cand(cand, labels):
    """Return True if any label timestamp lies strictly inside the candidate window.

    cand is treated as a pair with cand[0] the upper bound and cand[1] the lower
    bound (presumably (start, end) gameclock values, since the gameclock counts
    down); a label l matches when cand[1] < l < cand[0].
    """
    return any(cand[1] < l < cand[0] for l in labels)
plt.figure()
# Select which split's raw-prediction pickles to load.
if arguments['--train']:
    split = 'train'
else:
    split = 'val'

# All per-event prediction pickles for this split (metadata files excluded).
all_pred_f = filter(lambda s: '.pkl' in s and split in s
                    and 'meta' not in s, os.listdir(os.path.join(plot_folder, 'pkl')))
if arguments['--train']:
    annotations = []
for _, f in tqdm(enumerate(all_pred_f)):
    # File names look like "<split>-<index>.pkl"; recover the event index.
    ind = int(f.split('.')[0].split('-')[1])
    gameclocks, pnr_probs, labels = pkl.load(open(os.path.join(plot_folder, 'pkl/%s-%i.pkl' % (split, ind)), 'rb'))
    meta = pkl.load(open(
        os.path.join(plot_folder, 'pkl/%s-meta-%i.pkl' % (split, ind)), 'rb'))
    # Run the detector on the raw probabilities; returns candidate windows, an
    # optional post-processed curve, and the frame index of each candidate.
    cands, mp, frame_indices = detector.detect(pnr_probs, gameclocks, True)
    print (cands)
    plt.plot(gameclocks, pnr_probs, '-')  # a) raw prediction curve
    if mp is not None:
        plt.plot(gameclocks, mp, '-')  # detector's processed curve, when produced
    plt.plot(np.array(labels), np.ones((len(labels))), '.')  # b) ground-truth PNRs at y=1
    # NOTE(review): this inner loop rebinds `ind`, shadowing the event index
    # parsed above — the savefig call below then uses the last *candidate*
    # index whenever cands is non-empty. Looks unintended; confirm file naming.
    for ind, cand in enumerate(cands):
        cand_x = np.arange(cand[1], cand[0], .1)
        plt.plot(cand_x, np.ones((len(cand_x))) * .95, '-')  # c) detected windows at y=.95
        ## if FP, record annotations
        if arguments['--train'] and not label_in_cand(cand, labels):
            anno = {'gameid': meta[1], 'gameclock': gameclocks[frame_indices[ind]],
                    'eid': meta[0], 'quarter': dataset.games[meta[1]]['events'][meta[0]]['quarter']}
            annotations.append(anno)
    plt.ylim([0, 1])
    plt.title('Game: %s, Event: %i' % (meta[1], meta[0]))
    plt.savefig(os.path.join(plot_folder, '%s-%s-%i.png' % (detect_config['class'], split, ind)))
    plt.clf()
# NOTE(review): `annotations` is only bound when --train is set, so a
# validation run raises NameError here — confirm whether this dump should be
# guarded by the same flag.
pkl.dump(annotations, open(os.path.join(plot_folder, 'pkl/hard-negative-examples.pkl'), 'wb'))
ptisserand/ansible | lib/ansible/modules/cloud/amazon/cloudfront_distribution.py | 1 | 85955 | #!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudfront_distribution
short_description: create, update and delete aws cloudfront distributions.
description:
- Allows for easy creation, updating and deletion of CloudFront distributions.
requirements:
- boto3 >= 1.0.0
- python >= 2.6
version_added: "2.5"
author:
- Willem van Ketwich (@wilvk)
- Will Thames (@willthames)
extends_documentation_fragment:
- aws
- ec2
options:
state:
description:
- The desired state of the distribution
present - creates a new distribution or updates an existing distribution.
absent - deletes an existing distribution.
choices: ['present', 'absent']
default: 'present'
distribution_id:
description:
- The id of the cloudfront distribution. This parameter can be exchanged with I(alias) or I(caller_reference) and is used in conjunction with I(e_tag).
e_tag:
description:
- A unique identifier of a modified or existing distribution. Used in conjunction with I(distribution_id).
Is determined automatically if not specified.
caller_reference:
description:
- A unique identifier for creating and updating cloudfront distributions. Each caller reference must be unique across all distributions. e.g. a caller
reference used in a web distribution cannot be reused in a streaming distribution. This parameter can be used instead of I(distribution_id)
to reference an existing distribution. If not specified, this defaults to a datetime stamp of the format
'YYYY-MM-DDTHH:MM:SS.ffffff'.
tags:
description:
- Should be input as a dict() of key-value pairs.
Note that numeric keys or values must be wrapped in quotes. e.g. "Priority:" '1'
purge_tags:
description:
- Specifies whether existing tags will be removed before adding new tags. When I(purge_tags=yes), existing tags are removed and I(tags) are added, if
specified. If no tags are specified, it removes all existing tags for the distribution. When I(purge_tags=no), existing tags are kept and I(tags)
are added, if specified.
default: 'no'
choices: ['yes', 'no']
alias:
description:
- The name of an alias (CNAME) that is used in a distribution. This is used to effectively reference a distribution by its alias as an alias can only
be used by one distribution per AWS account. This variable avoids having to provide the I(distribution_id) as well as
the I(e_tag), or I(caller_reference) of an existing distribution.
aliases:
description:
- A I(list[]) of domain name aliases (CNAMEs) as strings to be used for the distribution. Each alias must be unique across all distribution for the AWS
account.
purge_aliases:
description:
- Specifies whether existing aliases will be removed before adding new aliases. When I(purge_aliases=yes), existing aliases are removed and I(aliases)
are added.
default: 'no'
choices: ['yes', 'no']
default_root_object:
description:
- A config element that specifies the path to request when the user requests the origin. e.g. if specified as 'index.html', this maps to
www.example.com/index.html when www.example.com is called by the user. This prevents the entire distribution origin from being exposed at the root.
default_origin_domain_name:
description:
- The domain name to use for an origin if no I(origins) have been specified. Should only be used on a first run of generating a distribution and not on
subsequent runs. Should not be used in conjunction with I(distribution_id), I(caller_reference) or I(alias).
default_origin_path:
description:
- The default origin path to specify for an origin if no I(origins) have been specified. Defaults to empty if not specified.
origins:
description:
- A config element that is a I(list[]) of complex origin objects to be specified for the distribution. Used for creating and updating distributions.
Each origin item comprises the attributes
I(id)
I(domain_name) (defaults to default_origin_domain_name if not specified)
I(origin_path) (defaults to default_origin_path if not specified)
I(custom_headers[])
I(header_name)
I(header_value)
I(s3_origin_access_identity_enabled)
I(custom_origin_config)
I(http_port)
I(https_port)
I(origin_protocol_policy)
I(origin_ssl_protocols[])
I(origin_read_timeout)
I(origin_keepalive_timeout)
purge_origins:
description: Whether to remove any origins that aren't listed in I(origins)
default: false
default_cache_behavior:
description:
- A config element that is a complex object specifying the default cache behavior of the distribution. If not specified, the I(target_origin_id) is
defined as the I(target_origin_id) of the first valid I(cache_behavior) in I(cache_behaviors) with defaults.
The default cache behavior comprises the attributes
I(target_origin_id)
I(forwarded_values)
I(query_string)
I(cookies)
I(forward)
I(whitelisted_names)
I(headers[])
I(query_string_cache_keys[])
I(trusted_signers)
I(enabled)
I(items[])
I(viewer_protocol_policy)
I(min_ttl)
I(allowed_methods)
I(items[])
I(cached_methods[])
I(smooth_streaming)
I(default_ttl)
I(max_ttl)
I(compress)
I(lambda_function_associations[])
I(lambda_function_arn)
I(event_type)
cache_behaviors:
description:
- A config element that is a I(list[]) of complex cache behavior objects to be specified for the distribution. The order
of the list is preserved across runs unless C(purge_cache_behavior) is enabled.
Each cache behavior comprises the attributes
I(path_pattern)
I(target_origin_id)
I(forwarded_values)
I(query_string)
I(cookies)
I(forward)
I(whitelisted_names)
I(headers[])
I(query_string_cache_keys[])
I(trusted_signers)
I(enabled)
I(items[])
I(viewer_protocol_policy)
I(min_ttl)
I(allowed_methods)
I(items[])
I(cached_methods[])
I(smooth_streaming)
I(default_ttl)
I(max_ttl)
I(compress)
I(lambda_function_associations[])
purge_cache_behaviors:
description: Whether to remove any cache behaviors that aren't listed in I(cache_behaviors). This switch
also allows the reordering of cache_behaviors.
default: false
custom_error_responses:
description:
- A config element that is a I(list[]) of complex custom error responses to be specified for the distribution. This attribute configures custom http
error messages returned to the user.
Each custom error response object comprises the attributes
I(error_code)
I(response_page_path)
I(response_code)
I(error_caching_min_ttl)
purge_custom_error_responses:
description: Whether to remove any custom error responses that aren't listed in I(custom_error_responses)
default: false
comment:
description:
- A comment that describes the cloudfront distribution. If not specified, it defaults to a
generic message that it has been created with Ansible, and a datetime stamp.
logging:
description:
- A config element that is a complex object that defines logging for the distribution.
The logging object comprises the attributes
I(enabled)
I(include_cookies)
I(bucket)
I(prefix)
price_class:
description:
- A string that specifies the pricing class of the distribution. As per
U(https://aws.amazon.com/cloudfront/pricing/)
I(price_class=PriceClass_100) consists of the areas
United States
Canada
Europe
I(price_class=PriceClass_200) consists of the areas
United States
Canada
Europe
Hong Kong, Philippines, S. Korea, Singapore & Taiwan
Japan
India
I(price_class=PriceClass_All) consists of the areas
United States
Canada
Europe
Hong Kong, Philippines, S. Korea, Singapore & Taiwan
Japan
India
South America
Australia
choices: ['PriceClass_100', 'PriceClass_200', 'PriceClass_All']
default: aws defaults this to 'PriceClass_All'
enabled:
description:
- A boolean value that specifies whether the distribution is enabled or disabled.
default: 'yes'
choices: ['yes', 'no']
viewer_certificate:
description:
- A config element that is a complex object that specifies the encryption details of the distribution.
Comprises the following attributes
I(cloudfront_default_certificate)
I(iam_certificate_id)
I(acm_certificate_arn)
I(ssl_support_method)
I(minimum_protocol_version)
I(certificate)
I(certificate_source)
restrictions:
description:
- A config element that is a complex object that describes how a distribution should restrict it's content.
The restriction object comprises the following attributes
I(geo_restriction)
I(restriction_type)
I(items[])
web_acl_id:
description:
- The id of a Web Application Firewall (WAF) Access Control List (ACL).
http_version:
description:
- The version of the http protocol to use for the distribution.
choices: [ 'http1.1', 'http2' ]
default: aws defaults this to 'http2'
ipv6_enabled:
description:
- Determines whether IPv6 support is enabled or not.
choices: ['yes', 'no']
default: 'no'
wait:
description:
- Specifies whether the module waits until the distribution has completed processing the creation or update.
choices: ['yes', 'no']
default: 'no'
wait_timeout:
description:
- Specifies the duration in seconds to wait for a timeout of a cloudfront create or update. Defaults to 1800 seconds (30 minutes).
default: 1800
'''
EXAMPLES = '''
# create a basic distribution with defaults and tags
- cloudfront_distribution:
state: present
default_origin_domain_name: www.my-cloudfront-origin.com
tags:
Name: example distribution
Project: example project
Priority: '1'
# update a distribution comment by distribution_id
- cloudfront_distribution:
state: present
distribution_id: E1RP5A2MJ8073O
comment: modified by ansible cloudfront.py
# update a distribution comment by caller_reference
- cloudfront_distribution:
state: present
caller_reference: my cloudfront distribution 001
comment: modified by ansible cloudfront.py
# update a distribution's aliases and comment using the distribution_id as a reference
- cloudfront_distribution:
state: present
distribution_id: E1RP5A2MJ8073O
comment: modified by cloudfront.py again
aliases: [ 'www.my-distribution-source.com', 'zzz.aaa.io' ]
# update a distribution's aliases and comment using an alias as a reference
- cloudfront_distribution:
state: present
caller_reference: my test distribution
comment: modified by cloudfront.py again
aliases:
- www.my-distribution-source.com
- zzz.aaa.io
# update a distribution's comment and aliases and tags and remove existing tags
- cloudfront_distribution:
state: present
distribution_id: E15BU8SDCGSG57
comment: modified by cloudfront.py again
aliases:
- tested.com
tags:
Project: distribution 1.2
purge_tags: yes
# create a distribution with an origin, logging and default cache behavior
- cloudfront_distribution:
state: present
caller_reference: unique test distribution id
origins:
- id: 'my test origin-000111'
domain_name: www.example.com
origin_path: /production
custom_headers:
- header_name: MyCustomHeaderName
header_value: MyCustomHeaderValue
default_cache_behavior:
target_origin_id: 'my test origin-000111'
forwarded_values:
query_string: true
cookies:
forward: all
headers:
- '*'
viewer_protocol_policy: allow-all
smooth_streaming: true
compress: true
allowed_methods:
items:
- GET
- HEAD
cached_methods:
- GET
- HEAD
logging:
enabled: true
include_cookies: false
bucket: mylogbucket.s3.amazonaws.com
prefix: myprefix/
enabled: false
comment: this is a cloudfront distribution with logging
# delete a distribution
- cloudfront_distribution:
state: absent
caller_reference: replaceable distribution
'''
RETURN = '''
active_trusted_signers:
description: Key pair IDs that CloudFront is aware of for each trusted signer
returned: always
type: complex
contains:
enabled:
description: Whether trusted signers are in use
returned: always
type: bool
sample: false
quantity:
description: Number of trusted signers
returned: always
type: int
sample: 1
items:
description: Number of trusted signers
returned: when there are trusted signers
type: list
sample:
- key_pair_id
aliases:
description: Aliases that refer to the distribution
returned: always
type: complex
contains:
items:
description: List of aliases
returned: always
type: list
sample:
- test.example.com
quantity:
description: Number of aliases
returned: always
type: int
sample: 1
arn:
description: Amazon Resource Name of the distribution
returned: always
type: string
sample: arn:aws:cloudfront::123456789012:distribution/E1234ABCDEFGHI
cache_behaviors:
description: Cloudfront cache behaviors
returned: always
type: complex
contains:
items:
description: List of cache behaviors
returned: always
type: complex
contains:
allowed_methods:
description: Methods allowed by the cache behavior
returned: always
type: complex
contains:
cached_methods:
description: Methods cached by the cache behavior
returned: always
type: complex
contains:
items:
description: List of cached methods
returned: always
type: list
sample:
- HEAD
- GET
quantity:
description: Count of cached methods
returned: always
type: int
sample: 2
items:
description: List of methods allowed by the cache behavior
returned: always
type: list
sample:
- HEAD
- GET
quantity:
description: Count of methods allowed by the cache behavior
returned: always
type: int
sample: 2
compress:
description: Whether compression is turned on for the cache behavior
returned: always
type: bool
sample: false
default_ttl:
description: Default Time to Live of the cache behavior
returned: always
type: int
sample: 86400
forwarded_values:
description: Values forwarded to the origin for this cache behavior
returned: always
type: complex
contains:
cookies:
description: Cookies to forward to the origin
returned: always
type: complex
contains:
forward:
description: Which cookies to forward to the origin for this cache behavior
returned: always
type: string
sample: none
whitelisted_names:
description: The names of the cookies to forward to the origin for this cache behavior
returned: when I(forward) is C(whitelist)
type: complex
contains:
quantity:
description: Count of cookies to forward
returned: always
type: int
sample: 1
items:
description: List of cookies to forward
returned: when list is not empty
type: list
sample: my_cookie
headers:
description: Which headers are used to vary on cache retrievals
returned: always
type: complex
contains:
quantity:
description: Count of headers to vary on
returned: always
type: int
sample: 1
items:
description: List of headers to vary on
returned: when list is not empty
type: list
sample:
- Host
query_string:
description: Whether the query string is used in cache lookups
returned: always
type: bool
sample: false
query_string_cache_keys:
description: Which query string keys to use in cache lookups
returned: always
type: complex
contains:
quantity:
description: Count of query string cache keys to use in cache lookups
returned: always
type: int
sample: 1
items:
description: List of query string cache keys to use in cache lookups
returned: when list is not empty
type: list
sample:
lambda_function_associations:
description: Lambda function associations for a cache behavior
returned: always
type: complex
contains:
quantity:
description: Count of lambda function associations
returned: always
type: int
sample: 1
items:
description: List of lambda function associations
returned: when list is not empty
type: list
sample:
- lambda_function_arn: arn:aws:lambda:123456789012:us-east-1/lambda/lambda-function
event_type: viewer-response
max_ttl:
description: Maximum Time to Live
returned: always
type: int
sample: 31536000
min_ttl:
description: Minimum Time to Live
returned: always
type: int
sample: 0
path_pattern:
description: Path pattern that determines this cache behavior
returned: always
type: string
sample: /path/to/files/*
smooth_streaming:
description: Whether smooth streaming is enabled
returned: always
type: bool
sample: false
target_origin_id:
description: Id of origin reference by this cache behavior
returned: always
type: string
sample: origin_abcd
trusted_signers:
description: Trusted signers
returned: always
type: complex
contains:
enabled:
description: Whether trusted signers are enabled for this cache behavior
returned: always
type: bool
sample: false
quantity:
description: Count of trusted signers
returned: always
type: int
sample: 1
viewer_protocol_policy:
description: Policy of how to handle http/https
returned: always
type: string
sample: redirect-to-https
quantity:
description: Count of cache behaviors
returned: always
type: int
sample: 1
caller_reference:
description: Idempotency reference given when creating cloudfront distribution
returned: always
type: string
sample: '1484796016700'
comment:
description: Any comments you want to include about the distribution
returned: always
type: string
sample: 'my first cloudfront distribution'
custom_error_responses:
description: Custom error responses to use for error handling
returned: always
type: complex
contains:
items:
description: List of custom error responses
returned: always
type: complex
contains:
error_caching_min_ttl:
description: Minimum time to cache this error response
returned: always
type: int
sample: 300
error_code:
description: Origin response code that triggers this error response
returned: always
type: int
sample: 500
response_code:
description: Response code to return to the requester
returned: always
type: string
sample: '500'
response_page_path:
description: Path that contains the error page to display
returned: always
type: string
sample: /errors/5xx.html
quantity:
description: Count of custom error response items
returned: always
type: int
sample: 1
default_cache_behavior:
description: Default cache behavior
returned: always
type: complex
contains:
allowed_methods:
description: Methods allowed by the cache behavior
returned: always
type: complex
contains:
cached_methods:
description: Methods cached by the cache behavior
returned: always
type: complex
contains:
items:
description: List of cached methods
returned: always
type: list
sample:
- HEAD
- GET
quantity:
description: Count of cached methods
returned: always
type: int
sample: 2
items:
description: List of methods allowed by the cache behavior
returned: always
type: list
sample:
- HEAD
- GET
quantity:
description: Count of methods allowed by the cache behavior
returned: always
type: int
sample: 2
compress:
description: Whether compression is turned on for the cache behavior
returned: always
type: bool
sample: false
default_ttl:
description: Default Time to Live of the cache behavior
returned: always
type: int
sample: 86400
forwarded_values:
description: Values forwarded to the origin for this cache behavior
returned: always
type: complex
contains:
cookies:
description: Cookies to forward to the origin
returned: always
type: complex
contains:
forward:
description: Which cookies to forward to the origin for this cache behavior
returned: always
type: string
sample: none
whitelisted_names:
description: The names of the cookies to forward to the origin for this cache behavior
returned: when I(forward) is C(whitelist)
type: complex
contains:
quantity:
description: Count of cookies to forward
returned: always
type: int
sample: 1
items:
description: List of cookies to forward
returned: when list is not empty
type: list
sample: my_cookie
headers:
description: Which headers are used to vary on cache retrievals
returned: always
type: complex
contains:
quantity:
description: Count of headers to vary on
returned: always
type: int
sample: 1
items:
description: List of headers to vary on
returned: when list is not empty
type: list
sample:
- Host
query_string:
description: Whether the query string is used in cache lookups
returned: always
type: bool
sample: false
query_string_cache_keys:
description: Which query string keys to use in cache lookups
returned: always
type: complex
contains:
quantity:
description: Count of query string cache keys to use in cache lookups
returned: always
type: int
sample: 1
items:
description: List of query string cache keys to use in cache lookups
returned: when list is not empty
type: list
sample:
lambda_function_associations:
description: Lambda function associations for a cache behavior
returned: always
type: complex
contains:
quantity:
description: Count of lambda function associations
returned: always
type: int
sample: 1
items:
description: List of lambda function associations
returned: when list is not empty
type: list
sample:
- lambda_function_arn: arn:aws:lambda:123456789012:us-east-1/lambda/lambda-function
event_type: viewer-response
max_ttl:
description: Maximum Time to Live
returned: always
type: int
sample: 31536000
min_ttl:
description: Minimum Time to Live
returned: always
type: int
sample: 0
path_pattern:
description: Path pattern that determines this cache behavior
returned: always
type: string
sample: /path/to/files/*
smooth_streaming:
description: Whether smooth streaming is enabled
returned: always
type: bool
sample: false
target_origin_id:
description: Id of origin reference by this cache behavior
returned: always
type: string
sample: origin_abcd
trusted_signers:
description: Trusted signers
returned: always
type: complex
contains:
enabled:
description: Whether trusted signers are enabled for this cache behavior
returned: always
type: bool
sample: false
quantity:
description: Count of trusted signers
returned: always
type: int
sample: 1
viewer_protocol_policy:
description: Policy of how to handle http/https
returned: always
type: string
sample: redirect-to-https
default_root_object:
description: The object that you want CloudFront to request from your origin (for example, index.html)
when a viewer requests the root URL for your distribution
returned: always
type: string
sample: ''
diff:
description: Difference between previous configuration and new configuration
returned: always
type: dict
sample: {}
domain_name:
description: Domain name of cloudfront distribution
returned: always
type: string
sample: d1vz8pzgurxosf.cloudfront.net
enabled:
description: Whether the cloudfront distribution is enabled or not
returned: always
type: bool
sample: true
http_version:
description: Version of HTTP supported by the distribution
returned: always
type: string
sample: http2
id:
description: Cloudfront distribution ID
returned: always
type: string
sample: E123456ABCDEFG
in_progress_invalidation_batches:
description: The number of invalidation batches currently in progress
returned: always
type: int
sample: 0
is_ipv6_enabled:
description: Whether IPv6 is enabled
returned: always
type: bool
sample: true
last_modified_time:
description: Date and time distribution was last modified
returned: always
type: string
sample: '2017-10-13T01:51:12.656000+00:00'
logging:
description: Logging information
returned: always
type: complex
contains:
bucket:
description: S3 bucket logging destination
returned: always
type: string
sample: logs-example-com.s3.amazonaws.com
enabled:
description: Whether logging is enabled
returned: always
type: bool
sample: true
include_cookies:
description: Whether to log cookies
returned: always
type: bool
sample: false
prefix:
description: Prefix added to logging object names
returned: always
type: string
sample: cloudfront/test
origins:
description: Origins in the cloudfront distribution
returned: always
type: complex
contains:
items:
description: List of origins
returned: always
type: complex
contains:
custom_headers:
description: Custom headers passed to the origin
returned: always
type: complex
contains:
quantity:
description: Count of headers
returned: always
type: int
sample: 1
custom_origin_config:
description: Configuration of the origin
returned: always
type: complex
contains:
http_port:
description: Port on which HTTP is listening
returned: always
type: int
sample: 80
https_port:
description: Port on which HTTPS is listening
returned: always
type: int
sample: 443
origin_keepalive_timeout:
description: Keep-alive timeout
returned: always
type: int
sample: 5
origin_protocol_policy:
description: Policy of which protocols are supported
returned: always
type: string
sample: https-only
origin_read_timeout:
description: Timeout for reads to the origin
returned: always
type: int
sample: 30
origin_ssl_protocols:
description: SSL protocols allowed by the origin
returned: always
type: complex
contains:
items:
description: List of SSL protocols
returned: always
type: list
sample:
- TLSv1
- TLSv1.1
- TLSv1.2
quantity:
description: Count of SSL protocols
returned: always
type: int
sample: 3
domain_name:
description: Domain name of the origin
returned: always
type: string
sample: test-origin.example.com
id:
description: ID of the origin
returned: always
type: string
sample: test-origin.example.com
origin_path:
description: Subdirectory to prefix the request from the S3 or HTTP origin
returned: always
type: string
sample: ''
quantity:
description: Count of origins
returned: always
type: int
sample: 1
price_class:
description: Price class of cloudfront distribution
returned: always
type: string
sample: PriceClass_All
restrictions:
description: Restrictions in use by Cloudfront
returned: always
type: complex
contains:
geo_restriction:
description: Controls the countries in which your content is distributed.
returned: always
type: complex
contains:
quantity:
description: Count of restrictions
returned: always
type: int
sample: 1
items:
description: List of country codes allowed or disallowed
returned: always
type: list
sample: xy
restriction_type:
description: Type of restriction
returned: always
type: string
sample: blacklist
status:
description: Status of the cloudfront distribution
returned: always
type: string
sample: InProgress
tags:
description: Distribution tags
returned: always
type: dict
sample:
Hello: World
viewer_certificate:
description: Certificate used by cloudfront distribution
returned: always
type: complex
contains:
acm_certificate_arn:
description: ARN of ACM certificate
returned: when certificate comes from ACM
type: string
sample: arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-1234-1234-abcd-123456abcdef
certificate:
description: Reference to certificate
returned: always
type: string
sample: arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-1234-1234-abcd-123456abcdef
certificate_source:
description: Where certificate comes from
returned: always
type: string
sample: acm
minimum_protocol_version:
description: Minimum SSL/TLS protocol supported by this distribution
returned: always
type: string
sample: TLSv1
ssl_support_method:
description: Support for pre-SNI browsers or not
returned: always
type: string
sample: sni-only
web_acl_id:
description: ID of Web Access Control List (from WAF service)
returned: always
type: string
sample: abcd1234-1234-abcd-abcd-abcd12345678
'''
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager
from ansible.module_utils.ec2 import get_aws_connection_info
from ansible.module_utils.ec2 import ec2_argument_spec, boto3_conn, compare_aws_tags
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list
from ansible.module_utils.ec2 import snake_dict_to_camel_dict, boto3_tag_list_to_ansible_dict
import datetime
try:
from collections import OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict
except ImportError:
pass # caught by AnsibleAWSModule (as python 2.6 + boto3 => ordereddict is installed)
try:
import botocore
except ImportError:
pass
def change_dict_key_name(dictionary, old_key, new_key):
    """Rename *old_key* to *new_key* in *dictionary*; no-op when absent.

    The dictionary is mutated in place and also returned for chaining.
    """
    if old_key not in dictionary:
        return dictionary
    dictionary[new_key] = dictionary.get(old_key)
    dictionary.pop(old_key, None)
    return dictionary
def merge_validation_into_config(config, validated_node, node_name):
    """Merge a validated dict or list node into config[node_name].

    For dicts, validated entries override existing ones; for lists, the two
    are combined and de-duplicated (ordering is not preserved).
    """
    if validated_node is None:
        return config
    if isinstance(validated_node, dict):
        existing = config.get(node_name)
        merged = dict(existing) if existing is not None else {}
        merged.update(validated_node)
        config[node_name] = merged
    if isinstance(validated_node, list):
        config[node_name] = list(set(config.get(node_name) + validated_node))
    return config
def ansible_list_to_cloudfront_list(list_items=None, include_quantity=True):
    """Wrap a python list in CloudFront's {'quantity': n, 'items': [...]} shape.

    Raises ValueError for non-list input. 'items' is omitted when the list is
    empty and 'quantity' is omitted when include_quantity is False.
    """
    items = [] if list_items is None else list_items
    if not isinstance(items, list):
        raise ValueError('Expected a list, got a {0} with value {1}'.format(type(items).__name__, str(items)))
    wrapped = {}
    if include_quantity:
        wrapped['quantity'] = len(items)
    if items:
        wrapped['items'] = items
    return wrapped
def recursive_diff(dict1, dict2):
    """Return a (left_only, right_only) pair of nested diffs, or None if equal.

    Keys present on only one side appear in that side's dict; shared keys with
    differing values appear on both sides (recursing into nested dicts).
    """
    left = dict((k, v) for (k, v) in dict1.items() if k not in dict2)
    right = dict((k, v) for (k, v) in dict2.items() if k not in dict1)
    for key in set(dict1.keys()) & set(dict2.keys()):
        a = dict1[key]
        b = dict2[key]
        if isinstance(a, dict) and isinstance(b, dict):
            nested = recursive_diff(a, b)
            if nested:
                left[key], right[key] = nested
        elif a != b:
            left[key] = a
            right[key] = b
    return (left, right) if left or right else None
def create_distribution(client, module, config, tags):
    """Create a CloudFront distribution, optionally tagging it at creation.

    Returns the 'Distribution' element of the API response; fails the module
    on any botocore error.
    """
    try:
        if tags:
            payload = {
                'DistributionConfig': config,
                'Tags': {
                    'Items': tags
                }
            }
            response = client.create_distribution_with_tags(DistributionConfigWithTags=payload)
        else:
            response = client.create_distribution(DistributionConfig=config)
        return response['Distribution']
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Error creating distribution")
def delete_distribution(client, module, distribution):
    """Delete the distribution described by a get_distribution() result.

    The ETag is passed as IfMatch for CloudFront's optimistic locking.
    """
    try:
        response = client.delete_distribution(Id=distribution['Distribution']['Id'], IfMatch=distribution['ETag'])
        return response
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Error deleting distribution %s" % to_native(distribution['Distribution']))
def update_distribution(client, module, config, distribution_id, e_tag):
    """Push *config* to an existing distribution, guarded by its ETag."""
    try:
        response = client.update_distribution(DistributionConfig=config, Id=distribution_id, IfMatch=e_tag)
        return response['Distribution']
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Error updating distribution to %s" % to_native(config))
def tag_resource(client, module, arn, tags):
    """Apply *tags* (a boto3-style tag list) to the resource at *arn*."""
    payload = {'Items': tags}
    try:
        return client.tag_resource(Resource=arn, Tags=payload)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Error tagging resource")
def untag_resource(client, module, arn, tag_keys):
    """Remove the given tag keys from the resource at *arn*."""
    payload = {'Items': tag_keys}
    try:
        return client.untag_resource(Resource=arn, TagKeys=payload)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Error untagging resource")
def list_tags_for_resource(client, module, arn):
    """Fetch the tags on *arn* and convert them to a plain ansible dict."""
    try:
        response = client.list_tags_for_resource(Resource=arn)
        tag_items = response.get('Tags').get('Items')
        return boto3_tag_list_to_ansible_dict(tag_items)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Error listing tags for resource")
def update_tags(client, module, existing_tags, valid_tags, purge_tags, arn):
    """Reconcile tags on *arn* against *valid_tags*; return True when changed."""
    to_add, to_remove = compare_aws_tags(existing_tags, valid_tags, purge_tags)
    if to_remove:
        untag_resource(client, module, arn, to_remove)
    if to_add:
        tag_resource(client, module, arn, ansible_dict_to_boto3_tag_list(to_add))
    return bool(to_add or to_remove)
class CloudFrontValidationManager(object):
    """
    Manages Cloudfront validations
    """

    def __init__(self, module):
        # Facts manager is used to look up existing distributions (by id,
        # alias or caller reference) during validation.
        self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module)
        self.module = module
        # Defaults applied when the corresponding user option is omitted.
        self.__default_distribution_enabled = True
        self.__default_http_port = 80
        self.__default_https_port = 443
        self.__default_ipv6_enabled = False
        self.__default_origin_ssl_protocols = [
            'TLSv1',
            'TLSv1.1',
            'TLSv1.2'
        ]
        self.__default_custom_origin_protocol_policy = 'match-viewer'
        self.__default_custom_origin_read_timeout = 30
        self.__default_custom_origin_keepalive_timeout = 5
        # Timestamp reused as the default caller reference, comment suffix and
        # origin id for this module run.
        self.__default_datetime_string = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')
        self.__default_cache_behavior_min_ttl = 0
        self.__default_cache_behavior_max_ttl = 31536000
        self.__default_cache_behavior_default_ttl = 86400
        self.__default_cache_behavior_compress = False
        self.__default_cache_behavior_viewer_protocol_policy = 'allow-all'
        self.__default_cache_behavior_smooth_streaming = False
        self.__default_cache_behavior_forwarded_values_forward_cookies = 'none'
        self.__default_cache_behavior_forwarded_values_query_string = True
        self.__default_trusted_signers_enabled = False
        # Closed value sets accepted by the CloudFront API; validators below
        # check user input against these.
        self.__valid_price_classes = set([
            'PriceClass_100',
            'PriceClass_200',
            'PriceClass_All'
        ])
        self.__valid_origin_protocol_policies = set([
            'http-only',
            'match-viewer',
            'https-only'
        ])
        self.__valid_origin_ssl_protocols = set([
            'SSLv3',
            'TLSv1',
            'TLSv1.1',
            'TLSv1.2'
        ])
        self.__valid_cookie_forwarding = set([
            'none',
            'whitelist',
            'all'
        ])
        self.__valid_viewer_protocol_policies = set([
            'allow-all',
            'https-only',
            'redirect-to-https'
        ])
        self.__valid_methods = set([
            'GET',
            'HEAD',
            'POST',
            'PUT',
            'PATCH',
            'OPTIONS',
            'DELETE'
        ])
        # CloudFront only caches GET/HEAD or GET/HEAD/OPTIONS; allowed methods
        # may additionally be the full method set.
        self.__valid_methods_cached_methods = [
            set([
                'GET',
                'HEAD'
            ]),
            set([
                'GET',
                'HEAD',
                'OPTIONS'
            ])
        ]
        self.__valid_methods_allowed_methods = [
            self.__valid_methods_cached_methods[0],
            self.__valid_methods_cached_methods[1],
            self.__valid_methods
        ]
        self.__valid_lambda_function_association_event_types = set([
            'viewer-request',
            'viewer-response',
            'origin-request',
            'origin-response'
        ])
        self.__valid_viewer_certificate_ssl_support_methods = set([
            'sni-only',
            'vip'
        ])
        self.__valid_viewer_certificate_minimum_protocol_versions = set([
            'SSLv3',
            'TLSv1'
        ])
        self.__valid_viewer_certificate_certificate_sources = set([
            'cloudfront',
            'iam',
            'acm'
        ])
        self.__valid_http_versions = set([
            'http1.1',
            'http2'
        ])
        # Substring used to detect S3-bucket origins from their domain name.
        self.__s3_bucket_domain_identifier = '.s3.amazonaws.com'
def add_missing_key(self, dict_object, key_to_set, value_to_set):
    """Set *key_to_set* only when it is absent and the value is not None."""
    needs_default = key_to_set not in dict_object and value_to_set is not None
    if needs_default:
        dict_object[key_to_set] = value_to_set
    return dict_object
def add_key_else_change_dict_key(self, dict_object, old_key, new_key, value_to_set):
    """Seed *new_key* with a default, or rename an existing *old_key* to it."""
    if old_key in dict_object or value_to_set is None:
        dict_object = change_dict_key_name(dict_object, old_key, new_key)
    else:
        dict_object[new_key] = value_to_set
    return dict_object
def add_key_else_validate(self, dict_object, key_name, attribute_name, value_to_set, valid_values, to_aws_list=False):
if key_name in dict_object:
self.validate_attribute_with_allowed_values(value_to_set, attribute_name, valid_values)
else:
if to_aws_list:
dict_object[key_name] = ansible_list_to_cloudfront_list(value_to_set)
elif value_to_set is not None:
dict_object[key_name] = value_to_set
return dict_object
def validate_logging(self, logging):
try:
if logging is None:
return None
valid_logging = {}
if logging and not set(['enabled', 'include_cookies', 'bucket', 'prefix']).issubset(logging):
self.module.fail_json(msg="The logging parameters enabled, include_cookies, bucket and prefix must be specified.")
valid_logging['include_cookies'] = logging.get('include_cookies')
valid_logging['enabled'] = logging.get('enabled')
valid_logging['bucket'] = logging.get('bucket')
valid_logging['prefix'] = logging.get('prefix')
return valid_logging
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution logging")
def validate_is_list(self, list_to_validate, list_name):
if not isinstance(list_to_validate, list):
self.module.fail_json(msg='%s is of type %s. Must be a list.' % (list_name, type(list_to_validate).__name__))
def validate_required_key(self, key_name, full_key_name, dict_object):
if key_name not in dict_object:
self.module.fail_json(msg="%s must be specified." % full_key_name)
def validate_origins(self, client, config, origins, default_origin_domain_name,
                     default_origin_path, create_distribution, purge_origins=False):
    """Merge user-supplied origins with the existing distribution origins.

    Returns a CloudFront-style list ({'quantity': n, 'items': [...]}) of
    validated origins, keyed (and de-duplicated) by domain name, or None when
    purging with nothing supplied.
    """
    try:
        if origins is None:
            if default_origin_domain_name is None and not create_distribution:
                # Nothing new supplied for an existing distribution: either
                # drop all origins (purge) or keep the existing config as-is.
                if purge_origins:
                    return None
                else:
                    return ansible_list_to_cloudfront_list(config)
            if default_origin_domain_name is not None:
                origins = [{
                    'domain_name': default_origin_domain_name,
                    'origin_path': default_origin_path or ''
                }]
            else:
                origins = []
        self.validate_is_list(origins, 'origins')
        if not origins and default_origin_domain_name is None and create_distribution:
            self.module.fail_json(msg="Both origins[] and default_origin_domain_name have not been specified. Please specify at least one.")
        all_origins = OrderedDict()
        new_domains = list()
        # Index the existing origins by domain name, then validate and overlay
        # each newly supplied origin on top of its existing counterpart.
        for origin in config:
            all_origins[origin.get('domain_name')] = origin
        for origin in origins:
            origin = self.validate_origin(client, all_origins.get(origin.get('domain_name'), {}), origin, default_origin_path)
            all_origins[origin['domain_name']] = origin
            new_domains.append(origin['domain_name'])
        if purge_origins:
            # Drop any pre-existing origin whose domain was not re-specified.
            for domain in list(all_origins.keys()):
                if domain not in new_domains:
                    del(all_origins[domain])
        return ansible_list_to_cloudfront_list(list(all_origins.values()))
    except Exception as e:
        self.module.fail_json_aws(e, msg="Error validating distribution origins")
def validate_s3_origin_configuration(self, client, existing_config, origin):
    """Return the origin access identity (OAI) path for an S3 origin.

    Reuses the OAI already present in the existing config when possible,
    returns None when OAI is disabled, and otherwise creates a new CloudFront
    origin access identity via the API.
    """
    if origin['s3_origin_access_identity_enabled'] and existing_config.get('s3_origin_config', {}).get('origin_access_identity'):
        return existing_config['s3_origin_config']['origin_access_identity']
    if not origin['s3_origin_access_identity_enabled']:
        return None
    try:
        # CallerReference/comment are derived from this run's timestamp so the
        # created identity is traceable back to Ansible.
        comment = "access-identity-by-ansible-%s-%s" % (origin.get('domain_name'), self.__default_datetime_string)
        cfoai_config = dict(CloudFrontOriginAccessIdentityConfig=dict(CallerReference=self.__default_datetime_string,
                                                                      Comment=comment))
        oai = client.create_cloud_front_origin_access_identity(**cfoai_config)['CloudFrontOriginAccessIdentity']['Id']
    except Exception as e:
        self.module.fail_json_aws(e, msg="Couldn't create Origin Access Identity for id %s" % origin['id'])
    return "origin-access-identity/cloudfront/%s" % oai
def validate_origin(self, client, existing_config, origin, default_origin_path):
    """Validate and normalize a single origin entry (S3 or custom origin),
    filling gaps from the existing config and module defaults."""
    try:
        origin = self.add_missing_key(origin, 'origin_path', existing_config.get('origin_path', default_origin_path or ''))
        self.validate_required_key('origin_path', 'origins[].origin_path', origin)
        origin = self.add_missing_key(origin, 'id', existing_config.get('id', self.__default_datetime_string))
        if 'custom_headers' in origin and len(origin.get('custom_headers')) > 0:
            for custom_header in origin.get('custom_headers'):
                if 'header_name' not in custom_header or 'header_value' not in custom_header:
                    self.module.fail_json(msg="Both origins[].custom_headers.header_name and origins[].custom_headers.header_value must be specified.")
            origin['custom_headers'] = ansible_list_to_cloudfront_list(origin.get('custom_headers'))
        else:
            origin['custom_headers'] = ansible_list_to_cloudfront_list()
        # S3 origins are detected purely by the bucket-style domain name.
        if self.__s3_bucket_domain_identifier in origin.get('domain_name').lower():
            if origin.get("s3_origin_access_identity_enabled") is not None:
                s3_origin_config = self.validate_s3_origin_configuration(client, existing_config, origin)
                if s3_origin_config:
                    oai = s3_origin_config
                else:
                    oai = ""
                origin["s3_origin_config"] = dict(origin_access_identity=oai)
                # The flag is input-only; the API shape uses s3_origin_config.
                del(origin["s3_origin_access_identity_enabled"])
                if 'custom_origin_config' in origin:
                    self.module.fail_json(msg="s3_origin_access_identity_enabled and custom_origin_config are mutually exclusive")
        else:
            origin = self.add_missing_key(origin, 'custom_origin_config', existing_config.get('custom_origin_config', {}))
            custom_origin_config = origin.get('custom_origin_config')
            custom_origin_config = self.add_key_else_validate(custom_origin_config, 'origin_protocol_policy',
                                                              'origins[].custom_origin_config.origin_protocol_policy',
                                                              self.__default_custom_origin_protocol_policy, self.__valid_origin_protocol_policies)
            custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_read_timeout', self.__default_custom_origin_read_timeout)
            custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_keepalive_timeout', self.__default_custom_origin_keepalive_timeout)
            # CloudFront's API spells these HTTPPort/HTTPSPort, which
            # snake-cases to h_t_t_p_port / h_t_t_p_s_port.
            custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'http_port', 'h_t_t_p_port', self.__default_http_port)
            custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'https_port', 'h_t_t_p_s_port', self.__default_https_port)
            # Unwrap a pre-wrapped {'items': [...]} list before validating.
            if custom_origin_config.get('origin_ssl_protocols', {}).get('items'):
                custom_origin_config['origin_ssl_protocols'] = custom_origin_config['origin_ssl_protocols']['items']
            if custom_origin_config.get('origin_ssl_protocols'):
                self.validate_attribute_list_with_allowed_list(custom_origin_config['origin_ssl_protocols'], 'origins[].origin_ssl_protocols',
                                                               self.__valid_origin_ssl_protocols)
            else:
                custom_origin_config['origin_ssl_protocols'] = self.__default_origin_ssl_protocols
            custom_origin_config['origin_ssl_protocols'] = ansible_list_to_cloudfront_list(custom_origin_config['origin_ssl_protocols'])
        return origin
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        self.module.fail_json_aws(e, msg="Error validating distribution origin")
def validate_cache_behaviors(self, config, cache_behaviors, valid_origins, purge_cache_behaviors=False):
    """Validate non-default cache behaviors and merge them with existing ones,
    keyed by path_pattern; returns a CloudFront-style list."""
    try:
        if cache_behaviors is None and valid_origins is not None and purge_cache_behaviors is False:
            return ansible_list_to_cloudfront_list(config)
        all_cache_behaviors = OrderedDict()
        # cache behaviors are order dependent so we don't preserve the existing ordering when purge_cache_behaviors
        # is true (if purge_cache_behaviors is not true, we can't really know the full new order)
        if not purge_cache_behaviors:
            for behavior in config:
                all_cache_behaviors[behavior['path_pattern']] = behavior
        for cache_behavior in cache_behaviors:
            valid_cache_behavior = self.validate_cache_behavior(all_cache_behaviors.get(cache_behavior.get('path_pattern'), {}),
                                                                cache_behavior, valid_origins)
            all_cache_behaviors[cache_behavior['path_pattern']] = valid_cache_behavior
        if purge_cache_behaviors:
            # Remove existing behaviors whose path_pattern was not re-supplied.
            for target_origin_id in set(all_cache_behaviors.keys()) - set([cb['path_pattern'] for cb in cache_behaviors]):
                del(all_cache_behaviors[target_origin_id])
        return ansible_list_to_cloudfront_list(list(all_cache_behaviors.values()))
    except Exception as e:
        self.module.fail_json_aws(e, msg="Error validating distribution cache behaviors")
def validate_cache_behavior(self, config, cache_behavior, valid_origins, is_default_cache=False):
    """Validate a single (default or path-pattern) cache behavior dict.

    Each helper below normalizes one section of the behavior in place,
    falling back to values from the existing *config*.
    """
    if is_default_cache and cache_behavior is None:
        cache_behavior = {}
    if cache_behavior is None and valid_origins is not None:
        # Nothing supplied for a non-default behavior: keep existing config.
        return config
    cache_behavior = self.validate_cache_behavior_first_level_keys(config, cache_behavior, valid_origins, is_default_cache)
    cache_behavior = self.validate_forwarded_values(config, cache_behavior.get('forwarded_values'), cache_behavior)
    cache_behavior = self.validate_allowed_methods(config, cache_behavior.get('allowed_methods'), cache_behavior)
    cache_behavior = self.validate_lambda_function_associations(config, cache_behavior.get('lambda_function_associations'), cache_behavior)
    cache_behavior = self.validate_trusted_signers(config, cache_behavior.get('trusted_signers'), cache_behavior)
    return cache_behavior
def validate_cache_behavior_first_level_keys(self, config, cache_behavior, valid_origins, is_default_cache):
    """Normalize TTLs, target origin, viewer protocol policy and flags on a
    cache behavior, failing when target_origin_id matches no known origin."""
    try:
        # CloudFront's API spells these MinTTL/MaxTTL/DefaultTTL, which
        # snake-cases to min_t_t_l etc.
        cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'min_ttl', 'min_t_t_l',
                                                           config.get('min_t_t_l', self.__default_cache_behavior_min_ttl))
        cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'max_ttl', 'max_t_t_l',
                                                           config.get('max_t_t_l', self.__default_cache_behavior_max_ttl))
        cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'default_ttl', 'default_t_t_l',
                                                           config.get('default_t_t_l', self.__default_cache_behavior_default_ttl))
        cache_behavior = self.add_missing_key(cache_behavior, 'compress', config.get('compress', self.__default_cache_behavior_compress))
        target_origin_id = cache_behavior.get('target_origin_id', config.get('target_origin_id'))
        if not target_origin_id:
            # Fall back to the first validated origin's id.
            target_origin_id = self.get_first_origin_id_for_default_cache_behavior(valid_origins)
        if target_origin_id not in [origin['id'] for origin in valid_origins.get('items', [])]:
            if is_default_cache:
                cache_behavior_name = 'Default cache behavior'
            else:
                cache_behavior_name = 'Cache behavior for path %s' % cache_behavior['path_pattern']
            self.module.fail_json(msg="%s has target_origin_id pointing to an origin that does not exist." %
                                  cache_behavior_name)
        cache_behavior['target_origin_id'] = target_origin_id
        cache_behavior = self.add_key_else_validate(cache_behavior, 'viewer_protocol_policy', 'cache_behavior.viewer_protocol_policy',
                                                    config.get('viewer_protocol_policy',
                                                               self.__default_cache_behavior_viewer_protocol_policy),
                                                    self.__valid_viewer_protocol_policies)
        cache_behavior = self.add_missing_key(cache_behavior, 'smooth_streaming',
                                              config.get('smooth_streaming', self.__default_cache_behavior_smooth_streaming))
        return cache_behavior
    except Exception as e:
        self.module.fail_json_aws(e, msg="Error validating distribution cache behavior first level keys")
def validate_forwarded_values(self, config, forwarded_values, cache_behavior):
    """Normalize the forwarded_values section (headers, cookies, query
    strings) of a cache behavior, merging with the existing config."""
    try:
        if not forwarded_values:
            forwarded_values = dict()
        existing_config = config.get('forwarded_values', {})
        headers = forwarded_values.get('headers', existing_config.get('headers', {}).get('items'))
        if headers:
            # Sorted so the result compares stably against what AWS returns.
            headers.sort()
            forwarded_values['headers'] = ansible_list_to_cloudfront_list(headers)
        if 'cookies' not in forwarded_values:
            forward = existing_config.get('cookies', {}).get('forward', self.__default_cache_behavior_forwarded_values_forward_cookies)
            forwarded_values['cookies'] = {'forward': forward}
        else:
            existing_whitelist = existing_config.get('cookies', {}).get('whitelisted_names', {}).get('items')
            whitelist = forwarded_values.get('cookies').get('whitelisted_names', existing_whitelist)
            if whitelist:
                self.validate_is_list(whitelist, 'forwarded_values.whitelisted_names')
                forwarded_values['cookies']['whitelisted_names'] = ansible_list_to_cloudfront_list(whitelist)
            cookie_forwarding = forwarded_values.get('cookies').get('forward', existing_config.get('cookies', {}).get('forward'))
            self.validate_attribute_with_allowed_values(cookie_forwarding, 'cache_behavior.forwarded_values.cookies.forward',
                                                        self.__valid_cookie_forwarding)
            forwarded_values['cookies']['forward'] = cookie_forwarding
        query_string_cache_keys = forwarded_values.get('query_string_cache_keys', existing_config.get('query_string_cache_keys', {}).get('items', []))
        self.validate_is_list(query_string_cache_keys, 'forwarded_values.query_string_cache_keys')
        forwarded_values['query_string_cache_keys'] = ansible_list_to_cloudfront_list(query_string_cache_keys)
        forwarded_values = self.add_missing_key(forwarded_values, 'query_string',
                                                existing_config.get('query_string', self.__default_cache_behavior_forwarded_values_query_string))
        cache_behavior['forwarded_values'] = forwarded_values
        return cache_behavior
    except Exception as e:
        self.module.fail_json_aws(e, msg="Error validating forwarded values")
def validate_lambda_function_associations(self, config, lambda_function_associations, cache_behavior):
    """Validate Lambda@Edge associations and attach them to the behavior,
    falling back to the existing config when none are supplied."""
    try:
        if lambda_function_associations is not None:
            self.validate_is_list(lambda_function_associations, 'lambda_function_associations')
            for association in lambda_function_associations:
                # API key is LambdaFunctionARN -> lambda_function_a_r_n.
                association = change_dict_key_name(association, 'lambda_function_arn', 'lambda_function_a_r_n')
                self.validate_attribute_with_allowed_values(association.get('event_type'), 'cache_behaviors[].lambda_function_associations.event_type',
                                                            self.__valid_lambda_function_association_event_types)
            cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list(lambda_function_associations)
        else:
            if 'lambda_function_associations' in config:
                cache_behavior['lambda_function_associations'] = config.get('lambda_function_associations')
            else:
                cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list([])
        return cache_behavior
    except Exception as e:
        self.module.fail_json_aws(e, msg="Error validating lambda function associations")
def validate_allowed_methods(self, config, allowed_methods, cache_behavior):
    """Validate the allowed and cached HTTP methods of a cache behavior.

    CloudFront only accepts specific method combinations (see the valid-set
    lists built in __init__).
    """
    try:
        if allowed_methods is not None:
            self.validate_required_key('items', 'cache_behavior.allowed_methods.items[]', allowed_methods)
            temp_allowed_items = allowed_methods.get('items')
            self.validate_is_list(temp_allowed_items, 'cache_behavior.allowed_methods.items')
            self.validate_attribute_list_with_allowed_list(temp_allowed_items, 'cache_behavior.allowed_methods.items[]',
                                                           self.__valid_methods_allowed_methods)
            cached_items = allowed_methods.get('cached_methods')
            if 'cached_methods' in allowed_methods:
                self.validate_is_list(cached_items, 'cache_behavior.allowed_methods.cached_methods')
                self.validate_attribute_list_with_allowed_list(cached_items, 'cache_behavior.allowed_items.cached_methods[]',
                                                               self.__valid_methods_cached_methods)
            # we don't care if the order of how cloudfront stores the methods differs - preserving existing
            # order reduces likelihood of making unnecessary changes
            if 'allowed_methods' in config and set(config['allowed_methods']['items']) == set(temp_allowed_items):
                cache_behavior['allowed_methods'] = config['allowed_methods']
            else:
                cache_behavior['allowed_methods'] = ansible_list_to_cloudfront_list(temp_allowed_items)
            if cached_items and set(cached_items) == set(config.get('allowed_methods', {}).get('cached_methods', {}).get('items', [])):
                cache_behavior['allowed_methods']['cached_methods'] = config['allowed_methods']['cached_methods']
            else:
                cache_behavior['allowed_methods']['cached_methods'] = ansible_list_to_cloudfront_list(cached_items)
        else:
            if 'allowed_methods' in config:
                cache_behavior['allowed_methods'] = config.get('allowed_methods')
        return cache_behavior
    except Exception as e:
        self.module.fail_json_aws(e, msg="Error validating allowed methods")
def validate_trusted_signers(self, config, trusted_signers, cache_behavior):
    """Normalize the trusted_signers section of a cache behavior."""
    try:
        if trusted_signers is None:
            trusted_signers = {}
        if 'items' in trusted_signers:
            valid_trusted_signers = ansible_list_to_cloudfront_list(trusted_signers.get('items'))
        else:
            valid_trusted_signers = dict(quantity=config.get('quantity', 0))
            if 'items' in config:
                # NOTE(review): this rebuild drops the 'quantity' key set just
                # above — presumably recomputed server-side; confirm.
                valid_trusted_signers = dict(items=config['items'])
        valid_trusted_signers['enabled'] = trusted_signers.get('enabled', config.get('enabled', self.__default_trusted_signers_enabled))
        cache_behavior['trusted_signers'] = valid_trusted_signers
        return cache_behavior
    except Exception as e:
        self.module.fail_json_aws(e, msg="Error validating trusted signers")
def validate_viewer_certificate(self, viewer_certificate):
    """Validate viewer certificate options and rename keys to the odd
    snake-cased spellings CloudFront's API produces."""
    try:
        if viewer_certificate is None:
            return None
        if viewer_certificate.get('cloudfront_default_certificate') and viewer_certificate.get('ssl_support_method') is not None:
            self.module.fail_json(msg="viewer_certificate.ssl_support_method should not be specified with viewer_certificate_cloudfront_default" +
                                  "_certificate set to true.")
        self.validate_attribute_with_allowed_values(viewer_certificate.get('ssl_support_method'), 'viewer_certificate.ssl_support_method',
                                                    self.__valid_viewer_certificate_ssl_support_methods)
        self.validate_attribute_with_allowed_values(viewer_certificate.get('minimum_protocol_version'), 'viewer_certificate.minimum_protocol_version',
                                                    self.__valid_viewer_certificate_minimum_protocol_versions)
        self.validate_attribute_with_allowed_values(viewer_certificate.get('certificate_source'), 'viewer_certificate.certificate_source',
                                                    self.__valid_viewer_certificate_certificate_sources)
        # API spellings: CloudFrontDefaultCertificate, SSLSupportMethod,
        # IAMCertificateId, ACMCertificateArn.
        viewer_certificate = change_dict_key_name(viewer_certificate, 'cloudfront_default_certificate', 'cloud_front_default_certificate')
        viewer_certificate = change_dict_key_name(viewer_certificate, 'ssl_support_method', 's_s_l_support_method')
        viewer_certificate = change_dict_key_name(viewer_certificate, 'iam_certificate_id', 'i_a_m_certificate_id')
        viewer_certificate = change_dict_key_name(viewer_certificate, 'acm_certificate_arn', 'a_c_m_certificate_arn')
        return viewer_certificate
    except Exception as e:
        self.module.fail_json_aws(e, msg="Error validating viewer certificate")
def validate_custom_error_responses(self, config, custom_error_responses, purge_custom_error_responses):
    """Validate custom error responses and merge them with the existing ones.

    Returns a CloudFront-style list combining the user-supplied responses
    with any existing responses (unless purging), keyed by error_code.
    """
    try:
        if custom_error_responses is None and not purge_custom_error_responses:
            return ansible_list_to_cloudfront_list(config)
        self.validate_is_list(custom_error_responses, 'custom_error_responses')
        result = list()
        # BUG FIX: index the EXISTING responses (config), not the newly
        # supplied ones; the previous code built this dict from
        # custom_error_responses, so un-purged existing entries were never
        # carried over below.
        existing_responses = dict((response['error_code'], response) for response in config)
        for custom_error_response in custom_error_responses:
            self.validate_required_key('error_code', 'custom_error_responses[].error_code', custom_error_response)
            # API spelling: ErrorCachingMinTTL -> error_caching_min_t_t_l.
            custom_error_response = change_dict_key_name(custom_error_response, 'error_caching_min_ttl', 'error_caching_min_t_t_l')
            if 'response_code' in custom_error_response:
                custom_error_response['response_code'] = str(custom_error_response['response_code'])
            # A re-specified error_code replaces its existing counterpart.
            if custom_error_response['error_code'] in existing_responses:
                del(existing_responses[custom_error_response['error_code']])
            result.append(custom_error_response)
        if not purge_custom_error_responses:
            result.extend(existing_responses.values())
        return ansible_list_to_cloudfront_list(result)
    except Exception as e:
        self.module.fail_json_aws(e, msg="Error validating custom error responses")
def validate_restrictions(self, config, restrictions, purge_restrictions=False):
    """Validate geo restrictions, merging with existing items unless purging."""
    try:
        if restrictions is None:
            if purge_restrictions:
                return None
            else:
                return config
        self.validate_required_key('geo_restriction', 'restrictions.geo_restriction', restrictions)
        geo_restriction = restrictions.get('geo_restriction')
        self.validate_required_key('restriction_type', 'restrictions.geo_restriction.restriction_type', geo_restriction)
        # Existing items are only reused when the restriction_type matches.
        existing_restrictions = config.get('geo_restriction', {}).get(geo_restriction['restriction_type'], {}).get('items', [])
        geo_restriction_items = geo_restriction.get('items')
        if not purge_restrictions:
            # NOTE(review): geo_restriction.get('items') may be None (e.g. for
            # restriction_type 'none'); extend() would then raise and be
            # reported via fail_json_aws below — confirm callers always pass
            # items when merging.
            geo_restriction_items.extend([rest for rest in existing_restrictions if
                                          rest not in geo_restriction_items])
        valid_restrictions = ansible_list_to_cloudfront_list(geo_restriction_items)
        valid_restrictions['restriction_type'] = geo_restriction.get('restriction_type')
        return valid_restrictions
    except Exception as e:
        self.module.fail_json_aws(e, msg="Error validating restrictions")
def validate_distribution_config_parameters(self, config, default_root_object, ipv6_enabled, http_version, web_acl_id):
try:
config['default_root_object'] = default_root_object or config.get('default_root_object', '')
config['is_i_p_v_6_enabled'] = ipv6_enabled or config.get('i_p_v_6_enabled', self.__default_ipv6_enabled)
if http_version is not None or config.get('http_version'):
self.validate_attribute_with_allowed_values(http_version, 'http_version', self.__valid_http_versions)
config['http_version'] = http_version or config.get('http_version')
if web_acl_id or config.get('web_a_c_l_id'):
config['web_a_c_l_id'] = web_acl_id or config.get('web_a_c_l_id')
return config
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution config parameters")
def validate_common_distribution_parameters(self, config, enabled, aliases, logging, price_class, purge_aliases=False):
try:
if config is None:
config = {}
if aliases is not None:
if not purge_aliases:
aliases.extend([alias for alias in config.get('aliases', {}).get('items', [])
if alias not in aliases])
config['aliases'] = ansible_list_to_cloudfront_list(aliases)
if logging is not None:
config['logging'] = self.validate_logging(logging)
config['enabled'] = enabled or config.get('enabled', self.__default_distribution_enabled)
if price_class is not None:
self.validate_attribute_with_allowed_values(price_class, 'price_class', self.__valid_price_classes)
config['price_class'] = price_class
return config
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating common distribution parameters")
def validate_comment(self, config, comment):
config['comment'] = comment or config.get('comment', "Distribution created by Ansible with datetime stamp " + self.__default_datetime_string)
return config
def validate_caller_reference(self, caller_reference):
return caller_reference or self.__default_datetime_string
def get_first_origin_id_for_default_cache_behavior(self, valid_origins):
try:
if valid_origins is not None:
valid_origins_list = valid_origins.get('items')
if valid_origins_list is not None and isinstance(valid_origins_list, list) and len(valid_origins_list) > 0:
return str(valid_origins_list[0].get('id'))
self.module.fail_json(msg="There are no valid origins from which to specify a target_origin_id for the default_cache_behavior configuration.")
except Exception as e:
self.module.fail_json_aws(e, msg="Error getting first origin_id for default cache behavior")
def validate_attribute_list_with_allowed_list(self, attribute_list, attribute_list_name, allowed_list):
try:
self.validate_is_list(attribute_list, attribute_list_name)
if (isinstance(allowed_list, list) and set(attribute_list) not in allowed_list or
isinstance(allowed_list, set) and not set(allowed_list).issuperset(attribute_list)):
self.module.fail_json(msg='The attribute list {0} must be one of [{1}]'.format(attribute_list_name, ' '.join(str(a) for a in allowed_list)))
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating attribute list with allowed value list")
def validate_attribute_with_allowed_values(self, attribute, attribute_name, allowed_list):
if attribute is not None and attribute not in allowed_list:
self.module.fail_json(msg='The attribute {0} must be one of [{1}]'.format(attribute_name, ' '.join(str(a) for a in allowed_list)))
def validate_distribution_from_caller_reference(self, caller_reference):
    """Find the existing distribution whose DistributionConfig.CallerReference
    equals *caller_reference*; returns None (implicitly) when no match."""
    try:
        distributions = self.__cloudfront_facts_mgr.list_distributions(False)
        distribution_name = 'Distribution'
        distribution_config_name = 'DistributionConfig'
        distribution_ids = [dist.get('Id') for dist in distributions]
        for distribution_id in distribution_ids:
            # Full config must be fetched per distribution to see the
            # caller reference.
            config = self.__cloudfront_facts_mgr.get_distribution(distribution_id)
            distribution = config.get(distribution_name)
            if distribution is not None:
                distribution_config = distribution.get(distribution_config_name)
                if distribution_config is not None and distribution_config.get('CallerReference') == caller_reference:
                    distribution['DistributionConfig'] = distribution_config
                    return distribution
    except Exception as e:
        self.module.fail_json_aws(e, msg="Error validating distribution from caller reference")
def validate_distribution_from_aliases_caller_reference(self, distribution_id, aliases, caller_reference):
    """Resolve an existing distribution from caller reference, aliases, or id.

    Lookup precedence: caller_reference first, then aliases (which resolve to
    a distribution id), then an explicit distribution_id.  Returns ``None``
    when nothing resolves.
    """
    try:
        # Caller reference wins outright when supplied.
        if caller_reference is not None:
            return self.validate_distribution_from_caller_reference(caller_reference)
        # Aliases, when given, override any passed-in distribution_id.
        if aliases:
            distribution_id = self.validate_distribution_id_from_alias(aliases)
        if distribution_id:
            return self.__cloudfront_facts_mgr.get_distribution(distribution_id)
        return None
    except Exception as e:
        self.module.fail_json_aws(e, msg="Error validating distribution_id from alias, aliases and caller reference")
def validate_distribution_id_from_alias(self, aliases):
    """Return the Id of the first distribution sharing any of ``aliases``, else None."""
    wanted = set(aliases)
    distributions = self.__cloudfront_facts_mgr.list_distributions(False)
    for distribution in distributions or []:
        existing = distribution.get('Aliases', {}).get('Items', [])
        # Any overlap between requested and configured aliases is a match.
        if not wanted.isdisjoint(existing):
            return distribution['Id']
    return None
def wait_until_processed(self, client, wait_timeout, distribution_id, caller_reference):
    """Block until the distribution reaches the 'Deployed' state.

    :param client: boto3 CloudFront client.
    :param wait_timeout: total seconds to wait; converted to waiter attempts
        (the waiter polls roughly once a minute).
    :param distribution_id: distribution to wait on; when ``None`` it is
        resolved via ``caller_reference``.
    :param caller_reference: fallback lookup key when no id is given.
    """
    if distribution_id is None:
        # BUG FIX: the original called self.validate_distribution_id_from_caller_reference,
        # which is not defined on this class (the defined sibling is
        # validate_distribution_from_caller_reference and it returns the full
        # distribution dict).  Resolve the distribution, then take its Id.
        distribution = self.validate_distribution_from_caller_reference(caller_reference=caller_reference)
        distribution_id = distribution['Id']
    try:
        waiter = client.get_waiter('distribution_deployed')
        attempts = 1 + int(wait_timeout / 60)
        waiter.wait(Id=distribution_id, WaiterConfig={'MaxAttempts': attempts})
    except botocore.exceptions.WaiterError as e:
        self.module.fail_json(msg="Timeout waiting for cloudfront action. Waited for {0} seconds before timeout. "
                                  "Error: {1}".format(to_text(wait_timeout), to_native(e)))
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        self.module.fail_json_aws(e, msg="Error getting distribution {0}".format(distribution_id))
def main():
    """Ansible module entry point: create, update or delete an AWS CloudFront distribution."""
    # Declare every supported module option with its type/default.
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(choices=['present', 'absent'], default='present'),
        caller_reference=dict(),
        comment=dict(),
        distribution_id=dict(),
        e_tag=dict(),
        tags=dict(type='dict', default={}),
        purge_tags=dict(type='bool', default=False),
        alias=dict(),
        aliases=dict(type='list', default=[]),
        purge_aliases=dict(type='bool', default=False),
        default_root_object=dict(),
        origins=dict(type='list'),
        purge_origins=dict(type='bool', default=False),
        default_cache_behavior=dict(type='dict'),
        cache_behaviors=dict(type='list'),
        purge_cache_behaviors=dict(type='bool', default=False),
        custom_error_responses=dict(type='list'),
        purge_custom_error_responses=dict(type='bool', default=False),
        logging=dict(type='dict'),
        price_class=dict(),
        enabled=dict(type='bool'),
        viewer_certificate=dict(type='dict'),
        restrictions=dict(type='dict'),
        web_acl_id=dict(),
        http_version=dict(),
        ipv6_enabled=dict(type='bool'),
        default_origin_domain_name=dict(),
        default_origin_path=dict(),
        wait=dict(default=False, type='bool'),
        wait_timeout=dict(default=1800, type='int')
    ))

    result = {}
    changed = True

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=False,
        mutually_exclusive=[
            ['distribution_id', 'alias'],
            ['default_origin_domain_name', 'distribution_id'],
            ['default_origin_domain_name', 'alias'],
        ]
    )

    # Build the CloudFront client from the standard AWS connection options.
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    client = boto3_conn(module, conn_type='client', resource='cloudfront', region=region, endpoint=ec2_url, **aws_connect_kwargs)

    validation_mgr = CloudFrontValidationManager(module)

    # Unpack all module parameters into locals.
    state = module.params.get('state')
    caller_reference = module.params.get('caller_reference')
    comment = module.params.get('comment')
    e_tag = module.params.get('e_tag')
    tags = module.params.get('tags')
    purge_tags = module.params.get('purge_tags')
    distribution_id = module.params.get('distribution_id')
    alias = module.params.get('alias')
    aliases = module.params.get('aliases')
    purge_aliases = module.params.get('purge_aliases')
    default_root_object = module.params.get('default_root_object')
    origins = module.params.get('origins')
    purge_origins = module.params.get('purge_origins')
    default_cache_behavior = module.params.get('default_cache_behavior')
    cache_behaviors = module.params.get('cache_behaviors')
    purge_cache_behaviors = module.params.get('purge_cache_behaviors')
    custom_error_responses = module.params.get('custom_error_responses')
    purge_custom_error_responses = module.params.get('purge_custom_error_responses')
    logging = module.params.get('logging')
    price_class = module.params.get('price_class')
    enabled = module.params.get('enabled')
    viewer_certificate = module.params.get('viewer_certificate')
    restrictions = module.params.get('restrictions')
    # NOTE(review): 'purge_restrictions' is read here but never declared in
    # argument_spec above, so it is always None (falsy) — confirm whether the
    # option was meant to be exposed.
    purge_restrictions = module.params.get('purge_restrictions')
    web_acl_id = module.params.get('web_acl_id')
    http_version = module.params.get('http_version')
    ipv6_enabled = module.params.get('ipv6_enabled')
    default_origin_domain_name = module.params.get('default_origin_domain_name')
    default_origin_path = module.params.get('default_origin_path')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')

    # 'alias' is a single-value convenience form of 'aliases'.
    if alias and alias not in aliases:
        aliases.append(alias)

    # Locate any existing distribution by id, alias(es) or caller reference.
    distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference)

    # Decide which single action this run performs.
    update = state == 'present' and distribution
    create = state == 'present' and not distribution
    delete = state == 'absent' and distribution

    if not (update or create or delete):
        module.exit_json(changed=False)

    if update or delete:
        # Start from the live config; the ETag is required for write calls.
        config = distribution['Distribution']['DistributionConfig']
        e_tag = distribution['ETag']
        distribution_id = distribution['Distribution']['Id']
    else:
        config = dict()

    if update:
        # Validators below work on snake_case keys; reversible so we can go back.
        config = camel_dict_to_snake_dict(config, reversible=True)

    if create or update:
        # Validate/merge each configuration section against the existing config.
        config = validation_mgr.validate_common_distribution_parameters(config, enabled, aliases, logging, price_class, purge_aliases)
        config = validation_mgr.validate_distribution_config_parameters(config, default_root_object, ipv6_enabled, http_version, web_acl_id)
        config['origins'] = validation_mgr.validate_origins(client, config.get('origins', {}).get('items', []), origins, default_origin_domain_name,
                                                            default_origin_path, create, purge_origins)
        config['cache_behaviors'] = validation_mgr.validate_cache_behaviors(config.get('cache_behaviors', {}).get('items', []),
                                                                            cache_behaviors, config['origins'], purge_cache_behaviors)
        config['default_cache_behavior'] = validation_mgr.validate_cache_behavior(config.get('default_cache_behavior', {}),
                                                                                  default_cache_behavior, config['origins'], True)
        config['custom_error_responses'] = validation_mgr.validate_custom_error_responses(config.get('custom_error_responses', {}).get('items', []),
                                                                                          custom_error_responses, purge_custom_error_responses)
        valid_restrictions = validation_mgr.validate_restrictions(config.get('restrictions', {}), restrictions, purge_restrictions)
        if valid_restrictions:
            config['restrictions'] = valid_restrictions
        valid_viewer_certificate = validation_mgr.validate_viewer_certificate(viewer_certificate)
        config = merge_validation_into_config(config, valid_viewer_certificate, 'viewer_certificate')
        config = validation_mgr.validate_comment(config, comment)
        # The CloudFront API expects CamelCase keys.
        config = snake_dict_to_camel_dict(config, capitalize_first=True)

    if create:
        config['CallerReference'] = validation_mgr.validate_caller_reference(caller_reference)
        result = create_distribution(client, module, config, ansible_dict_to_boto3_tag_list(tags))
        result = camel_dict_to_snake_dict(result)
        result['tags'] = list_tags_for_resource(client, module, result['arn'])

    if delete:
        # CloudFront requires a distribution to be disabled and fully deployed
        # before it can be deleted.
        if config['Enabled']:
            config['Enabled'] = False
            result = update_distribution(client, module, config, distribution_id, e_tag)
            validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference'))
        # Re-fetch so the delete call sees the post-disable state.
        distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference)
        # e_tag = distribution['ETag']
        result = delete_distribution(client, module, distribution)

    if update:
        # Only call the API when the validated config actually differs.
        changed = config != distribution['Distribution']['DistributionConfig']
        if changed:
            result = update_distribution(client, module, config, distribution_id, e_tag)
        else:
            result = distribution['Distribution']
        # Tags are managed separately from the distribution config.
        existing_tags = list_tags_for_resource(client, module, result['ARN'])
        distribution['Distribution']['DistributionConfig']['tags'] = existing_tags
        changed |= update_tags(client, module, existing_tags, tags, purge_tags, result['ARN'])
        result = camel_dict_to_snake_dict(result)
        result['distribution_config']['tags'] = config['tags'] = list_tags_for_resource(client, module, result['arn'])
        # Expose a before/after diff of the distribution config.
        result['diff'] = dict()
        diff = recursive_diff(distribution['Distribution']['DistributionConfig'], config)
        if diff:
            result['diff']['before'] = diff[0]
            result['diff']['after'] = diff[1]

    if wait and (create or update):
        validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference'))

    if 'distribution_config' in result:
        # Flatten distribution_config into the top level of the returned facts.
        result.update(result['distribution_config'])
        del(result['distribution_config'])

    module.exit_json(changed=changed, **result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
akosel/incubator-airflow | airflow/contrib/kubernetes/volume.py | 17 | 1345 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class Volume:
    """A Kubernetes Volume attached to a pod.

    Lets the pod consume features such as ConfigMaps and Persistent Volumes.
    """

    def __init__(self, name, configs):
        """Record the volume mount name and its type-specific settings.

        :param name: the name of the volume mount
        :type name: str
        :param configs: arbitrary per-volume settings, kept as a free-form
            dict because volume types differ in the keys they require
        :type configs: dict
        """
        self.name = name
        self.configs = configs
| apache-2.0 |
mostafa-mahmoud/HyPRec | tests/collaborative_tests.py | 2 | 2676 | #!/usr/bin/env python
import numpy
import unittest
from lib.abstract_recommender import AbstractRecommender
from lib.collaborative_filtering import CollaborativeFiltering
from lib.evaluator import Evaluator
from util.data_parser import DataParser
from util.model_initializer import ModelInitializer
class TestcaseBase(unittest.TestCase):
    """Shared fixture: a small deterministic ratings matrix plus model config."""

    def setUp(self):
        """Build the fixture and patch DataParser to serve the fake ratings."""
        self.documents, self.users = 30, 4
        num_items, num_users = self.documents, self.users
        self.n_factors = 5
        self.n_iterations = 20
        self.k_folds = 3
        self.hyperparameters = {'n_factors': self.n_factors, '_lambda': 0.01}
        self.options = {'k_folds': self.k_folds, 'n_iterations': self.n_iterations}
        self.initializer = ModelInitializer(self.hyperparameters.copy(), self.n_iterations)

        def fake_get_ratings_matrix(self=None):
            # Deterministic 0/1 grid: a rating exists iff (item + user) % 3 == 0.
            return [[int((article + user) % 3 == 0) for article in range(num_items)]
                    for user in range(num_users)]

        self.ratings_matrix = numpy.array(fake_get_ratings_matrix())
        self.evaluator = Evaluator(self.ratings_matrix)
        # Patch at class level so code under test reads our fake data.
        DataParser.get_ratings_matrix = fake_get_ratings_matrix
class TestALS(TestcaseBase):
    """End-to-end smoke test of the ALS collaborative-filtering recommender."""

    def _check_unit_matrix(self, matrix, expected_shape):
        """Assert entries lie in [0, 1] (within float tolerance) and shape matches."""
        self.assertLessEqual(numpy.amax(matrix), 1 + 1e-6)
        self.assertGreaterEqual(numpy.amin(matrix), -1e-6)
        self.assertTrue(matrix.shape == expected_shape)

    def runTest(self):
        recommender = CollaborativeFiltering(self.initializer, self.evaluator, self.hyperparameters,
                                             self.options, load_matrices=False)
        # Construction must wire through the configured dimensions.
        self.assertEqual(recommender.n_factors, self.n_factors)
        self.assertEqual(recommender.n_items, self.documents)
        recommender.train()
        expected_shape = (self.users, self.documents)
        self.assertEqual(recommender.get_predictions().shape, expected_shape)
        self.assertTrue(isinstance(recommender, AbstractRecommender))
        # Both the raw and the rounded prediction matrices stay in [0, 1].
        self._check_unit_matrix(recommender.get_ratings(), expected_shape)
        self._check_unit_matrix(recommender.rounded_predictions(), expected_shape)
        # Recall is a proportion, so it must also land in [0, 1].
        recall = recommender.evaluator.calculate_recall(recommender.get_ratings(), recommender.get_predictions())
        self.assertTrue(-1e-6 <= recall <= 1 + 1e-6)
        # A single (user, item) prediction comes back as a numpy scalar.
        sample_user = int(numpy.random.random() * self.users)
        sample_item = int(numpy.random.random() * self.documents)
        sample_prediction = recommender.predict(sample_user, sample_item)
        self.assertTrue(isinstance(sample_prediction, numpy.float64))
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.