repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
omarkhan/ansible-modules-core | packaging/os/apt_repository.py | 93 | 17068 | #!/usr/bin/python
# encoding: utf-8
# (c) 2012, Matt Wright <matt@nobien.net>
# (c) 2013, Alexander Saltanov <asd@mokote.com>
# (c) 2014, Rutger Spiertz <rutger@kumina.nl>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: apt_repository
short_description: Add and remove APT repositories
description:
- Add or remove an APT repositories in Ubuntu and Debian.
notes:
- This module works on Debian and Ubuntu and requires C(python-apt).
- This module supports Debian Squeeze (version 6) as well as its successors.
- This module treats Debian and Ubuntu distributions separately. So PPA could be installed only on Ubuntu machines.
options:
repo:
required: true
default: none
description:
- A source string for the repository.
state:
required: false
choices: [ "absent", "present" ]
default: "present"
description:
- A source string state.
mode:
required: false
default: 0644
description:
- The octal mode for newly created files in sources.list.d
version_added: "1.6"
update_cache:
description:
- Run the equivalent of C(apt-get update) when a change occurs. Cache updates are run after making changes.
required: false
default: "yes"
choices: [ "yes", "no" ]
validate_certs:
version_added: '1.8'
description:
- If C(no), SSL certificates for the target repo will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
author: "Alexander Saltanov (@sashka)"
version_added: "0.7"
requirements: [ python-apt ]
'''
EXAMPLES = '''
# Add specified repository into sources list.
apt_repository: repo='deb http://archive.canonical.com/ubuntu hardy partner' state=present
# Add source repository into sources list.
apt_repository: repo='deb-src http://archive.canonical.com/ubuntu hardy partner' state=present
# Remove specified repository from sources list.
apt_repository: repo='deb http://archive.canonical.com/ubuntu hardy partner' state=absent
# On Ubuntu target: add nginx stable repository from PPA and install its signing key.
# On Debian target: adding PPA is not available, so it will fail immediately.
apt_repository: repo='ppa:nginx/stable'
'''
import glob
import os
import re
import tempfile
try:
import apt
import apt_pkg
import aptsources.distro as aptsources_distro
distro = aptsources_distro.get_distro()
HAVE_PYTHON_APT = True
except ImportError:
distro = None
HAVE_PYTHON_APT = False
# Only lines starting with one of these keywords count as real package sources.
VALID_SOURCE_TYPES = ('deb', 'deb-src')
def install_python_apt(module):
    """Best-effort bootstrap of the python-apt bindings via apt-get.

    Runs ``apt-get update && apt-get install python-apt`` and, on success,
    imports the apt modules and refreshes the module-level globals.  Fails
    the Ansible module when the install command errors.  Does nothing at
    all in check mode or when apt-get cannot be located.
    """
    if module.check_mode:
        return
    apt_get = module.get_bin_path('apt-get')
    if not apt_get:
        return
    cmd = '%s update && %s install python-apt -y -q' % (apt_get, apt_get)
    rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
    if rc != 0:
        module.fail_json(msg="Failed to auto-install python-apt. Error was: '%s'" % stderr.strip())
        return
    # The package is now present: import it and refresh the globals the
    # rest of this module consults.
    global apt, apt_pkg, aptsources_distro, distro, HAVE_PYTHON_APT
    import apt
    import apt_pkg
    import aptsources.distro as aptsources_distro
    distro = aptsources_distro.get_distro()
    HAVE_PYTHON_APT = True
class InvalidSource(Exception):
    """Raised when a repo line is not a valid, enabled source specification."""
# Simple version of aptsources.sourceslist.SourcesList.
# No advanced logic and no backups inside.
class SourcesList(object):
def __init__(self, module):
self.module = module
self.files = {} # group sources by file
# Repositories that we're adding -- used to implement mode param
self.new_repos = set()
self.default_file = self._apt_cfg_file('Dir::Etc::sourcelist')
# read sources.list if it exists
if os.path.isfile(self.default_file):
self.load(self.default_file)
# read sources.list.d
for file in glob.iglob('%s/*.list' % self._apt_cfg_dir('Dir::Etc::sourceparts')):
self.load(file)
def __iter__(self):
'''Simple iterator to go over all sources. Empty, non-source, and other not valid lines will be skipped.'''
for file, sources in self.files.items():
for n, valid, enabled, source, comment in sources:
if valid:
yield file, n, enabled, source, comment
raise StopIteration
def _expand_path(self, filename):
if '/' in filename:
return filename
else:
return os.path.abspath(os.path.join(self._apt_cfg_dir('Dir::Etc::sourceparts'), filename))
def _suggest_filename(self, line):
def _cleanup_filename(s):
return '_'.join(re.sub('[^a-zA-Z0-9]', ' ', s).split())
def _strip_username_password(s):
if '@' in s:
s = s.split('@', 1)
s = s[-1]
return s
# Drop options and protocols.
line = re.sub('\[[^\]]+\]', '', line)
line = re.sub('\w+://', '', line)
# split line into valid keywords
parts = [part for part in line.split() if part not in VALID_SOURCE_TYPES]
# Drop usernames and passwords
parts[0] = _strip_username_password(parts[0])
return '%s.list' % _cleanup_filename(' '.join(parts[:1]))
def _parse(self, line, raise_if_invalid_or_disabled=False):
valid = False
enabled = True
source = ''
comment = ''
line = line.strip()
if line.startswith('#'):
enabled = False
line = line[1:]
# Check for another "#" in the line and treat a part after it as a comment.
i = line.find('#')
if i > 0:
comment = line[i+1:].strip()
line = line[:i]
# Split a source into substring to make sure that it is source spec.
# Duplicated whitespaces in a valid source spec will be removed.
source = line.strip()
if source:
chunks = source.split()
if chunks[0] in VALID_SOURCE_TYPES:
valid = True
source = ' '.join(chunks)
if raise_if_invalid_or_disabled and (not valid or not enabled):
raise InvalidSource(line)
return valid, enabled, source, comment
@staticmethod
def _apt_cfg_file(filespec):
'''
Wrapper for `apt_pkg` module for running with Python 2.5
'''
try:
result = apt_pkg.config.find_file(filespec)
except AttributeError:
result = apt_pkg.Config.FindFile(filespec)
return result
@staticmethod
def _apt_cfg_dir(dirspec):
'''
Wrapper for `apt_pkg` module for running with Python 2.5
'''
try:
result = apt_pkg.config.find_dir(dirspec)
except AttributeError:
result = apt_pkg.Config.FindDir(dirspec)
return result
def load(self, file):
group = []
f = open(file, 'r')
for n, line in enumerate(f):
valid, enabled, source, comment = self._parse(line)
group.append((n, valid, enabled, source, comment))
self.files[file] = group
def save(self):
for filename, sources in self.files.items():
if sources:
d, fn = os.path.split(filename)
fd, tmp_path = tempfile.mkstemp(prefix=".%s-" % fn, dir=d)
f = os.fdopen(fd, 'w')
for n, valid, enabled, source, comment in sources:
chunks = []
if not enabled:
chunks.append('# ')
chunks.append(source)
if comment:
chunks.append(' # ')
chunks.append(comment)
chunks.append('\n')
line = ''.join(chunks)
try:
f.write(line)
except IOError, err:
self.module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, unicode(err)))
self.module.atomic_move(tmp_path, filename)
# allow the user to override the default mode
if filename in self.new_repos:
this_mode = self.module.params['mode']
self.module.set_mode_if_different(filename, this_mode, False)
else:
del self.files[filename]
if os.path.exists(filename):
os.remove(filename)
def dump(self):
return '\n'.join([str(i) for i in self])
def _choice(self, new, old):
if new is None:
return old
return new
def modify(self, file, n, enabled=None, source=None, comment=None):
'''
This function to be used with iterator, so we don't care of invalid sources.
If source, enabled, or comment is None, original value from line ``n`` will be preserved.
'''
valid, enabled_old, source_old, comment_old = self.files[file][n][1:]
self.files[file][n] = (n, valid, self._choice(enabled, enabled_old), self._choice(source, source_old), self._choice(comment, comment_old))
def _add_valid_source(self, source_new, comment_new, file):
# We'll try to reuse disabled source if we have it.
# If we have more than one entry, we will enable them all - no advanced logic, remember.
found = False
for filename, n, enabled, source, comment in self:
if source == source_new:
self.modify(filename, n, enabled=True)
found = True
if not found:
if file is None:
file = self.default_file
else:
file = self._expand_path(file)
if file not in self.files:
self.files[file] = []
files = self.files[file]
files.append((len(files), True, True, source_new, comment_new))
self.new_repos.add(file)
def add_source(self, line, comment='', file=None):
source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
# Prefer separate files for new sources.
self._add_valid_source(source, comment, file=file or self._suggest_filename(source))
def _remove_valid_source(self, source):
# If we have more than one entry, we will remove them all (not comment, remove!)
for filename, n, enabled, src, comment in self:
if source == src and enabled:
self.files[filename].pop(n)
def remove_source(self, line):
source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
self._remove_valid_source(source)
class UbuntuSourcesList(SourcesList):
    """SourcesList specialization that understands Ubuntu ``ppa:`` shorthand."""

    # Launchpad REST endpoint for PPA metadata: % (owner, archive name).
    LP_API = 'https://launchpad.net/api/1.0/~%s/+archive/%s'

    def __init__(self, module, add_ppa_signing_keys_callback=None):
        # Callback used to import a PPA's signing key; None disables key import.
        self.module = module
        self.add_ppa_signing_keys_callback = add_ppa_signing_keys_callback
        super(UbuntuSourcesList, self).__init__(module)

    def _get_ppa_info(self, owner_name, ppa_name):
        """Fetch PPA metadata from Launchpad; fails the module on non-200."""
        lp_api = self.LP_API % (owner_name, ppa_name)

        headers = dict(Accept='application/json')
        response, info = fetch_url(self.module, lp_api, headers=headers)
        if info['status'] != 200:
            self.module.fail_json(msg="failed to fetch PPA information, error was: %s" % info['msg'])
        return json.load(response)

    def _expand_ppa(self, path):
        """Translate 'ppa:owner[/name]' into a deb line for this codename."""
        ppa = path.split(':')[1]
        ppa_owner = ppa.split('/')[0]
        try:
            ppa_name = ppa.split('/')[1]
        except IndexError:
            # 'ppa:owner' defaults to the archive literally named 'ppa'.
            ppa_name = 'ppa'

        line = 'deb http://ppa.launchpad.net/%s/%s/ubuntu %s main' % (ppa_owner, ppa_name, distro.codename)
        return line, ppa_owner, ppa_name

    def _key_already_exists(self, key_fingerprint):
        # apt-key writes to stderr for unknown fingerprints, so an empty
        # stderr is taken to mean the key is already present.
        rc, out, err = self.module.run_command('apt-key export %s' % key_fingerprint, check_rc=True)
        return len(err) == 0

    def add_source(self, line, comment='', file=None):
        """Add a source line; ``ppa:`` lines are expanded and their signing key imported."""
        if line.startswith('ppa:'):
            source, ppa_owner, ppa_name = self._expand_ppa(line)

            if source in self.repos_urls:
                # repository already exists
                return

            if self.add_ppa_signing_keys_callback is not None:
                info = self._get_ppa_info(ppa_owner, ppa_name)
                if not self._key_already_exists(info['signing_key_fingerprint']):
                    command = ['apt-key', 'adv', '--recv-keys', '--keyserver', 'hkp://keyserver.ubuntu.com:80', info['signing_key_fingerprint']]
                    self.add_ppa_signing_keys_callback(command)

            file = file or self._suggest_filename('%s_%s' % (line, distro.codename))
        else:
            source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
            file = file or self._suggest_filename(source)
        self._add_valid_source(source, comment, file)

    def remove_source(self, line):
        """Remove a source line, expanding ``ppa:`` shorthand first."""
        if line.startswith('ppa:'):
            source = self._expand_ppa(line)[0]
        else:
            source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
        self._remove_valid_source(source)

    @property
    def repos_urls(self):
        """List of source lines considered active, with ``ppa:`` entries expanded."""
        _repositories = []
        for parsed_repos in self.files.values():
            for parsed_repo in parsed_repos:
                # NOTE(review): SourcesList stores (n, valid, enabled, source,
                # comment); index 1 is the 'valid' flag and index 2 is
                # 'enabled', so this looks like it filters on validity, not on
                # the enabled flag -- verify intent.
                enabled = parsed_repo[1]
                source_line = parsed_repo[3]
                if not enabled:
                    continue
                if source_line.startswith('ppa:'):
                    source, ppa_owner, ppa_name = self._expand_ppa(source_line)
                    _repositories.append(source)
                else:
                    _repositories.append(source_line)

        return _repositories
def get_add_ppa_signing_key_callback(module):
    """Return a callable that runs an apt-key command, or None in check mode."""
    if module.check_mode:
        return None

    def _execute(command):
        # check_rc=True makes a failed key import fail the whole module run.
        module.run_command(command, check_rc=True)

    return _execute
def main():
    """Entry point: parse module arguments, mutate APT sources, report changes."""
    module = AnsibleModule(
        argument_spec=dict(
            repo=dict(required=True),
            state=dict(choices=['present', 'absent'], default='present'),
            mode=dict(required=False, default=0644),
            update_cache = dict(aliases=['update-cache'], type='bool', default='yes'),
            # this should not be needed, but exists as a failsafe
            install_python_apt=dict(required=False, default="yes", type='bool'),
            validate_certs = dict(default='yes', type='bool'),
        ),
        supports_check_mode=True,
    )

    params = module.params
    repo = module.params['repo']
    state = module.params['state']
    update_cache = module.params['update_cache']
    sourceslist = None

    # Bootstrap python-apt on demand (or fail) when the import at module
    # load time did not succeed.
    if not HAVE_PYTHON_APT:
        if params['install_python_apt']:
            install_python_apt(module)
        else:
            module.fail_json(msg='python-apt is not installed, and install_python_apt is False')

    # PPA handling only exists on Ubuntu; plain Debian gets the base class.
    if isinstance(distro, aptsources_distro.UbuntuDistribution):
        sourceslist = UbuntuSourcesList(module,
                                        add_ppa_signing_keys_callback=get_add_ppa_signing_key_callback(module))
    elif isinstance(distro, aptsources_distro.Distribution):
        sourceslist = SourcesList(module)
    else:
        module.fail_json(msg='Module apt_repository supports only Debian and Ubuntu.')

    sources_before = sourceslist.dump()

    try:
        if state == 'present':
            sourceslist.add_source(repo)
        elif state == 'absent':
            sourceslist.remove_source(repo)
    except InvalidSource, err:
        module.fail_json(msg='Invalid repository string: %s' % unicode(err))

    # Change detection is a straight text diff of the parsed source lines.
    sources_after = sourceslist.dump()
    changed = sources_before != sources_after

    if not module.check_mode and changed:
        try:
            sourceslist.save()
            if update_cache:
                cache = apt.Cache()
                cache.update()
        except OSError, err:
            module.fail_json(msg=unicode(err))

    module.exit_json(changed=changed, repo=repo, state=state)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| gpl-3.0 |
azet/capirca | tools/ldpush/fake_ssh_connection.py | 7 | 2426 | #!/usr/bin/python
#
# Copyright 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fake classes for unit tests.
The class FakeSshClient is a fake for paramiko.SSHClient; it implements a very
minimal set of methods, just enough to stub out paramiko.SSHClient when used in
unit tests for clients based on pexpect_client.ParamikoSshConnection.
The classes FakeChannel and FakeTransport are substitutes for their paramiko
counterparts Channel and Transport.
"""
# pylint: disable=g-bad-name
class Error(Exception):
  """Base error for the fake SSH test doubles in this module."""
class FakeChannelError(Error):
  """An error occurred in the fake Channel class."""
class FakeTransport(object):
  """Stand-in for paramiko.Transport; reports whatever ``active`` holds."""

  def __init__(self):
    # The fake starts out active; tests may flip the flag directly.
    self.active = True

  def is_active(self):
    """Mirror paramiko.Transport.is_active()."""
    return self.active
class FakeChannel(object):
  """Fake paramiko.Channel that replays canned command -> response pairs.

  ``recv`` answers once per ``send``: it looks the last sent command up in
  ``command_response_dict`` and raises FakeChannelError for unknown input.
  """

  def __init__(self, command_response_dict):
    self.command_response_dict = command_response_dict
    self.transport = FakeTransport()
    self.timeout = None
    # Sentinel so the very first recv() (the login banner) has an answer.
    self.last_sent = '__logged_in__'

  def set_combine_stderr(self, unused_arg):
    # Present only to satisfy the paramiko interface; a no-op here.
    pass

  def get_id(self):
    # Channels are identified by an id in paramiko; the fake always uses 1.
    return 1

  def get_transport(self):
    return self.transport

  def settimeout(self, timeout):
    self.timeout = timeout

  def recv(self, unused_size):
    if not self.last_sent:
      return None
    pending, self.last_sent = self.last_sent, None
    try:
      return self.command_response_dict[pending]
    except KeyError:
      raise FakeChannelError('unknown input %r' % pending)

  def send(self, command):
    self.last_sent = command
class FakeSshClient(object):
  """Fake paramiko.SSHClient wired to a single FakeChannel."""

  def __init__(self, command_response_dict):
    self.channel = FakeChannel(command_response_dict)

  def Connect(self, **unused_kwargs):
    """Pretend to connect; the fake is always 'connected'."""
    return self

  def invoke_shell(self):
    return self.channel
| apache-2.0 |
gsarma/PyOpenWorm | tests/EvidenceQualityTest.py | 1 | 2772 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from .DataTestTemplate import _DataTest
import PyOpenWorm
from PyOpenWorm.evidence import Evidence
from PyOpenWorm.document import Document
from PyOpenWorm.website import Website
from PyOpenWorm.context import Context
from six.moves.urllib.parse import urlparse
import pytest
import re
# Regular expressions copied from:
# https://www.crossref.org/blog/dois-and-matching-regular-expressions/
# Compiled case-insensitively; a DOI is considered well-formed if it matches
# at least one of them.
DOI_REGEXEN = [re.compile(x, re.I) for x in (r'^10.\d{4,9}/[-._;()/:A-Z0-9]+$',
                                             r'^10.1002/\S+$')]
@pytest.mark.inttest
class EvidenceQualityTests(_DataTest):
    '''
    Tests for the quality of evidence. As distinct from coverage, these test things like whether accession information
    is included and usable, whether certain fields are properly formatted, etc.
    '''

    def setUp(self):
        # Connect against the data-integrity graph rather than the default test DB.
        PyOpenWorm.connect(configFile='tests/data_integrity_test.conf')
        self.g = PyOpenWorm.config("rdf.graph")
        self.context = Context()
        self.qctx = self.context.stored

    def tearDown(self):
        PyOpenWorm.disconnect()

    def test_has_valid_resource(self):
        """Checks if the object has either a valid DOI or URL"""
        ev = self.qctx(Evidence)()
        allEvidence = set(ev.load())
        qualityEvidence = set()
        for evobj in allEvidence:
            ref = evobj.reference()
            if isinstance(ref, Document):
                doi = ref.doi()
                if doi:
                    for pat in DOI_REGEXEN:
                        if pat.match(doi):
                            qualityEvidence.add(evobj)
                            break
                    else:  # no break: DOI present but malformed -> not quality
                        continue
                # Document with no DOI (or a good one): every URI must parse
                # with both a scheme and a network location.
                urls = ref.uri.get()
                good_uris = True
                for uri in urls:
                    parsed = urlparse(uri)
                    if not parsed.scheme or not parsed.netloc:
                        good_uris = False
                        break

                if not good_uris:
                    continue
            elif isinstance(ref, Website):
                urls = ref.url.get()
                urls = list(urls)
                # (leftover debug print of `urls` removed)
                good_uris = True
                for uri in urls:
                    parsed = urlparse(uri)
                    if not parsed.scheme or not parsed.netloc:
                        good_uris = False
                        break

                if not good_uris:
                    continue
            # Reaching here means the reference passed all applicable checks.
            qualityEvidence.add(evobj)

        self.assertSetEqual(allEvidence, qualityEvidence,
                            msg='\n'.join(str(x.reference()) for x in (allEvidence - qualityEvidence)))
| mit |
DarthMaulware/EquationGroupLeaks | Leak #1 - Equation Group Cyber Weapons Auction - Invitation/EQGRP-Free-File/Firewall/EXPLOITS/ELCA/fosho/requests/packages/urllib3/request.py | 2 | 2191 | # urllib3/request.py
##
##
##
##
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from .filepost import encode_multipart_formdata
__all__ = ['RequestMethods']
class RequestMethods(object):
    """Convenience mixin for classes that implement an ``urlopen`` method.

    Provides :meth:`request`, which dispatches ``fields`` either into the
    URL query string (:meth:`request_encode_url`) or into the request body
    (:meth:`request_encode_body`) depending on the HTTP method.
    """

    # Verbs whose ``fields`` belong in the URL query string.
    _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
    # Verbs whose ``fields`` belong in the request body.
    _encode_body_methods = set(['PATCH', 'POST', 'PUT', 'TRACE'])

    def urlopen(self, method, url, body=None, headers=None,
                encode_multipart=True, multipart_boundary=None,
                **kw):  # Abstract
        """Perform the actual request; must be overridden by subclasses."""
        # Raise NotImplementedError, the exception, rather than calling the
        # NotImplemented singleton (which is not callable and produced a
        # confusing TypeError instead of this message).
        raise NotImplementedError("Classes extending RequestMethods must implement "
                                  "their own ``urlopen`` method.")

    def request(self, method, url, fields=None, headers=None, **urlopen_kw):
        """Make a request using the field encoding appropriate for ``method``."""
        method = method.upper()

        if method in self._encode_url_methods:
            return self.request_encode_url(method, url, fields=fields,
                                           headers=headers,
                                           **urlopen_kw)
        else:
            return self.request_encode_body(method, url, fields=fields,
                                            headers=headers,
                                            **urlopen_kw)

    def request_encode_url(self, method, url, fields=None, **urlopen_kw):
        """Make a request where ``fields`` are encoded into the URL."""
        if fields:
            url += '?' + urlencode(fields)
        return self.urlopen(method, url, **urlopen_kw)

    def request_encode_body(self, method, url, fields=None, headers=None,
                            encode_multipart=True, multipart_boundary=None,
                            **urlopen_kw):
        """Make a request where ``fields`` are encoded into the request body.

        With ``encode_multipart`` true the payload is multipart/form-data,
        otherwise application/x-www-form-urlencoded.  The Content-Type header
        is set accordingly.
        """
        if encode_multipart:
            body, content_type = encode_multipart_formdata(fields or {},
                                                           boundary=multipart_boundary)
        else:
            body, content_type = (urlencode(fields or {}),
                                  'application/x-www-form-urlencoded')

        headers = headers or {}
        headers.update({'Content-Type': content_type})

        return self.urlopen(method, url, body=body, headers=headers,
                            **urlopen_kw)
| unlicense |
puttarajubr/commcare-hq | custom/succeed/reports/patient_task_list.py | 1 | 14071 | from datetime import datetime
import logging
from django.core.urlresolvers import reverse
from django.utils import html
from django.utils.translation import ugettext as _, ugettext_noop
import json
from corehq.apps.api.es import ReportCaseES
from corehq.apps.cloudcare.api import get_cloudcare_app, get_cloudcare_form_url
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.filters.search import SearchFilter
from corehq.apps.reports.generic import ElasticProjectInspectionReport
from corehq.apps.reports.standard import CustomProjectReport, ProjectReportParametersMixin
from corehq.apps.reports.standard.cases.data_sources import CaseDisplay
from corehq.elastic import es_query
from corehq.pillows.base import restore_property_dict
from corehq.pillows.mappings.reportcase_mapping import REPORT_CASE_INDEX
from custom.succeed.reports.patient_Info import PatientInfoReport
from custom.succeed.reports import VISIT_SCHEDULE, LAST_INTERACTION_LIST, EMPTY_FIELD, \
INPUT_DATE_FORMAT, OUTPUT_DATE_FORMAT, CM_APP_UPDATE_VIEW_TASK_MODULE, CM_UPDATE_TASK, TASK_RISK_FACTOR, TASK_ACTIVITY
from custom.succeed.utils import is_succeed_admin, has_any_role, SUCCEED_CM_APPNAME, get_app_build
from casexml.apps.case.models import CommCareCase
from dimagi.utils.decorators.memoized import memoized
class PatientTaskListReportDisplay(CaseDisplay):
    """Renders one 'task' case as a row of the Patient Tasks report."""

    def __init__(self, report, case_dict):
        # Work out the next scheduled visit: each visit form found in the
        # case's actions advances the schedule by one entry.
        next_visit = VISIT_SCHEDULE[0]
        last_inter = None
        for action in case_dict['actions']:
            if action['xform_xmlns'] in LAST_INTERACTION_LIST:
                last_inter = action

        for visit_key, visit in enumerate(VISIT_SCHEDULE):
            for key, action in enumerate(case_dict['actions']):
                if visit['xmlns'] == action['xform_xmlns']:
                    try:
                        next_visit = VISIT_SCHEDULE[visit_key + 1]
                        # Consume the matched action so it cannot satisfy a
                        # later visit as well (break makes the in-place delete
                        # safe for this enumeration pass).
                        del case_dict['actions'][key]
                        break
                    except IndexError:
                        # Past the final scheduled visit.
                        next_visit = 'last'

        self.next_visit = next_visit
        if last_inter:
            self.last_interaction = last_inter['date']
        self.domain = report.domain
        self.app_dict = get_cloudcare_app(self.domain, SUCCEED_CM_APPNAME)
        self.latest_build = get_app_build(self.app_dict)
        super(PatientTaskListReportDisplay, self).__init__(report, case_dict)

    def get_property(self, key):
        """Return case property ``key``, or the EMPTY_FIELD placeholder."""
        if key in self.case:
            return self.case[key]
        else:
            return EMPTY_FIELD

    def get_link(self, url, field):
        """Wrap ``field`` in an ajax-dialog anchor, HTML-escaping the label."""
        if url:
            return html.mark_safe("<a class='ajax_dialog' href='%s' target='_blank'>%s</a>" % (url, html.escape(field)))
        else:
            return "%s (bad ID format)" % self.case["indices"][0]["referenced_id"]

    def get_form_url(self, app_dict, app_build_id, module_idx, form, case_id=None):
        """Build the CloudCare entry URL for ``form`` within the given app build."""
        try:
            module = app_dict['modules'][module_idx]
            form_idx = [ix for (ix, f) in enumerate(module['forms']) if f['xmlns'] == form][0]
        except IndexError:
            # Module index or form xmlns not found in the app structure.
            form_idx = None

        return html.escape(get_cloudcare_form_url(domain=self.domain,
                                                  app_build_id=app_build_id,
                                                  module_id=module_idx,
                                                  form_id=form_idx,
                                                  case_id=case_id) + '/enter/')

    @property
    @memoized
    def full_name(self):
        # Patient name lives on the parent (referenced) case, not the task.
        return CommCareCase.get(self.get_property("indices")[0]["referenced_id"])["full_name"]

    @property
    def full_name_url(self):
        """URL of the Patient Info report for this task's patient."""
        return html.escape(
            PatientInfoReport.get_url(*[self.case["domain"]]) + "?patient_id=%s" % self.case["indices"][0]["referenced_id"])

    @property
    def full_name_link(self):
        return self.get_link(self.full_name_url, self.full_name)

    @property
    def name(self):
        """Task name (the case's ``name`` property)."""
        return self.get_property("name")

    @property
    def name_url(self):
        # Closed tasks link to the case history; open ones to the update form.
        if self.status == "Closed":
            url = reverse('case_details', args=[self.domain, self.get_property("_id")])
            return url + '#!history'
        else:
            return self.get_form_url(self.app_dict, self.latest_build, CM_APP_UPDATE_VIEW_TASK_MODULE, CM_UPDATE_TASK, self.get_property("_id"))

    @property
    def name_link(self):
        return self.get_link(self.name_url, self.name)

    @property
    def task_responsible(self):
        return self.get_property("task_responsible")

    @property
    def case_filter(self):
        # NOTE(review): mirrors the report-level case_filter but is not
        # obviously used for row display -- confirm before relying on it.
        filters = []
        care_site = self.request_params.get('task_responsible', '')
        if care_site != '':
            filters.append({'term': {'task_responsible.#value': care_site.lower()}})
        return {'and': filters} if filters else {}

    @property
    def status(self):
        """'Closed' or 'Open', derived from the case's closed flag."""
        return self.get_property("closed") and "Closed" or "Open"

    @property
    def task_due(self):
        """Due date reformatted for display, or EMPTY_FIELD when unset."""
        rand_date = self.get_property("task_due")
        if rand_date and rand_date != EMPTY_FIELD:
            date = datetime.strptime(rand_date, INPUT_DATE_FORMAT)
            return date.strftime(OUTPUT_DATE_FORMAT)
        else:
            return EMPTY_FIELD

    @property
    def last_modified(self):
        """Last-update date reformatted for display, or EMPTY_FIELD when unset."""
        rand_date = self.get_property("last_updated")
        if rand_date and rand_date != EMPTY_FIELD:
            date = datetime.strptime(rand_date, INPUT_DATE_FORMAT)
            return date.strftime(OUTPUT_DATE_FORMAT)
        else:
            return EMPTY_FIELD

    @property
    def task_activity(self):
        # Map the stored code to its display label; fall back to the raw code.
        key = self.case.get("task_activity", EMPTY_FIELD)
        return TASK_ACTIVITY.get(key, key)

    @property
    def task_risk_factor(self):
        # Map the stored code to its display label; fall back to the raw code.
        key = self.case.get("task_risk_factor", EMPTY_FIELD)
        return TASK_RISK_FACTOR.get(key, key)

    @property
    def task_details(self):
        return self.get_property("task_details")
class PatientTaskListReport(CustomProjectReport, ElasticProjectInspectionReport, ProjectReportParametersMixin):
    """Elasticsearch-backed, ajax-paginated listing of SUCCEED 'task' cases."""
    ajax_pagination = True
    name = ugettext_noop('Patient Tasks')
    slug = 'patient_task_list'
    default_sort = {'task_due.#value': 'asc'}
    base_template_filters = 'succeed/report.html'
    case_type = 'task'

    fields = ['custom.succeed.fields.ResponsibleParty',
              'custom.succeed.fields.PatientName',
              'custom.succeed.fields.TaskStatus',
              'corehq.apps.reports.standard.cases.filters.CaseSearchFilter']

    @classmethod
    def show_in_navigation(cls, domain=None, project=None, user=None):
        return True

    @property
    @memoized
    def rendered_report_title(self):
        return self.name

    @property
    @memoized
    def case_es(self):
        return ReportCaseES(self.domain)

    @property
    def case_filter(self):
        # Optional care-site filter taken from the request parameters.
        filters = []
        care_site = self.request_params.get('care_site', '')
        if care_site != '':
            filters.append({'term': {'care_site.#value': care_site.lower()}})
        return {'and': filters} if filters else {}

    @property
    def headers(self):
        # prop_name values must match the sortable fields used in es_results.
        headers = DataTablesHeader(
            DataTablesColumn(_("Patient Name"), sortable=False),
            DataTablesColumn(_("Task Name"), prop_name="name"),
            DataTablesColumn(_("Responsible Party"), prop_name="task_responsible", sortable=False),
            DataTablesColumn(_("Status"), prop_name='status', sortable=False),
            DataTablesColumn(_("Action Due"), prop_name="task_due.#value"),
            DataTablesColumn(_("Last Update"), prop_name='last_updated.#value'),
            DataTablesColumn(_("Task Type"), prop_name="task_activity.#value"),
            DataTablesColumn(_("Associated Risk Factor"), prop_name="task_risk_factor.#value"),
            DataTablesColumn(_("Details"), prop_name="task_details", sortable=False),
        )
        return headers

    @property
    @memoized
    def es_results(self):
        """Build and run the Elasticsearch query for the current request.

        Applies responsible-party / status / patient filters, restricts
        non-web users to their own or their groups' cases, and installs
        script-based sorts for the code->label mapped columns.
        """
        # NOTE(review): the domain filter is hard-coded to "succeed" rather
        # than using self.domain -- verify this is intentional.
        q = { "query": {
                "filtered": {
                    "query": {
                        "match_all": {}
                    },
                    "filter": {
                        "and": [
                            {"term": { "domain.exact": "succeed" }},
                        ]
                    }
                }
            },
            'sort': self.get_sorting_block(),
            'from': self.pagination.start if self.pagination else None,
            'size': self.pagination.count if self.pagination else None,
        }
        search_string = SearchFilter.get_value(self.request, self.domain)
        es_filters = q["query"]["filtered"]["filter"]

        # Responsible party maps to the task_responsible case property.
        responsible_party = self.request_params.get('responsible_party', '')
        if responsible_party != '':
            if responsible_party == 'Care Manager':
                es_filters["and"].append({"term": {"task_responsible.#value": "cm"}})
            else:
                es_filters["and"].append({"term": {"task_responsible.#value": "chw"}})
        task_status = self.request_params.get('task_status', '')
        if task_status != '':
            if task_status == 'closed':
                es_filters["and"].append({"term": {"closed": True}})
            else:
                es_filters["and"].append({"term": {"closed": False}})
        patient_id = self.request_params.get('patient_id', '')
        if patient_id != '':
            # Tasks reference their patient case through indices.
            es_filters["and"].append({"term": {"indices.referenced_id": patient_id}})

        def _filter_gen(key, flist):
            return {"terms": {
                key: [item.lower() for item in flist if item]
            }}

        # Mobile/CommCare users only see cases they own directly or via groups.
        user = self.request.couch_user
        if not user.is_web_user():
            owner_ids = user.get_group_ids()
            user_ids = [user._id]
            owner_filters = _filter_gen('owner_id', owner_ids)
            user_filters = _filter_gen('user_id', user_ids)
            filters = filter(None, [owner_filters, user_filters])
            subterms = []
            subterms.append({'or': filters})
            es_filters["and"].append({'and': subterms} if subterms else {})

        if self.case_type:
            es_filters["and"].append({"term": {"type.exact": 'task'}})
        if search_string:
            query_block = {"queryString": {"query": "*" + search_string + "*"}}
            q["query"]["filtered"]["query"] = query_block

        sorting_block = self.get_sorting_block()[0].keys()[0] if len(self.get_sorting_block()) != 0 else None
        order = self.get_sorting_block()[0].values()[0] if len(self.get_sorting_block()) != 0 else None
        # These columns store codes but display labels, so sorting must run a
        # server-side script that maps code -> label before comparing.
        if sorting_block == 'task_risk_factor.#value':
            sort = {
                "_script": {
                    "script":
                        """
                        foreach(String key : task_risk_factor_list.keySet()) {
                            String value = _source.task_risk_factor.get('#value');
                            if (value == null) {
                                return '';
                            } else {
                                return task_risk_factor_list.get(value);
                            }
                        }
                        return ''
                        """,
                    "type": "string",
                    "params": {
                        "task_risk_factor_list": TASK_RISK_FACTOR
                    },
                    "order": order
                }
            }
            q['sort'] = sort

        if sorting_block == 'task_activity.#value':
            sort = {
                "_script": {
                    "script":
                        """
                        foreach(String key : task_activity_list.keySet()) {
                            String value = _source.task_activity.get('#value');
                            if (value == null) {
                                return value;
                            } else {
                                return task_activity_list.get(value);
                            }
                        }
                        return ''
                        """,
                    "type": "string",
                    "params": {
                        "task_activity_list": TASK_ACTIVITY
                    },
                    "order": order
                }
            }
            q['sort'] = sort

        # NOTE(review): logs the full query at INFO level on every request.
        logging.info("ESlog: [%s.%s] ESquery: %s" % (self.__class__.__name__, self.domain, json.dumps(q)))
        if self.pagination:
            return es_query(q=q, es_url=REPORT_CASE_INDEX + '/_search', dict_only=False, start_at=self.pagination.start)
        else:
            return es_query(q=q, es_url=REPORT_CASE_INDEX + '/_search', dict_only=False)

    @property
    def get_all_rows(self):
        return self.rows

    @property
    def rows(self):
        """Yield one display row per ES hit, in DataTables column order."""
        case_displays = (PatientTaskListReportDisplay(self, restore_property_dict(self.get_case(case)))
                         for case in self.es_results['hits'].get('hits', []))

        for disp in case_displays:
            yield [
                disp.full_name_link,
                disp.name_link,
                disp.task_responsible,
                disp.status,
                disp.task_due,
                disp.last_modified,
                disp.task_activity,
                disp.task_risk_factor,
                disp.task_details
            ]

    @property
    def user_filter(self):
        return super(PatientTaskListReport, self).user_filter

    def get_case(self, row):
        """Extract the case dict from an ES hit, verifying it belongs to this domain."""
        if '_source' in row:
            case_dict = row['_source']
        else:
            raise ValueError("Case object is not in search result %s" % row)

        if case_dict['domain'] != self.domain:
            raise Exception("case.domain != self.domain; %r and %r, respectively" % (case_dict['domain'], self.domain))

        return case_dict
| bsd-3-clause |
JosephCastro/selenium | py/selenium/webdriver/__init__.py | 14 | 1608 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .firefox.webdriver import WebDriver as Firefox
from .firefox.firefox_profile import FirefoxProfile
from .chrome.webdriver import WebDriver as Chrome
from .chrome.options import Options as ChromeOptions
from .ie.webdriver import WebDriver as Ie
from .edge.webdriver import WebDriver as Edge
from .opera.webdriver import WebDriver as Opera
from .safari.webdriver import WebDriver as Safari
from .blackberry.webdriver import WebDriver as BlackBerry
from .phantomjs.webdriver import WebDriver as PhantomJS
from .android.webdriver import WebDriver as Android
from .remote.webdriver import WebDriver as Remote
from .common.desired_capabilities import DesiredCapabilities
from .common.action_chains import ActionChains
from .common.touch_actions import TouchActions
from .common.proxy import Proxy
# Version of the selenium python bindings; keep in sync with the release tag.
__version__ = '2.47.3'
| apache-2.0 |
nightjean/Deep-Learning | tensorflow/contrib/solvers/python/ops/lanczos.py | 80 | 9878 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lanczos algorithms."""
# TODO(rmlarsen): Add implementation of symmetric Lanczos algorithm.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
def lanczos_bidiag(operator,
                   k,
                   orthogonalize=True,
                   starting_vector=None,
                   name="lanczos_bidiag"):
  """Computes a Lanczos bidiagonalization for a linear operator.
  Computes matrices `U` of shape `[m, k+1]`, `V` of shape `[n, k]` and lower
  bidiagonal matrix `B` of shape `[k+1, k]`, that satisfy the equations
  `A * V = U * B` and `A' * U[:, :-1] = V * B[:-1, :]'`.
  The columns of `U` are orthonormal and form a basis for the Krylov subspace
  `K(A*A', U[:,0])`.
  The columns of `V` are orthonormal and form a basis for the Krylov subspace
  `K(A'*A, A' U[:,0])`.
  Args:
    operator: An object representing a linear operator with attributes:
      - shape: Either a list of integers or a 1-D `Tensor` of type `int32` of
        length 2. `shape[0]` is the dimension on the domain of the operator,
        `shape[1]` is the dimension of the co-domain of the operator. On other
        words, if operator represents an M x N matrix A, `shape` must contain
        `[M, N]`.
      - dtype: The datatype of input to and output from `apply` and
        `apply_adjoint`.
      - apply: Callable object taking a vector `x` as input and returning a
        vector with the result of applying the operator to `x`, i.e. if
        `operator` represents matrix `A`, `apply` should return `A * x`.
      - apply_adjoint: Callable object taking a vector `x` as input and
        returning a vector with the result of applying the adjoint operator
        to `x`, i.e. if `operator` represents matrix `A`, `apply_adjoint` should
        return `conj(transpose(A)) * x`.
    k: An integer or a scalar Tensor of type `int32`. Determines the maximum
      number of steps to run. If an invariant subspace is found, the algorithm
      may terminate before `k` steps have been run.
    orthogonalize: If `True`, perform full orthogonalization. If `False` no
      orthogonalization is performed.
    starting_vector: If not null, must be a `Tensor` of shape `[n]`.
    name: A name scope for the operation.
  Returns:
    output: A namedtuple representing a Lanczos bidiagonalization of
      `operator` with attributes:
      u: A rank-2 `Tensor` of type `operator.dtype` and shape
        `[operator.shape[0], k_actual+1]`, where `k_actual` is the number of
        steps run.
      v: A rank-2 `Tensor` of type `operator.dtype` and shape
        `[operator.shape[1], k_actual]`, where `k_actual` is the number of steps
        run.
      alpha: A rank-1 `Tensor` of type `operator.dtype` and shape `[k]`.
      beta: A rank-1 `Tensor` of type `operator.dtype` and shape `[k]`.
  """
  # Helper: fixed-size TensorArray that may be read repeatedly (the Lanczos
  # vectors are re-read during re-orthogonalization).
  def tarray(size, dtype, name):
    return tensor_array_ops.TensorArray(
        dtype=dtype, size=size, tensor_array_name=name, clear_after_read=False)
  # Reads a row-vector at location i in tarray and returns it as a
  # column-vector.
  def read_colvec(tarray, i):
    return array_ops.expand_dims(tarray.read(i), -1)
  # Writes a column-vector as a row-vector at location i in tarray.
  def write_colvec(tarray, colvec, i):
    return tarray.write(i, array_ops.squeeze(colvec))
  # Ephemeral class holding Lanczos bidiagonalization state:
  #   u = left Lanczos vectors
  #   v = right Lanczos vectors
  #   alpha = diagonal of B_k.
  #   beta = subdiagonal of B_k.
  # Notice that we store the left and right Lanczos vectors as the _rows_
  # of u and v. This is done because tensors are stored row-major and
  # TensorArray only supports packing along dimension 0.
  lanzcos_bidiag_state = collections.namedtuple("LanczosBidiagState",
                                                ["u", "v", "alpha", "beta"])
  # Functional update of the state: u gets slot i+1 (U has k+1 columns),
  # v/alpha/beta get slot i.
  def update_state(old, i, u, v, alpha, beta):
    return lanzcos_bidiag_state(
        write_colvec(old.u, u, i + 1),
        write_colvec(old.v, v, i),
        old.alpha.write(i, alpha), old.beta.write(i, beta))
  def gram_schmidt_step(j, basis, v):
    """Makes v orthogonal to the j'th vector in basis."""
    v_shape = v.get_shape()
    basis_vec = read_colvec(basis, j)
    v -= math_ops.matmul(basis_vec, v, adjoint_a=True) * basis_vec
    # Static shape is lost inside the while_loop body; restore it.
    v.set_shape(v_shape)
    return j + 1, basis, v
  # One full modified-Gram-Schmidt sweep against basis[0..i-1], then
  # renormalize; returns (normalized vector, norm).
  def orthogonalize_once(i, basis, v):
    j = constant_op.constant(0, dtype=dtypes.int32)
    _, _, v = control_flow_ops.while_loop(lambda j, basis, v: j < i,
                                          gram_schmidt_step, [j, basis, v])
    return util.l2normalize(v)
  # Iterated modified Gram-Schmidt orthogonalization adapted from PROPACK.
  # TODO(rmlarsen): This is possibly the slowest implementation of
  # iterated Gram-Schmidt orthogonalization since the abacus. Move to C++.
  def orthogonalize_(i, basis, v):
    v_norm = util.l2norm(v)
    v_new, v_new_norm = orthogonalize_once(i, basis, v)
    # If the norm decreases more than 1/sqrt(2), run a second
    # round of MGS. See proof in:
    #   B. N. Parlett, ``The Symmetric Eigenvalue Problem'',
    #   Prentice-Hall, Englewood Cliffs, NJ, 1980. pp. 105-109
    return control_flow_ops.cond(v_new_norm < 0.7071 * v_norm,
                                 lambda: orthogonalize_once(i, basis, v),
                                 lambda: (v_new, v_new_norm))
  def stopping_criterion(i, _):
    # TODO(rmlarsen): Stop if an invariant subspace is detected.
    return i < k
  def lanczos_bidiag_step(i, ls):
    """Extends the Lanczos bidiagonalization ls by one step."""
    u = read_colvec(ls.u, i)
    r = operator.apply_adjoint(u)
    # The shape inference doesn't work across cond, save and reapply the shape.
    r_shape = r.get_shape()
    r = control_flow_ops.cond(
        i > 0, lambda: r - ls.beta.read(i - 1) * read_colvec(ls.v, i - 1),
        lambda: r)
    r.set_shape(r_shape)
    # Orthogonalize r against the i right vectors computed so far
    # (basis slots 0..i-1, hence the i - 1 upper index).
    if orthogonalize:
      v, alpha = orthogonalize_(i - 1, ls.v, r)
    else:
      v, alpha = util.l2normalize(r)
    p = operator.apply(v) - alpha * u
    if orthogonalize:
      u, beta = orthogonalize_(i, ls.u, p)
    else:
      u, beta = util.l2normalize(p)
    return i + 1, update_state(ls, i, u, v, alpha, beta)
  with ops.name_scope(name):
    dtype = operator.dtype
    if starting_vector is None:
      # Random start in the operator's co-domain (length shape[0]).
      starting_vector = random_ops.random_uniform(
          operator.shape[:1], -1, 1, dtype=dtype)
    u0, _ = util.l2normalize(starting_vector)
    ls = lanzcos_bidiag_state(
        u=write_colvec(tarray(k + 1, dtype, "u"), u0, 0),
        v=tarray(k, dtype, "v"),
        alpha=tarray(k, dtype, "alpha"),
        beta=tarray(k, dtype, "beta"))
    i = constant_op.constant(0, dtype=dtypes.int32)
    _, ls = control_flow_ops.while_loop(stopping_criterion, lanczos_bidiag_step,
                                        [i, ls])
    # Transpose back so Lanczos vectors are columns, matching the docstring.
    return lanzcos_bidiag_state(
        array_ops.matrix_transpose(ls.u.stack()),
        array_ops.matrix_transpose(ls.v.stack()),
        ls.alpha.stack(), ls.beta.stack())
# TODO(rmlarsen): Implement C++ ops for handling bidiagonal matrices
# efficiently. Such a module should provide
# - multiplication,
# - linear system solution by back-substitution,
# - QR factorization,
# - SVD.
def bidiag_matmul(matrix, alpha, beta, adjoint_b=False, name="bidiag_matmul"):
  """Multiplies a matrix by a bidiagonal matrix.

  alpha and beta are length k vectors representing the diagonal and first lower
  subdiagonal of (K+1) x K matrix B.

  If adjoint_b is False, computes A * B as follows:

    A * B = A[:, :-1] * diag(alpha) + A[:, 1:] * diag(beta)

  If adjoint_b is True, computes A * B[:-1, :]' as follows

    A * B[:-1, :]' =
      A * diag(alpha) + [zeros(m,1), A[:, :-1] * diag(beta[:-1])]

  Args:
    matrix: A rank-2 `Tensor` representing matrix A.
    alpha: A rank-1 `Tensor` representing the diagonal of B.
    beta: A rank-1 `Tensor` representing the lower subdiagonal of B.
    adjoint_b: `bool` determining what to compute.
    name: A name scope for the operation.

  Returns:
    If `adjoint_b` is False the `A * B` is returned.
    If `adjoint_b` is True the `A * B'` is returned.
  """
  with ops.name_scope(name):
    # Expand to shape [1, k] so the elementwise products below broadcast the
    # vectors across the rows of `matrix` (i.e. implement diag() scaling).
    alpha = array_ops.expand_dims(alpha, 0)
    if not adjoint_b:  # idiomatic truth test instead of `is False`
      beta = array_ops.expand_dims(beta, 0)
      return matrix[:, :-1] * alpha + matrix[:, 1:] * beta
    else:
      # Only beta[:-1] participates when multiplying by B[:-1, :]'.
      beta = array_ops.expand_dims(beta[:-1], 0)
      shape = array_ops.shape(matrix)
      zero_column = array_ops.expand_dims(
          array_ops.zeros(
              shape[:1], dtype=matrix.dtype), 1)
      return matrix * alpha + array_ops.concat(
          [zero_column, matrix[:, :-1] * beta], 1)
| apache-2.0 |
ojengwa/odoo | doc/_extensions/odoo/switcher.py | 262 | 1500 | from docutils import nodes, utils
from docutils.parsers.rst import Directive
from pygments.lexers import get_lexer_by_name
def setup(app):
    """Register this extension's directives with the Sphinx application."""
    directives = (
        ('switcher', SwitcherDirective),
        ('case', CaseDirective),
    )
    for directive_name, directive_cls in directives:
        app.add_directive(directive_name, directive_cls)
class SwitcherDirective(Directive):
    """Container directive rendering each child as a selectable tab.

    Children must be either literal (code) blocks -- whose tab title is the
    Pygments lexer name for their language -- or named compounds (typically
    produced by the companion ``case`` directive), whose tab title is the
    compound's name.
    """
    has_content = True

    def run(self):
        self.assert_has_content()
        body = nodes.compound('\n'.join(self.content), classes=['tabs'])
        self.state.nested_parse(self.content, self.content_offset, body)
        titles = []
        for child in body.children:
            if isinstance(child, nodes.literal_block):
                # Use the human-readable lexer name as the tab title.
                titles.append(get_lexer_by_name(child['language']).name)
            else:
                # Explicit raise instead of `assert`: asserts are stripped
                # under `python -O`, silently skipping this validation.
                if not child['names']:
                    raise AssertionError("A switcher case must be either a "
                                         "code block or a compound with a name")
                titles.append(' '.join(child['names']))
        tabs = nodes.bullet_list('', *[
            nodes.list_item('', nodes.Text(title))
            for title in titles
        ])
        node = nodes.compound('', tabs, body, classes=['content-switcher'])
        return [node]
class CaseDirective(Directive):
    """One named case inside a ``switcher``; its argument becomes the name."""

    required_arguments = 1
    final_argument_whitespace = True
    has_content = True

    def run(self):
        self.assert_has_content()
        raw_text = '\n'.join(self.content)
        case_node = nodes.compound(raw_text, names=[self.arguments[0]])
        self.state.nested_parse(self.content, self.content_offset, case_node)
        return [case_node]
| agpl-3.0 |
Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/software-center/softwarecenter/cmdfinder.py | 2 | 2531 | # Copyright (C) 2011 Canonical
#
# Authors:
# Matthew McGowan
# Michael Vogt
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import absolute_import
import os
import logging
LOG=logging.getLogger(__name__)
class CmdFinder(object):
    """Helper that maps an installed package to the commands it provides.

    Scans a package's installed files for executables living on the standard
    PATH, and adds any /etc/alternatives names that point at them.
    """
    # standard ubuntu PATH
    PATH = [ "/usr/local/sbin",
             "/usr/local/bin",
             "/usr/sbin",
             "/usr/bin",
             "/sbin",
             "/bin",
             "/usr/games"
           ]

    def __init__(self, cache):
        # cache: apt package cache (mapping of package name -> package object)
        self._cache = cache
        return

    def _is_exec(self, f):
        # True for an existing, executable, non-directory path located in
        # one of the standard PATH directories.
        return (os.path.dirname(f) in self.PATH and
                os.path.exists(f) and
                not os.path.isdir(f) and
                os.access(f, os.X_OK))

    def _get_exec_candidates(self, pkg):
        # A real list (not a lazy filter object) keeps this Python 3 safe:
        # the caller extends the result with `+=` and iterates it twice.
        return [f for f in pkg.installed_files if self._is_exec(f)]

    def _find_alternatives_for_cmds(self, cmds):
        # Collect /etc/alternatives entries resolving to one of `cmds`.
        alternatives = set()
        root = "/etc/alternatives"
        if not os.path.isdir(root):
            # Best effort: the alternatives machinery is Debian-specific and
            # may be absent; previously this raised OSError.
            return alternatives
        for p in os.listdir(root):
            if os.path.realpath(os.path.join(root, p)) in cmds:
                alternatives.add(p)
        return alternatives

    def find_cmds_from_pkgname(self, pkgname):
        """ find the executables binaries for a given package """
        try:
            pkg = self._cache[pkgname]
        except KeyError:
            LOG.debug("can't find %s" % pkgname)
            return []
        if not pkg.is_installed:
            return []
        cmds = self._get_exec_candidates(pkg)
        cmds += self._find_alternatives_for_cmds(cmds)
        return sorted([os.path.basename(p) for p in cmds])
#~
#~ class CmdFinderWidget(gtk.VBox, CmdFinder):
#~
#~ def __init__(self, cache):
#~ CmdFinder.__init__(self, cache)
#~ return
#~
#~ def cmds_from_pkgname(self, pkgname):
#~ cmds = CmdFinder.cmds_from_pkgname(self, pkgname)
| gpl-3.0 |
meteogrid/OWSLib | owslib/swe/observation/waterml2.py | 5 | 5221 | # -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2014 Pete Taylor
#
# Authors : Pete Taylor <peterataylor@gmail.com>
#
# Contact email: peterataylor@gmail.com
# =============================================================================
from owslib.util import nspath_eval
from owslib.namespaces import Namespaces
from owslib.util import testXMLAttribute, testXMLValue
from owslib.swe.common import Quantity
from dateutil import parser
from owslib.swe.observation.om import OM_Observation, Result
def get_namespaces():
    """Return the namespace map for the XML prefixes used by this module."""
    prefixes = ["swe20", "xlink", "sos20", "om20", "gml32", "xsi", "wml2"]
    return Namespaces().get_namespaces(prefixes)
namespaces = get_namespaces()
def nspv(path):
    # Shorthand: expand a prefixed path (e.g. "wml2:point") using this
    # module's namespace map.
    return nspath_eval(path, namespaces)
class MeasurementTimeseriesObservation(OM_Observation):
    ''' A timeseries observation that has a measurement timeseries as
    result. An implementation of the WaterML2
    MeasurementTimeseriesObservation. '''
    def __init__(self, element):
        super(MeasurementTimeseriesObservation, self).__init__(element)
        self._parse_result()
    def _parse_result(self):
        ''' Parse the result element of the observation type '''
        # Replace the raw om:result element (set by the base class) with a
        # parsed MeasurementTimeseries wrapper.
        if self.result is not None:
            result = self.result.find(nspv(
                "wml2:MeasurementTimeseries"))
            self.result = MeasurementTimeseries(result)
    def get_result(self):
        # Accessor kept for API parity with other owslib observation types.
        return self.result
class Timeseries(Result):
    ''' Generic time-series class '''
    def __init__(self, element):
        # Delegates all parsing to the O&M Result base class.
        super(Timeseries, self).__init__(element)
class MeasurementTimeseries(Timeseries):
    ''' A WaterML2.0 timeseries of measurements, with per-value metadata. '''
    def __init__(self, element):
        super(MeasurementTimeseries, self).__init__(element)
        # Series-wide defaults applied to points lacking their own metadata.
        self.defaultTVPMetadata = TVPMeasurementMetadata(element.find(
            nspv("wml2:defaultPointMetadata/wml2:DefaultTVPMeasurementMetadata")))
        elems = element.findall(nspv("wml2:point"))
        self.points = []
        for point in elems:
            self.points.append(TimeValuePair(point))
    def __iter__(self):
        # Iterating the series yields its parsed TimeValuePair points.
        for point in self.points:
            yield point
    def __len__(self):
        return len(self.points)
    def _parse_metadata(self, element):
        ''' Parse metadata elements relating to timeseries:
            TS: baseTime, spacing, commentBlock, parameter
            MTS: startAnchor, endAnchor, cumulative, accAnchor/Length, maxGap
        '''
        # Not implemented yet: intentionally a no-op placeholder.
        pass
class TimeValuePair(object):
    ''' A time-value pair as specified by WaterML2.0
    Currently no support for tvp metadata.
    '''
    def __init__(self, element):
        date_str = testXMLValue(
            element.find(nspv("wml2:MeasurementTVP/wml2:time")))
        try:
            self.datetime = parser.parse(date_str)
        except (TypeError, ValueError, OverflowError):
            # Narrow catch: the previous bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit.
            raise ValueError("Error parsing datetime string: %s" % date_str)
        value_str = testXMLValue(element.find(nspv(
            "wml2:MeasurementTVP/wml2:value")))
        try:
            self.value = float(value_str)
        except (TypeError, ValueError):
            # Missing (None) or non-numeric values are represented as NaN.
            self.value = float('nan')
    def __str__(self):
        return str(self.datetime) + "," + str(self.value)
class TVPMetadata(object):
    def __init__(self, element):
        ''' Base time-value pair metadata. Still to do:
            - relatedObservation
        '''
        self.quality = testXMLAttribute(element.find(nspv(
            "wml2:quality")), nspv("xlink:href"))
        self.nilReason = testXMLAttribute(element.find(nspv(
            "wml2:nilReason")), nspv("xlink:href"))
        self.comment = testXMLValue(element.find(nspv(
            "wml2:comment")))
        self.qualifier = testXMLAttribute(element.find(nspv(
            "wml2:qualifier")), nspv("xlink:href"))
        # processing and source are xlink:href references like the fields
        # above; the original passed an attribute name to testXMLValue,
        # which is not a valid testXMLValue signature.
        self.processing = testXMLAttribute(element.find(nspv(
            "wml2:processing")), nspv("xlink:href"))
        self.source = testXMLAttribute(element.find(nspv(
            "wml2:source")), nspv("xlink:href"))
class TVPMeasurementMetadata(TVPMetadata):
    ''' Measurement specific metadata. Still to do:
        - aggregationDuration
    '''
    def __init__(self, element):
        super(TVPMeasurementMetadata, self).__init__(element)
        self.uom = testXMLAttribute(element.find(nspv(
            "wml2:uom")), "code")
        self.interpolationType = testXMLAttribute(element.find(nspv(
            "wml2:interpolationType")), nspv("xlink:href"))
        # Expand the xlink prefix: the raw string "xlink:href" never matches
        # the namespaced attribute (all sibling lookups use nspv()).
        self.censoredReason = testXMLAttribute(element.find(nspv(
            "wml2:censoredReason")), nspv("xlink:href"))
        accuracy = testXMLValue(element.find(nspv("wml2:accuracy")))
        if accuracy is not None:
            # NOTE(review): passes the whole element, not the accuracy
            # sub-element, to Quantity -- verify against
            # owslib.swe.common.Quantity's expected input.
            self.accuracy = Quantity(element)
class MeasurementTimeseriesDomainRange(Timeseries):
    ''' Class to implement domain range timeseries encoding '''
    def __init__(self, element):
        # Fixed super() call: the original passed three arguments to super()
        # and none to __init__, raising TypeError on construction.
        super(MeasurementTimeseriesDomainRange, self).__init__(element)
class MonitoringPoint(object):
    ''' A WaterML2.0 Monitoring Point, which is a specialised O&M SamplingPoint
    '''
    def __init__(self, element):
        # Placeholder: monitoring-point parsing is not implemented yet.
        pass
| bsd-3-clause |
mahmoudShaheen/PyMedox | packages/arduino.py | 1 | 1286 | #!/usr/bin/env python
#################################
# @author: Mahmoud Shaheen #
# MedicalBox IOT Project #
# Arduino #
#################################
#functions for serial communication with Arduino
#called from controlHardware module
import serial
import data
import time
# Open the serial link to the Arduino once at import time; port name and
# speed come from the project's data module.
ser = serial.Serial(data.arduinoPort)
ser.baudrate = data.baudRate
time.sleep(5) #wait for serial communication to start
#encodes string and sends it on serial port for Arduino
def sendSerial(serialString): #checks if the port is closed to re-open it
if(not ser.isOpen):
ser.open()
time.sleep(5)
serialString = str(serialString) #makes sure that the data is string "convert any to string"
serialString = serialString.encode() #encodes the string "converts string to byte array"
print "serial to write: " + serialString
ser.write(serialString)
#gets a line from serial port from Arduino
def getSerial():
if(not ser.isOpen): #checks if the port is closed to re-open it
ser.open()
time.sleep(5)
line = ser.readline() #get a line from serial terminated by \n
line = line.strip() #removers \r\n at the end of the string
line = line.decode("utf-8") #removes b at the start of the string "converts byte to string"
print "serial received: ", line
return line
| mit |
Greennut/ostproject | django/contrib/webdesign/templatetags/webdesign.py | 91 | 2167 | from django.contrib.webdesign.lorem_ipsum import words, paragraphs
from django import template
register = template.Library()
class LoremNode(template.Node):
    """Template node emitting lorem-ipsum filler text when rendered."""

    def __init__(self, count, method, common):
        self.count = count
        self.method = method
        self.common = common

    def render(self, context):
        try:
            amount = int(self.count.resolve(context))
        except (ValueError, TypeError):
            amount = 1  # unresolvable or non-numeric counts fall back to 1
        if self.method == 'w':
            return words(amount, common=self.common)
        paras = paragraphs(amount, common=self.common)
        if self.method == 'p':
            paras = ['<p>%s</p>' % p for p in paras]
        return u'\n\n'.join(paras)
@register.tag
def lorem(parser, token):
    """
    Creates random Latin text useful for providing test data in templates.
    Usage format::
        {% lorem [count] [method] [random] %}
    ``count`` is a number (or variable) containing the number of paragraphs or
    words to generate (default is 1).
    ``method`` is either ``w`` for words, ``p`` for HTML paragraphs, ``b`` for
    plain-text paragraph blocks (default is ``b``).
    ``random`` is the word ``random``, which if given, does not use the common
    paragraph (starting "Lorem ipsum dolor sit amet, consectetuer...").
    Examples:
        * ``{% lorem %}`` will output the common "lorem ipsum" paragraph
        * ``{% lorem 3 p %}`` will output the common "lorem ipsum" paragraph
          and two random paragraphs each wrapped in HTML ``<p>`` tags
        * ``{% lorem 2 w random %}`` will output two random latin words
    """
    bits = list(token.split_contents())
    tagname = bits[0]
    # A trailing "random" switches off the common opening paragraph.
    common = True
    if bits[-1] == 'random':
        common = False
        bits.pop()
    # Optional method flag: words / <p> paragraphs / plain blocks.
    method = 'b'
    if bits[-1] in ('w', 'p', 'b'):
        method = bits.pop()
    # Optional count expression, defaulting to a single unit.
    count = bits.pop() if len(bits) > 1 else '1'
    count = parser.compile_filter(count)
    if len(bits) != 1:
        raise template.TemplateSyntaxError("Incorrect format for %r tag" % tagname)
    return LoremNode(count, method, common)
| bsd-3-clause |
Ladeia/ProjectEuler | Problem145/Python/solution_1.py | 1 | 3513 | #!/usr/bin/env python3
#coding=utf-8
"""
How many reversible numbers are there below one-billion?
Problem 145
Some positive integers n have the property that the sum [ n + reverse(n) ] consists entirely of odd (decimal) digits.
For instance, 36 + 63 = 99 and 409 + 904 = 1313. We will call such numbers reversible; so 36, 63, 409, and 904 are reversible. Leading zeroes are not allowed in either n or reverse(n).
There are 120 reversible numbers below one-thousand.
How many reversible numbers are there below one-billion (10^9)?
"""
from functools import reduce
from itertools import permutations, combinations_with_replacement
def other_function(n):
	"""Yield brute-force candidates in [11, n), skipping any number whose
	first or last decimal digit is 0 (leading zeroes are disallowed in
	reversals).

	Fixed for Python 3: the original used `xrange`, which no longer exists
	even though the file's shebang targets python3.
	"""
	for i in range(11, n):
		if str(i)[0] == '0' or str(i)[-1] == '0':
			continue
		yield i
# combs = reduce(list.__add__, [[(x, y) for y in range(0, 10) if (x ^ y) & 1 and not (x == 0 or y == 0)] for x in range(0, 10)])
# for comb in combs:
# start, end = comb
# num = int(str(start) + str(end))
# yield num
# for i in range(n):
# for comb in combs:
# start, end = comb
# num = int(str(start) + str(i) + str(end))
# if num > n:
# break
# yield num
def gen_filtred(n, start = 1): #n - power of 10 (exclusive upper exponent)
	"""Yield candidate reversible numbers with up to n decimal digits.

	Builds numbers from symmetric digit pairs (i-th digit from the front
	paired with the i-th from the back) whose sums are odd -- a necessary
	condition for n + reverse(n) to have only odd digits (Project Euler 145).
	NOTE(review): duplicates across permutations are assumed to be removed
	by the set(permutations(...)) call; confirm no candidate is yielded
	twice for the same exponent.
	"""
	# (x ^ y) & 1 keeps only digit pairs of opposite parity, i.e. x + y odd.
	combs = reduce(list.__add__, [[(x, y) for y in range(0, 10) if (x ^ y) & 1] for x in range(0, 10)])
	exp = start
	while exp < n:
		# tamanho = number of symmetric digit pairs for this width.
		tamanho = len(str(10 ** exp))//2
		if exp & 1 == 1: #odd base-10 exponent -> even number of digits
			for comb in combinations_with_replacement(combs, tamanho):
				for perm in set(permutations(comb)):
					first = perm[0]
					head, tail = first
					# No leading zero in the number or its reversal.
					if head == 0 or tail == 0:
						continue
					index = exp
					newnum = 0
					# Place each pair at mirrored positions (index, exp-index).
					for mostnum, lessnum in perm:
						newnum += mostnum * 10 ** index + lessnum * 10 ** abs(index - exp)
						index -= 1
					yield newnum
		else: #even base-10 exponent -> odd number of digits (free middle digit)
			for comb in combinations_with_replacement(combs, tamanho):
				for perm in set(permutations(comb)):
					first = perm[0]
					head, tail = first
					if head == 0 or tail == 0:
						continue
					for middle in range(10):
						index = exp
						# The unpaired centre digit sits at position exp // 2.
						newnum = middle * 10 ** (exp // 2)
						for mostnum, lessnum in perm:
							newnum += mostnum * 10 ** index + lessnum * 10 ** abs(index - exp)
							index -= 1
						yield newnum
		exp += 1
def sumreverse(num):
	"""Return num + reverse(num) as a decimal string."""
	reversed_value = int(str(num)[::-1])
	return str(num + reversed_value)
def reversible(num):
	"""Return 1 when every digit of num + reverse(num) is odd, else 0."""
	digit_sum = str(num + int(str(num)[::-1]))
	product = 1
	for ch in digit_sum:
		product *= int(ch)
	# The digit product is odd exactly when all digits are odd.
	return product & 1
def range_x(x):
	"""Candidate reversible numbers below x via the symmetric digit generator."""
	return gen_filtred(len(str(x)) - 1)

def range_y(y):
	"""Brute-force candidate generator, kept for cross-checking range_x."""
	return other_function(y)

def main():
	"""Count reversible numbers below one billion (Project Euler 145)."""
	test = 10 ** 9
	print('Testando até: %s ' % test)
	gen_list = []
	# Was previously left undefined (its brute-force fill pass was commented
	# out), making the cross-check loop below raise NameError at runtime.
	other_list = []
	total = 0
	for i in range_x(test):
		if reversible(i) == 1:
			total += 1
	print('Total range_x: %d' % total)
	# Cross-check the two generators against each other; a no-op while the
	# result lists are left empty.
	for gen, other in zip(gen_list, other_list):
		# Fixed format strings: the originals used '%' with no placeholder,
		# which raises TypeError.
		if gen not in other_list:
			print('A função other não está gerando o reversível: %s' % gen)
		if other not in gen_list:
			print('A função gen_filtred não está gerando o reversível: %s' % other)

# Guarding the entry point keeps the (very long) enumeration from running
# on import.
if __name__ == '__main__':
	main()
szilveszter/django | tests/extra_regress/tests.py | 55 | 15018 | from __future__ import unicode_literals
from collections import OrderedDict
import datetime
from django.contrib.auth.models import User
from django.test import TestCase
from .models import TestObject, Order, RevisionableModel
class ExtraRegressTests(TestCase):
    def setUp(self):
        # Single fixture user shared by the extra()/values() regression tests.
        self.u = User.objects.create_user(
            username="fred",
            password="secret",
            email="fred@example.com"
        )
    def test_regression_7314_7372(self):
        """
        Regression tests for #7314 and #7372: a revision chain keeps the base
        pk stable, and extra(where=...) can select the most recent revision.
        """
        rm = RevisionableModel.objects.create(
            title='First Revision',
            when=datetime.datetime(2008, 9, 28, 10, 30, 0)
        )
        # A fresh object is its own base revision.
        self.assertEqual(rm.pk, rm.base.pk)
        rm2 = rm.new_revision()
        rm2.title = "Second Revision"
        rm.when = datetime.datetime(2008, 9, 28, 14, 25, 0)
        rm2.save()
        self.assertEqual(rm2.title, 'Second Revision')
        self.assertEqual(rm2.base.title, 'First Revision')
        self.assertNotEqual(rm2.pk, rm.pk)
        self.assertEqual(rm2.base.pk, rm.pk)
        # Queryset to match most recent revision:
        qs = RevisionableModel.objects.extra(
            where=["%(table)s.id IN (SELECT MAX(rev.id) FROM %(table)s rev GROUP BY rev.base_id)" % {
                'table': RevisionableModel._meta.db_table,
            }]
        )
        self.assertQuerysetEqual(qs,
            [('Second Revision', 'First Revision')],
            transform=lambda r: (r.title, r.base.title)
        )
        # Queryset to search for string in title:
        qs2 = RevisionableModel.objects.filter(title__contains="Revision")
        self.assertQuerysetEqual(qs2,
            [
                ('First Revision', 'First Revision'),
                ('Second Revision', 'First Revision'),
            ],
            transform=lambda r: (r.title, r.base.title),
            ordered=False
        )
        # Following queryset should return the most recent revision:
        self.assertQuerysetEqual(qs & qs2,
            [('Second Revision', 'First Revision')],
            transform=lambda r: (r.title, r.base.title),
            ordered=False
        )
    def test_extra_stay_tied(self):
        # Extra select parameters should stay tied to their corresponding
        # select portions. Applies when portions are updated or otherwise
        # moved around.
        qs = User.objects.extra(
            select=OrderedDict((("alpha", "%s"), ("beta", "2"), ("gamma", "%s"))),
            select_params=(1, 3)
        )
        # Overriding beta/alpha must not disturb the other parameters.
        qs = qs.extra(select={"beta": 4})
        qs = qs.extra(select={"alpha": "%s"}, select_params=[5])
        self.assertEqual(
            list(qs.filter(id=self.u.id).values('alpha', 'beta', 'gamma')),
            [{'alpha': 5, 'beta': 4, 'gamma': 3}]
        )
    def test_regression_7957(self):
        """
        Regression test for #7957: Combining extra() calls should leave the
        corresponding parameters associated with the right extra() bit. I.e.
        internal dictionary must remain sorted.
        """
        # alpha must pick up its own parameter regardless of call order.
        self.assertEqual(
            (User.objects
             .extra(select={"alpha": "%s"}, select_params=(1,))
             .extra(select={"beta": "%s"}, select_params=(2,))[0].alpha),
            1
        )
        self.assertEqual(
            (User.objects
             .extra(select={"beta": "%s"}, select_params=(1,))
             .extra(select={"alpha": "%s"}, select_params=(2,))[0].alpha),
            2
        )
    def test_regression_7961(self):
        """
        Regression test for #7961: When not using a portion of an
        extra(...) in a query, remove any corresponding parameters from the
        query as well.
        """
        # The unused alpha select (and its -6 param) must not corrupt the
        # values_list query.
        self.assertEqual(
            list(User.objects
                 .extra(select={"alpha": "%s"}, select_params=(-6,))
                 .filter(id=self.u.id)
                 .values_list('id', flat=True)),
            [self.u.id]
        )
    def test_regression_8063(self):
        """
        Regression test for #8063: limiting a query shouldn't discard any
        extra() bits.
        """
        qs = User.objects.all().extra(where=['id=%s'], params=[self.u.id])
        self.assertQuerysetEqual(qs, ['<User: fred>'])
        # Slicing must preserve the extra where clause and its parameter.
        self.assertQuerysetEqual(qs[:1], ['<User: fred>'])
    def test_regression_8039(self):
        """
        Regression test for #8039: Ordering sometimes removed relevant tables
        from extra(). This test is the critical case: ordering uses a table,
        but then removes the reference because of an optimization. The table
        should still be present because of the extra() call.
        """
        # No orders exist, so the queryset must simply be empty rather than
        # fail with a missing-table error.
        self.assertQuerysetEqual(
            (Order.objects
             .extra(where=["username=%s"], params=["fred"], tables=["auth_user"])
             .order_by('created_by')),
            []
        )
    def test_regression_8819(self):
        """
        Regression test for #8819: Fields in the extra(select=...) list
        should be available to extra(order_by=...).
        """
        # Exercise the extra field with distinct(), with order_by, and with
        # both combined.
        self.assertQuerysetEqual(
            User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}).distinct(),
            ['<User: fred>']
        )
        self.assertQuerysetEqual(
            User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}, order_by=['extra_field']),
            ['<User: fred>']
        )
        self.assertQuerysetEqual(
            User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}, order_by=['extra_field']).distinct(),
            ['<User: fred>']
        )
    def test_dates_query(self):
        """
        When calling the dates() method on a queryset with extra selection
        columns, we can (and should) ignore those columns. They don't change
        the result and cause incorrect SQL to be produced otherwise.
        """
        RevisionableModel.objects.create(
            title='First Revision',
            when=datetime.datetime(2008, 9, 28, 10, 30, 0)
        )
        # The extra select column must not leak into the datetimes() SQL.
        self.assertQuerysetEqual(
            RevisionableModel.objects.extra(select={"the_answer": 'id'}).datetimes('when', 'month'),
            [datetime.datetime(2008, 9, 1, 0, 0)],
            transform=lambda d: d,
        )
    def test_values_with_extra(self):
        """
        Regression test for #10256... If there is a values() clause, Extra
        columns are only returned if they are explicitly mentioned.
        """
        obj = TestObject(first='first', second='second', third='third')
        obj.save()
        # With an empty values() clause, all extra columns are returned.
        self.assertEqual(
            list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values()),
            [{'bar': 'second', 'third': 'third', 'second': 'second', 'whiz': 'third', 'foo': 'first', 'id': obj.pk, 'first': 'first'}]
        )
        # Extra clauses after an empty values clause are still included
        self.assertEqual(
            list(TestObject.objects.values().extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))),
            [{'bar': 'second', 'third': 'third', 'second': 'second', 'whiz': 'third', 'foo': 'first', 'id': obj.pk, 'first': 'first'}]
        )
        # Extra columns are ignored if not mentioned in the values() clause
        self.assertEqual(
            list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values('first', 'second')),
            [{'second': 'second', 'first': 'first'}]
        )
        # Extra columns after a non-empty values() clause are ignored
        self.assertEqual(
            list(TestObject.objects.values('first', 'second').extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))),
            [{'second': 'second', 'first': 'first'}]
        )
        # Extra columns can be partially returned
        self.assertEqual(
            list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values('first', 'second', 'foo')),
            [{'second': 'second', 'foo': 'first', 'first': 'first'}]
        )
        # Also works if only extra columns are included
        self.assertEqual(
            list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values('foo', 'whiz')),
            [{'foo': 'first', 'whiz': 'third'}]
        )
        # Values list works the same way
        # All columns are returned for an empty values_list()
        self.assertEqual(
            list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list()),
            [('first', 'second', 'third', obj.pk, 'first', 'second', 'third')]
        )
        # Extra columns after an empty values_list() are still included
        self.assertEqual(
            list(TestObject.objects.values_list().extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))),
            [('first', 'second', 'third', obj.pk, 'first', 'second', 'third')]
        )
        # Extra columns ignored completely if not mentioned in values_list()
        self.assertEqual(
            list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('first', 'second')),
            [('first', 'second')]
        )
        # Extra columns after a non-empty values_list() clause are ignored completely
        self.assertEqual(
            list(TestObject.objects.values_list('first', 'second').extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))),
            [('first', 'second')]
        )
        self.assertEqual(
            list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('second', flat=True)),
            ['second']
        )
        # Only the extra columns specified in the values_list() are returned
        self.assertEqual(
            list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('first', 'second', 'whiz')),
            [('first', 'second', 'third')]
        )
        # ...also works if only extra columns are included
        self.assertEqual(
            list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('foo', 'whiz')),
            [('first', 'third')]
        )
        self.assertEqual(
            list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('whiz', flat=True)),
            ['third']
        )
        # ... and values are returned in the order they are specified
        self.assertEqual(
            list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('whiz', 'foo')),
            [('third', 'first')]
        )
        self.assertEqual(
            list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('first', 'id')),
            [('first', obj.pk)]
        )
        self.assertEqual(
            list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('whiz', 'first', 'bar', 'id')),
            [('third', 'first', 'second', obj.pk)]
        )
def test_regression_10847(self):
        """
        Regression for #10847: the list of extra columns can always be
        accurately evaluated. Using an inner query ensures that as_sql() is
        producing correct output without requiring full evaluation and
        execution of the inner query.
        """
        obj = TestObject(first='first', second='second', third='third')
        obj.save()
        # extra() select columns not mentioned in values() must not leak into
        # the result dicts.
        self.assertEqual(
            list(TestObject.objects.extra(select={'extra': 1}).values('pk')),
            [{'pk': obj.pk}]
        )
        # The same queryset used as a subquery (pk__in=...) forces as_sql()
        # to emit only the columns values() selected.
        self.assertQuerysetEqual(
            TestObject.objects.filter(
                pk__in=TestObject.objects.extra(select={'extra': 1}).values('pk')
            ),
            ['<TestObject: TestObject: first,second,third>']
        )
        # The relative order of values() and extra() must not matter...
        self.assertEqual(
            list(TestObject.objects.values('pk').extra(select={'extra': 1})),
            [{'pk': obj.pk}]
        )
        # ...including when the queryset is used as a subquery.
        self.assertQuerysetEqual(
            TestObject.objects.filter(
                pk__in=TestObject.objects.values('pk').extra(select={'extra': 1})
            ),
            ['<TestObject: TestObject: first,second,third>']
        )
        # ORing a plain filtered queryset with an extra(where=...) queryset
        # must also evaluate correctly (only obj matches: id > obj.pk is
        # empty, pk=obj.pk matches).
        self.assertQuerysetEqual(
            TestObject.objects.filter(pk=obj.pk) | TestObject.objects.extra(where=["id > %s"], params=[obj.pk]),
            ['<TestObject: TestObject: first,second,third>']
        )
def test_regression_17877(self):
        """
        Ensure that extra WHERE clauses get correctly ANDed, even when they
        contain OR operations.
        """
        # The two extra() where fragments below must combine as
        # (first = 'a' OR second = 'a') AND third = 'a'; the regression was
        # the OR escaping its grouping and swallowing the AND.
        # Test Case 1: should appear in queryset.
        t = TestObject(first='a', second='a', third='a')
        t.save()
        # Test Case 2: should appear in queryset.
        t = TestObject(first='b', second='a', third='a')
        t.save()
        # Test Case 3: should not appear in queryset, bug case.
        t = TestObject(first='a', second='a', third='b')
        t.save()
        # Test Case 4: should not appear in queryset.
        t = TestObject(first='b', second='a', third='b')
        t.save()
        # Test Case 5: should not appear in queryset.
        t = TestObject(first='b', second='b', third='a')
        t.save()
        # Test Case 6: should not appear in queryset, bug case.
        t = TestObject(first='a', second='b', third='b')
        t.save()
        self.assertQuerysetEqual(
            TestObject.objects.extra(
                where=["first = 'a' OR second = 'a'", "third = 'a'"],
            ),
            ['<TestObject: TestObject: a,a,a>', '<TestObject: TestObject: b,a,a>'],
            ordered=False
        )
def test_extra_values_distinct_ordering(self):
        # Ordering by a column that exists only in extra(select=...) while
        # values_list() + distinct() are in effect (see #7070).
        t1 = TestObject.objects.create(first='a', second='a', third='a')
        t2 = TestObject.objects.create(first='a', second='b', third='b')
        qs = TestObject.objects.extra(
            select={'second_extra': 'second'}
        ).values_list('id', flat=True).distinct()
        # Ascending and descending ordering over the extra column.
        self.assertQuerysetEqual(
            qs.order_by('second_extra'), [t1.pk, t2.pk], lambda x: x)
        self.assertQuerysetEqual(
            qs.order_by('-second_extra'), [t2.pk, t1.pk], lambda x: x)
        # Note: the extra ordering must appear in select clause, so we get two
        # non-distinct results here (this is on purpose, see #7070).
        self.assertQuerysetEqual(
            qs.order_by('-second_extra').values_list('first', flat=True),
            ['a', 'a'], lambda x: x)
| bsd-3-clause |
rvmoura96/projeto-almoxarifado | myvenv/Lib/site-packages/pip/_vendor/packaging/markers.py | 324 | 8230 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import operator
import os
import platform
import sys
from pip._vendor.pyparsing import (
ParseException, ParseResults, stringStart, stringEnd,
)
from pip._vendor.pyparsing import ZeroOrMore, Group, Forward, QuotedString
from pip._vendor.pyparsing import Literal as L # noqa
from ._compat import string_types
from .specifiers import Specifier, InvalidSpecifier
__all__ = [
"InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName",
"Marker", "default_environment",
]
# Raised by Marker() when the marker string does not parse.
class InvalidMarker(ValueError):
    """
    An invalid marker was found, users should refer to PEP 508.
    """


# Raised by _eval_op() when an operator has no usable comparison.
class UndefinedComparison(ValueError):
    """
    An invalid operation was attempted on a value that doesn't support it.
    """


# Raised by _get_env() when a marker references an unknown variable.
class UndefinedEnvironmentName(ValueError):
    """
    A name was attempted to be used that does not exist inside of the
    environment.
    """
class Node(object):
    """Base class for nodes of a parsed marker expression.

    Wraps a single parsed token; subclasses define how the token is
    rendered back to PEP 508 source via serialize().
    """

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        return "<%s(%r)>" % (self.__class__.__name__, str(self))

    def serialize(self):
        """Render this node as marker source text; subclasses must override."""
        raise NotImplementedError
class Variable(Node):
    # An environment variable reference (e.g. "python_version");
    # serializes to its bare name.
    def serialize(self):
        return str(self)


class Value(Node):
    # A quoted literal operand; always re-quoted with double quotes.
    # NOTE(review): embedded double quotes are not escaped here --
    # presumably the grammar cannot produce them; verify before reuse.
    def serialize(self):
        return '"{0}"'.format(self)


class Op(Node):
    # A comparison operator such as "==", ">=", "in" or "not in".
    def serialize(self):
        return str(self)
# ---------------------------------------------------------------------------
# pyparsing grammar for PEP 508 environment markers.
# ---------------------------------------------------------------------------

# An environment-marker variable name.  Both the PEP 508 spellings and the
# older PEP 345 dotted spellings are accepted; dotted forms are normalized
# through ALIASES below.
VARIABLE = (
    L("implementation_version") |
    L("platform_python_implementation") |
    L("implementation_name") |
    L("python_full_version") |
    L("platform_release") |
    L("platform_version") |
    L("platform_machine") |
    L("platform_system") |
    L("python_version") |
    L("sys_platform") |
    L("os_name") |
    L("os.name") | # PEP-345
    L("sys.platform") | # PEP-345
    L("platform.version") | # PEP-345
    L("platform.machine") | # PEP-345
    L("platform.python_implementation") | # PEP-345
    L("python_implementation") | # undocumented setuptools legacy
    L("extra")
)

# Canonical names for the legacy / dotted spellings above.
ALIASES = {
    'os.name': 'os_name',
    'sys.platform': 'sys_platform',
    'platform.version': 'platform_version',
    'platform.machine': 'platform_machine',
    'platform.python_implementation': 'platform_python_implementation',
    'python_implementation': 'platform_python_implementation'
}
# Normalize the matched name and wrap it in a Variable node.
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))

# Version comparison operators.  Order matters: "|" (MatchFirst) takes the
# first alternative that matches, so "===" must precede "==", and ">=" / "<="
# must precede ">" / "<".
VERSION_CMP = (
    L("===") |
    L("==") |
    L(">=") |
    L("<=") |
    L("!=") |
    L("~=") |
    L(">") |
    L("<")
)

MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))

# A quoted literal operand; either quote style is accepted.
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))

BOOLOP = L("and") | L("or")

MARKER_VAR = VARIABLE | MARKER_VALUE

# One comparison, e.g.:  python_version >= "3.4"
# Collapsed to a plain (lhs, op, rhs) tuple of Node objects.
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))

LPAREN = L("(").suppress()
RPAREN = L(")").suppress()

# Expressions are recursive: an atom is either a comparison or a
# parenthesized sub-expression, and an expression is atoms joined by
# "and" / "or".
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)

# A complete marker must span the entire input string.
MARKER = stringStart + MARKER_EXPR + stringEnd
def _coerce_parse_result(results):
    """Recursively convert pyparsing ParseResults into plain nested lists.

    Non-ParseResults values (Node instances, tuples, strings) pass through
    unchanged.
    """
    if not isinstance(results, ParseResults):
        return results
    return [_coerce_parse_result(item) for item in results]
def _format_marker(marker, first=True):
    """Serialize a coerced marker structure back to PEP 508 source text.

    ``first`` is True only for the outermost call; nested list levels are
    parenthesized to preserve the original grouping.
    """
    assert isinstance(marker, (list, tuple, string_types))

    # A [[...]] wrapper around a single item adds no grouping information,
    # so unwrap it here to avoid extraneous parentheses around the whole
    # expression.
    if (isinstance(marker, list) and len(marker) == 1 and
            isinstance(marker[0], (list, tuple))):
        return _format_marker(marker[0])

    if isinstance(marker, tuple):
        # A single (lhs, op, rhs) comparison.
        return " ".join(part.serialize() for part in marker)
    if isinstance(marker, list):
        joined = " ".join(_format_marker(part, first=False) for part in marker)
        return joined if first else "(" + joined + ")"
    # Bare string: a boolean connective ("and" / "or").
    return marker
# Plain-value comparison operators, used by _eval_op() as a fallback when the
# right-hand side does not parse as a PEP 440 version specifier.
_operators = {
    "in": lambda lhs, rhs: lhs in rhs,
    "not in": lambda lhs, rhs: lhs not in rhs,
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _eval_op(lhs, op, rhs):
    """Evaluate one comparison and return its boolean result.

    PEP 440 semantics are preferred: if ``op`` concatenated with ``rhs``
    parses as a version Specifier, use Specifier.contains(lhs).  Otherwise
    fall back to the plain operators in _operators, raising
    UndefinedComparison for operators with no plain-value meaning.
    """
    try:
        spec = Specifier("".join([op.serialize(), rhs]))
    except InvalidSpecifier:
        pass
    else:
        return spec.contains(lhs)

    oper = _operators.get(op.serialize())
    if oper is None:
        raise UndefinedComparison(
            "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
        )

    return oper(lhs, rhs)
_undefined = object()
def _get_env(environment, name):
value = environment.get(name, _undefined)
if value is _undefined:
raise UndefinedEnvironmentName(
"{0!r} does not exist in evaluation environment.".format(name)
)
return value
def _evaluate_markers(markers, environment):
    """Recursively evaluate a coerced marker structure against environment.

    Evaluation builds a disjunctive normal form: ``groups`` is a list of
    AND-groups; each "or" connective starts a new group, and the result is
    the OR over all groups of the AND of their items.
    """
    groups = [[]]

    for marker in markers:
        assert isinstance(marker, (list, tuple, string_types))

        if isinstance(marker, list):
            # Parenthesized sub-expression: evaluate it as a unit.
            groups[-1].append(_evaluate_markers(marker, environment))
        elif isinstance(marker, tuple):
            lhs, op, rhs = marker

            # Exactly one side is a Variable; look it up, leaving the other
            # side as the literal value.
            if isinstance(lhs, Variable):
                lhs_value = _get_env(environment, lhs.value)
                rhs_value = rhs.value
            else:
                lhs_value = lhs.value
                rhs_value = _get_env(environment, rhs.value)

            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
        else:
            assert marker in ["and", "or"]
            # "and" keeps appending to the current group; "or" opens a new
            # one.
            if marker == "or":
                groups.append([])

    return any(all(item) for item in groups)
def format_full_version(info):
    """Render a version structseq (e.g. sys.implementation.version).

    Produces "major.minor.micro", plus for pre-releases the first letter of
    the release level and the serial number (e.g. "3.8.0b2").
    """
    version = '{0.major}.{0.minor}.{0.micro}'.format(info)
    kind = info.releaselevel
    if kind != 'final':
        version += kind[0] + str(info.serial)
    return version


def default_environment():
    """Return the default PEP 508 marker environment for this interpreter.

    The keys mirror the variable names accepted by the marker grammar
    (implementation_name, python_version, sys_platform, ...).
    """
    if hasattr(sys, 'implementation'):
        # PEP 421: sys.implementation exists on Python >= 3.3.
        iver = format_full_version(sys.implementation.version)
        implementation_name = sys.implementation.name
    else:
        iver = '0'
        implementation_name = ''

    return {
        "implementation_name": implementation_name,
        "implementation_version": iver,
        "os_name": os.name,
        "platform_machine": platform.machine(),
        "platform_release": platform.release(),
        "platform_system": platform.system(),
        "platform_version": platform.version(),
        "python_full_version": platform.python_version(),
        "platform_python_implementation": platform.python_implementation(),
        # BUG FIX: this previously sliced the first three *characters* of
        # the version string (platform.python_version()[:3]), which yields
        # "3.1" for Python 3.10 and later.  Join the first two version
        # components instead.
        "python_version": '.'.join(platform.python_version_tuple()[:2]),
        "sys_platform": sys.platform,
    }
class Marker(object):
    """A parsed PEP 508 environment marker, e.g. 'python_version >= "3.4"'."""

    def __init__(self, marker):
        try:
            # Parse into nested lists of (lhs, op, rhs) tuples and
            # "and"/"or" strings.
            self._markers = _coerce_parse_result(MARKER.parseString(marker))
        except ParseException as e:
            # Include up to 8 characters around the failure point to aid
            # debugging.
            err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
                marker, marker[e.loc:e.loc + 8])
            raise InvalidMarker(err_str)

    def __str__(self):
        # Re-serialize the parsed form (normalized, not the original text).
        return _format_marker(self._markers)

    def __repr__(self):
        return "<Marker({0!r})>".format(str(self))

    def evaluate(self, environment=None):
        """Evaluate a marker.

        Return the boolean from evaluating the given marker against the
        environment. environment is an optional argument to override all or
        part of the determined environment.

        The environment is determined from the current Python process.
        """
        current_environment = default_environment()
        if environment is not None:
            current_environment.update(environment)

        return _evaluate_markers(self._markers, current_environment)
| mit |
flypy/pykit | pykit/ir/passes.py | 3 | 1349 | # -*- coding: utf-8 -*-
"""
Pass helpers.
"""
from __future__ import print_function, division, absolute_import
from pykit.ir import defs, combine
from pykit.ir.builder import OpBuilder, Builder
from pykit.utils import prefix as prefix_, mergedicts
class FunctionPass(object):
    """
    Can be used from visitors or transformers, holds a builder and opbuilder.
    """

    # Class-level OpBuilder shared by all pass instances.
    opbuilder = OpBuilder()

    def __init__(self, func):
        # The function being processed, plus a Builder bound to it.
        self.func = func
        self.builder = Builder(func)
#===------------------------------------------------------------------===
# Pass to group operations such as add/mul
#===------------------------------------------------------------------===
def opgrouper(visitor, prefix='op_'):
    """
    op_add, op_mul, ... -> op_binary

    Wrap `visitor` so that all prefixed unary, binary and comparison opcode
    handlers are routed to its op_unary / op_binary / op_compare methods.
    """
    handlers = mergedicts(unop_handlers(visitor.op_unary, prefix),
                          binop_handlers(visitor.op_binary, prefix),
                          compare_handlers(visitor.op_compare, prefix))
    return combine(visitor, handlers)
def unop_handlers(handler, prefix='op_'):
    # Map each prefixed unary opcode name from defs.unary to `handler`.
    return dict.fromkeys(prefix_(defs.unary, prefix), handler)


def binop_handlers(handler, prefix='op_'):
    # Map each prefixed binary opcode name from defs.binary to `handler`.
    return dict.fromkeys(prefix_(defs.binary, prefix), handler)


def compare_handlers(handler, prefix='op_'):
    # Map each prefixed comparison opcode name from defs.compare to `handler`.
    return dict.fromkeys(prefix_(defs.compare, prefix), handler)
hroncok/devassistant | test/test_snippet.py | 8 | 2063 | import os
import pytest
from devassistant import settings
from devassistant.loaded_yaml import LoadedYaml
from devassistant.snippet import Snippet
class TestSnippet(object):
    """Unit tests for devassistant.snippet.Snippet accessors.

    Each case builds a Snippet directly from a literal yaml dict.
    NOTE(review): the other two positional Snippet arguments are passed as
    '' -- presumably name and path, irrelevant to these accessors; verify.
    """

    @pytest.mark.parametrize(('yaml', 'expected'), [
        ({}, {}),
        ({'args': {}}, {}),
        ({'args': {'foo': 'bar'}}, {'foo': 'bar'})
    ])
    def test_args(self, yaml, expected):
        # 'args' section is exposed as-is; missing or empty section -> {}.
        snip = Snippet('', yaml, '')
        assert snip.args == expected

    @pytest.mark.parametrize(('yaml', 'name', 'expected'), [
        ({}, 'foo', {}),
        ({'args': {'foo': 'bar'}}, 'foo', 'bar')
    ])
    def test_get_arg_by_name(self, yaml, name, expected):
        # Per the first case, an argument absent from 'args' yields {}.
        snip = Snippet('', yaml, '')
        assert snip.get_arg_by_name(name) == expected

    @pytest.mark.parametrize(('yaml', 'section_name', 'expected'), [
        ({}, 'foo', None),
        ({'run': ['foo', 'bar']}, 'run', ['foo', 'bar'])
    ])
    def test_get_run_section(self, yaml, section_name, expected):
        snip = Snippet('', yaml, '')
        assert snip.get_run_section(section_name) == expected

    @pytest.mark.parametrize(('yaml', 'expected'), [
        # Without 'files_dir' the snippet falls back to the fixtures dir
        # next to this test file.
        ({}, os.path.dirname(__file__) + '/fixtures/files/snippets/'),
        ({'files_dir': 'foo'}, 'foo')
    ])
    def test_get_files_dir(self, yaml, expected):
        snip = Snippet('', yaml, '')
        assert snip.get_files_dir() == expected

    @pytest.mark.parametrize(('yaml', 'section_name', 'expected'), [
        ({}, 'dependencies', None),
        ({'dependencies': ['foo']}, 'dependencies', ['foo']),
        # NOTE(review): per the third case, a named dependency section is
        # apparently merged after the default 'dependencies' list -- verify
        # against Snippet.get_dependencies_section.
        ({'dependencies': ['foo'], 'bar': ['baz']}, 'bar', ['foo', 'baz'])
    ])
    def test_get_dependencies_section(self, yaml, section_name, expected):
        snip = Snippet('', yaml, '')
        assert snip.get_dependencies_section(section_name) == expected

    @pytest.mark.parametrize(('yaml', 'expected'), [
        ({}, {}),
        ({'files': 'foo'}, 'foo')
    ])
    def test_get_files_section(self, yaml, expected):
        snip = Snippet('', yaml, '')
        assert snip.get_files_section() == expected
| gpl-2.0 |
a3qz/networked_platformer | editor/editor.py | 1 | 4284 | import random
import time
import data
import sprite
import sys
import pygame
import constants
import collectable
import client
import wall
from struct import *
import board
class Game:
    """Level-editor application state: object list, camera player and board.

    The 32x32 grid placement in handleMDOWN mirrors the tile size used by
    the board; cells are addressed by their top-left pixel coordinates.
    """

    def __init__(self, s):
        self.objects = [] #start with a list of no objects
        self.screen = s #get the screen surface
        #make a player ship to use to control the view
        self.player = Ship(self, 100, 100, "91913")
        #load a font for drawing our typed string
        self.bigfont = pygame.font.Font("./fonts/megaman_2.ttf", 32)
        #make a board and load in the level editor level
        self.board = board.Board(self)
        self.board.parse("./levels/beta.lvl")

    def tick(self): #handle just our player for the editor
        self.player.tick()

    def draw(self):
        #draw the background
        self.screen.fill(constants.GREEN)
        #draw the objects in reversed order, for depth reasons
        for b in reversed(self.objects):
            b.draw()

    #handle player actions (forwarded to the editor cursor/ship)
    def handleKeyDown(self, k):
        self.player.handleKeyDown(k)

    def handleKeyUp(self, k):
        self.player.handleKeyUp(k)

    #no longer used
    def handleMUP(self, xxt, yyt):
        pass

    def handleMDOWN(self, xxt, yyt, event):
        """Left click places the typed object id at the clicked grid cell;
        any other button deletes the object under the cursor."""
        #figure out which grid space the player clicked on
        #(+16 rounds to the nearest 32px cell after removing the camera offset)
        x = int((16 + xxt - self.player.view[0])/32)*32
        y = int((16 + yyt - self.player.view[1])/32)*32
        #check if they are left clicking or not
        if event.button == 1:
            #if left click, add a thing to the board based off
            #where you clicked and what the user typed
            # NOTE(review): int(self.player.toadd) raises ValueError when
            # nothing has been typed yet -- confirm callers guard this.
            self.board.ref[int(self.player.toadd)](self, x, y,
                int(self.player.toadd))
        else:
            #otherwise, make a rectangle and figure out who you clicked on
            rect = pygame.Rect(0, 0, 1, 1)
            l1 = self.objects
            l2 = [w.rect for w in l1]
            #check the objects for collision
            i = rect.move(x, y).collidelist(l2)
            #if we clicked on a valid thing to remove, remove it
            #(the editor cursor itself is never removable)
            if i != -1 and not isinstance(l1[i], Ship):
                self.objects = [o for o in self.objects if o != l1[i]]
class Ship(sprite.Sprite):
    """The editor's flying cursor: controls the camera and placement input.

    self.keys is a bitmask of held movement keys:
    bit 0 = 'a' (left), bit 1 = 'd' (right), bit 2 = 'w' (up),
    bit 3 = 's' (down).

    NOTE(review): tick() reads self.vx/self.vy before fly() first assigns
    them; presumably sprite.Sprite.__init__ initializes both -- verify.
    """

    def __init__(self, game, x, y, descriptor):
        super(Ship, self).__init__(game)
        self.rect.move_ip(x, y) #move to the correct coordinates
        #load an image (descriptor is looked up in data.num_as_key)
        self.img = pygame.image.load('imgs/cards/smaller_pngs/{}'.format(data.num_as_key[descriptor])).convert_alpha()
        #set up our game's viewport
        self.view = (0, 0)
        #start a string for typing
        self.toadd = ''
        #make us our correct size
        self.rect.inflate_ip(100, 145)
        #we aren't pressing anything
        self.keys = 0

    def tick(self):
        #move us based off our velocity
        self.rect.move_ip(self.vx, self.vy)
        #move our view to the right place (keep us centered horizontally,
        #3/4 of the way down the screen)
        self.view = (constants.WIDTH/2 - self.rect.x,
                     (constants.HEIGHT*3)/4 - self.rect.y)
        #handle keys
        self.fly()

    def draw(self):
        self.game.screen.blit(self.img, self.rect.move(*self.view))
        #render the currently typed object id in the top-left corner
        label = self.game.bigfont.render(self.toadd, 1, (255, 255, 255))
        self.game.screen.blit(label, (10, 10))

    def handleKeyDown(self, k):
        #asdw control flight
        if k == 'a':
            self.keys |= 1
        elif k == 'd':
            self.keys |= 2
        elif k == 'w':
            self.keys |= 4
        elif k == 's':
            self.keys |= 8
        elif k.isdigit() and len(k) == 1: #if we did a single digit, type it
            self.toadd = self.toadd + k
        elif k == 'backspace': #if we backspaced, delete a char from our string
            self.toadd = self.toadd[:-1]

    #stop flying when releasing keys
    def handleKeyUp(self, k):
        if k == 'a':
            self.keys &= ~1
        elif k == 'd':
            self.keys &= ~2
        elif k == 'w':
            self.keys &= ~4
        elif k == 's':
            self.keys &= ~8

    def fly(self):
        #handle our velocities: 7 px/tick per axis; pressing opposing keys
        #cancels out.  'w' yields negative vy (screen-up).
        self.vx = (((self.keys & 2)>>1) - ((self.keys & 1)>>0)) * 7
        self.vy = (((self.keys & 4)>>2) - ((self.keys & 8)>>3)) * -7
| gpl-3.0 |
M4sse/chromium.src | tools/dromaeo_benchmark_runner/dromaeo_benchmark_runner.py | 176 | 9706 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dromaeo benchmark automation script.
Script runs dromaeo tests in browsers specified by --browser switch and saves
results to a spreadsheet on docs.google.com.
Prerequisites:
1. Install Google Data APIs Python Client Library from
http://code.google.com/p/gdata-python-client.
2. Checkout Dromaeo benchmark from
http://src.chromium.org/svn/trunk/src/chrome/test/data/dromaeo and provide
local path to it in --dromaeo_home switch.
3. Create a spreadsheet at http://docs.google.com and specify its name in
--spreadsheet switch
Benchmark results are presented in the following format:
browser | date time
test 1 name|m11|...|m1n|test 1 average mean| |e11|...|e1n|test 1 average error
test 2 name|m21|...|m2n|test 2 average mean| |e21|...|e2n|test 2 average error
...
Here mij is mean run/s in individual dromaeo test i during benchmark run j,
eij is error in individual dromaeo test i during benchmark run j.
Example usage:
dromaeo_benchmark_runner.py -b "E:\chromium\src\chrome\Release\chrome.exe"
-b "C:\Program Files (x86)\Safari\safari.exe"
-b "C:\Program Files (x86)\Opera 10.50 pre-alpha\opera.exe" -n 1
-d "E:\chromium\src\chrome\test\data\dromaeo" -f dom -e example@gmail.com
"""
import getpass
import json
import os
import re
import subprocess
import time
import urlparse
from optparse import OptionParser
from BaseHTTPServer import HTTPServer
import SimpleHTTPServer
import gdata.spreadsheet.service
max_spreadsheet_columns = 20
test_props = ['mean', 'error']
def ParseArguments():
  """Parse command-line options (from sys.argv) into an optparse object.

  Raises Exception when the mandatory --dromaeo_home switch is missing.
  """
  parser = OptionParser()
  parser.add_option("-b", "--browser",
                    action="append", dest="browsers",
                    help="list of browsers to test")
  parser.add_option("-n", "--run_count", dest="run_count", type="int",
                    default=5, help="number of runs")
  parser.add_option("-d", "--dromaeo_home", dest="dromaeo_home",
                    help="directory with your dromaeo files")
  parser.add_option("-p", "--port", dest="port", type="int",
                    default=8080, help="http server port")
  parser.add_option("-f", "--filter", dest="filter",
                    default="dom", help="dromaeo suite filter")
  parser.add_option("-e", "--email", dest="email",
                    help="your google docs account")
  parser.add_option("-s", "--spreadsheet", dest="spreadsheet_title",
                    default="dromaeo",
                    help="your google docs spreadsheet name")
  # parse_args() returns (options, args); positional args are ignored.
  options = parser.parse_args()[0]
  if not options.dromaeo_home:
    raise Exception('please specify dromaeo_home')
  return options
def KillProcessByName(process_name):
  """Windows-only: kill the first process whose executable path matches.

  Uses `wmic` to map executable paths to pids, then `taskkill` on the
  first match; does nothing when no process matches.
  """
  process = subprocess.Popen('wmic process get processid, executablepath',
                             stdout=subprocess.PIPE)
  stdout = str(process.communicate()[0])
  # wmic output lines look like "<path>   <pid>"; anchor on the exact path.
  match = re.search(re.escape(process_name) + '\s+(\d+)', stdout)
  if match:
    pid = match.group(1)
    subprocess.call('taskkill /pid %s' % pid)
class SpreadsheetWriter(object):
"Utility class for storing benchmarking results in Google spreadsheets."
def __init__(self, email, spreadsheet_title):
'''Login to google docs and search for spreadsheet'''
self.token_file = os.path.expanduser("~/.dromaeo_bot_auth_token")
self.gd_client = gdata.spreadsheet.service.SpreadsheetsService()
authenticated = False
if os.path.exists(self.token_file):
token = ''
try:
file = open(self.token_file, 'r')
token = file.read()
file.close()
self.gd_client.SetClientLoginToken(token)
self.gd_client.GetSpreadsheetsFeed()
authenticated = True
except (IOError, gdata.service.RequestError):
pass
if not authenticated:
self.gd_client.email = email
self.gd_client.password = getpass.getpass('Password for %s: ' % email)
self.gd_client.source = 'python robot for dromaeo'
self.gd_client.ProgrammaticLogin()
token = self.gd_client.GetClientLoginToken()
try:
file = open(self.token_file, 'w')
file.write(token)
file.close()
except (IOError):
pass
os.chmod(self.token_file, 0600)
# Search for the spreadsheet with title = spreadsheet_title.
spreadsheet_feed = self.gd_client.GetSpreadsheetsFeed()
for spreadsheet in spreadsheet_feed.entry:
if spreadsheet.title.text == spreadsheet_title:
self.spreadsheet_key = spreadsheet.id.text.rsplit('/', 1)[1]
if not self.spreadsheet_key:
raise Exception('Spreadsheet %s not found' % spreadsheet_title)
# Get the key of the first worksheet in spreadsheet.
worksheet_feed = self.gd_client.GetWorksheetsFeed(self.spreadsheet_key)
self.worksheet_key = worksheet_feed.entry[0].id.text.rsplit('/', 1)[1]
def _InsertRow(self, row):
row = dict([('c' + str(i), row[i]) for i in xrange(len(row))])
self.gd_client.InsertRow(row, self.spreadsheet_key, self.worksheet_key)
def _InsertBlankRow(self):
self._InsertRow('-' * self.columns_count)
def PrepareSpreadsheet(self, run_count):
"""Update cells in worksheet topmost row with service information.
Calculate column count corresponding to run_count and create worksheet
column titles [c0, c1, ...] in the topmost row to speed up spreadsheet
updates (it allows to insert a whole row with a single request)
"""
# Calculate the number of columns we need to present all test results.
self.columns_count = (run_count + 2) * len(test_props)
if self.columns_count > max_spreadsheet_columns:
# Google spreadsheet has just max_spreadsheet_columns columns.
max_run_count = max_spreadsheet_columns / len(test_props) - 2
raise Exception('maximum run count is %i' % max_run_count)
# Create worksheet column titles [c0, c1, ..., cn].
for i in xrange(self.columns_count):
self.gd_client.UpdateCell(1, i + 1, 'c' + str(i), self.spreadsheet_key,
self.worksheet_key)
def WriteColumnTitles(self, run_count):
"Create titles for test results (mean 1, mean 2, ..., average mean, ...)"
row = []
for prop in test_props:
row.append('')
for i in xrange(run_count):
row.append('%s %i' % (prop, i + 1))
row.append('average ' + prop)
self._InsertRow(row)
def WriteBrowserBenchmarkTitle(self, browser_name):
"Create browser benchmark title (browser name, date time)"
self._InsertBlankRow()
self._InsertRow([browser_name, time.strftime('%d.%m.%Y %H:%M:%S')])
def WriteBrowserBenchmarkResults(self, test_name, test_data):
"Insert a row with single test results"
row = []
for prop in test_props:
if not row:
row.append(test_name)
else:
row.append('')
row.extend([str(x) for x in test_data[prop]])
row.append(str(sum(test_data[prop]) / len(test_data[prop])))
self._InsertRow(row)
class DromaeoHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
  """Serves the dromaeo files (GET via the base class) and captures the
  benchmark's JSON results POSTed back by the page."""

  def do_POST(self):
    self.send_response(200)
    self.end_headers()
    self.wfile.write("<HTML>POST OK.<BR><BR>");
    length = int(self.headers.getheader('content-length'))
    parameters = urlparse.parse_qs(self.rfile.read(length))
    # Signal main()'s handle_request loop that the results have arrived.
    self.server.got_post = True
    self.server.post_data = parameters['data']
class BenchmarkResults(object):
  "Storage class for dromaeo benchmark results"

  def __init__(self):
    # Maps test name -> {prop: [value per run, ...]} for each property
    # in the module-level test_props list.
    self.data = {}

  def ProcessBrowserPostData(self, data):
    "Convert dromaeo test results in internal format"
    for test in json.loads(data[0]):
      # First sighting of a test creates empty per-property run lists.
      entry = self.data.setdefault(
          test['name'], dict((prop, []) for prop in test_props))
      for prop in test_props:
        # Some browsers omit properties (workaround for Opera 10.5);
        # record -1 as the missing-value marker.
        entry[prop].append(test.get(prop, -1))
def main():
  """Drive the benchmark: serve dromaeo locally, run every requested
  browser run_count times, and upload the results.  Returns 0."""
  options = ParseArguments()

  # Start server with dromaeo.
  os.chdir(options.dromaeo_home)
  server = HTTPServer(('', options.port), DromaeoHandler)

  # Open and prepare spreadsheet on docs.google.com.
  spreadsheet_writer = SpreadsheetWriter(options.email,
                                         options.spreadsheet_title)
  spreadsheet_writer.PrepareSpreadsheet(options.run_count)
  spreadsheet_writer.WriteColumnTitles(options.run_count)

  for browser in options.browsers:
    browser_name = os.path.splitext(os.path.basename(browser))[0]
    spreadsheet_writer.WriteBrowserBenchmarkTitle(browser_name)
    benchmark_results = BenchmarkResults()
    for run_number in xrange(options.run_count):
      print '%s run %i' % (browser_name, run_number + 1)
      # Run browser; "automated&post_json" makes the page POST its results
      # back to this server.
      test_page = 'http://localhost:%i/index.html?%s&automated&post_json' % (
          options.port, options.filter)
      browser_process = subprocess.Popen('%s "%s"' % (browser, test_page))
      server.got_post = False
      server.post_data = None
      # Wait until POST request from browser.
      while not server.got_post:
        server.handle_request()
      benchmark_results.ProcessBrowserPostData(server.post_data)
      # Kill browser.
      KillProcessByName(browser)
      browser_process.wait()

    # Insert test results into spreadsheet.
    for (test_name, test_data) in benchmark_results.data.iteritems():
      spreadsheet_writer.WriteBrowserBenchmarkResults(test_name, test_data)

  server.socket.close()
  return 0
if __name__ == '__main__':
  # BUG FIX: 'sys' is never imported at module level in this file, so
  # sys.exit() raised NameError at script exit.  Import it locally here.
  import sys
  sys.exit(main())
| bsd-3-clause |
Kapeli/PopClip-Extensions | source/InstantTranslate/requests/packages/chardet/eucjpprober.py | 2919 | 3678 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
    """Charset prober for EUC-JP.

    Combines a byte-level coding state machine (sequence validity) with two
    statistical analyzers: character frequency distribution and two-character
    context.
    """

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(EUCJPSMModel)
        self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
        self._mContextAnalyzer = EUCJPContextAnalysis()
        self.reset()

    def reset(self):
        MultiByteCharSetProber.reset(self)
        # The context analyzer keeps its own state beyond the base class's.
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        return "EUC-JP"

    def feed(self, aBuf):
        """Feed a chunk of bytes; return the updated detection state."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            # PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                # Invalid EUC-JP byte sequence: rule this charset out.
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character was recognized; feed it to both
                # analyzers.  At i == 0 the character straddles the chunk
                # boundary, so use the byte carried over in _mLastChar.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar, charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)

        # Remember the final byte so cross-chunk characters are handled on
        # the next feed() call.
        self._mLastChar[0] = aBuf[aLen - 1]

        if self.get_state() == constants.eDetecting:
            # Shortcut: declare a match once confidence is high enough.
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt

        return self.get_state()

    def get_confidence(self):
        # Overall confidence is the better of the two analyses.
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
| mit |
yaojingwu1992/XlsxWriter | xlsxwriter/test/comparison/test_header_image10.py | 8 | 1486 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.maxDiff = None

        filename = 'header_image10.xlsx'

        test_dir = 'xlsxwriter/test/comparison/'
        self.image_dir = test_dir + 'images/'
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename

        self.ignore_files = []
        # Exclude page margin/setup elements from the XML comparison for
        # both worksheets.
        self.ignore_elements = {'xl/worksheets/sheet1.xml': ['<pageMargins', '<pageSetup'],
                                'xl/worksheets/sheet2.xml': ['<pageMargins', '<pageSetup']}

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with image(s)."""
        workbook = Workbook(self.got_filename)

        worksheet1 = workbook.add_worksheet()
        worksheet2 = workbook.add_worksheet()

        # Sheet 1: a header with an image anchored in the left section
        # (&G is the image placeholder).
        worksheet1.set_header('&L&G',
                              {'image_left': self.image_dir + 'red.jpg'})

        # Sheet 2: plain cell data plus an authored comment.
        worksheet2.write('A1', 'Foo')
        worksheet2.write_comment('B2', 'Some text')

        worksheet2.set_comments_author('John')

        workbook.close()

        self.assertExcelEqual()
| bsd-2-clause |
Dreae/PyCrest | docs/conf.py | 4 | 7522 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PyCrest documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 10 11:21:12 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# -- Sphinx build configuration for the PyCrest documentation ---------------
# Only comments are added in this review pass; all values are unchanged.
version = "0.0.1"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PyCrest'
copyright = '2015, Dreae'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
# NOTE: release intentionally mirrors `version` above (no alpha/beta suffix).
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
# NOTE: no html_theme is set, so Sphinx falls back to its default theme.
#html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyCrest'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'PyCrest.tex', 'PyCrest Documentation',
   'Dreae', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pycrest', 'PyCrest Documentation',
     ['Dreae'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'PyCrest', 'PyCrest Documentation',
   'Dreae', 'PyCrest', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
onitake/ansible | lib/ansible/utils/module_docs_fragments/openswitch.py | 58 | 3815 | #
# (c) 2015, Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    """Reusable documentation fragment for OpenSwitch connection options.

    Modules reference this via ``extends_documentation_fragment`` so the
    shared YAML below is merged into their own DOCUMENTATION at doc-build
    time.  The string is data consumed by Ansible's doc tooling, not a
    docstring.
    """
    # Standard files documentation fragment
    DOCUMENTATION = """
options:
  host:
    description:
      - Specifies the DNS host name or address for connecting to the remote
        device over the specified transport.  The value of host is used as
        the destination address for the transport.  Note this argument
        does not affect the SSH argument.
    required: true
  port:
    description:
      - Specifies the port to use when building the connection to the remote
        device.  This value applies to either I(cli) or I(rest).  The port
        value will default to the appropriate transport common port if
        none is provided in the task.  (cli=22, http=80, https=443).  Note
        this argument does not affect the SSH transport.
    default: 0 (use common port)
  username:
    description:
      - Configures the username to use to authenticate the connection to
        the remote device.  This value is used to authenticate
        either the CLI login or the eAPI authentication depending on which
        transport is used. Note this argument does not affect the SSH
        transport. If the value is not specified in the task, the value of
        environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
  password:
    description:
      - Specifies the password to use to authenticate the connection to
        the remote device.  This is a common argument used for either I(cli)
        or I(rest) transports.  Note this argument does not affect the SSH
        transport. If the value is not specified in the task, the value of
        environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
  timeout:
    description:
      - Specifies the timeout in seconds for communicating with the network device
        for either connecting or sending commands.  If the timeout is
        exceeded before the operation is completed, the module will error.
    default: 10
  ssh_keyfile:
    description:
      - Specifies the SSH key to use to authenticate the connection to
        the remote device.  This argument is only used for the I(cli)
        transports. If the value is not specified in the task, the value of
        environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
  transport:
    description:
      - Configures the transport connection to use when connecting to the
        remote device.  The transport argument supports connectivity to the
        device over ssh, cli or REST.
    required: true
    default: ssh
    choices: ['ssh', 'cli', 'rest']
  use_ssl:
    description:
      - Configures the I(transport) to use SSL if set to true only when the
        I(transport) argument is configured as rest.  If the transport
        argument is not I(rest), this value is ignored.
    type: bool
    default: 'yes'
  provider:
    description:
      - Convenience method that allows all I(openswitch) arguments to be passed as
        a dict object.  All constraints (required, choices, etc) must be
        met either by individual arguments or values in this dict.
"""
| gpl-3.0 |
megaserg/pants | tests/python/pants_test/backend/core/tasks/test_mutex_task_mixin.py | 5 | 4378 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import functools
from contextlib import contextmanager
from pants.backend.core.tasks.mutex_task_mixin import MutexTaskMixin
from pants.build_graph.target import Target
from pants.util.contextutil import temporary_dir
from pants_test.base_test import BaseTest
class LogViewerTaskMixin(MutexTaskMixin):
  """Test fixture: the common mutex base shared by the viewer tasks below.

  Subclasses declaring this as their ``mutex_base`` form one mutex group;
  the tests below exercise which member gets to execute.
  """
  @classmethod
  def mutex_base(cls):
    # Every concrete viewer reports this class as the shared mutex root.
    return LogViewerTaskMixin
  def __init__(self, *args, **kwargs):
    super(LogViewerTaskMixin, self).__init__(*args, **kwargs)
    # Records the targets passed to execute_for; stays None if never run.
    self._executed = None
  @property
  def executed(self):
    # The target list this task executed against, or None.
    return self._executed
  def execute_for(self, targets):
    self._executed = targets
class RedTarget(Target):
  """Marker target type matched by RedLogViewer.select_targets."""
  pass
class RedLogViewer(LogViewerTaskMixin):
  """Mutex-group member that selects only RedTarget targets."""
  options_scope = 'test_scope_red'
  @classmethod
  def select_targets(cls, target):
    return isinstance(target, RedTarget)
class BlueTarget(Target):
  """Marker target type matched by BlueLogViewer.select_targets."""
  pass
class BlueLogViewer(LogViewerTaskMixin):
  """Mutex-group member that selects only BlueTarget targets."""
  options_scope = 'test_scope_blue'
  @classmethod
  def select_targets(cls, target):
    return isinstance(target, BlueTarget)
class GreenTarget(Target):
  """Marker target type matched by GreenLogViewer.select_targets."""
  pass
class GreenLogViewer(LogViewerTaskMixin):
  """Mutex-group member that selects only GreenTarget targets."""
  options_scope = 'test_scope_green'
  @classmethod
  def select_targets(cls, target):
    return isinstance(target, GreenTarget)
class MutexTaskMixinTest(BaseTest):
  """Tests MutexTaskMixin activation rules via the Red/Blue/Green viewers."""
  def tearDown(self):
    super(MutexTaskMixinTest, self).tearDown()
    # Clear implementation registration so state does not leak between tests.
    LogViewerTaskMixin.reset_implementations()
  @contextmanager
  def mutex_group(self, targets=None):
    """Yields one prepared instance of each viewer task over `targets`."""
    context = self.context(target_roots=targets,
                           for_task_types=[RedLogViewer, BlueLogViewer, GreenLogViewer])
    def prepare_task(task_type):
      task_type.prepare(self.options, round_manager=None)
    prepare_task(RedLogViewer)
    prepare_task(BlueLogViewer)
    prepare_task(GreenLogViewer)
    def create_task(workdir, task_type):
      return task_type(context, workdir)
    # Each task gets its own temporary workdir for the duration of the test.
    with temporary_dir() as red, temporary_dir() as blue, temporary_dir() as green:
      red_viewer = create_task(red, RedLogViewer)
      blue_viewer = create_task(blue, BlueLogViewer)
      green_viewer = create_task(green, GreenLogViewer)
      yield red_viewer, blue_viewer, green_viewer
  def test_one(self):
    # Only red targets in play: only the red viewer should actually run.
    red = self.make_target('red', RedTarget)
    with self.mutex_group(targets=[red]) as (red_viewer, blue_viewer, green_viewer):
      red_viewer.execute()
      blue_viewer.execute()
      green_viewer.execute()
      self.assertEqual([red], red_viewer.executed)
      self.assertIsNone(blue_viewer.executed)
      self.assertIsNone(green_viewer.executed)
  def assert_activation_error(self, error_type, viewer):
    # The viewer must raise and must not have executed against any targets.
    with self.assertRaises(error_type):
      viewer.execute()
    self.assertIsNone(viewer.executed)
  def test_none(self):
    # No targets at all: every member fails with NoActivationsError.
    assert_no_activations = functools.partial(self.assert_activation_error,
                                              MutexTaskMixin.NoActivationsError)
    with self.mutex_group() as (red_viewer, blue_viewer, green_viewer):
      assert_no_activations(red_viewer)
      assert_no_activations(blue_viewer)
      assert_no_activations(green_viewer)
  def assert_incompatible_activations(self, viewer):
    self.assert_activation_error(MutexTaskMixin.IncompatibleActivationsError, viewer)
  def test_some_incompatible(self):
    # Red and blue targets together activate two members at once.
    red = self.make_target('red', RedTarget)
    blue = self.make_target('blue', BlueTarget)
    with self.mutex_group(targets=[red, blue]) as (red_viewer, blue_viewer, green_viewer):
      self.assert_incompatible_activations(red_viewer)
      self.assert_incompatible_activations(blue_viewer)
      # The green viewer was never activated, so its execute is a no-op.
      green_viewer.execute()
      self.assertIsNone(green_viewer.executed)
  def test_all_incompatible(self):
    red = self.make_target('red', RedTarget)
    blue = self.make_target('blue', BlueTarget)
    green = self.make_target('green', GreenTarget)
    with self.mutex_group(targets=[red, blue, green]) as (red_viewer, blue_viewer, green_viewer):
      self.assert_incompatible_activations(red_viewer)
      self.assert_incompatible_activations(blue_viewer)
      self.assert_incompatible_activations(green_viewer)
| apache-2.0 |
redhat-openstack/rally | rally/cli/commands/plugin.py | 10 | 4075 | # Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import textwrap
from rally.cli import cliutils
from rally.common.plugin import plugin
from rally.common import utils
from rally import plugins
class PluginCommands(object):
    """Command allows to manage Rally plugins."""

    @staticmethod
    def _print_plugins_list(plugin_list):
        # One table row per plugin: name, namespace and short title.
        rows = [utils.Struct(name=f.get_name(),
                             namespace=f.get_namespace(),
                             title=f.get_info()["title"])
                for f in plugin_list]
        cliutils.print_list(rows, fields=["name", "namespace", "title"])

    @cliutils.args("--name", dest="name", type=str,
                   help="Plugin name.")
    @cliutils.args("--namespace", dest="namespace", type=str,
                   help="Plugin namespace.")
    @plugins.ensure_plugins_are_loaded
    def show(self, name, namespace=None):
        """Show detailed information about Rally plugin."""
        name_lw = name.lower()
        all_plugins = plugin.Plugin.get_all(namespace=namespace)
        # Substring matches first; an exact name match (if any) wins over
        # multiple substring matches.
        found = [p for p in all_plugins if name_lw in p.get_name().lower()]
        exact_match = [p for p in found if name_lw == p.get_name().lower()]
        if not found:
            if namespace:
                print(
                    "There is no plugin: %(name)s in %(namespace)s namespace"
                    % {"name": name, "namespace": namespace}
                )
            else:
                print("There is no plugin: %s" % name)
        elif len(found) == 1 or exact_match:
            plugin_ = found[0] if len(found) == 1 else exact_match[0]
            plugin_info = plugin_.get_info()
            print(cliutils.make_header(plugin_info["title"]))
            print("NAME\n\t%s" % plugin_info["name"])
            print("NAMESPACE\n\t%s" % plugin_info["namespace"])
            print("MODULE\n\t%s" % plugin_info["module"])
            if plugin_info["description"]:
                print("DESCRIPTION\n\t", end="")
                print(textwrap.fill(plugin_info["description"],
                                    subsequent_indent="\t"))
            if plugin_info["parameters"]:
                print("PARAMETERS")
                # BUG FIX: the format string was "g%s\n", which prefixed a
                # stray "g" to every parameter description in the output.
                rows = [utils.Struct(name=p["name"],
                                     description="%s\n" % p["doc"])
                        for p in plugin_info["parameters"]]
                cliutils.print_list(rows, fields=["name", "description"])
        else:
            print("Multiple plugins found:")
            self._print_plugins_list(found)

    @cliutils.args("--name", dest="name", type=str,
                   help="List only plugins that match passed name.")
    @cliutils.args("--namespace", dest="namespace", type=str,
                   help="List only plugins that are in specified namespace")
    @plugins.ensure_plugins_are_loaded
    def list(self, name=None, namespace=None):
        """List all Rally plugins that match name and namespace."""
        all_plugins = plugin.Plugin.get_all(namespace=namespace)
        matched = all_plugins
        if name:
            # Case-insensitive substring filter on the plugin name.
            name_lw = name.lower()
            matched = [p for p in all_plugins
                       if name_lw in p.get_name().lower()]
        if not all_plugins:
            print("There is no plugin namespace: %s" % namespace)
        elif not matched:
            print("There is no plugin: %s" % name)
        else:
            self._print_plugins_list(matched)
| apache-2.0 |
23andMe/ansible-modules-extras | network/a10/a10_service_group.py | 117 | 13447 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage A10 Networks slb service-group objects
(c) 2014, Mischa Peters <mpeters@a10networks.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: a10_service_group
version_added: 1.8
short_description: Manage A10 Networks devices' service groups
description:
- Manage slb service-group objects on A10 Networks devices via aXAPI
author: "Mischa Peters (@mischapeters)"
notes:
- Requires A10 Networks aXAPI 2.1
- When a server doesn't exist and is added to the service-group the server will be created
options:
host:
description:
- hostname or ip of your A10 Networks device
required: true
default: null
aliases: []
choices: []
username:
description:
- admin account of your A10 Networks device
required: true
default: null
aliases: ['user', 'admin']
choices: []
password:
description:
- admin password of your A10 Networks device
required: true
default: null
aliases: ['pass', 'pwd']
choices: []
service_group:
description:
- slb service-group name
required: true
default: null
aliases: ['service', 'pool', 'group']
choices: []
service_group_protocol:
description:
- slb service-group protocol
required: false
default: tcp
aliases: ['proto', 'protocol']
choices: ['tcp', 'udp']
service_group_method:
description:
- slb service-group loadbalancing method
required: false
default: round-robin
aliases: ['method']
choices: ['round-robin', 'weighted-rr', 'least-connection', 'weighted-least-connection', 'service-least-connection', 'service-weighted-least-connection', 'fastest-response', 'least-request', 'round-robin-strict', 'src-ip-only-hash', 'src-ip-hash']
servers:
description:
- A list of servers to add to the service group. Each list item should be a
dictionary which specifies the C(server:) and C(port:), but can also optionally
specify the C(status:). See the examples below for details.
required: false
default: null
aliases: []
choices: []
write_config:
description:
- If C(yes), any changes will cause a write of the running configuration
to non-volatile memory. This will save I(all) configuration changes,
including those that may have been made manually or through other modules,
so care should be taken when specifying C(yes).
required: false
default: "no"
choices: ["yes", "no"]
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled devices using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
# Create a new service-group
- a10_service_group:
host: a10.mydomain.com
username: myadmin
password: mypassword
service_group: sg-80-tcp
servers:
- server: foo1.mydomain.com
port: 8080
- server: foo2.mydomain.com
port: 8080
- server: foo3.mydomain.com
port: 8080
- server: foo4.mydomain.com
port: 8080
status: disabled
'''
# Fields compared against the device's service-group definition when
# deciding whether an update is needed (see main()).
VALID_SERVICE_GROUP_FIELDS = ['name', 'protocol', 'lb_method']
# Keys accepted in each user-supplied server entry (see validate_servers()).
VALID_SERVER_FIELDS = ['server', 'port', 'status']
def validate_servers(module, servers):
    """Validate and normalize the user-supplied server list in place.

    Each entry must be a dict containing a ``server`` name and a ``port``
    convertible to int; only keys in VALID_SERVER_FIELDS are accepted.
    The optional ``status`` is translated to the aXAPI integer value and
    defaults to 1 (enabled).  Any violation aborts the module run via
    ``module.fail_json``.
    """
    for item in servers:
        for key in item:
            if key not in VALID_SERVER_FIELDS:
                module.fail_json(msg="invalid server field (%s), must be one of: %s" % (key, ','.join(VALID_SERVER_FIELDS)))

        # validate the server name is present
        if 'server' not in item:
            module.fail_json(msg="server definitions must define the server field")

        # validate the port number is present and an integer
        if 'port' in item:
            try:
                item['port'] = int(item['port'])
            except (TypeError, ValueError):
                # Narrowed from a bare except: only conversion failures are
                # expected here; anything else should propagate.
                module.fail_json(msg="server port definitions must be integers")
        else:
            module.fail_json(msg="server definitions must define the port field")

        # convert the status to the internal API integer value
        if 'status' in item:
            item['status'] = axapi_enabled_disabled(item['status'])
        else:
            item['status'] = 1
def main():
    """Create, update or delete an A10 slb service-group via the aXAPI.

    Flow: parse module args -> validate the server list -> authenticate ->
    reconcile the service-group and its members against the desired state ->
    optionally persist config -> close the session and exit the module.
    """
    argument_spec = a10_argument_spec()
    argument_spec.update(url_argument_spec())
    argument_spec.update(
        dict(
            state=dict(type='str', default='present', choices=['present', 'absent']),
            service_group=dict(type='str', aliases=['service', 'pool', 'group'], required=True),
            service_group_protocol=dict(type='str', default='tcp', aliases=['proto', 'protocol'], choices=['tcp', 'udp']),
            service_group_method=dict(type='str', default='round-robin',
                                      aliases=['method'],
                                      choices=['round-robin',
                                               'weighted-rr',
                                               'least-connection',
                                               'weighted-least-connection',
                                               'service-least-connection',
                                               'service-weighted-least-connection',
                                               'fastest-response',
                                               'least-request',
                                               'round-robin-strict',
                                               'src-ip-only-hash',
                                               'src-ip-hash']),
            servers=dict(type='list', aliases=['server', 'member'], default=[]),
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False
    )
    host = module.params['host']
    username = module.params['username']
    password = module.params['password']
    state = module.params['state']
    write_config = module.params['write_config']
    slb_service_group = module.params['service_group']
    slb_service_group_proto = module.params['service_group_protocol']
    slb_service_group_method = module.params['service_group_method']
    slb_servers = module.params['servers']
    if slb_service_group is None:
        module.fail_json(msg='service_group is required')
    axapi_base_url = 'https://' + host + '/services/rest/V2.1/?format=json'
    # Mapping of human-readable method names to aXAPI 2.1 lb_method codes.
    load_balancing_methods = {'round-robin': 0,
                              'weighted-rr': 1,
                              'least-connection': 2,
                              'weighted-least-connection': 3,
                              'service-least-connection': 4,
                              'service-weighted-least-connection': 5,
                              'fastest-response': 6,
                              'least-request': 7,
                              'round-robin-strict': 8,
                              'src-ip-only-hash': 14,
                              'src-ip-hash': 15}
    # aXAPI protocol codes: 2 = tcp (the default), 3 = udp.
    if not slb_service_group_proto or slb_service_group_proto.lower() == 'tcp':
        protocol = 2
    else:
        protocol = 3
    # validate the server data list structure
    validate_servers(module, slb_servers)
    json_post = {
        'service_group': {
            'name': slb_service_group,
            'protocol': protocol,
            'lb_method': load_balancing_methods[slb_service_group_method],
        }
    }
    # first we authenticate to get a session id
    session_url = axapi_authenticate(module, axapi_base_url, username, password)
    # then we check to see if the specified group exists
    slb_result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group}))
    slb_service_group_exist = not axapi_failure(slb_result)
    changed = False
    if state == 'present':
        # before creating/updating we need to validate that servers
        # defined in the servers list exist to prevent errors
        # NOTE(review): checked_servers is populated but never used below.
        checked_servers = []
        for server in slb_servers:
            result = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': server['server']}))
            if axapi_failure(result):
                module.fail_json(msg="the server %s specified in the servers list does not exist" % server['server'])
            checked_servers.append(server['server'])
        if not slb_service_group_exist:
            result = axapi_call(module, session_url + '&method=slb.service_group.create', json.dumps(json_post))
            if axapi_failure(result):
                module.fail_json(msg=result['response']['err']['msg'])
            changed = True
        else:
            # check to see if the service group definition without the
            # server members is different, and update that individually
            # if it needs it
            do_update = False
            for field in VALID_SERVICE_GROUP_FIELDS:
                if json_post['service_group'][field] != slb_result['service_group'][field]:
                    do_update = True
                    break
            if do_update:
                result = axapi_call(module, session_url + '&method=slb.service_group.update', json.dumps(json_post))
                if axapi_failure(result):
                    module.fail_json(msg=result['response']['err']['msg'])
                changed = True
        # next we pull the defined list of servers out of the returned
        # results to make it a bit easier to iterate over
        defined_servers = slb_result.get('service_group', {}).get('member_list', [])
        # next we add/update new member servers from the user-specified
        # list if they're different or not on the target device
        # NOTE(review): the results of the member create/update/delete calls
        # below are not checked with axapi_failure; API errors pass silently.
        for server in slb_servers:
            found = False
            different = False
            for def_server in defined_servers:
                if server['server'] == def_server['server']:
                    found = True
                    for valid_field in VALID_SERVER_FIELDS:
                        if server[valid_field] != def_server[valid_field]:
                            different = True
                            break
                if found or different:
                    break
            # add or update as required
            server_data = {
                "name": slb_service_group,
                "member": server,
            }
            if not found:
                result = axapi_call(module, session_url + '&method=slb.service_group.member.create', json.dumps(server_data))
                changed = True
            elif different:
                result = axapi_call(module, session_url + '&method=slb.service_group.member.update', json.dumps(server_data))
                changed = True
        # finally, remove any servers that are on the target
        # device but were not specified in the list given
        for server in defined_servers:
            found = False
            for slb_server in slb_servers:
                if server['server'] == slb_server['server']:
                    found = True
                    break
            # remove if not found
            server_data = {
                "name": slb_service_group,
                "member": server,
            }
            if not found:
                result = axapi_call(module, session_url + '&method=slb.service_group.member.delete', json.dumps(server_data))
                changed = True
        # if we changed things, get the full info regarding
        # the service group for the return data below
        if changed:
            result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group}))
        else:
            result = slb_result
    elif state == 'absent':
        if slb_service_group_exist:
            result = axapi_call(module, session_url + '&method=slb.service_group.delete', json.dumps({'name': slb_service_group}))
            changed = True
        else:
            result = dict(msg="the service group was not present")
    # if the config has changed, save the config unless otherwise requested
    if changed and write_config:
        write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
        if axapi_failure(write_result):
            module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
    # log out of the session nicely and exit
    axapi_call(module, session_url + '&method=session.close')
    module.exit_json(changed=changed, content=result)
# standard ansible module imports
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.a10 import *
main()
| gpl-3.0 |
cherokee/webserver | admin/CTK/CTK/Template.py | 5 | 2582 | # CTK: Cherokee Toolkit
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2009-2014 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
import os
import stat
import inspect
from Widget import Widget
from util import formatter
class Template (Widget):
    """Widget that renders a text template from a file or a literal string.

    File-backed templates are cached class-wide, keyed by filename, and are
    re-read whenever the file's mtime advances.  Render() expands the
    replacement variables repeatedly until the output reaches a fixed point.
    """

    # Class-wide cache shared by all instances:
    #   filename -> {'mtime': <int>, 'content': <str>}
    cache = {}

    def __init__ (self, **kwargs):
        Widget.__init__ (self)

        self.filename = None
        self.content  = None
        self.vars     = {}

        # 'in' membership test instead of the Python-2-only has_key():
        # identical behavior, and forward compatible.
        if 'filename' in kwargs:
            self.filename = kwargs['filename']
        elif 'content' in kwargs:
            self.content = kwargs['content']

    def _content_update (self):
        # Read the template through a context manager so the file descriptor
        # is closed deterministically (the previous open().read() leaked it).
        with open(self.filename, 'r') as template_file:
            content = template_file.read()

        mtime = os.stat (self.filename)[stat.ST_MTIME]
        Template.cache[self.filename] = {'mtime': mtime,
                                         'content': content}

    def _content_get (self):
        try:
            cached = Template.cache[self.filename]

            s = os.stat (self.filename)
            mtime = s[stat.ST_MTIME]
            if mtime <= cached['mtime']:
                # Cache hit: file unchanged since it was cached.
                return cached['content']
        except (KeyError, TypeError, OSError):
            # Narrowed from a bare except.  Expected cases: not cached yet
            # (KeyError), no filename set (TypeError from os.stat(None)), or
            # the file vanished (OSError).  Fall through to the miss path.
            pass

        # Cache miss: (re)load from disk when a readable file backs the
        # template, otherwise fall back to the literal content.
        if (self.filename and
            os.path.exists (self.filename)):
            self._content_update()
            return Template.cache[self.filename]['content']
        else:
            return self.content

    def __setitem__ (self, key, val):
        self.vars[key] = val

    def __getitem__ (self, key):
        # Returns None for unknown keys (dict.get semantics).
        return self.vars.get(key)

    def Render (self):
        """Expand template variables until the output stops changing."""
        content = self._content_get()

        while True:
            prev = content[:]
            content = formatter (content, self.vars)
            if content == prev:
                break

        # Get rid of %%s
        return content %({})

    def _figure_vars (self):
        # Merge the module globals with the caller's locals.
        # NOTE(review): relies on CPython-specific frame introspection;
        # inspect.currentframe(1) is sys._getframe(1) on CPython — verify
        # before running on other interpreters.
        vars = globals()
        vars.update (inspect.currentframe(1).f_locals)
        return vars
| gpl-2.0 |
2013Commons/HUE-SHARK | build/env/lib/python2.7/site-packages/Django-1.2.3-py2.7.egg/django/views/decorators/http.py | 63 | 5999 | """
Decorators for views based on HTTP headers.
"""
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from calendar import timegm
from datetime import timedelta
from email.Utils import formatdate
from django.utils.decorators import decorator_from_middleware, available_attrs
from django.utils.http import parse_etags, quote_etag
from django.middleware.http import ConditionalGetMiddleware
from django.http import HttpResponseNotAllowed, HttpResponseNotModified, HttpResponse
conditional_page = decorator_from_middleware(ConditionalGetMiddleware)
def require_http_methods(request_method_list):
    """
    Decorator restricting a view to the given HTTP request methods. Usage::

        @require_http_methods(["GET", "POST"])
        def my_view(request):
            # Only GET or POST requests reach this point.
            # ...

    Method names must be given in uppercase.
    """
    def decorator(func):
        def inner(request, *args, **kwargs):
            if request.method in request_method_list:
                return func(request, *args, **kwargs)
            return HttpResponseNotAllowed(request_method_list)
        wrapper = wraps(func, assigned=available_attrs(func))
        return wrapper(inner)
    return decorator
require_GET = require_http_methods(["GET"])
require_GET.__doc__ = "Decorator to require that a view only accept the GET method."
require_POST = require_http_methods(["POST"])
require_POST.__doc__ = "Decorator to require that a view only accept the POST method."
def condition(etag_func=None, last_modified_func=None):
    """
    Decorator to support conditional retrieval (or change) for a view
    function.

    The parameters are callables to compute the ETag and last modified time for
    the requested resource, respectively. The callables are passed the same
    parameters as the view itself. The Etag function should return a string (or
    None if the resource doesn't exist), whilst the last_modified function
    should return a datetime object (or None if the resource doesn't exist).

    If both parameters are provided, all the preconditions must be met before
    the view is processed.

    This decorator will either pass control to the wrapped view function or
    return an HTTP 304 response (unmodified) or 412 response (preconditions
    failed), depending upon the request method.

    Any behavior marked as "undefined" in the HTTP spec (e.g. If-none-match
    plus If-modified-since headers) will result in the view function being
    called.
    """
    def decorator(func):
        def inner(request, *args, **kwargs):
            # Get HTTP request headers
            if_modified_since = request.META.get("HTTP_IF_MODIFIED_SINCE")
            if_none_match = request.META.get("HTTP_IF_NONE_MATCH")
            if_match = request.META.get("HTTP_IF_MATCH")
            if if_none_match or if_match:
                # There can be more than one ETag in the request, so we
                # consider the list of values.
                try:
                    etags = parse_etags(if_none_match or if_match)
                except ValueError:
                    # In case of invalid etag ignore all ETag headers.
                    # Apparently Opera sends invalidly quoted headers at times
                    # (we should be returning a 400 response, but that's a
                    # little extreme) -- this is Django bug #10681.
                    if_none_match = None
                    if_match = None
            # Compute values (if any) for the requested resource.
            if etag_func:
                res_etag = etag_func(request, *args, **kwargs)
            else:
                res_etag = None
            if last_modified_func:
                dt = last_modified_func(request, *args, **kwargs)
                if dt:
                    # Build an HTTP-date string; the [:26] slice drops the
                    # "-0000" zone that formatdate emits so "GMT" can be
                    # appended instead.
                    res_last_modified = formatdate(timegm(dt.utctimetuple()))[:26] + 'GMT'
                else:
                    res_last_modified = None
            else:
                res_last_modified = None
            response = None
            # Header combinations the HTTP spec leaves undefined fall through
            # to calling the view (see docstring).
            if not ((if_match and (if_modified_since or if_none_match)) or
                    (if_match and if_none_match)):
                # We only get here if no undefined combinations of headers are
                # specified.
                if ((if_none_match and (res_etag in etags or
                        "*" in etags and res_etag)) and
                        (not if_modified_since or
                            res_last_modified == if_modified_since)):
                    # If-None-Match matched: 304 for safe methods, 412 otherwise.
                    if request.method in ("GET", "HEAD"):
                        response = HttpResponseNotModified()
                    else:
                        response = HttpResponse(status=412)
                elif if_match and ((not res_etag and "*" in etags) or
                                   (res_etag and res_etag not in etags)):
                    # If-Match precondition failed.
                    response = HttpResponse(status=412)
                elif (not if_none_match and if_modified_since and
                        request.method == "GET" and
                        res_last_modified == if_modified_since):
                    # Plain If-Modified-Since match on a GET.
                    response = HttpResponseNotModified()
            if response is None:
                response = func(request, *args, **kwargs)
            # Set relevant headers on the response if they don't already exist.
            if res_last_modified and not response.has_header('Last-Modified'):
                response['Last-Modified'] = res_last_modified
            if res_etag and not response.has_header('ETag'):
                response['ETag'] = quote_etag(res_etag)
            return response
        return inner
    return decorator
# Shortcut decorators for common cases based on ETag or Last-Modified only
def etag(etag_func):
    # Shortcut for condition() when only an ETag callable is needed.
    return condition(etag_func=etag_func)
def last_modified(last_modified_func):
    # Shortcut for condition() when only a Last-Modified callable is needed.
    return condition(last_modified_func=last_modified_func)
# --- apache-2.0 ---
# File: dmlc/mxnet example/reinforcement-learning/dqn/replay_memory.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function
import mxnet as mx
import mxnet.ndarray as nd
import numpy
import copy
from utils import get_numpy_rng
class ReplayMemory(object):
    """Fixed-capacity circular buffer of (state, action, reward, terminate)
    transitions for DQN-style training.

    ``top`` is the index of the next slot to write and ``size`` counts valid
    entries; once the buffer is full, new entries overwrite the oldest. The
    sampling methods return stacked frame histories of ``history_length``
    consecutive states, skipping windows that cross an episode boundary.
    """
    def __init__(self, history_length, memory_size=1000000, replay_start_size=100,
                 state_dim=(), action_dim=(), state_dtype='uint8', action_dtype='uint8',
                 ctx=mx.gpu()):
        self.rng = get_numpy_rng()
        self.ctx = ctx
        assert type(action_dim) is tuple and type(state_dim) is tuple, \
            "Must set the dimensions of state and action for replay memory"
        self.state_dim = state_dim
        # A (1,)-shaped action is stored as a scalar per slot.
        if action_dim == (1,):
            self.action_dim = ()
        else:
            self.action_dim = action_dim
        self.states = numpy.zeros((memory_size,) + state_dim, dtype=state_dtype)
        self.actions = numpy.zeros((memory_size,) + action_dim, dtype=action_dtype)
        self.rewards = numpy.zeros(memory_size, dtype='float32')
        self.terminate_flags = numpy.zeros(memory_size, dtype='bool')
        self.memory_size = memory_size
        self.replay_start_size = replay_start_size
        self.history_length = history_length
        self.top = 0        # next write position (wraps modulo memory_size)
        self.size = 0       # number of valid entries, capped at memory_size
    def latest_slice(self):
        """Return the most recent ``history_length`` states (wrap-around read).

        Fails with an AssertionError when fewer than ``history_length``
        entries have been stored.
        """
        if self.size >= self.history_length:
            return self.states.take(numpy.arange(self.top - self.history_length, self.top),
                                    axis=0, mode="wrap")
        else:
            assert False, "We can only slice from the replay memory if the " \
                          "replay size is larger than the length of frames we want to take" \
                          "as the input."
    @property
    def sample_enabled(self):
        # True once enough transitions have accumulated to start sampling.
        return self.size > self.replay_start_size
    def clear(self):
        """
        Clear all contents in the relay memory
        """
        self.states[:] = 0
        self.actions[:] = 0
        self.rewards[:] = 0
        self.terminate_flags[:] = 0
        self.top = 0
        self.size = 0
    def reset(self):
        """
        Reset all the flags stored in the replay memory.
        It will not clear the inner-content and is a light/quick version of clear()
        """
        self.top = 0
        self.size = 0
    def copy(self):
        """Return a copy: shallow-copies scalar fields via copy.copy, then
        allocates fresh arrays and copies over only the valid entries.
        """
        # TODO Test the copy function
        replay_memory = copy.copy(self)
        replay_memory.states = numpy.zeros(self.states.shape, dtype=self.states.dtype)
        replay_memory.actions = numpy.zeros(self.actions.shape, dtype=self.actions.dtype)
        replay_memory.rewards = numpy.zeros(self.rewards.shape, dtype='float32')
        replay_memory.terminate_flags = numpy.zeros(self.terminate_flags.shape, dtype='bool')
        # Negative indices from (top - size) wrap naturally in fancy indexing.
        replay_memory.states[numpy.arange(self.top-self.size, self.top), ::] = \
            self.states[numpy.arange(self.top-self.size, self.top)]
        replay_memory.actions[numpy.arange(self.top-self.size, self.top)] = \
            self.actions[numpy.arange(self.top-self.size, self.top)]
        replay_memory.rewards[numpy.arange(self.top-self.size, self.top)] = \
            self.rewards[numpy.arange(self.top-self.size, self.top)]
        replay_memory.terminate_flags[numpy.arange(self.top-self.size, self.top)] = \
            self.terminate_flags[numpy.arange(self.top-self.size, self.top)]
        return replay_memory
    def append(self, obs, action, reward, terminate_flag):
        """Store one transition at ``top``, advancing the ring pointer."""
        self.states[self.top] = obs
        self.actions[self.top] = action
        self.rewards[self.top] = reward
        self.terminate_flags[self.top] = terminate_flag
        self.top = (self.top + 1) % self.memory_size
        if self.size < self.memory_size:
            self.size += 1
    def sample_last(self, batch_size, states, offset):
        """Fill ``states[offset:offset+batch_size]`` with the most recent
        valid history windows, walking backwards from ``top``.

        Returns (actions, rewards, terminate_flags) for the sampled windows.
        """
        assert self.size >= batch_size and self.replay_start_size >= self.history_length
        assert(0 <= self.size <= self.memory_size)
        assert(0 <= self.top <= self.memory_size)
        if self.size <= self.replay_start_size:
            raise ValueError("Size of the effective samples of the ReplayMemory must be "
                             "bigger than start_size! Currently, size=%d, start_size=%d"
                             %(self.size, self.replay_start_size))
        actions = numpy.empty((batch_size,) + self.action_dim, dtype=self.actions.dtype)
        rewards = numpy.empty(batch_size, dtype='float32')
        terminate_flags = numpy.empty(batch_size, dtype='bool')
        counter = 0
        first_index = self.top - self.history_length - 1
        while counter < batch_size:
            full_indices = numpy.arange(first_index, first_index + self.history_length+1)
            end_index = first_index + self.history_length
            if numpy.any(self.terminate_flags.take(full_indices[0:self.history_length], mode='wrap')):
                # Check if terminates in the middle of the sample!
                first_index -= 1
                continue
            states[counter + offset] = self.states.take(full_indices, axis=0, mode='wrap')
            actions[counter] = self.actions.take(end_index, axis=0, mode='wrap')
            rewards[counter] = self.rewards.take(end_index, mode='wrap')
            terminate_flags[counter] = self.terminate_flags.take(end_index, mode='wrap')
            counter += 1
            first_index -= 1
        return actions, rewards, terminate_flags
    def sample_mix(self, batch_size, states, offset, current_index):
        """Sample one window anchored near ``current_index`` first, then fill
        the rest of the batch with uniformly random valid windows.
        """
        assert self.size >= batch_size and self.replay_start_size >= self.history_length
        assert(0 <= self.size <= self.memory_size)
        assert(0 <= self.top <= self.memory_size)
        if self.size <= self.replay_start_size:
            raise ValueError("Size of the effective samples of the ReplayMemory must be bigger than "
                             "start_size! Currently, size=%d, start_size=%d"
                             %(self.size, self.replay_start_size))
        actions = numpy.empty((batch_size,) + self.action_dim, dtype=self.actions.dtype)
        rewards = numpy.empty(batch_size, dtype='float32')
        terminate_flags = numpy.empty(batch_size, dtype='bool')
        counter = 0
        first_index = self.top - self.history_length + current_index
        thisid = first_index
        while counter < batch_size:
            full_indices = numpy.arange(thisid, thisid + self.history_length+1)
            end_index = thisid + self.history_length
            if numpy.any(self.terminate_flags.take(full_indices[0:self.history_length], mode='wrap')):
                # Check if terminates in the middle of the sample!
                thisid -= 1
                continue
            states[counter+offset] = self.states.take(full_indices, axis=0, mode='wrap')
            actions[counter] = self.actions.take(end_index, axis=0, mode='wrap')
            rewards[counter] = self.rewards.take(end_index, mode='wrap')
            terminate_flags[counter] = self.terminate_flags.take(end_index, mode='wrap')
            counter += 1
            # Subsequent anchors are drawn uniformly from the valid range.
            thisid = self.rng.randint(low=self.top - self.size, high=self.top - self.history_length-1)
        return actions, rewards, terminate_flags
    def sample_inplace(self, batch_size, states, offset):
        """Sample uniformly random history windows directly into ``states``
        (avoids allocating a fresh states array per call).
        """
        assert self.size >= batch_size and self.replay_start_size >= self.history_length
        assert(0 <= self.size <= self.memory_size)
        assert(0 <= self.top <= self.memory_size)
        if self.size <= self.replay_start_size:
            raise ValueError("Size of the effective samples of the ReplayMemory must be "
                             "bigger than start_size! Currently, size=%d, start_size=%d"
                             %(self.size, self.replay_start_size))
        actions = numpy.zeros((batch_size,) + self.action_dim, dtype=self.actions.dtype)
        rewards = numpy.zeros(batch_size, dtype='float32')
        terminate_flags = numpy.zeros(batch_size, dtype='bool')
        counter = 0
        while counter < batch_size:
            index = self.rng.randint(low=self.top - self.size + 1, high=self.top - self.history_length)
            transition_indices = numpy.arange(index, index + self.history_length+1)
            initial_indices = transition_indices - 1
            end_index = index + self.history_length - 1
            if numpy.any(self.terminate_flags.take(initial_indices[0:self.history_length], mode='wrap')):
                # Check if terminates in the middle of the sample!
                continue
            states[counter + offset] = self.states.take(initial_indices, axis=0, mode='wrap')
            actions[counter] = self.actions.take(end_index, axis=0, mode='wrap')
            rewards[counter] = self.rewards.take(end_index, mode='wrap')
            terminate_flags[counter] = self.terminate_flags.take(end_index, mode='wrap')
            # next_states[counter] = self.states.take(transition_indices, axis=0, mode='wrap')
            counter += 1
        return actions, rewards, terminate_flags
    def sample(self, batch_size):
        """Sample a uniformly random batch of transitions, returning
        (states, actions, rewards, next_states, terminate_flags) where the
        state arrays are freshly allocated.
        """
        assert self.size >= batch_size and self.replay_start_size >= self.history_length
        assert(0 <= self.size <= self.memory_size)
        assert(0 <= self.top <= self.memory_size)
        if self.size <= self.replay_start_size:
            raise ValueError("Size of the effective samples of the ReplayMemory must be bigger than "
                             "start_size! Currently, size=%d, start_size=%d"
                             %(self.size, self.replay_start_size))
        #TODO Possibly states + inds for less memory access
        states = numpy.zeros((batch_size, self.history_length) + self.state_dim,
                             dtype=self.states.dtype)
        actions = numpy.zeros((batch_size,) + self.action_dim, dtype=self.actions.dtype)
        rewards = numpy.zeros(batch_size, dtype='float32')
        terminate_flags = numpy.zeros(batch_size, dtype='bool')
        next_states = numpy.zeros((batch_size, self.history_length) + self.state_dim,
                                  dtype=self.states.dtype)
        counter = 0
        while counter < batch_size:
            index = self.rng.randint(low=self.top - self.size + 1, high=self.top - self.history_length)
            transition_indices = numpy.arange(index, index + self.history_length)
            initial_indices = transition_indices - 1
            end_index = index + self.history_length - 1
            # Slide the window back until it no longer spans a terminal state.
            while numpy.any(self.terminate_flags.take(initial_indices, mode='wrap')):
                # Check if terminates in the middle of the sample!
                index -= 1
                transition_indices = numpy.arange(index, index + self.history_length)
                initial_indices = transition_indices - 1
                end_index = index + self.history_length - 1
            states[counter] = self.states.take(initial_indices, axis=0, mode='wrap')
            actions[counter] = self.actions.take(end_index, axis=0, mode='wrap')
            rewards[counter] = self.rewards.take(end_index, mode='wrap')
            terminate_flags[counter] = self.terminate_flags.take(end_index, mode='wrap')
            next_states[counter] = self.states.take(transition_indices, axis=0, mode='wrap')
            counter += 1
        return states, actions, rewards, next_states, terminate_flags
# --- apache-2.0 ---
# File: danieljaouen/ansible lib/ansible/modules/cloud/google/gcspanner.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcspanner
version_added: "2.3"
short_description: Create and Delete Instances/Databases on Spanner
description:
- Create and Delete Instances/Databases on Spanner.
See U(https://cloud.google.com/spanner/docs) for an overview.
requirements:
- python >= 2.6
- google-auth >= 0.5.0
- google-cloud-spanner >= 0.23.0
notes:
- Changing the configuration on an existing instance is not supported.
author:
- Tom Melendez (@supertom) <tom@supertom.com>
options:
configuration:
description:
- Configuration the instance should use.
- Examples are us-central1, asia-east1 and europe-west1.
required: yes
instance_id:
description:
- GCP spanner instance name.
required: yes
database_name:
description:
- Name of database contained on the instance.
force_instance_delete:
description:
- To delete an instance, this argument must exist and be true (along with state being equal to absent).
type: bool
default: 'no'
instance_display_name:
description:
- Name of Instance to display.
- If not specified, instance_id will be used instead.
node_count:
description:
- Number of nodes in the instance.
default: 1
state:
description:
- State of the instance or database. Applies to the most granular resource.
- If a C(database_name) is specified we remove it.
- If only C(instance_id) is specified, that is what is removed.
choices: [ absent, present ]
default: present
'''
# Bug fix: the third example had a stray list dash ("- name: ..." followed by
# "- gcspanner: ..."), which YAML parses as TWO tasks — one with only a name
# and one with only a module — so the documented example was not runnable.
EXAMPLES = '''
- name: Create instance
  gcspanner:
    instance_id: '{{ instance_id }}'
    configuration: '{{ configuration }}'
    state: present
    node_count: 1

- name: Create database
  gcspanner:
    instance_id: '{{ instance_id }}'
    configuration: '{{ configuration }}'
    database_name: '{{ database_name }}'
    state: present

- name: Delete instance (and all databases)
  gcspanner:
    instance_id: '{{ instance_id }}'
    configuration: '{{ configuration }}'
    state: absent
    force_instance_delete: yes
'''
RETURN = '''
state:
description: The state of the instance or database. Value will be either 'absent' or 'present'.
returned: Always
type: str
sample: "present"
database_name:
description: Name of database.
returned: When database name is specified
type: str
sample: "mydatabase"
instance_id:
description: Name of instance.
returned: Always
type: str
sample: "myinstance"
previous_values:
description: List of dictionaries containing previous values prior to update.
returned: When an instance update has occurred and a field has been modified.
type: dict
sample: "'previous_values': { 'instance': { 'instance_display_name': 'my-instance', 'node_count': 1 } }"
updated:
description: Boolean field to denote an update has occurred.
returned: When an update has occurred.
type: bool
sample: True
'''
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
try:
from google.cloud import spanner
from google.gax.errors import GaxError
HAS_GOOGLE_CLOUD_SPANNER = True
except ImportError as e:
HAS_GOOGLE_CLOUD_SPANNER = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
from ansible.module_utils.six import string_types
CLOUD_CLIENT = 'google-cloud-spanner'
CLOUD_CLIENT_MINIMUM_VERSION = '0.23.0'
CLOUD_CLIENT_USER_AGENT = 'ansible-spanner-0.1'
def get_spanner_configuration_name(config_name, project_name):
    """Return the fully-qualified regional instance-config resource name.

    :param config_name: short region name, e.g. ``us-central1``.
    :param project_name: GCP project id.
    :rtype: str
    """
    return 'projects/%s/instanceConfigs/regional-%s' % (project_name, config_name)
def instance_update(instance):
    """
    Call update method on spanner client.

    Note: A ValueError exception is thrown despite the client succeeding.
    So, we validate the node_count and instance_display_name parameters and then
    ignore the ValueError exception.

    :param instance: a Spanner instance object
    :type instance: class `google.cloud.spanner.Instance`
    :returns True on success, raises ValueError on type error.
    :rtype ``bool``
    """
    error_message = ''
    # Validate the two fields the update call would otherwise choke on.
    if not isinstance(instance.node_count, int):
        error_message = 'node_count must be an integer %s (%s)' % (
            instance.node_count, type(instance.node_count))
    if instance.display_name and not isinstance(instance.display_name,
                                                string_types):
        error_message = 'instance_display_name must be an string %s (%s)' % (
            instance.display_name, type(instance.display_name))
    if error_message:
        raise ValueError(error_message)
    try:
        instance.update()
    except ValueError:
        # The ValueError here is the one we 'expect' (see docstring).
        pass
    return True
def main():
    """Ansible entry point: create/update/delete a Spanner instance and/or
    database according to the module parameters.
    """
    module = AnsibleModule(
        argument_spec=dict(
            instance_id=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            database_name=dict(type='str'),
            configuration=dict(type='str', required=True),
            node_count=dict(type='int', default=1),
            instance_display_name=dict(type='str'),
            force_instance_delete=dict(type='bool', default=False),
            service_account_email=dict(type='str'),
            credentials_file=dict(type='str'),
            project_id=dict(type='str'),
        ),
    )
    # Dependency checks: fail fast with actionable messages.
    if not HAS_PYTHON26:
        module.fail_json(
            msg="GCE module requires python's 'ast' module, python v2.6+")
    if not HAS_GOOGLE_CLOUD_SPANNER:
        module.fail_json(msg="Please install google-cloud-spanner.")
    if not check_min_pkg_version(CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION):
        module.fail_json(msg="Please install %s client version %s" %
                         (CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION))
    # Collect the parameters this module acts on into one dict (it is also
    # merged into the JSON result at the end).
    mod_params = {}
    mod_params['state'] = module.params.get('state')
    mod_params['instance_id'] = module.params.get('instance_id')
    mod_params['database_name'] = module.params.get('database_name')
    mod_params['configuration'] = module.params.get('configuration')
    mod_params['node_count'] = module.params.get('node_count', None)
    mod_params['instance_display_name'] = module.params.get('instance_display_name')
    mod_params['force_instance_delete'] = module.params.get('force_instance_delete')
    creds, params = get_google_cloud_credentials(module)
    spanner_client = spanner.Client(project=params['project_id'],
                                    credentials=creds,
                                    user_agent=CLOUD_CLIENT_USER_AGENT)
    changed = False
    json_output = {}
    # Build handles for the instance (i) and, optionally, the database (d).
    # Neither call hits the API yet; existence is checked below.
    i = None
    if mod_params['instance_id']:
        config_name = get_spanner_configuration_name(
            mod_params['configuration'], params['project_id'])
        i = spanner_client.instance(mod_params['instance_id'],
                                    configuration_name=config_name)
    d = None
    if mod_params['database_name']:
        # TODO(supertom): support DDL
        ddl_statements = ''
        d = i.database(mod_params['database_name'], ddl_statements)
    if mod_params['state'] == 'absent':
        # Remove the most granular resource. If database is specified
        # we remove it. If only instance is specified, that is what is removed.
        if d is not None and d.exists():
            d.drop()
            changed = True
        else:
            if i.exists():
                # Deleting an instance destroys all its databases, so it
                # requires the explicit force_instance_delete flag.
                if mod_params['force_instance_delete']:
                    i.delete()
                else:
                    module.fail_json(
                        msg=(("Cannot delete Spanner instance: "
                              "'force_instance_delete' argument not specified")))
                changed = True
    elif mod_params['state'] == 'present':
        if not i.exists():
            # Rebuild the handle with creation-only attributes, then create.
            i = spanner_client.instance(mod_params['instance_id'],
                                        configuration_name=config_name,
                                        display_name=mod_params['instance_display_name'],
                                        node_count=mod_params['node_count'] or 1)
            i.create()
            changed = True
        else:
            # update instance
            i.reload()
            # Record prior values so the result can report what changed.
            inst_prev_vals = {}
            if i.display_name != mod_params['instance_display_name']:
                inst_prev_vals['instance_display_name'] = i.display_name
                i.display_name = mod_params['instance_display_name']
            if mod_params['node_count']:
                if i.node_count != mod_params['node_count']:
                    inst_prev_vals['node_count'] = i.node_count
                    i.node_count = mod_params['node_count']
            if inst_prev_vals:
                changed = instance_update(i)
                json_output['updated'] = changed
                json_output['previous_values'] = {'instance': inst_prev_vals}
        if d:
            if not d.exists():
                d.create()
                d.reload()
                changed = True
    json_output['changed'] = changed
    json_output.update(mod_params)
    module.exit_json(**json_output)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
# --- gpl-3.0 ---
# File: rossepstein2013/groupme-liker lib/jinja2/ext.py
# -*- coding: utf-8 -*-
"""
jinja2.ext
~~~~~~~~~~
Jinja extensions allow to add custom tags similar to the way django custom
tags work. By default two example extensions exist: an i18n and a cache
extension.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2 import nodes
from jinja2.defaults import BLOCK_START_STRING, \
BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
from jinja2.environment import Environment
from jinja2.runtime import concat
from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
from jinja2.utils import contextfunction, import_string, Markup
from jinja2._compat import with_metaclass, string_types, iteritems
# the only real useful gettext functions for a Jinja template. Note
# that ugettext must be assigned to gettext as Jinja doesn't support
# non unicode strings.
GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
class ExtensionRegistry(type):
    """Metaclass that stamps every extension class with a unique
    ``identifier`` derived from its fully qualified import name.
    """
    def __new__(cls, name, bases, namespace):
        new_cls = type.__new__(cls, name, bases, namespace)
        new_cls.identifier = new_cls.__module__ + '.' + new_cls.__name__
        return new_cls
class Extension(with_metaclass(ExtensionRegistry, object)):
    """Extensions can be used to add extra functionality to the Jinja template
    system at the parser level. Custom extensions are bound to an environment
    but may not store environment specific data on `self`. The reason for
    this is that an extension can be bound to another environment (for
    overlays) by creating a copy and reassigning the `environment` attribute.

    As extensions are created by the environment they cannot accept any
    arguments for configuration. One may want to work around that by using
    a factory function, but that is not possible as extensions are identified
    by their import name. The correct way to configure the extension is
    storing the configuration values on the environment. Because this way the
    environment ends up acting as central configuration storage the
    attributes may clash which is why extensions have to ensure that the names
    they choose for configuration are not too generic. ``prefix`` for example
    is a terrible name, ``fragment_cache_prefix`` on the other hand is a good
    name as includes the name of the extension (fragment cache).
    """
    #: if this extension parses this is the list of tags it's listening to.
    tags = set()
    #: the priority of that extension. This is especially useful for
    #: extensions that preprocess values. A lower value means higher
    #: priority.
    #:
    #: .. versionadded:: 2.4
    priority = 100
    def __init__(self, environment):
        self.environment = environment
    def bind(self, environment):
        """Create a copy of this extension bound to another environment."""
        # Bypass __init__ so subclass constructors (which may mutate the
        # environment) are not run again; just clone state and rebind.
        rv = object.__new__(self.__class__)
        rv.__dict__.update(self.__dict__)
        rv.environment = environment
        return rv
    def preprocess(self, source, name, filename=None):
        """This method is called before the actual lexing and can be used to
        preprocess the source. The `filename` is optional. The return value
        must be the preprocessed source.
        """
        return source
    def filter_stream(self, stream):
        """It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
        to filter tokens returned. This method has to return an iterable of
        :class:`~jinja2.lexer.Token`\s, but it doesn't have to return a
        :class:`~jinja2.lexer.TokenStream`.

        In the `ext` folder of the Jinja2 source distribution there is a file
        called `inlinegettext.py` which implements a filter that utilizes this
        method.
        """
        return stream
    def parse(self, parser):
        """If any of the :attr:`tags` matched this method is called with the
        parser as first argument. The token the parser stream is pointing at
        is the name token that matched. This method has to return one or a
        list of multiple nodes.
        """
        raise NotImplementedError()
    def attr(self, name, lineno=None):
        """Return an attribute node for the current extension. This is useful
        to pass constants on extensions to generated template code.

        ::

            self.attr('_my_attribute', lineno=lineno)
        """
        return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
    def call_method(self, name, args=None, kwargs=None, dyn_args=None,
                    dyn_kwargs=None, lineno=None):
        """Call a method of the extension. This is a shortcut for
        :meth:`attr` + :class:`jinja2.nodes.Call`.
        """
        # kwargs is a list of nodes.Keyword nodes, not a dict.
        if args is None:
            args = []
        if kwargs is None:
            kwargs = []
        return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
                          dyn_args, dyn_kwargs, lineno=lineno)
@contextfunction
def _gettext_alias(__context, *args, **kwargs):
    # Template-global ``_``: forwards to whatever gettext callable is
    # currently installed in the template context.
    return __context.call(__context.resolve('gettext'), *args, **kwargs)
def _make_new_gettext(func):
    """Wrap *func* in a "newstyle" gettext callable that performs the
    variable substitution itself and marks the result safe when
    autoescaping is active.
    """
    @contextfunction
    def gettext(__context, __string, **variables):
        translated = __context.call(func, __string)
        if __context.eval_ctx.autoescape:
            translated = Markup(translated)
        return translated % variables
    return gettext
def _make_new_ngettext(func):
    """Wrap *func* in a "newstyle" ngettext callable that injects ``num``
    into the substitution variables, performs the substitution itself and
    marks the result safe when autoescaping is active.
    """
    @contextfunction
    def ngettext(__context, __singular, __plural, __num, **variables):
        # Make the count available as %(num)s unless the caller supplied it.
        variables.setdefault('num', __num)
        translated = __context.call(func, __singular, __plural, __num)
        if __context.eval_ctx.autoescape:
            translated = Markup(translated)
        return translated % variables
    return ngettext
class InternationalizationExtension(Extension):
"""This extension adds gettext support to Jinja2."""
tags = set(['trans'])
# TODO: the i18n extension is currently reevaluating values in a few
# situations. Take this example:
# {% trans count=something() %}{{ count }} foo{% pluralize
# %}{{ count }} fooss{% endtrans %}
# something is called twice here. One time for the gettext value and
# the other time for the n-parameter of the ngettext function.
def __init__(self, environment):
Extension.__init__(self, environment)
environment.globals['_'] = _gettext_alias
environment.extend(
install_gettext_translations=self._install,
install_null_translations=self._install_null,
install_gettext_callables=self._install_callables,
uninstall_gettext_translations=self._uninstall,
extract_translations=self._extract,
newstyle_gettext=False
)
def _install(self, translations, newstyle=None):
gettext = getattr(translations, 'ugettext', None)
if gettext is None:
gettext = translations.gettext
ngettext = getattr(translations, 'ungettext', None)
if ngettext is None:
ngettext = translations.ngettext
self._install_callables(gettext, ngettext, newstyle)
def _install_null(self, newstyle=None):
self._install_callables(
lambda x: x,
lambda s, p, n: (n != 1 and (p,) or (s,))[0],
newstyle
)
def _install_callables(self, gettext, ngettext, newstyle=None):
if newstyle is not None:
self.environment.newstyle_gettext = newstyle
if self.environment.newstyle_gettext:
gettext = _make_new_gettext(gettext)
ngettext = _make_new_ngettext(ngettext)
self.environment.globals.update(
gettext=gettext,
ngettext=ngettext
)
def _uninstall(self, translations):
for key in 'gettext', 'ngettext':
self.environment.globals.pop(key, None)
def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
if isinstance(source, string_types):
source = self.environment.parse(source)
return extract_from_ast(source, gettext_functions)
def parse(self, parser):
"""Parse a translatable tag."""
lineno = next(parser.stream).lineno
num_called_num = False
# find all the variables referenced. Additionally a variable can be
# defined in the body of the trans block too, but this is checked at
# a later state.
plural_expr = None
plural_expr_assignment = None
variables = {}
while parser.stream.current.type != 'block_end':
if variables:
parser.stream.expect('comma')
# skip colon for python compatibility
if parser.stream.skip_if('colon'):
break
name = parser.stream.expect('name')
if name.value in variables:
parser.fail('translatable variable %r defined twice.' %
name.value, name.lineno,
exc=TemplateAssertionError)
# expressions
if parser.stream.current.type == 'assign':
next(parser.stream)
variables[name.value] = var = parser.parse_expression()
else:
variables[name.value] = var = nodes.Name(name.value, 'load')
if plural_expr is None:
if isinstance(var, nodes.Call):
plural_expr = nodes.Name('_trans', 'load')
variables[name.value] = plural_expr
plural_expr_assignment = nodes.Assign(
nodes.Name('_trans', 'store'), var)
else:
plural_expr = var
num_called_num = name.value == 'num'
parser.stream.expect('block_end')
plural = plural_names = None
have_plural = False
referenced = set()
# now parse until endtrans or pluralize
singular_names, singular = self._parse_block(parser, True)
if singular_names:
referenced.update(singular_names)
if plural_expr is None:
plural_expr = nodes.Name(singular_names[0], 'load')
num_called_num = singular_names[0] == 'num'
# if we have a pluralize block, we parse that too
if parser.stream.current.test('name:pluralize'):
have_plural = True
next(parser.stream)
if parser.stream.current.type != 'block_end':
name = parser.stream.expect('name')
if name.value not in variables:
parser.fail('unknown variable %r for pluralization' %
name.value, name.lineno,
exc=TemplateAssertionError)
plural_expr = variables[name.value]
num_called_num = name.value == 'num'
parser.stream.expect('block_end')
plural_names, plural = self._parse_block(parser, False)
next(parser.stream)
referenced.update(plural_names)
else:
next(parser.stream)
# register free names as simple name expressions
for var in referenced:
if var not in variables:
variables[var] = nodes.Name(var, 'load')
if not have_plural:
plural_expr = None
elif plural_expr is None:
parser.fail('pluralize without variables', lineno)
node = self._make_node(singular, plural, variables, plural_expr,
bool(referenced),
num_called_num and have_plural)
node.set_lineno(lineno)
if plural_expr_assignment is not None:
return [plural_expr_assignment, node]
else:
return node
def _parse_block(self, parser, allow_pluralize):
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
while 1:
if parser.stream.current.type == 'data':
buf.append(parser.stream.current.value.replace('%', '%%'))
next(parser.stream)
elif parser.stream.current.type == 'variable_begin':
next(parser.stream)
name = parser.stream.expect('name').value
referenced.append(name)
buf.append('%%(%s)s' % name)
parser.stream.expect('variable_end')
elif parser.stream.current.type == 'block_begin':
next(parser.stream)
if parser.stream.current.test('name:endtrans'):
break
elif parser.stream.current.test('name:pluralize'):
if allow_pluralize:
break
parser.fail('a translatable section can have only one '
'pluralize section')
parser.fail('control structures in translatable sections are '
'not allowed')
elif parser.stream.eos:
parser.fail('unclosed translation block')
else:
assert False, 'internal parser error'
return referenced, concat(buf)
def _make_node(self, singular, plural, variables, plural_expr,
               vars_referenced, num_called_num):
    """Build the output node for a parsed ``{% trans %}`` block.

    Produces a call to ``gettext`` (singular only) or ``ngettext``
    (singular + plural), wrapped so that placeholder substitution and
    autoescaping are handled either by the newstyle gettext functions
    themselves or by explicit ``%``-formatting nodes.
    """
    # no variables referenced?  no need to escape for old style
    # gettext invocations only if there are vars.
    if not vars_referenced and not self.environment.newstyle_gettext:
        singular = singular.replace('%%', '%')
        if plural:
            plural = plural.replace('%%', '%')
    # singular only:
    if plural_expr is None:
        gettext = nodes.Name('gettext', 'load')
        node = nodes.Call(gettext, [nodes.Const(singular)],
                          [], None, None)
    # singular and plural
    else:
        ngettext = nodes.Name('ngettext', 'load')
        node = nodes.Call(ngettext, [
            nodes.Const(singular),
            nodes.Const(plural),
            plural_expr
        ], [], None, None)
    # in case newstyle gettext is used, the method is powerful
    # enough to handle the variable expansion and autoescape
    # handling itself
    if self.environment.newstyle_gettext:
        for key, value in iteritems(variables):
            # the function adds that later anyways in case num was
            # called num, so just skip it.
            if num_called_num and key == 'num':
                continue
            node.kwargs.append(nodes.Keyword(key, value))
    # otherwise do that here
    else:
        # mark the return value as safe if we are in an
        # environment with autoescaping turned on
        node = nodes.MarkSafeIfAutoescape(node)
        if variables:
            # Apply %-formatting at render time: translated % {vars}.
            node = nodes.Mod(node, nodes.Dict([
                nodes.Pair(nodes.Const(key), value)
                for key, value in variables.items()
            ]))
    return nodes.Output([node])
class ExprStmtExtension(Extension):
    """Adds a `do` tag to Jinja2 that works like the print statement just
    that it doesn't print the return value.
    """
    tags = set(['do'])

    def parse(self, parser):
        # Consume the 'do' token, remembering its line for error reporting.
        lineno = next(parser.stream).lineno
        return nodes.ExprStmt(parser.parse_tuple(), lineno=lineno)
class LoopControlExtension(Extension):
    """Adds break and continue to the template engine."""
    tags = set(['break', 'continue'])

    def parse(self, parser):
        # The tag token itself tells us which statement to emit.
        token = next(parser.stream)
        node_cls = nodes.Break if token.value == 'break' else nodes.Continue
        return node_cls(lineno=token.lineno)
class WithExtension(Extension):
    """Adds support for a django-like with block."""
    tags = set(['with'])

    def parse(self, parser):
        scope = nodes.Scope(lineno=next(parser.stream).lineno)
        assignments = []
        # Parse comma separated `target = expression` pairs until the tag
        # closes.
        while parser.stream.current.type != 'block_end':
            lineno = parser.stream.current.lineno
            if assignments:
                parser.stream.expect('comma')
            target = parser.parse_assign_target()
            parser.stream.expect('assign')
            expr = parser.parse_expression()
            assignments.append(nodes.Assign(target, expr, lineno=lineno))
        # The scope body is the assignments followed by everything up to
        # {% endwith %}.
        inner = parser.parse_statements(('name:endwith',), drop_needle=True)
        scope.body = assignments + list(inner)
        return scope
class AutoEscapeExtension(Extension):
    """Changes auto escape rules for a scope."""
    tags = set(['autoescape'])

    def parse(self, parser):
        lineno = next(parser.stream).lineno
        modifier = nodes.ScopedEvalContextModifier(lineno=lineno)
        # The tag's single argument is the new autoescape setting.
        modifier.options = [
            nodes.Keyword('autoescape', parser.parse_expression())
        ]
        modifier.body = parser.parse_statements(('name:endautoescape',),
                                                drop_needle=True)
        return nodes.Scope([modifier])
def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
                     babel_style=True):
    """Extract localizable strings from the given template node.  Per
    default this function returns matches in babel style that means non string
    parameters as well as keyword arguments are returned as `None`.  This
    allows Babel to figure out what you really meant if you are using
    gettext functions that allow keyword arguments for placeholder expansion.
    If you don't want that behavior set the `babel_style` parameter to `False`
    which causes only strings to be returned and parameters are always stored
    in tuples.  As a consequence invalid gettext calls (calls without a single
    string parameter or string parameters after non-string parameters) are
    skipped.

    This example explains the behavior:

    >>> from jinja2 import Environment
    >>> env = Environment()
    >>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
    >>> list(extract_from_ast(node))
    [(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
    >>> list(extract_from_ast(node, babel_style=False))
    [(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]

    For every string found this function yields a ``(lineno, function,
    message)`` tuple, where:

    * ``lineno`` is the number of the line on which the string was found,
    * ``function`` is the name of the ``gettext`` function used (if the
      string was extracted from embedded Python code), and
    * ``message`` is the string itself (a ``unicode`` object, or a tuple
      of ``unicode`` objects for functions with multiple string arguments).

    This extraction function operates on the AST and is because of that unable
    to extract any comments.  For comment support you have to use the babel
    extraction interface or extract comments yourself.
    """
    # NOTE: the loop variable deliberately shadows the *node* parameter;
    # only Call nodes whose callee is a plain name in gettext_functions
    # are considered.
    for node in node.find_all(nodes.Call):
        if not isinstance(node.node, nodes.Name) or \
           node.node.name not in gettext_functions:
            continue
        strings = []
        # Constant string positional args are kept; anything else becomes
        # a None placeholder so argument positions stay aligned.
        for arg in node.args:
            if isinstance(arg, nodes.Const) and \
               isinstance(arg.value, string_types):
                strings.append(arg.value)
            else:
                strings.append(None)
        for arg in node.kwargs:
            strings.append(None)
        if node.dyn_args is not None:
            strings.append(None)
        if node.dyn_kwargs is not None:
            strings.append(None)
        if not babel_style:
            # Non-babel style: drop placeholders entirely and skip calls
            # that had no constant strings at all.
            strings = tuple(x for x in strings if x is not None)
            if not strings:
                continue
        else:
            # Babel style: a single argument is yielded bare, multiple
            # arguments as a tuple (placeholders included).
            if len(strings) == 1:
                strings = strings[0]
            else:
                strings = tuple(strings)
        yield node.lineno, node.node.name, strings
class _CommentFinder(object):
    """Helper class to find comments in a token stream.  Can only
    find comments for gettext calls forwards.  Once the comment
    from line 4 is found, a comment for line 1 will not return a
    usable value.
    """

    def __init__(self, tokens, comment_tags):
        self.tokens = tokens
        self.comment_tags = comment_tags
        self.offset = 0
        self.last_lineno = 0

    def find_backwards(self, offset):
        try:
            # Scan the not-yet-consumed window back to front so the
            # comment nearest the call wins.
            window = self.tokens[self.offset:offset]
            for _, kind, value in reversed(window):
                if kind not in ('comment', 'linecomment'):
                    continue
                try:
                    prefix, comment = value.split(None, 1)
                except ValueError:
                    continue
                if prefix in self.comment_tags:
                    return [comment.rstrip()]
            return []
        finally:
            # Consume the window regardless of outcome.
            self.offset = offset

    def find_comments(self, lineno):
        if not self.comment_tags or self.last_lineno > lineno:
            return []
        # Find the first token past *lineno* and search backwards from it.
        for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
            if token_lineno > lineno:
                return self.find_backwards(self.offset + idx)
        return self.find_backwards(len(self.tokens))
def babel_extract(fileobj, keywords, comment_tags, options):
    """Babel extraction method for Jinja templates.

    .. versionchanged:: 2.3
       Basic support for translation comments was added.  If `comment_tags`
       is now set to a list of keywords for extraction, the extractor will
       try to find the best preceeding comment that begins with one of the
       keywords.  For best results, make sure to not have more than one
       gettext call in one line of code and the matching comment in the
       same line or the line before.

    .. versionchanged:: 2.5.1
       The `newstyle_gettext` flag can be set to `True` to enable newstyle
       gettext calls.

    .. versionchanged:: 2.7
       A `silent` option can now be provided.  If set to `False` template
       syntax errors are propagated instead of being ignored.

    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results.
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
             (comments will be empty currently)
    """
    # Import any extra extensions requested via the babel config; the i18n
    # extension is always enabled so {% trans %} blocks parse.
    extensions = set()
    for extension in options.get('extensions', '').split(','):
        extension = extension.strip()
        if not extension:
            continue
        extensions.add(import_string(extension))
    if InternationalizationExtension not in extensions:
        extensions.add(InternationalizationExtension)

    def getbool(options, key, default=False):
        # Babel option values arrive as strings; interpret common truthy ones.
        return options.get(key, str(default)).lower() in \
            ('1', 'on', 'yes', 'true')

    silent = getbool(options, 'silent', True)
    # Build a throw-away environment mirroring the user's delimiter and
    # whitespace configuration so the template parses identically.
    environment = Environment(
        options.get('block_start_string', BLOCK_START_STRING),
        options.get('block_end_string', BLOCK_END_STRING),
        options.get('variable_start_string', VARIABLE_START_STRING),
        options.get('variable_end_string', VARIABLE_END_STRING),
        options.get('comment_start_string', COMMENT_START_STRING),
        options.get('comment_end_string', COMMENT_END_STRING),
        options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
        options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
        getbool(options, 'trim_blocks', TRIM_BLOCKS),
        getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS),
        NEWLINE_SEQUENCE,
        getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE),
        frozenset(extensions),
        cache_size=0,
        auto_reload=False
    )
    if getbool(options, 'newstyle_gettext'):
        environment.newstyle_gettext = True
    source = fileobj.read().decode(options.get('encoding', 'utf-8'))
    try:
        node = environment.parse(source)
        # The raw token stream is needed separately for comment lookup.
        tokens = list(environment.lex(environment.preprocess(source)))
    except TemplateSyntaxError as e:
        if not silent:
            raise
        # skip templates with syntax errors
        return
    finder = _CommentFinder(tokens, comment_tags)
    for lineno, func, message in extract_from_ast(node, keywords):
        yield lineno, func, message, finder.find_comments(lineno)
#: nicer import names
i18n = InternationalizationExtension    # {% trans %} / gettext support
do = ExprStmtExtension                  # {% do %} expression statement
loopcontrols = LoopControlExtension     # {% break %} / {% continue %}
with_ = WithExtension                   # django-like {% with %} block
autoescape = AutoEscapeExtension        # scoped {% autoescape %} block
| apache-2.0 |
classmember/proof_of_concept | python/events/lib/python3.4/site-packages/pip/_internal/cli/autocompletion.py | 29 | 6083 | """Logic that powers autocompletion installed by ``pip completion``.
"""
import optparse
import os
import sys
from pip._internal.cli.main_parser import create_main_parser
from pip._internal.commands import commands_dict, get_summaries
from pip._internal.utils.misc import get_installed_distributions
def autocomplete():
    """Entry Point for completion of main and subcommand options.

    Reads the shell completion state from the ``COMP_WORDS`` /
    ``COMP_CWORD`` environment variables (set by pip's bash completion
    script), prints candidate completions to stdout and exits with
    status 1.
    """
    # Don't complete if user hasn't sourced bash_completion file.
    if 'PIP_AUTO_COMPLETE' not in os.environ:
        return
    # COMP_WORDS holds the full command line; drop argv[0] ('pip').
    cwords = os.environ['COMP_WORDS'].split()[1:]
    cword = int(os.environ['COMP_CWORD'])
    try:
        # The word currently being completed (may be empty).
        current = cwords[cword - 1]
    except IndexError:
        current = ''
    subcommands = [cmd for cmd, summary in get_summaries()]
    options = []
    # subcommand
    try:
        subcommand_name = [w for w in cwords if w in subcommands][0]
    except IndexError:
        subcommand_name = None
    parser = create_main_parser()
    # subcommand options
    if subcommand_name:
        # special case: 'help' subcommand has no options
        if subcommand_name == 'help':
            sys.exit(1)
        # special case: list locally installed dists for show and uninstall
        should_list_installed = (
            subcommand_name in ['show', 'uninstall'] and
            not current.startswith('-')
        )
        if should_list_installed:
            installed = []
            lc = current.lower()
            for dist in get_installed_distributions(local_only=True):
                # Skip dists already mentioned on the command line.
                if dist.key.startswith(lc) and dist.key not in cwords[1:]:
                    installed.append(dist.key)
            # if there are no dists installed, fall back to option completion
            if installed:
                for dist in installed:
                    print(dist)
                sys.exit(1)
        subcommand = commands_dict[subcommand_name]()
        # Collect every visible option string together with its arg count.
        for opt in subcommand.parser.option_list_all:
            if opt.help != optparse.SUPPRESS_HELP:
                for opt_str in opt._long_opts + opt._short_opts:
                    options.append((opt_str, opt.nargs))
        # filter out previously specified options from available options
        prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
        options = [(x, v) for (x, v) in options if x not in prev_opts]
        # filter options by current input
        options = [(k, v) for k, v in options if k.startswith(current)]
        # get completion type given cwords and available subcommand options
        completion_type = get_path_completion_type(
            cwords, cword, subcommand.parser.option_list_all,
        )
        # get completion files and directories if ``completion_type`` is
        # ``<file>``, ``<dir>`` or ``<path>``
        if completion_type:
            options = auto_complete_paths(current, completion_type)
            options = ((opt, 0) for opt in options)
        for option in options:
            opt_label = option[0]
            # append '=' to options which require args
            if option[1] and option[0][:2] == "--":
                opt_label += '='
            print(opt_label)
    else:
        # show main parser options only when necessary
        opts = [i.option_list for i in parser.option_groups]
        opts.append(parser.option_list)
        opts = (o for it in opts for o in it)
        if current.startswith('-'):
            # Completing a main-parser option: offer its option strings.
            for opt in opts:
                if opt.help != optparse.SUPPRESS_HELP:
                    subcommands += opt._long_opts + opt._short_opts
        else:
            # get completion type given cwords and all available options
            completion_type = get_path_completion_type(cwords, cword, opts)
            if completion_type:
                subcommands = auto_complete_paths(current, completion_type)
        print(' '.join([x for x in subcommands if x.startswith(current)]))
    sys.exit(1)
def get_path_completion_type(cwords, cword, opts):
    """Get the type of path completion (``file``, ``dir``, ``path`` or None)

    :param cwords: same as the environmental variable ``COMP_WORDS``
    :param cword: same as the environmental variable ``COMP_CWORD``
    :param opts: The available options to check
    :return: path completion type (``file``, ``dir``, ``path`` or None)
    """
    # A path can only be completed as the value of an option, i.e. the
    # previous word must be an option string.
    if cword < 2 or not cwords[cword - 2].startswith('-'):
        return
    # Strip any '=value' part off the option the user is filling in.
    completed = cwords[cword - 2].split('=')[0]
    for opt in opts:
        if opt.help == optparse.SUPPRESS_HELP:
            continue
        # str(opt) joins the option's short and long strings with '/'.
        if completed not in str(opt).split('/'):
            continue
        metavar = opt.metavar
        # Path-like metavars (possibly compound, e.g. 'file/dir') request
        # filesystem completion; a missing metavar is returned as-is.
        if not metavar or {'path', 'file', 'dir'} & set(metavar.split('/')):
            return metavar
def auto_complete_paths(current, completion_type):
    """If ``completion_type`` is ``file`` or ``path``, list all regular files
    and directories starting with ``current``; otherwise only list directories
    starting with ``current``.

    :param current: The word to be completed
    :param completion_type: path completion type (``file``, ``path`` or ``dir``)
    :return: A generator of regular files and/or directories
    """
    directory, filename = os.path.split(current)
    abs_dir = os.path.abspath(directory)
    # Don't complete paths if they can't be accessed
    if not os.access(abs_dir, os.R_OK):
        return
    prefix = os.path.normcase(filename)
    for entry in os.listdir(abs_dir):
        # Only entries whose (case-normalized) name extends the typed prefix.
        if not os.path.normcase(entry).startswith(prefix):
            continue
        candidate = os.path.join(abs_dir, entry)
        completed = os.path.normcase(os.path.join(directory, entry))
        if os.path.isdir(candidate):
            # Directories always complete, with a trailing separator so the
            # shell can keep descending.
            yield os.path.join(completed, '')
        elif completion_type != 'dir' and os.path.isfile(candidate):
            # Regular files only when a <file> or <path> was requested.
            yield completed
| mit |
drxaero/calibre | src/calibre/ebooks/pdf/render/engine.py | 10 | 14304 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys, traceback, math
from collections import namedtuple
from functools import wraps, partial
from future_builtins import map, zip
from PyQt5.Qt import (QPaintEngine, QPaintDevice, Qt, QTransform, QBrush)
from calibre.constants import plugins
from calibre.ebooks.pdf.render.serialize import (PDFStream, Path)
from calibre.ebooks.pdf.render.common import inch, A4, fmtnum
from calibre.ebooks.pdf.render.graphics import convert_path, Graphics
from calibre.utils.fonts.sfnt.container import Sfnt, UnsupportedFont
from calibre.utils.fonts.sfnt.metrics import FontMetrics
# Lightweight records used throughout the PDF paint engine.
Point = namedtuple('Point', 'x y')  # a 2D coordinate
ColorState = namedtuple('ColorState', 'color opacity do')  # fill/stroke color snapshot
GlyphInfo = namedtuple('GlyphInfo', 'name size stretch positions indices')  # a shaped glyph run
def repr_transform(t):
    """Return a compact bracketed string of the six affine matrix entries
    of the QTransform *t*, formatted with fmtnum."""
    vals = map(fmtnum, (t.m11(), t.m12(), t.m21(), t.m22(), t.dx(), t.dy()))
    return '[%s]'%' '.join(vals)
def store_error(func):
    """Decorator for paint-engine drawing methods: record, instead of
    propagate, any exception raised by *func*.

    Exceptions must not escape into Qt's C++ paint machinery, so on
    failure the wrapper sets ``self.errors_occurred`` and reports the
    formatted traceback via ``self.errors``.
    """
    @wraps(func)
    def errh(self, *args, **kwargs):
        try:
            func(self, *args, **kwargs)
        except Exception:
            # Catch Exception rather than using a bare except so that
            # SystemExit/KeyboardInterrupt still propagate normally.
            self.errors_occurred = True
            self.errors(traceback.format_exc())
    return errh
class Font(FontMetrics):
    """FontMetrics subclass that additionally tracks which glyphs have
    actually been drawn, so only that subset needs embedding in the PDF."""

    def __init__(self, sfnt):
        FontMetrics.__init__(self, sfnt)
        # glyph id -> unicode character, filled lazily as text is drawn
        self.glyph_map = {}
class PdfEngine(QPaintEngine):
    """A Qt paint engine that translates QPainter drawing commands into a
    PDF content stream (via :class:`PDFStream`) instead of rasterizing."""

    # Features advertised to Qt; compositing/gradient modes that PDF
    # cannot express are excluded so Qt falls back for them.
    FEATURES = QPaintEngine.AllFeatures & ~(
        QPaintEngine.PorterDuff | QPaintEngine.PerspectiveTransform |
        QPaintEngine.ObjectBoundingModeGradients |
        QPaintEngine.RadialGradientFill |
        QPaintEngine.ConicalGradientFill
    )

    def __init__(self, file_object, page_width, page_height, left_margin,
                 top_margin, right_margin, bottom_margin, width, height,
                 errors=print, debug=print, compress=True,
                 mark_links=False):
        # page_* are in pts (PDF units); width/height are the pixel
        # dimensions of the Qt painting surface.
        QPaintEngine.__init__(self, self.FEATURES)
        self.file_object = file_object
        self.compress, self.mark_links = compress, mark_links
        self.page_height, self.page_width = page_height, page_width
        self.left_margin, self.top_margin = left_margin, top_margin
        self.right_margin, self.bottom_margin = right_margin, bottom_margin
        self.pixel_width, self.pixel_height = width, height
        # Setup a co-ordinate transform that allows us to use co-ords
        # from Qt's pixel based co-ordinate system with its origin at the top
        # left corner. PDF's co-ordinate system is based on pts and has its
        # origin in the bottom left corner. We also have to implement the page
        # margins. Therefore, we need to translate, scale and reflect about the
        # x-axis.
        dy = self.page_height - self.top_margin
        dx = self.left_margin
        sx = (self.page_width - self.left_margin -
              self.right_margin) / self.pixel_width
        sy = (self.page_height - self.top_margin -
              self.bottom_margin) / self.pixel_height
        self.pdf_system = QTransform(sx, 0, 0, -sy, dx, dy)
        self.graphics = Graphics(self.pixel_width, self.pixel_height)
        self.errors_occurred = False
        self.errors, self.debug = errors, debug
        self.fonts = {}
        self.current_page_num = 1
        self.current_page_inited = False
        # Native helper extension for extracting glyph data from Qt.
        self.qt_hack, err = plugins['qt_hack']
        if err:
            raise RuntimeError('Failed to load qt_hack with err: %s'%err)

    def apply_graphics_state(self):
        # Push the pending Qt painter state into the PDF stream.
        self.graphics(self.pdf_system, self.painter())

    def resolve_fill(self, rect):
        self.graphics.resolve_fill(rect, self.pdf_system,
                                   self.painter().transform())

    @property
    def do_fill(self):
        # Whether the current brush produces any fill output.
        return self.graphics.current_state.do_fill

    @property
    def do_stroke(self):
        # Whether the current pen produces any stroke output.
        return self.graphics.current_state.do_stroke

    def init_page(self):
        self.pdf.transform(self.pdf_system)
        self.pdf.apply_fill(color=(1, 1, 1))  # QPainter has a default background brush of white
        self.graphics.reset()
        self.pdf.save_stack()
        self.current_page_inited = True

    def begin(self, device):
        """Called by Qt when painting starts; lazily creates the PDF stream.
        Returns False (aborting the paint) if stream creation fails."""
        if not hasattr(self, 'pdf'):
            try:
                self.pdf = PDFStream(self.file_object, (self.page_width,
                                                        self.page_height), compress=self.compress,
                                     mark_links=self.mark_links,
                                     debug=self.debug)
                self.graphics.begin(self.pdf)
            except:
                self.errors(traceback.format_exc())
                self.errors_occurred = True
                return False
        return True

    def end_page(self):
        # Close out the current page (if one was started) and advance the
        # page counter.
        if self.current_page_inited:
            self.pdf.restore_stack()
            self.pdf.end_page()
            self.current_page_inited = False
            self.current_page_num += 1

    def end(self):
        """Finish the document, always releasing the stream/file references."""
        try:
            self.end_page()
            self.pdf.end()
        except:
            self.errors(traceback.format_exc())
            self.errors_occurred = True
            return False
        finally:
            self.pdf = self.file_object = None
        return True

    def type(self):
        return QPaintEngine.Pdf

    def add_image(self, img, cache_key):
        # Returns a reference usable by draw_image, or None for null images.
        if img.isNull():
            return
        return self.pdf.add_image(img, cache_key)

    @store_error
    def drawTiledPixmap(self, rect, pixmap, point):
        # Implemented as a pattern fill of the target rectangle.
        self.apply_graphics_state()
        brush = QBrush(pixmap)
        bl = rect.topLeft()
        color, opacity, pattern, do_fill = self.graphics.convert_brush(
            brush, bl-point, 1.0, self.pdf_system,
            self.painter().transform())
        self.pdf.save_stack()
        self.pdf.apply_fill(color, pattern)
        self.pdf.draw_rect(bl.x(), bl.y(), rect.width(), rect.height(),
                           stroke=False, fill=True)
        self.pdf.restore_stack()

    @store_error
    def drawPixmap(self, rect, pixmap, source_rect):
        self.apply_graphics_state()
        source_rect = source_rect.toRect()
        # Crop only when a sub-rectangle of the pixmap is requested.
        pixmap = (pixmap if source_rect == pixmap.rect() else
                  pixmap.copy(source_rect))
        image = pixmap.toImage()
        ref = self.add_image(image, pixmap.cacheKey())
        if ref is not None:
            self.pdf.draw_image(rect.x(), rect.y(), rect.width(),
                                rect.height(), ref)

    @store_error
    def drawImage(self, rect, image, source_rect, flags=Qt.AutoColor):
        self.apply_graphics_state()
        source_rect = source_rect.toRect()
        image = (image if source_rect == image.rect() else
                 image.copy(source_rect))
        ref = self.add_image(image, image.cacheKey())
        if ref is not None:
            self.pdf.draw_image(rect.x(), rect.y(), rect.width(),
                                rect.height(), ref)

    @store_error
    def updateState(self, state):
        # Qt notifies us of pen/brush/transform/clip changes here.
        self.graphics.update_state(state, self.painter())

    @store_error
    def drawPath(self, path):
        self.apply_graphics_state()
        p = convert_path(path)
        fill_rule = {Qt.OddEvenFill:'evenodd',
                     Qt.WindingFill:'winding'}[path.fillRule()]
        self.pdf.draw_path(p, stroke=self.do_stroke,
                           fill=self.do_fill, fill_rule=fill_rule)

    @store_error
    def drawPoints(self, points):
        self.apply_graphics_state()
        p = Path()
        for point in points:
            # PDF has no point primitive; emit a tiny stroked segment.
            p.move_to(point.x(), point.y())
            p.line_to(point.x(), point.y() + 0.001)
        self.pdf.draw_path(p, stroke=self.do_stroke, fill=False)

    @store_error
    def drawRects(self, rects):
        self.apply_graphics_state()
        with self.graphics:
            for rect in rects:
                self.resolve_fill(rect)
                bl = rect.topLeft()
                self.pdf.draw_rect(bl.x(), bl.y(), rect.width(), rect.height(),
                                   stroke=self.do_stroke, fill=self.do_fill)

    def create_sfnt(self, text_item):
        """Build a Font (sfnt + metrics + glyph-to-unicode map) for the
        font used by *text_item*; raises UnsupportedFont on bad data."""
        get_table = partial(self.qt_hack.get_sfnt_table, text_item)
        try:
            ans = Font(Sfnt(get_table))
        except UnsupportedFont as e:
            raise UnsupportedFont('The font %s is not a valid sfnt. Error: %s'%(
                text_item.font().family(), e))
        glyph_map = self.qt_hack.get_glyph_map(text_item)
        gm = {}
        # Invert unicode->glyph into glyph->unicode, keeping the first
        # codepoint seen for each glyph id.
        for uc, glyph_id in enumerate(glyph_map):
            if glyph_id not in gm:
                gm[glyph_id] = unichr(uc)
        ans.full_glyph_map = gm
        return ans

    @store_error
    def drawTextItem(self, point, text_item):
        # return super(PdfEngine, self).drawTextItem(point, text_item)
        self.apply_graphics_state()
        gi = GlyphInfo(*self.qt_hack.get_glyphs(point, text_item))
        if not gi.indices:
            return
        name = hash(gi.name)
        if name not in self.fonts:
            try:
                self.fonts[name] = self.create_sfnt(text_item)
            except UnsupportedFont:
                # Fall back to Qt's path-based text rendering.
                return super(PdfEngine, self).drawTextItem(point, text_item)
        metrics = self.fonts[name]
        # Record the glyphs used so the font can be subsetted later.
        for glyph_id in gi.indices:
            try:
                metrics.glyph_map[glyph_id] = metrics.full_glyph_map[glyph_id]
            except (KeyError, ValueError):
                pass
        glyphs = []
        last_x = last_y = 0
        # Glyph positions are emitted as deltas from the previous glyph.
        for glyph_index, (x, y) in zip(gi.indices, gi.positions):
            glyphs.append((x-last_x, last_y - y, glyph_index))
            last_x, last_y = x, y
        self.pdf.draw_glyph_run([gi.stretch, 0, 0, -1, 0, 0], gi.size, metrics,
                                glyphs)

    @store_error
    def drawPolygon(self, points, mode):
        self.apply_graphics_state()
        if not points:
            return
        p = Path()
        p.move_to(points[0].x(), points[0].y())
        for point in points[1:]:
            p.line_to(point.x(), point.y())
        p.close()
        fill_rule = {self.OddEvenMode:'evenodd',
                     self.WindingMode:'winding'}.get(mode, 'evenodd')
        # PolylineMode is the only mode that is stroked without fill.
        self.pdf.draw_path(p, stroke=True, fill_rule=fill_rule,
                           fill=(mode in (self.OddEvenMode, self.WindingMode, self.ConvexMode)))

    def set_metadata(self, *args, **kwargs):
        self.pdf.set_metadata(*args, **kwargs)

    def add_outline(self, toc):
        self.pdf.links.add_outline(toc)

    def add_links(self, current_item, start_page, links, anchors):
        """Map link/anchor rectangles from pixel space into PDF space and
        register them with the PDF link manager."""
        for pos in anchors.itervalues():
            pos['left'], pos['top'] = self.pdf_system.map(pos['left'], pos['top'])
        for link in links:
            pos = link[1]
            # Lower-left / upper-right corners of the clickable area.
            llx = pos['left']
            lly = pos['top'] + pos['height']
            urx = pos['left'] + pos['width']
            ury = pos['top']
            llx, lly = self.pdf_system.map(llx, lly)
            urx, ury = self.pdf_system.map(urx, ury)
            link[1] = pos['column'] + start_page
            link.append((llx, lly, urx, ury))
        self.pdf.links.add(current_item, start_page, links, anchors)
class PdfDevice(QPaintDevice):  # {{{
    """QPaintDevice backed by :class:`PdfEngine`: painting on this device
    with a QPainter produces PDF output in *file_object*."""

    def __init__(self, file_object, page_size=A4, left_margin=inch,
                 top_margin=inch, right_margin=inch, bottom_margin=inch,
                 xdpi=1200, ydpi=1200, errors=print, debug=print,
                 compress=True, mark_links=False):
        # page_size and margins are in pts; xdpi/ydpi define the pixel
        # resolution Qt paints at.
        QPaintDevice.__init__(self)
        self.xdpi, self.ydpi = xdpi, ydpi
        self.page_width, self.page_height = page_size
        self.body_width = self.page_width - left_margin - right_margin
        self.body_height = self.page_height - top_margin - bottom_margin
        self.left_margin, self.right_margin = left_margin, right_margin
        self.top_margin, self.bottom_margin = top_margin, bottom_margin
        self.engine = PdfEngine(file_object, self.page_width, self.page_height,
                                left_margin, top_margin, right_margin,
                                bottom_margin, self.width(), self.height(),
                                errors=errors, debug=debug, compress=compress,
                                mark_links=mark_links)
        self.add_outline = self.engine.add_outline
        self.add_links = self.engine.add_links

    def paintEngine(self):
        return self.engine

    def metric(self, m):
        # Report device metrics to Qt: dpi, bit depth, and the printable
        # body dimensions in mm (1 pt = 0.35277... mm) and in pixels.
        if m in (self.PdmDpiX, self.PdmPhysicalDpiX):
            return self.xdpi
        if m in (self.PdmDpiY, self.PdmPhysicalDpiY):
            return self.ydpi
        if m == self.PdmDepth:
            return 32
        if m == self.PdmNumColors:
            return sys.maxint
        if m == self.PdmWidthMM:
            return int(round(self.body_width * 0.35277777777778))
        if m == self.PdmHeightMM:
            return int(round(self.body_height * 0.35277777777778))
        if m == self.PdmWidth:
            return int(round(self.body_width * self.xdpi / 72.0))
        if m == self.PdmHeight:
            return int(round(self.body_height * self.ydpi / 72.0))
        return 0

    def end_page(self, *args, **kwargs):
        self.engine.end_page(*args, **kwargs)

    def init_page(self):
        self.engine.init_page()

    @property
    def full_page_rect(self):
        # The whole physical page (including margins) in pixel units,
        # relative to the body origin; padded by 1px to avoid clipping.
        page_width = int(math.ceil(self.page_width * self.xdpi / 72.0))
        lm = int(math.ceil(self.left_margin * self.xdpi / 72.0))
        page_height = int(math.ceil(self.page_height * self.ydpi / 72.0))
        tm = int(math.ceil(self.top_margin * self.ydpi / 72.0))
        return (-lm, -tm, page_width+1, page_height+1)

    @property
    def current_page_num(self):
        return self.engine.current_page_num

    @property
    def errors_occurred(self):
        return self.engine.errors_occurred

    def to_px(self, pt, vertical=True):
        # Convert pts to device pixels along the chosen axis.
        return pt * (self.height()/self.page_height if vertical else
                     self.width()/self.page_width)

    def set_metadata(self, *args, **kwargs):
        self.engine.set_metadata(*args, **kwargs)
# }}}
| gpl-3.0 |
zacps/zulip | tools/lib/test_script.py | 5 | 2256 | from __future__ import absolute_import
from __future__ import print_function
from typing import Tuple
import os
from version import PROVISION_VERSION
def get_major_version(v):
    # type: (str) -> int
    """Return the leading (major) component of a dotted version string."""
    major = v.partition('.')[0]
    return int(major)
def get_version_file():
    # type: () -> str
    """Return the path of the file recording the last provisioned version."""
    return 'var/provision_version'
# Explanatory text prepended to every provisioning error message; the two
# %s slots are (on-disk version, source-control version).
PREAMBLE = '''
Before we run tests, we make sure your provisioning version
is correct by looking at var/provision_version, which is at
version %s, and we compare it to the version in source
control (version.py), which is %s.
'''


def preamble(version):
    # type: (str) -> str
    """Render PREAMBLE for the given on-disk *version*, followed by a
    blank line."""
    return PREAMBLE % (version, PROVISION_VERSION) + '\n'
NEED_TO_DOWNGRADE = '''
It looks like you checked out a branch that expects an older
version of dependencies than the version you provisioned last.
This may be ok, but it's likely that you either want to rebase
your branch on top of upstream/master or re-provision your VM.
Do this: `./tools/provision.py`
'''
NEED_TO_UPGRADE = '''
It looks like you checked out a branch that has added
dependencies beyond what you last provisioned. Your tests
are likely to fail until you add dependencies by provisioning.
Do this: `./tools/provision.py`
'''
def _version_newer(version_a, version_b):
    # type: (str, str) -> bool
    """Return True if dotted version string *version_a* is newer than
    *version_b*.

    Components are compared numerically so that e.g. '10.1' is correctly
    newer than '9.2'.  If any component is not an integer we fall back to
    plain string comparison (the historical behavior).
    """
    try:
        a = tuple(int(part) for part in version_a.split('.'))
        b = tuple(int(part) for part in version_b.split('.'))
    except ValueError:
        return version_a > version_b
    return a > b


def get_provisioning_status():
    # type: () -> Tuple[bool, str]
    """Check whether the tree was provisioned recently enough to run tests.

    Returns ``(True, None)`` when tests may run, otherwise
    ``(False, message)`` where *message* explains how to re-provision.
    """
    version_file = get_version_file()
    if not os.path.exists(version_file):
        # If the developer doesn't have a version_file written by
        # a previous provision, then we don't do any safety checks
        # here on the assumption that the developer is managing
        # their own dependencies and not running provision.py.
        return True, None
    # Use a context manager so the file handle is not leaked.
    with open(version_file) as f:
        version = f.read().strip()
    # Normal path for people that provision--we're all good!
    if version == PROVISION_VERSION:
        return True, None
    # We may be more provisioned than the branch we just moved to. As
    # long as the major version hasn't changed, then we should be ok.
    # NOTE: compared numerically, not lexicographically; a plain string
    # comparison would wrongly claim '9.0' is newer than '10.0'.
    if _version_newer(version, PROVISION_VERSION):
        if get_major_version(version) == get_major_version(PROVISION_VERSION):
            return True, None
        else:
            return False, preamble(version) + NEED_TO_DOWNGRADE
    return False, preamble(version) + NEED_TO_UPGRADE
| apache-2.0 |
bwhite/hadoopy | examples/l4-vision-and-image-processing-with-hadoop/ex0-face-finder/face_finder.py | 1 | 3141 | #!/usr/bin/env python
# (C) Copyright 2011 Brandyn A. White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Hadoopy Face Finding Demo"""
__author__ = 'Brandyn A. White <bwhite@cs.umd.edu>'
__license__ = 'GPL V3'
import hadoopy
import Image
import imfeat
import cStringIO as StringIO
import os
import cv
class Mapper(object):
    """Hadoopy mapper that runs OpenCV Haar-cascade face detection on
    images supplied as (name, jpeg-bytes) pairs."""

    def __init__(self):
        # Locate the cascade definition either in the working directory
        # (shipped with the job) or in the local fixtures directory.
        path = 'haarcascade_frontalface_default.xml'
        if os.path.exists(path):
            self._cascade = cv.Load(path)
        else:
            path = 'fixtures/haarcascade_frontalface_default.xml'
            if os.path.exists(path):
                self._cascade = cv.Load(path)
            else:
                raise ValueError("Can't find .xml file!")

    def _detect_faces(self, img):
        # Run the Haar detector on a downscaled, equalized grayscale copy
        # of *img* and return [((x, y, w, h), n)] rectangles scaled back
        # to the original image coordinates.
        min_size = (20, 20)
        image_scale = 2
        haar_scale = 1.2
        min_neighbors = 2
        haar_flags = 0
        if img.nChannels == 3:
            gray = cv.CreateImage((img.width, img.height), 8, 1)
            cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
        else:
            gray = img
        # Detect on a half-size image for speed.
        small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                    cv.Round(img.height / image_scale)), 8, 1)
        cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
        cv.EqualizeHist(small_img, small_img)
        faces = cv.HaarDetectObjects(small_img, self._cascade,
                                     cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags,
                                     min_size)
        # Scale detections back up to full-image coordinates.
        return [((x * image_scale, y * image_scale,
                  w * image_scale, h * image_scale), n)
                for (x, y, w, h), n in faces]

    def _load_cv_image(self, value):
        # Decode jpeg bytes into an 8-bit RGB OpenCV image.
        return imfeat.convert_image(Image.open(StringIO.StringIO(value)),
                                    [('opencv', 'rgb', 8)])

    def map(self, key, value):
        """
        Args:
            key: Image name
            value: Image as jpeg byte data

        Yields:
            A tuple in the form of (key, value)
            key: Image name
            value: (image, faces) where image is the input value and faces is
                a list of ((x, y, w, h), n)
        """
        try:
            image = self._load_cv_image(value)
        except:
            # Undecodable input is counted and skipped, not fatal to the job.
            hadoopy.counter('DATA_ERRORS', 'ImageLoadError')
            return
        faces = self._detect_faces(image)
        if faces:
            yield key, (value, faces)
if __name__ == "__main__":
    # Hand control to hadoopy, which dispatches map tasks to Mapper.
    hadoopy.run(Mapper, doc=__doc__)
| gpl-3.0 |
sachingupta006/Mezzanine | mezzanine/generic/migrations/0006_move_keywords.py | 12 | 10241 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.db.models.fields.related import ReverseSingleRelatedObjectDescriptor
from django.contrib.sites.models import Site
class Migration(DataMigration):
    def forwards(self, orm):
        """
        This migration exists because prior to giving Keyword a site field, keywords were
        shared between different sites. In order to add the site field, the "current site"
        was assigned as a default. This migration attempts to determine the correct site
        for the keyword by finding what objects the keyword was assigned to, and if
        that object contains a reference to the site table, it uses that site.
        If there is no reference, however, the default previously assigned is used.

        Part of what this does is create new Keywords for cases where a keyword object
        is being shared between different sites. A distinct keyword is used in each case.
        """
        AssignedKeyword = orm['generic.assignedkeyword']
        Keyword = orm['generic.keyword']
        keywords = Keyword._base_manager.all()
        for keyword in keywords:
            #assignments = AssignedKeyword._base_manager.filter(keyword=keyword)
            assignments = keyword.assignments.all()
            # Maps site pk -> the per-site Keyword clone created for it.
            site_dict = {}
            # Becomes True if any assignment could not be resolved to a
            # site, in which case the original keyword must be kept.
            uses_original = False
            for assignment in assignments:
                try:
                    ct = ContentType._base_manager.get(pk=assignment.content_type.pk)
                    related = ct.model_class()._base_manager.get(pk=assignment.object_pk)
                    site = None
                    # Look for any ForeignKey to Site on the related model
                    # and use its value as this assignment's site.
                    for attr_name in dir(related.__class__):
                        attr = getattr(related.__class__, attr_name)
                        if isinstance(attr, ReverseSingleRelatedObjectDescriptor) and \
                           attr.field.rel.to is Site:
                            site = getattr(related, attr_name)
                        if site:
                            break
                    if site:
                        # Re-point the assignment at a keyword clone bound
                        # to the discovered site, creating it on first use.
                        dict_keyword = site_dict.get(site.pk, None)
                        if not dict_keyword:
                            orm_site = orm['sites.site'].objects.get(pk=site.pk)
                            dict_keyword = Keyword(site=orm_site, slug=keyword.slug, title=keyword.title)
                            dict_keyword.save()
                            site_dict[site.pk] = dict_keyword
                        assignment.keyword = dict_keyword
                        assignment.save()
                    else:
                        uses_original = True
                except Exception, e:
                    uses_original = True
            # Only remove the shared keyword once every assignment has been
            # migrated to a per-site clone.
            if not uses_original:
                keyword.delete()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'comments.comment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'Comment', 'db_table': "'django_comments'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': "orm['auth.User']"}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'generic.assignedkeyword': {
'Meta': {'object_name': 'AssignedKeyword'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': "orm['generic.Keyword']"}),
'object_pk': ('django.db.models.fields.IntegerField', [], {})
},
'generic.keyword': {
'Meta': {'object_name': 'Keyword'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'generic.rating': {
'Meta': {'object_name': 'Rating'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.IntegerField', [], {}),
'value': ('django.db.models.fields.IntegerField', [], {})
},
'generic.threadedcomment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'ThreadedComment', '_ormbases': ['comments.Comment']},
'by_author': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['comments.Comment']", 'unique': 'True', 'primary_key': 'True'}),
'email_hash': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'replied_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'null': 'True', 'to': "orm['generic.ThreadedComment']"})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['generic']
| bsd-2-clause |
sarvex/depot-tools | third_party/logilab/common/urllib2ext.py | 92 | 3416 | from __future__ import print_function
import logging
import urllib2
import kerberos as krb
class GssapiAuthError(Exception):
    """Signals a failure at any step of the GSSAPI authentication exchange."""
import re
# Extracts the token from a "WWW-Authenticate: ..., Negotiate <token>, ..."
# header value (case-insensitive); the challenge may be preceded/followed by
# other comma-separated auth schemes.
RGX = re.compile('(?:.*,)*\s*Negotiate\s*([^,]*),?', re.I)
def get_negociate_value(headers):
    """Return the first Negotiate token found among the WWW-Authenticate
    headers, or None when no header carries a Negotiate challenge."""
    for challenge in headers.getheaders('www-authenticate'):
        found = RGX.search(challenge)
        if found is not None:
            return found.group(1)
    return None
class HTTPGssapiAuthHandler(urllib2.BaseHandler):
    """Negotiate HTTP authentication using context from GSSAPI"""
    handler_order = 400 # before Digest Auth
    def __init__(self):
        self._reset()
    def _reset(self):
        # Start a fresh negotiation: zero retries, no kerberos context yet.
        self._retried = 0
        self._context = None
    def clean_context(self):
        # Release the GSSAPI client context, if one was ever created.
        if self._context is not None:
            krb.authGSSClientClean(self._context)
    def http_error_401(self, req, fp, code, msg, headers):
        """Handle a 401 by performing the SPNEGO/Negotiate handshake.

        Returns the server's response on success; returns None (implicitly)
        when no Negotiate challenge is present or a GssapiAuthError occurs.
        """
        try:
            # Give up after several attempts on the same negotiation.
            if self._retried > 5:
                raise urllib2.HTTPError(req.get_full_url(), 401,
                                        "negotiate auth failed", headers, None)
            self._retried += 1
            logging.debug('gssapi handler, try %s' % self._retried)
            negotiate = get_negociate_value(headers)
            if negotiate is None:
                logging.debug('no negociate found in a www-authenticate header')
                return None
            logging.debug('HTTPGssapiAuthHandler: negotiate 1 is %r' % negotiate)
            result, self._context = krb.authGSSClientInit("HTTP@%s" % req.get_host())
            if result < 1:
                raise GssapiAuthError("HTTPGssapiAuthHandler: init failed with %d" % result)
            result = krb.authGSSClientStep(self._context, negotiate)
            if result < 0:
                raise GssapiAuthError("HTTPGssapiAuthHandler: step 1 failed with %d" % result)
            client_response = krb.authGSSClientResponse(self._context)
            logging.debug('HTTPGssapiAuthHandler: client response is %s...' % client_response[:10])
            # Retry the request carrying our token; the header is
            # unredirected so it is not leaked to other hosts on redirect.
            req.add_unredirected_header('Authorization', "Negotiate %s" % client_response)
            server_response = self.parent.open(req)
            # Feed the server's token back through a second client step to
            # verify the server's identity (logged as a warning if absent).
            negotiate = get_negociate_value(server_response.info())
            if negotiate is None:
                logging.warning('HTTPGssapiAuthHandler: failed to authenticate server')
            else:
                logging.debug('HTTPGssapiAuthHandler negotiate 2: %s' % negotiate)
                result = krb.authGSSClientStep(self._context, negotiate)
                if result < 1:
                    raise GssapiAuthError("HTTPGssapiAuthHandler: step 2 failed with %d" % result)
            return server_response
        except GssapiAuthError as exc:
            logging.error(repr(exc))
        finally:
            # Always drop the context and counters, success or failure.
            self.clean_context()
            self._reset()
if __name__ == '__main__':
    # Manual smoke test: fetch the URL passed as argv[1] through the
    # negotiate handler with verbose HTTP tracing and cookie support.
    import sys
    # debug
    import httplib
    httplib.HTTPConnection.debuglevel = 1
    httplib.HTTPSConnection.debuglevel = 1
    # debug
    import logging
    logging.basicConfig(level=logging.DEBUG)
    # handle cookies
    import cookielib
    cj = cookielib.CookieJar()
    ch = urllib2.HTTPCookieProcessor(cj)
    # test with url sys.argv[1]
    h = HTTPGssapiAuthHandler()
    response = urllib2.build_opener(h, ch).open(sys.argv[1])
    print('\nresponse: %s\n--------------\n' % response.code, response.info())
| bsd-3-clause |
MattsFleaMarket/python-for-android | python-build/python-libs/gdata/build/lib/atom/__init__.py | 136 | 49199 | #!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes representing Atom elements.
Module objective: provide data classes for Atom constructs. These classes hide
the XML-ness of Atom and provide a set of native Python classes to interact
with.
Conversions to and from XML should only be necessary when the Atom classes
"touch the wire" and are sent over HTTP. For this reason this module
provides methods and functions to convert Atom classes to and from strings.
For more information on the Atom data model, see RFC 4287
(http://www.ietf.org/rfc/rfc4287.txt)
AtomBase: A foundation class on which Atom classes are built. It
handles the parsing of attributes and children which are common to all
Atom classes. By default, the AtomBase class translates all XML child
nodes into ExtensionElements.
ExtensionElement: Atom allows Atom objects to contain XML which is not part
of the Atom specification, these are called extension elements. If a
classes parser encounters an unexpected XML construct, it is translated
into an ExtensionElement instance. ExtensionElement is designed to fully
capture the information in the XML. Child nodes in an XML extension are
turned into ExtensionElements as well.
"""
__author__ = 'api.jscudder (Jeffrey Scudder)'
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import warnings
# XML namespaces which are often used in Atom entities.
ATOM_NAMESPACE = 'http://www.w3.org/2005/Atom'
ELEMENT_TEMPLATE = '{http://www.w3.org/2005/Atom}%s'
APP_NAMESPACE = 'http://purl.org/atom/app#'
APP_TEMPLATE = '{http://purl.org/atom/app#}%s'
# This encoding is used for converting strings before translating the XML
# into an object.
XML_STRING_ENCODING = 'utf-8'
# The desired string encoding for object members.  Set (or monkey-patch) to
# the unicode type if you want object members to be Python unicode strings
# instead of encoded byte strings.
MEMBER_STRING_ENCODING = 'utf-8'
#MEMBER_STRING_ENCODING = unicode
# If True, all methods which are exclusive to v1 will raise a
# DeprecationWarning when invoked.
ENABLE_V1_WARNINGS = False
def v1_deprecated(warning=None):
  """Decorator factory marking methods that only exist in the v1 API.

  The wrapped callable emits a DeprecationWarning at call time, but only
  when the module-level flag ENABLE_V1_WARNINGS is True.
  """
  message = warning or ''
  def decorate(func):
    def wrapper(*args, **kwargs):
      if ENABLE_V1_WARNINGS:
        warnings.warn(message, DeprecationWarning, stacklevel=2)
      return func(*args, **kwargs)
    # Keep the wrapped function's name visible instead of 'wrapper'.
    wrapper.func_name = func.func_name
    return wrapper
  return decorate
@v1_deprecated('Please use atom.core.parse with atom.data classes instead.')
def CreateClassFromXMLString(target_class, xml_string, string_encoding=None):
  """Builds an instance of target_class from an XML string.

  Args:
    target_class: class The class to instantiate; it must define _tag and
        _namespace class variables matching the XML root element.
    xml_string: str A string of valid XML whose root element corresponds
        to the target class.
    string_encoding: str (optional) Encoding applied to unicode input
        before parsing; defaults to the module's XML_STRING_ENCODING.

  Returns:
    A populated instance of target_class, or None if the root XML tag and
    namespace did not match those of the target class.
  """
  encoding = string_encoding or XML_STRING_ENCODING
  if isinstance(xml_string, unicode) and encoding:
    xml_string = xml_string.encode(encoding)
  return _CreateClassFromElementTree(target_class,
                                     ElementTree.fromstring(xml_string))
def _CreateClassFromElementTree(target_class, tree, namespace=None, tag=None):
"""Instantiates the class and populates members according to the tree.
Note: Only use this function with classes that have _namespace and _tag
class members.
Args:
target_class: class The class which will be instantiated and populated
with the contents of the XML.
tree: ElementTree An element tree whose contents will be converted into
members of the new target_class instance.
namespace: str (optional) The namespace which the XML tree's root node must
match. If omitted, the namespace defaults to the _namespace of the
target class.
tag: str (optional) The tag which the XML tree's root node must match. If
omitted, the tag defaults to the _tag class member of the target
class.
Returns:
An instance of the target class - or None if the tag and namespace of
the XML tree's root node did not match the desired namespace and tag.
"""
if namespace is None:
namespace = target_class._namespace
if tag is None:
tag = target_class._tag
if tree.tag == '{%s}%s' % (namespace, tag):
target = target_class()
target._HarvestElementTree(tree)
return target
else:
return None
class ExtensionContainer(object):
  """Holds XML children/attributes that are not part of the Atom spec.

  Unrecognized child elements become ExtensionElement instances in
  extension_elements; unrecognized attributes go into the
  extension_attributes dict; element text is kept in text.
  """

  @v1_deprecated('Please use data model classes in atom.data instead.')
  def __init__(self, extension_elements=None, extension_attributes=None,
      text=None):
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text

  # Three methods to create an object from an ElementTree
  def _HarvestElementTree(self, tree):
    # Fill in the instance members from the contents of the XML tree.
    for child in tree:
      self._ConvertElementTreeToMember(child)
    for attribute, value in tree.attrib.iteritems():
      self._ConvertElementAttributeToMember(attribute, value)
    # Encode the text string according to the desired encoding type. (UTF-8)
    if tree.text:
      if MEMBER_STRING_ENCODING is unicode:
        self.text = tree.text
      else:
        self.text = tree.text.encode(MEMBER_STRING_ENCODING)

  def _ConvertElementTreeToMember(self, child_tree, current_class=None):
    # Base behavior: every child becomes a generic ExtensionElement.
    # Subclasses (AtomBase) override this to map known tags onto members.
    self.extension_elements.append(_ExtensionElementFromElementTree(
        child_tree))

  def _ConvertElementAttributeToMember(self, attribute, value):
    # Encode the attribute value's string with the desired type Default UTF-8
    if value:
      if MEMBER_STRING_ENCODING is unicode:
        self.extension_attributes[attribute] = value
      else:
        self.extension_attributes[attribute] = value.encode(
            MEMBER_STRING_ENCODING)

  # One method to create an ElementTree from an object
  def _AddMembersToElementTree(self, tree):
    for child in self.extension_elements:
      child._BecomeChildElement(tree)
    for attribute, value in self.extension_attributes.iteritems():
      if value:
        if isinstance(value, unicode) or MEMBER_STRING_ENCODING is unicode:
          tree.attrib[attribute] = value
        else:
          # Decode the value from the desired encoding (default UTF-8).
          tree.attrib[attribute] = value.decode(MEMBER_STRING_ENCODING)
    if self.text:
      if isinstance(self.text, unicode) or MEMBER_STRING_ENCODING is unicode:
        tree.text = self.text
      else:
        tree.text = self.text.decode(MEMBER_STRING_ENCODING)

  def FindExtensions(self, tag=None, namespace=None):
    """Searches extension elements for child nodes with the desired name.

    Returns a list of extension elements within this object whose tag
    and/or namespace match those passed in. To find all extensions in
    a particular namespace, specify the namespace but not the tag name.
    If you specify only the tag, the result list may contain extension
    elements in multiple namespaces.

    Args:
      tag: str (optional) The desired tag
      namespace: str (optional) The desired namespace

    Returns:
      A list of elements whose tag and/or namespace match the parameters
      values
    """
    results = []
    if tag and namespace:
      for element in self.extension_elements:
        if element.tag == tag and element.namespace == namespace:
          results.append(element)
    elif tag and not namespace:
      for element in self.extension_elements:
        if element.tag == tag:
          results.append(element)
    elif namespace and not tag:
      for element in self.extension_elements:
        if element.namespace == namespace:
          results.append(element)
    else:
      # Neither filter given: return every extension element.
      for element in self.extension_elements:
        results.append(element)
    return results
class AtomBase(ExtensionContainer):
  """Foundation class for typed Atom elements.

  Subclasses declare _tag/_namespace plus _children/_attributes maps that
  route known XML child nodes and attributes onto named Python members;
  anything unrecognized falls back to ExtensionContainer handling.
  """
  _children = {}
  _attributes = {}

  @v1_deprecated('Please use data model classes in atom.data instead.')
  def __init__(self, extension_elements=None, extension_attributes=None,
      text=None):
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text

  def _ConvertElementTreeToMember(self, child_tree):
    # Find the element's tag in this class's list of child members.
    # (dict.has_key is deprecated and gone in Python 3; `in` is equivalent.)
    if child_tree.tag in self.__class__._children:
      member_name = self.__class__._children[child_tree.tag][0]
      member_class = self.__class__._children[child_tree.tag][1]
      # If the class member is supposed to contain a list, make sure the
      # matching member is set to a list, then append the new member
      # instance to the list.
      if isinstance(member_class, list):
        if getattr(self, member_name) is None:
          setattr(self, member_name, [])
        getattr(self, member_name).append(_CreateClassFromElementTree(
            member_class[0], child_tree))
      else:
        setattr(self, member_name,
                _CreateClassFromElementTree(member_class, child_tree))
    else:
      ExtensionContainer._ConvertElementTreeToMember(self, child_tree)

  def _ConvertElementAttributeToMember(self, attribute, value):
    # Find the attribute in this class's list of attributes.
    if attribute in self.__class__._attributes:
      # Find the member of this class which corresponds to the XML attribute
      # (lookup in current_class._attributes) and set this member to the
      # desired value (using self.__dict__).
      if value:
        # Encode the string to capture non-ascii characters (default UTF-8)
        if MEMBER_STRING_ENCODING is unicode:
          setattr(self, self.__class__._attributes[attribute], value)
        else:
          setattr(self, self.__class__._attributes[attribute],
                  value.encode(MEMBER_STRING_ENCODING))
    else:
      ExtensionContainer._ConvertElementAttributeToMember(self, attribute,
                                                          value)

  # Three methods to create an ElementTree from an object
  def _AddMembersToElementTree(self, tree):
    # Convert the members of this class which are XML child nodes.
    # This uses the class's _children dictionary to find the members which
    # should become XML child nodes.
    member_node_names = [values[0] for tag, values in
                         self.__class__._children.iteritems()]
    for member_name in member_node_names:
      member = getattr(self, member_name)
      if member is None:
        pass
      elif isinstance(member, list):
        for instance in member:
          instance._BecomeChildElement(tree)
      else:
        member._BecomeChildElement(tree)
    # Convert the members of this class which are XML attributes.
    for xml_attribute, member_name in self.__class__._attributes.iteritems():
      member = getattr(self, member_name)
      if member is not None:
        if isinstance(member, unicode) or MEMBER_STRING_ENCODING is unicode:
          tree.attrib[xml_attribute] = member
        else:
          tree.attrib[xml_attribute] = member.decode(MEMBER_STRING_ENCODING)
    # Lastly, call the ExtensionContainers's _AddMembersToElementTree to
    # convert any extension attributes.
    ExtensionContainer._AddMembersToElementTree(self, tree)

  def _BecomeChildElement(self, tree):
    """Appends this object as a new child element of the given tree.

    Note: Only for use with classes that have a _tag and _namespace class
    member. It is in AtomBase so that it can be inherited but it should
    not be called on instances of AtomBase.
    """
    new_child = ElementTree.Element('')
    tree.append(new_child)
    new_child.tag = '{%s}%s' % (self.__class__._namespace,
                                self.__class__._tag)
    self._AddMembersToElementTree(new_child)

  def _ToElementTree(self):
    """Builds a standalone ElementTree element from this object.

    Note, this method is designed to be used only with classes that have a
    _tag and _namespace. It is placed in AtomBase for inheritance but should
    not be called on this class.
    """
    new_tree = ElementTree.Element('{%s}%s' % (self.__class__._namespace,
                                               self.__class__._tag))
    self._AddMembersToElementTree(new_tree)
    return new_tree

  def ToString(self, string_encoding='UTF-8'):
    """Converts the Atom object to a string containing XML."""
    return ElementTree.tostring(self._ToElementTree(), encoding=string_encoding)

  def __str__(self):
    return self.ToString()
class Name(AtomBase):
  """Represents the atom:name element."""
  _tag = 'name'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
               extension_attributes=None):
    """Builds an atom:name.

    Args:
      text: str The text content of this element.
      extension_elements: list ExtensionElement children.
      extension_attributes: dict Additional XML attribute/value pairs.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def NameFromString(xml_string):
  """Parses an atom:name element from an XML string (None on mismatch)."""
  return CreateClassFromXMLString(Name, xml_string)
class Email(AtomBase):
  """Represents the atom:email element."""
  _tag = 'email'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
               extension_attributes=None):
    """Builds an atom:email.

    Args:
      text: str The text content of this element.
      extension_elements: list ExtensionElement children.
      extension_attributes: dict Additional XML attribute/value pairs.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def EmailFromString(xml_string):
  """Parses an atom:email element from an XML string (None on mismatch)."""
  return CreateClassFromXMLString(Email, xml_string)
class Uri(AtomBase):
  """Represents the atom:uri element."""
  _tag = 'uri'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
               extension_attributes=None):
    """Builds an atom:uri.

    Args:
      text: str The text content of this element.
      extension_elements: list ExtensionElement children.
      extension_attributes: dict Additional XML attribute/value pairs.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def UriFromString(xml_string):
  """Parses an atom:uri element from an XML string (None on mismatch)."""
  return CreateClassFromXMLString(Uri, xml_string)
class Person(AtomBase):
  """Base class shared by atom:author and atom:contributor.

  Bundles a person's name, email address, and web page URI. This class is
  for illustration/inheritance; it is not meant to be instantiated
  directly.
  """
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _children['{%s}name' % (ATOM_NAMESPACE)] = ('name', Name)
  _children['{%s}email' % (ATOM_NAMESPACE)] = ('email', Email)
  _children['{%s}uri' % (ATOM_NAMESPACE)] = ('uri', Uri)

  def __init__(self, name=None, email=None, uri=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Initializes the shared person members.

    Args:
      name: Name The person's name element.
      email: Email The person's email address element.
      uri: Uri The URI of the person's webpage.
      extension_elements: list ExtensionElement children of this element.
      extension_attributes: dict Additional XML attribute/value pairs.
      text: str The text contents of the element's XML text node.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.uri = uri
    self.email = email
    self.name = name
class Author(Person):
  """Represents the atom:author element.

  An author is a required element in Feed.
  """
  _tag = 'author'
  _namespace = ATOM_NAMESPACE
  _children = Person._children.copy()
  _attributes = Person._attributes.copy()

  def __init__(self, name=None, email=None, uri=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Builds an atom:author.

    Args:
      name: Name The author's name element.
      email: Email The author's email element.
      uri: Uri The author's web page URI element.
      extension_elements: list ExtensionElement children.
      extension_attributes: dict Additional XML attribute/value pairs.
      text: str The text data in the this element.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.uri = uri
    self.email = email
    self.name = name
def AuthorFromString(xml_string):
  """Parses an atom:author element from an XML string (None on mismatch)."""
  return CreateClassFromXMLString(Author, xml_string)
class Contributor(Person):
  """Represents the atom:contributor element."""
  _tag = 'contributor'
  _namespace = ATOM_NAMESPACE
  _children = Person._children.copy()
  _attributes = Person._attributes.copy()

  def __init__(self, name=None, email=None, uri=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Builds an atom:contributor.

    Args:
      name: Name The contributor's name element.
      email: Email The contributor's email element.
      uri: Uri The contributor's web page URI element.
      extension_elements: list ExtensionElement children.
      extension_attributes: dict Additional XML attribute/value pairs.
      text: str The text data in the this element.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.uri = uri
    self.email = email
    self.name = name
def ContributorFromString(xml_string):
  """Parses an atom:contributor element from an XML string (None on mismatch)."""
  return CreateClassFromXMLString(Contributor, xml_string)
class Link(AtomBase):
  """Represents the atom:link element."""
  _tag = 'link'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _attributes['rel'] = 'rel'
  _attributes['href'] = 'href'
  _attributes['type'] = 'type'
  _attributes['title'] = 'title'
  _attributes['length'] = 'length'
  _attributes['hreflang'] = 'hreflang'

  def __init__(self, href=None, rel=None, link_type=None, hreflang=None,
               title=None, length=None, text=None, extension_elements=None,
               extension_attributes=None):
    """Builds an atom:link.

    Args:
      href: string The href attribute of the link.
      rel: string The relation of the link to its entry or feed.
      link_type: string Stored as self.type (the XML 'type' attribute).
      hreflang: string The language for the href.
      title: string Human-readable title for the link.
      length: string The length of the href's destination.
      text: str The text data in the this element.
      extension_elements: list ExtensionElement children.
      extension_attributes: dict Additional XML attribute/value pairs.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.length = length
    self.title = title
    self.hreflang = hreflang
    self.type = link_type
    self.rel = rel
    self.href = href
def LinkFromString(xml_string):
  """Parses an atom:link element from an XML string (None on mismatch)."""
  return CreateClassFromXMLString(Link, xml_string)
class Generator(AtomBase):
  """Represents the atom:generator element."""
  _tag = 'generator'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _attributes['uri'] = 'uri'
  _attributes['version'] = 'version'

  def __init__(self, uri=None, version=None, text=None,
               extension_elements=None, extension_attributes=None):
    """Builds an atom:generator.

    Args:
      uri: string URI of the generating agent.
      version: string Version of the generating agent.
      text: str The text data in the this element.
      extension_elements: list ExtensionElement children.
      extension_attributes: dict Additional XML attribute/value pairs.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.version = version
    self.uri = uri
def GeneratorFromString(xml_string):
  """Parses an atom:generator element from an XML string (None on mismatch)."""
  return CreateClassFromXMLString(Generator, xml_string)
class Text(AtomBase):
  """Shared base for atom:title, atom:summary, and other text constructs.

  Adds the 'type' attribute. This class should never be instantiated
  directly.
  """
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _attributes['type'] = 'type'

  def __init__(self, text_type=None, text=None, extension_elements=None,
               extension_attributes=None):
    """Initializes the shared text-construct members.

    Args:
      text_type: string Stored as self.type (the XML 'type' attribute).
      text: str The text data in the this element.
      extension_elements: list ExtensionElement children.
      extension_attributes: dict Additional XML attribute/value pairs.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.type = text_type
class Title(Text):
  """Represents the atom:title element."""
  _tag = 'title'
  _namespace = ATOM_NAMESPACE
  _children = Text._children.copy()
  _attributes = Text._attributes.copy()

  def __init__(self, title_type=None, text=None, extension_elements=None,
               extension_attributes=None):
    """Builds an atom:title.

    Args:
      title_type: string Stored as self.type (the XML 'type' attribute).
      text: str The text data in the this element.
      extension_elements: list ExtensionElement children.
      extension_attributes: dict Additional XML attribute/value pairs.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.type = title_type
def TitleFromString(xml_string):
  """Parses an atom:title element from an XML string (None on mismatch)."""
  return CreateClassFromXMLString(Title, xml_string)
class Subtitle(Text):
  """Represents the atom:subtitle element."""
  _tag = 'subtitle'
  _namespace = ATOM_NAMESPACE
  _children = Text._children.copy()
  _attributes = Text._attributes.copy()

  def __init__(self, subtitle_type=None, text=None, extension_elements=None,
               extension_attributes=None):
    """Builds an atom:subtitle.

    Args:
      subtitle_type: string Stored as self.type (the XML 'type' attribute).
      text: str The text data in the this element.
      extension_elements: list ExtensionElement children.
      extension_attributes: dict Additional XML attribute/value pairs.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.type = subtitle_type
def SubtitleFromString(xml_string):
    """Deserialize a Subtitle instance from a string of XML."""
    return CreateClassFromXMLString(Subtitle, xml_string)
class Rights(Text):
    """Represents the atom:rights element."""

    _tag = 'rights'
    _namespace = ATOM_NAMESPACE
    _children = Text._children.copy()
    _attributes = Text._attributes.copy()

    def __init__(self, rights_type=None, text=None, extension_elements=None,
                 extension_attributes=None):
        """Create an atom:rights element.

        Args:
            rights_type: str Value for the element's type attribute.
            text: str The text data in this element.
            extension_elements: list ExtensionElement children (optional).
            extension_attributes: dict Extra attribute name/value pairs.
        """
        self.type = rights_type
        self.text = text
        if extension_elements:
            self.extension_elements = extension_elements
        else:
            self.extension_elements = []
        if extension_attributes:
            self.extension_attributes = extension_attributes
        else:
            self.extension_attributes = {}
def RightsFromString(xml_string):
    """Deserialize a Rights instance from a string of XML."""
    return CreateClassFromXMLString(Rights, xml_string)
class Summary(Text):
    """Represents the atom:summary element."""

    _tag = 'summary'
    _namespace = ATOM_NAMESPACE
    _children = Text._children.copy()
    _attributes = Text._attributes.copy()

    def __init__(self, summary_type=None, text=None, extension_elements=None,
                 extension_attributes=None):
        """Create an atom:summary element.

        Args:
            summary_type: str Value for the element's type attribute.
            text: str The text data in this element.
            extension_elements: list ExtensionElement children (optional).
            extension_attributes: dict Extra attribute name/value pairs.
        """
        self.type = summary_type
        self.text = text
        if extension_elements:
            self.extension_elements = extension_elements
        else:
            self.extension_elements = []
        if extension_attributes:
            self.extension_attributes = extension_attributes
        else:
            self.extension_attributes = {}
def SummaryFromString(xml_string):
    """Deserialize a Summary instance from a string of XML."""
    return CreateClassFromXMLString(Summary, xml_string)
class Content(Text):
    """Represents the atom:content element."""

    _tag = 'content'
    _namespace = ATOM_NAMESPACE
    _children = Text._children.copy()
    _attributes = Text._attributes.copy()
    _attributes['src'] = 'src'

    def __init__(self, content_type=None, src=None, text=None,
                 extension_elements=None, extension_attributes=None):
        """Create an atom:content element.

        Args:
            content_type: str Value for the element's type attribute.
            src: str Value for the element's src attribute.
            text: str The text data in this element.
            extension_elements: list ExtensionElement children (optional).
            extension_attributes: dict Extra attribute name/value pairs.
        """
        self.type = content_type
        self.src = src
        self.text = text
        if extension_elements:
            self.extension_elements = extension_elements
        else:
            self.extension_elements = []
        if extension_attributes:
            self.extension_attributes = extension_attributes
        else:
            self.extension_attributes = {}
def ContentFromString(xml_string):
    """Deserialize a Content instance from a string of XML."""
    return CreateClassFromXMLString(Content, xml_string)
class Category(AtomBase):
    """Represents the atom:category element."""

    _tag = 'category'
    _namespace = ATOM_NAMESPACE
    _children = AtomBase._children.copy()
    _attributes = AtomBase._attributes.copy()
    _attributes['term'] = 'term'
    _attributes['scheme'] = 'scheme'
    _attributes['label'] = 'label'

    def __init__(self, term=None, scheme=None, label=None, text=None,
                 extension_elements=None, extension_attributes=None):
        """Create an atom:category element.

        Args:
            term: str Value for the element's term attribute.
            scheme: str Value for the element's scheme attribute.
            label: str Value for the element's label attribute.
            text: str The text data in this element.
            extension_elements: list ExtensionElement children (optional).
            extension_attributes: dict Extra attribute name/value pairs.
        """
        self.term = term
        self.scheme = scheme
        self.label = label
        self.text = text
        if extension_elements:
            self.extension_elements = extension_elements
        else:
            self.extension_elements = []
        if extension_attributes:
            self.extension_attributes = extension_attributes
        else:
            self.extension_attributes = {}
def CategoryFromString(xml_string):
    """Deserialize a Category instance from a string of XML."""
    return CreateClassFromXMLString(Category, xml_string)
class Id(AtomBase):
    """Represents the atom:id element."""

    _tag = 'id'
    _namespace = ATOM_NAMESPACE
    _children = AtomBase._children.copy()
    _attributes = AtomBase._attributes.copy()

    def __init__(self, text=None, extension_elements=None,
                 extension_attributes=None):
        """Create an atom:id element.

        Args:
            text: str The text data in this element.
            extension_elements: list ExtensionElement children (optional).
            extension_attributes: dict Extra attribute name/value pairs.
        """
        self.text = text
        if extension_elements:
            self.extension_elements = extension_elements
        else:
            self.extension_elements = []
        if extension_attributes:
            self.extension_attributes = extension_attributes
        else:
            self.extension_attributes = {}
def IdFromString(xml_string):
    """Deserialize an Id instance from a string of XML."""
    return CreateClassFromXMLString(Id, xml_string)
class Icon(AtomBase):
    """Represents the atom:icon element."""

    _tag = 'icon'
    _namespace = ATOM_NAMESPACE
    _children = AtomBase._children.copy()
    _attributes = AtomBase._attributes.copy()

    def __init__(self, text=None, extension_elements=None,
                 extension_attributes=None):
        """Create an atom:icon element.

        Args:
            text: str The text data in this element.
            extension_elements: list ExtensionElement children (optional).
            extension_attributes: dict Extra attribute name/value pairs.
        """
        self.text = text
        if extension_elements:
            self.extension_elements = extension_elements
        else:
            self.extension_elements = []
        if extension_attributes:
            self.extension_attributes = extension_attributes
        else:
            self.extension_attributes = {}
def IconFromString(xml_string):
    """Deserialize an Icon instance from a string of XML."""
    return CreateClassFromXMLString(Icon, xml_string)
class Logo(AtomBase):
    """Represents the atom:logo element."""

    _tag = 'logo'
    _namespace = ATOM_NAMESPACE
    _children = AtomBase._children.copy()
    _attributes = AtomBase._attributes.copy()

    def __init__(self, text=None, extension_elements=None,
                 extension_attributes=None):
        """Create an atom:logo element.

        Args:
            text: str The text data in this element.
            extension_elements: list ExtensionElement children (optional).
            extension_attributes: dict Extra attribute name/value pairs.
        """
        self.text = text
        if extension_elements:
            self.extension_elements = extension_elements
        else:
            self.extension_elements = []
        if extension_attributes:
            self.extension_attributes = extension_attributes
        else:
            self.extension_attributes = {}
def LogoFromString(xml_string):
    """Deserialize a Logo instance from a string of XML."""
    return CreateClassFromXMLString(Logo, xml_string)
class Draft(AtomBase):
    """Represents the app:draft element, which flags whether an entry
    should be publicly visible."""

    _tag = 'draft'
    _namespace = APP_NAMESPACE
    _children = AtomBase._children.copy()
    _attributes = AtomBase._attributes.copy()

    def __init__(self, text=None, extension_elements=None,
                 extension_attributes=None):
        """Create an app:draft element.

        Args:
            text: str The text data in this element.
            extension_elements: list ExtensionElement children (optional).
            extension_attributes: dict Extra attribute name/value pairs.
        """
        self.text = text
        if extension_elements:
            self.extension_elements = extension_elements
        else:
            self.extension_elements = []
        if extension_attributes:
            self.extension_attributes = extension_attributes
        else:
            self.extension_attributes = {}
def DraftFromString(xml_string):
    """Deserialize a Draft instance from a string of XML."""
    return CreateClassFromXMLString(Draft, xml_string)
class Control(AtomBase):
    """Represents the app:control element, indicating restrictions on
    publication.

    The APP control element may contain a draft child element indicating
    whether or not this entry should be publicly available.
    """

    _tag = 'control'
    _namespace = APP_NAMESPACE
    _children = AtomBase._children.copy()
    _attributes = AtomBase._attributes.copy()
    _children['{%s}draft' % APP_NAMESPACE] = ('draft', Draft)

    def __init__(self, draft=None, text=None, extension_elements=None,
                 extension_attributes=None):
        """Create an app:control element.

        Args:
            draft: Draft The child app:draft element (optional).
            text: str The text data in this element.
            extension_elements: list ExtensionElement children (optional).
            extension_attributes: dict Extra attribute name/value pairs.
        """
        self.draft = draft
        self.text = text
        if extension_elements:
            self.extension_elements = extension_elements
        else:
            self.extension_elements = []
        if extension_attributes:
            self.extension_attributes = extension_attributes
        else:
            self.extension_attributes = {}
def ControlFromString(xml_string):
    """Deserialize a Control instance from a string of XML."""
    return CreateClassFromXMLString(Control, xml_string)
class Date(AtomBase):
    """Shared parent class for date elements such as atom:updated and
    atom:published."""
    # TODO: add conversion helpers between the element text and Python
    # datetime objects so callers can set a Date from a real DateTime.

    _children = AtomBase._children.copy()
    _attributes = AtomBase._attributes.copy()

    def __init__(self, text=None, extension_elements=None,
                 extension_attributes=None):
        """Create a date element.

        Args:
            text: str The text data in this element.
            extension_elements: list ExtensionElement children (optional).
            extension_attributes: dict Extra attribute name/value pairs.
        """
        self.text = text
        if extension_elements:
            self.extension_elements = extension_elements
        else:
            self.extension_elements = []
        if extension_attributes:
            self.extension_attributes = extension_attributes
        else:
            self.extension_attributes = {}
class Updated(Date):
    """Represents the atom:updated element."""

    _tag = 'updated'
    _namespace = ATOM_NAMESPACE
    _children = Date._children.copy()
    _attributes = Date._attributes.copy()

    def __init__(self, text=None, extension_elements=None,
                 extension_attributes=None):
        """Create an atom:updated element.

        Args:
            text: str The text data in this element.
            extension_elements: list ExtensionElement children (optional).
            extension_attributes: dict Extra attribute name/value pairs.
        """
        self.text = text
        if extension_elements:
            self.extension_elements = extension_elements
        else:
            self.extension_elements = []
        if extension_attributes:
            self.extension_attributes = extension_attributes
        else:
            self.extension_attributes = {}
def UpdatedFromString(xml_string):
    """Deserialize an Updated instance from a string of XML."""
    return CreateClassFromXMLString(Updated, xml_string)
class Published(Date):
    """Represents the atom:published element."""

    _tag = 'published'
    _namespace = ATOM_NAMESPACE
    _children = Date._children.copy()
    _attributes = Date._attributes.copy()

    def __init__(self, text=None, extension_elements=None,
                 extension_attributes=None):
        """Create an atom:published element.

        Args:
            text: str The text data in this element.
            extension_elements: list ExtensionElement children (optional).
            extension_attributes: dict Extra attribute name/value pairs.
        """
        self.text = text
        if extension_elements:
            self.extension_elements = extension_elements
        else:
            self.extension_elements = []
        if extension_attributes:
            self.extension_attributes = extension_attributes
        else:
            self.extension_attributes = {}
def PublishedFromString(xml_string):
    """Deserialize a Published instance from a string of XML."""
    return CreateClassFromXMLString(Published, xml_string)
class LinkFinder(object):
    """An "interface" providing methods to find link elements.

    Entry elements often contain multiple links which differ in the rel
    attribute or content type. Often, developers are interested in a
    specific type of link, so this class provides methods to find specific
    classes of links.

    This class is used as a mixin in Atom entries and feeds.
    """

    def _FindLinkByRel(self, rel):
        """Return the first link whose rel attribute equals rel, else None."""
        for candidate in self.link:
            if candidate.rel == rel:
                return candidate
        return None

    def GetSelfLink(self):
        """Find the first link with rel set to 'self'.

        Returns:
            An atom.Link or None if none of the links had rel equal to
            'self'.
        """
        return self._FindLinkByRel('self')

    def GetEditLink(self):
        """Return the first link with rel 'edit', or None."""
        return self._FindLinkByRel('edit')

    def GetEditMediaLink(self):
        """Return the first link with rel 'edit-media', or None."""
        return self._FindLinkByRel('edit-media')

    def GetNextLink(self):
        """Return the first link with rel 'next', or None."""
        return self._FindLinkByRel('next')

    def GetLicenseLink(self):
        """Return the first link with rel 'license', or None."""
        return self._FindLinkByRel('license')

    def GetAlternateLink(self):
        """Return the first link with rel 'alternate', or None."""
        return self._FindLinkByRel('alternate')
class FeedEntryParent(AtomBase, LinkFinder):
    """Superclass holding the members shared by atom:feed and atom:entry."""

    _children = AtomBase._children.copy()
    _attributes = AtomBase._attributes.copy()
    _children['{%s}author' % ATOM_NAMESPACE] = ('author', [Author])
    _children['{%s}category' % ATOM_NAMESPACE] = ('category', [Category])
    _children['{%s}contributor' % ATOM_NAMESPACE] = ('contributor', [Contributor])
    _children['{%s}id' % ATOM_NAMESPACE] = ('id', Id)
    _children['{%s}link' % ATOM_NAMESPACE] = ('link', [Link])
    _children['{%s}rights' % ATOM_NAMESPACE] = ('rights', Rights)
    _children['{%s}title' % ATOM_NAMESPACE] = ('title', Title)
    _children['{%s}updated' % ATOM_NAMESPACE] = ('updated', Updated)

    def __init__(self, author=None, category=None, contributor=None,
                 atom_id=None, link=None, rights=None, title=None, updated=None,
                 text=None, extension_elements=None, extension_attributes=None):
        """Create the members shared by atom:feed and atom:entry.

        Args:
            author: list Author instances (optional).
            category: list Category instances (optional).
            contributor: list Contributor instances (optional).
            atom_id: Id The element's id child (optional).
            link: list Link instances (optional).
            rights: Rights child element (optional).
            title: Title child element (optional).
            updated: Updated child element (optional).
            text: str The text data in this element.
            extension_elements: list ExtensionElement children (optional).
            extension_attributes: dict Extra attribute name/value pairs.
        """
        # Single-valued children first, then the list-valued ones.
        self.id = atom_id
        self.rights = rights
        self.title = title
        self.updated = updated
        self.text = text
        self.author = author or []
        self.category = category or []
        self.contributor = contributor or []
        self.link = link or []
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
class Source(FeedEntryParent):
    """Represents the atom:source element."""

    _tag = 'source'
    _namespace = ATOM_NAMESPACE
    _children = FeedEntryParent._children.copy()
    _attributes = FeedEntryParent._attributes.copy()
    _children['{%s}generator' % ATOM_NAMESPACE] = ('generator', Generator)
    _children['{%s}icon' % ATOM_NAMESPACE] = ('icon', Icon)
    _children['{%s}logo' % ATOM_NAMESPACE] = ('logo', Logo)
    _children['{%s}subtitle' % ATOM_NAMESPACE] = ('subtitle', Subtitle)

    def __init__(self, author=None, category=None, contributor=None,
                 generator=None, icon=None, atom_id=None, link=None, logo=None,
                 rights=None, subtitle=None, title=None, updated=None, text=None,
                 extension_elements=None, extension_attributes=None):
        """Create an atom:source element.

        Args:
            author: list (optional) Author instances for this source.
            category: list (optional) Category instances.
            contributor: list (optional) Contributor instances.
            generator: Generator (optional) child element.
            icon: Icon (optional) child element.
            atom_id: Id (optional) The source's id element.
            link: list (optional) Link instances.
            logo: Logo (optional) child element.
            rights: Rights (optional) child element.
            subtitle: Subtitle (optional) child element.
            title: Title (optional) child element.
            updated: Updated (optional) child element.
            text: str (optional) The text contents of this element's own
                XML text node.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) Extra attribute
                name/value pairs.
        """
        # Single-valued children first, then the list-valued ones.
        self.generator = generator
        self.icon = icon
        self.id = atom_id
        self.logo = logo
        self.rights = rights
        self.subtitle = subtitle
        self.title = title
        self.updated = updated
        self.text = text
        self.author = author or []
        self.category = category or []
        self.contributor = contributor or []
        self.link = link or []
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
def SourceFromString(xml_string):
    """Deserialize a Source instance from a string of XML."""
    return CreateClassFromXMLString(Source, xml_string)
class Entry(FeedEntryParent):
    """Represents the atom:entry element."""

    _tag = 'entry'
    _namespace = ATOM_NAMESPACE
    _children = FeedEntryParent._children.copy()
    _attributes = FeedEntryParent._attributes.copy()
    _children['{%s}content' % ATOM_NAMESPACE] = ('content', Content)
    _children['{%s}published' % ATOM_NAMESPACE] = ('published', Published)
    _children['{%s}source' % ATOM_NAMESPACE] = ('source', Source)
    _children['{%s}summary' % ATOM_NAMESPACE] = ('summary', Summary)
    _children['{%s}control' % APP_NAMESPACE] = ('control', Control)

    @v1_deprecated('Please use atom.data.Entry instead.')
    def __init__(self, author=None, category=None, content=None,
                 contributor=None, atom_id=None, link=None, published=None, rights=None,
                 source=None, summary=None, control=None, title=None, updated=None,
                 extension_elements=None, extension_attributes=None, text=None):
        """Create an atom:entry element.

        Args:
            author: list Author instances for this entry.
            category: list Category instances.
            content: Content The entry's content element.
            contributor: list Contributor instances.
            atom_id: Id The entry's id element.
            link: list Link instances.
            published: Published The entry's published element.
            rights: Rights The entry's rights element.
            source: Source The entry's source element.
            summary: Summary The entry's summary element.
            control: Control The entry's app:control element, which can be
                used to mark an entry as a draft that should not be
                publicly viewable.
            title: Title The entry's title element.
            updated: Updated The entry's updated element.
            text: str The text contents of this element's own XML text
                node (Example: <foo>This is the text</foo>).
            extension_elements: list ExtensionElement children (optional).
            extension_attributes: dict Extra attribute name/value pairs.
        """
        # Single-valued children first, then the list-valued ones.
        self.content = content
        self.id = atom_id
        self.published = published
        self.rights = rights
        self.source = source
        self.summary = summary
        self.title = title
        self.updated = updated
        self.control = control
        self.text = text
        self.author = author or []
        self.category = category or []
        self.contributor = contributor or []
        self.link = link or []
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
def EntryFromString(xml_string):
    """Deserialize an Entry instance from a string of XML."""
    return CreateClassFromXMLString(Entry, xml_string)
class Feed(Source):
    """Represents the atom:feed element."""

    _tag = 'feed'
    _namespace = ATOM_NAMESPACE
    _children = Source._children.copy()
    _attributes = Source._attributes.copy()
    _children['{%s}entry' % ATOM_NAMESPACE] = ('entry', [Entry])

    @v1_deprecated('Please use atom.data.Feed instead.')
    def __init__(self, author=None, category=None, contributor=None,
                 generator=None, icon=None, atom_id=None, link=None, logo=None,
                 rights=None, subtitle=None, title=None, updated=None, entry=None,
                 text=None, extension_elements=None, extension_attributes=None):
        """Create an atom:feed element.

        Args:
            author: list (optional) Author instances for this feed.
            category: list (optional) Category instances.
            contributor: list (optional) Contributor instances.
            generator: Generator (optional) child element.
            icon: Icon (optional) child element.
            atom_id: Id (optional) The feed's id element.
            link: list (optional) Link instances.
            logo: Logo (optional) child element.
            rights: Rights (optional) child element.
            subtitle: Subtitle (optional) child element.
            title: Title (optional) child element.
            updated: Updated (optional) child element.
            entry: list (optional) Entry instances contained in the feed.
            text: str (optional) The text contents of this element's own
                XML text node.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) Extra attribute
                name/value pairs.
        """
        # Single-valued children first, then the list-valued ones.
        self.generator = generator
        self.icon = icon
        self.id = atom_id
        self.logo = logo
        self.rights = rights
        self.subtitle = subtitle
        self.title = title
        self.updated = updated
        self.text = text
        self.author = author or []
        self.category = category or []
        self.contributor = contributor or []
        self.link = link or []
        self.entry = entry or []
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
def FeedFromString(xml_string):
    """Deserialize a Feed instance from a string of XML."""
    return CreateClassFromXMLString(Feed, xml_string)
class ExtensionElement(object):
    """Represents extra XML elements contained in Atom classes."""

    def __init__(self, tag, namespace=None, attributes=None,
                 children=None, text=None):
        """Constructor for ExtensionElement.

        Args:
            tag: str The tag (without the namespace qualifier) for this
                element. To reconstruct the full qualified name of the
                element, combine this tag with the namespace.
            namespace: str (optional) The XML namespace for this element.
            attributes: dict (optional) The attribute value string pairs
                for the XML attributes of this element.
            children: list (optional) A list of ExtensionElements which
                represent the XML child nodes of this element.
            text: str (optional) The character data of this element.
        """
        self.namespace = namespace
        self.tag = tag
        self.attributes = attributes or {}
        self.children = children or []
        self.text = text

    def ToString(self):
        """Serialize this element and its children to an XML string."""
        element_tree = self._TransferToElementTree(ElementTree.Element(''))
        return ElementTree.tostring(element_tree, encoding="UTF-8")

    def _TransferToElementTree(self, element_tree):
        """Copy this element's data onto the given ElementTree element.

        Returns the populated element, or None when this object has no tag.
        """
        if self.tag is None:
            return None
        if self.namespace is not None:
            element_tree.tag = '{%s}%s' % (self.namespace, self.tag)
        else:
            element_tree.tag = self.tag
        # dict.items() (unlike the Python-2-only iteritems()) works on both
        # Python 2 and Python 3.
        for key, value in self.attributes.items():
            element_tree.attrib[key] = value
        for child in self.children:
            child._BecomeChildElement(element_tree)
        element_tree.text = self.text
        return element_tree

    def _BecomeChildElement(self, element_tree):
        """Converts this object into an etree element and adds it as a child node.

        Adds self to the ElementTree. This method is required to avoid
        verbose XML which constantly redefines the namespace.

        Args:
            element_tree: ElementTree._Element The element to which this
                object's XML will be added.
        """
        new_element = ElementTree.Element('')
        element_tree.append(new_element)
        self._TransferToElementTree(new_element)

    def FindChildren(self, tag=None, namespace=None):
        """Searches child nodes for objects with the desired tag/namespace.

        Returns a list of extension elements within this object whose tag
        and/or namespace match those passed in. To find all children in
        a particular namespace, specify the namespace but not the tag name.
        If you specify only the tag, the result list may contain extension
        elements in multiple namespaces.

        Args:
            tag: str (optional) The desired tag
            namespace: str (optional) The desired namespace

        Returns:
            A list of elements whose tag and/or namespace match the
            parameters' values.
        """
        results = []
        for element in self.children:
            # A falsy tag/namespace argument means "match anything" for
            # that criterion, mirroring the original truthiness-based
            # four-branch dispatch this single loop replaces.
            if tag and element.tag != tag:
                continue
            if namespace and element.namespace != namespace:
                continue
            results.append(element)
        return results
def ExtensionElementFromString(xml_string):
    """Parse a string of XML into an ExtensionElement tree."""
    element_tree = ElementTree.fromstring(xml_string)
    return _ExtensionElementFromElementTree(element_tree)
def _ExtensionElementFromElementTree(element_tree):
    """Recursively convert an ElementTree element into an ExtensionElement.

    Args:
        element_tree: An ElementTree element whose tag may be namespace
            qualified in Clark notation ('{namespace}tag').

    Returns:
        An ExtensionElement mirroring the tag, namespace, attributes, text
        and children of the given element.
    """
    element_tag = element_tree.tag
    if '}' in element_tag:
        # Split Clark notation '{namespace}tag' into its two parts.
        namespace = element_tag[1:element_tag.index('}')]
        tag = element_tag[element_tag.index('}')+1:]
    else:
        namespace = None
        tag = element_tag
    extension = ExtensionElement(namespace=namespace, tag=tag)
    # dict.items() (unlike the Python-2-only iteritems()) works on both
    # Python 2 and Python 3.
    for key, value in element_tree.attrib.items():
        extension.attributes[key] = value
    for child in element_tree:
        extension.children.append(_ExtensionElementFromElementTree(child))
    extension.text = element_tree.text
    return extension
def deprecated(warning=None):
    """Decorator to raise a warning each time the function is called.

    Args:
        warning: The warning message to be displayed as a string (optional).

    Returns:
        A decorator which wraps a function so that every invocation issues
        a DeprecationWarning before delegating to the original function.
    """
    warning = warning or ''

    # This closure is what is returned from the deprecated function.
    def mark_deprecated(f):
        # The deprecated_function wraps the actual call to f.
        def deprecated_function(*args, **kwargs):
            warnings.warn(warning, DeprecationWarning, stacklevel=2)
            return f(*args, **kwargs)
        # Preserve the wrapped function's identity so introspection and
        # tracebacks do not report every decorated callable as
        # 'deprecated_function'. __name__ (unlike the Python-2-only
        # func_name alias, which does not exist on Python 3 functions)
        # works on both Python 2 and Python 3; the docstring is carried
        # over as well.
        deprecated_function.__name__ = f.__name__
        deprecated_function.__doc__ = f.__doc__
        return deprecated_function
    return mark_deprecated
| apache-2.0 |
ClearwaterCore/gmock-upstream | gtest/scripts/fuse_gtest_files.py | 2577 | 8813 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# Regex for matching '#include "gtest/..."'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')
# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')
# Where to find the source seed files (paths relative to the gtest root
# directory).
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'
# Where to put the generated files (paths relative to the output
# directory).
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
    """Verifies that the given file exists; aborts on failure.

    relative_path is the file path relative to the given directory.
    """
    if not os.path.isfile(os.path.join(directory, relative_path)):
        print 'ERROR: Cannot find %s in directory %s.' % (relative_path,
                                                          directory)
        print ('Please either specify a valid project root directory '
               'or omit it on the command line.')
        # Abort with a non-zero status so calling scripts see the failure.
        sys.exit(1)
def ValidateGTestRootDir(gtest_root):
    """Makes sure gtest_root points to a valid gtest root directory.

    The function aborts the program on failure.
    """
    # Both seed files must be present for the fusion to work; check them
    # in order so the first missing one is reported.
    for seed in (GTEST_H_SEED, GTEST_ALL_CC_SEED):
        VerifyFileExists(gtest_root, seed)
def VerifyOutputFile(output_dir, relative_path):
    """Verifies that the given output file path is valid.

    relative_path is relative to the output_dir directory.
    """
    # Makes sure the output file either doesn't exist or can be overwritten.
    output_file = os.path.join(output_dir, relative_path)
    if os.path.exists(output_file):
        # TODO(wan@google.com): The following user-interaction doesn't
        # work with automated processes. We should provide a way for the
        # Makefile to force overwriting the files.
        # Prompt interactively; anything other than 'y'/'Y' aborts.
        print ('%s already exists in directory %s - overwrite it? (y/N) ' %
               (relative_path, output_dir))
        answer = sys.stdin.readline().strip()
        if answer not in ['y', 'Y']:
            print 'ABORTED.'
            sys.exit(1)
    # Makes sure the directory holding the output file exists; creates
    # it and all its ancestors if necessary.
    parent_directory = os.path.dirname(output_file)
    if not os.path.isdir(parent_directory):
        os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
    """Makes sure output_dir points to a valid output directory.

    The function aborts the program on failure.
    """
    # Check each generated path in order so the first problem is reported.
    for generated in (GTEST_H_OUTPUT, GTEST_ALL_CC_OUTPUT):
        VerifyOutputFile(output_dir, generated)
def FuseGTestH(gtest_root, output_dir):
    """Scans folder gtest_root to generate gtest/gtest.h in output_dir."""
    # open() replaces the Python-2-only file() builtin; behavior is the same.
    output_file = open(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
    # The built-in set type replaces sets.Set, whose module is deprecated
    # since Python 2.6 and removed in Python 3.
    processed_files = set()  # Holds all gtest headers we've processed.

    def ProcessFile(gtest_header_path):
        """Processes the given gtest header file."""
        # We don't process the same header twice.
        if gtest_header_path in processed_files:
            return
        processed_files.add(gtest_header_path)
        # Reads each line in the given gtest header.
        for line in open(os.path.join(gtest_root, gtest_header_path), 'r'):
            m = INCLUDE_GTEST_FILE_REGEX.match(line)
            if m:
                # It's '#include "gtest/..."' - let's process it recursively.
                ProcessFile('include/' + m.group(1))
            else:
                # Otherwise we copy the line unchanged to the output file.
                output_file.write(line)

    ProcessFile(GTEST_H_SEED)
    output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
    """Scans folder gtest_root to generate gtest/gtest-all.cc in output_file."""
    # The built-in set type replaces sets.Set, whose module is deprecated
    # since Python 2.6 and removed in Python 3.
    processed_files = set()

    def ProcessFile(gtest_source_file):
        """Processes the given gtest source file."""
        # We don't process the same #included file twice.
        if gtest_source_file in processed_files:
            return
        processed_files.add(gtest_source_file)
        # Reads each line in the given gtest source file.  open() replaces
        # the Python-2-only file() builtin; behavior is the same.
        for line in open(os.path.join(gtest_root, gtest_source_file), 'r'):
            m = INCLUDE_GTEST_FILE_REGEX.match(line)
            if m:
                if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
                    # It's '#include "gtest/gtest-spi.h"'. This file is not
                    # #included by "gtest/gtest.h", so we need to process it.
                    ProcessFile(GTEST_SPI_H_SEED)
                else:
                    # It's '#include "gtest/foo.h"' where foo is not gtest-spi.
                    # We treat it as '#include "gtest/gtest.h"', as all other
                    # gtest headers are being fused into gtest.h and cannot be
                    # #included directly.
                    # There is no need to #include "gtest/gtest.h" more than
                    # once.
                    if not GTEST_H_SEED in processed_files:
                        processed_files.add(GTEST_H_SEED)
                        output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
            else:
                m = INCLUDE_SRC_FILE_REGEX.match(line)
                if m:
                    # It's '#include "src/foo"' - let's process it recursively.
                    ProcessFile(m.group(1))
                else:
                    output_file.write(line)

    ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
    """Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""
    # open() replaces the Python-2-only file() builtin; behavior is the same.
    output_file = open(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w')
    FuseGTestAllCcToFile(gtest_root, output_file)
    output_file.close()
def FuseGTest(gtest_root, output_dir):
    """Fuses gtest.h and gtest-all.cc."""
    # Validate both directories first so nothing is written when either
    # path is unusable.
    ValidateGTestRootDir(gtest_root)
    ValidateOutputDir(output_dir)
    FuseGTestH(gtest_root, output_dir)
    FuseGTestAllCc(gtest_root, output_dir)
def main():
    """Command-line entry point: dispatch on the argument count."""
    argc = len(sys.argv)
    if argc == 2:
        # fuse_gtest_files.py OUTPUT_DIR
        FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1])
    elif argc == 3:
        # fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
        FuseGTest(sys.argv[1], sys.argv[2])
    else:
        # Wrong usage: show the module docstring and exit with an error.
        print __doc__
        sys.exit(1)
if __name__ == '__main__':
    # Allow the module to be run directly as a command-line tool.
    main()
| bsd-3-clause |
xq262144/hue | desktop/core/ext-py/Django-1.6.10/tests/validation/test_error_messages.py | 108 | 3896 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.db import models
from django.utils import six
from django.utils.unittest import TestCase
class ValidationMessagesTest(TestCase):
    """Checks the validation error messages raised by model field clean()."""

    def _test_validation_messages(self, field, value, expected):
        """Assert that cleaning value on field raises exactly expected."""
        with self.assertRaises(ValidationError) as raised:
            field.clean(value, None)
        self.assertEqual(raised.exception.messages, expected)

    def test_autofield_field_raises_error_message(self):
        field = models.AutoField(primary_key=True)
        self._test_validation_messages(field, 'fõo',
            ["'fõo' value must be an integer."])
        # primary_key must be True. Refs #12467.
        with six.assertRaisesRegex(self, AssertionError,
                "AutoFields must have primary_key=True."):
            models.AutoField(primary_key=False)

    def test_integer_field_raises_error_message(self):
        field = models.IntegerField()
        self._test_validation_messages(field, 'fõo',
            ["'fõo' value must be an integer."])

    def test_boolean_field_raises_error_message(self):
        field = models.BooleanField()
        self._test_validation_messages(field, 'fõo',
            ["'fõo' value must be either True or False."])

    def test_float_field_raises_error_message(self):
        field = models.FloatField()
        self._test_validation_messages(field, 'fõo',
            ["'fõo' value must be a float."])

    def test_decimal_field_raises_error_message(self):
        field = models.DecimalField()
        self._test_validation_messages(field, 'fõo',
            ["'fõo' value must be a decimal number."])

    def test_null_boolean_field_raises_error_message(self):
        field = models.NullBooleanField()
        self._test_validation_messages(field, 'fõo',
            ["'fõo' value must be either None, True or False."])

    def test_date_field_raises_error_message(self):
        field = models.DateField()
        # Not a date at all.
        self._test_validation_messages(field, 'fõo',
            ["'fõo' value has an invalid date format. "
             "It must be in YYYY-MM-DD format."])
        self._test_validation_messages(field, 'aaaa-10-10',
            ["'aaaa-10-10' value has an invalid date format. "
             "It must be in YYYY-MM-DD format."])
        # Right format, impossible month/day values.
        self._test_validation_messages(field, '2011-13-10',
            ["'2011-13-10' value has the correct format (YYYY-MM-DD) "
             "but it is an invalid date."])
        self._test_validation_messages(field, '2011-10-32',
            ["'2011-10-32' value has the correct format (YYYY-MM-DD) "
             "but it is an invalid date."])

    def test_datetime_field_raises_error_message(self):
        field = models.DateTimeField()
        # Wrong format
        self._test_validation_messages(field, 'fõo',
            ["'fõo' value has an invalid format. It must be "
             "in YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."])
        # Correct format but invalid date
        self._test_validation_messages(field, '2011-10-32',
            ["'2011-10-32' value has the correct format "
             "(YYYY-MM-DD) but it is an invalid date."])
        # Correct format but invalid date/time
        self._test_validation_messages(field, '2011-10-32 10:10',
            ["'2011-10-32 10:10' value has the correct format "
             "(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
             "but it is an invalid date/time."])

    def test_time_field_raises_error_message(self):
        field = models.TimeField()
        # Wrong format
        self._test_validation_messages(field, 'fõo',
            ["'fõo' value has an invalid format. It must be in "
             "HH:MM[:ss[.uuuuuu]] format."])
        # Correct format but invalid time
        self._test_validation_messages(field, '25:50',
            ["'25:50' value has the correct format "
             "(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."])
valtech-mooc/edx-platform | cms/lib/xblock/test/test_runtime.py | 25 | 2194 | """
Tests of edX Studio runtime functionality
"""
from urlparse import urlparse
from mock import Mock
from unittest import TestCase
from cms.lib.xblock.runtime import handler_url
class TestHandlerUrl(TestCase):
    """Exercise the Studio (CMS) ``handler_url`` implementation."""

    def setUp(self):
        self.block = Mock()

    def test_trailing_characters(self):
        """Generated URLs never end in '?' or '/', for any argument mix."""
        urls = [
            handler_url(self.block, 'handler'),
            handler_url(self.block, 'handler', 'suffix'),
            handler_url(self.block, 'handler', 'suffix', 'query'),
            handler_url(self.block, 'handler', query='query'),
        ]
        for url in urls:
            self.assertFalse(url.endswith('?'))
            self.assertFalse(url.endswith('/'))

    def _parsed_query(self, query_string):
        """Return the parsed query string from a handler_url generated with the supplied query_string"""
        url = handler_url(self.block, 'handler', query=query_string)
        return urlparse(url).query

    def test_query_string(self):
        """The supplied query string survives URL generation intact."""
        for query in ('foo=bar', 'foo=bar&baz=true', 'foo&bar&baz'):
            self.assertIn(query, self._parsed_query(query))

    def _parsed_path(self, handler_name='handler', suffix=''):
        """Return the parsed path from a handler_url with the supplied handler_name and suffix"""
        url = handler_url(self.block, handler_name, suffix=suffix)
        return urlparse(url).path

    def test_suffix(self):
        """The suffix ends up at the end of the URL path."""
        for suffix in ('foo', 'foo/bar', '/foo/bar'):
            self.assertTrue(self._parsed_path(suffix=suffix).endswith(suffix))

    def test_handler_name(self):
        """The handler name appears in the URL path."""
        for name in ('handler1', 'handler_a'):
            self.assertIn(name, self._parsed_path(name))
| agpl-3.0 |
klmitch/nova | nova/tests/unit/compute/test_instance_list.py | 3 | 16596 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.compute import instance_list
from nova.compute import multi_cell_list
from nova import context as nova_context
from nova import exception
from nova import objects
from nova import test
from nova.tests import fixtures
FAKE_CELLS = [objects.CellMapping(), objects.CellMapping()]
class TestInstanceList(test.NoDBTestCase):
    """Unit tests for nova.compute.instance_list: cross-cell sorting,
    cell selection for users vs. admins, down-cell handling, and the
    batch-size strategies used when querying multiple cells."""

    def setUp(self):
        """Build three fake cells, each holding three fake instances."""
        super(TestInstanceList, self).setUp()
        cells = [objects.CellMapping(uuid=getattr(uuids, 'cell%i' % i),
                                     name='cell%i' % i,
                                     transport_url='fake:///',
                                     database_connection='fake://')
                 for i in range(0, 3)]
        insts = {}
        for cell in cells:
            insts[cell.uuid] = list([
                dict(
                    uuid=getattr(uuids, '%s-inst%i' % (cell.name, i)),
                    hostname='%s-inst%i' % (cell.name, i))
                for i in range(0, 3)])
        self.cells = cells
        self.insts = insts
        self.context = nova_context.RequestContext()
        self.useFixture(fixtures.SpawnIsSynchronousFixture())
        self.flags(instance_list_cells_batch_strategy='fixed', group='api')

    def test_compare_simple_instance_quirks(self):
        """InstanceSortContext pads sort keys/dirs with its defaults."""
        # Ensure uuid,asc is added
        ctx = instance_list.InstanceSortContext(['key0'], ['asc'])
        self.assertEqual(['key0', 'uuid'], ctx.sort_keys)
        self.assertEqual(['asc', 'asc'], ctx.sort_dirs)
        # Ensure defaults are added
        ctx = instance_list.InstanceSortContext(None, None)
        self.assertEqual(['created_at', 'id', 'uuid'], ctx.sort_keys)
        self.assertEqual(['desc', 'desc', 'asc'], ctx.sort_dirs)

    @mock.patch('nova.db.api.instance_get_all_by_filters_sort')
    @mock.patch('nova.objects.CellMappingList.get_all')
    def test_get_instances_sorted(self, mock_cells, mock_inst):
        """Result order is independent of the order cells respond in."""
        mock_cells.return_value = self.cells
        insts_by_cell = self.insts.values()
        mock_inst.side_effect = insts_by_cell
        obj, insts = instance_list.get_instances_sorted(self.context, {},
                                                        None, None, [],
                                                        ['hostname'], ['asc'])
        insts_one = [inst['hostname'] for inst in insts]
        # Reverse the order that we get things from the cells so we can
        # make sure that the result is still sorted the same way
        insts_by_cell = list(reversed(list(insts_by_cell)))
        mock_inst.reset_mock()
        mock_inst.side_effect = insts_by_cell
        obj, insts = instance_list.get_instances_sorted(self.context, {},
                                                        None, None, [],
                                                        ['hostname'], ['asc'])
        insts_two = [inst['hostname'] for inst in insts]
        self.assertEqual(insts_one, insts_two)

    @mock.patch('nova.objects.BuildRequestList.get_by_filters')
    @mock.patch('nova.compute.instance_list.get_instances_sorted')
    @mock.patch('nova.objects.CellMappingList.get_by_project_id')
    def test_user_gets_subset_of_cells(self, mock_cm, mock_gi, mock_br):
        """With per-project cells enabled, a user queries only their cells."""
        self.flags(instance_list_per_project_cells=True, group='api')
        mock_gi.return_value = instance_list.InstanceLister(None, None), []
        mock_br.return_value = []
        user_context = nova_context.RequestContext('fake', 'fake')
        instance_list.get_instance_objects_sorted(
            user_context, {}, None, None, [], None, None)
        mock_gi.assert_called_once_with(user_context, {}, None, None, [],
                                        None, None,
                                        cell_mappings=mock_cm.return_value,
                                        batch_size=1000,
                                        cell_down_support=False)

    @mock.patch('nova.context.CELLS', new=FAKE_CELLS)
    @mock.patch('nova.context.load_cells')
    @mock.patch('nova.objects.BuildRequestList.get_by_filters')
    @mock.patch('nova.compute.instance_list.get_instances_sorted')
    @mock.patch('nova.objects.CellMappingList.get_by_project_id')
    def test_admin_gets_all_cells(self, mock_cm, mock_gi, mock_br, mock_lc):
        """An admin always queries all cells (global cell cache)."""
        mock_gi.return_value = instance_list.InstanceLister(None, None), []
        mock_br.return_value = []
        admin_context = nova_context.RequestContext('fake', 'fake',
                                                    is_admin=True)
        instance_list.get_instance_objects_sorted(
            admin_context, {}, None, None, [], None, None)
        mock_gi.assert_called_once_with(admin_context, {}, None, None, [],
                                        None, None,
                                        cell_mappings=FAKE_CELLS,
                                        batch_size=100,
                                        cell_down_support=False)
        mock_cm.assert_not_called()
        mock_lc.assert_called_once_with()

    @mock.patch('nova.context.CELLS', new=FAKE_CELLS)
    @mock.patch('nova.context.load_cells')
    @mock.patch('nova.objects.BuildRequestList.get_by_filters')
    @mock.patch('nova.compute.instance_list.get_instances_sorted')
    @mock.patch('nova.objects.CellMappingList.get_by_project_id')
    def test_user_gets_all_cells(self, mock_cm, mock_gi, mock_br, mock_lc):
        """With per-project cells disabled, a user queries all cells."""
        self.flags(instance_list_per_project_cells=False, group='api')
        mock_gi.return_value = instance_list.InstanceLister(None, None), []
        mock_br.return_value = []
        user_context = nova_context.RequestContext('fake', 'fake')
        instance_list.get_instance_objects_sorted(
            user_context, {}, None, None, [], None, None)
        mock_gi.assert_called_once_with(user_context, {}, None, None, [],
                                        None, None,
                                        cell_mappings=FAKE_CELLS,
                                        batch_size=100,
                                        cell_down_support=False)
        mock_lc.assert_called_once_with()

    @mock.patch('nova.context.CELLS', new=FAKE_CELLS)
    @mock.patch('nova.context.load_cells')
    @mock.patch('nova.objects.BuildRequestList.get_by_filters')
    @mock.patch('nova.compute.instance_list.get_instances_sorted')
    @mock.patch('nova.objects.CellMappingList.get_by_project_id')
    def test_admin_gets_all_cells_anyway(self, mock_cm, mock_gi, mock_br,
                                         mock_lc):
        """Per-project cells setting is ignored for admin requests."""
        self.flags(instance_list_per_project_cells=True, group='api')
        mock_gi.return_value = instance_list.InstanceLister(None, None), []
        mock_br.return_value = []
        admin_context = nova_context.RequestContext('fake', 'fake',
                                                    is_admin=True)
        instance_list.get_instance_objects_sorted(
            admin_context, {}, None, None, [], None, None)
        mock_gi.assert_called_once_with(admin_context, {}, None, None, [],
                                        None, None,
                                        cell_mappings=FAKE_CELLS,
                                        batch_size=100,
                                        cell_down_support=False)
        mock_cm.assert_not_called()
        mock_lc.assert_called_once_with()

    @mock.patch('nova.context.scatter_gather_cells')
    def test_get_instances_with_down_cells(self, mock_sg):
        """Down/erroring cells are skipped; up-cell results are returned."""
        inst_cell0 = self.insts[uuids.cell0]
        # storing the uuids of the instances from the up cell
        uuid_initial = [inst['uuid'] for inst in inst_cell0]

        def wrap(thing):
            # Wrap a result (or sentinel/exception) the way the scatter
            # gather code does before sorting
            return multi_cell_list.RecordWrapper(ctx, self.context, thing)
        ctx = nova_context.RequestContext()
        instances = [wrap(inst) for inst in inst_cell0]
        # creating one up cell and two down cells
        ret_val = {}
        ret_val[uuids.cell0] = instances
        ret_val[uuids.cell1] = [wrap(exception.BuildRequestNotFound(uuid='f'))]
        ret_val[uuids.cell2] = [wrap(nova_context.did_not_respond_sentinel)]
        mock_sg.return_value = ret_val
        obj, res = instance_list.get_instances_sorted(self.context, {}, None,
                                                      None, [], None, None)
        uuid_final = [inst['uuid'] for inst in res]
        # return the results from the up cell, ignoring the down cell.
        self.assertEqual(uuid_initial, uuid_final)

    @mock.patch('nova.context.scatter_gather_cells')
    def test_get_instances_by_not_skipping_down_cells(self, mock_sg):
        """A down cell raises when skipping down cells is disabled."""
        self.flags(list_records_by_skipping_down_cells=False, group='api')
        inst_cell0 = self.insts[uuids.cell0]

        def wrap(thing):
            return multi_cell_list.RecordWrapper(ctx, self.context, thing)
        ctx = nova_context.RequestContext()
        instances = [wrap(inst) for inst in inst_cell0]
        # creating one up cell and two down cells
        ret_val = {}
        ret_val[uuids.cell0] = instances
        ret_val[uuids.cell1] = [wrap(exception.BuildRequestNotFound(uuid='f'))]
        ret_val[uuids.cell2] = [wrap(nova_context.did_not_respond_sentinel)]
        mock_sg.return_value = ret_val
        # Raises exception if a cell is down without skipping them
        # as CONF.api.list_records_by_skipping_down_cells is set to False.
        # This would in turn result in an API 500 internal error.
        exp = self.assertRaises(exception.NovaException,
            instance_list.get_instance_objects_sorted, self.context, {}, None,
            None, [], None, None)
        self.assertIn('configuration indicates', str(exp))

    @mock.patch('nova.context.scatter_gather_cells')
    def test_get_instances_with_cell_down_support(self, mock_sg):
        """cell_down_support=True overrides the skip-down-cells config."""
        self.flags(list_records_by_skipping_down_cells=False, group='api')
        inst_cell0 = self.insts[uuids.cell0]
        # storing the uuids of the instances from the up cell
        uuid_initial = [inst['uuid'] for inst in inst_cell0]

        def wrap(thing):
            return multi_cell_list.RecordWrapper(ctx, self.context, thing)
        ctx = nova_context.RequestContext()
        instances = [wrap(inst) for inst in inst_cell0]
        # creating one up cell and two down cells
        ret_val = {}
        ret_val[uuids.cell0] = instances
        ret_val[uuids.cell1] = [wrap(exception.BuildRequestNotFound(uuid='f'))]
        ret_val[uuids.cell2] = [wrap(nova_context.did_not_respond_sentinel)]
        mock_sg.return_value = ret_val
        # From the new microversion (2.68) if cell_down_support is True
        # then CONF.api.list_records_by_skipping_down_cells will be ignored.
        # Exception will not be raised even if its False.
        obj, res = instance_list.get_instances_sorted(self.context, {}, None,
                                                      None, [], None, None,
                                                      cell_down_support=True)
        uuid_final = [inst['uuid'] for inst in res]
        # return the results from the up cell, ignoring the down cell and
        # constructing partial results later.
        self.assertEqual(uuid_initial, uuid_final)

    def test_batch_size_fixed(self):
        """'fixed' strategy: full limit for one cell, fixed size for more,
        with a lower floor for tiny limits."""
        fixed_size = 200
        self.flags(instance_list_cells_batch_strategy='fixed', group='api')
        self.flags(instance_list_cells_batch_fixed_size=fixed_size,
                   group='api')
        # We call the batch size calculator with various arguments, including
        # lists of cells which are just counted, so the cardinality is all that
        # matters.
        # One cell, so batch at $limit
        ret = instance_list.get_instance_list_cells_batch_size(
            1000, [mock.sentinel.cell1])
        self.assertEqual(1000, ret)
        # Two cells, so batch at $fixed_size
        ret = instance_list.get_instance_list_cells_batch_size(
            1000, [mock.sentinel.cell1, mock.sentinel.cell2])
        self.assertEqual(fixed_size, ret)
        # Four cells, so batch at $fixed_size
        ret = instance_list.get_instance_list_cells_batch_size(
            1000, [mock.sentinel.cell1, mock.sentinel.cell2,
                   mock.sentinel.cell3, mock.sentinel.cell4])
        self.assertEqual(fixed_size, ret)
        # Three cells, tiny limit, so batch at lower threshold
        ret = instance_list.get_instance_list_cells_batch_size(
            10, [mock.sentinel.cell1,
                 mock.sentinel.cell2,
                 mock.sentinel.cell3])
        self.assertEqual(100, ret)
        # Three cells, limit above floor, so batch at limit
        ret = instance_list.get_instance_list_cells_batch_size(
            110, [mock.sentinel.cell1,
                  mock.sentinel.cell2,
                  mock.sentinel.cell3])
        self.assertEqual(110, ret)

    def test_batch_size_distributed(self):
        """'distributed' strategy: (limit / n_cells) + 10%, floored at 100."""
        self.flags(instance_list_cells_batch_strategy='distributed',
                   group='api')
        # One cell, so batch at $limit
        ret = instance_list.get_instance_list_cells_batch_size(1000, [1])
        self.assertEqual(1000, ret)
        # Two cells so batch at ($limit/2)+10%
        ret = instance_list.get_instance_list_cells_batch_size(1000, [1, 2])
        self.assertEqual(550, ret)
        # Four cells so batch at ($limit/4)+10%
        ret = instance_list.get_instance_list_cells_batch_size(1000, [1, 2,
                                                                      3, 4])
        self.assertEqual(275, ret)
        # Three cells, tiny limit, so batch at lower threshold
        ret = instance_list.get_instance_list_cells_batch_size(10, [1, 2, 3])
        self.assertEqual(100, ret)
        # Three cells, small limit, so batch at lower threshold
        ret = instance_list.get_instance_list_cells_batch_size(110, [1, 2, 3])
        self.assertEqual(100, ret)
        # No cells, so batch at $limit
        ret = instance_list.get_instance_list_cells_batch_size(1000, [])
        self.assertEqual(1000, ret)
class TestInstanceListBig(test.NoDBTestCase):
    """Tests batched cross-cell listing with a larger (100-instance)
    fixture to verify batch sizing limits database round trips."""

    def setUp(self):
        """Create three fake cells and a shared pool of 100 instances."""
        super(TestInstanceListBig, self).setUp()
        cells = [objects.CellMapping(uuid=getattr(uuids, 'cell%i' % i),
                                     name='cell%i' % i,
                                     transport_url='fake:///',
                                     database_connection='fake://')
                 for i in range(0, 3)]
        insts = list([
            dict(
                uuid=getattr(uuids, 'inst%i' % i),
                hostname='inst%i' % i)
            for i in range(0, 100)])
        self.cells = cells
        self.insts = insts
        self.context = nova_context.RequestContext()
        self.useFixture(fixtures.SpawnIsSynchronousFixture())

    @mock.patch('nova.db.api.instance_get_all_by_filters_sort')
    @mock.patch('nova.objects.CellMappingList.get_all')
    def test_get_instances_batched(self, mock_cells, mock_inst):
        """A limit of 50 with batch_size=10 yields exactly 50 results in a
        predictable number of per-cell queries."""
        mock_cells.return_value = self.cells

        def fake_get_insts(ctx, filters, limit, *a, **k):
            # Serve `limit` instances per call from the shared pool,
            # regardless of which cell is being queried
            for i in range(0, limit):
                yield self.insts.pop()
        mock_inst.side_effect = fake_get_insts
        obj, insts = instance_list.get_instances_sorted(self.context, {},
                                                        50, None, [],
                                                        ['hostname'], ['desc'],
                                                        batch_size=10)
        # Make sure we returned exactly how many were requested
        insts = list(insts)
        self.assertEqual(50, len(insts))
        # Since the instances are all uniform, we should have a
        # predictable number of queries to the database. 5 queries
        # would get us 50 results, plus one more gets triggered by the
        # sort to fill the buffer for the first cell feeder that runs
        # dry.
        self.assertEqual(6, mock_inst.call_count)
| apache-2.0 |
ctools/ctools | cscripts/csphagen.py | 1 | 44281 | #! /usr/bin/env python
# ==========================================================================
# Computes the PHA spectra for source/background and ARF/RMF files using the
# reflected region method
#
# Copyright (C) 2017-2021 Luigi Tibaldo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ==========================================================================
import gammalib
import ctools
import math
import sys
from cscripts import mputils
# =============== #
# csfindobs class #
# =============== #
class csphagen(ctools.csobservation):
"""
Generate PHA, ARF and RMF files for classical IACT spectral analysis
"""
# Constructor
def __init__(self, *argv):
"""
Constructor
"""
# Initialise application by calling the appropriate class constructor
self._init_csobservation(self.__class__.__name__, ctools.__version__, argv)
# Initialise other variables
self._obs_off = gammalib.GObservations()
self._ebounds = gammalib.GEbounds()
self._etruebounds = gammalib.GEbounds()
self._src_dir = gammalib.GSkyDir()
self._src_reg = gammalib.GSkyRegions()
self._models = gammalib.GModels()
self._srcname = ''
self._bkg_regs = []
self._excl_reg = None
self._has_exclusion = False
self._srcshape = ''
self._rad = 0.0
self._reg_width = 0.0
self._reg_height = 0.0
self._reg_posang = 0.0
self._nthreads = 0
# Return
return
# State methods for pickling
def __getstate__(self):
"""
Extend ctools.csobservation getstate method to include some members
Returns
-------
state : dict
Pickled instance
"""
# Set pickled dictionary
state = {'base' : ctools.csobservation.__getstate__(self),
'obs_off' : self._obs_off,
'ebounds' : self._ebounds,
'etruebounds' : self._etruebounds,
'src_dir' : self._src_dir,
'src_reg' : self._src_reg,
'models' : self._models,
'srcname' : self._srcname,
'bkg_regs' : self._bkg_regs,
'excl_reg' : self._excl_reg,
'has_exclusion' : self._has_exclusion,
'srcshape' : self._srcshape,
'rad' : self._rad,
'reg_width' : self._reg_width,
'reg_height' : self._reg_height,
'reg_posang' : self._reg_posang,
'nthreads' : self._nthreads}
# Return pickled dictionary
return state
def __setstate__(self, state):
"""
Extend ctools.csobservation setstate method to include some members
Parameters
----------
state : dict
Pickled instance
"""
ctools.csobservation.__setstate__(self, state['base'])
self._obs_off = state['obs_off']
self._ebounds = state['ebounds']
self._etruebounds = state['etruebounds']
self._src_dir = state['src_dir']
self._src_reg = state['src_reg']
self._models = state['models']
self._srcname = state['srcname']
self._bkg_regs = state['bkg_regs']
self._excl_reg = state['excl_reg']
self._has_exclusion = state['has_exclusion']
self._srcshape = state['srcshape']
self._rad = state['rad']
self._reg_width = state['reg_width']
self._reg_height = state['reg_height']
self._reg_posang = state['reg_posang']
self._nthreads = state['nthreads']
# Return
return
# Private methods
def _query_src_direction(self):
"""
Set up the source direction parameter
"""
# Initialise source direction
self._src_dir = gammalib.GSkyDir()
# Get coordinate systel
coordsys = self['coordsys'].string()
# If coordinate system is celestial then query "ra" and "dec"
if coordsys == 'CEL':
ra = self['ra'].real()
dec = self['dec'].real()
self._src_dir.radec_deg(ra, dec)
# ... otherwise, if coordinate system is galactic then query "glon"
# and "glat"
elif coordsys == 'GAL':
glon = self['glon'].real()
glat = self['glat'].real()
self._src_dir.lb_deg(glon, glat)
# Return
return
def _compute_posang(self, pnt_dir, a, b):
"""
Compute the difference in position angle wrt the pointing in degrees
Parameters
----------
pnt_dir : `~gammalib.GSkyDir`
Pointing direction
a : `~gammalib.GSkyDir`
First sky direction
a : `~gammalib.GSkyDir`
Second sky direction
Returns
-------
posang : float
Position angle (degrees)
"""
# Compute position angles
posang_a = pnt_dir.posang_deg(a) % 360
posang_b = pnt_dir.posang_deg(b) % 360
# Compute difference
posang = abs(posang_a - posang_b)
# Return position angle
return posang
def _get_regions(self, filename):
"""
Get regions from DS9 file or FITS file
Parameters
----------
filename : `~gammalib.GFilename`
Filename
Returns
-------
regs : `~gammalib.GSkyRegions`
Region container
"""
# If filename is a FITS file then load region map and append to
# list of regions
if filename.is_fits():
map = gammalib.GSkyRegionMap(filename)
regs = gammalib.GSkyRegions()
regs.append(map)
# ... otherwise load DS9 file
else:
regs = gammalib.GSkyRegions(filename)
# Return region container
return regs
def _get_source_parameters(self):
"""
Get parameters to define source/On region
"""
# Get source shape
self._srcshape = self['srcshape'].string()
# Query source direction
self._query_src_direction()
# If source shape is a circle the append GSkyRegionCircle
if self._srcshape == 'CIRCLE':
# Set circular source region
self._rad = self['rad'].real()
self._src_reg.append(gammalib.GSkyRegionCircle(self._src_dir, self._rad))
# ... otherwise if source shape is a rectangle then append
# GSkyRegionRectangle
elif self._srcshape == 'RECT':
# Set rectangular source region
self._reg_width = self['width'].real()
self._reg_height = self['height'].real()
self._reg_posang = self['posang'].real()
self._src_reg.append(gammalib.GSkyRegionRectangle(self._src_dir,
self._reg_width,
self._reg_height,
self._reg_posang))
# Return
return
def _get_parameters_bkgmethod_reflected(self):
"""
Get parameters for REFLECTED background method
"""
# Query parameters for source/On region definition
self._get_source_parameters()
# Query minimum number of background regions and
# number of background regions to skip next to On region
self['bkgregmin'].integer()
self['bkgregskip'].integer()
# Return
return
    def _get_parameters_bkgmethod_custom(self):
        """
        Get parameters for CUSTOM background method

        Loads the user-supplied On region and attaches the user-supplied
        Off regions to every CTA observation that lacks them.

        Raises
        ------
        RuntimeError
            Only one On region is allowed
        """
        # Set up source region
        filename = self['srcregfile'].filename()
        self._src_reg = self._get_regions(filename)
        # Raise an exception if there is more than one source region
        if len(self._src_reg) != 1:
            raise RuntimeError('Only one On region is allowed')
        # Set up source direction. Query parameters if necessary: when no
        # models were provided, the source position is taken from a
        # circular On region; for any other region shape the user is
        # queried for the position.
        if self._models.is_empty():
            if isinstance(self._src_reg[0], gammalib.GSkyRegionCircle):
                self._src_dir = self._src_reg[0].centre()
                self._rad = self._src_reg[0].radius()
            else:
                self._query_src_direction()
        # Make sure that all CTA observations have an Off region by loading
        # the Off regions from the parameter 'bkgregfile' for all CTA
        # observations without Off region
        for obs in self.obs():
            if obs.classname() == 'GCTAObservation':
                if obs.off_regions().is_empty():
                    filename = self['bkgregfile'].filename()
                    regions = self._get_regions(filename)
                    obs.off_regions(regions)
        # Return
        return
def _get_parameters_bkgmethod_off(self):
"""
Get parameters for OFF background method
Raises
------
RuntimeError
On and Off observations must have same size
RuntimeError
Off observations must be event lists
"""
# Set up Off observations. If there are no Off observations in the
# container then load them via user parameters
if self.obs_off().is_empty():
# Get Off observation file name
filename = self['inobsoff'].filename()
# If Off observation is a FITS file then load observation and
# append it to the Off observation container
if gammalib.GFilename(filename).is_fits():
self._obs_off.append(gammalib.GCTAObservation(filename))
# ... otherwise load XML file into Off observation container
else:
self._obs_off.load(filename)
# Check that size of On and Off observations are the same, otherwise
# raise error
if self.obs().size() != self.obs_off().size():
raise RuntimeError('On and Off observations must have the same size')
# Loop through observations
for obs in self.obs_off():
# Check that observation is event list, otherwise throw error
if obs.eventtype() != "EventList":
raise RuntimeError('Off observations must be event lists')
# Check that they have response, otherwise assign based on user parameter
if obs.has_response() == False:
# Get database and IRF
database = self["caldb"].string()
irf = self["irf"].string()
# Create an XML element for response
parameter = "parameter name=\"Calibration\"" +\
" database=\"" + database + "\"" +\
" response=\"" + irf + "\""
xml = gammalib.GXmlElement()
xml.append(parameter)
# Create CTA response
response = gammalib.GCTAResponseIrf(xml)
# Attach response to observation
obs.response(response)
# Add models from Off observations to model container
for model in self.obs_off().models():
self._models.append(model)
# Query parameters for source/On region definition
self._get_source_parameters()
# Return
return
def _get_parameters_bkgmethod(self):
"""
Get background method parameters
"""
# Get background method
bkgmethod = self['bkgmethod'].string()
# Get background method dependent parameters
if bkgmethod == 'REFLECTED':
self._get_parameters_bkgmethod_reflected()
elif bkgmethod == 'CUSTOM':
self._get_parameters_bkgmethod_custom()
elif bkgmethod == 'OFF':
self._get_parameters_bkgmethod_off()
# Query parameters that are needed for all background methods
self['maxoffset'].real()
self['use_model_bkg'].boolean()
# Return
return
    def _get_parameters(self):
        """
        Get parameters from parfile and setup observations

        Queries all user parameters in a fixed order (observations,
        models, energy bounds, exclusion map, background method, stacking,
        output) and installs a dummy point-source model when no model was
        supplied.
        """
        # Clear source models
        self._models.clear()
        # Setup observations (require response and allow event list, don't
        # allow counts cube)
        self._setup_observations(self.obs(), True, True, False)
        # Get source model and source name. First try to extract models from
        # observation container. If this does not work then try creating
        # model from the inmodel parameter
        if self.obs().models().size() > 0:
            self._models = self.obs().models().clone()
            self._srcname = self['srcname'].string()
        elif self['inmodel'].is_valid():
            inmodel = self['inmodel'].filename()
            self._models = gammalib.GModels(inmodel)
            self._srcname = self['srcname'].string()
        # Set energy bounds
        self._ebounds = self._create_ebounds()
        # Initialize empty src regions container
        self._src_reg = gammalib.GSkyRegions()
        # Exclusion map
        if (self._excl_reg is not None) and (self._excl_reg.map().npix() > 0):
            # Exclusion map set and is not empty
            self._has_exclusion = True
        elif self['inexclusion'].is_valid():
            inexclusion = self['inexclusion'].filename()
            # If the user has not specified the extension to use
            # and there is an extension called 'EXCLUSION' ...
            if not inexclusion.has_extname()\
               and not inexclusion.has_extno()\
               and gammalib.GFits(inexclusion).contains('EXCLUSION'):
                # ... choose it for the exclusion map
                extname = inexclusion.url() + '[EXCLUSION]'
                inexclusion = gammalib.GFilename(extname)
            # Otherwise will pick the default (primary) HDU
            self._excl_reg = gammalib.GSkyRegionMap(inexclusion)
            self._has_exclusion = True
        else:
            self._has_exclusion = False
        # Get background method parameters (have to come after setting up of
        # observations and models)
        self._get_parameters_bkgmethod()
        # If there are multiple observations query whether to stack them
        if self.obs().size() > 1:
            self['stack'].boolean()
        # Query ahead output parameters
        if (self._read_ahead()):
            self['outobs'].filename()
            self['outmodel'].filename()
            self['prefix'].string()
        # Write input parameters into logger
        self._log_parameters(gammalib.TERSE)
        # Set number of processes for multiprocessing
        self._nthreads = mputils.nthreads(self)
        # If we have no model then create now a dummy model
        # (a point source with a steep power law that effectively carries
        # no flux; background modelling is disabled in that case)
        if self._models.is_empty():
            spatial = gammalib.GModelSpatialPointSource(self._src_dir)
            spectral = gammalib.GModelSpectralPlaw(1.0e-18, -2.0,
                                                   gammalib.GEnergy(1.0, 'TeV'))
            model = gammalib.GModelSky(spatial, spectral)
            model.name('Dummy')
            self._models.append(model)
            self._srcname = 'Dummy'
            self['use_model_bkg'].boolean(False)
        # Return
        return
def _compute_region_separation(self, pnt_dir):
"""
Compute the separation angle for reflected off regions in radians
Returns
-------
angle : float
Separation angle of two off regions (radians)
"""
# Initialise the result
separation = -1.0
# Compute offset of reflected regions to pointing position
offset = pnt_dir.dist_deg(self._src_dir)
# If shape is a circle then compute apparent diameter of the circle
# as separation
if self._srcshape == 'CIRCLE':
separation = 2.0 * self._rad / offset
# ... otherwise if shape is a rectangle then compute the opening angle
# towards combinations of rectangle corners. This method overestimates
# the real need of space between the ectangles, so the method may be
# optimised to gain more off regions! Anyway, it is assured that the
# off regions will never overlap.
elif self._srcshape == 'RECT':
# Get the sky directions of the corners of the rectangle
cs = [self._src_reg[0].corner(icorner) for icorner in range(4)]
# Compute the 6 opening angles
combinations = [[0,1], [0,2], [0,3], [1,2], [1,3], [2,3]]
angles = [self._compute_posang(pnt_dir, cs[i], cs[j]) \
for i,j in combinations]
# The desired separation is the maximum opening angle
separation = max(angles) * gammalib.deg2rad
# Return
return separation
def _reflected_regions(self, obs):
"""
Calculate list of reflected regions for a single observation (pointing)
Parameters
----------
obs : `~gammalib.GCTAObservation()`
CTA observation
Returns
-------
regions : `~gammalib.GSkyRegions`
List of reflected regions
"""
# Initialise list of reflected regions
regions = gammalib.GSkyRegions()
# Get offset angle of source
pnt_dir = obs.pointing().dir()
offset = pnt_dir.dist_deg(self._src_dir)
# Skip observation if it is too close to source
if self._src_reg.contains(pnt_dir):
msg = ' Skip because observation is pointed at %.3f deg from source'\
% (offset)
if self._srcshape == 'CIRCLE':
msg += ' (circle rad=%.3f).' % (self._rad)
self._log_string(gammalib.NORMAL, msg)
# ... otherwise
else:
posang = pnt_dir.posang_deg(self._src_dir)
if (self._srcshape == 'CIRCLE') or (self._srcshape == 'RECT'):
# Determine number of background regions to skip
N_skip = self['bkgregskip'].integer()
N_lim = 1 + 2 * N_skip
# Compute the angular separation of reflected regions wrt
# camera center. The factor 1.05 ensures background regions
# do not overlap due to numerical precision issues
alpha = 1.05 * self._compute_region_separation(pnt_dir)
# Compute number of reflected regions by dividing the angular
# separation by 2 pi.
N = int(2.0 * math.pi / alpha)
# If there are not enough reflected regions then skip the
# observation ...
if N < self['bkgregmin'].integer() + N_lim:
msg = ' Skip because the number %d of reflected regions '\
'for background estimation is smaller than '\
'"bkgregmin"=%d.' % (N-N_lim, self['bkgregmin'].integer())
self._log_string(gammalib.NORMAL, msg)
# ... otherwise loop over position angle to create reflected
# regions
else:
# Append reflected regions
alpha = 360.0 / N
dphi_max = 360.0 - alpha * (1 + N_skip)
dphi = alpha * (1 + N_skip)
while dphi <= dphi_max:
ctr_dir = pnt_dir.clone()
ctr_dir.rotate_deg(posang + dphi, offset)
if self._srcshape == 'CIRCLE':
region = gammalib.GSkyRegionCircle(ctr_dir, self._rad)
elif self._srcshape == 'RECT':
# Adjust the posang of the rectangle correspondingly
region = gammalib.GSkyRegionRectangle(ctr_dir,
self._reg_width,
self._reg_height,
self._reg_posang + dphi)
if self._has_exclusion:
if self._excl_reg.overlaps(region):
# Signal region overlap
msg = ' Reflected region overlaps with '\
'exclusion region.'
self._log_string(gammalib.EXPLICIT, msg)
# If region overlaps with exclusion region
# try to increment by 10% of angular step
dphi += 0.1 * alpha
else:
regions.append(region)
dphi += alpha
else:
regions.append(region)
dphi += alpha
# Check again that we have enough background regions
# now that we have checked for overlap with exclusion region
if regions.size() >= self['bkgregmin'].integer():
# Log number of reflected regions
msg = ' Use %d reflected regions.' % (regions.size())
self._log_string(gammalib.NORMAL, msg)
# Otherwise log observation skipped and return empty region container
else:
msg = ' Skip because the number %d of regions' \
'for background estimation not overlapping ' \
'with the exclusion region is smaller than ' \
'"bkgregmin"=%d.' % \
(regions.size(), self['bkgregmin'].integer())
self._log_string(gammalib.NORMAL, msg)
regions = gammalib.GSkyRegions()
# Return reflected regions
return regions
    def _instrument_regions(self, obs, obs_off):
        """
        Compute background regions for Off observation

        Calculate background region in Off observation that corresponds to the
        source region in the On observation in instrument coordinates

        Parameters
        ----------
        obs : `~gammalib.GCTAObservation()`
            On CTA observation
        obs_off : `~gammalib.GCTAObservation()`
            Off CTA observation

        Returns
        -------
        regions : `~gammalib.GSkyRegions`
            Container with background region (empty if the region overlaps
            with the exclusion region)
        """
        # Initialise region container
        regions = gammalib.GSkyRegions()
        # Convert source position in On observation to instrument coordinates
        instdir = obs.pointing().instdir(self._src_dir)
        # Convert instrument position to sky direction for Off observation
        off_dir = obs_off.pointing().skydir(instdir)
        # Build region according to shape specified by user
        # NOTE(review): if self._srcshape is neither 'CIRCLE' nor 'RECT' the
        # name "region" is never bound and the code below raises NameError --
        # presumably upstream parameter validation restricts the shape to
        # these two values; confirm
        # If circle
        if self._srcshape == 'CIRCLE':
            region = gammalib.GSkyRegionCircle(off_dir, self._rad)
        # ... otherwise if rectangle
        elif self._srcshape == 'RECT':
            # Instrument coordinates take sky direction as reference
            # so no need to change the position angle
            region = gammalib.GSkyRegionRectangle(off_dir,
                                                  self._reg_width,
                                                  self._reg_height,
                                                  self._reg_posang)
        # Check if background region overlaps with exclusion region
        is_valid = True
        if self._has_exclusion:
            if self._excl_reg.overlaps(region):
                # Signal region overlap
                msg = ' Background region overlaps with exclusion region.'
                self._log_string(gammalib.EXPLICIT, msg)
                is_valid = False
        # If region is valid then append it to container
        if is_valid:
            regions.append(region)
        # Return
        return regions
    def _set_models(self, results):
        """
        Set models for On/Off fitting

        The method does the following
        - append "OnOff" to the instrument name of all background models
        - fix all spatial and temporal parameters

        NOTE(review): the second docstring item above is inherited but no
        parameter fixing is visible in this method -- confirm against the
        caller or remove.

        Parameters
        ----------
        results : list of dict
            Result dictionaries

        Returns
        -------
        models : `~gammalib.GModels()`
            Model container
        """
        # Write header
        self._log_header1(gammalib.NORMAL, 'Set models')
        # Initialise model container
        models = gammalib.GModels()
        # Initialise stacked model flag
        has_stacked_model = False
        # Loop over all models in observation and append "OnOff" to instrument
        # names
        for model in self._models:
            # Initialise model usage
            use_model = False
            # If model is a background model then check if it will be
            # used. Background models are identified by their "GCTA" class
            # name prefix.
            if 'GCTA' in model.classname():
                # Skip model if background model should not be used
                if not self['use_model_bkg'].boolean():
                    self._log_string(gammalib.NORMAL, ' Skip "%s" model "%s" (%s)' % \
                                     (model.instruments(), model.name(), model.ids()))
                    continue
                # Check if model corresponds to one of the relevant
                # observations: use it only if that observation produced at
                # least one background region
                for result in results:
                    if model.is_valid(result['instrument'], result['id']):
                        if result['bkg_reg'].size() > 0:
                            use_model = True
                        break
                # If stacked analysis is requested then just use one model
                # and remove instrument ID
                if self['stack'].boolean():
                    # If there is already a model for stacked analysis then
                    # skip this one
                    if has_stacked_model:
                        msg = ' Skip "%s" model "%s" (%s). There is already ' \
                              'a model for stacked analysis.' % \
                              (model.instruments(), model.name(), model.ids())
                        self._log_string(gammalib.NORMAL, msg)
                        continue
                    # ... otherwise use model for stacked analysis
                    else:
                        has_stacked_model = True
                        use_model = True
                        model.ids('')
                # Append "OnOff" to instrument name
                model.instruments(model.instruments()+'OnOff')
            # ... otherwise, if model is not a background model then use it
            else:
                use_model = True
            # If model is relevant then append it now to the model
            # container
            if use_model:
                # Log model usage
                self._log_string(gammalib.NORMAL, ' Use "%s" model "%s" (%s)' % \
                                 (model.instruments(), model.name(), model.ids()))
                # Append model to container
                models.append(model)
            # ... otherwise signal that model is skipped
            else:
                self._log_string(gammalib.NORMAL, ' Skip "%s" model "%s" (%s)' % \
                                 (model.instruments(), model.name(), model.ids()))
        # Return model container
        return models
def _set_statistic(self, obs):
"""
Set statistic for observation
If the "use_model_bkg" is true then set statistic to "cstat",
otherwise set it to "wstat"
Parameters
----------
obs : `~gammalib.GObservation()`
Observation
Returns
-------
obs : `~gammalib.GObservation()`
Observation
"""
# Set statistic according to background model usage
if self['use_model_bkg'].boolean():
obs.statistic('cstat')
else:
obs.statistic('wstat')
# Return observation
return obs
def _etrue_ebounds(self):
"""
Set true energy boundaries
Returns
-------
ebounds : `~gammalib.GEbounds()`
True energy boundaries
"""
# Determine minimum and maximum energies
emin = self._ebounds.emin() * 0.5
emax = self._ebounds.emax() * 1.2
if emin.TeV() < self['etruemin'].real():
emin = gammalib.GEnergy(self['etruemin'].real(), 'TeV')
if emax.TeV() > self['etruemax'].real():
emax = gammalib.GEnergy(self['etruemax'].real(), 'TeV')
# Determine number of energy bins
n_decades = (emax.log10TeV() - emin.log10TeV())
n_bins = int(n_decades * float(self['etruebins'].integer()) + 0.5)
if n_bins < 1:
n_bins = 1
# Set energy boundaries
ebounds = gammalib.GEbounds(n_bins, emin, emax)
# Write header
self._log_header1(gammalib.TERSE, 'True energy binning')
# Log true energy bins
for i in range(ebounds.size()):
value = '%s - %s' % (str(ebounds.emin(i)), str(ebounds.emax(i)))
self._log_value(gammalib.TERSE, 'Bin %d' % (i+1), value)
# Return energy boundaries
return ebounds
def _set_background_regions(self, obs, obs_off=None):
"""
Set background regions for an observation
Parameters
----------
obs : `~gammalib.GCTAObservation()`
CTA observation
Returns
-------
regions : `~gammalib.GSkyRegions()`
Background regions
"""
# Initialise empty background regions for this observation
bkg_reg = gammalib.GSkyRegions()
# If reflected background is requested then create reflected
# background regions
if self['bkgmethod'].string() == 'REFLECTED':
bkg_reg = self._reflected_regions(obs)
# ... otherwise if custom background is requested then get the
# background regions from the observation. We use a copy here since
# otherwise the background regions go out of scope once the observations
# are replaced by the On/Off observations.
elif self['bkgmethod'].string() == 'CUSTOM':
bkg_reg = obs.off_regions().copy()
# ... otherwise if dedicated Off runs are use then
# use background region that correspond to the same instrument coordinates
if self['bkgmethod'].string() == 'OFF':
bkg_reg = self._instrument_regions(obs,obs_off)
# Return background regions
return bkg_reg
def _process_observation(self,i):
"""
Generate On/Off spectra for individual observation
Parameters
----------
i : int
Observation number
Returns
-------
result : dict
On/Off spectra, background regions, observation id
"""
# Retrieve observation from container
onoff = None
bkg_reg = None
obs = self.obs()[i]
# Retrieve dedicated Off observation if it exists
if not self.obs_off().is_empty():
obs_off = self.obs_off()[i]
# Otherwise use the same as On
else:
obs_off = self.obs()[i]
# Log header
self._log_header3(gammalib.NORMAL,'%s observation "%s"' % \
(obs.instrument(), obs.id()))
# Skip non CTA observations
if obs.classname() != 'GCTAObservation':
self._log_string(gammalib.NORMAL, ' Skip because not a "GCTAObservation"')
# Otherwise calculate On/Off spectra
else:
# Get background model usage flag and log flag
use_model_bkg = self['use_model_bkg'].boolean()
if use_model_bkg:
msg = ' Use background model.'
else:
msg = ' Background model not used, assume constant backround rate.'
self._log_string(gammalib.NORMAL, msg)
# Get offset angle of source
pnt_dir = obs.pointing().dir()
offset = pnt_dir.dist_deg(self._src_dir)
# Skip observation if it is pointed too far from the source
if offset >= self['maxoffset'].real():
msg = ' Skip because observation is pointed at %.3f deg >= ' \
'"maxoffset=%.3f" from source.' \
% (offset, self['maxoffset'].real())
self._log_string(gammalib.NORMAL, msg)
# ... otherwise continue to process
else:
# Set background regions for this observation
bkg_reg = self._set_background_regions(obs,obs_off)
# If there are any background regions then create On/Off observation
# and append it to the output container
if bkg_reg.size() >= 0:
# Create On/Off observation
onoff = gammalib.GCTAOnOffObservation(obs, obs_off,
self._models,
self._srcname,
self._etruebounds,
self._ebounds,
self._src_reg,
bkg_reg,
use_model_bkg)
# Set On/Off observation ID
onoff.id(obs.id())
# Otherwise log observation skipped
else:
msg = ' Skip because no valid Off regions could be determined'
self._log_string(gammalib.NORMAL, msg)
# Construct dictionary with results
result = {'onoff' : onoff,
'bkg_reg' : bkg_reg,
'instrument': obs.instrument(),
'id' : obs.id()}
# Return results
return result
def _unpack_result(self, outobs, result):
"""
Unpack result from calculation of On/Off regions
Parameters
----------
outobs : `~gammalib.GObservations`
Observation container
result : dict
On/Off spectra, background regions, observation id
Returns
-------
outobs : `~gammalib.GObservations`
Observation container with result appended
"""
# Continue only if result is valid
if result['onoff'] != None:
# If the results contain an On/Off observation
if result['onoff'].classname() == 'GCTAOnOffObservation':
# Set statistic according to background model usage
obs = self._set_statistic(result['onoff'])
# Append observation to observation container
outobs.append(obs)
# Append background regions
self._bkg_regs.append({'regions': result['bkg_reg'],
'id': result['id']})
# Return observation container
return outobs
    # Public methods
    def run(self):
        """
        Run the script

        Processes each observation into an On/Off spectrum (optionally in
        parallel), optionally stacks them, sets the fitting models and
        replaces the observation container by the resulting On/Off container.
        """
        # Switch screen logging on in debug mode
        if self._logDebug():
            self._log.cout(True)
        # Get parameters
        self._get_parameters()
        # Write observation into logger
        self._log_observations(gammalib.NORMAL, self.obs(), 'Observation')
        if not self.obs_off().is_empty():
            self._log_observations(gammalib.NORMAL, self._obs_off, 'Off Observation')
        # Set true energy bins
        self._etruebounds = self._etrue_ebounds()
        # Write header
        self._log_header1(gammalib.TERSE, 'Spectral binning')
        # Log reconstructed energy bins
        for i in range(self._ebounds.size()):
            value = '%s - %s' % (str(self._ebounds.emin(i)),
                                 str(self._ebounds.emax(i)))
            self._log_value(gammalib.TERSE, 'Bin %d' % (i+1), value)
        # Write header
        self._log_header1(gammalib.NORMAL,
                          'Generation of source and background spectra')
        # Initialise run variables
        outobs = gammalib.GObservations()
        self._bkg_regs = []
        results = []
        # If there is more than one observation and we use multiprocessing
        if self._nthreads > 1 and self.obs().size() > 1:
            # Compute observations: each worker invokes
            # self._process_observation(i); poolresults[i] holds the result
            # dictionary at index 0 and a dict with the worker log at index 1
            args = [(self, '_process_observation', i)
                    for i in range(self.obs().size())]
            poolresults = mputils.process(self._nthreads, mputils.mpfunc, args)
            # Construct results
            for i in range(self.obs().size()):
                result = poolresults[i][0]
                outobs = self._unpack_result(outobs, result)
                results.append(result)
                self._log_string(gammalib.TERSE, poolresults[i][1]['log'], False)
        # Otherwise, loop through observations and generate pha, arf, rmf files
        else:
            for i in range(self.obs().size()):
                # Process individual observation
                result = self._process_observation(i)
                outobs = self._unpack_result(outobs, result)
                results.append(result)
        # Stack observations
        if outobs.size() > 1 and self['stack'].boolean():
            # Write header
            self._log_header1(gammalib.NORMAL, 'Stacking %d observations' %
                              (outobs.size()))
            # Stack observations
            stacked_obs = gammalib.GCTAOnOffObservation(outobs)
            # Set statistic according to background model usage
            stacked_obs = self._set_statistic(stacked_obs)
            # Put stacked observations in output container
            outobs = gammalib.GObservations()
            outobs.append(stacked_obs)
        # Create models that allow On/Off fitting
        models = self._set_models(results)
        # Set models in output container
        outobs.models(models)
        # Set observation container
        self.obs(outobs)
        # Return
        return
    def save(self):
        """
        Save data

        Writes PHA On/Off spectra, ARF and RMF files for every On/Off
        observation, the observation and model definition XML files, and
        the ds9 On/Off region files.
        """
        # Write header
        self._log_header1(gammalib.TERSE, 'Save data')
        # Get XML output filename, prefix and clobber
        outobs = self['outobs'].filename()
        outmodel = self['outmodel'].filename()
        prefix = self['prefix'].string()
        clobber = self['clobber'].boolean()
        # Loop over all observation in container
        for obs in self.obs():
            # Set filenames; per-observation names carry the observation ID
            # only when there are several unstacked observations
            if self['stack'].boolean():
                onname = prefix + '_stacked_pha_on.fits'
                offname = prefix + '_stacked_pha_off.fits'
                arfname = prefix + '_stacked_arf.fits'
                rmfname = prefix + '_stacked_rmf.fits'
            elif self.obs().size() > 1:
                onname = prefix + '_%s_pha_on.fits' % (obs.id())
                offname = prefix + '_%s_pha_off.fits' % (obs.id())
                arfname = prefix + '_%s_arf.fits' % (obs.id())
                rmfname = prefix + '_%s_rmf.fits' % (obs.id())
            else:
                onname = prefix + '_pha_on.fits'
                offname = prefix + '_pha_off.fits'
                arfname = prefix + '_arf.fits'
                rmfname = prefix + '_rmf.fits'
            # Set background and response file names in On spectrum
            obs.on_spec().backfile(offname)
            obs.on_spec().respfile(rmfname)
            obs.on_spec().ancrfile(arfname)
            # Save files
            obs.on_spec().save(onname, clobber)
            obs.off_spec().save(offname, clobber)
            obs.arf().save(arfname, clobber)
            obs.rmf().save(rmfname, clobber)
            # Stamp files
            self._stamp(onname)
            self._stamp(offname)
            self._stamp(arfname)
            self._stamp(rmfname)
            # Log file names
            self._log_value(gammalib.NORMAL, 'PHA on file', onname)
            self._log_value(gammalib.NORMAL, 'PHA off file', offname)
            self._log_value(gammalib.NORMAL, 'ARF file', arfname)
            self._log_value(gammalib.NORMAL, 'RMF file', rmfname)
        # Save observation definition XML file
        self.obs().save(outobs)
        # Save model definition XML file
        self.obs().models().save(outmodel)
        # Log file names
        self._log_value(gammalib.NORMAL, 'Obs. definition XML file', outobs.url())
        self._log_value(gammalib.NORMAL, 'Model definition XML file', outmodel.url())
        # Save ds9 On region file
        regname = prefix + '_on.reg'
        self._src_reg.save(regname)
        self._log_value(gammalib.NORMAL, 'On region file', regname)
        # Save ds9 Off region files
        for bkg_reg in self._bkg_regs:
            # Set filename
            if len(self._bkg_regs) > 1:
                regname = prefix + '_%s_off.reg' % (bkg_reg['id'])
            else:
                regname = prefix + '_off.reg'
            # Save ds9 region file
            bkg_reg['regions'].save(regname)
            # Log file name
            self._log_value(gammalib.NORMAL, 'Off region file', regname)
        # Return
        return
def exclusion_map(self, object=None):
"""
Return and optionally set the exclusion regions map
Parameters
----------
object : `~gammalib.GSkyRegion` or `~gammalib.GSkyMap` or `~gammalib.GFilename`
Exclusion regions object
Returns
-------
region : `~gammalib.GSkyRegionMap`
Exclusion regions map
"""
# If a regions object is provided then set the regions ...
if object is not None:
self._excl_reg = gammalib.GSkyRegionMap(object)
# Return
return self._excl_reg
def obs_off(self, obs=None):
"""
Return and optionally set the Off observations
Parameters
----------
obs : `~gammalib.GCTAObservations`
Off observations container
Returns
-------
observation container : `~gammalib.GCTAObservations`
Off observations container
"""
# If an observation container is provided then set the Off observations ...
if obs is not None:
self._obs_off = obs
# Return
return self._obs_off
# ======================== #
# Main routine entry point #
# ======================== #
if __name__ == '__main__':

    # Create instance of application, forwarding command line arguments
    app = csphagen(sys.argv)

    # Execute application
    app.execute()
| gpl-3.0 |
uclouvain/osis | base/tests/views/test_learning_unit_proposal.py | 1 | 55572 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import datetime
from unittest import mock
from django.contrib import messages
from django.contrib.messages import get_messages
from django.contrib.messages.storage.fallback import FallbackStorage
from django.http import HttpResponseNotFound, HttpResponse, HttpResponseForbidden
from django.test import TestCase, RequestFactory
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from waffle.testutils import override_flag
from attribution.tests.factories.attribution_charge_new import AttributionChargeNewFactory
from attribution.tests.factories.attribution_new import AttributionNewFactory
from base.business import learning_unit_proposal as proposal_business
from base.business.learning_unit_proposal import INITIAL_DATA_FIELDS, copy_learning_unit_data
from base.forms.learning_unit.edition import LearningUnitProposalEndDateForm
from base.forms.learning_unit_proposal import ProposalLearningUnitForm
from base.models import proposal_learning_unit
from base.models.academic_year import AcademicYear
from base.models.enums import groups
from base.models.enums import learning_component_year_type
from base.models.enums import learning_unit_year_periodicity
from base.models.enums import organization_type, entity_type, \
learning_unit_year_subtypes, proposal_type, learning_container_year_types, proposal_state
from base.models.enums.proposal_state import ProposalState, LimitedProposalState
from base.models.enums.proposal_type import ProposalType
from base.tests.factories import campus as campus_factory, organization as organization_factory, \
person as person_factory
from base.tests.factories.academic_calendar import generate_proposal_calendars, \
generate_proposal_calendars_without_start_and_end_date
from base.tests.factories.academic_year import create_current_academic_year, \
AcademicYearFactory
from base.tests.factories.business.learning_units import GenerateContainer
from base.tests.factories.campus import CampusFactory
from base.tests.factories.entity import EntityFactory
from base.tests.factories.entity_version import EntityVersionFactory
from base.tests.factories.group import CentralManagerGroupFactory, FacultyManagerGroupFactory
from base.tests.factories.learning_component_year import LearningComponentYearFactory
from base.tests.factories.learning_container_year import LearningContainerYearFactory
from base.tests.factories.learning_unit import LearningUnitFactory
from base.tests.factories.learning_unit_year import LearningUnitYearFactory
from base.tests.factories.learning_unit_year import LearningUnitYearFakerFactory
from base.tests.factories.organization import OrganizationFactory
from base.tests.factories.person import PersonFactory
from base.tests.factories.proposal_learning_unit import ProposalLearningUnitFactory
from base.tests.factories.tutor import TutorFactory
from base.tests.factories.user import UserFactory
from base.views.learning_units.proposal.update import update_learning_unit_proposal, \
learning_unit_modification_proposal, learning_unit_suppression_proposal
from base.views.learning_units.search.proposal import ACTION_CONSOLIDATE, ACTION_BACK_TO_INITIAL, ACTION_FORCE_STATE
from learning_unit.tests.factories.central_manager import CentralManagerFactory
from learning_unit.tests.factories.faculty_manager import FacultyManagerFactory
from reference.tests.factories.language import LanguageFactory, FrenchLanguageFactory
# Label shown next to the value a field had before the proposal was made
LABEL_VALUE_BEFORE_PROPOSAL = _('Value before proposal')
@override_flag('learning_unit_proposal_update', active=True)
class TestLearningUnitModificationProposal(TestCase):
    """Tests for the view creating a modification proposal for a learning unit."""

    @classmethod
    def setUpTestData(cls):
        """Build a learning unit year with its container/entities/campus, a
        faculty manager allowed to create proposals, and the POST payload
        shared by the tests below."""
        academic_years = AcademicYearFactory.produce(number_past=3, number_future=10)
        an_organization = OrganizationFactory(type=organization_type.MAIN)
        current_academic_year = create_current_academic_year()
        generate_proposal_calendars_without_start_and_end_date(academic_years)
        cls.entity_version = EntityVersionFactory(
            entity__organization=an_organization,
            entity_type=entity_type.FACULTY,
            start_date=current_academic_year.start_date,
            end_date=current_academic_year.end_date
        )
        # The same faculty entity plays every entity role of the container
        learning_container_year = LearningContainerYearFactory(
            acronym="LOSIS1212",
            academic_year=current_academic_year,
            container_type=learning_container_year_types.COURSE,
            requirement_entity=cls.entity_version.entity,
            allocation_entity=cls.entity_version.entity,
            additional_entity_1=cls.entity_version.entity,
            additional_entity_2=cls.entity_version.entity,
        )
        cls.learning_unit_year = LearningUnitYearFakerFactory(
            acronym=learning_container_year.acronym,
            subtype=learning_unit_year_subtypes.FULL,
            academic_year=current_academic_year,
            learning_container_year=learning_container_year,
            quadrimester=None,
            specific_title_english="title english",
            campus=CampusFactory(organization=an_organization, is_administration=True),
            internship_subtype=None
        )
        cls.person = FacultyManagerFactory(entity=cls.entity_version.entity).person
        cls.url = reverse(learning_unit_modification_proposal, args=[cls.learning_unit_year.id])
        # Valid form payload mirroring the learning unit year above
        cls.form_data = {
            "academic_year": cls.learning_unit_year.academic_year.id,
            "acronym_0": cls.learning_unit_year.acronym[0],
            "acronym_1": cls.learning_unit_year.acronym[1:],
            "common_title": cls.learning_unit_year.learning_container_year.common_title,
            "common_title_english": cls.learning_unit_year.learning_container_year.common_title_english,
            "specific_title": cls.learning_unit_year.specific_title,
            "specific_title_english": cls.learning_unit_year.specific_title_english,
            "container_type": cls.learning_unit_year.learning_container_year.container_type,
            "internship_subtype": "",
            "credits": cls.learning_unit_year.credits,
            "periodicity": cls.learning_unit_year.periodicity,
            "status": cls.learning_unit_year.status,
            "language": cls.learning_unit_year.language.pk,
            "quadrimester": "",
            "campus": cls.learning_unit_year.campus.id,
            "session": cls.learning_unit_year.session,
            "entity": cls.entity_version.id,
            "folder_id": "1",
            "state": proposal_state.ProposalState.FACULTY.name,
            'requirement_entity': cls.entity_version.id,
            'allocation_entity': cls.entity_version.id,
            'additional_entity_1': cls.entity_version.id,
            # NOTE(review): key below looks like a typo of
            # 'additional_entity_2'; the form presumably ignores it -- confirm
            'additionanl_entity_2': cls.entity_version.id,

            # Learning component year data model form
            'component-TOTAL_FORMS': '2',
            'component-INITIAL_FORMS': '0',
            'component-MAX_NUM_FORMS': '2',
            'component-0-hourly_volume_total_annual': 20,
            'component-0-hourly_volume_partial_q1': 10,
            'component-0-hourly_volume_partial_q2': 10,
            'component-0-planned_classes': 1,
            'component-1-hourly_volume_total_annual': 20,
            'component-1-hourly_volume_partial_q1': 10,
            'component-1-hourly_volume_partial_q2': 10,
            'component-1-planned_classes': 1,
        }
        cls.academic_year_for_suppression_proposal = AcademicYear.objects.filter(
            year=cls.learning_unit_year.academic_year.year - 1)

    def setUp(self):
        # Every test runs as the authorised faculty manager
        self.client.force_login(self.person.user)

    def test_user_not_logged(self):
        """Anonymous users are redirected to the login page."""
        self.client.logout()
        response = self.client.get(self.url)
        self.assertRedirects(response, '/login/?next={}'.format(self.url))

    def test_user_has_not_permission(self):
        """A person without the proposal permission gets a 403 page."""
        person = person_factory.PersonFactory()
        self.client.force_login(person.user)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        self.assertTemplateUsed(response, "access_denied.html")

    def test_get_request(self):
        """GET renders the creation form pre-filled with the learning unit data."""
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, HttpResponse.status_code)
        self.assertTemplateUsed(response, 'learning_unit/proposal/create_modification.html')
        self.assertEqual(response.context['learning_unit_year'], self.learning_unit_year)
        self.assertEqual(response.context['person'], self.person)
        self.assertIsInstance(response.context['form_proposal'], ProposalLearningUnitForm)
        luy_initial = response.context['learning_unit_year_form'].initial
        lcy_initial = response.context['learning_container_year_form'].initial
        self.assertEqual(luy_initial['academic_year'], self.learning_unit_year.academic_year.id)
        self.assertEqual(luy_initial['acronym'], [
            self.learning_unit_year.acronym[0], self.learning_unit_year.acronym[1:]])
        self.assertEqual(luy_initial['specific_title'], self.learning_unit_year.specific_title)
        self.assertEqual(lcy_initial['container_type'], self.learning_unit_year.
                         learning_container_year.container_type)
        self.assertEqual(luy_initial['credits'], self.learning_unit_year.credits)
        self.assertEqual(luy_initial['periodicity'], self.learning_unit_year.periodicity)
        self.assertEqual(luy_initial['status'], self.learning_unit_year.status)
        self.assertEqual(luy_initial['language'], self.learning_unit_year.language.pk)
        self.assertEqual(luy_initial['campus'], self.learning_unit_year.campus.id)

    def test_post_request_with_invalid_form(self):
        """An empty POST re-renders the form instead of creating a proposal."""
        response = self.client.post(self.url, data={})
        self.assertEqual(response.status_code, HttpResponse.status_code)
        self.assertTemplateUsed(response, 'learning_unit/proposal/create_modification.html')
        self.assertEqual(response.context['learning_unit_year'], self.learning_unit_year)
        self.assertEqual(response.context['person'], self.person)
        self.assertIsInstance(response.context['form_proposal'], ProposalLearningUnitForm)

    def test_post_request(self):
        """A valid POST creates the proposal, redirects and shows a success message."""
        response = self.client.post(self.url, data=self.form_data)
        redirected_url = reverse("learning_unit", args=[self.learning_unit_year.id])
        self.assertRedirects(response, redirected_url, fetch_redirect_response=False)
        a_proposal_learning_unit = proposal_learning_unit.find_by_learning_unit_year(self.learning_unit_year)
        self.assertTrue(a_proposal_learning_unit)
        self.assertEqual(a_proposal_learning_unit.author, self.person)
        messages_list = [str(message) for message in get_messages(response.wsgi_request)]
        self.assertIn(
            _("You proposed a modification of type %(type)s for the learning unit %(acronym)s.") % {
                'type': proposal_type.ProposalType.MODIFICATION.value,
                'acronym': self.learning_unit_year.acronym
            },
            list(messages_list))

    def test_initial_data_fields(self):
        """The snapshot of fields stored when a proposal is created must not drift."""
        expected_initial_data_fields = {
            'learning_container_year': [
                "id", "acronym", "common_title", "container_type", "in_charge", "common_title_english", "team",
                'requirement_entity', 'allocation_entity', 'additional_entity_1', 'additional_entity_2',
            ],
            'learning_unit': [
                "id", "end_year",
            ],
            'learning_unit_year': [
                "id", "acronym", "specific_title", "internship_subtype", "credits", "campus", "language", "periodicity",
                "status", "professional_integration", "specific_title", "specific_title_english", "quadrimester",
                "session", "faculty_remark", "other_remark", "other_remark_english"
            ],
            'learning_component_year': [
                "id", "acronym", "hourly_volume_total_annual", "hourly_volume_partial_q1", "hourly_volume_partial_q2",
                "planned_classes", "type", "repartition_volume_requirement_entity",
                "repartition_volume_additional_entity_1", "repartition_volume_additional_entity_2"
            ],
        }
        self.assertEqual(expected_initial_data_fields, INITIAL_DATA_FIELDS)

    def test_proposal_already_exists(self):
        """Creating a second proposal for the same learning unit year is forbidden."""
        ProposalLearningUnitFactory(learning_unit_year=self.learning_unit_year)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        self.assertTemplateUsed(response, "access_denied.html")
@override_flag('learning_unit_proposal_update', active=True)
class TestLearningUnitSuppressionProposal(TestCase):
    """Tests for the learning_unit_suppression_proposal view: GET rendering,
    guard conditions, and POST creation of a SUPPRESSION-type proposal."""

    @classmethod
    def setUpTestData(cls):
        # Years run from 3 in the past to 10 in the future, so index 4 is the
        # current year, index 3 the previous and index 5 the next.
        cls.academic_years = AcademicYearFactory.produce(number_past=3, number_future=10)
        an_organization = OrganizationFactory(type=organization_type.MAIN)
        cls.current_academic_year = cls.academic_years[4]
        cls.next_academic_year = cls.academic_years[5]
        cls.previous_academic_year = cls.academic_years[3]
        generate_proposal_calendars(cls.academic_years)
        cls.entity_version = EntityVersionFactory(
            entity__organization=an_organization,
            entity_type=entity_type.FACULTY,
            start_date=cls.academic_years[0].start_date,
            end_date=cls.academic_years[-1].end_date
        )
        # One container per year: the unit exists in the previous and the
        # current academic year (its first year is previous_academic_year).
        learning_container_years = [
            LearningContainerYearFactory(
                academic_year=year,
                container_type=learning_container_year_types.COURSE,
                requirement_entity=cls.entity_version.entity,
                allocation_entity=cls.entity_version.entity,
            ) for year in [cls.previous_academic_year, cls.current_academic_year]
        ]
        cls.learning_unit = LearningUnitFactory(
            start_year=AcademicYear.objects.first(),
            end_year=None
        )
        cls.learning_unit_year = LearningUnitYearFakerFactory(
            acronym="LOSIS1212",
            subtype=learning_unit_year_subtypes.FULL,
            academic_year=cls.current_academic_year,
            learning_container_year=learning_container_years[1],
            quadrimester=None,
            learning_unit=cls.learning_unit,
            campus=CampusFactory(
                organization=an_organization,
                is_administration=True
            ),
            periodicity=learning_unit_year_periodicity.ANNUAL
        )
        cls.previous_learning_unit_year = LearningUnitYearFakerFactory(
            acronym="LOSIS1212",
            subtype=learning_unit_year_subtypes.FULL,
            academic_year=cls.previous_academic_year,
            learning_container_year=learning_container_years[0],
            quadrimester=None,
            learning_unit=cls.learning_unit,
            campus=cls.learning_unit_year.campus,
            periodicity=learning_unit_year_periodicity.ANNUAL
        )
        cls.person = CentralManagerFactory(entity=cls.entity_version.entity).person
        cls.url = reverse(learning_unit_suppression_proposal, args=[cls.learning_unit_year.id])
        # Only the year preceding the learning unit year may be selected as
        # the new end year of a suppression proposal.
        cls.academic_year_for_suppression_proposal = AcademicYear.objects.filter(
            year=cls.learning_unit_year.academic_year.year - 1)
        cls.form_data = {
            "academic_year": cls.academic_year_for_suppression_proposal.first().id,
            "entity": cls.entity_version.id,
            "folder_id": "1",
            "state": ProposalState.FACULTY.name
        }

    def setUp(self):
        self.client.force_login(self.person.user)

    def test_get_request(self):
        """GET renders the suppression form with unbound proposal/end-date forms."""
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, HttpResponse.status_code)
        self.assertTemplateUsed(response, 'learning_unit/proposal/create_suppression.html')
        self.assertEqual(response.context['learning_unit_year'], self.learning_unit_year)
        self.assertEqual(response.context['person'], self.person)
        self.assertIsInstance(response.context['form_proposal'], ProposalLearningUnitForm)
        self.assertIsInstance(response.context['form_end_date'], LearningUnitProposalEndDateForm)
        self.assertCountEqual(
            list(response.context['form_end_date'].fields['academic_year'].queryset),
            list(self.academic_year_for_suppression_proposal)
        )
        form_proposal = response.context['form_proposal']
        form_end_date = response.context['form_end_date']
        self.assertEqual(form_end_date.fields['academic_year'].initial, None)
        self.assertTrue(form_end_date.fields['academic_year'].required)
        self.assertEqual(form_proposal.fields['folder_id'].initial, None)
        self.assertEqual(form_proposal.fields['entity'].initial, None)

    def test_get_request_first_year_of_UE(self):
        """A suppression proposal cannot start on the unit's first year."""
        url = reverse(learning_unit_suppression_proposal, args=[self.previous_learning_unit_year.id])
        response = self.client.get(url)
        redirected_url = reverse("learning_unit", args=[self.previous_learning_unit_year.id])
        self.assertRedirects(response, redirected_url)
        msgs = [str(message) for message in get_messages(response.wsgi_request)]
        self.assertEqual(
            msgs[0],
            _("You cannot put in proposal for ending date on the first year of the learning unit.")
        )

    def test_get_request_on_UE_with_end_date(self):
        """A unit that already has an end year can still be opened for suppression."""
        self.learning_unit.end_year = self.next_academic_year
        self.learning_unit.save()
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, HttpResponse.status_code)

    def test_get_request_academic_year_list_in_form_for_central_manager(self):
        """Central managers see the same restricted academic-year queryset."""
        person_factory.add_person_to_groups(self.person, [groups.CENTRAL_MANAGER_GROUP])
        response = self.client.get(self.url)
        self.assertCountEqual(
            list(response.context['form_end_date'].fields['academic_year'].queryset),
            list(self.academic_year_for_suppression_proposal)
        )

    def test_post_request(self):
        """A valid POST creates the proposal, flashes a success message and
        sets the learning unit's end year to the chosen academic year."""
        response = self.client.post(self.url, data=self.form_data)
        redirected_url = reverse("learning_unit", args=[self.learning_unit_year.id])
        self.assertRedirects(response, redirected_url, fetch_redirect_response=False)
        a_proposal_learning_unit = proposal_learning_unit.find_by_learning_unit_year(self.learning_unit_year)
        self.assertTrue(a_proposal_learning_unit)
        self.assertEqual(a_proposal_learning_unit.author, self.person)
        messages = [str(message) for message in get_messages(response.wsgi_request)]
        self.assertIn(
            _("You proposed a modification of type %(type)s for the learning unit %(acronym)s.") % {
                'type': proposal_type.ProposalType.SUPPRESSION.value,
                'acronym': self.learning_unit_year.acronym
            },
            list(messages)
        )
        self.learning_unit.refresh_from_db()
        self.assertEqual(self.learning_unit.end_year, self.academic_year_for_suppression_proposal.first())
class TestLearningUnitProposalSearch(TestCase):
    """Proposal search view: filtering by acronym/tutor and the force-state
    choices offered to faculty vs central managers."""

    @classmethod
    def setUpTestData(cls):
        AcademicYearFactory.produce(number_past=3, number_future=10)
        cls.person = person_factory.PersonWithPermissionsFactory("can_propose_learningunit", "can_access_learningunit")
        ac_years = AcademicYearFactory.produce_in_future(quantity=3)
        cls.an_entity = EntityFactory()
        cls.entity_version = EntityVersionFactory(entity=cls.an_entity, entity_type=entity_type.SCHOOL,
                                                 start_date=ac_years[0].start_date,
                                                 end_date=ac_years[1].end_date)
        # Three proposals with distinct acronyms to filter against.
        cls.proposals = [_create_proposal_learning_unit("LOSIS1211"),
                         _create_proposal_learning_unit("LOSIS1212"),
                         _create_proposal_learning_unit("LOSIS1213")]

    def setUp(self):
        self.client.force_login(self.person.user)

    def test_learning_units_proposal_search(self):
        """Filtering by acronym returns exactly the matching proposal."""
        url = reverse("learning_units_proposal")
        response = self.client.get(url, data={'acronym': self.proposals[0].learning_unit_year.acronym})
        self.assertEqual(response.context['learning_units_count'], 1)

    def test_learning_units_proposal_search_by_tutor(self):
        """Filtering by tutor name finds proposals through attribution charges."""
        proposal = _create_proposal_learning_unit("LOSIS1214")
        tutor = TutorFactory(person=self.person)
        attribution = AttributionNewFactory(tutor=tutor)
        learning_unit_component = LearningComponentYearFactory(learning_unit_year=proposal.learning_unit_year)
        AttributionChargeNewFactory(attribution=attribution,
                                    learning_component_year=learning_unit_component)
        url = reverse("learning_units_proposal")
        response = self.client.get(url, data={'tutor': self.person.first_name})
        self.assertEqual(response.context['learning_units_count'], 1)

    def test_learning_units_proposal_force_state_available_choices_as_faculty_manager(self):
        """Faculty managers are offered only the limited set of states."""
        url = reverse("learning_units_proposal")
        self.person.user.groups.add(FacultyManagerGroupFactory())
        response = self.client.get(url, data={'acronym': self.proposals[0].learning_unit_year.acronym})
        state_choices = response.context['form_proposal_state'].fields['state'].choices
        self.assertEqual(state_choices, list(LimitedProposalState.choices()))

    def test_learning_units_proposal_force_state_available_choices_as_central_manager(self):
        """Central managers are offered every proposal state."""
        url = reverse("learning_units_proposal")
        self.person.user.groups.add(CentralManagerGroupFactory())
        response = self.client.get(url, data={'acronym': self.proposals[0].learning_unit_year.acronym})
        state_choices = response.context['form_proposal_state'].fields['state'].choices
        self.assertEqual(state_choices, list(ProposalState.choices()))
class TestGroupActionsOnProposals(TestCase):
    """Bulk actions (back-to-initial, consolidate, force-state) posted from
    the proposal search page; business services are mocked so only the
    dispatching logic is exercised."""

    @classmethod
    def setUpTestData(cls):
        AcademicYearFactory.produce(number_past=3, number_future=10)
        cls.person = person_factory.PersonWithPermissionsFactory("can_access_learningunit")
        cls.proposals = [_create_proposal_learning_unit("LOSIS1211"),
                         _create_proposal_learning_unit("LOSIS1212"),
                         _create_proposal_learning_unit("LOSIS1213")]
        cls.url = reverse("learning_units_proposal")
        AcademicYearFactory.produce_in_future(quantity=3)

    def setUp(self):
        self.client.force_login(self.person.user)

    def test_when_no_proposals_selected(self):
        """Posting an action without any selection flashes an error message."""
        response = self.client.post(self.url, data={"action": ACTION_BACK_TO_INITIAL}, follow=True)
        messages = [str(message) for message in response.context["messages"]]
        self.assertIn(_("No proposals was selected."), messages)

    @mock.patch("base.business.learning_unit_proposal.cancel_proposals_and_send_report",
                side_effect=lambda proposals, author, research_criteria: {})
    def test_when_action_is_back_to_initial(self, mock_cancel_proposals):
        """BACK_TO_INITIAL forwards the selected proposals to the cancel service."""
        post_data = {
            "action": ACTION_BACK_TO_INITIAL,
            "selected_action": [self.proposals[0].learning_unit_year.acronym]
        }
        self.client.post(self.url, data=post_data, follow=True)
        # Positional args of the (mocked) service call: proposals, author, criteria.
        proposals, author, research_criteria = mock_cancel_proposals.call_args[0]
        self.assertEqual(list(proposals), [self.proposals[0]])
        self.assertEqual(author, self.person)
        self.assertFalse(research_criteria)

    @mock.patch("base.business.learning_unit_proposal.consolidate_proposals_and_send_report",
                side_effect=lambda proposals, author, research_criteria: {})
    def test_when_action_is_consolidate(self, mock_consolidate):
        """CONSOLIDATE forwards the selected proposals to the consolidation service."""
        post_data = {
            "action": ACTION_CONSOLIDATE,
            "selected_action": [self.proposals[0].learning_unit_year.acronym]
        }
        self.client.post(self.url, data=post_data, follow=True)
        proposals, author, research_criteria = mock_consolidate.call_args[0]
        self.assertEqual(list(proposals), [self.proposals[0]])
        self.assertEqual(author, self.person)
        self.assertFalse(research_criteria)

    @mock.patch("base.business.learning_unit_proposal.force_state_of_proposals",
                side_effect=lambda proposals, author, research_criteria: {})
    def test_when_action_is_force_state_but_no_new_state(self, mock_force_state):
        """FORCE_STATE without a target state must not call the service."""
        post_data = {
            "action": ACTION_FORCE_STATE,
            "selected_action": [self.proposals[0].learning_unit_year.acronym]
        }
        self.client.post(self.url, data=post_data, follow=True)
        self.assertFalse(mock_force_state.called)

    @mock.patch("base.business.learning_unit_proposal.force_state_of_proposals",
                side_effect=lambda proposals, author, research_criteria: {})
    def test_when_action_is_force_state(self, mock_force_state):
        """FORCE_STATE forwards the selected proposals and the new state."""
        post_data = {
            "action": ACTION_FORCE_STATE,
            "selected_action": [self.proposals[0].learning_unit_year.acronym,
                                self.proposals[2].learning_unit_year.acronym],
            "state": proposal_state.ProposalState.ACCEPTED.name
        }
        self.client.post(self.url, data=post_data, follow=True)
        proposals, author, new_state = mock_force_state.call_args[0]
        self.assertCountEqual(list(proposals), [self.proposals[0], self.proposals[2]])
        self.assertEqual(author, self.person)
        self.assertEqual(new_state, proposal_state.ProposalState.ACCEPTED.name)
@override_flag('learning_unit_proposal_delete', active=True)
class TestLearningUnitProposalCancellation(TestCase):
    """Permission checks and data rollback for 'learning_unit_cancel_proposal'."""

    @classmethod
    def setUpTestData(cls):
        academic_year = create_current_academic_year()
        generate_proposal_calendars_without_start_and_end_date([academic_year])
        cls.learning_unit_proposal = _create_proposal_learning_unit("LOSIS1211")
        cls.learning_unit_year = cls.learning_unit_proposal.learning_unit_year
        # Faculty manager attached to the proposal's requirement entity.
        cls.person = FacultyManagerFactory(
            entity=cls.learning_unit_year.learning_container_year.requirement_entity
        ).person

    def setUp(self):
        self.url = reverse('learning_unit_cancel_proposal', args=[self.learning_unit_year.id])
        self.client.force_login(self.person.user)

    def test_user_not_logged(self):
        """Anonymous users are redirected to the login page."""
        self.client.logout()
        response = self.client.get(self.url)
        self.assertRedirects(response, '/login/?next={}'.format(self.url))

    def test_user_has_not_permission(self):
        """A person without the cancel permission gets an access-denied page."""
        person = PersonFactory()
        self.client.force_login(person.user)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        self.assertTemplateUsed(response, "access_denied.html")

    def test_with_non_existent_learning_unit_year(self):
        """Cancelling when the learning unit year was deleted yields 404."""
        # Dedicated proposal/person so deleting them cannot affect the
        # shared class-level fixtures.
        self.learning_unit_proposal_to_delete = _create_proposal_learning_unit("LOSIS1211D")
        self.learning_unit_year_to_delete = self.learning_unit_proposal_to_delete.learning_unit_year
        self.person_to_delete = FacultyManagerFactory(
            entity=self.learning_unit_year_to_delete.learning_container_year.requirement_entity
        ).person
        self.url = reverse('learning_unit_cancel_proposal', args=[self.learning_unit_year_to_delete.id])
        self.client.force_login(self.person_to_delete.user)
        self.learning_unit_year_to_delete.delete()
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, HttpResponseNotFound.status_code)
        self.assertTemplateUsed(response, "page_not_found.html")

    def test_with_none_person(self):
        """A user without an associated Person record gets 403."""
        user = UserFactory()
        self.client.force_login(user)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        self.assertTemplateUsed(response, "access_denied.html")

    def test_with_no_proposal(self):
        """Cancelling when the proposal no longer exists yields 403."""
        self.learning_unit_proposal_to_delete = _create_proposal_learning_unit("LOSIS1211D")
        self.learning_unit_year_to_delete = self.learning_unit_proposal_to_delete.learning_unit_year
        self.person_to_delete = FacultyManagerFactory(
            entity=self.learning_unit_year_to_delete.learning_container_year.requirement_entity
        ).person
        self.url = reverse('learning_unit_cancel_proposal', args=[self.learning_unit_year_to_delete.id])
        self.client.force_login(self.person_to_delete.user)
        self.learning_unit_proposal_to_delete.delete()
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        self.assertTemplateUsed(response, "access_denied.html")

    def test_with_proposal_of_state_different_than_faculty(self):
        """Faculty managers may only cancel FACULTY-state proposals."""
        self.learning_unit_proposal.state = proposal_state.ProposalState.CENTRAL.name
        self.learning_unit_proposal.save()
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        self.assertTemplateUsed(response, "access_denied.html")

    def test_user_not_linked_to_current_requirement_entity(self):
        """A person outside the requirement entity gets 403."""
        person = PersonFactory()
        self.client.force_login(person.user)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        self.assertTemplateUsed(response, "access_denied.html")

    def test_context_after_valid_get_request(self):
        """A valid cancellation redirects and flashes a success message."""
        response = self.client.get(self.url)
        redirected_url = reverse('learning_unit', args=[self.learning_unit_year.id])
        self.assertRedirects(response, redirected_url, fetch_redirect_response=False)
        messages = [str(message) for message in get_messages(response.wsgi_request)]
        self.assertIn(_("Proposal %(acronym)s (%(academic_year)s) successfully canceled.") % {
            "acronym": self.learning_unit_year.acronym,
            "academic_year": self.learning_unit_year.academic_year
        }, messages)

    def test_models_after_cancellation_of_proposal(self):
        """Cancellation restores the models to the proposal's initial_data snapshot."""
        _modify_learning_unit_year_data(self.learning_unit_year)
        _modify_entities_linked_to_learning_container_year(self.learning_unit_year.learning_container_year)
        # Re-attach the manager to the new requirement entity so the
        # cancellation is still permitted after the entity change above.
        new_entity = self.learning_unit_year.learning_container_year.requirement_entity
        FacultyManagerFactory(entity=new_entity, person=self.person)
        self.client.get(self.url)
        self.learning_unit_year.refresh_from_db()
        self.learning_unit_year.learning_container_year.refresh_from_db()
        initial_data = self.learning_unit_proposal.initial_data
        self.assertTrue(_test_attributes_equal(self.learning_unit_year, initial_data["learning_unit_year"]))
        self.assertTrue(_test_attributes_equal(self.learning_unit_year.learning_unit, initial_data["learning_unit"]))
        self.assertTrue(_test_attributes_equal(self.learning_unit_year.learning_container_year,
                                               initial_data["learning_container_year"]))
def _test_attributes_equal(obj, attribute_values_dict):
    """Return True when every entry of *attribute_values_dict* matches the
    corresponding attribute of *obj*.

    Comparison rules:
      * "credits" is compared numerically (both sides coerced to float);
      * foreign-key fields are compared by primary key when the attribute
        is set (truthy);
      * every other field uses plain equality.
    """
    foreign_key_fields = {
        "campus", "language", 'requirement_entity', 'allocation_entity',
        'additional_entity_1', 'additional_entity_2'
    }
    for field_name, expected in attribute_values_dict.items():
        current = getattr(obj, field_name)
        if field_name == "credits":
            if float(current) != float(expected):
                return False
        elif current and field_name in foreign_key_fields:
            # Snapshot stores the related object's pk, not the object itself.
            if current.pk != expected:
                return False
        elif current != expected:
            return False
    return True
def _create_proposal_learning_unit(acronym):
    """Build a FACULTY-state MODIFICATION proposal for a fresh learning unit
    year, with an ``initial_data`` snapshot matching the created objects.

    The snapshot mirrors what production code stores when a proposal is
    opened, so cancellation tests can verify the rollback against it.
    """
    a_learning_unit_year = LearningUnitYearFactory(
        acronym=acronym,
        subtype=learning_unit_year_subtypes.FULL,
        learning_container_year__requirement_entity=EntityVersionFactory().entity,
    )
    # One lecturing and one practical component, both snapshotted below.
    learning_component_lecturing = LearningComponentYearFactory(
        learning_unit_year=a_learning_unit_year,
        type=learning_component_year_type.LECTURING
    )
    learning_component_practical = LearningComponentYearFactory(
        learning_unit_year=a_learning_unit_year,
        type=learning_component_year_type.PRACTICAL_EXERCISES)
    container_year = a_learning_unit_year.learning_container_year
    initial_data = {
        "learning_container_year": {
            "id": container_year.id,
            "acronym": a_learning_unit_year.acronym,
            # NOTE(review): the container snapshot reuses the year's specific
            # titles as common titles -- presumably a test shortcut; confirm.
            "common_title": a_learning_unit_year.specific_title,
            "common_title_english": a_learning_unit_year.specific_title_english,
            "container_type": container_year.container_type,
            "in_charge": container_year.in_charge,
            "requirement_entity": container_year.requirement_entity.id,
            "allocation_entity": None,
            "additional_entity_1": None,
            "additional_entity_2": None,
        },
        "learning_unit_year": {
            "id": a_learning_unit_year.id,
            "acronym": a_learning_unit_year.acronym,
            "specific_title": a_learning_unit_year.specific_title,
            "specific_title_english": a_learning_unit_year.specific_title_english,
            "internship_subtype": a_learning_unit_year.internship_subtype,
            "credits": float(a_learning_unit_year.credits),
            "language": a_learning_unit_year.language.pk,
            "campus": a_learning_unit_year.campus.id,
            "periodicity": a_learning_unit_year.periodicity
        },
        "learning_unit": {
            "id": a_learning_unit_year.learning_unit.id,
        },
        "learning_component_years": [
            {"id": learning_component_lecturing.id, "planned_classes": learning_component_lecturing.planned_classes,
             "hourly_volume_partial_q1": learning_component_lecturing.hourly_volume_partial_q1,
             "hourly_volume_partial_q2": learning_component_lecturing.hourly_volume_partial_q2,
             "hourly_volume_total_annual": learning_component_lecturing.hourly_volume_total_annual
             },
            {"id": learning_component_practical.id, "planned_classes": learning_component_practical.planned_classes,
             "hourly_volume_partial_q1": learning_component_practical.hourly_volume_partial_q1,
             "hourly_volume_partial_q2": learning_component_practical.hourly_volume_partial_q2,
             "hourly_volume_total_annual": learning_component_practical.hourly_volume_total_annual
             }
        ]
    }
    return ProposalLearningUnitFactory(learning_unit_year=a_learning_unit_year,
                                       type=proposal_type.ProposalType.MODIFICATION.name,
                                       state=proposal_state.ProposalState.FACULTY.name,
                                       initial_data=initial_data,
                                       entity=container_year.requirement_entity)
def _modify_learning_unit_year_data(a_learning_unit_year):
    """Mutate a LearningUnitYear (and its container) away from its snapshot
    so cancellation tests can verify the rollback restores the original data."""
    a_learning_unit_year.acronym = "LNEW456"
    a_learning_unit_year.credits = 123
    a_learning_unit_year.specific_title = "New title"
    a_learning_unit_year.specific_title_english = "New english title"
    a_learning_unit_year.language = LanguageFactory()
    a_learning_unit_year.save()
    container_year = a_learning_unit_year.learning_container_year
    container_year.campus = CampusFactory()
    container_year.save()
def _modify_entities_linked_to_learning_container_year(a_learning_container_year):
    """Point the container's requirement entity at a brand-new Entity so
    rollback tests can check it is restored to the snapshotted one."""
    a_learning_container_year.requirement_entity = EntityFactory()
    a_learning_container_year.save()
@override_flag('learning_unit_proposal_update', active=True)
class TestEditProposal(TestCase):
    """Tests for update_learning_unit_proposal (modification and suppression
    variants): permissions, form rendering and POST handling."""

    @classmethod
    def setUpTestData(cls):
        today = datetime.date.today()
        cls.academic_years = AcademicYearFactory.produce_in_future(quantity=5)
        cls.current_academic_year = cls.academic_years[0]
        end_year = AcademicYearFactory(year=cls.current_academic_year.year + 10)
        generate_proposal_calendars(cls.academic_years)
        cls.language = FrenchLanguageFactory()
        cls.organization = organization_factory.OrganizationFactory(type=organization_type.MAIN)
        cls.campus = campus_factory.CampusFactory(organization=cls.organization, is_administration=True)
        cls.entity = EntityFactory(organization=cls.organization)
        cls.entity_version = EntityVersionFactory(entity=cls.entity, entity_type=entity_type.FACULTY,
                                                 start_date=today.replace(year=1900),
                                                 end_date=None)
        cls.generated_container = GenerateContainer(cls.current_academic_year, end_year, parent_entity=cls.entity)
        cls.generated_container_first_year = cls.generated_container.generated_container_years[1]
        cls.learning_unit_year = cls.generated_container_first_year.learning_unit_year_full
        cls.requirement_entity_of_luy = cls.generated_container_first_year.requirement_entity_container_year
        cls.person = FacultyManagerFactory(entity=cls.entity, with_child=True).person
        cls.url = reverse(update_learning_unit_proposal, args=[cls.learning_unit_year.id])
        # Only the year preceding the learning unit year is a valid end year
        # for a suppression proposal.
        cls.academic_year_for_suppression_proposal = AcademicYear.objects.filter(
            year=cls.learning_unit_year.academic_year.year - 1)

    def setUp(self):
        # A fresh FACULTY-state modification proposal per test (tests mutate it).
        self.proposal = ProposalLearningUnitFactory(learning_unit_year=self.learning_unit_year,
                                                    state=ProposalState.FACULTY.name,
                                                    folder_id=1,
                                                    entity=self.entity,
                                                    type=proposal_type.ProposalType.MODIFICATION.name)
        self.client.force_login(self.person.user)

    def test_edit_proposal_get_no_permission(self):
        """Users without any proposal permission get an access-denied page."""
        person = person_factory.PersonFactory()
        self.client.force_login(person.user)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        self.assertTemplateUsed(response, 'access_denied.html')

    def test_edit_proposal_get_regular_user_with_permission(self):
        """A faculty manager from an unrelated entity still gets 403."""
        person = FacultyManagerFactory().person
        self.client.force_login(person.user)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        self.assertTemplateUsed(response, 'access_denied.html')

    def test_edit_proposal_get_as_faculty_manager(self):
        """The entity's own faculty manager sees the modification form."""
        response = self.client.get(self.url)
        self.assertTemplateUsed(response, 'learning_unit/proposal/update_modification.html')
        self.assertIsInstance(response.context['form_proposal'], ProposalLearningUnitForm)

    def test_edit_proposal_get_as_central_manager_with_instance(self):
        """Central managers get the form pre-filled with the proposal state."""
        central_manager = person_factory.CentralManagerForUEFactory("can_edit_learning_unit_proposal")
        self.client.logout()
        self.client.force_login(central_manager.user)
        response = self.client.get(self.url)
        self.assertTemplateUsed(response, 'learning_unit/proposal/update_modification.html')
        self.assertIsInstance(response.context['form_proposal'], ProposalLearningUnitForm)
        self.assertEqual(response.context['form_proposal'].initial['state'], str(ProposalState.FACULTY.name))

    def get_valid_data(self):
        """Minimal valid POST payload for the modification form, including
        the learning-component management formset."""
        return {
            'acronym_0': 'L',
            'acronym_1': 'TAU2000',
            "subtype": learning_unit_year_subtypes.FULL,
            "container_type": learning_container_year_types.COURSE,
            "academic_year": self.current_academic_year.id,
            "status": True,
            "credits": "5",
            "campus": self.campus.id,
            "common_title": "Common UE title",
            "language": self.language.pk,
            "periodicity": learning_unit_year_periodicity.ANNUAL,
            "entity": self.entity_version.pk,
            "folder_id": 1,
            'requirement_entity':
                self.entity_version.pk,
            'allocation_entity':
                self.entity_version.pk,
            'additional_entity_1': '',
            # Learning component year data model form
            'component-TOTAL_FORMS': '2',
            'component-INITIAL_FORMS': '0',
            'component-MAX_NUM_FORMS': '2',
            'component-0-hourly_volume_total_annual': 20,
            'component-0-hourly_volume_partial_q1': 10,
            'component-0-hourly_volume_partial_q2': 10,
            'component-0-planned_classes': 1,
            'component-1-hourly_volume_total_annual': 20,
            'component-1-hourly_volume_partial_q1': 10,
            'component-1-hourly_volume_partial_q2': 10,
            'component-1-planned_classes': 1,
        }

    def get_modify_data(self):
        """Valid payload that additionally tries to force the CENTRAL state."""
        modifydict = dict(self.get_valid_data())
        modifydict["state"] = ProposalState.CENTRAL.value
        return modifydict

    def get_faulty_data(self):
        """Payload carrying an invalid proposal state value."""
        faultydict = dict(self.get_valid_data())
        faultydict["state"] = "bad_choice"
        return faultydict

    def test_edit_proposal_post_as_faculty_manager(self):
        """A faculty manager's POST succeeds but cannot change the state."""
        initial_data = copy_learning_unit_data(self.learning_unit_year)
        self.proposal.initial_data = initial_data
        request_factory = RequestFactory()
        request = request_factory.post(self.url, data=self.get_modify_data())
        request.user = self.person.user
        request.session = self.client.session
        # Attach a message storage so the view can flash messages.
        request._messages = FallbackStorage(request)
        update_learning_unit_proposal(request, learning_unit_year_id=self.learning_unit_year.id)
        msg = [m.message for m in get_messages(request)]
        msg_level = [m.level for m in get_messages(request)]
        self.assertIn(messages.SUCCESS, msg_level, msg)
        self.assertEqual(len(msg), 1)
        self.proposal.refresh_from_db()
        # State stays FACULTY: faculty managers may not force CENTRAL.
        self.assertEqual(self.proposal.state, 'FACULTY')

    def test_edit_proposal_post_wrong_data(self):
        """An invalid state choice re-renders the form with exactly one error."""
        self.person.user.groups.add(CentralManagerGroupFactory())
        response = self.client.post(self.url, data=self.get_faulty_data())
        self.assertTemplateUsed(response, 'learning_unit/proposal/update_modification.html')
        self.assertIsInstance(response.context['form_proposal'], ProposalLearningUnitForm)
        form = response.context['form_proposal']
        self.assertEqual(len(form.errors), 1)
        self.proposal.refresh_from_db()
        self.assertEqual(self.proposal.state, ProposalState.FACULTY.name)

    def test_edit_suppression_proposal_get(self):
        """Suppression proposals render the suppression template with the
        restricted academic-year queryset in the end-date form."""
        self.proposal.type = ProposalType.SUPPRESSION.name
        self.proposal.save()
        response = self.client.get(self.url)
        self.assertTemplateUsed(response, 'learning_unit/proposal/update_suppression.html')
        self.assertIsInstance(response.context['form_end_date'], LearningUnitProposalEndDateForm)
        self.assertCountEqual(
            list(response.context['form_end_date'].fields['academic_year'].queryset),
            list(self.academic_year_for_suppression_proposal)
        )
        self.assertIsInstance(response.context['form_proposal'], ProposalLearningUnitForm)

    def test_edit_suppression_proposal_post(self):
        """A valid suppression POST updates the proposal (e.g. folder_id)."""
        self.proposal.type = ProposalType.SUPPRESSION.name
        self.proposal.save()
        request_factory = RequestFactory()
        request = request_factory.post(self.url,
                                       data={"academic_year": self.academic_year_for_suppression_proposal.first().id,
                                             "entity": self.entity_version.id,
                                             "folder_id": 12})
        request.user = self.person.user
        # NOTE(review): a plain string stands in for a session object --
        # presumably FallbackStorage only needs the attribute to exist; confirm.
        request.session = 'session'
        request._messages = FallbackStorage(request)
        update_learning_unit_proposal(request, learning_unit_year_id=self.learning_unit_year.id)
        msg = [m.message for m in get_messages(request)]
        msg_level = [m.level for m in get_messages(request)]
        self.assertEqual(len(msg), 1)
        self.assertIn(messages.SUCCESS, msg_level)
        self.proposal.refresh_from_db()
        self.assertEqual(self.proposal.folder_id, 12)

    def test_edit_suppression_proposal_wrong_post(self):
        """An incomplete suppression POST (missing folder_id) stays on the page."""
        self.proposal.type = ProposalType.SUPPRESSION.name
        self.proposal.save()
        response = self.client.post(self.url, data={"academic_year": self.academic_years[3].id,
                                                    "entity": self.entity_version.id})
        self.assertEqual(self.url, response.request['PATH_INFO'])
class TestLearningUnitProposalDisplay(TestCase):
@classmethod
def setUpTestData(cls):
cls.language_pt = LanguageFactory(code='PT', name="Portugais")
cls.language_it = LanguageFactory(code='IT', name="Italien")
cls.campus = CampusFactory()
cls.academic_year = create_current_academic_year()
cls.l_container_year = LearningContainerYearFactory(
acronym="LBIR1212",
academic_year=cls.academic_year,
)
cls.learning_unit = LearningUnitFactory(learning_container=cls.l_container_year.learning_container)
cls.learning_unit_yr = LearningUnitYearFactory(
acronym="LBIR1212",
learning_unit=cls.learning_unit,
learning_container_year=cls.l_container_year,
academic_year=cls.academic_year,
subtype=learning_unit_year_subtypes.FULL,
status=True,
quadrimester="Q3",
credits=4,
campus=cls.campus,
language=cls.language_pt,
periodicity=learning_unit_year_periodicity.BIENNIAL_EVEN
)
cls.proposal_learning_unit = ProposalLearningUnitFactory(learning_unit_year=cls.learning_unit_yr)
cls.initial_credits = 3.0
cls.initial_quadrimester = 'Q1'
cls.initial_language = cls.language_it.pk
cls.initial_periodicity = learning_unit_year_periodicity.ANNUAL
cls.initial_data_learning_unit_year = {'credits': cls.initial_credits, 'periodicity': cls.initial_periodicity}
organization_main = OrganizationFactory(type=organization_type.MAIN)
cls.entity_from_main_organization = EntityFactory(organization=organization_main)
cls.entity_version = EntityVersionFactory(entity=cls.entity_from_main_organization)
organization_not_main = OrganizationFactory(type=organization_type.ACADEMIC_PARTNER)
cls.entity_from_not_main_organization = EntityFactory(organization=organization_not_main)
cls.entity_version_not_main = EntityVersionFactory(entity=cls.entity_from_not_main_organization)
cls.initial_language_en = cls.language_it
end_year = AcademicYearFactory(year=cls.academic_year.year + 1)
cls.generator_learning_container = GenerateContainer(start_year=cls.academic_year, end_year=end_year)
cls.l_container_year_with_entities = cls.generator_learning_container.generated_container_years[0]
def test_is_foreign_key(self):
current_data = {"language{}".format(proposal_business.END_FOREIGN_KEY_NAME): self.language_it.pk}
self.assertTrue(proposal_business._is_foreign_key("language", current_data))
def test_is_not_foreign_key(self):
current_data = {"credits": self.language_it.pk}
self.assertFalse(proposal_business._is_foreign_key("credits", current_data))
def test_check_differences(self):
proposal = ProposalLearningUnitFactory()
proposal.initial_data = {'learning_unit_year': {
'credits': self.initial_credits
}}
proposal.learning_unit_year.credits = self.learning_unit_yr.credits
differences = proposal_business.get_difference_of_proposal(proposal, proposal.learning_unit_year)
self.assertEqual(float(differences.get('credits')), self.initial_credits)
def test_get_the_old_value(self):
differences = proposal_business._get_the_old_value('credits',
{"credits": self.initial_credits + 1},
{'credits': self.initial_credits})
self.assertEqual(differences, "{}".format(self.initial_credits))
def test_get_the_old_value_no_initial_value(self):
differences = proposal_business._get_the_old_value('credits',
{"credits": self.initial_credits + 1},
{})
self.assertEqual(differences, proposal_business.NO_PREVIOUS_VALUE)
def test_get_the_old_value_for_foreign_key(self):
initial_data_learning_unit_year = {'language': self.language_pt.pk}
current_data = {"language_id": self.language_it.pk}
differences = proposal_business._get_the_old_value('language',
current_data,
initial_data_learning_unit_year)
self.assertEqual(differences, str(self.language_pt))
def test_get_the_old_value_for_foreign_key_no_previous_value(self):
initial_data = {"language": None}
current_data = {"language_id": self.language_it.pk}
differences = proposal_business._get_the_old_value('language', current_data, initial_data)
self.assertEqual(differences, proposal_business.NO_PREVIOUS_VALUE)
initial_data = {}
differences = proposal_business._get_the_old_value('language', current_data, initial_data)
self.assertEqual(differences, proposal_business.NO_PREVIOUS_VALUE)
def test_get_the_old_value_with_translation(self):
key = proposal_business.VALUES_WHICH_NEED_TRANSLATION[0]
initial_data = {key: learning_unit_year_periodicity.ANNUAL}
current_data = {key: learning_unit_year_periodicity.BIENNIAL_EVEN}
differences = proposal_business._get_the_old_value(key, current_data, initial_data)
self.assertEqual(differences, _(learning_unit_year_periodicity.ANNUAL))
def test_get_str_representing_old_data_from_foreign_key(self):
differences = proposal_business._get_str_representing_old_data_from_foreign_key('campus', self.campus.id)
self.assertEqual(differences, str(self.campus.name))
def test_get_str_representing_old_data_from_foreign_key_equals_no_value(self):
differences = proposal_business._get_str_representing_old_data_from_foreign_key(
'campus',
proposal_business.NO_PREVIOUS_VALUE)
self.assertEqual(differences, proposal_business.NO_PREVIOUS_VALUE)
def test_get_old_value_of_foreign_key_for_campus(self):
differences = proposal_business._get_old_value_of_foreign_key('campus', self.campus.id)
self.assertEqual(differences, str(self.campus.name))
def test_get_old_value_of_foreign_key_for_language(self):
differences = proposal_business._get_old_value_of_foreign_key('language', self.language_it.pk)
self.assertEqual(differences, str(self.language_it))
def test_get_old_value_of_foreign_key_for_additional_requirement_entity_main_organization(self):
    """An entity of the main organization is shown by its most recent version's acronym."""
    differences = proposal_business._get_old_value_of_foreign_key('ADDITIONAL_REQUIREMENT_ENTITY_1',
                                                                  self.entity_from_main_organization.pk)
    self.assertEqual(differences, str(self.entity_from_main_organization.most_recent_entity_version.acronym))
def test_get_old_value_of_foreign_key_for_additional_requirement_entity_not_main_organization(self):
    """An entity outside the main organization is shown by its most recent version's title."""
    differences = proposal_business._get_old_value_of_foreign_key('ADDITIONAL_REQUIREMENT_ENTITY_1',
                                                                  self.entity_from_not_main_organization.pk)
    self.assertEqual(differences, str(self.entity_from_not_main_organization.most_recent_entity_version.title))
def test_get_status_initial_value(self):
    """The boolean status flag maps to the ACTIVE / INACTIVE display labels."""
    self.assertEqual(proposal_business._get_status_initial_value(True),
                     proposal_business.LABEL_ACTIVE)
    self.assertEqual(proposal_business._get_status_initial_value(False),
                     proposal_business.LABEL_INACTIVE)
def test_get_old_value_for_periodicity(self):
    """The old periodicity is rendered via its display label from PERIODICITY_TYPES."""
    differences = proposal_business._get_the_old_value('periodicity',
                                                       {"periodicity": self.learning_unit_yr.periodicity},
                                                       {'periodicity': self.initial_periodicity})
    self.assertEqual(differences,
                     dict(learning_unit_year_periodicity.PERIODICITY_TYPES)[self.initial_periodicity])
@override_flag('learning_unit_proposal_delete', active=True)
class TestCreationProposalCancel(TestCase):
    """End-to-end check of the proposal-cancellation view."""

    @mock.patch('base.utils.send_mail.send_mail_cancellation_learning_unit_proposals')
    def test_cancel_proposal_of_learning_unit(self, mock_send_mail):
        """Cancelling a proposal redirects to the learning unit page and sends a mail."""
        a_proposal = _create_proposal_learning_unit("LOSIS1211")
        luy = a_proposal.learning_unit_year
        url = reverse('learning_unit_cancel_proposal', args=[luy.id])
        generate_proposal_calendars_without_start_and_end_date([luy.academic_year])
        # A central manager attached to the requirement entity may cancel proposals.
        self.central_manager = CentralManagerFactory(entity=luy.learning_container_year.requirement_entity)
        self.client.force_login(self.central_manager.person.user)
        response = self.client.post(url, data={})
        redirected_url = reverse('learning_unit', args=[luy.id])
        msgs = [str(message) for message in get_messages(response.wsgi_request)]
        # fetch_redirect_response=False: the redirect target itself is not re-rendered.
        self.assertRedirects(response, redirected_url, fetch_redirect_response=False)
        self.assertEqual(len(msgs), 2)
        self.assertTrue(mock_send_mail.called)
| agpl-3.0 |
andmos/ansible | lib/ansible/modules/cloud/vmware/vmware_vm_vm_drs_rule.py | 55 | 12538 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_vm_vm_drs_rule
short_description: Configure VMware DRS Affinity rule for virtual machine in given cluster
description:
- This module can be used to configure VMware DRS Affinity rule for virtual machine in given cluster.
version_added: 2.5
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
cluster_name:
description:
- Desired cluster name where virtual machines are present for the DRS rule.
required: True
vms:
description:
- List of virtual machines name for which DRS rule needs to be applied.
- Required if C(state) is set to C(present).
drs_rule_name:
description:
- The name of the DRS rule to manage.
required: True
enabled:
description:
- If set to C(True), the DRS rule will be enabled.
- Effective only if C(state) is set to C(present).
default: False
type: bool
mandatory:
description:
- If set to C(True), the DRS rule will be mandatory.
- Effective only if C(state) is set to C(present).
default: False
type: bool
affinity_rule:
description:
- If set to C(True), the DRS rule will be an Affinity rule.
- If set to C(False), the DRS rule will be an Anti-Affinity rule.
- Effective only if C(state) is set to C(present).
default: True
type: bool
state:
description:
- If set to C(present), then the DRS rule is created if not present.
- If set to C(present), then the DRS rule is deleted and created if present already.
- If set to C(absent), then the DRS rule is deleted if present.
required: False
default: present
choices: [ present, absent ]
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Create DRS Affinity Rule for VM-VM
vmware_vm_vm_drs_rule:
hostname: "{{ esxi_server }}"
username: "{{ esxi_username }}"
password: "{{ esxi_password }}"
cluster_name: "{{ cluster_name }}"
validate_certs: no
vms:
- vm1
- vm2
drs_rule_name: vm1-vm2-affinity-rule-001
enabled: True
mandatory: True
affinity_rule: True
delegate_to: localhost
- name: Create DRS Anti-Affinity Rule for VM-VM
vmware_vm_vm_drs_rule:
hostname: "{{ esxi_server }}"
username: "{{ esxi_username }}"
password: "{{ esxi_password }}"
cluster_name: "{{ cluster_name }}"
validate_certs: no
vms:
- vm1
- vm2
drs_rule_name: vm1-vm2-affinity-rule-001
enabled: True
mandatory: True
affinity_rule: False
delegate_to: localhost
- name: Delete DRS Affinity Rule for VM-VM
vmware_vm_vm_drs_rule:
hostname: "{{ esxi_server }}"
username: "{{ esxi_username }}"
password: "{{ esxi_password }}"
cluster_name: "{{ cluster_name }}"
validate_certs: no
drs_rule_name: vm1-vm2-affinity-rule-001
state: absent
delegate_to: localhost
'''
RETURN = r'''
result:
description: metadata about DRS VM and VM rule
returned: when state is present
type: dict
sample: {
"rule_enabled": false,
"rule_key": 20,
"rule_mandatory": true,
"rule_name": "drs_rule_0014",
"rule_uuid": "525f3bc0-253f-825a-418e-2ec93bffc9ae",
"rule_vms": [
"VM_65",
"VM_146"
]
}
'''
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.vmware import (PyVmomi, vmware_argument_spec, wait_for_task,
find_vm_by_id, find_cluster_by_name)
class VmwareDrs(PyVmomi):
    """Manage a VM-VM DRS (affinity / anti-affinity) rule in a given cluster.

    The vCenter API does not allow editing an existing rule in place, so an
    update is implemented as delete-then-recreate (see create()).
    """

    def __init__(self, module):
        super(VmwareDrs, self).__init__(module)
        self.vm_list = module.params['vms']
        self.cluster_name = module.params['cluster_name']
        self.rule_name = module.params['drs_rule_name']
        self.enabled = module.params['enabled']
        self.mandatory = module.params['mandatory']
        self.affinity_rule = module.params['affinity_rule']
        self.state = module.params['state']

        # Sanity check for cluster: fail early if the cluster does not exist.
        self.cluster_obj = find_cluster_by_name(content=self.content,
                                                cluster_name=self.cluster_name)
        if self.cluster_obj is None:
            self.module.fail_json(msg="Failed to find the cluster %s" % self.cluster_name)
        # Sanity check for virtual machines: only resolved when creating,
        # since 'vms' is not required for state=absent.
        self.vm_obj_list = []
        if self.state == 'present':
            # Get list of VMs only if state is present
            self.vm_obj_list = self.get_all_vms_info()

    # Getter
    def get_all_vms_info(self, vms_list=None):
        """Resolve VM names to managed objects within the cluster.

        Args:
            vms_list: List of VM names (defaults to the module's 'vms' parameter).

        Returns:
            List of VM managed objects; fails the module if any name is unknown.
        """
        if vms_list is None:
            vms_list = self.vm_list

        vm_obj_list = []
        for vm_name in vms_list:
            vm_obj = find_vm_by_id(content=self.content, vm_id=vm_name,
                                   vm_id_type='vm_name', cluster=self.cluster_obj)
            if vm_obj is None:
                self.module.fail_json(msg="Failed to find the virtual machine %s "
                                          "in given cluster %s" % (vm_name,
                                                                   self.cluster_name))
            vm_obj_list.append(vm_obj)
        return vm_obj_list

    def get_rule_key_by_name(self, cluster_obj=None, rule_name=None):
        """Find a DRS rule object by name.

        Args:
            cluster_obj: Cluster managed object (defaults to the module's cluster).
            rule_name: Name of the rule to look up.

        Returns:
            The rule object if found, else None.
        """
        if cluster_obj is None:
            cluster_obj = self.cluster_obj

        if rule_name:
            rules_list = [rule for rule in cluster_obj.configuration.rule if rule.name == rule_name]
            if rules_list:
                return rules_list[0]
        # No rule found
        return None

    @staticmethod
    def normalize_rule_spec(rule_obj=None):
        """Return a human-readable dict describing a rule object.

        Args:
            rule_obj: Rule managed object (None yields an empty dict).

        Returns:
            Dictionary with the rule's key, state, name, UUID, VM names and
            whether it is an affinity rule.
        """
        if rule_obj is None:
            return {}
        return dict(rule_key=rule_obj.key,
                    rule_enabled=rule_obj.enabled,
                    rule_name=rule_obj.name,
                    rule_mandatory=rule_obj.mandatory,
                    rule_uuid=rule_obj.ruleUuid,
                    rule_vms=[vm.name for vm in rule_obj.vm],
                    rule_affinity=isinstance(rule_obj, vim.cluster.AffinityRuleSpec),
                    )

    # Create
    def create(self):
        """Ensure the DRS rule exists with the requested settings.

        If an identical rule already exists, exits the module with changed=False.
        Otherwise any same-named rule is deleted first (rules cannot be edited)
        and a new one is created.

        Returns:
            (changed, result) tuple from the create operation.
        """
        rule_obj = self.get_rule_key_by_name(rule_name=self.rule_name)
        if rule_obj is not None:
            # Rule already exists, remove and create again
            # Cluster does not allow editing existing rule
            existing_rule = self.normalize_rule_spec(rule_obj=rule_obj)
            if ((sorted(existing_rule['rule_vms']) == sorted(self.vm_list)) and
                    (existing_rule['rule_enabled'] == self.enabled) and
                    (existing_rule['rule_mandatory'] == self.mandatory) and
                    (existing_rule['rule_affinity'] == self.affinity_rule)):
                # Rule is same as existing rule, evacuate
                self.module.exit_json(changed=False, result=existing_rule)
            # Delete existing rule as we cannot edit it
            changed, result = self.delete(rule_name=self.rule_name)
            if not changed:
                self.module.fail_json(msg="Failed to delete while updating rule %s due to %s" % (self.rule_name, result))
        changed, result = self.create_rule_spec()
        return changed, result

    def create_rule_spec(self):
        """Create the DRS rule via a cluster reconfigure task.

        Returns:
            (changed, result) tuple; on success, result is the normalized
            spec of the newly created rule.
        """
        changed = False
        if self.affinity_rule:
            rule = vim.cluster.AffinityRuleSpec()
        else:
            rule = vim.cluster.AntiAffinityRuleSpec()

        rule.vm = self.vm_obj_list
        rule.enabled = self.enabled
        rule.mandatory = self.mandatory
        rule.name = self.rule_name

        rule_spec = vim.cluster.RuleSpec(info=rule, operation='add')
        config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])

        try:
            task = self.cluster_obj.ReconfigureEx(config_spec, modify=True)
            changed, result = wait_for_task(task)
        except vmodl.fault.InvalidRequest as e:
            result = to_native(e.msg)
        except Exception as e:
            result = to_native(e)

        if changed:
            rule_obj = self.get_rule_key_by_name(rule_name=self.rule_name)
            result = self.normalize_rule_spec(rule_obj)

        return changed, result

    # Delete
    def delete(self, rule_name=None):
        """Delete a DRS rule by name.

        Args:
            rule_name: Name of the rule to delete (defaults to the module's rule).

        Returns:
            (changed, result) tuple; result carries an explanatory message when
            nothing was deleted.
        """
        changed = False
        if rule_name is None:
            rule_name = self.rule_name
        rule = self.get_rule_key_by_name(rule_name=rule_name)
        if rule is not None:
            rule_key = int(rule.key)
            rule_spec = vim.cluster.RuleSpec(removeKey=rule_key, operation='remove')
            config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])
            try:
                task = self.cluster_obj.ReconfigureEx(config_spec, modify=True)
                changed, result = wait_for_task(task)
            except vmodl.fault.InvalidRequest as e:
                result = to_native(e.msg)
            except Exception as e:
                result = to_native(e)
        else:
            # Bug fix: report the name that was actually looked up (the
            # rule_name argument), not unconditionally self.rule_name.
            result = 'No rule named %s exists' % rule_name
        return changed, result
def main():
    """Ansible entry point: parse parameters and apply the requested rule state."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(
        state=dict(type='str', default='present', choices=['absent', 'present']),
        vms=dict(type='list'),
        cluster_name=dict(type='str', required=True),
        drs_rule_name=dict(type='str', required=True),
        enabled=dict(type='bool', default=False),
        mandatory=dict(type='bool', default=False),
        affinity_rule=dict(type='bool', default=True),
    )
    )

    # 'vms' is only required when a rule is being created.
    required_if = [
        ['state', 'present', ['vms']],
    ]

    module = AnsibleModule(argument_spec=argument_spec,
                           required_if=required_if,
                           supports_check_mode=True)

    results = dict(failed=False, changed=False)
    state = module.params['state']
    vm_drs = VmwareDrs(module)

    if state == 'present':
        # Add Rule
        if module.check_mode:
            # Check mode: report "would change" without touching vCenter.
            results['changed'] = True
            module.exit_json(**results)
        changed, result = vm_drs.create()
        if changed:
            results['changed'] = changed
        else:
            results['failed'] = True
            results['msg'] = "Failed to create DRS rule %s" % vm_drs.rule_name
        results['result'] = result
    elif state == 'absent':
        # Delete Rule
        if module.check_mode:
            results['changed'] = True
            module.exit_json(**results)
        changed, result = vm_drs.delete()
        if changed:
            results['changed'] = changed
            results['msg'] = "DRS rule %s deleted successfully." % vm_drs.rule_name
        else:
            if "No rule named" in result:
                # Deleting a non-existent rule is treated as success (idempotent absent).
                results['msg'] = result
                module.exit_json(**results)
            results['failed'] = True
            results['msg'] = "Failed to delete DRS rule %s" % vm_drs.rule_name
            results['result'] = result

    if results['changed']:
        module.exit_json(**results)
    if results['failed']:
        module.fail_json(**results)
| gpl-3.0 |
christianwengert/mailclient | src/bin/__init__.py | 1 | 2230 | # coding=utf-8
import subprocess
from imapclient import IMAPClient
HOST = 'mail.netzone.ch'
USERNAME = 'christian@wengert.ch'
PASSWORD = subprocess.check_output(["/usr/local/bin/pass", "mail/christian@wengert.ch"])
PASSWORD = PASSWORD.split()[0].decode('utf8')
KEYMAPPING = {}
ssl = True
class Signature():
    # Placeholder for an e-mail signature; not implemented yet.
    pass


class Account():
    # Planned fields: username, password, First name, Name, host, port, ssl
    pass


class Mailbox():
    # Planned fields: name, account
    pass


class Search():
    # Saved-search criteria; values below are placeholders, not yet used.
    # NOTE(review): 'searchtemrs' looks like a typo for 'searchterms' -- confirm
    # before renaming, in case other code references it.
    flags = ''
    searchtemrs = ''
    date = ''


class Message():
    # In-memory representation of a single mail message.
    id = ''
    flags = ''  # is replied and forwarded here?
    attachments = ''
    subject = ''
    content = ''
    date = ''
    mailbox = ''
    label = ''
# --- Planned mail-client API surface: every function below is an
# --- unimplemented stub (TODO).

def save_search():
    # TODO: persist a Search for later reuse
    pass


def new_mailbox():
    pass


def delete_mailbox():
    pass


def rename_mailbox():
    pass


def reply():
    pass


def forward():
    pass


def mark_unread():
    pass


def label():
    pass


def move():
    pass


def search():
    pass


def flag():
    pass


def delete():
    pass


def compose():
    pass


def clean_database():
    pass


def sync_database():
    # fetch
    pass
def main():
    """Connect to the IMAP server and print a summary of the INBOX contents."""
    server = IMAPClient(HOST, use_uid=True, ssl=ssl)
    server.login(USERNAME, PASSWORD)

    # select_folder returns byte-keyed metadata (e.g. b'EXISTS').
    select_info = server.select_folder('INBOX')
    print('%d messages in INBOX' % select_info[b'EXISTS'])

    messages = server.search(['NOT', 'DELETED'])
    print("%d messages that aren't deleted" % len(messages))
    print()
    print("Messages:")
    response = server.fetch(messages, ['FLAGS', 'RFC822', 'RFC822.SIZE', 'INTERNALDATE'])
    for msgid, data in response.items():
        print('   ID %d: %d bytes, flags=%s' % (msgid,
                                                data[b'RFC822.SIZE'],
                                                data[b'FLAGS']))
if __name__ == "__main__":
# parser = argparse.ArgumentParser(description='Command line mail client.')
#
# parser.add_argument('--host', dest='accumulate', action='store_const',
# const=sum, default=max,
# help='sum the integers (default: find the max)')
#
# args = parser.parse_args()
# print(args.accumulate(args.integers))
main()
| bsd-3-clause |
edx-solutions/edx-platform | openedx/features/course_experience/plugins.py | 4 | 2843 | """
Platform plugins to support the course experience.
This includes any locally defined CourseTools.
"""
from django.urls import reverse
from django.utils.translation import ugettext as _
from lms.djangoapps.courseware.courses import get_course_by_id
from student.models import CourseEnrollment
from . import SHOW_REVIEWS_TOOL_FLAG, UNIFIED_COURSE_TAB_FLAG
from .course_tools import CourseTool
from .views.course_reviews import CourseReviewsModuleFragmentView
from .views.course_updates import CourseUpdatesFragmentView
class CourseUpdatesTool(CourseTool):
    """
    The course updates tool.
    """
    @classmethod
    def analytics_id(cls):
        """
        Returns an analytics id for this tool, used for eventing.
        """
        return 'edx.updates'

    @classmethod
    def title(cls):
        """
        Returns the title of this tool.
        """
        return _('Updates')

    @classmethod
    def icon_classes(cls):
        """
        Returns icon classes needed to represent this tool.
        """
        return 'fa fa-newspaper-o'

    @classmethod
    def is_enabled(cls, request, course_key):
        """
        Returns True if the user should be shown course updates for this course.
        """
        # Hidden unless the unified course tab experience is enabled...
        if not UNIFIED_COURSE_TAB_FLAG.is_enabled(course_key):
            return False
        # ...the requesting user is enrolled in the course...
        if not CourseEnrollment.is_enrolled(request.user, course_key):
            return False
        # ...and the course actually has updates to display.
        course = get_course_by_id(course_key)
        return CourseUpdatesFragmentView.has_updates(request, course)

    @classmethod
    def url(cls, course_key):
        """
        Returns the URL for this tool for the specified course key.
        """
        return reverse('openedx.course_experience.course_updates', args=[course_key])
class CourseReviewsTool(CourseTool):
    """
    The course reviews tool.
    """
    @classmethod
    def analytics_id(cls):
        """
        Returns an id to uniquely identify this tool in analytics events.
        """
        return 'edx.reviews'

    @classmethod
    def title(cls):
        """
        Returns the title of this tool.
        """
        return _('Reviews')

    @classmethod
    def icon_classes(cls):
        """
        Returns icon classes needed to represent this tool.
        """
        return 'fa fa-star'

    @classmethod
    def is_enabled(cls, request, course_key):
        """
        Returns True if this tool is enabled for the specified course key.
        """
        # Requires both the feature flag and a configured reviews provider.
        if not SHOW_REVIEWS_TOOL_FLAG.is_enabled(course_key):
            return False
        return CourseReviewsModuleFragmentView.is_configured()

    @classmethod
    def url(cls, course_key):
        """
        Returns the URL for this tool for the specified course key.
        """
        return reverse('openedx.course_experience.course_reviews', args=[course_key])
| agpl-3.0 |
calvinchengx/O-Kay-Blog-wih-Kay-0.10.0 | kay/cache/middleware.py | 10 | 2123 | # -*- coding: utf-8 -*-
"""
Middleware for cache.
:Copyright: (c) 2009 Takashi Matsuo <tmatsuo@candit.jp> All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import logging
from google.appengine.api import memcache
from kay.conf import settings
import kay.cache
def get_key(url, lang):
    """Build the memcache key for a cached page: the URL qualified by language."""
    return "{0}?lang={1}".format(url, lang)
class CacheMiddleware(object):
    """Full-page cache middleware backed by App Engine memcache.

    process_view serves cached responses and decides whether the current
    request is cacheable; process_response stores cacheable responses.
    """
    def __init__(self, cache_timeout=settings.CACHE_MIDDLEWARE_SECONDS,
                 namespace=settings.CACHE_MIDDLEWARE_NAMESPACE,
                 cache_anonymous_only=settings.CACHE_MIDDLEWARE_ANONYMOUS_ONLY):
        self.cache_timeout = cache_timeout
        self.namespace = namespace
        self.cache_anonymous_only = cache_anonymous_only

    def process_response(self, request, response):
        # Only store when process_view flagged this request as cacheable.
        if not hasattr(request, '_cache_update') or not request._cache_update:
            return response
        # Cache successful (HTTP 200) responses only.
        if not hasattr(response, 'status_code') or not response.status_code == 200:
            return response
        key = get_key(request.url, request.lang)
        # A max-age in the response's Cache-Control overrides the default timeout.
        timeout = response.cache_control.max_age
        if timeout is None:
            timeout = self.cache_timeout
        if memcache.set(key, response, timeout, namespace=self.namespace):
            logging.debug("CacheMiddleware cache set. key: '%s', timeout: %d" %
                          (key, timeout))
        return response

    def process_view(self, request, view_func, **kwargs):
        request._cache_update = False
        # Views decorated as no-cache are skipped entirely.
        if hasattr(view_func, kay.cache.NO_CACHE):
            return None
        if self.cache_anonymous_only:
            # Anonymous-only caching needs request.user, which is attached by
            # an authentication middleware that must run before this one.
            if not hasattr(request, 'user'):
                logging.warn("You need to add a particular AuthenticationMiddleware "
                             "before CacheMiddleware")
                return None
        # Only parameterless GET/HEAD requests are cacheable.
        if not request.method in ('GET', 'HEAD') or request.args:
            return None
        if self.cache_anonymous_only and request.user.is_authenticated():
            return None
        key = get_key(request.url, request.lang)
        response = memcache.get(key, namespace=self.namespace)
        if response:
            logging.debug("CacheMiddleware cache hit: key '%s'" % key)
            return response
        # Cache miss: flag the request so process_response stores the result.
        request._cache_update = True
        return None
| bsd-3-clause |
ningirsu/stepmania-server | smserver/controllers/routes.py | 1 | 1366 | """ Routes files """
from smserver.smutils.smpacket import smcommand
from smserver.controllers import legacy
# Maps each StepMania client / SMO command to the controller class that
# handles it; the server dispatches incoming packets through this table.
ROUTES = {
    # Legacy controller for compatibility with Stepmania 5.X
    smcommand.SMClientCommand.NSCPingR: legacy.ping_response.PINGRController,
    smcommand.SMClientCommand.NSCHello: legacy.hello.HelloController,
    smcommand.SMClientCommand.NSCCM: legacy.chat.ChatController,
    smcommand.SMClientCommand.NSCFormatted: legacy.discovery.DiscoveryController,
    smcommand.SMClientCommand.NSCGON: legacy.game_over.GameOverController,
    smcommand.SMClientCommand.NSCGSR: legacy.game_start_request.StartGameRequestController,
    smcommand.SMClientCommand.NSCGSU: legacy.game_status_update.GameStatusUpdateController,
    smcommand.SMClientCommand.NSCRSG: legacy.request_start_game.RequestStartGameController,
    smcommand.SMClientCommand.NSSMONL: legacy.smo.SMOController,
    smcommand.SMClientCommand.NSCSU: legacy.user_profil.UserProfilController,
    smcommand.SMClientCommand.NSSCSMS: legacy.user_screen.UserStatusController,

    # SMO (online) commands.
    smcommand.SMOClientCommand.LOGIN: legacy.login.LoginController,
    smcommand.SMOClientCommand.ENTERROOM: legacy.enter_room.EnterRoomController,
    smcommand.SMOClientCommand.CREATEROOM: legacy.create_room.CreateRoomController,
    smcommand.SMOClientCommand.ROOMINFO: legacy.room_info.RoomInfoController,
}
| mit |
stefan-andritoiu/upm | examples/python/mb704x.py | 6 | 1976 | #!/usr/bin/env python
# Author: Jon Trulson <jtrulson@ics.com>
# Copyright (c) 2016 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_mb704x as sensorObj
def main():
    """Poll the MB704X ultrasonic rangefinder and print the range twice a second."""
    # Instantiate a MB704X sensor using default parameters (bus 0,
    # address 112)
    sensor = sensorObj.MB704X();

    ## Exit handlers ##
    # This function stops python from printing a stacktrace when you
    # hit control-C
    def SIGINTHandler(signum, frame):
        raise SystemExit

    # This function lets you run code on exit
    def exitHandler():
        print("Exiting")
        sys.exit(0)

    # Register exit handlers
    atexit.register(exitHandler)
    signal.signal(signal.SIGINT, SIGINTHandler)

    # Loop forever; the exit handlers above provide a clean Ctrl-C shutdown.
    while (1):
        print("Range: "
              + str(sensor.getRange())
              + " cm")
        time.sleep(.5)
if __name__ == '__main__':
main()
| mit |
ThomasMcVay/MediaApp | MediaAppKnobs/KnobElements/RectButton.py | 1 | 1663 | #===============================================================================
# @Author: Madison Aster
# @ModuleDescription:
# @License:
# MediaApp Library - Python Package framework for developing robust Media
# Applications with Qt Library
# Copyright (C) 2013 Madison Aster
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License version 2.1 as published by the Free Software Foundation;
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See LICENSE in the root directory of this library for copy of
# GNU Lesser General Public License and other license details.
#===============================================================================
from Qt import QtGui, QtCore, QtWidgets
class RectButton(QtWidgets.QPushButton):
    """A fixed-height push button with an optional text label.

    Accepts the same positional arguments as QPushButton; only a leading
    string argument (the label) is used, other arguments are ignored.
    """

    def __init__(self, *args):
        # Robustness: guard against being constructed with no arguments,
        # which previously raised IndexError.
        if args and type(args[0]) is str:
            text = args[0]
        else:
            text = ''
        super(RectButton, self).__init__(text)
        # Bug fix: under Qt5-style bindings (as exposed by the Qt shim this
        # file imports), QSizePolicy lives in QtWidgets, not QtGui.
        self.setSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)

    def sizeHint(self):
        # Wide default hint; the height is what the layout actually fixes.
        return QtCore.QSize(600, 16)
| lgpl-2.1 |
rutgers-apl/Atomicity-Violation-Detector | tdebug-llvm/llvm/utils/DSAclean.py | 147 | 1187 | #! /usr/bin/python
#changelog:
#10/13/2005b: replaced the # in tmp(.#*)* with alphanumeric and _, this will then remove
#nodes such as %tmp.1.i and %tmp._i.3
#10/13/2005: exntended to remove variables of the form %tmp(.#)* rather than just
#%tmp.#, i.e. it now will remove %tmp.12.3.15 etc, additionally fixed a spelling error in
#the comments
#10/12/2005: now it only removes nodes and edges for which the label is %tmp.# rather
#than removing all lines for which the lable CONTAINS %tmp.#
import re
import sys
# Usage check: an input .dot file and an output path are required.
if( len(sys.argv) < 3 ):
  print 'usage is: ./DSAclean <dot_file_to_be_cleaned> <out_put_file>'
  sys.exit(1)
#get a file object
input = open(sys.argv[1], 'r')
output = open(sys.argv[2], 'w')
#we'll get this one line at a time...while we could just put the whole thing in a string
#it would kill old computers
buffer = input.readline()
while buffer != '':
  # Drop any graph node whose label is exactly %tmp(.<alnum/underscore>)*,
  # together with the line that follows it (its continuation/edge line).
  if re.compile("label(\s*)=(\s*)\"\s%tmp(.\w*)*(\s*)\"").search(buffer):
    #skip next line, write neither this line nor the next
    buffer = input.readline()
  else:
    #this isn't a tmp Node, we can write it
    output.write(buffer)
  #prepare for the next iteration
  buffer = input.readline()
input.close()
output.close()
| gpl-2.0 |
roubert/python-phonenumbers | python/phonenumbers/timezone.py | 5 | 5470 | """Phone number to time zone mapping functionality
>>> import phonenumbers
>>> from phonenumbers.timezone import time_zones_for_number
>>> ro_number = phonenumbers.parse("+40721234567", "RO")
>>> tzlist = time_zones_for_number(ro_number)
>>> len(tzlist)
1
>>> str(tzlist[0])
'Europe/Bucharest'
>>> mx_number = phonenumbers.parse("+523291234567", "GB")
>>> tzlist = time_zones_for_number(mx_number)
>>> len(tzlist)
2
>>> str(tzlist[0])
'America/Mexico_City'
>>> str(tzlist[1])
'America/Mazatlan'
"""
# Based very loosely on original Java code:
# java/geocoder/src/com/google/i18n/phonenumbers/PhoneNumberToTimeZonesMapper.java
# Copyright (C) 2013 The Libphonenumber Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .util import prnt, u, U_PLUS
from .phonenumberutil import PhoneNumberType, number_type
from .phonenumberutil import PhoneNumberFormat, format_number
try:
from .tzdata import TIMEZONE_DATA, TIMEZONE_LONGEST_PREFIX
except ImportError: # pragma no cover
# Before the generated code exists, the carrierdata/ directory is empty.
# The generation process imports this module, creating a circular
# dependency. The hack below works around this.
import os
import sys
if (os.path.basename(sys.argv[0]) == "buildmetadatafromxml.py" or
os.path.basename(sys.argv[0]) == "buildprefixdata.py"):
prnt("Failed to import generated data (but OK as during autogeneration)", file=sys.stderr)
TIMEZONE_DATA = {'4411': u('Europe/London')}
TIMEZONE_LONGEST_PREFIX = 4
else:
raise
__all__ = ['UNKNOWN_TIMEZONE', 'time_zones_for_geographical_number', 'time_zones_for_number']
# This is defined by ICU as the unknown time zone.
UNKNOWN_TIMEZONE = u("Etc/Unknown")
_UNKNOWN_TIME_ZONE_LIST = (UNKNOWN_TIMEZONE,)
def time_zones_for_geographical_number(numobj):
    """Return the time zones to which a phone number belongs.

    The number passed in is assumed to be valid and geo-localizable
    (fixed-line or mobile numbers); validity is NOT re-checked here.

    Arguments:
    numobj -- a valid phone number for which we want to get the time zones
          to which it belongs

    Returns a list of the corresponding time zones or a single element list
    with the default unknown time zone if no other time zone was found or if
    the number was invalid"""
    e164_num = format_number(numobj, PhoneNumberFormat.E164)
    if not e164_num.startswith(U_PLUS):  # pragma no cover
        # Can only hit this arm if there's an internal error in the rest of
        # the library
        raise Exception("Expect E164 number to start with +")
    digits = e164_num[1:]
    # Match the longest known prefix, shrinking one digit at a time.
    prefix_len = TIMEZONE_LONGEST_PREFIX
    while prefix_len > 0:
        candidate = digits[:prefix_len]
        if candidate in TIMEZONE_DATA:
            return TIMEZONE_DATA[candidate]
        prefix_len -= 1
    return _UNKNOWN_TIME_ZONE_LIST
def time_zones_for_number(numobj):
    """As time_zones_for_geographical_number() but explicitly checks the
    validity of the number passed in.

    Arguments:
    numobj -- a valid phone number for which we want to get the time zones to which it belongs

    Returns a list of the corresponding time zones or a single element list with the default
    unknown time zone if no other time zone was found or if the number was invalid"""
    ntype = number_type(numobj)
    # Invalid/unknown numbers get the sentinel list immediately.
    if ntype == PhoneNumberType.UNKNOWN:
        return _UNKNOWN_TIME_ZONE_LIST
    # Geo-localizable numbers resolve via full number prefixes; everything
    # else falls back to the country-level mapping.
    if _can_be_geocoded(ntype):
        return time_zones_for_geographical_number(numobj)
    return _country_level_time_zones_for_number(numobj)
def _country_level_time_zones_for_number(numobj):
    """Returns the list of time zones corresponding to the country calling code of a number.

    Arguments:
    numobj -- the phone number to look up

    Returns a list of the corresponding time zones or a single element list with the default
    unknown time zone if no other time zone was found or if the number was invalid"""
    cc = str(numobj.country_code)
    for prefix_len in range(TIMEZONE_LONGEST_PREFIX, 0, -1):
        # NOTE(review): unlike time_zones_for_geographical_number (which slices
        # from index 1 to skip the leading '+'), cc has no '+' prefix, so
        # cc[:(1 + prefix_len)] looks like an off-by-one (cc[:prefix_len]
        # expected). Harmless for 1-3 digit country codes, but confirm intent
        # before changing.
        prefix = cc[:(1 + prefix_len)]
        if prefix in TIMEZONE_DATA:
            return TIMEZONE_DATA[prefix]
    return _UNKNOWN_TIME_ZONE_LIST
# A similar method is implemented as phonenumberutil._is_number_geographical,
# which performs a stricter check, as it determines if a number has a
# geographical association. Also, if new phone number types were added, we
# should check if this other method should be updated too.
# TODO: Remove duplication by completing the login in the method in phonenumberutil.
# For more information, see the comments in that method.
def _can_be_geocoded(ntype):
    """True for the number types we consider geo-localizable."""
    geographical_types = (PhoneNumberType.FIXED_LINE,
                          PhoneNumberType.MOBILE,
                          PhoneNumberType.FIXED_LINE_OR_MOBILE)
    return ntype in geographical_types
if __name__ == '__main__': # pragma no cover
import doctest
doctest.testmod()
| apache-2.0 |
lvapeab/nmt-keras | tests/NMT_architectures/unidir_deep_GRU_ConditionalLSTM.py | 1 | 3021 | import argparse
import os
import pytest
from tests.test_config import load_tests_params, clean_dirs
from data_engine.prepare_data import build_dataset
from nmt_keras.training import train_model
from nmt_keras.apply_model import sample_ensemble, score_corpus
def test_NMT_Unidir_deep_GRU_ConditionalLSTM():
    """End-to-end smoke test: build a dataset, train a small NMT model
    (2-layer unidirectional GRU encoder, 2-layer ConditionalLSTM decoder),
    then exercise sampling and corpus scoring on the trained model."""
    params = load_tests_params()

    # Current test params: deep (2-layer) unidirectional GRU encoder with a
    # 2-layer ConditionalLSTM decoder.
    params['BIDIRECTIONAL_ENCODER'] = False
    params['N_LAYERS_ENCODER'] = 2
    params['BIDIRECTIONAL_DEEP_ENCODER'] = False
    params['ENCODER_RNN_TYPE'] = 'GRU'
    params['DECODER_RNN_TYPE'] = 'ConditionalLSTM'
    params['N_LAYERS_DECODER'] = 2

    params['REBUILD_DATASET'] = True
    dataset = build_dataset(params)

    # Vocabulary sizes come from the freshly built dataset.
    params['INPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[params['INPUTS_IDS_DATASET'][0]]
    params['OUTPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[params['OUTPUTS_IDS_DATASET'][0]]

    # Model name encodes the architecture hyperparameters for bookkeeping.
    params['MODEL_NAME'] = \
        params['TASK_NAME'] + '_' + params['SRC_LAN'] + params['TRG_LAN'] + '_' + params['MODEL_TYPE'] + \
        '_src_emb_' + str(params['SOURCE_TEXT_EMBEDDING_SIZE']) + \
        '_bidir_' + str(params['BIDIRECTIONAL_ENCODER']) + \
        '_enc_' + params['ENCODER_RNN_TYPE'] + '_*' + str(params['N_LAYERS_ENCODER']) + '_' + str(
            params['ENCODER_HIDDEN_SIZE']) + \
        '_dec_' + params['DECODER_RNN_TYPE'] + '_*' + str(params['N_LAYERS_DECODER']) + '_' + str(
            params['DECODER_HIDDEN_SIZE']) + \
        '_deepout_' + '_'.join([layer[0] for layer in params['DEEP_OUTPUT_LAYERS']]) + \
        '_trg_emb_' + str(params['TARGET_TEXT_EMBEDDING_SIZE']) + \
        '_' + params['OPTIMIZER'] + '_' + str(params['LR'])

    # Test several NMT-Keras utilities: train, sample, sample_ensemble, score_corpus...
    print("Training model")
    train_model(params)
    params['RELOAD'] = 1
    print("Done")

    # Fake an argparse namespace by assigning attributes directly on the
    # parser object (no actual command-line parsing takes place).
    parser = argparse.ArgumentParser('Parser for unit testing')
    parser.dataset = os.path.join(
        params['DATASET_STORE_PATH'],
        'Dataset_' + params['DATASET_NAME'] + '_' + params['SRC_LAN'] + params['TRG_LAN'] + '.pkl')
    parser.text = os.path.join(params['DATA_ROOT_PATH'], params['TEXT_FILES']['val'] + params['SRC_LAN'])
    parser.splits = ['val']
    parser.config = params['STORE_PATH'] + '/config.pkl'
    parser.models = [params['STORE_PATH'] + '/epoch_' + str(1)]
    parser.verbose = 0
    parser.dest = None
    parser.source = os.path.join(params['DATA_ROOT_PATH'], params['TEXT_FILES']['val'] + params['SRC_LAN'])
    parser.target = os.path.join(params['DATA_ROOT_PATH'], params['TEXT_FILES']['val'] + params['TRG_LAN'])
    parser.weights = []
    parser.glossary = None

    # Exercise sampling both with and without n-best list output.
    for n_best in [True, False]:
        parser.n_best = n_best
        print("Sampling with n_best = %s " % str(n_best))
        sample_ensemble(parser, params)
        print("Done")

    print("Scoring corpus")
    score_corpus(parser, params)
    print("Done")
    clean_dirs(params)
if __name__ == '__main__':
pytest.main([__file__])
| mit |
ray-project/ray | rllib/agents/impala/vtrace_tf.py | 3 | 17057 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to compute V-trace off-policy actor critic targets.
For details and theory see:
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
See https://arxiv.org/abs/1802.01561 for the full paper.
In addition to the original paper's code, changes have been made
to support MultiDiscrete action spaces. behaviour_policy_logits,
target_policy_logits and actions parameters in the entry point
multi_from_logits method accepts lists of tensors instead of just
tensors.
"""
import collections
from ray.rllib.models.tf.tf_action_dist import Categorical
from ray.rllib.utils.framework import try_import_tf
tf1, tf, tfv = try_import_tf()
VTraceFromLogitsReturns = collections.namedtuple("VTraceFromLogitsReturns", [
"vs", "pg_advantages", "log_rhos", "behaviour_action_log_probs",
"target_action_log_probs"
])
VTraceReturns = collections.namedtuple("VTraceReturns", "vs pg_advantages")
def log_probs_from_logits_and_actions(policy_logits,
                                      actions,
                                      dist_class=Categorical,
                                      model=None):
    """Single-action-space convenience wrapper.

    Delegates to multi_log_probs_from_logits_and_actions with one-element
    lists and unwraps the single [T, B] log-prob tensor it returns.
    """
    return multi_log_probs_from_logits_and_actions([policy_logits], [actions],
                                                   dist_class, model)[0]
def multi_log_probs_from_logits_and_actions(policy_logits, actions, dist_class,
                                            model):
    """Computes action log-probs from policy logits and actions.

    In the notation used throughout documentation and comments, T refers to the
    time dimension ranging from 0 to T-1. B refers to the batch size and
    ACTION_SPACE refers to the list of numbers each representing a number of
    actions.

    Args:
        policy_logits: A list with length of ACTION_SPACE of float32
            tensors of shapes [T, B, ACTION_SPACE[0]], ...,
            [T, B, ACTION_SPACE[-1]] with un-normalized log-probabilities
            parameterizing a softmax policy.
        actions: A list with length of ACTION_SPACE of tensors of shapes
            [T, B, ...], ..., [T, B, ...]
            with actions.
        dist_class: Python class of the action distribution.
        model: backing ModelV2 instance, forwarded to dist_class.

    Returns:
        A list with length of ACTION_SPACE of float32 tensors of shapes
        [T, B], ..., [T, B] corresponding to the sampling log probability
        of the chosen action w.r.t. the policy.
    """
    log_probs = []
    for i in range(len(policy_logits)):
        p_shape = tf.shape(policy_logits[i])
        a_shape = tf.shape(actions[i])
        # Fold the leading [T, B] dims into a single batch dim so the action
        # distribution can be evaluated in one call...
        policy_logits_flat = tf.reshape(policy_logits[i],
                                        tf.concat([[-1], p_shape[2:]], axis=0))
        actions_flat = tf.reshape(actions[i],
                                  tf.concat([[-1], a_shape[2:]], axis=0))
        # ...then restore the [T, B] shape on the resulting log-probs.
        log_probs.append(
            tf.reshape(
                dist_class(policy_logits_flat, model).logp(actions_flat),
                a_shape[:2]))

    return log_probs
def from_logits(behaviour_policy_logits,
                target_policy_logits,
                actions,
                discounts,
                rewards,
                values,
                bootstrap_value,
                dist_class=Categorical,
                model=None,
                clip_rho_threshold=1.0,
                clip_pg_rho_threshold=1.0,
                name="vtrace_from_logits"):
    """multi_from_logits wrapper used only for tests"""

    # Wrap the single action space in one-element lists...
    res = multi_from_logits(
        [behaviour_policy_logits], [target_policy_logits], [actions],
        discounts,
        rewards,
        values,
        bootstrap_value,
        dist_class,
        model,
        clip_rho_threshold=clip_rho_threshold,
        clip_pg_rho_threshold=clip_pg_rho_threshold,
        name=name)

    # ...and squeeze the action-space dimension back out of the log-prob
    # tensors so callers get plain [T, B] tensors.
    return VTraceFromLogitsReturns(
        vs=res.vs,
        pg_advantages=res.pg_advantages,
        log_rhos=res.log_rhos,
        behaviour_action_log_probs=tf.squeeze(
            res.behaviour_action_log_probs, axis=0),
        target_action_log_probs=tf.squeeze(
            res.target_action_log_probs, axis=0),
    )
def multi_from_logits(behaviour_policy_logits,
                      target_policy_logits,
                      actions,
                      discounts,
                      rewards,
                      values,
                      bootstrap_value,
                      dist_class,
                      model,
                      behaviour_action_log_probs=None,
                      clip_rho_threshold=1.0,
                      clip_pg_rho_threshold=1.0,
                      name="vtrace_from_logits"):
    r"""V-trace for softmax policies.

    Calculates V-trace actor critic targets for softmax polices as described in

    "IMPALA: Scalable Distributed Deep-RL with
    Importance Weighted Actor-Learner Architectures"
    by Espeholt, Soyer, Munos et al.

    Target policy refers to the policy we are interested in improving and
    behaviour policy refers to the policy that generated the given
    rewards and actions.

    In the notation used throughout documentation and comments, T refers to the
    time dimension ranging from 0 to T-1. B refers to the batch size and
    ACTION_SPACE refers to the list of numbers each representing a number of
    actions.

    Args:
        behaviour_policy_logits: A list with length of ACTION_SPACE of float32
            tensors of shapes [T, B, ACTION_SPACE[0]], ...,
            [T, B, ACTION_SPACE[-1]]
            with un-normalized log-probabilities parameterizing the softmax
            behaviour policy.
        target_policy_logits: A list with length of ACTION_SPACE of float32
            tensors of shapes [T, B, ACTION_SPACE[0]], ...,
            [T, B, ACTION_SPACE[-1]]
            with un-normalized log-probabilities parameterizing the softmax
            target policy.
        actions: A list with length of ACTION_SPACE of tensors of shapes
            [T, B, ...], ..., [T, B, ...]
            with actions sampled from the behaviour policy.
        discounts: A float32 tensor of shape [T, B] with the discount
            encountered when following the behaviour policy.
        rewards: A float32 tensor of shape [T, B] with the rewards generated
            by following the behaviour policy.
        values: A float32 tensor of shape [T, B] with the value function
            estimates wrt. the target policy.
        bootstrap_value: A float32 of shape [B] with the value function
            estimate at time T.
        dist_class: action distribution class for the logits.
        model: backing ModelV2 instance
        behaviour_action_log_probs: precalculated values of the behaviour
            actions
        clip_rho_threshold: A scalar float32 tensor with the clipping
            threshold for importance weights (rho) when calculating the
            baseline targets (vs). rho^bar in the paper.
        clip_pg_rho_threshold: A scalar float32 tensor with the clipping
            threshold on rho_s in
            \rho_s \delta log \pi(a|x) (r + \gamma v_{s+1} - V(x_s)).
        name: The name scope that all V-trace operations will be created in.

    Returns:
        A `VTraceFromLogitsReturns` namedtuple with the following fields:
        vs: A float32 tensor of shape [T, B]. Can be used as target to train a
            baseline (V(x_t) - vs_t)^2.
        pg_advantages: A float 32 tensor of shape [T, B]. Can be used as an
            estimate of the advantage in the calculation of policy gradients.
        log_rhos: A float32 tensor of shape [T, B] containing the log
            importance sampling weights (log rhos).
        behaviour_action_log_probs: A float32 tensor of shape [T, B]
            containing behaviour policy action log probabilities
            (log \mu(a_t)).
        target_action_log_probs: A float32 tensor of shape [T, B] containing
            target policy action probabilities (log \pi(a_t)).
    """
    # Note: replaces entries of the caller's logits lists in place with
    # converted tensors.
    for i in range(len(behaviour_policy_logits)):
        behaviour_policy_logits[i] = tf.convert_to_tensor(
            behaviour_policy_logits[i], dtype=tf.float32)
        target_policy_logits[i] = tf.convert_to_tensor(
            target_policy_logits[i], dtype=tf.float32)

        # Make sure tensor ranks are as expected.
        # The rest will be checked by from_action_log_probs.
        behaviour_policy_logits[i].shape.assert_has_rank(3)
        target_policy_logits[i].shape.assert_has_rank(3)

    with tf1.name_scope(
            name,
            values=[
                behaviour_policy_logits, target_policy_logits, actions,
                discounts, rewards, values, bootstrap_value
            ]):
        target_action_log_probs = multi_log_probs_from_logits_and_actions(
            target_policy_logits, actions, dist_class, model)

        if (len(behaviour_policy_logits) > 1
                or behaviour_action_log_probs is None):
            # can't use precalculated values, recompute them. Note that
            # recomputing won't work well for autoregressive action dists
            # which may have variables not captured by 'logits'
            behaviour_action_log_probs = (
                multi_log_probs_from_logits_and_actions(
                    behaviour_policy_logits, actions, dist_class, model))

        log_rhos = get_log_rhos(target_action_log_probs,
                                behaviour_action_log_probs)

        vtrace_returns = from_importance_weights(
            log_rhos=log_rhos,
            discounts=discounts,
            rewards=rewards,
            values=values,
            bootstrap_value=bootstrap_value,
            clip_rho_threshold=clip_rho_threshold,
            clip_pg_rho_threshold=clip_pg_rho_threshold)

        return VTraceFromLogitsReturns(
            log_rhos=log_rhos,
            behaviour_action_log_probs=behaviour_action_log_probs,
            target_action_log_probs=target_action_log_probs,
            **vtrace_returns._asdict())
def from_importance_weights(log_rhos,
                            discounts,
                            rewards,
                            values,
                            bootstrap_value,
                            clip_rho_threshold=1.0,
                            clip_pg_rho_threshold=1.0,
                            name="vtrace_from_importance_weights"):
    r"""V-trace from log importance weights.

    Calculates V-trace actor critic targets as described in

    "IMPALA: Scalable Distributed Deep-RL with
    Importance Weighted Actor-Learner Architectures"
    by Espeholt, Soyer, Munos et al.

    In the notation used throughout documentation and comments, T refers to the
    time dimension ranging from 0 to T-1. B refers to the batch size. This code
    also supports the case where all tensors have the same number of additional
    dimensions, e.g., `rewards` is [T, B, C], `values` is [T, B, C],
    `bootstrap_value` is [B, C].

    Args:
        log_rhos: A float32 tensor of shape [T, B] representing the
            log importance sampling weights, i.e.
            log(target_policy(a) / behaviour_policy(a)). V-trace performs
            operations on rhos in log-space for numerical stability.
        discounts: A float32 tensor of shape [T, B] with discounts encountered
            when following the behaviour policy.
        rewards: A float32 tensor of shape [T, B] containing rewards generated
            by following the behaviour policy.
        values: A float32 tensor of shape [T, B] with the value function
            estimates wrt. the target policy.
        bootstrap_value: A float32 of shape [B] with the value function
            estimate at time T.
        clip_rho_threshold: A scalar float32 tensor with the clipping
            threshold for importance weights (rho) when calculating the
            baseline targets (vs). rho^bar in the paper. If None, no clipping
            is applied.
        clip_pg_rho_threshold: A scalar float32 tensor with the clipping
            threshold on rho_s in
            \rho_s \delta log \pi(a|x) (r + \gamma v_{s+1} - V(x_s)). If None,
            no clipping is applied.
        name: The name scope that all V-trace operations will be created in.

    Returns:
        A VTraceReturns namedtuple (vs, pg_advantages) where:
        vs: A float32 tensor of shape [T, B]. Can be used as target to
            train a baseline (V(x_t) - vs_t)^2.
        pg_advantages: A float32 tensor of shape [T, B]. Can be used as the
            advantage in the calculation of policy gradients.
    """
    log_rhos = tf.convert_to_tensor(log_rhos, dtype=tf.float32)
    discounts = tf.convert_to_tensor(discounts, dtype=tf.float32)
    rewards = tf.convert_to_tensor(rewards, dtype=tf.float32)
    values = tf.convert_to_tensor(values, dtype=tf.float32)
    bootstrap_value = tf.convert_to_tensor(bootstrap_value, dtype=tf.float32)
    if clip_rho_threshold is not None:
        clip_rho_threshold = tf.convert_to_tensor(
            clip_rho_threshold, dtype=tf.float32)
    if clip_pg_rho_threshold is not None:
        clip_pg_rho_threshold = tf.convert_to_tensor(
            clip_pg_rho_threshold, dtype=tf.float32)

    # Make sure tensor ranks are consistent.
    rho_rank = log_rhos.shape.ndims  # Usually 2.
    values.shape.assert_has_rank(rho_rank)
    bootstrap_value.shape.assert_has_rank(rho_rank - 1)
    discounts.shape.assert_has_rank(rho_rank)
    rewards.shape.assert_has_rank(rho_rank)
    if clip_rho_threshold is not None:
        clip_rho_threshold.shape.assert_has_rank(0)
    if clip_pg_rho_threshold is not None:
        clip_pg_rho_threshold.shape.assert_has_rank(0)

    with tf1.name_scope(
            name,
            values=[log_rhos, discounts, rewards, values, bootstrap_value]):
        rhos = tf.math.exp(log_rhos)
        if clip_rho_threshold is not None:
            clipped_rhos = tf.minimum(
                clip_rho_threshold, rhos, name="clipped_rhos")

            # Diagnostics: how much clipping is actually happening.
            tf1.summary.histogram("clipped_rhos_1000", tf.minimum(
                1000.0, rhos))
            tf1.summary.scalar(
                "num_of_clipped_rhos",
                tf.reduce_sum(
                    tf.cast(
                        tf.equal(clipped_rhos, clip_rho_threshold), tf.int32)))
            tf1.summary.scalar("size_of_clipped_rhos", tf.size(clipped_rhos))
        else:
            clipped_rhos = rhos

        cs = tf.minimum(1.0, rhos, name="cs")
        # Append bootstrapped value to get [v1, ..., v_t+1]
        values_t_plus_1 = tf.concat(
            [values[1:], tf.expand_dims(bootstrap_value, 0)], axis=0)
        # Temporal-difference terms scaled by the clipped importance weights.
        deltas = clipped_rhos * (
            rewards + discounts * values_t_plus_1 - values)

        # All sequences are reversed, computation starts from the back.
        sequences = (
            tf.reverse(discounts, axis=[0]),
            tf.reverse(cs, axis=[0]),
            tf.reverse(deltas, axis=[0]),
        )

        # V-trace vs are calculated through a scan from the back to the
        # beginning of the given trajectory.
        def scanfunc(acc, sequence_item):
            discount_t, c_t, delta_t = sequence_item
            return delta_t + discount_t * c_t * acc

        initial_values = tf.zeros_like(bootstrap_value)
        vs_minus_v_xs = tf.nest.map_structure(
            tf.stop_gradient,
            tf.scan(
                fn=scanfunc,
                elems=sequences,
                initializer=initial_values,
                parallel_iterations=1,
                name="scan"))

        # Reverse the results back to original order.
        vs_minus_v_xs = tf.reverse(vs_minus_v_xs, [0], name="vs_minus_v_xs")

        # Add V(x_s) to get v_s.
        vs = tf.add(vs_minus_v_xs, values, name="vs")

        # Advantage for policy gradient.
        vs_t_plus_1 = tf.concat(
            [vs[1:], tf.expand_dims(bootstrap_value, 0)], axis=0)
        if clip_pg_rho_threshold is not None:
            clipped_pg_rhos = tf.minimum(
                clip_pg_rho_threshold, rhos, name="clipped_pg_rhos")
        else:
            clipped_pg_rhos = rhos
        pg_advantages = (
            clipped_pg_rhos * (rewards + discounts * vs_t_plus_1 - values))

        # Make sure no gradients backpropagated through the returned values.
        return VTraceReturns(
            vs=tf.stop_gradient(vs),
            pg_advantages=tf.stop_gradient(pg_advantages))
def get_log_rhos(target_action_log_probs, behaviour_action_log_probs):
    """With the selected log_probs for multi-discrete actions of behaviour
    and target policies we compute the log_rhos for calculating the vtrace."""
    stacked_target = tf.stack(target_action_log_probs)
    stacked_behaviour = tf.stack(behaviour_action_log_probs)
    # Sum the per-component log ratios over the action-space dimension.
    return tf.reduce_sum(stacked_target - stacked_behaviour, axis=0)
| apache-2.0 |
0011/Unblock-Youku | test/run-all-tests.py | 12 | 3293 | #!/usr/bin/env python
"""
Allow you smoothly surf on many websites blocking non-mainland visitors.
Copyright (C) 2012 - 2014 Bo Zhu http://zhuzhu.org
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import subprocess
import time
import sys
import os
# http://goo.gl/2wtRL
# os.chdir(os.path.dirname(sys.argv[0]))
if os.path.dirname(sys.argv[0]) != '':
os.chdir(os.path.dirname(sys.argv[0]))
print 'PhantomJS',
try:
version = subprocess.check_output(['phantomjs', '--version'])
print version
sys.stdout.flush()
except Exception as exp:
print 'is not installed.'
print 'Please install it and try again.'
sys.stdout.flush()
sys.exit(-1)
server_process = None
def start_server():
    # Launch the node server in production mode on port 8888 and give it
    # time to initialize before the tests start hitting it.
    global server_process
    print 'To start the server, and wait for 21 seconds to set up...'
    sys.stdout.flush()
    server_process = subprocess.Popen(
        ['node', '../server/server.js', '--production', '--port=8888'])
    time.sleep(21)
def stop_server():
    # Terminate the node server started by start_server() and wait for it to
    # exit so no orphan process is left behind.
    time.sleep(1)
    print 'To stop the server...',
    sys.stdout.flush()
    server_process.terminate()
    server_process.wait()
    print 'done.'
    sys.stdout.flush()
# http://goo.gl/xaBer
def red_alert(text):
    # Print *text* in red reverse-video via ANSI escape codes so failures
    # stand out in the console output.
    print "\033[7;31m" + text + "\033[0m"
    sys.stdout.flush()
def run_all_tests():
    # Run every test-*.js file in the current directory through PhantomJS.
    # Files ending in -proxy.js are routed through the local proxy server
    # started by start_server(). Returns the number of failed tests.
    print
    print 'To run all test-*.js files...'
    sys.stdout.flush()

    num_failed = 0
    num_passed = 0
    for file_name in os.listdir('.'):
        if file_name.startswith('test-') and file_name.endswith('.js'):
            if file_name.endswith('-proxy.js'):
                command = ['phantomjs', '--proxy=127.0.0.1:8888', file_name]
            else:
                command = ['phantomjs', file_name]
            print
            print ' '.join(command)
            sys.stdout.flush()
            # PhantomJS exit code 0 means the test script passed.
            return_value = subprocess.call(command)
            time.sleep(2)  # sleep 2 seconds between tests
            if return_value != 0:
                num_failed += 1
                red_alert(file_name + ' FAILED!')
            else:
                num_passed += 1
                print file_name + ' passed.'
            sys.stdout.flush()

    print
    sys.stdout.flush()
    if num_failed > 0:
        red_alert('Final results: ' + str(num_failed) + ' TESTS FAILED'
                  + ' (out of ' + str(num_failed + num_passed) + ')')
    else:
        print 'All %d tests passed.' % (num_passed + num_failed)
    print
    sys.stdout.flush()

    return num_failed
if __name__ == '__main__':
exit_code = -1
try:
start_server()
exit_code = run_all_tests()
finally:
stop_server()
sys.exit(exit_code)
| agpl-3.0 |
strands-project/strands_qsr_lib | qsr_prob_rep/src/qsrrep_hmms/rcc3_hmm.py | 8 | 1322 | # -*- coding: utf-8 -*-
from qsrrep_hmms.hmm_abstractclass import HMMAbstractclass
class RCC3HMM(HMMAbstractclass):
    """HMM specialisation for RCC3 qualitative spatial relations.

    Translates between RCC3 state chains ('dc', 'po', 'o') and the integer
    symbol alphabet expected by the generic HMM base class.
    """

    # Index of a state in this list is the integer symbol used by the HMM.
    _state_list = ["dc", "po", "o"]

    def __init__(self):
        # Fix: name the class explicitly. super(self.__class__, self) would
        # recurse infinitely if this class were ever subclassed.
        super(RCC3HMM, self).__init__()
        # Derive the state count from the alphabet so they cannot drift apart.
        self.num_possible_states = len(self._state_list)

    def _qsr_to_symbol(self, qsr_data):
        """Transforms a list of qsr state chains to a list of lists of numbers according to the alphabet.

        A single flat chain of states is accepted as well and is wrapped into
        a one-element list of chains.

        :param qsr_data: list of QSR state chains, e.g. [['dc','po','o'], ['dc','po']]

        :return: List of lists containing the qsr input data as symbols from the alphabet
            E.g.: [[0, 1, 2], [0, 1]]
        """
        # isinstance (rather than type == list) also accepts list subclasses.
        if not isinstance(qsr_data[0], list):
            return self._qsr_to_symbol([qsr_data])
        return [[self._state_list.index(state) for state in chain]
                for chain in qsr_data]

    def _symbol_to_qsr(self, symbols):
        """Transforms a list of symbols to the corresponding qsr state chains.

        :param symbols: list of lists of alphabet symbols, e.g. [[0, 1, 2], [0, 1]]

        :return: List of lists of qsr state chains
            E.g.: [['dc','po','o'],['dc','po']]
        """
        return [[self._state_list[sym] for sym in chain] for chain in symbols]
nistormihai/superdesk-core | tests/io/update_ingest_tests.py | 8 | 3180 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from unittest import mock, TestCase
from unittest.mock import MagicMock
class FakeSuperdesk():
    """Minimal stand-in for the superdesk module used by the tests below."""

    def __init__(self):
        # Test methods populate and override this mapping directly.
        self.services = dict()

    def get_resource_service(self, service_name):
        """Return the fake service registered under *service_name*, or None."""
        return self.services.get(service_name, None)
fake_superdesk = FakeSuperdesk()
@mock.patch('superdesk.io.commands.update_ingest.superdesk', fake_superdesk)
class GetProviderRoutingSchemeTestCase(TestCase):
    """Tests for the get_provider_routing_scheme() function."""

    def setUp(self):
        # Import inside setUp so an import failure surfaces as a test
        # failure rather than a collection error.
        try:
            from superdesk.io.commands.update_ingest import (
                get_provider_routing_scheme)
        except ImportError:
            self.fail("Could not import function under test " +
                      "(get_provider_routing_scheme).")
        else:
            self.funcToTest = get_provider_routing_scheme

        # Fresh service mocks for every test.
        fake_superdesk.services = {
            'routing_schemes': MagicMock(name='routing_schemes'),
            'content_filters': MagicMock(name='content_filters')
        }

    def test_returns_none_if_no_provider_scheme_defined(self):
        fake_provider = {'routing_scheme': None}
        result = self.funcToTest(fake_provider)
        self.assertIsNone(result)

    def test_returns_scheme_config_from_db_if_scheme_defined(self):
        fake_scheme = {
            '_id': 'abc123',
            'rules': []
        }
        schemes_service = fake_superdesk.services['routing_schemes']
        schemes_service.find_one.return_value = fake_scheme

        fake_provider = {'routing_scheme': 'abc123'}
        result = self.funcToTest(fake_provider)

        # check that correct scheme has been fetched and returned
        self.assertTrue(schemes_service.find_one.called)
        args, kwargs = schemes_service.find_one.call_args
        self.assertEqual(kwargs.get('_id'), 'abc123')
        self.assertEqual(result, fake_scheme)

    def test_includes_content_filters_in_returned_scheme(self):
        fake_scheme = {
            '_id': 'abc123',
            'rules': [
                {'filter': 'filter_id_4'},
                {'filter': 'filter_id_8'},
            ]
        }
        schemes_service = fake_superdesk.services['routing_schemes']
        schemes_service.find_one.return_value = fake_scheme

        # Each rule's filter ID is resolved through content_filters.find_one;
        # side_effect yields these dicts in call order.
        filters_service = fake_superdesk.services['content_filters']
        filters_service.find_one.side_effect = [
            {'_id': 'filter_id_4'},
            {'_id': 'filter_id_8'},
        ]

        fake_provider = {'routing_scheme': 'abc123'}
        result = self.funcToTest(fake_provider)

        scheme_rules = result.get('rules', [])
        self.assertEqual(len(scheme_rules), 2)
        self.assertEqual(scheme_rules[0].get('filter'), {'_id': 'filter_id_4'})
        self.assertEqual(scheme_rules[1].get('filter'), {'_id': 'filter_id_8'})
hastexo/edx-platform | openedx/core/djangoapps/ace_common/template_context.py | 5 | 1202 | """
Context dictionary for templates that use the ace_common base template.
"""
from django.conf import settings
from django.core.urlresolvers import reverse
from edxmako.shortcuts import marketing_link
from openedx.core.djangoapps.theming.helpers import get_config_value_from_site_or_settings
def get_base_template_context(site):
    """
    Dict with entries needed for all templates that use the base template.

    *site* is forwarded to get_config_value_from_site_or_settings so that
    site-specific configuration can override the global settings values.
    """
    return {
        # Platform information
        'homepage_url': marketing_link('ROOT'),
        'dashboard_url': reverse('dashboard'),
        # Optional; None when EDX_PLATFORM_REVISION is not configured.
        'template_revision': getattr(settings, 'EDX_PLATFORM_REVISION', None),
        'platform_name': get_config_value_from_site_or_settings(
            'PLATFORM_NAME',
            site=site,
            site_config_name='platform_name',
        ),
        'contact_mailing_address': get_config_value_from_site_or_settings(
            'CONTACT_MAILING_ADDRESS', site=site, site_config_name='contact_mailing_address'),
        'social_media_urls': get_config_value_from_site_or_settings('SOCIAL_MEDIA_FOOTER_URLS', site=site),
        'mobile_store_urls': get_config_value_from_site_or_settings('MOBILE_STORE_URLS', site=site),
    }
| agpl-3.0 |
class AttrDict(dict):
    """A dict whose entries are also readable and writable as attributes.

    Pointing __dict__ at the mapping itself makes attribute access and item
    access share the same underlying storage.
    """

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        self.__dict__ = self
class Settings(AttrDict):
    # Game settings container; attribute-style access inherited from AttrDict.

    def __init__(self, *args, **kwargs):
        super(Settings, self).__init__(*args, **kwargs)

    def init_map(self, map_data):
        # Pull the map layout into top-level settings fields.
        # 'spawn' and 'obstacle' are required keys; the others are optional.
        self.spawn_coords = map_data['spawn']
        self.obstacles = map_data['obstacle']
        self.player_count = map_data.get('player_count', 2)
        self.start = map_data.get('start', None)
settings = Settings({
'spawn_every': 10,
'spawn_per_player': 5,
'board_size': 19,
'robot_hp': 50,
'attack_range': (8, 10),
'collision_damage': 5,
'suicide_damage': 15,
'max_turns': 100,
'str_limit': 50, # limit on length of representation of action
'max_seed': 2147483647,
# rating systems
'default_rating': 1200,
# user-scripting
'max_time_initialization': 2000,
'max_time_first_act': 1500,
'max_time_per_act': 300,
'exposed_properties': ('location', 'hp', 'player_id'),
'player_only_properties': ('robot_id',),
'user_obj_types': ('Robot',),
'valid_commands': ('move', 'attack', 'guard', 'suicide')
})
| unlicense |
MoonshineSG/OctoPrint | src/octoprint/plugins/softwareupdate/updaters/update_script.py | 7 | 3592 | # coding=utf-8
from __future__ import absolute_import, division, print_function
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import sys
import logging
from ..exceptions import ConfigurationInvalid, UpdateError
from octoprint.util.commandline import CommandlineCaller, CommandlineError
def _get_caller(log_cb=None):
    """Build a CommandlineCaller whose call/stdout/stderr output is routed
    through *log_cb*, if one is given.

    log_cb receives (lines, prefix=..., stream=...) where stream is one of
    "call", "stdout" or "stderr".
    """

    def _log_call(*lines):
        _log(lines, prefix=" ", stream="call")

    def _log_stdout(*lines):
        _log(lines, prefix=">", stream="stdout")

    def _log_stderr(*lines):
        _log(lines, prefix="!", stream="stderr")

    def _log(lines, prefix=None, stream=None):
        # No-op when no callback was provided.
        if log_cb is None:
            return
        log_cb(lines, prefix=prefix, stream=stream)

    caller = CommandlineCaller()
    if log_cb is not None:
        # Only hook up the logging callbacks when there is somewhere to log.
        caller.on_log_call = _log_call
        caller.on_log_stdout = _log_stdout
        caller.on_log_stderr = _log_stderr
    return caller
def can_perform_update(target, check):
    """Return True if *check* names an update script and an existing
    update/checkout folder, i.e. perform_update can run for *target*."""
    import os

    script_configured = bool(check.get("update_script"))

    # "update_folder" takes precedence over "checkout_folder" whenever the
    # key is present, even if its value is empty.
    if "update_folder" in check:
        folder = check["update_folder"]
    else:
        folder = check.get("checkout_folder")
    folder_configured = bool(folder and os.path.isdir(folder))

    return script_configured and folder_configured
def perform_update(target, check, target_version, log_cb=None):
    """Run the configured update script for *target*.

    Executes the optional pre_update_script, then the update_script (with
    python/folder/target/branch/force placeholders filled in), then the
    optional post_update_script, all with the configured update/checkout
    folder as working directory.

    Arguments:
        target (str): name of the update target (used in logs and errors)
        check (dict): the target's softwareupdate check configuration
        target_version (str): version to update to
        log_cb (callable): optional command line output callback, forwarded
            to _get_caller

    Returns:
        str: "ok" on success

    Raises:
        ConfigurationInvalid: if neither update_folder nor checkout_folder
            is configured for the target
        UpdateError: if the update script itself fails (pre/post script
            failures are only logged)
    """
    logger = logging.getLogger("octoprint.plugins.softwareupdate.updaters.update_script")

    if not can_perform_update(target, check):
        raise ConfigurationInvalid("checkout_folder and update_folder are missing for update target %s, one is needed" % target)

    update_script = check["update_script"]
    update_branch = check.get("update_branch", "")
    force_exact_version = check.get("force_exact_version", False)
    folder = check.get("update_folder", check.get("checkout_folder"))  # either should be set, tested above
    pre_update_script = check.get("pre_update_script", None)
    post_update_script = check.get("post_update_script", None)

    caller = _get_caller(log_cb=log_cb)

    ### pre update

    if pre_update_script is not None:
        logger.debug("Target: %s, running pre-update script: %s" % (target, pre_update_script))
        try:
            caller.checked_call(pre_update_script, cwd=folder)
        except CommandlineError as e:
            # Pre update scripts are best effort: log the failure but continue.
            logger.exception("Target: %s, error while executing pre update script, got returncode %r" % (target, e.returncode))

    ### update

    try:
        update_command = update_script.format(python=sys.executable,
                                              folder=folder,
                                              target=target_version,
                                              branch=update_branch,
                                              force="true" if force_exact_version else "false")

        logger.debug("Target %s, running update script: %s" % (target, update_command))

        caller.checked_call(update_command, cwd=folder)
    except CommandlineError as e:
        logger.exception("Target: %s, error while executing update script, got returncode %r" % (target, e.returncode))
        # Fix: the message previously contained a literal, never-substituted
        # "%s" (the format string and the data tuple were passed as separate
        # arguments); interpolate the target name so the error identifies it.
        raise UpdateError("Error while executing update script for %s" % target, (e.stdout, e.stderr))

    ### post update

    if post_update_script is not None:
        logger.debug("Target: %s, running post-update script %s..." % (target, post_update_script))
        try:
            caller.checked_call(post_update_script, cwd=folder)
        except CommandlineError as e:
            # Post update scripts are likewise best effort.
            logger.exception("Target: %s, error while executing post update script, got returncode %r" % (target, e.returncode))

    return "ok"
mcollins12321/anita | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/eucjpprober.py | 2919 | 3678 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
    # Probes byte streams for the EUC-JP encoding by combining a coding
    # state machine (structural validity of byte sequences) with
    # character-distribution and context analyzers (statistical likelihood).

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(EUCJPSMModel)
        self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
        self._mContextAnalyzer = EUCJPContextAnalysis()
        self.reset()

    def reset(self):
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        return "EUC-JP"

    def feed(self, aBuf):
        # Feed a chunk of bytes through the state machine, updating the
        # statistical analyzers on every completed multi-byte character.
        # Returns the detection state after consuming the chunk.
        aLen = len(aBuf)
        for i in range(0, aLen):
            # PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                # Illegal byte sequence for EUC-JP: rule this charset out.
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character was consumed; hand it (with its byte
                # length) to the context and distribution analyzers.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # First byte of this chunk completes a character begun in
                    # the previous chunk (first byte kept in self._mLastChar).
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar, charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)

        # Remember the last byte for cross-chunk character assembly.
        self._mLastChar[0] = aBuf[aLen - 1]

        if self.get_state() == constants.eDetecting:
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                # Confident enough already: stop probing early.
                self._mState = constants.eFoundIt

        return self.get_state()

    def get_confidence(self):
        # Report the stronger of the two statistical signals.
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
| mit |
Dhivyap/ansible | lib/ansible/modules/cloud/amazon/ecs_service.py | 8 | 30831 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Standard Ansible module metadata consumed by ansible-doc and build tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_service
short_description: create, terminate, start or stop a service in ecs
description:
- Creates or terminates ecs services.
notes:
- the service role specified must be assumable (i.e. have a trust relationship for the ecs service, ecs.amazonaws.com)
- for details of the parameters and returns see U(https://boto3.readthedocs.io/en/latest/reference/services/ecs.html)
- An IAM role must have been previously created
version_added: "2.1"
author:
- "Mark Chance (@Java1Guy)"
- "Darek Kaczynski (@kaczynskid)"
- "Stephane Maarek (@simplesteph)"
- "Zac Blazic (@zacblazic)"
requirements: [ json, botocore, boto3 ]
options:
state:
description:
- The desired state of the service
required: true
choices: ["present", "absent", "deleting"]
name:
description:
- The name of the service
required: true
cluster:
description:
- The name of the cluster in which the service exists
required: false
task_definition:
description:
- The task definition the service will run. This parameter is required when state=present
required: false
load_balancers:
description:
- The list of ELBs defined for this service
required: false
desired_count:
description:
- The count of how many instances of the service. This parameter is required when state=present
required: false
client_token:
description:
- Unique, case-sensitive identifier you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed.
required: false
role:
description:
- The name or full Amazon Resource Name (ARN) of the IAM role that allows your Amazon ECS container agent to make calls to your load balancer
on your behalf. This parameter is only required if you are using a load balancer with your service, in a network mode other than `awsvpc`.
required: false
delay:
description:
- The time to wait before checking that the service is available
required: false
default: 10
repeat:
description:
- The number of times to check that the service is available
required: false
default: 10
force_new_deployment:
description:
- Force deployment of service even if there are no changes
required: false
version_added: 2.8
type: bool
deployment_configuration:
description:
    - Optional parameters that control the deployment_configuration; format is '{"maximum_percent":<integer>, "minimum_healthy_percent":<integer>}'
required: false
version_added: 2.3
placement_constraints:
description:
- The placement constraints for the tasks in the service
required: false
version_added: 2.4
placement_strategy:
description:
- The placement strategy objects to use for tasks in your service. You can specify a maximum of 5 strategy rules per service
required: false
version_added: 2.4
network_configuration:
description:
- network configuration of the service. Only applicable for task definitions created with C(awsvpc) I(network_mode).
- assign_public_ip requires botocore >= 1.8.4
suboptions:
subnets:
description:
- A list of subnet IDs to associate with the task
version_added: 2.6
security_groups:
description:
- A list of security group names or group IDs to associate with the task
version_added: 2.6
assign_public_ip:
description:
- Whether the task's elastic network interface receives a public IP address. This option requires botocore >= 1.8.4.
type: bool
version_added: 2.7
launch_type:
description:
- The launch type on which to run your service
required: false
version_added: 2.7
choices: ["EC2", "FARGATE"]
health_check_grace_period_seconds:
description:
- Seconds to wait before health checking the freshly added/updated services. This option requires botocore >= 1.8.20.
required: false
version_added: 2.8
service_registries:
description:
- describes service discovery registries this service will register with.
required: false
version_added: 2.8
suboptions:
container_name:
description:
- container name for service discovery registration
container_port:
description:
- container port for service discovery registration
arn:
description:
- Service discovery registry ARN
scheduling_strategy:
description:
- The scheduling strategy, defaults to "REPLICA" if not given to preserve previous behavior
required: false
version_added: 2.8
choices: ["DAEMON", "REPLICA"]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Basic provisioning example
- ecs_service:
state: present
name: console-test-service
cluster: new_cluster
task_definition: 'new_cluster-task:1'
desired_count: 0
- name: create ECS service on VPC network
ecs_service:
state: present
name: console-test-service
cluster: new_cluster
task_definition: 'new_cluster-task:1'
desired_count: 0
network_configuration:
subnets:
- subnet-abcd1234
security_groups:
- sg-aaaa1111
- my_security_group
# Simple example to delete
- ecs_service:
name: default
state: absent
cluster: new_cluster
# With custom deployment configuration (added in version 2.3), placement constraints and strategy (added in version 2.4)
- ecs_service:
state: present
name: test-service
cluster: test-cluster
task_definition: test-task-definition
desired_count: 3
deployment_configuration:
minimum_healthy_percent: 75
maximum_percent: 150
placement_constraints:
- type: memberOf
expression: 'attribute:flavor==test'
placement_strategy:
- type: binpack
field: memory
'''
RETURN = '''
service:
description: Details of created service.
returned: when creating a service
type: complex
contains:
clusterArn:
description: The Amazon Resource Name (ARN) of the of the cluster that hosts the service.
returned: always
type: str
desiredCount:
description: The desired number of instantiations of the task definition to keep running on the service.
returned: always
type: int
loadBalancers:
description: A list of load balancer objects
returned: always
type: complex
contains:
loadBalancerName:
description: the name
returned: always
type: str
containerName:
description: The name of the container to associate with the load balancer.
returned: always
type: str
containerPort:
description: The port on the container to associate with the load balancer.
returned: always
type: int
pendingCount:
description: The number of tasks in the cluster that are in the PENDING state.
returned: always
type: int
runningCount:
description: The number of tasks in the cluster that are in the RUNNING state.
returned: always
type: int
serviceArn:
description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region
of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example,
arn:aws:ecs:region :012345678910 :service/my-service .
returned: always
type: str
serviceName:
description: A user-generated string used to identify the service
returned: always
type: str
status:
description: The valid values are ACTIVE, DRAINING, or INACTIVE.
returned: always
type: str
taskDefinition:
description: The ARN of a task definition to use for tasks in the service.
returned: always
type: str
deployments:
description: list of service deployments
returned: always
type: list of complex
deploymentConfiguration:
description: dictionary of deploymentConfiguration
returned: always
type: complex
contains:
maximumPercent:
description: maximumPercent param
returned: always
type: int
minimumHealthyPercent:
description: minimumHealthyPercent param
returned: always
type: int
events:
description: list of service events
returned: always
type: list of complex
placementConstraints:
description: List of placement constraints objects
returned: always
type: list of complex
contains:
type:
description: The type of constraint. Valid values are distinctInstance and memberOf.
returned: always
type: str
expression:
description: A cluster query language expression to apply to the constraint. Note you cannot specify an expression if the constraint type is
distinctInstance.
returned: always
type: str
placementStrategy:
description: List of placement strategy objects
returned: always
type: list of complex
contains:
type:
description: The type of placement strategy. Valid values are random, spread and binpack.
returned: always
type: str
field:
description: The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId
(or host, which has the same effect), or any platform or custom attribute that is applied to a container instance,
such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are CPU and MEMORY.
returned: always
type: str
ansible_facts:
description: Facts about deleted service.
returned: when deleting a service
type: complex
contains:
service:
description: Details of deleted service in the same structure described above for service creation.
returned: when service existed and was deleted
type: complex
'''
import time
# Types that map_complex_type() coerces the snake_case
# deployment_configuration values to before they are sent to the ECS API.
DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
    'maximum_percent': 'int',
    'minimum_healthy_percent': 'int'
}
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import ec2_argument_spec
from ansible.module_utils.ec2 import snake_dict_to_camel_dict, map_complex_type, get_ec2_security_group_ids_from_names
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
class EcsServiceManager:
    """Handles ECS Services.

    Thin wrapper around the boto3 ECS client, plus an EC2 client used only
    to resolve security-group names into sg- IDs.
    """

    def __init__(self, module):
        self.module = module
        self.ecs = module.client('ecs')
        self.ec2 = module.client('ec2')

    def format_network_configuration(self, network_config):
        """Translate the module's network_configuration dict into the
        camelCase ``awsvpcConfiguration`` structure the ECS API expects.

        Fails the module when subnets are missing, security-group names
        cannot be resolved, or botocore is too old for assign_public_ip.
        """
        result = dict()
        if network_config['subnets'] is not None:
            result['subnets'] = network_config['subnets']
        else:
            self.module.fail_json(msg="Network configuration must include subnets")
        if network_config['security_groups'] is not None:
            groups = network_config['security_groups']
            # Anything that is not already an sg-... ID is treated as a name
            # and resolved within the VPC of the first subnet.
            if any(not sg.startswith('sg-') for sg in groups):
                try:
                    vpc_id = self.ec2.describe_subnets(SubnetIds=[result['subnets'][0]])['Subnets'][0]['VpcId']
                    groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id)
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    self.module.fail_json_aws(e, msg="Couldn't look up security groups")
            result['securityGroups'] = groups
        if network_config['assign_public_ip'] is not None:
            if self.module.botocore_at_least('1.8.4'):
                if network_config['assign_public_ip'] is True:
                    result['assignPublicIp'] = "ENABLED"
                else:
                    result['assignPublicIp'] = "DISABLED"
            else:
                self.module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use assign_public_ip in network_configuration')
        return dict(awsvpcConfiguration=result)

    def find_in_array(self, array_of_services, service_name, field_name='serviceArn'):
        """Return the first element whose ``field_name`` value ends with
        ``service_name``, or None when there is no match."""
        for c in array_of_services:
            if c[field_name].endswith(service_name):
                return c
        return None

    def describe_service(self, cluster_name, service_name):
        """Describe a single service.

        Returns the service dict, None when ECS reports it as MISSING, and
        raises when the response cannot be interpreted.
        """
        response = self.ecs.describe_services(
            cluster=cluster_name,
            services=[service_name])
        if len(response['failures']) > 0:
            # BUGFIX: previously c['reason'] was dereferenced before checking
            # that a matching failure entry was actually found, raising
            # TypeError when find_in_array returned None.  (Also removed a
            # `msg` accumulator that was built but never used.)
            c = self.find_in_array(response['failures'], service_name, 'arn')
            if c and c['reason'] == 'MISSING':
                return None
            # fall thru and look through found ones
        if len(response['services']) > 0:
            c = self.find_in_array(response['services'], service_name)
            if c:
                return c
        raise Exception("Unknown problem describing service %s." % service_name)

    def is_matching_service(self, expected, existing):
        """Return True when the existing service already matches the
        requested task definition, load balancers and desired count."""
        if expected['task_definition'] != existing['taskDefinition']:
            return False
        if (expected['load_balancers'] or []) != existing['loadBalancers']:
            return False
        # expected is params. DAEMON scheduling strategy returns desired count equal to
        # number of instances running; don't check desired count if scheduling strat is daemon
        if (expected['scheduling_strategy'] != 'DAEMON'):
            if (expected['desired_count'] or 0) != existing['desiredCount']:
                return False
        return True

    def create_service(self, service_name, cluster_name, task_definition, load_balancers,
                       desired_count, client_token, role, deployment_configuration,
                       placement_constraints, placement_strategy, health_check_grace_period_seconds,
                       network_configuration, service_registries, launch_type, scheduling_strategy):
        """Create the service; optional features are only sent when set."""
        params = dict(
            cluster=cluster_name,
            serviceName=service_name,
            taskDefinition=task_definition,
            loadBalancers=load_balancers,
            clientToken=client_token,
            role=role,
            deploymentConfiguration=deployment_configuration,
            placementConstraints=placement_constraints,
            placementStrategy=placement_strategy
        )
        if network_configuration:
            params['networkConfiguration'] = network_configuration
        if launch_type:
            params['launchType'] = launch_type
        if self.health_check_setable(params) and health_check_grace_period_seconds is not None:
            params['healthCheckGracePeriodSeconds'] = health_check_grace_period_seconds
        if service_registries:
            params['serviceRegistries'] = service_registries
        # desired count is not required if scheduling strategy is daemon
        if desired_count is not None:
            params['desiredCount'] = desired_count
        if scheduling_strategy:
            params['schedulingStrategy'] = scheduling_strategy
        response = self.ecs.create_service(**params)
        return self.jsonize(response['service'])

    def update_service(self, service_name, cluster_name, task_definition,
                       desired_count, deployment_configuration, network_configuration,
                       health_check_grace_period_seconds, force_new_deployment):
        """Update the mutable properties of an existing service."""
        params = dict(
            cluster=cluster_name,
            service=service_name,
            taskDefinition=task_definition,
            deploymentConfiguration=deployment_configuration)
        if network_configuration:
            params['networkConfiguration'] = network_configuration
        if force_new_deployment:
            params['forceNewDeployment'] = force_new_deployment
        if health_check_grace_period_seconds is not None:
            params['healthCheckGracePeriodSeconds'] = health_check_grace_period_seconds
        # desired count is not required if scheduling strategy is daemon
        if desired_count is not None:
            params['desiredCount'] = desired_count
        response = self.ecs.update_service(**params)
        return self.jsonize(response['service'])

    def jsonize(self, service):
        """Stringify the datetime fields so the service dict is JSON
        serializable; mutates and returns the same dict."""
        if 'createdAt' in service:
            service['createdAt'] = str(service['createdAt'])
        if 'deployments' in service:
            for d in service['deployments']:
                if 'createdAt' in d:
                    d['createdAt'] = str(d['createdAt'])
                if 'updatedAt' in d:
                    d['updatedAt'] = str(d['updatedAt'])
        if 'events' in service:
            for e in service['events']:
                if 'createdAt' in e:
                    e['createdAt'] = str(e['createdAt'])
        return service

    def delete_service(self, service, cluster=None):
        """Delete the named service from the given cluster."""
        return self.ecs.delete_service(cluster=cluster, service=service)

    def ecs_api_handles_network_configuration(self):
        # There doesn't seem to be a nice way to inspect botocore to look
        # for attributes (and networkConfiguration is not an explicit argument
        # to e.g. ecs.run_task, it's just passed as a keyword argument)
        return self.module.botocore_at_least('1.7.44')

    def health_check_setable(self, params):
        """healthCheckGracePeriodSeconds only applies to load-balanced
        services and requires botocore >= 1.8.20."""
        load_balancers = params.get('loadBalancers', [])
        return len(load_balancers) > 0 and self.module.botocore_at_least('1.8.20')
def main():
    """Entry point: create, update, delete, or wait for deletion of an ECS
    service according to the ``state`` parameter."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent', 'deleting']),
        name=dict(required=True, type='str'),
        cluster=dict(required=False, type='str'),
        task_definition=dict(required=False, type='str'),
        load_balancers=dict(required=False, default=[], type='list'),
        desired_count=dict(required=False, type='int'),
        client_token=dict(required=False, default='', type='str'),
        role=dict(required=False, default='', type='str'),
        delay=dict(required=False, type='int', default=10),
        repeat=dict(required=False, type='int', default=10),
        force_new_deployment=dict(required=False, default=False, type='bool'),
        deployment_configuration=dict(required=False, default={}, type='dict'),
        placement_constraints=dict(required=False, default=[], type='list'),
        placement_strategy=dict(required=False, default=[], type='list'),
        health_check_grace_period_seconds=dict(required=False, type='int'),
        network_configuration=dict(required=False, type='dict', options=dict(
            subnets=dict(type='list'),
            security_groups=dict(type='list'),
            assign_public_ip=dict(type='bool')
        )),
        launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
        service_registries=dict(required=False, type='list', default=[]),
        scheduling_strategy=dict(required=False, choices=['DAEMON', 'REPLICA'])
    ))

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              required_if=[('state', 'present', ['task_definition']),
                                           ('launch_type', 'FARGATE', ['network_configuration'])],
                              required_together=[['load_balancers', 'role']])

    # desired_count is mandatory when creating/updating a REPLICA service.
    if module.params['state'] == 'present' and module.params['scheduling_strategy'] == 'REPLICA':
        if module.params['desired_count'] is None:
            module.fail_json(msg='state is present, scheduling_strategy is REPLICA; missing desired_count')

    service_mgr = EcsServiceManager(module)
    if module.params['network_configuration']:
        if not service_mgr.ecs_api_handles_network_configuration():
            module.fail_json(msg='botocore needs to be version 1.7.44 or higher to use network configuration')
        network_configuration = service_mgr.format_network_configuration(module.params['network_configuration'])
    else:
        network_configuration = None

    # Coerce and camelCase the deployment configuration / service registries
    # the way the ECS API expects them.
    deployment_configuration = map_complex_type(module.params['deployment_configuration'],
                                                DEPLOYMENT_CONFIGURATION_TYPE_MAP)
    deploymentConfiguration = snake_dict_to_camel_dict(deployment_configuration)
    serviceRegistries = list(map(snake_dict_to_camel_dict, module.params['service_registries']))

    try:
        existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
    except Exception as e:
        module.fail_json(msg="Exception describing service '" + module.params['name'] + "' in cluster '" + module.params['cluster'] + "': " + str(e))

    results = dict(changed=False)

    # Feature gates: fail early when botocore is too old for a requested option.
    if module.params['launch_type']:
        if not module.botocore_at_least('1.8.4'):
            module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch_type')
    if module.params['force_new_deployment']:
        if not module.botocore_at_least('1.8.4'):
            module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use force_new_deployment')
    if module.params['health_check_grace_period_seconds']:
        if not module.botocore_at_least('1.8.20'):
            module.fail_json(msg='botocore needs to be version 1.8.20 or higher to use health_check_grace_period_seconds')

    if module.params['state'] == 'present':

        matching = False
        update = False

        if existing and 'status' in existing and existing['status'] == "ACTIVE":
            if module.params['force_new_deployment']:
                update = True
            elif service_mgr.is_matching_service(module.params, existing):
                matching = True
                results['service'] = existing
            else:
                update = True

        if not matching:
            if not module.check_mode:

                role = module.params['role']
                clientToken = module.params['client_token']

                loadBalancers = []
                for loadBalancer in module.params['load_balancers']:
                    # The ECS API requires containerPort as an int; the user
                    # may have supplied it as a string.
                    if 'containerPort' in loadBalancer:
                        loadBalancer['containerPort'] = int(loadBalancer['containerPort'])
                    loadBalancers.append(loadBalancer)
                # BUGFIX: removed a duplicate follow-up loop that re-applied
                # the identical int() conversion a second time (a no-op).

                if update:
                    # Validate immutable properties and botocore feature
                    # support before attempting an update.
                    if module.params['scheduling_strategy']:
                        if not module.botocore_at_least('1.10.37'):
                            module.fail_json(msg='botocore needs to be version 1.10.37 or higher to use scheduling_strategy')
                        elif (existing['schedulingStrategy']) != module.params['scheduling_strategy']:
                            module.fail_json(msg="It is not possible to update the scheduling strategy of an existing service")
                    if module.params['service_registries']:
                        if not module.botocore_at_least('1.9.15'):
                            module.fail_json(msg='botocore needs to be version 1.9.15 or higher to use service_registries')
                        elif (existing['serviceRegistries'] or []) != serviceRegistries:
                            module.fail_json(msg="It is not possible to update the service registries of an existing service")
                    if (existing['loadBalancers'] or []) != loadBalancers:
                        module.fail_json(msg="It is not possible to update the load balancers of an existing service")
                    # update required
                    response = service_mgr.update_service(module.params['name'],
                                                          module.params['cluster'],
                                                          module.params['task_definition'],
                                                          module.params['desired_count'],
                                                          deploymentConfiguration,
                                                          network_configuration,
                                                          module.params['health_check_grace_period_seconds'],
                                                          module.params['force_new_deployment'])
                else:
                    try:
                        response = service_mgr.create_service(module.params['name'],
                                                              module.params['cluster'],
                                                              module.params['task_definition'],
                                                              loadBalancers,
                                                              module.params['desired_count'],
                                                              clientToken,
                                                              role,
                                                              deploymentConfiguration,
                                                              module.params['placement_constraints'],
                                                              module.params['placement_strategy'],
                                                              module.params['health_check_grace_period_seconds'],
                                                              network_configuration,
                                                              serviceRegistries,
                                                              module.params['launch_type'],
                                                              module.params['scheduling_strategy']
                                                              )
                    except botocore.exceptions.ClientError as e:
                        module.fail_json_aws(e, msg="Couldn't create service")

                results['service'] = response

            # Reported even in check mode: a change *would* be made.
            results['changed'] = True

    elif module.params['state'] == 'absent':
        if not existing:
            pass
        else:
            # it exists, so we should delete it and mark changed.
            # return info about the cluster deleted
            del existing['deployments']
            del existing['events']
            results['ansible_facts'] = existing
            if 'status' in existing and existing['status'] == "INACTIVE":
                results['changed'] = False
            else:
                if not module.check_mode:
                    try:
                        service_mgr.delete_service(
                            module.params['name'],
                            module.params['cluster']
                        )
                    except botocore.exceptions.ClientError as e:
                        module.fail_json_aws(e, msg="Couldn't delete service")
                results['changed'] = True

    elif module.params['state'] == 'deleting':
        if not existing:
            # BUGFIX: message previously lacked the closing quote around the name.
            module.fail_json(msg="Service '" + module.params['name'] + "' not found.")
            return
        # Poll until the service reports INACTIVE, waiting `delay` seconds
        # between up to `repeat` attempts.
        delay = module.params['delay']
        repeat = module.params['repeat']
        time.sleep(delay)
        for i in range(repeat):
            existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
            status = existing['status']
            if status == "INACTIVE":
                results['changed'] = True
                break
            time.sleep(delay)
            # BUGFIX: was `i is repeat - 1` -- identity comparison on ints only
            # happens to work because CPython caches small integers.
            if i == repeat - 1:
                module.fail_json(msg="Service still not deleted after " + str(repeat) + " tries of " + str(delay) + " seconds each.")
                return

    module.exit_json(**results)


if __name__ == '__main__':
    main()
| gpl-3.0 |
yestech/gae-django-template | django/core/management/commands/reset.py | 229 | 2598 | from optparse import make_option
from django.conf import settings
from django.core.management.base import AppCommand, CommandError
from django.core.management.color import no_style
from django.core.management.sql import sql_reset
from django.db import connections, transaction, DEFAULT_DB_ALIAS
class Command(AppCommand):
    """Deprecated management command: run the ``sqlreset`` SQL for the given
    app(s) against the selected database, after interactive confirmation.

    NOTE: Python 2 only (uses ``except Exception, e`` and the ``print``
    statement); superseded by the ``flush`` command.
    """
    option_list = AppCommand.option_list + (
        make_option('--noinput', action='store_false', dest='interactive', default=True,
            help='Tells Django to NOT prompt the user for input of any kind.'),
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Nominates a database to reset. '
                'Defaults to the "default" database.'),
    )
    help = "Executes ``sqlreset`` for the given app(s) in the current database."
    args = '[appname ...]'
    # Wrap the emitted SQL in a transaction (AppCommand behavior).
    output_transaction = True
    def handle_app(self, app, **options):
        """Execute the ``sqlreset`` statements for one app (destructive)."""
        # This command breaks a lot and should be deprecated
        import warnings
        warnings.warn(
            'This command has been deprecated. The command ``flush`` can be used to delete everything. You can also use ALTER TABLE or DROP TABLE statements manually.',
            PendingDeprecationWarning
        )
        using = options.get('database', DEFAULT_DB_ALIAS)
        connection = connections[using]
        # e.g. 'myproject.myapp.models' -> 'myapp'
        app_name = app.__name__.split('.')[-2]
        self.style = no_style()
        # SQL statements that reset this app's tables on this connection.
        sql_list = sql_reset(app, self.style, connection)
        if options.get('interactive'):
            confirm = raw_input("""
You have requested a database reset.
This will IRREVERSIBLY DESTROY any data for
the "%s" application in the database "%s".
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % (app_name, connection.settings_dict['NAME']))
        else:
            confirm = 'yes'
        if confirm == 'yes':
            try:
                cursor = connection.cursor()
                for sql in sql_list:
                    cursor.execute(sql)
            except Exception, e:
                # Roll back so the connection stays usable after the failure.
                transaction.rollback_unless_managed()
                raise CommandError("""Error: %s couldn't be reset. Possible reasons:
  * The database isn't running or isn't configured correctly.
  * At least one of the database tables doesn't exist.
  * The SQL was invalid.
Hint: Look at the output of 'django-admin.py sqlreset %s'. That's the SQL this command wasn't able to run.
The full error: %s""" % (app_name, app_name, e))
            transaction.commit_unless_managed()
        else:
            print "Reset cancelled."
| bsd-3-clause |
shrimpboyho/git.js | emscript/emscripten/1.5.6/tools/autodebugger.py | 12 | 11391 | '''
Processes an LLVM assembly (.ll) file, adding debugging information.
You can then run the .ll file in the LLVM interpreter (lli) and
compare that to the output when compiled using emscripten.
Warning: You probably want to compile with SKIP_STACK_IN_SMALL=0! Otherwise
there may be weird errors.
'''
import os, sys, re
# Feature flags controlling what gets instrumented; consulted further down
# in this script (MEMCPY switches in an instrumented memcpy postamble --
# the other flags' use sites are presumably later in the file; confirm there).
ALLOW_POINTERS = False
ALLOW_MISC = True
MEMCPY = False
MEMCPY2 = False
NO_DLMALLOC = True
# LLVM IR appended to old-style (.ll with explicit 'entry:' labels) modules:
# format strings plus one printf-based logging helper per value type.
POSTAMBLE = '''
@.emscripten.autodebug.str = private constant [10 x i8] c"AD:%d,%d\\0A\\00", align 1 ; [#uses=1]
@.emscripten.autodebug.str.f = private constant [11 x i8] c"AD:%d,%lf\\0A\\00", align 1 ; [#uses=1]
@.emscripten.autodebug.str.64 = private constant [13 x i8] c"AD:%d,%d,%d\\0A\\00", align 1 ; [#uses=1]
; [#uses=1]
define void @emscripten_autodebug_i64(i32 %line, i64 %value) {
entry:
%0 = trunc i64 %value to i32
%1 = lshr i64 %value, 32
%2 = trunc i64 %1 to i32
%3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([13 x i8]* @.emscripten.autodebug.str.64, i32 0, i32 0), i32 %line, i32 %0, i32 %2) ; [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
; [#uses=1]
define void @emscripten_autodebug_i32(i32 %line, i32 %value) {
entry:
%0 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.emscripten.autodebug.str, i32 0, i32 0), i32 %line, i32 %value) ; [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
; [#uses=1]
define void @emscripten_autodebug_i16(i32 %line, i16 %value) {
entry:
%0 = zext i16 %value to i32 ; [#uses=1]
%1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.emscripten.autodebug.str, i32 0, i32 0), i32 %line, i32 %0) ; [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
; [#uses=1]
define void @emscripten_autodebug_i8(i32 %line, i8 %value) {
entry:
%0 = zext i8 %value to i32 ; [#uses=1]
%1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.emscripten.autodebug.str, i32 0, i32 0), i32 %line, i32 %0) ; [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
; [#uses=1]
define void @emscripten_autodebug_float(i32 %line, float %value) {
entry:
%0 = fpext float %value to double ; [#uses=1]
%1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([11 x i8]* @.emscripten.autodebug.str.f, i32 0, i32 0), i32 %line, double %0) ; [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
; [#uses=1]
define void @emscripten_autodebug_double(i32 %line, double %value) {
entry:
%0 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([11 x i8]* @.emscripten.autodebug.str.f, i32 0, i32 0), i32 %line, double %value) ; [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
'''
# Same logging helpers for new-style LLVM assembly (implicit entry blocks,
# '<label>' markers, %1-based numbering).
POSTAMBLE_NEW = '''
@.emscripten.autodebug.str = private constant [10 x i8] c"AD:%d,%d\\0A\\00", align 1 ; [#uses=1]
@.emscripten.autodebug.str.2 = private constant [13 x i8] c"AD:%d,%d,%d\\0A\\00", align 1 ; [#uses=1]
@.emscripten.autodebug.str.f = private constant [11 x i8] c"AD:%d,%lf\\0A\\00", align 1 ; [#uses=1]
; [#uses=1]
define void @emscripten_autodebug_i64(i32 %line, i64 %value) {
%1 = trunc i64 %value to i32
%2 = lshr i64 %value, 32
%3 = trunc i64 %2 to i32
%4 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([13 x i8]* @.emscripten.autodebug.str.2, i32 0, i32 0), i32 %line, i32 %1, i32 %3) ; [#uses=0]
ret void
}
; [#uses=1]
define void @emscripten_autodebug_i32(i32 %line, i32 %value) {
%1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.emscripten.autodebug.str, i32 0, i32 0), i32 %line, i32 %value) ; [#uses=0]
ret void
}
; [#uses=1]
define void @emscripten_autodebug_i16(i32 %line, i16 %value) {
%1 = zext i16 %value to i32 ; [#uses=1]
%2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.emscripten.autodebug.str, i32 0, i32 0), i32 %line, i32 %1) ; [#uses=0]
ret void
}
; [#uses=1]
define void @emscripten_autodebug_i8(i32 %line, i8 %value) {
%1 = zext i8 %value to i32 ; [#uses=1]
%2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.emscripten.autodebug.str, i32 0, i32 0), i32 %line, i32 %1) ; [#uses=0]
ret void
}
; [#uses=1]
define void @emscripten_autodebug_float(i32 %line, float %value) {
%1 = fpext float %value to double ; [#uses=1]
%2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([11 x i8]* @.emscripten.autodebug.str.f, i32 0, i32 0), i32 %line, double %1) ; [#uses=0]
ret void
}
; [#uses=1]
define void @emscripten_autodebug_double(i32 %line, double %value) {
%1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([11 x i8]* @.emscripten.autodebug.str.f, i32 0, i32 0), i32 %line, double %value) ; [#uses=0]
ret void
}
'''
# Input .ll file and output path come from the command line.
filename, ofilename = sys.argv[1], sys.argv[2]
with open(filename) as f:
    data = f.read()

# If the module never declared printf itself, both postambles must carry the
# declaration, since the injected logging helpers call it.
if 'declare i32 @printf(' not in data:
    printf_decl = '''
; [#uses=1]
declare i32 @printf(i8*, ...)
'''
    POSTAMBLE += printf_decl
    POSTAMBLE_NEW += printf_decl

# Old-style LLVM assembly carries explicit 'entry:' labels and no '<label>'
# markers; new-style assembly gets the alternate postamble.
LLVM_STYLE_OLD = 'entry:' in data and '<label>' not in data

if not LLVM_STYLE_OLD:
    POSTAMBLE = POSTAMBLE_NEW
# When MEMCPY instrumentation is enabled, append a replacement memcpy that
# prints each byte it copies (via the "MC:%d" format string), so memcpy
# traffic appears in the autodebug log.  The normal POSTAMBLE is spliced in
# between the format-string constant and the helper definition.
if MEMCPY:
  POSTAMBLE = '''
@.emscripten.memcpy.str = private constant [7 x i8] c"MC:%d\\0A\\00", align 1 ; [#uses=1]
''' + POSTAMBLE + '''
; [#uses=1]
define void @emscripten_memcpy(i8* %destination, i8* %source, i32 %num, i32 %whati, i1 %sthis) nounwind {
entry:
  %destination.addr = alloca i8*, align 4 ; [#uses=3]
  %source.addr = alloca i8*, align 4 ; [#uses=2]
  %num.addr = alloca i32, align 4 ; [#uses=3]
  %i = alloca i32, align 4 ; [#uses=5]
  %src = alloca i8*, align 4 ; [#uses=5]
  %dst = alloca i8*, align 4 ; [#uses=4]
  store i8* %destination, i8** %destination.addr, align 4
  store i8* %source, i8** %source.addr, align 4
  store i32 %num, i32* %num.addr, align 4
  %tmp = load i8** %source.addr, align 4 ; [#uses=1]
  store i8* %tmp, i8** %src, align 4
  %tmp2 = load i8** %destination.addr, align 4 ; [#uses=1]
  store i8* %tmp2, i8** %dst, align 4
  store i32 0, i32* %i, align 4
  %tmp31 = load i32* %i, align 4 ; [#uses=1]
  %tmp42 = load i32* %num.addr, align 4 ; [#uses=1]
  %cmp3 = icmp ult i32 %tmp31, %tmp42 ; [#uses=1]
  br i1 %cmp3, label %for.body, label %for.end
for.body: ; preds = %for.body, %entry
  %tmp5 = load i8** %src, align 4 ; [#uses=1]
  %tmp6 = load i8* %tmp5 ; [#uses=1]
  %conv = zext i8 %tmp6 to i32 ; [#uses=1]
  %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8]* @.emscripten.memcpy.str, i32 0, i32 0), i32 %conv); [#uses=0]
  %tmp7 = load i8** %src, align 4 ; [#uses=1]
  %tmp8 = load i8* %tmp7 ; [#uses=1]
  %tmp9 = load i8** %dst, align 4 ; [#uses=1]
  store i8 %tmp8, i8* %tmp9
  %tmp10 = load i32* %i, align 4 ; [#uses=1]
  %inc = add i32 %tmp10, 1 ; [#uses=1]
  store i32 %inc, i32* %i, align 4
  %tmp11 = load i8** %src, align 4 ; [#uses=1]
  %incdec.ptr = getelementptr inbounds i8* %tmp11, i32 1 ; [#uses=1]
  store i8* %incdec.ptr, i8** %src, align 4
  %tmp12 = load i8** %dst, align 4 ; [#uses=1]
  %incdec.ptr13 = getelementptr inbounds i8* %tmp12, i32 1 ; [#uses=1]
  store i8* %incdec.ptr13, i8** %dst, align 4
  %tmp3 = load i32* %i, align 4 ; [#uses=1]
  %tmp4 = load i32* %num.addr, align 4 ; [#uses=1]
  %cmp = icmp ult i32 %tmp3, %tmp4 ; [#uses=1]
  br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body, %entry
  %tmp14 = load i8** %destination.addr, align 4 ; [#uses=1]
  ret void
}
'''
# Walk the IR line by line and inject autodebug printf calls after
# interesting instructions.  `lines_added` counts injected lines so the line
# numbers baked into the calls match positions in the *output* file.
lines_added = 0
lines = data.split('\n')
in_func = False      # currently inside a function body being instrumented
added_entry = False  # function-entry marker already emitted for this function
for i in range(len(lines)):
  if MEMCPY:
    # Redirect intrinsic memcpy calls to the logging replacement, but leave
    # the declaration line itself untouched.
    if not lines[i].startswith('declare void'):
      lines[i] = lines[i].replace('@llvm.memcpy.p0i8.p0i8.i32', '@emscripten_memcpy')
  try:
    pre = ''
    if lines[i].startswith('define '):
      in_func = True
      # Optionally skip dlmalloc internals: instrumenting the allocator
      # itself floods the log.
      if NO_DLMALLOC and ('@malloc(' in lines[i] or '@free(' in lines[i] or '@sys_alloc(' in lines[i] or '@segment_holding(' in lines[i] or '@init_top(' in lines[i] or '@add_segment(' in lines[i] or '@tmalloc_small(' in lines[i]):
        in_func = False
      if in_func:
        added_entry = False
    elif lines[i].startswith('}'):
      in_func = False
    elif in_func and not added_entry and ' = alloca' not in lines[i] and lines[i].startswith(' '):
      # This is a good place to mark entry to this function
      added_entry = True
      index = i+1+lines_added
      pre = ' call void @emscripten_autodebug_i32(i32 -1, i32 %d)' % index
    elif in_func and lines[i].startswith(' ret '):
      # This is a good place to mark entry to this function
      index = i+1+lines_added
      pre = ' call void @emscripten_autodebug_i32(i32 -2, i32 %d)' % index
    if in_func:
      # Log every stored scalar value; optionally log single-level pointer
      # stores as their integer address.
      m = re.match(' store (?P<type>i64|i32|i16|i8|float|double|%?[\w\.\*]+) (?P<var>%?[\w.+_]+), .*', lines[i])
      if m:
        index = i+1+lines_added
        if m.group('type') in ['i8', 'i16', 'i32', 'i64', 'float', 'double']:
          lines[i] += '\n call void @emscripten_autodebug_%s(i32 %d, %s %s)' % (m.group('type'), index, m.group('type'), m.group('var'))
          lines_added += 1
        elif ALLOW_POINTERS and m.group('type').endswith('*') and m.group('type').count('*') == 1:
          lines[i] += '\n %%ead.%d = ptrtoint %s %s to i32' % (index, m.group('type'), m.group('var'))
          lines[i] += '\n call void @emscripten_autodebug_i32(i32 %d, i32 %%ead.%d)' % (index, index)
          lines_added += 2
        continue
      # Log every loaded scalar value.
      m = re.match(' %(?P<var>[\w_.]+) = load (?P<type>i64|i32|i16|i8|float|double+)\* [^(].*.*', lines[i])
      if m:
        index = i+1+lines_added
        lines[i] += '\n call void @emscripten_autodebug_%s(i32 %d, %s %%%s)' % (m.group('type'), index, m.group('type'), m.group('var'))
        lines_added += 1
        continue
      if ALLOW_MISC:
        # call is risky - return values can be i32 (i8*) (i16)
        m = re.match(' %(?P<var>[\w_.]+) = (mul|add) (nsw )?(?P<type>i64|i32|i16|i8|float|double+) .*', lines[i])
        if m:
          index = i+1+lines_added
          lines[i] += '\n call void @emscripten_autodebug_%s(i32 %d, %s %%%s)' % (m.group('type'), index, m.group('type'), m.group('var'))
          lines_added += 1
          continue
      if MEMCPY2:
        # After a fixed 8-byte memcpy, log the first source byte as a spot check.
        m = re.match(' call void @llvm\.memcpy\.p0i8\.p0i8\.i32\(i8\* %(?P<dst>[\w_.]+), i8\* %(?P<src>[\w_.]+), i32 8, i32 (?P<align>\d+),.*', lines[i])
        if m:
          index = i+1+lines_added
          lines[i] += '\n %%adtemp%d = load i8* %%%s, align 1' % (index, m.group('src')) + \
                      '\n call void @emscripten_autodebug_i8(i32 %d, i8 %%adtemp%d)' % (index, index)
          lines_added += 3
        continue
  finally:
    # Any entry/return marker prepared above is prepended to the current line.
    if len(pre) > 0:
      lines[i] = pre + '\n' + lines[i]
      lines_added += 1
# Write the instrumented module followed by the autodebug runtime helpers.
f = open(ofilename, 'w')
f.write('\n'.join(lines) + '\n' + POSTAMBLE + '\n')
f.close()
print 'Success.'
| gpl-2.0 |
CottageLabs/portality | portality/default_models.py | 3 | 20330 |
from datetime import datetime
from portality.core import app
from portality.dao import DomainObject as DomainObject
'''
Define models in here. They should all inherit from the DomainObject.
Look in the dao.py to learn more about the default methods available to the Domain Object.
When using portality in your own flask app, perhaps better to make your own models file somewhere and copy these examples
'''
# an example account object, which requires the further additional imports
# There is a more complex example below that also requires these imports
from werkzeug import generate_password_hash, check_password_hash
from flask.ext.login import UserMixin
class Account(DomainObject, UserMixin):
    """A user account stored in its own index type, usable directly as a
    Flask-Login user object (via UserMixin)."""
    __type__ = 'account'

    @classmethod
    def pull_by_email(cls, email):
        """Return the single account registered under `email`, else None."""
        res = cls.query(q='email:"' + email + '"')
        hits = res.get('hits', {})
        if hits.get('total', 0) != 1:
            # Zero or several matches: nothing unambiguous to return.
            return None
        return cls(**hits['hits'][0]['_source'])

    def set_password(self, password):
        """Store a hash of `password` (never the plain text)."""
        hashed = generate_password_hash(password)
        self.data['password'] = hashed

    def check_password(self, password):
        """Return True when `password` matches the stored hash."""
        stored = self.data['password']
        return check_password_hash(stored, password)

    @property
    def is_super(self):
        """Whether this logged-in account is configured as a superuser."""
        if self.is_anonymous():
            return False
        return self.id in app.config['SUPER_USER']
# a typical record object, with no special abilities
class Record(DomainObject):
    """A typical record object: plain DomainObject behaviour, no extras."""
    __type__ = 'record'
# a special object that allows a search onto all index types - FAILS TO CREATE INSTANCES
class Everything(DomainObject):
    """Special object whose target spans every index type in the database
    (note: instance creation fails, per the comment in the original file)."""
    __type__ = 'everything'

    @classmethod
    def target(cls):
        """Return the base URL of the whole index, with no type suffix."""
        host = str(app.config['ELASTIC_SEARCH_HOST'])
        # Bug fix: the old code used .lstrip('http://'), which strips the
        # *character set* {h,t,p,:,/} rather than the prefix string, so a
        # host like 'host.example' became 'ost.example'.  Remove the scheme
        # as a true prefix instead.
        if host.startswith('http://'):
            host = host[len('http://'):]
        t = 'http://' + host.rstrip('/') + '/'
        t += app.config['ELASTIC_SEARCH_DB'] + '/'
        return t
# a page manager object, with a couple of extra methods
class Pages(DomainObject):
    """A CMS-style page record, addressable by its URL."""
    __type__ = 'pages'

    @classmethod
    def pull_by_url(cls, url):
        """Return the page whose URL exactly matches `url`, or None.

        None is also returned when the match is ambiguous (not exactly one hit).
        """
        res = cls.query(q={"query": {"term": {'url.exact': url}}})
        if res.get('hits', {}).get('total', 0) == 1:
            return cls(**res['hits']['hits'][0]['_source'])
        else:
            return None

    def update_from_form(self, request):
        """Merge submitted JSON (preferred) or form values into this page.

        - 'tags' is split on commas into a list, dropping empty entries
        - checkbox fields arrive as the string 'on' and are stored as booleans
        - the 'submit' button value is ignored
        Afterwards the URL is normalised to start with '/' and a non-empty
        title is guaranteed.
        """
        newdata = request.json if request.json else request.values
        for k, v in newdata.items():
            if k == 'tags':
                self.data[k] = [tag for tag in v.split(',') if len(tag) > 0]
            elif k in ['editable', 'accessible', 'visible', 'comments']:
                # HTML checkboxes submit "on" when ticked, nothing otherwise.
                self.data[k] = v == "on"
            elif k not in ['submit']:
                self.data[k] = v
        # Robustness fix: previously this raised KeyError when no 'url' was
        # submitted and none was stored.  The default '/' already starts
        # with '/', so a missing URL is simply left absent.
        if not self.data.get('url', '/').startswith('/'):
            self.data['url'] = '/' + self.data['url']
        if 'title' not in self.data or self.data['title'] == "":
            self.data['title'] = 'untitled'

    def save_from_form(self, request):
        """Apply the submitted values, then persist the page."""
        self.update_from_form(request)
        self.save()
# You can make simple models that just reside in their own index type.
# Then other model types may rely on them, or they may be used on your frontend. Whatever.
class SearchHistory(DomainObject):
    """Simple model holding past searches in its own index type."""
    __type__ = 'searchhistory'
# You could write a record model that stores versions of itself in an archive.
# In which case, here is an example of an Archive model.
class Archive(DomainObject):
    """Keeps per-record history: store() prepends a snapshot of a record's
    state to that record's archive document."""
    __type__ = 'archive'

    @classmethod
    def store(cls, data, action='update'):
        """Prepend a snapshot of `data` (a record's state) to its archive.

        `action` labels the snapshot, e.g. 'update' or 'delete'.
        """
        # Reuse the existing archive document for this record, or start one.
        archive = Archive.get(data.get('_id',None))
        if not archive:
            archive = Archive(_id=data.get('_id',None))
        if archive:
            if 'store' not in archive.data: archive.data['store'] = []
            try:
                # NOTE(review): current_user is not imported in this module,
                # so this raises NameError and the bare except below always
                # falls back to the record's creator -- confirm whether a
                # flask-login import was intended here.
                who = current_user.id
            except:
                who = data.get('_created_by','anonymous')
            # Newest snapshot goes first.
            archive.data['store'].insert(0, {
                'date':data.get('_last_modified', datetime.now().strftime("%Y-%m-%d %H%M")),
                'user': who,
                'state': data,
                'action':action
            })
            archive.save()
# Here is a much more complex Record object that defines its own ID generator, merges and deduplicates itself,
# tracks its own history, and knows what collections it belongs to and some other things
# (taken from http://github.com/okfn/bibserver
'''class Record(DomainObject):
__type__ = 'record'
@classmethod
def get(cls, id_):
if id_ is None:
return None
try:
out = requests.get(cls.target() + id_)
if out.status_code == 404:
return None
else:
rec = cls(**out.json())
rec.data['_views'] = int(rec.data.get('_views',0)) + 1
rec.data['_last_viewed'] = datetime.now().strftime("%Y-%m-%d %H%M")
r = requests.post(rec.target() + rec.id, data=json.dumps(rec.data))
return rec
except:
return None
@property
def views(self):
return self.data.get('_views',0)
@classmethod
def make_rid(cls,data):
id_data = {
'author': [i.get('name','') for i in data.get('author',[])].sort(),
'title': data.get('title','')
}
buf = util.slugify(json.dumps(id_data, sort_keys=True).decode('unicode-escape'),delim=u'')
new_id = hashlib.md5(buf).hexdigest()
return new_id
@classmethod
def sameas(cls,rid):
res = cls.query(terms={'_sameas':rid})
if res['hits']['total'] == 1:
return cls(**res['hits']['hits'][0]['_source'])
else:
return None
@classmethod
def merge(cls, a, b) :
for k, v in a.items():
if k.startswith('_') and k not in ['_collection']:
del a[k]
elif isinstance(v, dict) and k in b:
cls.merge(v, b[k])
elif isinstance(v, list) and k in b:
if not isinstance(b[k], list):
b[k] = [b[k]]
for idx, item in enumerate(v):
if isinstance(item,dict) and idx < len(b[k]):
cls.merge(v[idx],b[k][idx])
elif k in ['_collection'] and item not in b[k]:
b[k].append(item)
a.update(b)
return a
@property
def history(self):
archive = Archive.get(self.data.get('_id',None))
if archive:
return archive.data.get('store',[])
else:
return []
# remove a record from a collection - bypasses the main save which always tries to greedily retain info
def removefromcollection(self,collid):
collid = collid.replace('/','_____')
if collid in self.data.get('_collection',[]):
self.data['_collection'].remove(collid)
r = requests.post(self.target() + self.id, data=json.dumps(self.data))
Archive.store(self.data)
def addtocollection(self,collid):
collid = collid.replace('/','_____')
if '_collection' not in self.data:
self.data['_collection'] = []
if collid not in self.data['_collection']:
self.data['_collection'].append(collid)
r = requests.post(self.target() + self.id, data=json.dumps(self.data))
Archive.store(self.data)
# add or remove a tag to a record
def removetag(self,tagid):
if tagid in self.data.get('_tag',[]):
self.data['_tag'].remove(tagid)
r = requests.post(self.target() + self.id, data=json.dumps(self.data))
Archive.store(self.data)
def addtag(self,tagid):
if '_tag' not in self.data:
self.data['_tag'] = []
if tagid not in self.data['_tag']:
self.data['_tag'].append(tagid)
r = requests.post(self.target() + self.id, data=json.dumps(self.data))
Archive.store(self.data)
# returns a list of current users collections that this record is in
@property
def isinmy(self):
colls = []
if current_user is not None and not current_user.is_anonymous():
for item in self.data['_collection']:
if item.startswith(current_user.id):
colls.append(item)
return colls
def save(self):
# archive the old version
if app.config['ARCHIVING']:
Archive.store(self.data)
# make an ID based on current content - builds from authors and title
derivedID = self.make_rid(self.data)
# look for any stored record with the derived ID
exists = requests.get(self.target() + derivedID)
if exists.status_code == 200:
# where found, merge with current data and this record will be overwritten on save
self.data = self.merge(self.data, exists.json()['_source'])
# if this record has a new ID, need to merge the old record and delete it
if self.id is not None and self.id != derivedID:
old = requests.get(self.target() + self.id)
if old.status_code == 200:
self.data = self.merge(self.data, old.json()['_source'])
if '_sameas' not in self.data: self.data['_sameas'] = []
self.data['_sameas'].append(self.id)
Archive.store(self.data, action='delete')
r = requests.delete( self.target() + self.id )
# ensure the latest ID is used by this record now
self.data['_id'] = derivedID
# make sure all collection refs are lower-cased
self.data['_collection'] = [i.lower() for i in self.data.get('_collection',[])]
# update site url, created date, last modified date
if 'SITE_URL' in app.config:
self.data['url'] = app.config['SITE_URL'].rstrip('/') + '/record/' + self.id
if 'identifier' not in self.data: self.data['identifier'] = []
if 'bibsoup' not in [i['type'] for i in self.data['identifier']]:
self.data['identifier'].append({'type':'bibsoup','url':self.data['url'],'id':self.id})
if '_created' not in self.data:
self.data['_created'] = datetime.now().strftime("%Y-%m-%d %H%M")
self.data['_last_modified'] = datetime.now().strftime("%Y-%m-%d %H%M")
r = requests.post(self.target() + self.id, data=json.dumps(self.data))
return r.status_code
@classmethod
def bulk(cls, records):
# TODO: change this to a bulk es save
for item in records:
new = Record(**item)
success = 0
attempts = 0
while success != 200 and attempts < 10:
time.sleep(attempts * 0.1)
success = new.save()
attempts += 1
def delete(self):
Archive.store(self.data, action='delete')
r = requests.delete( self.target() + self.id )
def similar(self,field="title"):
res = Record.query(recid=self.id, endpoint='_mlt', q='mlt_fields=' + field + '&min_term_freq=1&percent_terms_to_match=1&min_word_len=3')
return [Record(**i['_source']) for i in res['hits']['hits']]
@property
def valuelist(self):
# a list of all the values in the record
vals = []
def valloop(obj):
if isinstance(obj,dict):
for item in obj:
valloop(obj[item])
elif isinstance(obj,list):
for thing in obj:
valloop(thing)
else:
vals.append(obj)
valloop(self.data)
return vals
@property
def valuelist_string(self):
return json.dumps(self.valuelist)
@property
def remote(self):
# check any listed external APIs for relevant data to return
# TODO: just does service core for now - implement for others
info = {}
apis = app.config['EXTERNAL_APIS']
if apis['servicecore']['key']:
try:
servicecore = "not found in any UK repository"
addr = apis['servicecore']['url'] + self.data['title'].replace(' ','%20') + "?format=json&api_key=" + apis['servicecore']['key']
r = requests.get(addr)
data = r.json()
if 'ListRecords' in data and len(data['ListRecords']) != 0:
info['servicecore'] = data['ListRecords'][0]['record']['metadata']['oai_dc:dc']
except:
pass
return info
# build how it should look on the page
@property
def pretty(self):
result = '<p>'
img = False
if img:
result += '<img class="thumbnail" style="float:left; width:100px; margin:0 5px 10px 0; max-height:150px;" src="' + img[0] + '" />'
record = self.data
lines = ''
if 'title' in record:
lines += '<h2>' + record['title'] + '</h2>'
if 'author' in record:
lines += '<p>'
authors = False
for obj in record.get('author',[]):
if authors: lines += ', '
lines += obj.get('name','')
authors = True
lines += '</p>'
if 'journal' in record:
lines += '<p><i>' + record['journal'].get('name','') + '</i>'
if 'year' in record:
lines += ' (' + record['year'] + ')'
lines += '</p>'
elif 'year' in record:
lines += '<p>(' + record['year'] + ')</p>'
if 'link' in record:
for obj in record['link']:
lines += '<small><a target="_blank" href="' + obj['url'] + '">'
if 'anchor' in obj:
lines += obj['anchor']
else:
lines += obj['url']
lines += '</a></small>'
if lines:
result += lines
else:
result += json.dumps(record,sort_keys=True,indent=4)
result += '</p>'
return result
'''
# And a more complex Collection example to go with the above Record, also from bibserver
'''class Collection(DomainObject):
__type__ = 'collection'
@classmethod
def get(cls, id_):
if id_ is None:
return None
try:
id_ = id_.replace('/','_____')
out = requests.get(cls.target() + id_)
if out.status_code == 404:
return None
else:
rec = cls(**out.json())
rec.data['_views'] = int(rec.data.get('_views',0)) + 1
rec.data['_last_viewed'] = datetime.now().strftime("%Y-%m-%d %H%M")
r = requests.post(rec.target() + rec.id, data=json.dumps(rec.data))
return rec
except:
return None
@property
def views(self):
return self.data.get('_views',0)
def records(self, **kwargs):
return [Record.get(**i['_source']['_id']) for i in Record.query(terms={'_collection':self.id}, **kwargs).get('hits',{}).get('hits',[])]
def save(self):
if not self.owner and not current_user.is_anonymous() and not self.data.get('public',False):
self.data['owner'] = current_user.id
if not self.data.get('slug',False):
self.data['slug'] = util.slugify(self.data.get('name',uuid.uuid4().hex))
if not self.id:
self.data['_id'] = self.owner + '_____' + self.data['slug']
if not self.data.get('url',False):
url = app.config.get('SITE_URL','').rstrip('/') + '/'
if self.owner:
url += self.owner + '/'
self.data['url'] = url + self.data['slug']
if '_created' not in self.data:
self.data['_created'] = datetime.now().strftime("%Y-%m-%d %H%M")
self.data['_last_modified'] = datetime.now().strftime("%Y-%m-%d %H%M")
r = requests.post(self.target() + self.id, data=json.dumps(self.data))
print r.text
def delete(self):
r = requests.delete( self.target() + self.id )
count = 0
while count < len(self):
for record in self.records(_from=count,size=100):
record.removefromcollection(self.id)
count += 100
def __len__(self):
return Record.query(terms={'_collection':self.id}).get('hits',{}).get('total',0)
@property
def owner(self):
return self.data.get('owner','')
'''
# Here is a more complex Account model example, with calls back out to SearchHistory models
# Also expects a Collection model to exist, and defines how removal of a user account would include
# removal of registration of collections to that user too.
'''class Account(DomainObject, UserMixin):
__type__ = 'account'
@classmethod
def pull_by_email(cls,email):
res = cls.query(q='email:"' + email + '"')
if res.get('hits',{}).get('total',0) == 1:
return cls(**res['hits']['hits'][0]['_source'])
else:
return None
@property
def recentsearches(self):
if app.config.get('QUERY_TRACKING', False):
res = SearchHistory.query(terms={'user':current_user.id}, sort={"_created" + app.config.get('FACET_FIELD','.exact'):{"order":"desc"}}, size=100)
print res
return [i.get('_source',{}) for i in res.get('hits',{}).get('hits',[])]
else:
return []
@property
def recentviews(self):
return self.data.get('recentviews',[])
def addrecentview(self, ridtuple):
if 'recentviews' not in self.data:
self.data['recentviews'] = []
if ridtuple[0] not in [t[0] for t in self.data['recentviews']]:
self.data['recentviews'].insert(0, ridtuple)
if len(self.data['recentviews']) > 100:
del self.data['recentviews'][100]
self.save()
def set_password(self, password):
self.data['password'] = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.data['password'], password)
@property
def is_super(self):
return auth.user.is_super(self)
@property
def email(self):
return self.data['email']
def collections(self, sort={"slug.exact":{"order":"asc"}}, **kwargs):
return [Collection.get(i['_source']['_id']) for i in Collection.query(terms={'owner':self.id},**kwargs).get('hits',{}).get('hits',[])]
def __len__(self):
return Collection.query(terms={'owner':self.id}).get('hits',{}).get('total',0)
def delete(self):
r = requests.delete( self.target() + self.id )
count = 0
while count < len(self):
for coll in self.collections(_from=count,size=100):
coll.delete()
count += 100
'''
# This could be used with account signup approval processes to store accounts that have been
# created but not yet approved via email confirmation.
'''class UnapprovedAccount(Account):
__type__ = 'unapprovedaccount'
def requestvalidation(self):
# send an email to account email address and await response, unless in debug mode
# validate link is like http://siteaddr.net/username?validate=key
msg = "Hello " + self.id + "\n\n"
msg += "Thanks for signing up with " + app.config['SERVICE_NAME'] + "\n\n"
msg += "In order to validate and enable your account, please follow the link below:\n\n"
msg += app.config['SITE_URL'] + "/" + self.id + "?validate=" + self.data['validate_key'] + "\n\n"
msg += "Thanks! We hope you enjoy using " + app.config['SERVICE_NAME']
if not app.config['DEBUG']:
util.send_mail([self.data['email']], app.config['EMAIL_FROM'], 'validate your account', msg)
def validate(self,key):
# accept validation and create new account
if key == self.data['validate_key']:
del self.data['validate_key']
account = Account(**self.data)
account.save()
self.delete()
return account
else:
return None
'''
| mit |
kozmikkick/KozmiKKerneL-KitKat | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict that transparently creates nested dicts on access."""
    return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
    """Register a flag-type field and the delimiter used when printing it."""
    flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
    """Register the symbolic name for one bit value of a flag field."""
    flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
    """Register a symbolic field (no bookkeeping is actually required)."""
    # nothing to do, really
    pass
def define_symbolic_value(event_name, field_name, value, field_str):
    """Register the symbolic name for one exact value of a field."""
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Render a flag-type field value as its symbolic names, e.g. "A | B".

    A zero value maps to the name registered for 0 (if any); otherwise each
    set bit contributes its registered name, joined with the field's
    delimiter.  Bits without a registered name are silently ignored.
    """
    string = ""
    if flag_fields[event_name][field_name]:
        # Hoist the repeated autodict lookups out of the loop.
        field = flag_fields[event_name][field_name]
        values = field['values']
        print_delim = 0
        # sorted() gives the same ascending order as the old
        # keys();keys.sort() pair, but also works on Python 3 where
        # dict.keys() is a view without a .sort() method.
        for idx in sorted(values.keys()):
            if not value and not idx:
                # value == 0: use the name registered for 0 and stop.
                string += values[idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and field['delim']:
                    string += " " + field['delim'] + " "
                string += values[idx]
                print_delim = 1
                value &= ~idx
    return string
def symbol_str(event_name, field_name, value):
    """Render a symbolic-type field value as its registered name ('' if none)."""
    string = ""
    if symbolic_fields[event_name][field_name]:
        values = symbolic_fields[event_name][field_name]['values']
        # sorted() replaces keys();keys.sort(): identical order on Python 2,
        # and compatible with Python 3 dict views.
        for idx in sorted(values.keys()):
            if not value and not idx:
                string = values[idx]
                break
            if (value == idx):
                string = values[idx]
                break
    return string
# Mapping of trace-flag bits to their symbolic names; 0 means "no flags".
trace_flags = {
    0x00: "NONE",
    0x01: "IRQS_OFF",
    0x02: "IRQS_NOSUPPORT",
    0x04: "NEED_RESCHED",
    0x08: "HARDIRQ",
    0x10: "SOFTIRQ",
}

def trace_flag_str(value):
    """Render a trace-flags bitmask as "NAME | NAME | ..." ("NONE" for 0)."""
    string = ""
    print_delim = 0
    # Iterate the bits in explicit ascending order: plain dict iteration
    # order is not guaranteed on older Pythons, and the 0 ("NONE") key must
    # be examined first for the zero-value case to behave correctly.
    for idx in sorted(trace_flags.keys()):
        if not value and not idx:
            string += "NONE"
            break
        if idx and (value & idx) == idx:
            if print_delim:
                string += " | ";
            string += trace_flags[idx]
            print_delim = 1
            value &= ~idx
    return string
def taskState(state):
    """Translate a scheduler task-state code into its short name.

    Codes outside the known set map to "Unknown".
    """
    state_names = {0: "R", 1: "S", 2: "D", 64: "DEAD"}
    return state_names.get(state, "Unknown")
class EventHeaders:
    """Bundle of the common per-event header fields of a perf trace event."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu, self.secs, self.nsecs = common_cpu, common_secs, common_nsecs
        self.pid, self.comm = common_pid, common_comm

    def ts(self):
        """Full event timestamp in nanoseconds."""
        nanos_per_sec = 10 ** 9
        return self.secs * nanos_per_sec + self.nsecs

    def ts_format(self):
        """Timestamp formatted as "seconds.microseconds" for display."""
        micros = int(self.nsecs / 1000)
        return "%d.%d" % (self.secs, micros)
| gpl-2.0 |
kontron/python-ipmi | pyipmi/sensor.py | 1 | 7640 | # Copyright (c) 2014 Kontron Europe GmbH
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import absolute_import
from .utils import check_completion_code
from .msgs import create_request_by_name
from .helper import get_sdr_data_helper, get_sdr_chunk_helper
from . import sdr
# THRESHOLD BASED STATES
EVENT_READING_TYPE_CODE_THRESHOLD = 0x01
# DMI-based "Usage States" STATES
EVENT_READING_TYPE_CODE_DISCRETE = 0x02
# DIGITAL/DISCRETE EVENT STATES
EVENT_READING_TYPE_CODE_STATE = 0x03
EVENT_READING_TYPE_CODE_PREDICTIVE_FAILURE = 0x04
EVENT_READING_TYPE_CODE_LIMIT = 0x05
EVENT_READING_TYPE_CODE_PERFORMANCE = 0x06
# Sensor Types
SENSOR_TYPE_TEMPERATURE = 0x01
SENSOR_TYPE_VOLTAGE = 0x02
SENSOR_TYPE_CURRENT = 0x03
SENSOR_TYPE_FAN = 0x04
SENSOR_TYPE_CHASSIS_INTRUSION = 0x05
SENSOR_TYPE_PLATFORM_SECURITY = 0x06
SENSOR_TYPE_PROCESSOR = 0x07
SENSOR_TYPE_POWER_SUPPLY = 0x08
SENSOR_TYPE_POWER_UNIT = 0x09
SENSOR_TYPE_COOLING_DEVICE = 0x0a
SENSOR_TYPE_OTHER_UNITS_BASED_SENSOR = 0x0b
SENSOR_TYPE_MEMORY = 0x0c
SENSOR_TYPE_DRIVE_SLOT = 0x0d
SENSOR_TYPE_POST_MEMORY_RESIZE = 0x0e
SENSOR_TYPE_SYSTEM_FIRMWARE_PROGRESS = 0x0f
SENSOR_TYPE_EVENT_LOGGING_DISABLED = 0x10
SENSOR_TYPE_WATCHDOG_1 = 0x11
SENSOR_TYPE_SYSTEM_EVENT = 0x12
SENSOR_TYPE_CRITICAL_INTERRUPT = 0x13
SENSOR_TYPE_BUTTON = 0x14
SENSOR_TYPE_MODULE_BOARD = 0x15
SENSOR_TYPE_MICROCONTROLLER_COPROCESSOR = 0x16
SENSOR_TYPE_ADD_IN_CARD = 0x17
SENSOR_TYPE_CHASSIS = 0x18
SENSOR_TYPE_CHIP_SET = 0x19
SENSOR_TYPE_OTHER_FRU = 0x1a
SENSOR_TYPE_CABLE_INTERCONNECT = 0x1b
SENSOR_TYPE_TERMINATOR = 0x1c
SENSOR_TYPE_SYSTEM_BOOT_INITIATED = 0x1d
SENSOR_TYPE_BOOT_ERROR = 0x1e
SENSOR_TYPE_OS_BOOT = 0x1f
SENSOR_TYPE_OS_CRITICAL_STOP = 0x20
SENSOR_TYPE_SLOT_CONNECTOR = 0x21
SENSOR_TYPE_SYSTEM_ACPI_POWER_STATE = 0x22
SENSOR_TYPE_WATCHDOG_2 = 0x23
SENSOR_TYPE_PLATFORM_ALERT = 0x24
SENSOR_TYPE_ENTITY_PRESENT = 0x25
SENSOR_TYPE_MONITOR_ASIC_IC = 0x26
SENSOR_TYPE_LAN = 0x27
SENSOR_TYPE_MANGEMENT_SUBSYSTEM_HEALTH = 0x28
SENSOR_TYPE_BATTERY = 0x29
SENSOR_TYPE_SESSION_AUDIT = 0x2a
SENSOR_TYPE_VERSION_CHANGE = 0x2b
SENSOR_TYPE_FRU_STATE = 0x2c
SENSOR_TYPE_FRU_HOT_SWAP = 0xf0
SENSOR_TYPE_IPMB_PHYSICAL_LINK = 0xf1
SENSOR_TYPE_MODULE_HOT_SWAP = 0xf2
SENSOR_TYPE_POWER_CHANNEL_NOTIFICATION = 0xf3
SENSOR_TYPE_TELCO_ALARM_INPUT = 0xf4
SENSOR_TYPE_OEM_KONTRON_FRU_INFORMATION_AGENT = 0xc5
SENSOR_TYPE_OEM_KONTRON_POST_VALUE = 0xc6
SENSOR_TYPE_OEM_KONTRON_FW_UPGRADE = 0xc7
SENSOR_TYPE_OEM_KONTRON_DIAGNOSTIC = 0xc9
SENSOR_TYPE_OEM_KONTRON_SYSTEM_FIRMWARE_UPGRADE = 0xca
SENSOR_TYPE_OEM_KONTRON_POWER_DENIED = 0xcd
SENSOR_TYPE_OEM_KONTRON_RESET = 0xcf
class Sensor(object):
    """Mixin implementing the IPMI sensor-device commands: Device SDR
    repository access, sensor readings and thresholds.

    Expects the host class to provide send_message()/send_message_with_name().
    """

    def reserve_device_sdr_repository(self):
        """Obtain a reservation ID required for paged Device SDR reads."""
        rsp = self.send_message_with_name('ReserveDeviceSdrRepository')
        return rsp.reservation_id

    def _get_device_sdr_chunk(self, reservation_id, record_id, offset, length):
        """Read one chunk of an SDR record; return (next_record_id, data).

        get_sdr_chunk_helper handles retrying with a fresh reservation.
        """
        req = create_request_by_name('GetDeviceSdr')
        req.reservation_id = reservation_id
        req.record_id = record_id
        req.offset = offset
        req.bytes_to_read = length
        rsp = get_sdr_chunk_helper(self.send_message, req,
                                   self.reserve_device_sdr_repository)
        return (rsp.next_record_id, rsp.record_data)

    def get_device_sdr(self, record_id, reservation_id=None):
        """Collect all data from the sensor device to get the SDR.

        `record_id` the Record ID.

        `reservation_id=None` can be set. if None the reservation ID will
        be determined.
        """
        (next_id, record_data) = \
            get_sdr_data_helper(self.reserve_device_sdr_repository,
                                self._get_device_sdr_chunk,
                                record_id, reservation_id)
        return sdr.SdrCommon.from_data(record_data, next_id)

    def device_sdr_entries(self):
        """A generator that returns the SDR list.

        Starting with ID=0x0000 and
        end when ID=0xffff is returned.
        """
        reservation_id = self.reserve_device_sdr_repository()
        record_id = 0
        while True:
            record = self.get_device_sdr(record_id, reservation_id)
            yield record
            # 0xffff marks the end of the repository.
            if record.next_id == 0xffff:
                break
            record_id = record.next_id

    def get_device_sdr_list(self, reservation_id=None):
        """Return the complete SDR list."""
        return list(self.device_sdr_entries())

    def rearm_sensor_events(self, sensor_number):
        """Rearm sensor events for the given sensor number."""
        self.send_message_with_name('RearmSensorEvents',
                                    sensor_number=sensor_number)

    def get_sensor_reading(self, sensor_number, lun=0):
        """Return the sensor reading and the assertion states.

        `sensor_number`

        Returns a tuple of (`raw reading`, `assertion states`); the reading
        is None while the sensor's initial update is still in progress.
        """
        rsp = self.send_message_with_name('GetSensorReading',
                                          sensor_number=sensor_number,
                                          lun=lun)
        reading = rsp.sensor_reading
        if rsp.config.initial_update_in_progress:
            # No valid reading has been produced yet.
            reading = None
        states = None
        if rsp.states1 is not None:
            states = rsp.states1
        if rsp.states2 is not None:
            # states2 carries the upper byte of the event-state bitfield.
            # NOTE(review): assumes states2 only appears together with
            # states1; otherwise this would OR into None -- confirm.
            states |= (rsp.states2 << 8)
        return (reading, states)

    def set_sensor_thresholds(self, sensor_number, lun=0,
                              unr=None, ucr=None, unc=None,
                              lnc=None, lcr=None, lnr=None):
        """Set the sensor thresholds that are not 'None'.

        `sensor_number`
        `unr` for upper non-recoverable
        `ucr` for upper critical
        `unc` for upper non-critical
        `lnc` for lower non-critical
        `lcr` for lower critical
        `lnr` for lower non-recoverable
        """
        req = create_request_by_name('SetSensorThresholds')
        req.sensor_number = sensor_number
        req.lun = lun
        thresholds = dict(unr=unr, ucr=ucr, unc=unc, lnc=lnc, lcr=lcr, lnr=lnr)
        for key, value in thresholds.items():
            if value is not None:
                # Flag the threshold as "to be set" and carry its value.
                setattr(req.set_mask, key, 1)
                setattr(req.threshold, key, value)
        rsp = self.send_message(req)
        check_completion_code(rsp.completion_code)

    def get_sensor_thresholds(self, sensor_number, lun=0):
        """Return the readable thresholds as a dict keyed by threshold name
        ('unr', 'ucr', 'unc', 'lnc', 'lcr', 'lnr'); unreadable thresholds
        are omitted."""
        rsp = self.send_message_with_name('GetSensorThresholds',
                                          sensor_number=sensor_number,
                                          lun=lun)
        thresholds = {}
        threshold_list = ('unr', 'ucr', 'unc', 'lnc', 'lcr', 'lnr')
        for threshold in threshold_list:
            if hasattr(rsp.readable_mask, threshold):
                if getattr(rsp.readable_mask, threshold):
                    thresholds[threshold] = getattr(rsp.threshold, threshold)
        return thresholds
| lgpl-2.1 |
mezz64/home-assistant | homeassistant/util/ruamel_yaml.py | 10 | 4816 | """ruamel.yaml utility functions."""
from collections import OrderedDict
import logging
import os
from os import O_CREAT, O_TRUNC, O_WRONLY, stat_result
from typing import Dict, List, Optional, Union
import ruamel.yaml
from ruamel.yaml import YAML # type: ignore
from ruamel.yaml.compat import StringIO
from ruamel.yaml.constructor import SafeConstructor
from ruamel.yaml.error import YAMLError
from homeassistant.exceptions import HomeAssistantError
from homeassistant.util.yaml import secret_yaml
_LOGGER = logging.getLogger(__name__)
JSON_TYPE = Union[List, Dict, str] # pylint: disable=invalid-name
class ExtSafeConstructor(SafeConstructor):
    """Extended SafeConstructor."""

    # Path of the file being loaded; set before use so constructors can
    # resolve relative paths and name the file in error messages.
    name: Optional[str] = None
class UnsupportedYamlError(HomeAssistantError):
    """Raised when a YAML file uses a tag that is not supported here."""
class WriteError(HomeAssistantError):
    """Raised when writing the YAML data to disk fails."""
def _include_yaml(
    constructor: ExtSafeConstructor, node: ruamel.yaml.nodes.Node
) -> JSON_TYPE:
    """Load another YAML file and embeds it using the !include tag.

    Example:
        device_tracker: !include device_tracker.yaml

    Raises HomeAssistantError when no file name is set on the constructor,
    since the included path cannot be resolved then.
    """
    if constructor.name is None:
        # f-string for consistency: this was the last %-formatting left in a
        # module that otherwise uses f-strings (message text unchanged).
        raise HomeAssistantError(
            f"YAML include error: filename not set for {node.value}"
        )
    fname = os.path.join(os.path.dirname(constructor.name), node.value)
    return load_yaml(fname, False)
def _yaml_unsupported(
    constructor: ExtSafeConstructor, node: ruamel.yaml.nodes.Node
) -> None:
    """Fallback constructor: reject any tag the safe loader does not know."""
    source = os.path.basename(constructor.name or '(None)')
    raise UnsupportedYamlError(
        f"Unsupported YAML, you can not use {node.tag} in {source}"
    )
def object_to_yaml(data: JSON_TYPE) -> str:
    """Create yaml string from object."""
    dumper = YAML(typ="rt")
    dumper.indent(sequence=4, offset=2)
    buffer = StringIO()
    try:
        dumper.dump(data, buffer)
    except YAMLError as exc:
        _LOGGER.error("YAML error: %s", exc)
        raise HomeAssistantError(exc) from exc
    serialized: str = buffer.getvalue()
    return serialized
def yaml_to_object(data: str) -> JSON_TYPE:
    """Create object from yaml string."""
    parser = YAML(typ="rt")
    try:
        loaded: Union[List, Dict, str] = parser.load(data)
    except YAMLError as exc:
        _LOGGER.error("YAML error: %s", exc)
        raise HomeAssistantError(exc) from exc
    return loaded
def load_yaml(fname: str, round_trip: bool = False) -> JSON_TYPE:
    """Load a YAML file."""
    if not round_trip:
        # Remember the first file loaded so !include can resolve
        # relative paths against it.
        if ExtSafeConstructor.name is None:
            ExtSafeConstructor.name = fname
        yaml = YAML(typ="safe")
        yaml.Constructor = ExtSafeConstructor
    else:
        yaml = YAML(typ="rt")
        yaml.preserve_quotes = True
    try:
        with open(fname, encoding="utf-8") as conf_file:
            # An empty YAML file loads as None; normalize to an empty dict.
            return yaml.load(conf_file) or OrderedDict()
    except YAMLError as exc:
        _LOGGER.error("YAML error in %s: %s", fname, exc)
        raise HomeAssistantError(exc) from exc
    except UnicodeDecodeError as exc:
        _LOGGER.error("Unable to read file %s: %s", fname, exc)
        raise HomeAssistantError(exc) from exc
def save_yaml(fname: str, data: JSON_TYPE) -> None:
    """Save a YAML file.

    Dumps to a temporary sibling file first and atomically replaces the
    target via os.replace, carrying over the original file's mode and
    (where supported) ownership.
    """
    yaml = YAML(typ="rt")
    yaml.indent(sequence=4, offset=2)
    tmp_fname = f"{fname}__TEMP__"
    try:
        try:
            file_stat = os.stat(fname)
        except OSError:
            # Target does not exist yet: fall back to 0o644 defaults.
            file_stat = stat_result((0o644, -1, -1, -1, -1, -1, -1, -1, -1, -1))
        # os.open lets us create the temp file with the target's mode.
        with open(
            os.open(tmp_fname, O_WRONLY | O_CREAT | O_TRUNC, file_stat.st_mode),
            "w",
            encoding="utf-8",
        ) as temp_file:
            yaml.dump(data, temp_file)
        os.replace(tmp_fname, fname)
        # st_ctime > -1 means the stat above came from a real file, so
        # restore its original owner/group (no-op on platforms without chown).
        if hasattr(os, "chown") and file_stat.st_ctime > -1:
            try:
                os.chown(fname, file_stat.st_uid, file_stat.st_gid)
            except OSError:
                pass
    except YAMLError as exc:
        _LOGGER.error(str(exc))
        raise HomeAssistantError(exc) from exc
    except OSError as exc:
        _LOGGER.exception("Saving YAML file %s failed: %s", fname, exc)
        raise WriteError(exc) from exc
    finally:
        if os.path.exists(tmp_fname):
            try:
                os.remove(tmp_fname)
            except OSError as exc:
                # If we are cleaning up then something else went wrong, so
                # we should suppress likely follow-on errors in the cleanup
                _LOGGER.error("YAML replacement cleanup failed: %s", exc)
# Register the custom tags handled by the safe constructor; the None key
# is the fallback used for any other (unsupported) tag.
ExtSafeConstructor.add_constructor("!secret", secret_yaml)
ExtSafeConstructor.add_constructor("!include", _include_yaml)
ExtSafeConstructor.add_constructor(None, _yaml_unsupported)
| apache-2.0 |
hplustree/trove | trove/tests/unittests/guestagent/test_couchbase_manager.py | 3 | 6377 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import stat
import tempfile
import mock
from mock import DEFAULT
from mock import MagicMock
from mock import Mock
from mock import patch
from oslo_utils import netutils
from trove.common import utils
from trove.guestagent import backup
from trove.guestagent.datastore.experimental.couchbase import (
manager as couch_manager)
from trove.guestagent.datastore.experimental.couchbase import (
service as couch_service)
from trove.guestagent import volume
from trove.tests.unittests.guestagent.test_datastore_manager import \
DatastoreManagerTest
class GuestAgentCouchbaseManagerTest(DatastoreManagerTest):
    """Unit tests for the Couchbase guest-agent datastore manager."""

    def setUp(self):
        super(GuestAgentCouchbaseManagerTest, self).setUp('couchbase')
        self.manager = couch_manager.Manager()
        self.packages = 'couchbase-server'
        # Stub out the service lifecycle calls so no real Couchbase
        # process is ever started, stopped or restarted by these tests.
        app_patcher = patch.multiple(
            couch_service.CouchbaseApp,
            stop_db=DEFAULT, start_db=DEFAULT, restart=DEFAULT)
        self.addCleanup(app_patcher.stop)
        app_patcher.start()
        netutils_patcher = patch.object(netutils, 'get_my_ipv4')
        self.addCleanup(netutils_patcher.stop)
        netutils_patcher.start()

    def tearDown(self):
        super(GuestAgentCouchbaseManagerTest, self).tearDown()

    def test_update_status(self):
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        self.manager.update_status(self.context)
        mock_status.update.assert_any_call()

    def test_prepare_device_path_true(self):
        self._prepare_dynamic()

    def test_prepare_from_backup(self):
        self._prepare_dynamic(backup_id='backup_id_123abc')

    @patch.multiple(couch_service.CouchbaseApp,
                    install_if_needed=DEFAULT,
                    start_db_with_conf_changes=DEFAULT,
                    initial_setup=DEFAULT)
    @patch.multiple(volume.VolumeDevice,
                    format=DEFAULT,
                    mount=DEFAULT,
                    mount_points=Mock(return_value=[]))
    @patch.object(backup, 'restore')
    def _prepare_dynamic(self, device_path='/dev/vdb', backup_id=None,
                         *mocks, **kwmocks):
        # covering all outcomes is starting to cause trouble here
        # backup_info is only passed when restoring from a backup.
        backup_info = {'id': backup_id,
                       'location': 'fake-location',
                       'type': 'CbBackup',
                       'checksum': 'fake-checksum'} if backup_id else None
        mock_status = MagicMock()
        mock_status.begin_install = MagicMock(return_value=None)
        self.manager.appStatus = mock_status
        instance_ram = 2048
        mount_point = '/var/lib/couchbase'
        self.manager.prepare(self.context, self.packages, None,
                             instance_ram, None, device_path=device_path,
                             mount_point=mount_point,
                             backup_info=backup_info,
                             overrides=None,
                             cluster_config=None)
        # verification/assertion
        mock_status.begin_install.assert_any_call()
        kwmocks['install_if_needed'].assert_any_call(self.packages)
        if backup_info:
            backup.restore.assert_any_call(self.context,
                                           backup_info,
                                           mount_point)

    def test_restart(self):
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        couch_service.CouchbaseApp.restart = MagicMock(return_value=None)
        # invocation
        self.manager.restart(self.context)
        # verification/assertion
        couch_service.CouchbaseApp.restart.assert_any_call()

    def test_stop_db(self):
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        couch_service.CouchbaseApp.stop_db = MagicMock(return_value=None)
        # invocation
        self.manager.stop_db(self.context)
        # verification/assertion
        couch_service.CouchbaseApp.stop_db.assert_any_call(
            do_not_start_on_reboot=False)

    def __fake_mkstemp(self):
        # Delegate to the real mkstemp but remember the created file so
        # the test can inspect and later remove it.
        self.tempfd, self.tempname = self.original_mkstemp()
        return self.tempfd, self.tempname

    def __fake_mkstemp_raise(self):
        # Simulate mkstemp failing with EAGAIN.
        raise OSError(11, 'Resource temporarily unavailable')

    def __cleanup_tempfile(self):
        if self.tempname:
            os.unlink(self.tempname)

    @mock.patch.object(utils, 'execute_with_timeout',
                       Mock(return_value=('0', '')))
    def test_write_password_to_file1(self):
        self.original_mkstemp = tempfile.mkstemp
        self.tempname = None
        with mock.patch.object(tempfile,
                               'mkstemp',
                               self.__fake_mkstemp):
            self.addCleanup(self.__cleanup_tempfile)
            rootaccess = couch_service.CouchbaseRootAccess()
            rootaccess.write_password_to_file('mypassword')
            # The password file must be readable only by its owner.
            filepermissions = os.stat(self.tempname).st_mode
            self.assertEqual(stat.S_IRUSR, filepermissions & 0o777)

    @mock.patch.object(utils, 'execute_with_timeout',
                       Mock(return_value=('0', '')))
    @mock.patch(
        'trove.guestagent.datastore.experimental.couchbase.service.LOG')
    def test_write_password_to_file2(self, mock_logging):
        self.original_mkstemp = tempfile.mkstemp
        self.tempname = None
        with mock.patch.object(tempfile,
                               'mkstemp',
                               self.__fake_mkstemp_raise):
            rootaccess = couch_service.CouchbaseRootAccess()
            self.assertRaises(RuntimeError,
                              rootaccess.write_password_to_file,
                              'mypassword')
| apache-2.0 |
ygol/dotfiles | bin/.venv-ansible-venv/lib/python2.6/site-packages/pip/_vendor/requests/packages/chardet/sbcharsetprober.py | 2927 | 4793 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
from .compat import wrap_ord
# Orders below SAMPLE_SIZE are the frequent characters tracked by the
# bigram (precedence) matrix.
SAMPLE_SIZE = 64
# Minimum number of observed sequences before shortcut decisions apply.
SB_ENOUGH_REL_THRESHOLD = 1024
POSITIVE_SHORTCUT_THRESHOLD = 0.95
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
# Orders below this value count as characters rather than symbols.
SYMBOL_CAT_ORDER = 250
NUMBER_OF_SEQ_CAT = 4
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1
#NEGATIVE_CAT = 0
class SingleByteCharSetProber(CharSetProber):
    """Prober scoring one single-byte charset via a character-pair model."""

    def __init__(self, model, reversed=False, nameProber=None):
        CharSetProber.__init__(self)
        self._mModel = model
        # TRUE if we need to reverse every pair in the model lookup
        self._mReversed = reversed
        # Optional auxiliary prober for name decision
        self._mNameProber = nameProber
        self.reset()

    def reset(self):
        CharSetProber.reset(self)
        # char order of last character
        self._mLastOrder = 255
        self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
        self._mTotalSeqs = 0
        self._mTotalChar = 0
        # characters that fall in our sampling range
        self._mFreqChar = 0

    def get_charset_name(self):
        if self._mNameProber:
            return self._mNameProber.get_charset_name()
        else:
            return self._mModel['charsetName']

    def feed(self, aBuf):
        # Feed a chunk of bytes into the model and return the new state.
        if not self._mModel['keepEnglishLetter']:
            aBuf = self.filter_without_english_letters(aBuf)
        aLen = len(aBuf)
        if not aLen:
            return self.get_state()
        for c in aBuf:
            order = self._mModel['charToOrderMap'][wrap_ord(c)]
            if order < SYMBOL_CAT_ORDER:
                self._mTotalChar += 1
            if order < SAMPLE_SIZE:
                self._mFreqChar += 1
                # Only consecutive frequent characters form a scored pair.
                if self._mLastOrder < SAMPLE_SIZE:
                    self._mTotalSeqs += 1
                    if not self._mReversed:
                        i = (self._mLastOrder * SAMPLE_SIZE) + order
                        model = self._mModel['precedenceMatrix'][i]
                    else:  # reverse the order of the letters in the lookup
                        i = (order * SAMPLE_SIZE) + self._mLastOrder
                        model = self._mModel['precedenceMatrix'][i]
                    self._mSeqCounters[model] += 1
            self._mLastOrder = order

        if self.get_state() == constants.eDetecting:
            if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
                cf = self.get_confidence()
                if cf > POSITIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, we have a'
                                         'winner\n' %
                                         (self._mModel['charsetName'], cf))
                    self._mState = constants.eFoundIt
                elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, below negative'
                                         'shortcut threshhold %s\n' %
                                         (self._mModel['charsetName'], cf,
                                          NEGATIVE_SHORTCUT_THRESHOLD))
                    self._mState = constants.eNotMe
        return self.get_state()

    def get_confidence(self):
        # Ratio of positive-category pairs, scaled by the model's typical
        # positive ratio and the frequent-character density; capped at 0.99.
        r = 0.01
        if self._mTotalSeqs > 0:
            r = ((1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs
                 / self._mModel['mTypicalPositiveRatio'])
            r = r * self._mFreqChar / self._mTotalChar
            if r >= 1.0:
                r = 0.99
        return r
| mit |
henrytao-me/openerp.positionq | openerp/addons/point_of_sale/wizard/pos_sales_user.py | 55 | 2240 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
# Transient wizard model: collects a date range and salespeople, then
# launches the "Sales by User" report.
class pos_sale_user(osv.osv_memory):
    _name = 'pos.sale.user'
    _description = 'Sale by User'
    _columns = {
        'date_start': fields.date('Date Start', required=True),
        'date_end': fields.date('Date End', required=True),
        'user_id': fields.many2many('res.users', 'sale_user_rel', 'user_id', 'uid', 'Salesperson'),
    }

    def print_report(self, cr, uid, ids, context=None):
        """
        To get the date and print the report
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param context: A standard dictionary
        @return : return report
        """
        if context is None:
            context = {}
        datas = {'ids': context.get('active_ids', [])}
        res = self.read(cr, uid, ids, ['date_start', 'date_end', 'user_id'], context=context)
        # read() returns a list; keep only the first (and only) record.
        res = res and res[0] or {}
        datas['form'] = res
        return {
            'type': 'ir.actions.report.xml',
            'report_name': 'pos.sales.user',
            'datas': datas,
        }

pos_sale_user()

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
henriquegemignani/randovania | randovania/gui/tracker_window.py | 1 | 32424 | import collections
import functools
import json
import typing
from pathlib import Path
from random import Random
from typing import Optional, Dict, Set, List, Tuple, Iterator, Union
import matplotlib.pyplot as plt
import networkx
from PySide2 import QtWidgets
from PySide2.QtCore import Qt
from PySide2.QtWidgets import QMainWindow, QTreeWidgetItem, QCheckBox, QLabel, QGridLayout, QWidget, QMessageBox
from matplotlib.axes import Axes
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from randovania.game_description.area_location import AreaLocation
from randovania.game_description.game_description import GameDescription
from randovania.game_description.item.item_category import ItemCategory
from randovania.game_description.node import Node, ResourceNode, TranslatorGateNode, TeleporterNode, DockNode
from randovania.game_description.resources.item_resource_info import ItemResourceInfo
from randovania.game_description.resources.pickup_entry import PickupEntry
from randovania.game_description.resources.resource_info import add_resource_gain_to_current_resources
from randovania.game_description.resources.translator_gate import TranslatorGate
from randovania.game_description.world import World
from randovania.games.game import RandovaniaGame
from randovania.games.prime import patcher_file
from randovania.generator import generator
from randovania.gui.generated.tracker_window_ui import Ui_TrackerWindow
from randovania.gui.lib.common_qt_lib import set_default_window_icon
from randovania.gui.lib.custom_spin_box import CustomSpinBox
from randovania.layout import translator_configuration
from randovania.layout.echoes_configuration import EchoesConfiguration
from randovania.layout.teleporters import TeleporterShuffleMode
from randovania.layout.translator_configuration import LayoutTranslatorRequirement
from randovania.resolver.bootstrap import logic_bootstrap
from randovania.resolver.logic import Logic
from randovania.resolver.resolver_reach import ResolverReach
from randovania.resolver.state import State, add_pickup_to_state
class InvalidLayoutForTracker(Exception):
    """Raised when a layout configuration cannot be used by the tracker."""
    pass
def _load_previous_state(persistence_path: Path,
                         layout_configuration: EchoesConfiguration,
                         ) -> Optional[dict]:
    """Return the persisted tracker state, or None when it is missing,
    unreadable, or was saved for a different layout configuration."""
    layout_path = persistence_path.joinpath("layout_configuration.json")
    try:
        with layout_path.open() as layout_file:
            saved_layout = EchoesConfiguration.from_json(json.load(layout_file))
    except (FileNotFoundError, TypeError, KeyError, ValueError, json.JSONDecodeError):
        return None

    if saved_layout != layout_configuration:
        return None

    state_path = persistence_path.joinpath("state.json")
    try:
        with state_path.open() as state_file:
            return json.load(state_file)
    except (FileNotFoundError, json.JSONDecodeError):
        return None
class MatplotlibWidget(QtWidgets.QWidget):
    """Qt widget embedding a matplotlib canvas with a navigation toolbar."""

    ax: Axes

    def __init__(self, parent=None):
        super().__init__(parent)
        fig = Figure(figsize=(7, 5), dpi=65, facecolor=(1, 1, 1), edgecolor=(0, 0, 0))
        self.canvas = FigureCanvas(fig)
        self.toolbar = NavigationToolbar(self.canvas, self)
        lay = QtWidgets.QVBoxLayout(self)
        lay.addWidget(self.toolbar)
        lay.addWidget(self.canvas)
        # Single subplot used for all drawing; keep a handle on the
        # initial (empty) line so it can be updated later.
        self.ax = fig.add_subplot(111)
        self.line, *_ = self.ax.plot([])
class TrackerWindow(QMainWindow, Ui_TrackerWindow):
# Tracker state
_collected_pickups: Dict[PickupEntry, int]
_actions: List[Node]
# Tracker configuration
logic: Logic
game_description: GameDescription
layout_configuration: EchoesConfiguration
persistence_path: Path
_initial_state: State
_elevator_id_to_combo: Dict[int, QtWidgets.QComboBox]
_translator_gate_to_combo: Dict[TranslatorGate, QtWidgets.QComboBox]
_starting_nodes: Set[ResourceNode]
_undefined_item = ItemResourceInfo(-1, "Undefined", "Undefined", 0, None)
# UI tools
_asset_id_to_item: Dict[int, QTreeWidgetItem]
_node_to_item: Dict[Node, QTreeWidgetItem]
_widget_for_pickup: Dict[PickupEntry, Union[QCheckBox, CustomSpinBox]]
_during_setup = False
def __init__(self, persistence_path: Path, layout_configuration: EchoesConfiguration):
super().__init__()
self.setupUi(self)
set_default_window_icon(self)
self._collected_pickups = {}
self._widget_for_pickup = {}
self._actions = []
self._asset_id_to_item = {}
self._node_to_item = {}
self.layout_configuration = layout_configuration
self.persistence_path = persistence_path
player_pool = generator.create_player_pool(Random(0), self.layout_configuration, 0, 1)
pool_patches = player_pool.patches
self.game_description, self._initial_state = logic_bootstrap(layout_configuration,
player_pool.game,
pool_patches)
self.logic = Logic(self.game_description, layout_configuration)
self._initial_state.resources["add_self_as_requirement_to_resources"] = 1
self.menu_reset_action.triggered.connect(self._confirm_reset)
self.resource_filter_check.stateChanged.connect(self.update_locations_tree_for_reachable_nodes)
self.hide_collected_resources_check.stateChanged.connect(self.update_locations_tree_for_reachable_nodes)
self.undo_last_action_button.clicked.connect(self._undo_last_action)
self.configuration_label.setText("Trick Level: {}; Starts with:\n{}".format(
layout_configuration.trick_level.pretty_description,
", ".join(
resource.short_name
for resource in pool_patches.starting_items.keys()
)
))
self.setup_pickups_box(player_pool.pickups)
self.setup_possible_locations_tree()
self.setup_elevators()
self.setup_translator_gates()
self.matplot_widget = MatplotlibWidget(self.tab_graph_map)
self.tab_graph_map_layout.addWidget(self.matplot_widget)
self._world_to_node_positions = {}
self.map_tab_widget.currentChanged.connect(self._on_tab_changed)
for world in self.game_description.world_list.worlds:
self.graph_map_world_combo.addItem(world.name, world)
self.graph_map_world_combo.currentIndexChanged.connect(self.on_graph_map_world_combo)
persistence_path.mkdir(parents=True, exist_ok=True)
previous_state = _load_previous_state(persistence_path, layout_configuration)
if not self.apply_previous_state(previous_state):
self.setup_starting_location(None)
with persistence_path.joinpath("layout_configuration.json").open("w") as layout_file:
json.dump(layout_configuration.as_json, layout_file)
self._add_new_action(self._initial_state.node)
def apply_previous_state(self, previous_state: Optional[dict]) -> bool:
if previous_state is None:
return False
starting_location = None
needs_starting_location = len(self.layout_configuration.starting_location.locations) > 1
resource_db = self.game_description.resource_database
translator_gates = {}
try:
pickup_name_to_pickup = {pickup.name: pickup for pickup in self._collected_pickups.keys()}
quantity_to_change = {
pickup_name_to_pickup[pickup_name]: quantity
for pickup_name, quantity in previous_state["collected_pickups"].items()
}
previous_actions = [
self.game_description.world_list.all_nodes[index]
for index in previous_state["actions"]
]
if needs_starting_location:
starting_location = AreaLocation.from_json(previous_state["starting_location"])
elevators = {
int(elevator_id): AreaLocation.from_json(location) if location is not None else None
for elevator_id, location in previous_state["elevators"].items()
}
if self.layout_configuration.game == RandovaniaGame.PRIME2:
translator_gates = {
TranslatorGate(int(gate)): (resource_db.get_item(item)
if item is not None
else self._undefined_item)
for gate, item in previous_state["translator_gates"].items()
}
except KeyError:
return False
self.setup_starting_location(starting_location)
for elevator_id, area_location in elevators.items():
combo = self._elevator_id_to_combo[elevator_id]
if area_location is None:
combo.setCurrentIndex(0)
continue
for i in range(combo.count()):
if area_location == combo.itemData(i):
combo.setCurrentIndex(i)
break
for gate, item in translator_gates.items():
combo = self._translator_gate_to_combo[gate]
for i in range(combo.count()):
if item == combo.itemData(i):
combo.setCurrentIndex(i)
break
self.bulk_change_quantity(quantity_to_change)
self._add_new_actions(previous_actions)
return True
def reset(self):
self.bulk_change_quantity({
pickup: 0
for pickup in self._collected_pickups.keys()
})
while len(self._actions) > 1:
self._actions.pop()
self.actions_list.takeItem(len(self._actions))
for elevator in self._elevator_id_to_combo.values():
elevator.setCurrentIndex(0)
for elevator in self._translator_gate_to_combo.values():
elevator.setCurrentIndex(0)
self._refresh_for_new_action()
def _confirm_reset(self):
reply = QMessageBox.question(self, "Reset Tracker?", "Do you want to reset the tracker progression?",
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
self.reset()
@property
def _show_only_resource_nodes(self) -> bool:
return self.resource_filter_check.isChecked()
@property
def _hide_collected_resources(self) -> bool:
return self.hide_collected_resources_check.isChecked()
@property
def _collected_nodes(self) -> Set[ResourceNode]:
return self._starting_nodes | set(action for action in self._actions if action.is_resource_node)
def _pretty_node_name(self, node: Node) -> str:
world_list = self.game_description.world_list
return "{} / {}".format(world_list.area_name(world_list.nodes_to_area(node)), node.name)
def _refresh_for_new_action(self):
self.undo_last_action_button.setEnabled(len(self._actions) > 1)
self.current_location_label.setText("Current location: {}".format(self._pretty_node_name(self._actions[-1])))
self.update_locations_tree_for_reachable_nodes()
def _add_new_action(self, node: Node):
self._add_new_actions([node])
def _add_new_actions(self, nodes: Iterator[Node]):
for node in nodes:
self.actions_list.addItem(self._pretty_node_name(node))
self._actions.append(node)
self._refresh_for_new_action()
def _undo_last_action(self):
self._actions.pop()
self.actions_list.takeItem(len(self._actions))
self._refresh_for_new_action()
def _on_tree_node_double_clicked(self, item: QTreeWidgetItem, _):
node: Optional[Node] = getattr(item, "node", None)
if not item.isDisabled() and node is not None and node != self._actions[-1]:
self._add_new_action(node)
def _positions_for_world(self, world: World):
g = networkx.DiGraph()
world_list = self.game_description.world_list
state = self.state_for_current_configuration()
for area in world.areas:
g.add_node(area)
for area in world.areas:
nearby_areas = set()
for node in area.nodes:
if isinstance(node, DockNode):
try:
target_node = world_list.resolve_dock_node(node, state.patches)
nearby_areas.add(world_list.nodes_to_area(target_node))
except IndexError as e:
print(f"For {node.name} in {area.name}, received {e}")
continue
for other_area in nearby_areas:
g.add_edge(area, other_area)
return networkx.drawing.spring_layout(g)
def update_matplot_widget(self, nodes_in_reach: Set[Node]):
g = networkx.DiGraph()
world_list = self.game_description.world_list
state = self.state_for_current_configuration()
world = self.graph_map_world_combo.currentData()
for area in world.areas:
g.add_node(area)
for area in world.areas:
nearby_areas = set()
for node in area.nodes:
if node not in nodes_in_reach:
continue
if isinstance(node, DockNode):
# TODO: respect is_blast_shield: if already opened once, no requirement needed.
# Includes opening form behind with different criteria
try:
target_node = world_list.resolve_dock_node(node, state.patches)
dock_weakness = state.patches.dock_weakness.get((area.area_asset_id, node.dock_index),
node.default_dock_weakness)
if dock_weakness.requirement.satisfied(state.resources, state.energy):
nearby_areas.add(world_list.nodes_to_area(target_node))
except IndexError as e:
print(f"For {node.name} in {area.name}, received {e}")
continue
for other_area in nearby_areas:
g.add_edge(area, other_area)
self.matplot_widget.ax.clear()
cf = self.matplot_widget.ax.get_figure()
cf.set_facecolor("w")
if world.world_asset_id not in self._world_to_node_positions:
self._world_to_node_positions[world.world_asset_id] = self._positions_for_world(world)
pos = self._world_to_node_positions[world.world_asset_id]
networkx.draw_networkx_nodes(g, pos, ax=self.matplot_widget.ax)
networkx.draw_networkx_edges(g, pos, arrows=True, ax=self.matplot_widget.ax)
networkx.draw_networkx_labels(g, pos, ax=self.matplot_widget.ax,
labels={area: area.name for area in world.areas},
verticalalignment='top')
self.matplot_widget.ax.set_axis_off()
plt.draw_if_interactive()
self.matplot_widget.canvas.draw()
def on_graph_map_world_combo(self):
nodes_in_reach = self.current_nodes_in_reach(self.state_for_current_configuration())
self.update_matplot_widget(nodes_in_reach)
def current_nodes_in_reach(self, state):
if state is None:
nodes_in_reach = set()
else:
reach = ResolverReach.calculate_reach(self.logic, state)
nodes_in_reach = set(reach.nodes)
nodes_in_reach.add(state.node)
return nodes_in_reach
def _on_tab_changed(self):
if self.map_tab_widget.currentWidget() == self.tab_graph_map:
self.on_graph_map_world_combo()
def update_locations_tree_for_reachable_nodes(self):
state = self.state_for_current_configuration()
nodes_in_reach = self.current_nodes_in_reach(state)
if self.map_tab_widget.currentWidget() == self.tab_graph_map:
self.update_matplot_widget(nodes_in_reach)
all_nodes = self.game_description.world_list.all_nodes
for world in self.game_description.world_list.worlds:
for area in world.areas:
area_is_visible = False
for node in area.nodes:
is_collected = node in self._collected_nodes
is_visible = node in nodes_in_reach and not (self._hide_collected_resources
and is_collected)
if self._show_only_resource_nodes:
is_visible = is_visible and node.is_resource_node
node_item = self._node_to_item[node]
node_item.setHidden(not is_visible)
if node.is_resource_node:
resource_node = typing.cast(ResourceNode, node)
node_item.setDisabled(not resource_node.can_collect(state.patches, state.resources, all_nodes))
node_item.setCheckState(0, Qt.Checked if is_collected else Qt.Unchecked)
area_is_visible = area_is_visible or is_visible
self._asset_id_to_item[area.area_asset_id].setHidden(not area_is_visible)
# Persist the current state
self.persist_current_state()
def persist_current_state(self):
world_list = self.game_description.world_list
with self.persistence_path.joinpath("state.json").open("w") as state_file:
json.dump(
{
"actions": [
node.index
for node in self._actions
],
"collected_pickups": {
pickup.name: quantity
for pickup, quantity in self._collected_pickups.items()
},
"elevators": {
str(elevator_id): combo.currentData().as_json if combo.currentIndex() > 0 else None
for elevator_id, combo in self._elevator_id_to_combo.items()
},
"translator_gates": {
str(gate.index): combo.currentData().index if combo.currentIndex() > 0 else None
for gate, combo in self._translator_gate_to_combo.items()
},
"starting_location": world_list.node_to_area_location(self._initial_state.node).as_json,
},
state_file
)
def setup_possible_locations_tree(self):
"""
Creates the possible_locations_tree with all worlds, areas and nodes.
"""
self.possible_locations_tree.itemDoubleClicked.connect(self._on_tree_node_double_clicked)
# TODO: Dark World names
for world in self.game_description.world_list.worlds:
world_item = QTreeWidgetItem(self.possible_locations_tree)
world_item.setText(0, world.name)
world_item.setExpanded(True)
self._asset_id_to_item[world.world_asset_id] = world_item
for area in world.areas:
area_item = QTreeWidgetItem(world_item)
area_item.area = area
area_item.setText(0, area.name)
area_item.setHidden(True)
self._asset_id_to_item[area.area_asset_id] = area_item
for node in area.nodes:
node_item = QTreeWidgetItem(area_item)
if isinstance(node, TranslatorGateNode):
node_item.setText(0, "{} ({})".format(node.name, node.gate))
else:
node_item.setText(0, node.name)
node_item.node = node
if node.is_resource_node:
node_item.setFlags(node_item.flags() & ~Qt.ItemIsUserCheckable)
self._node_to_item[node] = node_item
def setup_elevators(self):
world_list = self.game_description.world_list
nodes_by_world: Dict[str, List[TeleporterNode]] = collections.defaultdict(list)
self._elevator_id_to_combo = {}
areas_to_not_change = {
2278776548, # Sky Temple Gateway
2068511343, # Sky Temple Energy Controller
3136899603, # Aerie Transport Station
1564082177, # Aerie
}
targets = {}
for world, area, node in world_list.all_worlds_areas_nodes:
if isinstance(node, TeleporterNode) and node.editable and area.area_asset_id not in areas_to_not_change:
name = world.correct_name(area.in_dark_aether)
nodes_by_world[name].append(node)
location = AreaLocation(world.world_asset_id, area.area_asset_id)
targets[patcher_file.elevator_area_name(world_list, location, True)] = location
if self.layout_configuration.elevators.mode == TeleporterShuffleMode.ONE_WAY_ANYTHING:
targets = {}
for world in world_list.worlds:
for area in world.areas:
name = world.correct_name(area.in_dark_aether)
targets[f"{name} - {area.name}"] = AreaLocation(world.world_asset_id, area.area_asset_id)
combo_targets = sorted(targets.items(), key=lambda it: it[0])
for world_name in sorted(nodes_by_world.keys()):
nodes = nodes_by_world[world_name]
nodes_locations = [AreaLocation(world_list.nodes_to_world(node).world_asset_id,
world_list.nodes_to_area(node).area_asset_id)
for node in nodes]
nodes_names = [patcher_file.elevator_area_name(world_list, location, True)
for location in nodes_locations]
nodes = sorted(nodes_by_world[world_name], key=lambda it: world_list.nodes_to_area(it).name)
group = QtWidgets.QGroupBox(self.elevators_scroll_contents)
group.setTitle(world_name)
self.elevators_scroll_layout.addWidget(group)
layout = QtWidgets.QGridLayout(group)
for i, (node, location, name) in enumerate(sorted(zip(nodes, nodes_locations, nodes_names),
key=lambda it: it[2])):
node_name = QtWidgets.QLabel(group)
node_name.setText(name)
layout.addWidget(node_name, i, 0)
combo = QtWidgets.QComboBox(group)
if self.layout_configuration.elevators.is_vanilla:
combo.addItem("Vanilla", node.default_connection)
combo.setEnabled(False)
else:
combo.addItem("Undefined", location)
for target_name, connection in combo_targets:
combo.addItem(target_name, connection)
combo.currentIndexChanged.connect(self.update_locations_tree_for_reachable_nodes)
self._elevator_id_to_combo[node.teleporter_instance_id] = combo
layout.addWidget(combo, i, 1)
def setup_translator_gates(self):
    """Build one combo box per translator gate so the user can choose which
    translator each gate requires.

    Only applies to Metroid Prime 2: Echoes; for other games this is a no-op.
    Populates ``self._translator_gate_to_combo`` for later state queries.
    """
    world_list = self.game_description.world_list
    resource_db = self.game_description.resource_database
    self._translator_gate_to_combo = {}

    # Translator gates only exist in Prime 2; nothing to build otherwise.
    if self.layout_configuration.game != RandovaniaGame.PRIME2:
        return

    # Display name -> gate, keyed by area plus gate index for uniqueness.
    gates = {
        f"{area.name} ({node.gate.index})": node.gate
        for world, area, node in world_list.all_worlds_areas_nodes
        if isinstance(node, TranslatorGateNode)
    }
    translator_requirement = self.layout_configuration.translator_configuration.translator_requirement

    # One grid row per gate, sorted by display name: label in column 0,
    # requirement combo in column 1.
    for i, (gate_name, gate) in enumerate(sorted(gates.items(), key=lambda it: it[0])):
        node_name = QtWidgets.QLabel(self.translator_gate_scroll_contents)
        node_name.setText(gate_name)
        self.translator_gate_scroll_layout.addWidget(node_name, i, 0)

        combo = QtWidgets.QComboBox(self.translator_gate_scroll_contents)
        gate_requirement = translator_requirement[gate]

        if gate_requirement in (LayoutTranslatorRequirement.RANDOM,
                                LayoutTranslatorRequirement.RANDOM_WITH_REMOVED):
            # Randomized requirement: user may select any translator.
            combo.addItem("Undefined", self._undefined_item)
            for translator, index in translator_configuration.ITEM_INDICES.items():
                combo.addItem(translator.long_name, resource_db.get_item(index))
        else:
            # Fixed requirement from the preset: show it, disallow edits.
            combo.addItem(gate_requirement.long_name, resource_db.get_item(gate_requirement.item_index))
            combo.setEnabled(False)

        combo.currentIndexChanged.connect(self.update_locations_tree_for_reachable_nodes)
        self._translator_gate_to_combo[gate] = combo
        self.translator_gate_scroll_layout.addWidget(combo, i, 1)
def setup_starting_location(self, area_location: Optional[AreaLocation]):
    """Resolve the starting location for this tracker session.

    If the preset offers more than one starting location and none was
    supplied, prompt the user to pick one.  Also records which resource
    nodes count as already collected at the initial state.
    """
    world_list = self.game_description.world_list

    if len(self.layout_configuration.starting_location.locations) > 1:
        if area_location is None:
            # Offer the candidate areas alphabetically by display name.
            area_locations = sorted(self.layout_configuration.starting_location.locations,
                                    key=lambda it: world_list.area_name(world_list.area_by_area_location(it)))

            location_names = [world_list.area_name(world_list.area_by_area_location(it))
                              for it in area_locations]
            selected_name = QtWidgets.QInputDialog.getItem(self, "Starting Location", "Select starting location",
                                                           location_names, 0, False)
            # getItem returns (text, accepted); map the text back to its
            # AreaLocation via the parallel name list.
            area_location = area_locations[location_names.index(selected_name[0])]

        # NOTE(review): reconstructed nesting — this assignment appears to run
        # only when multiple starting locations exist; confirm upstream.
        self._initial_state.node = world_list.resolve_teleporter_connection(area_location)

    # Resource nodes whose resource is already owned at the start are
    # treated as collected from the beginning.
    self._starting_nodes = {
        node
        for node in world_list.all_nodes
        if node.is_resource_node and node.resource() in self._initial_state.resources
    }
def _change_item_quantity(self, pickup: PickupEntry, use_quantity_as_bool: bool, quantity: int):
if use_quantity_as_bool:
if bool(quantity):
quantity = 1
else:
quantity = 0
self._collected_pickups[pickup] = quantity
if not self._during_setup:
self.update_locations_tree_for_reachable_nodes()
def bulk_change_quantity(self, new_quantity: Dict[PickupEntry, int]):
    """Set the collected quantity of several pickups at once.

    Updates the widget for each pickup: check boxes become checked when the
    quantity is positive, spin boxes take the value directly.  The
    ``_during_setup`` flag suppresses the per-item tree refresh that each
    widget signal would otherwise trigger.
    """
    self._during_setup = True
    for pickup, quantity in new_quantity.items():
        widget = self._widget_for_pickup[pickup]
        if isinstance(widget, QCheckBox):
            widget.setChecked(quantity > 0)
        else:
            widget.setValue(quantity)
    self._during_setup = False
def _create_widgets_with_quantity(self,
                                  pickup: PickupEntry,
                                  parent_widget: QWidget,
                                  parent_layout: QGridLayout,
                                  row: int,
                                  quantity: int,
                                  ):
    """Add a label plus spin box grid row for *pickup*, capped at *quantity*.

    The spin box is registered in ``_widget_for_pickup`` and wired so every
    value change is recorded through ``_change_item_quantity``.
    """
    label = QLabel(parent_widget)
    label.setText(pickup.name)
    parent_layout.addWidget(label, row, 0)

    spin_box = CustomSpinBox(parent_widget)
    spin_box.setMaximumWidth(50)
    spin_box.setMaximum(quantity)
    # False -> treat the emitted value as a real count, not a boolean.
    spin_box.valueChanged.connect(functools.partial(self._change_item_quantity, pickup, False))
    self._widget_for_pickup[pickup] = spin_box
    parent_layout.addWidget(spin_box, row, 1)
def setup_pickups_box(self, item_pool: List[PickupEntry]):
    """Create the tracker widgets for every pickup in *item_pool*.

    Pickups are grouped into category boxes (expansions, translators, keys,
    or the generic upgrades box).  Expansions always get a spin box; other
    pickups get a check box laid out two per row.  Non-expansion pickups
    that occur more than once get a spin box too, added after all the check
    boxes so they start on a fresh row.
    """
    # Category -> (group box, grid layout); anything unlisted falls back to
    # the generic upgrades box.
    parent_widgets: Dict[ItemCategory, Tuple[QWidget, QGridLayout]] = {
        ItemCategory.EXPANSION: (self.expansions_box, self.expansions_layout),
        ItemCategory.ENERGY_TANK: (self.expansions_box, self.expansions_layout),
        ItemCategory.TRANSLATOR: (self.translators_box, self.translators_layout),
        ItemCategory.TEMPLE_KEY: (self.keys_box, self.keys_layout),
        ItemCategory.SKY_TEMPLE_KEY: (self.keys_box, self.keys_layout),
    }
    major_pickup_parent_widgets = (self.upgrades_box, self.upgrades_layout)

    # Per-box cursor for the next free grid cell.
    row_for_parent = {
        self.expansions_box: 0,
        self.translators_box: 0,
        self.upgrades_box: 0,
        self.keys_box: 0,
    }
    column_for_parent = {
        self.translators_box: 0,
        self.upgrades_box: 0,
        self.keys_box: 0,
    }

    # Check boxes are laid out two per grid row.
    k_column_count = 2

    # Collapse duplicate pickups (same name) into one entry with a count.
    pickup_by_name = {}
    pickup_with_quantity = {}
    for pickup in item_pool:
        if pickup.name in pickup_by_name:
            pickup_with_quantity[pickup_by_name[pickup.name]] += 1
        else:
            pickup_by_name[pickup.name] = pickup
            pickup_with_quantity[pickup] = 1

    non_expansions_with_quantity = []

    for pickup, quantity in pickup_with_quantity.items():
        self._collected_pickups[pickup] = 0
        parent_widget, parent_layout = parent_widgets.get(pickup.item_category, major_pickup_parent_widgets)

        row = row_for_parent[parent_widget]
        if parent_widget is self.expansions_box:
            # Expansions always use a spin box, one per row.
            self._create_widgets_with_quantity(pickup, parent_widget, parent_layout, row, quantity)
            row_for_parent[parent_widget] += 1
        else:
            if quantity > 1:
                # Deferred: repeated majors get spin boxes after every check
                # box has been placed, so they begin on a fresh row.
                non_expansions_with_quantity.append((parent_widget, parent_layout, pickup, quantity))
            else:
                check_box = QCheckBox(parent_widget)
                check_box.setText(pickup.name)
                # True -> interpret widget state as collected/not collected.
                check_box.stateChanged.connect(functools.partial(self._change_item_quantity, pickup, True))
                self._widget_for_pickup[pickup] = check_box

                column = column_for_parent[parent_widget]
                parent_layout.addWidget(check_box, row, column)
                # Advance the cell cursor, wrapping to the next row.
                column += 1
                if column >= k_column_count:
                    column = 0
                    row += 1
                row_for_parent[parent_widget] = row
                column_for_parent[parent_widget] = column

    for parent_widget, parent_layout, pickup, quantity in non_expansions_with_quantity:
        # Start on a fresh row if the last check-box row is half filled.
        if column_for_parent[parent_widget] != 0:
            column_for_parent[parent_widget] = 0
            row_for_parent[parent_widget] += 1
        self._create_widgets_with_quantity(pickup, parent_widget, parent_layout,
                                           row_for_parent[parent_widget],
                                           quantity)
        row_for_parent[parent_widget] += 1
def state_for_current_configuration(self) -> Optional[State]:
    """Build a State reflecting everything configured in the tracker UI:

    the last action taken, every elevator and translator-gate selection,
    all collected pickups, and all collected resource nodes.
    """
    all_nodes = self.game_description.world_list.all_nodes

    state = self._initial_state.copy()
    if self._actions:
        # Current position is wherever the most recent action happened.
        state.node = self._actions[-1]

    for teleporter, combo in self._elevator_id_to_combo.items():
        # Every elevator combo always carries a payload (vanilla,
        # undefined placeholder, or a chosen target).
        assert combo.currentData() is not None
        state.patches.elevator_connection[teleporter] = combo.currentData()

    for gate, item in self._translator_gate_to_combo.items():
        state.patches.translator_gates[gate] = item.currentData()

    # Grant each collected pickup as many times as its recorded quantity.
    for pickup, quantity in self._collected_pickups.items():
        for _ in range(quantity):
            add_pickup_to_state(state, pickup)

    # Apply the resources gained from every node marked as collected.
    for node in self._collected_nodes:
        add_resource_gain_to_current_resources(node.resource_gain_on_collect(state.patches, state.resources,
                                                                             all_nodes),
                                               state.resources)

    return state
| gpl-3.0 |
Tejal011089/medsyn2_lib | webnotes/model/create_new.py | 34 | 1724 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Create a new document with defaults set
"""
import webnotes
from webnotes.utils import nowdate, nowtime, cint, flt
import webnotes.defaults
def get_new_doc(doctype, parent_doc = None, parentfield = None):
	"""Return a fresh, unsaved Document of *doctype* with defaults applied.

	Defaults are resolved per DocField, in priority order:
	1. the current user's stored default for the field,
	2. the field's declared ``default`` value, which may be the literal
	   "__user", "Today", a ":Ref Doctype" lookup, or a plain value
	   (converted to the field's numeric type where applicable),
	3. for Time fields with no default, the current time.
	"""
	doc = webnotes.doc({
		"doctype": doctype,
		"__islocal": 1,  # not yet saved to the database
		"owner": webnotes.session.user,
		"docstatus": 0
	})

	meta = webnotes.get_doctype(doctype)

	# Link the new document into its parent when it is a child-table row.
	if parent_doc:
		doc.parent = parent_doc.name
		doc.parenttype = parent_doc.doctype

	if parentfield:
		doc.parentfield = parentfield

	for d in meta.get({"doctype":"DocField", "parent": doctype}):
		default = webnotes.defaults.get_user_default(d.fieldname)
		if default:
			doc.fields[d.fieldname] = default
		elif d.fields.get("default"):
			if d.default == "__user":
				doc.fields[d.fieldname] = webnotes.session.user
			elif d.default == "Today":
				doc.fields[d.fieldname] = nowdate()
			elif d.default.startswith(":"):
				# ":Some Doctype" — copy this field from a referenced document,
				# whose name comes from the parent doc or the session defaults.
				ref_fieldname = d.default[1:].lower().replace(" ", "_")
				if parent_doc:
					ref_docname = parent_doc.fields[ref_fieldname]
				else:
					ref_docname = webnotes.conn.get_default(ref_fieldname)
				doc.fields[d.fieldname] = webnotes.conn.get_value(d.default[1:],
					ref_docname, d.fieldname)
			else:
				doc.fields[d.fieldname] = d.default

			# convert type of default
			if d.fieldtype in ("Int", "Check"):
				doc.fields[d.fieldname] = cint(doc.fields[d.fieldname])
			elif d.fieldtype in ("Float", "Currency"):
				doc.fields[d.fieldname] = flt(doc.fields[d.fieldname])
		elif d.fieldtype == "Time":
			# NOTE(review): reconstructed nesting — Time fields with no explicit
			# default appear to fall back to the current time; confirm upstream.
			doc.fields[d.fieldname] = nowtime()

	return doc
| mit |
crosswalk-project/crosswalk-android-extensions | build/idl-generator/third_party/WebKit/Tools/Scripts/webkitpy/tool/commands/abstractsequencedcommand.py | 51 | 2378 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.commands.stepsequence import StepSequence
from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
_log = logging.getLogger(__name__)
class AbstractSequencedCommand(AbstractDeclarativeCommand):
    """Base class for commands that run a fixed sequence of steps.

    Subclasses set ``steps`` to a list of step classes; the command's
    options are the union of the options declared by those steps.
    """
    # List of step classes to run, in order.  Must be set by subclasses.
    steps = None

    def __init__(self):
        self._sequence = StepSequence(self.steps)
        AbstractDeclarativeCommand.__init__(self, self._sequence.options())

    def _prepare_state(self, options, args, tool):
        """Hook for subclasses to build the state object passed to the steps."""
        return None

    def execute(self, options, args, tool):
        try:
            state = self._prepare_state(options, args, tool)
        except ScriptError, e:  # Python 2 except syntax; this codebase predates py3.
            _log.error(e.message_with_output())
            # Exit with the script's own code when available, else 2.
            self._exit(e.exit_code or 2)

        self._sequence.run_and_handle_errors(tool, options, state)
| bsd-3-clause |
tkw1536/GitManager | tests/commands/test_state.py | 1 | 4328 | import unittest
import unittest.mock
from GitManager.commands import state
from GitManager.repo import description
from GitManager.utils import format
from GitManager.repo import implementation
class TestState(unittest.TestCase):
    """ Tests that the state command works properly """

    @unittest.mock.patch(
        'GitManager.repo.implementation.LocalRepository')
    @unittest.mock.patch(
        'builtins.print')
    def test_run(self,
                 builtins_print: unittest.mock.Mock,
                 implementation_LocalRepository: unittest.mock.Mock):
        """Drive State.run through every RemoteStatus, with and without
        --update, asserting the printed message and the return value."""
        # create a repository
        repo = description.RepositoryDescription('/path/to/source',
                                                 '/path/to/clone')

        # create a line
        line = format.TerminalLine()

        # and a command instance
        cmd = state.State(line, [repo], "--no-update")

        # if we are up-to-date, nothing should have been printed
        implementation_LocalRepository.return_value.exists.return_value = True
        implementation_LocalRepository.return_value.remote_status \
            .return_value = implementation.RemoteStatus.UP_TO_DATE
        self.assertTrue(cmd.run(repo))
        # --no-update means remote_status is queried without fetching first
        implementation_LocalRepository.return_value.remote_status \
            .assert_called_with(False)
        builtins_print.assert_not_called()

        # reset the mock
        implementation_LocalRepository.reset_mock()
        builtins_print.reset_mock()

        # create another command instance
        cmd = state.State(line, [repo], "--update")

        # if the local repository does not exist, run() should fail
        implementation_LocalRepository.return_value.exists.return_value = False
        self.assertFalse(cmd.run(repo))

        # reset the mock
        implementation_LocalRepository.reset_mock()
        builtins_print.reset_mock()

        # if we are up-to-date, nothing should have been printed
        implementation_LocalRepository.return_value.exists.return_value = True
        implementation_LocalRepository.return_value.remote_status \
            .return_value = implementation.RemoteStatus.UP_TO_DATE
        self.assertTrue(cmd.run(repo))
        # --update means remote_status is asked to fetch (argument True)
        implementation_LocalRepository.return_value.remote_status\
            .assert_called_with(True)
        builtins_print.assert_not_called()

        # reset the mock
        implementation_LocalRepository.reset_mock()
        builtins_print.reset_mock()

        # we need to pull
        implementation_LocalRepository.return_value.exists.return_value = True
        implementation_LocalRepository.return_value.remote_status \
            .return_value = implementation.RemoteStatus.REMOTE_NEWER
        self.assertFalse(cmd.run(repo))
        implementation_LocalRepository.return_value.remote_status \
            .assert_called_with(True)
        builtins_print.assert_called_with(
            format.Format.yellow('Upstream is ahead of your branch, '
                                 'pull required. '))

        # reset the mock
        implementation_LocalRepository.reset_mock()
        builtins_print.reset_mock()

        # we need to push
        implementation_LocalRepository.return_value.exists.return_value = True
        implementation_LocalRepository.return_value.remote_status \
            .return_value = implementation.RemoteStatus.LOCAL_NEWER
        self.assertFalse(cmd.run(repo))
        implementation_LocalRepository.return_value.remote_status \
            .assert_called_with(True)
        builtins_print.assert_called_with(
            format.Format.green('Your branch is ahead of upstream, '
                                'push required.'))

        # reset the mock
        implementation_LocalRepository.reset_mock()
        builtins_print.reset_mock()

        # divergence
        implementation_LocalRepository.return_value.exists.return_value = True
        implementation_LocalRepository.return_value.remote_status \
            .return_value = implementation.RemoteStatus.DIVERGENCE
        self.assertFalse(cmd.run(repo))
        implementation_LocalRepository.return_value.remote_status \
            .assert_called_with(True)
        builtins_print.assert_called_with(
            format.Format.red('Your branch and upstream have diverged, '
                              'merge or rebase required. '))
| mit |
sourcepole/kadas-albireo | python/ext-libs/pytz/tzinfo.py | 95 | 19212 | '''Base classes and helpers for building zone specific tzinfo classes'''
from datetime import datetime, timedelta, tzinfo
from bisect import bisect_right
# Python 2.3 compatibility: the built-in ``set`` type only exists from 2.4
# on, so fall back to the old ``sets.Set`` class when it is missing.
try:
    set
except NameError:
    from sets import Set as set
import pytz
from pytz.exceptions import AmbiguousTimeError, NonExistentTimeError
__all__ = []
# Interning cache: one timedelta instance per distinct second count.
_timedelta_cache = {}


def memorized_timedelta(seconds):
    '''Create only one instance of each distinct timedelta'''
    delta = _timedelta_cache.get(seconds)
    if delta is None:
        delta = _timedelta_cache[seconds] = timedelta(seconds=seconds)
    return delta
# The Unix epoch as a naive datetime; also seeds the interning cache.
_epoch = datetime(1970, 1, 1)
_datetime_cache = {0: _epoch}


def memorized_datetime(seconds):
    '''Create only one instance of each distinct datetime'''
    dt = _datetime_cache.get(seconds)
    if dt is None:
        # NB. We can't just do datetime.utcfromtimestamp(seconds) as this
        # fails with negative values under Windows (Bug #90096), so offset
        # from the cached epoch instead.
        dt = _datetime_cache[seconds] = _epoch + timedelta(seconds=seconds)
    return dt
# Interning cache for (utcoffset, dstoffset, tzname) triples.
_ttinfo_cache = {}


def memorized_ttinfo(*args):
    '''Create only one instance of each distinct tuple'''
    info = _ttinfo_cache.get(args)
    if info is None:
        # Offsets are interned too, so equal triples share sub-objects.
        info = (
            memorized_timedelta(args[0]),
            memorized_timedelta(args[1]),
            args[2]
        )
        _ttinfo_cache[args] = info
    return info
_notime = memorized_timedelta(0)
def _to_seconds(td):
'''Convert a timedelta to seconds'''
return td.seconds + td.days * 24 * 60 * 60
class BaseTzInfo(tzinfo):
    """Common base for pytz tzinfo classes; str(tz) is the zone name."""
    # Overridden in subclass
    _utcoffset = None
    _tzname = None
    zone = None

    def __str__(self):
        return self.zone
class StaticTzInfo(BaseTzInfo):
    '''A timezone that has a constant offset from UTC

    These timezones are rare, as most locations have changed their
    offset at some point in their history
    '''
    def fromutc(self, dt):
        '''See datetime.tzinfo.fromutc'''
        if dt.tzinfo is not None and dt.tzinfo is not self:
            raise ValueError('fromutc: dt.tzinfo is not self')
        # Constant offset: shift from UTC and attach this zone.
        return (dt + self._utcoffset).replace(tzinfo=self)

    def utcoffset(self, dt, is_dst=None):
        '''See datetime.tzinfo.utcoffset

        is_dst is ignored for StaticTzInfo, and exists only to
        retain compatibility with DstTzInfo.
        '''
        return self._utcoffset

    def dst(self, dt, is_dst=None):
        '''See datetime.tzinfo.dst

        is_dst is ignored for StaticTzInfo, and exists only to
        retain compatibility with DstTzInfo.
        '''
        # A fixed-offset zone never observes daylight saving.
        return _notime

    def tzname(self, dt, is_dst=None):
        '''See datetime.tzinfo.tzname

        is_dst is ignored for StaticTzInfo, and exists only to
        retain compatibility with DstTzInfo.
        '''
        return self._tzname

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime.

        This is normally a no-op, as StaticTzInfo timezones never have
        ambiguous cases to correct:

        >>> from pytz import timezone
        >>> gmt = timezone('GMT')
        >>> isinstance(gmt, StaticTzInfo)
        True
        >>> dt = datetime(2011, 5, 8, 1, 2, 3, tzinfo=gmt)
        >>> gmt.normalize(dt) is dt
        True

        The supported method of converting between timezones is to use
        datetime.astimezone(). Currently normalize() also works:

        >>> la = timezone('America/Los_Angeles')
        >>> dt = la.localize(datetime(2011, 5, 7, 1, 2, 3))
        >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
        >>> gmt.normalize(dt).strftime(fmt)
        '2011-05-07 08:02:03 GMT (+0000)'
        '''
        if dt.tzinfo is self:
            return dt
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')
        return dt.astimezone(self)

    def __repr__(self):
        return '<StaticTzInfo %r>' % (self.zone,)

    def __reduce__(self):
        # Special pickle to zone remains a singleton and to cope with
        # database changes.
        return pytz._p, (self.zone,)
class DstTzInfo(BaseTzInfo):
    '''A timezone that has a variable offset from UTC

    The offset might change if daylight saving time comes into effect,
    or at a point in history when the region decides to change their
    timezone definition.
    '''
    # Overridden in subclass
    _utc_transition_times = None  # Sorted list of DST transition times in UTC
    _transition_info = None  # [(utcoffset, dstoffset, tzname)] corresponding
                             # to _utc_transition_times entries
    zone = None

    # Set in __init__
    _tzinfos = None
    _dst = None  # DST offset

    def __init__(self, _inf=None, _tzinfos=None):
        if _inf:
            # Internal construction: an instance for one specific
            # (utcoffset, dst, tzname) triple, sharing the zone's cache.
            self._tzinfos = _tzinfos
            self._utcoffset, self._dst, self._tzname = _inf
        else:
            # Public construction: adopt the first transition's info and
            # eagerly create one shared instance per distinct triple.
            _tzinfos = {}
            self._tzinfos = _tzinfos
            self._utcoffset, self._dst, self._tzname = self._transition_info[0]
            _tzinfos[self._transition_info[0]] = self
            for inf in self._transition_info[1:]:
                if inf not in _tzinfos:
                    _tzinfos[inf] = self.__class__(inf, _tzinfos)

    def fromutc(self, dt):
        '''See datetime.tzinfo.fromutc'''
        if (dt.tzinfo is not None
            and getattr(dt.tzinfo, '_tzinfos', None) is not self._tzinfos):
            raise ValueError('fromutc: dt.tzinfo is not self')
        dt = dt.replace(tzinfo=None)
        # Last transition at or before dt decides the offset in force.
        idx = max(0, bisect_right(self._utc_transition_times, dt) - 1)
        inf = self._transition_info[idx]
        return (dt + inf[0]).replace(tzinfo=self._tzinfos[inf])

    def normalize(self, dt):
        '''Correct the timezone information on the given datetime

        If date arithmetic crosses DST boundaries, the tzinfo
        is not magically adjusted. This method normalizes the
        tzinfo to the correct one.

        To test, first we need to do some setup

        >>> from pytz import timezone
        >>> utc = timezone('UTC')
        >>> eastern = timezone('US/Eastern')
        >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'

        We next create a datetime right on an end-of-DST transition point,
        the instant when the wallclocks are wound back one hour.

        >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
        >>> loc_dt = utc_dt.astimezone(eastern)
        >>> loc_dt.strftime(fmt)
        '2002-10-27 01:00:00 EST (-0500)'

        Now, if we subtract a few minutes from it, note that the timezone
        information has not changed.

        >>> before = loc_dt - timedelta(minutes=10)
        >>> before.strftime(fmt)
        '2002-10-27 00:50:00 EST (-0500)'

        But we can fix that by calling the normalize method

        >>> before = eastern.normalize(before)
        >>> before.strftime(fmt)
        '2002-10-27 01:50:00 EDT (-0400)'

        The supported method of converting between timezones is to use
        datetime.astimezone(). Currently, normalize() also works:

        >>> th = timezone('Asia/Bangkok')
        >>> am = timezone('Europe/Amsterdam')
        >>> dt = th.localize(datetime(2011, 5, 7, 1, 2, 3))
        >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
        >>> am.normalize(dt).strftime(fmt)
        '2011-05-06 20:02:03 CEST (+0200)'
        '''
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')

        # Convert dt in localtime to UTC
        offset = dt.tzinfo._utcoffset
        dt = dt.replace(tzinfo=None)
        dt = dt - offset
        # convert it back, and return it
        return self.fromutc(dt)

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time.

        This method should be used to construct localtimes, rather
        than passing a tzinfo argument to a datetime constructor.

        is_dst is used to determine the correct timezone in the ambigous
        period at the end of daylight saving time.

        >>> from pytz import timezone
        >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
        >>> amdam = timezone('Europe/Amsterdam')
        >>> dt = datetime(2004, 10, 31, 2, 0, 0)
        >>> loc_dt1 = amdam.localize(dt, is_dst=True)
        >>> loc_dt2 = amdam.localize(dt, is_dst=False)
        >>> loc_dt1.strftime(fmt)
        '2004-10-31 02:00:00 CEST (+0200)'
        >>> loc_dt2.strftime(fmt)
        '2004-10-31 02:00:00 CET (+0100)'
        >>> str(loc_dt2 - loc_dt1)
        '1:00:00'

        Use is_dst=None to raise an AmbiguousTimeError for ambiguous
        times at the end of daylight saving time

        >>> try:
        ...     loc_dt1 = amdam.localize(dt, is_dst=None)
        ... except AmbiguousTimeError:
        ...     print('Ambiguous')
        Ambiguous

        is_dst defaults to False

        >>> amdam.localize(dt) == amdam.localize(dt, False)
        True

        is_dst is also used to determine the correct timezone in the
        wallclock times jumped over at the start of daylight saving time.

        >>> pacific = timezone('US/Pacific')
        >>> dt = datetime(2008, 3, 9, 2, 0, 0)
        >>> ploc_dt1 = pacific.localize(dt, is_dst=True)
        >>> ploc_dt2 = pacific.localize(dt, is_dst=False)
        >>> ploc_dt1.strftime(fmt)
        '2008-03-09 02:00:00 PDT (-0700)'
        >>> ploc_dt2.strftime(fmt)
        '2008-03-09 02:00:00 PST (-0800)'
        >>> str(ploc_dt2 - ploc_dt1)
        '1:00:00'

        Use is_dst=None to raise a NonExistentTimeError for these skipped
        times.

        >>> try:
        ...     loc_dt1 = pacific.localize(dt, is_dst=None)
        ... except NonExistentTimeError:
        ...     print('Non-existent')
        Non-existent
        '''
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')

        # Find the two best possibilities.
        possible_loc_dt = set()
        for delta in [timedelta(days=-1), timedelta(days=1)]:
            # Probe from a day before and a day after: round-tripping the
            # candidate through normalize() keeps only interpretations that
            # map back onto the requested wall-clock time.
            loc_dt = dt + delta
            idx = max(0, bisect_right(
                self._utc_transition_times, loc_dt) - 1)
            inf = self._transition_info[idx]
            tzinfo = self._tzinfos[inf]
            loc_dt = tzinfo.normalize(dt.replace(tzinfo=tzinfo))
            if loc_dt.replace(tzinfo=None) == dt:
                possible_loc_dt.add(loc_dt)

        if len(possible_loc_dt) == 1:
            return possible_loc_dt.pop()

        # If there are no possibly correct timezones, we are attempting
        # to convert a time that never happened - the time period jumped
        # during the start-of-DST transition period.
        if len(possible_loc_dt) == 0:
            # If we refuse to guess, raise an exception.
            if is_dst is None:
                raise NonExistentTimeError(dt)

            # If we are forcing the pre-DST side of the DST transition, we
            # obtain the correct timezone by winding the clock forward a few
            # hours.
            elif is_dst:
                return self.localize(
                    dt + timedelta(hours=6), is_dst=True) - timedelta(hours=6)

            # If we are forcing the post-DST side of the DST transition, we
            # obtain the correct timezone by winding the clock back.
            else:
                return self.localize(
                    dt - timedelta(hours=6), is_dst=False) + timedelta(hours=6)

        # If we get this far, we have multiple possible timezones - this
        # is an ambiguous case occuring during the end-of-DST transition.

        # If told to be strict, raise an exception since we have an
        # ambiguous case
        if is_dst is None:
            raise AmbiguousTimeError(dt)

        # Filter out the possiblilities that don't match the requested
        # is_dst
        filtered_possible_loc_dt = [
            p for p in possible_loc_dt
            if bool(p.tzinfo._dst) == is_dst
        ]

        # Hopefully we only have one possibility left. Return it.
        if len(filtered_possible_loc_dt) == 1:
            return filtered_possible_loc_dt[0]

        if len(filtered_possible_loc_dt) == 0:
            filtered_possible_loc_dt = list(possible_loc_dt)

        # If we get this far, we have in a wierd timezone transition
        # where the clocks have been wound back but is_dst is the same
        # in both (eg. Europe/Warsaw 1915 when they switched to CET).
        # At this point, we just have to guess unless we allow more
        # hints to be passed in (such as the UTC offset or abbreviation),
        # but that is just getting silly.
        #
        # Choose the earliest (by UTC) applicable timezone.
        sorting_keys = {}
        for local_dt in filtered_possible_loc_dt:
            key = local_dt.replace(tzinfo=None) - local_dt.tzinfo._utcoffset
            sorting_keys[key] = local_dt
        first_key = sorted(sorting_keys)[0]
        return sorting_keys[first_key]

    def utcoffset(self, dt, is_dst=None):
        '''See datetime.tzinfo.utcoffset

        The is_dst parameter may be used to remove ambiguity during DST
        transitions.

        >>> from pytz import timezone
        >>> tz = timezone('America/St_Johns')
        >>> ambiguous = datetime(2009, 10, 31, 23, 30)

        >>> tz.utcoffset(ambiguous, is_dst=False)
        datetime.timedelta(-1, 73800)

        >>> tz.utcoffset(ambiguous, is_dst=True)
        datetime.timedelta(-1, 77400)

        >>> try:
        ...     tz.utcoffset(ambiguous)
        ... except AmbiguousTimeError:
        ...     print('Ambiguous')
        Ambiguous
        '''
        if dt is None:
            return None
        elif dt.tzinfo is not self:
            # Resolve dt to the correct instance for its wall-clock time.
            dt = self.localize(dt, is_dst)
            return dt.tzinfo._utcoffset
        else:
            return self._utcoffset

    def dst(self, dt, is_dst=None):
        '''See datetime.tzinfo.dst

        The is_dst parameter may be used to remove ambiguity during DST
        transitions.

        >>> from pytz import timezone
        >>> tz = timezone('America/St_Johns')

        >>> normal = datetime(2009, 9, 1)

        >>> tz.dst(normal)
        datetime.timedelta(0, 3600)
        >>> tz.dst(normal, is_dst=False)
        datetime.timedelta(0, 3600)
        >>> tz.dst(normal, is_dst=True)
        datetime.timedelta(0, 3600)

        >>> ambiguous = datetime(2009, 10, 31, 23, 30)

        >>> tz.dst(ambiguous, is_dst=False)
        datetime.timedelta(0)
        >>> tz.dst(ambiguous, is_dst=True)
        datetime.timedelta(0, 3600)
        >>> try:
        ...     tz.dst(ambiguous)
        ... except AmbiguousTimeError:
        ...     print('Ambiguous')
        Ambiguous
        '''
        if dt is None:
            return None
        elif dt.tzinfo is not self:
            dt = self.localize(dt, is_dst)
            return dt.tzinfo._dst
        else:
            return self._dst

    def tzname(self, dt, is_dst=None):
        '''See datetime.tzinfo.tzname

        The is_dst parameter may be used to remove ambiguity during DST
        transitions.

        >>> from pytz import timezone
        >>> tz = timezone('America/St_Johns')

        >>> normal = datetime(2009, 9, 1)

        >>> tz.tzname(normal)
        'NDT'
        >>> tz.tzname(normal, is_dst=False)
        'NDT'
        >>> tz.tzname(normal, is_dst=True)
        'NDT'

        >>> ambiguous = datetime(2009, 10, 31, 23, 30)

        >>> tz.tzname(ambiguous, is_dst=False)
        'NST'
        >>> tz.tzname(ambiguous, is_dst=True)
        'NDT'
        >>> try:
        ...     tz.tzname(ambiguous)
        ... except AmbiguousTimeError:
        ...     print('Ambiguous')
        Ambiguous
        '''
        if dt is None:
            return self.zone
        elif dt.tzinfo is not self:
            dt = self.localize(dt, is_dst)
            return dt.tzinfo._tzname
        else:
            return self._tzname

    def __repr__(self):
        if self._dst:
            dst = 'DST'
        else:
            dst = 'STD'
        if self._utcoffset > _notime:
            return '<DstTzInfo %r %s+%s %s>' % (
                self.zone, self._tzname, self._utcoffset, dst
            )
        else:
            return '<DstTzInfo %r %s%s %s>' % (
                self.zone, self._tzname, self._utcoffset, dst
            )

    def __reduce__(self):
        # Special pickle to zone remains a singleton and to cope with
        # database changes.
        return pytz._p, (
            self.zone,
            _to_seconds(self._utcoffset),
            _to_seconds(self._dst),
            self._tzname
        )
def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None):
    """Factory function for unpickling pytz tzinfo instances.

    This is shared for both StaticTzInfo and DstTzInfo instances, because
    database changes could cause a zones implementation to switch between
    these two base classes and we can't break pickles on a pytz version
    upgrade.

    ``utcoffset`` and ``dstoffset`` are given in whole seconds (see
    ``_to_seconds``); ``utcoffset is None`` marks a StaticTzInfo pickle.
    """
    # Raises a KeyError if zone no longer exists, which should never happen
    # and would be a bug.
    tz = pytz.timezone(zone)

    # A StaticTzInfo - just return it
    if utcoffset is None:
        return tz

    # This pickle was created from a DstTzInfo. We need to
    # determine which of the list of tzinfo instances for this zone
    # to use in order to restore the state of any datetime instances using
    # it correctly.
    utcoffset = memorized_timedelta(utcoffset)
    dstoffset = memorized_timedelta(dstoffset)
    try:
        return tz._tzinfos[(utcoffset, dstoffset, tzname)]
    except KeyError:
        # The particular state requested in this timezone no longer exists.
        # This indicates a corrupt pickle, or the timezone database has been
        # corrected violently enough to make this particular
        # (utcoffset,dstoffset) no longer exist in the zone, or the
        # abbreviation has been changed.
        pass

    # See if we can find an entry differing only by tzname. Abbreviations
    # get changed from the initial guess by the database maintainers to
    # match reality when this information is discovered.
    for localized_tz in tz._tzinfos.values():
        if (localized_tz._utcoffset == utcoffset
                and localized_tz._dst == dstoffset):
            return localized_tz

    # This (utcoffset, dstoffset) information has been removed from the
    # zone. Add it back. This might occur when the database maintainers have
    # corrected incorrect information. datetime instances using this
    # incorrect information will continue to do so, exactly as they were
    # before being pickled. This is purely an overly paranoid safety net - I
    # doubt this will ever been needed in real life.
    inf = (utcoffset, dstoffset, tzname)
    tz._tzinfos[inf] = tz.__class__(inf, tz._tzinfos)
    return tz._tzinfos[inf]
| gpl-2.0 |
vmindru/ansible | lib/ansible/modules/network/onyx/onyx_interface.py | 9 | 17101 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: onyx_interface
version_added: "2.5"
author: "Samer Deeb (@samerd)"
short_description: Manage Interfaces on Mellanox ONYX network devices
description:
- This module provides declarative management of Interfaces
on Mellanox ONYX network devices.
notes:
options:
name:
description:
- Name of the Interface.
required: true
description:
description:
- Description of Interface.
enabled:
description:
- Interface link status.
type: bool
speed:
description:
- Interface link speed.
choices: ['1G', '10G', '25G', '40G', '50G', '56G', '100G']
mtu:
description:
- Maximum size of transmit packet.
aggregate:
description: List of Interfaces definitions.
duplex:
description:
- Interface link status
default: auto
choices: ['full', 'half', 'auto']
tx_rate:
description:
- Transmit rate in bits per second (bps).
- This is state check parameter only.
- Supports conditionals, see L(Conditionals in Networking Modules,../network/user_guide/network_working_with_command_output.html)
rx_rate:
description:
- Receiver rate in bits per second (bps).
- This is state check parameter only.
- Supports conditionals, see L(Conditionals in Networking Modules,../network/user_guide/network_working_with_command_output.html)
delay:
description:
- Time in seconds to wait before checking for the operational state on
remote device. This wait is applicable for operational state argument
which are I(state) with values C(up)/C(down).
default: 10
purge:
description:
- Purge Interfaces not defined in the aggregate parameter.
This applies only for logical interface.
default: false
type: bool
state:
description:
- State of the Interface configuration, C(up) means present and
operationally up and C(down) means present and operationally C(down)
default: present
choices: ['present', 'absent', 'up', 'down']
"""
EXAMPLES = """
- name: configure interface
onyx_interface:
name: Eth1/2
description: test-interface
speed: 100G
mtu: 512
- name: make interface up
onyx_interface:
name: Eth1/2
enabled: True
- name: make interface down
onyx_interface:
name: Eth1/2
enabled: False
- name: Check intent arguments
onyx_interface:
name: Eth1/2
state: up
- name: Config + intent
onyx_interface:
name: Eth1/2
enabled: False
state: down
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device.
returned: always
type: list
sample:
- interface ethernet 1/2
- description test-interface
- mtu 512
- exit
"""
from copy import deepcopy
import re
from time import sleep
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.common.utils import conditional
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.onyx.onyx import BaseOnyxModule
from ansible.module_utils.network.onyx.onyx import get_interfaces_config
class OnyxInterfaceModule(BaseOnyxModule):
    """Ansible module implementation managing interfaces on Mellanox ONYX
    switches: applies configuration (description, speed, mtu, admin state)
    and verifies declarative intent (operational state, tx/rx rates).
    """
    # Regexes that map an interface name to its type and numeric identifier.
    IF_ETH_REGEX = re.compile(r"^Eth(\d+\/\d+|\d+\/\d+\/\d+)$")
    IF_VLAN_REGEX = re.compile(r"^Vlan (\d+)$")
    IF_LOOPBACK_REGEX = re.compile(r"^Loopback (\d+)$")
    IF_TYPE_ETH = "ethernet"
    IF_TYPE_LOOPBACK = "loopback"
    IF_TYPE_VLAN = "vlan"
    IF_TYPE_MAP = {
        IF_TYPE_ETH: IF_ETH_REGEX,
        IF_TYPE_VLAN: IF_VLAN_REGEX,
        IF_TYPE_LOOPBACK: IF_LOOPBACK_REGEX,
    }
    # Attributes that cannot be configured on a given interface type.
    UNSUPPORTED_ATTRS = {
        IF_TYPE_ETH: (),
        IF_TYPE_VLAN: ('speed', 'rx_rate', 'tx_rate'),
        IF_TYPE_LOOPBACK: ('speed', 'mtu', 'rx_rate', 'tx_rate'),
    }
    # States that are rejected for a given interface type (e.g. physical
    # ethernet interfaces cannot be removed).
    UNSUPPORTED_STATES = {
        IF_TYPE_ETH: ('absent',),
        IF_TYPE_VLAN: (),
        IF_TYPE_LOOPBACK: ('up', 'down'),
    }
    # Attributes compared against the running config to generate commands.
    IF_MODIFIABLE_ATTRS = ('speed', 'description', 'mtu')
    _interface_type = None
    @classmethod
    def _get_element_spec(cls):
        """Return the argument spec for a single interface entry."""
        return dict(
            name=dict(type='str'),
            description=dict(),
            speed=dict(choices=['1G', '10G', '25G', '40G', '50G', '56G', '100G']),
            mtu=dict(type='int'),
            enabled=dict(type='bool'),
            delay=dict(default=10, type='int'),
            state=dict(default='present',
                       choices=['present', 'absent', 'up', 'down']),
            tx_rate=dict(),
            rx_rate=dict(),
        )
    @classmethod
    def _get_aggregate_spec(cls, element_spec):
        """Return the per-item spec for the ``aggregate`` list parameter."""
        aggregate_spec = deepcopy(element_spec)
        aggregate_spec['name'] = dict(required=True)
        # remove default in aggregate spec, to handle common arguments
        remove_default_spec(aggregate_spec)
        return aggregate_spec
    def init_module(self):
        """ module initialization
        """
        element_spec = self._get_element_spec()
        aggregate_spec = self._get_aggregate_spec(element_spec)
        argument_spec = dict(
            aggregate=dict(type='list', elements='dict',
                           options=aggregate_spec),
            purge=dict(default=False, type='bool'),
        )
        argument_spec.update(element_spec)
        required_one_of = [['name', 'aggregate']]
        mutually_exclusive = [['name', 'aggregate']]
        self._module = AnsibleModule(
            argument_spec=argument_spec,
            required_one_of=required_one_of,
            mutually_exclusive=mutually_exclusive,
            supports_check_mode=True)
    def validate_purge(self, value):
        """Fail if ``purge`` is requested: it is not supported."""
        if value:
            self._module.fail_json(
                msg='Purge is not supported!')
    def validate_duplex(self, value):
        """Fail for any ``duplex`` value other than 'auto'."""
        if value != 'auto':
            self._module.fail_json(
                msg='Duplex is not supported!')
    def _get_interface_type(self, if_name):
        """Match *if_name* against the known regexes.

        Returns a ``(type, id)`` tuple; both are None when nothing matches.
        """
        if_type = None
        if_id = None
        for interface_type, interface_regex in iteritems(self.IF_TYPE_MAP):
            match = interface_regex.match(if_name)
            if match:
                if_type = interface_type
                if_id = match.group(1)
                break
        return if_type, if_id
    def _set_if_type(self, params):
        """Store the detected interface type/id in *params*, or fail."""
        if_name = params['name']
        if_type, if_id = self._get_interface_type(if_name)
        if not if_id:
            self._module.fail_json(
                msg='unsupported interface: %s' % if_name)
        params['if_type'] = if_type
        params['if_id'] = if_id
    def _check_supported_attrs(self, if_obj):
        """Fail if *if_obj* uses an attribute or state that its type forbids."""
        unsupported_attrs = self.UNSUPPORTED_ATTRS[self._interface_type]
        for attr in unsupported_attrs:
            val = if_obj[attr]
            if val is not None:
                self._module.fail_json(
                    msg='attribute %s is not supported for %s interface' % (
                        attr, self._interface_type))
        req_state = if_obj['state']
        unsupported_states = self.UNSUPPORTED_STATES[self._interface_type]
        if req_state in unsupported_states:
            self._module.fail_json(
                msg='%s state is not supported for %s interface' % (
                    req_state, self._interface_type))
    def _validate_interface_type(self):
        """Ensure all requested interfaces share one type and are valid."""
        for if_obj in self._required_config:
            if_type = if_obj['if_type']
            if not self._interface_type:
                self._interface_type = if_type
            elif self._interface_type != if_type:
                self._module.fail_json(
                    msg='Cannot aggreagte interfaces from different types')
            self._check_supported_attrs(if_obj)
    def get_required_config(self):
        """Build ``self._required_config`` from module params.

        Aggregate items inherit unset keys from the top-level parameters.
        """
        self._required_config = list()
        module_params = self._module.params
        aggregate = module_params.get('aggregate')
        if aggregate:
            for item in aggregate:
                for key in item:
                    if item.get(key) is None:
                        item[key] = module_params[key]
                self.validate_param_values(item, item)
                req_item = item.copy()
                self._set_if_type(req_item)
                self._required_config.append(req_item)
        else:
            params = {
                'name': module_params['name'],
                'description': module_params['description'],
                'speed': module_params['speed'],
                'mtu': module_params['mtu'],
                'state': module_params['state'],
                'delay': module_params['delay'],
                'enabled': module_params['enabled'],
                'tx_rate': module_params['tx_rate'],
                'rx_rate': module_params['rx_rate'],
            }
            self.validate_param_values(params)
            self._set_if_type(params)
            self._required_config.append(params)
        self._validate_interface_type()
    @classmethod
    def get_if_name(cls, item):
        """Extract the interface name from a device config item."""
        return cls.get_config_attr(item, "header")
    @classmethod
    def get_admin_state(cls, item):
        """Return True when the device reports the admin state as enabled."""
        admin_state = cls.get_config_attr(item, "Admin state")
        return str(admin_state).lower() == "enabled"
    @classmethod
    def get_oper_state(cls, item):
        """Return the lower-cased operational state reported by the device."""
        oper_state = cls.get_config_attr(item, "Operational state")
        if not oper_state:
            # Some device outputs use the key "State" instead.
            oper_state = cls.get_config_attr(item, "State")
        return str(oper_state).lower()
    @classmethod
    def get_speed(cls, item):
        """Convert the device's 'Actual speed' value to a '<n>G' string."""
        speed = cls.get_config_attr(item, 'Actual speed')
        if not speed:
            return
        try:
            speed = int(speed.split()[0])
            return "%dG" % speed
        except ValueError:
            return None
    def _create_if_data(self, name, item):
        """Normalize one device config item into the module's dict shape."""
        regex = self.IF_TYPE_MAP[self._interface_type]
        if_id = ''
        match = regex.match(name)
        if match:
            if_id = match.group(1)
        return dict(
            name=name,
            description=self.get_config_attr(item, 'Description'),
            speed=self.get_speed(item),
            # get_mtu/_get_os_version are presumably inherited from
            # BaseOnyxModule -- not defined in this class.
            mtu=self.get_mtu(item),
            enabled=self.get_admin_state(item),
            state=self.get_oper_state(item),
            if_id=if_id)
    def _get_interfaces_config(self):
        """Fetch the interface configuration for the active interface type."""
        return get_interfaces_config(self._module, self._interface_type)
    def load_current_config(self):
        """Populate ``self._current_config`` from the device.

        Older OS versions return a flat list; newer ones return nested
        per-interface attribute lists which are flattened here.
        """
        self._os_version = self._get_os_version()
        self._current_config = dict()
        config = self._get_interfaces_config()
        if not config:
            return
        if self._os_version < self.ONYX_API_VERSION:
            for if_data in config:
                if_name = self.get_if_name(if_data)
                self._current_config[if_name] = self._create_if_data(
                    if_name, if_data)
        else:
            if_data = dict()
            for if_config in config:
                for if_name, if_attr in iteritems(if_config):
                    # NOTE(review): the inner loop variable shadows the outer
                    # `config`, and `if_data` accumulates across interfaces --
                    # appears intentional for this API shape, but verify.
                    for config in if_attr:
                        for key, value in iteritems(config):
                            if_data[key] = value
                    self._current_config[if_name] = self._create_if_data(
                        if_name, if_data)
    def _generate_no_if_commands(self, req_if, curr_if):
        """Emit the removal command for a logical interface, if it exists."""
        if self._interface_type == self.IF_TYPE_ETH:
            name = req_if['name']
            self._module.fail_json(
                msg='cannot remove ethernet interface %s' % name)
        if not curr_if:
            return
        if_id = req_if['if_id']
        if not if_id:
            return
        self._commands.append(
            'no interface %s %s' % (self._interface_type, if_id))
    def _add_commands_to_interface(self, req_if, cmd_list):
        """Wrap *cmd_list* in interface-context enter/exit commands."""
        if not cmd_list:
            return
        if_id = req_if['if_id']
        if not if_id:
            return
        self._commands.append(
            'interface %s %s' % (self._interface_type, if_id))
        self._commands.extend(cmd_list)
        self._commands.append('exit')
    def _generate_if_commands(self, req_if, curr_if):
        """Diff requested vs. running attributes and queue change commands."""
        enabled = req_if['enabled']
        cmd_list = []
        for attr_name in self.IF_MODIFIABLE_ATTRS:
            candidate = req_if.get(attr_name)
            running = curr_if.get(attr_name)
            if candidate != running:
                if candidate:
                    cmd = attr_name + ' ' + str(candidate)
                    # mtu/speed changes on ethernet require 'force'.
                    if self._interface_type == self.IF_TYPE_ETH and \
                            attr_name in ('mtu', 'speed'):
                        cmd = cmd + ' ' + 'force'
                    cmd_list.append(cmd)
        curr_enabled = curr_if.get('enabled', False)
        if enabled is not None and enabled != curr_enabled:
            cmd = 'shutdown'
            if enabled:
                cmd = "no %s" % cmd
            cmd_list.append(cmd)
        if cmd_list:
            self._add_commands_to_interface(req_if, cmd_list)
    def generate_commands(self):
        """Generate all configuration commands for the requested interfaces."""
        for req_if in self._required_config:
            name = req_if['name']
            curr_if = self._current_config.get(name, {})
            if not curr_if and self._interface_type == self.IF_TYPE_ETH:
                self._module.fail_json(
                    msg='could not find ethernet interface %s' % name)
                # fail_json aborts; `continue` is a defensive no-op here.
                continue
            req_state = req_if['state']
            if req_state == 'absent':
                self._generate_no_if_commands(req_if, curr_if)
            else:
                self._generate_if_commands(req_if, curr_if)
    def _get_interfaces_rates(self):
        """Fetch per-interface rate counters from the device."""
        return get_interfaces_config(self._module, self._interface_type,
                                     "rates")
    def _get_interfaces_status(self):
        """Fetch per-interface status data from the device."""
        return get_interfaces_config(self._module, self._interface_type,
                                     "status")
    def _check_state(self, name, want_state, statuses):
        """Return a failed-condition string if *name* is not in *want_state*.

        Returns None (implicitly) when the state matches or is unknown.
        """
        curr_if = statuses.get(name, {})
        if curr_if:
            curr_if = curr_if[0]
            curr_state = self.get_oper_state(curr_if).strip()
            if curr_state is None or not conditional(want_state, curr_state):
                return 'state eq(%s)' % want_state
    def check_declarative_intent_params(self, result):
        """Verify intent arguments (state, tx_rate, rx_rate) post-change.

        Sleeps once for the first interface's ``delay`` when a change was
        made, then collects every unmet condition into the returned list.
        """
        failed_conditions = []
        delay_called = False
        rates = None
        statuses = None
        for req_if in self._required_config:
            want_state = req_if.get('state')
            want_tx_rate = req_if.get('tx_rate')
            want_rx_rate = req_if.get('rx_rate')
            name = req_if['name']
            if want_state not in ('up', 'down') and not want_tx_rate and not \
                    want_rx_rate:
                continue
            if not delay_called and result['changed']:
                delay_called = True
                delay = req_if['delay']
                if delay > 0:
                    sleep(delay)
            if want_state in ('up', 'down'):
                if statuses is None:
                    statuses = self._get_interfaces_status() or {}
                cond = self._check_state(name, want_state, statuses)
                if cond:
                    failed_conditions.append(cond)
            if_rates = None
            if want_tx_rate or want_rx_rate:
                if not rates:
                    rates = self._get_interfaces_rates()
                if_rates = rates.get(name)
                if if_rates:
                    if_rates = if_rates[0]
            if want_tx_rate:
                have_tx_rate = None
                if if_rates:
                    have_tx_rate = if_rates.get('egress rate')
                    if have_tx_rate:
                        # Keep only the numeric part, e.g. '100 bps' -> '100'.
                        have_tx_rate = have_tx_rate.split()[0]
                if have_tx_rate is None or not \
                        conditional(want_tx_rate, have_tx_rate.strip(),
                                    cast=int):
                    failed_conditions.append('tx_rate ' + want_tx_rate)
            if want_rx_rate:
                have_rx_rate = None
                if if_rates:
                    have_rx_rate = if_rates.get('ingress rate')
                    if have_rx_rate:
                        have_rx_rate = have_rx_rate.split()[0]
                if have_rx_rate is None or not \
                        conditional(want_rx_rate, have_rx_rate.strip(),
                                    cast=int):
                    failed_conditions.append('rx_rate ' + want_rx_rate)
        return failed_conditions
def main():
    """ main entry point for module execution
    """
    # All argument parsing and execution is handled by the module class.
    OnyxInterfaceModule.main()
if __name__ == '__main__':
    main()
| gpl-3.0 |
tejoesperanto/pasportaservo | hosting/migrations/0048_profile_gender_pronoun.py | 3 | 6310 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-04-10 13:25
from __future__ import unicode_literals
from django.db import migrations, models, connection
from django.core.management.color import no_style
import hosting.fields
def populate_genders(app_registry, schema_editor):
    """Seed the Gender table with its initial English/Esperanto name pairs.

    Runs as a data migration; fetches the historical model from the app
    registry so the migration stays valid as the model evolves.
    """
    Gender = app_registry.get_model('hosting', 'Gender')
    Gender.objects.bulk_create([
        Gender(id=1, name_en='Akava\'ine', name='Akava\'ine'),
        Gender(id=2, name_en='bigender', name='ambaŭgenra'),
        Gender(id=3, name_en='androgynous', name='androgena'),
        Gender(id=4, name_en='Baklâ', name='Baklâ'),
        Gender(id=5, name_en='Bissu', name='Bissu'),
        Gender(id=6, name_en='Calabai', name='Calabai'),
        Gender(id=7, name_en='Calalai', name='Calalai'),
        Gender(id=8, name_en='cisgender woman', name='cisgenra virino'),
        Gender(id=9, name_en='cisgender man', name='cisgenra viro'),
        Gender(id=10, name_en='cis woman', name='cis-ino'),
        Gender(id=11, name_en='cis man', name='cis-viro'),
        Gender(id=12, name_en='pangender', name='ĉiugenra'),
        Gender(id=13, name_en='bi-gender', name='dugenra'),
        Gender(id=14, name_en='two-spirit', name='du-spirita'),
        Gender(id=15, name_en='genderfluid', name='fluidgenra'),
        Gender(id=16, name_en='genderqueer', name='genrokvira'),
        Gender(id=17, name_en='gender nonconforming', name='genro-nekonforma'),
        Gender(id=18, name_en='gender neutral', name='genro-neŭtra'),
        Gender(id=19, name_en='gender questioning', name='genro-priduba'),
        Gender(id=20, name_en='gender variant', name='genro-varia'),
        Gender(id=21, name_en='intersex', name='interseksa'),
        Gender(id=22, name_en='other gender', name='ne-difinanta genron'),
        Gender(id=23, name_en='non-binary gender', name='neduumgenra'),
        Gender(id=24, name_en='gender non-conforming', name='ne-laŭanta genron'),
        Gender(id=25, name_en='Neutrois', name='Neutrois'),
        Gender(id=26, name_en='demiwoman', name='partgenre ina'),
        Gender(id=27, name_en='demiman', name='partgenre vira'),
        Gender(id=28, name_en='agender', name='sengenra'),
        Gender(id=29, name_en='trans*', name='trans*'),
        Gender(id=30, name_en='trans*person', name='trans*persono'),
        Gender(id=31, name_en='trans female', name='transfemala'),
        Gender(id=32, name_en='transgender', name='transgenra'),
        Gender(id=33, name_en='transgender woman', name='transgenra virino'),
        Gender(id=34, name_en='transgender man', name='transgenra viro'),
        Gender(id=35, name_en='trans feminine', name='trans-ineca'),
        Gender(id=36, name_en='trans woman', name='trans-ino'),
        Gender(id=37, name_en='trans male', name='transmaskla'),
        Gender(id=38, name_en='transsexual', name='transseksa'),
        Gender(id=39, name_en='trans masculine', name='trans-vireca'),
        Gender(id=40, name_en='trans man', name='trans-viro'),
        Gender(id=41, name_en='Travesti', name='Travesti'),
        Gender(id=42, name_en='third gender', name='tria-genra'),
        Gender(id=43, name_en='third gender (Chhakka)', name='tria-genra (Chhakka)'),
        Gender(id=44, name_en='third gender (Fa\'afafine)', name='tria-genra (Fa\'afafine)'),
        Gender(id=45, name_en='third gender (Hijra)', name='tria-genra (Hijra)'),
        Gender(id=46, name_en='third gender (Kathoey)', name='tria-genra (Kathoey)'),
        Gender(id=47, name_en='third gender (Khanīth)', name='tria-genra (Khanīth)'),
        Gender(id=48, name_en='third gender (Māhū)', name='tria-genra (Māhū)'),
        Gender(id=49, name_en='third gender (Muxhe)', name='tria-genra (Muxhe)'),
        Gender(id=50, name_en='trigender', name='trigenra'),
    ])
    # Explicit ids were inserted above, so the DB sequence must be reset to
    # avoid primary-key collisions on the next auto-generated insert.
    with connection.cursor() as cursor:
        for sql in connection.ops.sequence_reset_sql(no_style(), [Gender]):
            cursor.execute(sql)
class Migration(migrations.Migration):
    """Create the Gender model, seed it, and add gender/pronoun fields to
    Profile (with an updated description help text)."""
    dependencies = [
        ('hosting', '0047_preferences_public_listing'),
    ]
    operations = [
        migrations.CreateModel(
            name='Gender',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name_en', models.CharField(max_length=255, unique=True, verbose_name='name (in English)')),
                ('name', models.CharField(max_length=255, unique=True, verbose_name='name')),
            ],
            options={
                'verbose_name_plural': 'genders',
                'verbose_name': 'gender',
            },
        ),
        # Data seeding is irreversible in effect but declared as a no-op on
        # reverse so the migration can still be unapplied.
        migrations.RunPython(
            populate_genders, reverse_code=migrations.RunPython.noop
        ),
        migrations.AddField(
            model_name='profile',
            name='pronoun',
            field=models.CharField(blank=True, choices=[(None, ''), ('She', 'she'), ('He', 'he'), ('They', 'they')], max_length=5, verbose_name='personal pronoun'),
        ),
        migrations.AddField(
            model_name='profile',
            name='gender',
            # Custom project field; note the (upstream) spelling
            # 'ForeigKeyWithSuggestions' matches its definition elsewhere.
            field=hosting.fields.ForeigKeyWithSuggestions(blank=True, choices='hosting.Gender', to_field='name', verbose_name='gender'),
        ),
        migrations.AlterField(
            model_name='profile',
            name='description',
            field=models.TextField(blank=True, help_text='Short biography. \nProvide here further details about yourself. If you indicated that your gender is non-binary, it will be helpful if you explain more.', verbose_name='description'),
        ),
    ]
| agpl-3.0 |
moijes12/oh-mainline | vendor/packages/kombu/kombu/transport/django/migrations/0001_initial.py | 39 | 2435 | # encoding: utf-8
from __future__ import absolute_import
# flake8: noqa
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration creating the djkombu Queue/Message tables used
    by the kombu Django transport."""
    def forwards(self, orm):
        """Create the djkombu_queue and djkombu_message tables."""
        # Adding model 'Queue'
        db.create_table('djkombu_queue', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=200)),
        ))
        db.send_create_signal('django', ['Queue'])
        # Adding model 'Message'
        db.create_table('djkombu_message', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('visible', self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True)),
            ('sent_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, db_index=True, blank=True)),
            ('payload', self.gf('django.db.models.fields.TextField')()),
            ('queue', self.gf('django.db.models.fields.related.ForeignKey')(related_name='messages', to=orm['django.Queue'])),
        ))
        db.send_create_signal('django', ['Message'])
    def backwards(self, orm):
        """Drop the tables created by forwards()."""
        # Deleting model 'Queue'
        db.delete_table('djkombu_queue')
        # Deleting model 'Message'
        db.delete_table('djkombu_message')
    # Frozen ORM description used by South to reconstruct the models.
    models = {
        'django.message': {
            'Meta': {'object_name': 'Message', 'db_table': "'djkombu_message'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'payload': ('django.db.models.fields.TextField', [], {}),
            'queue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'to': "orm['django.Queue']"}),
            'sent_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'})
        },
        'django.queue': {
            'Meta': {'object_name': 'Queue', 'db_table': "'djkombu_queue'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
        }
    }
    complete_apps = ['django']
| agpl-3.0 |
vkmguy/Flights-and-Hotels | database/hotelDB.py | 2 | 1448 | from utility import DBConnectivity
from classes.hotel_detail import Hotel
def get_hotels(location):
    """Return a list of Hotel objects for every hotel at *location*.

    Opens a fresh DB connection, binds *location* as a named parameter,
    and maps each result row onto a Hotel instance.

    Bug fix: previously `con`/`cur` were first bound inside the `try`
    block, so a failure in `create_connection()` made the `finally`
    clause raise NameError, masking the real error. They are now
    pre-initialized and closed only when actually opened.
    """
    con = None
    cur = None
    try:
        con = DBConnectivity.create_connection()
        cur = DBConnectivity.create_cursor(con)
        list_of_hotels = []
        cur.execute("select hotelid, hotelname, location, efare, dfare from hotel where location=:location",{"location":location})
        for hotelid, hotelname, location, efare, dfare in cur:
            # Build one Hotel object per result row.
            hotel = Hotel()
            hotel.set_hotelid(hotelid)
            hotel.set_hotelname(hotelname)
            hotel.set_location(location)
            hotel.set_efare(efare)
            hotel.set_dfare(dfare)
            list_of_hotels.append(hotel)
        return list_of_hotels
    finally:
        # Close only the resources that were successfully opened.
        if cur is not None:
            cur.close()
        if con is not None:
            con.close()
def get_booked_flight(fbookid):
    """Return [no_of_children, no_of_adults] for flight booking *fbookid*.

    Bug fix: `con`/`cur` were first bound inside the `try` block, so a
    failure in `create_connection()` caused the `finally` clause to raise
    NameError, hiding the original error. They are now pre-initialized
    and closed only when actually opened.
    """
    con = None
    cur = None
    try:
        con = DBConnectivity.create_connection()
        cur = DBConnectivity.create_cursor(con)
        list_of_members = []
        cur.execute("select no_of_children,no_of_adults from flight_booking where bookingid=:fbookid",{"fbookid":fbookid})
        for booking in cur:
            # Each row yields (no_of_children, no_of_adults).
            list_of_members.append(booking[0])
            list_of_members.append(booking[1])
        return list_of_members
    finally:
        if cur is not None:
            cur.close()
        if con is not None:
            con.close()
| epl-1.0 |
cewood/ansible | lib/ansible/plugins/action/script.py | 67 | 4108 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    """Action plugin for the `script` module: transfers a local script to
    the remote host, makes it executable, runs it, and cleans up."""
    TRANSFERS_FILES = True
    def run(self, tmp=None, task_vars=None):
        ''' handler for file transfer operations '''
        if self._play_context.check_mode:
            # Running an arbitrary script cannot be simulated safely.
            return dict(skipped=True, msg='check mode not supported for this module')
        if not tmp:
            tmp = self._make_tmp_path()
        creates = self._task.args.get('creates')
        if creates:
            # do not run the command if the line contains creates=filename
            # and the filename already exists. This allows idempotence
            # of command executions.
            result = self._execute_module(module_name='stat', module_args=dict(path=creates), task_vars=task_vars, tmp=tmp, persist_files=True)
            stat = result.get('stat', None)
            if stat and stat.get('exists', False):
                return dict(skipped=True, msg=("skipped, since %s exists" % creates))
        removes = self._task.args.get('removes')
        if removes:
            # do not run the command if the line contains removes=filename
            # and the filename does not exist. This allows idempotence
            # of command executions.
            result = self._execute_module(module_name='stat', module_args=dict(path=removes), task_vars=task_vars, tmp=tmp, persist_files=True)
            stat = result.get('stat', None)
            if stat and not stat.get('exists', False):
                return dict(skipped=True, msg=("skipped, since %s does not exist" % removes))
        # the script name is the first item in the raw params, so we split it
        # out now so we know the file name we need to transfer to the remote,
        # and everything else is an argument to the script which we need later
        # to append to the remote command
        parts = self._task.args.get('_raw_params', '').strip().split()
        source = parts[0]
        args = ' '.join(parts[1:])
        if self._task._role is not None:
            # Inside a role, resolve the script relative to the role's files/.
            source = self._loader.path_dwim_relative(self._task._role._role_path, 'files', source)
        else:
            source = self._loader.path_dwim_relative(self._loader.get_basedir(), 'files', source)
        # transfer the file to a remote tmp location
        tmp_src = self._connection._shell.join_path(tmp, os.path.basename(source))
        self._connection.put_file(source, tmp_src)
        sudoable = True
        # set file permissions, more permissive when the copy is done as a different user
        if self._play_context.become and self._play_context.become_user != 'root':
            chmod_mode = 'a+rx'
            sudoable = False
        else:
            chmod_mode = '+rx'
        self._remote_chmod(tmp, chmod_mode, tmp_src, sudoable=sudoable)
        # add preparation steps to one ssh roundtrip executing the script
        env_string = self._compute_environment_string()
        script_cmd = ' '.join([env_string, tmp_src, args])
        result = self._low_level_execute_command(cmd=script_cmd, tmp=None, sudoable=True)
        # clean up after
        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES:
            self._remove_tmp_path(tmp)
        # Running a script always counts as a change.
        result['changed'] = True
        return result
| gpl-3.0 |
muffinresearch/amo-validator | validator/testcases/javascript/actions.py | 1 | 39965 | from copy import deepcopy
from functools import partial
import sys
import types
# Global import of predefinedentities will cause an import loop
import instanceactions
from validator.constants import (BUGZILLA_BUG, DESCRIPTION_TYPES, FENNEC_GUID,
FIREFOX_GUID, MAX_STR_SIZE)
from validator.decorator import version_range
from jstypes import JSArray, JSContext, JSLiteral, JSObject, JSWrapper
# Python types treated as numeric by the JS evaluator (Python 2 `long`).
NUMERIC_TYPES = (int, long, float, complex)
# None of these operations (or their augmented assignment counterparts) should
# be performed on non-numeric data. Any time we get non-numeric data for these
# guys, we just return window.NaN.
NUMERIC_OPERATORS = ('-', '*', '/', '%', '<<', '>>', '>>>', '|', '^', '&')
NUMERIC_OPERATORS += tuple('%s=' % op for op in NUMERIC_OPERATORS)
def get_NaN(traverser):
    """Return the wrapper for the global NaN entity, cached per traverser."""
    if getattr(traverser, 'NAN_CACHE', None) is None:
        # Imported here because a module-level import of predefinedentities
        # would create an import loop.
        from predefinedentities import GLOBAL_ENTITIES
        # Build once and memoize on the traverser itself.
        traverser.NAN_CACHE = traverser._build_global(
            'NaN', GLOBAL_ENTITIES[u'NaN'])
    return traverser.NAN_CACHE
def _get_member_exp_property(traverser, node):
    """Return the string value of a member expression's property."""
    prop = node['property']
    # Non-computed identifier access (`a.b`) reads the name directly;
    # computed access (`a[expr]`) must evaluate the expression.
    if prop['type'] == 'Identifier' and not node.get('computed'):
        return unicode(prop['name'])
    evaluated = traverser._traverse_node(prop)
    return _get_as_str(evaluated.get_literal_value())
def _expand_globals(traverser, node):
    """Expands a global object that has a lambda value.

    If *node* is a global whose 'value' is callable, the callable is
    evaluated and the result is wrapped; otherwise *node* is returned
    unchanged. The resulting wrapper inherits either the global's declared
    context or the original node's context.
    """
    if node.is_global and callable(node.value.get('value')):
        result = node.value['value'](traverser)
        if isinstance(result, dict):
            # A dict result describes a global entity template.
            output = traverser._build_global('--', result)
        elif isinstance(result, JSWrapper):
            output = result
        else:
            output = JSWrapper(result, traverser)
        # Set the node context.
        if 'context' in node.value:
            traverser._debug('CONTEXT>>%s' % node.value['context'])
            output.context = node.value['context']
        else:
            traverser._debug('CONTEXT>>INHERITED')
            output.context = node.context
        return output
    return node
def trace_member(traverser, node, instantiate=False):
    'Traces a MemberExpression and returns the appropriate object'
    traverser._debug('TESTING>>%s' % node['type'])
    if node['type'] == 'MemberExpression':
        # x.y or x[y]
        # x = base
        base = trace_member(traverser, node['object'], instantiate)
        base = _expand_globals(traverser, base)
        identifier = _get_member_exp_property(traverser, node)
        # Handle the various global entity properties.
        if base.is_global:
            # If we've got an XPCOM wildcard, return a copy of the entity.
            if 'xpcom_wildcard' in base.value:
                traverser._debug('MEMBER_EXP>>XPCOM_WILDCARD')
                from predefinedentities import CONTRACT_ENTITIES
                if identifier in CONTRACT_ENTITIES:
                    # Warn on access to known-dangerous XPCOM contract IDs.
                    kw = dict(err_id=('js', 'actions', 'dangerous_contract'),
                              warning='Dangerous XPCOM contract ID')
                    kw.update(CONTRACT_ENTITIES[identifier])
                    traverser.warning(**kw)
                # Copy before mutating so the shared entity is untouched.
                base.value = base.value.copy()
                del base.value['xpcom_wildcard']
                return base
        test_identifier(traverser, identifier)
        traverser._debug('MEMBER_EXP>>PROPERTY: %s' % identifier)
        output = base.get(
            traverser=traverser, instantiate=instantiate, name=identifier)
        output.context = base.context
        if base.is_global:
            # In the cases of XPCOM objects, methods generally
            # remain bound to their parent objects, even when called
            # indirectly.
            output.parent = base
        return output
    elif node['type'] == 'Identifier':
        traverser._debug('MEMBER_EXP>>ROOT:IDENTIFIER')
        test_identifier(traverser, node['name'])
        # If we're supposed to instantiate the object and it doesn't already
        # exist, instantitate the object.
        if instantiate and not traverser._is_defined(node['name']):
            output = JSWrapper(JSObject(), traverser=traverser)
            traverser.contexts[0].set(node['name'], output)
        else:
            output = traverser._seek_variable(node['name'])
        return _expand_globals(traverser, output)
    else:
        traverser._debug('MEMBER_EXP>>ROOT:EXPRESSION')
        # It's an expression, so just try your damndest.
        return traverser._traverse_node(node)
def test_identifier(traverser, name):
    """Emit a warning if *name* is a banned or deprecated identifier."""
    # Imported locally to avoid a circular import at module load time.
    import predefinedentities
    banned = predefinedentities.BANNED_IDENTIFIERS
    if name not in banned:
        return
    traverser.err.warning(
        err_id=('js', 'actions', 'banned_identifier'),
        warning='Banned or deprecated JavaScript Identifier',
        description=banned[name],
        filename=traverser.filename,
        line=traverser.line,
        column=traverser.position,
        context=traverser.context)
def _function(traverser, node):
    'Prevents code duplication'
    def wrap(traverser, node):
        """Traverse the function body in its own scope when later invoked."""
        me = JSObject()
        traverser.function_collection.append([])
        # Replace the current context with a prototypeable JS object.
        traverser._pop_context()
        me.type_ = 'default'  # Treat the function as a normal object.
        traverser._push_context(me)
        traverser._debug('THIS_PUSH')
        traverser.this_stack.append(me)  # Allow references to "this"
        # Declare parameters in the local scope
        params = []
        for param in node['params']:
            if param['type'] == 'Identifier':
                params.append(param['name'])
            elif param['type'] == 'ArrayPattern':
                for element in param['elements']:
                    # Array destructuring in function prototypes? LOL!
                    if element is None or element['type'] != 'Identifier':
                        continue
                    params.append(element['name'])
        local_context = traverser._peek_context(1)
        for param in params:
            var = JSWrapper(lazy=True, traverser=traverser)
            # We can assume that the params are static because we don't care
            # about what calls the function. We want to know whether the
            # function solely returns static values. If so, it is a static
            # function.
            local_context.set(param, var)
        traverser._traverse_node(node['body'])
        # Since we need to manually manage the "this" stack, pop off that
        # context.
        traverser._debug('THIS_POP')
        traverser.this_stack.pop()
        # Call all of the function collection's members to traverse all of the
        # child functions.
        func_coll = traverser.function_collection.pop()
        for func in func_coll:
            func()
    # Put the function off for traversal at the end of the current block scope.
    traverser.function_collection[-1].append(partial(wrap, traverser, node))
    return JSWrapper(traverser=traverser, callable=True, dirty=True)
def _define_function(traverser, node):
    """Handle a function declaration: build it and bind it by name in the
    enclosing scope."""
    func = _function(traverser, node)
    traverser._peek_context(2).set(node['id']['name'], func)
    return func
def _func_expr(traverser, node):
    """Handle an anonymous function expression (lambda); no name is bound."""
    return _function(traverser, node)
def _define_with(traverser, node):
    'Handles `with` statements'
    object_ = traverser._traverse_node(node['object'])
    if isinstance(object_, JSWrapper) and isinstance(object_.value, JSObject):
        # Make the `with` target the innermost scope, then push a fresh
        # block context on top of it for the statement body.
        traverser.contexts[-1] = object_.value
        traverser.contexts.append(JSContext('block'))
    # Non-object targets are ignored; the context stack is left unchanged.
    return
def _define_var(traverser, node):
    'Creates a local context variable'
    traverser._debug('VARIABLE_DECLARATION')
    traverser.debug_level += 1
    declarations = (node['declarations'] if 'declarations' in node
                    else node['head'])
    kind = node.get('kind', 'let')
    for declaration in declarations:
        # It could be deconstruction of variables :(
        if declaration['id']['type'] == 'ArrayPattern':
            vars = []
            for element in declaration['id']['elements']:
                # NOTE : Multi-level array destructuring sucks. Maybe implement
                # it someday if you're bored, but it's so rarely used and it's
                # so utterly complex, there's probably no need to ever code it
                # up.
                if element is None or element['type'] != 'Identifier':
                    vars.append(None)
                    continue
                vars.append(element['name'])
            # The variables are not initialized
            if declaration['init'] is None:
                # Simple instantiation; no initialization
                for var in vars:
                    if not var:
                        continue
                    traverser._declare_variable(var, None)
            # The variables are declared inline
            elif declaration['init']['type'] == 'ArrayPattern':
                # TODO : Test to make sure len(values) == len(vars)
                for value in declaration['init']['elements']:
                    if vars[0]:
                        traverser._declare_variable(
                            vars[0], JSWrapper(traverser._traverse_node(value),
                                               traverser=traverser))
                    vars = vars[1:]  # Pop off the first value
            # It's being assigned by a JSArray (presumably)
            elif declaration['init']['type'] == 'ArrayExpression':
                assigner = traverser._traverse_node(declaration['init'])
                for value in assigner.value.elements:
                    if vars[0]:
                        traverser._declare_variable(vars[0], value)
                    vars = vars[1:]
        elif declaration['id']['type'] == 'ObjectPattern':
            init = traverser._traverse_node(declaration['init'])
            def _proc_objpattern(init_obj, properties):
                """Recursively bind object-pattern properties to variables."""
                for prop in properties:
                    # Get the name of the init obj's member
                    if prop['key']['type'] == 'Literal':
                        prop_name = prop['key']['value']
                    elif prop['key']['type'] == 'Identifier':
                        prop_name = prop['key']['name']
                    else:
                        continue
                    if prop['value']['type'] == 'Identifier':
                        traverser._declare_variable(
                            prop['value']['name'],
                            init_obj.get(traverser, prop_name))
                    elif prop['value']['type'] == 'ObjectPattern':
                        _proc_objpattern(init_obj.get(traverser, prop_name),
                                         prop['value']['properties'])
            if init is not None:
                _proc_objpattern(init_obj=init,
                                 properties=declaration['id']['properties'])
        else:
            # Plain `var x = value;` declaration.
            var_name = declaration['id']['name']
            traverser._debug('NAME>>%s' % var_name)
            var_value = traverser._traverse_node(declaration['init'])
            traverser._debug('VALUE>>%s' % (var_value.output()
                                            if var_value is not None
                                            else 'None'))
            if not isinstance(var_value, JSWrapper):
                var = JSWrapper(value=var_value,
                                const=kind == 'const',
                                traverser=traverser)
            else:
                var = var_value
                var.const = kind == 'const'
            traverser._declare_variable(var_name, var, type_=kind)
    # `let (x = 1) { ... }` style statements carry a body to traverse.
    if 'body' in node:
        traverser._traverse_node(node['body'])
    traverser.debug_level -= 1
    # The "Declarations" branch contains custom elements.
    return True
def _define_obj(traverser, node):
    'Creates a local context object'
    var = JSObject()
    for prop in node['properties']:
        if prop['type'] == 'PrototypeMutation':
            var_name = 'prototype'
        else:
            # Property keys may be literals, identifiers, or expressions.
            key = prop['key']
            if key['type'] == 'Literal':
                var_name = key['value']
            elif isinstance(key['name'], basestring):
                var_name = key['name']
            else:
                if 'property' in key['name']:
                    name = key['name']
                else:
                    # Wrap so _get_member_exp_property sees a uniform shape.
                    name = {'property': key['name']}
                var_name = _get_member_exp_property(traverser, name)
        var_value = traverser._traverse_node(prop['value'])
        var.set(var_name, var_value, traverser)
        # TODO: Observe "kind"
    if not isinstance(var, JSWrapper):
        return JSWrapper(var, lazy=True, traverser=traverser)
    var.lazy = True
    return var
def _define_array(traverser, node):
    """Instantiate an array object from the parse tree."""
    arr = JSArray()
    # Traverse every element; under Python 2, map() yields a list here.
    arr.elements = map(traverser._traverse_node, node['elements'])
    return arr
def _define_template_strings(traverser, node):
    """Instantiate an array of raw and cooked template strings.

    Mirrors the argument a tagged template receives: the cooked-strings
    array exposes the raw-strings array as its `raw` property.
    """
    cooked = JSArray()
    cooked.elements = map(traverser._traverse_node, node['cooked'])
    raw = JSArray()
    raw.elements = map(traverser._traverse_node, node['raw'])
    # Attach the raw strings to the cooked array, as JS engines do.
    cooked.set('raw', raw, traverser)
    return cooked
def _define_template(traverser, node):
    """Instantiate a template literal.

    Approximates interpolation by folding all traversed parts together
    with the `+` binary operator.
    """
    elements = map(traverser._traverse_node, node['elements'])
    return reduce(partial(_binary_op, '+', traverser=traverser), elements)
def _define_literal(traverser, node):
    """
    Convert a literal node in the parse tree to its corresponding
    interpreted value.

    dict-shaped values (presumably regex literal payloads — TODO confirm)
    become dirty placeholder objects; other literals are wrapped and then
    checked for dangerous string patterns.
    """
    value = node['value']
    if isinstance(value, dict):
        return JSWrapper(JSObject(), traverser=traverser, dirty=True)
    wrapper = JSWrapper(value if value is not None else JSLiteral(None),
                        traverser=traverser)
    test_literal(traverser, wrapper)
    return wrapper
def test_literal(traverser, wrapper):
    """
    Test the value of a literal, in particular only a string literal at the
    moment, against possibly dangerous patterns.

    Non-string literals are ignored.
    """
    value = wrapper.get_literal_value()
    if isinstance(value, basestring):
        # Local import to prevent import loop.
        from validator.testcases.regex import (validate_compat_pref,
                                               validate_string)
        validate_string(value, traverser, wrapper=wrapper)
        validate_compat_pref(value, traverser, wrapper=wrapper)
def _call_expression(traverser, node):
    """Evaluate a CallExpression node.

    Traverses the arguments and the callee, warns about complex code in
    preference-defaults files and dangerous global calls, dispatches known
    instance-method handlers, and returns the callee's modelled return
    value (a dirty object when nothing better is known).
    """
    args = node['arguments']
    for arg in args:
        traverser._traverse_node(arg, source='arguments')
    member = traverser._traverse_node(node['callee'])
    # defaults/preferences/ files may only call pref()/user_pref().
    if (traverser.filename.startswith('defaults/preferences/') and
            ('name' not in node['callee'] or
             node['callee']['name'] not in (u'pref', u'user_pref'))):
        traverser.err.warning(
            err_id=('testcases_javascript_actions',
                    '_call_expression',
                    'complex_prefs_defaults_code'),
            warning='Complex code should not appear in preference defaults '
                    'files',
            description="Calls to functions other than 'pref' and 'user_pref' "
                        'should not appear in defaults/preferences/ files.',
            filename=traverser.filename,
            line=traverser.line,
            column=traverser.position,
            context=traverser.context)
    # Globals may define a 'dangerous' callback which inspects the call.
    if member.is_global and callable(member.value.get('dangerous', None)):
        result = member.value['dangerous'](a=args, t=traverser._traverse_node,
                                           e=traverser.err)
        name = member.value.get('name', '')
        if result and name:
            kwargs = {
                'err_id': ('testcases_javascript_actions', '_call_expression',
                           'called_dangerous_global'),
                'warning': '`%s` called in potentially dangerous manner' %
                           member.value['name'],
                'description':
                    'The global `%s` function was called using a set '
                    'of dangerous parameters. Calls of this nature '
                    'are deprecated.' % member.value['name']}
            # The callback may return a custom description or a dict of
            # warning-kwargs overrides.
            if isinstance(result, DESCRIPTION_TYPES):
                kwargs['description'] = result
            elif isinstance(result, dict):
                kwargs.update(result)
            traverser.warning(**kwargs)
    elif (node['callee']['type'] == 'MemberExpression' and
          node['callee']['property']['type'] == 'Identifier'):
        # If we can identify the function being called on any member of any
        # instance, we can use that to either generate an output value or test
        # for additional conditions.
        identifier_name = node['callee']['property']['name']
        if identifier_name in instanceactions.INSTANCE_DEFINITIONS:
            result = instanceactions.INSTANCE_DEFINITIONS[identifier_name](
                args, traverser, node, wrapper=member)
            return result
    if member.is_global and 'return' in member.value:
        if 'object' in node['callee']:
            member.parent = trace_member(traverser, node['callee']['object'])
        return member.value['return'](wrapper=member, arguments=args,
                                      traverser=traverser)
    return JSWrapper(JSObject(), dirty=True, traverser=traverser)
def _call_settimeout(a, t, e):
"""
Handler for setTimeout and setInterval. Should determine whether a[0]
is a lambda function or a string. Strings are banned, lambda functions are
ok. Since we can't do reliable type testing on other variables, we flag
those, too.
"""
if not a:
return
if a[0]['type'] in ('FunctionExpression', 'ArrowFunctionExpression'):
return
if t(a[0]).callable:
return
return {'err_id': ('javascript', 'dangerous_global', 'eval'),
'description':
'In order to prevent vulnerabilities, the `setTimeout` '
'and `setInterval` functions should be called only with '
'function expressions as their first argument.',
'signing_help': (
'Please do not ever call `setTimeout` or `setInterval` with '
'string arguments. If you are passing a function which is '
'not being correctly detected as such, please consider '
'passing a closure or arrow function, which in turn calls '
'the original function.'),
'signing_severity': 'high'}
def _call_require(a, t, e):
"""
Tests for unsafe uses of `require()` in SDK add-ons.
"""
args, traverse, err = a, t, e
if not err.metadata.get('is_jetpack') and len(args):
return
module = traverse(args[0]).get_literal_value()
if not isinstance(module, basestring):
return
if module.startswith('sdk/'):
module = module[len('sdk/'):]
LOW_LEVEL = {
# Added from bugs 689340, 731109
'chrome', 'window-utils', 'observer-service',
# Added from bug 845492
'window/utils', 'sdk/window/utils', 'sdk/deprecated/window-utils',
'tab/utils', 'sdk/tab/utils',
'system/events', 'sdk/system/events',
}
if module in LOW_LEVEL:
err.metadata['requires_chrome'] = True
return {'warning': 'Usage of low-level or non-SDK interface',
'description': 'Your add-on uses an interface which bypasses '
'the high-level protections of the add-on SDK. '
'This interface should be avoided, and its use '
'may significantly complicate your review '
'process.'}
if module == 'widget':
return {'warning': 'Use of deprecated SDK module',
'description':
"The 'widget' module has been deprecated due to a number "
'of performance and usability issues, and has been '
'removed from the SDK as of Firefox 40. Please use the '
"'sdk/ui/button/action' or 'sdk/ui/button/toggle' module "
'instead. See '
'https://developer.mozilla.org/Add-ons/SDK/High-Level_APIs'
'/ui for more information.'}
def _call_create_pref(a, t, e):
    """
    Handler for pref() and user_pref() calls in defaults/preferences/*.js files
    to ensure that they don't touch preferences outside of the "extensions."
    branch.

    Returns a warning string (from test_preference) or None.
    """
    # We really need to clean up the arguments passed to these functions.
    # `t` is the traverser's bound _traverse_node method; im_self (Python 2)
    # recovers the traverser instance that owns it.
    traverser = t.im_self
    if not traverser.filename.startswith('defaults/preferences/') or not a:
        return
    # Record the preference write so later tests can observe it.
    instanceactions.set_preference(JSWrapper(JSLiteral(None),
                                             traverser=traverser),
                                   a, traverser)
    value = _get_as_str(t(a[0]))
    return test_preference(value)
def test_preference(value):
for branch in 'extensions.', 'services.sync.prefs.sync.extensions.':
if value.startswith(branch) and value.rindex('.') > len(branch):
return
return ('Extensions should not alter preferences outside of the '
"'extensions.' preference branch. Please make sure that "
"all of your extension's preferences are prefixed with "
"'extensions.add-on-name.', where 'add-on-name' is a "
'distinct string unique to and indicative of your add-on.')
def _readonly_top(traverser, right, node_right):
    """Handle the readonly callback for window.top.

    Emits a Gecko 6 compatibility notice; `right`/`node_right` are part of
    the readonly-callback signature and are intentionally unused here.
    """
    traverser.notice(
        err_id=('testcases_javascript_actions',
                '_readonly_top'),
        notice='window.top is a reserved variable',
        description='The `top` global variable is reserved and cannot be '
                    'assigned any values starting with Gecko 6. Review your '
                    'code for any uses of the `top` global, and refer to '
                    # BUGZILLA_BUG is a %-format URL template filled with the
                    # bug number before being spliced into the description.
                    '%s for more information.' % BUGZILLA_BUG % 654137,
        for_appversions={FIREFOX_GUID: version_range('firefox',
                                                     '6.0a1', '7.0a1'),
                         FENNEC_GUID: version_range('fennec',
                                                    '6.0a1', '7.0a1')},
        compatibility_type='warning',
        tier=5)
def _expression(traverser, node):
    """
    This is a helper method that allows node definitions to point at
    `_traverse_node` without needing a reference to a traverser.

    Simply unwraps an ExpressionStatement and traverses its expression.
    """
    return traverser._traverse_node(node['expression'])
def _get_this(traverser, node):
    """Return the object currently bound to `this`.

    Falls back to the global `window` object when no `this` context has
    been pushed onto the stack.
    """
    if not traverser.this_stack:
        from predefinedentities import GLOBAL_ENTITIES
        return traverser._build_global('window', GLOBAL_ENTITIES[u'window'])
    return traverser.this_stack[-1]
def _new(traverser, node):
    """Evaluate a NewExpression node.

    Traverses the arguments purely for their side effects, then resolves
    the callee; global callees are deep-copied and marked overwritable so
    the instance can diverge from the shared global definition.
    """
    # We don't actually process the arguments as part of the flow because of
    # the Angry T-Rex effect. For now, we just traverse them to ensure they
    # don't contain anything dangerous.
    args = node['arguments']
    if isinstance(args, list):
        for arg in args:
            traverser._traverse_node(arg, source='arguments')
    else:
        traverser._traverse_node(args)
    elem = traverser._traverse_node(node['callee'])
    if not isinstance(elem, JSWrapper):
        elem = JSWrapper(elem, traverser=traverser)
    if elem.is_global:
        traverser._debug('Making overwritable')
        elem.value = deepcopy(elem.value)
        elem.value['overwritable'] = True
    return elem
def _ident(traverser, node):
    """Initiate an object lookup on the traverser for an identifier token.

    Unknown identifiers resolve to a dirty placeholder object instead of
    raising.
    """
    name = node['name']
    # Ban bits like "newThread"
    test_identifier(traverser, name)
    if traverser._is_defined(name):
        return traverser._seek_variable(name)
    return JSWrapper(JSObject(), traverser=traverser, dirty=True)
def _expr_assignment(traverser, node):
    """Evaluate an AssignmentExpression node.

    Direct assignment (`=`) is handled separately: identifiers and member
    expressions are written into the scope model, and overwrites of
    readonly globals raise a warning. Augmented assignment (`+=` etc.) is
    simulated on the literal values of both sides.
    """
    traverser._debug('ASSIGNMENT_EXPRESSION')
    traverser.debug_level += 1
    traverser._debug('ASSIGNMENT>>PARSING RIGHT')
    right = traverser._traverse_node(node['right'])
    right = JSWrapper(right, traverser=traverser)
    # Treat direct assignment different than augmented assignment.
    if node['operator'] == '=':
        from predefinedentities import GLOBAL_ENTITIES, is_shared_scope
        global_overwrite = False
        readonly_value = is_shared_scope(traverser)
        node_left = node['left']
        traverser._debug('ASSIGNMENT:DIRECT(%s)' % node_left['type'])
        if node_left['type'] == 'Identifier':
            # Identifiers just need the ID name and a value to push.
            # Raise a global overwrite issue if the identifier is global.
            global_overwrite = traverser._is_global(node_left['name'])
            # Get the readonly attribute and store its value if is_global
            if global_overwrite:
                global_dict = GLOBAL_ENTITIES[node_left['name']]
                if 'readonly' in global_dict:
                    readonly_value = global_dict['readonly']
            traverser._declare_variable(node_left['name'], right, type_='glob')
        elif node_left['type'] == 'MemberExpression':
            member_object = trace_member(traverser, node_left['object'],
                                         instantiate=True)
            global_overwrite = (member_object.is_global and
                                not ('overwritable' in member_object.value and
                                     member_object.value['overwritable']))
            member_property = _get_member_exp_property(traverser, node_left)
            traverser._debug('ASSIGNMENT:MEMBER_PROPERTY(%s)'
                             % member_property)
            traverser._debug('ASSIGNMENT:GLOB_OV::%s' % global_overwrite)
            # Don't do the assignment if we're facing a global.
            if not member_object.is_global:
                if member_object.value is None:
                    member_object.value = JSObject()
                # NOTE(review): this inner is_global check duplicates the
                # guard above and looks always-True here, making the else
                # branch unreachable — confirm before cleaning up.
                if not member_object.is_global:
                    member_object.value.set(member_property, right, traverser)
                else:
                    # It's probably better to do nothing.
                    pass
            elif 'value' in member_object.value:
                member_object_value = _expand_globals(traverser,
                                                      member_object).value
                if member_property in member_object_value['value']:
                    # If it's a global and the actual member exists, test
                    # whether it can be safely overwritten.
                    member = member_object_value['value'][member_property]
                    if 'readonly' in member:
                        global_overwrite = True
                        readonly_value = member['readonly']
        traverser._debug('ASSIGNMENT:DIRECT:GLOB_OVERWRITE %s' %
                         global_overwrite)
        traverser._debug('ASSIGNMENT:DIRECT:READONLY %r' %
                         readonly_value)
        # readonly may be a callback deciding per-assignment (e.g. window.top).
        if callable(readonly_value):
            readonly_value = readonly_value(traverser, right, node['right'])
        if readonly_value and global_overwrite:
            kwargs = dict(
                err_id=('testcases_javascript_actions',
                        '_expr_assignment',
                        'global_overwrite'),
                warning='Global variable overwrite',
                description='An attempt was made to overwrite a global '
                            'variable in some JavaScript code.')
            if isinstance(readonly_value, DESCRIPTION_TYPES):
                kwargs['description'] = readonly_value
            elif isinstance(readonly_value, dict):
                kwargs.update(readonly_value)
            traverser.warning(**kwargs)
        return right
    lit_right = right.get_literal_value()
    traverser._debug('ASSIGNMENT>>PARSING LEFT')
    left = traverser._traverse_node(node['left'])
    traverser._debug('ASSIGNMENT>>DONE PARSING LEFT')
    traverser.debug_level -= 1
    if isinstance(left, JSWrapper):
        if left.dirty:
            return left
        lit_left = left.get_literal_value()
        token = node['operator']
        # Don't perform an operation on None. Python freaks out
        if lit_left is None:
            lit_left = 0
        if lit_right is None:
            lit_right = 0
        # Give them default values so we have them in scope.
        gleft, gright = 0, 0
        # All of the assignment operators
        operators = {'=': lambda: right,
                     '+=': lambda: lit_left + lit_right,
                     '-=': lambda: gleft - gright,
                     '*=': lambda: gleft * gright,
                     '/=': lambda: 0 if gright == 0 else (gleft / gright),
                     '%=': lambda: 0 if gright == 0 else (gleft % gright),
                     '<<=': lambda: int(gleft) << int(gright),
                     '>>=': lambda: int(gleft) >> int(gright),
                     '>>>=': lambda: float(abs(int(gleft)) >> gright),
                     '|=': lambda: int(gleft) | int(gright),
                     '^=': lambda: int(gleft) ^ int(gright),
                     '&=': lambda: int(gleft) & int(gright)}
        # If we're modifying a non-numeric type with a numeric operator, return
        # NaN.
        if (not isinstance(lit_left, NUMERIC_TYPES) and
                token in NUMERIC_OPERATORS):
            left.set_value(get_NaN(traverser), traverser=traverser)
            return left
        # If either side of the assignment operator is a string, both sides
        # need to be casted to strings first.
        if (isinstance(lit_left, types.StringTypes) or
                isinstance(lit_right, types.StringTypes)):
            lit_left = _get_as_str(lit_left)
            lit_right = _get_as_str(lit_right)
        gleft, gright = _get_as_num(left), _get_as_num(right)
        traverser._debug('ASSIGNMENT>>OPERATION:%s' % token)
        if token not in operators:
            # We don't support that operator. (yet?)
            traverser._debug('ASSIGNMENT>>OPERATOR NOT FOUND', 1)
            return left
        elif token in ('<<=', '>>=', '>>>=') and gright < 0:
            # The user is doing weird bitshifting that will return 0 in JS but
            # not in Python.
            left.set_value(0, traverser=traverser)
            return left
        elif (token in ('<<=', '>>=', '>>>=', '|=', '^=', '&=') and
              (abs(gleft) == float('inf') or abs(gright) == float('inf'))):
            # Don't bother handling infinity for integer-converted operations.
            left.set_value(get_NaN(traverser), traverser=traverser)
            return left
        traverser._debug('ASSIGNMENT::L-value global? (%s)' %
                         ('Y' if left.is_global else 'N'), 1)
        try:
            new_value = operators[token]()
        except Exception:
            traverser.system_error(exc_info=sys.exc_info())
            new_value = None
        # Cap the length of analyzed strings.
        if (isinstance(new_value, types.StringTypes) and
                len(new_value) > MAX_STR_SIZE):
            new_value = new_value[:MAX_STR_SIZE]
        traverser._debug('ASSIGNMENT::New value >> %s' % new_value, 1)
        left.set_value(new_value, traverser=traverser)
        return left
    # Though it would otherwise be a syntax error, we say that 4=5 should
    # evaluate out to 5.
    return right
def _expr_binary(traverser, node):
    """Evaluate a BinaryExpression node.

    Long left-leaning chains of binary expressions are processed directly
    (caching the result in '__traversal') to keep recursion depth bounded.
    """
    traverser.debug_level += 1
    # Select the proper operator.
    operator = node['operator']
    traverser._debug('BIN_OPERATOR>>%s' % operator)
    # Traverse the left half of the binary expression.
    with traverser._debug('BIN_EXP>>l-value'):
        if (node['left']['type'] == 'BinaryExpression' and
                '__traversal' not in node['left']):
            # Process the left branch of the binary expression directly. This
            # keeps the recursion cap in line and speeds up processing of
            # large chains of binary expressions.
            left = _expr_binary(traverser, node['left'])
            node['left']['__traversal'] = left
        else:
            left = traverser._traverse_node(node['left'])
    # Traverse the right half of the binary expression.
    with traverser._debug('BIN_EXP>>r-value'):
        if (operator == 'instanceof' and
                node['right']['type'] == 'Identifier' and
                node['right']['name'] == 'Function'):
            # We make an exception for instanceof's r-value if it's a
            # dangerous global, specifically Function.
            return JSWrapper(True, traverser=traverser)
        else:
            right = traverser._traverse_node(node['right'])
            traverser._debug('Is dirty? %r' % right.dirty, 1)
    return _binary_op(operator, left, right, traverser)
def _binary_op(operator, left, right, traverser):
    """Perform a binary operation on two pre-traversed nodes.

    Operates on the literal values of both wrappers, approximating
    JavaScript's coercion rules; unknown operators yield a None-valued
    wrapper.
    """
    # Dirty l or r values mean we can skip the expression. A dirty value
    # indicates that a lazy operation took place that introduced some
    # nondeterminacy.
    # FIXME(Kris): We should process these as if they're strings anyway.
    if left.dirty:
        return left
    elif right.dirty:
        return right
    # Binary expressions are only executed on literals.
    left = left.get_literal_value()
    # Keep the wrapper around for the `in` operator's containment test.
    right_wrap = right
    right = right.get_literal_value()
    # Coerce the literals to numbers for numeric operations.
    gleft = _get_as_num(left)
    gright = _get_as_num(right)
    operators = {
        '==': lambda: left == right or gleft == gright,
        '!=': lambda: left != right,
        '===': lambda: left == right,  # Be flexible.
        '!==': lambda: type(left) != type(right) or left != right,
        '>': lambda: left > right,
        '<': lambda: left < right,
        '<=': lambda: left <= right,
        '>=': lambda: left >= right,
        '<<': lambda: int(gleft) << int(gright),
        '>>': lambda: int(gleft) >> int(gright),
        '>>>': lambda: float(abs(int(gleft)) >> int(gright)),
        '+': lambda: left + right,
        '-': lambda: gleft - gright,
        '*': lambda: gleft * gright,
        '/': lambda: 0 if gright == 0 else (gleft / gright),
        '%': lambda: 0 if gright == 0 else (gleft % gright),
        'in': lambda: right_wrap.contains(left),
        # TODO : implement instanceof
        # FIXME(Kris): Treat instanceof the same as `QueryInterface`
    }
    output = None
    if (operator in ('>>', '<<', '>>>') and
            (left is None or right is None or gright < 0)):
        # Shifts with missing operands or negative shift counts behave
        # differently in JS than Python; short-circuit to False.
        output = False
    elif operator in operators:
        # Concatenation can be silly, so always turn undefineds into empty
        # strings and if there are strings, make everything strings.
        if operator == '+':
            if left is None:
                left = ''
            if right is None:
                right = ''
            if isinstance(left, basestring) or isinstance(right, basestring):
                left = _get_as_str(left)
                right = _get_as_str(right)
        # Don't even bother handling infinity if it's a numeric computation.
        if (operator in ('<<', '>>', '>>>') and
                (abs(gleft) == float('inf') or abs(gright) == float('inf'))):
            return get_NaN(traverser)
        try:
            output = operators[operator]()
        except Exception:
            traverser.system_error(exc_info=sys.exc_info())
            output = None
        # Cap the length of analyzed strings.
        if (isinstance(output, types.StringTypes) and
                len(output) > MAX_STR_SIZE):
            output = output[:MAX_STR_SIZE]
        wrapper = JSWrapper(output, traverser=traverser)
        # Test the newly-created literal for dangerous values.
        # This may cause duplicate warnings for strings which
        # already match a dangerous value prior to concatenation.
        test_literal(traverser, wrapper)
        return wrapper
    return JSWrapper(output, traverser=traverser)
def _expr_unary(traverser, node):
    """Evaluate a UnaryExpression node.

    Operators are simulated on the literal and numeric views of the
    operand; unsupported operators yield None.
    """
    expr = traverser._traverse_node(node['argument'])
    expr_lit = expr.get_literal_value()
    expr_num = _get_as_num(expr_lit)
    operators = {'-': lambda: -1 * expr_num,
                 '+': lambda: expr_num,
                 '!': lambda: not expr_lit,
                 '~': lambda: -1 * (expr_num + 1),
                 'void': lambda: None,
                 'typeof': lambda: _expr_unary_typeof(expr),
                 'delete': lambda: None}  # We never want to empty the context
    if node['operator'] in operators:
        output = operators[node['operator']]()
    else:
        output = None
    if not isinstance(output, JSWrapper):
        output = JSWrapper(output, traverser=traverser)
    return output
def _expr_unary_typeof(wrapper):
    """Evaluate the "typeof" value for a JSWrapper object.

    Returns one of the JS typeof strings: 'function', 'undefined',
    'boolean', 'number', 'string', or 'object'.
    """
    # Globals with only a 'return' entry model callable entities.
    if (wrapper.callable or
            (wrapper.is_global and 'return' in wrapper.value and
             'value' not in wrapper.value)):
        return 'function'
    value = wrapper.value
    if value is None:
        return 'undefined'
    elif isinstance(value, JSLiteral):
        value = value.value
        # bool must be tested before the numeric types since bool is a
        # subclass of int in Python.
        if isinstance(value, bool):
            return 'boolean'
        elif isinstance(value, (int, long, float)):
            return 'number'
        elif isinstance(value, types.StringTypes):
            return 'string'
    return 'object'
def _get_as_num(value):
    """Return the JS numeric equivalent for a value.

    Approximates JavaScript's Number() coercion: None -> 0, hex strings
    parsed base-16, other strings as floats, and anything unparseable -> 0.
    """
    if isinstance(value, JSWrapper):
        value = value.get_literal_value()
    if value is None:
        return 0
    try:
        if isinstance(value, types.StringTypes):
            if value.startswith('0x'):
                return int(value, 16)
            else:
                return float(value)
        elif isinstance(value, (int, float, long)):
            return value
        else:
            # Fall back to integer coercion (e.g. for booleans).
            return int(value)
    except (ValueError, TypeError):
        return 0
def _get_as_str(value):
    """Return the JS string equivalent for a literal value.

    Approximates JavaScript's String() coercion: None -> '', booleans to
    'true'/'false', infinities to '(-)Infinity', and integral floats
    without the trailing '.0'.
    """
    if isinstance(value, JSWrapper):
        value = value.get_literal_value()
    if value is None:
        return ''
    if isinstance(value, bool):
        return u'true' if value else u'false'
    elif isinstance(value, (int, float, long)):
        if value == float('inf'):
            return u'Infinity'
        elif value == float('-inf'):
            return u'-Infinity'
        # Try to see if we can shave off some trailing significant figures.
        try:
            if int(value) == value:
                return unicode(int(value))
        except ValueError:
            # e.g. NaN; fall through to the generic conversion below.
            pass
    return unicode(value)
| bsd-3-clause |
michaelpacer/networkx | networkx/algorithms/connectivity/utils.py | 24 | 3304 | # -*- coding: utf-8 -*-
"""
Utilities for connectivity package
"""
import networkx as nx
__author__ = '\n'.join(['Jordi Torrents <jtorrents@milnou.net>'])
__all__ = ['build_auxiliary_node_connectivity',
'build_auxiliary_edge_connectivity']
def build_auxiliary_node_connectivity(G):
    r"""Return the auxiliary digraph used for flow-based node connectivity.

    Every node ``v`` of ``G`` is split into two nodes ``vA`` and ``vB``
    joined by an internal arc ``(vA, vB)`` of capacity 1. Each edge
    ``(u, v)`` of ``G`` becomes the arc ``(uB, vA)``, plus the reverse arc
    ``(vB, uA)`` when ``G`` is undirected, each with capacity 1. This
    follows the construction in [1]_.

    The mapping from original nodes to the integer ids used in the node
    names is stored as a graph attribute: ``H.graph['mapping']``.

    References
    ----------
    .. [1] Kammer, Frank and Hanjo Taubig. Graph Connectivity. in Brandes and
        Erlebach, 'Network Analysis: Methodological Foundations', Lecture
        Notes in Computer Science, Volume 3418, Springer-Verlag, 2005.
        http://www.informatik.uni-augsburg.de/thi/personen/kammer/Graph_Connectivity.pdf
    """
    is_directed = G.is_directed()
    H = nx.DiGraph()
    mapping = {}
    # Split every original node into an 'A' (in) and a 'B' (out) half
    # connected by a unit-capacity internal arc.
    for index, node in enumerate(G):
        mapping[node] = index
        H.add_node('%dA' % index, id=node)
        H.add_node('%dB' % index, id=node)
        H.add_edge('%dA' % index, '%dB' % index, capacity=1)
    # Wire the original edges between the B (out) and A (in) halves.
    for u, v in G.edges():
        H.add_edge('%sB' % mapping[u], '%sA' % mapping[v], capacity=1)
        if not is_directed:
            H.add_edge('%sB' % mapping[v], '%sA' % mapping[u], capacity=1)
    # Store mapping as graph attribute
    H.graph['mapping'] = mapping
    return H
def build_auxiliary_edge_connectivity(G):
    """Return the auxiliary digraph for flow-based edge connectivity.

    A directed input is copied as-is; each undirected edge is replaced by
    a pair of reciprocal arcs. Every arc receives ``capacity=1``. This is
    part of algorithm 1 in [1]_.

    References
    ----------
    .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms. (this is a
        chapter, look for the reference of the book).
        http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
    """
    H = nx.DiGraph()
    H.add_nodes_from(G.nodes())
    if G.is_directed():
        H.add_edges_from(G.edges(), capacity=1)
    else:
        # One arc in each direction for every undirected edge.
        for u, v in G.edges():
            H.add_edges_from([(u, v), (v, u)], capacity=1)
    return H
| bsd-3-clause |
gmoothart/validictory | validictory/validator.py | 4 | 24170 | import re
import sys
import copy
import socket
from datetime import datetime
from decimal import Decimal
from collections import Mapping, Container
if sys.version_info[0] == 3:
_str_type = str
_int_types = (int,)
else:
_str_type = basestring
_int_types = (int, long)
class SchemaError(ValueError):
    """
    errors encountered in processing a schema (subclass of :class:`ValueError`)

    Raised when the schema itself is malformed, as opposed to the data
    failing validation.
    """
class ValidationError(ValueError):
    """
    validation errors encountered during validation (subclass of
    :class:`ValueError`)

    Base class for errors caused by data not conforming to a schema.
    """
class FieldValidationError(ValidationError):
    """
    A :class:`ValidationError` tied to a specific field.

    The offending ``fieldname`` and ``value`` are exposed as attributes so
    callers can report exactly what failed.
    """
    def __init__(self, message, fieldname, value):
        super(FieldValidationError, self).__init__(message)
        # Remember which field failed and the value that caused it.
        self.value = value
        self.fieldname = fieldname
def _generate_datetime_validator(format_option, dateformat_string):
    """Build a format validator that checks values with a strptime pattern."""
    def validate_format_datetime(validator, fieldname, value, format_option):
        try:
            datetime.strptime(value, dateformat_string)
        except ValueError:
            message = ("Value %(value)r of field '%(fieldname)s' is not in "
                       "'%(format_option)s' format" % locals())
            raise FieldValidationError(message, fieldname, value)
    return validate_format_datetime
# Built-in date/time format validators derived from strptime patterns.
validate_format_date_time = _generate_datetime_validator('date-time',
                                                         '%Y-%m-%dT%H:%M:%SZ')
validate_format_date = _generate_datetime_validator('date', '%Y-%m-%d')
validate_format_time = _generate_datetime_validator('time', '%H:%M:%S')
def validate_format_utc_millisec(validator, fieldname, value, format_option):
    """Validate the 'utc-millisec' format: a strictly positive number."""
    is_number = isinstance(value, _int_types + (float, Decimal))
    if not is_number:
        raise FieldValidationError("Value %(value)r of field '%(fieldname)s' is "
                                   "not a number" % locals(), fieldname, value)
    if not value > 0:
        raise FieldValidationError("Value %(value)r of field '%(fieldname)s' is "
                                   "not a positive number" % locals(), fieldname, value)
def validate_format_ip_address(validator, fieldname, value, format_option):
    """Validate the 'ip-address' format: a dotted-quad IPv4 string."""
    try:
        socket.inet_aton(value)
        # Make sure we expect "X.X.X.X" as socket.inet_aton() converts "1"
        # to "0.0.0.1"
        ip = len(value.split('.')) == 4
    except (socket.error, ValueError, TypeError, AttributeError, UnicodeError):
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed. inet_aton raises socket.error for malformed
        # addresses; the other types cover non-string values.
        ip = False
    if not ip:
        raise FieldValidationError("Value %(value)r of field '%(fieldname)s' is "
                                   "not a ip-address" % locals(), fieldname, value)
# Format-name -> validator callable. Copied (not shared) into each
# SchemaValidator instance unless custom format_validators are supplied.
DEFAULT_FORMAT_VALIDATORS = {
    'date-time': validate_format_date_time,
    'date': validate_format_date,
    'time': validate_format_time,
    'utc-millisec': validate_format_utc_millisec,
    'ip-address': validate_format_ip_address,
}
class SchemaValidator(object):
'''
Validator largely based upon the JSON Schema proposal but useful for
validating arbitrary python data structures.
:param format_validators: optional dictionary of custom format validators
:param required_by_default: defaults to True, set to False to make
``required`` schema attribute False by default.
:param blank_by_default: defaults to False, set to True to make ``blank``
schema attribute True by default.
:param disallow_unknown_properties: defaults to False, set to True to
disallow properties not listed in the schema definition
'''
    def __init__(self, format_validators=None, required_by_default=True,
                 blank_by_default=False, disallow_unknown_properties=False):
        # Copy the default registry so per-instance registrations don't
        # mutate the module-level DEFAULT_FORMAT_VALIDATORS dict.
        if format_validators is None:
            format_validators = DEFAULT_FORMAT_VALIDATORS.copy()
        self._format_validators = format_validators
        self.required_by_default = required_by_default
        self.blank_by_default = blank_by_default
        self.disallow_unknown_properties = disallow_unknown_properties
    def register_format_validator(self, format_name, format_validator_fun):
        """Register a custom validator callable for a `format` name."""
        self._format_validators[format_name] = format_validator_fun
    def validate_type_string(self, val):
        """Return True if `val` is a string."""
        return isinstance(val, _str_type)
    def validate_type_integer(self, val):
        # Exact type check (not isinstance) so bools don't pass as integers.
        return type(val) in _int_types
    def validate_type_number(self, val):
        # Exact type check (not isinstance) so bools don't pass as numbers.
        return type(val) in _int_types + (float, Decimal,)
def validate_type_boolean(self, val):
return type(val) == bool
    def validate_type_object(self, val):
        """Return True for mapping values (dict and friends)."""
        return isinstance(val, Mapping)
def validate_type_array(self, val):
return isinstance(val, (list, tuple))
    def validate_type_null(self, val):
        """Return True only for None (JSON null)."""
        return val is None
    def validate_type_any(self, val):
        """Accept any value (JSON Schema type 'any')."""
        return True
def _error(self, desc, value, fieldname, **params):
params['value'] = value
params['fieldname'] = fieldname
message = desc % params
raise FieldValidationError(message, fieldname, value)
def _validate_unknown_properties(self, schema, data, fieldname):
schema_properties = set(schema)
data_properties = set(data)
delta = data_properties - schema_properties
if delta:
unknowns = ''
for x in delta:
unknowns += '"%s", ' % x
unknowns = unknowns.rstrip(", ")
raise SchemaError('Unknown properties for field '
'"%(fieldname)s": %(unknowns)s' %
locals())
    def validate_type(self, x, fieldname, schema, fieldtype=None):
        '''
        Validates that the fieldtype specified is correct for the given
        data

        `fieldtype` may be a list/tuple of alternatives, a nested schema
        dict, or a type-name string dispatched to a validate_type_* method.
        '''
        # We need to know if the field exists or if it's just Null
        fieldexists = True
        try:
            value = x[fieldname]
        except KeyError:
            fieldexists = False
            value = None
        if fieldtype and fieldexists:
            if isinstance(fieldtype, (list, tuple)):
                # Match if type matches any one of the types in the list
                datavalid = False
                for eachtype in fieldtype:
                    try:
                        self.validate_type(x, fieldname, eachtype, eachtype)
                        datavalid = True
                        break
                    except ValidationError:
                        pass
                if not datavalid:
                    self._error("Value %(value)r for field '%(fieldname)s' is "
                                "not of type %(fieldtype)s",
                                value, fieldname, fieldtype=fieldtype)
            elif isinstance(fieldtype, dict):
                # A dict fieldtype is a nested schema; validate recursively.
                try:
                    self.__validate(fieldname, x, fieldtype)
                except ValueError as e:
                    raise e
            else:
                # Dispatch by name to the matching validate_type_* method.
                try:
                    type_checker = getattr(self, 'validate_type_%s' %
                                           fieldtype)
                except AttributeError:
                    raise SchemaError("Field type '%s' is not supported." %
                                      fieldtype)
                if not type_checker(value):
                    self._error("Value %(value)r for field '%(fieldname)s' "
                                "is not of type %(fieldtype)s",
                                value, fieldname, fieldtype=fieldtype)
    def validate_properties(self, x, fieldname, schema, properties=None):
        '''
        Validates properties of a JSON object by processing the object's
        schema recursively

        Non-dict values are ignored here; type mismatches are reported by
        validate_type instead.
        '''
        if x.get(fieldname) is not None:
            value = x.get(fieldname)
            if isinstance(value, dict):
                if isinstance(properties, dict):
                    if self.disallow_unknown_properties:
                        self._validate_unknown_properties(properties, value,
                                                          fieldname)
                    # Recurse into each declared property's sub-schema.
                    for eachProp in properties:
                        self.__validate(eachProp, value,
                                        properties.get(eachProp))
                else:
                    raise SchemaError("Properties definition of field '%s' is "
                                      "not an object" % fieldname)
    def validate_items(self, x, fieldname, schema, items=None):
        '''
        Validates that all items in the list for the given field match the
        given schema

        `items` may be a list/tuple (positional, tuple-style validation)
        or a dict (one schema applied to every element).
        '''
        if x.get(fieldname) is not None:
            value = x.get(fieldname)
            if isinstance(value, (list, tuple)):
                if isinstance(items, (list, tuple)):
                    # Tuple validation: lengths must match unless
                    # additionalItems is declared in the schema.
                    if (not 'additionalItems' in schema and
                            len(items) != len(value)):
                        self._error("Length of list %(value)r for field "
                                    "'%(fieldname)s' is not equal to length "
                                    "of schema list", value, fieldname)
                    else:
                        for itemIndex in range(len(items)):
                            try:
                                self.validate(value[itemIndex],
                                              items[itemIndex])
                            except FieldValidationError as e:
                                raise type(e)("Failed to validate field '%s' "
                                              "list schema: %s" %
                                              (fieldname, e), fieldname, e.value)
                elif isinstance(items, dict):
                    # Single schema applied to every list element.
                    for eachItem in value:
                        if self.disallow_unknown_properties:
                            self._validate_unknown_properties(items, eachItem,
                                                              fieldname)
                        try:
                            self._validate(eachItem, items)
                        except FieldValidationError as e:
                            # a bit of a hack: replace reference to _data
                            # with 'list item' so error messages make sense
                            old_error = str(e).replace("field '_data'",
                                                       'list item')
                            raise type(e)("Failed to validate field '%s' list "
                                          "schema: %s" %
                                          (fieldname, old_error), fieldname, e.value)
                else:
                    raise SchemaError("Properties definition of field '%s' is "
                                      "not a list or an object" % fieldname)
def validate_required(self, x, fieldname, schema, required):
'''
Validates that the given field is present if required is True
'''
# Make sure the field is present
if fieldname not in x and required:
self._error("Required field '%(fieldname)s' is missing",
None, fieldname)
def validate_blank(self, x, fieldname, schema, blank=False):
'''
Validates that the given field is not blank if blank=False
'''
value = x.get(fieldname)
if isinstance(value, _str_type) and not blank and not value:
self._error("Value %(value)r for field '%(fieldname)s' cannot be "
"blank'", value, fieldname)
def validate_patternProperties(self, x, fieldname, schema,
patternproperties=None):
if patternproperties is None:
patternproperties = {}
value_obj = x.get(fieldname, {})
for pattern, schema in patternproperties.items():
for key, value in value_obj.items():
if re.match(pattern, key):
self.validate(value, schema)
    def validate_additionalItems(self, x, fieldname, schema,
                                 additionalItems=False):
        '''
        Validates list elements beyond those covered by a tuple-typed
        'items' schema.

        additionalItems=True permits anything; False forbids extra
        elements (length mismatch is an error); a dict schema is applied
        to the surplus elements.
        '''
        value = x.get(fieldname)
        if not isinstance(value, (list, tuple)):
            return
        if isinstance(additionalItems, bool):
            if additionalItems or 'items' not in schema:
                return
            elif len(value) != len(schema['items']):
                self._error("Length of list %(value)r for field "
                            "'%(fieldname)s' is not equal to length of schema "
                            "list", value, fieldname)
        # Validate any elements past the tuple-typed prefix against the
        # additionalItems schema (reached when additionalItems is a dict,
        # or after a non-fatal _error above).
        remaining = value[len(schema['items']):]
        if len(remaining) > 0:
            self._validate(remaining, {'items': additionalItems})
    def validate_additionalProperties(self, x, fieldname, schema,
                                      additionalProperties=None):
        '''
        Validates additional properties of a JSON object that were not
        specifically defined by the properties property.

        additionalProperties=True accepts anything, False rejects any
        undeclared property, and a dict schema is applied to each
        undeclared property's value.
        '''
        # Shouldn't be validating additionalProperties on non-dicts
        value = x.get(fieldname)
        if not isinstance(value, dict):
            return
        # If additionalProperties is the boolean value True then we accept
        # any additional properties.
        if isinstance(additionalProperties, bool) and additionalProperties:
            return
        value = x.get(fieldname)
        if isinstance(additionalProperties, (dict, bool)):
            properties = schema.get("properties")
            if properties is None:
                properties = {}
            if value is None:
                value = {}
            # Check every property not covered by the 'properties' keyword.
            for eachProperty in value:
                if eachProperty not in properties:
                    # If additionalProperties is the boolean value False
                    # then we don't accept any additional properties.
                    if (isinstance(additionalProperties, bool) and not
                            additionalProperties):
                        self._error("additional property '%(prop)s' "
                                    "not defined by 'properties' are not "
                                    "allowed in field '%(fieldname)s'",
                                    None, fieldname, prop=eachProperty)
                    # Otherwise validate the property against the
                    # additionalProperties sub-schema.
                    self.__validate(eachProperty, value,
                                    additionalProperties)
        else:
            raise SchemaError("additionalProperties schema definition for "
                              "field '%s' is not an object" % fieldname)
    def validate_dependencies(self, x, fieldname, schema, dependencies=None):
        '''
        Validates field dependencies: when the field is present, the
        fields it depends on must also be present.

        'dependencies' may be a single field name, a list of names, or a
        dict mapping a present key to another required key.
        '''
        if x.get(fieldname) is not None:
            # handle cases where dependencies is a string or list of strings
            if isinstance(dependencies, _str_type):
                dependencies = [dependencies]
            if isinstance(dependencies, (list, tuple)):
                for dependency in dependencies:
                    if dependency not in x:
                        self._error("Field '%(dependency)s' is required by "
                                    "field '%(fieldname)s'",
                                    None, fieldname, dependency=dependency)
            elif isinstance(dependencies, dict):
                # NOTE: the version 3 spec is really unclear on what this means
                # based on the meta-schema I'm assuming that it should check
                # that if a key exists, the appropriate value exists
                for k, v in dependencies.items():
                    if k in x and v not in x:
                        self._error("Field '%(v)s' is required by field "
                                    "'%(k)s'", None, fieldname, k=k, v=v)
            else:
                raise SchemaError("'dependencies' must be a string, "
                                  "list of strings, or dict")
def validate_minimum(self, x, fieldname, schema, minimum=None):
'''
Validates that the field is longer than or equal to the minimum
length if specified
'''
exclusive = schema.get('exclusiveMinimum', False)
if x.get(fieldname) is not None:
value = x.get(fieldname)
if value is not None:
if (type(value) in (int, float) and
(not exclusive and value < minimum) or
(exclusive and value <= minimum)):
self._error("Value %(value)r for field '%(fieldname)s' is "
"less than minimum value: %(minimum)f",
value, fieldname, minimum=minimum)
def validate_maximum(self, x, fieldname, schema, maximum=None):
'''
Validates that the field is shorter than or equal to the maximum
length if specified.
'''
exclusive = schema.get('exclusiveMaximum', False)
if x.get(fieldname) is not None:
value = x.get(fieldname)
if value is not None:
if (type(value) in (int, float) and
(not exclusive and value > maximum) or
(exclusive and value >= maximum)):
self._error("Value %(value)r for field '%(fieldname)s' is "
"greater than maximum value: %(maximum)f",
value, fieldname, maximum=maximum)
def validate_maxLength(self, x, fieldname, schema, length=None):
'''
Validates that the value of the given field is shorter than or equal
to the specified length
'''
value = x.get(fieldname)
if isinstance(value, (_str_type, list, tuple)) and len(value) > length:
self._error("Length of value %(value)r for field '%(fieldname)s' "
"must be less than or equal to %(length)d",
value, fieldname, length=length)
def validate_minLength(self, x, fieldname, schema, length=None):
'''
Validates that the value of the given field is longer than or equal
to the specified length
'''
value = x.get(fieldname)
if isinstance(value, (_str_type, list, tuple)) and len(value) < length:
self._error("Length of value %(value)r for field '%(fieldname)s' "
"must be greater than or equal to %(length)d",
value, fieldname, length=length)
validate_minItems = validate_minLength
validate_maxItems = validate_maxLength
def validate_format(self, x, fieldname, schema, format_option=None):
'''
Validates the format of primitive data types
'''
value = x.get(fieldname)
format_validator = self._format_validators.get(format_option, None)
if format_validator and value:
format_validator(self, fieldname, value, format_option)
# TODO: warn about unsupported format ?
def validate_pattern(self, x, fieldname, schema, pattern=None):
'''
Validates that the given field, if a string, matches the given
regular expression.
'''
value = x.get(fieldname)
if isinstance(value, _str_type):
if not re.match(pattern, value):
self._error("Value %(value)r for field '%(fieldname)s' does "
"not match regular expression '%(pattern)s'",
value, fieldname, pattern=pattern)
def validate_uniqueItems(self, x, fieldname, schema, uniqueItems=False):
'''
Validates that all items in an array instance MUST be unique
(contains no two identical values).
'''
# If additionalProperties is the boolean value True then we accept
# any additional properties.
if isinstance(uniqueItems, bool) and not uniqueItems:
return
values = x.get(fieldname)
if not isinstance(values, (list, tuple)):
return
hashables = set()
unhashables = []
for value in values:
if isinstance(value, (list, dict)):
container, add = unhashables, unhashables.append
else:
container, add = hashables, hashables.add
if value in container:
self._error(
"Value %(value)r for field '%(fieldname)s' is not unique",
value, fieldname)
else:
add(value)
def validate_enum(self, x, fieldname, schema, options=None):
'''
Validates that the value of the field is equal to one of the
specified option values
'''
value = x.get(fieldname)
if value is not None:
if not isinstance(options, Container):
raise SchemaError("Enumeration %r for field '%s' must be a "
"container", (options, fieldname))
if value not in options:
self._error("Value %(value)r for field '%(fieldname)s' is not "
"in the enumeration: %(options)r",
value, fieldname, options=options)
def validate_title(self, x, fieldname, schema, title=None):
if not isinstance(title, (_str_type, type(None))):
raise SchemaError("The title for field '%s' must be a string" %
fieldname)
def validate_description(self, x, fieldname, schema, description=None):
if not isinstance(description, (_str_type, type(None))):
raise SchemaError("The description for field '%s' must be a string"
% fieldname)
def validate_divisibleBy(self, x, fieldname, schema, divisibleBy=None):
value = x.get(fieldname)
if not self.validate_type_number(value):
return
if divisibleBy == 0:
raise SchemaError("'%r' <- divisibleBy can not be 0" % schema)
if value % divisibleBy != 0:
self._error("Value %(value)r field '%(fieldname)s' is not "
"divisible by '%(divisibleBy)s'.",
x.get(fieldname), fieldname, divisibleBy=divisibleBy)
    def validate_disallow(self, x, fieldname, schema, disallow=None):
        '''
        Validates that the value of the given field does not match the
        disallowed type.

        Works by inverting validate_type: if the type check raises a
        ValidationError the value is acceptable; if it passes, the value
        has the disallowed type and an error is reported.
        '''
        try:
            self.validate_type(x, fieldname, schema, disallow)
        except ValidationError:
            # value does NOT have the disallowed type -- success
            return
        self._error("Value %(value)r of type %(disallow)s is disallowed for "
                    "field '%(fieldname)s'",
                    x.get(fieldname), fieldname, disallow=disallow)
    def validate(self, data, schema):
        '''
        Validates a piece of json data against the provided json-schema.
        '''
        self._validate(data, schema)
    def _validate(self, data, schema):
        # Wrap the document in a one-key dict so the whole value can be
        # processed by the same field-oriented validation machinery.
        self.__validate("_data", {"_data": data}, schema)
    def __validate(self, fieldname, data, schema):
        '''
        Core dispatcher: applies every keyword in the schema to the named
        field by looking up a matching validate_<keyword> method.
        '''
        if schema is not None:
            if not isinstance(schema, dict):
                raise SchemaError(
                    "Type for field '%s' must be 'dict', got: '%s'" %
                    (fieldname, type(schema).__name__))
            # shallow copy so defaults can be injected without mutating
            # the caller's schema
            newschema = copy.copy(schema)
            # reject obsolete draft-2 keywords with a helpful message
            if 'optional' in schema:
                raise SchemaError('The "optional" attribute has been replaced'
                                  ' by "required"')
            if 'requires' in schema:
                raise SchemaError('The "requires" attribute has been replaced'
                                  ' by "dependencies"')
            if 'required' not in schema:
                newschema['required'] = self.required_by_default
            if 'blank' not in schema:
                newschema['blank'] = self.blank_by_default
            for schemaprop in newschema:
                validatorname = "validate_" + schemaprop
                # unknown keywords are silently ignored
                validator = getattr(self, validatorname, None)
                if validator:
                    validator(data, fieldname, schema,
                              newschema.get(schemaprop))
        return data
# Public API of this module.
__all__ = ['SchemaValidator', 'FieldValidationError']
| mit |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/directpy11/Samples/SampleLights.py | 2 | 2818 |
"""This sample demonstrates some very simple lightning effects."""
import math
import d3d11
import d3d11x
from d3d11c import *
def heightCallback(x, z, byteIndex, data):
    """Map one raw heightmap byte to a world-space height.

    The (x, z) grid coordinates are unused; 0.03 is the vertical scale.
    """
    return 0.03 * data[byteIndex]
class SampleLights(d3d11x.Frame):
    """Sample frame: a textured heightmap lit by seven orbiting point lights."""
    def onCreate(self):
        """Load the heightmap terrain and the sphere mesh used as light markers."""
        #Heightmap.
        #self.heightmap = d3d11x.HeightMap(self.device, None, (64, 64), heightCallback, (2, 1, 2), (8, 8), False)
        self.heightmap = d3d11x.HeightMap(self.device, d3d11x.getResourceDir("Textures", "heightmap.dds"),
            (64, 64), heightCallback, (2, 1, 2), (8, 8), False)
        self.heightmap.textureView = self.loadTextureView("ground-marble.dds")
        #Sphere mesh.
        meshPath = d3d11x.getResourceDir("Mesh", "sphere.obj")
        self.sphere = d3d11x.Mesh(self.device, meshPath)
        self.sphere.textureView = self.loadTextureView("misc-white.bmp")
    def createLights(self):
        """Return the per-frame list of (position, RGBA color) light tuples."""
        #Add 7 lights (maximum defined in 'Shared.fx').
        lights = []
        for i in range(1, 8):
            #Each light is little farther than the previous one.
            distance = i * 5
            lightTime = self.time * (i * 0.5)
            #Use sin() and cos() to create a nice little movement pattern.
            x = math.sin(lightTime) * distance
            z = math.cos(lightTime) * distance
            y = self.heightmap.getHeight(x, z)
            pos = d3d11.Vector(x, y + 1, z)
            #Set color (RGBA) (from 0.0 to 1.0). 30.0 is just a magic value which looks good.
            red = i / 30.0
            green = (7 - i) / 30.0
            color = (red, green, 0, 0)
            lights.append((pos, color))
        return lights
    def onRender(self):
        """Render the lit heightmap, then a marker sphere at each light."""
        #View- and projectionmatrix.
        view = self.createLookAt((-50, 25, -50), (0, 0, 0))
        projection = self.createProjection(45, 0.1, 300.0)
        lights = self.createLights()
        #First the heightmap.
        self.heightmap.setLights(lights)
        #Add some ambient lighting so that it is not so dark.
        self.heightmap.effect.set("lightAmbient", (0.5, 0.5, 0.5, 0))
        self.heightmap.render(d3d11.Matrix(), view, projection)
        #Then our "light spheres".
        self.sphere.setLights(lights)
        for light in lights:
            #World matrix.
            meshWorld = d3d11.Matrix()
            lightPos = light[0]
            #Add little to y to lift the sphere off the ground.
            meshWorld.translate((lightPos.x, lightPos.y + 1, lightPos.z))
            #Set ambient to light color.
            self.sphere.effect.set("lightAmbient", light[1])
            self.sphere.render(meshWorld, view, projection)
if __name__ == "__main__":
    # Run the sample; the module docstring is shown as the description.
    sample = SampleLights("Lights - DirectPython 11", __doc__)
    sample.mainloop()
| gpl-3.0 |
abioyeayo/proj-ardu-pilot | ardupilot/libraries/AP_InertialSensor/examples/coning.py | 241 | 10508 | #!/usr/bin/python
from math import *
from pymavlink.rotmat import Vector3, Matrix3
from numpy import linspace
from visual import *
class Quat:
    """Minimal unit-quaternion (w, x, y, z) for attitude integration.

    Uses aerospace convention: Euler angles are (roll, pitch, yaw) in
    radians; defaults construct the identity rotation.
    """
    def __init__(self,w=1.0,x=0.0,y=0.0,z=0.0):
        self.w = w
        self.x = x
        self.y = y
        self.z = z
    def to_euler(self):
        """Return (roll, pitch, yaw) in radians as a Vector3."""
        roll = (atan2(2.0*(self.w*self.x + self.y*self.z), 1 - 2.0*(self.x*self.x + self.y*self.y)))
        pitch = asin(2.0*(self.w*self.y - self.z*self.x))
        yaw = atan2(2.0*(self.w*self.z + self.x*self.y), 1 - 2.0*(self.y*self.y + self.z*self.z))
        return Vector3(roll,pitch,yaw)
    def from_euler(self,euler):
        """Set this quaternion from (roll, pitch, yaw) radians; returns self."""
        #(roll,pitch,yaw)
        cr2 = cos(euler[0]*0.5)
        cp2 = cos(euler[1]*0.5)
        cy2 = cos(euler[2]*0.5)
        sr2 = sin(euler[0]*0.5)
        sp2 = sin(euler[1]*0.5)
        sy2 = sin(euler[2]*0.5)
        self.w = cr2*cp2*cy2 + sr2*sp2*sy2
        self.x = sr2*cp2*cy2 - cr2*sp2*sy2
        self.y = cr2*sp2*cy2 + sr2*cp2*sy2
        self.z = cr2*cp2*sy2 - sr2*sp2*cy2
        return self
    def from_axis_angle(self, vec):
        """Set this quaternion from an axis-angle vector (direction = axis,
        length = rotation angle in radians)."""
        theta = vec.length()
        if theta == 0:
            # zero rotation -> identity quaternion
            self.w = 1.0
            self.x = 0.0
            self.y = 0.0
            self.z = 0.0
            return
        vec_normalized = vec.normalized()
        st2 = sin(theta/2.0)
        self.w = cos(theta/2.0)
        self.x = vec_normalized.x * st2
        self.y = vec_normalized.y * st2
        self.z = vec_normalized.z * st2
    def rotate(self, vec):
        """Apply an axis-angle rotation delta to this quaternion in place."""
        r = Quat()
        r.from_axis_angle(vec)
        q = self * r
        self.w = q.w
        self.x = q.x
        self.y = q.y
        self.z = q.z
    def to_axis_angle(self):
        """Return the axis-angle representation as a Vector3 (angle wrapped
        to [-pi, pi])."""
        l = sqrt(self.x**2+self.y**2+self.z**2)
        (x,y,z) = (self.x,self.y,self.z)
        if l != 0:
            temp = 2.0*atan2(l,self.w)
            if temp > pi:
                temp -= 2*pi
            elif temp < -pi:
                temp += 2*pi
            (x,y,z) = (temp*x/l,temp*y/l,temp*z/l)
        return Vector3(x,y,z)
    def to_rotation_matrix(self):
        """Return the equivalent 3x3 rotation matrix (pymavlink Matrix3)."""
        m = Matrix3()
        yy = self.y**2
        yz = self.y * self.z
        xx = self.x**2
        xy = self.x * self.y
        xz = self.x * self.z
        wx = self.w * self.x
        wy = self.w * self.y
        wz = self.w * self.z
        zz = self.z**2
        m.a.x = 1.0-2.0*(yy + zz)
        m.a.y = 2.0*(xy - wz)
        m.a.z = 2.0*(xz + wy)
        m.b.x = 2.0*(xy + wz)
        m.b.y = 1.0-2.0*(xx + zz)
        m.b.z = 2.0*(yz - wx)
        m.c.x = 2.0*(xz - wy)
        m.c.y = 2.0*(yz + wx)
        m.c.z = 1.0-2.0*(xx + yy)
        return m
    def inverse(self):
        """Return the conjugate (inverse for unit quaternions)."""
        return Quat(self.w,-self.x,-self.y,-self.z)
    def __mul__(self,operand):
        """Hamilton product: self * operand."""
        ret = Quat()
        w1=self.w
        x1=self.x
        y1=self.y
        z1=self.z
        w2=operand.w
        x2=operand.x
        y2=operand.y
        z2=operand.z
        ret.w = w1*w2 - x1*x2 - y1*y2 - z1*z2
        ret.x = w1*x2 + x1*w2 + y1*z2 - z1*y2
        ret.y = w1*y2 - x1*z2 + y1*w2 + z1*x2
        ret.z = w1*z2 + x1*y2 - y1*x2 + z1*w2
        return ret
    def __str__(self):
        return "Quat(%f, %f, %f, %f)" % (self.w,self.x,self.y,self.z)
def vpy_vec(vec):
    """Convert a body-frame Vector3 into a VPython display vector.

    Axis mapping: display x = body y, display y = -body z (up),
    display z = -body x.
    """
    return vector(vec.y, -vec.z, -vec.x)
def update_arrows(q,x,y,z):
    """Orient three VPython arrows to the rotation given by quaternion q."""
    # transpose gives the body-to-world rotation for the column vectors below
    m = q.to_rotation_matrix().transposed()
    x.axis = vpy_vec(m*Vector3(1,0,0))
    x.up = vpy_vec(m*Vector3(0,1,0))
    y.axis = vpy_vec(m*Vector3(0,1,0))
    y.up = vpy_vec(m*Vector3(1,0,0))
    z.axis = vpy_vec(m*Vector3(0,0,1))
    z.up = vpy_vec(m*Vector3(1,0,0))
class Attitude:
    """A visualised attitude: a quaternion plus RGB arrow triads and labels."""
    def __init__(self,reference=False):
        self.labels = []
        self.xarrows = []
        self.yarrows = []
        self.zarrows = []
        self.q = Quat()
        # reference attitudes are drawn with thin, slightly longer arrows
        self.reference = reference
        self.update_arrows()
    def add_arrows(self, arrowpos = Vector3(0,0,0), labeltext=None):
        """Add an RGB arrow triad (and optional label) at arrowpos.

        NOTE(review): the default arrowpos is a shared mutable Vector3
        (classic Python pitfall); harmless here as it is only read.
        """
        if labeltext is not None:
            self.labels.append(label(pos = vpy_vec(arrowpos), text=labeltext))
        sw = .005 if self.reference else .05
        self.xarrows.append(arrow(pos=vpy_vec(arrowpos),color=color.red,opacity=1,shaftwidth=sw))
        self.yarrows.append(arrow(pos=vpy_vec(arrowpos),color=color.green,opacity=1,shaftwidth=sw))
        self.zarrows.append(arrow(pos=vpy_vec(arrowpos),color=color.blue,opacity=1,shaftwidth=sw))
        self.update_arrows()
    def rotate(self, vec):
        """Rotate the attitude by an axis-angle delta (radians)."""
        self.q.rotate(vec)
    def update_arrows(self):
        """Re-orient every arrow triad according to the current quaternion."""
        m = self.q.to_rotation_matrix().transposed()
        sl = 1.1 if self.reference else 1.0
        for i in self.xarrows:
            i.axis = vpy_vec(m*Vector3(sl,0,0))
            i.up = vpy_vec(m*Vector3(0,1,0))
        for i in self.yarrows:
            i.axis = vpy_vec(m*Vector3(0,sl,0))
            i.up = vpy_vec(m*Vector3(1,0,0))
        for i in self.zarrows:
            i.axis = vpy_vec(m*Vector3(0,0,sl))
            i.up = vpy_vec(m*Vector3(1,0,0))
        for i in self.labels:
            # keep labels offset relative to the window size
            i.xoffset = scene.width*0.07
            i.yoffset = scene.width*0.1
class Tian_integrator:
    """Coning-corrected delta-angle integrator (after Tian et al.).

    Accumulates trapezoidal delta angles ('alpha') plus a coning
    correction term ('beta'); with integrate_separately=False the
    correction is folded directly into alpha instead.
    """
    def __init__(self, integrate_separately=True):
        self.alpha = Vector3(0,0,0)
        self.beta = Vector3(0,0,0)
        self.last_alpha = Vector3(0,0,0)
        self.last_delta_alpha = Vector3(0,0,0)
        self.last_sample = Vector3(0,0,0)
        self.integrate_separately = integrate_separately
    def add_sample(self, sample, dt):
        """Accumulate one gyro sample (rad/s) integrated over dt seconds."""
        # trapezoidal delta angle for this step
        delta_alpha = (self.last_sample+sample)*0.5*dt
        self.alpha += delta_alpha
        # coning correction term; NOTE(review): '%' appears to be the
        # pymavlink Vector3 cross product -- confirm against rotmat.py
        delta_beta = 0.5 * (self.last_alpha + (1.0/6.0)*self.last_delta_alpha)%delta_alpha
        if self.integrate_separately:
            self.beta += delta_beta
        else:
            self.alpha += delta_beta
        self.last_alpha = self.alpha
        self.last_delta_alpha = delta_alpha
        self.last_sample = sample
    def pop_delta_angles(self):
        """Return the accumulated corrected delta angles and reset state."""
        ret = self.alpha + self.beta
        self.alpha.zero()
        self.beta.zero()
        return ret
# Per-key (delay_element_1, delay_element_2) state, one entry per filter
# instance identified by the caller-supplied key string.
filter2p_1khz_30hz_data = {}
def filter2p_1khz_30hz(sample, key):
    """One step of a 2-pole Butterworth low-pass filter (1 kHz sample
    rate, 30 Hz cutoff), implemented in direct form II with per-key
    state so several independent signals can share the function."""
    global filter2p_1khz_30hz_data
    if key not in filter2p_1khz_30hz_data:
        filter2p_1khz_30hz_data[key] = (0.0, 0.0)
    d1, d2 = filter2p_1khz_30hz_data[key]
    # biquad coefficients for fs = 1000 Hz, fc = 30 Hz
    # (recomputed every call, exactly as the original did)
    sample_freq = 1000
    cutoff_freq = 30
    fr = sample_freq/cutoff_freq
    ohm = tan(pi/fr)
    c = 1.0+2.0*cos(pi/4.0)*ohm + ohm**2
    b0 = ohm**2/c
    b1 = 2.0*b0
    b2 = b0
    a1 = 2.0*(ohm**2-1.0)/c
    a2 = (1.0-2.0*cos(pi/4.0)*ohm+ohm**2)/c
    d0 = sample - d1 * a1 - d2 * a2
    out = d0 * b0 + d1 * b1 + d2 * b2
    # shift the delay line: new d1 = d0, new d2 = old d1
    filter2p_1khz_30hz_data[key] = (d0, d1)
    return out
def filter2p_1khz_30hz_vector3(sample, key):
    """Apply the scalar 2-pole low-pass filter component-wise to a Vector3."""
    ret = Vector3()
    # separate filter state per component, namespaced by the caller's key
    ret.x = filter2p_1khz_30hz(sample.x, "vec3f"+key+"x")
    ret.y = filter2p_1khz_30hz(sample.y, "vec3f"+key+"y")
    ret.z = filter2p_1khz_30hz(sample.z, "vec3f"+key+"z")
    return ret
# --- Scene setup: four correction strategies, each paired with a thin
# --- reference attitude integrated at 10 kHz ---------------------------------
reference_attitude = Attitude(True)
uncorrected_attitude_low = Attitude()
uncorrected_attitude_high = Attitude()
corrected_attitude = Attitude()
corrected_attitude_combined = Attitude()
corrected_attitude_integrator = Tian_integrator()
corrected_attitude_integrator_combined = Tian_integrator(integrate_separately = False)
reference_attitude.add_arrows(Vector3(0,-3,0))
uncorrected_attitude_low.add_arrows(Vector3(0,-3,0), "no correction\nlow rate integration\n30hz software LPF @ 1khz\n(ardupilot 2015-02-18)")
reference_attitude.add_arrows(Vector3(0,-1,0))
uncorrected_attitude_high.add_arrows(Vector3(0,-1,0), "no correction\nhigh rate integration")
reference_attitude.add_arrows(Vector3(0,1,0))
corrected_attitude.add_arrows(Vector3(0,1,0), "Tian et al\nseparate integration")
reference_attitude.add_arrows(Vector3(0,3,0))
corrected_attitude_combined.add_arrows(Vector3(0,3,0), "Tian et al\ncombined_integration\n(proposed patch)")
#scene.scale = (0.3,0.3,0.3)
scene.fov = 0.001
scene.forward = (-0.5, -0.5, -1)
# Simulated coning motion parameters.
coning_frequency_hz = 50
coning_magnitude_rad_s = 2
label_text = (
    "coning motion frequency %f hz\n"
    "coning motion peak amplitude %f deg/s\n"
    "thin arrows are reference attitude"
    ) % (coning_frequency_hz, degrees(coning_magnitude_rad_s))
label(pos = vpy_vec(Vector3(0,0,2)), text=label_text)
# Simulation state: time, step sizes for each rate, and accumulators.
t = 0.0
dt_10000 = 0.0001
dt_1000 = 0.001
dt_333 = 0.003
accumulated_delta_angle = Vector3(0,0,0)
last_gyro_10000 = Vector3(0,0,0)
last_gyro_1000 = Vector3(0,0,0)
last_filtered_gyro_333 = Vector3(0,0,0)
filtered_gyro = Vector3(0,0,0)
# Main loop: nested loops emulate 10 kHz / 1 kHz / 333 Hz / 66 Hz rates.
while True:
    rate(66)
    for i in range(5):
        for j in range(3):
            for k in range(10):
                #vvvvvvvvvv 10 kHz vvvvvvvvvv#
                #compute angular rate at current time
                gyro = Vector3(sin(t*coning_frequency_hz*2*pi), cos(t*coning_frequency_hz*2*pi),0)*coning_magnitude_rad_s
                #integrate reference attitude
                reference_attitude.rotate((gyro+last_gyro_10000) * dt_10000 * 0.5)
                #increment time
                t += dt_10000
                last_gyro_10000 = gyro
            #vvvvvvvvvv 1 kHz vvvvvvvvvv#
            #update filter for sim 1
            filtered_gyro = filter2p_1khz_30hz_vector3(gyro, "1")
            #update integrator for sim 2
            accumulated_delta_angle += (gyro+last_gyro_1000) * dt_1000 * 0.5
            #update integrator for sim 3
            corrected_attitude_integrator.add_sample(gyro, dt_1000)
            #update integrator for sim 4
            corrected_attitude_integrator_combined.add_sample(gyro, dt_1000)
            last_gyro_1000 = gyro
        #vvvvvvvvvv 333 Hz vvvvvvvvvv#
        #update sim 1 (leftmost)
        uncorrected_attitude_low.rotate((filtered_gyro+last_filtered_gyro_333) * dt_333 * 0.5)
        #update sim 2
        uncorrected_attitude_high.rotate(accumulated_delta_angle)
        accumulated_delta_angle.zero()
        #update sim 3
        corrected_attitude.rotate(corrected_attitude_integrator.pop_delta_angles())
        #update sim 4 (rightmost)
        corrected_attitude_combined.rotate(corrected_attitude_integrator_combined.pop_delta_angles())
        last_filtered_gyro_333 = filtered_gyro
    #vvvvvvvvvv 66 Hz vvvvvvvvvv#
    reference_attitude.update_arrows()
    corrected_attitude.update_arrows()
    corrected_attitude_combined.update_arrows()
    uncorrected_attitude_low.update_arrows()
    uncorrected_attitude_high.update_arrows()
| gpl-3.0 |
Cosiroc/bleau-database | Triangulation/MathFunctions.py | 2 | 1561 | ####################################################################################################
#
# Bleau Database - A database of the bouldering area of Fontainebleau
# Copyright (C) Salvaire Fabrice 2016
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
# Fixme: sign_of ?
def sign(x):
    """Return the sign of *x* as -1, 0 or 1.

    Fixed: the original used the Python 2 builtin ``cmp``, which was
    removed in Python 3 and raised NameError there.
    """
    return (x > 0) - (x < 0)
####################################################################################################
def trignometric_clamp(x):
    """ Clamp *x* in the range [-1.,1.] (e.g. before acos/asin). """
    if x < -1.:
        return -1.
    if x > 1.:
        return 1.
    return x
####################################################################################################
def is_in_trignometric_range(x):
    """Return True when *x* lies within the closed interval [-1, 1]."""
    return -1. <= x and x <= 1
| agpl-3.0 |
sam-m888/gramps | gramps/gen/filters/rules/family/_hascitation.py | 5 | 1914 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Filter rule to match family with a particular citation.
"""
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .._hascitationbase import HasCitationBase
#-------------------------------------------------------------------------
#
# HasEvent
#
#-------------------------------------------------------------------------
class HasCitation(HasCitationBase):
    """Rule that matches families having a citation with particular values."""
    # Labels shown in the filter editor, one per rule parameter.
    labels = [ _('Volume/Page:'),
               _('Date:'),
               _('Confidence level:')]
    name = _('Families with the <citation>')
    description = _("Matches families with a citation of a particular "
                    "value")
| gpl-2.0 |
gplssm/europepstrans | europepstrans/results/__init__.py | 1 | 13654 | """
TimeFrameResults borrows methods from oemof.outputlib, adapted to the structure
applied here. The most relevant difference is that the results data are stored in self.data
"""
from oemof.outputlib import DataFramePlot, ResultsDataFrame
import pickle
from matplotlib import pyplot as plt
import logging
import pandas as pd
class TimeFrameResults:
"""
Container for results of one time frame (i.e. one year)
Attributes
----------
data : DataFrame
Structure multi-indexed result data
"""
def __init__(self, **kwargs):
"""
Initializes data object based on oemof results class
"""
results_file = kwargs.get('results_file', None)
self.subset = kwargs.get('subset', None)
self.ax = kwargs.get('ax')
if results_file is None:
# self.data = DataFramePlot(energy_system=kwargs.get('energy_system'))
self.data = ResultsDataFrame(energy_system=kwargs.get('energy_system'))
else:
self.data = pickle.load(open(results_file, 'rb'))
self.reformat_data()
def preview(self):
"""
Print short preview of data
"""
return self.data.head()
    def reformat_data(self):
        """
        Extract region information from bus label put into separate index
        label.

        Bus/object labels like 'electricity_deu' become 'electricity' with
        region 'deu'; labels with no known region get region 'global'.
        """
        # TODO: get regions list from elsewhere
        regions = ['deu', 'xfra', 'xbnl']
        regions_leading_underscore = ['_' + x for x in regions]
        # put bus_label to column (required to work on)
        self.data.reset_index(level='bus_label', inplace=True)
        self.data.reset_index(level='obj_label', inplace=True)
        # extra region from bus label and write to new column
        self.data['region'] = self.data['bus_label'].str.extract(
            r"(?=(" + '|'.join(regions) + r"))", expand=True)
        self.data['region'].fillna('global', inplace=True)
        # remove region from bus_label and obj_label
        self.data['bus_label'] = self.data['bus_label'].str.replace(
            r"(" + '|'.join(regions_leading_underscore) + r")", '')
        self.data['obj_label'] = self.data['obj_label'].str.replace(
            r"(" + '|'.join(regions_leading_underscore) + r")", '')
        # put bus_label back to index
        self.data = self.data.set_index(['bus_label', 'region', 'obj_label'],
                                        append=True)
        # reorder and resort levels
        level_order = ['bus_label', 'type', 'obj_label', 'region', 'datetime']
        self.data = self.data.reorder_levels(level_order)
    def slice_by(self, **kwargs):
        r""" Method for slicing the ResultsDataFrame. A subset is returned.

        Parameters
        ----------
        bus_label : string
        type : string (to_bus/from_bus/other)
        obj_label: string
        date_from : string
            Start date selection e.g. "2016-01-01 00:00:00". If not set, the
            whole time range will be plotted.
        date_to : string
            End date selection e.g. "2016-03-01 00:00:00". If not set, the
            whole time range will be plotted.

        Returns
        -------
        pandas.DataFrame
            The multi-indexed subset; the original data is not modified.
        """
        # default to "select everything" on each index level
        kwargs.setdefault('bus_label', slice(None))
        kwargs.setdefault('type', slice(None))
        kwargs.setdefault('obj_label', slice(None))
        kwargs.setdefault(
            'date_from', self.data.index.get_level_values('datetime')[0])
        kwargs.setdefault(
            'date_to', self.data.index.get_level_values('datetime')[-1])
        # slicing
        idx = pd.IndexSlice
        subset = self.data.loc[idx[
            kwargs['bus_label'],
            kwargs['type'],
            kwargs['obj_label'],
            slice(pd.Timestamp(kwargs['date_from']),
                  pd.Timestamp(kwargs['date_to']))], :]
        return subset
    def slice_unstacked(self, unstacklevel='obj_label',
                        formatted=False, **kwargs):
        r"""Method for slicing the ResultsDataFrame. An unstacked
        subset is stored on ``self.subset`` (note: nothing is returned).

        Parameters
        ----------
        unstacklevel : string (default: 'obj_label')
            Level to unstack the subset of the DataFrame.
        formatted : boolean
            If True, drop the 'bus_label'/'type' index levels and flatten
            the column MultiIndex to plain column names.
        **kwargs
            Passed through to :meth:`slice_by`.
        """
        subset = self.slice_by(**kwargs)
        subset = subset.unstack(level=unstacklevel)
        if formatted is True:
            subset.reset_index(level=['bus_label', 'type'], drop=True,
                               inplace=True)
            # use standard instead of multi-indexed columns
            subset.columns = subset.columns.get_level_values(1).unique()
        # return subset
        self.subset = subset
    def plot(self, **kwargs):
        r""" Passing the subset attribute to the pandas plotting method. All
        parameters will be directly passed to pandas.DataFrame.plot(). See
        http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.plot.html
        for more information.

        Requires ``self.subset`` to be populated (e.g. via
        :meth:`slice_unstacked`); stores the resulting axis on ``self.ax``.

        Returns
        -------
        self
        """
        self.ax = self.subset.plot(**kwargs)
        return self
    def io_plot(self, bus_label, cdict, line_kwa=None, lineorder=None,
                bar_kwa=None, barorder=None, **kwargs):
        r""" Plotting a combined bar and line plot to see the fitting of in-
        and outcomming flows of a bus balance.

        Parameters
        ----------
        bus_label : string
            Uid of the bus to plot the balance.
        cdict : dictionary
            A dictionary that has all possible components as keys and its
            colors as items.
        line_kwa : dictionary
            Keyword arguments to be passed to the pandas line plot.
        bar_kwa : dictionary
            Keyword arguments to be passed to the pandas bar plot.
        lineorder : list
            Order of columns to plot the line plot
        barorder : list
            Order of columns to plot the bar plot

        Note
        ----
        Further keyword arguments will be passed to the
        :class:`slice_unstacked method <DataFramePlot.slice_unstacked>`.

        Returns
        -------
        handles, labels
            Manipulated labels to correct the unsual construction of the
            stack line plot. You can use them for further maipulations.
        """
        self.ax = kwargs.get('ax', self.ax)
        if bar_kwa is None:
            bar_kwa = dict()
        if line_kwa is None:
            line_kwa = dict()
        if self.ax is None:
            fig = plt.figure()
            self.ax = fig.add_subplot(1, 1, 1)
        # Create a bar plot for all input flows
        self.slice_unstacked(bus_label=bus_label, type='to_bus', **kwargs)
        if barorder is not None:
            self.rearrange_subset(barorder)
        self.subset.plot(kind='bar', linewidth=0, stacked=True, width=1,
                         ax=self.ax, color=self.color_from_dict(cdict),
                         **bar_kwa)
        # Create a line plot for all output flows
        self.slice_unstacked(bus_label=bus_label, type='from_bus', **kwargs)
        if lineorder is not None:
            self.rearrange_subset(lineorder)
        # The following changes are made to have the bottom line on top layer
        # of all lines. Normally the bottom line is the first line that is
        # plotted and will be on the lowest layer. This is difficult to read.
        # Build a cumulative DataFrame so each line sits on top of the
        # previous ones (a manual "stacked line" plot).
        new_df = pd.DataFrame(index=self.subset.index)
        n = 0
        tmp = 0
        for col in self.subset.columns:
            if n < 1:
                new_df[col] = self.subset[col]
            else:
                new_df[col] = self.subset[col] + tmp
            tmp = new_df[col]
            n += 1
        if lineorder is None:
            new_df.sort_index(axis=1, ascending=False, inplace=True)
        else:
            lineorder.reverse()
            new_df = new_df[lineorder]
        colorlist = self.color_from_dict(cdict)
        if isinstance(colorlist, list):
            colorlist.reverse()
        separator = len(colorlist)
        new_df.plot(kind='line', ax=self.ax, color=colorlist,
                    drawstyle='steps-mid', **line_kwa)
        # Adapt the legend to the new oder: un-reverse the line entries so
        # the legend reads naturally again.
        handles, labels = self.ax.get_legend_handles_labels()
        tmp_lab = [x for x in reversed(labels[0:separator])]
        tmp_hand = [x for x in reversed(handles[0:separator])]
        handles = tmp_hand + handles[separator:]
        labels = tmp_lab + labels[separator:]
        labels.reverse()
        handles.reverse()
        self.ax.legend(handles, labels)
        return handles, labels
def rearrange_subset(self, order):
r"""
Change the order of the subset DataFrame
Parameters
----------
order : list
New order of columns
Returns
-------
self
"""
cols = list(self.subset.columns.values)
neworder = [x for x in list(order) if x in set(cols)]
missing = [x for x in list(cols) if x not in set(order)]
if len(missing) > 0:
logging.warning(
"Columns that are not part of the order list are removed: " +
str(missing))
self.subset = self.subset[neworder]
def color_from_dict(self, colordict):
r""" Method to convert a dictionary containing the components and its
colors to a color list that can be directly useed with the color
parameter of the pandas plotting method.
Parameters
----------
colordict : dictionary
A dictionary that has all possible components as keys and its
colors as items.
Returns
-------
list
Containing the colors of all components of the subset attribute
"""
tmplist = list(
map(colordict.get, list(self.subset.columns)))
tmplist = ['#00FFFF' if v is None else v for v in tmplist]
if len(tmplist) == 1:
colorlist = tmplist[0]
else:
colorlist = tmplist
return colorlist
def set_datetime_ticks(self, tick_distance=None, number_autoticks=3,
date_format='%d-%m-%Y %H:%M'):
r""" Set configurable ticks for the time axis. One can choose the
number of ticks or the distance between ticks and the format.
Parameters
----------
tick_distance : real
The disctance between to ticks in hours. If not set autoticks are
set (see number_autoticks).
number_autoticks : int (default: 3)
The number of ticks on the time axis, independent of the time
range. The higher the number of ticks is, the shorter should be the
date_format string.
date_format : string (default: '%d-%m-%Y %H:%M')
The string to define the format of the date and time. See
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
for more information.
"""
dates = self.subset.index.get_level_values('datetime').unique()
if tick_distance is None:
tick_distance = int(len(dates) / number_autoticks) - 1
self.ax.set_xticks(range(0, len(dates), tick_distance),
minor=False)
self.ax.set_xticklabels(
[item.strftime(date_format)
for item in dates.tolist()[0::tick_distance]],
rotation=0, minor=False)
    def outside_legend(self, reverse=False, plotshare=0.9, **kwargs):
        r""" Move the legend outside the plot. Based on the ideas of Joe
        Kington. See
        http://stackoverflow.com/questions/4700614/how-to-put-the-legend-out-of-the-plot
        for more information.

        Parameters
        ----------
        reverse : boolean (default: False)
            Print out the legend in reverse order. This is interesting for
            stack-plots to have the legend in the same order as the stacks.
        plotshare : real (default: 0.9)
            Share of the plot area to create space for the legend (0 to 1).
        loc : string (default: 'center left')
            Location of the plot.
        bbox_to_anchor : tuple (default: (1, 0.5))
            Set the anchor for the legend.
        ncol : integer (default: 1)
            Number of columns of the legend.
        handles : list of handles
            A list of handles if they are already modified by another function
            or method. Normally these handles will be automatically taken from
            the artist object.
        labels : list of labels
            A list of labels if they are already modified by another function
            or method. Normally these labels will be automatically taken from
            the artist object.

        Note
        ----
        All keyword arguments (kwargs) will be directly passed to the
        matplotlib legend class. See
        http://matplotlib.org/api/legend_api.html#matplotlib.legend.Legend
        for more parameters.
        """
        # Default placement: vertically centred, just right of the axes.
        kwargs.setdefault('loc', 'center left')
        kwargs.setdefault('bbox_to_anchor', (1, 0.5))
        kwargs.setdefault('ncol', 1)
        # Callers may hand in pre-modified handles/labels; otherwise take
        # them from the axes.  They are popped so they are not forwarded to
        # matplotlib's legend() as unknown kwargs.
        handles = kwargs.pop('handles', self.ax.get_legend_handles_labels()[0])
        labels = kwargs.pop('labels', self.ax.get_legend_handles_labels()[1])
        if reverse:
            handles.reverse()
            labels.reverse()
        # Shrink the axes horizontally to make room for the legend.
        box = self.ax.get_position()
        self.ax.set_position([box.x0, box.y0, box.width * plotshare,
                              box.height])
        self.ax.legend(handles, labels, **kwargs)
if __name__ == '__main__':
pass | gpl-3.0 |
yamahata/linux-umem | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Return the interval from src (nsec) to dst (nsec) in milliseconds.
def diff_msec(src, dst):
        delta_nsec = dst - src
        return delta_nsec / 1000000.0
# Display a process of transmitting a packet
# Columns: device, length, enqueue time (sec), time spent in the Qdisc
# layer (queue -> xmit, msec) and in the network device (xmit -> free, msec).
def print_transmit(hunk):
        # Honour the "dev=" command line filter: skip non-matching devices.
        if dev != 0 and hunk['dev'].find(dev) < 0:
                return
        print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
                (hunk['dev'], hunk['len'],
                nsecs_secs(hunk['queue_t']),
                nsecs_nsecs(hunk['queue_t'])/1000,
                diff_msec(hunk['queue_t'], hunk['xmit_t']),
                diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrputs associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
        """Parse the script's command line options into the module globals.

        Recognised arguments: 'tx', 'rx', 'dev=<name>' and 'debug'.  When
        neither 'tx' nor 'rx' is given, both charts are enabled.
        """
        global show_tx
        global show_rx
        global dev
        global debug

        for i in range(len(sys.argv)):
                if i == 0:
                        continue
                arg = sys.argv[i]
                if arg == 'tx':
                        show_tx = 1
                elif arg =='rx':
                        show_rx = 1
                elif arg.find('dev=',0, 4) >= 0:
                        # everything after "dev=" is the device-name filter
                        dev = arg[4:]
                elif arg == 'debug':
                        debug = 1
        # default: show both directions when none was selected explicitly
        if show_tx == 0 and show_rx == 0:
                show_tx = 1
                show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a correspoinding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
        # Push a per-cpu record for a hardware interrupt entry; the matching
        # irq_handler_exit event pops it again.
        (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
        if cpu not in irq_dic.keys():
                irq_dic[cpu] = []
        irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
        irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
        # Pop the record pushed by handle_irq_handler_entry and stamp the
        # exit time.  Records for irqs that never raised a NET_RX softirq
        # (no 'event_list' key) are dropped here.
        (name, context, cpu, time, pid, comm, irq, ret) = event_info
        if cpu not in irq_dic.keys():
                return
        irq_record = irq_dic[cpu].pop()
        if irq != irq_record['irq']:
                # mismatched entry/exit pair; discard the popped record
                return
        irq_record.update({'irq_ext_t':time})
        # if an irq doesn't include NET_RX softirq, drop.
        if 'event_list' in irq_record.keys():
                irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
        # A NET_RX softirq finished: bundle the interrupts that raised it
        # (irq_dic) and the packet events it processed (net_rx_dic) into one
        # "receive hunk" for later display.
        (name, context, cpu, time, pid, comm, vec) = event_info
        irq_list = []
        event_list = 0
        if cpu in irq_dic.keys():
                irq_list = irq_dic[cpu]
                del irq_dic[cpu]
        if cpu in net_rx_dic.keys():
                sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
                event_list = net_rx_dic[cpu]['event_list']
                del net_rx_dic[cpu]
        # without both halves there is nothing meaningful to report
        if irq_list == [] or event_list == 0:
                return
        rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
                    'irq_list':irq_list, 'event_list':event_list}
        # merge information related to a NET_RX softirq
        receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
        # The device accepted a packet: move it from the Qdisc queue list to
        # the xmit list and stamp the transmit time.
        global of_count_tx_xmit_list
        (name, context, cpu, time, pid, comm,
                skbaddr, skblen, rc, dev_name) = event_info
        if rc == 0: # NETDEV_TX_OK
                for i in range(len(tx_queue_list)):
                        skb = tx_queue_list[i]
                        if skb['skbaddr'] == skbaddr:
                                skb['xmit_t'] = time
                                tx_xmit_list.insert(0, skb)
                                del tx_queue_list[i]
                                # enforce the buffer budget; count overflows
                                if len(tx_xmit_list) > buffer_budget:
                                        tx_xmit_list.pop()
                                        of_count_tx_xmit_list += 1
                                return
def handle_kfree_skb(event_info):
        # A packet was freed: close out its life-cycle record.  The address
        # is looked up in the tx queue list, the tx xmit list and finally
        # the rx list; only the first match is handled.
        (name, context, cpu, time, pid, comm,
                skbaddr, protocol, location) = event_info
        for i in range(len(tx_queue_list)):
                skb = tx_queue_list[i]
                if skb['skbaddr'] == skbaddr:
                        # freed before it ever reached the device; forget it
                        del tx_queue_list[i]
                        return
        for i in range(len(tx_xmit_list)):
                skb = tx_xmit_list[i]
                if skb['skbaddr'] == skbaddr:
                        # transmission completed; record the free time
                        skb['free_t'] = time
                        tx_free_list.append(skb)
                        del tx_xmit_list[i]
                        return
        for i in range(len(rx_skb_list)):
                rec_data = rx_skb_list[i]
                if rec_data['skbaddr'] == skbaddr:
                        # received packet dropped by the stack
                        rec_data.update({'handle':"kfree_skb",
                                        'comm':comm, 'pid':pid, 'comm_t':time})
                        del rx_skb_list[i]
                        return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
alimony/django | tests/admin_inlines/models.py | 25 | 6614 | """
Testing of admin inline formsets.
"""
import random
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
class Parent(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Teacher(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Child(models.Model):
name = models.CharField(max_length=50)
teacher = models.ForeignKey(Teacher, models.CASCADE)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
parent = GenericForeignKey()
def __str__(self):
return 'I am %s, a child of %s' % (self.name, self.parent)
class Book(models.Model):
name = models.CharField(max_length=50)
class Author(models.Model):
name = models.CharField(max_length=50)
books = models.ManyToManyField(Book)
class NonAutoPKBook(models.Model):
    # Primary key is chosen randomly in save() instead of by the database.
    rand_pk = models.IntegerField(primary_key=True, editable=False)
    author = models.ForeignKey(Author, models.CASCADE)
    title = models.CharField(max_length=50)

    def save(self, *args, **kwargs):
        """Pick a random unused primary key before the first save.

        NOTE(review): the exists()/assign pair is not atomic -- two
        concurrent saves could pick the same pk.  Acceptable for test data.
        """
        while not self.rand_pk:
            test_pk = random.randint(1, 99999)
            if not NonAutoPKBook.objects.filter(rand_pk=test_pk).exists():
                self.rand_pk = test_pk
        super().save(*args, **kwargs)
class EditablePKBook(models.Model):
manual_pk = models.IntegerField(primary_key=True)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=50)
class Holder(models.Model):
dummy = models.IntegerField()
class Inner(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder, models.CASCADE)
readonly = models.CharField("Inner readonly label", max_length=1)
def get_absolute_url(self):
return '/inner/'
class Holder2(models.Model):
dummy = models.IntegerField()
class Inner2(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder2, models.CASCADE)
class Holder3(models.Model):
dummy = models.IntegerField()
class Inner3(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder3, models.CASCADE)
# Models for ticket #8190
class Holder4(models.Model):
dummy = models.IntegerField()
class Inner4Stacked(models.Model):
dummy = models.IntegerField(help_text="Awesome stacked help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
class Inner4Tabular(models.Model):
dummy = models.IntegerField(help_text="Awesome tabular help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
# Models for #12749
class Person(models.Model):
firstname = models.CharField(max_length=15)
class OutfitItem(models.Model):
name = models.CharField(max_length=15)
class Fashionista(models.Model):
person = models.OneToOneField(Person, models.CASCADE, primary_key=True)
weaknesses = models.ManyToManyField(OutfitItem, through='ShoppingWeakness', blank=True)
class ShoppingWeakness(models.Model):
fashionista = models.ForeignKey(Fashionista, models.CASCADE)
item = models.ForeignKey(OutfitItem, models.CASCADE)
# Models for #13510
class TitleCollection(models.Model):
pass
class Title(models.Model):
collection = models.ForeignKey(TitleCollection, models.SET_NULL, blank=True, null=True)
title1 = models.CharField(max_length=100)
title2 = models.CharField(max_length=100)
# Models for #15424
class Poll(models.Model):
name = models.CharField(max_length=40)
class Question(models.Model):
poll = models.ForeignKey(Poll, models.CASCADE)
class Novel(models.Model):
name = models.CharField(max_length=40)
class Chapter(models.Model):
name = models.CharField(max_length=40)
novel = models.ForeignKey(Novel, models.CASCADE)
class FootNote(models.Model):
"""
Model added for ticket 19838
"""
chapter = models.ForeignKey(Chapter, models.PROTECT)
note = models.CharField(max_length=40)
# Models for #16838
class CapoFamiglia(models.Model):
name = models.CharField(max_length=100)
class Consigliere(models.Model):
name = models.CharField(max_length=100, help_text='Help text for Consigliere')
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name='+')
class SottoCapo(models.Model):
name = models.CharField(max_length=100)
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name='+')
class ReadOnlyInline(models.Model):
name = models.CharField(max_length=100, help_text='Help text for ReadOnlyInline')
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE)
# Models for #18433
class ParentModelWithCustomPk(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
class ChildModel1(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return '/child_model1/'
class ChildModel2(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return '/child_model2/'
# Models for #19425
class BinaryTree(models.Model):
name = models.CharField(max_length=100)
parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True)
# Models for #19524
class LifeForm(models.Model):
pass
class ExtraTerrestrial(LifeForm):
name = models.CharField(max_length=100)
class Sighting(models.Model):
et = models.ForeignKey(ExtraTerrestrial, models.CASCADE)
place = models.CharField(max_length=100)
# Models for #18263
class SomeParentModel(models.Model):
name = models.CharField(max_length=1)
class SomeChildModel(models.Model):
name = models.CharField(max_length=1)
position = models.PositiveIntegerField()
parent = models.ForeignKey(SomeParentModel, models.CASCADE)
# Other models
class ProfileCollection(models.Model):
pass
class Profile(models.Model):
collection = models.ForeignKey(ProfileCollection, models.SET_NULL, blank=True, null=True)
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
| bsd-3-clause |
dyyi/moneybook | venv/Lib/site-packages/django/core/paginator.py | 347 | 5031 | import collections
from math import ceil
from django.utils import six
class InvalidPage(Exception):
    """Base class for all page-lookup errors raised by the paginator."""
    pass


class PageNotAnInteger(InvalidPage):
    """The requested page number could not be converted to an integer."""
    pass


class EmptyPage(InvalidPage):
    """The requested page number is out of range or contains no results."""
    pass
class Paginator(object):
    """Split ``object_list`` into pages of ``per_page`` objects each.

    ``orphans`` objects or fewer on the final page are merged into the
    previous one.  When ``allow_empty_first_page`` is true, page 1 is
    valid even for an empty object list.
    """

    def __init__(self, object_list, per_page, orphans=0,
                 allow_empty_first_page=True):
        self.object_list = object_list
        self.per_page = int(per_page)
        self.orphans = int(orphans)
        self.allow_empty_first_page = allow_empty_first_page
        # Both computed lazily and cached on first access.
        self._num_pages = self._count = None

    def validate_number(self, number):
        """
        Validates the given 1-based page number.
        """
        try:
            number = int(number)
        except (TypeError, ValueError):
            raise PageNotAnInteger('That page number is not an integer')
        if number < 1:
            raise EmptyPage('That page number is less than 1')
        if number > self.num_pages:
            # An empty first page stays addressable when allowed.
            if not (number == 1 and self.allow_empty_first_page):
                raise EmptyPage('That page contains no results')
        return number

    def page(self, number):
        """
        Returns a Page object for the given 1-based page number.
        """
        number = self.validate_number(number)
        start = (number - 1) * self.per_page
        stop = start + self.per_page
        # Fold trailing orphans into this (the last) page.
        if stop + self.orphans >= self.count:
            stop = self.count
        return self._get_page(self.object_list[start:stop], number, self)

    def _get_page(self, *args, **kwargs):
        """
        Returns an instance of a single page.

        Subclasses may override this hook to substitute their own
        page class for the standard :cls:`Page`.
        """
        return Page(*args, **kwargs)

    def _get_count(self):
        """
        Returns the total number of objects, across all pages.
        """
        if self._count is None:
            try:
                self._count = self.object_list.count()
            except (AttributeError, TypeError):
                # AttributeError: no count() method at all.
                # TypeError: count() needs arguments (e.g. a plain list).
                self._count = len(self.object_list)
        return self._count
    count = property(_get_count)

    def _get_num_pages(self):
        """
        Returns the total number of pages.
        """
        if self._num_pages is None:
            if self.count == 0 and not self.allow_empty_first_page:
                self._num_pages = 0
            else:
                # Orphans shrink the effective object count, but never
                # below one page's worth.
                effective = max(1, self.count - self.orphans)
                self._num_pages = int(ceil(effective / float(self.per_page)))
        return self._num_pages
    num_pages = property(_get_num_pages)

    def _get_page_range(self):
        """
        Returns a 1-based range of pages for iterating through within
        a template for loop.
        """
        return six.moves.range(1, self.num_pages + 1)
    page_range = property(_get_page_range)


QuerySetPaginator = Paginator  # For backwards-compatibility.
class Page(collections.Sequence):
    """One page of a paginated object list.

    Behaves as a read-only sequence of the objects on the page and knows
    its own 1-based ``number`` within its ``paginator``.
    """

    def __init__(self, object_list, number, paginator):
        self.object_list = object_list
        self.number = number
        self.paginator = paginator

    def __repr__(self):
        return '<Page %s of %s>' % (self.number, self.paginator.num_pages)

    def __len__(self):
        return len(self.object_list)

    def __getitem__(self, index):
        if not isinstance(index, (slice,) + six.integer_types):
            raise TypeError
        # Materialize a QuerySet once so that repeated indexing does not
        # hit the database on every __getitem__ call.
        if not isinstance(self.object_list, list):
            self.object_list = list(self.object_list)
        return self.object_list[index]

    def has_next(self):
        return self.number < self.paginator.num_pages

    def has_previous(self):
        return self.number > 1

    def has_other_pages(self):
        return self.has_previous() or self.has_next()

    def next_page_number(self):
        return self.paginator.validate_number(self.number + 1)

    def previous_page_number(self):
        return self.paginator.validate_number(self.number - 1)

    def start_index(self):
        """
        Returns the 1-based index of the first object on this page,
        relative to total objects in the paginator.
        """
        # An empty paginator has no first object.
        if self.paginator.count == 0:
            return 0
        return self.paginator.per_page * (self.number - 1) + 1

    def end_index(self):
        """
        Returns the 1-based index of the last object on this page,
        relative to total objects found (hits).
        """
        # The last page may be enlarged by orphans, so use the full count.
        if self.number == self.paginator.num_pages:
            return self.paginator.count
        return self.number * self.paginator.per_page
| apache-2.0 |
geodrinx/gearthview | ext-libs/twisted/web/vhost.py | 68 | 4372 | # -*- test-case-name: twisted.web.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
I am a virtual hosts implementation.
"""
# Twisted Imports
from twisted.python import roots
from twisted.web import resource
class VirtualHostCollection(roots.Homogenous):
    """Wrapper for virtual hosts collection.

    This exists for configuration purposes: it adapts a NameVirtualHost's
    host mapping to the roots.Homogenous collection interface.
    """
    entityType = resource.Resource

    def __init__(self, nvh):
        self.nvh = nvh

    def listStaticEntities(self):
        return self.nvh.hosts.items()

    def getStaticEntity(self, name):
        # Bug fix: previously this looked up self.nvh.hosts.get(self),
        # using the collection object itself as the key, so the lookup
        # always returned None.  Use the requested host name instead.
        return self.nvh.hosts.get(name)

    def reallyPutEntity(self, name, entity):
        self.nvh.addHost(name, entity)

    def delEntity(self, name):
        self.nvh.removeHost(name)
class NameVirtualHost(resource.Resource):
    """I am a resource which represents named virtual hosts.

    Requests are dispatched to one of the resources in ``hosts`` based on
    the HTTP Host header; ``default`` (if set) handles unknown hosts.
    """

    # Fallback resource for requests whose Host header matches no entry.
    default = None

    def __init__(self):
        """Initialize.
        """
        resource.Resource.__init__(self)
        self.hosts = {}

    def listStaticEntities(self):
        return resource.Resource.listStaticEntities(self) + [("Virtual Hosts", VirtualHostCollection(self))]

    def getStaticEntity(self, name):
        if name == "Virtual Hosts":
            return VirtualHostCollection(self)
        else:
            return resource.Resource.getStaticEntity(self, name)

    def addHost(self, name, resrc):
        """Add a host to this virtual host.

        This will take a host named `name', and map it to a resource
        `resrc'.  For example, a setup for our virtual hosts would be::

            nvh.addHost('divunal.com', divunalDirectory)
            nvh.addHost('www.divunal.com', divunalDirectory)
            nvh.addHost('twistedmatrix.com', twistedMatrixDirectory)
            nvh.addHost('www.twistedmatrix.com', twistedMatrixDirectory)
        """
        self.hosts[name] = resrc

    def removeHost(self, name):
        """Remove a host."""
        del self.hosts[name]

    def _getResourceForRequest(self, request):
        """(Internal) Get the appropriate resource for the given host.
        """
        hostHeader = request.getHeader('host')
        if hostHeader == None:
            return self.default or resource.NoResource()
        else:
            # Normalize: lower-case and strip any ":port" suffix before
            # looking the host up in the mapping.
            host = hostHeader.lower().split(':', 1)[0]
            return (self.hosts.get(host, self.default)
                    or resource.NoResource("host %s not in vhost map" % repr(host)))

    def render(self, request):
        """Implementation of resource.Resource's render method.
        """
        resrc = self._getResourceForRequest(request)
        return resrc.render(request)

    def getChild(self, path, request):
        """Implementation of resource.Resource's getChild method.
        """
        resrc = self._getResourceForRequest(request)
        if resrc.isLeaf:
            # Leaf resources handle the remaining path themselves; put the
            # current segment back so they see the full postpath.
            request.postpath.insert(0,request.prepath.pop(-1))
            return resrc
        else:
            return resrc.getChildWithDefault(path, request)
class _HostResource(resource.Resource):
    """Second stage of VHostMonsterResource: consumes the host[:port]
    path segment and rewrites the request to look like a direct hit."""

    def getChild(self, path, request):
        # "path" is the host element of the URL, optionally "host:port".
        if ':' in path:
            host, port = path.split(':', 1)
            port = int(port)
        else:
            host, port = path, 80
        request.setHost(host, port)
        # NOTE(review): this arithmetic appears to reconstruct the length
        # of the "/<name>/http[s]/<host[:port]>" prefix that is being
        # stripped from request.uri -- confirm against the prepath layout.
        prefixLen = 3+request.isSecure()+4+len(path)+len(request.prepath[-3])
        request.path = '/'+'/'.join(request.postpath)
        request.uri = request.uri[prefixLen:]
        # Drop the three consumed segments (<name>, http[s], host) so the
        # request looks as if it arrived at the site root.
        del request.prepath[:3]
        return request.site.getResourceFor(request)
class VHostMonsterResource(resource.Resource):

    """
    Use this to be able to record the hostname and method (http vs. https)
    in the URL without disturbing your web site. If you put this resource
    in a URL http://foo.com/bar then requests to
    http://foo.com/bar/http/baz.com/something will be equivalent to
    http://foo.com/something, except that the hostname the request will
    appear to be accessing will be "baz.com". So if "baz.com" is redirecting
    all requests to foo.com, while foo.com is inaccessible from the outside,
    then redirect and url generation will work correctly
    """
    def getChild(self, path, request):
        # First path segment selects the scheme; the next segment (the
        # host) is consumed by the returned _HostResource.
        if path == 'http':
            request.isSecure = lambda: 0
        elif path == 'https':
            request.isSecure = lambda: 1
        return _HostResource()
| gpl-3.0 |
imsparsh/python-for-android | python3-alpha/python3-src/Lib/test/test_urlparse.py | 46 | 40151 | #! /usr/bin/env python3
from test import support
import unittest
import urllib.parse
# Base URLs reused across the relative-reference (urljoin) tests below.
RFC1808_BASE = "http://a/b/c/d;p?q#f"
RFC2396_BASE = "http://a/b/c/d;p?q"
RFC3986_BASE = 'http://a/b/c/d;p?q'
SIMPLE_BASE = 'http://a/b/c/d'

# A list of test cases for parse_qsl().  Each test case is a two-tuple
# that contains a query string (str or bytes) and the expected list of
# (name, value) pairs.
parse_qsl_test_cases = [
    ("", []),
    ("&", []),
    ("&&", []),
    ("=", [('', '')]),
    ("=a", [('', 'a')]),
    ("a", [('a', '')]),
    ("a=", [('a', '')]),
    ("a=", [('a', '')]),
    ("&a=b", [('a', 'b')]),
    ("a=a+b&b=b+c", [('a', 'a b'), ('b', 'b c')]),
    ("a=1&a=2", [('a', '1'), ('a', '2')]),
    (b"", []),
    (b"&", []),
    (b"&&", []),
    (b"=", [(b'', b'')]),
    (b"=a", [(b'', b'a')]),
    (b"a", [(b'a', b'')]),
    (b"a=", [(b'a', b'')]),
    (b"a=", [(b'a', b'')]),
    (b"&a=b", [(b'a', b'b')]),
    (b"a=a+b&b=b+c", [(b'a', b'a b'), (b'b', b'b c')]),
    (b"a=1&a=2", [(b'a', b'1'), (b'a', b'2')]),
]
class UrlParseTestCase(unittest.TestCase):
def checkRoundtrips(self, url, parsed, split):
result = urllib.parse.urlparse(url)
self.assertEqual(result, parsed)
t = (result.scheme, result.netloc, result.path,
result.params, result.query, result.fragment)
self.assertEqual(t, parsed)
# put it back together and it should be the same
result2 = urllib.parse.urlunparse(result)
self.assertEqual(result2, url)
self.assertEqual(result2, result.geturl())
# the result of geturl() is a fixpoint; we can always parse it
# again to get the same result:
result3 = urllib.parse.urlparse(result.geturl())
self.assertEqual(result3.geturl(), result.geturl())
self.assertEqual(result3, result)
self.assertEqual(result3.scheme, result.scheme)
self.assertEqual(result3.netloc, result.netloc)
self.assertEqual(result3.path, result.path)
self.assertEqual(result3.params, result.params)
self.assertEqual(result3.query, result.query)
self.assertEqual(result3.fragment, result.fragment)
self.assertEqual(result3.username, result.username)
self.assertEqual(result3.password, result.password)
self.assertEqual(result3.hostname, result.hostname)
self.assertEqual(result3.port, result.port)
# check the roundtrip using urlsplit() as well
result = urllib.parse.urlsplit(url)
self.assertEqual(result, split)
t = (result.scheme, result.netloc, result.path,
result.query, result.fragment)
self.assertEqual(t, split)
result2 = urllib.parse.urlunsplit(result)
self.assertEqual(result2, url)
self.assertEqual(result2, result.geturl())
# check the fixpoint property of re-parsing the result of geturl()
result3 = urllib.parse.urlsplit(result.geturl())
self.assertEqual(result3.geturl(), result.geturl())
self.assertEqual(result3, result)
self.assertEqual(result3.scheme, result.scheme)
self.assertEqual(result3.netloc, result.netloc)
self.assertEqual(result3.path, result.path)
self.assertEqual(result3.query, result.query)
self.assertEqual(result3.fragment, result.fragment)
self.assertEqual(result3.username, result.username)
self.assertEqual(result3.password, result.password)
self.assertEqual(result3.hostname, result.hostname)
self.assertEqual(result3.port, result.port)
def test_qsl(self):
    """parse_qsl() with and without blank values, for str and bytes."""
    for query, expected in parse_qsl_test_cases:
        self.assertEqual(
            urllib.parse.parse_qsl(query, keep_blank_values=True),
            expected, "Error parsing %r" % query)
        # Dropping blanks must discard exactly the empty-valued pairs.
        nonblank = [pair for pair in expected if len(pair[1])]
        self.assertEqual(
            urllib.parse.parse_qsl(query, keep_blank_values=False),
            nonblank, "Error parsing %r" % query)
def test_roundtrips(self):
str_cases = [
('file:///tmp/junk.txt',
('file', '', '/tmp/junk.txt', '', '', ''),
('file', '', '/tmp/junk.txt', '', '')),
('imap://mail.python.org/mbox1',
('imap', 'mail.python.org', '/mbox1', '', '', ''),
('imap', 'mail.python.org', '/mbox1', '', '')),
('mms://wms.sys.hinet.net/cts/Drama/09006251100.asf',
('mms', 'wms.sys.hinet.net', '/cts/Drama/09006251100.asf',
'', '', ''),
('mms', 'wms.sys.hinet.net', '/cts/Drama/09006251100.asf',
'', '')),
('nfs://server/path/to/file.txt',
('nfs', 'server', '/path/to/file.txt', '', '', ''),
('nfs', 'server', '/path/to/file.txt', '', '')),
('svn+ssh://svn.zope.org/repos/main/ZConfig/trunk/',
('svn+ssh', 'svn.zope.org', '/repos/main/ZConfig/trunk/',
'', '', ''),
('svn+ssh', 'svn.zope.org', '/repos/main/ZConfig/trunk/',
'', '')),
('git+ssh://git@github.com/user/project.git',
('git+ssh', 'git@github.com','/user/project.git',
'','',''),
('git+ssh', 'git@github.com','/user/project.git',
'', '')),
]
def _encode(t):
return (t[0].encode('ascii'),
tuple(x.encode('ascii') for x in t[1]),
tuple(x.encode('ascii') for x in t[2]))
bytes_cases = [_encode(x) for x in str_cases]
for url, parsed, split in str_cases + bytes_cases:
self.checkRoundtrips(url, parsed, split)
def test_http_roundtrips(self):
# urllib.parse.urlsplit treats 'http:' as an optimized special case,
# so we test both 'http:' and 'https:' in all the following.
# Three cheers for white box knowledge!
str_cases = [
('://www.python.org',
('www.python.org', '', '', '', ''),
('www.python.org', '', '', '')),
('://www.python.org#abc',
('www.python.org', '', '', '', 'abc'),
('www.python.org', '', '', 'abc')),
('://www.python.org?q=abc',
('www.python.org', '', '', 'q=abc', ''),
('www.python.org', '', 'q=abc', '')),
('://www.python.org/#abc',
('www.python.org', '/', '', '', 'abc'),
('www.python.org', '/', '', 'abc')),
('://a/b/c/d;p?q#f',
('a', '/b/c/d', 'p', 'q', 'f'),
('a', '/b/c/d;p', 'q', 'f')),
]
def _encode(t):
return (t[0].encode('ascii'),
tuple(x.encode('ascii') for x in t[1]),
tuple(x.encode('ascii') for x in t[2]))
bytes_cases = [_encode(x) for x in str_cases]
str_schemes = ('http', 'https')
bytes_schemes = (b'http', b'https')
str_tests = str_schemes, str_cases
bytes_tests = bytes_schemes, bytes_cases
for schemes, test_cases in (str_tests, bytes_tests):
for scheme in schemes:
for url, parsed, split in test_cases:
url = scheme + url
parsed = (scheme,) + parsed
split = (scheme,) + split
self.checkRoundtrips(url, parsed, split)
def checkJoin(self, base, relurl, expected):
str_components = (base, relurl, expected)
self.assertEqual(urllib.parse.urljoin(base, relurl), expected)
bytes_components = baseb, relurlb, expectedb = [
x.encode('ascii') for x in str_components]
self.assertEqual(urllib.parse.urljoin(baseb, relurlb), expectedb)
def test_unparse_parse(self):
str_cases = ['Python', './Python','x-newscheme://foo.com/stuff','x://y','x:/y','x:/','/',]
bytes_cases = [x.encode('ascii') for x in str_cases]
for u in str_cases + bytes_cases:
self.assertEqual(urllib.parse.urlunsplit(urllib.parse.urlsplit(u)), u)
self.assertEqual(urllib.parse.urlunparse(urllib.parse.urlparse(u)), u)
def test_RFC1808(self):
    """Relative-reference resolution cases from RFC 1808."""
    # "normal" cases from RFC 1808:
    normal = [
        ('g:h', 'g:h'),
        ('g', 'http://a/b/c/g'),
        ('./g', 'http://a/b/c/g'),
        ('g/', 'http://a/b/c/g/'),
        ('/g', 'http://a/g'),
        ('//g', 'http://g'),
        ('g?y', 'http://a/b/c/g?y'),
        ('g?y/./x', 'http://a/b/c/g?y/./x'),
        ('#s', 'http://a/b/c/d;p?q#s'),
        ('g#s', 'http://a/b/c/g#s'),
        ('g#s/./x', 'http://a/b/c/g#s/./x'),
        ('g?y#s', 'http://a/b/c/g?y#s'),
        ('g;x', 'http://a/b/c/g;x'),
        ('g;x?y#s', 'http://a/b/c/g;x?y#s'),
        ('.', 'http://a/b/c/'),
        ('./', 'http://a/b/c/'),
        ('..', 'http://a/b/'),
        ('../', 'http://a/b/'),
        ('../g', 'http://a/b/g'),
        ('../..', 'http://a/'),
        ('../../', 'http://a/'),
        ('../../g', 'http://a/g'),
    ]
    # "abnormal" cases from RFC 1808:
    abnormal = [
        ('', 'http://a/b/c/d;p?q#f'),
        ('../../../g', 'http://a/../g'),
        ('../../../../g', 'http://a/../../g'),
        ('/./g', 'http://a/./g'),
        ('/../g', 'http://a/../g'),
        ('g.', 'http://a/b/c/g.'),
        ('.g', 'http://a/b/c/.g'),
        ('g..', 'http://a/b/c/g..'),
        ('..g', 'http://a/b/c/..g'),
        ('./../g', 'http://a/b/g'),
        ('./g/.', 'http://a/b/c/g/'),
        ('g/./h', 'http://a/b/c/g/h'),
        ('g/../h', 'http://a/b/c/h'),
    ]
    for relurl, expected in normal + abnormal:
        self.checkJoin(RFC1808_BASE, relurl, expected)
    # RFC 1808 and RFC 1630 disagree on these (according to RFC 1808),
    # so we'll not actually run these tests (which expect 1808 behavior).
    #self.checkJoin(RFC1808_BASE, 'http:g', 'http:g')
    #self.checkJoin(RFC1808_BASE, 'http:', 'http:')
def test_RFC2368(self):
# Issue 11467: path that starts with a number is not parsed correctly
self.assertEqual(urllib.parse.urlparse('mailto:1337@example.org'),
('mailto', '', '1337@example.org', '', '', ''))
def test_RFC2396(self):
    """Relative-reference resolution cases from RFC 2396."""
    cases = [
        ('g:h', 'g:h'),
        ('g', 'http://a/b/c/g'),
        ('./g', 'http://a/b/c/g'),
        ('g/', 'http://a/b/c/g/'),
        ('/g', 'http://a/g'),
        ('//g', 'http://g'),
        ('g?y', 'http://a/b/c/g?y'),
        ('#s', 'http://a/b/c/d;p?q#s'),
        ('g#s', 'http://a/b/c/g#s'),
        ('g?y#s', 'http://a/b/c/g?y#s'),
        ('g;x', 'http://a/b/c/g;x'),
        ('g;x?y#s', 'http://a/b/c/g;x?y#s'),
        ('.', 'http://a/b/c/'),
        ('./', 'http://a/b/c/'),
        ('..', 'http://a/b/'),
        ('../', 'http://a/b/'),
        ('../g', 'http://a/b/g'),
        ('../..', 'http://a/'),
        ('../../', 'http://a/'),
        ('../../g', 'http://a/g'),
        ('', RFC2396_BASE),
        ('../../../g', 'http://a/../g'),
        ('../../../../g', 'http://a/../../g'),
        ('/./g', 'http://a/./g'),
        ('/../g', 'http://a/../g'),
        ('g.', 'http://a/b/c/g.'),
        ('.g', 'http://a/b/c/.g'),
        ('g..', 'http://a/b/c/g..'),
        ('..g', 'http://a/b/c/..g'),
        ('./../g', 'http://a/b/g'),
        ('./g/.', 'http://a/b/c/g/'),
        ('g/./h', 'http://a/b/c/g/h'),
        ('g/../h', 'http://a/b/c/h'),
        ('g;x=1/./y', 'http://a/b/c/g;x=1/y'),
        ('g;x=1/../y', 'http://a/b/c/y'),
        ('g?y/./x', 'http://a/b/c/g?y/./x'),
        ('g?y/../x', 'http://a/b/c/g?y/../x'),
        ('g#s/./x', 'http://a/b/c/g#s/./x'),
        ('g#s/../x', 'http://a/b/c/g#s/../x'),
    ]
    for relurl, expected in cases:
        self.checkJoin(RFC2396_BASE, relurl, expected)
def test_RFC3986(self):
    """Relative-reference resolution cases from RFC 3986."""
    self.checkJoin(RFC3986_BASE, '?y', 'http://a/b/c/d;p?y')
    self.checkJoin(RFC2396_BASE, ';x', 'http://a/b/c/;x')
    cases = [
        ('g:h', 'g:h'),
        ('g', 'http://a/b/c/g'),
        ('./g', 'http://a/b/c/g'),
        ('g/', 'http://a/b/c/g/'),
        ('/g', 'http://a/g'),
        ('//g', 'http://g'),
        ('?y', 'http://a/b/c/d;p?y'),
        ('g?y', 'http://a/b/c/g?y'),
        ('#s', 'http://a/b/c/d;p?q#s'),
        ('g#s', 'http://a/b/c/g#s'),
        ('g?y#s', 'http://a/b/c/g?y#s'),
        (';x', 'http://a/b/c/;x'),
        ('g;x', 'http://a/b/c/g;x'),
        ('g;x?y#s', 'http://a/b/c/g;x?y#s'),
        ('', 'http://a/b/c/d;p?q'),
        ('.', 'http://a/b/c/'),
        ('./', 'http://a/b/c/'),
        ('..', 'http://a/b/'),
        ('../', 'http://a/b/'),
        ('../g', 'http://a/b/g'),
        ('../..', 'http://a/'),
        ('../../', 'http://a/'),
        ('../../g', 'http://a/g'),
    ]
    # Abnormal examples.  The RFC 3986 "abnormal scenarios" are
    # incompatible with this parser, so they are kept below only for
    # reference:
    #   ('../../../g', 'http://a/g')
    #   ('../../../../g', 'http://a/g')
    #   ('/./g', 'http://a/g')
    #   ('/../g', 'http://a/g')
    cases += [
        ('g.', 'http://a/b/c/g.'),
        ('.g', 'http://a/b/c/.g'),
        ('g..', 'http://a/b/c/g..'),
        ('..g', 'http://a/b/c/..g'),
        ('./../g', 'http://a/b/g'),
        ('./g/.', 'http://a/b/c/g/'),
        ('g/./h', 'http://a/b/c/g/h'),
        ('g/../h', 'http://a/b/c/h'),
        ('g;x=1/./y', 'http://a/b/c/g;x=1/y'),
        ('g;x=1/../y', 'http://a/b/c/y'),
        ('g?y/./x', 'http://a/b/c/g?y/./x'),
        ('g?y/../x', 'http://a/b/c/g?y/../x'),
        ('g#s/./x', 'http://a/b/c/g#s/./x'),
        ('g#s/../x', 'http://a/b/c/g#s/../x'),
        #('http:g', 'http:g'),          # strict parser
        ('http:g', 'http://a/b/c/g'),   # relaxed parser
    ]
    for relurl, expected in cases:
        self.checkJoin(RFC3986_BASE, relurl, expected)
    # Test for issue9721
    self.checkJoin('http://a/b/c/de', ';x', 'http://a/b/c/;x')
def test_urljoins(self):
    """Additional urljoin() cases against SIMPLE_BASE and odd bases."""
    simple_cases = [
        ('g:h', 'g:h'),
        ('http:g', 'http://a/b/c/g'),
        ('http:', 'http://a/b/c/d'),
        ('g', 'http://a/b/c/g'),
        ('./g', 'http://a/b/c/g'),
        ('g/', 'http://a/b/c/g/'),
        ('/g', 'http://a/g'),
        ('//g', 'http://g'),
        ('?y', 'http://a/b/c/d?y'),
        ('g?y', 'http://a/b/c/g?y'),
        ('g?y/./x', 'http://a/b/c/g?y/./x'),
        ('.', 'http://a/b/c/'),
        ('./', 'http://a/b/c/'),
        ('..', 'http://a/b/'),
        ('../', 'http://a/b/'),
        ('../g', 'http://a/b/g'),
        ('../..', 'http://a/'),
        ('../../g', 'http://a/g'),
        ('../../../g', 'http://a/../g'),
        ('./../g', 'http://a/b/g'),
        ('./g/.', 'http://a/b/c/g/'),
        ('/./g', 'http://a/./g'),
        ('g/./h', 'http://a/b/c/g/h'),
        ('g/../h', 'http://a/b/c/h'),
        # The next two repeat earlier entries; kept to preserve the
        # original test's exact call sequence.
        ('http:g', 'http://a/b/c/g'),
        ('http:', 'http://a/b/c/d'),
        ('http:?y', 'http://a/b/c/d?y'),
        ('http:g?y', 'http://a/b/c/g?y'),
        ('http:g?y/./x', 'http://a/b/c/g?y/./x'),
    ]
    for relurl, expected in simple_cases:
        self.checkJoin(SIMPLE_BASE, relurl, expected)
    # Bases other than SIMPLE_BASE:
    other_cases = [
        ('http:///', '..', 'http:///'),
        ('', 'http://a/b/c/g?y/./x', 'http://a/b/c/g?y/./x'),
        ('', 'http://a/./g', 'http://a/./g'),
        ('svn://pathtorepo/dir1', 'dir2', 'svn://pathtorepo/dir2'),
        ('svn+ssh://pathtorepo/dir1', 'dir2', 'svn+ssh://pathtorepo/dir2'),
    ]
    for base, relurl, expected in other_cases:
        self.checkJoin(base, relurl, expected)
def test_RFC2732(self):
str_cases = [
('http://Test.python.org:5432/foo/', 'test.python.org', 5432),
('http://12.34.56.78:5432/foo/', '12.34.56.78', 5432),
('http://[::1]:5432/foo/', '::1', 5432),
('http://[dead:beef::1]:5432/foo/', 'dead:beef::1', 5432),
('http://[dead:beef::]:5432/foo/', 'dead:beef::', 5432),
('http://[dead:beef:cafe:5417:affe:8FA3:deaf:feed]:5432/foo/',
'dead:beef:cafe:5417:affe:8fa3:deaf:feed', 5432),
('http://[::12.34.56.78]:5432/foo/', '::12.34.56.78', 5432),
('http://[::ffff:12.34.56.78]:5432/foo/',
'::ffff:12.34.56.78', 5432),
('http://Test.python.org/foo/', 'test.python.org', None),
('http://12.34.56.78/foo/', '12.34.56.78', None),
('http://[::1]/foo/', '::1', None),
('http://[dead:beef::1]/foo/', 'dead:beef::1', None),
('http://[dead:beef::]/foo/', 'dead:beef::', None),
('http://[dead:beef:cafe:5417:affe:8FA3:deaf:feed]/foo/',
'dead:beef:cafe:5417:affe:8fa3:deaf:feed', None),
('http://[::12.34.56.78]/foo/', '::12.34.56.78', None),
('http://[::ffff:12.34.56.78]/foo/',
'::ffff:12.34.56.78', None),
]
def _encode(t):
return t[0].encode('ascii'), t[1].encode('ascii'), t[2]
bytes_cases = [_encode(x) for x in str_cases]
for url, hostname, port in str_cases + bytes_cases:
urlparsed = urllib.parse.urlparse(url)
self.assertEqual((urlparsed.hostname, urlparsed.port) , (hostname, port))
str_cases = [
'http://::12.34.56.78]/',
'http://[::1/foo/',
'ftp://[::1/foo/bad]/bad',
'http://[::1/foo/bad]/bad',
'http://[::ffff:12.34.56.78']
bytes_cases = [x.encode('ascii') for x in str_cases]
for invalid_url in str_cases + bytes_cases:
self.assertRaises(ValueError, urllib.parse.urlparse, invalid_url)
def test_urldefrag(self):
    """urldefrag() splits off the fragment and round-trips via geturl()."""
    str_cases = [
        ('http://python.org#frag', 'http://python.org', 'frag'),
        ('http://python.org', 'http://python.org', ''),
        ('http://python.org/#frag', 'http://python.org/', 'frag'),
        ('http://python.org/', 'http://python.org/', ''),
        ('http://python.org/?q#frag', 'http://python.org/?q', 'frag'),
        ('http://python.org/?q', 'http://python.org/?q', ''),
        ('http://python.org/p#frag', 'http://python.org/p', 'frag'),
        ('http://python.org/p?q', 'http://python.org/p?q', ''),
        (RFC1808_BASE, 'http://a/b/c/d;p?q', 'f'),
        (RFC2396_BASE, 'http://a/b/c/d;p?q', ''),
    ]
    bytes_cases = [tuple(s.encode('ascii') for s in case)
                   for case in str_cases]
    for url, defrag, frag in str_cases + bytes_cases:
        result = urllib.parse.urldefrag(url)
        self.assertEqual(result.geturl(), url)
        self.assertEqual(result, (defrag, frag))
        self.assertEqual(result.url, defrag)
        self.assertEqual(result.fragment, frag)
def test_urlsplit_attributes(self):
url = "HTTP://WWW.PYTHON.ORG/doc/#frag"
p = urllib.parse.urlsplit(url)
self.assertEqual(p.scheme, "http")
self.assertEqual(p.netloc, "WWW.PYTHON.ORG")
self.assertEqual(p.path, "/doc/")
self.assertEqual(p.query, "")
self.assertEqual(p.fragment, "frag")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, "www.python.org")
self.assertEqual(p.port, None)
# geturl() won't return exactly the original URL in this case
# since the scheme is always case-normalized
# We handle this by ignoring the first 4 characters of the URL
self.assertEqual(p.geturl()[4:], url[4:])
url = "http://User:Pass@www.python.org:080/doc/?query=yes#frag"
p = urllib.parse.urlsplit(url)
self.assertEqual(p.scheme, "http")
self.assertEqual(p.netloc, "User:Pass@www.python.org:080")
self.assertEqual(p.path, "/doc/")
self.assertEqual(p.query, "query=yes")
self.assertEqual(p.fragment, "frag")
self.assertEqual(p.username, "User")
self.assertEqual(p.password, "Pass")
self.assertEqual(p.hostname, "www.python.org")
self.assertEqual(p.port, 80)
self.assertEqual(p.geturl(), url)
# Addressing issue1698, which suggests Username can contain
# "@" characters. Though not RFC compliant, many ftp sites allow
# and request email addresses as usernames.
url = "http://User@example.com:Pass@www.python.org:080/doc/?query=yes#frag"
p = urllib.parse.urlsplit(url)
self.assertEqual(p.scheme, "http")
self.assertEqual(p.netloc, "User@example.com:Pass@www.python.org:080")
self.assertEqual(p.path, "/doc/")
self.assertEqual(p.query, "query=yes")
self.assertEqual(p.fragment, "frag")
self.assertEqual(p.username, "User@example.com")
self.assertEqual(p.password, "Pass")
self.assertEqual(p.hostname, "www.python.org")
self.assertEqual(p.port, 80)
self.assertEqual(p.geturl(), url)
# And check them all again, only with bytes this time
url = b"HTTP://WWW.PYTHON.ORG/doc/#frag"
p = urllib.parse.urlsplit(url)
self.assertEqual(p.scheme, b"http")
self.assertEqual(p.netloc, b"WWW.PYTHON.ORG")
self.assertEqual(p.path, b"/doc/")
self.assertEqual(p.query, b"")
self.assertEqual(p.fragment, b"frag")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, b"www.python.org")
self.assertEqual(p.port, None)
self.assertEqual(p.geturl()[4:], url[4:])
url = b"http://User:Pass@www.python.org:080/doc/?query=yes#frag"
p = urllib.parse.urlsplit(url)
self.assertEqual(p.scheme, b"http")
self.assertEqual(p.netloc, b"User:Pass@www.python.org:080")
self.assertEqual(p.path, b"/doc/")
self.assertEqual(p.query, b"query=yes")
self.assertEqual(p.fragment, b"frag")
self.assertEqual(p.username, b"User")
self.assertEqual(p.password, b"Pass")
self.assertEqual(p.hostname, b"www.python.org")
self.assertEqual(p.port, 80)
self.assertEqual(p.geturl(), url)
url = b"http://User@example.com:Pass@www.python.org:080/doc/?query=yes#frag"
p = urllib.parse.urlsplit(url)
self.assertEqual(p.scheme, b"http")
self.assertEqual(p.netloc, b"User@example.com:Pass@www.python.org:080")
self.assertEqual(p.path, b"/doc/")
self.assertEqual(p.query, b"query=yes")
self.assertEqual(p.fragment, b"frag")
self.assertEqual(p.username, b"User@example.com")
self.assertEqual(p.password, b"Pass")
self.assertEqual(p.hostname, b"www.python.org")
self.assertEqual(p.port, 80)
self.assertEqual(p.geturl(), url)
def test_attributes_bad_port(self):
"""Check handling of non-integer ports."""
p = urllib.parse.urlsplit("http://www.example.net:foo")
self.assertEqual(p.netloc, "www.example.net:foo")
self.assertRaises(ValueError, lambda: p.port)
p = urllib.parse.urlparse("http://www.example.net:foo")
self.assertEqual(p.netloc, "www.example.net:foo")
self.assertRaises(ValueError, lambda: p.port)
# Once again, repeat ourselves to test bytes
p = urllib.parse.urlsplit(b"http://www.example.net:foo")
self.assertEqual(p.netloc, b"www.example.net:foo")
self.assertRaises(ValueError, lambda: p.port)
p = urllib.parse.urlparse(b"http://www.example.net:foo")
self.assertEqual(p.netloc, b"www.example.net:foo")
self.assertRaises(ValueError, lambda: p.port)
def test_attributes_without_netloc(self):
# This example is straight from RFC 3261. It looks like it
# should allow the username, hostname, and port to be filled
# in, but doesn't. Since it's a URI and doesn't use the
# scheme://netloc syntax, the netloc and related attributes
# should be left empty.
uri = "sip:alice@atlanta.com;maddr=239.255.255.1;ttl=15"
p = urllib.parse.urlsplit(uri)
self.assertEqual(p.netloc, "")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, None)
self.assertEqual(p.port, None)
self.assertEqual(p.geturl(), uri)
p = urllib.parse.urlparse(uri)
self.assertEqual(p.netloc, "")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, None)
self.assertEqual(p.port, None)
self.assertEqual(p.geturl(), uri)
# You guessed it, repeating the test with bytes input
uri = b"sip:alice@atlanta.com;maddr=239.255.255.1;ttl=15"
p = urllib.parse.urlsplit(uri)
self.assertEqual(p.netloc, b"")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, None)
self.assertEqual(p.port, None)
self.assertEqual(p.geturl(), uri)
p = urllib.parse.urlparse(uri)
self.assertEqual(p.netloc, b"")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, None)
self.assertEqual(p.port, None)
self.assertEqual(p.geturl(), uri)
def test_noslash(self):
# Issue 1637: http://foo.com?query is legal
self.assertEqual(urllib.parse.urlparse("http://example.com?blahblah=/foo"),
('http', 'example.com', '', '', 'blahblah=/foo', ''))
self.assertEqual(urllib.parse.urlparse(b"http://example.com?blahblah=/foo"),
(b'http', b'example.com', b'', b'', b'blahblah=/foo', b''))
def test_withoutscheme(self):
# Test urlparse without scheme
# Issue 754016: urlparse goes wrong with IP:port without scheme
# RFC 1808 specifies that netloc should start with //, urlparse expects
# the same, otherwise it classifies the portion of url as path.
self.assertEqual(urllib.parse.urlparse("path"),
('','','path','','',''))
self.assertEqual(urllib.parse.urlparse("//www.python.org:80"),
('','www.python.org:80','','','',''))
self.assertEqual(urllib.parse.urlparse("http://www.python.org:80"),
('http','www.python.org:80','','','',''))
# Repeat for bytes input
self.assertEqual(urllib.parse.urlparse(b"path"),
(b'',b'',b'path',b'',b'',b''))
self.assertEqual(urllib.parse.urlparse(b"//www.python.org:80"),
(b'',b'www.python.org:80',b'',b'',b'',b''))
self.assertEqual(urllib.parse.urlparse(b"http://www.python.org:80"),
(b'http',b'www.python.org:80',b'',b'',b'',b''))
def test_portseparator(self):
# Issue 754016 makes changes for port separator ':' from scheme separator
self.assertEqual(urllib.parse.urlparse("path:80"),
('','','path:80','','',''))
self.assertEqual(urllib.parse.urlparse("http:"),('http','','','','',''))
self.assertEqual(urllib.parse.urlparse("https:"),('https','','','','',''))
self.assertEqual(urllib.parse.urlparse("http://www.python.org:80"),
('http','www.python.org:80','','','',''))
# As usual, need to check bytes input as well
self.assertEqual(urllib.parse.urlparse(b"path:80"),
(b'',b'',b'path:80',b'',b'',b''))
self.assertEqual(urllib.parse.urlparse(b"http:"),(b'http',b'',b'',b'',b'',b''))
self.assertEqual(urllib.parse.urlparse(b"https:"),(b'https',b'',b'',b'',b'',b''))
self.assertEqual(urllib.parse.urlparse(b"http://www.python.org:80"),
(b'http',b'www.python.org:80',b'',b'',b'',b''))
def test_usingsys(self):
# Issue 3314: sys module is used in the error
self.assertRaises(TypeError, urllib.parse.urlencode, "foo")
def test_anyscheme(self):
# Issue 7904: s3://foo.com/stuff has netloc "foo.com".
self.assertEqual(urllib.parse.urlparse("s3://foo.com/stuff"),
('s3', 'foo.com', '/stuff', '', '', ''))
self.assertEqual(urllib.parse.urlparse("x-newscheme://foo.com/stuff"),
('x-newscheme', 'foo.com', '/stuff', '', '', ''))
# And for bytes...
self.assertEqual(urllib.parse.urlparse(b"s3://foo.com/stuff"),
(b's3', b'foo.com', b'/stuff', b'', b'', b''))
self.assertEqual(urllib.parse.urlparse(b"x-newscheme://foo.com/stuff"),
(b'x-newscheme', b'foo.com', b'/stuff', b'', b'', b''))
def test_mixed_types_rejected(self):
# Several functions that process either strings or ASCII encoded bytes
# accept multiple arguments. Check they reject mixed type input
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlparse("www.python.org", b"http")
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlparse(b"www.python.org", "http")
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlsplit("www.python.org", b"http")
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlsplit(b"www.python.org", "http")
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlunparse(( b"http", "www.python.org","","","",""))
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlunparse(("http", b"www.python.org","","","",""))
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlunsplit((b"http", "www.python.org","","",""))
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlunsplit(("http", b"www.python.org","","",""))
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urljoin("http://python.org", b"http://python.org")
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urljoin(b"http://python.org", "http://python.org")
def _check_result_type(self, str_type):
num_args = len(str_type._fields)
bytes_type = str_type._encoded_counterpart
self.assertIs(bytes_type._decoded_counterpart, str_type)
str_args = ('',) * num_args
bytes_args = (b'',) * num_args
str_result = str_type(*str_args)
bytes_result = bytes_type(*bytes_args)
encoding = 'ascii'
errors = 'strict'
self.assertEqual(str_result, str_args)
self.assertEqual(bytes_result.decode(), str_args)
self.assertEqual(bytes_result.decode(), str_result)
self.assertEqual(bytes_result.decode(encoding), str_args)
self.assertEqual(bytes_result.decode(encoding), str_result)
self.assertEqual(bytes_result.decode(encoding, errors), str_args)
self.assertEqual(bytes_result.decode(encoding, errors), str_result)
self.assertEqual(bytes_result, bytes_args)
self.assertEqual(str_result.encode(), bytes_args)
self.assertEqual(str_result.encode(), bytes_result)
self.assertEqual(str_result.encode(encoding), bytes_args)
self.assertEqual(str_result.encode(encoding), bytes_result)
self.assertEqual(str_result.encode(encoding, errors), bytes_args)
self.assertEqual(str_result.encode(encoding, errors), bytes_result)
def test_result_pairs(self):
# Check encoding and decoding between result pairs
result_types = [
urllib.parse.DefragResult,
urllib.parse.SplitResult,
urllib.parse.ParseResult,
]
for result_type in result_types:
self._check_result_type(result_type)
def test_parse_qs_encoding(self):
result = urllib.parse.parse_qs("key=\u0141%E9", encoding="latin-1")
self.assertEqual(result, {'key': ['\u0141\xE9']})
result = urllib.parse.parse_qs("key=\u0141%C3%A9", encoding="utf-8")
self.assertEqual(result, {'key': ['\u0141\xE9']})
result = urllib.parse.parse_qs("key=\u0141%C3%A9", encoding="ascii")
self.assertEqual(result, {'key': ['\u0141\ufffd\ufffd']})
result = urllib.parse.parse_qs("key=\u0141%E9-", encoding="ascii")
self.assertEqual(result, {'key': ['\u0141\ufffd-']})
result = urllib.parse.parse_qs("key=\u0141%E9-", encoding="ascii",
errors="ignore")
self.assertEqual(result, {'key': ['\u0141-']})
def test_parse_qsl_encoding(self):
result = urllib.parse.parse_qsl("key=\u0141%E9", encoding="latin-1")
self.assertEqual(result, [('key', '\u0141\xE9')])
result = urllib.parse.parse_qsl("key=\u0141%C3%A9", encoding="utf-8")
self.assertEqual(result, [('key', '\u0141\xE9')])
result = urllib.parse.parse_qsl("key=\u0141%C3%A9", encoding="ascii")
self.assertEqual(result, [('key', '\u0141\ufffd\ufffd')])
result = urllib.parse.parse_qsl("key=\u0141%E9-", encoding="ascii")
self.assertEqual(result, [('key', '\u0141\ufffd-')])
result = urllib.parse.parse_qsl("key=\u0141%E9-", encoding="ascii",
errors="ignore")
self.assertEqual(result, [('key', '\u0141-')])
def test_splitnport(self):
# Normal cases are exercised by other tests; ensure that we also
# catch cases with no port specified. (testcase ensuring coverage)
result = urllib.parse.splitnport('parrot:88')
self.assertEqual(result, ('parrot', 88))
result = urllib.parse.splitnport('parrot')
self.assertEqual(result, ('parrot', -1))
result = urllib.parse.splitnport('parrot', 55)
self.assertEqual(result, ('parrot', 55))
result = urllib.parse.splitnport('parrot:')
self.assertEqual(result, ('parrot', None))
def test_splitquery(self):
# Normal cases are exercised by other tests; ensure that we also
# catch cases with no port specified (testcase ensuring coverage)
result = urllib.parse.splitquery('http://python.org/fake?foo=bar')
self.assertEqual(result, ('http://python.org/fake', 'foo=bar'))
result = urllib.parse.splitquery('http://python.org/fake?foo=bar?')
self.assertEqual(result, ('http://python.org/fake?foo=bar', ''))
result = urllib.parse.splitquery('http://python.org/fake')
self.assertEqual(result, ('http://python.org/fake', None))
def test_splitvalue(self):
# Normal cases are exercised by other tests; test pathological cases
# with no key/value pairs. (testcase ensuring coverage)
result = urllib.parse.splitvalue('foo=bar')
self.assertEqual(result, ('foo', 'bar'))
result = urllib.parse.splitvalue('foo=')
self.assertEqual(result, ('foo', ''))
result = urllib.parse.splitvalue('foobar')
self.assertEqual(result, ('foobar', None))
def test_to_bytes(self):
    # Pure-ASCII URLs pass through unchanged; any non-ASCII character
    # makes to_bytes raise UnicodeError.
    result = urllib.parse.to_bytes('http://www.python.org')
    self.assertEqual(result, 'http://www.python.org')
    self.assertRaises(UnicodeError, urllib.parse.to_bytes,
                      'http://www.python.org/medi\u00e6val')
def test_urlencode_sequences(self):
    # Other tests incidentally urlencode things; test non-covered cases:
    # Sequence and object values.
    # With doseq=True each element of a list/tuple value becomes its own
    # key=value pair.
    result = urllib.parse.urlencode({'a': [1, 2], 'b': (3, 4, 5)}, True)
    self.assertEqual(result, 'a=1&a=2&b=3&b=4&b=5')

    # Arbitrary objects are serialized via str().
    class Trivial:
        def __str__(self):
            return 'trivial'

    result = urllib.parse.urlencode({'a': Trivial()}, True)
    self.assertEqual(result, 'a=trivial')
def test_quote_from_bytes(self):
    # quote_from_bytes only accepts bytes-like input: a str raises
    # TypeError, and the empty bytes object quotes to the empty string.
    self.assertRaises(TypeError, urllib.parse.quote_from_bytes, 'foo')
    self.assertEqual(
        urllib.parse.quote_from_bytes(b'archaeological arcana'),
        'archaeological%20arcana')
    self.assertEqual(urllib.parse.quote_from_bytes(b''), '')
def test_unquote_to_bytes(self):
    # Percent escapes decode to the raw byte values; empty input
    # round-trips to empty bytes.
    self.assertEqual(urllib.parse.unquote_to_bytes('abc%20def'), b'abc def')
    self.assertEqual(urllib.parse.unquote_to_bytes(''), b'')
def test_quote_errors(self):
    # Passing encoding/errors together with bytes input is rejected:
    # those parameters only make sense when quoting a str.
    self.assertRaises(TypeError, urllib.parse.quote, b'foo',
                      encoding='utf-8')
    self.assertRaises(TypeError, urllib.parse.quote, b'foo', errors='strict')
def test_main():
    # Entry point used by the regrtest harness.
    support.run_unittest(UrlParseTestCase)


if __name__ == "__main__":
    test_main()
| apache-2.0 |
scrollback/kuma | vendor/packages/nose/nose/loader.py | 14 | 25489 | """
Test Loader
-----------
nose's test loader implements the same basic functionality as its
superclass, unittest.TestLoader, but extends it by more liberal
interpretations of what may be a test and how a test may be named.
"""
from __future__ import generators
import logging
import os
import sys
import unittest
import types
from inspect import isfunction
from nose.pyversion import unbound_method, ismethod
from nose.case import FunctionTestCase, MethodTestCase
from nose.failure import Failure
from nose.config import Config
from nose.importer import Importer, add_path, remove_path
from nose.selector import defaultSelector, TestAddress
from nose.util import func_lineno, getpackage, isclass, isgenerator, \
ispackage, regex_last_key, resolve_name, transplant_func, \
transplant_class, test_address
from nose.suite import ContextSuiteFactory, ContextList, LazySuite
from nose.pyversion import sort_list, cmp_to_key
log = logging.getLogger(__name__)
#log.setLevel(logging.DEBUG)
# for efficiency and easier mocking
op_normpath = os.path.normpath
op_abspath = os.path.abspath
op_join = os.path.join
op_isdir = os.path.isdir
op_isfile = os.path.isfile
__all__ = ['TestLoader', 'defaultTestLoader']
class TestLoader(unittest.TestLoader):
    """Test loader that extends unittest.TestLoader to:

    * Load tests from test-like functions and classes that are not
      unittest.TestCase subclasses
    * Find and load test modules in a directory
    * Support tests that are generators
    * Support easy extensions of or changes to that behavior through plugins
    """
    # Collaborators; all assigned in __init__ (class-level defaults only).
    config = None       # nose.config.Config instance
    importer = None     # object implementing importFromPath
    workingDir = None   # absolute dir that file/module names are relative to
    selector = None     # decides what counts as a test
    suiteClass = None   # factory producing context suites
def __init__(self, config=None, importer=None, workingDir=None,
selector=None):
"""Initialize a test loader.
Parameters (all optional):
* config: provide a `nose.config.Config`_ or other config class
instance; if not provided a `nose.config.Config`_ with
default values is used.
* importer: provide an importer instance that implements
`importFromPath`. If not provided, a
`nose.importer.Importer`_ is used.
* workingDir: the directory to which file and module names are
relative. If not provided, assumed to be the current working
directory.
* selector: a selector class or instance. If a class is
provided, it will be instantiated with one argument, the
current config. If not provided, a `nose.selector.Selector`_
is used.
"""
if config is None:
config = Config()
if importer is None:
importer = Importer(config=config)
if workingDir is None:
workingDir = config.workingDir
if selector is None:
selector = defaultSelector(config)
elif isclass(selector):
selector = selector(config)
self.config = config
self.importer = importer
self.workingDir = op_normpath(op_abspath(workingDir))
self.selector = selector
if config.addPaths:
add_path(workingDir, config)
self.suiteClass = ContextSuiteFactory(config=config)
self._visitedPaths = set([])
unittest.TestLoader.__init__(self)
def getTestCaseNames(self, testCaseClass):
    """Override to select with selector, unless
    config.getTestCaseNamesCompat is True.

    Returns the list of test method names for *testCaseClass*,
    including names inherited from its bases, falling back to
    ``['runTest']`` when nothing else matched.
    """
    if self.config.getTestCaseNamesCompat:
        return unittest.TestLoader.getTestCaseNames(self, testCaseClass)

    def wanted(attr, cls=testCaseClass, sel=self.selector):
        item = getattr(cls, attr, None)
        if isfunction(item):
            item = unbound_method(cls, item)
        elif not ismethod(item):
            return False
        return sel.wantMethod(item)
    # Materialize the filter: on Python 3 ``filter`` returns an iterator,
    # which the ``in`` test below would exhaust and which has no
    # ``append`` -- the original ``cases = filter(...)`` crashed there.
    cases = list(filter(wanted, dir(testCaseClass)))
    # include inherited test names not already picked up
    for base in testCaseClass.__bases__:
        for case in self.getTestCaseNames(base):
            if case not in cases:
                cases.append(case)
    # add runTest if nothing else picked
    if not cases and hasattr(testCaseClass, 'runTest'):
        cases = ['runTest']
    if self.sortTestMethodsUsing:
        sort_list(cases, cmp_to_key(self.sortTestMethodsUsing))
    return cases
def _haveVisited(self, path):
    """Return True if *path* has already been loaded during this run.

    A path of ``None`` is never considered visited.
    """
    # For cases where path is None, we always pretend we haven't visited
    # them.
    return path is not None and path in self._visitedPaths
def _addVisitedPath(self, path):
    """Record *path* as visited; ``None`` entries are silently ignored."""
    if path is None:
        return
    self._visitedPaths.add(path)
def loadTestsFromDir(self, path):
    """Load tests from the directory at path. This is a generator
    -- each suite of tests from a module or other file is yielded
    and is expected to be executed before the next file is
    examined.
    """
    log.debug("load from dir %s", path)
    plugins = self.config.plugins
    plugins.beforeDirectory(path)
    if self.config.addPaths:
        # remember what we added so it can be popped after the directory
        paths_added = add_path(path, self.config)

    entries = os.listdir(path)
    # entries matching testMatch sort last so non-test modules import first
    sort_list(entries, regex_last_key(self.config.testMatch))
    for entry in entries:
        # this hard-coded initial-dot test will be removed:
        # http://code.google.com/p/python-nose/issues/detail?id=82
        if entry.startswith('.'):
            continue
        entry_path = op_abspath(op_join(path, entry))
        is_file = op_isfile(entry_path)
        wanted = False
        if is_file:
            is_dir = False
            wanted = self.selector.wantFile(entry_path)
        else:
            is_dir = op_isdir(entry_path)
            if is_dir:
                # this hard-coded initial-underscore test will be removed:
                # http://code.google.com/p/python-nose/issues/detail?id=82
                if entry.startswith('_'):
                    continue
                wanted = self.selector.wantDirectory(entry_path)
                is_package = ispackage(entry_path)

        # Python 3.3 now implements PEP 420: Implicit Namespace Packages.
        # As a result, it's now possible that parent paths that have a
        # segment with the same basename as our package ends up
        # in module.__path__.  So we have to keep track of what we've
        # visited, and not-revisit them again.
        if wanted and not self._haveVisited(entry_path):
            self._addVisitedPath(entry_path)
            if is_file:
                plugins.beforeContext()
                if entry.endswith('.py'):
                    yield self.loadTestsFromName(
                        entry_path, discovered=True)
                else:
                    yield self.loadTestsFromFile(entry_path)
                plugins.afterContext()
            elif is_package:
                # Load the entry as a package: given the full path,
                # loadTestsFromName() will figure it out
                yield self.loadTestsFromName(
                    entry_path, discovered=True)
            else:
                # Another test dir in this one: recurse lazily
                yield self.suiteClass(
                    lambda: self.loadTestsFromDir(entry_path))
    # give plugins a chance to contribute directory-level tests
    tests = []
    for test in plugins.loadTestsFromDir(path):
        tests.append(test)
    # TODO: is this try/except needed?
    try:
        if tests:
            yield self.suiteClass(tests)
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        yield self.suiteClass([Failure(*sys.exc_info())])

    # pop paths
    if self.config.addPaths:
        for p in paths_added:
            remove_path(p)
    plugins.afterDirectory(path)
def loadTestsFromFile(self, filename):
    """Load tests from a non-module file. Default is to raise a
    ValueError; plugins may implement `loadTestsFromFile` to
    provide a list of tests loaded from the file.
    """
    log.debug("Load from non-module file %s", filename)
    try:
        tests = [test for test in
                 self.config.plugins.loadTestsFromFile(filename)]
        if tests:
            # Plugins can yield False to indicate that they were
            # unable to load tests from a file, but it was not an
            # error -- the file just had no tests to load.
            tests = filter(None, tests)
            return self.suiteClass(tests)
        else:
            # Nothing was able to even try to load from this file
            open(filename, 'r').close()  # trigger os error
            raise ValueError("Unable to load tests from file %s"
                             % filename)
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        # any failure (including the ValueError above) becomes a
        # Failure test case addressed to this file
        exc = sys.exc_info()
        return self.suiteClass(
            [Failure(exc[0], exc[1], exc[2],
                     address=(filename, None, None))])
def loadTestsFromGenerator(self, generator, module):
    """Lazy-load tests from a generator function. The generator function
    may yield either:

    * a callable, or
    * a function name resolvable within the same module
    """
    def generate(g=generator, m=module):
        try:
            for test in g():
                test_func, arg = self.parseGeneratedTest(test)
                if not callable(test_func):
                    # a name was yielded; resolve it within the module
                    test_func = getattr(m, test_func)
                yield FunctionTestCase(test_func, arg=arg, descriptor=g)
        except KeyboardInterrupt:
            raise
        except:
            # a failing generator becomes a Failure case addressed to it
            exc = sys.exc_info()
            yield Failure(exc[0], exc[1], exc[2],
                          address=test_address(generator))
    return self.suiteClass(generate, context=generator, can_split=False)
def loadTestsFromGeneratorMethod(self, generator, cls):
    """Lazy-load tests from a generator method.

    This is more complicated than loading from a generator function,
    since a generator method may yield:

    * a function
    * a bound or unbound method, or
    * a method name
    """
    # convert the unbound generator method
    # into a bound method so it can be called below
    if hasattr(generator, 'im_class'):
        cls = generator.im_class
    inst = cls()
    method = generator.__name__
    generator = getattr(inst, method)

    def generate(g=generator, c=cls):
        try:
            for test in g():
                test_func, arg = self.parseGeneratedTest(test)
                if not callable(test_func):
                    # a method name was yielded; resolve it on the class
                    test_func = unbound_method(c, getattr(c, test_func))
                if ismethod(test_func):
                    yield MethodTestCase(test_func, arg=arg, descriptor=g)
                elif isfunction(test_func):
                    # In this case we're forcing the 'MethodTestCase'
                    # to run the inline function as its test call,
                    # but using the generator method as the 'method of
                    # record' (so no need to pass it as the descriptor)
                    yield MethodTestCase(g, test=test_func, arg=arg)
                else:
                    yield Failure(
                        TypeError,
                        "%s is not a function or method" % test_func)
        except KeyboardInterrupt:
            raise
        except:
            exc = sys.exc_info()
            yield Failure(exc[0], exc[1], exc[2],
                          address=test_address(generator))
    return self.suiteClass(generate, context=generator, can_split=False)
def loadTestsFromModule(self, module, path=None, discovered=False):
    """Load all tests from module and return a suite containing
    them. If the module has been discovered and is not test-like,
    the suite will be empty by default, though plugins may add
    their own tests.
    """
    log.debug("Load from module %s", module)
    tests = []
    test_classes = []
    test_funcs = []
    # For *discovered* modules, we only load tests when the module looks
    # testlike. For modules we've been directed to load, we always
    # look for tests. (discovered is set to True by loadTestsFromDir)
    if not discovered or self.selector.wantModule(module):
        for item in dir(module):
            test = getattr(module, item, None)
            # print "Check %s (%s) in %s" % (item, test, module.__name__)
            if isclass(test):
                if self.selector.wantClass(test):
                    test_classes.append(test)
            elif isfunction(test) and self.selector.wantFunction(test):
                test_funcs.append(test)
        sort_list(test_classes, lambda x: x.__name__)
        sort_list(test_funcs, func_lineno)
        # Build a real list: the original ``tests = map(...)`` produced a
        # map object on Python 3, and the ``tests.extend(...)`` /
        # ``tests.append(...)`` calls below would then raise
        # AttributeError.
        tests = [self.makeTest(t, parent=module)
                 for t in test_classes + test_funcs]

    # Now, descend into packages
    # FIXME can or should this be lazy?
    # is this syntax 2.2 compatible?
    module_paths = getattr(module, '__path__', [])
    if path:
        path = os.path.realpath(path)
    for module_path in module_paths:
        log.debug("Load tests from module path %s?", module_path)
        log.debug("path: %s os.path.realpath(%s): %s",
                  path, module_path, os.path.realpath(module_path))
        if (self.config.traverseNamespace or not path) or \
                os.path.realpath(module_path).startswith(path):
            tests.extend(self.loadTestsFromDir(module_path))

    for test in self.config.plugins.loadTestsFromModule(module, path):
        tests.append(test)

    return self.suiteClass(ContextList(tests, context=module))
def loadTestsFromName(self, name, module=None, discovered=False):
    """Load tests from the entity with the given name.

    The name may indicate a file, directory, module, or any object
    within a module. See `nose.util.split_test_name` for details on
    test name parsing.
    """
    # FIXME refactor this method into little bites?
    log.debug("load from %s (%s)", name, module)

    suite = self.suiteClass

    # give plugins first crack
    plug_tests = self.config.plugins.loadTestsFromName(name, module)
    if plug_tests:
        return suite(plug_tests)

    addr = TestAddress(name, workingDir=self.workingDir)
    if module:
        # Two cases:
        #  name is class.foo
        #    The addr will be incorrect, since it thinks class.foo is
        #    a dotted module name. It's actually a dotted attribute
        #    name. In this case we want to use the full submitted
        #    name as the name to load from the module.
        #  name is module:class.foo
        #    The addr will be correct. The part we want is the part after
        #    the :, which is in addr.call.
        if addr.call:
            name = addr.call
        parent, obj = self.resolve(name, module)
        if (isclass(parent)
                and getattr(parent, '__module__', None) != module.__name__
                and not isinstance(obj, Failure)):
            # the class was imported from elsewhere; transplant it so
            # its reported context is this module
            parent = transplant_class(parent, module.__name__)
            obj = getattr(parent, obj.__name__)
        log.debug("parent %s obj %s module %s", parent, obj, module)
        if isinstance(obj, Failure):
            return suite([obj])
        else:
            return suite(ContextList([self.makeTest(obj, parent)],
                                     context=parent))
    else:
        if addr.module:
            try:
                if addr.filename is None:
                    module = resolve_name(addr.module)
                else:
                    self.config.plugins.beforeImport(
                        addr.filename, addr.module)
                    # FIXME: to support module.name names,
                    # do what resolve-name does and keep trying to
                    # import, popping tail of module into addr.call,
                    # until we either get an import or run out of
                    # module parts
                    try:
                        module = self.importer.importFromPath(
                            addr.filename, addr.module)
                    finally:
                        self.config.plugins.afterImport(
                            addr.filename, addr.module)
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                # import errors become Failure cases addressed to the name
                exc = sys.exc_info()
                return suite([Failure(exc[0], exc[1], exc[2],
                                      address=addr.totuple())])
            if addr.call:
                return self.loadTestsFromName(addr.call, module)
            else:
                return self.loadTestsFromModule(
                    module, addr.filename,
                    discovered=discovered)
        elif addr.filename:
            path = addr.filename
            if addr.call:
                package = getpackage(path)
                if package is None:
                    return suite([
                        Failure(ValueError,
                                "Can't find callable %s in file %s: "
                                "file is not a python module" %
                                (addr.call, path),
                                address=addr.totuple())])
                return self.loadTestsFromName(addr.call, module=package)
            else:
                if op_isdir(path):
                    # In this case we *can* be lazy since we know
                    # that each module in the dir will be fully
                    # loaded before its tests are executed; we
                    # also know that we're not going to be asked
                    # to load from . and ./some_module.py *as part
                    # of this named test load*
                    return LazySuite(
                        lambda: self.loadTestsFromDir(path))
                elif op_isfile(path):
                    return self.loadTestsFromFile(path)
                else:
                    return suite([
                        Failure(OSError, "No such file %s" % path,
                                address=addr.totuple())])
        else:
            # just a function? what to do? I think it can only be
            # handled when module is not None
            return suite([
                Failure(ValueError, "Unresolvable test name %s" % name,
                        address=addr.totuple())])
def loadTestsFromNames(self, names, module=None):
    """Load tests from all names, returning a suite containing all
    tests. Plugins get first crack and may both contribute a suite
    and rewrite the name list.
    """
    plug_res = self.config.plugins.loadTestsFromNames(names, module)
    if plug_res:
        suite, names = plug_res
        if suite:
            rest = unittest.TestLoader.loadTestsFromNames(
                self, names, module)
            return self.suiteClass([self.suiteClass(suite), rest])
    return unittest.TestLoader.loadTestsFromNames(self, names, module)
def loadTestsFromTestCase(self, testCaseClass):
    """Load tests from a unittest.TestCase subclass.
    """
    cases = []
    plugins = self.config.plugins
    for case in plugins.loadTestsFromTestCase(testCaseClass):
        cases.append(case)
    # For efficiency in the most common case, just call and return from
    # super. This avoids having to extract cases and rebuild a context
    # suite when there are no plugin-contributed cases.
    if not cases:
        return super(TestLoader, self).loadTestsFromTestCase(testCaseClass)
    cases.extend(
        [case for case in
         super(TestLoader, self).loadTestsFromTestCase(testCaseClass)])
    return self.suiteClass(cases)
def loadTestsFromTestClass(self, cls):
    """Load tests from a test class that is *not* a unittest.TestCase
    subclass.

    In this case, we can't depend on the class's `__init__` taking method
    name arguments, so we have to compose a MethodTestCase for each
    method in the class that looks testlike.
    """
    def wanted(attr, cls=cls, sel=self.selector):
        item = getattr(cls, attr, None)
        if isfunction(item):
            item = unbound_method(cls, item)
        elif not ismethod(item):
            return False
        return sel.wantMethod(item)
    cases = [self.makeTest(getattr(cls, case), cls)
             for case in filter(wanted, dir(cls))]
    # plugins may contribute additional cases for this class
    for test in self.config.plugins.loadTestsFromTestClass(cls):
        cases.append(test)
    return self.suiteClass(ContextList(cases, context=cls))
def makeTest(self, obj, parent=None):
    """Build a test case or suite from *obj*; any error during
    construction is converted into a Failure test case rather than
    propagated (except KeyboardInterrupt/SystemExit).
    """
    try:
        return self._makeTest(obj, parent)
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        exc = sys.exc_info()
        # best-effort address lookup; the object may not be addressable
        try:
            addr = test_address(obj)
        except KeyboardInterrupt:
            raise
        except:
            addr = None
        return Failure(exc[0], exc[1], exc[2], address=addr)
def _makeTest(self, obj, parent=None):
    """Given a test object and its parent, return a test case
    or test suite.
    """
    plug_tests = []
    # best-effort address lookup for error reporting
    try:
        addr = test_address(obj)
    except KeyboardInterrupt:
        raise
    except:
        addr = None
    # plugins get first crack at turning obj into tests
    for test in self.config.plugins.makeTest(obj, parent):
        plug_tests.append(test)
    # TODO: is this try/except needed?
    try:
        if plug_tests:
            return self.suiteClass(plug_tests)
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        exc = sys.exc_info()
        return Failure(exc[0], exc[1], exc[2], address=addr)

    if isfunction(obj) and parent and not isinstance(parent, types.ModuleType):
        # This is a Python 3.x 'unbound method'.  Wrap it with its
        # associated class..
        obj = unbound_method(parent, obj)

    if isinstance(obj, unittest.TestCase):
        return obj
    elif isclass(obj):
        if parent and obj.__module__ != parent.__name__:
            obj = transplant_class(obj, parent.__name__)
        if issubclass(obj, unittest.TestCase):
            return self.loadTestsFromTestCase(obj)
        else:
            return self.loadTestsFromTestClass(obj)
    elif ismethod(obj):
        if parent is None:
            parent = obj.__class__
        if issubclass(parent, unittest.TestCase):
            # TestCase subclasses construct from the method name
            return parent(obj.__name__)
        else:
            if isgenerator(obj):
                return self.loadTestsFromGeneratorMethod(obj, parent)
            else:
                return MethodTestCase(obj)
    elif isfunction(obj):
        if parent and obj.__module__ != parent.__name__:
            obj = transplant_func(obj, parent.__name__)
        if isgenerator(obj):
            return self.loadTestsFromGenerator(obj, parent)
        else:
            return FunctionTestCase(obj)
    else:
        return Failure(TypeError,
                       "Can't make a test from %s" % obj,
                       address=addr)
def resolve(self, name, module):
    """Resolve a dotted *name* within *module*.

    Returns ``(parent, obj)`` where obj is the resolved attribute and
    parent is the object it was found on. Missing segments produce a
    Failure case in obj instead of raising.
    """
    obj = module
    for segment in name.split('.'):
        parent, obj = obj, getattr(obj, segment, None)
        if obj is None:
            # no such test
            obj = Failure(ValueError, "No such test %s" % name)
    return parent, obj
def parseGeneratedTest(self, test):
    """Given the yield value of a test generator, return a func and args.

    This is used in the two loadTestsFromGenerator* methods.
    """
    if not isinstance(test, tuple):
        # yield test
        return test, tuple()
    if len(test) == 1:
        # yield (test,)
        return test[0], tuple()
    # yield test, foo, bar, ...
    assert len(test) > 1  # sanity check
    return test[0], test[1:]
# NOTE: this alias is the class itself, not an instance -- callers
# instantiate it with their own config.
defaultTestLoader = TestLoader
| mpl-2.0 |
gjvis/splinter | tests/base.py | 6 | 7889 | # -*- coding: utf-8 -*-
# Copyright 2012 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from .async_finder import AsyncFinderTests
from .click_elements import ClickElementsTest
from .cookies import CookiesTest
from .element_does_not_exist import ElementDoestNotExistTest
from .fake_webapp import EXAMPLE_APP
from .find_elements import FindElementsTest
from .form_elements import FormElementsTest
from .iframes import IFrameElementsTest
from .element import ElementTest
from .is_element_present import IsElementPresentTest
from .is_element_visible import IsElementVisibleTest
from .is_text_present import IsTextPresentTest
from .mouse_interaction import MouseInteractionTest
from .status_code import StatusCodeTest
from .screenshot import ScreenshotTest
from .type import SlowlyTypeTest
from .popups import PopupWindowsTest
class BaseBrowserTests(ElementTest, FindElementsTest, FormElementsTest, ClickElementsTest,
                       CookiesTest, SlowlyTypeTest, IsTextPresentTest):
    # Driver-agnostic test suite composed from mixins; concrete subclasses
    # must create ``self.browser`` in their own setUp().

    def setUp(self):
        self.fail("You should set up your browser in the setUp() method")

    def test_can_open_page(self):
        "should be able to visit, get title and quit"
        title = self.browser.title
        self.assertEqual('Example Title', title)

    def test_can_back_on_history(self):
        "should be able to back on history"
        self.browser.visit("%s/iframe" % EXAMPLE_APP.rstrip('/'))
        self.browser.back()
        self.assertEqual(EXAMPLE_APP, self.browser.url)

    def test_can_forward_on_history(self):
        "should be able to forward history"
        url = "%s/iframe" % EXAMPLE_APP.rstrip('/')
        self.browser.visit(url)
        self.browser.back()
        self.browser.forward()
        self.assertEqual(url, self.browser.url)

    def test_should_have_html(self):
        "should have access to the html"
        html = self.browser.html
        assert '<title>Example Title</title>' in html
        assert '<h1 id="firstheader">Example Header</h1>' in html

    def test_should_reload_a_page(self):
        "should reload a page"
        title = self.browser.title
        self.browser.reload()
        self.assertEqual('Example Title', title)

    def test_should_have_url(self):
        "should have access to the url"
        self.assertEqual(EXAMPLE_APP, self.browser.url)

    def test_accessing_attributes_of_links(self):
        "should allow link's attributes retrieval"
        foo = self.browser.find_link_by_text('FOO')
        self.assertEqual('http://localhost:5000/foo', foo['href'])

    def test_accessing_attributes_of_inputs(self):
        "should allow input's attributes retrieval"
        button = self.browser.find_by_css('input[name="send"]')
        self.assertEqual('send', button['name'])

    def test_accessing_attributes_of_simple_elements(self):
        "should allow simple element's attributes retrieval"
        header = self.browser.find_by_css('h1')
        self.assertEqual('firstheader', header['id'])

    def test_links_should_have_value_attribute(self):
        foo = self.browser.find_link_by_href('http://localhost:5000/foo')
        self.assertEqual('FOO', foo.value)

    def test_should_receive_browser_on_parent(self):
        "element should contains the browser on \"parent\" attribute"
        element = self.browser.find_by_id("firstheader")
        self.assertEqual(self.browser, element.parent)
class WebDriverTests(BaseBrowserTests, IFrameElementsTest, ElementDoestNotExistTest, IsElementPresentTest,
                     IsElementVisibleTest, AsyncFinderTests, StatusCodeTest, MouseInteractionTest,
                     PopupWindowsTest, ScreenshotTest):
    # Adds webdriver-only capabilities to the base suite: javascript
    # execution/evaluation, element text/visibility, alerts and prompts.

    def test_can_execute_javascript(self):
        "should be able to execute javascript"
        self.browser.execute_script("$('body').empty()")
        self.assertEqual("", self.browser.find_by_tag("body").value)

    def test_can_evaluate_script(self):
        "should evaluate script"
        self.assertEqual(8, self.browser.evaluate_script("4+4"))

    def test_can_see_the_text_for_an_element(self):
        "should provide text for an element"
        self.assertEqual(self.browser.find_by_id("simple_text").text, "my test text")

    def test_the_text_for_an_element_strips_html_tags(self):
        "should show that the text attribute strips html"
        self.assertEqual(self.browser.find_by_id("text_with_html").text, "another bit of text")

    def test_can_verify_if_a_element_is_visible(self):
        "should provide verify if element is visible"
        self.assertTrue(self.browser.find_by_id("visible").visible)

    def test_can_verify_if_a_element_is_invisible(self):
        "should provide verify if element is invisible"
        self.assertFalse(self.browser.find_by_id("invisible").visible)

    def test_default_wait_time(self):
        "should driver default wait time 2"
        self.assertEqual(2, self.browser.wait_time)

    def test_access_alerts_and_accept_them(self):
        self.browser.visit(EXAMPLE_APP + 'alert')
        self.browser.find_by_tag('h1').click()
        alert = self.browser.get_alert()
        self.assertEqual('This is an alert example.', alert.text)
        alert.accept()

    def test_access_prompts_and_be_able_to_fill_then(self):
        self.browser.visit(EXAMPLE_APP + 'alert')
        self.browser.find_by_tag('h2').click()
        alert = self.browser.get_alert()
        self.assertEqual('What is your name?', alert.text)
        alert.fill_with('Splinter')
        alert.accept()
        # the page echoes the prompt answer back in a second alert
        response = self.browser.get_alert()
        self.assertEqual('Splinter', response.text)
        response.accept()

    def test_access_confirm_and_accept_and_dismiss_them(self):
        self.browser.visit(EXAMPLE_APP + 'alert')
        self.browser.find_by_tag('h3').click()
        alert = self.browser.get_alert()
        self.assertEqual('Should I continue?', alert.text)
        alert.accept()
        alert = self.browser.get_alert()
        self.assertEqual('You say I should', alert.text)
        alert.accept()
        self.browser.find_by_tag('h3').click()
        alert = self.browser.get_alert()
        self.assertEqual('Should I continue?', alert.text)
        alert.dismiss()
        alert = self.browser.get_alert()
        self.assertEqual('You say I should not', alert.text)
        alert.accept()

    def test_access_confirm_and_accept_and_dismiss_them_using_with(self):
        self.browser.visit(EXAMPLE_APP + 'alert')
        self.browser.find_by_tag('h3').click()
        with self.browser.get_alert() as alert:
            self.assertEqual('Should I continue?', alert.text)
            alert.accept()
        with self.browser.get_alert() as alert:
            self.assertEqual('You say I should', alert.text)
            alert.accept()
        self.browser.find_by_tag('h3').click()
        with self.browser.get_alert() as alert:
            self.assertEqual('Should I continue?', alert.text)
            alert.dismiss()
        with self.browser.get_alert() as alert:
            self.assertEqual('You say I should not', alert.text)
            alert.accept()

    def test_access_alerts_using_with(self):
        "should access alerts using 'with' statement"
        self.browser.visit(EXAMPLE_APP + 'alert')
        self.browser.find_by_tag('h1').click()
        with self.browser.get_alert() as alert:
            self.assertEqual('This is an alert example.', alert.text)
            alert.accept()

    def test_can_select_a_option_via_element_text(self):
        "should provide a way to select a option via element"
        self.assertFalse(self.browser.find_option_by_value("rj").selected)
        self.browser.find_by_name("uf").select_by_text("Rio de Janeiro")
        self.assertTrue(self.browser.find_option_by_value("rj").selected)
| bsd-3-clause |
njantrania/osf.io | website/addons/base/views.py | 2 | 20027 | import os
import uuid
import httplib
import datetime
import jwe
import jwt
import furl
from flask import request
from flask import redirect
from flask import make_response
from modularodm.exceptions import NoResultsFound
from framework import sentry
from framework.auth import cas
from framework.auth import Auth
from framework.auth import oauth_scopes
from framework.routing import json_renderer
from framework.sentry import log_exception
from framework.exceptions import HTTPError
from framework.transactions.context import TokuTransaction
from framework.transactions.handlers import no_auto_transaction
from framework.auth.decorators import must_be_logged_in, must_be_signed, collect_auth
from website import mails
from website import settings
from website.files.models import FileNode
from website.files.models import TrashedFileNode
from website.project import decorators
from website.addons.base import exceptions
from website.addons.base import signals as file_signals
from website.addons.base import StorageAddonBase
from website.models import User, Node, NodeLog
from website.util import rubeus
from website.profile.utils import get_gravatar
from website.project.decorators import must_be_valid_project, must_be_contributor_or_public
from website.project.utils import serialize_node
# import so that associated listener is instantiated and gets emails
from website.notifications.events.files import FileEvent # noqa
# HTML fragment rendered when a file link is no longer valid; the inline
# <style> hides the download/share/delete buttons for the missing file.
FILE_GONE_ERROR_MESSAGE = u'''
<style>
.file-download{{display: none;}}
.file-share{{display: none;}}
.file-delete{{display: none;}}
</style>
<div class="alert alert-info" role="alert">
This link to the file "{file_name}" is no longer valid.
</div>'''

# Symmetric key derived from settings; used to encrypt/decrypt the JWE
# payloads exchanged with waterbutler (see get_auth below).
WATERBUTLER_JWE_KEY = jwe.kdf(settings.WATERBUTLER_JWE_SECRET.encode('utf-8'), settings.WATERBUTLER_JWE_SALT.encode('utf-8'))
@decorators.must_have_permission('write')
@decorators.must_not_be_registration
def disable_addon(auth, **kwargs):
    """Remove the named addon from a node.

    Expects 'node'/'project' and 'addon' in kwargs (supplied by the URL
    routing decorators); raises HTTPError(400) when no addon name is given.
    Returns {'deleted': bool} reflecting whether the addon was removed.
    """
    node = kwargs['node'] or kwargs['project']

    addon_name = kwargs.get('addon')
    if addon_name is None:
        raise HTTPError(httplib.BAD_REQUEST)

    deleted = node.delete_addon(addon_name, auth)

    return {'deleted': deleted}
@must_be_logged_in
def get_addon_user_config(**kwargs):
    """Serialize the logged-in user's settings for the addon named in kwargs.

    Raises HTTPError(400) when no addon name is supplied or when the user
    has no settings object for that addon.
    """
    current_user = kwargs['auth'].user

    addon_name = kwargs.get('addon')
    if addon_name is None:
        raise HTTPError(httplib.BAD_REQUEST)

    user_addon = current_user.get_addon(addon_name)
    if user_addon is None:
        raise HTTPError(httplib.BAD_REQUEST)

    return user_addon.to_json(current_user)
# Maps each waterbutler action name to the minimum node permission
# ('read' or 'write') it requires; consumed by check_access() below.
permission_map = {
    'create_folder': 'write',
    'revisions': 'read',
    'metadata': 'read',
    'download': 'read',
    'upload': 'write',
    'delete': 'write',
    'copy': 'write',
    'move': 'write',
    'copyto': 'write',
    'moveto': 'write',
    'copyfrom': 'read',
    'movefrom': 'write',
}
def check_access(node, auth, action, cas_resp):
    """Verify that user can perform requested action on resource. Raise appropriate
    error code if action cannot proceed.
    """
    permission = permission_map.get(action, None)
    if permission is None:
        # unknown action name
        raise HTTPError(httplib.BAD_REQUEST)

    if cas_resp:
        if permission == 'read':
            if node.is_public:
                return True
            required_scope = oauth_scopes.CoreScopes.NODE_FILE_READ
        else:
            required_scope = oauth_scopes.CoreScopes.NODE_FILE_WRITE
        # the OAuth token must carry the required scope
        if not cas_resp.authenticated \
           or required_scope not in oauth_scopes.normalize_scopes(cas_resp.attributes['accessTokenScope']):
            raise HTTPError(httplib.FORBIDDEN)

    if permission == 'read' and node.can_view(auth):
        return True
    if permission == 'write' and node.can_edit(auth):
        return True

    # Users attempting to register projects with components might not have
    # `write` permissions for all components. This will result in a 403 for
    # all `copyto` actions as well as `copyfrom` actions if the component
    # in question is not public. To get around this, we have to recursively
    # check the node's parent node to determine if they have `write`
    # permissions up the stack.
    # TODO(hrybacki): is there a way to tell if this is for a registration?
    # All nodes being registered that receive the `copyto` action will have
    # `node.is_registration` == True. However, we have no way of telling if
    # `copyfrom` actions are originating from a node being registered.
    # TODO This is raise UNAUTHORIZED for registrations that have not been archived yet
    if action == 'copyfrom' or (action == 'copyto' and node.is_registration):
        parent = node.parent_node
        while parent:
            if parent.can_edit(auth):
                return True
            parent = parent.parent_node

    # 403 for logged-in users, 401 for anonymous
    raise HTTPError(httplib.FORBIDDEN if auth.user else httplib.UNAUTHORIZED)
def make_auth(user):
    """Serialize *user* into the auth dict expected by waterbutler.

    Returns an empty dict for an anonymous (None) user.
    """
    if user is None:
        return {}
    return {
        'id': user._id,
        'email': '{}@osf.io'.format(user._id),
        'name': user.fullname,
    }
@collect_auth
def get_auth(auth, **kwargs):
    """Waterbutler authentication endpoint.

    Decrypts and validates the JWE/JWT payload sent by waterbutler, resolves
    the acting user (existing session, OAuth bearer token, or forwarded
    cookie), verifies the user may perform the requested action on the node,
    and returns an encrypted JWT payload with the provider credentials,
    provider settings, and the log callback URL.

    :raises HTTPError: 403 for bad/expired payloads, 400 for malformed data
        or unconfigured providers, 404 for unknown nodes
    """
    cas_resp = None
    if not auth.user:
        # Central Authentication Server OAuth Bearer Token
        authorization = request.headers.get('Authorization')
        if authorization and authorization.startswith('Bearer '):
            client = cas.get_client()
            try:
                access_token = cas.parse_auth_header(authorization)
                cas_resp = client.profile(access_token)
            except cas.CasError as err:
                sentry.log_exception()
                # NOTE: We assume that the request is an AJAX request
                return json_renderer(err)
            if cas_resp.authenticated:
                auth.user = User.load(cas_resp.user)
    try:
        # The payload is JWE-encrypted then JWT-signed; 'exp' is required so
        # replayed/stale payloads are rejected.
        data = jwt.decode(
            jwe.decrypt(request.args.get('payload', '').encode('utf-8'), WATERBUTLER_JWE_KEY),
            settings.WATERBUTLER_JWT_SECRET,
            options={'require_exp': True},
            algorithm=settings.WATERBUTLER_JWT_ALGORITHM
        )['data']
    except (jwt.InvalidTokenError, KeyError):
        raise HTTPError(httplib.FORBIDDEN)
    if not auth.user:
        # Fall back to the OSF session cookie forwarded inside the payload.
        auth.user = User.from_cookie(data.get('cookie', ''))
    try:
        action = data['action']
        node_id = data['nid']
        provider_name = data['provider']
    except KeyError:
        raise HTTPError(httplib.BAD_REQUEST)
    node = Node.load(node_id)
    if not node:
        raise HTTPError(httplib.NOT_FOUND)
    # Raises (403/401/400) when the user may not perform `action` on `node`.
    check_access(node, auth, action, cas_resp)
    provider_settings = node.get_addon(provider_name)
    if not provider_settings:
        raise HTTPError(httplib.BAD_REQUEST)
    try:
        credentials = provider_settings.serialize_waterbutler_credentials()
        waterbutler_settings = provider_settings.serialize_waterbutler_settings()
    except exceptions.AddonError:
        log_exception()
        raise HTTPError(httplib.BAD_REQUEST)
    # Return an encrypted, signed, short-lived payload for waterbutler.
    return {'payload': jwe.encrypt(jwt.encode({
        'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=settings.WATERBUTLER_JWT_EXPIRATION),
        'data': {
            'auth': make_auth(auth.user),  # A waterbutler auth dict not an Auth object
            'credentials': credentials,
            'settings': waterbutler_settings,
            'callback_url': node.api_url_for(
                ('create_waterbutler_log' if not node.is_registration else 'registration_callbacks'),
                _absolute=True,
            ),
        }
    }, settings.WATERBUTLER_JWT_SECRET, algorithm=settings.WATERBUTLER_JWT_ALGORITHM), WATERBUTLER_JWE_KEY)}
# Map waterbutler callback action names to NodeLog action constants.
# Unknown actions cause create_waterbutler_log to respond with 400.
LOG_ACTION_MAP = {
    'move': NodeLog.FILE_MOVED,
    'copy': NodeLog.FILE_COPIED,
    'rename': NodeLog.FILE_RENAMED,
    'create': NodeLog.FILE_ADDED,
    'update': NodeLog.FILE_UPDATED,
    'delete': NodeLog.FILE_REMOVED,
    'create_folder': NodeLog.FOLDER_CREATED,
}
@must_be_signed
@no_auto_transaction
@must_be_valid_project
def create_waterbutler_log(payload, **kwargs):
    """Record a waterbutler file action in the node's log and notify the user.

    Called by waterbutler after it performs a file operation. For move/copy
    actions, enriches the payload with source/destination metadata, adds a
    node log entry, and optionally emails the user; for all other actions,
    delegates logging to the provider add-on. Finally fires the
    ``file_updated`` signal in a separate transaction.

    :param payload: signed waterbutler callback payload; must contain
        ``auth`` and a known ``action``
    :return: ``{'status': 'success'}``
    :raises HTTPError: 400 for malformed payloads or unknown providers
    """
    with TokuTransaction():
        try:
            auth = payload['auth']
            action = LOG_ACTION_MAP[payload['action']]
        except KeyError:
            raise HTTPError(httplib.BAD_REQUEST)
        user = User.load(auth['id'])
        if user is None:
            raise HTTPError(httplib.BAD_REQUEST)
        auth = Auth(user=user)
        node = kwargs['node'] or kwargs['project']
        if action in (NodeLog.FILE_MOVED, NodeLog.FILE_COPIED):
            # Both bundles must be fully described before we touch them.
            for bundle in ('source', 'destination'):
                for key in ('provider', 'materialized', 'name', 'nid'):
                    if key not in payload[bundle]:
                        raise HTTPError(httplib.BAD_REQUEST)
            dest = payload['destination']
            src = payload['source']
            if src is not None and dest is not None:
                dest_path = dest['materialized']
                src_path = src['materialized']
                if dest_path.endswith('/') and src_path.endswith('/'):
                    dest_path = os.path.dirname(dest_path)
                    src_path = os.path.dirname(src_path)
                # A "move" within the same folder/provider/node that only
                # changes the name is really a rename.
                if (
                    os.path.split(dest_path)[0] == os.path.split(src_path)[0] and
                    dest['provider'] == src['provider'] and
                    dest['nid'] == src['nid'] and
                    dest['name'] != src['name']
                ):
                    action = LOG_ACTION_MAP['rename']
            destination_node = node  # For clarity
            source_node = Node.load(payload['source']['nid'])
            source = source_node.get_addon(payload['source']['provider'])
            destination = node.get_addon(payload['destination']['provider'])
            payload['source'].update({
                'materialized': payload['source']['materialized'].lstrip('/'),
                'addon': source.config.full_name,
                'url': source_node.web_url_for(
                    'addon_view_or_download_file',
                    path=payload['source']['path'].lstrip('/'),
                    provider=payload['source']['provider']
                ),
                'node': {
                    '_id': source_node._id,
                    'url': source_node.url,
                    'title': source_node.title,
                }
            })
            payload['destination'].update({
                'materialized': payload['destination']['materialized'].lstrip('/'),
                'addon': destination.config.full_name,
                'url': destination_node.web_url_for(
                    'addon_view_or_download_file',
                    path=payload['destination']['path'].lstrip('/'),
                    provider=payload['destination']['provider']
                ),
                'node': {
                    '_id': destination_node._id,
                    'url': destination_node.url,
                    'title': destination_node.title,
                }
            })
            payload.update({
                'node': destination_node._id,
                'project': destination_node.parent_id,
            })
            if not payload.get('errors'):
                destination_node.add_log(
                    action=action,
                    auth=auth,
                    params=payload
                )
            if payload.get('email') is True or payload.get('errors'):
                mails.send_mail(
                    user.username,
                    mails.FILE_OPERATION_FAILED if payload.get('errors')
                    else mails.FILE_OPERATION_SUCCESS,
                    action=payload['action'],
                    source_node=source_node,
                    destination_node=destination_node,
                    source_path=payload['source']['path'],
                    # BUG FIX: previously sent the *source* path as the
                    # destination path, so operation emails showed the wrong
                    # destination.
                    destination_path=payload['destination']['path'],
                    source_addon=payload['source']['addon'],
                    destination_addon=payload['destination']['addon'],
                )
            # NOTE(review): the checks above read the plural 'errors' key but
            # this one reads 'error' — confirm which key waterbutler sends.
            if payload.get('error'):
                # Action failed but our function succeeded
                # Bail out to avoid file_signals
                return {'status': 'success'}
        else:
            try:
                metadata = payload['metadata']
                node_addon = node.get_addon(payload['provider'])
            except KeyError:
                raise HTTPError(httplib.BAD_REQUEST)
            if node_addon is None:
                raise HTTPError(httplib.BAD_REQUEST)
            metadata['path'] = metadata['path'].lstrip('/')
            node_addon.create_waterbutler_log(auth, action, metadata)
    # Fire signals outside the logging transaction so listeners see the
    # committed log entry.
    with TokuTransaction():
        file_signals.file_updated.send(node=node, user=user, event_type=action, payload=payload)
    return {'status': 'success'}
@must_be_valid_project
def addon_view_or_download_file_legacy(**kwargs):
    """Permanently redirect legacy file URLs to the canonical addon file view.

    Accepts old-style routes carrying ``path``/``fid`` (and optionally
    ``vid`` for a version) and issues a 301 to
    ``addon_view_or_download_file`` with equivalent parameters.

    :raises HTTPError: 400 when neither a path nor a file id is supplied,
        404 when an osfstorage file cannot be found
    """
    query_params = request.args.to_dict()
    node = kwargs.get('node') or kwargs['project']
    action = query_params.pop('action', 'view')
    provider = kwargs.get('provider', 'osfstorage')
    if kwargs.get('path'):
        path = kwargs['path']
    elif kwargs.get('fid'):
        path = kwargs['fid']
    else:
        # BUG FIX: previously `path` was left unbound here, so a request with
        # neither 'path' nor 'fid' raised NameError (HTTP 500). Fail fast
        # with a 400 instead.
        raise HTTPError(httplib.BAD_REQUEST)
    if 'download' in request.path or request.path.startswith('/api/v1/'):
        action = 'download'
    if kwargs.get('vid'):
        query_params['version'] = kwargs['vid']
    # If provider is OSFstorage, check existence of requested file in the filetree
    # This prevents invalid GUIDs from being created
    if provider == 'osfstorage':
        node_settings = node.get_addon('osfstorage')
        try:
            path = node_settings.get_root().find_child_by_name(path)._id
        except NoResultsFound:
            raise HTTPError(
                404, data=dict(
                    message_short='File not found',
                    message_long='You requested a file that does not exist.'
                )
            )
    return redirect(
        node.web_url_for(
            'addon_view_or_download_file',
            path=path,
            provider=provider,
            action=action,
            **query_params
        ),
        code=httplib.MOVED_PERMANENTLY
    )
@must_be_valid_project
@must_be_contributor_or_public
def addon_deleted_file(auth, node, **kwargs):
    """Render a friendly HTTP 410 page for a file that has been deleted.

    :param auth: Auth object for the current user
    :param node: the project/component that owned the file
    :return: (template context dict, httplib.GONE)
    :raises HTTPError: 404 when no trashed record can be found
    """
    # Other views may delegate here with the TrashedFileNode already in hand;
    # otherwise look it up by id.
    trashed = kwargs.get('file_node') or TrashedFileNode.load(kwargs.get('trashed_id'))
    if not trashed:
        raise HTTPError(httplib.NOT_FOUND, {
            'message_short': 'Not Found',
            'message_long': 'This file does not exist'
        })

    # No render/sharejs URLs: the file is gone, so only static context is
    # provided to the template.
    deleted_file_urls = {
        'render': None,
        'sharejs': None,
        'mfr': settings.MFR_SERVER_URL,
        'gravatar': get_gravatar(auth.user, 25),
        'files': node.web_url_for('collect_file_trees'),
    }

    ret = serialize_node(node, auth, primary=True)
    ret.update(rubeus.collect_addon_assets(node))
    ret.update({
        'urls': deleted_file_urls,
        'extra': {},
        'size': 9966699,  # Prevent file from being edited, just in case
        'sharejs_uuid': None,
        'file_name': trashed.name,
        'file_path': trashed.path,
        'provider': trashed.provider,
        'materialized_path': trashed.materialized_path,
        'error': FILE_GONE_ERROR_MESSAGE.format(file_name=trashed.name),
        'private': getattr(node.get_addon(trashed.provider), 'is_private', False),
    })
    return ret, httplib.GONE
@must_be_valid_project
@must_be_contributor_or_public
def addon_view_or_download_file(auth, path, provider, **kwargs):
    """View or download a file stored in one of the node's storage add-ons.

    Validates the add-on's state, resolves (or creates) the FileNode for
    ``path``, then either responds to HEAD with the waterbutler location,
    redirects to a waterbutler download URL, redirects to the file's
    canonical GUID URL, or renders the file-detail page.

    :raises HTTPError: 400 for missing path / broken add-on config, 401 for
        a deauthorized add-on, 404 when the file no longer exists
    """
    extras = request.args.to_dict()
    extras.pop('_', None)  # Clean up our url params a bit
    action = extras.get('action', 'view')
    node = kwargs.get('node') or kwargs['project']
    node_addon = node.get_addon(provider)
    if not path:
        raise HTTPError(httplib.BAD_REQUEST)
    if not isinstance(node_addon, StorageAddonBase):
        raise HTTPError(httplib.BAD_REQUEST, {
            'message_short': 'Bad Request',
            'message_long': 'The add-on containing this file is no longer connected to the {}.'.format(node.project_or_component)
        })
    if not node_addon.has_auth:
        raise HTTPError(httplib.UNAUTHORIZED, {
            'message_short': 'Unauthorized',
            'message_long': 'The add-on containing this file is no longer authorized.'
        })
    if not node_addon.complete:
        raise HTTPError(httplib.BAD_REQUEST, {
            'message_short': 'Bad Request',
            'message_long': 'The add-on containing this file is no longer configured.'
        })
    file_node = FileNode.resolve_class(provider, FileNode.FILE).get_or_create(node, path)
    # Note: Cookie is provided for authentication to waterbutler
    # it is overriden to force authentication as the current user
    # the auth header is also pass to support basic auth
    version = file_node.touch(
        request.headers.get('Authorization'),
        **dict(
            extras,
            cookie=request.cookies.get(settings.COOKIE_NAME)
        )
    )
    if version is None:
        if file_node.get_guid():
            # If this file has been successfully view before but no longer exists
            # Show a nice error message
            return addon_deleted_file(file_node=file_node, **kwargs)
        raise HTTPError(httplib.NOT_FOUND, {
            'message_short': 'Not Found',
            'message_long': 'This file does not exist'
        })
    # TODO clean up these urls and unify what is used as a version identifier
    if request.method == 'HEAD':
        return make_response(('', 200, {
            'Location': file_node.generate_waterbutler_url(**dict(extras, direct=None, version=version.identifier))
        }))
    if action == 'download':
        return redirect(file_node.generate_waterbutler_url(**dict(extras, direct=None, version=version.identifier)))
    # More than one path segment means this is not already a GUID URL;
    # redirect to the canonical /<guid>/ URL (creating a GUID if needed).
    if len(request.path.strip('/').split('/')) > 1:
        guid = file_node.get_guid(create=True)
        return redirect(furl.furl('/{}/'.format(guid._id)).set(args=extras).url)
    return addon_view_file(auth, node, file_node, version)
def addon_view_file(auth, node, file_node, version):
    """Build the template context for the file-detail page.

    :param auth: Auth object for the current user
    :param node: the project/component that owns the file
    :param file_node: the FileNode being viewed
    :param version: file version to render; may be a ``(version, error)``
        tuple when the lookup reported an error
    :return: serialized node context dict for the file-detail template
    """
    # TODO: resolve circular import issue
    from website.addons.wiki import settings as wiki_settings
    if isinstance(version, tuple):
        version, error = version
        error = error.replace('\n', '').strip()
    else:
        error = None
    ret = serialize_node(node, auth, primary=True)
    # Lazily assign (and persist) a ShareJS collaboration UUID for this file
    # so concurrent editors join the same session.
    if file_node._id not in node.file_guid_to_share_uuids:
        node.file_guid_to_share_uuids[file_node._id] = uuid.uuid4()
        node.save()
    # Only users who can edit get the ShareJS session id.
    if ret['user']['can_edit']:
        sharejs_uuid = str(node.file_guid_to_share_uuids[file_node._id])
    else:
        sharejs_uuid = None
    # Build an MFR render URL wrapping a direct-download URL for this request.
    download_url = furl.furl(request.url.encode('utf-8')).set(args=dict(request.args, **{
        'direct': None,
        'mode': 'render',
        'action': 'download',
    }))
    render_url = furl.furl(settings.MFR_SERVER_URL).set(
        path=['render'],
        args={'url': download_url.url}
    )
    ret.update({
        'urls': {
            'render': render_url.url,
            'mfr': settings.MFR_SERVER_URL,
            'sharejs': wiki_settings.SHAREJS_URL,
            'gravatar': get_gravatar(auth.user, 25),
            'files': node.web_url_for('collect_file_trees'),
        },
        'error': error,
        'file_name': file_node.name,
        'file_path': file_node.path,
        'sharejs_uuid': sharejs_uuid,
        'provider': file_node.provider,
        'materialized_path': file_node.materialized_path,
        'extra': version.metadata.get('extra', {}),
        # Fall back to the same sentinel size used by addon_deleted_file when
        # the version has no size.
        'size': version.size if version.size is not None else 9966699,
        'private': getattr(node.get_addon(file_node.provider), 'is_private', False),
    })
    ret.update(rubeus.collect_addon_assets(node))
    return ret
| apache-2.0 |
boundlessgeo/QGIS | tests/src/python/test_qgssinglesymbolrenderer.py | 17 | 3133 | # -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgssinglesymbolrenderer.py
---------------------
Date : December 2015
Copyright : (C) 2015 by Matthias Kuhn
Email : matthias at opengis dot ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Matthias Kuhn'
__date__ = 'December 2015'
__copyright__ = '(C) 2015, Matthias Kuhn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
from qgis.PyQt.QtCore import QSize
from qgis.core import (QgsVectorLayer,
QgsProject,
QgsRectangle,
QgsMultiRenderChecker,
QgsSingleSymbolRenderer,
QgsFillSymbol,
QgsFeatureRequest
)
from qgis.testing import unittest
from qgis.testing.mocked import get_iface
from utilities import unitTestDataPath
TEST_DATA_DIR = unitTestDataPath()
class TestQgsSingleSymbolRenderer(unittest.TestCase):
    """Rendering tests for QgsSingleSymbolRenderer, focused on orderBy support."""

    def setUp(self):
        """Load the overlapping-polygons fixture and attach a single-symbol renderer."""
        self.iface = get_iface()
        myShpFile = os.path.join(TEST_DATA_DIR, 'polys_overlapping.shp')
        layer = QgsVectorLayer(myShpFile, 'Polys', 'ogr')
        QgsProject.instance().addMapLayer(layer)
        # Single fill symbol used for every feature
        sym1 = QgsFillSymbol.createSimple({'color': '#fdbf6f', 'outline_color': 'black'})
        self.renderer = QgsSingleSymbolRenderer(sym1)
        layer.setRenderer(self.renderer)
        rendered_layers = [layer]
        # Fixed-size map settings so the rendered output matches the control images
        self.mapsettings = self.iface.mapCanvas().mapSettings()
        self.mapsettings.setOutputSize(QSize(400, 400))
        self.mapsettings.setOutputDpi(96)
        self.mapsettings.setExtent(QgsRectangle(-163, 22, -70, 52))
        self.mapsettings.setLayers(rendered_layers)

    def testOrderBy(self):
        """Rendering should honour the orderBy clause when enabled and ignore it when disabled."""
        # Draw features ordered by the 'Value' attribute, descending
        self.renderer.setOrderBy(QgsFeatureRequest.OrderBy([QgsFeatureRequest.OrderByClause('Value', False)]))
        self.renderer.setOrderByEnabled(True)
        # Setup rendering check
        renderchecker = QgsMultiRenderChecker()
        renderchecker.setMapSettings(self.mapsettings)
        renderchecker.setControlName('expected_singlesymbol_orderby')
        self.assertTrue(renderchecker.runTest('singlesymbol_orderby'))
        # disable order by and retest
        self.renderer.setOrderByEnabled(False)
        # NOTE(review): the control name is still 'expected_singlesymbol_orderby'
        # here — confirm the 'single' run is compared against the intended image.
        self.assertTrue(renderchecker.runTest('single'))
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
cloudera/hue | desktop/core/ext-py/phoenixdb-1.1.0.dev0/setup.py | 3 | 3121 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from setuptools import setup, find_packages
import setuptools
import sys
cmdclass = {}
# Register the Sphinx 'build_sphinx' command only when Sphinx is available;
# documentation builds are optional.
try:
    from sphinx.setup_command import BuildDoc
    cmdclass['build_sphinx'] = BuildDoc
except ImportError:
    pass
def readme():
    """Return the contents of README.rst, used as the long description."""
    with open('README.rst') as readme_file:
        return readme_file.read()
# Choose dependency lists based on the installed setuptools version.
# NOTE(review): this is a lexicographic *string* comparison, not a version
# comparison — e.g. '9.1' < '20.8.1' is False as strings. Confirm whether a
# real version parse should be used here.
if setuptools.__version__ < '20.8.1':
    # Workaround for source install on old setuptools
    # This won't be able to create a proper multi-version package
    install_requires = [
        'protobuf>=3.0.0',
        'requests',
        'requests-gssapi',
        'SQLAlchemy'
    ]
    # Old setuptools cannot evaluate environment markers, so append the
    # Python-version-conditional dependency manually.
    if sys.version_info < (3, 6):
        install_requires.append('gssapi<1.6.0')
    # Don't build the docs on an old stack
    setup_requires = []
else:
    install_requires = [
        'protobuf>=3.0.0',
        'requests',
        'requests-gssapi',
        'gssapi<1.6.0;python_version<"3.6"',
        'SQLAlchemy'
    ]
    # BUG FIX: a trailing comma after the closing bracket previously made
    # this a one-element *tuple* containing the list, which is not a valid
    # value for setup(setup_requires=...).
    setup_requires = [
        'Sphinx;python_version>="3.6"',
    ]

version = "1.1.0.dev0"
# Package metadata and build configuration for the phoenixdb distribution.
setup(
    name="phoenixdb",
    version=version,
    description="Phoenix database adapter for Python",
    long_description=readme(),
    author="Apache Software Foundation",
    author_email="dev@phoenix.apache.org",
    url="http://phoenix.apache.org/python.html",
    license="Apache 2",
    packages=find_packages(),
    include_package_data=True,
    # Optional Sphinx command registered above (empty dict when Sphinx is absent).
    cmdclass=cmdclass,
    command_options={
        'build_sphinx': {
            'version': ('setup.py', version),
            'release': ('setup.py', version),
        },
    },
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    # install_requires / setup_requires chosen above based on setuptools version.
    install_requires=install_requires,
    extras_require={
        'SQLAlchemy': ['SQLAlchemy'],
    },
    tests_require=[
        'SQLAlchemy',
        'nose',
        'flake8'
    ],
    setup_requires=setup_requires,
    # Register the SQLAlchemy dialect entry point (phoenix:// URLs).
    entry_points={
        "sqlalchemy.dialects": [
            "phoenix = phoenixdb.sqlalchemy_phoenix:PhoenixDialect"
        ]
    },
)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.