repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
refeed/coala-bears | bears/general/LineCountBear.py | 3 | 2410 | import re
import logging
from coalib.bears.LocalBear import LocalBear
from coalib.results.Result import Result
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
class LineCountBear(LocalBear):
    """Report files whose total line count falls outside a configured range."""

    LANGUAGES = {'All'}
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
    LICENSE = 'AGPL-3.0'
    CAN_DETECT = {'Formatting'}

    def _get_blank_line_count(self, file):
        """Return how many lines in ``file`` consist only of whitespace."""
        return sum(1 for line in file if re.match(r'^\s*$', line))

    def run(self, filename, file, min_lines_per_file: int = 1,
            max_lines_per_file: int = 1000,
            exclude_blank_lines: bool = False,
            ):
        """
        Count the number of lines in a file and ensure that they lie within
        the range of given sizes.

        :param min_lines_per_file: Minimum number of lines required per file.
        :param max_lines_per_file: Maximum number of lines allowed per file.
        :param exclude_blank_lines: ``True`` if blank lines are to be excluded.
        """
        # A contradictory configuration is reported and nothing is yielded.
        if min_lines_per_file > max_lines_per_file:
            logging.error('Allowed maximum lines per file ({}) is smaller '
                          'than minimum lines per file ({})'
                          .format(max_lines_per_file,
                                  min_lines_per_file))
            return

        file_length = len(file)
        if exclude_blank_lines:
            file_length -= self._get_blank_line_count(file)

        if file_length > max_lines_per_file:
            yield Result.from_values(
                origin=self,
                message=('This file had {count} lines, which is {extra} '
                         'lines more than the maximum limit specified.'
                         .format(count=file_length,
                                 extra=file_length - max_lines_per_file)),
                severity=RESULT_SEVERITY.NORMAL,
                file=filename)
        elif file_length < min_lines_per_file:
            yield Result.from_values(
                origin=self,
                message=('This file has {} lines, while {} lines are '
                         'required.'
                         .format(file_length,
                                 min_lines_per_file)),
                file=filename)
| agpl-3.0 |
ashemedai/ansible | lib/ansible/modules/cloud/webfaction/webfaction_mailbox.py | 63 | 4510 | #!/usr/bin/python
#
# Create webfaction mailbox using Ansible and the Webfaction API
#
# ------------------------------------------
# (c) Quentin Stafford-Fraser and Andy Baker 2015
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: webfaction_mailbox
short_description: Add or remove mailboxes on Webfaction
description:
- Add or remove mailboxes on a Webfaction account. Further documentation at http://github.com/quentinsf/ansible-webfaction.
author: Quentin Stafford-Fraser (@quentinsf)
version_added: "2.0"
notes:
- >
You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
your host, you may want to add C(serial: 1) to the plays.
- See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
options:
mailbox_name:
description:
- The name of the mailbox
required: true
mailbox_password:
description:
- The password for the mailbox
required: true
default: null
state:
description:
- Whether the mailbox should exist
required: false
choices: ['present', 'absent']
default: "present"
login_name:
description:
- The webfaction account to use
required: true
login_password:
description:
- The webfaction password to use
required: true
'''
EXAMPLES = '''
- name: Create a mailbox
webfaction_mailbox:
mailbox_name="mybox"
mailbox_password="myboxpw"
state=present
login_name={{webfaction_user}}
login_password={{webfaction_passwd}}
'''
import socket
import xmlrpclib
# Module-level XML-RPC client for the Webfaction control-panel API,
# shared by every call made in main().
webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
def main():
    """Ansible entry point: ensure a Webfaction mailbox is present or absent.

    Terminates via module.exit_json()/fail_json(); never returns normally.
    """
    module = AnsibleModule(
        argument_spec=dict(
            mailbox_name=dict(required=True),
            # NOTE(review): mailbox_password is collected (and kept out of
            # logs) but never passed to create_mailbox below -- confirm
            # against the Webfaction API whether it should be applied.
            mailbox_password=dict(required=True, no_log=True),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            login_name=dict(required=True),
            login_password=dict(required=True, no_log=True),
        ),
        supports_check_mode=True
    )

    mailbox_name = module.params['mailbox_name']
    site_state = module.params['state']

    # Authenticate once; session_id is reused for all subsequent API calls.
    session_id, account = webfaction.login(
        module.params['login_name'],
        module.params['login_password']
    )

    mailbox_list = [x['mailbox'] for x in webfaction.list_mailboxes(session_id)]
    existing_mailbox = mailbox_name in mailbox_list

    result = {}

    # Here's where the real stuff happens

    if site_state == 'present':

        # Does a mailbox with this name already exist?
        if existing_mailbox:
            module.exit_json(changed=False,)

        positional_args = [session_id, mailbox_name]

        if not module.check_mode:
            # If this isn't a dry run, create the mailbox
            result.update(webfaction.create_mailbox(*positional_args))

    elif site_state == 'absent':

        # If the mailbox is already not there, nothing changed.
        if not existing_mailbox:
            module.exit_json(changed=False)

        if not module.check_mode:
            # If this isn't a dry run, delete the mailbox
            result.update(webfaction.delete_mailbox(session_id, mailbox_name))

    else:
        module.fail_json(msg="Unknown state specified: {}".format(site_state))

    module.exit_json(changed=True, result=result)
from ansible.module_utils.basic import *
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
IITBinterns13/edx-platform-dev | i18n/tests/test_validate.py | 16 | 1119 | import os, sys, logging
from unittest import TestCase
from nose.plugins.skip import SkipTest
from config import LOCALE_DIR
from execute import call
def test_po_files(root=LOCALE_DIR):
    """Nose test generator: yield one validation test per .po file under *root*."""
    log = logging.getLogger(__name__)
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    for dirpath, _dirnames, filenames in os.walk(root):
        po_names = (fname for fname in filenames
                    if os.path.splitext(fname)[1].lower() == '.po')
        for po_name in po_names:
            yield validate_po_file, os.path.join(dirpath, po_name), log
def validate_po_file(filename, log):
    """
    Call GNU msgfmt -c on each .po file to validate its format.
    Any errors caught by msgfmt are logged to log.
    """
    # Skip this test for now because it's very noisy
    raise SkipTest()
    # NOTE(review): everything below is unreachable until the SkipTest above
    # is removed; kept intact so the check can be re-enabled easily.
    # Use relative paths to make output less noisy.
    rfile = os.path.relpath(filename, LOCALE_DIR)
    (out, err) = call(['msgfmt', '-c', rfile], working_directory=LOCALE_DIR)
    if err != '':
        log.warn('\n' + err)
| agpl-3.0 |
zorroz/microblog | flask/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/exceptions.py | 374 | 3274 | # urllib3/exceptions.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
## Base Exceptions
class HTTPError(Exception):
    """Base exception used by this module."""
class PoolError(HTTPError):
    "Base exception for errors caused within a pool."
    def __init__(self, pool, message):
        # The originating pool is kept for callers that want context.
        self.pool = pool
        HTTPError.__init__(self, "%s: %s" % (pool, message))

    def __reduce__(self):
        # For pickling purposes. The pool itself is not carried across
        # pickling; the exception is rebuilt with placeholder arguments.
        return self.__class__, (None, None)
class RequestError(PoolError):
    "Base exception for PoolErrors that have associated URLs."
    def __init__(self, pool, url, message):
        self.url = url
        PoolError.__init__(self, pool, message)

    def __reduce__(self):
        # For pickling purposes. Unlike PoolError, the url survives
        # pickling; only the pool is replaced by None.
        return self.__class__, (None, self.url, None)
class SSLError(HTTPError):
    "Raised when SSL certificate fails in an HTTPS connection."
    # Distinct from the stdlib ssl.SSLError despite the shared name.
    pass
class ProxyError(HTTPError):
    "Raised when the connection to a proxy fails."
    pass
class DecodeError(HTTPError):
    "Raised when automatic decoding based on Content-Type fails."
    pass
## Leaf Exceptions
class MaxRetryError(RequestError):
    """Raised when the maximum number of retries is exceeded."""

    def __init__(self, pool, url, reason=None):
        self.reason = reason

        # Append the underlying cause (an exception) when one was given;
        # otherwise the retries were consumed by redirects.
        if reason:
            cause = " (Caused by %s: %s)" % (type(reason), reason)
        else:
            cause = " (Caused by redirect)"
        message = ("Max retries exceeded with url: %s" % url) + cause

        RequestError.__init__(self, pool, url, message)
class HostChangedError(RequestError):
    """Raised when an existing pool gets a request for a foreign host."""

    def __init__(self, pool, url, retries=3):
        RequestError.__init__(
            self, pool, url,
            "Tried to open a foreign host with url: %s" % url)
        self.retries = retries
class TimeoutStateError(HTTPError):
    """ Raised when passing an invalid state to a timeout """
    pass
class TimeoutError(HTTPError):
    """ Raised when a socket timeout error occurs.

    Catching this error will catch both :exc:`ReadTimeoutErrors
    <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
    """
    pass
class ReadTimeoutError(TimeoutError, RequestError):
    "Raised when a socket timeout occurs while receiving data from a server"
    # Inherits RequestError too, so it carries the URL that timed out.
    pass
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
    "Raised when a socket timeout occurs while connecting to a server"
    # Unlike ReadTimeoutError, no URL is attached (see comment above):
    # it inherits only from the base TimeoutError, not RequestError.
    pass
class EmptyPoolError(PoolError):
    "Raised when a pool runs out of connections and no more are allowed."
    pass
class ClosedPoolError(PoolError):
    "Raised when a request enters a pool after the pool has been closed."
    pass
class LocationParseError(ValueError, HTTPError):
    """Raised when get_host or similar fails to parse the URL input."""

    def __init__(self, location):
        HTTPError.__init__(self, "Failed to parse: %s" % location)
        # Keep the offending input so callers can inspect it.
        self.location = location
| bsd-3-clause |
ArielCabib/python-tkinter-calculator | Calculator/Helpers/History.py | 1 | 3920 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# History.py
#
# Copyright 2010 Ariel Haviv <ariel.haviv@gmail.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
***Class to handle history records***
Calculator by Ariel Haviv (ariel.haviv@gmail.com)
Instructors: Anatoly Peymer, Zehava Lavi
"""
import notation as nota
class History():
    """Undo/redo list of calculator expression strings.

    Entries are infix-notation strings; per the author's note, all numbers
    are stored in base 10. ``self.index`` points at the current entry.
    """
    def __init__(self):
        # all numbers will be saved always at base 10
        self.history = ['0']
        self.index = 0

    def load_file(self, file_):
        """Replace the history with a list read from an open file object.

        Raises Exception on any read/parse failure or if *file_* is not a
        Python 2 ``file`` object. Closes the file on success.

        NOTE(review): ``eval`` on the file contents executes arbitrary code
        from an untrusted file -- consider ast.literal_eval instead.
        """
        if isinstance(file_, file):
            try:
                # integrity check: contents must eval to a list of str
                tmp = eval(file_.read())
                if not isinstance(tmp, list):
                    raise Exception ("File read failed.")
                else:
                    for i in tmp:
                        if not isinstance(i, str):
                            raise Exception ("File read failed.")
                # all good. replacing history
                self.history = tmp
                # caller will invoke **redo** afterwards, so he will get
                # the last member of history
                self.index = len(self.history)-2
            except:
                raise Exception ("File read failed.")
            file_.close()
        else:
            raise Exception ("Not a file chosen")

    def save_file(self, file_):
        """Write repr(history) to an open file object and close it."""
        if isinstance(file_, file):
            try:
                file_.write(str(self.history))
            except:
                raise Exception ("File write failed.")
            file_.close()
        else:
            raise Exception ("Not a file chosen")

    def append(self, text):
        """Normalize *text* through the notation helpers and push it."""
        # converting to ast and back to infix text, to arrange and cancel
        # spare brackets:
        lst = nota.make_list_from_str(text)
        ast = nota.make_ast_from_list(lst)
        text = nota.nota(ast, 'in', 0)
        # adding entry to history; entries after the insertion point (the
        # redo tail) are discarded, as in a browser history
        history = self.history
        if history[self.index] != text:
            self.index += 1
            history.insert(self.index, text)
            self.history = history[:self.index+1]
        # print 'append:', self.index, self.history

    def get_next(self):
        """Return the next (redo) entry; raise Exception if at the end."""
        # returning next history entry
        history = self.history
        if self.index < len(history)-1:
            self.index += 1
            # print 'get_next:', self.index, self.history
            return history[self.index]
        else:
            raise Exception

    def get_prev(self, move_index = True):
        """Return the previous (undo) entry.

        With move_index=False, peek without moving the cursor and return
        None (instead of raising) when already at the oldest entry.
        """
        # returning last history entry
        history = self.history
        if self.index > 0:
            # print 'get_prev:', self.index, self.history
            if move_index:
                self.index -= 1
                return history[self.index]
            else:
                return history[self.index-1]
        else:
            if move_index:
                raise Exception
            else:
                return None
| bsd-3-clause |
rosswhitfield/mantid | qt/python/mantidqt/widgets/codeeditor/multifileinterpreter.py | 3 | 14026 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2017 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantidqt package
#
#
# std imports
import os.path as osp
from os import linesep
# 3rd party imports
from qtpy.QtCore import Qt, Slot, Signal
from qtpy.QtWidgets import QVBoxLayout, QWidget
# local imports
from mantidqt.widgets.codeeditor.interpreter import PythonFileInterpreter
from mantidqt.widgets.codeeditor.scriptcompatibility import add_mantid_api_import, mantid_api_import_needed
from mantidqt.widgets.codeeditor.tab_widget.codeeditor_tab_view import CodeEditorTabWidget
# Title given to brand-new (never-saved) tabs; a numeric suffix is added
# when several are open at once (see _tab_title_and_tooltip).
NEW_TAB_TITLE = 'New'
# Suffix appended to a tab title when its content has unsaved changes.
MODIFIED_MARKER = '*'
class MultiPythonFileInterpreter(QWidget):
    """Provides a tabbed widget for editing multiple files"""
    # Emitted with the (file)name of the script when execution starts.
    sig_code_exec_start = Signal(str)
    # Emitted as (old_name, new_name) when a tab's file name changes.
    sig_file_name_changed = Signal(str, str)
    # Emitted with the new current tab's filename (or "" when none).
    sig_current_tab_changed = Signal(str)
    # Emitted with the closed tab's filename (or "" when unsaved).
    sig_tab_closed = Signal(str)

    def __init__(self, font=None, default_content=None, parent=None):
        """
        :param font: An optional font to override the default editor font
        :param default_content: str, if provided this will populate any new editor that is created
        :param parent: An optional parent widget
        """
        super(MultiPythonFileInterpreter, self).__init__(parent)

        # attributes
        self.default_content = default_content
        self.default_font = font
        self.prev_session_tabs = None
        self.whitespace_visible = False
        self.setAttribute(Qt.WA_DeleteOnClose, True)

        # widget setup
        layout = QVBoxLayout(self)
        self._tabs = CodeEditorTabWidget(self)
        self._tabs.currentChanged.connect(self._emit_current_tab_changed)
        layout.addWidget(self._tabs)
        self.setLayout(layout)
        layout.setContentsMargins(0, 0, 0, 0)
        # Zoom level remembered from the last closed tab (see close_tab).
        self.zoom_level = 0

        # add a single editor by default
        self.append_new_editor()

        # setting defaults
        self.confirm_on_save = True

    def _tab_title_and_tooltip(self, filename):
        """Create labels for the tab title and tooltip from a filename"""
        if filename is None:
            # Unsaved tab: pick the first free "New"/"New (i)" title.
            title = NEW_TAB_TITLE
            i = 1
            while title in self.stripped_tab_titles:
                title = "{} ({})".format(NEW_TAB_TITLE, i)
                i += 1
            return title, title
        else:
            return osp.basename(filename), filename

    @property
    def stripped_tab_titles(self):
        """Tab titles with modification markers and '&' mnemonics removed."""
        tab_text = [self._tabs.tabText(i) for i in range(self.editor_count)]
        tab_text = [txt.rstrip('*') for txt in tab_text]
        # Some DEs (such as KDE) will automatically assign keyboard shortcuts using the Qt & annotation
        # see Qt Docs - qtabwidget#addTab
        tab_text = [txt.replace('&', '') for txt in tab_text]
        return tab_text

    def closeEvent(self, event):
        """Schedule deletion of this widget when the window closes."""
        self.deleteLater()
        super(MultiPythonFileInterpreter, self).closeEvent(event)

    def load_settings_from_config(self, config):
        """Pull user preferences (save prompt) out of the config object."""
        self.confirm_on_save = config.get('project', 'prompt_save_editor_modified')

    @property
    def editor_count(self):
        """Number of open editor tabs."""
        return self._tabs.count()

    @property
    def tab_filepaths(self):
        """File paths of all tabs that are backed by a file (saved tabs only)."""
        file_paths = []
        for idx in range(self.editor_count):
            file_path = self.editor_at(idx).filename
            if file_path:
                file_paths.append(file_path)
        return file_paths

    def append_new_editor(self, font=None, content=None, filename=None):
        """
        Appends a new editor the tabbed widget
        :param font: A reference to the font to be used by the editor. If None is given
        then self.default_font is used
        :param content: An optional string containing content to be placed
        into the editor on opening. If None then self.default_content is used
        :param filename: An optional string containing the filename of the editor
        if applicable.
        :return:
        """
        if content is None:
            content = self.default_content
        if font is None:
            font = self.default_font

        if self.editor_count > 0:
            # If there are other tabs open the same zoom level
            # as these is used.
            current_zoom = self._tabs.widget(0).editor.getZoom()
        else:
            # Otherwise the zoom level of the last tab closed is used
            # Or the default (0) if this is the very first tab
            current_zoom = self.zoom_level

        interpreter = PythonFileInterpreter(font, content, filename=filename,
                                            parent=self)
        interpreter.editor.zoomTo(current_zoom)

        if self.whitespace_visible:
            interpreter.set_whitespace_visible()

        # monitor future modifications
        interpreter.sig_editor_modified.connect(self.mark_current_tab_modified)
        interpreter.sig_filename_modified.connect(self.on_filename_modified)
        # Keep zoom level in sync across every open tab.
        interpreter.editor.textZoomedIn.connect(self.zoom_in_all_tabs)
        interpreter.editor.textZoomedOut.connect(self.zoom_out_all_tabs)

        tab_title, tab_tooltip = self._tab_title_and_tooltip(filename)
        tab_idx = self._tabs.addTab(interpreter, tab_title)
        self._tabs.setTabToolTip(tab_idx, tab_tooltip)
        self._tabs.setCurrentIndex(tab_idx)

        # set the cursor to the last line and give the new editor focus
        interpreter.editor.setFocus()
        if content is not None:
            line_count = content.count(linesep)
            interpreter.editor.setCursorPosition(line_count, 0)
        return tab_idx

    def abort_current(self):
        """Request that that the current execution be cancelled"""
        self.current_editor().abort()

    @Slot()
    def abort_all(self):
        """Request that all executing tabs are cancelled"""
        for ii in range(0, len(self._tabs)):
            editor = self.editor_at(ii)
            editor.abort()

    def close_all(self):
        """
        Close all tabs
        :return: True if all tabs are closed, False if cancelled
        """
        # Iterate in reverse so removing a tab does not shift pending indices.
        for idx in reversed(range(self.editor_count)):
            if not self.close_tab(idx, allow_zero_tabs=True):
                return False

        return True

    def close_tab(self, idx, allow_zero_tabs=False):
        """
        Close the tab at the given index.
        :param idx: The tab index
        :param allow_zero_tabs: If True then closing the last tab does not add a new empty tab.
        :return: True if tab is to be closed, False if cancelled
        """
        if idx >= self.editor_count:
            return True
        # Make the current tab active so that it is clear what you
        # are being prompted to save
        self._tabs.setCurrentIndex(idx)
        if self.current_editor().confirm_close():
            # If the last editor tab is being closed, its zoom level
            # is saved for the new tab which opens automatically.
            if self.editor_count == 1:
                self.zoom_level = self.current_editor().editor.getZoom()
            widget = self.editor_at(idx)
            filename = self.editor_at(idx).filename
            # note: this does not close the widget, that is why we manually close it
            self._tabs.removeTab(idx)
            widget.close()
        else:
            return False

        if (not allow_zero_tabs) and self.editor_count == 0:
            self.append_new_editor()

        if filename is not None:
            self.sig_tab_closed.emit(filename)
        else:
            self.sig_tab_closed.emit("")
        return True

    def current_editor(self):
        """Return the editor widget of the active tab."""
        return self._tabs.currentWidget()

    def editor_at(self, idx):
        """Return the editor at the given index. Must be in range"""
        return self._tabs.widget(idx)

    def _emit_current_tab_changed(self, index):
        """Re-emit Qt's currentChanged as a filename-carrying signal."""
        if index == -1:
            # No tabs remain.
            self.sig_current_tab_changed.emit("")
        else:
            self.sig_current_tab_changed.emit(self.current_tab_filename)

    def _emit_code_exec_start(self):
        """Emit signal that code execution has started"""
        if not self.current_editor().filename:
            # Unsaved script: fall back to the tab title, minus the
            # unsaved-changes marker.
            filename = self._tabs.tabText(self._tabs.currentIndex()).rstrip('*')
            self.sig_code_exec_start.emit(filename)
        else:
            self.sig_code_exec_start.emit(self.current_editor().filename)

    @property
    def current_tab_filename(self):
        """Filename of the active tab, or its stripped title when unsaved."""
        if not self.current_editor().filename:
            return self._tabs.tabText(self._tabs.currentIndex()).rstrip('*')
        return self.current_editor().filename

    def execute_current_async(self):
        """
        Execute content of the current file. If a selection is active
        then only this portion of code is executed, this is completed asynchronously
        """
        self._emit_code_exec_start()
        return self.current_editor().execute_async()

    def execute_async(self):
        """
        Execute ALL the content in the current file.
        Selection is ignored.
        This is completed asynchronously.
        """
        self._emit_code_exec_start()
        return self.current_editor().execute_async(ignore_selection=True)

    @Slot()
    def execute_current_async_blocking(self):
        """
        Execute content of the current file. If a selection is active
        then only this portion of code is executed, completed asynchronously
        which blocks calling thread.
        """
        self._emit_code_exec_start()
        self.current_editor().execute_async_blocking()

    def mark_current_tab_modified(self, modified):
        """Update the current tab title to indicate that the
        content has been modified"""
        self.mark_tab_modified(self._tabs.currentIndex(), modified)

    def mark_tab_modified(self, idx, modified):
        """Update the tab title to indicate that the
        content has been modified or not"""
        title_cur = self._tabs.tabText(idx)
        if modified:
            if not title_cur.endswith(MODIFIED_MARKER):
                title_new = title_cur + MODIFIED_MARKER
            else:
                title_new = title_cur
        else:
            if title_cur.endswith(MODIFIED_MARKER):
                title_new = title_cur.rstrip('*')
            else:
                title_new = title_cur
        self._tabs.setTabText(idx, title_new)

    def on_filename_modified(self, filename):
        """Refresh tab title/tooltip and notify listeners of a rename."""
        old_filename = self._tabs.tabToolTip(self._tabs.currentIndex()).rstrip('*')
        if not filename:
            filename = self._tabs.tabText(self._tabs.currentIndex()).rstrip('*')
        self.sig_file_name_changed.emit(old_filename, filename)
        title, tooltip = self._tab_title_and_tooltip(filename)
        idx_cur = self._tabs.currentIndex()
        self._tabs.setTabText(idx_cur, title)
        self._tabs.setTabToolTip(idx_cur, tooltip)

    @Slot(str)
    def open_file_in_new_tab(self, filepath, startup=False):
        """Open the existing file in a new tab in the editor
        :param filepath: A path to an existing file
        :param startup: Flag for if function is being called on startup
        """
        with open(filepath, 'r') as code_file:
            content = code_file.read()
        self.append_new_editor(content=content, filename=filepath)
        if startup is False and mantid_api_import_needed(content) is True:
            add_mantid_api_import(self.current_editor().editor, content)

    def open_files_in_new_tabs(self, filepaths):
        """Open each path in *filepaths* in its own new tab."""
        for filepath in filepaths:
            self.open_file_in_new_tab(filepath)

    def plus_button_clicked(self, _):
        """Add a new tab when the plus button is clicked"""
        self.append_new_editor()

    def restore_session_tabs(self):
        """Reopen tabs saved from a previous session, if any were recorded."""
        if self.prev_session_tabs is not None:
            try:
                self.open_files_in_new_tabs(self.prev_session_tabs)
            except IOError:
                pass
            self.close_tab(0)  # close default empty script

    def save_current_file(self):
        """Save the current file"""
        self.current_editor().save(force_save=True)

    def save_current_file_as(self):
        """Save the current tab under a new name and reopen it from disk."""
        previous_filename = self.current_editor().filename
        saved, filename = self.current_editor().save_as()
        if saved:
            # Replace the old tab with one backed by the new file.
            self.current_editor().close()
            self.open_file_in_new_tab(filename)
            if previous_filename:
                self.sig_file_name_changed.emit(previous_filename, filename)

    def spaces_to_tabs_current(self):
        """Convert leading spaces to tabs in the current editor."""
        self.current_editor().replace_spaces_with_tabs()

    def tabs_to_spaces_current(self):
        """Convert tabs to spaces in the current editor."""
        self.current_editor().replace_tabs_with_spaces()

    def toggle_comment_current(self):
        """Comment/uncomment the current selection in the active editor."""
        self.current_editor().toggle_comment()

    def toggle_find_replace_dialog(self):
        """Show the find/replace dialog for the active editor."""
        self.current_editor().show_find_replace_dialog()

    def toggle_whitespace_visible_all(self):
        """Flip whitespace visibility for every open tab and remember it."""
        if self.whitespace_visible:
            for idx in range(self.editor_count):
                self.editor_at(idx).set_whitespace_invisible()
            self.whitespace_visible = False
        else:
            for idx in range(self.editor_count):
                self.editor_at(idx).set_whitespace_visible()
            self.whitespace_visible = True

    def zoom_in_all_tabs(self):
        """Zoom in every tab except the current one (which already zoomed)."""
        current_tab_index = self._tabs.currentIndex()
        for i in range(self.editor_count):
            if i == current_tab_index:
                continue
            self.editor_at(i).editor.zoomIn()

    def zoom_out_all_tabs(self):
        """Zoom out every tab except the current one (which already zoomed)."""
        current_tab_index = self._tabs.currentIndex()
        for i in range(self.editor_count):
            if i == current_tab_index:
                continue
            self.editor_at(i).editor.zoomOut()
| gpl-3.0 |
DemocracyClub/monitoring | monitoring/apps/twitter_accounts/migrations/0001_initial.py | 1 | 2209 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import django_extensions.db.fields
class Migration(migrations.Migration):
    """Initial schema for the twitter_accounts app: Tweet and TwitterUser.

    Auto-generated by Django's makemigrations; Django compares this file
    against recorded migration state, so avoid hand-editing beyond comments.
    """

    # First migration of the app: nothing to depend on.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Tweet',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),
                ('raw_data', models.TextField()),
                ('text', models.CharField(max_length=200, blank=True)),
            ],
            options={
                'ordering': ('-modified', '-created'),
                'abstract': False,
                'get_latest_by': 'modified',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='TwitterUser',
            fields=[
                ('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),
                # The username itself is the primary key (no auto id column).
                ('username', models.CharField(max_length=100, serialize=False, primary_key=True, blank=True)),
                ('source', models.CharField(max_length=100, blank=True)),
            ],
            options={
                'ordering': ('-modified', '-created'),
                'abstract': False,
                'get_latest_by': 'modified',
            },
            bases=(models.Model,),
        ),
        # Added separately because both models must exist before the FK.
        migrations.AddField(
            model_name='tweet',
            name='twitter_user',
            field=models.ForeignKey(to='twitter_accounts.TwitterUser'),
            preserve_default=True,
        ),
    ]
| bsd-3-clause |
randynobx/ansible | lib/ansible/modules/cloud/webfaction/webfaction_site.py | 63 | 7118 | #!/usr/bin/python
#
# Create Webfaction website using Ansible and the Webfaction API
#
# ------------------------------------------
#
# (c) Quentin Stafford-Fraser 2015
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: webfaction_site
short_description: Add or remove a website on a Webfaction host
description:
- Add or remove a website on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
author: Quentin Stafford-Fraser (@quentinsf)
version_added: "2.0"
notes:
- Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP
address. You can use a DNS name.
- If a site of the same name exists in the account but on a different host, the operation will exit.
- >
You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
your host, you may want to add C(serial: 1) to the plays.
- See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
options:
name:
description:
- The name of the website
required: true
state:
description:
- Whether the website should exist
required: false
choices: ['present', 'absent']
default: "present"
host:
description:
- The webfaction host on which the site should be created.
required: true
https:
description:
- Whether or not to use HTTPS
required: false
choices:
- true
- false
default: 'false'
site_apps:
description:
- A mapping of URLs to apps
required: false
subdomains:
description:
- A list of subdomains associated with this site.
required: false
default: null
login_name:
description:
- The webfaction account to use
required: true
login_password:
description:
- The webfaction password to use
required: true
'''
EXAMPLES = '''
- name: create website
webfaction_site:
name: testsite1
state: present
host: myhost.webfaction.com
subdomains:
- 'testsite1.my_domain.org'
site_apps:
- ['testapp1', '/']
https: no
login_name: "{{webfaction_user}}"
login_password: "{{webfaction_passwd}}"
'''
import socket
import xmlrpclib
# Module-level XML-RPC client for the Webfaction control-panel API,
# shared by every call made in main().
webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
def main():
    """Ansible entry point: ensure a Webfaction website is present or absent.

    Terminates via module.exit_json()/fail_json(); never returns normally.
    """
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True),
            state = dict(required=False, choices=['present', 'absent'], default='present'),

            # You can specify an IP address or hostname.
            host = dict(required=True),
            https = dict(required=False, type='bool', default=False),
            subdomains = dict(required=False, type='list', default=[]),
            site_apps = dict(required=False, type='list', default=[]),
            login_name = dict(required=True),
            login_password = dict(required=True, no_log=True),
        ),
        supports_check_mode=True
    )
    site_name = module.params['name']
    site_state = module.params['state']
    site_host = module.params['host']
    # The Webfaction API identifies machines by IP, so resolve the hostname.
    site_ip = socket.gethostbyname(site_host)

    # Authenticate once; session_id is reused for all subsequent API calls.
    session_id, account = webfaction.login(
        module.params['login_name'],
        module.params['login_password']
    )

    site_list = webfaction.list_websites(session_id)
    site_map = dict([(i['name'], i) for i in site_list])
    existing_site = site_map.get(site_name)

    result = {}

    # Here's where the real stuff happens

    if site_state == 'present':

        # Does a site with this name already exist?
        if existing_site:

            # If yes, but it's on a different IP address, then fail.
            # If we wanted to allow relocation, we could add a 'relocate=true' option
            # which would get the existing IP address, delete the site there, and create it
            # at the new address.  A bit dangerous, perhaps, so for now we'll require manual
            # deletion if it's on another host.

            if existing_site['ip'] != site_ip:
                module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.")

            # If it's on this host and the key parameters are the same, nothing needs to be done.

            if (existing_site['https'] == module.boolean(module.params['https'])) and \
               (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \
               (dict(existing_site['website_apps']) == dict(module.params['site_apps'])):
                module.exit_json(
                    changed = False
                )

        positional_args = [
            session_id, site_name, site_ip,
            module.boolean(module.params['https']),
            module.params['subdomains'],
        ]
        # Each site_apps entry is a [app_name, url_path] pair; the API wants
        # them as trailing positional tuples.
        for a in module.params['site_apps']:
            positional_args.append( (a[0], a[1]) )

        if not module.check_mode:
            # If this isn't a dry run, create or modify the site
            result.update(
                webfaction.create_website(
                    *positional_args
                ) if not existing_site else webfaction.update_website (
                    *positional_args
                )
            )

    elif site_state == 'absent':

        # If the site's already not there, nothing changed.
        if not existing_site:
            module.exit_json(
                changed = False,
            )

        if not module.check_mode:
            # If this isn't a dry run, delete the site
            result.update(
                webfaction.delete_website(session_id, site_name, site_ip)
            )

    else:
        module.fail_json(msg="Unknown state specified: {}".format(site_state))

    module.exit_json(
        changed = True,
        result = result
    )
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
babyliynfg/cross | tools/project-creator/Python2.6.6/Lib/lib2to3/fixes/fix_metaclass.py | 2 | 8415 | """Fixer for __metaclass__ = X -> (metaclass=X) methods.
The various forms of classdef (inherits nothing, inherits once, inherits
many) don't parse the same in the CST so we look at ALL classes for
a __metaclass__ and if we find one normalize the inherits to all be
an arglist.
For one-liner classes ('class X: pass') there is no indent/dedent so
we normalize those into having a suite.
Moving the __metaclass__ into the classdef can also cause the class
body to be empty so there is some special casing for that as well.
This fixer also tries very hard to keep original indenting and spacing
in all those corner cases.
"""
# Author: Jack Diederich
# Local imports
from .. import fixer_base
from ..pygram import token
from ..fixer_util import Name, syms, Node, Leaf
def has_metaclass(parent):
    """ Check for a ``__metaclass__ = ...`` assignment without mutating the tree.
        Two layouts are possible:
        1)  clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
        2)  clsdef => simple_stmt => expr_stmt => Leaf('__meta')
    """
    for child in parent.children:
        # A suite wraps the class body -- the answer lives inside it.
        if child.type == syms.suite:
            return has_metaclass(child)
        if child.type != syms.simple_stmt or not child.children:
            continue
        expr = child.children[0]
        if expr.type != syms.expr_stmt or not expr.children:
            continue
        target = expr.children[0]
        if isinstance(target, Leaf) and target.value == '__metaclass__':
            return True
    return False
def fixup_parse_tree(cls_node):
    """ one-line classes don't get a suite in the parse tree so we add
        one to normalize the tree
    """
    for node in cls_node.children:
        if node.type == syms.suite:
            # already in the prefered format, do nothing
            return
    # !%@#! oneliners have no suite node, we have to fake one up
    # Find the colon introducing the class body; everything after it is the
    # body that must be re-parented into a suite node.
    for i, node in enumerate(cls_node.children):
        if node.type == token.COLON:
            break
    else:
        raise ValueError("No class suite and no ':'!")
    # move everything into a suite node
    suite = Node(syms.suite, [])
    while cls_node.children[i+1:]:
        # Clone each trailing child into the suite, then detach the original
        # from the classdef (remove() shrinks cls_node.children as we go).
        move_node = cls_node.children[i+1]
        suite.append_child(move_node.clone())
        move_node.remove()
    cls_node.append_child(suite)
    node = suite  # NOTE(review): dead assignment, kept byte-for-byte from upstream
def fixup_simple_stmt(parent, i, stmt_node):
    """ if there is a semi-colon all the parts count as part of the same
        simple_stmt. We just want the __metaclass__ part so we move
        everything after the semi-colon into its own simple_stmt node
    """
    # Locate the first semicolon, if any.
    for semi_ind, node in enumerate(stmt_node.children):
        if node.type == token.SEMI: # *sigh*
            break
    else:
        # No semicolon: the statement already stands alone.
        return
    node.remove() # kill the semicolon
    # Build an empty statement to receive everything after the semicolon.
    new_expr = Node(syms.expr_stmt, [])
    new_stmt = Node(syms.simple_stmt, [new_expr])
    while stmt_node.children[semi_ind:]:
        # Clone each trailing child into the new statement, then detach it.
        move_node = stmt_node.children[semi_ind]
        new_expr.append_child(move_node.clone())
        move_node.remove()
    parent.insert_child(i, new_stmt)
    # Carry the original leading whitespace/comments (prefix) over to the
    # new statement so the output keeps the source formatting.
    new_leaf1 = new_stmt.children[0].children[0]
    old_leaf1 = stmt_node.children[0].children[0]
    new_leaf1.prefix = old_leaf1.prefix
def remove_trailing_newline(node):
    """Drop the trailing NEWLINE token from *node*, if it carries one."""
    children = node.children
    if not children:
        return
    last = children[-1]
    if last.type == token.NEWLINE:
        last.remove()
def find_metas(cls_node):
    """Yield (suite, index, stmt) for every ``__metaclass__`` assignment in
    the class body, after splitting it off any semicolon-joined line and
    stripping its trailing NEWLINE so it can be re-used in the arglist.
    """
    # find the suite node (Mmm, sweet nodes)
    for node in cls_node.children:
        if node.type == syms.suite:
            break
    else:
        raise ValueError("No class suite!")
    # look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ]
    # Iterate over a snapshot: fixup_simple_stmt may insert siblings while
    # we are walking the children.
    for i, simple_node in list(enumerate(node.children)):
        if simple_node.type == syms.simple_stmt and simple_node.children:
            expr_node = simple_node.children[0]
            if expr_node.type == syms.expr_stmt and expr_node.children:
                # Check if the expr_node is a simple assignment.
                left_node = expr_node.children[0]
                if isinstance(left_node, Leaf) and \
                        left_node.value == u'__metaclass__':
                    # We found a assignment to __metaclass__.
                    fixup_simple_stmt(node, i, simple_node)
                    remove_trailing_newline(simple_node)
                    yield (node, i, simple_node)
def fixup_indent(suite):
    """ If an INDENT is followed by a thing with a prefix then nuke the prefix
        Otherwise we get in trouble when removing __metaclass__ at suite start
    """
    # Depth-first walk with an explicit stack; children are pushed reversed
    # so they pop in source order.
    kids = suite.children[::-1]
    # find the first indent
    while kids:
        node = kids.pop()
        if node.type == token.INDENT:
            break
    # find the first Leaf
    while kids:
        node = kids.pop()
        if isinstance(node, Leaf) and node.type != token.DEDENT:
            if node.prefix:
                # Wipe leftover whitespace so the first real token starts
                # cleanly right after the INDENT.
                node.prefix = u''
            return
        else:
            # Not a leaf: descend into its children.
            kids.extend(node.children[::-1])
class FixMetaclass(fixer_base.BaseFix):
    """Fixer rewriting ``__metaclass__ = X`` class attributes into the
    Python 3 ``class Name(..., metaclass=X)`` keyword-argument form.
    """
    PATTERN = """
    classdef<any*>
    """
    def transform(self, node, results):
        """Rewrite *node* (a classdef) in place; no-op when it has no
        ``__metaclass__`` assignment.
        """
        if not has_metaclass(node):
            return
        fixup_parse_tree(node)
        # find metaclasses, keep the last one
        last_metaclass = None
        for suite, i, stmt in find_metas(node):
            last_metaclass = stmt
            stmt.remove()
        text_type = node.children[0].type # always Leaf(nnn, 'class')
        # figure out what kind of classdef we have
        if len(node.children) == 7:
            # Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite])
            #                  0        1       2   3        4    5    6
            if node.children[3].type == syms.arglist:
                arglist = node.children[3]
            # Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite])
            else:
                # single base class: wrap it into an arglist so we can append
                parent = node.children[3].clone()
                arglist = Node(syms.arglist, [parent])
                node.set_child(3, arglist)
        elif len(node.children) == 6:
            # Node(classdef, ['class', 'name', '(', ')', ':', suite])
            #                  0        1       2   3    4    5
            arglist = Node(syms.arglist, [])
            node.insert_child(3, arglist)
        elif len(node.children) == 4:
            # Node(classdef, ['class', 'name', ':', suite])
            #                  0        1       2   3
            # no parentheses at all: fabricate '(' arglist ')'
            arglist = Node(syms.arglist, [])
            node.insert_child(2, Leaf(token.RPAR, u')'))
            node.insert_child(2, arglist)
            node.insert_child(2, Leaf(token.LPAR, u'('))
        else:
            raise ValueError("Unexpected class definition")
        # now stick the metaclass in the arglist
        meta_txt = last_metaclass.children[0].children[0]
        meta_txt.value = 'metaclass'
        orig_meta_prefix = meta_txt.prefix
        if arglist.children:
            arglist.append_child(Leaf(token.COMMA, u','))
            meta_txt.prefix = u' '
        else:
            meta_txt.prefix = u''
        # compact the expression "metaclass = Meta" -> "metaclass=Meta"
        expr_stmt = last_metaclass.children[0]
        assert expr_stmt.type == syms.expr_stmt
        expr_stmt.children[1].prefix = u''
        expr_stmt.children[2].prefix = u''
        arglist.append_child(last_metaclass)
        fixup_indent(suite)
        # check for empty suite
        if not suite.children:
            # one-liner that was just __metaclass__
            suite.remove()
            pass_leaf = Leaf(text_type, u'pass')
            pass_leaf.prefix = orig_meta_prefix
            node.append_child(pass_leaf)
            node.append_child(Leaf(token.NEWLINE, u'\n'))
        elif len(suite.children) > 1 and \
                (suite.children[-2].type == token.INDENT and
                suite.children[-1].type == token.DEDENT):
            # there was only one line in the class body and it was __metaclass__
            pass_leaf = Leaf(text_type, u'pass')
            suite.insert_child(-1, pass_leaf)
            suite.insert_child(-1, Leaf(token.NEWLINE, u'\n'))
| mit |
mganeva/mantid | scripts/Inelastic/Direct/ISISDirecInelasticConfig.py | 1 | 42948 | #!/usr/bin/python
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import os
import sys
import platform
import shutil
import re
import copy
from datetime import date
import time
from xml.dom import minidom
from six import iteritems
# the list of instruments this configuration is applicable to
INELASTIC_INSTRUMENTS = ['MAPS', 'LET', 'MERLIN', 'MARI', 'HET']
# the list of the parameters, which can be replaced if found in user files
USER_PROPERTIES = ['instrument', 'userID', 'cycleID', 'start_date', 'rb_folder']
class UserProperties(object):
    """Helper class to define & retrieve user properties
       as retrieved from file provided by user office
    """
    def __init__(self, *args):
        """ Build user properties from space separated string in the form:
            "userId instr_name rb_num cycle_mu start_date"
            or list of five elements with the same meaning
        """
        # All per-experiment data live in dictionaries keyed by the
        # experiment start date in ISO form (str(datetime.date)).
        self._instrument = {}
        self._rb_dirs = {}
        self._cycle_IDs = {}
        self._start_dates = {}
        self._rb_exist = {}
        self._user_id = None
        # key (ISO date string) of the most recent experiment
        self._recent_dateID = None
        # NOTE(review): UserProperties() with no arguments raises IndexError
        # here; callers are expected to pass at least UserProperties(None).
        if args[0] is None:
            return
        if len(args) == 1:
            input_str = str(args[0])
            param = input_str.split()
            self._user_id = param[0]
            if len(param) == 5:
                self.set_user_properties(param[1], param[2], param[3], param[4])
            else:  # only userID was provided, nothing else is defined
                return
        elif len(args) == 5:
            self._user_id = str(args[0])
            self.set_user_properties(args[1], args[2], args[3], args[4])
        else:
            raise RuntimeError("User has to be defined by the list of 5 components in the form:\n{0}".
                               format("[userId,instr_name,rb_num,cycle_mu,start_date]"))
    def __str__(self):
        """Convert class to string. Only last cycle settings are returned"""
        if self._user_id:
            return "{0} {1} {2} {3} {4}".format(self._user_id, self.instrument,
                                                self.rb_folder, self.cycleID, str(self.start_date))
        else:
            return "None"
    #
    def set_user_properties(self, instrument, rb_folder_or_id, cycle, start_date):
        """Define the information, user office provides about user.
           The info has the form:
           instrument -- string with full instrument name
           date -- string with experiment start date in the form YYYYMMDD
           cycle -- string with the cycle id in the form CYCLEYYYYN
           where N is the cycle number within the year
           rb_folder -- string containing the full path to working folder available
           for all users and IS participating in the experiment.
        """
        instrument, start_date, cycle, rb_folder_or_id, rb_exist = self.check_input(instrument, start_date, cycle,
                                                                                    rb_folder_or_id)
        # when user starts
        recent_date = date(int(start_date[0:4]), int(start_date[4:6]), int(start_date[6:8]))
        recent_date_id = str(recent_date)
        self._start_dates[recent_date_id] = recent_date
        self._rb_exist[recent_date_id] = rb_exist
        # a data which define the cycle ID e.g 2014_3 or something
        self._cycle_IDs[recent_date_id] = (str(cycle[5:9]), str(cycle[9:]))
        self._instrument[recent_date_id] = str(instrument).upper()
        self._rb_dirs[recent_date_id] = rb_folder_or_id
        # keep _recent_dateID pointing at the latest start date seen so far
        if self._recent_dateID:
            max_date = self._start_dates[self._recent_dateID]
            for date_key, a_date in iteritems(self._start_dates):
                if a_date > max_date:
                    self._recent_dateID = date_key
                    max_date = a_date
        else:
            self._recent_dateID = recent_date_id
    def replace_variables(self, data_string):
        """Replace variables defined in USER_PROPERTIES
           and enclosed in $ sign with their values
           defined for a user
        """
        str_parts = data_string.split('$')
        for prop in USER_PROPERTIES:
            try:
                ind = str_parts.index(prop)
            # pylint: disable=W0703
            except Exception:
                ind = None
            if ind is not None:
                # substitute the property name with its current value
                str_parts[ind] = str(getattr(self, prop))
        data_string = "".join(str_parts)
        return data_string
    #
    @property
    def GID(self):
        """Returns user's group ID which coincide with
           number part of the rb directory
        """
        if self._user_id:
            RBfolder = os.path.basename(self.rb_dir)
            return RBfolder[2:]
        else:
            return None
    #
    @property
    def rb_folder(self):
        """Returns short name of user's RB folder
           consisting of string RB and string representation of
           RB number e.g. RB1510324
        """
        if self._user_id:
            RBfolder = os.path.basename(self.rb_dir)
            return RBfolder
        else:
            return None
    @property
    def rb_dir(self):
        """return rb folder used in last actual instrument"""
        if self._recent_dateID:
            return self._rb_dirs[self._recent_dateID]
        else:
            raise RuntimeError("User's experiment date is not defined. User undefined")
    @rb_dir.setter
    def rb_dir(self, user_home_path):
        """Set user's rb-folder path"""
        rb_path = self.rb_folder
        full_path = os.path.join(user_home_path, rb_path)
        if os.path.exists(full_path) and os.path.isdir(full_path):
            self._rb_dirs[self._recent_dateID] = full_path
            self._rb_exist[self._recent_dateID] = True
        else:
            # non-existing paths are silently ignored; old value is kept
            pass
    def get_rb_num(self, exp_date):
        """Returns short name of user's RB folder
           consisting of string RB and string representation of
           RB number e.g. RB1510324,
           used on the date specified
        """
        return os.path.basename(self._rb_dirs[exp_date])
    #
    def get_rb_dir(self, exp_date):
        """Returns full name name of user's RB folder corresponding to the
           experiment, with the data provided.
        """
        return self._rb_dirs[exp_date]
    @property
    def rb_id(self):
        """the same as rb_folder:
           returns string with RB and string representation of
           RB number e.g. RB1510324
        """
        return self.rb_folder
    #
    @property
    def start_date(self):
        """Last start date"""
        if self._recent_dateID:
            return self._start_dates[self._recent_dateID]
        else:
            raise RuntimeError("User's experiment date is not defined. User undefined")
    #
    @property
    def instrument(self):
        """return instrument used in last actual experiment"""
        if self._recent_dateID:
            return self._instrument[self._recent_dateID]
        else:
            raise RuntimeError("User's experiment date is not defined. User undefined")
    #
    def get_instrument(self, cycle_date_id):
        """Return the instrument, used in the cycle with the date specified"""
        return self._instrument[cycle_date_id]
    #
    @property
    def rb_dir_exist(self):
        """return true if user's rb dir exist and false otherwise"""
        if self._recent_dateID:
            return self._rb_exist[self._recent_dateID]
        else:
            raise RuntimeError("User's experiment date is not defined. User undefined")
    #
    @property
    def cycleID(self):
        """return last cycleID the user is participating"""
        if self._recent_dateID:
            year, num = self._cycle_IDs[self._recent_dateID]
            return "{0}_{1}".format(year, num)
        else:
            raise RuntimeError("User's experiment date is not defined. User undefined")
    @property
    def cycle(self):
        """return last cycle the user is participating"""
        if self._recent_dateID:
            return self._cycle_IDs[self._recent_dateID]
        else:
            raise RuntimeError("User's experiment date is not defined. User undefined")
    #
    @property
    def userID(self):
        # federal ID of the user, as provided by the user office
        return self._user_id
    @userID.setter
    def userID(self, val):
        self._user_id = str(val)
    # number of branches as necessary
    # pylint: disable=R0912
    def check_input(self, instrument, start_date, cycle, rb_folder_or_id):
        """Verify that input is correct"""
        # Checks if instrument is inelastic and raises RuntimeError if not
        self.validate_instrument(instrument)
        # Checks if the date is valid and raises a RuntimeError if not
        start_date = self.validate_date(start_date)
        def convert_cycle_int(cycle_int):
            # accept either full (20151) or short (151) numeric cycle forms
            if cycle_int > 999:  # Full cycle format 20151
                cycle = "CYCLE{0:05}".format(cycle_int)
            else:
                cycle = "CYCLE20{0:03}".format(cycle_int)
            return cycle
        if isinstance(cycle, int):
            cycle = convert_cycle_int(cycle)
        if isinstance(cycle, str):
            if len(cycle) == 11:
                # 11-character form carries a trailing sub-cycle letter
                last_letter = cycle[-1]
                # NOTE(review): the message below omits 'E' although 'E' is accepted
                if not last_letter.upper() in {'A','B','C','D','E'}:
                    raise RuntimeError("Cycle should be a string in the form CYCLEYYYYN[A,B,C,D "
                                       "N-- the cycle's number in a year or integer in the form: YYYYN or YYN "
                                       "but it is {0}".format(cycle))
                else:
                    cycle = cycle.upper()
            elif len(cycle) < 10:
                cycle = cycle.replace('_', '')
                try:
                    cycle = int(cycle)
                except ValueError:
                    raise RuntimeError("Cycle should be a string in the form CYCLEYYYYN "
                                       "N-- the cycle's number in a year or integer in the form: YYYYN or YYN "
                                       "but it is {0}".format(cycle))
                cycle = convert_cycle_int(cycle)
        if not (len(cycle) == 10 and re.match('^CYCLE', cycle)):
            raise RuntimeError("Cycle should be a string in form CYCLEYYYYN "
                               "N-- the cycle's number in a year or integer in the form: YYYYN or YYN "
                               "but it is {0}".format(cycle))
        if isinstance(rb_folder_or_id, int):
            rb_folder_or_id = "RB{0:07}".format(rb_folder_or_id)
        if not isinstance(rb_folder_or_id, str):
            raise RuntimeError("RB Folder {0} should be a string".format(rb_folder_or_id))
        else:
            # normalize a bare RB number at the end of the path into RBnnnnnnn
            f_path, rbf = os.path.split(rb_folder_or_id)
            if len(rbf) != 9:
                try:
                    rbf = int(rbf)
                    rbf = "RB{0:07}".format(rbf)
                    rb_folder_or_id = os.path.join(f_path, rbf)
                except ValueError:
                    raise RuntimeError(
                        "RB Folder {0} should be a string containing RB number at the end".format(rb_folder_or_id))
        # end
        if os.path.exists(rb_folder_or_id) and os.path.isdir(rb_folder_or_id):
            rb_exist = True
        else:
            rb_exist = False
        return instrument, start_date, cycle, rb_folder_or_id, rb_exist
    #-----------------------------------------------------------------------------------------------
    def validate_instrument(self, instrument):
        # Raise RuntimeError when instrument is not a known inelastic one.
        if instrument not in INELASTIC_INSTRUMENTS:
            raise RuntimeError("Instrument {0} has to be one of "
                               "ISIS inelastic instruments".format(instrument))
    def validate_date(self, start_date):
        # Normalize the experiment start date into the YYYYMMDD string form;
        # 'none' maps to a date far in the past.
        if isinstance(start_date, str):
            # the date of express -- let's make it long in the past
            if start_date.lower() == 'none':
                start_date = '19800101'
                error = False
            else:
                start_date = start_date.replace('-', '')
                if len(start_date) != 8:
                    start_date = '20' + start_date
                if len(start_date) == 8:
                    error = False
                else:
                    error = True
        else:
            error = True
        if error:
            raise RuntimeError("Experiment start date should be defined as"
                               " a string in the form YYYYMMDD or YYMMDD but it is: {0}".format(start_date))
        return start_date
    def get_all_instruments(self):
        """ Return list of all instruments, user is working on during this cycle"""
        return list(self._instrument.values())
    def get_all_cycles(self):
        """Return list of all cycles the user participates in"""
        return list(self._instrument.keys())
    def get_all_rb(self):
        """Return list of all rb folders the user participates in"""
        return list(self._rb_dirs.values())
#
# --------------------------------------------------------------------#
#
class MantidConfigDirectInelastic(object):
"""Class describes Mantid server specific user's configuration,
necessary for Direct Inelastic reduction and analysis to work
The class should not depend on Mantid itself.
1) Valid for Mantid 3.4 available on 18/05/2015 and expects server
to have:
Map/masks folder with layout defined on (e.g. svn checkout)
https://svn.isis.rl.ac.uk/InstrumentFiles/trunk
2) User scripts folder with layout defined on
(e.g. git checkout or Mantid script repository set-up):
git@github.com:mantidproject/scriptrepository.git
see https://github.com/mantidproject/scriptrepository for details
3) The data can be found in archive, mounted at /archive/NDXxxxxx/Instrument/data/cycle_XX_Y
4)There are number of other assumptions about script path, used scripts, Mantid confg,
and other folders
All these assumptions are summarized within __init__
The class have to change/to be amended if the configuration
changes or has additional features.
"""
# pylint: disable=too-many-instance-attributes
# It has as many as parameters describing ISIS configuration.
    def __init__(self, mantid='/opt/Mantid/', home_dir='/home/',
                 script_repo='/opt/UserScripts/',
                 map_mask_folder='/usr/local/mprogs/InstrumentFiles/'):
        """Initialize generic config variables and variables specific to a server.

        :param mantid: root of the Mantid installation
        :param home_dir: root of the users' home directories
        :param script_repo: checkout of the Mantid user script repository
        :param map_mask_folder: checkout of the instrument map/mask files
        :raises RuntimeError: if any of the four folders does not exist
        """
        self._mantid_path = str(mantid)
        self._home_path = str(home_dir)
        self._script_repo = str(script_repo)
        self._map_mask_folder = str(map_mask_folder)
        # check if all necessary server folders specified as class parameters are present
        self._check_server_folders_present()
        #
        # Static Parts of dynamic contents of Mantid configuration file
        self._root_data_folder = '/archive' # root folder for all experimental results -- particular one will depend on
        # instrument and cycle number.
        # the common part of all strings, generated dynamically as function of input class parameters.
        self._dynamic_options_base = ['default.facility=ISIS']
        # Path to python scripts, defined and used by Mantid wrt to Mantid Root (this path may be version specific)
        self._python_mantid_path = ['scripts/Calibration/', 'scripts/Examples/', 'scripts/Interface/', 'scripts/Vates/']
        # Static paths to user scripts, defined wrt script repository root
        self._python_user_scripts = set(['direct_inelastic/ISIS/qtiGenie/'])
        # Relative to a particular user path to place links, important to user
        self._user_specific_link_path = 'Desktop'
        # Relative to a particular user name of folders with link to instrument files
        self._map_mask_link_name = 'instrument_files'
        # the name of the file, which describes python files to copy to user. The file has to be placed in
        # script_repository/instrument_name folder
        # File name, used as source of reduction scripts for particular instrument
        self._user_files_descr = 'USER_Files_description.xml'
        # fall back files defined to use if USER_Files_description is for some reason not available or wrong
        # pylint: disable=W0108
        # it will not work without lambda as intended
        self._sample_reduction_file = lambda InstrName: '{0}Reduction_Sample.py'.format(InstrName)
        # File name, used as target for copying to user folder for user to deploy as the base for his reduction script
        # it will not work without lambda as intended
        self._target_reduction_file = lambda InstrName, cycleID: '{0}Reduction_{1}.py'.format(InstrName, cycleID)
        # Static contents of the Mantid Config file
        self._header = ("# This file can be used to override any properties for this installation.\n"
                        "# Any properties found in this file will override any that are found in the Mantid.Properties file\n"
                        "# As this file will not be replaced with further installations of Mantid it is a safe place to put\n"
                        "# properties that suit your particular installation.\n"
                        "#\n"
                        "# See here for a list of possible options:''"
                        "# http://www.mantidproject.org/Properties_File#Mantid.User.Properties''\n"
                        "#\n"
                        "#uncomment to enable archive search - ICat and Orbiter\n"
                        "datasearch.searcharchive = On # may be important for autoreduction to work,\n")
        #
        self._footer = ("##\n"
                        "## LOGGING\n"
                        "##\n"
                        "\n"
                        "## Uncomment to change logging level\n"
                        "## Default is notice\n"
                        "## Valid values are: error, warning, notice, information, debug\n"
                        "#logging.loggers.root.level=information\n"
                        "\n"
                        "## MantidPlot\n"
                        "##\n"
                        "## Show invisible workspaces\n"
                        "#MantidOptions.InvisibleWorkspaces=0\n"
                        "## Re-use plot instances for different plot types\n"
                        "#MantidOptions.ReusePlotInstances=Off\n\n"
                        "## Uncomment to disable use of OpenGL to render unwrapped instrument views\n"
                        "#MantidOptions.InstrumentView.UseOpenGL=Off\n")
        # Methods, which build & verify various parts of Mantid configuration
        self._dynamic_options = [self._set_default_inst,
                                 self._set_script_repo,
                                 # necessary to have on an Instrument scientist account, disabled on generic setup
                                 self._def_python_search_path,
                                 self._set_datasearch_directory, self._set_rb_directory]
        # current user (a UserProperties instance), set by init_user
        self._user = None
        self._cycle_data_folder = set()
        # this is the list, containing configuration strings
        # generated by the class. No configuration is present initially.
        # Its contents is generated by _init_config method from server and user specific
        # input parameters together.
        self._dynamic_configuration = None
        # Unconditionally rewrite Mantid Configuration
        self._force_change_config = False
        # Unconditionally rewrite copy of sample reduction script
        self._force_change_script = False
#
def config_need_replacing(self, config_file_name):
"""Method specifies conditions when existing configuration file should be replaced"""
if self._force_change_config:
return True
# missing file should always be replaced
if not os.path.isfile(config_file_name):
return True
start_date = self._user.start_date
unmodified_creation_time = time.mktime(start_date.timetuple())
targ_config_time = os.path.getmtime(config_file_name)
# Only rewrite configuration if nobody have touched it
if unmodified_creation_time == targ_config_time:
return True
else:
return False
#
#
def get_user_file_description(self, instr_name=None):
"""returbs full file name (with path) for an xml file which describes
files, which should be copied to a user.
If instrument name is known or provided, function
calculates this name wrt. the location of the file in the Mantid user
script repository.
"""
if self._user:
if not instr_name:
instr_name = self._user.instrument
return os.path.join(self._script_repo, 'direct_inelastic', instr_name,
self._user_files_descr)
else:
return self._user_files_descr
#
def script_need_replacing(self, target_script_name):
"""Method specifies conditions when existing reduction file should be replaced
by a sample file.
"""
if self._force_change_script:
return True
# non-existing file should always be replaced
if not os.path.isfile(target_script_name):
return True
# Always replace sample file if it has not been touched
start_date = self._user.start_date
# this time is set up to the file, copied from the repository
unmodified_file_time = time.mktime(start_date.timetuple())
targ_file_time = os.path.getmtime(target_script_name)
if unmodified_file_time == targ_file_time:
return True
else: # somebody have modified the target file. Leave it alone
return False
#
def _fullpath_to_copy(self, short_source_file=None, short_target_file=None, cycle_id=None):
"""Append full path to source and target files """
if cycle_id:
InstrName = self._user.get_instrument(cycle_id)
rb_folder = self._user.get_rb_dir(cycle_id)
else:
InstrName = self._user.instrument
rb_folder = self._user.rb_dir
if short_source_file is None:
short_source_file = self._sample_reduction_file(InstrName)
if short_target_file is None:
CycleID = self._user.cycleID
short_target_file = self._target_reduction_file(InstrName, CycleID)
source_path = os.path.join(self._script_repo, 'direct_inelastic', InstrName.upper())
full_source = os.path.join(source_path, short_source_file)
full_target = os.path.join(rb_folder, short_target_file)
return full_source, full_target
#
def copy_reduction_sample(self, user_file_description=None, cycle_id=None, rb_group=None):
"""copy sample reduction scripts from Mantid script repository
to user folder.
"""
if user_file_description is None:
user_file_description = self.get_user_file_description()
if rb_group is None:
rb_group = self._user.userID
info_to_copy = self._parse_user_files_description(user_file_description, cycle_id)
for source_file, dest_file, subst_list in info_to_copy:
self._copy_user_file_job(source_file, dest_file, rb_group, subst_list)
def _copy_and_parse_user_file(self, input_file, output_file, replacemets_list):
"""Method processes file provided for user and replaces list of keywords, describing user
and experiment (See comments in User_files_description.xml) with their values
"""
fh_targ = open(output_file, 'w')
if not fh_targ:
return
var_to_replace = list(replacemets_list.keys())
with open(input_file) as fh_source:
for line in fh_source:
rez = line
for var in var_to_replace:
if var in rez:
rez = rez.replace(var, replacemets_list[var])
fh_targ.write(rez)
fh_targ.close()
#
    def _copy_user_file_job(self, input_file, output_file, rb_group, replacement_list=None):
        """Method copies file provided into the requested destination
           and replaces keys specified in replacement list dictionary with their
           values if replacement_list is provided.

        The copied file is made world-writable, chown-ed to the user and
        rb_group, and its mtime is set to the user's start date so that
        script_need_replacing() can later tell whether it was edited.
        """
        if not os.path.isfile(input_file):
            return
        # already have target file or modified by user
        if not self.script_need_replacing(output_file):
            return
        if os.path.isfile(output_file):
            os.remove(output_file)
        if replacement_list is None:
            # plain copy; no keyword substitution requested
            shutil.copyfile(input_file, output_file)
        else:
            self._copy_and_parse_user_file(input_file, output_file, replacement_list)
        os.chmod(output_file, 0o777)
        # NOTE(review): chown via shell; paths with spaces/metacharacters
        # would break or be unsafe here -- consider os.chown/shutil.chown.
        ownership_str = "chown {0}:{1} {2}".format(self._user.userID, rb_group, output_file)
        if platform.system() != 'Windows':
            os.system(ownership_str)
        # Set up the file creation and modification dates to the users start date
        start_date = self._user.start_date
        file_time = time.mktime(start_date.timetuple())
        os.utime(output_file, (file_time, file_time))
def _get_file_attributes(self, file_node, cycle=None):
"""processes xml file_node to retrieve file attributes to copy """
source_file = file_node.getAttribute("file_name")
if source_file is None:
return (None, None)
target_file = file_node.getAttribute("copy_as")
if target_file is None:
source_file = target_file
else:
if "$" in target_file:
target_file = self._user.replace_variables(target_file)
full_source, full_target = self._fullpath_to_copy(source_file, target_file, cycle)
return (full_source, full_target)
#
def _parse_replacement_info(self, repl_info):
"""process dom element 'replacement' and
returns the variables with its correspondent value
to replace variable by their value.
If value contains one or more of the supported variables as its part, this
variable is replaced by its value.
Supported variables are defined by global list USER_PROPERTIES
and their values are taken from current self._user class
"""
# what should be replaced in the file
source = repl_info.getAttribute("var")
if len(source) == 0:
raise ValueError(
'"replace" field of {0} file for instrument {1} has to contain attribute "var" and its value'
.format(self._user_files_descr, self._user.instrument))
# what should be placed instead of the replacement
dest = repl_info.getAttribute("by_var")
if len(dest) == 0:
raise ValueError(
'"replace" field of {0} file for instrument {1} has to contain attribute "by_var" and its value'
.format(self._user_files_descr, self._user.instrument))
# replace use-specific variables by their values
if '$' in dest:
dest = self._user.replace_variables(dest)
return (source, dest)
    def _parse_user_files_description(self, job_description_file, cycle_id=None):
        """ Method parses xml file used to describe files to provide to user.

        Returns a list of (source, target, replacement_dict_or_None) tuples,
        or None when no user is defined.
        """
        # mainly for debugging purposes
        filenames_to_copy = []
        # does not work if user is not defined
        if self._user is None:
            return None
        # parse job description file, fail down on default behaviour if
        # user files description is not there
        try:
            domObj = minidom.parse(job_description_file)
        # have no idea what minidom specific exception is:
        # pylint: disable=W0703
        except Exception:
            # fall back: provide only the default sample reduction script
            input_file, output_file = self._fullpath_to_copy(None, None, cycle_id)
            filenames_to_copy.append((input_file, output_file, None))
            return filenames_to_copy
        files_to_copy = domObj.getElementsByTagName("file_to_copy")
        # go through all files in the description and define file copying operations
        for file_node in files_to_copy:
            # retrieve file attributes or its default values if the attributes are missing
            input_file, output_file = self._get_file_attributes(file_node, cycle_id)
            if input_file is None:
                continue
            # identify all replacements, defined for this file
            replacements_info = file_node.getElementsByTagName('replace')
            if len(replacements_info) == 0:
                replacement_list = None
            else:
                replacement_list = {}
                for replacement in replacements_info:
                    source, dest = self._parse_replacement_info(replacement)
                    replacement_list[source] = dest
            filenames_to_copy.append((input_file, output_file, replacement_list))
        return filenames_to_copy
#
def get_data_folder_name(self, instr, cycle_ID):
"""Method to generate a data folder from instrument name and the cycle start date
(cycle ID)
The agreement on the naming as currently in ISIS:
e.g: /archive/NDXMERLIN/Instrument/data/cycle_08_1
Note: will fail if cycle numbers ever become a 2-digit numbers e.g. cycle_22_10
"""
# cycle folder have short form without leading numbers
cycle_fold_n = int(cycle_ID[0]) - 2000
folder = os.path.join(self._root_data_folder, 'NDX' + instr.upper(),
"Instrument/data/cycle_{0:02}_{1}".format(cycle_fold_n, str(cycle_ID[1][0])))
return folder
def is_inelastic(self, instr_name):
"""Check if the instrument is inelastic"""
if instr_name in INELASTIC_INSTRUMENTS:
return True
else:
return False
#
    def init_user(self, fedIDorUser, theUser=None):
        """Define settings, specific to a user
           Supports two interfaces -- old and the new one
           where
           OldInterface: requested two input parameters
           fedID -- users federal id
           theUser -- class defining all other user property
           NewInterface: requested single parameter:
           theUser -- class defining all user's properties including fedID
        """
        if not theUser:
            if isinstance(fedIDorUser, UserProperties):
                theUser = fedIDorUser
            else:
                # NOTE(review): message looks truncated -- it ends with "and got"
                raise RuntimeError("self.init_user(val) has to have val of UserProperty type only and got")
        else:
            # old interface: first argument is the federal id
            theUser.userID = fedIDorUser
        # check if all users instruments are inelastic instruments. (script works for inelastic only)
        users_instruments = theUser.get_all_instruments()
        for instr in users_instruments:
            if not self.is_inelastic(instr):
                raise RuntimeError('Instrument {0} is not among acceptable instruments'.format(instr))
        self._user = theUser
        # pylint: disable=W0201
        # its init method so the change is reasonable
        self._fedid = theUser.userID
        user_folder = os.path.join(self._home_path, self._fedid)
        if not os.path.exists(user_folder):
            raise RuntimeError("User with fedID {0} does not exist. Create such user folder first".format(self._fedid))
        # get RB folders for all experiments user participates in.
        all_rbf = theUser.get_all_rb()
        for rb_folder in all_rbf:
            if not os.path.exists(str(rb_folder)):
                raise RuntimeError(
                    "Experiment folder with {0} does not exist. Create such folder first".format(rb_folder))
        #
        # how to check cycle folders, they may not be available
        self._cycle_data_folder = set()
        # pylint: disable=W0212
        for date_key, folder_id in list(theUser._cycle_IDs.items()):
            self._cycle_data_folder.add(self.get_data_folder_name(theUser._instrument[date_key], folder_id))
        # Initialize configuration settings
        self._dynamic_configuration = copy.deepcopy(self._dynamic_options_base)
        self._init_config()
#
def _check_server_folders_present(self):
"""Routine checks all necessary server folder are present"""
if not os.path.exists(self._mantid_path):
raise RuntimeError("SERVER ERROR: no correct Mantid path defined at {0}".format(self._mantid_path))
if not os.path.exists(self._home_path):
raise RuntimeError("SERVER ERROR: no correct home path defined at {0}".format(self._home_path))
if not os.path.exists(self._script_repo):
raise RuntimeError(("SERVER ERROR: no correct user script repository defined at {0}\n"
"Check out Mantid script repository from account, "
"which have admin rights").format(self._script_repo))
if not os.path.exists(self._map_mask_folder):
raise RuntimeError(("SERVER ERROR: no correct map/mask folder defined at {0}\n"
"Check out Mantid map/mask files from svn at"
" https://svn.isis.rl.ac.uk/InstrumentFiles/trunk").format(self._map_mask_folder))
def _init_config(self):
"""Execute Mantid properties setup methods"""
for fun in self._dynamic_options:
fun()
#
def _set_default_inst(self):
"""Set up last instrument, deployed by user"""
if self._user:
InstrName = self._user.instrument
self._dynamic_configuration.append('default.instrument={0}'.format(InstrName))
else:
self._dynamic_configuration.append('default.instrument={0}'.format('MARI'))
#
def _set_script_repo(self):
""" defines script repository location. By default its option is commented"""
self._dynamic_configuration.append('#ScriptLocalRepository={0}'.format(self._script_repo))
#
def _def_python_search_path(self):
"""Define path for Mantid Inelastic python scripts"""
# Note, instrument name script folder is currently upper case on GIT
if not self._user:
raise RuntimeError("Can not define python search path without defined user")
# define main Mantid scripts search path
path = os.path.join(self._mantid_path, 'scripts/')
for part in self._python_mantid_path:
path += ';' + os.path.join(self._mantid_path, part)
# define and append user scripts search path
user_path_part = copy.deepcopy(self._python_user_scripts)
# pylint: disable=W0212
for instr in self._user._instrument.values():
user_path_part.add(os.path.join('direct_inelastic', instr.upper()))
for part in user_path_part:
path += ';' + os.path.join(self._script_repo, part) + '/'
self._dynamic_configuration.append('pythonscripts.directories=' + path)
#
def _set_rb_directory(self):
"""Set up default save directory, the one where data are saved by default"""
if self._user:
rb_folder = self._user.rb_dir
self._dynamic_configuration.append('defaultsave.directory={0}'.format(rb_folder))
else:
raise RuntimeError("Can not define RB folder without user being defined")
#
def _set_datasearch_directory(self):
"""Note, map/mask instrument folder is lower case as if loaded from SVN.
Autoreduction may have it upper case"""
if not self._user:
raise RuntimeError("Can not define Data search path without user being defined")
instr_name = self._user.instrument
map_mask_dir = os.path.abspath(os.path.join('{0}'.format(self._map_mask_folder),
'{0}'.format(str.lower(instr_name))))
# set up all data folders
all_data_folders = list(self._cycle_data_folder)
data_dir = os.path.abspath('{0}'.format(all_data_folders[0]))
for folder in all_data_folders[1:]:
data_dir += ';' + os.path.abspath('{0}'.format(folder))
all_rb_folders = self._user.get_all_rb()
for folder in all_rb_folders:
data_dir += ';' + os.path.abspath('{0}'.format(folder))
self._dynamic_configuration.append('datasearch.directories=' + map_mask_dir + ';' + data_dir)
#
    def generate_config(self, key_users_list=None):
        """Save generated Mantid configuration file into user's home folder
        and copy other files, necessary for Mantid to work properly.

        :param key_users_list: optional mapping of RB group name to the "key
               user" fedID; when given, reduction samples are copied only for
               experiments where this user is the key user.
        """
        user_path = os.path.join(self._home_path, self._fedid)
        config_path = os.path.join(user_path, '.mantid')
        if not os.path.exists(config_path):
            os.makedirs(config_path)
        config_file = os.path.join(config_path, 'Mantid.user.properties')
        # only rewrite the config when it is outdated/modified
        if self.config_need_replacing(config_file):
            self._write_user_config_file(config_file)
        else:
            pass
        # hand ownership of the config folder back to the user (POSIX only)
        if platform.system() != 'Windows':
            os.system('chown -R {0}:{0} {1}'.format(self._fedid, config_path))
        self.make_map_mask_links(user_path)
        users_cycles = self._user.get_all_cycles()
        users_rb = self._user.get_all_rb()
        # extract rb folder without path, which gives RB group name
        users_rb = list(map(os.path.basename, users_rb))
        #
        # copy the sample reduction script for each cycle/experiment pair
        for cycle, rb_name in zip(users_cycles, users_rb):
            if key_users_list:
                key_user = str(key_users_list[rb_name])
                # only the key user of an experiment gets the sample copied
                if self._fedid.lower() != key_user.lower():
                    continue
            instr = self._user.get_instrument(cycle)
            self.copy_reduction_sample(self.get_user_file_description(instr), cycle, rb_name)
#
#
    def make_map_mask_links(self, user_path):
        """The method generates references to map files and places these references
        to the user's desktop.

        :param user_path: the user's home folder; the link is created under
               self._user_specific_link_path inside it.
        """
        # the path where to set up links, important to user
        links_path = os.path.join(user_path, self._user_specific_link_path)
        if not os.path.exists(links_path):
            os.makedirs(links_path)
        # the path have to belong to user
        if platform.system() != 'Windows':
            os.system('chown -R {0}:{0} {1}'.format(self._fedid, links_path))
        map_mask_folder_link = os.path.join(links_path, self._map_mask_link_name)
        # nothing to do when the link already exists
        if os.path.exists(map_mask_folder_link):
            return
        # create link to map mask folder
        if platform.system() == 'Windows':
            # the script is not intended to run on Windows, so this is just for testing
            mmfl = map_mask_folder_link.replace('/', '\\')
            mmf = self._map_mask_folder.replace('/', '\\')
            os.system("mklink /J {0} {1}".format(mmfl, mmf))
        else:
            os.system('ln -s {0} {1}'.format(self._map_mask_folder, map_mask_folder_link))
def _write_user_config_file(self, config_file_name):
"""Write existing dynamic configuration from memory to
user defined configuration file
"""
# pylint: disable=C0103
# What is wrong with fp variable name here?
fp = open(config_file_name, 'w')
fp.write(self._header)
fp.write('## ----- Generated user properties ------------ \n')
fp.write('##\n')
for opt in self._dynamic_configuration:
fp.write(opt)
fp.write('\n##\n')
fp.write(self._footer)
fp.close()
if platform.system() != 'Windows':
os.system('chown -R {0}:{0} {1}'.format(self._fedid, config_file_name))
# Set up configuration for the specific time, which should change only if user
# modified this configuration
start_date = self._user.start_date
file_time = time.mktime(start_date.timetuple())
os.utime(config_file_name, (file_time, file_time))
# pylint: disable = invalid-name
if __name__ == "__main__":
    # command-line entry point: configure a single user from five arguments
    if len(sys.argv) != 6:
        print("usage: Config.py userID instrument RBNumber cycleID start_date")
        exit()
    argi = sys.argv[1:]
    user = UserProperties(*argi)
    # pick server paths per platform; the Windows branch exists only for testing
    if platform.system() == 'Windows':
        sys.path.insert(0, 'c:/Mantid/scripts/Inelastic/Direct')
        base = 'd:/Data/Mantid_Testing/config_script_test_folder'
        analysisDir = base
        MantidDir = r"c:\Mantid\_builds\br_master\bin\Release"
        UserScriptRepoDir = os.path.join(analysisDir, "UserScripts")
        MapMaskDir = os.path.join(analysisDir, "InstrumentFiles")
        rootDir = os.path.join(base, 'users')
    else:
        sys.path.insert(0, '/opt/Mantid/scripts/Inelastic/Direct/')
        # sys.path.insert(0,'/opt/mantidnightly/scripts/Inelastic/Direct/')
        MantidDir = '/opt/Mantid'
        MapMaskDir = '/usr/local/mprogs/InstrumentFiles/'
        UserScriptRepoDir = '/opt/UserScripts'
        home = '/home'
        #
        rootDir = "/home/"
        analysisDir = "/instrument/"
    # initialize Mantid configuration
    # its testing route under main so it rightly imports itself
    # pylint: disable=W0406
    mcf = MantidConfigDirectInelastic(MantidDir, rootDir, UserScriptRepoDir, MapMaskDir)
    print("Successfully initialized ISIS Inelastic Configuration script generator")
    # the user's RB folder is expected directly under their home folder
    rb_user_folder = os.path.join(mcf._home_path, user.userID)
    user.rb_dir = rb_user_folder
    if not user.rb_dir_exist:
        print("RB folder {0} for user {1} should exist and be accessible to configure this user".format(user.rb_dir,
                                                                                                        user.userID))
        exit()
    # Configure user
    mcf.init_user(user.userID, user)
    mcf.generate_config()
    print("Successfully Configured user: {0} for instrument {1} and RBNum: {2}"
          .format(user.userID, user.instrument, user.rb_folder))
| gpl-3.0 |
h3biomed/ansible | test/units/modules/storage/netapp/test_na_ontap_svm.py | 37 | 10298 | # (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit test template for ONTAP Ansible module '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_svm \
import NetAppOntapSVM as svm_module # module under test
# skip the whole module when the NetApp ZAPI client library is not installed
if not netapp_utils.has_netapp_lib():
    pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
    """prepare arguments so that they will be picked up during module creation"""
    serialized = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    basic._ANSIBLE_ARGS = to_bytes(serialized)  # pylint: disable=protected-access
class AnsibleExitJson(Exception):
    """Raised in place of module.exit_json so tests can catch the result."""
class AnsibleFailJson(Exception):
    """Raised in place of module.fail_json so tests can catch the failure."""
def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
    """function to patch over exit_json; package return data into an exception"""
    # ensure the module result always carries a 'changed' flag
    kwargs.setdefault('changed', False)
    raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
    """function to patch over fail_json; package return data into an exception"""
    # always mark the packaged result as failed
    raise AnsibleFailJson(dict(kwargs, failed=True))
class MockONTAPConnection(object):
    ''' mock server connection to ONTAP host '''

    def __init__(self, kind=None, data=None):
        ''' remember which connection flavour to fake and its canned payload '''
        self.type = kind
        self.params = data
        self.xml_in = None
        self.xml_out = None

    def invoke_successfully(self, xml, enable_tunneling):  # pylint: disable=unused-argument
        ''' record the request; 'vserver' connections answer with canned vserver data,
        any other kind just echoes the request back '''
        self.xml_in = xml
        response = xml
        if self.type == 'vserver':
            response = self.build_vserver_info(self.params)
        self.xml_out = response
        return response

    @staticmethod
    def build_vserver_info(vserver):
        ''' build xml data for vserser-info '''
        attributes = {
            'vserver-name': vserver['name'],
            'ipspace': vserver['ipspace'],
            'root-volume': vserver['root_volume'],
            'root-volume-aggregate': vserver['root_volume_aggregate'],
            'language': vserver['language'],
            'comment': vserver['comment'],
            'snapshot-policy': vserver['snapshot_policy'],
            'vserver-subtype': vserver['subtype'],
            'allowed-protocols': [{'protocol': 'nfs'}, {'protocol': 'cifs'}],
            'aggr-list': [{'aggr-name': 'aggr_1'}, {'aggr-name': 'aggr_2'}],
        }
        xml = netapp_utils.zapi.NaElement('xml')
        xml.translate_struct({'num-records': 1, 'attributes-list': {'vserver-info': attributes}})
        return xml
class TestMyModule(unittest.TestCase):
    ''' a group of related Unit Tests '''
    def setUp(self):
        # route AnsibleModule.exit_json/fail_json through exceptions so each
        # test can catch and inspect the module result
        self.mock_module_helper = patch.multiple(basic.AnsibleModule,
                                                 exit_json=exit_json,
                                                 fail_json=fail_json)
        self.mock_module_helper.start()
        self.addCleanup(self.mock_module_helper.stop)
        self.server = MockONTAPConnection()
        # canned vserver attributes shared by most tests below
        self.mock_vserver = {
            'name': 'test_svm',
            'root_volume': 'ansible_vol',
            'root_volume_aggregate': 'ansible_aggr',
            'aggr_list': 'aggr_1,aggr_2',
            'ipspace': 'ansible_ipspace',
            'subtype': 'default',
            'language': 'c.utf_8',
            'snapshot_policy': 'old_snapshot_policy',
            'comment': 'this is a comment'
        }
    def mock_args(self):
        # minimal module argument set derived from the canned vserver
        return {
            'name': self.mock_vserver['name'],
            'root_volume': self.mock_vserver['root_volume'],
            'root_volume_aggregate': self.mock_vserver['root_volume_aggregate'],
            'aggr_list': self.mock_vserver['aggr_list'],
            'ipspace': self.mock_vserver['ipspace'],
            'comment': self.mock_vserver['comment'],
            'subtype': 'default',
            'hostname': 'test',
            'username': 'test_user',
            'password': 'test_pass!'
        }
    def get_vserver_mock_object(self, kind=None, data=None):
        """
        Helper method to return an na_ontap_volume object
        :param kind: passes this param to MockONTAPConnection()
        :param data: passes this param to MockONTAPConnection();
                     defaults to self.mock_vserver when kind is given
        :return: na_ontap_volume object
        """
        vserver_obj = svm_module()
        vserver_obj.asup_log_for_cserver = Mock(return_value=None)
        vserver_obj.cluster = Mock()
        vserver_obj.cluster.invoke_successfully = Mock()
        if kind is None:
            # plain echo connection: get_vserver() will find nothing
            vserver_obj.server = MockONTAPConnection()
        else:
            if data is None:
                vserver_obj.server = MockONTAPConnection(kind='vserver', data=self.mock_vserver)
            else:
                vserver_obj.server = MockONTAPConnection(kind='vserver', data=data)
        return vserver_obj
    def test_module_fail_when_required_args_missing(self):
        ''' required arguments are reported as errors '''
        with pytest.raises(AnsibleFailJson) as exc:
            set_module_args({})
            svm_module()
        print('Info: %s' % exc.value.args[0]['msg'])
    def test_get_nonexistent_vserver(self):
        ''' test if get_vserver() returns None if vserver does not exist '''
        data = self.mock_args()
        set_module_args(data)
        result = self.get_vserver_mock_object().get_vserver()
        assert result is None
    def test_create_error_missing_name(self):
        ''' Test if create throws an error if name is not specified'''
        data = self.mock_args()
        del data['name']
        set_module_args(data)
        with pytest.raises(AnsibleFailJson) as exc:
            self.get_vserver_mock_object('vserver').create_vserver()
        msg = 'missing required arguments: name'
        assert exc.value.args[0]['msg'] == msg
    @patch('ansible.modules.storage.netapp.na_ontap_svm.NetAppOntapSVM.create_vserver')
    def test_successful_create(self, create_vserver):
        '''Test successful create'''
        data = self.mock_args()
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_vserver_mock_object().apply()
        assert exc.value.args[0]['changed']
        create_vserver.assert_called_with()
    @patch('ansible.modules.storage.netapp.na_ontap_svm.NetAppOntapSVM.create_vserver')
    def test_create_idempotency(self, create_vserver):
        '''Test create idempotency: no change when the vserver already exists'''
        data = self.mock_args()
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_vserver_mock_object('vserver').apply()
        assert not exc.value.args[0]['changed']
        create_vserver.assert_not_called()
    def test_successful_delete(self):
        '''Test successful delete'''
        data = self.mock_args()
        data['state'] = 'absent'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_vserver_mock_object('vserver').apply()
        assert exc.value.args[0]['changed']
    @patch('ansible.modules.storage.netapp.na_ontap_svm.NetAppOntapSVM.delete_vserver')
    def test_delete_idempotency(self, delete_vserver):
        '''Test delete idempotency'''
        data = self.mock_args()
        data['state'] = 'absent'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_vserver_mock_object().apply()
        assert not exc.value.args[0]['changed']
        delete_vserver.assert_not_called()
    @patch('ansible.modules.storage.netapp.na_ontap_svm.NetAppOntapSVM.get_vserver')
    def test_successful_rename(self, get_vserver):
        '''Test successful rename'''
        data = self.mock_args()
        data['from_name'] = 'test_svm'
        data['name'] = 'test_new_svm'
        set_module_args(data)
        current = {
            'name': 'test_svm',
            'root_volume': 'ansible_vol',
            'root_volume_aggregate': 'ansible_aggr',
            'ipspace': 'ansible_ipspace',
            'subtype': 'default',
            'language': 'c.utf_8'
        }
        # first lookup (new name) finds nothing, second (from_name) finds current
        get_vserver.side_effect = [
            None,
            current
        ]
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_vserver_mock_object().apply()
        assert exc.value.args[0]['changed']
    def test_successful_modify_language(self):
        '''Test successful modify language'''
        data = self.mock_args()
        data['language'] = 'c'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_vserver_mock_object('vserver').apply()
        assert exc.value.args[0]['changed']
    def test_successful_modify_snapshot_policy(self):
        '''Test successful modify snapshot policy'''
        data = self.mock_args()
        data['snapshot_policy'] = 'new_snapshot_policy'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_vserver_mock_object('vserver').apply()
        assert exc.value.args[0]['changed']
    def test_successful_modify_allowed_protocols(self):
        '''Test successful modify allowed protocols'''
        data = self.mock_args()
        data['allowed_protocols'] = 'protocol_1,protocol_2'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_vserver_mock_object('vserver').apply()
        assert exc.value.args[0]['changed']
    def test_successful_modify_aggr_list(self):
        '''Test successful modify aggr-list'''
        data = self.mock_args()
        data['aggr_list'] = 'aggr_3,aggr_4'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_vserver_mock_object('vserver').apply()
        assert exc.value.args[0]['changed']
| gpl-3.0 |
vietpn/ghost-nodejs | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/lexers/_openedgebuiltins.py | 370 | 40661 | # -*- coding: utf-8 -*-
"""
pygments.lexers._openedgebuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Builtin list for the OpenEdgeLexer.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
OPENEDGEKEYWORDS = [
'ABSOLUTE', 'ABS', 'ABSO', 'ABSOL', 'ABSOLU', 'ABSOLUT', 'ACCELERATOR',
'ACCUM', 'ACCUMULATE', 'ACCUM', 'ACCUMU', 'ACCUMUL', 'ACCUMULA',
'ACCUMULAT', 'ACTIVE-FORM', 'ACTIVE-WINDOW', 'ADD', 'ADD-BUFFER',
'ADD-CALC-COLUMN', 'ADD-COLUMNS-FROM', 'ADD-EVENTS-PROCEDURE',
'ADD-FIELDS-FROM', 'ADD-FIRST', 'ADD-INDEX-FIELD', 'ADD-LAST',
'ADD-LIKE-COLUMN', 'ADD-LIKE-FIELD', 'ADD-LIKE-INDEX', 'ADD-NEW-FIELD',
'ADD-NEW-INDEX', 'ADD-SCHEMA-LOCATION', 'ADD-SUPER-PROCEDURE', 'ADM-DATA',
'ADVISE', 'ALERT-BOX', 'ALIAS', 'ALL', 'ALLOW-COLUMN-SEARCHING',
'ALLOW-REPLICATION', 'ALTER', 'ALWAYS-ON-TOP', 'AMBIGUOUS', 'AMBIG',
'AMBIGU', 'AMBIGUO', 'AMBIGUOU', 'ANALYZE', 'ANALYZ', 'AND', 'ANSI-ONLY',
'ANY', 'ANYWHERE', 'APPEND', 'APPL-ALERT-BOXES', 'APPL-ALERT',
'APPL-ALERT-', 'APPL-ALERT-B', 'APPL-ALERT-BO', 'APPL-ALERT-BOX',
'APPL-ALERT-BOXE', 'APPL-CONTEXT-ID', 'APPLICATION', 'APPLY',
'APPSERVER-INFO', 'APPSERVER-PASSWORD', 'APPSERVER-USERID', 'ARRAY-MESSAGE',
'AS', 'ASC', 'ASCENDING', 'ASCE', 'ASCEN', 'ASCEND', 'ASCENDI', 'ASCENDIN',
'ASK-OVERWRITE', 'ASSEMBLY', 'ASSIGN', 'ASYNCHRONOUS',
'ASYNC-REQUEST-COUNT', 'ASYNC-REQUEST-HANDLE', 'AT', 'ATTACHED-PAIRLIST',
'ATTR-SPACE', 'ATTR', 'ATTRI', 'ATTRIB', 'ATTRIBU', 'ATTRIBUT',
'AUDIT-CONTROL', 'AUDIT-ENABLED', 'AUDIT-EVENT-CONTEXT', 'AUDIT-POLICY',
'AUTHENTICATION-FAILED', 'AUTHORIZATION', 'AUTO-COMPLETION', 'AUTO-COMP',
'AUTO-COMPL', 'AUTO-COMPLE', 'AUTO-COMPLET', 'AUTO-COMPLETI',
'AUTO-COMPLETIO', 'AUTO-ENDKEY', 'AUTO-END-KEY', 'AUTO-GO', 'AUTO-INDENT',
'AUTO-IND', 'AUTO-INDE', 'AUTO-INDEN', 'AUTOMATIC', 'AUTO-RESIZE',
'AUTO-RETURN', 'AUTO-RET', 'AUTO-RETU', 'AUTO-RETUR', 'AUTO-SYNCHRONIZE',
'AUTO-ZAP', 'AUTO-Z', 'AUTO-ZA', 'AVAILABLE', 'AVAIL', 'AVAILA', 'AVAILAB',
'AVAILABL', 'AVAILABLE-FORMATS', 'AVERAGE', 'AVE', 'AVER', 'AVERA',
'AVERAG', 'AVG', 'BACKGROUND', 'BACK', 'BACKG', 'BACKGR', 'BACKGRO',
'BACKGROU', 'BACKGROUN', 'BACKWARDS', 'BACKWARD', 'BASE64-DECODE',
'BASE64-ENCODE', 'BASE-ADE', 'BASE-KEY', 'BATCH-MODE', 'BATCH', 'BATCH-',
'BATCH-M', 'BATCH-MO', 'BATCH-MOD', 'BATCH-SIZE', 'BEFORE-HIDE', 'BEFORE-H',
'BEFORE-HI', 'BEFORE-HID', 'BEGIN-EVENT-GROUP', 'BEGINS', 'BELL', 'BETWEEN',
'BGCOLOR', 'BGC', 'BGCO', 'BGCOL', 'BGCOLO', 'BIG-ENDIAN', 'BINARY', 'BIND',
'BIND-WHERE', 'BLANK', 'BLOCK-ITERATION-DISPLAY', 'BORDER-BOTTOM-CHARS',
'BORDER-B', 'BORDER-BO', 'BORDER-BOT', 'BORDER-BOTT', 'BORDER-BOTTO',
'BORDER-BOTTOM-PIXELS', 'BORDER-BOTTOM-P', 'BORDER-BOTTOM-PI',
'BORDER-BOTTOM-PIX', 'BORDER-BOTTOM-PIXE', 'BORDER-BOTTOM-PIXEL',
'BORDER-LEFT-CHARS', 'BORDER-L', 'BORDER-LE', 'BORDER-LEF', 'BORDER-LEFT',
'BORDER-LEFT-', 'BORDER-LEFT-C', 'BORDER-LEFT-CH', 'BORDER-LEFT-CHA',
'BORDER-LEFT-CHAR', 'BORDER-LEFT-PIXELS', 'BORDER-LEFT-P', 'BORDER-LEFT-PI',
'BORDER-LEFT-PIX', 'BORDER-LEFT-PIXE', 'BORDER-LEFT-PIXEL',
'BORDER-RIGHT-CHARS', 'BORDER-R', 'BORDER-RI', 'BORDER-RIG', 'BORDER-RIGH',
'BORDER-RIGHT', 'BORDER-RIGHT-', 'BORDER-RIGHT-C', 'BORDER-RIGHT-CH',
'BORDER-RIGHT-CHA', 'BORDER-RIGHT-CHAR', 'BORDER-RIGHT-PIXELS',
'BORDER-RIGHT-P', 'BORDER-RIGHT-PI', 'BORDER-RIGHT-PIX',
'BORDER-RIGHT-PIXE', 'BORDER-RIGHT-PIXEL', 'BORDER-TOP-CHARS', 'BORDER-T',
'BORDER-TO', 'BORDER-TOP', 'BORDER-TOP-', 'BORDER-TOP-C', 'BORDER-TOP-CH',
'BORDER-TOP-CHA', 'BORDER-TOP-CHAR', 'BORDER-TOP-PIXELS', 'BORDER-TOP-P',
'BORDER-TOP-PI', 'BORDER-TOP-PIX', 'BORDER-TOP-PIXE', 'BORDER-TOP-PIXEL',
'BOX', 'BOX-SELECTABLE', 'BOX-SELECT', 'BOX-SELECTA', 'BOX-SELECTAB',
'BOX-SELECTABL', 'BREAK', 'BROWSE', 'BUFFER', 'BUFFER-CHARS',
'BUFFER-COMPARE', 'BUFFER-COPY', 'BUFFER-CREATE', 'BUFFER-DELETE',
'BUFFER-FIELD', 'BUFFER-HANDLE', 'BUFFER-LINES', 'BUFFER-NAME',
'BUFFER-RELEASE', 'BUFFER-VALUE', 'BUTTON', 'BUTTONS', 'BUTTON', 'BY',
'BY-POINTER', 'BY-VARIANT-POINTER', 'CACHE', 'CACHE-SIZE', 'CALL',
'CALL-NAME', 'CALL-TYPE', 'CANCEL-BREAK', 'CANCEL-BUTTON', 'CAN-CREATE',
'CAN-DELETE', 'CAN-DO', 'CAN-FIND', 'CAN-QUERY', 'CAN-READ', 'CAN-SET',
'CAN-WRITE', 'CAPS', 'CAREFUL-PAINT', 'CASE', 'CASE-SENSITIVE', 'CASE-SEN',
'CASE-SENS', 'CASE-SENSI', 'CASE-SENSIT', 'CASE-SENSITI', 'CASE-SENSITIV',
'CAST', 'CATCH', 'CDECL', 'CENTERED', 'CENTER', 'CENTERE', 'CHAINED',
'CHARACTER_LENGTH', 'CHARSET', 'CHECK', 'CHECKED', 'CHOOSE', 'CHR', 'CLASS',
'CLASS-TYPE', 'CLEAR', 'CLEAR-APPL-CONTEXT', 'CLEAR-LOG', 'CLEAR-SELECTION',
'CLEAR-SELECT', 'CLEAR-SELECTI', 'CLEAR-SELECTIO', 'CLEAR-SORT-ARROWS',
'CLEAR-SORT-ARROW', 'CLIENT-CONNECTION-ID', 'CLIENT-PRINCIPAL',
'CLIENT-TTY', 'CLIENT-TYPE', 'CLIENT-WORKSTATION', 'CLIPBOARD', 'CLOSE',
'CLOSE-LOG', 'CODE', 'CODEBASE-LOCATOR', 'CODEPAGE', 'CODEPAGE-CONVERT',
'COLLATE', 'COL-OF', 'COLON', 'COLON-ALIGNED', 'COLON-ALIGN',
'COLON-ALIGNE', 'COLOR', 'COLOR-TABLE', 'COLUMN', 'COL', 'COLU', 'COLUM',
'COLUMN-BGCOLOR', 'COLUMN-DCOLOR', 'COLUMN-FGCOLOR', 'COLUMN-FONT',
'COLUMN-LABEL', 'COLUMN-LAB', 'COLUMN-LABE', 'COLUMN-MOVABLE', 'COLUMN-OF',
'COLUMN-PFCOLOR', 'COLUMN-READ-ONLY', 'COLUMN-RESIZABLE', 'COLUMNS',
'COLUMN-SCROLLING', 'COMBO-BOX', 'COMMAND', 'COMPARES', 'COMPILE',
'COMPILER', 'COMPLETE', 'COM-SELF', 'CONFIG-NAME', 'CONNECT', 'CONNECTED',
'CONSTRUCTOR', 'CONTAINS', 'CONTENTS', 'CONTEXT', 'CONTEXT-HELP',
'CONTEXT-HELP-FILE', 'CONTEXT-HELP-ID', 'CONTEXT-POPUP', 'CONTROL',
'CONTROL-BOX', 'CONTROL-FRAME', 'CONVERT', 'CONVERT-3D-COLORS',
'CONVERT-TO-OFFSET', 'CONVERT-TO-OFFS', 'CONVERT-TO-OFFSE', 'COPY-DATASET',
'COPY-LOB', 'COPY-SAX-ATTRIBUTES', 'COPY-TEMP-TABLE', 'COUNT', 'COUNT-OF',
'CPCASE', 'CPCOLL', 'CPINTERNAL', 'CPLOG', 'CPPRINT', 'CPRCODEIN',
'CPRCODEOUT', 'CPSTREAM', 'CPTERM', 'CRC-VALUE', 'CREATE', 'CREATE-LIKE',
'CREATE-LIKE-SEQUENTIAL', 'CREATE-NODE-NAMESPACE',
'CREATE-RESULT-LIST-ENTRY', 'CREATE-TEST-FILE', 'CURRENT', 'CURRENT_DATE',
'CURRENT_DATE', 'CURRENT-CHANGED', 'CURRENT-COLUMN', 'CURRENT-ENVIRONMENT',
'CURRENT-ENV', 'CURRENT-ENVI', 'CURRENT-ENVIR', 'CURRENT-ENVIRO',
'CURRENT-ENVIRON', 'CURRENT-ENVIRONM', 'CURRENT-ENVIRONME',
'CURRENT-ENVIRONMEN', 'CURRENT-ITERATION', 'CURRENT-LANGUAGE',
'CURRENT-LANG', 'CURRENT-LANGU', 'CURRENT-LANGUA', 'CURRENT-LANGUAG',
'CURRENT-QUERY', 'CURRENT-RESULT-ROW', 'CURRENT-ROW-MODIFIED',
'CURRENT-VALUE', 'CURRENT-WINDOW', 'CURSOR', 'CURS', 'CURSO', 'CURSOR-CHAR',
'CURSOR-LINE', 'CURSOR-OFFSET', 'DATABASE', 'DATA-BIND',
'DATA-ENTRY-RETURN', 'DATA-ENTRY-RET', 'DATA-ENTRY-RETU',
'DATA-ENTRY-RETUR', 'DATA-RELATION', 'DATA-REL', 'DATA-RELA', 'DATA-RELAT',
'DATA-RELATI', 'DATA-RELATIO', 'DATASERVERS', 'DATASET', 'DATASET-HANDLE',
'DATA-SOURCE', 'DATA-SOURCE-COMPLETE-MAP', 'DATA-SOURCE-MODIFIED',
'DATA-SOURCE-ROWID', 'DATA-TYPE', 'DATA-T', 'DATA-TY', 'DATA-TYP',
'DATE-FORMAT', 'DATE-F', 'DATE-FO', 'DATE-FOR', 'DATE-FORM', 'DATE-FORMA',
'DAY', 'DBCODEPAGE', 'DBCOLLATION', 'DBNAME', 'DBPARAM', 'DB-REFERENCES',
'DBRESTRICTIONS', 'DBREST', 'DBRESTR', 'DBRESTRI', 'DBRESTRIC',
'DBRESTRICT', 'DBRESTRICTI', 'DBRESTRICTIO', 'DBRESTRICTION', 'DBTASKID',
'DBTYPE', 'DBVERSION', 'DBVERS', 'DBVERSI', 'DBVERSIO', 'DCOLOR', 'DDE',
'DDE-ERROR', 'DDE-ID', 'DDE-I', 'DDE-ITEM', 'DDE-NAME', 'DDE-TOPIC',
'DEBLANK', 'DEBUG', 'DEBU', 'DEBUG-ALERT', 'DEBUGGER', 'DEBUG-LIST',
'DECIMALS', 'DECLARE', 'DECLARE-NAMESPACE', 'DECRYPT', 'DEFAULT',
'DEFAULT-BUFFER-HANDLE', 'DEFAULT-BUTTON', 'DEFAUT-B', 'DEFAUT-BU',
'DEFAUT-BUT', 'DEFAUT-BUTT', 'DEFAUT-BUTTO', 'DEFAULT-COMMIT',
'DEFAULT-EXTENSION', 'DEFAULT-EX', 'DEFAULT-EXT', 'DEFAULT-EXTE',
'DEFAULT-EXTEN', 'DEFAULT-EXTENS', 'DEFAULT-EXTENSI', 'DEFAULT-EXTENSIO',
'DEFAULT-NOXLATE', 'DEFAULT-NOXL', 'DEFAULT-NOXLA', 'DEFAULT-NOXLAT',
'DEFAULT-VALUE', 'DEFAULT-WINDOW', 'DEFINED', 'DEFINE-USER-EVENT-MANAGER',
'DELETE', 'DEL', 'DELE', 'DELET', 'DELETE-CHARACTER', 'DELETE-CHAR',
'DELETE-CHARA', 'DELETE-CHARAC', 'DELETE-CHARACT', 'DELETE-CHARACTE',
'DELETE-CURRENT-ROW', 'DELETE-LINE', 'DELETE-RESULT-LIST-ENTRY',
'DELETE-SELECTED-ROW', 'DELETE-SELECTED-ROWS', 'DELIMITER', 'DESC',
'DESCENDING', 'DESC', 'DESCE', 'DESCEN', 'DESCEND', 'DESCENDI', 'DESCENDIN',
'DESELECT-FOCUSED-ROW', 'DESELECTION', 'DESELECT-ROWS',
'DESELECT-SELECTED-ROW', 'DESTRUCTOR', 'DIALOG-BOX', 'DICTIONARY', 'DICT',
'DICTI', 'DICTIO', 'DICTION', 'DICTIONA', 'DICTIONAR', 'DIR', 'DISABLE',
'DISABLE-AUTO-ZAP', 'DISABLED', 'DISABLE-DUMP-TRIGGERS',
'DISABLE-LOAD-TRIGGERS', 'DISCONNECT', 'DISCON', 'DISCONN', 'DISCONNE',
'DISCONNEC', 'DISP', 'DISPLAY', 'DISP', 'DISPL', 'DISPLA',
'DISPLAY-MESSAGE', 'DISPLAY-TYPE', 'DISPLAY-T', 'DISPLAY-TY', 'DISPLAY-TYP',
'DISTINCT', 'DO', 'DOMAIN-DESCRIPTION', 'DOMAIN-NAME', 'DOMAIN-TYPE', 'DOS',
'DOUBLE', 'DOWN', 'DRAG-ENABLED', 'DROP', 'DROP-DOWN', 'DROP-DOWN-LIST',
'DROP-FILE-NOTIFY', 'DROP-TARGET', 'DUMP', 'DYNAMIC', 'DYNAMIC-FUNCTION',
'EACH', 'ECHO', 'EDGE-CHARS', 'EDGE', 'EDGE-', 'EDGE-C', 'EDGE-CH',
'EDGE-CHA', 'EDGE-CHAR', 'EDGE-PIXELS', 'EDGE-P', 'EDGE-PI', 'EDGE-PIX',
'EDGE-PIXE', 'EDGE-PIXEL', 'EDIT-CAN-PASTE', 'EDIT-CAN-UNDO', 'EDIT-CLEAR',
'EDIT-COPY', 'EDIT-CUT', 'EDITING', 'EDITOR', 'EDIT-PASTE', 'EDIT-UNDO',
'ELSE', 'EMPTY', 'EMPTY-TEMP-TABLE', 'ENABLE', 'ENABLED-FIELDS', 'ENCODE',
'ENCRYPT', 'ENCRYPT-AUDIT-MAC-KEY', 'ENCRYPTION-SALT', 'END',
'END-DOCUMENT', 'END-ELEMENT', 'END-EVENT-GROUP', 'END-FILE-DROP', 'ENDKEY',
'END-KEY', 'END-MOVE', 'END-RESIZE', 'END-ROW-RESIZE', 'END-USER-PROMPT',
'ENTERED', 'ENTRY', 'EQ', 'ERROR', 'ERROR-COLUMN', 'ERROR-COL',
'ERROR-COLU', 'ERROR-COLUM', 'ERROR-ROW', 'ERROR-STACK-TRACE',
'ERROR-STATUS', 'ERROR-STAT', 'ERROR-STATU', 'ESCAPE', 'ETIME',
'EVENT-GROUP-ID', 'EVENT-PROCEDURE', 'EVENT-PROCEDURE-CONTEXT', 'EVENTS',
'EVENT', 'EVENT-TYPE', 'EVENT-T', 'EVENT-TY', 'EVENT-TYP', 'EXCEPT',
'EXCLUSIVE-ID', 'EXCLUSIVE-LOCK', 'EXCLUSIVE', 'EXCLUSIVE-', 'EXCLUSIVE-L',
'EXCLUSIVE-LO', 'EXCLUSIVE-LOC', 'EXCLUSIVE-WEB-USER', 'EXECUTE', 'EXISTS',
'EXP', 'EXPAND', 'EXPANDABLE', 'EXPLICIT', 'EXPORT', 'EXPORT-PRINCIPAL',
'EXTENDED', 'EXTENT', 'EXTERNAL', 'FALSE', 'FETCH', 'FETCH-SELECTED-ROW',
'FGCOLOR', 'FGC', 'FGCO', 'FGCOL', 'FGCOLO', 'FIELD', 'FIELDS', 'FIELD',
'FILE', 'FILE-CREATE-DATE', 'FILE-CREATE-TIME', 'FILE-INFORMATION',
'FILE-INFO', 'FILE-INFOR', 'FILE-INFORM', 'FILE-INFORMA', 'FILE-INFORMAT',
'FILE-INFORMATI', 'FILE-INFORMATIO', 'FILE-MOD-DATE', 'FILE-MOD-TIME',
'FILENAME', 'FILE-NAME', 'FILE-OFFSET', 'FILE-OFF', 'FILE-OFFS',
'FILE-OFFSE', 'FILE-SIZE', 'FILE-TYPE', 'FILL', 'FILLED', 'FILL-IN',
'FILTERS', 'FINAL', 'FINALLY', 'FIND', 'FIND-BY-ROWID',
'FIND-CASE-SENSITIVE', 'FIND-CURRENT', 'FINDER', 'FIND-FIRST',
'FIND-GLOBAL', 'FIND-LAST', 'FIND-NEXT-OCCURRENCE', 'FIND-PREV-OCCURRENCE',
'FIND-SELECT', 'FIND-UNIQUE', 'FIND-WRAP-AROUND', 'FIRST',
'FIRST-ASYNCH-REQUEST', 'FIRST-CHILD', 'FIRST-COLUMN', 'FIRST-FORM',
'FIRST-OBJECT', 'FIRST-OF', 'FIRST-PROCEDURE', 'FIRST-PROC', 'FIRST-PROCE',
'FIRST-PROCED', 'FIRST-PROCEDU', 'FIRST-PROCEDUR', 'FIRST-SERVER',
'FIRST-TAB-ITEM', 'FIRST-TAB-I', 'FIRST-TAB-IT', 'FIRST-TAB-ITE',
'FIT-LAST-COLUMN', 'FIXED-ONLY', 'FLAT-BUTTON', 'FLOAT', 'FOCUS',
'FOCUSED-ROW', 'FOCUSED-ROW-SELECTED', 'FONT', 'FONT-TABLE', 'FOR',
'FORCE-FILE', 'FOREGROUND', 'FORE', 'FOREG', 'FOREGR', 'FOREGRO',
'FOREGROU', 'FOREGROUN', 'FORM', 'FORMAT', 'FORM', 'FORMA', 'FORMATTED',
'FORMATTE', 'FORM-LONG-INPUT', 'FORWARD', 'FORWARDS', 'FORWARD', 'FRAGMENT',
'FRAGMEN', 'FRAME', 'FRAM', 'FRAME-COL', 'FRAME-DB', 'FRAME-DOWN',
'FRAME-FIELD', 'FRAME-FILE', 'FRAME-INDEX', 'FRAME-INDE', 'FRAME-LINE',
'FRAME-NAME', 'FRAME-ROW', 'FRAME-SPACING', 'FRAME-SPA', 'FRAME-SPAC',
'FRAME-SPACI', 'FRAME-SPACIN', 'FRAME-VALUE', 'FRAME-VAL', 'FRAME-VALU',
'FRAME-X', 'FRAME-Y', 'FREQUENCY', 'FROM', 'FROM-CHARS', 'FROM-C',
'FROM-CH', 'FROM-CHA', 'FROM-CHAR', 'FROM-CURRENT', 'FROM-CUR', 'FROM-CURR',
'FROM-CURRE', 'FROM-CURREN', 'FROM-PIXELS', 'FROM-P', 'FROM-PI', 'FROM-PIX',
'FROM-PIXE', 'FROM-PIXEL', 'FULL-HEIGHT-CHARS', 'FULL-HEIGHT',
'FULL-HEIGHT-', 'FULL-HEIGHT-C', 'FULL-HEIGHT-CH', 'FULL-HEIGHT-CHA',
'FULL-HEIGHT-CHAR', 'FULL-HEIGHT-PIXELS', 'FULL-HEIGHT-P', 'FULL-HEIGHT-PI',
'FULL-HEIGHT-PIX', 'FULL-HEIGHT-PIXE', 'FULL-HEIGHT-PIXEL', 'FULL-PATHNAME',
'FULL-PATHN', 'FULL-PATHNA', 'FULL-PATHNAM', 'FULL-WIDTH-CHARS',
'FULL-WIDTH', 'FULL-WIDTH-', 'FULL-WIDTH-C', 'FULL-WIDTH-CH',
'FULL-WIDTH-CHA', 'FULL-WIDTH-CHAR', 'FULL-WIDTH-PIXELS', 'FULL-WIDTH-P',
'FULL-WIDTH-PI', 'FULL-WIDTH-PIX', 'FULL-WIDTH-PIXE', 'FULL-WIDTH-PIXEL',
'FUNCTION', 'FUNCTION-CALL-TYPE', 'GATEWAYS', 'GATEWAY', 'GE',
'GENERATE-MD5', 'GENERATE-PBE-KEY', 'GENERATE-PBE-SALT',
'GENERATE-RANDOM-KEY', 'GENERATE-UUID', 'GET', 'GET-ATTR-CALL-TYPE',
'GET-ATTRIBUTE-NODE', 'GET-BINARY-DATA', 'GET-BLUE-VALUE', 'GET-BLUE',
'GET-BLUE-', 'GET-BLUE-V', 'GET-BLUE-VA', 'GET-BLUE-VAL', 'GET-BLUE-VALU',
'GET-BROWSE-COLUMN', 'GET-BUFFER-HANDLEGETBYTE', 'GET-BYTE',
'GET-CALLBACK-PROC-CONTEXT', 'GET-CALLBACK-PROC-NAME', 'GET-CGI-LIST',
'GET-CGI-LONG-VALUE', 'GET-CGI-VALUE', 'GET-CODEPAGES', 'GET-COLLATIONS',
'GET-CONFIG-VALUE', 'GET-CURRENT', 'GET-DOUBLE', 'GET-DROPPED-FILE',
'GET-DYNAMIC', 'GET-ERROR-COLUMN', 'GET-ERROR-ROW', 'GET-FILE',
'GET-FILE-NAME', 'GET-FILE-OFFSET', 'GET-FILE-OFFSE', 'GET-FIRST',
'GET-FLOAT', 'GET-GREEN-VALUE', 'GET-GREEN', 'GET-GREEN-', 'GET-GREEN-V',
'GET-GREEN-VA', 'GET-GREEN-VAL', 'GET-GREEN-VALU',
'GET-INDEX-BY-NAMESPACE-NAME', 'GET-INDEX-BY-QNAME', 'GET-INT64',
'GET-ITERATION', 'GET-KEY-VALUE', 'GET-KEY-VAL', 'GET-KEY-VALU', 'GET-LAST',
'GET-LOCALNAME-BY-INDEX', 'GET-LONG', 'GET-MESSAGE', 'GET-NEXT',
'GET-NUMBER', 'GET-POINTER-VALUE', 'GET-PREV', 'GET-PRINTERS',
'GET-PROPERTY', 'GET-QNAME-BY-INDEX', 'GET-RED-VALUE', 'GET-RED',
'GET-RED-', 'GET-RED-V', 'GET-RED-VA', 'GET-RED-VAL', 'GET-RED-VALU',
'GET-REPOSITIONED-ROW', 'GET-RGB-VALUE', 'GET-SELECTED-WIDGET',
'GET-SELECTED', 'GET-SELECTED-', 'GET-SELECTED-W', 'GET-SELECTED-WI',
'GET-SELECTED-WID', 'GET-SELECTED-WIDG', 'GET-SELECTED-WIDGE', 'GET-SHORT',
'GET-SIGNATURE', 'GET-SIZE', 'GET-STRING', 'GET-TAB-ITEM',
'GET-TEXT-HEIGHT-CHARS', 'GET-TEXT-HEIGHT', 'GET-TEXT-HEIGHT-',
'GET-TEXT-HEIGHT-C', 'GET-TEXT-HEIGHT-CH', 'GET-TEXT-HEIGHT-CHA',
'GET-TEXT-HEIGHT-CHAR', 'GET-TEXT-HEIGHT-PIXELS', 'GET-TEXT-HEIGHT-P',
'GET-TEXT-HEIGHT-PI', 'GET-TEXT-HEIGHT-PIX', 'GET-TEXT-HEIGHT-PIXE',
'GET-TEXT-HEIGHT-PIXEL', 'GET-TEXT-WIDTH-CHARS', 'GET-TEXT-WIDTH',
'GET-TEXT-WIDTH-', 'GET-TEXT-WIDTH-C', 'GET-TEXT-WIDTH-CH',
'GET-TEXT-WIDTH-CHA', 'GET-TEXT-WIDTH-CHAR', 'GET-TEXT-WIDTH-PIXELS',
'GET-TEXT-WIDTH-P', 'GET-TEXT-WIDTH-PI', 'GET-TEXT-WIDTH-PIX',
'GET-TEXT-WIDTH-PIXE', 'GET-TEXT-WIDTH-PIXEL', 'GET-TYPE-BY-INDEX',
'GET-TYPE-BY-NAMESPACE-NAME', 'GET-TYPE-BY-QNAME', 'GET-UNSIGNED-LONG',
'GET-UNSIGNED-SHORT', 'GET-URI-BY-INDEX', 'GET-VALUE-BY-INDEX',
'GET-VALUE-BY-NAMESPACE-NAME', 'GET-VALUE-BY-QNAME', 'GET-WAIT-STATE',
'GLOBAL', 'GO-ON', 'GO-PENDING', 'GO-PEND', 'GO-PENDI', 'GO-PENDIN',
'GRANT', 'GRAPHIC-EDGE', 'GRAPHIC-E', 'GRAPHIC-ED', 'GRAPHIC-EDG',
'GRID-FACTOR-HORIZONTAL', 'GRID-FACTOR-H', 'GRID-FACTOR-HO',
'GRID-FACTOR-HOR', 'GRID-FACTOR-HORI', 'GRID-FACTOR-HORIZ',
'GRID-FACTOR-HORIZO', 'GRID-FACTOR-HORIZON', 'GRID-FACTOR-HORIZONT',
'GRID-FACTOR-HORIZONTA', 'GRID-FACTOR-VERTICAL', 'GRID-FACTOR-V',
'GRID-FACTOR-VE', 'GRID-FACTOR-VER', 'GRID-FACTOR-VERT', 'GRID-FACTOR-VERT',
'GRID-FACTOR-VERTI', 'GRID-FACTOR-VERTIC', 'GRID-FACTOR-VERTICA',
'GRID-SNAP', 'GRID-UNIT-HEIGHT-CHARS', 'GRID-UNIT-HEIGHT',
'GRID-UNIT-HEIGHT-', 'GRID-UNIT-HEIGHT-C', 'GRID-UNIT-HEIGHT-CH',
'GRID-UNIT-HEIGHT-CHA', 'GRID-UNIT-HEIGHT-PIXELS', 'GRID-UNIT-HEIGHT-P',
'GRID-UNIT-HEIGHT-PI', 'GRID-UNIT-HEIGHT-PIX', 'GRID-UNIT-HEIGHT-PIXE',
'GRID-UNIT-HEIGHT-PIXEL', 'GRID-UNIT-WIDTH-CHARS', 'GRID-UNIT-WIDTH',
'GRID-UNIT-WIDTH-', 'GRID-UNIT-WIDTH-C', 'GRID-UNIT-WIDTH-CH',
'GRID-UNIT-WIDTH-CHA', 'GRID-UNIT-WIDTH-CHAR', 'GRID-UNIT-WIDTH-PIXELS',
'GRID-UNIT-WIDTH-P', 'GRID-UNIT-WIDTH-PI', 'GRID-UNIT-WIDTH-PIX',
'GRID-UNIT-WIDTH-PIXE', 'GRID-UNIT-WIDTH-PIXEL', 'GRID-VISIBLE', 'GROUP',
'GT', 'GUID', 'HANDLER', 'HAS-RECORDS', 'HAVING', 'HEADER', 'HEIGHT-CHARS',
'HEIGHT', 'HEIGHT-', 'HEIGHT-C', 'HEIGHT-CH', 'HEIGHT-CHA', 'HEIGHT-CHAR',
'HEIGHT-PIXELS', 'HEIGHT-P', 'HEIGHT-PI', 'HEIGHT-PIX', 'HEIGHT-PIXE',
'HEIGHT-PIXEL', 'HELP', 'HEX-DECODE', 'HEX-ENCODE', 'HIDDEN', 'HIDE',
'HORIZONTAL', 'HORI', 'HORIZ', 'HORIZO', 'HORIZON', 'HORIZONT', 'HORIZONTA',
'HOST-BYTE-ORDER', 'HTML-CHARSET', 'HTML-END-OF-LINE', 'HTML-END-OF-PAGE',
'HTML-FRAME-BEGIN', 'HTML-FRAME-END', 'HTML-HEADER-BEGIN',
'HTML-HEADER-END', 'HTML-TITLE-BEGIN', 'HTML-TITLE-END', 'HWND', 'ICON',
'IF', 'IMAGE', 'IMAGE-DOWN', 'IMAGE-INSENSITIVE', 'IMAGE-SIZE',
'IMAGE-SIZE-CHARS', 'IMAGE-SIZE-C', 'IMAGE-SIZE-CH', 'IMAGE-SIZE-CHA',
'IMAGE-SIZE-CHAR', 'IMAGE-SIZE-PIXELS', 'IMAGE-SIZE-P', 'IMAGE-SIZE-PI',
'IMAGE-SIZE-PIX', 'IMAGE-SIZE-PIXE', 'IMAGE-SIZE-PIXEL', 'IMAGE-UP',
'IMMEDIATE-DISPLAY', 'IMPLEMENTS', 'IMPORT', 'IMPORT-PRINCIPAL', 'IN',
'INCREMENT-EXCLUSIVE-ID', 'INDEX', 'INDEXED-REPOSITION', 'INDEX-HINT',
'INDEX-INFORMATION', 'INDICATOR', 'INFORMATION', 'INFO', 'INFOR', 'INFORM',
'INFORMA', 'INFORMAT', 'INFORMATI', 'INFORMATIO', 'IN-HANDLE',
'INHERIT-BGCOLOR', 'INHERIT-BGC', 'INHERIT-BGCO', 'INHERIT-BGCOL',
'INHERIT-BGCOLO', 'INHERIT-FGCOLOR', 'INHERIT-FGC', 'INHERIT-FGCO',
'INHERIT-FGCOL', 'INHERIT-FGCOLO', 'INHERITS', 'INITIAL', 'INIT', 'INITI',
'INITIA', 'INITIAL-DIR', 'INITIAL-FILTER', 'INITIALIZE-DOCUMENT-TYPE',
'INITIATE', 'INNER-CHARS', 'INNER-LINES', 'INPUT', 'INPUT-OUTPUT',
'INPUT-O', 'INPUT-OU', 'INPUT-OUT', 'INPUT-OUTP', 'INPUT-OUTPU',
'INPUT-VALUE', 'INSERT', 'INSERT-ATTRIBUTE', 'INSERT-BACKTAB', 'INSERT-B',
'INSERT-BA', 'INSERT-BAC', 'INSERT-BACK', 'INSERT-BACKT', 'INSERT-BACKTA',
'INSERT-FILE', 'INSERT-ROW', 'INSERT-STRING', 'INSERT-TAB', 'INSERT-T',
'INSERT-TA', 'INTERFACE', 'INTERNAL-ENTRIES', 'INTO', 'INVOKE', 'IS',
'IS-ATTR-SPACE', 'IS-ATTR', 'IS-ATTR-', 'IS-ATTR-S', 'IS-ATTR-SP',
'IS-ATTR-SPA', 'IS-ATTR-SPAC', 'IS-CLASS', 'IS-CLAS', 'IS-LEAD-BYTE',
'IS-ATTR', 'IS-OPEN', 'IS-PARAMETER-SET', 'IS-ROW-SELECTED', 'IS-SELECTED',
'ITEM', 'ITEMS-PER-ROW', 'JOIN', 'JOIN-BY-SQLDB', 'KBLABEL',
'KEEP-CONNECTION-OPEN', 'KEEP-FRAME-Z-ORDER', 'KEEP-FRAME-Z',
'KEEP-FRAME-Z-', 'KEEP-FRAME-Z-O', 'KEEP-FRAME-Z-OR', 'KEEP-FRAME-Z-ORD',
'KEEP-FRAME-Z-ORDE', 'KEEP-MESSAGES', 'KEEP-SECURITY-CACHE',
'KEEP-TAB-ORDER', 'KEY', 'KEYCODE', 'KEY-CODE', 'KEYFUNCTION', 'KEYFUNC',
'KEYFUNCT', 'KEYFUNCTI', 'KEYFUNCTIO', 'KEY-FUNCTION', 'KEY-FUNC',
'KEY-FUNCT', 'KEY-FUNCTI', 'KEY-FUNCTIO', 'KEYLABEL', 'KEY-LABEL', 'KEYS',
'KEYWORD', 'KEYWORD-ALL', 'LABEL', 'LABEL-BGCOLOR', 'LABEL-BGC',
'LABEL-BGCO', 'LABEL-BGCOL', 'LABEL-BGCOLO', 'LABEL-DCOLOR', 'LABEL-DC',
'LABEL-DCO', 'LABEL-DCOL', 'LABEL-DCOLO', 'LABEL-FGCOLOR', 'LABEL-FGC',
'LABEL-FGCO', 'LABEL-FGCOL', 'LABEL-FGCOLO', 'LABEL-FONT', 'LABEL-PFCOLOR',
'LABEL-PFC', 'LABEL-PFCO', 'LABEL-PFCOL', 'LABEL-PFCOLO', 'LABELS',
'LANDSCAPE', 'LANGUAGES', 'LANGUAGE', 'LARGE', 'LARGE-TO-SMALL', 'LAST',
'LAST-ASYNCH-REQUEST', 'LAST-BATCH', 'LAST-CHILD', 'LAST-EVENT',
'LAST-EVEN', 'LAST-FORM', 'LASTKEY', 'LAST-KEY', 'LAST-OBJECT', 'LAST-OF',
'LAST-PROCEDURE', 'LAST-PROCE', 'LAST-PROCED', 'LAST-PROCEDU',
'LAST-PROCEDUR', 'LAST-SERVER', 'LAST-TAB-ITEM', 'LAST-TAB-I',
'LAST-TAB-IT', 'LAST-TAB-ITE', 'LC', 'LDBNAME', 'LE', 'LEAVE',
'LEFT-ALIGNED', 'LEFT-ALIGN', 'LEFT-ALIGNE', 'LEFT-TRIM', 'LENGTH',
'LIBRARY', 'LIKE', 'LIKE-SEQUENTIAL', 'LINE', 'LINE-COUNTER', 'LINE-COUNT',
'LINE-COUNTE', 'LIST-EVENTS', 'LISTING', 'LISTI', 'LISTIN',
'LIST-ITEM-PAIRS', 'LIST-ITEMS', 'LIST-PROPERTY-NAMES', 'LIST-QUERY-ATTRS',
'LIST-SET-ATTRS', 'LIST-WIDGETS', 'LITERAL-QUESTION', 'LITTLE-ENDIAN',
'LOAD', 'LOAD-DOMAINS', 'LOAD-ICON', 'LOAD-IMAGE', 'LOAD-IMAGE-DOWN',
'LOAD-IMAGE-INSENSITIVE', 'LOAD-IMAGE-UP', 'LOAD-MOUSE-POINTER',
'LOAD-MOUSE-P', 'LOAD-MOUSE-PO', 'LOAD-MOUSE-POI', 'LOAD-MOUSE-POIN',
'LOAD-MOUSE-POINT', 'LOAD-MOUSE-POINTE', 'LOAD-PICTURE', 'LOAD-SMALL-ICON',
'LOCAL-NAME', 'LOCATOR-COLUMN-NUMBER', 'LOCATOR-LINE-NUMBER',
'LOCATOR-PUBLIC-ID', 'LOCATOR-SYSTEM-ID', 'LOCATOR-TYPE', 'LOCKED',
'LOCK-REGISTRATION', 'LOG', 'LOG-AUDIT-EVENT', 'LOGIN-EXPIRATION-TIMESTAMP',
'LOGIN-HOST', 'LOGIN-STATE', 'LOG-MANAGER', 'LOGOUT', 'LOOKAHEAD', 'LOOKUP',
'LT', 'MACHINE-CLASS', 'MANDATORY', 'MANUAL-HIGHLIGHT', 'MAP',
'MARGIN-EXTRA', 'MARGIN-HEIGHT-CHARS', 'MARGIN-HEIGHT', 'MARGIN-HEIGHT-',
'MARGIN-HEIGHT-C', 'MARGIN-HEIGHT-CH', 'MARGIN-HEIGHT-CHA',
'MARGIN-HEIGHT-CHAR', 'MARGIN-HEIGHT-PIXELS', 'MARGIN-HEIGHT-P',
'MARGIN-HEIGHT-PI', 'MARGIN-HEIGHT-PIX', 'MARGIN-HEIGHT-PIXE',
'MARGIN-HEIGHT-PIXEL', 'MARGIN-WIDTH-CHARS', 'MARGIN-WIDTH',
'MARGIN-WIDTH-', 'MARGIN-WIDTH-C', 'MARGIN-WIDTH-CH', 'MARGIN-WIDTH-CHA',
'MARGIN-WIDTH-CHAR', 'MARGIN-WIDTH-PIXELS', 'MARGIN-WIDTH-P',
'MARGIN-WIDTH-PI', 'MARGIN-WIDTH-PIX', 'MARGIN-WIDTH-PIXE',
'MARGIN-WIDTH-PIXEL', 'MARK-NEW', 'MARK-ROW-STATE', 'MATCHES', 'MAX',
'MAX-BUTTON', 'MAX-CHARS', 'MAX-DATA-GUESS', 'MAX-HEIGHT',
'MAX-HEIGHT-CHARS', 'MAX-HEIGHT-C', 'MAX-HEIGHT-CH', 'MAX-HEIGHT-CHA',
'MAX-HEIGHT-CHAR', 'MAX-HEIGHT-PIXELS', 'MAX-HEIGHT-P', 'MAX-HEIGHT-PI',
'MAX-HEIGHT-PIX', 'MAX-HEIGHT-PIXE', 'MAX-HEIGHT-PIXEL', 'MAXIMIZE',
'MAXIMUM', 'MAX', 'MAXI', 'MAXIM', 'MAXIMU', 'MAXIMUM-LEVEL', 'MAX-ROWS',
'MAX-SIZE', 'MAX-VALUE', 'MAX-VAL', 'MAX-VALU', 'MAX-WIDTH',
'MAX-WIDTH-CHARS', 'MAX-WIDTH', 'MAX-WIDTH-', 'MAX-WIDTH-C', 'MAX-WIDTH-CH',
'MAX-WIDTH-CHA', 'MAX-WIDTH-CHAR', 'MAX-WIDTH-PIXELS', 'MAX-WIDTH-P',
'MAX-WIDTH-PI', 'MAX-WIDTH-PIX', 'MAX-WIDTH-PIXE', 'MAX-WIDTH-PIXEL',
'MD5-DIGEST', 'MEMBER', 'MEMPTR-TO-NODE-VALUE', 'MENU', 'MENUBAR',
'MENU-BAR', 'MENU-ITEM', 'MENU-KEY', 'MENU-K', 'MENU-KE', 'MENU-MOUSE',
'MENU-M', 'MENU-MO', 'MENU-MOU', 'MENU-MOUS', 'MERGE-BY-FIELD', 'MESSAGE',
'MESSAGE-AREA', 'MESSAGE-AREA-FONT', 'MESSAGE-LINES', 'METHOD', 'MIN',
'MIN-BUTTON', 'MIN-COLUMN-WIDTH-CHARS', 'MIN-COLUMN-WIDTH-C',
'MIN-COLUMN-WIDTH-CH', 'MIN-COLUMN-WIDTH-CHA', 'MIN-COLUMN-WIDTH-CHAR',
'MIN-COLUMN-WIDTH-PIXELS', 'MIN-COLUMN-WIDTH-P', 'MIN-COLUMN-WIDTH-PI',
'MIN-COLUMN-WIDTH-PIX', 'MIN-COLUMN-WIDTH-PIXE', 'MIN-COLUMN-WIDTH-PIXEL',
'MIN-HEIGHT-CHARS', 'MIN-HEIGHT', 'MIN-HEIGHT-', 'MIN-HEIGHT-C',
'MIN-HEIGHT-CH', 'MIN-HEIGHT-CHA', 'MIN-HEIGHT-CHAR', 'MIN-HEIGHT-PIXELS',
'MIN-HEIGHT-P', 'MIN-HEIGHT-PI', 'MIN-HEIGHT-PIX', 'MIN-HEIGHT-PIXE',
'MIN-HEIGHT-PIXEL', 'MINIMUM', 'MIN', 'MINI', 'MINIM', 'MINIMU', 'MIN-SIZE',
'MIN-VALUE', 'MIN-VAL', 'MIN-VALU', 'MIN-WIDTH-CHARS', 'MIN-WIDTH',
'MIN-WIDTH-', 'MIN-WIDTH-C', 'MIN-WIDTH-CH', 'MIN-WIDTH-CHA',
'MIN-WIDTH-CHAR', 'MIN-WIDTH-PIXELS', 'MIN-WIDTH-P', 'MIN-WIDTH-PI',
'MIN-WIDTH-PIX', 'MIN-WIDTH-PIXE', 'MIN-WIDTH-PIXEL', 'MODIFIED', 'MODULO',
'MOD', 'MODU', 'MODUL', 'MONTH', 'MOUSE', 'MOUSE-POINTER', 'MOUSE-P',
'MOUSE-PO', 'MOUSE-POI', 'MOUSE-POIN', 'MOUSE-POINT', 'MOUSE-POINTE',
'MOVABLE', 'MOVE-AFTER-TAB-ITEM', 'MOVE-AFTER', 'MOVE-AFTER-',
'MOVE-AFTER-T', 'MOVE-AFTER-TA', 'MOVE-AFTER-TAB', 'MOVE-AFTER-TAB-',
'MOVE-AFTER-TAB-I', 'MOVE-AFTER-TAB-IT', 'MOVE-AFTER-TAB-ITE',
'MOVE-BEFORE-TAB-ITEM', 'MOVE-BEFOR', 'MOVE-BEFORE', 'MOVE-BEFORE-',
'MOVE-BEFORE-T', 'MOVE-BEFORE-TA', 'MOVE-BEFORE-TAB', 'MOVE-BEFORE-TAB-',
'MOVE-BEFORE-TAB-I', 'MOVE-BEFORE-TAB-IT', 'MOVE-BEFORE-TAB-ITE',
'MOVE-COLUMN', 'MOVE-COL', 'MOVE-COLU', 'MOVE-COLUM', 'MOVE-TO-BOTTOM',
'MOVE-TO-B', 'MOVE-TO-BO', 'MOVE-TO-BOT', 'MOVE-TO-BOTT', 'MOVE-TO-BOTTO',
'MOVE-TO-EOF', 'MOVE-TO-TOP', 'MOVE-TO-T', 'MOVE-TO-TO', 'MPE',
'MULTI-COMPILE', 'MULTIPLE', 'MULTIPLE-KEY', 'MULTITASKING-INTERVAL',
'MUST-EXIST', 'NAME', 'NAMESPACE-PREFIX', 'NAMESPACE-URI', 'NATIVE', 'NE',
'NEEDS-APPSERVER-PROMPT', 'NEEDS-PROMPT', 'NEW', 'NEW-INSTANCE', 'NEW-ROW',
'NEXT', 'NEXT-COLUMN', 'NEXT-PROMPT', 'NEXT-ROWID', 'NEXT-SIBLING',
'NEXT-TAB-ITEM', 'NEXT-TAB-I', 'NEXT-TAB-IT', 'NEXT-TAB-ITE', 'NEXT-VALUE',
'NO', 'NO-APPLY', 'NO-ARRAY-MESSAGE', 'NO-ASSIGN', 'NO-ATTR-LIST',
'NO-ATTR', 'NO-ATTR-', 'NO-ATTR-L', 'NO-ATTR-LI', 'NO-ATTR-LIS',
'NO-ATTR-SPACE', 'NO-ATTR', 'NO-ATTR-', 'NO-ATTR-S', 'NO-ATTR-SP',
'NO-ATTR-SPA', 'NO-ATTR-SPAC', 'NO-AUTO-VALIDATE', 'NO-BIND-WHERE',
'NO-BOX', 'NO-CONSOLE', 'NO-CONVERT', 'NO-CONVERT-3D-COLORS',
'NO-CURRENT-VALUE', 'NO-DEBUG', 'NODE-VALUE-TO-MEMPTR', 'NO-DRAG',
'NO-ECHO', 'NO-EMPTY-SPACE', 'NO-ERROR', 'NO-FILL', 'NO-F', 'NO-FI',
'NO-FIL', 'NO-FOCUS', 'NO-HELP', 'NO-HIDE', 'NO-INDEX-HINT',
'NO-INHERIT-BGCOLOR', 'NO-INHERIT-BGC', 'NO-INHERIT-BGCO', 'LABEL-BGCOL',
'LABEL-BGCOLO', 'NO-INHERIT-FGCOLOR', 'NO-INHERIT-FGC', 'NO-INHERIT-FGCO',
'NO-INHERIT-FGCOL', 'NO-INHERIT-FGCOLO', 'NO-JOIN-BY-SQLDB', 'NO-LABELS',
'NO-LABE', 'NO-LOBS', 'NO-LOCK', 'NO-LOOKAHEAD', 'NO-MAP', 'NO-MESSAGE',
'NO-MES', 'NO-MESS', 'NO-MESSA', 'NO-MESSAG', 'NONAMESPACE-SCHEMA-LOCATION',
'NONE', 'NO-PAUSE', 'NO-PREFETCH', 'NO-PREFE', 'NO-PREFET', 'NO-PREFETC',
'NORMALIZE', 'NO-ROW-MARKERS', 'NO-SCROLLBAR-VERTICAL',
'NO-SEPARATE-CONNECTION', 'NO-SEPARATORS', 'NOT', 'NO-TAB-STOP',
'NOT-ACTIVE', 'NO-UNDERLINE', 'NO-UND', 'NO-UNDE', 'NO-UNDER', 'NO-UNDERL',
'NO-UNDERLI', 'NO-UNDERLIN', 'NO-UNDO', 'NO-VALIDATE', 'NO-VAL', 'NO-VALI',
'NO-VALID', 'NO-VALIDA', 'NO-VALIDAT', 'NOW', 'NO-WAIT', 'NO-WORD-WRAP',
'NULL', 'NUM-ALIASES', 'NUM-ALI', 'NUM-ALIA', 'NUM-ALIAS', 'NUM-ALIASE',
'NUM-BUFFERS', 'NUM-BUTTONS', 'NUM-BUT', 'NUM-BUTT', 'NUM-BUTTO',
'NUM-BUTTON', 'NUM-COLUMNS', 'NUM-COL', 'NUM-COLU', 'NUM-COLUM',
'NUM-COLUMN', 'NUM-COPIES', 'NUM-DBS', 'NUM-DROPPED-FILES', 'NUM-ENTRIES',
'NUMERIC', 'NUMERIC-FORMAT', 'NUMERIC-F', 'NUMERIC-FO', 'NUMERIC-FOR',
'NUMERIC-FORM', 'NUMERIC-FORMA', 'NUM-FIELDS', 'NUM-FORMATS', 'NUM-ITEMS',
'NUM-ITERATIONS', 'NUM-LINES', 'NUM-LOCKED-COLUMNS', 'NUM-LOCKED-COL',
'NUM-LOCKED-COLU', 'NUM-LOCKED-COLUM', 'NUM-LOCKED-COLUMN', 'NUM-MESSAGES',
'NUM-PARAMETERS', 'NUM-REFERENCES', 'NUM-REPLACED', 'NUM-RESULTS',
'NUM-SELECTED-ROWS', 'NUM-SELECTED-WIDGETS', 'NUM-SELECTED',
'NUM-SELECTED-', 'NUM-SELECTED-W', 'NUM-SELECTED-WI', 'NUM-SELECTED-WID',
'NUM-SELECTED-WIDG', 'NUM-SELECTED-WIDGE', 'NUM-SELECTED-WIDGET',
'NUM-TABS', 'NUM-TO-RETAIN', 'NUM-VISIBLE-COLUMNS', 'OCTET-LENGTH', 'OF',
'OFF', 'OK', 'OK-CANCEL', 'OLD', 'ON', 'ON-FRAME-BORDER', 'ON-FRAME',
'ON-FRAME-', 'ON-FRAME-B', 'ON-FRAME-BO', 'ON-FRAME-BOR', 'ON-FRAME-BORD',
'ON-FRAME-BORDE', 'OPEN', 'OPSYS', 'OPTION', 'OR', 'ORDERED-JOIN',
'ORDINAL', 'OS-APPEND', 'OS-COMMAND', 'OS-COPY', 'OS-CREATE-DIR',
'OS-DELETE', 'OS-DIR', 'OS-DRIVES', 'OS-DRIVE', 'OS-ERROR', 'OS-GETENV',
'OS-RENAME', 'OTHERWISE', 'OUTPUT', 'OVERLAY', 'OVERRIDE', 'OWNER', 'PAGE',
'PAGE-BOTTOM', 'PAGE-BOT', 'PAGE-BOTT', 'PAGE-BOTTO', 'PAGED',
'PAGE-NUMBER', 'PAGE-NUM', 'PAGE-NUMB', 'PAGE-NUMBE', 'PAGE-SIZE',
'PAGE-TOP', 'PAGE-WIDTH', 'PAGE-WID', 'PAGE-WIDT', 'PARAMETER', 'PARAM',
'PARAME', 'PARAMET', 'PARAMETE', 'PARENT', 'PARSE-STATUS', 'PARTIAL-KEY',
'PASCAL', 'PASSWORD-FIELD', 'PATHNAME', 'PAUSE', 'PBE-HASH-ALGORITHM',
'PBE-HASH-ALG', 'PBE-HASH-ALGO', 'PBE-HASH-ALGOR', 'PBE-HASH-ALGORI',
'PBE-HASH-ALGORIT', 'PBE-HASH-ALGORITH', 'PBE-KEY-ROUNDS', 'PDBNAME',
'PERSISTENT', 'PERSIST', 'PERSISTE', 'PERSISTEN',
'PERSISTENT-CACHE-DISABLED', 'PFCOLOR', 'PFC', 'PFCO', 'PFCOL', 'PFCOLO',
'PIXELS', 'PIXELS-PER-COLUMN', 'PIXELS-PER-COL', 'PIXELS-PER-COLU',
'PIXELS-PER-COLUM', 'PIXELS-PER-ROW', 'POPUP-MENU', 'POPUP-M', 'POPUP-ME',
'POPUP-MEN', 'POPUP-ONLY', 'POPUP-O', 'POPUP-ON', 'POPUP-ONL', 'PORTRAIT',
'POSITION', 'PRECISION', 'PREFER-DATASET', 'PREPARED', 'PREPARE-STRING',
'PREPROCESS', 'PREPROC', 'PREPROCE', 'PREPROCES', 'PRESELECT', 'PRESEL',
'PRESELE', 'PRESELEC', 'PREV', 'PREV-COLUMN', 'PREV-SIBLING',
'PREV-TAB-ITEM', 'PREV-TAB-I', 'PREV-TAB-IT', 'PREV-TAB-ITE', 'PRIMARY',
'PRINTER', 'PRINTER-CONTROL-HANDLE', 'PRINTER-HDC', 'PRINTER-NAME',
'PRINTER-PORT', 'PRINTER-SETUP', 'PRIVATE', 'PRIVATE-DATA', 'PRIVATE-D',
'PRIVATE-DA', 'PRIVATE-DAT', 'PRIVILEGES', 'PROCEDURE', 'PROCE', 'PROCED',
'PROCEDU', 'PROCEDUR', 'PROCEDURE-CALL-TYPE', 'PROCESS', 'PROC-HANDLE',
'PROC-HA', 'PROC-HAN', 'PROC-HAND', 'PROC-HANDL', 'PROC-STATUS', 'PROC-ST',
'PROC-STA', 'PROC-STAT', 'PROC-STATU', 'proc-text', 'proc-text-buffe',
'PROFILER', 'PROGRAM-NAME', 'PROGRESS', 'PROGRESS-SOURCE', 'PROGRESS-S',
'PROGRESS-SO', 'PROGRESS-SOU', 'PROGRESS-SOUR', 'PROGRESS-SOURC', 'PROMPT',
'PROMPT-FOR', 'PROMPT-F', 'PROMPT-FO', 'PROMSGS', 'PROPATH', 'PROPERTY',
'PROTECTED', 'PROVERSION', 'PROVERS', 'PROVERSI', 'PROVERSIO', 'PROXY',
'PROXY-PASSWORD', 'PROXY-USERID', 'PUBLIC', 'PUBLIC-ID', 'PUBLISH',
'PUBLISHED-EVENTS', 'PUT', 'PUTBYTE', 'PUT-BYTE', 'PUT-DOUBLE', 'PUT-FLOAT',
'PUT-INT64', 'PUT-KEY-VALUE', 'PUT-KEY-VAL', 'PUT-KEY-VALU', 'PUT-LONG',
'PUT-SHORT', 'PUT-STRING', 'PUT-UNSIGNED-LONG', 'QUERY', 'QUERY-CLOSE',
'QUERY-OFF-END', 'QUERY-OPEN', 'QUERY-PREPARE', 'QUERY-TUNING', 'QUESTION',
'QUIT', 'QUOTER', 'RADIO-BUTTONS', 'RADIO-SET', 'RANDOM', 'RAW-TRANSFER',
'RCODE-INFORMATION', 'RCODE-INFO', 'RCODE-INFOR', 'RCODE-INFORM',
'RCODE-INFORMA', 'RCODE-INFORMAT', 'RCODE-INFORMATI', 'RCODE-INFORMATIO',
'READ-AVAILABLE', 'READ-EXACT-NUM', 'READ-FILE', 'READKEY', 'READ-ONLY',
'READ-XML', 'READ-XMLSCHEMA', 'REAL', 'RECORD-LENGTH', 'RECTANGLE', 'RECT',
'RECTA', 'RECTAN', 'RECTANG', 'RECTANGL', 'RECURSIVE', 'REFERENCE-ONLY',
'REFRESH', 'REFRESHABLE', 'REFRESH-AUDIT-POLICY', 'REGISTER-DOMAIN',
'RELEASE', 'REMOTE', 'REMOVE-EVENTS-PROCEDURE', 'REMOVE-SUPER-PROCEDURE',
'REPEAT', 'REPLACE', 'REPLACE-SELECTION-TEXT', 'REPOSITION',
'REPOSITION-BACKWARD', 'REPOSITION-FORWARD', 'REPOSITION-MODE',
'REPOSITION-TO-ROW', 'REPOSITION-TO-ROWID', 'REQUEST', 'RESET', 'RESIZABLE',
'RESIZA', 'RESIZAB', 'RESIZABL', 'RESIZE', 'RESTART-ROW', 'RESTART-ROWID',
'RETAIN', 'RETAIN-SHAPE', 'RETRY', 'RETRY-CANCEL', 'RETURN',
'RETURN-INSERTED', 'RETURN-INS', 'RETURN-INSE', 'RETURN-INSER',
'RETURN-INSERT', 'RETURN-INSERTE', 'RETURNS', 'RETURN-TO-START-DIR',
'RETURN-TO-START-DI', 'RETURN-VALUE', 'RETURN-VAL', 'RETURN-VALU',
'RETURN-VALUE-DATA-TYPE', 'REVERSE-FROM', 'REVERT', 'REVOKE', 'RGB-VALUE',
'RIGHT-ALIGNED', 'RETURN-ALIGN', 'RETURN-ALIGNE', 'RIGHT-TRIM', 'R-INDEX',
'ROLES', 'ROUND', 'ROUTINE-LEVEL', 'ROW', 'ROW-HEIGHT-CHARS', 'HEIGHT',
'ROW-HEIGHT-PIXELS', 'HEIGHT-P', 'ROW-MARKERS', 'ROW-OF', 'ROW-RESIZABLE',
'RULE', 'RUN', 'RUN-PROCEDURE', 'SAVE', 'SAVE-AS', 'SAVE-FILE',
'SAX-COMPLETE', 'SAX-COMPLE', 'SAX-COMPLET', 'SAX-PARSE', 'SAX-PARSE-FIRST',
'SAX-PARSE-NEXT', 'SAX-PARSER-ERROR', 'SAX-RUNNING', 'SAX-UNINITIALIZED',
'SAX-WRITE-BEGIN', 'SAX-WRITE-COMPLETE', 'SAX-WRITE-CONTENT',
'SAX-WRITE-ELEMENT', 'SAX-WRITE-ERROR', 'SAX-WRITE-IDLE', 'SAX-WRITER',
'SAX-WRITE-TAG', 'SCHEMA', 'SCHEMA-LOCATION', 'SCHEMA-MARSHAL',
'SCHEMA-PATH', 'SCREEN', 'SCREEN-IO', 'SCREEN-LINES', 'SCREEN-VALUE',
'SCREEN-VAL', 'SCREEN-VALU', 'SCROLL', 'SCROLLABLE', 'SCROLLBAR-HORIZONTAL',
'SCROLLBAR-H', 'SCROLLBAR-HO', 'SCROLLBAR-HOR', 'SCROLLBAR-HORI',
'SCROLLBAR-HORIZ', 'SCROLLBAR-HORIZO', 'SCROLLBAR-HORIZON',
'SCROLLBAR-HORIZONT', 'SCROLLBAR-HORIZONTA', 'SCROLL-BARS',
'SCROLLBAR-VERTICAL', 'SCROLLBAR-V', 'SCROLLBAR-VE', 'SCROLLBAR-VER',
'SCROLLBAR-VERT', 'SCROLLBAR-VERTI', 'SCROLLBAR-VERTIC',
'SCROLLBAR-VERTICA', 'SCROLL-DELTA', 'SCROLLED-ROW-POSITION',
'SCROLLED-ROW-POS', 'SCROLLED-ROW-POSI', 'SCROLLED-ROW-POSIT',
'SCROLLED-ROW-POSITI', 'SCROLLED-ROW-POSITIO', 'SCROLLING', 'SCROLL-OFFSET',
'SCROLL-TO-CURRENT-ROW', 'SCROLL-TO-ITEM', 'SCROLL-TO-I', 'SCROLL-TO-IT',
'SCROLL-TO-ITE', 'SCROLL-TO-SELECTED-ROW', 'SDBNAME', 'SEAL',
'SEAL-TIMESTAMP', 'SEARCH', 'SEARCH-SELF', 'SEARCH-TARGET', 'SECTION',
'SECURITY-POLICY', 'SEEK', 'SELECT', 'SELECTABLE', 'SELECT-ALL', 'SELECTED',
'SELECT-FOCUSED-ROW', 'SELECTION', 'SELECTION-END', 'SELECTION-LIST',
'SELECTION-START', 'SELECTION-TEXT', 'SELECT-NEXT-ROW', 'SELECT-PREV-ROW',
'SELECT-ROW', 'SELF', 'SEND', 'send-sql-statement', 'send-sql', 'SENSITIVE',
'SEPARATE-CONNECTION', 'SEPARATOR-FGCOLOR', 'SEPARATORS', 'SERVER',
'SERVER-CONNECTION-BOUND', 'SERVER-CONNECTION-BOUND-REQUEST',
'SERVER-CONNECTION-CONTEXT', 'SERVER-CONNECTION-ID',
'SERVER-OPERATING-MODE', 'SESSION', 'SESSION-ID', 'SET', 'SET-APPL-CONTEXT',
'SET-ATTR-CALL-TYPE', 'SET-ATTRIBUTE-NODE', 'SET-BLUE-VALUE', 'SET-BLUE',
'SET-BLUE-', 'SET-BLUE-V', 'SET-BLUE-VA', 'SET-BLUE-VAL', 'SET-BLUE-VALU',
'SET-BREAK', 'SET-BUFFERS', 'SET-CALLBACK', 'SET-CLIENT', 'SET-COMMIT',
'SET-CONTENTS', 'SET-CURRENT-VALUE', 'SET-DB-CLIENT', 'SET-DYNAMIC',
'SET-EVENT-MANAGER-OPTION', 'SET-GREEN-VALUE', 'SET-GREEN', 'SET-GREEN-',
'SET-GREEN-V', 'SET-GREEN-VA', 'SET-GREEN-VAL', 'SET-GREEN-VALU',
'SET-INPUT-SOURCE', 'SET-OPTION', 'SET-OUTPUT-DESTINATION', 'SET-PARAMETER',
'SET-POINTER-VALUE', 'SET-PROPERTY', 'SET-RED-VALUE', 'SET-RED', 'SET-RED-',
'SET-RED-V', 'SET-RED-VA', 'SET-RED-VAL', 'SET-RED-VALU',
'SET-REPOSITIONED-ROW', 'SET-RGB-VALUE', 'SET-ROLLBACK', 'SET-SELECTION',
'SET-SIZE', 'SET-SORT-ARROW', 'SETUSERID', 'SETUSER', 'SETUSERI',
'SET-WAIT-STATE', 'SHA1-DIGEST', 'SHARED', 'SHARE-LOCK', 'SHARE', 'SHARE-',
'SHARE-L', 'SHARE-LO', 'SHARE-LOC', 'SHOW-IN-TASKBAR', 'SHOW-STATS',
'SHOW-STAT', 'SIDE-LABEL-HANDLE', 'SIDE-LABEL-H', 'SIDE-LABEL-HA',
'SIDE-LABEL-HAN', 'SIDE-LABEL-HAND', 'SIDE-LABEL-HANDL', 'SIDE-LABELS',
'SIDE-LAB', 'SIDE-LABE', 'SIDE-LABEL', 'SILENT', 'SIMPLE', 'SINGLE', 'SIZE',
'SIZE-CHARS', 'SIZE-C', 'SIZE-CH', 'SIZE-CHA', 'SIZE-CHAR', 'SIZE-PIXELS',
'SIZE-P', 'SIZE-PI', 'SIZE-PIX', 'SIZE-PIXE', 'SIZE-PIXEL', 'SKIP',
'SKIP-DELETED-RECORD', 'SLIDER', 'SMALL-ICON', 'SMALLINT', 'SMALL-TITLE',
'SOME', 'SORT', 'SORT-ASCENDING', 'SORT-NUMBER', 'SOURCE',
'SOURCE-PROCEDURE', 'SPACE', 'SQL', 'SQRT', 'SSL-SERVER-NAME', 'STANDALONE',
'START', 'START-DOCUMENT', 'START-ELEMENT', 'START-MOVE', 'START-RESIZE',
'START-ROW-RESIZE', 'STATE-DETAIL', 'STATIC', 'STATUS', 'STATUS-AREA',
'STATUS-AREA-FONT', 'STDCALL', 'STOP', 'STOP-PARSING', 'STOPPED', 'STOPPE',
'STORED-PROCEDURE', 'STORED-PROC', 'STORED-PROCE', 'STORED-PROCED',
'STORED-PROCEDU', 'STORED-PROCEDUR', 'STREAM', 'STREAM-HANDLE', 'STREAM-IO',
'STRETCH-TO-FIT', 'STRICT', 'STRING', 'STRING-VALUE', 'STRING-XREF',
'SUB-AVERAGE', 'SUB-AVE', 'SUB-AVER', 'SUB-AVERA', 'SUB-AVERAG',
'SUB-COUNT', 'SUB-MAXIMUM', 'SUM-MAX', 'SUM-MAXI', 'SUM-MAXIM',
'SUM-MAXIMU', 'SUB-MENU', 'SUBSUB-', 'MINIMUM', 'SUB-MIN', 'SUBSCRIBE',
'SUBSTITUTE', 'SUBST', 'SUBSTI', 'SUBSTIT', 'SUBSTITU', 'SUBSTITUT',
'SUBSTRING', 'SUBSTR', 'SUBSTRI', 'SUBSTRIN', 'SUB-TOTAL', 'SUBTYPE', 'SUM',
'SUPER', 'SUPER-PROCEDURES', 'SUPPRESS-NAMESPACE-PROCESSING',
'SUPPRESS-WARNINGS', 'SUPPRESS-W', 'SUPPRESS-WA', 'SUPPRESS-WAR',
'SUPPRESS-WARN', 'SUPPRESS-WARNI', 'SUPPRESS-WARNIN', 'SUPPRESS-WARNING',
'SYMMETRIC-ENCRYPTION-ALGORITHM', 'SYMMETRIC-ENCRYPTION-IV',
'SYMMETRIC-ENCRYPTION-KEY', 'SYMMETRIC-SUPPORT', 'SYSTEM-ALERT-BOXES',
'SYSTEM-ALERT', 'SYSTEM-ALERT-', 'SYSTEM-ALERT-B', 'SYSTEM-ALERT-BO',
'SYSTEM-ALERT-BOX', 'SYSTEM-ALERT-BOXE', 'SYSTEM-DIALOG', 'SYSTEM-HELP',
'SYSTEM-ID', 'TABLE', 'TABLE-HANDLE', 'TABLE-NUMBER', 'TAB-POSITION',
'TAB-STOP', 'TARGET', 'TARGET-PROCEDURE', 'TEMP-DIRECTORY', 'TEMP-DIR',
'TEMP-DIRE', 'TEMP-DIREC', 'TEMP-DIRECT', 'TEMP-DIRECTO', 'TEMP-DIRECTOR',
'TEMP-TABLE', 'TEMP-TABLE-PREPARE', 'TERM', 'TERMINAL', 'TERM', 'TERMI',
'TERMIN', 'TERMINA', 'TERMINATE', 'TEXT', 'TEXT-CURSOR', 'TEXT-SEG-GROW',
'TEXT-SELECTED', 'THEN', 'THIS-OBJECT', 'THIS-PROCEDURE', 'THREE-D',
'THROW', 'THROUGH', 'THRU', 'TIC-MARKS', 'TIME', 'TIME-SOURCE', 'TITLE',
'TITLE-BGCOLOR', 'TITLE-BGC', 'TITLE-BGCO', 'TITLE-BGCOL', 'TITLE-BGCOLO',
'TITLE-DCOLOR', 'TITLE-DC', 'TITLE-DCO', 'TITLE-DCOL', 'TITLE-DCOLO',
'TITLE-FGCOLOR', 'TITLE-FGC', 'TITLE-FGCO', 'TITLE-FGCOL', 'TITLE-FGCOLO',
'TITLE-FONT', 'TITLE-FO', 'TITLE-FON', 'TO', 'TODAY', 'TOGGLE-BOX',
'TOOLTIP', 'TOOLTIPS', 'TOPIC', 'TOP-NAV-QUERY', 'TOP-ONLY', 'TO-ROWID',
'TOTAL', 'TRAILING', 'TRANS', 'TRANSACTION', 'TRANSACTION-MODE',
'TRANS-INIT-PROCEDURE', 'TRANSPARENT', 'TRIGGER', 'TRIGGERS', 'TRIM',
'TRUE', 'TRUNCATE', 'TRUNC', 'TRUNCA', 'TRUNCAT', 'TYPE', 'TYPE-OF',
'UNBOX', 'UNBUFFERED', 'UNBUFF', 'UNBUFFE', 'UNBUFFER', 'UNBUFFERE',
'UNDERLINE', 'UNDERL', 'UNDERLI', 'UNDERLIN', 'UNDO', 'UNFORMATTED',
'UNFORM', 'UNFORMA', 'UNFORMAT', 'UNFORMATT', 'UNFORMATTE', 'UNION',
'UNIQUE', 'UNIQUE-ID', 'UNIQUE-MATCH', 'UNIX', 'UNLESS-HIDDEN', 'UNLOAD',
'UNSIGNED-LONG', 'UNSUBSCRIBE', 'UP', 'UPDATE', 'UPDATE-ATTRIBUTE', 'URL',
'URL-DECODE', 'URL-ENCODE', 'URL-PASSWORD', 'URL-USERID', 'USE',
'USE-DICT-EXPS', 'USE-FILENAME', 'USE-INDEX', 'USER', 'USE-REVVIDEO',
'USERID', 'USER-ID', 'USE-TEXT', 'USE-UNDERLINE', 'USE-WIDGET-POOL',
'USING', 'V6DISPLAY', 'V6FRAME', 'VALIDATE', 'VALIDATE-EXPRESSION',
'VALIDATE-MESSAGE', 'VALIDATE-SEAL', 'VALIDATION-ENABLED', 'VALID-EVENT',
'VALID-HANDLE', 'VALID-OBJECT', 'VALUE', 'VALUE-CHANGED', 'VALUES',
'VARIABLE', 'VAR', 'VARI', 'VARIA', 'VARIAB', 'VARIABL', 'VERBOSE',
'VERSION', 'VERTICAL', 'VERT', 'VERTI', 'VERTIC', 'VERTICA', 'VIEW',
'VIEW-AS', 'VIEW-FIRST-COLUMN-ON-REOPEN', 'VIRTUAL-HEIGHT-CHARS',
'VIRTUAL-HEIGHT', 'VIRTUAL-HEIGHT-', 'VIRTUAL-HEIGHT-C',
'VIRTUAL-HEIGHT-CH', 'VIRTUAL-HEIGHT-CHA', 'VIRTUAL-HEIGHT-CHAR',
'VIRTUAL-HEIGHT-PIXELS', 'VIRTUAL-HEIGHT-P', 'VIRTUAL-HEIGHT-PI',
'VIRTUAL-HEIGHT-PIX', 'VIRTUAL-HEIGHT-PIXE', 'VIRTUAL-HEIGHT-PIXEL',
'VIRTUAL-WIDTH-CHARS', 'VIRTUAL-WIDTH', 'VIRTUAL-WIDTH-', 'VIRTUAL-WIDTH-C',
'VIRTUAL-WIDTH-CH', 'VIRTUAL-WIDTH-CHA', 'VIRTUAL-WIDTH-CHAR',
'VIRTUAL-WIDTH-PIXELS', 'VIRTUAL-WIDTH-P', 'VIRTUAL-WIDTH-PI',
'VIRTUAL-WIDTH-PIX', 'VIRTUAL-WIDTH-PIXE', 'VIRTUAL-WIDTH-PIXEL', 'VISIBLE',
'VOID', 'WAIT', 'WAIT-FOR', 'WARNING', 'WEB-CONTEXT', 'WEEKDAY', 'WHEN',
'WHERE', 'WHILE', 'WIDGET', 'WIDGET-ENTER', 'WIDGET-E', 'WIDGET-EN',
'WIDGET-ENT', 'WIDGET-ENTE', 'WIDGET-ID', 'WIDGET-LEAVE', 'WIDGET-L',
'WIDGET-LE', 'WIDGET-LEA', 'WIDGET-LEAV', 'WIDGET-POOL', 'WIDTH',
'WIDTH-CHARS', 'WIDTH', 'WIDTH-', 'WIDTH-C', 'WIDTH-CH', 'WIDTH-CHA',
'WIDTH-CHAR', 'WIDTH-PIXELS', 'WIDTH-P', 'WIDTH-PI', 'WIDTH-PIX',
'WIDTH-PIXE', 'WIDTH-PIXEL', 'WINDOW', 'WINDOW-MAXIMIZED', 'WINDOW-MAXIM',
'WINDOW-MAXIMI', 'WINDOW-MAXIMIZ', 'WINDOW-MAXIMIZE', 'WINDOW-MINIMIZED',
'WINDOW-MINIM', 'WINDOW-MINIMI', 'WINDOW-MINIMIZ', 'WINDOW-MINIMIZE',
'WINDOW-NAME', 'WINDOW-NORMAL', 'WINDOW-STATE', 'WINDOW-STA', 'WINDOW-STAT',
'WINDOW-SYSTEM', 'WITH', 'WORD-INDEX', 'WORD-WRAP',
'WORK-AREA-HEIGHT-PIXELS', 'WORK-AREA-WIDTH-PIXELS', 'WORK-AREA-X',
'WORK-AREA-Y', 'WORKFILE', 'WORK-TABLE', 'WORK-TAB', 'WORK-TABL', 'WRITE',
'WRITE-CDATA', 'WRITE-CHARACTERS', 'WRITE-COMMENT', 'WRITE-DATA-ELEMENT',
'WRITE-EMPTY-ELEMENT', 'WRITE-ENTITY-REF', 'WRITE-EXTERNAL-DTD',
'WRITE-FRAGMENT', 'WRITE-MESSAGE', 'WRITE-PROCESSING-INSTRUCTION',
'WRITE-STATUS', 'WRITE-XML', 'WRITE-XMLSCHEMA', 'X', 'XCODE',
'XML-DATA-TYPE', 'XML-NODE-TYPE', 'XML-SCHEMA-PATH',
'XML-SUPPRESS-NAMESPACE-PROCESSING', 'X-OF', 'XREF', 'XREF-XML', 'Y',
'YEAR', 'YEAR-OFFSET', 'YES', 'YES-NO', 'YES-NO-CANCEL', 'Y-OF'
]
| mit |
steeve/plugin.video.pulsar | resources/site-packages/pulsar/provider.py | 5 | 5712 | import urllib2
from urllib import quote, quote_plus, urlencode
from pulsar.util import notify
from pulsar.logger import log
from pulsar.addon import ADDON
from cookielib import CookieJar
# Video resolution codes reported by providers to Pulsar (higher = better).
RESOLUTION_UNKNOWN = 0
RESOLUTION_480P = 1
RESOLUTION_720P = 2
RESOLUTION_1080P = 3
RESOLUTION_1440P = 4
RESOLUTION_4K2K = 5
# Release/rip source quality codes (roughly ascending quality).
RIP_UNKNOWN = 0
RIP_CAM = 1
RIP_TS = 2
RIP_TC = 3
RIP_SCR = 4
RIP_DVDSCR = 5
RIP_DVD = 6
RIP_HDTV = 7
RIP_WEB = 8
RIP_BLURAY = 9
# Scene-rating codes for a release.
RATING_UNKNOWN = 0
RATING_PROPER = 1
RATING_NUKED = 2
# Audio/video codec codes.
CODEC_UNKNOWN = 0
CODEC_XVID = 1
CODEC_H264 = 2
CODEC_MP3 = 3
CODEC_AAC = 4
CODEC_AC3 = 5
CODEC_DTS = 6
CODEC_DTSHD = 7
CODEC_DTSHDMA = 8
# Browser-like User-Agent sent with every request() call below.
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.66 Safari/537.36"
# Shared cookie jar; installed globally so all urllib2.urlopen calls in this
# process keep cookies across requests (used by with_cookies()).
COOKIES = CookieJar()
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPCookieProcessor(COOKIES)))
class closing(object):
    """Context manager that closes its wrapped object on exit.

    Minimal re-implementation of ``contextlib.closing``: entering the
    ``with`` block yields the wrapped object, and leaving it (normally
    or via an exception) calls the object's ``close()`` method.
    """

    def __init__(self, thing):
        # Attribute name kept as ``thing`` for any external code that
        # inspects the instance.
        self.thing = thing

    def __enter__(self):
        return self.thing

    def __exit__(self, exc_type, exc_value, traceback):
        self.thing.close()
def parse_json(data):
    """Decode a JSON string, preferring simplejson when it is installed."""
    try:
        import simplejson as json_impl
    except ImportError:
        import json as json_impl
    return json_impl.loads(data)
def parse_xml(data):
    """Parse an XML document string and return its root Element."""
    from xml.etree import ElementTree
    return ElementTree.fromstring(data)
def request(url, params=None, headers=None, data=None, method=None):
    """Perform an HTTP request and return the augmented response object.

    :param url: target URL; ``params`` (if given) are url-encoded and
        appended as the query string.
    :param params: optional dict of query-string parameters.
    :param headers: optional dict of extra request headers (a default
        User-Agent and gzip Accept-Encoding are always sent).
    :param data: optional request body.
    :param method: explicit HTTP verb (e.g. "HEAD", "PUT", "DELETE");
        used by the HEAD/GET/POST/PUT/DELETE helpers below.
    :returns: the ``urllib2`` response, augmented with ``.data`` (the
        gunzipped body), ``.json()`` and ``.xml()`` helpers, or ``None``
        on HTTP error.
    """
    # Fix: mutable default arguments (params={}, headers={}) replaced with
    # None sentinels so a shared dict can never leak state between calls.
    if params:
        url = "".join([url, "?", urlencode(params)])
    req = urllib2.Request(url)
    if method:
        req.get_method = lambda: method
    req.add_header("User-Agent", USER_AGENT)
    req.add_header("Accept-Encoding", "gzip")
    for k, v in (headers or {}).items():
        req.add_header(k, v)
    if data:
        req.add_data(data)
    try:
        with closing(urllib2.urlopen(req)) as response:
            data = response.read()
            if response.headers.get("Content-Encoding", "") == "gzip":
                import zlib
                # 16 + MAX_WBITS tells zlib to expect a gzip header.
                data = zlib.decompressobj(16 + zlib.MAX_WBITS).decompress(data)
            response.data = data
            response.json = lambda: parse_json(data)
            response.xml = lambda: parse_xml(data)
            return response
    # "except ... as e" is valid on Python 2.6+ and required on Python 3;
    # the old "except X, e" form was Python-2-only.
    except urllib2.HTTPError as e:
        log.error("http error: %s => %d %s" % (url, e.code, e.reason))
        # Fix: was "return None, None".  The success path returns a single
        # object, and a 2-tuple is truthy, so callers testing
        # "if response:" would then crash on attribute access.
        return None
def _http_method(verb):
    """Build a helper that forwards to request() with a fixed HTTP verb."""
    def call(*args, **kwargs):
        return request(*args, method=verb, **kwargs)
    return call

# Convenience wrappers mirroring the common HTTP verbs.
HEAD = _http_method("HEAD")
GET = _http_method("GET")
POST = _http_method("POST")
PUT = _http_method("PUT")
DELETE = _http_method("DELETE")
def append_headers(uri, headers):
    """Append request headers to a URI in Kodi's ``uri|Key=Value|...`` form."""
    pairs = ["%s=%s" % (name, value) for name, value in headers.items()]
    return uri + "|" + "|".join(pairs)
def with_cookies(uri):
    """Append the shared cookie jar to a URI in ``uri|Cookies=...`` form.

    Bug fix: the original iterated over ``COOKIE_JAR``, a name that is
    never defined anywhere in this module and raised NameError at call
    time; the module-level jar is ``COOKIES``.
    """
    cookie_str = "; ".join(["%s=%s" % (c.name, c.value) for c in COOKIES])
    return uri + "|Cookies=" + cookie_str
def extract_magnets(data):
    """Yield ``{"uri": ...}`` dicts for each magnet link found in *data*."""
    import re
    # A magnet URI runs until whitespace, quotes or HTML/bbcode delimiters.
    magnet_re = re.compile(r'magnet:\?[^\'"\s<>\[\]]+')
    for match in magnet_re.finditer(data):
        yield {"uri": match.group(0)}
def parse_rss(root):
    """Yield torrent-result dicts parsed from an RSS feed's <item> elements.

    Understands plain RSS enclosures plus the ezrss and showrss namespace
    extensions for magnet URIs, info hashes and seed/peer counts.
    Missing numeric fields default to 0; missing text fields to None.

    :param root: the parsed feed's root Element (e.g. from parse_xml()).
    """
    NSMAP = {
        "torrent": "http://xmlns.ezrss.it/0.1/",
        "showrss": "http://showrss.info/",
    }
    def _maybe_int(val):
        # Narrowed from a bare "except": only conversion failures (None or
        # non-numeric text) should fall back to 0.
        try:
            return int(val)
        except (TypeError, ValueError):
            return 0
    def _text(node, path):
        # Text of the first matching descendant, or None.
        n = node.find(path)
        if n is not None:
            return n.text
    def _attr(node, path, attrib):
        # Attribute of the first matching descendant, or None.
        n = node.find(path)
        if n is not None:
            return n.attrib.get(attrib)
    # Fix: Element.iter() replaces getiterator(), which was deprecated and
    # removed in Python 3.9; iter() exists since Python 2.7 and iterates
    # identically here.
    for item in root.iter("item"):
        yield {
            "name": _text(item, "title") or _text(item, ".//{%(torrent)s}fileName" % NSMAP) or _text(item, ".//{%(showrss)s}rawtitle" % NSMAP),
            "info_hash": _text(item, ".//info_hash") or _text(item, ".//{%(showrss)s}info_hash" % NSMAP),
            "uri": _text(item, ".//{%(torrent)s}magnetURI" % NSMAP) or _attr(item, ".//enclosure", "url") or _text(item, "./link"),
            "seeds": _maybe_int(_text(item, ".//{%(torrent)s}seeds" % NSMAP) or _text(item, "numSeeders") or _text(item, "seeders")),
            "peers": _maybe_int(_text(item, ".//{%(torrent)s}peers" % NSMAP) or _text(item, "numLeechers") or _text(item, "leechers")),
            "size": _maybe_int(_text(item, ".//{%(torrent)s}contentLength" % NSMAP) or _attr(item, ".//enclosure", "length")),
        }
# Borrowed from xbmcswift2
def get_setting(key, converter=str, choices=None):
    """Read an add-on setting and coerce it to a Python value.

    :param key: setting id as declared in the add-on's settings.xml.
    :param converter: str (default), unicode, bool or int.  The
        ``unicode`` branch makes this function Python 2 only.
    :param choices: list/tuple to index with the stored integer value.
        NOTE: only consulted when ``converter`` is none of the four types
        above -- with the default ``converter=str`` the raw string wins.
    :raises TypeError: for any unsupported converter/choices combination.
    """
    value = ADDON.getSetting(id=key)
    if converter is str:
        return value
    elif converter is unicode:
        # Python 2: Kodi returns settings as UTF-8 byte strings.
        return value.decode('utf-8')
    elif converter is bool:
        # Kodi stores booleans as the strings 'true'/'false'.
        return value == 'true'
    elif converter is int:
        return int(value)
    elif isinstance(choices, (list, tuple)):
        # Enum-style settings store the selected index as a string.
        return choices[int(value)]
    else:
        raise TypeError('Acceptable converters are str, unicode, bool and '
                        'int. Acceptable choices are instances of list '
                        ' or tuple.')
def set_setting(key, val):
    """Persist the add-on setting *key* with value *val*."""
    result = ADDON.setSetting(id=key, value=val)
    return result
def register(search, search_movie, search_episode):
    """Entry point called by Pulsar to run a provider add-on.

    Decodes the base64/JSON payload Pulsar passes in ``sys.argv[1]``,
    dispatches to the matching callback (``search``, ``search_movie`` or
    ``search_episode``) and POSTs the results back to Pulsar's callback
    URL.  Results are posted even if the callback raises (the finally
    block), so Pulsar never waits on a dead provider.

    :param search: callable for generic searches.
    :param search_movie: callable for movie searches.
    :param search_episode: callable for episode searches.
    """
    import base64
    import json
    import sys
    try:
        payload = json.loads(base64.b64decode(sys.argv[1]))
    except Exception:
        # Narrowed from a bare "except": still best-effort (a bad/missing
        # argv means we were not launched by Pulsar), but no longer
        # swallows SystemExit/KeyboardInterrupt.
        notify("This addon can only be run from within Pulsar", time=1000)
        return
    results = ()
    # Unknown methods fall through to a no-op that reports no results.
    method = {
        "search": search,
        "search_movie": search_movie,
        "search_episode": search_episode,
    }.get(payload["method"]) or (lambda *a, **kw: [])
    try:
        results = tuple(method(payload["search_object"]))
    finally:
        # Fix: close the callback response instead of leaking the
        # connection (the original never closed the urlopen handle).
        with closing(urllib2.urlopen(payload["callback_url"],
                                     data=json.dumps(results))):
            pass
| bsd-3-clause |
payal97/portal | systers_portal/meetup/tests/test_forms.py | 2 | 14407 | from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from django.utils.timezone import timedelta
from cities_light.models import City, Country
from django.contrib.contenttypes.models import ContentType
from meetup.forms import (AddMeetupForm, EditMeetupForm,
AddMeetupCommentForm, EditMeetupCommentForm,
RsvpForm, AddSupportRequestForm,
EditSupportRequestForm, AddSupportRequestCommentForm,
EditSupportRequestCommentForm,
RequestMeetupForm)
from meetup.models import (Meetup, Rsvp, SupportRequest,
RequestMeetup)
from users.models import SystersUser
from common.models import Comment
class MeetupFormTestCaseBase:
    """Mixin building the fixtures shared by the meetup form tests.

    Not a TestCase itself; concrete test classes mix it in alongside
    django.test.TestCase.  Provides:
      * self.user / self.systers_user -- a registered user and profile,
      * self.location -- a City (in country 'Bar') used as the location,
      * self.meetup -- a Meetup created by that user at that location.
    """
    def setUp(self):
        self.user = User.objects.create_user(username='foo', password='foobar',
                                             email='user@test.com')
        # The SystersUser profile is fetched, not created -- presumably a
        # signal creates it when the User is saved; confirm in users app.
        self.systers_user = SystersUser.objects.get(user=self.user)
        # Country must exist before the City that references it.
        country = Country.objects.create(name='Bar', continent='AS')
        self.location = City.objects.create(name='Baz', display_name='Baz', country=country)
        self.meetup = Meetup.objects.create(title='Foo Bar Baz', slug='foobarbaz',
                                            date=timezone.now().date(),
                                            time=timezone.now().time(),
                                            description='This is test Meetup',
                                            meetup_location=self.location,
                                            created_by=self.systers_user,
                                            leader=self.systers_user,
                                            last_updated=timezone.now())
class RequestMeetupFormTestCase(MeetupFormTestCaseBase, TestCase):
    """Tests for RequestMeetupForm validation and saving."""

    def test_add_request_meetup_form(self):
        """A valid payload creates a RequestMeetup; an incomplete one fails."""
        # Testing form with invalid data
        invalid_data = {'title': 'abc', 'date': timezone.now().date()}
        form = RequestMeetupForm(data=invalid_data, created_by=self.user)
        self.assertFalse(form.is_valid())
        date = (timezone.now() + timedelta(2)).date()
        time = timezone.now().time()
        data = {'title': 'Foo', 'slug': 'foo', 'date': date, 'time': time,
                'meetup_location': self.location.id,
                'description': "It's a test meetup."}
        form = RequestMeetupForm(data=data, created_by=self.user)
        self.assertTrue(form.is_valid())
        form.save()
        new_meetup_request = RequestMeetup.objects.get(slug='foo')
        # Bug fix: these used assertTrue(a, b), where ``b`` is only the
        # failure *message* -- the assertion passed for any truthy ``a``
        # and never compared values.  assertEqual does the intended check.
        self.assertEqual(new_meetup_request.title, 'Foo')
        self.assertEqual(new_meetup_request.created_by, self.systers_user)
        self.assertEqual(new_meetup_request.meetup_location, self.location)

    def test_request_meetup_form_with_past_date(self):
        """Test add Meetup form with a date that has passed."""
        date = (timezone.now() - timedelta(2)).date()
        time = timezone.now().time()
        data = {'title': 'Foo', 'slug': 'foo', 'date': date, 'time': time,
                'meetup_location': self.location.id,
                'description': "It's a test meetup."}
        form = RequestMeetupForm(data=data, created_by=self.systers_user)
        self.assertFalse(form.is_valid())
        # Bug fix: was assertTrue(errors, expected) -- message, not compare.
        self.assertEqual(form.errors['date'],
                         ["Date should not be before today's date."])

    def test_request_meetup_form_with_passed_time(self):
        """Test add Meetup form with a time that has passed."""
        date = timezone.now().date()
        time = (timezone.now() - timedelta(2)).time()
        data = {'title': 'Foo', 'slug': 'foo', 'date': date, 'time': time,
                'meetup_location': self.location.id,
                'description': "It's a test meetup."}
        form = RequestMeetupForm(data=data, created_by=self.systers_user)
        self.assertFalse(form.is_valid())
        # Bug fix: was assertTrue(errors, expected) -- message, not compare.
        self.assertEqual(form.errors['time'],
                         ["Time should not be a time that has already passed."])
class AddMeetupFormTestCase(MeetupFormTestCaseBase, TestCase):
    """Tests for AddMeetupForm validation and saving."""

    def setUp(self):
        """Add a second user on top of the shared fixtures."""
        super(AddMeetupFormTestCase, self).setUp()
        self.password = 'bazbar'
        self.user2 = User.objects.create_user(username='baz', password=self.password,
                                              email='user2@test.com')
        self.systers_user2 = SystersUser.objects.get(user=self.user2)

    def test_add_meetup_form(self):
        """Test add Meetup form"""
        invalid_data = {'title': 'abc', 'date': timezone.now().date()}
        form = AddMeetupForm(data=invalid_data,
                             created_by=self.systers_user,
                             leader=self.systers_user)
        self.assertFalse(form.is_valid())
        date = (timezone.now() + timedelta(2)).date()
        time = timezone.now().time()
        data = {'title': 'Foo', 'slug': 'foo', 'date': date, 'time': time,
                'meetup_location': self.location.id,
                'description': "It's a test meetup."}
        # NOTE(review): this form gets created_by=self.user while the one
        # above gets created_by=self.systers_user -- presumably the form
        # accepts either; confirm against AddMeetupForm.
        form = AddMeetupForm(data=data, created_by=self.user, leader=self.systers_user)
        self.assertTrue(form.is_valid())
        form.save()
        new_meetup = Meetup.objects.get(slug='foo')
        # Bug fix: assertTrue(a, b) treats ``b`` as the failure message and
        # never compares values; assertEqual performs the intended check.
        self.assertEqual(new_meetup.title, 'Foo')
        self.assertEqual(new_meetup.created_by, self.systers_user)
        self.assertEqual(new_meetup.meetup_location, self.location)

    def test_add_meetup_form_with_past_date(self):
        """Test add Meetup form with a date that has passed."""
        date = (timezone.now() - timedelta(2)).date()
        time = timezone.now().time()
        # Consistency fix: pass the location pk like every other test in
        # this module, not the model instance.
        data = {'title': 'Foo', 'slug': 'foo', 'date': date, 'time': time,
                'meetup_location': self.location.id,
                'description': "It's a test meetup."}
        form = AddMeetupForm(data=data, created_by=self.systers_user, leader=self.systers_user)
        self.assertFalse(form.is_valid())
        # Bug fix: was assertTrue(errors, expected) -- message, not compare.
        self.assertEqual(form.errors['date'],
                         ["Date should not be before today's date."])

    def test_add_meetup_form_with_passed_time(self):
        """Test add Meetup form with a time that has passed."""
        date = timezone.now().date()
        time = (timezone.now() - timedelta(2)).time()
        # Consistency fix: pass the location pk, not the model instance.
        data = {'title': 'Foo', 'slug': 'foo', 'date': date, 'time': time,
                'meetup_location': self.location.id,
                'description': "It's a test meetup."}
        form = AddMeetupForm(data=data, created_by=self.systers_user, leader=self.systers_user)
        self.assertFalse(form.is_valid())
        # Bug fix: was assertTrue(errors, expected) -- message, not compare.
        self.assertEqual(form.errors['time'],
                         ["Time should not be a time that has already passed."])
class EditMeetupFormTestCase(MeetupFormTestCaseBase, TestCase):
    """Tests for EditMeetupForm."""

    def test_edit_meetup_form(self):
        """Test edit meetup"""
        # A form missing required fields must not validate.
        bad_data = {'slug': 'slug', 'date': timezone.now().date()}
        self.assertFalse(EditMeetupForm(data=bad_data).is_valid())
        # A complete form bound to the existing meetup saves the edits.
        future_date = (timezone.now() + timedelta(2)).date()
        form_data = {'slug': 'foobar', 'title': 'Foo Bar',
                     'date': future_date, 'time': timezone.now().time(),
                     'description': "It's a test meetup.", 'venue': 'test address'}
        form = EditMeetupForm(instance=self.meetup, data=form_data)
        self.assertTrue(form.is_valid())
        form.save()
        meetup = Meetup.objects.get()
        self.assertEqual(meetup.title, 'Foo Bar')
        self.assertEqual(meetup.slug, 'foobar')
        self.assertEqual(meetup.created_by, self.systers_user)
        self.assertEqual(meetup.meetup_location, self.location)
class AddMeetupCommentFormTestCase(MeetupFormTestCaseBase, TestCase):
    """Tests for AddMeetupCommentForm."""

    def test_add_meetup_comment_form(self):
        """Test add meetup Comment form"""
        form = AddMeetupCommentForm(data={'body': 'This is a test comment'},
                                    author=self.user,
                                    content_object=self.meetup)
        self.assertTrue(form.is_valid())
        form.save()
        stored = Comment.objects.all()
        self.assertEqual(len(stored), 1)
        comment = stored[0]
        self.assertEqual(comment.body, 'This is a test comment')
        self.assertEqual(comment.author, self.systers_user)
        self.assertEqual(comment.content_object, self.meetup)
class EditMeetupCommentFormTestCase(MeetupFormTestCaseBase, TestCase):
    """Tests for EditMeetupCommentForm."""

    def setUp(self):
        super(EditMeetupCommentFormTestCase, self).setUp()
        # Pre-create a comment attached to the meetup via the generic
        # content-type framework.
        ct = ContentType.objects.get(app_label='meetup', model='meetup')
        self.comment = Comment.objects.create(author=self.systers_user, is_approved=True,
                                              body='This is a test comment',
                                              content_type=ct,
                                              object_id=self.meetup.id)

    def test_edit_meetup_comment_form(self):
        """Test edit meetup Comment form"""
        form = EditMeetupCommentForm(instance=self.comment,
                                     data={'body': 'This is an edited test comment'})
        self.assertTrue(form.is_valid())
        form.save()
        stored = Comment.objects.all()
        self.assertEqual(len(stored), 1)
        edited = stored[0]
        self.assertEqual(edited.body, 'This is an edited test comment')
        self.assertEqual(edited.author, self.systers_user)
        self.assertEqual(edited.content_object, self.meetup)
class RsvpFormTestCase(MeetupFormTestCaseBase, TestCase):
    """Tests for RsvpForm."""

    def test_rsvp_form(self):
        """Test Rsvp form"""
        form = RsvpForm(data={'coming': True, 'plus_one': True},
                        user=self.user, meetup=self.meetup)
        self.assertTrue(form.is_valid())
        form.save()
        rsvps = Rsvp.objects.filter(meetup=self.meetup)
        self.assertEqual(len(rsvps), 1)
        rsvp = rsvps[0]
        self.assertEqual(rsvp.coming, True)
        self.assertEqual(rsvp.plus_one, True)
        self.assertEqual(rsvp.user, self.systers_user)
        self.assertEqual(rsvp.meetup, self.meetup)
class AddSupportRequestFormTestCase(MeetupFormTestCaseBase, TestCase):
    """Tests for AddSupportRequestForm."""

    def test_add_support_request_form(self):
        """Test add Support Request form"""
        form = AddSupportRequestForm(data={'description': 'This is a test description'},
                                     volunteer=self.user, meetup=self.meetup)
        self.assertTrue(form.is_valid())
        form.save()
        requests = SupportRequest.objects.all()
        self.assertEqual(len(requests), 1)
        request = requests[0]
        self.assertEqual(request.description, 'This is a test description')
        self.assertEqual(request.volunteer, self.systers_user)
        self.assertEqual(request.meetup, self.meetup)
class EditSupportRequestFormTestCase(MeetupFormTestCaseBase, TestCase):
    """Tests for EditSupportRequestForm."""

    def setUp(self):
        super(EditSupportRequestFormTestCase, self).setUp()
        # Pre-create an unapproved support request to edit.
        self.support_request = SupportRequest.objects.create(
            volunteer=self.systers_user, meetup=self.meetup,
            description='This is a test description', is_approved=False)

    def test_edit_support_request_form(self):
        """Test edit Support Request form"""
        form = EditSupportRequestForm(
            instance=self.support_request,
            data={'description': 'This is an edited test description'})
        self.assertTrue(form.is_valid())
        form.save()
        requests = SupportRequest.objects.all()
        self.assertEqual(len(requests), 1)
        edited = requests[0]
        self.assertEqual(edited.description, 'This is an edited test description')
        self.assertEqual(edited.volunteer, self.systers_user)
        self.assertEqual(edited.meetup, self.meetup)
class AddSupportRequestCommentFormTestCase(MeetupFormTestCaseBase, TestCase):
    """Tests for AddSupportRequestCommentForm."""

    def setUp(self):
        super(AddSupportRequestCommentFormTestCase, self).setUp()
        # A support request to attach the comment to.
        self.support_request = SupportRequest.objects.create(
            volunteer=self.systers_user, meetup=self.meetup,
            description='This is a test description', is_approved=False)

    def test_add_support_request_comment_form(self):
        """Test add support request Comment form"""
        form = AddSupportRequestCommentForm(data={'body': 'This is a test comment'},
                                            author=self.user,
                                            content_object=self.support_request)
        self.assertTrue(form.is_valid())
        form.save()
        stored = Comment.objects.all()
        self.assertEqual(len(stored), 1)
        comment = stored[0]
        self.assertEqual(comment.body, 'This is a test comment')
        self.assertEqual(comment.author, self.systers_user)
        self.assertEqual(comment.content_object, self.support_request)
class EditSupportRequestCommentFormTestCase(MeetupFormTestCaseBase, TestCase):
    """Tests for EditSupportRequestCommentForm."""

    def setUp(self):
        super(EditSupportRequestCommentFormTestCase, self).setUp()
        self.support_request = SupportRequest.objects.create(
            volunteer=self.systers_user, meetup=self.meetup,
            description='This is a test description', is_approved=False)
        # Comment attached to the support request via the generic
        # content-type framework.
        ct = ContentType.objects.get(app_label='meetup',
                                     model='supportrequest')
        self.comment = Comment.objects.create(author=self.systers_user, is_approved=True,
                                              body='This is a test comment',
                                              content_type=ct,
                                              object_id=self.support_request.id)

    def test_edit_support_request_comment_form(self):
        """Test edit support request Comment form"""
        form = EditSupportRequestCommentForm(
            instance=self.comment,
            data={'body': 'This is an edited test comment'})
        self.assertTrue(form.is_valid())
        form.save()
        stored = Comment.objects.all()
        self.assertEqual(len(stored), 1)
        edited = stored[0]
        self.assertEqual(edited.body, 'This is an edited test comment')
        self.assertEqual(edited.author, self.systers_user)
        self.assertEqual(edited.content_object, self.support_request)
| gpl-2.0 |
CubicERP/geraldo | site/newsite/django_1_0/tests/modeltests/pagination/models.py | 9 | 6402 | """
30. Object pagination
Django provides a framework for paginating a list of objects in a few lines
of code. This is often useful for dividing search results or long lists of
objects into easily readable pages.
In Django 0.96 and earlier, a single ObjectPaginator class implemented this
functionality. In the Django development version, the behavior is split across
two classes -- Paginator and Page -- that are more easier to use. The legacy
ObjectPaginator class is deprecated.
"""
from django.db import models
class Article(models.Model):
    # Minimal model used as fixture data by the pagination doctests in
    # the module-level ``__test__`` dict below.
    headline = models.CharField(max_length=100, default='Default headline')
    pub_date = models.DateTimeField()
    def __unicode__(self):
        # Python 2 unicode protocol: articles display as their headline.
        return self.headline
__test__ = {'API_TESTS':"""
# Prepare a list of objects for pagination.
>>> from datetime import datetime
>>> for x in range(1, 10):
... a = Article(headline='Article %s' % x, pub_date=datetime(2005, 7, 29))
... a.save()
####################################
# New/current API (Paginator/Page) #
####################################
>>> from django.core.paginator import Paginator
>>> paginator = Paginator(Article.objects.all(), 5)
>>> paginator.count
9
>>> paginator.num_pages
2
>>> paginator.page_range
[1, 2]
# Get the first page.
>>> p = paginator.page(1)
>>> p
<Page 1 of 2>
>>> p.object_list
[<Article: Article 1>, <Article: Article 2>, <Article: Article 3>, <Article: Article 4>, <Article: Article 5>]
>>> p.has_next()
True
>>> p.has_previous()
False
>>> p.has_other_pages()
True
>>> p.next_page_number()
2
>>> p.previous_page_number()
0
>>> p.start_index()
1
>>> p.end_index()
5
# Get the second page.
>>> p = paginator.page(2)
>>> p
<Page 2 of 2>
>>> p.object_list
[<Article: Article 6>, <Article: Article 7>, <Article: Article 8>, <Article: Article 9>]
>>> p.has_next()
False
>>> p.has_previous()
True
>>> p.has_other_pages()
True
>>> p.next_page_number()
3
>>> p.previous_page_number()
1
>>> p.start_index()
6
>>> p.end_index()
9
# Empty pages raise EmptyPage.
>>> paginator.page(0)
Traceback (most recent call last):
...
EmptyPage: ...
>>> paginator.page(3)
Traceback (most recent call last):
...
EmptyPage: ...
# Empty paginators with allow_empty_first_page=True.
>>> paginator = Paginator(Article.objects.filter(id=0), 5, allow_empty_first_page=True)
>>> paginator.count
0
>>> paginator.num_pages
1
>>> paginator.page_range
[1]
# Empty paginators with allow_empty_first_page=False.
>>> paginator = Paginator(Article.objects.filter(id=0), 5, allow_empty_first_page=False)
>>> paginator.count
0
>>> paginator.num_pages
0
>>> paginator.page_range
[]
# Paginators work with regular lists/tuples, too -- not just with QuerySets.
>>> paginator = Paginator([1, 2, 3, 4, 5, 6, 7, 8, 9], 5)
>>> paginator.count
9
>>> paginator.num_pages
2
>>> paginator.page_range
[1, 2]
# Get the first page.
>>> p = paginator.page(1)
>>> p
<Page 1 of 2>
>>> p.object_list
[1, 2, 3, 4, 5]
>>> p.has_next()
True
>>> p.has_previous()
False
>>> p.has_other_pages()
True
>>> p.next_page_number()
2
>>> p.previous_page_number()
0
>>> p.start_index()
1
>>> p.end_index()
5
# Paginator can be passed other objects with a count() method.
>>> class CountContainer:
... def count(self):
... return 42
>>> paginator = Paginator(CountContainer(), 10)
>>> paginator.count
42
>>> paginator.num_pages
5
>>> paginator.page_range
[1, 2, 3, 4, 5]
# Paginator can be passed other objects that implement __len__.
>>> class LenContainer:
... def __len__(self):
... return 42
>>> paginator = Paginator(LenContainer(), 10)
>>> paginator.count
42
>>> paginator.num_pages
5
>>> paginator.page_range
[1, 2, 3, 4, 5]
################################
# Legacy API (ObjectPaginator) #
################################
# Don't print out the deprecation warnings during testing.
>>> from warnings import filterwarnings
>>> filterwarnings("ignore")
>>> from django.core.paginator import ObjectPaginator, EmptyPage
>>> paginator = ObjectPaginator(Article.objects.all(), 5)
>>> paginator.hits
9
>>> paginator.pages
2
>>> paginator.page_range
[1, 2]
# Get the first page.
>>> paginator.get_page(0)
[<Article: Article 1>, <Article: Article 2>, <Article: Article 3>, <Article: Article 4>, <Article: Article 5>]
>>> paginator.has_next_page(0)
True
>>> paginator.has_previous_page(0)
False
>>> paginator.first_on_page(0)
1
>>> paginator.last_on_page(0)
5
# Get the second page.
>>> paginator.get_page(1)
[<Article: Article 6>, <Article: Article 7>, <Article: Article 8>, <Article: Article 9>]
>>> paginator.has_next_page(1)
False
>>> paginator.has_previous_page(1)
True
>>> paginator.first_on_page(1)
6
>>> paginator.last_on_page(1)
9
# Invalid pages raise EmptyPage.
>>> paginator.get_page(-1)
Traceback (most recent call last):
...
EmptyPage: ...
>>> paginator.get_page(2)
Traceback (most recent call last):
...
EmptyPage: ...
# Empty paginators with allow_empty_first_page=True.
>>> paginator = ObjectPaginator(Article.objects.filter(id=0), 5)
>>> paginator.count
0
>>> paginator.num_pages
1
>>> paginator.page_range
[1]
# ObjectPaginator can be passed lists too.
>>> paginator = ObjectPaginator([1, 2, 3], 5)
>>> paginator.hits
3
>>> paginator.pages
1
>>> paginator.page_range
[1]
# ObjectPaginator can be passed other objects without a count() method.
>>> class Container:
... def __len__(self):
... return 42
>>> paginator = ObjectPaginator(Container(), 10)
>>> paginator.hits
42
>>> paginator.pages
5
>>> paginator.page_range
[1, 2, 3, 4, 5]
##################
# Orphan support #
##################
# Add a few more records to test out the orphans feature.
>>> for x in range(10, 13):
... Article(headline="Article %s" % x, pub_date=datetime(2006, 10, 6)).save()
# With orphans set to 3 and 10 items per page, we should get all 12 items on a single page.
>>> paginator = Paginator(Article.objects.all(), 10, orphans=3)
>>> paginator.num_pages
1
# With orphans only set to 1, we should get two pages.
>>> paginator = ObjectPaginator(Article.objects.all(), 10, orphans=1)
>>> paginator.num_pages
2
# LEGACY: With orphans set to 3 and 10 items per page, we should get all 12 items on a single page.
>>> paginator = ObjectPaginator(Article.objects.all(), 10, orphans=3)
>>> paginator.pages
1
# LEGACY: With orphans only set to 1, we should get two pages.
>>> paginator = ObjectPaginator(Article.objects.all(), 10, orphans=1)
>>> paginator.pages
2
"""}
| lgpl-3.0 |
ajdavis/asyncio | asyncio/base_subprocess.py | 3 | 9053 | import collections
import subprocess
import warnings
from . import compat
from . import protocols
from . import transports
from .coroutines import coroutine
from .log import logger
class BaseSubprocessTransport(transports.SubprocessTransport):
    """Base transport managing a child process and its std pipes.

    Event loop implementations subclass this and implement _start(),
    which must spawn the child and set ``self._proc``.
    """

    def __init__(self, loop, protocol, args, shell,
                 stdin, stdout, stderr, bufsize,
                 waiter=None, extra=None, **kwargs):
        super().__init__(extra)
        self._closed = False
        self._protocol = protocol
        self._loop = loop
        self._proc = None
        self._pid = None
        self._returncode = None
        # Futures created by _wait(); resolved in _process_exited().
        self._exit_waiters = []
        # Protocol callbacks queued until connection_made() has been
        # scheduled; set to None once the pipes are connected.
        self._pending_calls = collections.deque()
        # fd (0, 1, 2) -> pipe protocol, or None until connected.
        self._pipes = {}
        self._finished = False
        if stdin == subprocess.PIPE:
            self._pipes[0] = None
        if stdout == subprocess.PIPE:
            self._pipes[1] = None
        if stderr == subprocess.PIPE:
            self._pipes[2] = None
        # Create the child process: set the _proc attribute
        try:
            self._start(args=args, shell=shell, stdin=stdin, stdout=stdout,
                        stderr=stderr, bufsize=bufsize, **kwargs)
        except:
            # Spawning failed: release what was already set up, re-raise.
            self.close()
            raise
        self._pid = self._proc.pid
        self._extra['subprocess'] = self._proc
        if self._loop.get_debug():
            if isinstance(args, (bytes, str)):
                program = args
            else:
                program = args[0]
            logger.debug('process %r created: pid %s',
                         program, self._pid)
        # Pipe connection happens asynchronously; *waiter* is resolved
        # by _connect_pipes() when done.
        self._loop.create_task(self._connect_pipes(waiter))

    def __repr__(self):
        """Return a debug string with pid, state and connected pipes."""
        info = [self.__class__.__name__]
        if self._closed:
            info.append('closed')
        if self._pid is not None:
            info.append('pid=%s' % self._pid)
        if self._returncode is not None:
            info.append('returncode=%s' % self._returncode)
        elif self._pid is not None:
            info.append('running')
        else:
            info.append('not started')
        stdin = self._pipes.get(0)
        if stdin is not None:
            info.append('stdin=%s' % stdin.pipe)
        stdout = self._pipes.get(1)
        stderr = self._pipes.get(2)
        if stdout is not None and stderr is stdout:
            # stderr redirected into stdout: report them as one pipe.
            info.append('stdout=stderr=%s' % stdout.pipe)
        else:
            if stdout is not None:
                info.append('stdout=%s' % stdout.pipe)
            if stderr is not None:
                info.append('stderr=%s' % stderr.pipe)
        return '<%s>' % ' '.join(info)

    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
        """Spawn the child and set self._proc (subclass hook)."""
        raise NotImplementedError

    def set_protocol(self, protocol):
        """Replace the subprocess protocol receiving pipe events."""
        self._protocol = protocol

    def get_protocol(self):
        """Return the current subprocess protocol."""
        return self._protocol

    def is_closing(self):
        """Return True once close() has been called."""
        return self._closed

    def close(self):
        """Close all pipe transports and kill the child if still running."""
        if self._closed:
            return
        self._closed = True
        for proto in self._pipes.values():
            if proto is None:
                continue
            proto.pipe.close()
        if (self._proc is not None
                # the child process finished?
                and self._returncode is None
                # the child process finished but the transport was not notified yet?
                and self._proc.poll() is None
                ):
            if self._loop.get_debug():
                logger.warning('Close running child process: kill %r', self)
            try:
                self._proc.kill()
            except ProcessLookupError:
                # The process is already gone; nothing to kill.
                pass
        # Don't clear the _proc reference yet: _post_init() may still run

    # On Python 3.3 and older, objects with a destructor part of a reference
    # cycle are never destroyed. It's not more the case on Python 3.4 thanks
    # to the PEP 442.
    if compat.PY34:
        def __del__(self):
            if not self._closed:
                warnings.warn("unclosed transport %r" % self, ResourceWarning)
                self.close()

    def get_pid(self):
        """Return the child's process id."""
        return self._pid

    def get_returncode(self):
        """Return the exit code, or None while still running."""
        return self._returncode

    def get_pipe_transport(self, fd):
        """Return the pipe transport for fd 0/1/2, or None if absent."""
        if fd in self._pipes:
            return self._pipes[fd].pipe
        else:
            return None

    def _check_proc(self):
        # Raise if the process handle has already been dropped
        # (transport finished or never started).
        if self._proc is None:
            raise ProcessLookupError()

    def send_signal(self, signal):
        """Send *signal* to the child process."""
        self._check_proc()
        self._proc.send_signal(signal)

    def terminate(self):
        """Request child termination (delegates to Popen.terminate)."""
        self._check_proc()
        self._proc.terminate()

    def kill(self):
        """Forcefully kill the child (delegates to Popen.kill)."""
        self._check_proc()
        self._proc.kill()

    @coroutine
    def _connect_pipes(self, waiter):
        """Connect pipe transports, then flush queued protocol callbacks.

        Resolves *waiter* with None on success, or with the exception
        on failure (unless it was cancelled meanwhile).
        """
        try:
            proc = self._proc
            loop = self._loop
            if proc.stdin is not None:
                _, pipe = yield from loop.connect_write_pipe(
                    lambda: WriteSubprocessPipeProto(self, 0),
                    proc.stdin)
                self._pipes[0] = pipe
            if proc.stdout is not None:
                _, pipe = yield from loop.connect_read_pipe(
                    lambda: ReadSubprocessPipeProto(self, 1),
                    proc.stdout)
                self._pipes[1] = pipe
            if proc.stderr is not None:
                _, pipe = yield from loop.connect_read_pipe(
                    lambda: ReadSubprocessPipeProto(self, 2),
                    proc.stderr)
                self._pipes[2] = pipe
            assert self._pending_calls is not None
            loop.call_soon(self._protocol.connection_made, self)
            # Replay callbacks that arrived while the pipes were being
            # connected, after connection_made() scheduled above.
            for callback, data in self._pending_calls:
                loop.call_soon(callback, *data)
            self._pending_calls = None
        except Exception as exc:
            if waiter is not None and not waiter.cancelled():
                waiter.set_exception(exc)
        else:
            if waiter is not None and not waiter.cancelled():
                waiter.set_result(None)

    def _call(self, cb, *data):
        # Queue the protocol callback until connection_made() has been
        # scheduled; afterwards schedule it directly.
        if self._pending_calls is not None:
            self._pending_calls.append((cb, data))
        else:
            self._loop.call_soon(cb, *data)

    def _pipe_connection_lost(self, fd, exc):
        # Called by the pipe protocols when one std pipe disconnects.
        self._call(self._protocol.pipe_connection_lost, fd, exc)
        self._try_finish()

    def _pipe_data_received(self, fd, data):
        # Called by the read pipe protocols with stdout/stderr data.
        self._call(self._protocol.pipe_data_received, fd, data)

    def _process_exited(self, returncode):
        """Record the exit code and wake everything waiting on it."""
        assert returncode is not None, returncode
        assert self._returncode is None, self._returncode
        if self._loop.get_debug():
            logger.info('%r exited with return code %r',
                        self, returncode)
        self._returncode = returncode
        if self._proc.returncode is None:
            # asyncio uses a child watcher: copy the status into the Popen
            # object. On Python 3.6, it is required to avoid a ResourceWarning.
            self._proc.returncode = returncode
        self._call(self._protocol.process_exited)
        self._try_finish()
        # wake up futures waiting for wait()
        for waiter in self._exit_waiters:
            if not waiter.cancelled():
                waiter.set_result(returncode)
        self._exit_waiters = None

    @coroutine
    def _wait(self):
        """Wait until the process exit and return the process return code.
        This method is a coroutine."""
        if self._returncode is not None:
            return self._returncode
        waiter = self._loop.create_future()
        self._exit_waiters.append(waiter)
        return (yield from waiter)

    def _try_finish(self):
        # Deliver connection_lost only once the process has exited AND
        # every pipe protocol has disconnected.
        assert not self._finished
        if self._returncode is None:
            return
        if all(p is not None and p.disconnected
               for p in self._pipes.values()):
            self._finished = True
            self._call(self._call_connection_lost, None)

    def _call_connection_lost(self, exc):
        # Final callback: notify the protocol, then drop references to
        # break cycles between loop, transport, process and protocol.
        try:
            self._protocol.connection_lost(exc)
        finally:
            self._loop = None
            self._proc = None
            self._protocol = None
class WriteSubprocessPipeProto(protocols.BaseProtocol):
    """Protocol attached to the child's stdin pipe transport.

    Forwards flow-control and disconnect events to the owning
    subprocess transport.
    """

    def __init__(self, proc, fd):
        self.proc = proc           # owning subprocess transport
        self.fd = fd               # std fd number this protocol serves
        self.pipe = None           # pipe transport, set in connection_made()
        self.disconnected = False  # True once connection_lost() ran

    def connection_made(self, transport):
        self.pipe = transport

    def __repr__(self):
        return '<{} fd={} pipe={!r}>'.format(
            type(self).__name__, self.fd, self.pipe)

    def connection_lost(self, exc):
        self.disconnected = True
        self.proc._pipe_connection_lost(self.fd, exc)
        self.proc = None

    def pause_writing(self):
        self.proc._protocol.pause_writing()

    def resume_writing(self):
        self.proc._protocol.resume_writing()
class ReadSubprocessPipeProto(WriteSubprocessPipeProto,
                              protocols.Protocol):
    """Protocol for the child's stdout/stderr pipes: forwards read data
    to the owning subprocess transport."""
    def data_received(self, data):
        self.proc._pipe_data_received(self.fd, data)
| apache-2.0 |
JianyuWang/nova | nova/api/openstack/compute/legacy_v2/contrib/evacuate.py | 61 | 4643 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import strutils
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova import utils
# Module-level policy check for the "evacuate" compute extension; invoked
# at the top of Controller._evacuate() below.
authorize = extensions.extension_authorizer('compute', 'evacuate')
class Controller(wsgi.Controller):
    """Server-actions controller exposing the ``evacuate`` action."""

    def __init__(self, ext_mgr, *args, **kwargs):
        super(Controller, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()
        self.host_api = compute.HostAPI()
        self.ext_mgr = ext_mgr

    @wsgi.action('evacuate')
    def _evacuate(self, req, id, body):
        """Permit admins to evacuate a server from a failed host
        to a new one.
        If host is empty, the scheduler will select one.
        """
        context = req.environ["nova.context"]
        authorize(context)
        # NOTE(alex_xu): back-compatible with db layer hard-code admin
        # permission checks. This has to be left only for API v2.0 because
        # this version has to be stable even if it means that only admins
        # can call this method while the policy could be changed.
        nova_context.require_admin_context(context)
        if not self.is_valid_body(body, "evacuate"):
            raise exc.HTTPBadRequest(_("Malformed request body"))
        evacuate_body = body["evacuate"]
        host = evacuate_body.get("host")
        # "host" may only be omitted when the find-host extension is
        # loaded (the scheduler then picks the destination).
        if (not host and
                not self.ext_mgr.is_loaded('os-extended-evacuate-find-host')):
            msg = _("host must be specified.")
            raise exc.HTTPBadRequest(explanation=msg)
        try:
            on_shared_storage = strutils.bool_from_string(
                evacuate_body["onSharedStorage"])
        except (TypeError, KeyError):
            # Missing key or a non-boolean value.
            msg = _("onSharedStorage must be specified.")
            raise exc.HTTPBadRequest(explanation=msg)
        password = None
        if 'adminPass' in evacuate_body:
            # check that if requested to evacuate server on shared storage
            # password not specified
            if on_shared_storage:
                msg = _("admin password can't be changed on existing disk")
                raise exc.HTTPBadRequest(explanation=msg)
            password = evacuate_body['adminPass']
        elif not on_shared_storage:
            # Local storage without a caller-supplied password: generate one.
            password = utils.generate_password()
        if host is not None:
            # Validate that the requested destination hosts a compute service.
            try:
                self.host_api.service_get_by_compute_host(context, host)
            except exception.NotFound:
                msg = _("Compute host %s not found.") % host
                raise exc.HTTPNotFound(explanation=msg)
        instance = common.get_instance(self.compute_api, context, id)
        try:
            if instance.host == host:
                msg = _("The target host can't be the same one.")
                raise exc.HTTPBadRequest(explanation=msg)
            self.compute_api.evacuate(context, instance, host,
                                      on_shared_storage, password)
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'evacuate', id)
        except exception.InstanceNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except exception.ComputeServiceInUse as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        # Only return the admin password when one was set/generated above.
        if password:
            return {'adminPass': password}
class Evacuate(extensions.ExtensionDescriptor):
    """Enables server evacuation."""
    name = "Evacuate"
    alias = "os-evacuate"
    namespace = "http://docs.openstack.org/compute/ext/evacuate/api/v2"
    updated = "2013-01-06T00:00:00Z"
    def get_controller_extensions(self):
        """Attach the evacuate action controller to the servers resource."""
        controller = Controller(self.ext_mgr)
        extension = extensions.ControllerExtension(self, 'servers', controller)
        return [extension]
| apache-2.0 |
campbe13/openhatch | vendor/packages/Django/tests/regressiontests/views/generic_urls.py | 44 | 2143 | # -*- coding:utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import patterns, url
from django.views.generic import RedirectView
from . import views
from .models import Article, DateArticle, UrlArticle
# Keyword arguments shared by the date-based generic-view URL entries.
date_based_info_dict = {
    'queryset': Article.objects.all(),
    'date_field': 'date_created',
    'month_format': '%m',
}
object_list_dict = {
    'queryset': Article.objects.all(),
    'paginate_by': 2,
}
object_list_no_paginate_by = {
    'queryset': Article.objects.all(),
}
# Variants of the date-based dict: numeric day format, and a queryset of
# DateArticle objects.
numeric_days_info_dict = dict(date_based_info_dict, day_format='%d')
date_based_datefield_info_dict = dict(date_based_info_dict, queryset=DateArticle.objects.all())
urlpatterns = patterns('',
    (r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),
    (r'^accounts/logout/$', 'django.contrib.auth.views.logout'),
    # Special URLs for particular regression cases.
    url('^中文/$', 'regressiontests.views.views.redirect'),
    url('^中文/target/$', 'regressiontests.views.views.index_page'),
)
# redirects, both temporary and permanent, with non-ASCII targets
urlpatterns += patterns('',
    ('^nonascii_redirect/$', RedirectView.as_view(
        url='/中文/target/', permanent=False)),
    ('^permanent_nonascii_redirect/$', RedirectView.as_view(
        url='/中文/target/', permanent=True)),
)
# Shortcut views (render_to_response / render) exercised by the tests.
urlpatterns += patterns('regressiontests.views.views',
    (r'^shortcuts/render_to_response/$', 'render_to_response_view'),
    (r'^shortcuts/render_to_response/request_context/$', 'render_to_response_view_with_request_context'),
    (r'^shortcuts/render_to_response/content_type/$', 'render_to_response_view_with_content_type'),
    (r'^shortcuts/render/$', 'render_view'),
    (r'^shortcuts/render/base_context/$', 'render_view_with_base_context'),
    (r'^shortcuts/render/content_type/$', 'render_view_with_content_type'),
    (r'^shortcuts/render/status/$', 'render_view_with_status'),
    (r'^shortcuts/render/current_app/$', 'render_view_with_current_app'),
    (r'^shortcuts/render/current_app_conflict/$', 'render_view_with_current_app_conflict'),
)
| agpl-3.0 |
hydrospanner/DForurm | DForurm/env/Lib/site-packages/django/conf/locale/en/formats.py | 1007 | 1815 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Display formats (Django date-format syntax, per the header note above).
DATE_FORMAT = 'N j, Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'N j, Y, P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'm/d/Y'
SHORT_DATETIME_FORMAT = 'm/d/Y P'
FIRST_DAY_OF_WEEK = 0  # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
    '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y',  # '2006-10-25', '10/25/2006', '10/25/06'
    # The spelled-out month formats below are deliberately disabled and
    # kept only for reference.
    # '%b %d %Y', '%b %d, %Y',             # 'Oct 25 2006', 'Oct 25, 2006'
    # '%d %b %Y', '%d %b, %Y',             # '25 Oct 2006', '25 Oct, 2006'
    # '%B %d %Y', '%B %d, %Y',             # 'October 25 2006', 'October 25, 2006'
    # '%d %B %Y', '%d %B, %Y',             # '25 October 2006', '25 October, 2006'
]
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    '%Y-%m-%d',              # '2006-10-25'
    '%m/%d/%Y %H:%M:%S',     # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M:%S.%f',  # '10/25/2006 14:30:59.000200'
    '%m/%d/%Y %H:%M',        # '10/25/2006 14:30'
    '%m/%d/%Y',              # '10/25/2006'
    '%m/%d/%y %H:%M:%S',     # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M:%S.%f',  # '10/25/06 14:30:59.000200'
    '%m/%d/%y %H:%M',        # '10/25/06 14:30'
    '%m/%d/%y',              # '10/25/06'
]
# Number formatting: "1,234.56" style, digits grouped in threes.
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| mit |
ankurankan/scikit-learn | examples/ensemble/plot_partial_dependence.py | 249 | 4456 | """
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [1]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [HTF2009]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AvgOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; so does the average rooms
per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [HTF2009] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [1] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from mpl_toolkits.mplot3d import Axes3D
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing

# fetch California housing dataset
cal_housing = fetch_california_housing()

# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
                                                    cal_housing.target,
                                                    test_size=0.2,
                                                    random_state=1)
names = cal_housing.feature_names

print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
                                learning_rate=0.1, loss='huber',
                                random_state=1)
clf.fit(X_train, y_train)
print("done.")

print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
# NOTE: a bare ``print`` expression is a silent no-op on Python 3 (it just
# references the function); call print() to actually emit the blank line.
print()

# One-way PDPs for features 0, 5, 1, 2 and a two-way PDP for (5, 1).
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
                                   n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
             'for the California housing dataset')
plt.subplots_adjust(top=0.9)  # tight_layout causes overlap with suptitle

print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print()

# Evaluate the partial dependence on a 50x50 grid over features 1 and 5,
# then plot it as a 3D surface.
fig = plt.figure()
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
                                           X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
             'average occupancy')
plt.subplots_adjust(top=0.9)

plt.show()
| bsd-3-clause |
arekolek/MaxIST | map.py | 1 | 2115 | from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
from xml.etree import ElementTree as ET
from itertools import chain
import graph_tool.all as gt
def vertices(f):
    """Yield an (x, y) float pair for every node in an SNDlib network file.

    *f* may be a filename or an open file-like object containing SNDlib XML.
    """
    ns = {'s': 'http://sndlib.zib.de/network'}
    tree = ET.parse(f)
    for node in tree.findall('*/*/s:node', ns):
        x = float(node.find('*/s:x', ns).text)
        y = float(node.find('*/s:y', ns).text)
        yield (x, y)
def edges(f):
    """For every link in an SNDlib network file, yield an iterator over the
    four endpoint coordinates (x1, y1, x2, y2).

    *f* may be a filename or an open file-like object containing SNDlib XML.
    """
    ns = {'s': 'http://sndlib.zib.de/network'}
    tree = ET.parse(f)
    # Map node id -> (x, y) so link endpoints can be resolved by id.
    coords = {}
    for node in tree.findall('*/*/s:node', ns):
        coords[node.get('id')] = (float(node.find('*/s:x', ns).text),
                                  float(node.find('*/s:y', ns).text))
    for link in tree.findall('*/*/s:link', ns):
        src = coords[link.find('s:source', ns).text]
        dst = coords[link.find('s:target', ns).text]
        yield chain(src, dst)
if __name__ == '__main__':
    from sys import argv
    from re import findall

    # Render each SNDlib network file given on the command line as a small
    # square PDF map under output/.
    for f in argv[1:]:
        vs = np.array(list(vertices(f)))
        xmin, ymin = vs.min(axis=0)
        xmax, ymax = vs.max(axis=0)
        #x, y = vs.mean(axis=0)
        # Center the projection on the midpoint of the node bounding box.
        x, y = (xmin+xmax)/2, (ymin+ymax)/2
        # Throwaway projection used only to measure the bounding box extent
        # in projected (meter) coordinates.
        m = Basemap(projection='stere', lon_0=x, lat_0=y, width=1000, height=1000)
        xlo, ylo = m(xmin, ymin)
        xhi, yhi = m(xmax, ymax)
        # Square span with a 15% margin so the whole network fits.
        span = max(xhi-xlo, yhi-ylo) * 1.15
        #xmin, xmax = xmin-(xmax-xmin)/10, xmax+(xmax-xmin)/10
        #ymin, ymax = ymin-(ymax-ymin)/5, ymax+(ymax-ymin)/5
        # create new figure, axes instances.
        fig = plt.figure(frameon=False)
        ax = fig.add_axes([0, 0, 1, 1])
        ax.axis('off')
        # setup stereographic map projection sized to the measured span.
        m = Basemap(#llcrnrlon=xmin,llcrnrlat=ymin,urcrnrlon=xmax,urcrnrlat=ymax,\
                    #rsphere=(6378137.00,6356752.3142),\
                    resolution='l', projection='stere',\
                    lon_0=x, lat_0=y, width=span, height=span)
        #m.drawcountries(linestyle='dotted')
        m.fillcontinents(color='#dddddd')
        # Draw every link as a great-circle arc, then the nodes on top.
        for e in edges(f):
            m.drawgreatcircle(*e,linewidth=0.2,color='black')
        m.scatter(vs[:,0], vs[:,1], latlon=True, s=4, c='black', alpha=1, zorder=10)
        # Second-to-last '/'- or '.'-separated token of the path, i.e. the
        # file stem (assumes the input path has an extension).
        name = findall('[^/.]+', f)[-2]
        fig.set_size_inches(1.042, 1.042)
        fig.savefig('output/{}.pdf'.format(name), dpi=100)
| mit |
tinfoil/phantomjs | src/qt/qtbase/src/3rdparty/freetype/builds/mac/ascii2mpw.py | 830 | 1033 | #!/usr/bin/env python
import sys

# Ordered replacement tables.  Each pair is applied with str.replace() in
# sequence, so ordering matters: real newlines must be translated before the
# escaped "\n" sequences are turned back into real newlines (and the reverse
# order applies for the -r direction).
#
# Forward (default): ASCII-escaped text -> MPW encoding.
# (Fixed: the original called string.replace(), which was removed in
# Python 3; the str.replace() method behaves identically on both versions.)
_ASC_TO_MPW = [
    ("\\xA5", "\245"),
    ("\\xB6", "\266"),
    ("\\xC4", "\304"),
    ("\\xC5", "\305"),
    ("\\xFF", "\377"),
    ("\n", "\r"),
    ("\\n", "\n"),
]

# Reverse (-r): MPW encoding -> ASCII-escaped text.
_MPW_TO_ASC = [
    ("\n", "\\n"),
    ("\r", "\n"),
    ("\245", "\\xA5"),
    ("\266", "\\xB6"),
    ("\304", "\\xC4"),
    ("\305", "\\xC5"),
    ("\377", "\\xFF"),
]


def _convert(table):
    """Run every stdin line through *table* in order and write to stdout."""
    for line in sys.stdin.readlines():
        for old, new in table:
            line = line.replace(old, new)
        sys.stdout.write(line)


if len(sys.argv) == 1:
    _convert(_ASC_TO_MPW)
elif sys.argv[1] == "-r":
    _convert(_MPW_TO_ASC)
| bsd-3-clause |
daineseh/kodi-plugin.video.ted-talks-chinese | youtube_dl/extractor/udn.py | 24 | 2302 | # coding: utf-8
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
js_to_json,
ExtractorError,
)
from ..compat import compat_urlparse
class UDNEmbedIE(InfoExtractor):
    """Extractor for 聯合影音 (video.udn.com) embed/play pages."""
    IE_DESC = '聯合影音'
    _PROTOCOL_RELATIVE_VALID_URL = r'//video\.udn\.com/(?:embed|play)/news/(?P<id>\d+)'
    _VALID_URL = r'https?:' + _PROTOCOL_RELATIVE_VALID_URL
    _TESTS = [{
        'url': 'http://video.udn.com/embed/news/300040',
        'md5': 'de06b4c90b042c128395a88f0384817e',
        'info_dict': {
            'id': '300040',
            'ext': 'mp4',
            'title': '生物老師男變女 全校挺"做自己"',
            'thumbnail': 're:^https?://.*\.jpg$',
        }
    }, {
        'url': 'https://video.udn.com/embed/news/300040',
        'only_matching': True,
    }, {
        # From https://video.udn.com/news/303776
        'url': 'https://video.udn.com/play/news/303776',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The page embeds a JS object literal with the per-format URLs.
        player_options = json.loads(js_to_json(self._html_search_regex(
            r'var options\s*=\s*([^;]+);', webpage, 'video urls dictionary')))
        url_map = player_options['video']

        # Prefer a YouTube mirror when one is advertised; otherwise drop the
        # (possibly empty) youtube entry and resolve the remaining formats.
        youtube_url = url_map.get('youtube')
        if youtube_url:
            return self.url_result(youtube_url, 'Youtube')
        url_map.pop('youtube', None)

        # Each entry maps a format id to an API endpoint that returns the
        # actual media URL as its response body.
        formats = []
        for video_type, api_url in url_map.items():
            if not api_url:
                continue
            media_url = self._download_webpage(
                compat_urlparse.urljoin(url, api_url), video_id,
                'retrieve url for %s video' % video_type)
            formats.append({
                'url': media_url,
                'format_id': video_type,
                'preference': 0 if video_type == 'mp4' else -1,
            })
        if not formats:
            raise ExtractorError('No videos found', expected=True)
        self._sort_formats(formats)

        # First gallery image (when present) doubles as the thumbnail.
        gallery = player_options.get('gallery')
        thumbnail = gallery[0].get('original') if gallery else None

        return {
            'id': video_id,
            'formats': formats,
            'title': player_options['title'],
            'thumbnail': thumbnail,
        }
| gpl-2.0 |
GLolol/Cinnamon | files/usr/lib/cinnamon-settings/modules/cs_desklets.py | 16 | 2769 | #!/usr/bin/env python2
from ExtensionCore import ExtensionSidePage
from gi.repository import Gtk
from SettingsWidgets import *
class Module:
    """cinnamon-settings module entry point for the Desklets page."""
    comment = _("Manage your Cinnamon desklets")
    name = "desklets"
    category = "prefs"

    def __init__(self, content_box):
        """Create the side page; actual content loading is deferred to
        on_module_selected()."""
        keywords = _("desklet, desktop, slideshow")
        self.sidePage = DeskletsViewSidePage(_("Desklets"), "cs-desklets", keywords, content_box, "desklet", self)

    def on_module_selected(self):
        # Lazy-load the page contents the first time the module is shown.
        # NOTE(review): `self.loaded` is not assigned in this class --
        # presumably set by the settings framework driving these modules.
        if not self.loaded:
            print "Loading Desklets module"
            self.sidePage.load()

    def _setParentRef(self, window):
        # Hand the toplevel settings window down to the side page.
        self.sidePage.window = window
class DeskletsViewSidePage (ExtensionSidePage):
    """Side page listing installed desklets; specializes the generic
    ExtensionSidePage with desklet placement and extra settings."""

    def __init__(self, name, icon, keywords, content_box, collection_type, module):
        # Message shown for removal; desklet instances are removed from the
        # desktop itself rather than from this page.
        self.RemoveString = _("You can remove specific instances from the desktop via that desklet's context menu")
        ExtensionSidePage.__init__(self, name, icon, keywords, content_box, collection_type, module)

    def toSettingString(self, uuid, instanceId):
        """Build the "uuid:instanceId:x:y" settings entry for a newly added
        desklet, placing it 100px inside the primary monitor's top-left corner."""
        # NOTE(review): Gdk is not imported explicitly in this file --
        # presumably provided by SettingsWidgets' star import; confirm.
        screen = Gdk.Screen.get_default()
        primary = screen.get_primary_monitor()
        primary_rect = screen.get_monitor_geometry(primary)
        return ("%s:%d:%d:%d") % (uuid, instanceId, primary_rect.x + 100, primary_rect.y + 100)

    def fromSettingString(self, string):
        """Extract just the uuid from a "uuid:instanceId:x:y" settings entry."""
        uuid, instanceId, x, y = string.split(":")
        return uuid

    def getAdditionalPage(self):
        """Build the extra "General Settings" page (decoration level and
        snap-to-grid options backed by org.cinnamon gsettings keys)."""
        page = SettingsPage()
        page.label = _("General Settings")
        settings = page.add_section(_("General Desklets Settings"))
        # Decoration levels are stored as ints in desklet-decorations.
        dec = [[0, _("No decoration")], [1, _("Border only")], [2, _("Border and header")]]
        widget = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        combo_box = GSettingsComboBox(_("Decoration of desklets"), "org.cinnamon", "desklet-decorations", dec, valtype="int")
        widget.pack_start(combo_box, False, False, 0)
        line1 = Gtk.Label()
        line1.set_markup("<i><small>%s</small></i>" % _("Note: Some desklets require the border/header to be always present"))
        line1.get_style_context().add_class("dim-label")
        widget.pack_start(line1, True, True, 0)
        line2 = Gtk.Label()
        line2.set_markup("<i><small>%s</small></i>" % _("Such requirements override the settings selected here"))
        line2.get_style_context().add_class("dim-label")
        widget.pack_start(line2, True, True, 0)
        settings.add_row(widget)
        settings.add_row(GSettingsSwitch(_("Snap desklets to grid"), "org.cinnamon", "desklet-snap"))
        # The grid-width spinner is only revealed while desklet-snap is on.
        settings.add_reveal_row(GSettingsSpinButton(_("Width of desklet snap grid"), "org.cinnamon", "desklet-snap-interval", "", 0, 100, 1, 5), "org.cinnamon", "desklet-snap")
        return page
| gpl-2.0 |
hishnash/inspyred | inspyred/ec/terminators.py | 1 | 12518 | """
===================================================
:mod:`terminators` -- Algorithm termination methods
===================================================
This module provides pre-defined terminators for evolutionary computations.
Terminators specify when the evolutionary process should end. All
terminators must return a Boolean value where True implies that
the evolution should end.
All terminator functions have the following arguments:
- *population* -- the population of Individuals
- *num_generations* -- the number of elapsed generations
- *num_evaluations* -- the number of candidate solution evaluations
- *args* -- a dictionary of keyword arguments
.. note::
The *population* is really a shallow copy of the actual population of
the evolutionary computation. This means that any activities like
sorting will not affect the actual population.
.. Copyright 2012 Aaron Garrett
.. This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
.. This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
.. You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
.. module:: terminators
.. moduleauthor:: Aaron Garrett <aaron.lee.garrett@gmail.com>
"""
import itertools
import math
import sys
import time
def default_termination(population, num_generations, num_evaluations, args):
    """Unconditionally signal termination.

    Acts as the fallback termination criterion: every generation is treated
    as the last one.  All arguments are accepted for interface compatibility
    with the other terminators but are ignored.

    - *population* -- the population of Individuals
    - *num_generations* -- the number of elapsed generations
    - *num_evaluations* -- the number of candidate solution evaluations
    - *args* -- a dictionary of keyword arguments
    """
    return True
def diversity_termination(population, num_generations, num_evaluations, args):
    """Return True when the population has converged below *min_diversity*.

    The Euclidean distance is computed for every ordered pair of individuals
    (self-pairs included; they contribute distance 0 and never affect the
    maximum).  Termination is signalled when the largest pairwise distance
    drops below the required minimum diversity.  Only meaningful for
    candidates that are sequences of numbers.

    Optional keyword arguments in args:

    - *min_diversity* -- the minimum population diversity allowed
      (default 0.001; inserted into *args* if missing)
    """
    min_diversity = args.setdefault('min_diversity', 0.001)
    distances = []
    for first, second in itertools.product(population, repeat=2):
        squared = sum((a - b) ** 2
                      for a, b in zip(first.candidate, second.candidate))
        distances.append(math.sqrt(squared))
    return max(distances) < min_diversity
def average_fitness_termination(population, num_generations, num_evaluations, args):
    """Return True when the mean fitness is within *tolerance* of the best.

    A small gap between the population's average and best fitness indicates
    convergence, so termination is signalled.

    Optional keyword arguments in args:

    - *tolerance* -- the minimum allowable difference between average and
      best fitness (default 0.001; inserted into *args* if missing)
    """
    tolerance = args.setdefault('tolerance', 0.001)
    fitnesses = [member.fitness for member in population]
    mean_fitness = sum(fitnesses) / float(len(fitnesses))
    best_fitness = max(fitnesses)
    return (best_fitness - mean_fitness) < tolerance
def evaluation_termination(population, num_generations, num_evaluations, args):
    """Return True once the evaluation count reaches a maximum.

    The limit comes from ``args['max_evaluations']`` and defaults to the
    population size (the default is written back into *args*).

    Optional keyword arguments in args:

    - *max_evaluations* -- the maximum candidate solution evaluations
      (default len(population))
    """
    evaluation_limit = args.setdefault('max_evaluations', len(population))
    return num_evaluations >= evaluation_limit
def generation_termination(population, num_generations, num_evaluations, args):
    """Return True once the generation count reaches a maximum.

    The limit comes from ``args['max_generations']`` (default 1; the default
    is written back into *args*).

    Optional keyword arguments in args:

    - *max_generations* -- the maximum generations (default 1)
    """
    generation_limit = args.setdefault('max_generations', 1)
    return num_generations >= generation_limit
def time_termination(population, num_generations, num_evaluations, args):
    """Return True if the elapsed time meets or exceeds a duration of time.

    If ``start_time`` is omitted it is set to the current time on first call.
    If ``max_time`` is omitted the terminator fires immediately.  ``max_time``
    may be given as seconds (float), as a (minutes, seconds) pair, or as an
    (hours, minutes, seconds) triple; tuple forms are converted to seconds
    once and the converted value is written back into *args*.

    Optional keyword arguments in args:

    - *start_time* -- the time from which to start measuring (default None)
    - *max_time* -- the maximum time that should elapse (default None)
    """
    start_time = args.setdefault('start_time', None)
    max_time = args.setdefault('max_time', None)
    # Deliberately shadows the stdlib ``logging`` module name: this is the
    # logger attached to the evolutionary computation stored under '_ec'.
    # NOTE(review): raises AttributeError if args has no '_ec' entry.
    logging = args.get('_ec').logger
    if start_time is None:
        # First invocation without an explicit start: measure from now on.
        start_time = time.time()
        args['start_time'] = start_time
        logging.debug('time_termination terminator added without setting the start_time argument; setting start_time to current time')
    if max_time is None:
        logging.debug('time_termination terminator added without setting the max_time argument; terminator will immediately terminate')
    else:
        # Normalize tuple forms to seconds.  A plain number raises TypeError
        # on indexing (left unchanged); a 2-tuple raises IndexError at [2]
        # and is treated as (minutes, seconds).
        try:
            max_time = max_time[0] * 3600.0 + max_time[1] * 60.00 + max_time[2]
            args['max_time'] = max_time
        except TypeError:
            pass
        except IndexError:
            max_time = max_time[0] * 60 + max_time[1]
            args['max_time'] = max_time
    time_elapsed = time.time() - start_time
    # With no max_time configured this returns True immediately.
    return max_time is None or time_elapsed >= max_time
def user_termination(population, num_generations, num_evaluations, args):
    """Return True if the user presses the ESC key when prompted.

    Prompts on stdout and polls the keyboard for a specified number of
    seconds; evolution continues if ESC is not pressed in time.

    .. note::
       Uses ``msvcrt`` (Windows) and ``curses`` (Unix); other platforms may
       not be supported.

    Optional keyword arguments in args:

    - *termination_response_timeout* -- seconds to wait for the ESC key
      (default 5)
    - *clear_termination_buffer* -- whether the keyboard buffer should be
      cleared before polling (default True)
    """
    def getch():
        # Non-blocking single-key read; returns -1 when no key is available.
        # NOTE(review): written for Python 2 -- on Python 3 sys.platform is
        # 'linux' (never 'linux2'), so Linux falls into the msvcrt branch,
        # and msvcrt.getch() returns bytes, making `ch > -1` raise TypeError.
        unix = ('darwin', 'linux2')
        if sys.platform not in unix:
            try:
                import msvcrt
            except ImportError:
                return -1
            if msvcrt.kbhit():
                return msvcrt.getch()
            else:
                return -1
        elif sys.platform in unix:
            def _getch(stdscr):
                # Temporarily switch curses into non-blocking mode for one read.
                stdscr.nodelay(1)
                ch = stdscr.getch()
                stdscr.nodelay(0)
                return ch
            import curses
            return curses.wrapper(_getch)

    num_secs = args.get('termination_response_timeout', 5)
    clear_buffer = args.get('clear_termination_buffer', True)
    if clear_buffer:
        # Drain any keystrokes typed before the prompt appeared.
        while getch() > -1:
            pass
    sys.stdout.write('Press ESC to terminate (%d secs):' % num_secs)
    count = 1
    start = time.time()
    while time.time() - start < num_secs:
        ch = getch()
        # 27 == ESC
        if ch > -1 and ord(ch) == 27:
            sys.stdout.write('\n\n')
            return True
        elif time.time() - start == count:
            # Print a progress dot roughly once per second.
            sys.stdout.write('.')
            count += 1
    sys.stdout.write('\n')
    return False
def no_improvement_termination(population, num_generations, num_evaluations, args):
    """Return True when the best fitness has been stagnant for too long.

    The best fitness observed on the previous call is stored in
    ``args['previous_best']`` and a stagnation counter in
    ``args['generation_count']``.  Any change in best fitness resets the
    counter; once the counter exceeds *max_generations*, termination is
    signalled.

    Optional keyword arguments in args:

    - *max_generations* -- generations allowed without a change in best
      fitness (default 10)
    """
    stagnation_limit = args.setdefault('max_generations', 10)
    last_best = args.setdefault('previous_best', None)
    best_now = max(population).fitness
    if last_best is None or last_best != best_now:
        # First call, or the best fitness changed: restart the count.
        args['previous_best'] = best_now
        args['generation_count'] = 0
        return False
    if args['generation_count'] >= stagnation_limit:
        return True
    args['generation_count'] += 1
    return False
| mit |
cailean-d/NodeJS-Chat-Application | node_modules/node-gyp/gyp/pylib/gyp/generator/eclipse.py | 1825 | 17014 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""GYP backend that generates Eclipse CDT settings files.
This backend DOES NOT generate Eclipse CDT projects. Instead, it generates XML
files that can be imported into an Eclipse CDT project. The XML file contains a
list of include paths and symbols (i.e. defines).
Because a full .cproject definition is not created by this generator, it's not
possible to properly define the include dirs and symbols for each file
individually. Instead, one set of includes/symbols is generated for the entire
project. This works fairly well (and is a vast improvement in general), but may
still result in a few indexer issues here and there.
This generator has no automated tests, so expect it to be broken.
"""
from xml.sax.saxutils import escape
import os.path
import subprocess
import gyp
import gyp.common
import gyp.msvs_emulation
import shlex
import xml.etree.cElementTree as ET
# Default: do not adjust static library dependencies; may be flipped to True
# by CalculateGeneratorInputInfo based on generator flags.
generator_wants_static_library_dependencies_adjusted = False

# Variable defaults handed to gyp's input processing; populated below.
generator_default_variables = {
}

for dirname in ['INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']:
  # Some gyp steps fail if these are empty(!), so we convert them to variables
  generator_default_variables[dirname] = '$' + dirname

# These variables play no role in the Eclipse settings output, so they are
# deliberately mapped to the empty string.
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
  generator_default_variables[unused] = ''

# Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as
# part of the path when dealing with generated headers.  This value will be
# replaced dynamically for each configuration (see GetAllIncludeDirectories).
generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \
    '$SHARED_INTERMEDIATE_DIR'
def CalculateVariables(default_variables, params):
  """Fill in *default_variables* from generator flags and the target OS.

  Called by gyp before input processing.  On Windows, additionally pulls in
  the shared MSVS generator configuration.
  """
  generator_flags = params.get('generator_flags', {})
  for key, val in generator_flags.items():
    default_variables.setdefault(key, val)
  flavor = gyp.common.GetFlavor(params)
  default_variables.setdefault('OS', flavor)
  if flavor == 'win':
    # Copy additional generator configuration data from VS, which is shared
    # by the Eclipse generator.
    # NOTE(review): the two getattr results below are bound to locals that
    # are never read afterwards; only CalculateCommonVariables has an effect.
    import gyp.generator.msvs as msvs_generator
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])

    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  generator_flags = params.get('generator_flags', {})
  if generator_flags.get('adjust_static_libraries', False):
    # Module-level flag read by gyp's input machinery; only ever set here.
    global generator_wants_static_library_dependencies_adjusted
    generator_wants_static_library_dependencies_adjusted = True
def GetAllIncludeDirectories(target_list, target_dicts,
                             shared_intermediate_dirs, config_name, params,
                             compiler_path):
  """Calculate the set of include directories to be used.

  Returns:
    A list including all the include_dir's specified for every target followed
    by any include directories that were added as cflag compiler options.
  """

  gyp_includes_set = set()
  compiler_includes_list = []

  # Find compiler's default include dirs.
  if compiler_path:
    command = shlex.split(compiler_path)
    command.extend(['-E', '-xc++', '-v', '-'])
    proc = subprocess.Popen(args=command, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output = proc.communicate()[1]
    # Extract the list of include dirs from the output, which has this format:
    #   ...
    #   #include "..." search starts here:
    #   #include <...> search starts here:
    #    /usr/include/c++/4.6
    #    /usr/local/include
    #   End of search list.
    #   ...
    in_include_list = False
    for line in output.splitlines():
      if line.startswith('#include'):
        in_include_list = True
        continue
      if line.startswith('End of search list.'):
        break
      if in_include_list:
        include_dir = line.strip()
        if include_dir not in compiler_includes_list:
          compiler_includes_list.append(include_dir)

  flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
    generator_flags = params.get('generator_flags', {})
  for target_name in target_list:
    target = target_dicts[target_name]
    if config_name in target['configurations']:
      config = target['configurations'][config_name]

      # Look for any include dirs that were explicitly added via cflags. This
      # may be done in gyp files to force certain includes to come at the end.
      # TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and
      # remove this.
      if flavor == 'win':
        msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
        cflags = msvs_settings.GetCflags(config_name)
      else:
        cflags = config['cflags']
      for cflag in cflags:
        if cflag.startswith('-I'):
          include_dir = cflag[2:]
          if include_dir not in compiler_includes_list:
            compiler_includes_list.append(include_dir)

      # Find standard gyp include dirs.
      # (Fixed: dict.has_key() is Python-2-only and inconsistent with the
      # `in` / .get() usage elsewhere in this file.)
      if 'include_dirs' in config:
        include_dirs = config['include_dirs']
        for shared_intermediate_dir in shared_intermediate_dirs:
          for include_dir in include_dirs:
            # $SHARED_INTERMEDIATE_DIR is expanded per configuration here.
            include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR',
                                              shared_intermediate_dir)
            if not os.path.isabs(include_dir):
              base_dir = os.path.dirname(target_name)

              include_dir = base_dir + '/' + include_dir
              include_dir = os.path.abspath(include_dir)

            gyp_includes_set.add(include_dir)

  # Generate a list that has all the include dirs.
  all_includes_list = list(gyp_includes_set)
  all_includes_list.sort()
  for compiler_include in compiler_includes_list:
    if compiler_include not in gyp_includes_set:
      all_includes_list.append(compiler_include)

  # All done.
  return all_includes_list
def GetCompilerPath(target_list, data, options):
  """Determine a command that can be used to invoke the compiler.

  Returns:
    If this is a gyp project that has explicit make settings, try to determine
    the compiler from that.  Otherwise, see if a compiler was specified via the
    CC_target environment variable.  Falls back to plain 'gcc'.
  """
  # First, see if the compiler is configured in make's settings.
  build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
  make_global_settings_dict = data[build_file].get('make_global_settings', {})
  # make_global_settings is a sequence of (key, value) pairs, not a dict.
  for key, value in make_global_settings_dict:
    if key in ['CC', 'CXX']:
      return os.path.join(options.toplevel_dir, value)

  # Check to see if the compiler was specified as an environment variable.
  for key in ['CC_target', 'CC', 'CXX']:
    compiler = os.environ.get(key)
    if compiler:
      return compiler

  return 'gcc'
def GetAllDefines(target_list, target_dicts, data, config_name, params,
                  compiler_path):
  """Calculate the defines for a project.

  Returns:
    A dict that includes explict defines declared in gyp files along with all
    of the default defines that the compiler uses.
  """
  # Get defines declared in the gyp files.
  all_defines = {}
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
    generator_flags = params.get('generator_flags', {})
  for target_name in target_list:
    target = target_dicts[target_name]

    if flavor == 'win':
      msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
      extra_defines = msvs_settings.GetComputedDefines(config_name)
    else:
      extra_defines = []
    if config_name in target['configurations']:
      config = target['configurations'][config_name]
      target_defines = config['defines']
    else:
      target_defines = []
    for define in target_defines + extra_defines:
      # Split 'KEY=VALUE' into at most two parts; a bare 'KEY' defaults to 1.
      split_define = define.split('=', 1)
      if len(split_define) == 1:
        split_define.append('1')
      if split_define[0].strip() in all_defines:
        # Already defined
        continue
      all_defines[split_define[0].strip()] = split_define[1].strip()

  # Get default compiler defines (if possible).
  if flavor == 'win':
    return all_defines  # Default defines already processed in the loop above.
  if compiler_path:
    # Run the preprocessor with -dM to dump its built-in '#define KEY VALUE'
    # lines, then parse each into the result dict.
    command = shlex.split(compiler_path)
    command.extend(['-E', '-dM', '-'])
    cpp_proc = subprocess.Popen(args=command, cwd='.',
                                stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    cpp_output = cpp_proc.communicate()[0]
    cpp_lines = cpp_output.split('\n')
    for cpp_line in cpp_lines:
      if not cpp_line.strip():
        continue
      cpp_line_parts = cpp_line.split(' ', 2)
      key = cpp_line_parts[1]
      if len(cpp_line_parts) >= 3:
        val = cpp_line_parts[2]
      else:
        val = '1'
      all_defines[key] = val

  return all_defines
def WriteIncludePaths(out, eclipse_langs, include_dirs):
  """Emit the IncludePaths section of a CDT settings export file to *out*.

  Every language in *eclipse_langs* is given the same list of include
  directories.
  """
  out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.'
            'settingswizards.IncludePaths">\n')
  out.write(' <language name="holder for library settings"></language>\n')
  for language in eclipse_langs:
    out.write(' <language name="%s">\n' % language)
    entries = [' <includepath workspace_path="false">%s</includepath>\n' % d
               for d in include_dirs]
    out.writelines(entries)
    out.write(' </language>\n')
  out.write(' </section>\n')
def WriteMacros(out, eclipse_langs, defines):
  """Write the macros (preprocessor defines) section of a CDT settings export
  file.  Each language gets every define, emitted in sorted key order with
  XML-escaped names and values."""
  out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
      'settingswizards.Macros">\n')
  out.write(' <language name="holder for library settings"></language>\n')
  for lang in eclipse_langs:
    out.write(' <language name="%s">\n' % lang)
    # sorted(defines) iterates keys in sorted order on both Python 2 and 3
    # (dict.iterkeys() was removed in Python 3).
    for key in sorted(defines):
      out.write(' <macro><name>%s</name><value>%s</value></macro>\n' %
                (escape(key), escape(defines[key])))
    out.write(' </language>\n')
  out.write(' </section>\n')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
                            config_name):
  """Generate the CDT settings and Java classpath files for one configuration,
  under <toplevel>/<output_dir>/<config_name>/."""
  options = params['options']
  generator_flags = params.get('generator_flags', {})

  # build_dir: relative path from source root to our output files.
  # e.g. "out/Debug"
  build_dir = os.path.join(generator_flags.get('output_dir', 'out'),
                           config_name)

  toplevel_build = os.path.join(options.toplevel_dir, build_dir)
  # Ninja uses out/Debug/gen while make uses out/Debug/obj/gen as the
  # SHARED_INTERMEDIATE_DIR. Include both possible locations.
  shared_intermediate_dirs = [os.path.join(toplevel_build, 'obj', 'gen'),
                              os.path.join(toplevel_build, 'gen')]

  GenerateCdtSettingsFile(target_list,
                          target_dicts,
                          data,
                          params,
                          config_name,
                          os.path.join(toplevel_build,
                                       'eclipse-cdt-settings.xml'),
                          options,
                          shared_intermediate_dirs)
  GenerateClasspathFile(target_list,
                        target_dicts,
                        options.toplevel_dir,
                        toplevel_build,
                        os.path.join(toplevel_build,
                                     'eclipse-classpath.xml'))
def GenerateCdtSettingsFile(target_list, target_dicts, data, params,
                            config_name, out_name, options,
                            shared_intermediate_dirs):
  """Write *out_name*: an XML file of include paths and macros importable into
  an Eclipse CDT project (not a full .cproject)."""
  gyp.common.EnsureDirExists(out_name)
  with open(out_name, 'w') as out:
    out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
    out.write('<cdtprojectproperties>\n')

    # Languages the settings apply to, as named by the CDT import wizard.
    eclipse_langs = ['C++ Source File', 'C Source File', 'Assembly Source File',
                     'GNU C++', 'GNU C', 'Assembly']
    compiler_path = GetCompilerPath(target_list, data, options)
    include_dirs = GetAllIncludeDirectories(target_list, target_dicts,
                                            shared_intermediate_dirs,
                                            config_name, params, compiler_path)
    WriteIncludePaths(out, eclipse_langs, include_dirs)
    defines = GetAllDefines(target_list, target_dicts, data, config_name,
                            params, compiler_path)
    WriteMacros(out, eclipse_langs, defines)

    out.write('</cdtprojectproperties>\n')
def GenerateClasspathFile(target_list, target_dicts, toplevel_dir,
                          toplevel_build, out_name):
  '''Generates a classpath file suitable for symbol navigation and code
  completion of Java code (such as in Android projects) by finding all
  .java and .jar files used as action inputs.'''
  gyp.common.EnsureDirExists(out_name)
  result = ET.Element('classpath')

  def AddElements(kind, paths):
    # Appends one <classpathentry kind=... path=...> per unique path to the
    # shared `result` tree (closure over `result`).
    # First, we need to normalize the paths so they are all relative to the
    # toplevel dir.
    rel_paths = set()
    for path in paths:
      if os.path.isabs(path):
        rel_paths.add(os.path.relpath(path, toplevel_dir))
      else:
        rel_paths.add(path)

    for path in sorted(rel_paths):
      entry_element = ET.SubElement(result, 'classpathentry')
      entry_element.set('kind', kind)
      entry_element.set('path', path)

  AddElements('lib', GetJavaJars(target_list, target_dicts, toplevel_dir))
  AddElements('src', GetJavaSourceDirs(target_list, target_dicts, toplevel_dir))
  # Include the standard JRE container and a dummy out folder
  AddElements('con', ['org.eclipse.jdt.launching.JRE_CONTAINER'])
  # Include a dummy out folder so that Eclipse doesn't use the default /bin
  # folder in the root of the project.
  AddElements('output', [os.path.join(toplevel_build, '.eclipse-java-build')])

  ET.ElementTree(result).write(out_name)
def GetJavaJars(target_list, target_dicts, toplevel_dir):
  """Yield every .jar file referenced as an action input.

  Relative paths are resolved against the directory of the referencing
  target's gyp file; inputs starting with '$' (gyp variables) are skipped.
  """
  for target_name in target_list:
    actions = target_dicts[target_name].get('actions', [])
    target_dir = os.path.dirname(target_name)
    for action in actions:
      for path in action['inputs']:
        is_jar = os.path.splitext(path)[1] == '.jar'
        if not is_jar or path.startswith('$'):
          continue
        yield path if os.path.isabs(path) else os.path.join(target_dir, path)
def GetJavaSourceDirs(target_list, target_dicts, toplevel_dir):
  '''Generates a sequence of all likely java package root directories, found
  by walking up from each .java action input toward a 'src' or 'java'
  ancestor folder.'''
  for target_name in target_list:
    target = target_dicts[target_name]
    for action in target.get('actions', []):
      for input_ in action['inputs']:
        if (os.path.splitext(input_)[1] == '.java' and
            not input_.startswith('$')):
          # Directory of the .java file, resolved relative to the gyp file.
          dir_ = os.path.dirname(os.path.join(os.path.dirname(target_name),
                                              input_))
          # If there is a parent 'src' or 'java' folder, navigate up to it -
          # these are canonical package root names in Chromium. This will
          # break if 'src' or 'java' exists in the package structure. This
          # could be further improved by inspecting the java file for the
          # package name if this proves to be too fragile in practice.
          parent_search = dir_
          while os.path.basename(parent_search) not in ['src', 'java']:
            parent_search, _ = os.path.split(parent_search)
            if not parent_search or parent_search == toplevel_dir:
              # Didn't find a known root, just return the original path
              yield dir_
              break
          else:
            # while/else: runs only when the loop condition failed normally,
            # i.e. a 'src' or 'java' ancestor was actually found.
            yield parent_search
def GenerateOutput(target_list, target_dicts, data, params):
  """Generate an XML settings file that can be imported into a CDT project."""
  options = params['options']
  if options.generator_output:
    raise NotImplementedError("--generator_output not implemented for eclipse")

  # A single explicit config generates one file; otherwise emit one file
  # per configuration declared on the first target.
  generator_flags = params.get('generator_flags', {})
  user_config = generator_flags.get('config', None)
  if user_config:
    GenerateOutputForConfig(target_list, target_dicts, data, params,
                            user_config)
  else:
    first_target = target_dicts[target_list[0]]
    for config_name in first_target['configurations'].keys():
      GenerateOutputForConfig(target_list, target_dicts, data, params,
                              config_name)
| mit |
jk1/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/db/backends/mysql/operations.py | 312 | 2418 | from django.db.backends.mysql.base import DatabaseOperations
from django.contrib.gis.db.backends.adapter import WKTAdapter
from django.contrib.gis.db.backends.base import BaseSpatialOperations
class MySQLOperations(DatabaseOperations, BaseSpatialOperations):
    """Spatial database operations for the MySQL backend.

    MySQL evaluates spatial relations on bounding boxes only, so every
    geometry lookup below maps to an ``MBR*`` (Minimum Bounding
    Rectangle) function rather than an exact-geometry predicate.
    """
    compiler_module = 'django.contrib.gis.db.models.sql.compiler'
    mysql = True
    name = 'mysql'
    select = 'AsText(%s)'  # Geometry columns are selected as WKT text.
    from_wkb = 'GeomFromWKB'
    from_text = 'GeomFromText'

    Adapter = WKTAdapter
    Adaptor = Adapter # Backwards-compatibility alias.

    # Lookup name -> MySQL MBR function. All comparisons are
    # bounding-box approximations (see class docstring).
    geometry_functions = {
        'bbcontains' : 'MBRContains', # For consistency w/PostGIS API
        'bboverlaps' : 'MBROverlaps', # .. ..
        'contained' : 'MBRWithin', # .. ..
        'contains' : 'MBRContains',
        'disjoint' : 'MBRDisjoint',
        'equals' : 'MBREqual',
        'exact' : 'MBREqual',
        'intersects' : 'MBRIntersects',
        'overlaps' : 'MBROverlaps',
        'same_as' : 'MBREqual',
        'touches' : 'MBRTouches',
        'within' : 'MBRWithin',
        }

    # Every geometry lookup plus 'isnull' is a valid GIS lookup term.
    # NOTE: the list concatenation on dict.keys() is Python 2 only.
    gis_terms = dict([(term, None) for term in geometry_functions.keys() + ['isnull']])

    def geo_db_type(self, f):
        # MySQL uses the geometry field's own type name as the column type.
        return f.geom_type

    def get_geom_placeholder(self, value, srid):
        """
        The placeholder here has to include MySQL's WKT constructor.  Because
        MySQL does not support spatial transformations, there is no need to
        modify the placeholder based on the contents of the given value.
        """
        if hasattr(value, 'expression'):
            # Column-to-column comparison: reference the other geometry
            # column directly instead of building one from WKT.
            placeholder = '%s.%s' % tuple(map(self.quote_name, value.cols[value.expression]))
        else:
            placeholder = '%s(%%s)' % self.from_text
        return placeholder

    def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
        """Return the SQL fragment for a spatial lookup.

        Raises TypeError for lookup types not supported by MySQL.
        """
        alias, col, db_type = lvalue

        geo_col = '%s.%s' % (qn(alias), qn(col))

        lookup_info = self.geometry_functions.get(lookup_type, False)
        if lookup_info:
            return "%s(%s, %s)" % (lookup_info, geo_col,
                                   self.get_geom_placeholder(value, field.srid))

        # TODO: Is this really necessary? MySQL can't handle NULL geometries
        #  in its spatial indexes anyways.
        if lookup_type == 'isnull':
            return "%s IS %sNULL" % (geo_col, (not value and 'NOT ' or ''))

        raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))
| apache-2.0 |
paolodedios/tensorflow | tensorflow/python/training/tracking/layer_utils.py | 9 | 5513 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to layer/model functionality."""
# TODO(b/110718070): Move these functions back to tensorflow/python/keras/utils
# once __init__ files no longer require all of tf.keras to be imported together.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import weakref
from tensorflow.python.util import object_identity
try:
# typing module is only used for comment type annotations.
import typing # pylint: disable=g-import-not-at-top, unused-import
except ImportError:
pass
def is_layer(obj):
  """Implicit check for Layer-like objects (instances, never classes)."""
  # TODO(b/110718070): Replace with isinstance(obj, base_layer.Layer).
  return not isinstance(obj, type) and hasattr(obj, "_is_layer")
def has_weights(obj):
  """Implicit check for Layer-like objects that carry weight collections."""
  # TODO(b/110718070): Replace with isinstance(obj, base_layer.Layer).
  if isinstance(obj, type):
    return False
  cls = type(obj)
  return (hasattr(cls, "trainable_weights")
          and hasattr(cls, "non_trainable_weights"))
def invalidate_recursive_cache(key):
  """Convenience decorator to invalidate the cache when setting attributes.

  The decorated setter's owner must expose an ``_attribute_sentinel``;
  that sentinel's entry for `key` is invalidated before the set runs.
  """

  def outer(f):

    @functools.wraps(f)
    def wrapped(self, value):
      self._attribute_sentinel.invalidate(key)  # type: AttributeSentinel
      return f(self, value)

    return wrapped

  return outer
class MutationSentinel(object):
  """Container for tracking whether a property is in a cached state."""

  _in_cached_state = False  # Class-level default: starts invalidated.

  def mark_as(self, value):  # type: (MutationSentinel, bool) -> bool
    """Set the cached-state flag; return True if the flag changed."""
    changed = value != self._in_cached_state
    self._in_cached_state = value
    return changed

  @property
  def in_cached_state(self):
    return self._in_cached_state
class AttributeSentinel(object):
  """Container for managing attribute cache state within a Layer.

  The cache can be invalidated either on an individual basis (for instance when
  an attribute is mutated) or a layer-wide basis (such as when a new dependency
  is added).
  """

  def __init__(self, always_propagate=False):
    # Parents are held weakly so a sentinel never keeps a Layer alive.
    self._parents = weakref.WeakSet()
    # Missing keys auto-create a MutationSentinel in the invalidated state.
    self.attributes = collections.defaultdict(MutationSentinel)

    # The trackable data structure containers are simple pass throughs. They
    # don't know or care about particular attributes. As a result, they will
    # consider themselves to be in a cached state, so it's up to the Layer
    # which contains them to terminate propagation.
    self.always_propagate = always_propagate

  def __repr__(self):
    return "{}\n {}".format(
        super(AttributeSentinel, self).__repr__(),
        {k: v.in_cached_state for k, v in self.attributes.items()})

  def add_parent(self, node):
    # type: (AttributeSentinel, AttributeSentinel) -> None

    # Properly tracking removal is quite challenging; however since this is only
    # used to invalidate a cache it's alright to be overly conservative. We need
    # to invalidate the cache of `node` (since it has implicitly gained a child)
    # but we don't need to invalidate self since attributes should not depend on
    # parent Layers.
    self._parents.add(node)
    node.invalidate_all()

  def get(self, key):
    # type: (AttributeSentinel, str) -> bool
    # NOTE: reading an unknown key creates its (invalidated) entry, since
    # `attributes` is a defaultdict.
    return self.attributes[key].in_cached_state

  def _set(self, key, value):
    # type: (AttributeSentinel, str, bool) -> None
    # Only notify parents when the flag actually flipped, unless this
    # sentinel is a pass-through container (always_propagate).
    may_affect_upstream = self.attributes[key].mark_as(value)
    if may_affect_upstream or self.always_propagate:
      for node in self._parents:  # type: AttributeSentinel
        node.invalidate(key)

  def mark_cached(self, key):
    # type: (AttributeSentinel, str) -> None
    self._set(key, True)

  def invalidate(self, key):
    # type: (AttributeSentinel, str) -> None
    self._set(key, False)

  def invalidate_all(self):
    # Parents may have different keys than their children, so we locally
    # invalidate but use the `invalidate_all` method of parents.
    for key in self.attributes.keys():
      self.attributes[key].mark_as(False)

    for node in self._parents:
      node.invalidate_all()
def filter_empty_layer_containers(layer_list):
  """Filter out empty Layer-like containers and uniquify.

  Yields Layer-like objects from `layer_list` in depth-first order,
  descending into non-layer containers via their ".layers" attribute,
  de-duplicating by object identity.
  """
  # TODO(b/130381733): Make this an attribute in base_layer.Layer.
  existing = object_identity.ObjectIdentitySet()
  to_visit = list(reversed(layer_list))
  while to_visit:
    obj = to_visit.pop()
    if obj in existing:
      continue
    existing.add(obj)
    if is_layer(obj):
      yield obj
    else:
      # Trackable data structures will not show up in ".layers" lists, but
      # the layers they contain will.
      sub_layers = getattr(obj, "layers", None) or []
      to_visit.extend(reversed(sub_layers))
| apache-2.0 |
matcatc/Test_Parser | src/TestParser/Model/PythonUnittestRunner.py | 1 | 1517 | '''
@date Jun 28, 2010
@author Matthew A. Todd
This file is part of Test Parser
by Matthew A. Todd
Test Parser is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Test Parser is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Test Parser. If not, see <http://www.gnu.org/licenses/>.
'''
from .IRunner import IRunner
class PythonUnittestRunner(IRunner):
    '''
    Runner for the Python unittest framework.

    @warning Because of how Python unittest is designed, we require
    that the test runner be set up a certain way. For parsing to work,
    we need the verbosity level to be set to 2. For running to work,
    we need all the information output to stdout. I.e: use the
    following command in your test runner.

    unittest.TextTestRunner(verbosity=2, stream=sys.stdout).run(suite)

    @date Jun 28, 2010
    @author Matthew A. Todd
    '''
    def __init__(self):
        '''
        Constructor
        '''
        super().__init__()

    def computeCmd(self, params):
        '''
        Return the runner command unchanged: the Python unittest runner
        is invoked without any parameters (params is ignored).
        '''
        return self.runner
| gpl-3.0 |
onceuponatimeforever/oh-mainline | vendor/packages/python-social-auth/social/utils.py | 15 | 6901 | import re
import sys
import unicodedata
import collections
import functools
import logging
import six
import requests
import social
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.poolmanager import PoolManager
from social.exceptions import AuthCanceled, AuthUnreachableProvider
from social.p3 import urlparse, urlunparse, urlencode, \
parse_qs as battery_parse_qs
SETTING_PREFIX = 'SOCIAL_AUTH'
social_logger = logging.Logger('social')
class SSLHttpAdapter(HTTPAdapter):
    """
    Transport adapter that allows to use any SSL protocol. Based on:

    http://requests.rtfd.org/latest/user/advanced/#example-specific-ssl-version
    """
    def __init__(self, ssl_protocol):
        # ssl_protocol is passed through as urllib3's ssl_version
        # (presumably one of the ssl.PROTOCOL_* constants -- verify with
        # callers).
        self.ssl_protocol = ssl_protocol
        super(SSLHttpAdapter, self).__init__()

    def init_poolmanager(self, connections, maxsize, block=False):
        # Override so every connection pool is created with the
        # requested SSL protocol version.
        self.poolmanager = PoolManager(
            num_pools=connections,
            maxsize=maxsize,
            block=block,
            ssl_version=self.ssl_protocol
        )

    @classmethod
    def ssl_adapter_session(cls, ssl_protocol):
        """Return a requests Session whose https:// traffic uses *ssl_protocol*."""
        session = requests.Session()
        session.mount('https://', SSLHttpAdapter(ssl_protocol))
        return session
def import_module(name):
    """Import the module with the given dotted *name* and return it."""
    __import__(name)  # Loads the module and registers it in sys.modules.
    module = sys.modules[name]
    return module
def module_member(name):
mod, member = name.rsplit('.', 1)
module = import_module(mod)
return getattr(module, member)
def user_agent():
    """Builds a simple User-Agent string to send in requests"""
    return 'python-social-auth-{0}'.format(social.__version__)
def url_add_parameters(url, params):
    """Adds parameters to URL, parameter will be repeated if already present"""
    if not params:
        return url
    fragments = list(urlparse(url))
    query = parse_qs(fragments[4])
    query.update(params)
    fragments[4] = urlencode(query)
    return urlunparse(fragments)
def to_setting_name(*names):
    """Join the non-empty *names* into an upper-case, underscore setting name."""
    parts = []
    for name in names:
        if name:
            parts.append(name.upper().replace('-', '_'))
    return '_'.join(parts)
def setting_name(*names):
    """Build a setting name with the ``SOCIAL_AUTH`` prefix prepended."""
    return to_setting_name(SETTING_PREFIX, *names)
def sanitize_redirect(host, redirect_to):
    """
    Given the hostname and an untrusted URL to redirect to,
    this method tests it to make sure it isn't garbage/harmful
    and returns it, else returns None, similar as how's it done
    on django.contrib.auth.views.
    """
    if not redirect_to:
        return None
    try:
        # Don't redirect to a different host
        netloc = urlparse(redirect_to)[1] or host
    except (TypeError, AttributeError):
        return None
    return redirect_to if netloc == host else None
def user_is_authenticated(user):
    """Return whether *user* counts as authenticated.

    ``is_authenticated`` may be a method (older Django) or a plain
    boolean attribute (Django >= 1.10); both are supported. A truthy
    user without the attribute is considered authenticated.
    """
    if user and hasattr(user, 'is_authenticated'):
        # callable() replaces isinstance(x, collections.Callable): the
        # collections alias was removed in Python 3.10 and callable() is
        # equivalent on every supported version.
        if callable(user.is_authenticated):
            authenticated = user.is_authenticated()
        else:
            authenticated = user.is_authenticated
    elif user:
        authenticated = True
    else:
        authenticated = False
    return authenticated
def user_is_active(user):
    """Return whether *user* counts as active.

    ``is_active`` may be a method or a plain boolean attribute; both are
    supported. A truthy user without the attribute is considered active.
    """
    if user and hasattr(user, 'is_active'):
        # callable() replaces isinstance(x, collections.Callable): the
        # collections alias was removed in Python 3.10 and callable() is
        # equivalent on every supported version.
        if callable(user.is_active):
            is_active = user.is_active()
        else:
            is_active = user.is_active
    elif user:
        is_active = True
    else:
        is_active = False
    return is_active
# This slugify version was borrowed from django revision a61dbd6
def slugify(value):
    """Converts to lowercase, removes non-word characters (alphanumerics
    and underscores) and converts spaces to hyphens. Also strips leading
    and trailing whitespace."""
    # Fold accented characters to their ASCII base form.
    value = unicodedata.normalize('NFKD', value) \
                       .encode('ascii', 'ignore') \
                       .decode('ascii')
    # Raw strings: '\w'/'\s' are regex escapes, which non-raw string
    # literals reject in modern Python.
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    return re.sub(r'[-\s]+', '-', value)
def first(func, items):
    """Return the first item in *items* for which *func* is true, else None."""
    return next((item for item in items if func(item)), None)
def parse_qs(value):
    """Like urlparse.parse_qs but transform list values to single items"""
    # battery_parse_qs returns {key: [value, ...]}; drop_lists keeps only
    # the first value per key and coerces any bytes to text.
    return drop_lists(battery_parse_qs(value))
def drop_lists(value):
    """Reduce ``{key: [value, ...]}`` to ``{key: value}``.

    Keeps only the first item of each value list and decodes any bytes
    keys or values as UTF-8 text.
    """
    out = {}
    for key, val_list in value.items():
        val = val_list[0]
        if isinstance(key, six.binary_type):
            key = six.text_type(key, 'utf-8')
        if isinstance(val, six.binary_type):
            val = six.text_type(val, 'utf-8')
        out[key] = val
    return out
def partial_pipeline_data(backend, user=None, *args, **kwargs):
    """Restore the arguments of an interrupted pipeline from the session.

    Returns an ``(args, kwargs)`` tuple when the stored partial pipeline
    belongs to *backend*; when it belongs to a different backend the
    stale session entry is cleaned and None is returned (also returned
    when no partial pipeline is stored).
    """
    partial = backend.strategy.session_get('partial_pipeline', None)
    if partial:
        idx, backend_name, xargs, xkwargs = \
            backend.strategy.partial_from_session(partial)
        if backend_name == backend.name:
            kwargs.setdefault('pipeline_index', idx)
            if user:  # don't update user if it's None
                kwargs.setdefault('user', user)
            kwargs.setdefault('request', backend.strategy.request_data())
            # Caller-supplied kwargs take precedence over the stored ones.
            xkwargs.update(kwargs)
            return xargs, xkwargs
        else:
            backend.strategy.clean_partial_pipeline()
def build_absolute_uri(host_url, path=None):
    """Build absolute URI with given (optional) path"""
    path = path or ''
    # Already-absolute paths are returned untouched.
    if path.startswith(('http://', 'https://')):
        return path
    # Avoid a double slash at the join point.
    if host_url.endswith('/') and path.startswith('/'):
        return host_url + path[1:]
    return host_url + path
def constant_time_compare(val1, val2):
    """
    Returns True if the two strings are equal, False otherwise.

    The time taken is independent of the number of characters that match.

    This code was borrowed from Django 1.5.4-final
    """
    # NOTE: the early length check leaks length only, which is standard
    # for this construction. Do not "simplify" the XOR accumulation --
    # short-circuiting would reintroduce a timing side channel.
    if len(val1) != len(val2):
        return False
    result = 0
    if six.PY3 and isinstance(val1, bytes) and isinstance(val2, bytes):
        # On Python 3, iterating bytes yields ints directly.
        for x, y in zip(val1, val2):
            result |= x ^ y
    else:
        for x, y in zip(val1, val2):
            result |= ord(x) ^ ord(y)
    return result == 0
def is_url(value):
    """Truthy when *value* looks like an http(s) URL or an absolute path."""
    return value and value.startswith(('http://', 'https://', '/'))
def setting_url(backend, *names):
    """Return the first usable URL among *names*, or None.

    Each name is used literally when it already looks like an URL;
    otherwise it is treated as a backend setting name whose value is
    checked instead.
    """
    for name in names:
        if is_url(name):
            return name
        else:
            # Note: this 'else' belongs to the 'if', so every non-URL
            # name triggers a setting lookup before moving on.
            value = backend.setting(name)
            if is_url(value):
                return value
def handle_http_errors(func):
    """Decorator translating requests.HTTPError into auth exceptions.

    HTTP 400 -> AuthCanceled, HTTP 503 -> AuthUnreachableProvider; any
    other status is re-raised unchanged. args[0] (presumably the backend
    instance -- verify against callers) is passed to the exception.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except requests.HTTPError as err:
            if err.response.status_code == 400:
                raise AuthCanceled(args[0])
            elif err.response.status_code == 503:
                raise AuthUnreachableProvider(args[0])
            else:
                raise
    return wrapper
| agpl-3.0 |
Lyleo/OmniMarkupPreviewer | OmniMarkupLib/Renderers/libs/python3/docutils/languages/pt_br.py | 2 | 1903 | # $Id$
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Brazilian Portuguese-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
# Docutils node class name -> Brazilian Portuguese label text.
labels = {
      # fixed: language-dependent
      'author': 'Autor',
      'authors': 'Autores',
      'organization': 'Organiza\u00E7\u00E3o',
      'address': 'Endere\u00E7o',
      'contact': 'Contato',
      'version': 'Vers\u00E3o',
      'revision': 'Revis\u00E3o',
      'status': 'Estado',
      'date': 'Data',
      'copyright': 'Copyright',
      'dedication': 'Dedicat\u00F3ria',
      'abstract': 'Resumo',
      'attention': 'Aten\u00E7\u00E3o!',
      'caution': 'Cuidado!',
      'danger': 'PERIGO!',
      'error': 'Erro',
      'hint': 'Sugest\u00E3o',
      'important': 'Importante',
      'note': 'Nota',
      'tip': 'Dica',
      'warning': 'Aviso',
      'contents': 'Sum\u00E1rio'}
"""Mapping of node class name to label text."""

# Reverse direction: lowercased Portuguese field name -> canonical name.
bibliographic_fields = {
      # language-dependent: fixed
      'autor': 'author',
      'autores': 'authors',
      'organiza\u00E7\u00E3o': 'organization',
      'endere\u00E7o': 'address',
      'contato': 'contact',
      'vers\u00E3o': 'version',
      'revis\u00E3o': 'revision',
      'estado': 'status',
      'data': 'date',
      'copyright': 'copyright',
      'dedicat\u00F3ria': 'dedication',
      'resumo': 'abstract'}
"""Brazilian Portuguese (lowcased) to canonical name mapping for bibliographic fields."""

author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| mit |
grokcore/dev.lexycross | wordsmithed/aistats.py | 1 | 13616 | '''
This is a file which stores various statistics for AI metrics:
'''
import random, sys, time, math, dictionarywords
class AIStats():
    # NOTE(review): this module is Python 2 only (print statements, and the
    # tuple-unpacking parameter in updateSeedRatio below). The drawing
    # methods reference `pygame`, which is not in this file's import line --
    # presumably provided elsewhere, otherwise they raise NameError; verify.

    # Persistent on-disk location of the collected statistics.
    FILENAME = "wordsmithed/media/aistats.txt" #'media/heuristic/heuristic_tilequantile_5_5.txt'
    COLLECT_WORD_DATA = False #if True, this will collect data on timing/letterPlays
    COLLECT_GAME_DATA = False #if True, this will record data for entire games

    def __init__(self):
        # (totalTime, timeAtMaxWord) pairs per AI turn.
        self.timingInfo = []
        # letter -> list of scores for plays that used that letter.
        self.letterPlays = {}
        for code in range(ord('A'), ord('Z')+1):
            char = chr(code)
            self.letterPlays[char] = []
        #also add a count for blanks
        self.letterPlays["_"] = []
        # One list of final scores per recorded game.
        self.scores = []
        # (numSeeds, numTiles, points) triples.
        self.seedRatio = []
        self.load()

    '''
    Loads all stats from last time to update
    '''
    def load(self):
        # The file is sectioned by "TIMING:"/"LETTERS:"/"SEED:"/"GAME:"
        # headers separated by blank lines; a blank line resets MODE.
        try:
            statsFile = open(AIStats.FILENAME, 'r')
            MODE = "none"
            for line in statsFile:
                if line != "\n":
                    line = line.rstrip()
                    if MODE == "TIMING:":
                        tokens = line.split()
                        #TIMING DATA should be [totalTime] [timeAtMaxWord]
                        assert len(tokens) == 2
                        self.timingInfo.append((float(tokens[0]) , float(tokens[1])))
                    elif MODE == "LETTERS:":
                        tokens = line.split()
                        #LETTER PLAY should be [letter] [score]
                        assert len(tokens) == 2
                        self.letterPlays[tokens[0]].append(float(tokens[1]))
                    elif MODE == "SEED:":
                        tokens = line.split()
                        #SEEDS should be [numSeeds] [numTiles] [points]
                        assert len(tokens) == 3
                        self.seedRatio.append((int(tokens[0]), int(tokens[1]), float(tokens[2])))
                    elif MODE == "GAME:":
                        tokens = line.split()
                        self.scores.append([int(token) for token in tokens])
                else:
                    MODE = "none"
                if line == "TIMING:":
                    MODE = "TIMING:"
                elif line == "LETTERS:":
                    MODE = "LETTERS:"
                elif line == "SEED:":
                    MODE = "SEED:"
                elif line == "GAME:":
                    MODE = "GAME:"
        except IOError as e:
            # Missing stats file is fine: start with empty statistics.
            pass

    '''
    Saves all stats
    '''
    def save(self):
        # Writes the same sectioned format that load() parses.
        statsFile = open(AIStats.FILENAME, 'w')
        statsFile.write("TIMING:\n")
        for timeStamp in self.timingInfo:
            statsFile.write(str(timeStamp[0])+" "+str(timeStamp[1])+"\n")
        statsFile.write("\n")
        statsFile.write("LETTERS:\n")
        for code in range(ord('A'), ord('Z')+1):
            char = chr(code)
            if len(self.letterPlays[char]) > 0:
                for play in self.letterPlays[char]:
                    statsFile.write(char+" "+str(play)+"\n")
        if len(self.letterPlays["_"]) > 0:
            for play in self.letterPlays["_"]:
                statsFile.write("_ "+str(play)+"\n")
        statsFile.write("\n")
        statsFile.write("SEED:\n")
        for seeds in self.seedRatio:
            statsFile.write(str(seeds[0])+" "+str(seeds[1])+" "+str(seeds[2])+"\n")
        statsFile.write("\n")
        statsFile.write("GAME:\n")
        for game in self.scores:
            for score in game:
                statsFile.write(str(score)+" ")
            statsFile.write("\n")
        statsFile.write("\n")

    def updateTiming(self, totalTime, timeAtMaxWord):
        if AIStats.COLLECT_WORD_DATA:
            self.timingInfo.append((totalTime, timeAtMaxWord))

    def updateLetterPlays(self, lettersUsed, points):
        # Credits the full play score to every letter used in the play.
        if AIStats.COLLECT_WORD_DATA:
            for letter in lettersUsed:
                self.letterPlays[letter].append(points)

    def updateSeedRatio(self, (numSeeds, numTiles), points):
        # Python 2 only: tuple-unpacking parameter.
        if AIStats.COLLECT_WORD_DATA:
            self.seedRatio.append((numSeeds, numTiles, points))

    def saveGame(self, gameScores):
        if AIStats.COLLECT_GAME_DATA:
            self.scores.append(gameScores)

    '''
    Displays a histogram of the ratio of timeAtMaxWord over totalTime
    '''
    def visualizeTiming(self, DISPLAYSURF):
        values = []
        for timeStamp in self.timingInfo:
            # Epsilon avoids division by zero on zero-length turns.
            values.append(timeStamp[1]/(timeStamp[0]+0.00001))
        self.drawHistogram(DISPLAYSURF, values, 400, 400, 100)

    '''
    Gets the CDF of the timingInfo data, given a certain cutoff time, to see
    what percentage of turns would be executed properly
    '''
    def timingCDF(self, cutoffTime):
        i = 0
        for totalTime, maxWordTime in self.timingInfo:
            if maxWordTime < cutoffTime:
                i += 1
        return i / (len(self.timingInfo) + 0.00001)

    '''
    Gets the inverse CDF of letterPlay info for one play, to determine what
    score and less accounts for percentMass of the data
    '''
    def letterPlaysInvCDF(self, letter, mass):
        # letter=None aggregates A-Z (blanks excluded). Returns -1 when
        # there is no data.
        assert mass >= 0.0 and mass <= 1.0
        if letter != None:
            plays = self.letterPlays[letter]
        else:
            plays = []
            for code in range(ord('A'), ord('Z')+1):
                char = chr(code)
                for play in self.letterPlays[char]:
                    plays.append(play)
        score = -1
        if len(plays) > 0:
            plays = sorted(plays)
            score = plays[int(mass*len(plays))]
        return score

    '''
    Gets the average of a letterPlay
    '''
    def letterPlaysMean(self, letter):
        total = 0
        if letter != None:
            plays = self.letterPlays[letter]
        else:
            plays = []
            for code in range(ord('A'), ord('Z')+1):
                char = chr(code)
                for play in self.letterPlays[char]:
                    plays.append(play)
        for play in plays:
            total += play
        # Epsilon keeps the empty case at ~0 instead of dividing by zero.
        mean = total/(len(plays)+0.0001)
        return mean

    '''
    Gets the standard deviation of a letterPlay
    '''
    def letterPlaysStdDev(self, letter):
        total = 0
        mean = self.letterPlaysMean(letter)
        if letter != None:
            plays = self.letterPlays[letter]
        else:
            plays = []
            for code in range(ord('A'), ord('Z')+1):
                char = chr(code)
                for play in self.letterPlays[char]:
                    plays.append(play)
        for play in plays:
            total += math.pow(play-mean, 2)
        variance = total/(len(plays)+0.0001)
        stddev = math.sqrt(variance)
        return stddev

    '''
    Gets the games won from the 1st player, to see the relative win %
    '''
    def getGamesWon(self):
        # Ties count as half a win.
        totalWon = 0
        for scores in self.scores:
            assert len(scores) == 2, "Error, function only works for 2-player games."
            if scores[0] > scores[1]:
                totalWon += 1
            elif scores[0] == scores[1]:
                totalWon += 0.5
        return totalWon

    '''
    Gets the mean of the difference Player 1 - Player 2 in game scores
    (assumes Heurstic v. Control)
    '''
    def getGameDiffMean(self):
        totalDifference = 0
        for scores in self.scores:
            assert len(scores) == 2, "Error: function only works for 2-player games."
            difference = scores[0] - scores[1]
            totalDifference += difference
        return (totalDifference / len(self.scores))

    '''
    Gets the standard deviation of the difference Player 1 - Player 2 in game scores
    (assumes Heurstic v. Control)
    '''
    def getGameDiffStdDev(self):
        # NOTE(review): the sqrt is taken before dividing by n, so this
        # computes sqrt(sum of squared deviations)/n rather than the usual
        # sqrt(sum/n) -- verify this is intentional.
        totalDifference = 0
        mean = self.getGameDiffMean()
        for scores in self.scores:
            assert len(scores) == 2, "Error: function only works for 2-player games."
            difference = scores[0] - scores[1]
            totalDifference += math.pow(difference-mean, 2)
        totalDifference = math.sqrt(totalDifference)
        return (totalDifference / len(self.scores))

    '''
    Gets the highest word score
    '''
    def getHighestWord(self, letter = None):
        # letter=None aggregates A-Z (blanks excluded).
        if letter != None:
            plays = self.letterPlays[letter]
        else:
            plays = []
            for code in range(ord('A'), ord('Z')+1):
                char = chr(code)
                for play in self.letterPlays[char]:
                    plays.append(play)
        return(max(plays))

    '''
    Normalizes all seedRatio data and draws the heat map
    '''
    def visualizeSeedRatio(self, DISPLAYSURF, clamp=50):
        maxSeeds = 0
        maxTiles = 0
        maxScore = 0
        for seeds, tiles, score in self.seedRatio:
            if seeds > maxSeeds:
                maxSeeds = seeds
            if tiles > maxTiles:
                maxTiles = tiles
            if score > maxScore:
                maxScore = score
        values = []
        for seeds, tiles, score in self.seedRatio:
            normSeeds = seeds / (maxSeeds + 0.00001)
            normTiles = tiles / (maxTiles + 0.00001)
            # Scores are normalized against `clamp`, not maxScore, so
            # anything above `clamp` saturates in drawHeatMap.
            normScore = score / (clamp + 0.00001)
            assert (normSeeds >= 0.0 and normSeeds <= 1.0 and
                    normTiles >= 0.0 and normTiles <= 1.0)
            values.append((normSeeds, normTiles, normScore))
        self.drawHeatMap(DISPLAYSURF, values, 30)

    '''
    Draws a heatmap of a 3D data-set where x, y are ranged between 0.0 and 1.0,
    plotted values must range between 0.0 and 1.0, everything above and below is clamped.
    '''
    def drawHeatMap(self, DISPLAYSURF, values, size, blockSize = 10):
        #create a grid
        buckets = []
        for i in range(size):
            buckets.append([])
            for j in range(size):
                buckets[i].append([])
        for value in values:
            x = int(value[0] / (1.0/(size)))
            y = int(value[1] / (1.0/(size)))
            assert x >= 0 and x < len(buckets) and y >= 0 and y < len(buckets)
            buckets[x][y].append(value[2])
        LEFT_X = 10
        TOP_Y = 50
        for x in range(size):
            for y in range(size):
                total = 0.0
                num = 0.0001 #Add a small amount to the number so we don't get div by 0 errors
                for value in buckets[x][y]:
                    #Clamp values
                    if value > 1.0:
                        value = 1.0
                    elif value < 0.0:
                        value = 0.0
                    total += value
                    num += 1
                avg = (total/num)
                assert avg >= 0.0 and avg <= 1.0
                # Grey level encodes the average; empty cells are black.
                color = (255*avg, 255*avg, 255*avg)
                if num < 1.0:
                    color = (0, 0, 0)
                left = LEFT_X + blockSize * x
                top = TOP_Y + blockSize * y
                pygame.draw.rect(DISPLAYSURF, color, (left, top, blockSize, blockSize))

    '''
    Shows a histogram of words by their Google n-gram usage value
    '''
    def visualizeWordUsage(self, DISPLAYSURF):
        dictionary = dictionarywords.DictionaryWords("media/scrabblewords_usage.txt")
        values = dictionary.words.values()
        maxUsage = math.log(max(values))
        print "Most used word appeared "+str(maxUsage)+" times in Google's ngram corpus."
        normalizedValues = []
        for val in values:
            # NOTE(review): only val < 0 is remapped; val == 0 would make
            # math.log(0) raise -- verify zero counts cannot occur.
            if val < 0:
                normalizedValues.append(math.log(1)/(maxUsage+1))
            else:
                normalizedValues.append(math.log(val)/(maxUsage+1))
        self.drawHistogram(DISPLAYSURF, normalizedValues, 400, 400, 10)

    '''
    Gives the quantiles of word usages
    '''
    def wordUsageQuantiles(self, quantiles):
        dictionary = dictionarywords.DictionaryWords("media/scrabblewords_usage.txt")
        values = dictionary.words.values()
        values.sort(reverse=True)
        for quantile in quantiles:
            massCutoff = int(quantile * len(values))
            assert massCutoff < len(values)
            point = values[massCutoff]
            # Floor at 1 so math.log never sees zero or negatives.
            if point <= 1:
                point = 1
            print str(quantile)+" = "+str(math.log(point))

    '''
    Draws a histogram given a set of values ranging from 0.0 -> 1.0 and
    a width, height and number of buckets
    '''
    def drawHistogram(self, DISPLAYSURF, values, width, height, numBuckets):
        buckets = []
        for i in range(numBuckets):
            buckets.append(0)
        for value in values:
            assert value >= 0.0 and value <= 1.0, "Histogram only works on values between 0.0 and 1.0"
            bucketNumber = int(value / (1.0/(numBuckets)))
            assert bucketNumber >= 0 and bucketNumber < len(buckets)
            buckets[bucketNumber] += 1
        maxBucket = max(buckets)
        LEFT_X = 10
        TOP_Y = 50
        COLOR = (0, 100, 255)
        # White background, then one bar per bucket scaled to the tallest.
        pygame.draw.rect(DISPLAYSURF, (255, 255, 255), (LEFT_X, TOP_Y, width, height))
        i = 0
        barWidth = width/numBuckets
        for bucket in buckets:
            barLeft = LEFT_X + i * barWidth
            barHeight = float(bucket)/maxBucket * height
            barTop = TOP_Y + (height - barHeight)
            pygame.draw.rect(DISPLAYSURF, COLOR, (barLeft, barTop, barWidth, barHeight))
            i += 1
#RUNNING WORD FREQUENCY ON ITS OWN PROVIDES STATISTICS
if __name__ == '__main__':
    # Loads the saved statistics and prints a per-letter report relative
    # to the all-letter baseline, followed by game-level heuristics.
    aiStats = AIStats()
    print str(len(aiStats.timingInfo)) + " data points collected."
    #DISPLAYSURF = pygame.display.set_mode((800, 600))
    #pygame.display.set_caption('Wordsmith Statistics')
    #aiStats.visualizeTiming(DISPLAYSURF)
    #aiStats.visualizeSeedRatio(DISPLAYSURF)
    #aiStats.visualizeWordUsage(DISPLAYSURF)
    aiStats.wordUsageQuantiles([(i+1)/100.0 for i in range(99)])
    # Baselines computed across all letters (letter=None).
    groupMedian = aiStats.letterPlaysInvCDF(None, .5)
    groupMean = aiStats.letterPlaysMean(None)
    group25p = aiStats.letterPlaysInvCDF(None, .25)
    group75p = aiStats.letterPlaysInvCDF(None, .75)
    groupStdDev = aiStats.letterPlaysStdDev(None)
    for code in range(ord('A'), ord('Z')+1):
        char = chr(code)
        print char+": \tmedian = "+str(aiStats.letterPlaysInvCDF(char, .5)-groupMedian)
        print "\tmean = "+str(aiStats.letterPlaysMean(char)-groupMean)
        print "\t25th percentile: "+str(aiStats.letterPlaysInvCDF(char, .25)-group25p)
        print "\t75th percentile: "+str(aiStats.letterPlaysInvCDF(char, .75)-group75p)
        print "\tStd dev: "+str(aiStats.letterPlaysStdDev(char)/groupStdDev)
        print "\tBest play ever: "+str(aiStats.getHighestWord(char))
    # Blanks are reported separately (they are excluded from the
    # all-letter aggregates above).
    char = '_'
    print char+": \tmedian = "+str(aiStats.letterPlaysInvCDF(char, .5)-groupMedian)
    print "\tmean = "+str(aiStats.letterPlaysMean(char)-groupMean)
    print "\t25th percentile: "+str(aiStats.letterPlaysInvCDF(char, .25)-group25p)
    print "\t75th percentile: "+str(aiStats.letterPlaysInvCDF(char, .75)-group75p)
    print "\tStd dev: "+str(aiStats.letterPlaysStdDev(char)/groupStdDev)
    print "\tBest play ever: "+str(aiStats.getHighestWord(char))
    print "\nHighest-ever word score: "+str(aiStats.getHighestWord())
    print str(len(aiStats.letterPlays['Q']))+" games played counting letter statistics.\n"
    print "Latest Heuristic Game Analysis:"
    print "Test won "+str(100.0*aiStats.getGamesWon() / (len(aiStats.scores) + 0.00001))+"% of games."
    print "Mean performance improvement: "+str(aiStats.getGameDiffMean())
    print "Performance difference stddev: "+str(aiStats.getGameDiffStdDev())
    print str(len(aiStats.scores))+" games played in this round of testing.\n"
    #for i in range(0, 20, 1):
    #    print str(100*aiStats.timingCDF(i)) + '% would be completed successfully in '+ str(i) +' seconds'
    '''while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
        pygame.display.update() '''
| mit |
danielhb/kimchi | mockmodel.py | 1 | 21986 | #
# Project Kimchi
#
# Copyright IBM, Corp. 2013-2015
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import libvirt
import lxml.etree as ET
import os
import time
from collections import defaultdict
from lxml import objectify
from lxml.builder import E
from wok.exception import NotFoundError, OperationFailed
from wok.objectstore import ObjectStore
from wok.utils import add_task, get_next_clone_name
from wok.xmlutils.utils import xml_item_update
from wok.plugins.kimchi import imageinfo
from wok.plugins.kimchi import osinfo
from wok.plugins.kimchi.model import cpuinfo
from wok.plugins.kimchi.model import vmifaces
from wok.plugins.kimchi.model.host import DeviceModel
from wok.plugins.kimchi.model.libvirtstoragepool import IscsiPoolDef
from wok.plugins.kimchi.model.libvirtstoragepool import NetfsPoolDef
from wok.plugins.kimchi.model.libvirtstoragepool import StoragePoolDef
from wok.plugins.kimchi.model.model import Model
from wok.plugins.kimchi.model.storagepools import StoragePoolModel
from wok.plugins.kimchi.model.storagepools import StoragePoolsModel
from wok.plugins.kimchi.model.storagevolumes import StorageVolumeModel
from wok.plugins.kimchi.model.storagevolumes import StorageVolumesModel
from wok.plugins.kimchi.model import storagevolumes
from wok.plugins.kimchi.model.templates import LibvirtVMTemplate
from wok.plugins.kimchi.model.users import PAMUsersModel
from wok.plugins.kimchi.model.groups import PAMGroupsModel
from wok.plugins.kimchi.utils import pool_name_from_uri
from wok.plugins.kimchi.vmtemplate import VMTemplate
# Credentials for the mocked authentication (presumably consumed by the
# 'fake' PAM auth backends configured in MockModel -- verify callers).
fake_user = {'root': 'letmein!'}

# Values merged over osinfo.defaults so generated templates match the
# libvirt test:///default driver (a 'test' domain on i686).
mockmodel_defaults = {
    'domain': 'test', 'arch': 'i686'
}

# Broaden the set of content types accepted for raw storage volumes
# during tests.
storagevolumes.VALID_RAW_CONTENT = ['dos/mbr boot sector',
                                    'x86 boot sector',
                                    'data', 'empty']

# Storage pool URI used for template disks in the mock defaults.
DEFAULT_POOL = '/plugins/kimchi/storagepools/default-pool'
class MockModel(Model):
    """Kimchi Model wired to libvirt's test:///default driver.

    Monkey-patches libvirt entry points and several kimchi model classes
    (at class level and, per instance, via the '_mock_' naming scheme)
    so the whole API can be exercised without a real hypervisor, real
    storage or PAM authentication.  Note: this module is Python 2 code
    (iteritems, 'except Exception, e', unicode, long literals).
    """
    # Per-domain list of device XML snippets "hot-plugged" through
    # attachDeviceFlags; domainXMLDesc() splices them into the domain XML.
    _mock_vms = defaultdict(list)
    # Per-VM list of MockVMSnapshot objects.
    _mock_snapshots = {}
    # Originals saved so the patched replacements below can delegate.
    _XMLDesc = libvirt.virDomain.XMLDesc
    _undefineDomain = libvirt.virDomain.undefine
    _libvirt_get_vol_path = LibvirtVMTemplate._get_volume_path
    def __init__(self, objstore_loc=None):
        """Patch libvirt/kimchi globals, then build the real Model against
        the test driver.  *objstore_loc* is forwarded to ObjectStore."""
        # Override osinfo.defaults to ajust the values according to
        # test:///default driver
        defaults = dict(osinfo.defaults)
        defaults.update(mockmodel_defaults)
        defaults['disks'][0]['pool'] = {'name': DEFAULT_POOL}
        osinfo.defaults = dict(defaults)
        self._mock_vgs = MockVolumeGroups()
        self._mock_partitions = MockPartitions()
        self._mock_devices = MockDevices()
        self._mock_storagevolumes = MockStorageVolumes()
        cpuinfo.get_topo_capabilities = MockModel.get_topo_capabilities
        vmifaces.getDHCPLeases = MockModel.getDHCPLeases
        libvirt.virDomain.XMLDesc = MockModel.domainXMLDesc
        libvirt.virDomain.undefine = MockModel.undefineDomain
        libvirt.virDomain.attachDeviceFlags = MockModel.attachDeviceFlags
        libvirt.virDomain.detachDeviceFlags = MockModel.detachDeviceFlags
        libvirt.virDomain.updateDeviceFlags = MockModel.updateDeviceFlags
        libvirt.virStorageVol.resize = MockModel.volResize
        libvirt.virStorageVol.wipePattern = MockModel.volWipePattern
        # iSCSI/NFS pool preparation would touch real hosts; use the base
        # StoragePoolDef implementation for both subclasses instead.
        IscsiPoolDef.prepare = NetfsPoolDef.prepare = StoragePoolDef.prepare
        PAMUsersModel.auth_type = 'fake'
        PAMGroupsModel.auth_type = 'fake'
        super(MockModel, self).__init__('test:///default', objstore_loc)
        self.objstore_loc = objstore_loc
        self.objstore = ObjectStore(objstore_loc)
        # The MockModel methods are instantiated on runtime according to Model
        # and BaseModel
        # Because that a normal method override will not work here
        # Instead of that we also need to do the override on runtime
        for method in dir(self):
            if method.startswith('_mock_'):
                mock_method = getattr(self, method)
                if not callable(mock_method):
                    continue
                # '_mock_foo' shadows instance attribute 'foo'; the real
                # implementation stays reachable as '_model_foo'.
                m = method[6:]
                model_method = getattr(self, m)
                setattr(self, '_model_' + m, model_method)
                setattr(self, m, mock_method)
        DeviceModel.lookup = self._mock_device_lookup
        StoragePoolsModel._check_lvm = self._check_lvm
        StoragePoolModel._update_lvm_disks = self._update_lvm_disks
        StorageVolumesModel.get_list = self._mock_storagevolumes_get_list
        StorageVolumeModel.doUpload = self._mock_storagevolume_doUpload
        LibvirtVMTemplate._get_volume_path = self._get_volume_path
        VMTemplate.get_iso_info = self._probe_image
        imageinfo.probe_image = self._probe_image
    def reset(self):
        """Return the driver to a pristine state: drop mock devices and
        snapshots, recreate the object store and delete every resource
        except the built-in 'test' VM, 'default' network and
        'default-pool' storage pool."""
        MockModel._mock_vms = defaultdict(list)
        MockModel._mock_snapshots = {}
        if hasattr(self, 'objstore'):
            self.objstore = ObjectStore(self.objstore_loc)
        params = {'vms': [u'test'], 'templates': [],
                  'networks': [u'default'], 'storagepools': [u'default-pool']}
        for res, items in params.iteritems():
            resources = getattr(self, '%s_get_list' % res)()
            for i in resources:
                # Keep the test driver's built-in resources.
                if i in items:
                    continue
                # Deactivation may legitimately fail (e.g. already
                # inactive); deletion must still be attempted.
                try:
                    getattr(self, '%s_deactivate' % res[:-1])(i)
                except:
                    pass
                getattr(self, '%s_delete' % res[:-1])(i)
        volumes = self.storagevolumes_get_list('default-pool')
        for v in volumes:
            self.storagevolume_delete('default-pool', v)
    @staticmethod
    def get_topo_capabilities(conn):
        # The libvirt test driver doesn't return topology.
        xml = "<topology sockets='1' cores='2' threads='2'/>"
        return ET.fromstring(xml)
    @staticmethod
    def domainXMLDesc(dom, flags=0):
        """Real domain XML plus any mock-attached device snippets."""
        xml = MockModel._XMLDesc(dom, flags)
        root = objectify.fromstring(xml)
        for dev_xml in MockModel._mock_vms.get(dom.name(), []):
            dev = objectify.fromstring(dev_xml)
            root.devices.append(dev)
        return ET.tostring(root, encoding="utf-8")
    @staticmethod
    def undefineDomain(dom):
        """Drop the domain's mock devices, then really undefine it."""
        name = dom.name()
        if name in MockModel._mock_vms.keys():
            del MockModel._mock_vms[dom.name()]
        return MockModel._undefineDomain(dom)
    @staticmethod
    def attachDeviceFlags(dom, xml, flags=0):
        """Record the device XML instead of attaching it for real."""
        MockModel._mock_vms[dom.name()].append(xml)
    @staticmethod
    def _get_device_node(dom, xml):
        """Locate in the live domain XML the device element matching
        *xml*; returns (domain root, device element or None)."""
        # Child element whose attributes uniquely identify each device type.
        xpath_map = {'disk': 'target',
                     'interface': 'mac',
                     'graphics': 'listen'}
        dev = objectify.fromstring(xml)
        dev_id = dev.find(xpath_map[dev.tag]).items()
        dev_filter = ''
        for key, value in dev_id:
            dev_filter += "[@%s='%s']" % (key, value)
        old_xml = dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE)
        root = objectify.fromstring(old_xml)
        devices = root.devices
        dev = devices.find("./%s/%s%s/.." % (dev.tag, xpath_map[dev.tag],
                                             dev_filter))
        return (root, dev)
    @staticmethod
    def detachDeviceFlags(dom, xml, flags=0):
        """Remove a mock-attached device; matching is done on the
        pretty-printed form of the XML."""
        node = ET.fromstring(xml)
        xml = ET.tostring(node, encoding="utf-8", pretty_print=True)
        if xml in MockModel._mock_vms[dom.name()]:
            MockModel._mock_vms[dom.name()].remove(xml)
    @staticmethod
    def updateDeviceFlags(dom, xml, flags=0):
        """Replace the stored XML of an already-attached device."""
        _, old_dev = MockModel._get_device_node(dom, xml)
        old_xml = ET.tostring(old_dev, encoding="utf-8", pretty_print=True)
        if old_xml in MockModel._mock_vms[dom.name()]:
            MockModel._mock_vms[dom.name()].remove(old_xml)
        MockModel._mock_vms[dom.name()].append(xml)
    @staticmethod
    def volResize(vol, size, flags=0):
        """Emulate resize by re-creating the volume with the new
        capacity (the test driver lacks virStorageVol.resize)."""
        new_xml = xml_item_update(vol.XMLDesc(0), './capacity', str(size))
        vol.delete(0)
        pool = vol.storagePoolLookupByVolume()
        pool.createXML(new_xml)
    @staticmethod
    def volWipePattern(vol, algorithm, flags=0):
        """Emulate wipe by re-creating the volume with zero allocation;
        *algorithm* is accepted but ignored."""
        new_xml = xml_item_update(vol.XMLDesc(0), './allocation', '0')
        vol.delete(0)
        pool = vol.storagePoolLookupByVolume()
        pool.createXML(new_xml)
    @staticmethod
    def getDHCPLeases(net, mac):
        """Return a single canned DHCP lease for *mac*."""
        return [{'iface': 'virbr1', 'ipaddr': '192.168.0.167',
                 'hostname': 'kimchi', 'expirytime': 1433285036L,
                 'prefix': 24, 'clientid': '01:%s' % mac,
                 'mac': mac, 'iaid': None, 'type': 0}]
    def _probe_image(self, path):
        # Real probing needs actual image files; report unknown distro/version.
        return ('unknown', 'unknown')
    def _get_volume_path(self, pool_uri, vol):
        """Resolve a volume path: fake SCSI pools answer from the canned
        data, everything else from the saved libvirt implementation."""
        pool = pool_name_from_uri(pool_uri)
        pool_info = self.storagepool_lookup(pool)
        if pool_info['type'] == 'scsi':
            return self._mock_storagevolumes.scsi_volumes[vol]['path']
        return MockModel._libvirt_get_vol_path(pool, vol)
    def _check_lvm(self, name, from_vg):
        # do not do any verification while using MockModel
        pass
    def _update_lvm_disks(self, pool_name, disks):
        """Append *disks* as <device path=...> children of the pool's
        <source> element and redefine the pool."""
        conn = self.conn.get()
        pool = conn.storagePoolLookupByName(pool_name.encode('utf-8'))
        xml = pool.XMLDesc(0)
        root = ET.fromstring(xml)
        source = root.xpath('./source')[0]
        for d in disks:
            dev = E.device(path=d)
            source.append(dev)
        conn.storagePoolDefineXML(ET.tostring(root), 0)
    def _mock_storagevolumes_create(self, pool, params):
        """Create a volume; URL 'downloads' become small fixed-size
        volumes named after the URL's basename (no real download)."""
        vol_source = ['url', 'capacity']
        index_list = list(i for i in range(len(vol_source))
                          if vol_source[i] in params)
        create_param = vol_source[index_list[0]]
        name = params.get('name')
        if name is None and create_param == 'url':
            params['name'] = os.path.basename(params['url'])
            del params['url']
            # Token capacity for the pretend-downloaded volume.
            params['capacity'] = 1024
        return self._model_storagevolumes_create(pool, params)
    def _mock_storagevolumes_get_list(self, pool):
        """List volumes; SCSI pools report the canned LUN names."""
        pool_info = self.storagepool_lookup(pool)
        if pool_info['type'] == 'scsi':
            return self._mock_storagevolumes.scsi_volumes.keys()
        return self._model_storagevolumes_get_list(pool)
    def _mock_storagevolume_lookup(self, pool, vol):
        """Look up a volume; SCSI pools answer from the canned data."""
        pool_info = self.storagepool_lookup(pool)
        if pool_info['type'] == 'scsi':
            return self._mock_storagevolumes.scsi_volumes[vol]
        return self._model_storagevolume_lookup(pool, vol)
    def _mock_storagevolume_doUpload(self, cb, vol, offset, data, data_size):
        """Append an upload chunk to the volume's backing file, creating
        the file (and its directory) on the first chunk (offset == 0)."""
        vol_path = vol.path()
        # MockModel does not create the storage volume as a file
        # So create it to do the file upload
        if offset == 0:
            dirname = os.path.dirname(vol_path)
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            open(vol_path, 'w').close()
        try:
            with open(vol_path, 'a') as fd:
                fd.seek(offset)
                fd.write(data)
        except Exception, e:
            # Abort the upload: drop the partial file and fail the task.
            os.remove(vol_path)
            cb('', False)
            raise OperationFailed("KCHVOL0029E", {"err": e.message})
    def _mock_devices_get_list(self, _cap=None, _passthrough=None,
                               _passthrough_affected_by=None,
                               _available_only=None):
        """List canned device names, optionally filtered by capability
        ('fc_host' is folded into 'scsi_host')."""
        if _cap is None:
            return self._mock_devices.devices.keys()
        if _cap == 'fc_host':
            _cap = 'scsi_host'
        return [dev['name'] for dev in self._mock_devices.devices.values()
                if dev['device_type'] == _cap]
    def _mock_device_lookup(self, dev_name):
        # KeyError propagates for unknown devices.
        return self._mock_devices.devices[dev_name]
    def _mock_partitions_get_list(self):
        return self._mock_partitions.partitions.keys()
    def _mock_partition_lookup(self, name):
        return self._mock_partitions.partitions[name]
    def _mock_volumegroups_get_list(self):
        return self._mock_vgs.data.keys()
    def _mock_volumegroup_lookup(self, name):
        return self._mock_vgs.data[name]
    def _mock_vm_clone(self, name):
        """Clone a VM and share the source VM's snapshot list under the
        clone's auto-generated name (the list object is NOT copied)."""
        new_name = get_next_clone_name(self.vms_get_list(), name)
        snapshots = MockModel._mock_snapshots.get(name, [])
        MockModel._mock_snapshots[new_name] = snapshots
        return self._model_vm_clone(name)
    def _mock_vmsnapshots_create(self, vm_name, params):
        """Create a snapshot asynchronously; the name defaults to the
        current UNIX timestamp.  Returns the task lookup dict."""
        name = params.get('name', unicode(int(time.time())))
        params = {'vm_name': vm_name, 'name': name}
        taskid = add_task(u'/plugins/kimchi/vms/%s/snapshots/%s' %
                          (vm_name, name), self._vmsnapshots_create_task,
                          self.objstore, params)
        return self.task_lookup(taskid)
    def _vmsnapshots_create_task(self, cb, params):
        """Task body: demote the previous current snapshot (it becomes
        the new one's parent) and append the new snapshot as current."""
        vm_name = params['vm_name']
        name = params['name']
        parent = u''
        snapshots = MockModel._mock_snapshots.get(vm_name, [])
        for sn in snapshots:
            if sn.current:
                sn.current = False
                parent = sn.name
        snapshots.append(MockVMSnapshot(name, {'parent': parent}))
        MockModel._mock_snapshots[vm_name] = snapshots
        cb('OK', True)
    def _mock_vmsnapshots_get_list(self, vm_name):
        snapshots = MockModel._mock_snapshots.get(vm_name, [])
        return sorted([snap.name for snap in snapshots])
    def _mock_currentvmsnapshot_lookup(self, vm_name):
        # Implicitly returns None when no snapshot is marked current.
        for sn in MockModel._mock_snapshots.get(vm_name, []):
            if sn.current:
                return sn.info
    def _mock_vmsnapshot_lookup(self, vm_name, name):
        for sn in MockModel._mock_snapshots.get(vm_name, []):
            if sn.name == name:
                return sn.info
        raise NotFoundError('KCHSNAP0003E', {'name': name, 'vm': vm_name})
    def _mock_vmsnapshot_delete(self, vm_name, name):
        # NOTE(review): deleting the current snapshot leaves no snapshot
        # marked current; children keep their stale 'parent' reference.
        snapshots = MockModel._mock_snapshots.get(vm_name, [])
        for sn in snapshots:
            if sn.name == name:
                del snapshots[snapshots.index(sn)]
        MockModel._mock_snapshots[vm_name] = snapshots
    def _mock_vmsnapshot_revert(self, vm_name, name):
        """Make snapshot *name* the only current one for *vm_name*."""
        snapshots = MockModel._mock_snapshots.get(vm_name, [])
        for sn in snapshots:
            if sn.current:
                sn.current = False
        for sn in snapshots:
            if sn.name == name:
                sn.current = True
class MockStorageVolumes(object):
    """Canned SCSI volume data used in place of real libvirt lookups."""
    def __init__(self):
        base_path = "/dev/disk/by-path/pci-0000:0e:00.0-fc-0x20-lun"
        self.scsi_volumes = {}
        # Two fake LUNs differing only in unit number, path suffix and size.
        for lun, capacity in (('1', 1024), ('2', 2048)):
            self.scsi_volumes['unit:0:0:' + lun] = {'capacity': capacity,
                                                    'format': 'unknown',
                                                    'allocation': 512,
                                                    'type': 'block',
                                                    'path': base_path + lun,
                                                    'used_by': [],
                                                    'isvalid': True}
class MockVolumeGroups(object):
    """Canned LVM volume-group data (no lvm2 tooling in the test env)."""
    def __init__(self):
        self.data = {}
        # Fresh groups: no logical volumes yet, so free == size.
        for vg_name, pvs, size in (("hostVG", ["/dev/vdx"], 5347737600),
                                   ("kimchiVG", ["/dev/vdz", "/dev/vdw"],
                                    10695475200)):
            self.data[vg_name] = {"lvs": [], "name": vg_name, "pvs": pvs,
                                  "free": size, "size": size}
class MockPartitions(object):
    """Canned host disk data; both disks are identical apart from name."""
    def __init__(self):
        self.partitions = {}
        for dev in ("vdx", "vdz"):
            self.partitions[dev] = {"available": True, "name": dev,
                                    "fstype": "", "path": "/dev/" + dev,
                                    "mountpoint": "", "type": "disk",
                                    "size": "2147483648"}
class MockDevices(object):
    """Canned node-device inventory: one system device, two PCI
    functions and three FC-capable SCSI hosts."""
    def __init__(self):
        self.devices = {
            'computer': {'device_type': 'system',
                         'firmware': {'release_date': '01/01/2012',
                                      'vendor': 'LENOVO',
                                      'version': 'XXXXX (X.XX )'},
                         'hardware': {'serial': 'PXXXXX',
                                      'uuid':
                                      '9d660370-820f-4241-8731-5a60c97e8aa6',
                                      'vendor': 'LENOVO',
                                      'version': 'ThinkPad T420'},
                         'name': 'computer',
                         'parent': None,
                         'product': '4180XXX'}}
        # The two PCI functions share every field except bus, address,
        # driver, product and vendor.
        pci_devices = ((3, '0000:03:00.0', 'iwlwifi',
                        'Centrino Advanced-N 6205 [Taylor Peak]', '0x0085',
                        'Intel Corporation', '0x8086'),
                       (13, '0000:0d:00.0', 'sdhci-pci',
                        'PCIe SDXC/MMC Host Controller', '0xe823',
                        'Ricoh Co Ltd', '0x1180'))
        for bus, addr, drv, p_desc, p_id, v_desc, v_id in pci_devices:
            dev_name = 'pci_' + addr.replace(':', '_').replace('.', '_')
            self.devices[dev_name] = {'bus': bus,
                                      'device_type': 'pci',
                                      'domain': 0,
                                      'driver': {'name': drv},
                                      'function': 0,
                                      'iommuGroup': 7,
                                      'name': dev_name,
                                      'parent': 'computer',
                                      'path':
                                      '/sys/devices/pci0000:00/' + addr,
                                      'product': {'description': p_desc,
                                                  'id': p_id},
                                      'slot': 0,
                                      'vendor': {'description': v_desc,
                                                 'id': v_id}}
        # Three fibre-channel hosts differing only in WWNs and index.
        fc_adapters = (('37df6c1efa1b4388', 'efb6563f06434a98',
                        '742f32073aab45d7'),
                       ('542efa5dced34123', 'b7433a40c9b84092',
                        '25c1f485ae42497f'),
                       ('5c373c334c20478d', 'f2030bec4a254e6b',
                        '07dbca4164d44096'))
        for i, (fabric_wwn, wwnn, wwpn) in enumerate(fc_adapters):
            host_name = 'scsi_host%d' % i
            self.devices[host_name] = {'adapter': {'fabric_wwn': fabric_wwn,
                                                   'type': 'fc_host',
                                                   'wwnn': wwnn,
                                                   'wwpn': wwpn},
                                       'device_type': 'scsi_host',
                                       'host': 0,
                                       'name': host_name,
                                       'parent': 'computer',
                                       'path':
                                       ('/sys/devices/pci0000:00/'
                                        '0000:40:00.0/%d' % i)}
class MockVMSnapshot(object):
    """In-memory stand-in for a libvirt VM snapshot.

    A new snapshot is always created as the current one; the caller
    (MockModel._vmsnapshots_create_task) demotes the previously current
    snapshot itself.
    """
    def __init__(self, name, params=None):
        # BUG FIX: the default used to be a mutable 'params={}', which is
        # shared across all calls; use None and build a fresh dict instead.
        if params is None:
            params = {}
        self.name = name
        self.current = True
        self.info = {'created': params.get('created',
                                           unicode(int(time.time()))),
                     'name': name,
                     'parent': params.get('parent', u''),
                     'state': params.get('state', u'shutoff')}
| lgpl-3.0 |
PYPIT/PYPIT | pypeit/debugger.py | 1 | 3461 | """
Module to setup the PypeIt debugger
"""
from __future__ import (print_function, absolute_import, division, unicode_literals)
import matplotlib.pyplot as plt
import numpy as np
# These need to be outside of the def's
try:
from pypeit.ginga import show_image
except ImportError: # Ginga is not yet required
pass
else:
from pypeit.ginga import clear_canvas
# Moved to the top and changed to only import set_trace
from pdb import set_trace
# ADD-ONs from xastropy
def plot1d(*args, **kwargs):
    """ Plot one or more 1d arrays against the first one.

    Parameters
    ----------
    args : ndarray
        args[0] holds the x-values; every further positional array is
        plotted against it.  A single array is plotted on its own.
    outfil= : string
        If given, save the figure to this file instead of showing it
    xlbl,ylbl= : string
        Labels for x,y axes
    xrng= : list
        Range of x limits
    yrng= : list
        Range of y limits
    xtwo= : ndarray
        x-values for a second array (requires ytwo=)
    ytwo= : ndarray
        y-values for a second array
    mtwo= : str
        marker for the (xtwo, ytwo) curve
    scatter= : Bool
        Presence of this keyword selects a scatter plot
    NOTE: Any remaining keyword arguments are fed as kwargs to
    plt.plot()/plt.scatter()
    """
    # Error checking
    if len(args) == 0:
        print('x_guis.simple_splot: No arguments!')
        return
    if not isinstance(args[0], np.ndarray):
        print('x_guis: Input array is not a numpy.ndarray!')
        return

    # Pull the plot1d-specific keywords out of kwargs so only genuine
    # matplotlib options are forwarded to plt.plot()/plt.scatter().
    # (This replaces the old try/except/else dance with pop defaults and
    # fixes the bitwise '&' that was used on boolean membership tests.)
    plt_dict = {'outfil': kwargs.pop('outfil', None)}
    # 'scatter' is a presence flag: any value (even False) selects scatter.
    plt_dict['flg_scatt'] = 1 if 'scatter' in kwargs else 0
    kwargs.pop('scatter', None)
    # Second array: only honored when BOTH xtwo and ytwo are supplied.
    if 'xtwo' in kwargs and 'ytwo' in kwargs:
        plt_dict['flg_two'] = 1
        plt_dict['xtwo'] = kwargs.pop('xtwo')
        plt_dict['ytwo'] = kwargs.pop('ytwo')
        plt_dict['mtwo'] = kwargs.pop('mtwo', '')
    else:
        # A lone xtwo/ytwo/mtwo stays in kwargs and will be rejected by
        # plt.plot(), matching the historical behaviour.
        plt_dict['flg_two'] = 0
    # Axis limits and labels all default to None (meaning "leave alone").
    for key in ('xrng', 'yrng', 'xlbl', 'ylbl'):
        plt_dict[key] = kwargs.pop(key, None)

    # Clear
    plt.clf()
    # Plot it right up
    if len(args) == 1:
        plt.plot(args[0].flatten(), **kwargs)
    else:
        for kk in range(1, len(args)):
            if plt_dict['flg_scatt'] == 0:
                plt.plot(args[0].flatten(), args[kk].flatten(), **kwargs)
            else:
                plt.scatter(args[0].flatten(), args[kk].flatten(),
                            marker='o', **kwargs)
    if plt_dict['flg_two'] == 1:
        plt.plot(plt_dict['xtwo'], plt_dict['ytwo'], plt_dict['mtwo'],
                 color='red', **kwargs)
    # Limits
    if plt_dict['xrng'] is not None:
        plt.xlim(plt_dict['xrng'])
    if plt_dict['yrng'] is not None:
        plt.ylim(plt_dict['yrng'])
    # Label
    if plt_dict['xlbl'] is not None:
        plt.xlabel(plt_dict['xlbl'])
    if plt_dict['ylbl'] is not None:
        plt.ylabel(plt_dict['ylbl'])
    # Output?
    if plt_dict['outfil'] is not None:
        plt.savefig(plt_dict['outfil'])
        print('Wrote figure to {:s}'.format(plt_dict['outfil']))
    else:  # Show
        plt.show()
    return
| gpl-3.0 |
tedlaz/pyted | sms/requests/packages/chardet/langbulgarianmodel.py | 2965 | 12784 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# this table is modified base on win1251BulgarianCharToOrderMap, so
# only number <64 is sure valid
# Maps each ISO-8859-5 byte value (the tuple index) to a character
# "order": 0-251 are frequency ranks (presumably lower = more frequent;
# confirm against chardet's sbcharsetprober), 252 digits, 253 symbols,
# 254 CR/LF, 255 control characters -- see the legend comments above.
Latin5_BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82,  # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253,  # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71,  # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253,  # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,  # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,  # 90
 81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238,  # a0
 31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30,  # b0
 39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56,  # c0
  1, 18,  9, 20, 11,  3, 23, 15,  2, 26, 12, 10, 14,  6,  4, 13,  # d0
  7,  8,  5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16,  # e0
 62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253,  # f0
)
# Same order mapping as above but for the windows-1251 encoding; the
# ASCII half (first 128 entries) is identical to the Latin5 table.
win1251BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82,  # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253,  # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71,  # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253,  # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220,  # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229,  # 90
 88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240,  # a0
 73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250,  # b0
 31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30,  # c0
 39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56,  # d0
  1, 18,  9, 20, 11,  3, 23, 15,  2, 26, 12, 10, 14,  6,  4, 13,  # e0
  7,  8,  5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16,  # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences:3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
BulgarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
# Ready-to-use model descriptor consumed by chardet's single-byte charset
# prober: the per-encoding char-to-order map, the shared bigram precedence
# matrix, and the fraction of sequences covered by the most frequent pairs
# (96.9392% per the table statistics comment above).
Latin5BulgarianModel = {
    'charToOrderMap': Latin5_BulgarianCharToOrderMap,
    'precedenceMatrix': BulgarianLangModel,
    'mTypicalPositiveRatio': 0.969392,
    'keepEnglishLetter': False,
    'charsetName': "ISO-8859-5"
}
# Same language model, paired with the windows-1251 byte-order map.
Win1251BulgarianModel = {
    'charToOrderMap': win1251BulgarianCharToOrderMap,
    'precedenceMatrix': BulgarianLangModel,
    'mTypicalPositiveRatio': 0.969392,
    'keepEnglishLetter': False,
    'charsetName': "windows-1251"
}
# flake8: noqa
| gpl-3.0 |
agualis/test-django-nonrel | django/core/management/commands/test.py | 244 | 1747 | from django.core.management.base import BaseCommand
from optparse import make_option
import sys
class Command(BaseCommand):
    """Management command that runs the project's test suite."""
    option_list = BaseCommand.option_list + (
        make_option('--noinput', action='store_false', dest='interactive',
                    default=True,
                    help='Tells Django to NOT prompt the user for input of any kind.'),
        make_option('--failfast', action='store_true', dest='failfast',
                    default=False,
                    help='Tells Django to stop running the test suite after first failed test.'),
    )
    help = 'Runs the test suite for the specified applications, or the entire site if no apps are specified.'
    args = '[appname ...]'

    # Running tests must not require the models to validate up front.
    requires_model_validation = False

    def handle(self, *test_labels, **options):
        """Resolve the configured test runner and execute it, exiting
        with a non-zero status when any test fails."""
        from django.conf import settings
        from django.test.utils import get_runner

        verbosity = int(options.get('verbosity', 1))
        interactive = options.get('interactive', True)
        failfast = options.get('failfast', False)

        runner_cls = get_runner(settings)
        if hasattr(runner_cls, 'func_name'):
            # Pre 1.2 test runners were just functions,
            # and did not support the 'failfast' option.
            import warnings
            warnings.warn(
                'Function-based test runners are deprecated. Test runners should be classes with a run_tests() method.',
                DeprecationWarning
            )
            failures = runner_cls(test_labels, verbosity=verbosity,
                                  interactive=interactive)
        else:
            runner = runner_cls(verbosity=verbosity,
                                interactive=interactive, failfast=failfast)
            failures = runner.run_tests(test_labels)

        if failures:
            sys.exit(bool(failures))
| bsd-3-clause |
Englebabz/CasPyTeX | WebGUI.py | 1 | 2634 | #SERVER IMPORTS
import threading
import webbrowser
import http.server
import socketserver
import sys
import os
#CAS IMPORTS
sys.path.insert(0, 'Data/')
import textparser as cas
from TextCAS import displaymathcascall
from debugger import *
import CasPyTexConfig as config
# Static page served to the browser and the local TCP port to bind to.
FILE = 'Data/Web Interface/frontend.html'
PORT = 8080
def post_simplify(str,approx):
    """Handle a '#SAFEVAL1#' request: simplify the expression, or solve
    it when the text mentions solve/Solve, and build the reply string.

    str    -- raw expression text from the browser (note: the parameter
              shadows the builtin 'str'; kept for interface stability)
    approx -- True when an approximate/numeric answer was requested
    Returns 'SIMPL#...'/'SOLVE#...' on success, the literal 'error'
    for assignments or failed solves.
    """
    exp=cas.TextToCAS(str)
    # Assignments are not evaluated through this endpoint.
    if ":=" in str:
        return "error"
    if "solve" not in str and "Solve" not in str:
        result=exp.posforms(2,approx)
        # posforms may return a single expression rather than a list;
        # normalize to a list of LaTeX strings.  NOTE(review): this is an
        # exact type comparison -- assumes posforms never returns a
        # subclass of the CAS expression type.
        if type(result)==type(cas.TextToCAS("a")):
            result=[result.tolatex()]
        # Reply format: SIMPL#<input latex>#\(<form1>\)#\(<form2>\)...
        retval="SIMPL#"+exp.tolatex()
        for n in result:
            retval+=r"#\("+n+r"\)"
        return retval
    else:
        # displaymathcascall returns a sequence whose first element
        # signals success; element [1] holds the LaTeX result.
        resultarr=displaymathcascall(str,approx,[])
        if resultarr[0]==False:
            return "error"
        return "SOLVE#"+r"\("+resultarr[1]+r"\)"
def post_solve(str, approx):
    """Placeholder for the dedicated solve endpoint ('#SAFEVAL2#').

    Solving is currently handled inside post_simplify(); this stub only
    returns the protocol tag.  Both arguments are ignored.
    """
    return "SOLVE"
def byting(str):
    """Encode the given text to a UTF-8 bytes object.

    (The parameter name shadows the builtin 'str'; kept for interface
    stability with existing callers.)
    """
    return str.encode("utf-8")
class TestHandler(http.server.SimpleHTTPRequestHandler):
    """HTTP handler for the CAS web frontend.

    GET requests are served as static files by the base class; POST
    requests carry a '#SAFEVAL<n>#<flag>#<expression>' payload and get
    the CAS result back as the response body.
    """
    def do_POST(self):
        """Evaluate a posted CAS request and reply with the result.

        Payload layout: chars 0-9 are the tag ('#SAFEVAL1#' simplify,
        '#SAFEVAL2#' solve), char 10 is '1' when an approximate answer
        is requested, and the expression starts at index 12.  Any
        failure is reported as the literal body 'error'.
        """
        length = int(self.headers.get_all('Content-Length')[0])
        data_string = self.rfile.read(length).decode('utf-8')
        print("RECEIVED STRING :", data_string, "SENDING", data_string[12:])
        try:
            tag = data_string[:10]
            if tag == "#SAFEVAL1#":
                approx = data_string[10] == "1"
                result = byting(post_simplify(data_string[12:], approx))
            elif tag == "#SAFEVAL2#":
                approx = data_string[10] == "1"
                result = byting(post_solve(data_string[12:], approx))
            else:
                result = byting("error")
        except Exception as e:
            # Any CAS failure is reported to the frontend as 'error'.
            print(e)
            result = byting("error")
        print("SEDING FINAL", result)
        # BUG FIX: the original wrote the body with no status line or
        # headers, producing an invalid HTTP/1.x response.  Send a
        # proper 200 reply before the payload.
        self.send_response(200)
        self.send_header('Content-Type', 'text/plain; charset=utf-8')
        self.send_header('Content-Length', str(len(result)))
        self.end_headers()
        self.wfile.write(result)
def open_browser():
    """Open the frontend page in the default browser after half a second
    (giving the HTTP server, started right after us, time to come up)."""
    url = 'http://localhost:%s/%s' % (PORT, FILE)
    threading.Timer(0.5, webbrowser.open, args=(url,)).start()
def start_server():
"""Start the server."""
server_address = ("", PORT)
server = http.server.HTTPServer(server_address, TestHandler)
try:
server.serve_forever()
except KeyboardInterrupt:
server.socket.close()
if __name__ == "__main__":
config.Use_Coloredoutput=False
debug.lvl=0
open_browser()
start_server()
| mit |
dzan/xenOnArm | tools/python/logging/logging-0.4.9.2/test/log_test6.py | 42 | 2045 | #!/usr/bin/env python
#
# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""
A test harness for the logging module. Tests NTEventLogHandler.
Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved.
"""
import logging, logging.handlers
def main():
ntl = logging.handlers.NTEventLogHandler("Python Logging Test")
logger = logging.getLogger("")
logger.setLevel(logging.DEBUG)
logger.addHandler(ntl)
logger.debug("This is a '%s' message", "Debug")
logger.info("This is a '%s' message", "Info")
logger.warning("This is a '%s' message", "Warning")
logger.error("This is a '%s' message", "Error")
logger.critical("This is a '%s' message", "Critical")
try:
x = 4 / 0
except:
logger.info("This is an %s (or should that be %s?)", "informational exception", "exceptional information", exc_info=1)
logger.exception("This is the same stuff, via a %s", "exception() call")
logger.removeHandler(ntl)
if __name__ == "__main__":
main() | gpl-2.0 |
DelazJ/QGIS | python/plugins/sagaprovider/ext/supervisedclassification.py | 30 | 1159 | # -*- coding: utf-8 -*-
"""
***************************************************************************
supervisedclassification.py
---------------------
Date : July 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'July 2013'
__copyright__ = '(C) 2013, Victor Olaya'
from processing.tests.TestData import table
def editCommands(commands):
commands[-3] = commands[-3] + ' -STATS ' + table()
return commands
| gpl-2.0 |
tibor95/phatch-python2.7 | build/lib.linux-i686-2.7/phatch/lib/pyWx/treeDragDrop.py | 3 | 4626 | # Copyright (C) 2007-2008 www.stani.be
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
#
# Follow PEP8
import wx
class Mixin:
def GetItemChildren(self, item):
child, cookie = self.GetFirstChild(item)
children = []
while child:
children.append(child)
child = self.GetNextSibling(child)
return children
def GetRootChild(self, item):
root = self.GetRootItem()
if item == root:
return -1
parent = self.GetItemParent(item)
while parent != root:
item = parent
parent = self.GetItemParent(item)
return item
def MoveChildUp(self, item):
if item == self.GetRootItem():
return
parent = self.GetItemParent(item)
children = self.GetItemChildren(parent)
if item in children:
index = children.index(item)
if index > 0:
children.remove(item)
self._order = children[:index - 1] + \
[item] + children[index - 1:]
self.SortChildren(parent)
#print 'SortChildren', \
# parent,self.GetItemParent(parent),self.GetRootItem()
self._order = []
def MoveChildDown(self, item):
if item == self.GetRootItem():
return
parent = self.GetItemParent(item)
children = self.GetItemChildren(parent)
n = self.GetChildrenCount(parent, recursively=False)
if item in children:
index = children.index(item)
if index < n - 1:
children.remove(item)
self._order = children[:index + 1] + \
[item] + children[index + 1:]
self.SortChildren(parent)
self._order = []
def OnCompareItems(self, item1, item2):
if hasattr(self, '_order') and \
(item1 in self._order) and (item2 in self._order):
index1 = self._order.index(item1)
index2 = self._order.index(item2)
if index1 < index2:
return -1
if index1 == index2:
return 0
return 1
else:
raise 'no order'
# Drag & drop
def EnableDrag(self, dragTo=None):
if dragTo is None:
self._dragTo = self.GetRootChild
else:
self._dragTo = dragTo
self.Bind(wx.EVT_TREE_BEGIN_DRAG, self.OnBeginDrag, self)
self.Bind(wx.EVT_TREE_END_DRAG, self.OnEndDrag, self)
def DisableDrag(self):
self.Unbind(wx.EVT_TREE_BEGIN_DRAG, self)
self.Unbind(wx.EVT_TREE_END_DRAG, self)
def OnBeginDrag(self, event):
'''Allow drag-and-drop.'''
item = event.GetItem()
if item.IsOk() and item != self.GetRootItem():
event.Allow()
self._dragItem = item
def OnEndDrag(self, event):
'''Do the re-organization if possible'''
# If we dropped somewhere that isn't on top
# of an item, ignore the event.
target = event.GetItem()
if not (target.IsOk() and target != self.GetRootItem() and \
hasattr(self, '_dragItem') and self._dragItem):
return
items = [self._dragItem, target]
if self._dragTo:
items = [self._dragTo(item) for item in items]
parent, parentTarget = [self.GetItemParent(item) for item in items]
if parent.IsOk() and parentTarget.IsOk() and parent == parentTarget:
children = self.GetItemChildren(parent)
if (items[0] in children) and (items[1] in children):
# Move
old, new = [children.index(item) for item in items]
temp = children[new]
children[new] = children[old]
children[old] = temp
self._order = children
self.SortChildren(parent)
self._order = []
self._dragItem = None
| gpl-3.0 |
chongdashu/py-vgdl | vgdl/ai.py | 4 | 5675 |
import math
import core
#from tools import logToFile
class AStarNode(object):
def __init__(self, index, vgdlSprite):
self.vgdlSprite = vgdlSprite
self.sprite = vgdlSprite
self.index = index
class AStarWorld(object):
def __init__(self, game):
self.game = game
#ghost_sprites = game.getSprites('ghost')
#pacman_sprite = game.getSprites('pacman')[0]
self.food = game.getSprites('food')
self.nest = game.getSprites('nest')
self.moving = game.getSprites('moving')
self.empty = [core.VGDLSprite(pos, (self.game.block_size, self.game.block_size)) for pos in self.game.emptyBlocks()]
##print "food=%s, nest=%s, moving=%s" %(len(food), len(nest), len(moving))
##print "empty=%s" % (len(empty))
##print "total=%s" %(len(food)+len(nest)+len(moving)+len(empty))
##print "len(sprites)=%s" %len(sprites)
#print "game.width=%s, game.height=%s" %(game.width, game.height)
#print "pacman_sprite=%s" %(pacman_sprite)
#print "x=%s, y=%s" %(pacman_sprite.rect.left/game.block_size, pacman_sprite.rect.top/game.block_size)
self.save_walkable_tiles()
def get_walkable_tiles(self):
return self.food + self.nest + self.moving + self.empty
def save_walkable_tiles(self):
self.walkable_tiles = {}
self.walkable_tile_indices = []
combined = self.food + self.nest + self.moving + self.empty
#print combined
for sprite in combined:
#print sprite
tileX, tileY = self.get_sprite_tile_position(sprite)
index = self.get_index(tileX, tileY)
self.walkable_tile_indices.append(index)
self.walkable_tiles[index] = AStarNode(index, sprite)
def get_index(self, tileX, tileY):
#return tileX * self.game.width + tileY
return tileY * self.game.width + tileX
def get_tile_from_index(self, index):
return index/self.game.width, index%self.game.width
def h(self, start, goal):
#return self.euclidean(start, goal)
return self.distance(start, goal)
def euclidean(self, node1, node2):
x1, y1 = self.get_sprite_tile_position(node1.sprite)
x2, y2 = self.get_sprite_tile_position(node2.sprite)
#print "x1:%s, y1:%s, x2:%s, y2:%s" %(x1,y1,x2,y2)
a = x2-x1
b = y2-y1
#print "a:%s, b:%s" %(a,b)
return math.sqrt(a*a + b*b)
def get_sprite_tile_position(self, sprite):
tileX = sprite.rect.left/self.game.block_size
tileY = sprite.rect.top/self.game.block_size
return tileX, tileY
def get_lowest_f(self, nodes, f_score):
f_best = 9999
node_best = None
for node in nodes:
if f_score[node.index] < f_best:
f_best = f_score[node.index]
node_best = node
return node_best
def reconstruct_path(self, came_from, current):
#print self.get_tile_from_index(current.index)
if current.index in came_from:
p = self.reconstruct_path(came_from, came_from[current.index])
p.append(current)
return p
else:
return [current]
def neighbor_nodes(self, node):
sprite = node.sprite;
return self.neighbor_nodes_of_sprite(sprite)
def neighbor_nodes_of_sprite(self, sprite):
tileX, tileY = self.get_sprite_tile_position(sprite)
tiles = [ (tileX-1,tileY), (tileX+1, tileY), (tileX,tileY-1), (tileX, tileY+1)]
neighbors = []
for (tilex, tiley) in tiles:
if (tilex >= 0 and tilex < self.game.width and tiley >= 0 and tiley < self.game.height):
index = self.get_index(tilex, tiley)
if index in self.walkable_tile_indices:
neighbors.append(self.walkable_tiles[index])
# neighbor_indices = [neighbor.index for neighbor in neighbors]
# print 'neighbors(%s,%s):%s' %(tileX, tileY, map(self.get_tile_from_index, neighbor_indices))
return neighbors
def distance(self, node1, node2):
x1, y1 = self.get_sprite_tile_position(node1.sprite)
x2, y2 = self.get_sprite_tile_position(node2.sprite)
return abs(x2-x1) + abs(y2-y1)
def getMoveFor(self, startSprite):
tileX, tileY = self.get_sprite_tile_position(startSprite)
index = self.get_index(tileX, tileY)
startNode = AStarNode(index, startSprite)
pacman = self.game.getSprites('pacman')[0]
goalX, goalY = self.get_sprite_tile_position(pacman)
goalIndex = self.get_index(goalX, goalY)
goalNode = AStarNode(goalIndex, pacman)
# logToFile('Goal: (%s,%s) --> (%s, %s)' %(tileX, tileY, goalX, goalY))
return self.search(startNode, goalNode)
def search(self, start, goal):
# Initialize the variables.
closedset = []
openset = []
came_from = {}
g_score = {}
f_score = {}
openset = [start]
g_score[start.index] = 0
f_score[start.index] = g_score[start.index] + self.h(start, goal)
while (len(openset) > 0):
current = self.get_lowest_f(openset, f_score)
if current.index == goal.index:
# print came_from
path = self.reconstruct_path(came_from, goal)
# path_sprites = [node.sprite for node in path]
# pathh = map(self.get_sprite_tile_position, path_sprites)
# print pathh
return path
openset.remove(current)
closedset.append(current)
for neighbor in self.neighbor_nodes(current):
temp_g = g_score[current.index] + self.distance(current, neighbor)
if self.nodeInSet(neighbor, closedset) and temp_g >= g_score[neighbor.index]:
continue
if not self.nodeInSet(neighbor, openset) or temp_g < g_score[neighbor.index]:
came_from[neighbor.index] = current
#print 'came_from[%s]=%s' % (self.get_tile_from_index(neighbor.index), self.get_tile_from_index(current.index))
g_score[neighbor.index] = temp_g
f_score[neighbor.index] = g_score[neighbor.index] + self.h(neighbor, goal)
if neighbor not in openset:
openset.append(neighbor)
return None
def nodeInSet(self, node, nodeSet):
nodeSetIndices = [n.index for n in nodeSet]
return node.index in nodeSetIndices
| bsd-3-clause |
lyremelody/mysql-replicant-python | examples/load_balancer.py | 3 | 2707 | # Copyright (c) 2010, Mats Kindahl, Charles Bell, and Lars Thalmann
# All rights reserved.
#
# Use of this source code is goverened by a BSD licence that can be
# found in the LICENCE file.
import sys, os.path
rootpath = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
sys.path.append(rootpath)
import MySQLdb as _connector
import mysql.replicant.errors
import my_deployment
class AlreadyInPoolError(mysql.replicant.errors.Error):
pass
class NotInPoolError(mysql.replicant.errors.Error):
pass
_CREATE_TABLE = """
CREATE TABLE common.nodes(
host VARCHAR(48),
port INT,
sock VARCHAR(64),
type SET('READ','WRITE'))
"""
_INSERT_SERVER = """
INSERT INTO nodes(host, port, sock, type)
VALUES (%s, %s, %s, %s)
"""
_DELETE_SERVER = "DELETE FROM nodes WHERE host = %s AND port = %s"
_UPDATE_SERVER = "UPDATE nodes SET type = %s WHERE host = %s AND port = %s"
def pool_add(common, server, type=[]):
try:
common.sql(_INSERT_SERVER,
(server.host, server.port, server.socket, ','.join(type)),
db="common");
except _connector.IntegrityError:
raise AlreadyInPoolError
def pool_del(common, server):
common.sql(_DELETE_SERVER, (server.host, server.port), db="common")
def pool_set(common, server, type):
common.sql(_UPDATE_SERVER, (','.join(type), server.host, server.port),
db="common")
import unittest
class TestLoadBalancer(unittest.TestCase):
"Class to test the load balancer functions."
def setUp(self):
from my_deployment import common, master, slaves
common.sql("DROP DATABASE IF EXISTS common")
common.sql("CREATE DATABASE common")
common.sql(_CREATE_TABLE)
def tearDown(self):
from my_deployment import common, servers
for server in servers:
pool_del(common, server)
common.sql("DROP DATABASE common")
def testServers(self):
from my_deployment import common, master, slaves
try:
pool_add(common, master, ['READ', 'WRITE'])
except AlreadyInPoolError:
pool_set(common, master, ['READ', 'WRITE'])
for slave in slaves:
try:
pool_add(common, slave, ['READ'])
except AlreadyInPoolError:
pool_set(common, slave, ['READ'])
for row in common.sql("SELECT * FROM nodes", db="common"):
if row['port'] == master.port:
self.assertEqual(row['type'], 'READ,WRITE')
elif row['port'] in [slave.port for slave in slaves]:
self.assertEqual(row['type'], 'READ')
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
petricm/DIRAC | Resources/MessageQueue/test/Test_MQConnectionManager.py | 5 | 28499 | """Unit tests of MQConnectionManager in the DIRAC.Resources.MessageQueue.MConnectionManager
Also, test of internal functions for mq connection storage.
"""
## ignore use of __functions, _functions
#pylint: disable=no-member, protected-access
import unittest
import mock
from DIRAC import S_OK
from DIRAC.Resources.MessageQueue.MQConnectionManager import MQConnectionManager
class TestMQConnectionManager( unittest.TestCase ):
def setUp( self ):
self.maxDiff = None # To show full difference between structures in case of error
dest = {}
dest.update({'/queue/test1': ['producer4', 'consumer1', 'consumer2', 'consumer4']})
dest.update({'/queue/test2': ['producer2', 'consumer1', 'consumer2']})
dest.update({'/topic/test1': ['producer1']})
dest4 = {'/queue/test3': ['producer1', 'consumer2','consumer3','consumer4']}
conn1 = {'MQConnector':'TestConnector1', 'destinations':dest}
conn2 = {'MQConnector':'TestConnector2', 'destinations':dest4}
storage = {'mardirac3.in2p3.fr':conn1, 'testdir.blabla.ch':conn2}
self.mgr = MQConnectionManager(connectionStorage = storage)
def tearDown( self ):
pass
class TestMQConnectionStorageFunctions_connectionExists( TestMQConnectionManager ):
def test_success( self ):
self.assertTrue(self.mgr._MQConnectionManager__connectionExists( 'mardirac3.in2p3.fr'))
def test_failure( self ):
self.assertFalse(self.mgr._MQConnectionManager__connectionExists( 'nonexisting'))
class TestMQConnectionStorageFunctions_destinationExists( TestMQConnectionManager ):
def test_success( self ):
self.assertTrue(self.mgr._MQConnectionManager__destinationExists('mardirac3.in2p3.fr', '/queue/test1'))
def test_failure( self ):
self.assertFalse(self.mgr._MQConnectionManager__destinationExists( 'nonexisting', '/queue/test1'))
def test_failure2( self ):
self.assertFalse(self.mgr._MQConnectionManager__destinationExists('mardirac3.in2p3.fr', '/queue/nonexisting'))
class TestMQConnectionStorageFunctions_messengerExists( TestMQConnectionManager ):
def test_success( self ):
self.assertTrue(self.mgr._MQConnectionManager__messengerExists('mardirac3.in2p3.fr', '/queue/test1','consumer2' ))
self.assertTrue(self.mgr._MQConnectionManager__messengerExists('mardirac3.in2p3.fr', '/queue/test1','producer4' ))
def test_failure( self ):
self.assertFalse(self.mgr._MQConnectionManager__messengerExists('noexisting', '/queue/test1','producer4' ))
def test_failure2( self ):
self.assertFalse(self.mgr._MQConnectionManager__messengerExists( 'mardirac3.in2p3.fr', '/queue/nonexisting','producer4'))
def test_failure3( self ):
self.assertFalse(self.mgr._MQConnectionManager__messengerExists( 'mardirac3.in2p3.fr', '/queue/test1','producer10'))
class TestMQConnectionStorageFunctions_getConnection( TestMQConnectionManager ):
def test_success( self ):
expectedConn = {'MQConnector':'TestConnector2', 'destinations':{'/queue/test3': ['producer1', 'consumer2','consumer3','consumer4']}}
self.assertEqual(self.mgr._MQConnectionManager__getConnection('testdir.blabla.ch'),expectedConn)
def test_failure( self ):
self.assertEqual(self.mgr._MQConnectionManager__getConnection('nonexisiting'), {})
class TestMQConnectionStorageFunctions_getAllConnections( TestMQConnectionManager ):
def test_success( self ):
expectedOutput = ['testdir.blabla.ch','mardirac3.in2p3.fr']
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllConnections()),sorted(expectedOutput))
class TestMQConnectionStorageFunctions_getConnector( TestMQConnectionManager ):
def test_success( self ):
self.assertEqual(self.mgr._MQConnectionManager__getConnector('testdir.blabla.ch'),'TestConnector2')
def test_failure( self ):
self.assertIsNone(self.mgr._MQConnectionManager__getConnector('nonexisiting'))
class TestMQConnectionStorageFunctions_setConnector( TestMQConnectionManager ):
def test_success( self ):
self.assertTrue(self.mgr._MQConnectionManager__setConnector('testdir.blabla.ch', 'TestConnector5'))
self.assertEqual(self.mgr._MQConnectionManager__getConnector('testdir.blabla.ch'),'TestConnector5')
def test_failure( self ):
self.assertFalse(self.mgr._MQConnectionManager__setConnector('nonexisiting', 'TestConnector3'))
class TestMQConnectionStorageFunctions_getDestinations( TestMQConnectionManager ):
def test_success( self ):
expectedDests ={'/queue/test1': ['producer4', 'consumer1', 'consumer2', 'consumer4'],
'/queue/test2': ['producer2', 'consumer1', 'consumer2'],
'/topic/test1': ['producer1']}
self.assertEqual(self.mgr._MQConnectionManager__getDestinations('mardirac3.in2p3.fr'),expectedDests)
def test_failure( self ):
self.assertEqual(self.mgr._MQConnectionManager__getDestinations('nonexisiting'), {})
class TestMQConnectionStorageFunctions_getMessengersId( TestMQConnectionManager ):
def test_success( self ):
expectedMess =['producer4', 'consumer1', 'consumer2', 'consumer4']
self.assertEqual(self.mgr._MQConnectionManager__getMessengersId('mardirac3.in2p3.fr', '/queue/test1'),expectedMess)
def test_success2( self ):
expectedMess2 =['producer2', 'consumer1', 'consumer2']
self.assertEqual(self.mgr._MQConnectionManager__getMessengersId('mardirac3.in2p3.fr', '/queue/test2'),expectedMess2)
def test_failure( self ):
self.assertEqual(self.mgr._MQConnectionManager__getMessengersId('nonexisiting', '/queue/test2'), [])
def test_failure2( self ):
self.assertEqual(self.mgr._MQConnectionManager__getMessengersId('mardirac3.in2p3.fr', 'nonexisiting'), [])
class TestMQConnectionStorageFunctions_getMessengersIdWithType( TestMQConnectionManager ):
def test_success( self ):
expectedMess =['producer4']
self.assertEqual(self.mgr._MQConnectionManager__getMessengersIdWithType('mardirac3.in2p3.fr', '/queue/test1', 'producer'),expectedMess)
def test_success2( self ):
expectedMess2 =['producer2']
self.assertEqual(self.mgr._MQConnectionManager__getMessengersIdWithType('mardirac3.in2p3.fr', '/queue/test2', 'producer'),expectedMess2)
def test_success3( self ):
expectedMess =[ 'consumer1', 'consumer2', 'consumer4']
self.assertEqual(self.mgr._MQConnectionManager__getMessengersIdWithType('mardirac3.in2p3.fr', '/queue/test1', 'consumer'),expectedMess)
def test_success4( self ):
expectedMess2 =['consumer1', 'consumer2']
self.assertEqual(self.mgr._MQConnectionManager__getMessengersIdWithType('mardirac3.in2p3.fr', '/queue/test2', 'consumer'),expectedMess2)
def test_failure( self ):
self.assertEqual(self.mgr._MQConnectionManager__getMessengersIdWithType('nonexisiting', '/queue/test2', 'producer'), [])
def test_failure2( self ):
self.assertEqual(self.mgr._MQConnectionManager__getMessengersIdWithType('mardirac3.in2p3.fr', 'nonexisiting', 'producer'), [])
class TestMQConnectionStorageFunctions_getAllMessengersInfo( TestMQConnectionManager ):
def test_success( self ):
expectedOutput= ['mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()),sorted(expectedOutput))
class TestMQConnectionStorageFunctions_getAllMessengersId( TestMQConnectionManager ):
def test_success( self ):
expectedOutput= ['producer4', 'consumer1', 'consumer2', 'consumer4', 'producer2', 'consumer1', 'consumer2', 'producer1', 'producer1', 'consumer2', 'consumer3', 'consumer4']
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersId()),sorted(expectedOutput))
class TestMQConnectionStorageFunctions_getAllMessengersIdWithType( TestMQConnectionManager ):
def test_success( self ):
expectedOutput= ['consumer1', 'consumer2', 'consumer4', 'consumer1', 'consumer2','consumer2', 'consumer3', 'consumer4']
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersIdWithType('consumer')),sorted(expectedOutput))
expectedOutput= ['producer4', 'producer2', 'producer1', 'producer1']
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersIdWithType('producer')),sorted(expectedOutput))
class TestMQConnectionStorageFunctions_addMessenger( TestMQConnectionManager ):
def test_success( self ):
expectedOutput= ['mardirac3.in2p3.fr/queue/test1/producer1', 'mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
self.assertTrue(self.mgr._MQConnectionManager__addMessenger('mardirac3.in2p3.fr', '/queue/test1', 'producer1'))
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()),sorted(expectedOutput))
def test_success2( self ):
# new queue
expectedOutput= ['mardirac3.in2p3.fr/queue/test5/producer8', 'mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
self.assertTrue(self.mgr._MQConnectionManager__addMessenger('mardirac3.in2p3.fr', '/queue/test5', 'producer8'))
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()),sorted(expectedOutput))
def test_success3( self ):
# new connection
expectedOutput= ['mytest.is.the.best/queue/test10/producer24', 'mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
self.assertTrue(self.mgr._MQConnectionManager__addMessenger('mytest.is.the.best', '/queue/test10', 'producer24'))
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()),sorted(expectedOutput))
def test_success4( self ):
# two times
expectedOutput= ['mytest.is.the.best/queue/test10/producer2', 'mytest.is.the.best/queue/test10/producer24', 'mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
self.assertTrue(self.mgr._MQConnectionManager__addMessenger('mytest.is.the.best', '/queue/test10', 'producer24'))
self.assertTrue(self.mgr._MQConnectionManager__addMessenger('mytest.is.the.best', '/queue/test10', 'producer2'))
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()),sorted(expectedOutput))
def test_failure( self ):
# messenger already exists
expectedOutput= ['mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
self.assertFalse(self.mgr._MQConnectionManager__addMessenger('mardirac3.in2p3.fr', '/queue/test1', 'producer4'))
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()),sorted(expectedOutput))
class TestMQConnectionStorageFunctions_removeMessenger( TestMQConnectionManager ):
def test_success( self ):
expectedOutput= [ 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
self.assertTrue(self.mgr._MQConnectionManager__removeMessenger('mardirac3.in2p3.fr', '/queue/test1', 'producer4'))
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()),sorted(expectedOutput))
def test_success2( self ):
#remove whole destination /topic/test1 cause only one element
expectedOutput= [ 'mardirac3.in2p3.fr/queue/test1/producer4','mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2','testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
self.assertTrue(self.mgr._MQConnectionManager__removeMessenger('mardirac3.in2p3.fr', '/topic/test1', 'producer1'))
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()),sorted(expectedOutput))
def test_success3( self ):
expectedOutput= ['mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1']
#remove whole connection
self.assertTrue(self.mgr._MQConnectionManager__removeMessenger('testdir.blabla.ch', '/queue/test3', 'producer1'))
self.assertTrue(self.mgr._MQConnectionManager__removeMessenger('testdir.blabla.ch', '/queue/test3', 'consumer2'))
self.assertTrue(self.mgr._MQConnectionManager__removeMessenger('testdir.blabla.ch', '/queue/test3', 'consumer3'))
self.assertTrue(self.mgr._MQConnectionManager__removeMessenger('testdir.blabla.ch', '/queue/test3', 'consumer4'))
self.assertEqual(sorted(self.mgr._MQConnectionManager__getAllMessengersInfo()),sorted(expectedOutput))
def test_failure( self ):
#remove nonexisting messenger
self.assertFalse(self.mgr._MQConnectionManager__removeMessenger('testdir.blabla.ch', '/queue/test3', 'producer10'))
def test_failure2( self ):
#remove nonexisting destination
self.assertFalse(self.mgr._MQConnectionManager__removeMessenger('testdir.blabla.ch', '/queue/nonexisting', 'producer1'))
def test_failure3( self ):
#remove nonexisting connection
self.assertFalse(self.mgr._MQConnectionManager__removeMessenger('nonexisting', '/queue/test103', 'producer1'))
class TestMQConnectionManager_addNewmessenger( TestMQConnectionManager ):
  """Tests for MQConnectionManager.addNewMessenger()."""
  def test_success( self ):
    """A producer added to an existing queue gets the next free producer id."""
    result = self.mgr.addNewMessenger(mqURI = "mardirac3.in2p3.fr::Queue::test1", messengerType = "producer" )
    self.assertTrue(result['OK'])
    self.assertEqual(result['Value'], 'producer5')
    # The new producer5 must appear alongside all pre-existing messengers.
    expectedOutput= ['mardirac3.in2p3.fr/queue/test1/producer5', 'mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
    result = self.mgr.getAllMessengers()
    self.assertEqual(sorted(result['Value']),sorted(expectedOutput))
  def test_success2( self ):
    """A consumer added to an existing topic gets id 'consumer5'."""
    result = self.mgr.addNewMessenger(mqURI = "mardirac3.in2p3.fr::Topic::test1", messengerType = "consumer" )
    self.assertTrue(result['OK'])
    self.assertEqual(result['Value'], 'consumer5')
  def test_success3( self ):
    """A consumer added to a queue on the second connection gets id 'consumer5'."""
    result = self.mgr.addNewMessenger(mqURI = "testdir.blabla.ch::Queue::test3", messengerType = "consumer" )
    self.assertTrue(result['OK'])
    self.assertEqual(result['Value'], 'consumer5')
  def test_success4( self ):
    """Adding a messenger for an unknown connection registers the new connection."""
    #connection does not exist
    result = self.mgr.addNewMessenger(mqURI = "noexisting.blabla.ch::Queue::test3", messengerType = "consumer" )
    self.assertTrue(result['OK'])
    self.assertEqual(result['Value'], 'consumer5')
    expectedOutput= ['noexisting.blabla.ch/queue/test3/consumer5', 'mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
    result = self.mgr.getAllMessengers()
    self.assertEqual(sorted(result['Value']),sorted(expectedOutput))
class TestMQConnectionManager_startConnection( TestMQConnectionManager ):
  """Tests for MQConnectionManager.startConnection()."""
  def test_success( self ):
    """Starting against an existing connection only adds a messenger."""
    #existing connection
    result = self.mgr.startConnection(mqURI = "mardirac3.in2p3.fr::Queue::test1", params ={}, messengerType = "producer")
    self.assertTrue(result['OK'])
    self.assertEqual(result['Value'], 'producer5')
    expectedOutput= ['mardirac3.in2p3.fr/queue/test1/producer5', 'mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
    result = self.mgr.getAllMessengers()
    self.assertEqual(sorted(result['Value']),sorted(expectedOutput))
  @mock.patch('DIRAC.Resources.MessageQueue.MQConnectionManager.MQConnectionManager.createConnectorAndConnect')
  def test_success2( self, mock_createConnectorAndConnect):
    """Starting an unknown connection creates a connector and registers it."""
    #connection does not exist
    mock_createConnectorAndConnect.return_value = S_OK('MyConnector')
    result = self.mgr.startConnection(mqURI = "noexisting.blabla.ch::Queue::test3", params={}, messengerType = "consumer" )
    self.assertTrue(result['OK'])
    self.assertEqual(result['Value'], 'consumer5')
    expectedOutput= ['noexisting.blabla.ch/queue/test3/consumer5', 'mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
    result = self.mgr.getAllMessengers()
    self.assertEqual(sorted(result['Value']),sorted(expectedOutput))
    # The connector produced by the mocked factory is stored for the connection.
    result = self.mgr.getConnector('noexisting.blabla.ch')
    self.assertEqual(result['Value'], 'MyConnector')
class TestMQConnectionManager_stopConnection( TestMQConnectionManager ):
  """Tests for MQConnectionManager.stopConnection()."""
  def test_success( self ):
    """Stopping one messenger removes only that messenger."""
    result = self.mgr.stopConnection(mqURI = "mardirac3.in2p3.fr::Queue::test1", messengerId = "producer4")
    self.assertTrue(result['OK'])
    expectedOutput= ['mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
    result = self.mgr.getAllMessengers()
    self.assertEqual(sorted(result['Value']),sorted(expectedOutput))
  def test_success2( self ):
    """Stopping the only producer of a topic removes the topic entry entirely."""
    result = self.mgr.stopConnection(mqURI = "mardirac3.in2p3.fr::Topic::test1", messengerId = "producer1")
    self.assertTrue(result['OK'])
    expectedOutput= ['mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
    result = self.mgr.getAllMessengers()
    self.assertEqual(sorted(result['Value']),sorted(expectedOutput))
  @mock.patch('DIRAC.Resources.MessageQueue.MQConnectionManager.MQConnectionManager.unsubscribe')
  @mock.patch('DIRAC.Resources.MessageQueue.MQConnectionManager.MQConnectionManager.disconnect')
  def test_success3( self, mock_disconnect, mock_unsubscribe ):
    """Stopping every messenger of a connection drops the whole connection."""
    # Mock out the real MQ interactions triggered by the final stop.
    mock_disconnect.return_value = S_OK()
    mock_unsubscribe.return_value = S_OK()
    result = self.mgr.stopConnection(mqURI = "testdir.blabla.ch::Queue::test3", messengerId = "consumer3")
    self.assertTrue(result['OK'])
    result = self.mgr.stopConnection(mqURI = "testdir.blabla.ch::Queue::test3", messengerId = "producer1")
    self.assertTrue(result['OK'])
    result = self.mgr.stopConnection(mqURI = "testdir.blabla.ch::Queue::test3", messengerId = "consumer2")
    self.assertTrue(result['OK'])
    result = self.mgr.stopConnection(mqURI = "testdir.blabla.ch::Queue::test3", messengerId = "consumer4")
    self.assertTrue(result['OK'])
    # Only the messengers of the other connection remain.
    expectedOutput= ['mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1']
    result = self.mgr.getAllMessengers()
    self.assertEqual(sorted(result['Value']),sorted(expectedOutput))
class TestMQConnectionManager_removeAllConnections( TestMQConnectionManager ):
  """Tests for MQConnectionManager.removeAllConnections()."""
  @mock.patch('DIRAC.Resources.MessageQueue.MQConnectionManager.MQConnectionManager.disconnect')
  def test_success( self, mock_disconnect):
    """After removing every connection, no messengers remain registered."""
    mock_disconnect.return_value = S_OK()
    self.assertTrue(self.mgr.removeAllConnections()['OK'])
    messengers = self.mgr.getAllMessengers()
    self.assertEqual(sorted(messengers['Value']), sorted([]))
class TestMQConnectionManager_getAllMessengers( TestMQConnectionManager ):
  """Tests for MQConnectionManager.getAllMessengers()."""
  def test_success( self ):
    """All currently registered messengers are reported."""
    result = self.mgr.getAllMessengers()
    self.assertTrue(result['OK'])
    expectedOutput= ['mardirac3.in2p3.fr/queue/test1/producer4', 'mardirac3.in2p3.fr/queue/test1/consumer1', 'mardirac3.in2p3.fr/queue/test1/consumer2', 'mardirac3.in2p3.fr/queue/test1/consumer4', 'mardirac3.in2p3.fr/queue/test2/producer2', 'mardirac3.in2p3.fr/queue/test2/consumer1', 'mardirac3.in2p3.fr/queue/test2/consumer2', 'mardirac3.in2p3.fr/topic/test1/producer1', 'testdir.blabla.ch/queue/test3/producer1', 'testdir.blabla.ch/queue/test3/consumer2', 'testdir.blabla.ch/queue/test3/consumer3', 'testdir.blabla.ch/queue/test3/consumer4']
    result = self.mgr.getAllMessengers()
    self.assertEqual(sorted(result['Value']),sorted(expectedOutput))
class TestMQConnectionManager_getConnector( TestMQConnectionManager ):
  """Tests for MQConnectionManager.getConnector()."""
  def test_success( self ):
    """A connector is returned for an existing connection."""
    res = self.mgr.getConnector('mardirac3.in2p3.fr')
    self.assertTrue(res['OK'])
  def test_failure( self ):
    """Asking for an unknown connection yields the expected error message."""
    res = self.mgr.getConnector('nonexistent.in2p3.fr')
    self.assertEqual(res['Message'], 'Failed to get the MQConnector!')
if __name__ == '__main__':
  # Assemble one suite from every test case class, in the original order,
  # and run it verbosely.
  _TEST_CASES = (
      TestMQConnectionManager,
      TestMQConnectionManager_addNewmessenger,
      TestMQConnectionManager_startConnection,
      TestMQConnectionManager_stopConnection,
      TestMQConnectionManager_removeAllConnections,
      TestMQConnectionManager_getAllMessengers,
      TestMQConnectionManager_getConnector,
      TestMQConnectionStorageFunctions_connectionExists,
      TestMQConnectionStorageFunctions_destinationExists,
      TestMQConnectionStorageFunctions_messengerExists,
      TestMQConnectionStorageFunctions_getConnection,
      TestMQConnectionStorageFunctions_getAllConnections,
      TestMQConnectionStorageFunctions_getConnector,
      TestMQConnectionStorageFunctions_setConnector,
      TestMQConnectionStorageFunctions_getDestinations,
      TestMQConnectionStorageFunctions_getMessengersId,
      TestMQConnectionStorageFunctions_getMessengersIdWithType,
      TestMQConnectionStorageFunctions_addMessenger,
      TestMQConnectionStorageFunctions_removeMessenger,
      TestMQConnectionStorageFunctions_getAllMessengersInfo,
      TestMQConnectionStorageFunctions_getAllMessengersId,
      TestMQConnectionStorageFunctions_getAllMessengersIdWithType,
  )
  loader = unittest.defaultTestLoader
  suite = loader.loadTestsFromTestCase( _TEST_CASES[0] )
  for case in _TEST_CASES[1:]:
    suite.addTest( loader.loadTestsFromTestCase( case ) )
  testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
| gpl-3.0 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
def upgrade(migrate_engine):
    """Create the identity_provider and federation_protocol tables."""
    metadata = sql.MetaData()
    metadata.bind = migrate_engine

    # The parent table must be created first: federation_protocol's foreign
    # key references identity_provider.id.
    identity_provider = sql.Table(
        'identity_provider',
        metadata,
        sql.Column('id', sql.String(64), primary_key=True),
        sql.Column('enabled', sql.Boolean, nullable=False),
        sql.Column('description', sql.Text(), nullable=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8')
    identity_provider.create(migrate_engine, checkfirst=True)

    federation_protocol = sql.Table(
        'federation_protocol',
        metadata,
        sql.Column('id', sql.String(64), primary_key=True),
        sql.Column('idp_id', sql.String(64),
                   sql.ForeignKey('identity_provider.id', ondelete='CASCADE'),
                   primary_key=True),
        sql.Column('mapping_id', sql.String(64), nullable=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8')
    federation_protocol.create(migrate_engine, checkfirst=True)
| apache-2.0 |
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import pretty_print
PRETTY_XML = """
<!-- Comment1 -->
<rappor-configuration>
<!-- Comment2 -->
<rappor-parameter-types>
<!-- Comment3 -->
<rappor-parameters name="TEST_RAPPOR_TYPE">
<summary>
Fake type for tests.
</summary>
<parameters num-cohorts="128" bytes="1" hash-functions="2" fake-prob="0.5"
fake-one-prob="0.5" one-coin-prob="0.75" zero-coin-prob="0.25"
reporting-level="COARSE"/>
</rappor-parameters>
</rappor-parameter-types>
<rappor-metrics>
<!-- Comment4 -->
<rappor-metric name="Test.Rappor.Metric" type="TEST_RAPPOR_TYPE">
<owner>user1@chromium.org</owner>
<owner>user2@chromium.org</owner>
<summary>
A fake metric summary.
</summary>
</rappor-metric>
<rappor-metric name="Test.Rappor.Metric2" type="TEST_RAPPOR_TYPE">
<owner>user1@chromium.org</owner>
<owner>user2@chromium.org</owner>
<summary>
A fake metric summary.
</summary>
<string-field name="Url">
<summary>
The url of the event.
</summary>
</string-field>
<flags-field name="Flags">
<flag>Flag bit #1</flag>
<flag>Flag bit #2</flag>
</flags-field>
</rappor-metric>
</rappor-metrics>
</rappor-configuration>
""".strip()
BASIC_METRIC = {
'comments': [],
'name': 'Test.Rappor.Metric',
'type': 'TEST_RAPPOR_TYPE',
'owners': ['user1@chromium.org', 'user2@chromium.org'],
'summary': 'A fake metric summary.',
'flags': [],
'strings': [],
}
MULTI_FIELD_METRIC = {
'comments': [],
'name': 'Test.Rappor.Metric2',
'type': 'TEST_RAPPOR_TYPE',
'owners': ['user1@chromium.org', 'user2@chromium.org'],
'summary': 'A fake metric summary.',
'strings': [{
'comments': [],
'name': 'Url',
'summary': 'The url of the event.',
}],
'flags': [{
'comments': [],
'name': 'Flags',
'flags': [
'Flag bit #1',
'Flag bit #2',
]
}]
}
class ActionXmlTest(unittest.TestCase):
  """Exercises pretty_print's parsing and validation helpers."""

  def testIsPretty(self):
    """Already-pretty XML must round-trip unchanged through UpdateXML."""
    updated = pretty_print.UpdateXML(PRETTY_XML)
    self.assertMultiLineEqual(PRETTY_XML, updated.strip())

  def testParsing(self):
    """The parsed config exposes both metrics and their declared types."""
    comments, config = pretty_print.RAPPOR_XML_TYPE.Parse(PRETTY_XML)
    metrics = config['metrics']['metrics']
    self.assertEqual(BASIC_METRIC, metrics[0])
    self.assertEqual(MULTI_FIELD_METRIC, metrics[1])
    self.assertEqual(set(['TEST_RAPPOR_TYPE']),
                     pretty_print.GetTypeNames(config))

  def testMissingOwners(self):
    """Metrics without owners are reported as errors."""
    self.assertFalse(pretty_print.GetMissingOwnerErrors([BASIC_METRIC]))
    ownerless = BASIC_METRIC.copy()
    ownerless['owners'] = []
    self.assertTrue(pretty_print.GetMissingOwnerErrors([ownerless]))

  def testInvalidTypes(self):
    """Metrics referencing undeclared types are reported as errors."""
    self.assertFalse(pretty_print.GetInvalidTypeErrors(
        set(['TEST_RAPPOR_TYPE']), [BASIC_METRIC]))
    self.assertTrue(pretty_print.GetInvalidTypeErrors(
        set(['OTHER_TYPE']), [BASIC_METRIC]))
# Run all tests in this module when invoked directly.
if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
#
# Android.py
# Copyright (C) 2016-2021 Linar Yusupov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from time import sleep
import androidhelper
import os
import hashlib
def hostid():
    """Return an anonymized, stable identifier for this device.

    Reads the hardware serial number from the Android property service and
    returns its MD5 hex digest, so the raw serial is never exposed directly.
    """
    #return os.popen("settings get secure android_id").read().strip()
    serial = os.popen("getprop ro.serialno").read().strip()
    # Encode explicitly: hashlib requires bytes on Python 3 (a no-op for the
    # ASCII serials returned by getprop under Python 2).
    hash_object = hashlib.md5(serial.encode('utf-8'))
    return hash_object.hexdigest()
def platform_name():
    """Name of the platform this emulator backend targets."""
    return 'Android'
def platform_init(emu):
    """Initialize platform state: derive the device id and start GPS updates.

    :param emu: emulator state object; gains ``myId`` and ``droid`` attributes.
    """
    # 24-bit id derived from the hashed hardware serial, hex-encoded.
    emu.myId = format(int(hostid(),16) & 0xffffff, 'x')
    emu.droid = androidhelper.Android()
    # Request location updates every 1000 ms with no minimum distance change.
    emu.droid.startLocating(1000, 0)
def platform_get_fix(emu):
    """Block until a location event arrives and copy the fix into *emu*.

    Sets ``mytstamp`` (seconds), ``mylat``/``mylon``, ``myalt`` (int) and
    ``mytrk`` on *emu* when a fix is available. Always sleeps one second and
    returns True.
    """
    emu.droid.eventWaitFor('location')
    loc = emu.droid.readLocation().result
    # print loc
    if loc != {}:
        try:
            # Prefer the GPS fix; fall back to the network-derived location.
            n = loc['gps']
        except KeyError:
            n = loc['network']
        timestamp = n['time']
        lat = n['latitude']
        lon = n['longitude']
        alt = n['altitude']
        bearing = n['bearing']
        speed = n['speed']
        # NOTE(review): speed, accuracy and prov are read but currently unused.
        accuracy = n['accuracy']
        prov = n['provider']
        #print
        #print cnt
        #print "timestamp: ", timestamp
        #print "latitude: ", lat
        #print "longtitude:", lon
        #print "altitude: ", alt
        #print "bearing: ", bearing
        #print "speed: ", speed
        #print "accuracy: ", accuracy
        #print "provider: ", prov
        #print "S", cnt, int(timestamp / 1000), "%.4f" % lat, "%.4f" % lon, int(alt)
        #result = session.process_e(timestamp, lat, lon, alt)
        #cnt = cnt + 1
        # Timestamp is divided by 1000 — presumably ms -> s; confirm with the
        # SL4A readLocation documentation.
        emu.mytstamp = int(timestamp / 1000)
        emu.mylat = lat
        emu.mylon = lon
        emu.myalt = int(alt)
        emu.mytrk = bearing
    sleep(1)
    return True
def platform_fini(emu):
    """Shut down the platform backend by stopping GPS location updates."""
    emu.droid.stopLocating()
| gpl-3.0 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import errno
import json
import logging
import os
import pkgutil
import threading
import xml.etree.ElementTree as ET
from abc import abstractmethod
from collections import OrderedDict, defaultdict, namedtuple
import six
from twitter.common.collections import OrderedSet
from pants.backend.jvm.jar_dependency_utils import M2Coordinate, ResolvedJar
from pants.backend.jvm.subsystems.jar_dependency_management import (JarDependencyManagement,
PinnedJarArtifactSet)
from pants.backend.jvm.targets.exclude import Exclude
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.base.generator import Generator, TemplateData
from pants.base.revision import Revision
from pants.build_graph.target import Target
from pants.ivy.bootstrapper import Bootstrapper
from pants.java.util import execute_runner
from pants.util.dirutil import safe_concurrent_creation, safe_mkdir, safe_open
from pants.util.fileutil import atomic_copy
class IvyResolutionStep(object):
  """Ivy specific class for describing steps of performing resolution."""
  # NB(nh): This class is the base class for the ivy resolve and fetch steps.
  # It also specifies the abstract methods that define the components of resolution steps.
  def __init__(self, confs, hash_name, pinned_artifacts, soft_excludes, ivy_cache_dir,
               global_ivy_workdir):
    """
    :param confs: A tuple of string ivy confs to resolve for.
    :param hash_name: A unique string name for this resolve.
    :param pinned_artifacts: A tuple of "artifact-alikes" to force the versions of.
    :param soft_excludes: A flag marking whether to pass excludes to Ivy or to apply them after the
                          fact.
    :param ivy_cache_dir: The cache directory used by Ivy for this resolution step.
    :param global_ivy_workdir: The workdir that all ivy outputs live in.
    """
    self.confs = confs
    self.hash_name = hash_name
    self.pinned_artifacts = pinned_artifacts
    self.soft_excludes = soft_excludes
    self.ivy_cache_dir = ivy_cache_dir
    self.global_ivy_workdir = global_ivy_workdir
    # Map of conf -> path of the report this step will produce for that conf.
    self.workdir_reports_by_conf = {c: self.resolve_report_path(c) for c in confs}
  @abstractmethod
  def required_load_files_exist(self):
    """The files required to load a previous resolve exist."""
  @abstractmethod
  def required_exec_files_exist(self):
    """The files to do a resolve exist."""
  @abstractmethod
  def load(self, targets):
    """Loads the result of a resolve or fetch."""
  @abstractmethod
  def exec_and_load(self, executor, extra_args, targets, jvm_options, workunit_name,
                    workunit_factory):
    """Runs the resolve or fetch and loads the result, returning it."""
  @property
  def workdir(self):
    """Directory holding this resolution step's outputs, keyed by hash_name."""
    return os.path.join(self.global_ivy_workdir, self.hash_name)
  @property
  def symlink_classpath_filename(self):
    """Path of the classpath file whose entries point into the symlink farm."""
    return os.path.join(self.workdir, 'classpath')
  @property
  def ivy_cache_classpath_filename(self):
    """Path of the raw classpath file whose entries point into the ivy cache."""
    return '{}.raw'.format(self.symlink_classpath_filename)
  @property
  def frozen_resolve_file(self):
    """Path of the JSON file recording the frozen resolution for later fetches."""
    return os.path.join(self.workdir, 'resolution.json')
  @property
  def symlink_dir(self):
    """Directory of symlinks into the ivy cache, shared across resolves."""
    return os.path.join(self.global_ivy_workdir, 'jars')
  @abstractmethod
  def ivy_xml_path(self):
    """Ivy xml location."""
    # NOTE(review): subclasses implement this as a @property, so it is
    # accessed as an attribute rather than called.
  @abstractmethod
  def resolve_report_path(self, conf):
    """Location of the resolve report in the workdir."""
  def _construct_and_load_symlink_map(self):
    """Symlinks cached artifacts into the workdir; returns (paths, symlink map)."""
    artifact_paths, symlink_map = IvyUtils.construct_and_load_symlink_map(
      self.symlink_dir,
      self.ivy_cache_dir,
      self.ivy_cache_classpath_filename,
      self.symlink_classpath_filename)
    return artifact_paths, symlink_map
  def _call_ivy(self, executor, extra_args, ivyxml, jvm_options, hash_name_for_report,
                workunit_factory, workunit_name):
    """Invokes ivy on ivyxml, producing per-conf reports and the raw classpath."""
    IvyUtils.do_resolve(executor,
                        extra_args,
                        ivyxml,
                        jvm_options,
                        self.workdir_reports_by_conf,
                        self.confs,
                        self.ivy_cache_dir,
                        self.ivy_cache_classpath_filename,
                        hash_name_for_report,
                        workunit_factory,
                        workunit_name)
class IvyFetchStep(IvyResolutionStep):
  """Resolves ivy artifacts using the coordinates from a previous resolve."""
  def required_load_files_exist(self):
    """True if the per-conf reports, raw classpath and frozen resolution all exist."""
    return (all(os.path.isfile(report) for report in self.workdir_reports_by_conf.values()) and
            os.path.isfile(self.ivy_cache_classpath_filename) and
            os.path.isfile(self.frozen_resolve_file))
  def resolve_report_path(self, conf):
    """Location of the fetch report for the given conf."""
    return os.path.join(self.workdir, 'fetch-report-{}.xml'.format(conf))
  @property
  def ivy_xml_path(self):
    """Location of the generated ivy.xml used for fetching."""
    return os.path.join(self.workdir, 'fetch-ivy.xml')
  def required_exec_files_exist(self):
    """True if the frozen resolution needed to drive a fetch exists."""
    return os.path.isfile(self.frozen_resolve_file)
  def load(self, targets):
    """Loads a previous fetch result; NO_RESOLVE_RUN_RESULT if it can't be loaded."""
    try:
      frozen_resolutions = FrozenResolution.load_from_file(self.frozen_resolve_file,
                                                           targets)
    except Exception as e:
      logger.debug('Failed to load {}: {}'.format(self.frozen_resolve_file, e))
      return NO_RESOLVE_RUN_RESULT
    return self._load_from_fetch(frozen_resolutions)
  def exec_and_load(self, executor, extra_args, targets, jvm_options, workunit_name,
                    workunit_factory):
    """Runs a fetch against the frozen resolution and returns the loaded result.

    :raises IvyResolveMappingError: if any fetched artifact failed to be linked.
    """
    try:
      frozen_resolutions = FrozenResolution.load_from_file(self.frozen_resolve_file,
                                                           targets)
    except Exception as e:
      logger.debug('Failed to load {}: {}'.format(self.frozen_resolve_file, e))
      return NO_RESOLVE_RUN_RESULT
    self._do_fetch(executor, extra_args, frozen_resolutions, jvm_options,
                   workunit_name, workunit_factory)
    result = self._load_from_fetch(frozen_resolutions)
    if not result.all_linked_artifacts_exist():
      raise IvyResolveMappingError(
        'Some artifacts were not linked to {} for {}'.format(self.global_ivy_workdir,
                                                             result))
    return result
  def _load_from_fetch(self, frozen_resolutions):
    """Builds an IvyFetchResolveResult from the symlinked fetch outputs."""
    artifact_paths, symlink_map = self._construct_and_load_symlink_map()
    return IvyFetchResolveResult(artifact_paths,
                                 symlink_map,
                                 self.hash_name,
                                 self.workdir_reports_by_conf,
                                 frozen_resolutions)
  def _do_fetch(self, executor, extra_args, frozen_resolution, jvm_options, workunit_name,
                workunit_factory):
    """Generates a fetch ivy.xml from the frozen resolution and invokes ivy."""
    # It's important for fetches to have a different ivy report from resolves as their
    # contents differ.
    hash_name_for_report = '{}-fetch'.format(self.hash_name)
    ivyxml = self.ivy_xml_path
    self._prepare_ivy_xml(frozen_resolution, ivyxml, hash_name_for_report)
    self._call_ivy(executor, extra_args, ivyxml, jvm_options, hash_name_for_report,
                   workunit_factory, workunit_name)
  def _prepare_ivy_xml(self, frozen_resolution, ivyxml, resolve_hash_name_for_report):
    """Writes the ivy.xml pinning the jars recorded under the 'default' conf.

    :raises IvyUtils.IvyError: if the 'default' resolution is missing or
                               generation fails.
    """
    # NB(nh): Our ivy.xml ensures that we always get the default configuration, even if it's not
    # part of the requested confs.
    default_resolution = frozen_resolution.get('default')
    if default_resolution is None:
      raise IvyUtils.IvyError("Couldn't find the frozen resolution for the 'default' ivy conf.")
    try:
      jars = default_resolution.jar_dependencies
      IvyUtils.generate_fetch_ivy(jars, ivyxml, self.confs, resolve_hash_name_for_report)
    except Exception as e:
      raise IvyUtils.IvyError('Failed to prepare ivy resolve: {}'.format(e))
class IvyResolveStep(IvyResolutionStep):
  """Resolves ivy artifacts and produces a cacheable file containing the resulting coordinates."""
  def required_load_files_exist(self):
    """True if the per-conf reports and the raw classpath file are all on disk."""
    return (all(os.path.isfile(report) for report in self.workdir_reports_by_conf.values()) and
            os.path.isfile(self.ivy_cache_classpath_filename))
  def resolve_report_path(self, conf):
    """Location of the resolve report for the given conf."""
    return os.path.join(self.workdir, 'resolve-report-{}.xml'.format(conf))
  @property
  def ivy_xml_path(self):
    """Location of the generated ivy.xml used for resolution."""
    return os.path.join(self.workdir, 'resolve-ivy.xml')
  def load(self, targets):
    """Loads a previously-run resolve from the symlinked workdir outputs."""
    artifact_paths, symlink_map = self._construct_and_load_symlink_map()
    return IvyResolveResult(artifact_paths,
                            symlink_map,
                            self.hash_name,
                            self.workdir_reports_by_conf)
  def exec_and_load(self, executor, extra_args, targets, jvm_options,
                    workunit_name, workunit_factory):
    """Runs the resolve, freezes the result for later fetches, and returns it.

    :raises IvyResolveMappingError: if any resolved artifact failed to be linked.
    """
    self._do_resolve(executor, extra_args, targets, jvm_options, workunit_name, workunit_factory)
    result = self.load(targets)
    if not result.all_linked_artifacts_exist():
      raise IvyResolveMappingError(
        'Some artifacts were not linked to {} for {}'.format(self.global_ivy_workdir,
                                                             result))
    frozen_resolutions_by_conf = result.get_frozen_resolutions_by_conf(targets)
    FrozenResolution.dump_to_file(self.frozen_resolve_file, frozen_resolutions_by_conf)
    return result
  def _do_resolve(self, executor, extra_args, targets, jvm_options, workunit_name, workunit_factory):
    """Generates an ivy.xml from the targets and invokes ivy on it."""
    safe_mkdir(self.workdir)
    ivyxml = self.ivy_xml_path
    hash_name = '{}-resolve'.format(self.hash_name)
    self._prepare_ivy_xml(targets, ivyxml, hash_name)
    self._call_ivy(executor, extra_args, ivyxml, jvm_options, hash_name,
                   workunit_factory, workunit_name)
  def _prepare_ivy_xml(self, targets, ivyxml, hash_name):
    """Writes the ivy.xml describing the classpath of the given targets."""
    # TODO(John Sirois): merge the code below into IvyUtils or up here; either way, better
    # diagnostics can be had in `IvyUtils.generate_ivy` if this is done.
    # See: https://github.com/pantsbuild/pants/issues/2239
    jars, global_excludes = IvyUtils.calculate_classpath(targets)
    # Don't pass global excludes to ivy when using soft excludes.
    if self.soft_excludes:
      global_excludes = []
    IvyUtils.generate_ivy(targets, jars, global_excludes, ivyxml, self.confs,
                          hash_name, self.pinned_artifacts)
class FrozenResolution(object):
  """Contains the abstracted results of a resolve.
  With this we can do a simple fetch.
  """
  # TODO(nh): include full dependency graph in here.
  # So that we can inject it into the build graph if we want to.
  class MissingTarget(Exception):
    """Thrown when a loaded resolution has a target spec for a target that doesn't exist."""
  def __init__(self):
    # target -> OrderedSet of M2Coordinates resolved for that target.
    self.target_to_resolved_coordinates = defaultdict(OrderedSet)
    # Union of all coordinates across targets, in insertion order.
    self.all_resolved_coordinates = OrderedSet()
    # M2Coordinate -> extra jar attributes (currently just 'url' when present).
    self.coordinate_to_attributes = OrderedDict()
  @property
  def jar_dependencies(self):
    """JarDependency objects reconstructed from all resolved coordinates."""
    return [
      JarDependency(c.org, c.name, c.rev, classifier=c.classifier, ext=c.ext,
                    **self.coordinate_to_attributes.get(c, {}))
      for c in self.all_resolved_coordinates]
  def add_resolved_jars(self, target, resolved_jars):
    """Records the coordinates of resolved_jars against target, with jar attributes."""
    coords = [j.coordinate for j in resolved_jars]
    self.add_resolution_coords(target, coords)
    # Assuming target is a jar library.
    for j in target.jar_dependencies:
      if j.url:
        self.coordinate_to_attributes[j.coordinate] = {'url': j.url}
      else:
        self.coordinate_to_attributes[j.coordinate] = {}
  def add_resolution_coords(self, target, coords):
    """Records each coordinate against target and in the global ordered set."""
    for c in coords:
      self.target_to_resolved_coordinates[target].add(c)
      self.all_resolved_coordinates.add(c)
  def target_spec_to_coordinate_strings(self):
    """Returns {target address spec -> [coordinate strings]} for serialization."""
    return {t.address.spec: [str(c) for c in coordinates]
            for t, coordinates in self.target_to_resolved_coordinates.items()}
  def __repr__(self):
    return 'FrozenResolution(\n target_to_resolved_coordinates\n {}\n all\n {}'.format(
      '\n '.join(': '.join([t.address.spec,
                            '\n '.join(str(c) for c in cs)])
                 for t,cs in self.target_to_resolved_coordinates.items()),
      '\n '.join(str(c) for c in self.coordinate_to_attributes.keys())
    )
  def __eq__(self, other):
    return (type(self) == type(other) and
            self.all_resolved_coordinates == other.all_resolved_coordinates and
            self.target_to_resolved_coordinates == other.target_to_resolved_coordinates)
  def __ne__(self, other):
    return not self == other
  @classmethod
  def load_from_file(cls, filename, targets):
    """Loads {conf -> FrozenResolution} from filename; None if it doesn't exist.

    :raises MissingTarget: if a recorded spec has no matching target in targets.
    """
    if not os.path.exists(filename):
      return None
    with open(filename) as f:
      # Using OrderedDict here to maintain insertion order of dict entries.
      from_file = json.load(f, object_pairs_hook=OrderedDict)
    result = {}
    target_lookup = {t.address.spec: t for t in targets}
    for conf, serialized_resolution in from_file.items():
      resolution = FrozenResolution()
      def m2_for(c):
        return M2Coordinate.from_string(c)
      for coord, attr_dict in serialized_resolution['coord_to_attrs'].items():
        m2 = m2_for(coord)
        resolution.coordinate_to_attributes[m2] = attr_dict
      for spec, coord_strs in serialized_resolution['target_to_coords'].items():
        t = target_lookup.get(spec, None)
        if t is None:
          raise cls.MissingTarget('Cannot find target for address {} in frozen resolution'
                                  .format(spec))
        resolution.add_resolution_coords(t, [m2_for(c) for c in coord_strs])
      result[conf] = resolution
    return result
  @classmethod
  def dump_to_file(cls, filename, resolutions_by_conf):
    """Atomically writes {conf -> FrozenResolution} to filename as JSON."""
    res = {}
    for conf, resolution in resolutions_by_conf.items():
      res[conf] = OrderedDict([
        ['target_to_coords',resolution.target_spec_to_coordinate_strings()],
        ['coord_to_attrs', OrderedDict([str(c), attrs]
                                       for c, attrs in resolution.coordinate_to_attributes.items())]
      ])
    with safe_concurrent_creation(filename) as tmp_filename:
      with open(tmp_filename, 'wb') as f:
        json.dump(res, f)
class IvyResolveResult(object):
"""The result of an Ivy resolution.
The result data includes the list of resolved artifacts, the relationships between those artifacts
and the targets that requested them and the hash name of the resolve.
"""
def __init__(self, resolved_artifact_paths, symlink_map, resolve_hash_name, reports_by_conf):
self._reports_by_conf = reports_by_conf
self.resolved_artifact_paths = resolved_artifact_paths
self.resolve_hash_name = resolve_hash_name
self._symlink_map = symlink_map
@property
def has_resolved_artifacts(self):
"""The requested targets have a resolution associated with them."""
return self.resolve_hash_name is not None
def all_linked_artifacts_exist(self):
"""All of the artifact paths for this resolve point to existing files."""
if not self.has_resolved_artifacts:
return False
for path in self.resolved_artifact_paths:
if not os.path.isfile(path):
return False
else:
return True
def report_for_conf(self, conf):
"""Returns the path to the ivy report for the provided conf.
Returns None if there is no path.
"""
return self._reports_by_conf.get(conf)
def get_frozen_resolutions_by_conf(self, targets):
frozen_resolutions_by_conf = OrderedDict()
for conf in self._reports_by_conf:
frozen_resolution = FrozenResolution()
for target, resolved_jars in self.resolved_jars_for_each_target(conf, targets):
frozen_resolution.add_resolved_jars(target, resolved_jars)
frozen_resolutions_by_conf[conf] = frozen_resolution
return frozen_resolutions_by_conf
def resolved_jars_for_each_target(self, conf, targets):
"""Yields the resolved jars for each passed JarLibrary.
If there is no report for the requested conf, yields nothing.
:param conf: The ivy conf to load jars for.
:param targets: The collection of JarLibrary targets to find resolved jars for.
:yield: target, resolved_jars
:raises IvyTaskMixin.UnresolvedJarError
"""
ivy_info = self._ivy_info_for(conf)
if not ivy_info:
return
jar_library_targets = [t for t in targets if isinstance(t, JarLibrary)]
ivy_jar_memo = {}
for target in jar_library_targets:
# Add the artifacts from each dependency module.
resolved_jars = self._resolved_jars_with_symlinks(conf, ivy_info, ivy_jar_memo,
self._jar_dependencies_for_target(conf,
target),
target)
yield target, resolved_jars
def _jar_dependencies_for_target(self, conf, target):
return target.jar_dependencies
def _ivy_info_for(self, conf):
report_path = self._reports_by_conf.get(conf)
return IvyUtils.parse_xml_report(conf, report_path)
  def _new_resolved_jar_with_symlink_path(self, conf, target, resolved_jar_without_symlink):
    """Returns a copy of the resolved jar with its pants_path set from the symlink map.

    :raises IvyResolveMappingError: If neither the cache path nor its realpath is in the
      symlink map.
    """
    def candidate_cache_paths():
      # There is a focus on being lazy here to avoid `os.path.realpath` when we can.
      yield resolved_jar_without_symlink.cache_path
      yield os.path.realpath(resolved_jar_without_symlink.cache_path)

    # for/else: the else clause only runs when no candidate produced a mapping (no break).
    for cache_path in candidate_cache_paths():
      pants_path = self._symlink_map.get(cache_path)
      if pants_path:
        break
    else:
      raise IvyResolveMappingError(
        'Jar {resolved_jar} in {spec} not resolved to the ivy '
        'symlink map in conf {conf}.'
        .format(spec=target.address.spec,
                resolved_jar=resolved_jar_without_symlink.cache_path,
                conf=conf))

    return ResolvedJar(coordinate=resolved_jar_without_symlink.coordinate,
                       pants_path=pants_path,
                       cache_path=resolved_jar_without_symlink.cache_path)
def _resolved_jars_with_symlinks(self, conf, ivy_info, ivy_jar_memo, coordinates, target):
raw_resolved_jars = ivy_info.get_resolved_jars_for_coordinates(coordinates,
memo=ivy_jar_memo)
resolved_jars = [self._new_resolved_jar_with_symlink_path(conf, target, raw_resolved_jar)
for raw_resolved_jar in raw_resolved_jars]
return resolved_jars
class IvyFetchResolveResult(IvyResolveResult):
  """A resolve result that uses the frozen resolution to look up dependencies."""

  def __init__(self, resolved_artifact_paths, symlink_map, resolve_hash_name, reports_by_conf,
               frozen_resolutions):
    """
    :param frozen_resolutions: Dict of conf -> FrozenResolution providing the
      target -> coordinates mapping used during a fetch.
    """
    super(IvyFetchResolveResult, self).__init__(resolved_artifact_paths, symlink_map,
                                                resolve_hash_name, reports_by_conf)
    self._frozen_resolutions = frozen_resolutions

  def _jar_dependencies_for_target(self, conf, target):
    # For a fetch, coordinates come from the frozen resolution rather than the target's
    # declared jar_dependencies; targets absent from the resolution contribute nothing.
    return self._frozen_resolutions[conf].target_to_resolved_coordinates.get(target, ())
# Sentinel result representing "no resolve has been run".
NO_RESOLVE_RUN_RESULT = IvyResolveResult([], {}, None, {})
# ref: IvyModuleRef; artifact: location of the artifact; callers: tuple of dependee refs.
IvyModule = namedtuple('IvyModule', ['ref', 'artifact', 'callers'])
# Attributes that must agree across all jars grouped into a single ivy <dependency>.
Dependency = namedtuple('DependencyAttributes',
                        ['org', 'name', 'rev', 'mutable', 'force', 'transitive'])
# One <artifact/> entry within an ivy <dependency>.
Artifact = namedtuple('Artifact', ['name', 'type_', 'ext', 'url', 'classifier'])
logger = logging.getLogger(__name__)
class IvyResolveMappingError(Exception):
  """Raised when there is a failure mapping the ivy resolve results to pants objects."""
class IvyModuleRef(object):
  """Identifies an ivy module by (org, name, rev, classifier, ext).

  :API: public
  """
  # latest.integration is ivy magic meaning "just get the latest version"
  _ANY_REV = 'latest.integration'

  def __init__(self, org, name, rev, classifier=None, ext=None):
    self.org = org
    self.name = name
    self.rev = rev
    self.classifier = classifier
    # A missing extension defaults to 'jar'.
    self.ext = ext or 'jar'
    # Identity tuple backing equality, hashing, ordering and repr.
    self._id = (self.org, self.name, self.rev, self.classifier, self.ext)

  def __eq__(self, other):
    return isinstance(other, IvyModuleRef) and self._id == other._id

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    return hash(self._id)

  def __str__(self):
    return 'IvyModuleRef({})'.format(':'.join((x or '') for x in self._id))

  def __repr__(self):
    return ('IvyModuleRef(org={!r}, name={!r}, rev={!r}, classifier={!r}, ext={!r})'
            .format(*self._id))

  def __cmp__(self, other):
    # We can't just re-use __repr__ or __str__ because we want to order rev last
    return cmp((self.org, self.name, self.classifier, self.ext, self.rev),
               (other.org, other.name, other.classifier, other.ext, other.rev))

  @property
  def caller_key(self):
    """This returns an identifier for an IvyModuleRef that only retains the caller org and name.

    Ivy represents dependees as `<caller/>`'s with just org and name and rev information.
    This method returns a `<caller/>` representation of the current ref.
    """
    return IvyModuleRef(name=self.name, org=self.org, rev=self._ANY_REV)

  @property
  def unversioned(self):
    """This returns an identifier for an IvyModuleRef without version information.

    It's useful because ivy might return information about a different version of a dependency than
    the one we request, and we want to ensure that all requesters of any version of that dependency
    are able to learn about it.
    """
    return IvyModuleRef(name=self.name, org=self.org, rev=self._ANY_REV, classifier=self.classifier,
                        ext=self.ext)
class IvyInfo(object):
  """Parsed contents of an ivy resolve report for a single conf.

  :API: public
  """

  def __init__(self, conf):
    self._conf = conf
    self.modules_by_ref = {}  # Map from ref to referenced module.
    self.refs_by_unversioned_refs = {}  # Map from unversioned ref to the resolved versioned ref
    # Map from ref of caller to refs of modules required by that caller.
    self._deps_by_caller = defaultdict(OrderedSet)
    # Map from _unversioned_ ref to OrderedSet of IvyArtifact instances.
    self._artifacts_by_ref = defaultdict(OrderedSet)

  def add_module(self, module):
    """Records a module parsed from the report, indexing it by ref, caller and artifact.

    :raises IvyResolveMappingError: If the module's ref (versioned or unversioned) was
      already recorded.
    """
    if not module.artifact:
      # Module was evicted, so do not record information about it
      return

    ref_unversioned = module.ref.unversioned
    if ref_unversioned in self.refs_by_unversioned_refs:
      raise IvyResolveMappingError('Already defined module {}, as rev {}!'
                                   .format(ref_unversioned, module.ref.rev))
    if module.ref in self.modules_by_ref:
      raise IvyResolveMappingError('Already defined module {}, would be overwritten!'
                                   .format(module.ref))
    self.refs_by_unversioned_refs[ref_unversioned] = module.ref
    self.modules_by_ref[module.ref] = module

    for caller in module.callers:
      self._deps_by_caller[caller.caller_key].add(module.ref)
    self._artifacts_by_ref[ref_unversioned].add(module.artifact)

  def _do_traverse_dependency_graph(self, ref, collector, memo, visited):
    """Depth-first accumulation of collector values over the graph rooted at ref.

    Results per node are memoized in `memo`; `visited` breaks dependency cycles.
    """
    memoized_value = memo.get(ref)
    if memoized_value:
      return memoized_value

    if ref in visited:
      # Ivy allows for circular dependencies
      # If we're here, that means we're resolving something that
      # transitively depends on itself
      return set()

    visited.add(ref)
    acc = collector(ref)
    # NB(zundel): ivy does not return deps in a consistent order for the same module for
    # different resolves.  Sort them to get consistency and prevent cache invalidation.
    # See https://github.com/pantsbuild/pants/issues/2607
    deps = sorted(self._deps_by_caller.get(ref.caller_key, ()))
    for dep in deps:
      acc.update(self._do_traverse_dependency_graph(dep, collector, memo, visited))

    memo[ref] = acc
    return acc

  def traverse_dependency_graph(self, ref, collector, memo=None):
    """Traverses module graph, starting with ref, collecting values for each ref into the sets
    created by the collector function.

    :param ref an IvyModuleRef to start traversing the ivy dependency graph
    :param collector a function that takes a ref and returns a new set of values to collect for
           that ref, which will also be updated with all the dependencies accumulated values
    :param memo is a dict of ref -> set that memoizes the results of each node in the graph.
           If provided, allows for retaining cache across calls.
    :returns the accumulated set for ref
    """
    # Normalize to the version ivy actually resolved, if it differs from the requested one.
    resolved_ref = self.refs_by_unversioned_refs.get(ref.unversioned)
    if resolved_ref:
      ref = resolved_ref
    if memo is None:
      memo = dict()
    visited = set()
    return self._do_traverse_dependency_graph(ref, collector, memo, visited)

  def get_resolved_jars_for_coordinates(self, coordinates, memo=None):
    """Collects jars for the passed coordinates.

    Because artifacts are only fetched for the "winning" version of a module, the artifacts
    will not always represent the version originally declared by the library.

    This method is transitive within the passed coordinates dependencies.

    :param coordinates collections.Iterable: Collection of coordinates to collect transitive
                                             resolved jars for.
    :param memo: See `traverse_dependency_graph`.
    :returns: All the artifacts for all of the jars for the provided coordinates,
              including transitive dependencies.
    :rtype: list of :class:`pants.backend.jvm.jar_dependency_utils.ResolvedJar`
    """
    def to_resolved_jar(jar_ref, jar_path):
      return ResolvedJar(coordinate=M2Coordinate(org=jar_ref.org,
                                                 name=jar_ref.name,
                                                 rev=jar_ref.rev,
                                                 classifier=jar_ref.classifier,
                                                 ext=jar_ref.ext),
                         cache_path=jar_path)
    resolved_jars = OrderedSet()
    def create_collection(dep):
      return OrderedSet([dep])
    for jar in coordinates:
      # For non-default confs, ivy encodes the conf as the classifier of the module ref.
      classifier = jar.classifier if self._conf == 'default' else self._conf
      jar_module_ref = IvyModuleRef(jar.org, jar.name, jar.rev, classifier)
      for module_ref in self.traverse_dependency_graph(jar_module_ref, create_collection, memo):
        for artifact_path in self._artifacts_by_ref[module_ref.unversioned]:
          resolved_jars.add(to_resolved_jar(module_ref, artifact_path))
    return resolved_jars

  def __repr__(self):
    return 'IvyInfo(conf={}, refs={})'.format(self._conf, self.modules_by_ref.keys())
class IvyUtils(object):
  """Useful methods related to interaction with ivy.

  :API: public
  """

  # Protects ivy executions.
  _ivy_lock = threading.RLock()

  # Protect writes to the global map of jar path -> symlinks to that jar.
  _symlink_map_lock = threading.Lock()

  # Org name used for the synthetic module that pants generates for each resolve.
  INTERNAL_ORG_NAME = 'internal'

  class IvyError(Exception):
    """Indicates an error preparing an ivy operation."""

  class IvyResolveReportError(IvyError):
    """Indicates that an ivy report cannot be found."""

  class IvyResolveConflictingDepsError(IvyError):
    """Indicates two or more locally declared dependencies conflict."""

  class BadRevisionError(IvyError):
    """Indicates an unparseable version number."""
  @staticmethod
  def _generate_exclude_template(exclude):
    """Returns the TemplateData for one ivy <exclude/> entry."""
    return TemplateData(org=exclude.org, name=exclude.name)
  @staticmethod
  def _generate_override_template(jar):
    """Returns the TemplateData for one ivy <override/> entry pinning jar's version."""
    return TemplateData(org=jar.org, module=jar.name, version=jar.rev)
@staticmethod
def _load_classpath_from_cachepath(path):
if not os.path.exists(path):
return []
else:
with safe_open(path, 'r') as cp:
return filter(None, (path.strip() for path in cp.read().split(os.pathsep)))
  @classmethod
  def do_resolve(cls, executor, extra_args, ivyxml, jvm_options, workdir_report_paths_by_conf,
                 confs, ivy_cache_dir, ivy_cache_classpath_filename, resolve_hash_name,
                 workunit_factory, workunit_name):
    """Execute Ivy with the given ivy.xml and copies all relevant files into the workdir.

    This method does an Ivy resolve, which may be either a Pants resolve or a Pants fetch depending
    on whether there is an existing frozen resolution.

    After it is run, the Ivy reports are copied into the workdir at the paths specified by
    workdir_report_paths_by_conf along with a file containing a list of all the requested artifacts
    and their transitive dependencies.

    :param executor: A JVM executor to use to invoke ivy.
    :param extra_args: Extra arguments to pass to ivy.
    :param ivyxml: The input ivy.xml containing the dependencies to resolve.
    :param jvm_options: A list of jvm option strings to use for the ivy invoke, or None.
    :param workdir_report_paths_by_conf: A dict mapping confs to report paths in the workdir.
    :param confs: The confs used in the resolve.
    :param ivy_cache_dir: The path of the ivy cache dir where reports are produced.
    :param ivy_cache_classpath_filename: The file the resolved classpath is written to.
    :param resolve_hash_name: The hash to use as the module name for finding the ivy report file.
    :param workunit_factory: A workunit factory for the ivy invoke, or None.
    :param workunit_name: A workunit name for the ivy invoke, or None.
    """
    ivy = Bootstrapper.default_ivy(bootstrap_workunit_factory=workunit_factory)
    # Write the classpath file atomically: ivy writes to a tmp path that is only moved into
    # place when the whole block succeeds.
    with safe_concurrent_creation(ivy_cache_classpath_filename) as raw_target_classpath_file_tmp:
      extra_args = extra_args or []
      args = ['-cachepath', raw_target_classpath_file_tmp] + extra_args

      with cls._ivy_lock:
        cls._exec_ivy(ivy, confs, ivyxml, args,
                      jvm_options=jvm_options,
                      executor=executor,
                      workunit_name=workunit_name,
                      workunit_factory=workunit_factory)

      if not os.path.exists(raw_target_classpath_file_tmp):
        raise cls.IvyError('Ivy failed to create classpath file at {}'
                           .format(raw_target_classpath_file_tmp))

      cls._copy_ivy_reports(workdir_report_paths_by_conf, confs, ivy_cache_dir, resolve_hash_name)

    logger.debug('Moved ivy classfile file to {dest}'
                 .format(dest=ivy_cache_classpath_filename))
  @classmethod
  def _copy_ivy_reports(cls, workdir_report_paths_by_conf, confs, ivy_cache_dir, resolve_hash_name):
    """Copies each conf's xml report from the ivy cache into the workdir.

    :raises cls.IvyError: If a report cannot be copied.
    """
    for conf in confs:
      ivy_cache_report_path = IvyUtils.xml_report_path(ivy_cache_dir, resolve_hash_name,
                                                       conf)
      workdir_report_path = workdir_report_paths_by_conf[conf]
      try:
        # atomic_copy so concurrent readers never observe a partially-written report.
        atomic_copy(ivy_cache_report_path,
                    workdir_report_path)
      except IOError as e:
        raise cls.IvyError('Failed to copy report into workdir from {} to {}: {}'
                           .format(ivy_cache_report_path, workdir_report_path, e))
  @classmethod
  def _exec_ivy(cls, ivy, confs, ivyxml, args, jvm_options, executor,
                workunit_name, workunit_factory):
    """Invokes ivy against ivyxml for the given confs with the passed args.

    :raises IvyUtils.IvyError: If ivy exits non-zero or the executor fails.
    """
    ivy = ivy or Bootstrapper.default_ivy()

    ivy_args = ['-ivy', ivyxml]
    ivy_args.append('-confs')
    ivy_args.extend(confs)
    ivy_args.extend(args)

    ivy_jvm_options = list(jvm_options)
    # Disable cache in File.getCanonicalPath(), makes Ivy work with -symlink option properly on ng.
    ivy_jvm_options.append('-Dsun.io.useCanonCaches=false')

    runner = ivy.runner(jvm_options=ivy_jvm_options, args=ivy_args, executor=executor)
    try:
      with ivy.resolution_lock:
        result = execute_runner(runner, workunit_factory=workunit_factory,
                                workunit_name=workunit_name)
      if result != 0:
        raise IvyUtils.IvyError('Ivy returned {result}. cmd={cmd}'.format(result=result,
                                                                          cmd=runner.cmd))
    except runner.executor.Error as e:
      raise IvyUtils.IvyError(e)
  @classmethod
  def construct_and_load_symlink_map(cls, symlink_dir, ivy_cache_dir,
                                     ivy_cache_classpath_filename, symlink_classpath_filename):
    """Builds the symlinked classpath and returns (classpath, symlink_map)."""
    # Make our actual classpath be symlinks, so that the paths are uniform across systems.
    # Note that we must do this even if we read the raw_target_classpath_file from the artifact
    # cache. If we cache the target_classpath_file we won't know how to create the symlinks.
    with IvyUtils._symlink_map_lock:
      # A common dir for symlinks into the ivy2 cache. This ensures that paths to jars
      # in artifact-cached analysis files are consistent across systems.
      # Note that we have one global, well-known symlink dir, again so that paths are
      # consistent across builds.
      symlink_map = cls._symlink_cachepath(ivy_cache_dir,
                                           ivy_cache_classpath_filename,
                                           symlink_dir,
                                           symlink_classpath_filename)
      classpath = cls._load_classpath_from_cachepath(symlink_classpath_filename)
    return classpath, symlink_map
  @classmethod
  def _symlink_cachepath(cls, ivy_cache_dir, inpath, symlink_dir, outpath):
    """Symlinks all paths listed in inpath that are under ivy_cache_dir into symlink_dir.

    If there is an existing symlink for a file under inpath, it is used rather than creating
    a new symlink. Preserves all other paths. Writes the resulting paths to outpath.
    Returns a map of path -> symlink to that path.
    """
    safe_mkdir(symlink_dir)
    # The ivy_cache_dir might itself be a symlink. In this case, ivy may return paths that
    # reference the realpath of the .jar file after it is resolved in the cache dir. To handle
    # this case, add both the symlink'ed path and the realpath to the jar to the symlink map.
    real_ivy_cache_dir = os.path.realpath(ivy_cache_dir)
    symlink_map = OrderedDict()

    inpaths = cls._load_classpath_from_cachepath(inpath)
    paths = OrderedSet([os.path.realpath(path) for path in inpaths])

    # First compute the destination for every path...
    for path in paths:
      if path.startswith(real_ivy_cache_dir):
        symlink_map[path] = os.path.join(symlink_dir, os.path.relpath(path, real_ivy_cache_dir))
      else:
        # This path is outside the cache. We won't symlink it.
        symlink_map[path] = path

    # ...then create symlinks for paths in the ivy cache dir.
    for path, symlink in six.iteritems(symlink_map):
      if path == symlink:
        # Skip paths that aren't going to be symlinked.
        continue
      safe_mkdir(os.path.dirname(symlink))
      try:
        os.symlink(path, symlink)
      except OSError as e:
        # We don't delete and recreate the symlink, as this may break concurrently executing code.
        if e.errno != errno.EEXIST:
          raise

    # (re)create the classpath with all of the paths
    with safe_open(outpath, 'w') as outfile:
      outfile.write(':'.join(OrderedSet(symlink_map.values())))

    return dict(symlink_map)
@classmethod
def xml_report_path(cls, cache_dir, resolve_hash_name, conf):
"""The path to the xml report ivy creates after a retrieve.
:API: public
:param string cache_dir: The path of the ivy cache dir used for resolves.
:param string resolve_hash_name: Hash from the Cache key from the VersionedTargetSet used for
resolution.
:param string conf: The ivy conf name (e.g. "default").
:returns: The report path.
:rtype: string
"""
return os.path.join(cache_dir, '{}-{}-{}.xml'.format(IvyUtils.INTERNAL_ORG_NAME,
resolve_hash_name, conf))
  @classmethod
  def parse_xml_report(cls, conf, path):
    """Parse the ivy xml report corresponding to the name passed to ivy.

    :API: public

    :param string conf: the ivy conf name (e.g. "default")
    :param string path: The path to the ivy report file.
    :returns: The info in the xml report.
    :rtype: :class:`IvyInfo`
    :raises: :class:`IvyResolveMappingError` if no report exists.
    """
    if not os.path.exists(path):
      raise cls.IvyResolveReportError('Missing expected ivy output file {}'.format(path))

    logger.debug("Parsing ivy report {}".format(path))
    ret = IvyInfo(conf)
    etree = ET.parse(path)
    doc = etree.getroot()
    for module in doc.findall('dependencies/module'):
      org = module.get('organisation')
      name = module.get('name')
      for revision in module.findall('revision'):
        rev = revision.get('name')
        # Dependees of this revision, recorded by ivy as <caller/> elements.
        callers = []
        for caller in revision.findall('caller'):
          callers.append(IvyModuleRef(caller.get('organisation'),
                                      caller.get('name'),
                                      caller.get('callerrev')))

        for artifact in revision.findall('artifacts/artifact'):
          classifier = artifact.get('extra-classifier')
          ext = artifact.get('ext')
          ivy_module_ref = IvyModuleRef(org=org, name=name, rev=rev,
                                        classifier=classifier, ext=ext)

          artifact_cache_path = artifact.get('location')
          ivy_module = IvyModule(ivy_module_ref, artifact_cache_path, tuple(callers))

          ret.add_module(ivy_module)
    return ret
  @classmethod
  def generate_ivy(cls, targets, jars, excludes, ivyxml, confs, resolve_hash_name=None,
                   pinned_artifacts=None, jar_dep_manager=None):
    """Generates a resolve ivy.xml, deriving a resolve hash name from targets if not given."""
    if not resolve_hash_name:
      resolve_hash_name = Target.maybe_readable_identify(targets)
    return cls._generate_resolve_ivy(jars, excludes, ivyxml, confs, resolve_hash_name, pinned_artifacts,
                                     jar_dep_manager)
@classmethod
def _generate_resolve_ivy(cls, jars, excludes, ivyxml, confs, resolve_hash_name, pinned_artifacts=None,
jar_dep_manager=None):
org = IvyUtils.INTERNAL_ORG_NAME
name = resolve_hash_name
extra_configurations = [conf for conf in confs if conf and conf != 'default']
jars_by_key = OrderedDict()
for jar in jars:
jars = jars_by_key.setdefault((jar.org, jar.name), [])
jars.append(jar)
manager = jar_dep_manager or JarDependencyManagement.global_instance()
artifact_set = PinnedJarArtifactSet(pinned_artifacts) # Copy, because we're modifying it.
for jars in jars_by_key.values():
for i, dep in enumerate(jars):
direct_coord = M2Coordinate.create(dep)
managed_coord = artifact_set[direct_coord]
if direct_coord.rev != managed_coord.rev:
# It may be necessary to actually change the version number of the jar we want to resolve
# here, because overrides do not apply directly (they are exclusively transitive). This is
# actually a good thing, because it gives us more control over what happens.
coord = manager.resolve_version_conflict(managed_coord, direct_coord, force=dep.force)
jars[i] = dep.copy(rev=coord.rev)
elif dep.force:
# If this dependency is marked as 'force' and there is no version conflict, use the normal
# pants behavior for 'force'.
artifact_set.put(direct_coord)
dependencies = [cls._generate_jar_template(jars) for jars in jars_by_key.values()]
# As it turns out force is not transitive - it only works for dependencies pants knows about
# directly (declared in BUILD files - present in generated ivy.xml). The user-level ivy docs
# don't make this clear [1], but the source code docs do (see isForce docs) [2]. I was able to
# edit the generated ivy.xml and use the override feature [3] though and that does work
# transitively as you'd hope.
#
# [1] http://ant.apache.org/ivy/history/2.3.0/settings/conflict-managers.html
# [2] https://svn.apache.org/repos/asf/ant/ivy/core/branches/2.3.0/
# src/java/org/apache/ivy/core/module/descriptor/DependencyDescriptor.java
# [3] http://ant.apache.org/ivy/history/2.3.0/ivyfile/override.html
overrides = [cls._generate_override_template(_coord) for _coord in artifact_set]
excludes = [cls._generate_exclude_template(exclude) for exclude in excludes]
template_data = TemplateData(
org=org,
module=name,
extra_configurations=extra_configurations,
dependencies=dependencies,
excludes=excludes,
overrides=overrides)
template_relpath = os.path.join('templates', 'ivy_utils', 'ivy.xml.mustache')
cls._write_ivy_xml_file(ivyxml, template_data, template_relpath)
  @classmethod
  def generate_fetch_ivy(cls, jars, ivyxml, confs, resolve_hash_name):
    """Generates an ivy xml with all jars marked as intransitive using the all conflict manager."""
    org = IvyUtils.INTERNAL_ORG_NAME
    name = resolve_hash_name

    extra_configurations = [conf for conf in confs if conf and conf != 'default']

    # Use org name _and_ rev so that we can have dependencies with different versions. This will
    # allow for batching fetching if we want to do that.
    jars_by_key = OrderedDict()
    for jar in jars:
      jars_by_key.setdefault((jar.org, jar.name, jar.rev), []).append(jar)

    dependencies = [cls._generate_fetch_jar_template(_jars) for _jars in jars_by_key.values()]

    template_data = TemplateData(org=org,
                                 module=name,
                                 extra_configurations=extra_configurations,
                                 dependencies=dependencies)

    template_relpath = os.path.join('templates', 'ivy_utils', 'ivy_fetch.xml.mustache')
    cls._write_ivy_xml_file(ivyxml, template_data, template_relpath)
  @classmethod
  def _write_ivy_xml_file(cls, ivyxml, template_data, template_relpath):
    """Renders the mustache template at template_relpath with template_data into ivyxml."""
    # The template ships inside this package; pkgutil locates it relative to __name__.
    template_text = pkgutil.get_data(__name__, template_relpath)
    generator = Generator(template_text, lib=template_data)
    with safe_open(ivyxml, 'w') as output:
      generator.write(output)
  @classmethod
  def calculate_classpath(cls, targets):
    """Creates a consistent classpath and list of excludes for the passed targets.

    It also modifies the JarDependency objects' excludes to contain all the jars excluded by
    provides.

    :param iterable targets: List of targets to collect JarDependencies and excludes from.

    :returns: A pair of a list of JarDependencies, and a set of excludes to apply globally.
    """
    jars = OrderedDict()
    global_excludes = set()
    provide_excludes = set()
    targets_processed = set()

    # Support the ivy force concept when we sanely can for internal dep conflicts.
    # TODO(John Sirois): Consider supporting / implementing the configured ivy revision picking
    # strategy generally.
    def add_jar(jar):
      # TODO(John Sirois): Maven allows for depending on an artifact at one rev and one of its
      # attachments (classified artifacts) at another.  Ivy does not, allow this, the dependency
      # can carry only 1 rev and that hosts multiple artifacts for that rev.  This conflict
      # resolution happens at the classifier level, allowing skew in a
      # multi-artifact/multi-classifier dependency.  We only find out about the skew later in
      # `_generate_jar_template` below which will blow up with a conflict.  Move this logic closer
      # together to get a more clear validate, then emit ivy.xml then resolve flow instead of the
      # spread-out validations happening here.
      # See: https://github.com/pantsbuild/pants/issues/2239
      coordinate = (jar.org, jar.name, jar.classifier)
      existing = jars.get(coordinate)
      jars[coordinate] = jar if not existing else cls._resolve_conflict(existing=existing,
                                                                        proposed=jar)

    # Collects the jar dependencies of a single JarLibrary target.
    def collect_jars(target):
      if isinstance(target, JarLibrary):
        for jar in target.jar_dependencies:
          add_jar(jar)

    # Accumulates any target-declared excludes into the global set.
    def collect_excludes(target):
      target_excludes = target.payload.get_field_value('excludes')
      if target_excludes:
        global_excludes.update(target_excludes)

    # Exported targets exclude the remote artifact they themselves provide.
    def collect_provide_excludes(target):
      if not target.is_exported:
        return
      logger.debug('Automatically excluding jar {}.{}, which is provided by {}'.format(
        target.provides.org, target.provides.name, target))
      provide_excludes.add(Exclude(org=target.provides.org, name=target.provides.name))

    def collect_elements(target):
      targets_processed.add(target)
      collect_jars(target)
      collect_excludes(target)
      collect_provide_excludes(target)

    for target in targets:
      target.walk(collect_elements, predicate=lambda target: target not in targets_processed)

    # If a source dep is exported (ie, has a provides clause), it should always override
    # remote/binary versions of itself, ie "round trip" dependencies.
    # TODO: Move back to applying provides excludes as target-level excludes when they are no
    # longer global.
    if provide_excludes:
      additional_excludes = tuple(provide_excludes)
      new_jars = OrderedDict()
      for coordinate, jar in jars.items():
        new_jars[coordinate] = jar.copy(excludes=jar.excludes + additional_excludes)
      jars = new_jars

    return jars.values(), global_excludes
@classmethod
def _resolve_conflict(cls, existing, proposed):
if existing.rev is None:
return proposed
if proposed.rev is None:
return existing
if proposed == existing:
if proposed.force:
return proposed
return existing
elif existing.force and proposed.force:
raise cls.IvyResolveConflictingDepsError('Cannot force {}#{};{} to both rev {} and {}'.format(
proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev
))
elif existing.force:
logger.debug('Ignoring rev {} for {}#{};{} already forced to {}'.format(
proposed.rev, proposed.org, proposed.name, proposed.classifier or '', existing.rev
))
return existing
elif proposed.force:
logger.debug('Forcing {}#{};{} from {} to {}'.format(
proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev
))
return proposed
else:
if Revision.lenient(proposed.rev) > Revision.lenient(existing.rev):
logger.debug('Upgrading {}#{};{} from rev {} to {}'.format(
proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev,
))
return proposed
else:
return existing
  @classmethod
  def _generate_jar_template(cls, jars):
    """Builds the TemplateData for one ivy <dependency> from all jars sharing an (org, name).

    :raises cls.IvyResolveConflictingDepsError: If the jars disagree on any of the
      Dependency attributes (org, name, rev, mutable, force, transitive).
    """
    global_dep_attributes = set(Dependency(org=jar.org,
                                           name=jar.name,
                                           rev=jar.rev,
                                           mutable=jar.mutable,
                                           force=jar.force,
                                           transitive=jar.transitive)
                                for jar in jars)
    if len(global_dep_attributes) != 1:
      # TODO: Need to provide information about where these came from - could be
      # far-flung JarLibrary targets. The jars here were collected from targets via
      # `calculate_classpath` above so executing this step there instead may make more
      # sense.
      conflicting_dependencies = sorted(str(g) for g in global_dep_attributes)
      raise cls.IvyResolveConflictingDepsError('Found conflicting dependencies:\n\t{}'
                                               .format('\n\t'.join(conflicting_dependencies)))
    jar_attributes = global_dep_attributes.pop()

    excludes = set()
    for jar in jars:
      excludes.update(jar.excludes)

    any_have_url = False

    # Deduplicate artifacts by (ext, url, classifier); the last jar with a given key wins.
    artifacts = OrderedDict()
    for jar in jars:
      ext = jar.ext
      url = jar.url
      if url:
        any_have_url = True
      classifier = jar.classifier
      artifact = Artifact(name=jar.name,
                          type_=ext or 'jar',
                          ext=ext,
                          url=url,
                          classifier=classifier)
      artifacts[(ext, url, classifier)] = artifact

    template = TemplateData(
      org=jar_attributes.org,
      module=jar_attributes.name,
      version=jar_attributes.rev,
      mutable=jar_attributes.mutable,
      force=jar_attributes.force,
      transitive=jar_attributes.transitive,
      artifacts=artifacts.values(),
      any_have_url=any_have_url,
      excludes=[cls._generate_exclude_template(exclude) for exclude in excludes])
    return template
  @classmethod
  def _generate_fetch_jar_template(cls, jars):
    """Builds the TemplateData for one fetch <dependency> from jars sharing (org, name, rev).

    Unlike the resolve variant, fetch dependencies are always intransitive and forced
    (see the Dependency construction below).

    :raises cls.IvyResolveConflictingDepsError: If the jars disagree on org/name/rev/mutable.
    """
    global_dep_attributes = set(Dependency(org=jar.org,
                                           name=jar.name,
                                           rev=jar.rev,
                                           transitive=False,
                                           mutable=jar.mutable,
                                           force=True)
                                for jar in jars)
    if len(global_dep_attributes) != 1:
      # If we batch fetches and assume conflict manager all, we could ignore these.
      # Leaving this here for now.
      conflicting_dependencies = sorted(str(g) for g in global_dep_attributes)
      raise cls.IvyResolveConflictingDepsError('Found conflicting dependencies:\n\t{}'
                                               .format('\n\t'.join(conflicting_dependencies)))
    jar_attributes = global_dep_attributes.pop()

    any_have_url = False

    # Deduplicate artifacts by (ext, url, classifier); the last jar with a given key wins.
    artifacts = OrderedDict()
    for jar in jars:
      ext = jar.ext
      url = jar.url
      if url:
        any_have_url = True
      classifier = jar.classifier
      artifact = Artifact(name=jar.name,
                          type_=ext or 'jar',
                          ext=ext,
                          url=url,
                          classifier=classifier)
      artifacts[(ext, url, classifier)] = artifact

    # NOTE(review): force/transitive are computed into jar_attributes but not passed to the
    # template here, unlike _generate_jar_template -- presumably the fetch template hardcodes
    # them; confirm against ivy_fetch.xml.mustache.
    template = TemplateData(
      org=jar_attributes.org,
      module=jar_attributes.name,
      version=jar_attributes.rev,
      mutable=jar_attributes.mutable,
      artifacts=artifacts.values(),
      any_have_url=any_have_url,
      excludes=[])
    return template
| apache-2.0 |
haxsaw/actuator | src/examples/tutorial_examples.py | 1 | 13444 | #
# Copyright (c) 2014 Tom Carroll
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os.path
from actuator import (InfraModel, MultiResource, MultiResourceGroup, ctxt,
with_roles, NamespaceModel, Role, Var, with_variables,
with_resources, ResourceGroup, MultiRole,
ConfigModel, CopyFileTask, CommandTask, with_dependencies,
MultiTask, ConfigClassTask)
from actuator.provisioners.openstack.resources import (Server, Network, Subnet,
FloatingIP, Router,
RouterGateway,
RouterInterface)
import actuator
# Simple Openstack example
class SingleOpenstackServer(InfraModel):
    """Tutorial model: one server on its own network, reachable via a floating IP.

    ctxt.model expressions are late-bound references resolved when the model is provisioned.
    """
    server = Server("actuator1", "Ubuntu 13.10", "m1.small",
                    nics=[ctxt.model.net])  # @UndefinedVariable
    net = Network("actuator_ex1_net")
    fip = FloatingIP("actuator_ex1_float",
                     ctxt.model.server,  # @UndefinedVariable
                     ctxt.model.server.iface0.addr0,  # @UndefinedVariable
                     pool="external")
    subnet = Subnet("actuator_ex1_subnet", ctxt.model.net,  # @UndefinedVariable
                    "192.168.23.0/24",
                    dns_nameservers=['8.8.8.8'])
    router = Router("actuator_ex1_router")
    gateway = RouterGateway("actuator_ex1_gateway",
                            ctxt.model.router,  # @UndefinedVariable
                            "external")
    rinter = RouterInterface("actuator_ex1_rinter",
                             ctxt.model.router,  # @UndefinedVariable
                             ctxt.model.subnet)  # @UndefinedVariable
# if you get some credentials on an Openstack instance you can try out the following:
# from actuator.provisioners.openstack.openstack import OpenstackProvisioner
# inst = SingleOpenstackServer("actuator_ex1")
# provisioner = OpenstackProvisioner(uid, pwd, uid, url)
# provisioner.provision_infra_model(inst)
#repeat of first example with common components factored out
gateway_components = {"net":Network("actuator_ex1_net"),
"subnet":Subnet("actuator_ex1_subnet",
ctxt.model.net, # @UndefinedVariable
"192.168.23.0/24",
dns_nameservers=['8.8.8.8']),
"router":Router("actuator_ex1_router"),
"gateway":RouterGateway("actuator_ex1_gateway",
ctxt.model.router, # @UndefinedVariable
"external"),
"rinter":RouterInterface("actuator_ex1_rinter",
ctxt.model.router, # @UndefinedVariable
ctxt.model.subnet)} # @UndefinedVariable
class SingleOpenstackServer2(InfraModel):
    """Same as SingleOpenstackServer, with the shared networking declared via with_resources."""
    with_resources(**gateway_components)
    server = Server("actuator1", "Ubuntu 13.10", "m1.small",
                    nics=[ctxt.model.net])  # @UndefinedVariable
    fip = FloatingIP("actuator_ex1_float", ctxt.model.server,  # @UndefinedVariable
                     ctxt.model.server.iface0.addr0,  # @UndefinedVariable
                     pool="external")
# MultipleServers tutorial example
class MultipleServers(InfraModel):
    """Tutorial model: one externally reachable foreman plus a variable pool of workers."""
    #
    # First, declare the common networking components with with_infra_components
    #
    with_resources(**gateway_components)
    #
    # now declare the "foreman"; this will be the only server the outside world can
    # reach, and it will pass off work requests to the workers. It will need a
    # floating ip for the outside world to see it
    #
    foreman = Server("foreman", "Ubuntu 13.10", "m1.small",
                     nics=[ctxt.model.net])  # @UndefinedVariable
    fip = FloatingIP("actuator_ex2_float", ctxt.model.foreman,  # @UndefinedVariable
                     ctxt.model.foreman.iface0.addr0,  # @UndefinedVariable
                     pool="external")
    #
    # finally, declare the workers MultiComponent
    #
    workers = MultiResource(Server("worker", "Ubuntu 13.10", "m1.small",
                                   nics=[ctxt.model.net]))  # @UndefinedVariable
# Resource groups example
# Build the individual networking resources first, then bundle them into one
# reusable ResourceGroup. Inside a group, members refer to their siblings
# through ctxt.comp.container.<name>.
_grp_net = Network("actuator_ex1_net")
_grp_subnet = Subnet("actuator_ex1_subnet",
                     ctxt.comp.container.net,
                     "192.168.23.0/24",
                     dns_nameservers=['8.8.8.8'])
_grp_router = Router("actuator_ex1_router")
_grp_gateway = RouterGateway("actuator_ex1_gateway",
                             ctxt.comp.container.router,
                             "external")
_grp_rinter = RouterInterface("actuator_ex1_rinter",
                              ctxt.comp.container.router,
                              ctxt.comp.container.subnet)
gateway_component = ResourceGroup("gateway",
                                  net=_grp_net,
                                  subnet=_grp_subnet,
                                  router=_grp_router,
                                  gateway=_grp_gateway,
                                  rinter=_grp_rinter)
# Drop the helper names so only the finished group stays in the module.
del _grp_net, _grp_subnet, _grp_router, _grp_gateway, _grp_rinter
class SingleOpenstackServer3(InfraModel):
    """Like SingleOpenstackServer2, but networking comes in as one
    ResourceGroup; the group's members are reached via ctxt.model.gateway.*"""
    gateway = gateway_component
    server = Server("actuator1", "Ubuntu 13.10", "m1.small",
                    nics=[ctxt.model.gateway.net])  # @UndefinedVariable
    # Public IP bound to the server's first interface address.
    fip = FloatingIP("actuator_ex1_float", ctxt.model.server,  # @UndefinedVariable
                     ctxt.model.server.iface0.addr0,  # @UndefinedVariable
                     pool="external")
class MultipleGroups(InfraModel):
    """A public foreman that farms work out to multiple leader+workers clusters."""
    #
    # First, declare the common networking components
    #
    with_resources(**gateway_components)
    #
    # now declare the "foreman"; this will be the only server the outside world can
    # reach, and it will pass off work requests to the leaders of clusters. It will need a
    # floating ip for the outside world to see it
    #
    foreman = Server("foreman", "Ubuntu 13.10", "m1.small",
                     nics=[ctxt.model.net])  # @UndefinedVariable
    # FIX: this model has no 'server' attribute; the floating IP must attach to
    # the foreman (compare the equivalent fip in MultipleServers).
    fip = FloatingIP("actuator_ex3_float", ctxt.model.foreman,  # @UndefinedVariable
                     ctxt.model.foreman.iface0.addr0,  # @UndefinedVariable
                     pool="external")
    #
    # finally, declare a "cluster"; a leader that coordinates the workers in the
    # cluster, which operate under the leader's direction
    #
    cluster = MultiResourceGroup("cluster",
                                 leader=Server("leader", "Ubuntu 13.10", "m1.small",
                                               nics=[ctxt.model.net]),  # @UndefinedVariable
                                 workers=MultiResource(Server("cluster_node",
                                                              "Ubuntu 13.10",
                                                              "m1.small",
                                                              nics=[ctxt.model.net])))  # @UndefinedVariable
class SOSNamespace(NamespaceModel):
    """Namespace binding app/compute roles onto the SingleOpenstackServer infra."""
    # Model-wide variables; host/IP values are pulled lazily from the infra model.
    with_variables(Var("COMP_SERVER_HOST", SingleOpenstackServer.server.iface0.addr0),
                   Var("COMP_SERVER_PORT", '8081'),
                   Var("EXTERNAL_APP_SERVER_IP", SingleOpenstackServer.fip.ip),
                   Var("APP_SERVER_PORT", '8080'))
    # The app server role carries its own APP_SERVER_HOST override.
    app_server = (Role("app_server", host_ref=SingleOpenstackServer.server)
                  .add_variable(Var("APP_SERVER_HOST",
                                    SingleOpenstackServer.server.iface0.addr0)))
    compute_server = Role("compute_server", host_ref=SingleOpenstackServer.server)
# First approach for dynamic Namespaces
def grid_namespace_factory(num_workers=10):
    """Build a GridNamespace instance with one worker Role per grid node.

    :param num_workers: how many worker roles to declare (default 10)
    :return: a fresh instance of the dynamically built NamespaceModel
    """
    class GridNamespace(NamespaceModel):
        with_variables(Var("FOREMAN_EXTERNAL_IP", MultipleServers.fip.ip),
                       Var("FOREMAN_INTERNAL_IP", MultipleServers.foreman.iface0.addr0),
                       Var("FOREMAN_EXTERNAL_PORT", "3000"),
                       Var("FOREMAN_WORKER_PORT", "3001"))
        foreman = Role("foreman", host_ref=MultipleServers.foreman)
        # Declare one role per worker; names look like "worker_0", "worker_1", ...
        worker_roles = {"worker_{}".format(idx):
                        Role("worker_{}".format(idx),
                             host_ref=MultipleServers.workers[idx])
                        for idx in range(num_workers)}
        with_roles(**worker_roles)
        del worker_roles  # keep the scratch dict off the finished class
    return GridNamespace()
# Second approach for dynamic namespaces
class GridNamespace(NamespaceModel):
    """Dynamic grid namespace: MultiRole stamps out a node Role per key used."""
    with_variables(Var("FOREMAN_EXTERNAL_IP", MultipleServers.fip.ip),
                   Var("FOREMAN_INTERNAL_IP", MultipleServers.foreman.iface0.addr0),
                   Var("FOREMAN_EXTERNAL_PORT", "3000"),
                   Var("FOREMAN_WORKER_PORT", "3001"))
    foreman = Role("foreman", host_ref=MultipleServers.foreman)
    # Each new key indexed into 'grid' creates a node Role whose host is the
    # matching worker in the infra model (looked up by the same key).
    grid = MultiRole(Role("node",
                          host_ref=ctxt.model.infra.workers[ctxt.name]))  # @UndefinedVariable
# Var examples
class VarExample(NamespaceModel):
    """Shows Var composition: NODE_NAME expands BASE_NAME and NODE_ID."""
    with_variables(Var("NODE_NAME", "!{BASE_NAME}-!{NODE_ID}"))
    # Each stamped-out worker gets NODE_ID from its own key (ctxt.name);
    # BASE_NAME is shared by the whole MultiRole.
    grid = (MultiRole(Role("worker", variables=[Var("NODE_ID", ctxt.name)]))
            .add_variable(Var("BASE_NAME", "Grid")))
# Namespace for ConfigExamples
class SimpleNamespace(NamespaceModel):
    """Minimal namespace used by the config-model examples below."""
    with_variables(Var("DEST", "/tmp"),
                   Var("PKG", "actuator"),
                   Var("CMD_TARGET", "127.0.0.1"))
    # host_ref may itself be a Var reference, resolved when tasks run.
    copy_target = Role("copy_target", host_ref="!{CMD_TARGET}")
# SimpleConfig example
# Find actuator on disk; when it was imported from under the cwd, __file__
# is a relative path, so fall back to a cwd-anchored path (the "!{PKG}"
# Var reference is expanded later by actuator).
actuator_path = (actuator.__file__
                 if os.path.isabs(actuator.__file__)
                 else os.path.join(os.getcwd(), "!{PKG}"))
class SimpleConfig(ConfigModel):
    """Two tasks on copy_target: remove the old package, copy the new one.
    No ordering is declared here; see SimpleConfig2 for the dependency form."""
    cleanup = CommandTask("clean", "/bin/rm -f !{PKG}", chdir="!{DEST}",
                          task_role=SimpleNamespace.copy_target)
    copy = CopyFileTask("copy-file", "!{DEST}", src=actuator_path,
                        task_role=SimpleNamespace.copy_target)
# SimpleConfig again but with dependencies this time
class SimpleConfig2(ConfigModel):
    """Same tasks as SimpleConfig, but with an explicit cleanup-before-copy order."""
    cleanup = CommandTask("clean", "/bin/rm -f !{PKG}", chdir="!{DEST}",
                          task_role=SimpleNamespace.copy_target)
    copy = CopyFileTask("copy-file", "!{DEST}", src=actuator_path,
                        task_role=SimpleNamespace.copy_target)
    # NOTE: this call must be within the config model, not after it!
    with_dependencies( cleanup | copy )
# Auto-scaling example
class GridInfra(InfraModel):  # infra model backing the auto-scaling example
    """Shared networking plus an open-ended MultiResource of grid servers."""
    with_resources(**gateway_components)
    grid = MultiResource(Server("grid_node", "Ubuntu 13.10", "m1.small",
                                nics=[ctxt.model.net]))  # @UndefinedVariable
class GridNamespace2(NamespaceModel):
    """One grid-node Role per key, hosted on the matching GridInfra server."""
    grid = MultiRole(Role("grid-node", host_ref=GridInfra.grid[ctxt.name]))
class GridConfig(ConfigModel):
    """Fans each task out across every grid role via MultiTask, reset first."""
    reset = MultiTask("reset", CommandTask("remove", "/bin/rm -rf /some/path/*"),
                      GridNamespace2.q.grid.all())
    copy = MultiTask("copy", CopyFileTask("copy-tarball", '/some/path/software.tgz',
                                          src='/some/local/path/software.tgz'),
                     GridNamespace2.q.grid.all())
    # All resets complete before any copy starts.
    with_dependencies(reset | copy)
# Config classes as tasks example
#this is the same namespace model as above
class GridNamespace3(NamespaceModel):
    """Same shape as GridNamespace2; used by the config-class-as-task example."""
    grid = MultiRole(Role("grid-node", host_ref=GridInfra.grid[ctxt.name]))
#this config model is new; it defines all the tasks and dependencies for a single role
#Notice that there is no mention of a 'task_role' within this model
class NodeConfig(ConfigModel):
    """Per-node task suite (no task_role here: the role is supplied by the
    ConfigClassTask that wraps this model in GridConfig2)."""
    reset = CommandTask("remove", "/bin/rm -rf /some/path/*")
    copy = CopyFileTask("copy-tarball", '/some/path/software.tgz',
                        src='/some/local/path/software.tgz')
    with_dependencies(reset | copy)
#this model now uses the NodeConfig model in a MultiTask to define all the tasks that need
#to be carried out on each role
class GridConfig2(ConfigModel):
    """Runs the whole NodeConfig suite once per grid role via MultiTask."""
    setup_nodes = MultiTask("setup-nodes", ConfigClassTask("setup-suite", NodeConfig),
                            GridNamespace3.q.grid.all())
| mit |
TalShafir/ansible | test/units/modules/cloud/amazon/test_lambda.py | 55 | 11972 | #
# (c) 2017 Michael De La Rue
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
import copy
import pytest
from units.compat.mock import MagicMock, Mock, patch
from ansible.module_utils import basic
from units.modules.utils import set_module_args
# Skip this entire test module if boto3 is not installed.
boto3 = pytest.importorskip("boto3")
# 'lambda' is a Python keyword, so the module under test cannot be reached
# with a normal import statement; use __import__/getattr instead.
_temp = __import__("ansible.modules.cloud.amazon.lambda")
lda = getattr(_temp.modules.cloud.amazon, "lambda")
# Baseline function configuration, in the shape the AWS API reports it
# (this is what get_function() returns under 'Configuration').
base_lambda_config = {
    'FunctionName': 'lambda_name',
    'Role': 'arn:aws:iam::987654321012:role/lambda_basic_execution',
    'Handler': 'lambda_python.my_handler',
    'Description': 'this that the other',
    'Timeout': 3,
    'MemorySize': 128,
    'Runtime': 'python2.7',
    'CodeSha256': 'AqMZ+xptM7aC9VXu+5jyp1sqO+Nj4WFMNzQxtPMP2n8=',
}
# Variant with a single config difference (Timeout) from the module args.
one_change_lambda_config = copy.copy(base_lambda_config)
one_change_lambda_config['Timeout'] = 4
# Variant with two config differences (Timeout and Role).
two_change_lambda_config = copy.copy(one_change_lambda_config)
two_change_lambda_config['Role'] = 'arn:aws:iam::987654321012:role/lambda_advanced_execution'
# Variant where only the code hash differs (config otherwise matches).
code_change_lambda_config = copy.copy(base_lambda_config)
code_change_lambda_config['CodeSha256'] = 'P+Zy8U4T4RiiHWElhL10VBKj9jw4rSJ5bm/TiW+4Rts='
# Arguments the tests feed to the module; they correspond to base_lambda_config.
base_module_args = {
    "region": "us-west-1",
    "name": "lambda_name",
    "state": "present",
    "zip_file": "test/units/modules/cloud/amazon/fixtures/thezip.zip",
    "runtime": 'python2.7',
    "role": 'arn:aws:iam::987654321012:role/lambda_basic_execution',
    "memory_size": 128,
    "timeout": 3,
    "handler": 'lambda_python.my_handler'
}
# Same args plus an environment, for the environment-variable test.
module_args_with_environment = dict(base_module_args, environment_variables={
    "variable_name": "variable_value"
})
def make_mock_no_connection_connection(config):
    """Return (boto3_conn double, AWS client double) for a missing function.

    The client double reports that no lambda exists (get_function -> False)
    and answers configuration updates with version 1. ``config`` is accepted
    for signature parity with make_mock_connection but is not used here.
    """
    client_double = MagicMock()
    client_double.get_function.return_value = False
    client_double.update_function_configuration.return_value = {'Version': 1}
    conn_double = Mock(return_value=client_double)
    return conn_double, client_double
def make_mock_connection(config):
    """Return (boto3_conn double, AWS client double) for an existing function.

    get_function() reports ``config`` as the function's current Configuration,
    and update_function_configuration() answers with version 1.
    """
    client_double = MagicMock()
    client_double.get_function.return_value = {'Configuration': config}
    client_double.update_function_configuration.return_value = {'Version': 1}
    conn_double = Mock(return_value=client_double)
    return conn_double, client_double
class AnsibleFailJson(Exception):
    """Raised by fail_json_double so tests can capture module failure output."""
    pass


def fail_json_double(*args, **kwargs):
    """Stand-in for AnsibleModule.fail_json.

    Instead of printing JSON and exiting, mark the result failed and raise
    it wrapped in AnsibleFailJson so the calling test can inspect it.
    """
    raise AnsibleFailJson(dict(kwargs, failed=True))
# TODO: def test_handle_different_types_in_config_params():
def test_create_lambda_if_not_exist():
    """main() must create the function when AWS reports it does not exist."""
    set_module_args(base_module_args)
    conn_double, client_double = make_mock_no_connection_connection(code_change_lambda_config)
    with patch.object(lda, 'boto3_conn', conn_double):
        try:
            lda.main()
        except SystemExit:
            pass
    # guard against calling other than for a lambda connection (e.g. IAM)
    assert len(conn_double.mock_calls) > 0, "boto connections never used"
    assert len(conn_double.mock_calls) < 2, "multiple boto connections used unexpectedly"
    assert len(client_double.update_function_configuration.mock_calls) == 0, \
        "unexpectedly updated lambda configuration when should have only created"
    assert len(client_double.update_function_code.mock_calls) == 0, \
        "update lambda function code when function should have been created only"
    assert len(client_double.create_function.mock_calls) > 0, \
        "failed to call create_function "
    create_args, create_kwargs = client_double.create_function.call_args
    assert len(create_kwargs) > 0, "expected create called with keyword args, none found"
    # For now I assume that we should NOT send an empty environment. It might
    # be okay / better to explicitly send an empty environment. However `None'
    # is not acceptable - mikedlr
    if "Environment" in create_kwargs:
        raise Exception("Environment sent to boto when none expected")
def test_update_lambda_if_code_changed():
    """main() must push new code — and only code — when just CodeSha256 differs."""
    set_module_args(base_module_args)
    (boto3_conn_double, lambda_client_double) = make_mock_connection(code_change_lambda_config)
    with patch.object(lda, 'boto3_conn', boto3_conn_double):
        try:
            lda.main()
        except SystemExit:
            pass
    # guard against calling other than for a lambda connection (e.g. IAM)
    assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
    assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
    # fixed typo in the failure message ("updatede" -> "updated")
    assert(len(lambda_client_double.update_function_configuration.mock_calls) == 0), \
        "unexpectedly updated lambda configuration when only code changed"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \
        "lambda function update called multiple times when only one time should be needed"
    assert(len(lambda_client_double.update_function_code.mock_calls) > 1), \
        "failed to update lambda function when code changed"
    # 3 because after uploading we call into the return from mock to try to find what function version
    # was returned so the MagicMock actually sees two calls for one update.
    assert(len(lambda_client_double.update_function_code.mock_calls) < 3), \
        "lambda function code update called multiple times when only one time should be needed"
def test_update_lambda_if_config_changed():
    """main() must update configuration (and nothing else) when config differs."""
    set_module_args(base_module_args)
    conn_double, client_double = make_mock_connection(two_change_lambda_config)
    with patch.object(lda, 'boto3_conn', conn_double):
        try:
            lda.main()
        except SystemExit:
            pass
    # guard against calling other than for a lambda connection (e.g. IAM)
    assert len(conn_double.mock_calls) > 0, "boto connections never used"
    assert len(conn_double.mock_calls) < 2, "multiple boto connections used unexpectedly"
    assert len(client_double.update_function_configuration.mock_calls) > 0, \
        "failed to update lambda function when configuration changed"
    assert len(client_double.update_function_configuration.mock_calls) < 2, \
        "lambda function update called multiple times when only one time should be needed"
    assert len(client_double.update_function_code.mock_calls) == 0, \
        "updated lambda code when no change should have happened"
def test_update_lambda_if_only_one_config_item_changed():
    """A single differing config value is enough to trigger a config update."""
    set_module_args(base_module_args)
    conn_double, client_double = make_mock_connection(one_change_lambda_config)
    with patch.object(lda, 'boto3_conn', conn_double):
        try:
            lda.main()
        except SystemExit:
            pass
    # guard against calling other than for a lambda connection (e.g. IAM)
    assert len(conn_double.mock_calls) > 0, "boto connections never used"
    assert len(conn_double.mock_calls) < 2, "multiple boto connections used unexpectedly"
    assert len(client_double.update_function_configuration.mock_calls) > 0, \
        "failed to update lambda function when configuration changed"
    assert len(client_double.update_function_configuration.mock_calls) < 2, \
        "lambda function update called multiple times when only one time should be needed"
    assert len(client_double.update_function_code.mock_calls) == 0, \
        "updated lambda code when no change should have happened"
def test_update_lambda_if_added_environment_variable():
    """Adding environment variables must flow through to the config update."""
    set_module_args(module_args_with_environment)
    conn_double, client_double = make_mock_connection(base_lambda_config)
    with patch.object(lda, 'boto3_conn', conn_double):
        try:
            lda.main()
        except SystemExit:
            pass
    # guard against calling other than for a lambda connection (e.g. IAM)
    assert len(conn_double.mock_calls) > 0, "boto connections never used"
    assert len(conn_double.mock_calls) < 2, "multiple boto connections used unexpectedly"
    assert len(client_double.update_function_configuration.mock_calls) > 0, \
        "failed to update lambda function when configuration changed"
    assert len(client_double.update_function_configuration.mock_calls) < 2, \
        "lambda function update called multiple times when only one time should be needed"
    assert len(client_double.update_function_code.mock_calls) == 0, \
        "updated lambda code when no change should have happened"
    # The environment the module was given must be what reaches AWS.
    update_args, update_kwargs = client_double.update_function_configuration.call_args
    assert len(update_kwargs) > 0, "expected update configuration called with keyword args, none found"
    assert update_kwargs['Environment']['Variables'] == module_args_with_environment['environment_variables']
def test_dont_update_lambda_if_nothing_changed():
    """When AWS already matches the module args, no update call may be made."""
    set_module_args(base_module_args)
    conn_double, client_double = make_mock_connection(base_lambda_config)
    with patch.object(lda, 'boto3_conn', conn_double):
        try:
            lda.main()
        except SystemExit:
            pass
    # guard against calling other than for a lambda connection (e.g. IAM)
    assert len(conn_double.mock_calls) > 0, "boto connections never used"
    assert len(conn_double.mock_calls) < 2, "multiple boto connections used unexpectedly"
    assert len(client_double.update_function_configuration.mock_calls) == 0, \
        "updated lambda function when no configuration changed"
    assert len(client_double.update_function_code.mock_calls) == 0, \
        "updated lambda code when no change should have happened"
def test_warn_region_not_specified():
    """main() must fail with a clear message when no region can be determined.

    get_aws_connection_info is doubled to report no region, and fail_json is
    doubled so the failure surfaces as AnsibleFailJson.
    """
    set_module_args({
        "name": "lambda_name",
        "state": "present",
        # Module is called without a region causing error
        # "region": "us-east-1",
        "zip_file": "test/units/modules/cloud/amazon/fixtures/thezip.zip",
        "runtime": 'python2.7',
        "role": 'arn:aws:iam::987654321012:role/lambda_basic_execution',
        "handler": 'lambda_python.my_handler'})
    get_aws_connection_info_double = Mock(return_value=(None, None, None))
    with patch.object(lda, 'get_aws_connection_info', get_aws_connection_info_double):
        with patch.object(basic.AnsibleModule, 'fail_json', fail_json_double):
            # FIX: the assertion used to live inside an `except AnsibleFailJson`
            # block, so if main() did NOT raise, the test silently passed.
            # pytest.raises turns a missing failure into a test error.
            with pytest.raises(AnsibleFailJson) as excinfo:
                lda.main()
    result = excinfo.value.args[0]
    assert "region must be specified" in result['msg']
| gpl-3.0 |
ujenmr/ansible | test/units/module_utils/test_distribution_version.py | 17 | 46542 | # -*- coding: utf-8 -*-
# Copyright: (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from itertools import product
import mock
import pytest
from ansible.module_utils.six.moves import builtins
# the module we are actually testing (sort of)
from ansible.module_utils.facts.system.distribution import DistributionFactCollector
# to generate the testcase data, you can use the script gen_distribution_version_testcase.py in hacking/tests
TESTSETS = [
{
"platform.dist": [
"centos",
"7.2.1511",
"Core"
],
"input": {
"/etc/redhat-release": "CentOS Linux release 7.2.1511 (Core) \n",
"/etc/os-release": (
"NAME=\"CentOS Linux\"\nVERSION=\"7 (Core)\"\nID=\"centos\"\nID_LIKE=\"rhel fedora\"\nVERSION_ID=\"7\"\n"
"PRETTY_NAME=\"CentOS Linux 7 (Core)\"\nANSI_COLOR=\"0;31\"\nCPE_NAME=\"cpe:/o:centos:centos:7\"\n"
"HOME_URL=\"https://www.centos.org/\"\nBUG_REPORT_URL=\"https://bugs.centos.org/\"\n\nCENTOS_MANTISBT_PROJECT=\"CentOS-7\"\n"
"CENTOS_MANTISBT_PROJECT_VERSION=\"7\"\nREDHAT_SUPPORT_PRODUCT=\"centos\"\nREDHAT_SUPPORT_PRODUCT_VERSION=\"7\"\n\n"
),
"/etc/system-release": "CentOS Linux release 7.2.1511 (Core) \n"
},
"name": "CentOS 7.2.1511",
"result": {
"distribution_release": "Core",
"distribution": "CentOS",
"distribution_major_version": "7",
"os_family": "RedHat",
"distribution_version": "7.2.1511",
}
},
{
"name": "CentOS 6.7",
"platform.dist": [
"centos",
"6.7",
"Final"
],
"input": {
"/etc/redhat-release": "CentOS release 6.7 (Final)\n",
"/etc/lsb-release": (
"LSB_VERSION=base-4.0-amd64:base-4.0-noarch:core-4.0-amd64:core-4.0-noarch:graphics-4.0-amd64:graphics-4.0-noarch:"
"printing-4.0-amd64:printing-4.0-noarch\n"
),
"/etc/system-release": "CentOS release 6.7 (Final)\n"
},
"result": {
"distribution_release": "Final",
"distribution": "CentOS",
"distribution_major_version": "6",
"os_family": "RedHat",
"distribution_version": "6.7"
}
},
{
"name": "RedHat 7.2",
"platform.dist": [
"redhat",
"7.2",
"Maipo"
],
"input": {
"/etc/redhat-release": "Red Hat Enterprise Linux Server release 7.2 (Maipo)\n",
"/etc/os-release": (
"NAME=\"Red Hat Enterprise Linux Server\"\nVERSION=\"7.2 (Maipo)\"\nID=\"rhel\"\nID_LIKE=\"fedora\"\nVERSION_ID=\"7.2\"\n"
"PRETTY_NAME=\"Red Hat Enterprise Linux Server 7.2 (Maipo)\"\nANSI_COLOR=\"0;31\"\n"
"CPE_NAME=\"cpe:/o:redhat:enterprise_linux:7.2:GA:server\"\nHOME_URL=\"https://www.redhat.com/\"\n"
"BUG_REPORT_URL=\"https://bugzilla.redhat.com/\"\n\nREDHAT_BUGZILLA_PRODUCT=\"Red Hat Enterprise Linux 7\"\n"
"REDHAT_BUGZILLA_PRODUCT_VERSION=7.2\nREDHAT_SUPPORT_PRODUCT=\"Red Hat Enterprise Linux\"\n"
"REDHAT_SUPPORT_PRODUCT_VERSION=\"7.2\"\n"
),
"/etc/system-release": "Red Hat Enterprise Linux Server release 7.2 (Maipo)\n"
},
"result": {
"distribution_release": "Maipo",
"distribution": "RedHat",
"distribution_major_version": "7",
"os_family": "RedHat",
"distribution_version": "7.2"
}
},
{
"name": "RedHat 6.7",
"platform.dist": [
"redhat",
"6.7",
"Santiago"
],
"input": {
"/etc/redhat-release": "Red Hat Enterprise Linux Server release 6.7 (Santiago)\n",
"/etc/lsb-release": (
"LSB_VERSION=base-4.0-amd64:base-4.0-noarch:core-4.0-amd64:core-4.0-noarch:graphics-4.0-amd64:graphics-4.0-noarch:"
"printing-4.0-amd64:printing-4.0-noarch\n"
),
"/etc/system-release": "Red Hat Enterprise Linux Server release 6.7 (Santiago)\n"
},
"result": {
"distribution_release": "Santiago",
"distribution": "RedHat",
"distribution_major_version": "6",
"os_family": "RedHat",
"distribution_version": "6.7"
}
},
{
"name": "Virtuozzo 7.3",
"platform.dist": [
"redhat",
"7.3",
""
],
"input": {
"/etc/redhat-release": "Virtuozzo Linux release 7.3\n",
"/etc/os-release": (
"NAME=\"Virtuozzo\"\n"
"VERSION=\"7.0.3\"\n"
"ID=\"virtuozzo\"\n"
"ID_LIKE=\"rhel fedora\"\n"
"VERSION_ID=\"7\"\n"
"PRETTY_NAME=\"Virtuozzo release 7.0.3\"\n"
"ANSI_COLOR=\"0;31\"\n"
"CPE_NAME=\"cpe:/o:virtuozzoproject:vz:7\"\n"
"HOME_URL=\"http://www.virtuozzo.com\"\n"
"BUG_REPORT_URL=\"https://bugs.openvz.org/\"\n"
),
"/etc/system-release": "Virtuozzo release 7.0.3 (640)\n"
},
"result": {
"distribution_release": "NA",
"distribution": "Virtuozzo",
"distribution_major_version": "7",
"os_family": "RedHat",
"distribution_version": "7.3"
}
},
{
"name": "openSUSE Leap 42.1",
"input": {
"/etc/os-release": """
NAME="openSUSE Leap"
VERSION="42.1"
VERSION_ID="42.1"
PRETTY_NAME="openSUSE Leap 42.1 (x86_64)"
ID=opensuse
ANSI_COLOR="0;32"
CPE_NAME="cpe:/o:opensuse:opensuse:42.1"
BUG_REPORT_URL="https://bugs.opensuse.org"
HOME_URL="https://opensuse.org/"
ID_LIKE="suse"
""",
"/etc/SuSE-release": """
openSUSE 42.1 (x86_64)
VERSION = 42.1
CODENAME = Malachite
# /etc/SuSE-release is deprecated and will be removed in the future, use /etc/os-release instead
"""
},
"platform.dist": ['SuSE', '42.1', 'x86_64'],
"result": {
"distribution": "openSUSE Leap",
"distribution_major_version": "42",
"distribution_release": "1",
"os_family": "Suse",
"distribution_version": "42.1",
}
},
{
'name': 'openSUSE 13.2',
'input': {
'/etc/SuSE-release': """openSUSE 13.2 (x86_64)
VERSION = 13.2
CODENAME = Harlequin
# /etc/SuSE-release is deprecated and will be removed in the future, use /etc/os-release instead
""",
'/etc/os-release': """NAME=openSUSE
VERSION="13.2 (Harlequin)"
VERSION_ID="13.2"
PRETTY_NAME="openSUSE 13.2 (Harlequin) (x86_64)"
ID=opensuse
ANSI_COLOR="0;32"
CPE_NAME="cpe:/o:opensuse:opensuse:13.2"
BUG_REPORT_URL="https://bugs.opensuse.org"
HOME_URL="https://opensuse.org/"
ID_LIKE="suse"
"""
},
'platform.dist': ('SuSE', '13.2', 'x86_64'),
'result': {
'distribution': u'openSUSE',
'distribution_major_version': u'13',
'distribution_release': u'2',
'os_family': u'Suse',
'distribution_version': u'13.2'
}
},
{
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/os-release": (
"NAME=\"openSUSE Tumbleweed\"\n# VERSION=\"20160917\"\nID=opensuse\nID_LIKE=\"suse\"\nVERSION_ID=\"20160917\"\n"
"PRETTY_NAME=\"openSUSE Tumbleweed\"\nANSI_COLOR=\"0;32\"\nCPE_NAME=\"cpe:/o:opensuse:tumbleweed:20160917\"\n"
"BUG_REPORT_URL=\"https://bugs.opensuse.org\"\nHOME_URL=\"https://www.opensuse.org/\"\n"
)
},
"name": "openSUSE Tumbleweed 20160917",
"result": {
"distribution_release": "",
"distribution": "openSUSE Tumbleweed",
"distribution_major_version": "20160917",
"os_family": "Suse",
"distribution_version": "20160917"
}
},
{
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/os-release": (
"NAME=\"openSUSE Leap\"\n# VERSION=\"15.0\"\nID=opensuse-leap\nID_LIKE=\"suse opensuse\"\nVERSION_ID=\"15.0\"\n"
"PRETTY_NAME=\"openSUSE Leap 15.0\"\nANSI_COLOR=\"0;32\"\nCPE_NAME=\"cpe:/o:opensuse:leap:15.0\"\n"
"BUG_REPORT_URL=\"https://bugs.opensuse.org\"\nHOME_URL=\"https://www.opensuse.org/\"\n"
)
},
"name": "openSUSE Leap 15.0",
"result": {
"distribution_release": "0",
"distribution": "openSUSE Leap",
"distribution_major_version": "15",
"os_family": "Suse",
"distribution_version": "15.0"
}
},
{ # see https://github.com/ansible/ansible/issues/14837
"name": "SLES 11.3",
"input": {
"/etc/SuSE-release": """
SUSE Linux Enterprise Server 11 (x86_64)
VERSION = 11
PATCHLEVEL = 3
"""
},
"platform.dist": ['SuSE', '11', 'x86_64'],
"result": {
"distribution": "SLES",
"distribution_major_version": "11",
"distribution_release": "3",
"os_family": "Suse",
"distribution_version": "11.3",
}
},
{ # see https://github.com/ansible/ansible/issues/14837
"name": "SLES 11.4",
"input": {
"/etc/SuSE-release": """
SUSE Linux Enterprise Server 11 (x86_64)
VERSION = 11
PATCHLEVEL = 4
""",
"/etc/os-release": """
NAME="SLES"
VERSION="11.4"
VERSION_ID="11.4"
PRETTY_NAME="SUSE Linux Enterprise Server 11 SP4"
ID="sles"
ANSI_COLOR="0;32"
CPE_NAME="cpe:/o:suse:sles:11:4"
""",
},
"platform.dist": ['SuSE', '11', 'x86_64'],
"result":{
"distribution": "SLES",
"distribution_major_version": "11",
"distribution_release": "4",
"os_family": "Suse",
"distribution_version": "11.4",
}
},
{ # see https://github.com/ansible/ansible/issues/14837
"name": "SLES 12 SP0",
"input": {
"/etc/SuSE-release": """
SUSE Linux Enterprise Server 12 (x86_64)
VERSION = 12
PATCHLEVEL = 0
# This file is deprecated and will be removed in a future service pack or release.
# Please check /etc/os-release for details about this release.
""",
"/etc/os-release": """
NAME="SLES"
VERSION="12"
VERSION_ID="12"
PRETTY_NAME="SUSE Linux Enterprise Server 12"
ID="sles"
ANSI_COLOR="0;32"
CPE_NAME="cpe:/o:suse:sles:12"
""",
},
"platform.dist": ['SuSE', '12', 'x86_64'],
"result": {
"distribution": "SLES",
"distribution_major_version": "12",
"distribution_release": "0",
"os_family": "Suse",
"distribution_version": "12",
}
},
{ # see https://github.com/ansible/ansible/issues/14837
"name": "SLES 12 SP1",
"input": {
"/etc/SuSE-release": """
SUSE Linux Enterprise Server 12 (x86_64)
VERSION = 12
PATCHLEVEL = 0
# This file is deprecated and will be removed in a future service pack or release.
# Please check /etc/os-release for details about this release.
""",
"/etc/os-release": """
NAME="SLES"
VERSION="12-SP1"
VERSION_ID="12.1"
PRETTY_NAME="SUSE Linux Enterprise Server 12 SP1"
ID="sles"
ANSI_COLOR="0;32"
CPE_NAME="cpe:/o:suse:sles:12:sp1"
""",
},
"platform.dist": ['SuSE', '12', 'x86_64'],
"result":{
"distribution": "SLES",
"distribution_major_version": "12",
"distribution_release": "1",
"os_family": "Suse",
"distribution_version": "12.1",
}
},
{
"name": "SLES4SAP 12 SP2",
"input": {
"/etc/SuSE-release": """
SUSE Linux Enterprise Server 12 (x86_64)
VERSION = 12
PATCHLEVEL = 2
# This file is deprecated and will be removed in a future service pack or release.
# Please check /etc/os-release for details about this release.
""",
"/etc/os-release": """
NAME="SLES_SAP"
VERSION="12-SP2"
VERSION_ID="12.2"
PRETTY_NAME="SUSE Linux Enterprise Server for SAP Applications 12 SP2"
ID="sles_sap"
ANSI_COLOR="0;32"
CPE_NAME="cpe:/o:suse:sles_sap:12:sp2"
""",
},
"platform.dist": ['SuSE', '12', 'x86_64'],
"result":{
"distribution": "SLES_SAP",
"distribution_major_version": "12",
"distribution_release": "2",
"os_family": "Suse",
"distribution_version": "12.2",
}
},
{
"name": "SLES4SAP 12 SP3",
"input": {
"/etc/SuSE-release": """
SUSE Linux Enterprise Server 12 (x86_64)
VERSION = 12
PATCHLEVEL = 3
# This file is deprecated and will be removed in a future service pack or release.
# Please check /etc/os-release for details about this release.
""",
"/etc/os-release": """
NAME="SLES"
VERSION="12-SP3"
VERSION_ID="12.3"
PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3"
ID="sles"
ANSI_COLOR="0;32"
CPE_NAME="cpe:/o:suse:sles_sap:12:sp3"
""",
},
"platform.dist": ['SuSE', '12', 'x86_64'],
"result":{
"distribution": "SLES_SAP",
"distribution_major_version": "12",
"distribution_release": "3",
"os_family": "Suse",
"distribution_version": "12.3",
}
},
{
"name": "Debian stretch/sid",
"input": {
"/etc/os-release": """
PRETTY_NAME="Debian GNU/Linux stretch/sid"
NAME="Debian GNU/Linux"
ID=debian
HOME_URL="https://www.debian.org/"
SUPPORT_URL="https://www.debian.org/support"
BUG_REPORT_URL="https://bugs.debian.org/"
""",
"/etc/debian_version": """
stretch/sid
""",
},
"platform.dist": ('debian', 'stretch/sid', ''),
"result": {
"distribution": "Debian",
"distribution_major_version": "stretch/sid",
"distribution_release": "NA",
"os_family": "Debian",
"distribution_version": "stretch/sid",
}
},
{
'name': "Debian 7.9",
'input': {
'/etc/os-release': """PRETTY_NAME="Debian GNU/Linux 7 (wheezy)"
NAME="Debian GNU/Linux"
VERSION_ID="7"
VERSION="7 (wheezy)"
ID=debian
ANSI_COLOR="1;31"
HOME_URL="http://www.debian.org/"
SUPPORT_URL="http://www.debian.org/support/"
BUG_REPORT_URL="http://bugs.debian.org/"
"""
},
'platform.dist': ('debian', '7.9', ''),
'result': {
'distribution': u'Debian',
'distribution_major_version': u'7',
'distribution_release': u'wheezy',
"os_family": "Debian",
'distribution_version': u'7.9'
}
},
{
'name': "SteamOS 2.0",
'input': {
'/etc/os-release': """PRETTY_NAME="SteamOS GNU/Linux 2.0 (brewmaster)"
NAME="SteamOS GNU/Linux"
VERSION_ID="2"
VERSION="2 (brewmaster)"
ID=steamos
ID_LIKE=debian
HOME_URL="http://www.steampowered.com/"
SUPPORT_URL="http://support.steampowered.com/"
BUG_REPORT_URL="http://support.steampowered.com/"
""",
'/etc/lsb-release': """DISTRIB_ID=SteamOS
DISTRIB_RELEASE=2.0
DISTRIB_CODENAME=brewmaster
DISTRIB_DESCRIPTION="SteamOS 2.0"
"""
},
'platform.dist': ('Steamos', '2.0', 'brewmaster'),
'result': {
'distribution': u'SteamOS',
'distribution_major_version': u'2',
'distribution_release': u'brewmaster',
"os_family": "Debian",
'distribution_version': u'2.0'
}
},
{
'name': "Devuan",
'input': {
'/etc/os-release': """PRETTY_NAME="Devuan GNU/Linux 1 (jessie)"
NAME="Devuan GNU/Linux"
VERSION_ID="1"
VERSION="1 (jessie)"
ID=devuan
HOME_URL="http://www.devuan.org/"
SUPPORT_URL="http://www.devuan.org/support/"
BUG_REPORT_URL="https://bugs.devuan.org/"
"""
},
'platform.dist': ('', '', ''),
'result': {
'distribution': u'Devuan',
'distribution_major_version': u'1',
'distribution_release': u'jessie',
'os_family': 'Debian',
'distribution_version': u'1'
}
},
{
'name': "Devuan",
'input': {
'/etc/os-release': """PRETTY_NAME="Devuan GNU/Linux ascii"
NAME="Devuan GNU/Linux"
ID=devuan
HOME_URL="https://www.devuan.org/"
SUPPORT_URL="https://devuan.org/os/community"
BUG_REPORT_URL="https://bugs.devuan.org/"
"""
},
'platform.dist': ('', '', ''),
'result': {
'distribution': u'Devuan',
'distribution_major_version': u'NA',
'distribution_release': u'ascii',
'os_family': 'Debian',
'distribution_version': u'NA'
}
},
{
"platform.dist": [
"Ubuntu",
"16.04",
"xenial"
],
"input": {
"/etc/os-release": (
"NAME=\"Ubuntu\"\nVERSION=\"16.04 LTS (Xenial Xerus)\"\nID=ubuntu\nID_LIKE=debian\nPRETTY_NAME=\"Ubuntu 16.04 LTS\"\n"
"VERSION_ID=\"16.04\"\nHOME_URL=\"http://www.ubuntu.com/\"\nSUPPORT_URL=\"http://help.ubuntu.com/\"\n"
"BUG_REPORT_URL=\"http://bugs.launchpad.net/ubuntu/\"\nUBUNTU_CODENAME=xenial\n"
),
"/etc/lsb-release": "DISTRIB_ID=Ubuntu\nDISTRIB_RELEASE=16.04\nDISTRIB_CODENAME=xenial\nDISTRIB_DESCRIPTION=\"Ubuntu 16.04 LTS\"\n"
},
"name": "Ubuntu 16.04",
"result": {
"distribution_release": "xenial",
"distribution": "Ubuntu",
"distribution_major_version": "16",
"os_family": "Debian",
"distribution_version": "16.04"
}
},
{
'name': "Ubuntu 10.04 guess",
'input':
{
'/etc/lsb-release': """DISTRIB_ID=Ubuntu
DISTRIB_RELEASE=10.04
DISTRIB_CODENAME=lucid
DISTRIB_DESCRIPTION="Ubuntu 10.04.4 LTS
"""
},
'platform.dist': ('Ubuntu', '10.04', 'lucid'),
'result':
{
'distribution': u'Ubuntu',
'distribution_major_version': u'10',
'distribution_release': u'lucid',
"os_family": "Debian",
'distribution_version': u'10.04'
}
},
{
'name': "Ubuntu 14.04",
'input': {
'/etc/lsb-release': """DISTRIB_ID=Ubuntu
DISTRIB_RELEASE=14.04
DISTRIB_CODENAME=trusty
DISTRIB_DESCRIPTION="Ubuntu 14.04.4 LTS"
""",
'/etc/os-release': """NAME="Ubuntu"
VERSION="14.04.4 LTS, Trusty Tahr"
ID=ubuntu
ID_LIKE=debian
PRETTY_NAME="Ubuntu 14.04.4 LTS"
VERSION_ID="14.04"
HOME_URL="http://www.ubuntu.com/"
SUPPORT_URL="http://help.ubuntu.com/"
BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"
"""
},
'platform.dist': ('Ubuntu', '14.04', 'trusty'),
'result': {
'distribution': u'Ubuntu',
'distribution_major_version': u'14',
'distribution_release': u'trusty',
"os_family": "Debian",
'distribution_version': u'14.04'
}
},
{
'name': "Ubuntu 12.04",
'input': {'/etc/lsb-release': """DISTRIB_ID=Ubuntu
DISTRIB_RELEASE=12.04
DISTRIB_CODENAME=precise
DISTRIB_DESCRIPTION="Ubuntu 12.04.5 LTS"
""",
'/etc/os-release': """NAME="Ubuntu"
VERSION="12.04.5 LTS, Precise Pangolin"
ID=ubuntu
ID_LIKE=debian
PRETTY_NAME="Ubuntu precise (12.04.5 LTS)"
VERSION_ID="12.04"
"""},
'platform.dist': ('Ubuntu', '12.04', 'precise'),
'result': {'distribution': u'Ubuntu',
'distribution_major_version': u'12',
'distribution_release': u'precise',
"os_family": "Debian",
'distribution_version': u'12.04'}
},
{
'name': 'Kali 2019.1',
'input': {
'/etc/os-release': ("PRETTY_NAME=\"Kali GNU/Linux Rolling\"\nNAME=\"Kali GNU/Linux\"\nID=kali\nVERSION=\"2019.1\"\n"
"VERSION_ID=\"2019.1\"\nID_LIKE=debian\nANSI_COLOR=\"1;31\"\nHOME_URL=\"https://www.kali.org/\"\n"
"SUPPORT_URL=\"https://forums.kali.org/\"\nBUG_REPORT_URL=\"https://bugs.kali.org/\"\n"),
'/etc/lsb-release': ("DISTRIB_ID=Kali\nDISTRIB_RELEASE=kali-rolling\nDISTRIB_CODENAME=kali-rolling\n"
"DISTRIB_DESCRIPTION=\"Kali GNU/Linux Rolling\"\n"),
'/usr/lib/os-release': ("PRETTY_NAME=\"Kali GNU/Linux Rolling\"\nNAME=\"Kali GNU/Linux\"\nID=kali\nVERSION=\"2019.1\"\n"
"VERSION_ID=\"2019.1\"\nID_LIKE=debian\nANSI_COLOR=\"1;31\"\nHOME_URL=\"https://www.kali.org/\"\n"
"SUPPORT_URL=\"https://forums.kali.org/\"\nBUG_REPORT_URL=\"https://bugs.kali.org/\"\n")
},
'platform.dist': [
'kali',
'2019.1',
''
],
'result': {
'distribution': 'Kali',
'distribution_version': '2019.1',
'distribution_release': 'kali-rolling',
'distribution_major_version': '2019',
'os_family': 'Debian'
}
},
{
"platform.dist": [
"neon",
"16.04",
"xenial"
],
"input": {
"/etc/os-release": ("NAME=\"KDE neon\"\nVERSION=\"5.8\"\nID=neon\nID_LIKE=\"ubuntu debian\"\nPRETTY_NAME=\"KDE neon User Edition 5.8\"\n"
"VERSION_ID=\"16.04\"\nHOME_URL=\"http://neon.kde.org/\"\nSUPPORT_URL=\"http://neon.kde.org/\"\n"
"BUG_REPORT_URL=\"http://bugs.kde.org/\"\nVERSION_CODENAME=xenial\nUBUNTU_CODENAME=xenial\n"),
"/etc/lsb-release": "DISTRIB_ID=neon\nDISTRIB_RELEASE=16.04\nDISTRIB_CODENAME=xenial\nDISTRIB_DESCRIPTION=\"KDE neon User Edition 5.8\"\n"
},
"name": "KDE neon 16.04",
"result": {
"distribution_release": "xenial",
"distribution": "KDE neon",
"distribution_major_version": "16",
"os_family": "Debian",
"distribution_version": "16.04"
}
},
{
'name': 'Core OS',
'input': {
'/etc/os-release': """
NAME=CoreOS
ID=coreos
VERSION=976.0.0
VERSION_ID=976.0.0
BUILD_ID=2016-03-03-2324
PRETTY_NAME="CoreOS 976.0.0 (Coeur Rouge)"
ANSI_COLOR="1;32"
HOME_URL="https://coreos.com/"
BUG_REPORT_URL="https://github.com/coreos/bugs/issues"
""",
'/etc/lsb-release': """DISTRIB_ID=CoreOS
DISTRIB_RELEASE=976.0.0
DISTRIB_CODENAME="Coeur Rouge"
DISTRIB_DESCRIPTION="CoreOS 976.0.0 (Coeur Rouge)"
""",
},
'platform.dist': ('', '', ''),
'platform.release': '',
'result': {
"distribution": "CoreOS",
"distribution_major_version": "NA",
"distribution_release": "NA",
"distribution_version": "976.0.0",
}
},
# Solaris and derivatives: https://gist.github.com/natefoo/7af6f3d47bb008669467
{
"name": "SmartOS Global Zone",
"uname_v": "joyent_20160330T234717Z",
"result": {
"distribution_release": "SmartOS 20160330T234717Z x86_64",
"distribution": "SmartOS",
"os_family": "Solaris",
"distribution_version": "joyent_20160330T234717Z"
},
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/release": (" SmartOS 20160330T234717Z x86_64\n"
" Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.\n"
" Copyright 2010-2012 Joyent, Inc. All Rights Reserved.\n"
" Use is subject to license terms.\n\n"
" Built with the following components:\n\n[\n"
" { \"repo\": \"smartos-live\", \"branch\": \"release-20160331\", \"rev\": \"a77c410f2afe6dc9853a915733caec3609cc50f1\", "
"\"commit_date\": \"1459340323\", \"url\": \"git@github.com:joyent/smartos-live.git\" }\n , "
"{ \"repo\": \"illumos-joyent\", \"branch\": \"release-20160331\", \"rev\": \"ab664c06caf06e9ce7586bff956e7709df1e702e\", "
"\"commit_date\": \"1459362533\", \"url\": \"/root/data/jenkins/workspace/smartos/MG/build/illumos-joyent\" }\n"
" , { \"repo\": \"illumos-extra\", \"branch\": \"release-20160331\", "
"\"rev\": \"cc723855bceace3df7860b607c9e3827d47e0ff4\", \"commit_date\": \"1458153188\", "
"\"url\": \"/root/data/jenkins/workspace/smartos/MG/build/illumos-extra\" }\n , "
"{ \"repo\": \"kvm\", \"branch\": \"release-20160331\", \"rev\": \"a8befd521c7e673749c64f118585814009fe4b73\", "
"\"commit_date\": \"1450081968\", \"url\": \"/root/data/jenkins/workspace/smartos/MG/build/illumos-kvm\" }\n , "
"{ \"repo\": \"kvm-cmd\", \"branch\": \"release-20160331\", \"rev\": \"c1a197c8e4582c68739ab08f7e3198b2392c9820\", "
"\"commit_date\": \"1454723558\", \"url\": \"/root/data/jenkins/workspace/smartos/MG/build/illumos-kvm-cmd\" }\n , "
"{ \"repo\": \"mdata-client\", \"branch\": \"release-20160331\", \"rev\": \"58158c44603a3316928975deccc5d10864832770\", "
"\"commit_date\": \"1429917227\", \"url\": \"/root/data/jenkins/workspace/smartos/MG/build/mdata-client\" }\n]\n")
},
"platform.system": "SunOS"
},
{
"name": "SmartOS Zone",
"uname_v": "joyent_20160330T234717Z",
"result": {
"distribution_release": "SmartOS x86_64",
"distribution": "SmartOS",
"os_family": "Solaris",
"distribution_version": "14.3.0"
},
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/release": (" SmartOS x86_64\n Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.\n"
" Copyright 2010-2013 Joyent, Inc. All Rights Reserved.\n Use is subject to license terms.\n"
" See joyent_20141002T182809Z for assembly date and time.\n"),
"/etc/product": "Name: Joyent Instance\nImage: base64 14.3.0\nDocumentation: http://wiki.joyent.com/jpc2/Base+Instance\n"
},
"platform.system": "SunOS"
},
{
"name": "OpenIndiana",
"uname_v": "oi_151a9",
"result": {
"distribution_release": "OpenIndiana Development oi_151.1.9 X86 (powered by illumos)",
"distribution": "OpenIndiana",
"os_family": "Solaris",
"distribution_version": "oi_151a9"
},
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/release": (" OpenIndiana Development oi_151.1.9 X86 (powered by illumos)\n Copyright 2011 Oracle and/or its affiliates. "
"All rights reserved.\n Use is subject to license terms.\n "
"Assembled 17 January 2014\n")
},
"platform.system": "SunOS"
},
{
"name": "OmniOS",
"uname_v": "omnios-10b9c79",
"result": {
"distribution_release": "OmniOS v11 r151012",
"distribution": "OmniOS",
"os_family": "Solaris",
"distribution_version": "r151012"
},
"platform.dist": [
"",
"",
""
],
# "platform.release": 'OmniOS',
"input": {
"/etc/release": (
" OmniOS v11 r151012\n Copyright 2014 OmniTI Computer Consulting, Inc. All rights reserved.\n Use is subject to license terms.\n\n"
)
},
"platform.system": "SunOS"
},
{
"name": "Nexenta 3",
"uname_v": "NexentaOS_134f",
"result": {
"distribution_release": "Open Storage Appliance v3.1.6",
"distribution": "Nexenta",
"os_family": "Solaris",
"distribution_version": "3.1.6"
},
"platform.dist": [
"",
"",
""
],
"platform.release:": "",
"input": {
"/etc/release": (" Open Storage Appliance v3.1.6\n Copyright (c) 2014 Nexenta Systems, Inc. "
"All Rights Reserved.\n Copyright (c) 2011 Oracle. All Rights Reserved.\n "
"Use is subject to license terms.\n")
},
"platform.system": "SunOS"
},
{
"name": "Nexenta 4",
"uname_v": "NexentaOS_4:cd604cd066",
"result": {
"distribution_release": "Open Storage Appliance 4.0.3-FP2",
"distribution": "Nexenta",
"os_family": "Solaris",
"distribution_version": "4.0.3-FP2"
},
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/release": (" Open Storage Appliance 4.0.3-FP2\n Copyright (c) 2014 Nexenta Systems, Inc. "
"All Rights Reserved.\n Copyright (c) 2010 Oracle. All Rights Reserved.\n "
"Use is subject to license terms.\n")
},
"platform.system": "SunOS"
},
{
"name": "Solaris 10",
"uname_v": "Generic_141445-09",
"uname_r": "5.10",
"result": {
"distribution_release": "Solaris 10 10/09 s10x_u8wos_08a X86",
"distribution": "Solaris",
"os_family": "Solaris",
"distribution_major_version": "10",
"distribution_version": "10"
},
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/release": (" Solaris 10 10/09 s10x_u8wos_08a X86\n Copyright 2009 Sun Microsystems, Inc. "
"All Rights Reserved.\n Use is subject to license terms.\n "
"Assembled 16 September 2009\n")
},
"platform.system": "SunOS"
},
{
"name": "Solaris 11",
"uname_v": "11.0",
"uname_r": "5.11",
"result": {
"distribution_release": "Oracle Solaris 11 11/11 X86",
"distribution": "Solaris",
"os_family": "Solaris",
"distribution_major_version": "11",
"distribution_version": "11"
},
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/release": (" Oracle Solaris 11 11/11 X86\n Copyright (c) 1983, 2011, Oracle and/or its affiliates. "
"All rights reserved.\n Assembled 18 October 2011\n")
},
"platform.system": "SunOS"
},
{
"name": "Solaris 11.3",
"uname_r": "5.11",
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/release": (
" Oracle Solaris 11.3 X86\n Copyright (c) 1983, 2018, Oracle and/or its affiliates. "
"All rights reserved.\n Assembled 09 May 2018\n"
)
},
"platform.system": "SunOS",
"result": {
"distribution_release": "Oracle Solaris 11.3 X86",
"distribution": "Solaris",
"os_family": "Solaris",
"distribution_major_version": "11",
"distribution_version": "11.3"
}
},
{
"name": "Solaris 11.4",
"uname_r": "5.11",
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/release": (
" Oracle Solaris 11.4 SPARC\n Copyright (c) 1983, 2018, Oracle and/or its affiliates."
" All rights reserved.\n Assembled 14 September 2018\n"
)
},
"platform.system": "SunOS",
"result": {
"distribution_release": "Oracle Solaris 11.4 SPARC",
"distribution": "Solaris",
"os_family": "Solaris",
"distribution_major_version": "11",
"distribution_version": "11.4"
}
},
{
"name": "Solaris 10",
"uname_r": "5.10",
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/release": (" Oracle Solaris 10 1/13 s10x_u11wos_24a X86\n Copyright (c) 1983, 2013, Oracle and/or its affiliates. "
"All rights reserved.\n Assembled 17 January 2013\n")
},
"platform.system": "SunOS",
"result": {
"distribution_release": "Oracle Solaris 10 1/13 s10x_u11wos_24a X86",
"distribution": "Solaris",
"os_family": "Solaris",
"distribution_major_version": "10",
"distribution_version": "10"
}
},
{
"name": "Fedora 22",
"platform.dist": [
"fedora",
"22",
"Twenty Two"
],
"input": {
"/etc/redhat-release": "Fedora release 22 (Twenty Two)\n",
"/etc/os-release": (
"NAME=Fedora\nVERSION=\"22 (Twenty Two)\"\nID=fedora\nVERSION_ID=22\nPRETTY_NAME=\"Fedora 22 (Twenty Two)\"\n"
"ANSI_COLOR=\"0;34\"\nCPE_NAME=\"cpe:/o:fedoraproject:fedora:22\"\nHOME_URL=\"https://fedoraproject.org/\"\n"
"BUG_REPORT_URL=\"https://bugzilla.redhat.com/\"\nREDHAT_BUGZILLA_PRODUCT=\"Fedora\"\nREDHAT_BUGZILLA_PRODUCT_VERSION=22\n"
"REDHAT_SUPPORT_PRODUCT=\"Fedora\"\nREDHAT_SUPPORT_PRODUCT_VERSION=22\n"
"PRIVACY_POLICY_URL=https://fedoraproject.org/wiki/Legal:PrivacyPolicy\n"
),
"/etc/system-release": "Fedora release 22 (Twenty Two)\n"
},
"result": {
"distribution_release": "Twenty Two",
"distribution": "Fedora",
"distribution_major_version": "22",
"os_family": "RedHat",
"distribution_version": "22"
}
},
{
"platform.dist": [
"fedora",
"25",
"Rawhide"
],
"input": {
"/etc/redhat-release": "Fedora release 25 (Rawhide)\n",
"/etc/os-release": (
"NAME=Fedora\nVERSION=\"25 (Workstation Edition)\"\nID=fedora\nVERSION_ID=25\n"
"PRETTY_NAME=\"Fedora 25 (Workstation Edition)\"\nANSI_COLOR=\"0;34\"\nCPE_NAME=\"cpe:/o:fedoraproject:fedora:25\"\n"
"HOME_URL=\"https://fedoraproject.org/\"\nBUG_REPORT_URL=\"https://bugzilla.redhat.com/\"\n"
"REDHAT_BUGZILLA_PRODUCT=\"Fedora\"\nREDHAT_BUGZILLA_PRODUCT_VERSION=rawhide\nREDHAT_SUPPORT_PRODUCT=\"Fedora\"\n"
"REDHAT_SUPPORT_PRODUCT_VERSION=rawhide\nPRIVACY_POLICY_URL=https://fedoraproject.org/wiki/Legal:PrivacyPolicy\n"
"VARIANT=\"Workstation Edition\"\nVARIANT_ID=workstation\n"
),
"/etc/system-release": "Fedora release 25 (Rawhide)\n"
},
"name": "Fedora 25",
"result": {
"distribution_release": "Rawhide",
"distribution": "Fedora",
"distribution_major_version": "25",
"os_family": "RedHat",
"distribution_version": "25"
}
},
{
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/sourcemage-release": ("Source Mage GNU/Linux x86_64-pc-linux-gnu\nInstalled from tarball using chroot image (Grimoire 0.61-rc) "
"on Thu May 17 17:31:37 UTC 2012\n")
},
"name": "SMGL NA",
"result": {
"distribution_release": "NA",
"distribution": "SMGL",
"distribution_major_version": "NA",
"os_family": "SMGL",
"distribution_version": "NA"
}
},
# ArchLinux with an empty /etc/arch-release and a /etc/os-release with "NAME=Arch Linux"
{
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/os-release": "NAME=\"Arch Linux\"\nPRETTY_NAME=\"Arch Linux\"\nID=arch\nID_LIKE=archlinux\nANSI_COLOR=\"0;36\"\nHOME_URL=\"https://www.archlinux.org/\"\nSUPPORT_URL=\"https://bbs.archlinux.org/\"\nBUG_REPORT_URL=\"https://bugs.archlinux.org/\"\n\n", # noqa
"/etc/arch-release": "",
},
"name": "Arch Linux NA",
"result": {
"distribution_release": "NA",
"distribution": "Archlinux",
"distribution_major_version": "NA",
"os_family": "Archlinux",
"distribution_version": "NA"
}
},
# ClearLinux https://github.com/ansible/ansible/issues/31501#issuecomment-340861535
{
"platform.dist": [
"Clear Linux OS",
"26580",
"clear-linux-os"
],
"input": {
"/etc/os-release": '''
NAME="Clear Linux OS"
VERSION=1
ID=clear-linux-os
ID_LIKE=clear-linux-os
VERSION_ID=26580
PRETTY_NAME="Clear Linux OS"
ANSI_COLOR="1;35"
HOME_URL="https://clearlinux.org"
SUPPORT_URL="https://clearlinux.org"
BUG_REPORT_URL="mailto:dev@lists.clearlinux.org"
PRIVACY_POLICY_URL="http://www.intel.com/privacy"
''',
"/usr/lib/os-release": '''
NAME="Clear Linux OS"
VERSION=1
ID=clear-linux-os
ID_LIKE=clear-linux-os
VERSION_ID=26580
PRETTY_NAME="Clear Linux OS"
ANSI_COLOR="1;35"
HOME_URL="https://clearlinux.org"
SUPPORT_URL="https://clearlinux.org"
BUG_REPORT_URL="mailto:dev@lists.clearlinux.org"
PRIVACY_POLICY_URL="http://www.intel.com/privacy"
'''
},
"name": "ClearLinux 26580",
"result": {
"distribution_release": "clear-linux-os",
"distribution": "Clear Linux OS",
"distribution_major_version": "26580",
"os_family": "ClearLinux",
"distribution_version": "26580"
}
},
# ArchLinux with no /etc/arch-release but with a /etc/os-release with NAME=Arch Linux
# The fact needs to map 'Arch Linux' to 'Archlinux' for compat with 2.3 and earlier facts
{
"platform.dist": [
"",
"",
""
],
"input": {
"/etc/os-release": "NAME=\"Arch Linux\"\nPRETTY_NAME=\"Arch Linux\"\nID=arch\nID_LIKE=archlinux\nANSI_COLOR=\"0;36\"\nHOME_URL=\"https://www.archlinux.org/\"\nSUPPORT_URL=\"https://bbs.archlinux.org/\"\nBUG_REPORT_URL=\"https://bugs.archlinux.org/\"\n\n", # noqa
},
"name": "Arch Linux no arch-release NA",
"result": {
"distribution_release": "NA",
"distribution": "Archlinux",
"distribution_major_version": "NA",
"os_family": "Archlinux",
"distribution_version": "NA"
}
},
{
'name': "Cumulus Linux 3.7.3",
'input': {
'/etc/os-release': """NAME="Cumulus Linux"
VERSION_ID=3.7.3
VERSION="Cumulus Linux 3.7.3"
PRETTY_NAME="Cumulus Linux"
ID=cumulus-linux
ID_LIKE=debian
CPE_NAME=cpe:/o:cumulusnetworks:cumulus_linux:3.7.3
HOME_URL="http://www.cumulusnetworks.com/"
SUPPORT_URL="http://support.cumulusnetworks.com/"
"""
},
'platform.dist': ('debian', '8.11', ''),
'result': {
'distribution': 'Cumulus Linux',
'distribution_major_version': '3',
'distribution_release': 'Cumulus Linux 3.7.3',
'os_family': 'Debian',
'distribution_version': '3.7.3',
}
},
{
'name': "Cumulus Linux 2.5.4",
'input': {
'/etc/os-release': """NAME="Cumulus Linux"
VERSION_ID=2.5.4
VERSION="2.5.4-6dc6e80-201510091936-build"
PRETTY_NAME="Cumulus Linux"
ID=cumulus-linux
ID_LIKE=debian
CPE_NAME=cpe:/o:cumulusnetworks:cumulus_linux:2.5.4-6dc6e80-201510091936-build
HOME_URL="http://www.cumulusnetworks.com/"
SUPPORT_URL="http://support.cumulusnetworks.com/"
"""
},
'platform.dist': ('', '', ''),
'result': {
'distribution': 'Cumulus Linux',
'distribution_major_version': '2',
'distribution_release': '2.5.4-6dc6e80-201510091936-build',
'os_family': 'Debian',
'distribution_version': '2.5.4',
}
},
{
"platform.dist": [
"LinuxMint",
"18.3",
"sylvia",
],
"input": {
"/etc/os-release": "NAME=\"Linux Mint\"\nVERSION=\"18.3 (Sylvia)\"\nID=linuxmint\nID_LIKE=ubuntu\nPRETTY_NAME=\"Linux Mint 18.3\"\nVERSION_ID=\"18.3\"\nHOME_URL=\"http://www.linuxmint.com/\"\nSUPPORT_URL=\"http://forums.linuxmint.com/\"\nBUG_REPORT_URL=\"http://bugs.launchpad.net/linuxmint/\"\nVERSION_CODENAME=sylvia\nUBUNTU_CODENAME=xenial", # noqa
},
"name": "Linux Mint 18.3",
"result": {
"distribution_release": "sylvia",
"distribution": "Linux Mint",
"distribution_major_version": "18",
"os_family": "Debian",
"distribution_version": "18.3"
}
}
]
@pytest.mark.parametrize("stdin, testcase", product([{}], TESTSETS), ids=lambda x: x.get('name'), indirect=['stdin'])
def test_distribution_version(am, mocker, testcase):
"""tests the distribution parsing code of the Facts class
testsets have
* a name (for output/debugging only)
* input files that are faked
* those should be complete and also include "irrelevant" files that might be mistaken as coming from other distributions
* all files that are not listed here are assumed to not exist at all
* the output of ansible.module_utils.distro.linux_distribution() [called platform.dist() for historical reasons]
* results for the ansible variables distribution* and os_family
"""
# prepare some mock functions to get the testdata in
def mock_get_file_content(fname, default=None, strip=True):
"""give fake content if it exists, otherwise pretend the file is empty"""
data = default
if fname in testcase['input']:
# for debugging
print('faked %s for %s' % (fname, testcase['name']))
data = testcase['input'][fname].strip()
if strip and data is not None:
data = data.strip()
return data
def mock_get_uname(am, flags):
if '-v' in flags:
return testcase.get('uname_v', None)
elif '-r' in flags:
return testcase.get('uname_r', None)
else:
return None
def mock_file_exists(fname, allow_empty=False):
if fname not in testcase['input']:
return False
if allow_empty:
return True
return bool(len(testcase['input'][fname]))
def mock_platform_system():
return testcase.get('platform.system', 'Linux')
def mock_platform_release():
return testcase.get('platform.release', '')
def mock_platform_version():
return testcase.get('platform.version', '')
def mock_distro_name():
return testcase['platform.dist'][0]
def mock_distro_version():
return testcase['platform.dist'][1]
def mock_distro_codename():
return testcase['platform.dist'][2]
def mock_open(filename, mode='r'):
if filename in testcase['input']:
file_object = mocker.mock_open(read_data=testcase['input'][filename]).return_value
file_object.__iter__.return_value = testcase['input'][filename].splitlines(True)
else:
file_object = real_open(filename, mode)
return file_object
def mock_os_path_is_file(filename):
if filename in testcase['input']:
return True
return False
mocker.patch('ansible.module_utils.facts.system.distribution.get_file_content', mock_get_file_content)
mocker.patch('ansible.module_utils.facts.system.distribution.get_uname', mock_get_uname)
mocker.patch('ansible.module_utils.facts.system.distribution._file_exists', mock_file_exists)
mocker.patch('ansible.module_utils.distro.name', mock_distro_name)
mocker.patch('ansible.module_utils.distro.id', mock_distro_name)
mocker.patch('ansible.module_utils.distro.version', mock_distro_version)
mocker.patch('ansible.module_utils.distro.codename', mock_distro_codename)
mocker.patch('os.path.isfile', mock_os_path_is_file)
mocker.patch('platform.system', mock_platform_system)
mocker.patch('platform.release', mock_platform_release)
mocker.patch('platform.version', mock_platform_version)
real_open = builtins.open
mocker.patch.object(builtins, 'open', new=mock_open)
# run Facts()
distro_collector = DistributionFactCollector()
generated_facts = distro_collector.collect(am)
# compare with the expected output
# testcase['result'] has a list of variables and values it expects Facts() to set
for key, val in testcase['result'].items():
assert key in generated_facts
msg = 'Comparing value of %s on %s, should: %s, is: %s' %\
(key, testcase['name'], val, generated_facts[key])
assert generated_facts[key] == val, msg
| gpl-3.0 |
ESS-LLP/erpnext | erpnext/domains/healthcare.py | 4 | 1363 | data = {
'desktop_icons': [
'Patient',
'Patient Appointment',
'Patient Encounter',
'Lab Test',
'Healthcare',
'Vital Signs',
'Clinical Procedure',
'Inpatient Record',
'Accounts',
'Buying',
'Stock',
'HR',
'ToDo'
],
'default_portal_role': 'Patient',
'restricted_roles': [
'Healthcare Administrator',
'LabTest Approver',
'Laboratory User',
'Nursing User',
'Physician',
'Patient'
],
'custom_fields': {
'Sales Invoice': [
{
'fieldname': 'patient', 'label': 'Patient', 'fieldtype': 'Link', 'options': 'Patient',
'insert_after': 'naming_series'
},
{
'fieldname': 'patient_name', 'label': 'Patient Name', 'fieldtype': 'Data', 'fetch_from': 'patient.patient_name',
'insert_after': 'patient', 'read_only': True
},
{
'fieldname': 'ref_practitioner', 'label': 'Referring Practitioner', 'fieldtype': 'Link', 'options': 'Healthcare Practitioner',
'insert_after': 'customer'
}
],
'Sales Invoice Item': [
{
'fieldname': 'reference_dt', 'label': 'Reference DocType', 'fieldtype': 'Link', 'options': 'DocType',
'insert_after': 'edit_references'
},
{
'fieldname': 'reference_dn', 'label': 'Reference Name', 'fieldtype': 'Dynamic Link', 'options': 'reference_dt',
'insert_after': 'reference_dt'
}
]
},
'on_setup': 'erpnext.healthcare.setup.setup_healthcare'
}
| gpl-3.0 |
kbc-developers/android_kernel_htc_m7wlj | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
amenonsen/ansible | lib/ansible/modules/cloud/amazon/aws_kms_info.py | 11 | 14953 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_kms_info
short_description: Gather information about AWS KMS keys
description:
- Gather information about AWS KMS keys including tags and grants
- This module was called C(aws_kms_facts) before Ansible 2.9. The usage did not change.
version_added: "2.5"
author: "Will Thames (@willthames)"
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
The filters aren't natively supported by boto3, but are supported to provide similar
functionality to other modules. Standard tag filters (C(tag-key), C(tag-value) and
C(tag:tagName)) are available, as are C(key-id) and C(alias)
pending_deletion:
description: Whether to get full details (tags, grants etc.) of keys pending deletion
default: False
type: bool
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather information about all KMS keys
- aws_kms_info:
# Gather information about all keys with a Name tag
- aws_kms_info:
filters:
tag-key: Name
# Gather information about all keys with a specific name
- aws_kms_info:
filters:
"tag:Name": Example
'''
RETURN = '''
keys:
description: list of keys
type: complex
returned: always
contains:
key_id:
description: ID of key
type: str
returned: always
sample: abcd1234-abcd-1234-5678-ef1234567890
key_arn:
description: ARN of key
type: str
returned: always
sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
key_state:
description: The state of the key
type: str
returned: always
sample: PendingDeletion
key_usage:
description: The cryptographic operations for which you can use the key.
type: str
returned: always
sample: ENCRYPT_DECRYPT
origin:
description:
The source of the key's key material. When this value is C(AWS_KMS),
AWS KMS created the key material. When this value is C(EXTERNAL), the
key material was imported or the CMK lacks key material.
type: str
returned: always
sample: AWS_KMS
aws_account_id:
description: The AWS Account ID that the key belongs to
type: str
returned: always
sample: 1234567890123
creation_date:
description: Date of creation of the key
type: str
returned: always
sample: 2017-04-18T15:12:08.551000+10:00
description:
description: Description of the key
type: str
returned: always
sample: "My Key for Protecting important stuff"
enabled:
description: Whether the key is enabled. True if C(KeyState) is true.
type: str
returned: always
sample: false
aliases:
description: list of aliases associated with the key
type: list
returned: always
sample:
- aws/acm
- aws/ebs
tags:
description: dictionary of tags applied to the key. Empty when access is denied even if there are tags.
type: dict
returned: always
sample:
Name: myKey
Purpose: protecting_stuff
policies:
description: list of policy documents for the keys. Empty when access is denied even if there are policies.
type: list
returned: always
sample:
Version: "2012-10-17"
Id: "auto-ebs-2"
Statement:
- Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS"
Effect: "Allow"
Principal:
AWS: "*"
Action:
- "kms:Encrypt"
- "kms:Decrypt"
- "kms:ReEncrypt*"
- "kms:GenerateDataKey*"
- "kms:CreateGrant"
- "kms:DescribeKey"
Resource: "*"
Condition:
StringEquals:
kms:CallerAccount: "111111111111"
kms:ViaService: "ec2.ap-southeast-2.amazonaws.com"
- Sid: "Allow direct access to key metadata to the account"
Effect: "Allow"
Principal:
AWS: "arn:aws:iam::111111111111:root"
Action:
- "kms:Describe*"
- "kms:Get*"
- "kms:List*"
- "kms:RevokeGrant"
Resource: "*"
grants:
description: list of grants associated with a key
type: complex
returned: always
contains:
constraints:
description: Constraints on the encryption context that the grant allows.
See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) for further details
type: dict
returned: always
sample:
encryption_context_equals:
"aws:lambda:_function_arn": "arn:aws:lambda:ap-southeast-2:012345678912:function:xyz"
creation_date:
description: Date of creation of the grant
type: str
returned: always
sample: 2017-04-18T15:12:08+10:00
grant_id:
description: The unique ID for the grant
type: str
returned: always
sample: abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234
grantee_principal:
description: The principal that receives the grant's permissions
type: str
returned: always
sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz
issuing_account:
description: The AWS account under which the grant was issued
type: str
returned: always
sample: arn:aws:iam::01234567890:root
key_id:
description: The key ARN to which the grant applies.
type: str
returned: always
sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
name:
description: The friendly name that identifies the grant
type: str
returned: always
sample: xyz
operations:
description: The list of operations permitted by the grant
type: list
returned: always
sample:
- Decrypt
- RetireGrant
retiring_principal:
description: The principal that can retire the grant
type: str
returned: always
sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict, HAS_BOTO3
from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict
import traceback
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
# Caching lookup for aliases
_aliases = dict()
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_kms_keys_with_backoff(connection):
paginator = connection.get_paginator('list_keys')
return paginator.paginate().build_full_result()
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_kms_aliases_with_backoff(connection):
paginator = connection.get_paginator('list_aliases')
return paginator.paginate().build_full_result()
def get_kms_aliases_lookup(connection):
if not _aliases:
for alias in get_kms_aliases_with_backoff(connection)['Aliases']:
# Not all aliases are actually associated with a key
if 'TargetKeyId' in alias:
# strip off leading 'alias/' and add it to key's aliases
if alias['TargetKeyId'] in _aliases:
_aliases[alias['TargetKeyId']].append(alias['AliasName'][6:])
else:
_aliases[alias['TargetKeyId']] = [alias['AliasName'][6:]]
return _aliases
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_kms_tags_with_backoff(connection, key_id, **kwargs):
return connection.list_resource_tags(KeyId=key_id, **kwargs)
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_kms_grants_with_backoff(connection, key_id, **kwargs):
params = dict(KeyId=key_id)
if kwargs.get('tokens'):
params['GrantTokens'] = kwargs['tokens']
paginator = connection.get_paginator('list_grants')
return paginator.paginate(**params).build_full_result()
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_kms_metadata_with_backoff(connection, key_id):
return connection.describe_key(KeyId=key_id)
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def list_key_policies_with_backoff(connection, key_id):
paginator = connection.get_paginator('list_key_policies')
return paginator.paginate(KeyId=key_id).build_full_result()
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_key_policy_with_backoff(connection, key_id, policy_name):
return connection.get_key_policy(KeyId=key_id, PolicyName=policy_name)
def get_kms_tags(connection, module, key_id):
# Handle pagination here as list_resource_tags does not have
# a paginator
kwargs = {}
tags = []
more = True
while more:
try:
tag_response = get_kms_tags_with_backoff(connection, key_id, **kwargs)
tags.extend(tag_response['Tags'])
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'AccessDeniedException':
module.fail_json(msg="Failed to obtain key tags",
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
else:
tag_response = {}
if tag_response.get('NextMarker'):
kwargs['Marker'] = tag_response['NextMarker']
else:
more = False
return tags
def get_kms_policies(connection, module, key_id):
    """Return the policy documents attached to *key_id*.

    AccessDeniedException yields an empty list; any other client error
    aborts the module via fail_json.
    """
    try:
        names = list_key_policies_with_backoff(connection, key_id)['PolicyNames']
        documents = []
        for name in names:
            documents.append(get_key_policy_with_backoff(connection, key_id, name)['Policy'])
        return documents
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] != 'AccessDeniedException':
            module.fail_json(msg="Failed to obtain key policies",
                             exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))
        else:
            return []
def key_matches_filter(key, filtr):
    """Return whether *key* satisfies a single (name, value) filter tuple.

    :param key: snake_cased key description dict with at least the
        'key_id', 'tags' (dict) and 'aliases' (list) entries.
    :param filtr: 2-tuple of (filter name, expected value).  Supported
        names: 'key-id', 'tag-key', 'tag-value', 'alias' and
        'tag:<tag name>'.
    :return: True when the key matches; False otherwise (including for
        unknown filter names).
    """
    name, value = filtr
    if name == 'key-id':
        return value == key['key_id']
    if name == 'tag-key':
        return value in key['tags']
    if name == 'tag-value':
        return value in key['tags'].values()
    if name == 'alias':
        return value in key['aliases']
    if name.startswith('tag:'):
        # Use .get() so a key lacking the tag simply fails the filter
        # instead of raising KeyError (bug in the original lookup).
        return key['tags'].get(name[4:]) == value
    return False
def key_matches_filters(key, filters):
    """Return True when *key* satisfies every entry in *filters* (a dict of
    filter-name -> value), or trivially when no filters were supplied."""
    if not filters:
        return True
    return all(key_matches_filter(key, item) for item in filters.items())
def get_key_details(connection, module, key_id, tokens=None):
    """Build a snake_cased description dict for a single KMS key.

    Collects metadata and aliases, and - unless the 'pending_deletion'
    module parameter is set - also grants, tags and policies.  Unexpected
    client errors abort the module via fail_json.

    :param tokens: optional list of KMS grant tokens for the grants call.
    """
    if not tokens:
        tokens = []
    try:
        result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata']
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Failed to obtain key metadata",
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    # Rename 'Arn' so it snake_cases to 'key_arn' in the module output.
    result['KeyArn'] = result.pop('Arn')
    try:
        aliases = get_kms_aliases_lookup(connection)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Failed to obtain aliases",
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    result['aliases'] = aliases.get(result['KeyId'], [])
    # Keys pending deletion only get the basic metadata plus aliases.
    if module.params.get('pending_deletion'):
        return camel_dict_to_snake_dict(result)
    try:
        result['grants'] = get_kms_grants_with_backoff(connection, key_id, tokens=tokens)['Grants']
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Failed to obtain key grants",
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    tags = get_kms_tags(connection, module, key_id)
    result = camel_dict_to_snake_dict(result)
    result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue')
    result['policies'] = get_kms_policies(connection, module, key_id)
    return result
def get_kms_info(connection, module):
    """Return a detail dict (see get_key_details) for every KMS key visible
    to the connection; aborts the module on listing failure."""
    try:
        keys = get_kms_keys_with_backoff(connection)['Keys']
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Failed to obtain keys",
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    return [get_key_details(connection, module, key['KeyId']) for key in keys]
def main():
    """Ansible module entry point: gather KMS key details for the configured
    region and exit with the (client-side) filtered key list."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            filters=dict(type='dict'),
            pending_deletion=dict(type='bool', default=False)
        )
    )
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    # Warn users still invoking the module under its pre-rename name.
    if module._name == 'aws_kms_facts':
        module.deprecate("The 'aws_kms_facts' module has been renamed to 'aws_kms_info'", version='2.13')
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 and botocore are required for this module')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    if region:
        connection = boto3_conn(module, conn_type='client', resource='kms', region=region, endpoint=ec2_url, **aws_connect_params)
    else:
        module.fail_json(msg="region must be specified")
    all_keys = get_kms_info(connection, module)
    # Filters are applied client-side, after fetching every key.
    module.exit_json(keys=[key for key in all_keys if key_matches_filters(key, module.params['filters'])])
if __name__ == '__main__':
    main()
| gpl-3.0 |
cattleprod/samsung-kernel-gt-i9100 | external/webkit/WebKitTools/Scripts/webkitpy/grammar.py | 3 | 2218 | #!/usr/bin/env python
# Copyright (c) 2009, Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
def plural(noun):
    """Naively pluralize *noun*: words ending in 'h' get 'es', others 's'.

    Deliberately simplistic -- just enough for the nouns this package uses.
    """
    return noun + ("es" if re.search("h$", noun) else "s")
def pluralize(noun, count):
    """Return '<count> <noun>', pluralizing the noun unless count is exactly 1."""
    if count != 1:
        # Inlined naive pluralization (same rule as plural() in this module).
        noun += "es" if re.search("h$", noun) else "s"
    return "%d %s" % (count, noun)
def join_with_separators(list_of_strings, separator=', ', last_separator=', and '):
    """Join strings in prose style, e.g. 'a, b, and c'.

    Empty input yields '', a single item is returned unchanged, and the
    final item is attached with *last_separator* instead of *separator*.
    """
    if not list_of_strings:
        return ""
    if len(list_of_strings) == 1:
        return list_of_strings[0]
    head = separator.join(list_of_strings[:-1])
    return head + last_separator + list_of_strings[-1]
| gpl-2.0 |
pombredanne/anitya | anitya/lib/backends/pecl.py | 1 | 3891 | # -*- coding: utf-8 -*-
"""
(c) 2014-2016 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <pingou@pingoured.fr>
Ralph Bean <rbean@redhat.com>
"""
import anitya.lib.xml2dict as xml2dict
from anitya.lib.backends import BaseBackend
from anitya.lib.exceptions import AnityaPluginException
def _get_versions(url):
    ''' Retrieve the version strings advertised by the XML at *url*. '''
    try:
        response = PeclBackend.call_url(url)
    except Exception:  # pragma: no cover
        raise AnityaPluginException('Could not contact %s' % url)
    versions = []
    # Scrape <v>...</v> elements line by line rather than parsing the XML.
    for line in response.text.split('\n'):
        if '<v>' not in line or '</v>' not in line:
            continue
        versions.append(line.split('v>', 2)[1].split('</')[0])
    return versions
class PeclBackend(BaseBackend):
    ''' The custom class for projects hosted on pecl.php.net.
    Versions are read from pecl.php.net's per-package REST "allreleases"
    XML, and the latest uploads come from its RSS feed.
    '''
    name = 'PECL'
    examples = [
        'http://pecl.php.net/package/inotify',
        'http://pecl.php.net/package/gnupg',
    ]
    @classmethod
    def get_version(cls, project):
        ''' Method called to retrieve the latest version of the projects
        provided, project that relies on the backend of this plugin.
        :arg Project project: a :class:`model.Project` object whose backend
            corresponds to the current plugin.
        :return: the latest version found upstream
        :return type: str
        :raise AnityaPluginException: a
            :class:`anitya.lib.exceptions.AnityaPluginException` exception
            when the version cannot be retrieved correctly
        '''
        return cls.get_versions(project)[0]
    @classmethod
    def get_versions(cls, project):
        ''' Method called to retrieve all the versions (that can be found)
        of the projects provided, project that relies on the backend of
        this plugin.
        :arg Project project: a :class:`model.Project` object whose backend
            corresponds to the current plugin.
        :return: a list of all the possible releases found
        :return type: list
        :raise AnityaPluginException: a
            :class:`anitya.lib.exceptions.AnityaPluginException` exception
            when the versions cannot be retrieved correctly
        '''
        url_template = 'https://pecl.php.net/rest/r/%(name)s/allreleases.xml'
        url = url_template % {'name': project.name.lower()}
        versions = []
        try:
            versions = _get_versions(url)
        except AnityaPluginException:
            pass
        # If the dashed name found nothing, retry with dashes converted to
        # underscores before giving up.
        if not versions and '-' in project.name:
            pname = project.name.lower().replace('-', '_')
            url = url_template % {'name': pname}
            versions = _get_versions(url)
        if not versions:
            raise AnityaPluginException(
                'No versions found for %s' % project.name.lower())
        return versions
    @classmethod
    def check_feed(cls):
        ''' Return a generator over the latest 10 uploads to PECL
        by querying an RSS feed.
        '''
        url = 'https://pecl.php.net/feeds/latest.rss'
        try:
            response = cls.call_url(url)
        except Exception:  # pragma: no cover
            raise AnityaPluginException('Could not contact %s' % url)
        try:
            parser = xml2dict.XML2Dict()
            data = parser.fromstring(response.text)
        except Exception:  # pragma: no cover
            raise AnityaPluginException('No XML returned by %s' % url)
        items = data['RDF']['item']
        for entry in items:
            title = entry['title']['value']
            # Feed item titles look like "<package name> <version>".
            name, version = title.rsplit(None, 1)
            homepage = 'http://pecl.php.net/package/%s' % name
            yield name, homepage, cls.name, version
| gpl-2.0 |
lqch14102/lantern | install/linux_x86_32/pt/fteproxy/Crypto/Random/random.py | 125 | 5284 | # -*- coding: utf-8 -*-
#
# Random/random.py : Strong alternative for the standard 'random' module
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""A cryptographically strong version of Python's standard "random" module."""
__revision__ = "$Id$"
__all__ = ['StrongRandom', 'getrandbits', 'randrange', 'randint', 'choice', 'shuffle', 'sample']
from Crypto import Random
import sys
if sys.version_info[0] == 2 and sys.version_info[1] == 1:
from Crypto.Util.py21compat import *
class StrongRandom(object):
    """Re-implementation of part of the stdlib 'random' API, backed by a
    cryptographically strong byte source (Crypto.Random by default).

    NOTE: this file is Python 2 code (long literals, xrange, has_key).
    """
    def __init__(self, rng=None, randfunc=None):
        # Accept either an RNG object exposing .read(n) or a bare
        # randfunc(n) callable -- but not both.
        if randfunc is None and rng is None:
            self._randfunc = None
        elif randfunc is not None and rng is None:
            self._randfunc = randfunc
        elif randfunc is None and rng is not None:
            self._randfunc = rng.read
        else:
            raise ValueError("Cannot specify both 'rng' and 'randfunc'")
    def getrandbits(self, k):
        """Return a python long integer with k random bits."""
        # Lazily initialize the default byte source on first use.
        if self._randfunc is None:
            self._randfunc = Random.new().read
        mask = (1L << k) - 1
        return mask & bytes_to_long(self._randfunc(ceil_div(k, 8)))
    def randrange(self, *args):
        """randrange([start,] stop[, step]):
        Return a randomly-selected element from range(start, stop, step)."""
        if len(args) == 3:
            (start, stop, step) = args
        elif len(args) == 2:
            (start, stop) = args
            step = 1
        elif len(args) == 1:
            (stop,) = args
            start = 0
            step = 1
        else:
            raise TypeError("randrange expected at most 3 arguments, got %d" % (len(args),))
        if (not isinstance(start, (int, long))
            or not isinstance(stop, (int, long))
            or not isinstance(step, (int, long))):
            raise TypeError("randrange requires integer arguments")
        if step == 0:
            raise ValueError("randrange step argument must not be zero")
        num_choices = ceil_div(stop - start, step)
        if num_choices < 0:
            num_choices = 0
        if num_choices < 1:
            raise ValueError("empty range for randrange(%r, %r, %r)" % (start, stop, step))
        # Pick a random number in the range of possible numbers
        # Rejection sampling: redraw until the value is < num_choices so
        # every choice is equally likely (no modulo bias).
        r = num_choices
        while r >= num_choices:
            r = self.getrandbits(size(num_choices))
        return start + (step * r)
    def randint(self, a, b):
        """Return a random integer N such that a <= N <= b."""
        if not isinstance(a, (int, long)) or not isinstance(b, (int, long)):
            raise TypeError("randint requires integer arguments")
        N = self.randrange(a, b+1)
        assert a <= N <= b
        return N
    def choice(self, seq):
        """Return a random element from a (non-empty) sequence.
        If the seqence is empty, raises IndexError.
        """
        if len(seq) == 0:
            raise IndexError("empty sequence")
        return seq[self.randrange(len(seq))]
    def shuffle(self, x):
        """Shuffle the sequence in place."""
        # Make a (copy) of the list of objects we want to shuffle
        items = list(x)
        # Choose a random item (without replacement) until all the items have been
        # chosen.
        for i in xrange(len(x)):
            x[i] = items.pop(self.randrange(len(items)))
    def sample(self, population, k):
        """Return a k-length list of unique elements chosen from the population sequence."""
        num_choices = len(population)
        if k > num_choices:
            raise ValueError("sample larger than population")
        retval = []
        selected = {}  # we emulate a set using a dict here
        for i in xrange(k):
            r = None
            # Redraw until an index not already taken comes up.
            while r is None or selected.has_key(r):
                r = self.randrange(num_choices)
            retval.append(population[r])
            selected[r] = 1
        return retval
# Module-level convenience instance mirroring the stdlib 'random' API.
_r = StrongRandom()
getrandbits = _r.getrandbits
randrange = _r.randrange
randint = _r.randint
choice = _r.choice
shuffle = _r.shuffle
sample = _r.sample
# These are at the bottom to avoid problems with recursive imports
from Crypto.Util.number import ceil_div, bytes_to_long, long_to_bytes, size
# vim:set ts=4 sw=4 sts=4 expandtab:
| apache-2.0 |
yanboliang/spark | examples/src/main/python/ml/lda_example.py | 52 | 1898 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An example demonstrating LDA.
Run with:
bin/spark-submit examples/src/main/python/ml/lda_example.py
"""
from __future__ import print_function
# $example on$
from pyspark.ml.clustering import LDA
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
    spark = SparkSession \
        .builder \
        .appName("LDAExample") \
        .getOrCreate()
    # $example on$
    # Loads data.
    # NOTE(review): path is relative to the working directory (typically
    # SPARK_HOME when launched via bin/spark-submit) -- confirm for your setup.
    dataset = spark.read.format("libsvm").load("data/mllib/sample_lda_libsvm_data.txt")
    # Trains a LDA model.
    lda = LDA(k=10, maxIter=10)
    model = lda.fit(dataset)
    ll = model.logLikelihood(dataset)
    lp = model.logPerplexity(dataset)
    print("The lower bound on the log likelihood of the entire corpus: " + str(ll))
    print("The upper bound on perplexity: " + str(lp))
    # Describe topics.
    topics = model.describeTopics(3)
    print("The topics described by their top-weighted terms:")
    topics.show(truncate=False)
    # Shows the result
    transformed = model.transform(dataset)
    transformed.show(truncate=False)
    # $example off$
    spark.stop()
| apache-2.0 |
jayme-github/headphones | headphones/importer.py | 1 | 40161 | # This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import time
from headphones import logger, helpers, db, mb, lastfm, metacritic
from beets.mediafile import MediaFile
import headphones
blacklisted_special_artist_names = ['[anonymous]', '[data]', '[no artist]',
'[traditional]', '[unknown]', 'Various Artists']
blacklisted_special_artists = ['f731ccc4-e22a-43af-a747-64213329e088',
'33cf029c-63b0-41a0-9855-be2a3665fb3b',
'314e1c25-dde7-4e4d-b2f4-0a7b9f7c56dc',
'eec63d3c-3b81-4ad4-b1e4-7c147d4d2b61',
'9be7f096-97ec-4615-8957-8d40b5dcbc41',
'125ec42a-7229-4250-afc5-e057484327fe',
'89ad4ac3-39f7-470e-963a-56509c546377']
def is_exists(artistid):
    """Return True (and log a notice) if *artistid* is already in the
    artists table, otherwise False."""
    myDB = db.DBConnection()
    rows = myDB.select('SELECT ArtistID, ArtistName from artists WHERE ArtistID=?',
                       [artistid])
    if not any(artistid in row for row in rows):
        return False
    logger.info(rows[0][1] + u" is already in the database. Updating 'have tracks', but not artist information")
    return True
def artistlist_to_mbids(artistlist, forced=False):
    """Resolve artist names to MusicBrainz ids and add/update each in the db.

    :param artistlist: iterable of artist name strings.
    :param forced: True when the request comes from Manage New Artists;
        skips the blacklist/Various Artists check and removes the name
        from the newartists table afterwards.
    """
    for artist in artistlist:
        if not artist and artist != ' ':
            continue
        # If adding artists through Manage New Artists, they're coming through as non-unicode (utf-8?)
        # and screwing everything up
        if not isinstance(artist, unicode):
            try:
                artist = artist.decode('utf-8', 'replace')
            except Exception:
                logger.warn("Unable to convert artist to unicode so cannot do a database lookup")
                continue
        results = mb.findArtist(artist, limit=1)
        if not results:
            logger.info('No results found for: %s' % artist)
            continue
        try:
            artistid = results[0]['id']
        except IndexError:
            logger.info('MusicBrainz query turned up no matches for: %s' % artist)
            continue
        # Check if it's blacklisted/various artists (only check if it's not forced, e.g. through library scan auto-add.)
        # Forced example = Adding an artist from Manage New Artists
        myDB = db.DBConnection()
        if not forced:
            bl_artist = myDB.action('SELECT * FROM blacklist WHERE ArtistID=?',
                                    [artistid]).fetchone()
            if bl_artist or artistid in blacklisted_special_artists:
                logger.info(
                    "Artist ID for '%s' is either blacklisted or Various Artists. To add artist, you must do it manually (Artist ID: %s)" % (
                        artist, artistid))
                continue
        # Add to database if it doesn't exist
        if not is_exists(artistid):
            addArtisttoDB(artistid)
        # Just update the tracks if it does
        else:
            havetracks = len(
                myDB.select('SELECT TrackTitle from tracks WHERE ArtistID=?', [artistid])) + len(
                myDB.select('SELECT TrackTitle from have WHERE ArtistName like ?', [artist]))
            myDB.action('UPDATE artists SET HaveTracks=? WHERE ArtistID=?', [havetracks, artistid])
        # Delete it from the New Artists if the request came from there
        if forced:
            myDB.action('DELETE from newartists WHERE ArtistName=?', [artist])
    # Update the similar artist tag cloud:
    logger.info('Updating artist information from Last.fm')
    try:
        lastfm.getSimilar()
    except Exception as e:
        logger.warn('Failed to update arist information from Last.fm: %s' % e)
def addArtistIDListToDB(artistidlist):
    """Add every artist id in *artistidlist* to the database, one after
    another, so a batch can be processed in a single thread."""
    logger.debug("Importer: Adding artist ids %s" % artistidlist)
    for artist_id in artistidlist:
        addArtisttoDB(artist_id)
def addArtisttoDB(artistid, extrasonly=False, forcefull=False, type="artist"):
    """Add a new artist/series to the database, or refresh an existing one.

    Pulls artist and release-group data from MusicBrainz, rebuilds each
    release group's "hybrid" release, matches owned files from the 'have'
    table into tracks, sets album statuses (Wanted/Skipped/Downloaded),
    refreshes thumbnails and Metacritic data, and finally queues searches
    for newly added wanted albums.

    :param artistid: MusicBrainz id of the artist (or series).
    :param extrasonly: refresh only the non-album extras of an existing artist.
    :param forcefull: force a comprehensive refresh of every release group.
    :param type: "artist" (default) or "series".
    """
    # Putting this here to get around the circular import. We're using this to update thumbnails for artist/albums
    from headphones import cache
    # Can't add various artists - throws an error from MB
    if artistid in blacklisted_special_artists:
        logger.warn('Cannot import blocked special purpose artist with id' + artistid)
        return
    # We'll use this to see if we should update the 'LastUpdated' time stamp
    errors = False
    myDB = db.DBConnection()
    # Delete from blacklist if it's on there
    myDB.action('DELETE from blacklist WHERE ArtistID=?', [artistid])
    # We need the current minimal info in the database instantly
    # so we don't throw a 500 error when we redirect to the artistPage
    controlValueDict = {"ArtistID": artistid}
    # Don't replace a known artist name with an "Artist ID" placeholder
    dbartist = myDB.action('SELECT * FROM artists WHERE ArtistID=?', [artistid]).fetchone()
    # Only modify the Include Extras stuff if it's a new artist. We need it early so we know what to fetch
    if not dbartist:
        newValueDict = {"ArtistName": "Artist ID: %s" % (artistid),
                        "Status": "Loading",
                        "IncludeExtras": headphones.CONFIG.INCLUDE_EXTRAS,
                        "Extras": headphones.CONFIG.EXTRAS}
        if type == "series":
            newValueDict['Type'] = "series"
    else:
        newValueDict = {"Status": "Loading"}
        if dbartist["Type"] == "series":
            type = "series"
    myDB.upsert("artists", newValueDict, controlValueDict)
    if type == "series":
        artist = mb.getSeries(artistid)
    else:
        artist = mb.getArtist(artistid, extrasonly)
    if artist and artist.get('artist_name') in blacklisted_special_artist_names:
        logger.warn('Cannot import blocked special purpose artist: %s' % artist.get('artist_name'))
        myDB.action('DELETE from artists WHERE ArtistID=?', [artistid])
        # in case it's already in the db
        myDB.action('DELETE from albums WHERE ArtistID=?', [artistid])
        myDB.action('DELETE from tracks WHERE ArtistID=?', [artistid])
        return
    if not artist:
        logger.warn("Error fetching artist info. ID: " + artistid)
        if dbartist is None:
            newValueDict = {"ArtistName": "Fetch failed, try refreshing. (%s)" % (artistid),
                            "Status": "Active"}
        else:
            newValueDict = {"Status": "Active"}
        myDB.upsert("artists", newValueDict, controlValueDict)
        return
    if artist['artist_name'].startswith('The '):
        sortname = artist['artist_name'][4:]
    else:
        sortname = artist['artist_name']
    logger.info(u"Now adding/updating: " + artist['artist_name'])
    controlValueDict = {"ArtistID": artistid}
    newValueDict = {"ArtistName": artist['artist_name'],
                    "ArtistSortName": sortname,
                    "DateAdded": helpers.today(),
                    "Status": "Loading"}
    myDB.upsert("artists", newValueDict, controlValueDict)
    # See if we need to grab extras. Artist specific extras take precedence
    # over global option. Global options are set when adding a new artist
    try:
        db_artist = myDB.action('SELECT IncludeExtras, Extras from artists WHERE ArtistID=?',
                                [artistid]).fetchone()
        includeExtras = db_artist['IncludeExtras']
    except IndexError:
        includeExtras = False
    # Clean all references to release group in dB that are no longer referenced
    # from the musicbrainz refresh
    group_list = []
    force_repackage = 0
    # Don't nuke the database if there's a MusicBrainz error
    if len(artist['releasegroups']) != 0:
        for groups in artist['releasegroups']:
            group_list.append(groups['id'])
        if not extrasonly:
            remove_missing_groups_from_albums = myDB.select(
                "SELECT AlbumID FROM albums WHERE ArtistID=?", [artistid])
        else:
            remove_missing_groups_from_albums = myDB.select(
                'SELECT AlbumID FROM albums WHERE ArtistID=? AND Status="Skipped" AND Type!="Album"',
                [artistid])
        for items in remove_missing_groups_from_albums:
            if items['AlbumID'] not in group_list:
                # Remove all from albums/tracks that aren't in release groups
                myDB.action("DELETE FROM albums WHERE AlbumID=?", [items['AlbumID']])
                myDB.action("DELETE FROM allalbums WHERE AlbumID=?", [items['AlbumID']])
                myDB.action("DELETE FROM tracks WHERE AlbumID=?", [items['AlbumID']])
                myDB.action("DELETE FROM alltracks WHERE AlbumID=?", [items['AlbumID']])
                myDB.action('DELETE from releases WHERE ReleaseGroupID=?', [items['AlbumID']])
                logger.info(
                    "[%s] Removing all references to release group %s to reflect MusicBrainz refresh" % (
                        artist['artist_name'], items['AlbumID']))
                if not extrasonly:
                    force_repackage = 1
    else:
        if not extrasonly:
            logger.info(
                "[%s] There was either an error pulling data from MusicBrainz or there might not be any releases for this category" %
                artist['artist_name'])
    # Then search for releases within releasegroups, if releases don't exist, then remove from allalbums/alltracks
    album_searches = []
    for rg in artist['releasegroups']:
        al_title = rg['title']
        today = helpers.today()
        rgid = rg['id']
        skip_log = 0
        # Make a user configurable variable to skip update of albums with release dates older than this date (in days)
        pause_delta = headphones.CONFIG.MB_IGNORE_AGE
        rg_exists = myDB.action("SELECT * from albums WHERE AlbumID=?", [rg['id']]).fetchone()
        if not forcefull:
            new_release_group = False
            try:
                check_release_date = rg_exists['ReleaseDate']
            except TypeError:
                # rg_exists is None -> this release group is new to us.
                check_release_date = None
                new_release_group = True
            if new_release_group:
                logger.info("[%s] Now adding: %s (New Release Group)" % (
                    artist['artist_name'], rg['title']))
                new_releases = mb.get_new_releases(rgid, includeExtras)
            else:
                if check_release_date is None or check_release_date == u"None":
                    logger.info("[%s] Now updating: %s (No Release Date)" % (
                        artist['artist_name'], rg['title']))
                    new_releases = mb.get_new_releases(rgid, includeExtras, True)
                else:
                    # Normalize partial dates (YYYY or YYYY-MM) to a full
                    # YYYY-MM-DD before the age comparison below.
                    if len(check_release_date) == 10:
                        release_date = check_release_date
                    elif len(check_release_date) == 7:
                        release_date = check_release_date + "-31"
                    elif len(check_release_date) == 4:
                        release_date = check_release_date + "-12-31"
                    else:
                        release_date = today
                    if helpers.get_age(today) - helpers.get_age(release_date) < pause_delta:
                        logger.info("[%s] Now updating: %s (Release Date <%s Days)",
                                    artist['artist_name'], rg['title'], pause_delta)
                        new_releases = mb.get_new_releases(rgid, includeExtras, True)
                    else:
                        logger.info("[%s] Skipping: %s (Release Date >%s Days)",
                                    artist['artist_name'], rg['title'], pause_delta)
                        skip_log = 1
                        new_releases = 0
            if force_repackage == 1:
                new_releases = -1
                logger.info('[%s] Forcing repackage of %s (Release Group Removed)',
                            artist['artist_name'], al_title)
            else:
                new_releases = new_releases
        else:
            logger.info("[%s] Now adding/updating: %s (Comprehensive Force)", artist['artist_name'],
                        rg['title'])
            new_releases = mb.get_new_releases(rgid, includeExtras, forcefull)
        if new_releases != 0:
            # Dump existing hybrid release since we're repackaging/replacing it
            myDB.action("DELETE from albums WHERE ReleaseID=?", [rg['id']])
            myDB.action("DELETE from allalbums WHERE ReleaseID=?", [rg['id']])
            myDB.action("DELETE from tracks WHERE ReleaseID=?", [rg['id']])
            myDB.action("DELETE from alltracks WHERE ReleaseID=?", [rg['id']])
            myDB.action('DELETE from releases WHERE ReleaseGroupID=?', [rg['id']])
            # This will be used later to build a hybrid release
            fullreleaselist = []
            # Search for releases within a release group
            find_hybrid_releases = myDB.action("SELECT * from allalbums WHERE AlbumID=?",
                                               [rg['id']])
            # Build the dictionary for the fullreleaselist
            for items in find_hybrid_releases:
                if items['ReleaseID'] != rg[
                    'id']:  # don't include hybrid information, since that's what we're replacing
                    hybrid_release_id = items['ReleaseID']
                    newValueDict = {"ArtistID": items['ArtistID'],
                                    "ArtistName": items['ArtistName'],
                                    "AlbumTitle": items['AlbumTitle'],
                                    "AlbumID": items['AlbumID'],
                                    "AlbumASIN": items['AlbumASIN'],
                                    "ReleaseDate": items['ReleaseDate'],
                                    "Type": items['Type'],
                                    "ReleaseCountry": items['ReleaseCountry'],
                                    "ReleaseFormat": items['ReleaseFormat']
                                    }
                    find_hybrid_tracks = myDB.action("SELECT * from alltracks WHERE ReleaseID=?",
                                                     [hybrid_release_id])
                    totalTracks = 1
                    hybrid_track_array = []
                    for hybrid_tracks in find_hybrid_tracks:
                        hybrid_track_array.append({
                            'number': hybrid_tracks['TrackNumber'],
                            'title': hybrid_tracks['TrackTitle'],
                            'id': hybrid_tracks['TrackID'],
                            # 'url': hybrid_tracks['TrackURL'],
                            'duration': hybrid_tracks['TrackDuration']
                        })
                        totalTracks += 1
                    newValueDict['ReleaseID'] = hybrid_release_id
                    newValueDict['Tracks'] = hybrid_track_array
                    fullreleaselist.append(newValueDict)
            # Basically just do the same thing again for the hybrid release
            # This may end up being called with an empty fullreleaselist
            try:
                hybridrelease = getHybridRelease(fullreleaselist)
                logger.info('[%s] Packaging %s releases into hybrid title' % (
                    artist['artist_name'], rg['title']))
            except Exception as e:
                errors = True
                logger.warn('[%s] Unable to get hybrid release information for %s: %s' % (
                    artist['artist_name'], rg['title'], e))
                continue
            # Use the ReleaseGroupID as the ReleaseID for the hybrid release to differentiate it
            # We can then use the condition WHERE ReleaseID == ReleaseGroupID to select it
            # The hybrid won't have a country or a format
            controlValueDict = {"ReleaseID": rg['id']}
            newValueDict = {"ArtistID": artistid,
                            "ArtistName": artist['artist_name'],
                            "AlbumTitle": rg['title'],
                            "AlbumID": rg['id'],
                            "AlbumASIN": hybridrelease['AlbumASIN'],
                            "ReleaseDate": hybridrelease['ReleaseDate'],
                            "Type": rg['type']
                            }
            myDB.upsert("allalbums", newValueDict, controlValueDict)
            for track in hybridrelease['Tracks']:
                cleanname = helpers.cleanName(
                    artist['artist_name'] + ' ' + rg['title'] + ' ' + track['title'])
                controlValueDict = {"TrackID": track['id'],
                                    "ReleaseID": rg['id']}
                newValueDict = {"ArtistID": artistid,
                                "ArtistName": artist['artist_name'],
                                "AlbumTitle": rg['title'],
                                "AlbumASIN": hybridrelease['AlbumASIN'],
                                "AlbumID": rg['id'],
                                "TrackTitle": track['title'],
                                "TrackDuration": track['duration'],
                                "TrackNumber": track['number'],
                                "CleanName": cleanname
                                }
                match = myDB.action('SELECT Location, BitRate, Format from have WHERE CleanName=?',
                                    [cleanname]).fetchone()
                if not match:
                    match = myDB.action(
                        'SELECT Location, BitRate, Format from have WHERE ArtistName LIKE ? AND AlbumTitle LIKE ? AND TrackTitle LIKE ?',
                        [artist['artist_name'], rg['title'], track['title']]).fetchone()
                # if not match:
                # match = myDB.action('SELECT Location, BitRate, Format from have WHERE TrackID=?', [track['id']]).fetchone()
                if match:
                    newValueDict['Location'] = match['Location']
                    newValueDict['BitRate'] = match['BitRate']
                    newValueDict['Format'] = match['Format']
                    # myDB.action('UPDATE have SET Matched="True" WHERE Location=?', [match['Location']])
                    myDB.action('UPDATE have SET Matched=? WHERE Location=?',
                                (rg['id'], match['Location']))
                myDB.upsert("alltracks", newValueDict, controlValueDict)
            # Delete matched tracks from the have table
            # myDB.action('DELETE from have WHERE Matched="True"')
            # If there's no release in the main albums tables, add the default (hybrid)
            # If there is a release, check the ReleaseID against the AlbumID to see if they differ (user updated)
            # check if the album already exists
            if not rg_exists:
                releaseid = rg['id']
            else:
                releaseid = rg_exists['ReleaseID']
                if not releaseid:
                    releaseid = rg['id']
            album = myDB.action('SELECT * from allalbums WHERE ReleaseID=?', [releaseid]).fetchone()
            controlValueDict = {"AlbumID": rg['id']}
            newValueDict = {"ArtistID": album['ArtistID'],
                            "ArtistName": album['ArtistName'],
                            "AlbumTitle": album['AlbumTitle'],
                            "ReleaseID": album['ReleaseID'],
                            "AlbumASIN": album['AlbumASIN'],
                            "ReleaseDate": album['ReleaseDate'],
                            "Type": album['Type'],
                            "ReleaseCountry": album['ReleaseCountry'],
                            "ReleaseFormat": album['ReleaseFormat']
                            }
            if rg_exists:
                newValueDict['DateAdded'] = rg_exists['DateAdded']
                newValueDict['Status'] = rg_exists['Status']
            else:
                today = helpers.today()
                newValueDict['DateAdded'] = today
                if headphones.CONFIG.AUTOWANT_ALL:
                    newValueDict['Status'] = "Wanted"
                elif album['ReleaseDate'] > today and headphones.CONFIG.AUTOWANT_UPCOMING:
                    newValueDict['Status'] = "Wanted"
                # Sometimes "new" albums are added to musicbrainz after their release date, so let's try to catch these
                # The first test just makes sure we have year-month-day
                elif helpers.get_age(album['ReleaseDate']) and helpers.get_age(
                        today) - helpers.get_age(
                        album['ReleaseDate']) < 21 and headphones.CONFIG.AUTOWANT_UPCOMING:
                    newValueDict['Status'] = "Wanted"
                else:
                    newValueDict['Status'] = "Skipped"
            myDB.upsert("albums", newValueDict, controlValueDict)
            tracks = myDB.action('SELECT * from alltracks WHERE ReleaseID=?',
                                 [releaseid]).fetchall()
            # This is used to see how many tracks you have from an album - to
            # mark it as downloaded. Default is 80%, can be set in config as
            # ALBUM_COMPLETION_PCT
            total_track_count = len(tracks)
            if total_track_count == 0:
                logger.warning("Total track count is zero for Release ID " +
                               "'%s', skipping.", releaseid)
                continue
            for track in tracks:
                controlValueDict = {"TrackID": track['TrackID'],
                                    "AlbumID": rg['id']}
                newValueDict = {"ArtistID": track['ArtistID'],
                                "ArtistName": track['ArtistName'],
                                "AlbumTitle": track['AlbumTitle'],
                                "AlbumASIN": track['AlbumASIN'],
                                "ReleaseID": track['ReleaseID'],
                                "TrackTitle": track['TrackTitle'],
                                "TrackDuration": track['TrackDuration'],
                                "TrackNumber": track['TrackNumber'],
                                "CleanName": track['CleanName'],
                                "Location": track['Location'],
                                "Format": track['Format'],
                                "BitRate": track['BitRate']
                                }
                myDB.upsert("tracks", newValueDict, controlValueDict)
            # Mark albums as downloaded if they have at least 80% (by default, configurable) of the album
            have_track_count = len(
                myDB.select('SELECT * from tracks WHERE AlbumID=? AND Location IS NOT NULL',
                            [rg['id']]))
            marked_as_downloaded = False
            if rg_exists:
                if rg_exists['Status'] == 'Skipped' and (
                        (have_track_count / float(total_track_count)) >= (
                        headphones.CONFIG.ALBUM_COMPLETION_PCT / 100.0)):
                    myDB.action('UPDATE albums SET Status=? WHERE AlbumID=?',
                                ['Downloaded', rg['id']])
                    marked_as_downloaded = True
            else:
                if (have_track_count / float(total_track_count)) >= (
                        headphones.CONFIG.ALBUM_COMPLETION_PCT / 100.0):
                    myDB.action('UPDATE albums SET Status=? WHERE AlbumID=?',
                                ['Downloaded', rg['id']])
                    marked_as_downloaded = True
            logger.info(
                u"[%s] Seeing if we need album art for %s" % (artist['artist_name'], rg['title']))
            cache.getThumb(AlbumID=rg['id'])
            # Start a search for the album if it's new, hasn't been marked as
            # downloaded and autowant_all is selected. This search is deferred,
            # in case the search failes and the rest of the import will halt.
            if not rg_exists and not marked_as_downloaded and headphones.CONFIG.AUTOWANT_ALL:
                album_searches.append(rg['id'])
        else:
            if skip_log == 0:
                logger.info(u"[%s] No new releases, so no changes made to %s" % (
                    artist['artist_name'], rg['title']))
        time.sleep(3)
    finalize_update(artistid, artist['artist_name'], errors)
    logger.info(u"Seeing if we need album art for: %s" % artist['artist_name'])
    cache.getThumb(ArtistID=artistid)
    logger.info(u"Fetching Metacritic reviews for: %s" % artist['artist_name'])
    metacritic.update(artistid, artist['artist_name'], artist['releasegroups'])
    if errors:
        logger.info(
            "[%s] Finished updating artist: %s but with errors, so not marking it as updated in the database" % (
                artist['artist_name'], artist['artist_name']))
    else:
        myDB.action('DELETE FROM newartists WHERE ArtistName = ?', [artist['artist_name']])
        logger.info(u"Updating complete for: %s" % artist['artist_name'])
    # Start searching for newly added albums
    if album_searches:
        from headphones import searcher
        logger.info("Start searching for %d albums.", len(album_searches))
        for album_search in album_searches:
            searcher.searchforalbum(albumid=album_search)
def finalize_update(artistid, artistname, errors=False):
    """Refresh an artist row's aggregate columns (latest album, track counts).

    Kept as its own function so the same refresh can run when deleting
    extras.  When ``errors`` is True the LastUpdated timestamp is left
    untouched so the artist is retried on the next update pass.
    """
    conn = db.DBConnection()

    # Newest album (by release date) determines LatestAlbum/ReleaseDate/AlbumID.
    newest = conn.action(
        'SELECT AlbumTitle, ReleaseDate, AlbumID from albums WHERE ArtistID=? order by ReleaseDate DESC',
        [artistid]).fetchone()

    total = len(conn.select(
        'SELECT TrackTitle from tracks WHERE ArtistID=? AND AlbumID IN (SELECT AlbumID FROM albums WHERE Status != "Ignored")',
        [artistid]))

    # Owned tracks = matched library tracks plus unmatched files that only
    # matched by artist name.
    have = len(
        conn.select('SELECT TrackTitle from tracks WHERE ArtistID=? AND Location IS NOT NULL',
                    [artistid])) + len(
        conn.select('SELECT TrackTitle from have WHERE ArtistName like ? AND Matched = "Failed"',
                    [artistname]))

    values = {"Status": "Active",
              "TotalTracks": total,
              "HaveTracks": have}
    if newest:
        values["LatestAlbum"] = newest['AlbumTitle']
        values["ReleaseDate"] = newest['ReleaseDate']
        values["AlbumID"] = newest['AlbumID']
    if not errors:
        values['LastUpdated'] = helpers.now()

    conn.upsert("artists", values, {"ArtistID": artistid})
def addReleaseById(rid, rgid=None):
    """Add a single release (album + tracks) to the library by MusicBrainz id.

    ``rid`` is the MusicBrainz ReleaseID.  ``rgid`` may optionally carry the
    release-group id when the caller (e.g. search results) already knows it;
    in that case a placeholder 'Loading' album row is written up front so the
    UI can show progress, and is resolved or cleaned up below.
    """
    myDB = db.DBConnection()

    # Create minimum info upfront if added from searchresults
    status = ''
    if rgid:
        dbalbum = myDB.select("SELECT * from albums WHERE AlbumID=?", [rgid])
        if not dbalbum:
            status = 'Loading'
            controlValueDict = {"AlbumID": rgid}
            newValueDict = {"AlbumTitle": rgid,
                            "ArtistName": status,
                            "Status": status}
            myDB.upsert("albums", newValueDict, controlValueDict)
            time.sleep(1)

    # NOTE(review): rgid is reset here, so if the MB lookup below fails before
    # rgid is re-resolved, the 'Loading' placeholder created above is deleted
    # with AlbumID=None and therefore never removed -- confirm intent.
    rgid = None
    artistid = None
    release_dict = None

    # Check the local releases cache first to avoid hammering MusicBrainz.
    results = myDB.select(
        "SELECT albums.ArtistID, releases.ReleaseGroupID from releases, albums WHERE releases.ReleaseID=? and releases.ReleaseGroupID=albums.AlbumID LIMIT 1",
        [rid])

    for result in results:
        rgid = result['ReleaseGroupID']
        artistid = result['ArtistID']
        logger.debug(
            "Found a cached releaseid : releasegroupid relationship: " + rid + " : " + rgid)

    if not rgid:
        # didn't find it in the cache, get the information from MB
        logger.debug(
            "Didn't find releaseID " + rid + " in the cache. Looking up its ReleaseGroupID")
        try:
            release_dict = mb.getRelease(rid)
        except Exception as e:
            logger.info('Unable to get release information for Release %s: %s', rid, e)
            if status == 'Loading':
                myDB.action("DELETE FROM albums WHERE AlbumID=?", [rgid])
            return
        if not release_dict:
            logger.info('Unable to get release information for Release %s: no dict', rid)
            if status == 'Loading':
                myDB.action("DELETE FROM albums WHERE AlbumID=?", [rgid])
            return

        rgid = release_dict['rgid']
        artistid = release_dict['artist_id']

    # we don't want to make more calls to MB here unless we have to, could be happening quite a lot
    rg_exists = myDB.select("SELECT * from albums WHERE AlbumID=?", [rgid])

    # make sure the artist exists since I don't know what happens later if it doesn't
    artist_exists = myDB.select("SELECT * from artists WHERE ArtistID=?", [artistid])

    if not artist_exists and release_dict:
        # Drop a leading "The " for the sort name.
        if release_dict['artist_name'].startswith('The '):
            sortname = release_dict['artist_name'][4:]
        else:
            sortname = release_dict['artist_name']

        logger.info(
            u"Now manually adding: " + release_dict['artist_name'] + " - with status Paused")
        controlValueDict = {"ArtistID": release_dict['artist_id']}
        newValueDict = {"ArtistName": release_dict['artist_name'],
                        "ArtistSortName": sortname,
                        "DateAdded": helpers.today(),
                        "Status": "Paused"}

        if headphones.CONFIG.INCLUDE_EXTRAS:
            newValueDict['IncludeExtras'] = 1
            newValueDict['Extras'] = headphones.CONFIG.EXTRAS

        if 'title' in release_dict:
            newValueDict['LatestAlbum'] = release_dict['title']
        elif 'rg_title' in release_dict:
            newValueDict['LatestAlbum'] = release_dict['rg_title']
        if 'date' in release_dict:
            newValueDict['ReleaseDate'] = release_dict['date']
        if rgid:
            newValueDict['AlbumID'] = rgid

        myDB.upsert("artists", newValueDict, controlValueDict)

    elif not artist_exists and not release_dict:
        logger.error(
            "Artist does not exist in the database and did not get a valid response from MB. Skipping release.")
        if status == 'Loading':
            myDB.action("DELETE FROM albums WHERE AlbumID=?", [rgid])
        return

    if not rg_exists and release_dict or status == 'Loading' and release_dict:  # it should never be the case that we have an rg and not the artist
        # but if it is this will fail
        logger.info(u"Now adding-by-id album (" + release_dict['title'] + ") from id: " + rgid)
        controlValueDict = {"AlbumID": rgid}

        if status != 'Loading':
            status = 'Wanted'

        # NOTE(review): "ReleaseID" appears twice in this literal; the second
        # entry (rid) silently overwrites the first (rgid).  Confirm which
        # value is intended and drop the other key.
        newValueDict = {"ArtistID": release_dict['artist_id'],
                        "ReleaseID": rgid,
                        "ArtistName": release_dict['artist_name'],
                        "AlbumTitle": release_dict['title'] if 'title' in release_dict else
                        release_dict['rg_title'],
                        "AlbumASIN": release_dict['asin'],
                        "ReleaseDate": release_dict['date'],
                        "DateAdded": helpers.today(),
                        "Status": status,
                        "Type": release_dict['rg_type'],
                        "ReleaseID": rid
                        }
        myDB.upsert("albums", newValueDict, controlValueDict)

        # keep a local cache of these so that external programs that are adding releasesByID don't hammer MB
        myDB.action('INSERT INTO releases VALUES( ?, ?)', [rid, release_dict['rgid']])

        for track in release_dict['tracks']:
            cleanname = helpers.cleanName(
                release_dict['artist_name'] + ' ' + release_dict['rg_title'] + ' ' + track['title'])

            controlValueDict = {"TrackID": track['id'],
                                "AlbumID": rgid}
            newValueDict = {"ArtistID": release_dict['artist_id'],
                            "ArtistName": release_dict['artist_name'],
                            "AlbumTitle": release_dict['rg_title'],
                            "AlbumASIN": release_dict['asin'],
                            "TrackTitle": track['title'],
                            "TrackDuration": track['duration'],
                            "TrackNumber": track['number'],
                            "CleanName": cleanname
                            }

            # Try to pair the track with an already-scanned file: first by
            # normalized name, then by artist/album/title.
            match = myDB.action(
                'SELECT Location, BitRate, Format, Matched from have WHERE CleanName=?',
                [cleanname]).fetchone()

            if not match:
                match = myDB.action(
                    'SELECT Location, BitRate, Format, Matched from have WHERE ArtistName LIKE ? AND AlbumTitle LIKE ? AND TrackTitle LIKE ?',
                    [release_dict['artist_name'], release_dict['rg_title'],
                     track['title']]).fetchone()
            # if not match:
            # match = myDB.action('SELECT Location, BitRate, Format from have WHERE TrackID=?', [track['id']]).fetchone()
            if match:
                newValueDict['Location'] = match['Location']
                newValueDict['BitRate'] = match['BitRate']
                newValueDict['Format'] = match['Format']
                # myDB.action('DELETE from have WHERE Location=?', [match['Location']])

                # If the album has been scanned before adding the release it will be unmatched, update to matched
                if match['Matched'] == 'Failed':
                    myDB.action('UPDATE have SET Matched=? WHERE Location=?',
                                (release_dict['rgid'], match['Location']))

            myDB.upsert("tracks", newValueDict, controlValueDict)

        # Reset status
        if status == 'Loading':
            controlValueDict = {"AlbumID": rgid}
            if headphones.CONFIG.AUTOWANT_MANUALLY_ADDED:
                newValueDict = {"Status": "Wanted"}
            else:
                newValueDict = {"Status": "Skipped"}
            myDB.upsert("albums", newValueDict, controlValueDict)

        # Start a search for the album
        if headphones.CONFIG.AUTOWANT_MANUALLY_ADDED:
            import searcher
            searcher.searchforalbum(rgid, False)
    elif not rg_exists and not release_dict:
        logger.error(
            "ReleaseGroup does not exist in the database and did not get a valid response from MB. Skipping release.")
        if status == 'Loading':
            myDB.action("DELETE FROM albums WHERE AlbumID=?", [rgid])
        return
    else:
        logger.info('Release ' + str(rid) + " already exists in the database!")
def updateFormat():
    """Backfill the Format column for files that have a Location but no
    recorded media format, in both the library tracks and the 'have' table.
    """
    myDB = db.DBConnection()
    # The two original loops were identical except for the table name;
    # deduplicated into one helper.
    _update_media_format(myDB, 'tracks')
    _update_media_format(myDB, 'have')


def _update_media_format(myDB, table):
    """Probe each format-less file in *table* with MediaFile and persist the
    detected format.  Unreadable files are logged and skipped.

    ``table`` is always one of our own literals ('tracks'/'have'), so
    interpolating it into the SQL is safe.
    """
    rows = myDB.select(
        'SELECT * from %s WHERE Location IS NOT NULL and Format IS NULL' % table)
    if len(rows) > 0:
        logger.info('Finding media format for %s files' % len(rows))
        for track in rows:
            try:
                f = MediaFile(track['Location'])
            except Exception as e:
                # Keep going: one corrupt file must not abort the whole scan.
                logger.info("Exception from MediaFile for: " + track['Location'] + " : " + str(e))
                continue
            controlValueDict = {"TrackID": track['TrackID']}
            newValueDict = {"Format": f.format}
            myDB.upsert(table, newValueDict, controlValueDict)
        logger.info('Finished finding media format for %s files' % len(rows))
def getHybridRelease(fullreleaselist):
    """
    Pick the best release from a release group: earliest release date overall,
    plus the track list / ASIN of the highest-ranked release (prefer having an
    ASIN, then country, then format, then track count closest to the average).

    Raises ValueError on an empty input list.
    """
    if not fullreleaselist:
        raise ValueError("Empty fullreleaselist")

    # Lower rank value = more preferred; unknown values fall back to 3.
    format_rank = {
        '2xVinyl': '2',
        'Vinyl': '2',
        'CD': '0',
        'Cassette': '3',
        '2xCD': '1',
        'Digital Media': '0'
    }
    country_rank = {
        'US': '0',
        'GB': '1',
        'JP': '2',
    }

    candidates = []
    for release in fullreleaselist:
        try:
            fmt = int(format_rank[release['Format']])
        except (ValueError, KeyError):
            fmt = 3
        try:
            origin = int(country_rank[release['Country']])
        except (ValueError, KeyError):
            origin = 3
        candidates.append({
            'hasasin': bool(release['AlbumASIN']),
            'asin': release['AlbumASIN'],
            'trackscount': len(release['Tracks']),
            'releaseid': release['ReleaseID'],
            'releasedate': release['ReleaseDate'],
            'format': fmt,
            'country': origin,
            'tracks': release['Tracks'],
        })

    def sortable_date(value):
        # Dates missing month and/or day must sort after full dates; a None
        # date returns 'None' (sorts last), matching pre-NGS behaviour.
        if value is None:
            return 'None'
        dashes = value.count('-')
        if dashes == 2:
            return value
        if dashes == 1:
            return value + '32'
        return value + '13-32'

    candidates.sort(key=lambda item: sortable_date(item['releasedate']))

    mean_tracks = sum(item['trackscount'] for item in candidates) / float(len(candidates))
    for item in candidates:
        item['trackscount_delta'] = abs(mean_tracks - item['trackscount'])

    ranked = helpers.multikeysort(candidates,
                                  ['-hasasin', 'country', 'format', 'trackscount_delta'])

    return {'ReleaseDate': candidates[0]['releasedate'],
            'Tracks': ranked[0]['tracks'],
            'AlbumASIN': ranked[0]['asin']}
| gpl-3.0 |
Itxaka/st2 | st2common/tests/unit/test_action_alias_utils.py | 4 | 13704 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the 'License'); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest2 import TestCase
from st2common.exceptions import content
from st2common.models.utils.action_alias_utils import DefaultParser, StringValueParser
from st2common.models.utils.action_alias_utils import JsonValueParser, ActionAliasFormatParser
from st2common.exceptions.content import ParseException
class TestDefaultParser(TestCase):
    """Tests for DefaultParser: extraction of plain whitespace-delimited values."""

    def testDefaultParsing(self):
        stream = 'some meaningful value1 something else skippable value2 still more skip.'
        cases = (
            ('some meaningful ', 'value1'),
            ('some meaningful value1 something else skippable ', 'value2'),
        )
        for prefix, expected in cases:
            offset = len(prefix)
            self.assertTrue(DefaultParser.is_applicable(stream[offset]), 'Should be parsable.')
            _, parsed, _ = DefaultParser.parse(offset, stream)
            self.assertEqual(parsed, expected)

    def testEndStringParsing(self):
        # A value that terminates the stream parses the same way.
        stream = 'some meaningful value1'
        offset = len('some meaningful ')
        self.assertTrue(DefaultParser.is_applicable(stream[offset]), 'Should be parsable.')
        _, parsed, _ = DefaultParser.parse(offset, stream)
        self.assertEqual(parsed, 'value1')
class TestStringValueParser(TestCase):
    """Tests for StringValueParser: extraction of double-quoted values."""

    def testStringParsing(self):
        # Quoted values mid-stream; quotes are stripped from the result.
        stream = 'some meaningful "spaced value1" something else skippable "double spaced value2"' \
                 'still more skip.'
        start = len('some meaningful ')
        self.assertTrue(StringValueParser.is_applicable(stream[start]), 'Should be parsable.')
        _, value, _ = StringValueParser.parse(start, stream)
        self.assertEqual(value, 'spaced value1')
        start = len('some meaningful "spaced value1" something else skippable ')
        self.assertTrue(StringValueParser.is_applicable(stream[start]), 'Should be parsable.')
        _, value, _ = StringValueParser.parse(start, stream)
        self.assertEqual(value, 'double spaced value2')
        # A non-quote position is not applicable for this parser.
        start = len(stream) - 2
        self.assertFalse(StringValueParser.is_applicable(stream[start]), 'Should not be parsable.')

    def testEndStringParsing(self):
        # A quoted value terminating the stream parses the same way.
        stream = 'some meaningful "spaced value1"'
        start = len('some meaningful ')
        self.assertTrue(StringValueParser.is_applicable(stream[start]), 'Should be parsable.')
        _, value, _ = StringValueParser.parse(start, stream)
        self.assertEqual(value, 'spaced value1')

    def testEscapedStringParsing(self):
        # Escaped quotes inside a quoted value are preserved verbatim.
        stream = 'some meaningful "spaced \\"value1" something else skippable ' \
                 '"double spaced value2" still more skip.'
        start = len('some meaningful ')
        self.assertTrue(StringValueParser.is_applicable(stream[start]), 'Should be parsable.')
        _, value, _ = StringValueParser.parse(start, stream)
        self.assertEqual(value, 'spaced \\"value1')
        start = len('some meaningful "spaced \\"value1" something else skippable ')
        self.assertTrue(StringValueParser.is_applicable(stream[start]), 'Should be parsable.')
        _, value, _ = StringValueParser.parse(start, stream)
        self.assertEqual(value, 'double spaced value2')
        start = len(stream) - 2
        self.assertFalse(StringValueParser.is_applicable(stream[start]), 'Should not be parsable.')

    def testIncompleteStringParsing(self):
        # An unterminated quote must raise ParseException.
        stream = 'some meaningful "spaced .'
        start = len('some meaningful ')
        self.assertTrue(StringValueParser.is_applicable(stream[start]), 'Should be parsable.')
        try:
            StringValueParser.parse(start, stream)
            self.assertTrue(False, 'Parsing failure expected.')
        except content.ParseException:
            self.assertTrue(True)
class TestJsonValueParser(TestCase):
    """Tests for JsonValueParser: extraction of inline JSON object values."""

    def testJsonParsing(self):
        # JSON objects mid-stream are returned as their raw text.
        stream = 'some meaningful {"a": "b"} something else skippable {"c": "d"} end.'
        start = len('some meaningful ')
        self.assertTrue(JsonValueParser.is_applicable(stream[start]), 'Should be parsable.')
        _, value, _ = JsonValueParser.parse(start, stream)
        self.assertEqual(value, '{"a": "b"}')
        start = len('some meaningful {"a": "b"} something else skippable ')
        self.assertTrue(JsonValueParser.is_applicable(stream[start]), 'Should be parsable.')
        _, value, _ = JsonValueParser.parse(start, stream)
        self.assertEqual(value, '{"c": "d"}')
        # A non-brace position is not applicable for this parser.
        start = len(stream) - 2
        self.assertFalse(JsonValueParser.is_applicable(stream[start]), 'Should not be parsable.')

    def testEndJsonParsing(self):
        # A JSON object terminating the stream parses the same way.
        stream = 'some meaningful {"a": "b"}'
        start = len('some meaningful ')
        self.assertTrue(JsonValueParser.is_applicable(stream[start]), 'Should be parsable.')
        _, value, _ = JsonValueParser.parse(start, stream)
        self.assertEqual(value, '{"a": "b"}')

    def testComplexJsonParsing(self):
        # Nested objects and arrays must be consumed as a single value.
        stream = 'some meaningful {"a": "b", "c": "d", "e": {"f": "g"}, "h": [1, 2]}'
        start = len('some meaningful ')
        self.assertTrue(JsonValueParser.is_applicable(stream[start]), 'Should be parsable.')
        _, value, _ = JsonValueParser.parse(start, stream)
        self.assertEqual(value, '{"a": "b", "c": "d", "e": {"f": "g"}, "h": [1, 2]}')

    def testIncompleteStringParsing(self):
        # An unterminated JSON object must raise ParseException.
        stream = 'some meaningful {"a":'
        start = len('some meaningful ')
        self.assertTrue(JsonValueParser.is_applicable(stream[start]), 'Should be parsable.')
        try:
            JsonValueParser.parse(start, stream)
            self.assertTrue(False, 'Parsing failure expected.')
        except content.ParseException:
            self.assertTrue(True)
class TestActionAliasParser(TestCase):
    """End-to-end tests for ActionAliasFormatParser: extracting parameter
    values from a chat-command stream, via key=value pairs, the alias format
    string's {{placeholders}}, quoting, JSON values and defaults."""

    def test_default_key_value_param_parsing(self):
        # Empty string
        alias_format = ''
        param_stream = ''
        parser = ActionAliasFormatParser(alias_format, param_stream)
        extracted_values = parser.get_extracted_param_value()
        self.assertEqual(extracted_values, {})

        # 1 key value pair provided in the param stream
        alias_format = ''
        param_stream = 'a=foobar1'
        parser = ActionAliasFormatParser(alias_format, param_stream)
        extracted_values = parser.get_extracted_param_value()
        self.assertEqual(extracted_values, {'a': 'foobar1'})

        # key=value embedded in surrounding free text is still found
        alias_format = ''
        param_stream = 'foo a=foobar2 poonies bar'
        parser = ActionAliasFormatParser(alias_format, param_stream)
        extracted_values = parser.get_extracted_param_value()
        self.assertEqual(extracted_values, {'a': 'foobar2'})

        # Multiple params provided
        alias_format = ''
        param_stream = 'a=foobar1 b=boobar2 c=coobar3'
        parser = ActionAliasFormatParser(alias_format, param_stream)
        extracted_values = parser.get_extracted_param_value()
        self.assertEqual(extracted_values, {'a': 'foobar1', 'b': 'boobar2', 'c': 'coobar3'})

        # Multiple params provided
        alias_format = ''
        param_stream = 'a=foobar4 b=boobar5 c=coobar6'
        parser = ActionAliasFormatParser(alias_format, param_stream)
        extracted_values = parser.get_extracted_param_value()
        self.assertEqual(extracted_values, {'a': 'foobar4', 'b': 'boobar5', 'c': 'coobar6'})

        # Multiple params provided
        alias_format = ''
        param_stream = 'mixed a=foobar1 some more b=boobar2 text c=coobar3 yeah'
        parser = ActionAliasFormatParser(alias_format, param_stream)
        extracted_values = parser.get_extracted_param_value()
        self.assertEqual(extracted_values, {'a': 'foobar1', 'b': 'boobar2', 'c': 'coobar3'})

        # Param with quotes, make sure they are stripped
        alias_format = ''
        param_stream = 'mixed a="foobar1"'
        parser = ActionAliasFormatParser(alias_format, param_stream)
        extracted_values = parser.get_extracted_param_value()
        self.assertEqual(extracted_values, {'a': 'foobar1'})

        # Param with quotes, make sure they are stripped
        alias_format = ''
        param_stream = 'mixed a="foobar test" ponies a'
        parser = ActionAliasFormatParser(alias_format, param_stream)
        extracted_values = parser.get_extracted_param_value()
        self.assertEqual(extracted_values, {'a': 'foobar test'})

        # Param with quotes, make sure they are stripped
        alias_format = ''
        param_stream = "mixed a='foobar1 ponies' test"
        parser = ActionAliasFormatParser(alias_format, param_stream)
        extracted_values = parser.get_extracted_param_value()
        self.assertEqual(extracted_values, {'a': 'foobar1 ponies'})

        # Param with quotes, make sure they are stripped
        alias_format = ''
        param_stream = 'mixed a="foobar1"'
        parser = ActionAliasFormatParser(alias_format, param_stream)
        extracted_values = parser.get_extracted_param_value()
        self.assertEqual(extracted_values, {'a': 'foobar1'})

        # Mixed format and kv params
        alias_format = 'somestuff {{a}} more stuff {{b}}'
        param_stream = 'somestuff a=foobar more stuff coobar'
        parser = ActionAliasFormatParser(alias_format, param_stream)
        extracted_values = parser.get_extracted_param_value()
        self.assertEqual(extracted_values, {'a': 'foobar', 'b': 'coobar'})

        alias_format = 'somestuff {{a}} more stuff {{b}}'
        param_stream = 'somestuff ponies more stuff coobar'
        parser = ActionAliasFormatParser(alias_format, param_stream)
        extracted_values = parser.get_extracted_param_value()
        self.assertEqual(extracted_values, {'a': 'ponies', 'b': 'coobar'})

        # An explicit kv pair overrides a positional {{placeholder}} value.
        alias_format = 'somestuff {{a}} more stuff {{b}}'
        param_stream = 'somestuff ponies more stuff coobar b=foo'
        parser = ActionAliasFormatParser(alias_format, param_stream)
        extracted_values = parser.get_extracted_param_value()
        self.assertEqual(extracted_values, {'a': 'ponies', 'b': 'foo'})

    def testSimpleParsing(self):
        # Positional placeholders surrounded by literal text.
        alias_format = 'skip {{a}} more skip {{b}} and skip more.'
        param_stream = 'skip a1 more skip b1 and skip more.'
        parser = ActionAliasFormatParser(alias_format, param_stream)
        extracted_values = parser.get_extracted_param_value()
        self.assertEqual(extracted_values, {'a': 'a1', 'b': 'b1'})

    def testEndStringParsing(self):
        # A placeholder terminating the format string.
        alias_format = 'skip {{a}} more skip {{b}}'
        param_stream = 'skip a1 more skip b1'
        parser = ActionAliasFormatParser(alias_format, param_stream)
        extracted_values = parser.get_extracted_param_value()
        self.assertEqual(extracted_values, {'a': 'a1', 'b': 'b1'})

    def testSpacedParsing(self):
        # A quoted value may contain spaces; quotes are stripped.
        alias_format = 'skip {{a}} more skip {{b}} and skip more.'
        param_stream = 'skip "a1 a2" more skip b1 and skip more.'
        parser = ActionAliasFormatParser(alias_format, param_stream)
        extracted_values = parser.get_extracted_param_value()
        self.assertEqual(extracted_values, {'a': 'a1 a2', 'b': 'b1'})

    def testJsonParsing(self):
        # A JSON object is captured verbatim as the parameter value.
        alias_format = 'skip {{a}} more skip.'
        param_stream = 'skip {"a": "b", "c": "d"} more skip.'
        parser = ActionAliasFormatParser(alias_format, param_stream)
        extracted_values = parser.get_extracted_param_value()
        self.assertEqual(extracted_values, {'a': '{"a": "b", "c": "d"}'})

    def testMixedParsing(self):
        # JSON value for one placeholder, plain value for the next.
        alias_format = 'skip {{a}} more skip {{b}}.'
        param_stream = 'skip {"a": "b", "c": "d"} more skip x'
        parser = ActionAliasFormatParser(alias_format, param_stream)
        extracted_values = parser.get_extracted_param_value()
        self.assertEqual(extracted_values, {'a': '{"a": "b", "c": "d"}', 'b': 'x'})

    def test_stream_is_none_with_all_default_values(self):
        # With no stream at all, {{name=default}} defaults are used.
        alias_format = 'skip {{d=test}} more skip {{e=test}}.'
        param_stream = None
        parser = ActionAliasFormatParser(alias_format, param_stream)
        extracted_values = parser.get_extracted_param_value()
        self.assertEqual(extracted_values, {'d': 'test', 'e': 'test'})

    def test_stream_is_not_none_some_default_values(self):
        # Supplied values win; missing ones fall back to their defaults.
        alias_format = 'skip {{d=test}} more skip {{e=test}}.'
        param_stream = 'skip ponies'
        parser = ActionAliasFormatParser(alias_format, param_stream)
        extracted_values = parser.get_extracted_param_value()
        self.assertEqual(extracted_values, {'d': 'ponies', 'e': 'test'})

    def test_stream_is_none_no_default_values(self):
        # No stream and no defaults is a parse error.
        alias_format = 'skip {{d}} more skip {{e}}.'
        param_stream = None
        parser = ActionAliasFormatParser(alias_format, param_stream)
        expected_msg = 'No value supplied and no default value found.'
        self.assertRaisesRegexp(ParseException, expected_msg,
                                parser.get_extracted_param_value)
| apache-2.0 |
georgefrank/ansible-modules-extras | cloud/cloudstack/cs_facts.py | 55 | 7076 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cs_facts
short_description: Gather facts on instances of Apache CloudStack based clouds.
description:
- This module fetches data from the metadata API in CloudStack. The module must be called from within the instance itself.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
filter:
description:
- Filter for a specific fact.
required: false
default: null
choices:
- cloudstack_service_offering
- cloudstack_availability_zone
- cloudstack_public_hostname
- cloudstack_public_ipv4
- cloudstack_local_hostname
- cloudstack_local_ipv4
- cloudstack_instance_id
- cloudstack_user_data
requirements: [ 'yaml' ]
'''
EXAMPLES = '''
# Gather all facts on instances
- name: Gather cloudstack facts
cs_facts:
# Gather specific fact on instances
- name: Gather cloudstack facts
cs_facts: filter=cloudstack_instance_id
'''
RETURN = '''
---
cloudstack_availability_zone:
description: zone the instance is deployed in.
returned: success
type: string
sample: ch-gva-2
cloudstack_instance_id:
description: UUID of the instance.
returned: success
type: string
sample: ab4e80b0-3e7e-4936-bdc5-e334ba5b0139
cloudstack_local_hostname:
description: local hostname of the instance.
returned: success
type: string
sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139
cloudstack_local_ipv4:
description: local IPv4 of the instance.
returned: success
type: string
sample: 185.19.28.35
cloudstack_public_hostname:
description: public hostname of the instance.
returned: success
type: string
sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139
cloudstack_public_ipv4:
description: public IPv4 of the instance.
returned: success
type: string
sample: 185.19.28.35
cloudstack_service_offering:
description: service offering of the instance.
returned: success
type: string
sample: Micro 512mb 1cpu
cloudstack_user_data:
description: data of the instance provided by users.
returned: success
type: dict
sample: { "bla": "foo" }
'''
import os

# yaml is only needed for parsing user-data; degrade gracefully when missing
# and report the absence from main() instead of failing at import time.
try:
    import yaml
    has_lib_yaml = True
except ImportError:
    has_lib_yaml = False

# Metadata/user-data endpoints; %s is filled with the metadata server IP
# discovered from the dhclient lease file (see CloudStackFacts._get_api_ip).
CS_METADATA_BASE_URL = "http://%s/latest/meta-data"
CS_USERDATA_BASE_URL = "http://%s/latest/user-data"
class CloudStackFacts(object):
    """Collect instance facts from the CloudStack metadata service.

    Relies on the module-level ``module`` global (set in ``main()``) and on
    ``ansible_facts`` / ``fetch_url`` brought in by the star imports at the
    bottom of the file.  Python 2 only (uses ``iteritems``).
    """

    def __init__(self):
        # Base system facts; used to find the default interface for the
        # dhclient lease-file lookup.
        self.facts = ansible_facts(module)
        self.api_ip = None  # metadata server IP, discovered lazily
        # fact name -> metadata API path suffix
        self.fact_paths = {
            'cloudstack_service_offering': 'service-offering',
            'cloudstack_availability_zone': 'availability-zone',
            'cloudstack_public_hostname': 'public-hostname',
            'cloudstack_public_ipv4': 'public-ipv4',
            'cloudstack_local_hostname': 'local-hostname',
            'cloudstack_local_ipv4': 'local-ipv4',
            'cloudstack_instance_id': 'instance-id'
        }

    def run(self):
        """Return a dict of gathered facts, honouring the optional
        ``filter`` module parameter (all facts when unset)."""
        result = {}
        filter = module.params.get('filter')
        if not filter:
            for key,path in self.fact_paths.iteritems():
                result[key] = self._fetch(CS_METADATA_BASE_URL + "/" + path)
            result['cloudstack_user_data'] = self._get_user_data_json()
        else:
            if filter == 'cloudstack_user_data':
                result['cloudstack_user_data'] = self._get_user_data_json()
            elif filter in self.fact_paths:
                result[filter] = self._fetch(CS_METADATA_BASE_URL + "/" + self.fact_paths[filter])
        return result

    def _get_user_data_json(self):
        """Fetch and YAML-parse the instance user-data; None on any failure."""
        try:
            # this data come form users, we try what we can to parse it...
            return yaml.load(self._fetch(CS_USERDATA_BASE_URL))
        # NOTE(review): bare except hides fetch/parse errors, and yaml.load
        # on user-supplied data is unsafe -- yaml.safe_load would be
        # preferable.  Confirm before changing behaviour.
        except:
            return None

    def _fetch(self, path):
        """GET ``path`` (a format string with one %s for the server IP) from
        the metadata server; returns the raw body or None."""
        api_ip = self._get_api_ip()
        if not api_ip:
            return None
        api_url = path % api_ip
        (response, info) = fetch_url(module, api_url, force=True)
        if response:
            data = response.read()
        else:
            data = None
        return data

    def _get_dhcp_lease_file(self):
        """Return the path of the lease file."""
        default_iface = self.facts['default_ipv4']['interface']
        # Known dhclient lease locations across distributions.
        dhcp_lease_file_locations = [
            '/var/lib/dhcp/dhclient.%s.leases' % default_iface,      # debian / ubuntu
            '/var/lib/dhclient/dhclient-%s.leases' % default_iface,  # centos 6
            '/var/lib/dhclient/dhclient--%s.lease' % default_iface,  # centos 7
            '/var/db/dhclient.leases.%s' % default_iface,            # openbsd
        ]
        for file_path in dhcp_lease_file_locations:
            if os.path.exists(file_path):
                return file_path
        module.fail_json(msg="Could not find dhclient leases file.")

    def _get_api_ip(self):
        """Return the IP of the DHCP server."""
        # On CloudStack the DHCP server (virtual router) also serves the
        # metadata API, so its IP from the lease file is the endpoint.
        if not self.api_ip:
            dhcp_lease_file = self._get_dhcp_lease_file()
            for line in open(dhcp_lease_file):
                if 'dhcp-server-identifier' in line:
                    # get IP of string "option dhcp-server-identifier 185.19.28.176;"
                    line = line.translate(None, ';')
                    self.api_ip = line.split()[2]
                    break
            if not self.api_ip:
                module.fail_json(msg="No dhcp-server-identifier found in leases file.")
        return self.api_ip
def main():
    """Module entry point: parse arguments, gather the requested facts and
    exit with an ``ansible_facts`` result (never changes state)."""
    global module
    module = AnsibleModule(
        argument_spec = dict(
            # Optional single-fact filter; unset means gather everything.
            filter = dict(default=None, choices=[
                'cloudstack_service_offering',
                'cloudstack_availability_zone',
                'cloudstack_public_hostname',
                'cloudstack_public_ipv4',
                'cloudstack_local_hostname',
                'cloudstack_local_ipv4',
                'cloudstack_instance_id',
                'cloudstack_user_data',
            ]),
        ),
        supports_check_mode=False
    )

    if not has_lib_yaml:
        module.fail_json(msg="missing python library: yaml")

    cs_facts = CloudStackFacts().run()
    cs_facts_result = dict(changed=False, ansible_facts=cs_facts)
    module.exit_json(**cs_facts_result)
# Ansible modules conventionally pull in shared helpers via star imports at
# the bottom of the file; the module_utils code is concatenated at ship time.
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.facts import *

if __name__ == '__main__':
    main()
| gpl-3.0 |
mancoast/CPythonPyc_test | fail/332_test_slice.py | 93 | 4433 | # tests for slice objects; in particular the indices method.
import unittest
from test import support
from pickle import loads, dumps
import sys
class SliceTest(unittest.TestCase):
def test_constructor(self):
self.assertRaises(TypeError, slice)
self.assertRaises(TypeError, slice, 1, 2, 3, 4)
def test_repr(self):
self.assertEqual(repr(slice(1, 2, 3)), "slice(1, 2, 3)")
def test_hash(self):
# Verify clearing of SF bug #800796
self.assertRaises(TypeError, hash, slice(5))
self.assertRaises(TypeError, slice(5).__hash__)
def test_cmp(self):
s1 = slice(1, 2, 3)
s2 = slice(1, 2, 3)
s3 = slice(1, 2, 4)
self.assertEqual(s1, s2)
self.assertNotEqual(s1, s3)
self.assertNotEqual(s1, None)
self.assertNotEqual(s1, (1, 2, 3))
self.assertNotEqual(s1, "")
class Exc(Exception):
pass
class BadCmp(object):
def __eq__(self, other):
raise Exc
s1 = slice(BadCmp())
s2 = slice(BadCmp())
self.assertEqual(s1, s1)
self.assertRaises(Exc, lambda: s1 == s2)
s1 = slice(1, BadCmp())
s2 = slice(1, BadCmp())
self.assertEqual(s1, s1)
self.assertRaises(Exc, lambda: s1 == s2)
s1 = slice(1, 2, BadCmp())
s2 = slice(1, 2, BadCmp())
self.assertEqual(s1, s1)
self.assertRaises(Exc, lambda: s1 == s2)
def test_members(self):
s = slice(1)
self.assertEqual(s.start, None)
self.assertEqual(s.stop, 1)
self.assertEqual(s.step, None)
s = slice(1, 2)
self.assertEqual(s.start, 1)
self.assertEqual(s.stop, 2)
self.assertEqual(s.step, None)
s = slice(1, 2, 3)
self.assertEqual(s.start, 1)
self.assertEqual(s.stop, 2)
self.assertEqual(s.step, 3)
class AnyClass:
pass
obj = AnyClass()
s = slice(obj)
self.assertTrue(s.stop is obj)
def test_indices(self):
self.assertEqual(slice(None ).indices(10), (0, 10, 1))
self.assertEqual(slice(None, None, 2).indices(10), (0, 10, 2))
self.assertEqual(slice(1, None, 2).indices(10), (1, 10, 2))
self.assertEqual(slice(None, None, -1).indices(10), (9, -1, -1))
self.assertEqual(slice(None, None, -2).indices(10), (9, -1, -2))
self.assertEqual(slice(3, None, -2).indices(10), (3, -1, -2))
# issue 3004 tests
self.assertEqual(slice(None, -9).indices(10), (0, 1, 1))
self.assertEqual(slice(None, -10).indices(10), (0, 0, 1))
self.assertEqual(slice(None, -11).indices(10), (0, 0, 1))
self.assertEqual(slice(None, -10, -1).indices(10), (9, 0, -1))
self.assertEqual(slice(None, -11, -1).indices(10), (9, -1, -1))
self.assertEqual(slice(None, -12, -1).indices(10), (9, -1, -1))
self.assertEqual(slice(None, 9).indices(10), (0, 9, 1))
self.assertEqual(slice(None, 10).indices(10), (0, 10, 1))
self.assertEqual(slice(None, 11).indices(10), (0, 10, 1))
self.assertEqual(slice(None, 8, -1).indices(10), (9, 8, -1))
self.assertEqual(slice(None, 9, -1).indices(10), (9, 9, -1))
self.assertEqual(slice(None, 10, -1).indices(10), (9, 9, -1))
self.assertEqual(
slice(-100, 100 ).indices(10),
slice(None).indices(10)
)
self.assertEqual(
slice(100, -100, -1).indices(10),
slice(None, None, -1).indices(10)
)
self.assertEqual(slice(-100, 100, 2).indices(10), (0, 10, 2))
self.assertEqual(list(range(10))[::sys.maxsize - 1], [0])
self.assertRaises(OverflowError, slice(None).indices, 1<<100)
def test_setslice_without_getslice(self):
    """x[i:j] = v must reach __setitem__ with a slice key, even when the
    class defines no slicing support of its own."""
    recorded = []

    class X(object):
        def __setitem__(self, key, value):
            recorded.append((key, value))

    target = X()
    target[1:2] = 42
    self.assertEqual(recorded, [(slice(1, 2), 42)])
def test_pickle(self):
    """Slices must round-trip through every pickle protocol as equal,
    behaviorally identical, but distinct objects."""
    original = slice(10, 20, 3)
    for protocol in range(3):
        clone = loads(dumps(original, protocol))
        self.assertEqual(original, clone)
        self.assertEqual(original.indices(15), clone.indices(15))
        self.assertNotEqual(id(original), id(clone))
def test_main():
    # Standard CPython test-module entry point: run the suite via
    # test.support's unittest driver.
    support.run_unittest(SliceTest)

if __name__ == "__main__":
    test_main()
| gpl-3.0 |
wooberlong/shadowsocks-1 | shadowsocks/crypto/table.py | 1044 | 8108 | # !/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import string
import struct
import hashlib
__all__ = ['ciphers']

# Cache of key -> [encrypt_table, decrypt_table] so each key's tables are
# derived only once (derivation sorts the table 1023 times).
cached_tables = {}

# Python 2 exposes maketrans/translate via the string module; Python 3
# moved the byte-oriented versions onto the bytes type.
if hasattr(string, 'maketrans'):
    maketrans = string.maketrans
    translate = string.translate
else:
    maketrans = bytes.maketrans
    translate = bytes.translate
def get_table(key):
    """Derive the 256-entry byte substitution table for *key*.

    The first 8 bytes of MD5(key), read as a little-endian integer, seed a
    deterministic shuffle: the identity table is stably re-sorted 1023
    times with a key that mixes the seed and the round number. Returns a
    list of 256 single-byte strings forming a permutation of 0..255.
    """
    digest = hashlib.md5(key).digest()
    seed, _ = struct.unpack('<QQ', digest)
    identity = maketrans(b'', b'')
    # Split the 256-byte identity mapping into single-byte cells.
    table = [identity[n:n + 1] for n in range(len(identity))]
    for round_no in range(1, 1024):
        # Stable sort each round keeps the shuffle deterministic.
        table.sort(key=lambda cell: int(seed % (ord(cell) + round_no)))
    return table
def init_table(key):
    """Return the memoized [encrypt_table, decrypt_table] pair for *key*."""
    if key in cached_tables:
        return cached_tables[key]
    encrypt = b''.join(get_table(key))
    # The decrypt table is the inverse mapping of the encrypt table.
    decrypt = maketrans(encrypt, maketrans(b'', b''))
    cached_tables[key] = [encrypt, decrypt]
    return cached_tables[key]
class TableCipher(object):
    """Byte-substitution 'cipher': translates every byte through a
    key-derived permutation table. op truthy = encrypt, falsy = decrypt.
    (cipher_name and iv are accepted for interface parity and ignored.)
    """

    def __init__(self, cipher_name, key, iv, op):
        tables = init_table(key)
        self._encrypt_table = tables[0]
        self._decrypt_table = tables[1]
        self._op = op

    def update(self, data):
        table = self._encrypt_table if self._op else self._decrypt_table
        return translate(data, table)
# Cipher registry: name -> (key size, IV size, implementation class).
# The table cipher derives everything from the key, so both sizes are 0.
ciphers = {
    'table': (0, 0, TableCipher)
}
def test_table_result():
    """Regression test: tables derived for two known keys must match the
    recorded reference permutations (encrypt first, then its inverse)."""
    from shadowsocks.common import ord
    # Expected [encrypt, decrypt] tables for key b'foobar!'.
    target1 = [
        [60, 53, 84, 138, 217, 94, 88, 23, 39, 242, 219, 35, 12, 157, 165, 181,
         255, 143, 83, 247, 162, 16, 31, 209, 190, 171, 115, 65, 38, 41, 21,
         245, 236, 46, 121, 62, 166, 233, 44, 154, 153, 145, 230, 49, 128, 216,
         173, 29, 241, 119, 64, 229, 194, 103, 131, 110, 26, 197, 218, 59, 204,
         56, 27, 34, 141, 221, 149, 239, 192, 195, 24, 155, 170, 183, 11, 254,
         213, 37, 137, 226, 75, 203, 55, 19, 72, 248, 22, 129, 33, 175, 178,
         10, 198, 71, 77, 36, 113, 167, 48, 2, 117, 140, 142, 66, 199, 232,
         243, 32, 123, 54, 51, 82, 57, 177, 87, 251, 150, 196, 133, 5, 253,
         130, 8, 184, 14, 152, 231, 3, 186, 159, 76, 89, 228, 205, 156, 96,
         163, 146, 18, 91, 132, 85, 80, 109, 172, 176, 105, 13, 50, 235, 127,
         0, 189, 95, 98, 136, 250, 200, 108, 179, 211, 214, 106, 168, 78, 79,
         74, 210, 30, 73, 201, 151, 208, 114, 101, 174, 92, 52, 120, 240, 15,
         169, 220, 182, 81, 224, 43, 185, 40, 99, 180, 17, 212, 158, 42, 90, 9,
         191, 45, 6, 25, 4, 222, 67, 126, 1, 116, 124, 206, 69, 61, 7, 68, 97,
         202, 63, 244, 20, 28, 58, 93, 134, 104, 144, 227, 147, 102, 118, 135,
         148, 47, 238, 86, 112, 122, 70, 107, 215, 100, 139, 223, 225, 164,
         237, 111, 125, 207, 160, 187, 246, 234, 161, 188, 193, 249, 252],
        [151, 205, 99, 127, 201, 119, 199, 211, 122, 196, 91, 74, 12, 147, 124,
         180, 21, 191, 138, 83, 217, 30, 86, 7, 70, 200, 56, 62, 218, 47, 168,
         22, 107, 88, 63, 11, 95, 77, 28, 8, 188, 29, 194, 186, 38, 198, 33,
         230, 98, 43, 148, 110, 177, 1, 109, 82, 61, 112, 219, 59, 0, 210, 35,
         215, 50, 27, 103, 203, 212, 209, 235, 93, 84, 169, 166, 80, 130, 94,
         164, 165, 142, 184, 111, 18, 2, 141, 232, 114, 6, 131, 195, 139, 176,
         220, 5, 153, 135, 213, 154, 189, 238, 174, 226, 53, 222, 146, 162,
         236, 158, 143, 55, 244, 233, 96, 173, 26, 206, 100, 227, 49, 178, 34,
         234, 108, 207, 245, 204, 150, 44, 87, 121, 54, 140, 118, 221, 228,
         155, 78, 3, 239, 101, 64, 102, 17, 223, 41, 137, 225, 229, 66, 116,
         171, 125, 40, 39, 71, 134, 13, 193, 129, 247, 251, 20, 136, 242, 14,
         36, 97, 163, 181, 72, 25, 144, 46, 175, 89, 145, 113, 90, 159, 190,
         15, 183, 73, 123, 187, 128, 248, 252, 152, 24, 197, 68, 253, 52, 69,
         117, 57, 92, 104, 157, 170, 214, 81, 60, 133, 208, 246, 172, 23, 167,
         160, 192, 76, 161, 237, 45, 4, 58, 10, 182, 65, 202, 240, 185, 241,
         79, 224, 132, 51, 42, 126, 105, 37, 250, 149, 32, 243, 231, 67, 179,
         48, 9, 106, 216, 31, 249, 19, 85, 254, 156, 115, 255, 120, 75, 16]]
    # Expected [encrypt, decrypt] tables for key b'barfoo!'.
    target2 = [
        [124, 30, 170, 247, 27, 127, 224, 59, 13, 22, 196, 76, 72, 154, 32,
         209, 4, 2, 131, 62, 101, 51, 230, 9, 166, 11, 99, 80, 208, 112, 36,
         248, 81, 102, 130, 88, 218, 38, 168, 15, 241, 228, 167, 117, 158, 41,
         10, 180, 194, 50, 204, 243, 246, 251, 29, 198, 219, 210, 195, 21, 54,
         91, 203, 221, 70, 57, 183, 17, 147, 49, 133, 65, 77, 55, 202, 122,
         162, 169, 188, 200, 190, 125, 63, 244, 96, 31, 107, 106, 74, 143, 116,
         148, 78, 46, 1, 137, 150, 110, 181, 56, 95, 139, 58, 3, 231, 66, 165,
         142, 242, 43, 192, 157, 89, 175, 109, 220, 128, 0, 178, 42, 255, 20,
         214, 185, 83, 160, 253, 7, 23, 92, 111, 153, 26, 226, 33, 176, 144,
         18, 216, 212, 28, 151, 71, 206, 222, 182, 8, 174, 205, 201, 152, 240,
         155, 108, 223, 104, 239, 98, 164, 211, 184, 34, 193, 14, 114, 187, 40,
         254, 12, 67, 93, 217, 6, 94, 16, 19, 82, 86, 245, 24, 197, 134, 132,
         138, 229, 121, 5, 235, 238, 85, 47, 103, 113, 179, 69, 250, 45, 135,
         156, 25, 61, 75, 44, 146, 189, 84, 207, 172, 119, 53, 123, 186, 120,
         171, 68, 227, 145, 136, 100, 90, 48, 79, 159, 149, 39, 213, 236, 126,
         52, 60, 225, 199, 105, 73, 233, 252, 118, 215, 35, 115, 64, 37, 97,
         129, 161, 177, 87, 237, 141, 173, 191, 163, 140, 234, 232, 249],
        [117, 94, 17, 103, 16, 186, 172, 127, 146, 23, 46, 25, 168, 8, 163, 39,
         174, 67, 137, 175, 121, 59, 9, 128, 179, 199, 132, 4, 140, 54, 1, 85,
         14, 134, 161, 238, 30, 241, 37, 224, 166, 45, 119, 109, 202, 196, 93,
         190, 220, 69, 49, 21, 228, 209, 60, 73, 99, 65, 102, 7, 229, 200, 19,
         82, 240, 71, 105, 169, 214, 194, 64, 142, 12, 233, 88, 201, 11, 72,
         92, 221, 27, 32, 176, 124, 205, 189, 177, 246, 35, 112, 219, 61, 129,
         170, 173, 100, 84, 242, 157, 26, 218, 20, 33, 191, 155, 232, 87, 86,
         153, 114, 97, 130, 29, 192, 164, 239, 90, 43, 236, 208, 212, 185, 75,
         210, 0, 81, 227, 5, 116, 243, 34, 18, 182, 70, 181, 197, 217, 95, 183,
         101, 252, 248, 107, 89, 136, 216, 203, 68, 91, 223, 96, 141, 150, 131,
         13, 152, 198, 111, 44, 222, 125, 244, 76, 251, 158, 106, 24, 42, 38,
         77, 2, 213, 207, 249, 147, 113, 135, 245, 118, 193, 47, 98, 145, 66,
         160, 123, 211, 165, 78, 204, 80, 250, 110, 162, 48, 58, 10, 180, 55,
         231, 79, 149, 74, 62, 50, 148, 143, 206, 28, 15, 57, 159, 139, 225,
         122, 237, 138, 171, 36, 56, 115, 63, 144, 154, 6, 230, 133, 215, 41,
         184, 22, 104, 254, 234, 253, 187, 226, 247, 188, 156, 151, 40, 108,
         51, 83, 178, 52, 3, 31, 255, 195, 53, 235, 126, 167, 120]]
    encrypt_table = b''.join(get_table(b'foobar!'))
    decrypt_table = maketrans(encrypt_table, maketrans(b'', b''))
    for i in range(0, 256):
        assert (target1[0][i] == ord(encrypt_table[i]))
        assert (target1[1][i] == ord(decrypt_table[i]))
    encrypt_table = b''.join(get_table(b'barfoo!'))
    decrypt_table = maketrans(encrypt_table, maketrans(b'', b''))
    for i in range(0, 256):
        assert (target2[0][i] == ord(encrypt_table[i]))
        assert (target2[1][i] == ord(decrypt_table[i]))
def test_encryption():
    # Round-trip data through an encrypt/decrypt pair built from the same
    # key; util.run_cipher asserts that decrypt(encrypt(x)) == x.
    from shadowsocks.crypto import util
    cipher = TableCipher('table', b'test', b'', 1)
    decipher = TableCipher('table', b'test', b'', 0)
    util.run_cipher(cipher, decipher)

if __name__ == '__main__':
    test_table_result()
    test_encryption()
| apache-2.0 |
vaygr/ansible | lib/ansible/modules/cloud/rackspace/rax_mon_check.py | 27 | 10761 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: schema version, maturity, and who
# supports the module.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_mon_check
short_description: Create or delete a Rackspace Cloud Monitoring check for an
existing entity.
description:
- Create or delete a Rackspace Cloud Monitoring check associated with an
existing rax_mon_entity. A check is a specific test or measurement that is
performed, possibly from different monitoring zones, on the systems you
monitor. Rackspace monitoring module flow | rax_mon_entity ->
*rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan ->
rax_mon_alarm
version_added: "2.0"
options:
state:
description:
- Ensure that a check with this C(label) exists or does not exist.
choices: ["present", "absent"]
entity_id:
description:
- ID of the rax_mon_entity to target with this check.
required: true
label:
description:
- Defines a label for this check, between 1 and 64 characters long.
required: true
check_type:
description:
- The type of check to create. C(remote.) checks may be created on any
rax_mon_entity. C(agent.) checks may only be created on rax_mon_entities
that have a non-null C(agent_id).
choices:
- remote.dns
- remote.ftp-banner
- remote.http
- remote.imap-banner
- remote.mssql-banner
- remote.mysql-banner
- remote.ping
- remote.pop3-banner
- remote.postgresql-banner
- remote.smtp-banner
- remote.smtp
- remote.ssh
- remote.tcp
- remote.telnet-banner
- agent.filesystem
- agent.memory
- agent.load_average
- agent.cpu
- agent.disk
- agent.network
- agent.plugin
required: true
monitoring_zones_poll:
description:
- Comma-separated list of the names of the monitoring zones the check should
run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon,
mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks.
target_hostname:
description:
- One of `target_hostname` and `target_alias` is required for remote.* checks,
but prohibited for agent.* checks. The hostname this check should target.
Must be a valid IPv4, IPv6, or FQDN.
target_alias:
description:
- One of `target_alias` and `target_hostname` is required for remote.* checks,
but prohibited for agent.* checks. Use the corresponding key in the entity's
`ip_addresses` hash to resolve an IP address to target.
details:
description:
- Additional details specific to the check type. Must be a hash of strings
between 1 and 255 characters long, or an array or object containing 0 to
256 items.
disabled:
description:
- If "yes", ensure the check is created, but don't actually use it yet.
choices: [ "yes", "no" ]
metadata:
description:
- Hash of arbitrary key-value pairs to accompany this check if it fires.
Keys and values must be strings between 1 and 255 characters long.
period:
description:
- The number of seconds between each time the check is performed. Must be
greater than the minimum period set on your account.
timeout:
description:
- The number of seconds this check will wait when attempting to collect
results. Must be less than the period.
author: Ash Wilson
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Create a monitoring check
gather_facts: False
hosts: local
connection: local
tasks:
- name: Associate a check with an existing entity.
rax_mon_check:
credentials: ~/.rax_pub
state: present
entity_id: "{{ the_entity['entity']['id'] }}"
label: the_check
check_type: remote.ping
monitoring_zones_poll: mziad,mzord,mzdfw
details:
count: 10
meta:
hurf: durf
register: the_check
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
def cloud_check(module, state, entity_id, label, check_type,
                monitoring_zones_poll, target_hostname, target_alias, details,
                disabled, metadata, period, timeout):
    """Converge a Cloud Monitoring check on the given entity.

    state='present' creates, updates, or recreates the check labelled
    *label* as needed; state='absent' deletes it. Always exits the module
    via module.exit_json (with the check serialized) or module.fail_json.
    """
    # Coerce attributes.
    if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list):
        monitoring_zones_poll = [monitoring_zones_poll]

    if period:
        period = int(period)

    if timeout:
        timeout = int(timeout)

    changed = False
    check = None

    cm = pyrax.cloud_monitoring
    if not cm:
        module.fail_json(msg='Failed to instantiate client. This typically '
                             'indicates an invalid region or an incorrectly '
                             'capitalized region name.')

    entity = cm.get_entity(entity_id)
    if not entity:
        module.fail_json(msg='Failed to instantiate entity. "%s" may not be'
                             ' a valid entity id.' % entity_id)

    # Checks are matched by label within the entity.
    existing = [e for e in entity.list_checks() if e.label == label]

    if existing:
        check = existing[0]

    if state == 'present':
        if len(existing) > 1:
            module.fail_json(msg='%s existing checks have a label of %s.' %
                                 (len(existing), label))

        should_delete = False
        should_create = False
        should_update = False

        if check:
            # Details may include keys set to default values that are not
            # included in the initial creation.
            #
            # Only force a recreation of the check if one of the *specified*
            # keys is missing or has a different value. (NOTE(review): a
            # changed detail triggers delete+recreate, presumably because
            # details cannot be updated in place — confirm against the API.)
            if details:
                for (key, value) in details.items():
                    if key not in check.details:
                        should_delete = should_create = True
                    elif value != check.details[key]:
                        should_delete = should_create = True

            # Any drift in the in-place-updatable attributes means update.
            # Falsy parameters (None/0/'') are treated as "not specified"
            # and do not trigger an update, except for `disabled`.
            should_update = label != check.label or \
                (target_hostname and target_hostname != check.target_hostname) or \
                (target_alias and target_alias != check.target_alias) or \
                (disabled != check.disabled) or \
                (metadata and metadata != check.metadata) or \
                (period and period != check.period) or \
                (timeout and timeout != check.timeout) or \
                (monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll)

            if should_update and not should_delete:
                check.update(label=label,
                             disabled=disabled,
                             metadata=metadata,
                             monitoring_zones_poll=monitoring_zones_poll,
                             timeout=timeout,
                             period=period,
                             target_alias=target_alias,
                             target_hostname=target_hostname)
                changed = True
        else:
            # The check doesn't exist yet.
            should_create = True

        if should_delete:
            check.delete()

        if should_create:
            check = cm.create_check(entity,
                                    label=label,
                                    check_type=check_type,
                                    target_hostname=target_hostname,
                                    target_alias=target_alias,
                                    monitoring_zones_poll=monitoring_zones_poll,
                                    details=details,
                                    disabled=disabled,
                                    metadata=metadata,
                                    period=period,
                                    timeout=timeout)
            changed = True
    elif state == 'absent':
        if check:
            check.delete()
            changed = True
    else:
        # Defensive: AnsibleModule's `choices` should already prevent this.
        module.fail_json(msg='state must be either present or absent.')

    if check:
        check_dict = {
            "id": check.id,
            "label": check.label,
            "type": check.type,
            "target_hostname": check.target_hostname,
            "target_alias": check.target_alias,
            "monitoring_zones_poll": check.monitoring_zones_poll,
            "details": check.details,
            "disabled": check.disabled,
            "metadata": check.metadata,
            "period": check.period,
            "timeout": check.timeout
        }
        module.exit_json(changed=changed, check=check_dict)
    else:
        module.exit_json(changed=changed)
def main():
    """Module entry point: build the argument spec, read parameters, and
    hand off to cloud_check() to converge the desired state."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            entity_id=dict(required=True),
            label=dict(required=True),
            check_type=dict(required=True),
            monitoring_zones_poll=dict(),
            target_hostname=dict(),
            target_alias=dict(),
            details=dict(type='dict', default={}),
            disabled=dict(type='bool', default=False),
            metadata=dict(type='dict', default={}),
            period=dict(type='int'),
            timeout=dict(type='int'),
            state=dict(default='present', choices=['present', 'absent'])
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together()
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    entity_id = module.params.get('entity_id')
    label = module.params.get('label')
    check_type = module.params.get('check_type')
    monitoring_zones_poll = module.params.get('monitoring_zones_poll')
    target_hostname = module.params.get('target_hostname')
    target_alias = module.params.get('target_alias')
    details = module.params.get('details')
    disabled = module.boolean(module.params.get('disabled'))
    metadata = module.params.get('metadata')
    period = module.params.get('period')
    timeout = module.params.get('timeout')

    state = module.params.get('state')

    # Authenticate pyrax with the module's credentials/region before use.
    setup_rax_module(module, pyrax)

    cloud_check(module, state, entity_id, label, check_type,
                monitoring_zones_poll, target_hostname, target_alias, details,
                disabled, metadata, period, timeout)


if __name__ == '__main__':
    main()
| gpl-3.0 |
pathway27/servo | tests/wpt/web-platform-tests/referrer-policy/generic/subresource/image.py | 147 | 3358 | import os, sys, array, json, math, cStringIO
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
import subresource
class Image:
    """This class partially implements the interface of the PIL.Image.Image.

    One day in the future WPT might support the PIL module or another imaging
    library, so this hacky BMP implementation will no longer be required.
    """
    def __init__(self, width, height):
        self.width = width
        self.height = height
        # Flat 24-bit pixel buffer: 3 bytes (stored BGR) per pixel, row-major.
        self.img = bytearray([0 for i in range(3 * width * height)])

    @staticmethod
    def new(mode, size, color=0):
        # Mimics PIL.Image.new(); mode and color are accepted but ignored.
        return Image(size[0], size[1])

    def _int_to_bytes(self, number):
        # Return *number* as a list of 4 little-endian byte values.
        packed_bytes = [0, 0, 0, 0]
        for i in range(4):
            packed_bytes[i] = number & 0xFF
            number >>= 8

        return packed_bytes

    def putdata(self, color_data):
        # Fill the buffer from a sequence of (R, G, B) tuples. Extra buffer
        # space stays black; tuples beyond the buffer are ignored.
        for y in range(self.height):
            for x in range(self.width):
                i = x + y * self.width

                if i > len(color_data) - 1:
                    return

                # BMP stores pixel components as BGR, hence the reversal.
                self.img[i * 3: i * 3 + 3] = color_data[i][::-1]

    def save(self, f, type):
        # Write the image to file object *f* as an uncompressed 24-bit BMP.
        assert type == "BMP"

        # 54 bytes of preamble + image color data.
        filesize = 54 + 3 * self.width * self.height;
        # 14 bytes of header.
        # NOTE(review): bytearray(['B', 'M'] + ...) mixes one-character strs
        # with ints, which only works on Python 2 — this file is py2-only.
        bmpfileheader = bytearray(['B', 'M'] + self._int_to_bytes(filesize) +
                                  [0, 0, 0, 0, 54, 0, 0, 0])
        # 40 bytes of info: header size, dimensions, 1 plane, 24 bpp.
        bmpinfoheader = bytearray([40, 0, 0, 0] +
                                  self._int_to_bytes(self.width) +
                                  self._int_to_bytes(self.height) +
                                  [1, 0, 24] + (25 * [0]))

        # Each pixel row must be padded to a multiple of 4 bytes.
        padlength = (4 - (self.width * 3) % 4) % 4
        bmppad = bytearray([0, 0, 0]);
        padding = bmppad[0 : padlength]

        f.write(bmpfileheader)
        f.write(bmpinfoheader)

        # BMP rows are stored bottom-up.
        for i in range(self.height):
            offset = self.width * (self.height - i - 1) * 3
            f.write(self.img[offset : offset + 3 * self.width])
            f.write(padding)
def encode_string_as_bmp_image(string_data):
    """Pack *string_data* into the pixels of a square 24-bit BMP.

    Consecutive payload bytes become the R, G, B components of successive
    pixels (the last pixel is zero-padded), and the serialized BMP file
    contents are returned as a byte string.
    """
    data_bytes = array.array("B", string_data)

    num_bytes = len(data_bytes)

    # Convert data bytes to color data (RGB).
    color_data = []
    num_components = 3
    rgb = [0] * num_components
    i = 0
    for byte in data_bytes:
        component_index = i % num_components
        rgb[component_index] = byte
        # Flush a pixel when it is full, or at the very last byte.
        if component_index == (num_components - 1) or i == (num_bytes - 1):
            color_data.append(tuple(rgb))
            rgb = [0] * num_components
        i += 1

    # Render image: the smallest square that can hold all pixels.
    num_pixels = len(color_data)
    sqrt = int(math.ceil(math.sqrt(num_pixels)))

    img = Image.new("RGB", (sqrt, sqrt), "black")
    img.putdata(color_data)

    # Flush image to string. (cStringIO: this file is Python 2 only.)
    f = cStringIO.StringIO()
    img.save(f, "BMP")
    f.seek(0)
    return f.read()
def generate_payload(server_data):
    """Render the server-side headers as a JSON snippet and encode that
    text into the pixels of a BMP image."""
    json_text = '{"headers": %(headers)s}' % server_data
    return encode_string_as_bmp_image(json_text)
def main(request, response):
    # wptserve handler: reply with a BMP image whose pixels encode the
    # request headers (readable cross-origin thanks to the ACAO header).
    subresource.respond(request,
                        response,
                        payload_generator = generate_payload,
                        content_type = "image/bmp",
                        access_control_allow_origin = "*")
| mpl-2.0 |
miek/libsigrokdecode | decoders/am230x/pd.py | 9 | 8298 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2014 Johannes Roemer <jroemer@physik.uni-wuerzburg.de>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
import sigrokdecode as srd
# Define valid timing values (in microseconds).
# Each entry is the accepted duration window for one pulse phase of the
# AM230x/DHTxx single-wire protocol; a pulse outside its window aborts the
# current frame (the decoder resets to idle).
timing = {
    'START LOW': {'min': 750, 'max': 25000},
    'START HIGH': {'min': 10, 'max': 10000},
    'RESPONSE LOW': {'min': 50, 'max': 90},
    'RESPONSE HIGH': {'min': 50, 'max': 90},
    'BIT LOW': {'min': 45, 'max': 90},
    # Bit value is encoded by the length of the high period.
    'BIT 0 HIGH': {'min': 20, 'max': 35},
    'BIT 1 HIGH': {'min': 65, 'max': 80},
}
class SamplerateError(Exception):
    """Raised when decoding is attempted before a samplerate is known."""
    pass
class Decoder(srd.Decoder):
    """libsigrokdecode decoder for Aosong AM230x/DHTxx humidity sensors.

    The host issues a long low 'start' pulse; the sensor answers with a
    response pulse followed by 40 data bits (16 humidity, 16 temperature,
    8 checksum), each bit encoded by the duration of its high period.
    """
    api_version = 2
    id = 'am230x'
    name = 'AM230x/DHTxx'
    longname = 'Aosong AM230x/DHTxx'
    desc = 'Aosong AM230x/DHTxx humidity/temperature sensor protocol.'
    license = 'gplv2+'
    inputs = ['logic']
    outputs = ['am230x']
    channels = (
        {'id': 'sda', 'name': 'SDA', 'desc': 'Single wire serial data line'},
    )
    options = (
        {'id': 'device', 'desc': 'Device type',
         'default': 'am230x', 'values': ('am230x', 'dht11')},
    )
    # Annotation class indices below are referenced by number in put*().
    annotations = (
        ('start', 'Start'),
        ('response', 'Response'),
        ('bit', 'Bit'),
        ('end', 'End'),
        ('byte', 'Byte'),
        ('humidity', 'Relative humidity in percent'),
        ('temperature', 'Temperature in degrees Celsius'),
        ('checksum', 'Checksum'),
    )
    annotation_rows = (
        ('bits', 'Bits', (0, 1, 2, 3)),
        ('bytes', 'Bytes', (4,)),
        ('results', 'Results', (5, 6, 7)),
    )

    def putfs(self, data):
        # Annotate from the last falling edge up to the current sample.
        self.put(self.fall, self.samplenum, self.out_ann, data)

    def putb(self, data):
        # Annotate over the most recently completed byte.
        self.put(self.bytepos[-1], self.samplenum, self.out_ann, data)

    def putv(self, data):
        # Annotate over the last two bytes (one 16-bit value).
        self.put(self.bytepos[-2], self.samplenum, self.out_ann, data)

    def reset(self):
        """Return to the idle state, discarding any partial frame."""
        self.state = 'WAIT FOR START LOW'
        self.samplenum = 0
        self.fall = 0
        self.rise = 0
        self.bits = []
        self.bytepos = []

    def is_valid(self, name):
        """Check whether the just-ended pulse satisfies timing window *name*.

        Low pulses are measured from the last falling edge, high pulses
        from the last rising edge, in sample counts (see metadata()).
        """
        dt = 0
        if name.endswith('LOW'):
            dt = self.samplenum - self.fall
        elif name.endswith('HIGH'):
            dt = self.samplenum - self.rise
        if dt >= self.cnt[name]['min'] and dt <= self.cnt[name]['max']:
            return True
        return False

    def bits2num(self, bitlist):
        """Interpret *bitlist* (MSB first) as an unsigned integer."""
        number = 0
        for i in range(len(bitlist)):
            number += bitlist[-1 - i] * 2**i
        return number

    def calculate_humidity(self, bitlist):
        """Relative humidity in percent from the 16 humidity bits."""
        h = 0
        if self.options['device'] == 'dht11':
            # DHT11 sends the integral part in the first byte only.
            h = self.bits2num(bitlist[0:8])
        else:
            # AM230x sends a 16-bit value in tenths of a percent.
            h = self.bits2num(bitlist) / 10
        return h

    def calculate_temperature(self, bitlist):
        """Temperature in degrees Celsius from the 16 temperature bits."""
        t = 0
        if self.options['device'] == 'dht11':
            t = self.bits2num(bitlist[0:8])
        else:
            # AM230x: sign-and-magnitude, tenths of a degree; MSB is sign.
            t = self.bits2num(bitlist[1:]) / 10
            if bitlist[0] == 1:
                t = -t
        return t

    def calculate_checksum(self, bitlist):
        """Sum of the frame's bytes, modulo 256."""
        checksum = 0
        for i in range(8, len(bitlist) + 1, 8):
            checksum += self.bits2num(bitlist[i-8:i])
        return checksum % 256

    def __init__(self, **kwargs):
        # Samplerate arrives later via metadata(); decode() refuses to run
        # until it is known.
        self.samplerate = None
        self.reset()

    def start(self):
        self.out_ann = self.register(srd.OUTPUT_ANN)

    def metadata(self, key, value):
        if key != srd.SRD_CONF_SAMPLERATE:
            return
        self.samplerate = value
        # Convert microseconds to sample counts.
        self.cnt = {}
        for e in timing:
            self.cnt[e] = {}
            for t in timing[e]:
                self.cnt[e][t] = timing[e][t] * self.samplerate / 1000000

    def handle_byte(self, bit):
        """Record one decoded bit; emit byte/value annotations on byte
        boundaries and finish the frame after the 40th bit."""
        self.bits.append(bit)
        self.putfs([2, ['Bit: %d' % bit, '%d' % bit]])
        self.fall = self.samplenum
        self.state = 'WAIT FOR BIT HIGH'
        if len(self.bits) % 8 == 0:
            byte = self.bits2num(self.bits[-8:])
            self.putb([4, ['Byte: %#04x' % byte, '%#04x' % byte]])
            if len(self.bits) == 16:
                h = self.calculate_humidity(self.bits[-16:])
                self.putv([5, ['Humidity: %.1f %%' % h, 'RH = %.1f %%' % h]])
            elif len(self.bits) == 32:
                t = self.calculate_temperature(self.bits[-16:])
                self.putv([6, ['Temperature: %.1f °C' % t, 'T = %.1f °C' % t]])
            elif len(self.bits) == 40:
                # Final byte is the checksum over the four data bytes.
                parity = self.bits2num(self.bits[-8:])
                if parity == self.calculate_checksum(self.bits[0:32]):
                    self.putb([7, ['Checksum: OK', 'OK']])
                else:
                    self.putb([7, ['Checksum: not OK', 'NOK']])
                self.state = 'WAIT FOR END'
            self.bytepos.append(self.samplenum)

    def decode(self, ss, es, data):
        """Edge-driven state machine over the sampled SDA line.

        Each state waits for the next level change, validates the
        just-finished pulse against the timing table, and either advances
        or resets to idle.
        """
        if not self.samplerate:
            raise SamplerateError('Cannot decode without samplerate.')
        for (self.samplenum, (sda,)) in data:
            # State machine.
            if self.state == 'WAIT FOR START LOW':
                if sda != 0:
                    continue
                self.fall = self.samplenum
                self.state = 'WAIT FOR START HIGH'
            elif self.state == 'WAIT FOR START HIGH':
                if sda != 1:
                    continue
                if self.is_valid('START LOW'):
                    self.rise = self.samplenum
                    self.state = 'WAIT FOR RESPONSE LOW'
                else:
                    self.reset()
            elif self.state == 'WAIT FOR RESPONSE LOW':
                if sda != 0:
                    continue
                if self.is_valid('START HIGH'):
                    self.putfs([0, ['Start', 'S']])
                    self.fall = self.samplenum
                    self.state = 'WAIT FOR RESPONSE HIGH'
                else:
                    self.reset()
            elif self.state == 'WAIT FOR RESPONSE HIGH':
                if sda != 1:
                    continue
                if self.is_valid('RESPONSE LOW'):
                    self.rise = self.samplenum
                    self.state = 'WAIT FOR FIRST BIT'
                else:
                    self.reset()
            elif self.state == 'WAIT FOR FIRST BIT':
                if sda != 0:
                    continue
                if self.is_valid('RESPONSE HIGH'):
                    self.putfs([1, ['Response', 'R']])
                    self.fall = self.samplenum
                    # First byte starts here.
                    self.bytepos.append(self.samplenum)
                    self.state = 'WAIT FOR BIT HIGH'
                else:
                    self.reset()
            elif self.state == 'WAIT FOR BIT HIGH':
                if sda != 1:
                    continue
                if self.is_valid('BIT LOW'):
                    self.rise = self.samplenum
                    self.state = 'WAIT FOR BIT LOW'
                else:
                    self.reset()
            elif self.state == 'WAIT FOR BIT LOW':
                if sda != 0:
                    continue
                # The high-period length distinguishes a 0 from a 1.
                if self.is_valid('BIT 0 HIGH'):
                    bit = 0
                elif self.is_valid('BIT 1 HIGH'):
                    bit = 1
                else:
                    self.reset()
                    continue
                self.handle_byte(bit)
            elif self.state == 'WAIT FOR END':
                if sda != 1:
                    continue
                self.putfs([3, ['End', 'E']])
                self.reset()
| gpl-3.0 |
mobiuscoin/p2pool-mobi | p2pool/bitcoin/networks/litecoin.py | 29 | 1199 | import os
import platform
from twisted.internet import defer
from .. import data, helper
from p2pool.util import pack
# Litecoin network parameters for p2pool.
# NOTE: str.decode('hex') is Python 2 only — this module targets py2.
P2P_PREFIX = 'fbc0b6db'.decode('hex')  # magic bytes on every P2P message
P2P_PORT = 9333  # default peer-to-peer port
ADDRESS_VERSION = 48  # base58 version byte for mainnet addresses
RPC_PORT = 9332  # default litecoind JSON-RPC port
# Sanity-check that the configured daemon really is litecoind on mainnet.
RPC_CHECK = defer.inlineCallbacks(lambda bitcoind: defer.returnValue(
            'litecoinaddress' in (yield bitcoind.rpc_help()) and
            not (yield bitcoind.rpc_getinfo())['testnet']
        ))
# Block subsidy in satoshis: 50 LTC, halving every 840000 blocks.
SUBSIDY_FUNC = lambda height: 50*100000000 >> (height + 1)//840000
# Litecoin uses scrypt (via the ltc_scrypt extension) as proof-of-work.
POW_FUNC = lambda data: pack.IntType(256).unpack(__import__('ltc_scrypt').getPoWHash(data))
BLOCK_PERIOD = 150 # s
SYMBOL = 'LTC'
# Platform-dependent default location of litecoin.conf.
CONF_FILE_FUNC = lambda: os.path.join(os.path.join(os.environ['APPDATA'], 'Litecoin') if platform.system() == 'Windows' else os.path.expanduser('~/Library/Application Support/Litecoin/') if platform.system() == 'Darwin' else os.path.expanduser('~/.litecoin'), 'litecoin.conf')
BLOCK_EXPLORER_URL_PREFIX = 'http://explorer.litecoin.net/block/'
ADDRESS_EXPLORER_URL_PREFIX = 'http://explorer.litecoin.net/address/'
TX_EXPLORER_URL_PREFIX = 'http://explorer.litecoin.net/tx/'
# Share targets outside this range are rejected as insane.
SANE_TARGET_RANGE = (2**256//1000000000 - 1, 2**256//1000 - 1)
DUMB_SCRYPT_DIFF = 2**16
DUST_THRESHOLD = 0.03e8  # satoshis; outputs below this are treated as dust
| gpl-3.0 |
dgsantana/arsenalsuite | cpp/lib/PyQt4/examples/script/helloscript.py | 20 | 2470 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2010 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
import sys
from PyQt4 import QtGui, QtScript
# A QApplication must exist before any widgets are created.
app = QtGui.QApplication(sys.argv)

engine = QtScript.QScriptEngine()

button = QtGui.QPushButton()
# Wrap the QObject so scripts can reach its properties and slots, then
# expose it to scripts under the global name 'button'.
scriptButton = engine.newQObject(button)
engine.globalObject().setProperty('button', scriptButton)

# Drive the widget entirely from ECMAScript.
engine.evaluate("button.text = 'Hello World!'")
engine.evaluate("button.styleSheet = 'font-style: italic'")
engine.evaluate("button.show()")

sys.exit(app.exec_())
| gpl-2.0 |
miguelfervi/SSBW-Restaurantes | restaurantes/lib/python2.7/site-packages/django/forms/boundfield.py | 41 | 8691 | from __future__ import unicode_literals
import datetime
from django.forms.utils import flatatt, pretty_name
from django.forms.widgets import Textarea, TextInput
from django.utils import six
from django.utils.encoding import (
force_text, python_2_unicode_compatible, smart_text,
)
from django.utils.html import conditional_escape, format_html, html_safe
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
__all__ = ('BoundField',)

# Sentinel marking "initial value not computed yet"; None cannot be used
# because None is a legitimate initial value.
UNSET = object()
@html_safe
@python_2_unicode_compatible
class BoundField(object):
    """A Field plus data: binds a Field to a Form instance for rendering
    and for access to the field's submitted or initial data."""
    def __init__(self, form, field, name):
        self.form = form
        self.field = field
        self.name = name
        # Names/ids used in the rendered HTML; prefixed so they stay
        # unique when several forms share a page.
        self.html_name = form.add_prefix(name)
        self.html_initial_name = form.add_initial_prefix(name)
        self.html_initial_id = form.add_initial_prefix(self.auto_id)
        if self.field.label is None:
            # No explicit label: derive a human-readable one from the name.
            self.label = pretty_name(name)
        else:
            self.label = self.field.label
        self.help_text = field.help_text or ''
        # Cache slot for a callable field initial; see value().
        self._initial_value = UNSET
def __str__(self):
    """Render this field as an HTML widget (plus the hidden initial-value
    input when the field asks for one)."""
    rendered = self.as_widget()
    if self.field.show_hidden_initial:
        rendered += self.as_hidden(only_initial=True)
    return rendered
def __iter__(self):
    """
    Yields rendered strings that comprise all widgets in this BoundField.

    This really is only useful for RadioSelect widgets, so that you can
    iterate over individual radio buttons in a template.
    """
    # Prefer an explicitly configured widget id over the auto-generated one.
    id_ = self.field.widget.attrs.get('id') or self.auto_id
    attrs = {'id': id_} if id_ else {}
    for subwidget in self.field.widget.subwidgets(self.html_name, self.value(), attrs):
        yield subwidget
def __len__(self):
    """Number of subwidgets (e.g. individual radio buttons)."""
    return len(list(self.__iter__()))

def __getitem__(self, idx):
    """Index or slice into the rendered subwidgets."""
    # Prevent unnecessary reevaluation when accessing BoundField's attrs
    # from templates: only real integer/slice keys trigger rendering.
    allowed_key_types = six.integer_types + (slice,)
    if not isinstance(idx, allowed_key_types):
        raise TypeError
    subwidgets = list(self.__iter__())
    return subwidgets[idx]
@property
def errors(self):
    """
    Returns an ErrorList for this field. Returns an empty ErrorList
    if there are none.
    """
    empty = self.form.error_class()
    return self.form.errors.get(self.name, empty)
def as_widget(self, widget=None, attrs=None, only_initial=False):
    """
    Renders the field by rendering the passed widget, adding any HTML
    attributes passed as attrs. If no widget is specified, then the
    field's default widget will be used.

    With only_initial=True, renders the hidden companion input that
    carries the field's initial value (see show_hidden_initial).
    """
    if not widget:
        widget = self.field.widget

    if self.field.localize:
        widget.is_localized = True

    attrs = attrs or {}
    if self.field.disabled:
        attrs['disabled'] = True
    auto_id = self.auto_id
    # Only inject an id when neither the caller nor the widget supplied one.
    if auto_id and 'id' not in attrs and 'id' not in widget.attrs:
        if not only_initial:
            attrs['id'] = auto_id
        else:
            attrs['id'] = self.html_initial_id

    # The hidden-initial input uses a distinct name so both values survive
    # submission.
    if not only_initial:
        name = self.html_name
    else:
        name = self.html_initial_name
    return force_text(widget.render(name, self.value(), attrs=attrs))
def as_text(self, attrs=None, **kwargs):
    """
    Returns a string of HTML for representing this as an <input type="text">,
    overriding the field's default widget.
    """
    return self.as_widget(TextInput(), attrs, **kwargs)

def as_textarea(self, attrs=None, **kwargs):
    "Returns a string of HTML for representing this as a <textarea>."
    return self.as_widget(Textarea(), attrs, **kwargs)

def as_hidden(self, attrs=None, **kwargs):
    """
    Returns a string of HTML for representing this as an <input type="hidden">,
    using the field's configured hidden widget.
    """
    return self.as_widget(self.field.hidden_widget(), attrs, **kwargs)
@property
def data(self):
"""
Returns the data for this BoundField, or None if it wasn't given.
"""
return self.field.widget.value_from_datadict(self.form.data, self.form.files, self.html_name)
    def value(self):
        """
        Returns the value for this BoundField, using the initial value if
        the form is not bound or the data otherwise.

        A callable initial is invoked at most once per BoundField; its
        result is cached in self._initial_value.
        """
        if not self.form.is_bound:
            # Unbound form: fall back to the form's initial data, then the
            # field's own initial.
            data = self.form.initial.get(self.name, self.field.initial)
            if callable(data):
                # UNSET is the "not yet computed" sentinel assigned in
                # __init__; reuse the cached result when present.
                if self._initial_value is not UNSET:
                    data = self._initial_value
                else:
                    data = data()
                    # If this is an auto-generated default date, nix the
                    # microseconds for standardized handling. See #22502.
                    if (isinstance(data, (datetime.datetime, datetime.time)) and
                            not self.field.widget.supports_microseconds):
                        data = data.replace(microsecond=0)
                    self._initial_value = data
        else:
            # Bound form: let the field combine the submitted value with the
            # initial one (e.g. FileField keeps the initial on empty upload).
            data = self.field.bound_data(
                self.data, self.form.initial.get(self.name, self.field.initial)
            )
        return self.field.prepare_value(data)
    def label_tag(self, contents=None, attrs=None, label_suffix=None):
        """
        Wraps the given contents in a <label>, if the field has an ID attribute.
        contents should be 'mark_safe'd to avoid HTML escaping. If contents
        aren't given, uses the field's HTML-escaped label.
        If attrs are given, they're used as HTML attributes on the <label> tag.
        label_suffix allows overriding the form's label_suffix.
        """
        contents = contents or self.label
        # Field-level label_suffix wins over the form-level one.
        if label_suffix is None:
            label_suffix = (self.field.label_suffix if self.field.label_suffix is not None
                            else self.form.label_suffix)
        # Only add the suffix if the label does not end in punctuation.
        # Translators: If found as last label character, these punctuation
        # characters will prevent the default label_suffix to be appended to the label
        if label_suffix and contents and contents[-1] not in _(':?.!'):
            contents = format_html('{}{}', contents, label_suffix)
        widget = self.field.widget
        id_ = widget.attrs.get('id') or self.auto_id
        if id_:
            # Only emit a <label> element when the widget has (or derives)
            # an id to point its "for" attribute at.
            id_for_label = widget.id_for_label(id_)
            if id_for_label:
                attrs = dict(attrs or {}, **{'for': id_for_label})
            if self.field.required and hasattr(self.form, 'required_css_class'):
                attrs = attrs or {}
                if 'class' in attrs:
                    attrs['class'] += ' ' + self.form.required_css_class
                else:
                    attrs['class'] = self.form.required_css_class
            attrs = flatatt(attrs) if attrs else ''
            contents = format_html('<label{}>{}</label>', attrs, contents)
        else:
            # No id available: return just the (escaped) label text.
            contents = conditional_escape(contents)
        return mark_safe(contents)
def css_classes(self, extra_classes=None):
"""
Returns a string of space-separated CSS classes for this field.
"""
if hasattr(extra_classes, 'split'):
extra_classes = extra_classes.split()
extra_classes = set(extra_classes or [])
if self.errors and hasattr(self.form, 'error_css_class'):
extra_classes.add(self.form.error_css_class)
if self.field.required and hasattr(self.form, 'required_css_class'):
extra_classes.add(self.form.required_css_class)
return ' '.join(extra_classes)
@property
def is_hidden(self):
"Returns True if this BoundField's widget is hidden."
return self.field.widget.is_hidden
@property
def auto_id(self):
"""
Calculates and returns the ID attribute for this BoundField, if the
associated Form has specified auto_id. Returns an empty string otherwise.
"""
auto_id = self.form.auto_id
if auto_id and '%s' in smart_text(auto_id):
return smart_text(auto_id) % self.html_name
elif auto_id:
return self.html_name
return ''
@property
def id_for_label(self):
"""
Wrapper around the field widget's `id_for_label` method.
Useful, for example, for focusing on this field regardless of whether
it has a single widget or a MultiWidget.
"""
widget = self.field.widget
id_ = widget.attrs.get('id') or self.auto_id
return widget.id_for_label(id_)
# license: gpl-3.0
from django.conf import settings
from django.core import signals
from django.core.exceptions import ImproperlyConfigured
from django.db.utils import ConnectionHandler, ConnectionRouter, load_backend, DEFAULT_DB_ALIAS, \
DatabaseError, IntegrityError
from django.utils.functional import curry
__all__ = ('backend', 'connection', 'connections', 'router', 'DatabaseError',
'IntegrityError', 'DEFAULT_DB_ALIAS')
# For backwards compatibility - Port any old database settings over to
# the new values.
if not settings.DATABASES:
    # DATABASES is empty: synthesize a default entry from the deprecated
    # DATABASE_* settings, warning only when an engine was actually set.
    if settings.DATABASE_ENGINE:
        import warnings
        warnings.warn(
            "settings.DATABASE_* is deprecated; use settings.DATABASES instead.",
            DeprecationWarning
        )
    settings.DATABASES[DEFAULT_DB_ALIAS] = {
        'ENGINE': settings.DATABASE_ENGINE,
        'HOST': settings.DATABASE_HOST,
        'NAME': settings.DATABASE_NAME,
        'OPTIONS': settings.DATABASE_OPTIONS,
        'PASSWORD': settings.DATABASE_PASSWORD,
        'PORT': settings.DATABASE_PORT,
        'USER': settings.DATABASE_USER,
        'TEST_CHARSET': settings.TEST_DATABASE_CHARSET,
        'TEST_COLLATION': settings.TEST_DATABASE_COLLATION,
        'TEST_NAME': settings.TEST_DATABASE_NAME,
    }
if DEFAULT_DB_ALIAS not in settings.DATABASES:
    raise ImproperlyConfigured("You must define a '%s' database" % DEFAULT_DB_ALIAS)
# Normalize deprecated short ENGINE names to full dotted backend paths.
for alias, database in settings.DATABASES.items():
    if 'ENGINE' not in database:
        raise ImproperlyConfigured("You must specify a 'ENGINE' for database '%s'" % alias)
    if database['ENGINE'] in ("postgresql", "postgresql_psycopg2", "sqlite3", "mysql", "oracle"):
        import warnings
        if 'django.contrib.gis' in settings.INSTALLED_APPS:
            # GIS installs get mapped to the matching GIS backend instead.
            warnings.warn(
                "django.contrib.gis is now implemented as a full database backend. "
                "Modify ENGINE in the %s database configuration to select "
                "a backend from 'django.contrib.gis.db.backends'" % alias,
                DeprecationWarning
            )
            if database['ENGINE'] == 'postgresql_psycopg2':
                full_engine = 'django.contrib.gis.db.backends.postgis'
            elif database['ENGINE'] == 'sqlite3':
                full_engine = 'django.contrib.gis.db.backends.spatialite'
            else:
                full_engine = 'django.contrib.gis.db.backends.%s' % database['ENGINE']
        else:
            warnings.warn(
                "Short names for ENGINE in database configurations are deprecated. "
                "Prepend %s.ENGINE with 'django.db.backends.'" % alias,
                DeprecationWarning
            )
            full_engine = "django.db.backends.%s" % database['ENGINE']
        database['ENGINE'] = full_engine
# One handler for all configured databases, plus the router that decides
# which alias a given operation should use.
connections = ConnectionHandler(settings.DATABASES)
router = ConnectionRouter(settings.DATABASE_ROUTERS)
# `connection`, `DatabaseError` and `IntegrityError` are convenient aliases
# for backend bits.
# DatabaseWrapper.__init__() takes a dictionary, not a settings module, so
# we manually create the dictionary from the settings, passing only the
# settings that the database backends care about. Note that TIME_ZONE is used
# by the PostgreSQL backends.
# We load all these up for backwards compatibility; you should use
# connections['default'] instead.
connection = connections[DEFAULT_DB_ALIAS]
backend = load_backend(connection.settings_dict['ENGINE'])
# Register an event that closes the database connection
# when a Django request is finished.
def close_connection(**kwargs):
    """Close every known database connection at the end of a request."""
    for db_connection in connections.all():
        db_connection.close()
signals.request_finished.connect(close_connection)
# Register an event that resets connection.queries
# when a Django request is started.
def reset_queries(**kwargs):
    """Clear the logged query list on every connection at request start."""
    for db_connection in connections.all():
        db_connection.queries = []
signals.request_started.connect(reset_queries)
# Register an event that rolls back the connections
# when a Django request has an exception.
def _rollback_on_exception(**kwargs):
    """Best-effort rollback of every database after a request exception."""
    from django.db import transaction
    # NOTE(review): this iterates the handler itself (presumably yielding
    # aliases, given it is passed as `using=`), unlike close_connection and
    # reset_queries above which iterate connections.all() — confirm both
    # forms are intended.
    for conn in connections:
        try:
            transaction.rollback_unless_managed(using=conn)
        except DatabaseError:
            # A rollback can itself fail (e.g. the connection is already
            # gone); swallow so the remaining databases still get rolled back.
            pass
signals.got_request_exception.connect(_rollback_on_exception)
# license: bsd-3-clause
# -*- coding: utf-8 -*-
"""
Student dashboard page.
"""
from bok_choy.page_object import PageObject
from . import BASE_URL
class DashboardPage(PageObject):
    """
    Student dashboard, where the student can view
    courses she/he has registered for.
    """
    def __init__(self, browser):
        """Initialize the page.
        Arguments:
            browser (Browser): The browser instance.
        """
        super(DashboardPage, self).__init__(browser)
        # NOTE(review): this assigns a local that is immediately discarded —
        # presumably it was meant to provide the page's `url` (the PageObject
        # contract); verify against upstream before relying on visit().
        url = "{base}/dashboard".format(base=BASE_URL)
    def is_browser_on_page(self):
        """Return True when the dashboard's "my courses" section is present."""
        return self.q(css='section.my-courses').present
    @property
    def current_courses_text(self):
        """
        This is the title label for the section of the student dashboard that
        shows all the courses that the student is enrolled in.
        The string displayed is defined in lms/templates/dashboard.html.
        """
        # NOTE(review): id selector here vs. the class selector used in
        # is_browser_on_page — confirm the template exposes both.
        text_items = self.q(css='section#my-courses').text
        if len(text_items) > 0:
            return text_items[0]
        else:
            return ""
    @property
    def available_courses(self):
        """
        Return list of the names of available courses (e.g. "999 edX Demonstration Course")
        """
        def _get_course_name(el):
            # Extract the visible course title from the anchor element.
            return el.text
        return self.q(css='h3.course-title > a').map(_get_course_name).results
    @property
    def banner_text(self):
        """
        Return the text of the banner on top of the page, or None if
        the banner is not present.
        """
        message = self.q(css='div.wrapper-msg')
        if message.present:
            return message.text[0]
        return None
    def get_enrollment_mode(self, course_name):
        """Get the enrollment mode for a given course on the dashboard.
        Arguments:
            course_name (str): The name of the course whose mode should be retrieved.
        Returns:
            String, indicating the enrollment mode for the course corresponding to
            the provided course name.
        Raises:
            Exception, if no course with the provided name is found on the dashboard.
        """
        # Filter elements by course name, only returning the relevant course item
        course_listing = self.q(css=".course").filter(lambda el: course_name in el.text).results
        if course_listing:
            # There should only be one course listing for the provided course name.
            # Since 'ENABLE_VERIFIED_CERTIFICATES' is true in the Bok Choy settings, we
            # can expect two classes to be present on <article> elements, one being 'course'
            # and the other being the enrollment mode.
            enrollment_mode = course_listing[0].get_attribute('class').split('course ')[1]
        else:
            raise Exception("No course named {} was found on the dashboard".format(course_name))
        return enrollment_mode
    def upgrade_enrollment(self, course_name, upgrade_page):
        """Interact with the upgrade button for the course with the provided name.
        Arguments:
            course_name (str): The name of the course whose mode should be checked.
            upgrade_page (PageObject): The page to wait on after clicking the upgrade button. Importing
                the definition of PaymentAndVerificationFlow results in a circular dependency.
        Raises:
            Exception, if no enrollment corresponding to the provided course name appears
            on the dashboard.
        """
        # Filter elements by course name, only returning the relevant course item
        course_listing = self.q(css=".course").filter(lambda el: course_name in el.text).results
        if course_listing:
            # There should only be one course listing corresponding to the provided course name.
            el = course_listing[0]
            # Click the upgrade button
            el.find_element_by_css_selector('#upgrade-to-verified').click()
            upgrade_page.wait_for_page()
        else:
            raise Exception("No enrollment for {} is visible on the dashboard.".format(course_name))
    def view_course(self, course_id):
        """
        Go to the course with `course_id` (e.g. edx/Open_DemoX/edx_demo_course)
        """
        link_css = self._link_css(course_id)
        if link_css is not None:
            self.q(css=link_css).first.click()
        else:
            # No matching enrollment link; log a warning instead of failing.
            msg = "No links found for course {0}".format(course_id)
            self.warning(msg)
    def _link_css(self, course_id):
        """
        Return a CSS selector for the link to the course with `course_id`.
        Returns None when no enrolled-course link matches.
        """
        # Get the link hrefs for all courses
        all_links = self.q(css='a.enter-course').map(lambda el: el.get_attribute('href')).results
        # Search for the first link that matches the course id
        link_index = None
        for index in range(len(all_links)):
            if course_id in all_links[index]:
                link_index = index
                break
        if link_index is not None:
            # CSS :nth-of-type is 1-based, hence the + 1.
            return "a.enter-course:nth-of-type({0})".format(link_index + 1)
        else:
            return None
    def pre_requisite_message_displayed(self):
        """
        Verify if pre-requisite course messages are being displayed.
        """
        return self.q(css='li.prerequisites > .tip').visible
    def get_course_listings(self):
        """Retrieve the list of course DOM elements"""
        return self.q(css='ul.listing-courses')
    def get_course_social_sharing_widget(self, widget_name):
        """ Retrieves the specified social sharing widgets by its classification """
        return self.q(css='a.action-{}'.format(widget_name))
    def get_courses(self):
        """
        Get all courses shown in the dashboard
        """
        return self.q(css='ul.listing-courses .course-item')
    def get_course_date(self):
        """
        Get course date of the first course from dashboard
        """
        return self.q(css='ul.listing-courses .course-item .info-date-block').first.text[0]
    def click_username_dropdown(self):
        """
        Click username dropdown.
        """
        self.q(css='.dropdown').first.click()
    @property
    def username_dropdown_link_text(self):
        """
        Return list username dropdown links.
        """
        return self.q(css='.dropdown-menu li a').text
    def click_my_profile_link(self):
        """
        Click on `Profile` link.
        """
        self.q(css='.dropdown-menu li a').nth(1).click()
    def click_account_settings_link(self):
        """
        Click on `Account` link.
        """
        self.q(css='.dropdown-menu li a').nth(2).click()
# license: agpl-3.0
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import gyp.common
import gyp.simple_copy
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
from gyp.common import OrderedSet
# A list of types that are treated as linkable.
linkable_types = [
  'executable',
  'shared_library',
  'loadable_module',
  'mac_kernel_extension',
]
# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']
# base_path_sections is a list of sections defined by GYP that contain
# pathnames.  The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
  'destination',
  'files',
  'include_dirs',
  'inputs',
  'libraries',
  'outputs',
  'sources',
]
# Filled in later with base_path_sections plus generator-provided sections
# (see the comment above); query via IsPathSection.
path_sections = set()
# These per-process dictionaries are used to cache build file data when loading
# in parallel mode.
per_process_data = {}
per_process_aux_data = {}
def IsPathSection(section):
  """Returns True if |section| is one whose contents are pathnames."""
  # Trailing '=+?!' characters are merge operators applied to a base section
  # name; strip them before matching.  '/' is deliberately absent from that
  # set because a regular expression can never be treated as a path.
  while section and section[-1:] in '=+?!':
    section = section[:-1]
  if section in path_sections:
    return True
  # Sections matching the regexp '_(dir|file|path)s?$' are also considered
  # PathSections.  Manual string tests are used instead of a regexp because
  # this is called hundreds of thousands of times, so micro-performance
  # matters.
  if '_' not in section:
    return False
  tail = section[-6:]
  if tail.endswith('s'):
    tail = tail[:-1]
  return tail.endswith(('_file', '_path', '_dir'))
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
  # Sections that must exist inside targets and not configurations.
  'actions',
  'configurations',
  'copies',
  'default_configuration',
  'dependencies',
  'dependencies_original',
  'libraries',
  'postbuilds',
  'product_dir',
  'product_extension',
  'product_name',
  'product_prefix',
  'rules',
  'run_as',
  'sources',
  'standalone_static_library',
  'suppress_wildcard',
  'target_name',
  'toolset',
  'toolsets',
  'type',
  # Sections that can be found inside targets or configurations, but that
  # should not be propagated from targets into their configurations.
  'variables',
]
# Populated later from base_non_configuration_keys plus generator-supplied
# keys (see the comment above).
non_configuration_keys = []
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
  'actions',
  'all_dependent_settings',
  'configurations',
  'dependencies',
  'direct_dependent_settings',
  'libraries',
  'link_settings',
  'sources',
  'standalone_static_library',
  'target_name',
  'type',
]
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
# Paths for converting filelist paths to output paths: {
#   toplevel,
#   qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
  """Return a list of all build files included into build_file_path.

  The returned list will contain build_file_path as well as all other files
  that it included, either directly or indirectly.  Note that the list may
  contain files that were included into a conditional section that evaluated
  to false and was not merged into build_file_path's dict.

  aux_data is a dict containing a key for each build file or included build
  file.  Those keys provide access to dicts whose "included" keys contain
  lists of all other files included by the build file.

  included should be left at its default None value by external callers.  It
  is used for recursion.

  The returned list will not contain any duplicate entries.  Each build file
  in the list will be relative to the current directory.
  """
  # Compare against the None sentinel with 'is', not '=='; also avoids a
  # mutable default argument.
  if included is None:
    included = []
  if build_file_path in included:
    # Already visited: stop here so include cycles terminate.
    return included
  included.append(build_file_path)
  for included_build_file in aux_data[build_file_path].get('included', []):
    GetIncludedBuildFiles(included_build_file, aux_data, included)
  return included
def CheckedEval(file_contents):
  """Return the eval of a gyp file.
  The gyp file is restricted to dictionaries and lists only, and
  repeated keys are not allowed.
  Note that this is slower than eval() is.

  Uses the Python 2-only `compiler` module: the file must parse to a module
  whose single statement is one expression (the top-level gyp dict).
  """
  ast = compiler.parse(file_contents)
  assert isinstance(ast, Module)
  c1 = ast.getChildren()
  # A Module's children are (docstring, Stmt); gyp files have no docstring.
  assert c1[0] is None
  assert isinstance(c1[1], Stmt)
  c2 = c1[1].getChildren()
  # The single statement must be a bare expression (Discard node).
  assert isinstance(c2[0], Discard)
  c3 = c2[0].getChildren()
  assert len(c3) == 1
  return CheckNode(c3[0], [])
def CheckNode(node, keypath):
  """Recursively converts a restricted AST node to a plain Python value.

  Only Dict, List and Const nodes are allowed.  |keypath| is the list of
  keys/indices leading to |node|, used for error messages.  Raises GypError
  on duplicate dict keys and TypeError on any other node type.
  """
  if isinstance(node, Dict):
    c = node.getChildren()
    dict = {}
    # Dict children alternate key, value, key, value, ...
    for n in range(0, len(c), 2):
      assert isinstance(c[n], Const)
      key = c[n].getChildren()[0]
      if key in dict:
        raise GypError("Key '" + key + "' repeated at level " +
              repr(len(keypath) + 1) + " with key path '" +
              '.'.join(keypath) + "'")
      kp = list(keypath)  # Make a copy of the list for descending this node.
      kp.append(key)
      dict[key] = CheckNode(c[n + 1], kp)
    return dict
  elif isinstance(node, List):
    c = node.getChildren()
    children = []
    for index, child in enumerate(c):
      kp = list(keypath)  # Copy list.
      kp.append(repr(index))
      children.append(CheckNode(child, kp))
    return children
  elif isinstance(node, Const):
    return node.getChildren()[0]
  else:
    raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
         "': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, includes,
                     is_target, check):
  """Reads and evals one build file, caching the result in |data|.

  |data| and |aux_data| are per-process caches keyed by build file path.
  |includes| lists extra files to merge in when |is_target| is true (target
  .gyp files); included .gypi files get None instead.  |check| selects the
  stricter CheckedEval over plain eval().  Raises GypError when the file is
  missing or does not evaluate to a dict.
  """
  if build_file_path in data:
    # Cache hit: this file was already loaded by this process.
    return data[build_file_path]
  if os.path.exists(build_file_path):
    # Open the build file for read ('r') with universal-newlines mode ('U')
    # to make sure platform specific newlines ('\r\n' or '\r') are converted to '\n'
    # which otherwise will fail eval()
    build_file_contents = open(build_file_path, 'rU').read()
  else:
    raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
  build_file_data = None
  try:
    if check:
      build_file_data = CheckedEval(build_file_contents)
    else:
      # Evaluate with no builtins so a gyp file can only be a literal.
      build_file_data = eval(build_file_contents, {'__builtins__': None},
                             None)
  except SyntaxError, e:
    # Tag the error with the offending file before re-raising.
    e.filename = build_file_path
    raise
  except Exception, e:
    gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
    raise
  if type(build_file_data) is not dict:
    raise GypError("%s does not evaluate to a dictionary." % build_file_path)
  data[build_file_path] = build_file_data
  aux_data[build_file_path] = {}
  # Scan for includes and merge them in.
  if ('skip_includes' not in build_file_data or
      not build_file_data['skip_includes']):
    try:
      if is_target:
        LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                      aux_data, includes, check)
      else:
        LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                      aux_data, None, check)
    except Exception, e:
      gyp.common.ExceptionAppend(e,
                                 'while reading includes of ' + build_file_path)
      raise
  return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
                                  includes, check):
  """Merges every include named by |includes| and |subdict['includes']| into
  |subdict| (which came from the file |subdict_path|), recording each include
  in aux_data and recursing into nested dicts and lists.
  """
  includes_list = []
  if includes != None:
    includes_list.extend(includes)
  if 'includes' in subdict:
    for include in subdict['includes']:
      # "include" is specified relative to subdict_path, so compute the real
      # path to include by appending the provided "include" to the directory
      # in which subdict_path resides.
      relative_include = \
          os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
      includes_list.append(relative_include)
    # Unhook the includes list, it's no longer needed.
    del subdict['includes']
  # Merge in the included files.
  for include in includes_list:
    if not 'included' in aux_data[subdict_path]:
      aux_data[subdict_path]['included'] = []
    aux_data[subdict_path]['included'].append(include)
    gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)
    MergeDicts(subdict,
               LoadOneBuildFile(include, data, aux_data, None, False, check),
               subdict_path, include)
  # Recurse into subdictionaries.
  for k, v in subdict.iteritems():
    if type(v) is dict:
      LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
                                    None, check)
    elif type(v) is list:
      LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
                                    check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
  """Processes includes for every dict found (recursively) inside |sublist|."""
  for element in sublist:
    # Deliberate exact-type tests, matching the dict-side helper.
    if type(element) is dict:
      LoadBuildFileIncludesIntoDict(element, sublist_path, data, aux_data,
                                    None, check)
    elif type(element) is list:
      LoadBuildFileIncludesIntoList(element, sublist_path, data, aux_data,
                                    check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
  """Expands each target's 'toolsets' list into per-toolset target copies.

  A target listing N toolsets is replaced by N copies of itself, each with a
  singular 'toolset' key.  Also recurses into 'conditions' entries, which may
  themselves define targets/toolsets.  |data| is modified in place.
  """
  if 'targets' in data:
    target_list = data['targets']
    new_target_list = []
    for target in target_list:
      # If this target already has an explicit 'toolset', and no 'toolsets'
      # list, don't modify it further.
      if 'toolset' in target and 'toolsets' not in target:
        new_target_list.append(target)
        continue
      if multiple_toolsets:
        toolsets = target.get('toolsets', ['target'])
      else:
        toolsets = ['target']
      # Make sure this 'toolsets' definition is only processed once.
      if 'toolsets' in target:
        del target['toolsets']
      if len(toolsets) > 0:
        # Optimization: only do copies if more than one toolset is specified.
        for build in toolsets[1:]:
          new_target = gyp.simple_copy.deepcopy(target)
          new_target['toolset'] = build
          new_target_list.append(new_target)
        # The original dict is reused for the first toolset.
        target['toolset'] = toolsets[0]
        new_target_list.append(target)
    data['targets'] = new_target_list
  if 'conditions' in data:
    for condition in data['conditions']:
      if type(condition) is list:
        # condition[0] is the expression; the rest are result dicts.
        for condition_dict in condition[1:]:
          if type(condition_dict) is dict:
            ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
                        depth, check, load_dependencies):
  """Loads one target .gyp file, expanding toolsets and early variables.

  Returns False if |build_file_path| was already loaded (non-parallel path
  only).  When |load_dependencies| is True, recursively loads each dependency
  and returns None; when False (parallel path), returns a
  (build_file_path, dependencies) tuple for the caller to schedule.
  """
  # If depth is set, predefine the DEPTH variable to be a relative path from
  # this build file's directory to the directory identified by depth.
  if depth:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure. This should really be addressed by keeping all paths
    # in POSIX until actual project generation.
    d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
    if d == '':
      variables['DEPTH'] = '.'
    else:
      variables['DEPTH'] = d.replace('\\', '/')
  # The 'target_build_files' key is only set when loading target build files in
  # the non-parallel code path, where LoadTargetBuildFile is called
  # recursively. In the parallel code path, we don't need to check whether the
  # |build_file_path| has already been loaded, because the 'scheduled' set in
  # ParallelState guarantees that we never load the same |build_file_path|
  # twice.
  if 'target_build_files' in data:
    if build_file_path in data['target_build_files']:
      # Already loaded.
      return False
    data['target_build_files'].add(build_file_path)
  gyp.DebugOutput(gyp.DEBUG_INCLUDES,
                  "Loading Target Build File '%s'", build_file_path)
  build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
                                     includes, True, check)
  # Store DEPTH for later use in generators.
  build_file_data['_DEPTH'] = depth
  # Set up the included_files key indicating which .gyp files contributed to
  # this target dict.
  if 'included_files' in build_file_data:
    raise GypError(build_file_path + ' must not contain included_files key')
  included = GetIncludedBuildFiles(build_file_path, aux_data)
  build_file_data['included_files'] = []
  for included_file in included:
    # included_file is relative to the current directory, but it needs to
    # be made relative to build_file_path's directory.
    included_relative = \
        gyp.common.RelativePath(included_file,
                                os.path.dirname(build_file_path))
    build_file_data['included_files'].append(included_relative)
  # Do a first round of toolsets expansion so that conditions can be defined
  # per toolset.
  ProcessToolsetsInDict(build_file_data)
  # Apply "pre"/"early" variable expansions and condition evaluations.
  ProcessVariablesAndConditionsInDict(
      build_file_data, PHASE_EARLY, variables, build_file_path)
  # Since some toolsets might have been defined conditionally, perform
  # a second round of toolsets expansion now.
  ProcessToolsetsInDict(build_file_data)
  # Look at each project's target_defaults dict, and merge settings into
  # targets.
  if 'target_defaults' in build_file_data:
    if 'targets' not in build_file_data:
      raise GypError("Unable to find targets in build file %s" %
                     build_file_path)
    index = 0
    while index < len(build_file_data['targets']):
      # This procedure needs to give the impression that target_defaults is
      # used as defaults, and the individual targets inherit from that.
      # The individual targets need to be merged into the defaults. Make
      # a deep copy of the defaults for each target, merge the target dict
      # as found in the input file into that copy, and then hook up the
      # copy with the target-specific data merged into it as the replacement
      # target dict.
      old_target_dict = build_file_data['targets'][index]
      new_target_dict = gyp.simple_copy.deepcopy(
        build_file_data['target_defaults'])
      MergeDicts(new_target_dict, old_target_dict,
                 build_file_path, build_file_path)
      build_file_data['targets'][index] = new_target_dict
      index += 1
    # No longer needed.
    del build_file_data['target_defaults']
  # Look for dependencies. This means that dependency resolution occurs
  # after "pre" conditionals and variable expansion, but before "post" -
  # in other words, you can't put a "dependencies" section inside a "post"
  # conditional within a target.
  dependencies = []
  if 'targets' in build_file_data:
    for target_dict in build_file_data['targets']:
      if 'dependencies' not in target_dict:
        continue
      for dependency in target_dict['dependencies']:
        # Normalize the dependency to a fully-qualified build file path.
        dependencies.append(
            gyp.common.ResolveTarget(build_file_path, dependency, None)[0])
  if load_dependencies:
    for dependency in dependencies:
      try:
        LoadTargetBuildFile(dependency, data, aux_data, variables,
                            includes, depth, check, load_dependencies)
      except Exception, e:
        gyp.common.ExceptionAppend(
          e, 'while loading dependencies of %s' % build_file_path)
        raise
  else:
    return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
                            build_file_path, variables,
                            includes, depth, check,
                            generator_input_info):
  """Wrapper around LoadTargetBuildFile for parallel processing.

     This wrapper is used when LoadTargetBuildFile is executed in
     a worker process.  Returns the (path, data, dependencies) tuple to be
     sent back to the parent, or None on error.
  """
  try:
    # Ignore SIGINT in workers; the parent handles KeyboardInterrupt and
    # terminates the pool.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    # Apply globals so that the worker process behaves the same.
    for key, value in global_flags.iteritems():
      globals()[key] = value
    SetGeneratorGlobals(generator_input_info)
    result = LoadTargetBuildFile(build_file_path, per_process_data,
                                 per_process_aux_data, variables,
                                 includes, depth, check, False)
    if not result:
      return result
    (build_file_path, dependencies) = result
    # We can safely pop the build_file_data from per_process_data because it
    # will never be referenced by this process again, so we don't need to keep
    # it in the cache.
    build_file_data = per_process_data.pop(build_file_path)
    # This gets serialized and sent back to the main process via a pipe.
    # It's handled in LoadTargetBuildFileCallback.
    return (build_file_path,
            build_file_data,
            dependencies)
  except GypError, e:
    sys.stderr.write("gyp: %s\n" % e)
    return None
  except Exception, e:
    print >>sys.stderr, 'Exception:', e
    print >>sys.stderr, traceback.format_exc()
    return None
class ParallelProcessingError(Exception):
  """Raised when an error occurs while loading build files in parallel."""
class ParallelState(object):
  """Class to keep track of state when processing input files in parallel.

  If build files are loaded in parallel, use this to keep track of
  state during farming out and processing parallel jobs. It's stored
  in a global so that the callback function can have access to it.
  All mutable fields are guarded by self.condition.
  """
  def __init__(self):
    # The multiprocessing pool.
    self.pool = None
    # The condition variable used to protect this object and notify
    # the main loop when there might be more data to process.
    self.condition = None
    # The "data" dict that was passed to LoadTargetBuildFileParallel
    self.data = None
    # The number of parallel calls outstanding; decremented when a response
    # was received.
    self.pending = 0
    # The set of all build files that have been scheduled, so we don't
    # schedule the same one twice.
    self.scheduled = set()
    # A list of dependency build file paths that haven't been scheduled yet.
    self.dependencies = []
    # Flag to indicate if there was an error in a child process.
    self.error = False
  def LoadTargetBuildFileCallback(self, result):
    """Handle the results of running LoadTargetBuildFile in another process.

    Invoked in the parent process by the multiprocessing pool's callback
    mechanism.  |result| is the tuple returned by CallLoadTargetBuildFile,
    or a falsy value if the worker failed.
    """
    self.condition.acquire()
    if not result:
      # Worker reported an error: flag it and wake the main loop so it can
      # stop scheduling work.
      self.error = True
      self.condition.notify()
      self.condition.release()
      return
    (build_file_path0, build_file_data0, dependencies0) = result
    self.data[build_file_path0] = build_file_data0
    self.data['target_build_files'].add(build_file_path0)
    for new_dependency in dependencies0:
      # Schedule each dependency at most once.
      if new_dependency not in self.scheduled:
        self.scheduled.add(new_dependency)
        self.dependencies.append(new_dependency)
    self.pending -= 1
    self.condition.notify()
    self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
                                 check, generator_input_info):
  """Loads |build_files| and all their dependencies using a process pool.

  Results are merged into |data| by ParallelState.LoadTargetBuildFileCallback.
  Exits the process (sys.exit(1)) if any child reported an error.
  """
  parallel_state = ParallelState()
  parallel_state.condition = threading.Condition()
  # Make copies of the build_files argument that we can modify while working.
  parallel_state.dependencies = list(build_files)
  parallel_state.scheduled = set(build_files)
  parallel_state.pending = 0
  parallel_state.data = data

  try:
    parallel_state.condition.acquire()
    while parallel_state.dependencies or parallel_state.pending:
      if parallel_state.error:
        break
      if not parallel_state.dependencies:
        # Nothing to schedule right now; wait for a callback to queue more
        # dependencies or to finish the outstanding work.
        parallel_state.condition.wait()
        continue

      dependency = parallel_state.dependencies.pop()

      parallel_state.pending += 1
      # Snapshot of module-level flags that the child process needs.
      global_flags = {
        'path_sections': globals()['path_sections'],
        'non_configuration_keys': globals()['non_configuration_keys'],
        'multiple_toolsets': globals()['multiple_toolsets']}

      if not parallel_state.pool:
        parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
      parallel_state.pool.apply_async(
          CallLoadTargetBuildFile,
          args = (global_flags, dependency,
                  variables, includes, depth, check, generator_input_info),
          callback = parallel_state.LoadTargetBuildFileCallback)
  except KeyboardInterrupt:
    # Was 'except KeyboardInterrupt, e: ... raise e', which is Python-2-only
    # syntax and discarded the original traceback; a bare raise preserves it.
    # Also guard against the pool never having been created.
    if parallel_state.pool:
      parallel_state.pool.terminate()
    raise

  parallel_state.condition.release()

  # NOTE(review): if build_files is empty the pool is still None here and
  # close() would fail; callers appear to always pass at least one file.
  parallel_state.pool.close()
  parallel_state.pool.join()
  parallel_state.pool = None

  if parallel_state.error:
    sys.exit(1)
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple.  For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
# LBRACKETS holds the opening bracket characters; BRACKETS maps each closing
# bracket to its opening counterpart.  Used by FindEnclosingBracketGroup.
LBRACKETS= set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
  """Returns (start, end) of the group closing the first bracket seen in
  |input_str|, or (-1, -1) if the brackets are unbalanced or mismatched.

  The end index is exclusive (one past the matching closing bracket).
  """
  open_stack = []
  group_start = -1
  for position, character in enumerate(input_str):
    if character in LBRACKETS:
      # Remember where the outermost group begins.
      if group_start == -1:
        group_start = position
      open_stack.append(character)
    elif character in BRACKETS:
      # A closer with no opener, or the wrong opener, means failure.
      if not open_stack or open_stack.pop() != BRACKETS[character]:
        return (-1, -1)
      if not open_stack:
        return (group_start, position + 1)
  return (-1, -1)
def IsStrCanonicalInt(string):
  """Returns True if |string| is in its canonical integer form.

  The canonical form is such that str(int(string)) == string.
  """
  # Hand-rolled scan instead of a regexp: this function is called a lot,
  # and the manual checks measure roughly twice as fast.
  if type(string) is not str or not string:
    return False
  if string == "0":
    return True
  # Strip an optional leading minus sign before inspecting the digits.
  digits = string[1:] if string[0] == "-" else string
  if not digits:
    return False
  # Canonical integers other than "0" never start with '0' (or "-0").
  if not ('1' <= digits[0] <= '9'):
    return False
  return digits.isdigit()
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
early_variable_re = re.compile(
    r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
    r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
    r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# Global cache of results from running commands so they don't have to be run
# more than once.  Keyed by (command string, build file directory); see the
# run_command branch of ExpandVariables.
cached_command_results = {}
def FixupPlatformCommand(cmd):
  """Rewrites a leading 'cat ' to 'type ' on Windows; returns cmd unchanged
  on every other platform.

  |cmd| may be a command string or an argv-style list (only the first list
  element is rewritten).
  """
  if sys.platform != 'win32':
    return cmd
  if type(cmd) is list:
    return [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
  return re.sub('^cat ', 'type ', cmd)
# Expansion phases; ExpandVariables selects the matching regex and expansion
# symbol ('<', '>', or '^') based on these, and ProcessConditionsInDict
# selects 'conditions' vs. 'target_conditions'.
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
  """Expands '<', '>', or '^' style variable and command references in
  |input| (selected by |phase|) and returns the expanded value.

  |variables| maps variable names to their values.  |build_file| is the path
  of the build file being processed; it is used in error messages and as the
  base directory for commands and generated file lists.  The return value is
  a str, an int (when the result is a canonically-formatted integer string),
  or a list (for @-style expansions and list-valued variables).
  """
  # Look for the pattern that gets expanded into variables
  if phase == PHASE_EARLY:
    variable_re = early_variable_re
    expansion_symbol = '<'
  elif phase == PHASE_LATE:
    variable_re = late_variable_re
    expansion_symbol = '>'
  elif phase == PHASE_LATELATE:
    variable_re = latelate_variable_re
    expansion_symbol = '^'
  else:
    assert False

  input_str = str(input)
  if IsStrCanonicalInt(input_str):
    return int(input_str)

  # Do a quick scan to determine if an expensive regex search is warranted.
  if expansion_symbol not in input_str:
    return input_str

  # Get the entire list of matches as a list of MatchObject instances.
  # (using findall here would return strings instead of MatchObjects).
  matches = list(variable_re.finditer(input_str))
  if not matches:
    return input_str

  output = input_str
  # Reverse the list of matches so that replacements are done right-to-left.
  # That ensures that earlier replacements won't mess up the string in a
  # way that causes later calls to find the earlier substituted text instead
  # of what's intended for replacement.
  matches.reverse()
  for match_group in matches:
    match = match_group.groupdict()
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
    # match['replace'] is the substring to look for, match['type']
    # is the character code for the replacement type (< > <! >! <| >| <@
    # >@ <!@ >!@), match['is_array'] contains a '[' for command
    # arrays, and match['content'] is the name of the variable (< >)
    # or command to run (<! >!). match['command_string'] is an optional
    # command string. Currently, only 'pymod_do_main' is supported.

    # run_command is true if a ! variant is used.
    run_command = '!' in match['type']
    command_string = match['command_string']

    # file_list is true if a | variant is used.
    file_list = '|' in match['type']

    # Capture these now so we can adjust them later.
    replace_start = match_group.start('replace')
    replace_end = match_group.end('replace')

    # Find the ending paren, and re-evaluate the contained string.
    (c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])

    # Adjust the replacement range to match the entire command
    # found by FindEnclosingBracketGroup (since the variable_re
    # probably doesn't match the entire command if it contained
    # nested variables).
    replace_end = replace_start + c_end

    # Find the "real" replacement, matching the appropriate closing
    # paren, and adjust the replacement start and end.
    replacement = input_str[replace_start:replace_end]

    # Figure out what the contents of the variable parens are.
    contents_start = replace_start + c_start + 1
    contents_end = replace_end - 1
    contents = input_str[contents_start:contents_end]

    # Do filter substitution now for <|().
    # Admittedly, this is different than the evaluation order in other
    # contexts. However, since filtration has no chance to run on <|(),
    # this seems like the only obvious way to give them access to filters.
    if file_list:
      processed_variables = gyp.simple_copy.deepcopy(variables)
      ProcessListFiltersInDict(contents, processed_variables)
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase,
                                 processed_variables, build_file)
    else:
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase, variables, build_file)

    # Strip off leading/trailing whitespace so that variable matches are
    # simpler below (and because they are rarely needed).
    contents = contents.strip()

    # expand_to_list is true if an @ variant is used. In that case,
    # the expansion should result in a list. Note that the caller
    # is to be expecting a list in return, and not all callers do
    # because not all are working in list context. Also, for list
    # expansions, there can be no other text besides the variable
    # expansion in the input string.
    expand_to_list = '@' in match['type'] and input_str == replacement

    if run_command or file_list:
      # Find the build file's directory, so commands can be run or file lists
      # generated relative to it.
      build_file_dir = os.path.dirname(build_file)
      if build_file_dir == '' and not file_list:
        # If build_file is just a leaf filename indicating a file in the
        # current directory, build_file_dir might be an empty string. Set
        # it to None to signal to subprocess.Popen that it should run the
        # command in the current directory.
        build_file_dir = None

    # Support <|(listfile.txt ...) which generates a file
    # containing items from a gyp list, generated at gyp time.
    # This works around actions/rules which have more inputs than will
    # fit on the command line.
    if file_list:
      if type(contents) is list:
        contents_list = contents
      else:
        contents_list = contents.split(' ')
      replacement = contents_list[0]
      if os.path.isabs(replacement):
        raise GypError('| cannot handle absolute paths, got "%s"' % replacement)

      if not generator_filelist_paths:
        path = os.path.join(build_file_dir, replacement)
      else:
        if os.path.isabs(build_file_dir):
          toplevel = generator_filelist_paths['toplevel']
          rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
        else:
          rel_build_file_dir = build_file_dir
        qualified_out_dir = generator_filelist_paths['qualified_out_dir']
        path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
        gyp.common.EnsureDirExists(path)

      replacement = gyp.common.RelativePath(path, build_file_dir)
      f = gyp.common.WriteOnDiff(path)
      for i in contents_list[1:]:
        f.write('%s\n' % i)
      f.close()

    elif run_command:
      use_shell = True
      if match['is_array']:
        contents = eval(contents)
        use_shell = False

      # Check for a cached value to avoid executing commands, or generating
      # file lists more than once. The cache key contains the command to be
      # run as well as the directory to run it from, to account for commands
      # that depend on their current directory.
      # TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
      # someone could author a set of GYP files where each time the command
      # is invoked it produces different output by design. When the need
      # arises, the syntax should be extended to support no caching off a
      # command's output so it is run every time.
      cache_key = (str(contents), build_file_dir)
      cached_value = cached_command_results.get(cache_key, None)
      if cached_value is None:
        gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                        "Executing command '%s' in directory '%s'",
                        contents, build_file_dir)

        replacement = ''

        if command_string == 'pymod_do_main':
          # <!pymod_do_main(modulename param eters) loads |modulename| as a
          # python module and then calls that module's DoMain() function,
          # passing ["param", "eters"] as a single list argument. For modules
          # that don't load quickly, this can be faster than
          # <!(python modulename param eters). Do this in |build_file_dir|.
          oldwd = os.getcwd()  # Python doesn't like os.open('.'): no fchdir.
          if build_file_dir:  # build_file_dir may be None (see above).
            os.chdir(build_file_dir)
          try:

            parsed_contents = shlex.split(contents)
            try:
              py_module = __import__(parsed_contents[0])
            except ImportError as e:
              raise GypError("Error importing pymod_do_main"
                             "module (%s): %s" % (parsed_contents[0], e))
            replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
          finally:
            os.chdir(oldwd)
          assert replacement != None
        elif command_string:
          raise GypError("Unknown command string '%s' in '%s'." %
                         (command_string, contents))
        else:
          # Fix up command with platform specific workarounds.
          contents = FixupPlatformCommand(contents)
          try:
            p = subprocess.Popen(contents, shell=use_shell,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE,
                                 cwd=build_file_dir)
          except Exception, e:
            raise GypError("%s while executing command '%s' in %s" %
                           (e, contents, build_file))

          p_stdout, p_stderr = p.communicate('')

          if p.wait() != 0 or p_stderr:
            sys.stderr.write(p_stderr)
            # Simulate check_call behavior, since check_call only exists
            # in python 2.5 and later.
            raise GypError("Call to '%s' returned exit status %d while in %s." %
                           (contents, p.returncode, build_file))
          replacement = p_stdout.rstrip()

        cached_command_results[cache_key] = replacement
      else:
        gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                        "Had cache value for command '%s' in directory '%s'",
                        contents,build_file_dir)
        replacement = cached_value

    else:
      if not contents in variables:
        if contents[-1] in ['!', '/']:
          # In order to allow cross-compiles (nacl) to happen more naturally,
          # we will allow references to >(sources/) etc. to resolve to
          # and empty list if undefined. This allows actions to:
          # 'action!': [
          #   '>@(_sources!)',
          # ],
          # 'action/': [
          #   '>@(_sources/)',
          # ],
          replacement = []
        else:
          raise GypError('Undefined variable ' + contents +
                         ' in ' + build_file)
      else:
        replacement = variables[contents]

    if type(replacement) is list:
      for item in replacement:
        if not contents[-1] == '/' and type(item) not in (str, int):
          raise GypError('Variable ' + contents +
                         ' must expand to a string or list of strings; ' +
                         'list contains a ' +
                         item.__class__.__name__)
      # Run through the list and handle variable expansions in it. Since
      # the list is guaranteed not to contain dicts, this won't do anything
      # with conditions sections.
      ProcessVariablesAndConditionsInList(replacement, phase, variables,
                                          build_file)
    elif type(replacement) not in (str, int):
      raise GypError('Variable ' + contents +
                     ' must expand to a string or list of strings; ' +
                     'found a ' + replacement.__class__.__name__)

    if expand_to_list:
      # Expanding in list context. It's guaranteed that there's only one
      # replacement to do in |input_str| and that it's this replacement. See
      # above.
      if type(replacement) is list:
        # If it's already a list, make a copy.
        output = replacement[:]
      else:
        # Split it the same way sh would split arguments.
        output = shlex.split(str(replacement))
    else:
      # Expanding in string context.
      encoded_replacement = ''
      if type(replacement) is list:
        # When expanding a list into string context, turn the list items
        # into a string in a way that will work with a subprocess call.
        #
        # TODO(mark): This isn't completely correct. This should
        # call a generator-provided function that observes the
        # proper list-to-argument quoting rules on a specific
        # platform instead of just calling the POSIX encoding
        # routine.
        encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
      else:
        encoded_replacement = replacement

      output = output[:replace_start] + str(encoded_replacement) + \
               output[replace_end:]
    # Prepare for the next match iteration.
    input_str = output

  if output == input:
    gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                    "Found only identity matches on %r, avoiding infinite "
                    "recursion.",
                    output)
  else:
    # Look for more matches now that we've replaced some, to deal with
    # expanding local variables (variables defined in the same
    # variables block as this one).
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
    if type(output) is list:
      if output and type(output[0]) is list:
        # Leave output alone if it's a list of lists.
        # We don't want such lists to be stringified.
        pass
      else:
        new_output = []
        for item in output:
          new_output.append(
              ExpandVariables(item, phase, variables, build_file))
        output = new_output
    else:
      output = ExpandVariables(output, phase, variables, build_file)

  # Convert all strings that are canonically-represented integers into integers.
  if type(output) is list:
    for index in xrange(0, len(output)):
      if IsStrCanonicalInt(output[index]):
        output[index] = int(output[index])
  elif IsStrCanonicalInt(output):
    output = int(output)

  return output
# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
# Maps condition-expression strings to their compiled code objects; see
# EvalSingleCondition.
cached_conditions_asts = {}
def EvalCondition(condition, conditions_key, phase, variables, build_file):
  """Returns the dict that should be used or None if the result was
  that nothing should be used.

  |condition| is a list of the form [cond_expr, true_dict, (false_dict |
  cond_expr, true_dict, ...)]; the first expression that produces a non-None
  result wins, though every pair is still validated for shape.
  """
  if type(condition) is not list:
    raise GypError(conditions_key + ' must be a list')
  if len(condition) < 2:
    # It's possible that condition[0] won't work in which case this
    # attempt will raise its own IndexError.  That's probably fine.
    raise GypError(conditions_key + ' ' + condition[0] +
                   ' must be at least length 2, not ' + str(len(condition)))

  i = 0
  result = None
  while i < len(condition):
    cond_expr = condition[i]
    true_dict = condition[i + 1]
    if type(true_dict) is not dict:
      raise GypError('{} {} must be followed by a dictionary, not {}'.format(
          conditions_key, cond_expr, type(true_dict)))
    if len(condition) > i + 2 and type(condition[i + 2]) is dict:
      false_dict = condition[i + 2]
      i = i + 3
      if i != len(condition):
        raise GypError('{} {} has {} unexpected trailing items'.format(
            conditions_key, cond_expr, len(condition) - i))
    else:
      false_dict = None
      i = i + 2
    # Was 'result == None': identity comparison is the correct idiom for
    # None and avoids invoking a custom __eq__.
    if result is None:
      result = EvalSingleCondition(
          cond_expr, true_dict, false_dict, phase, variables, build_file)

  return result
def EvalSingleCondition(
    cond_expr, true_dict, false_dict, phase, variables, build_file):
  """Returns true_dict if cond_expr evaluates to true, and false_dict
  otherwise."""
  # Do expansions on the condition itself.  Since the condition can naturally
  # contain variable references without needing to resort to GYP expansion
  # syntax, this is of dubious value for variables, but someone might want to
  # use a command expansion directly inside a condition.
  cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
                                       build_file)
  if type(cond_expr_expanded) not in (str, int):
    raise ValueError(
        'Variable expansion in this context permits str and int ' + \
        'only, found ' + cond_expr_expanded.__class__.__name__)

  try:
    if cond_expr_expanded in cached_conditions_asts:
      ast_code = cached_conditions_asts[cond_expr_expanded]
    else:
      ast_code = compile(cond_expr_expanded, '<string>', 'eval')
      cached_conditions_asts[cond_expr_expanded] = ast_code
    # The expression is evaluated with |variables| as its namespace and
    # builtins disabled, so conditions reference GYP variables by bare name.
    if eval(ast_code, {'__builtins__': None}, variables):
      return true_dict
    return false_dict
  # 'except X, e' is Python-2-only syntax; 'except X as e' works on
  # Python 2.6+ as well as Python 3.
  except SyntaxError as e:
    syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
                               'at character %d.' %
                               (str(e.args[0]), e.text, build_file, e.offset),
                               e.filename, e.lineno, e.offset, e.text)
    raise syntax_error
  except NameError as e:
    gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
                               (cond_expr_expanded, build_file))
    raise GypError(e)
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
  """Processes a 'conditions' or 'target_conditions' section in the_dict,
  depending on phase.

  early -> conditions
  late -> target_conditions
  latelate -> no conditions

  Each item in a conditions list consists of cond_expr, a string expression
  evaluated as the condition, and true_dict, a dict that will be merged into
  the_dict if cond_expr evaluates to true.  Optionally, a third item,
  false_dict, may be present.  false_dict is merged into the_dict if
  cond_expr evaluates to false.

  Any dict merged into the_dict will be recursively processed for nested
  conditionals and other expansions, also according to phase, immediately
  prior to being merged.
  """
  if phase == PHASE_EARLY:
    conditions_key = 'conditions'
  elif phase == PHASE_LATE:
    conditions_key = 'target_conditions'
  elif phase == PHASE_LATELATE:
    return
  else:
    assert False

  # Idiomatic membership test (was 'not conditions_key in the_dict').
  if conditions_key not in the_dict:
    return

  conditions_list = the_dict[conditions_key]
  # Unhook the conditions list, it's no longer needed.
  del the_dict[conditions_key]

  for condition in conditions_list:
    merge_dict = EvalCondition(condition, conditions_key, phase, variables,
                               build_file)

    # Identity comparison is the correct idiom for None (was '!= None').
    if merge_dict is not None:
      # Expand variables and nested conditionals in the merge_dict before
      # merging it.
      ProcessVariablesAndConditionsInDict(merge_dict, phase,
                                          variables, build_file)

      MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
  """Loads "automatic" variables from |the_dict| into |variables|.

  Any key in the_dict with a plain string, int, or list value becomes an
  automatic variable.  The variable name is the key name with a "_"
  character prepended.  Other value types (e.g. dicts) are skipped.
  """
  # items() works on both Python 2 and 3; iteritems() is Python-2-only.
  for key, value in the_dict.items():
    if type(value) in (str, int, list):
      variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
  """Loads entries of the_dict's "variables" dict (if any) into |variables|.

  The variable name is the key name in the "variables" dict.  Variables that
  end with the % character are set only if they are unset in the variables
  dict.  the_dict_key is the name of the key that accesses the_dict in
  the_dict's parent dict.  If the_dict's parent is not a dict (it could be a
  list or it could be parentless because it is a root dict), the_dict_key
  will be None.
  """
  # items() works on both Python 2 and 3; iteritems() is Python-2-only.
  for key, value in the_dict.get('variables', {}).items():
    if type(value) not in (str, int, list):
      continue
    if key.endswith('%'):
      variable_name = key[:-1]
      if variable_name in variables:
        # If the variable is already set, don't set it.
        continue
      # Was "the_dict_key is 'variables'": identity comparison against a
      # string literal only worked via CPython interning; use equality.
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
        # variables dict), use the_dict's definition.
        value = the_dict[variable_name]
    else:
      variable_name = key
    variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
                                        build_file, the_dict_key=None):
  """Handle all variable and command expansion and conditional evaluation.

  This function is the public entry point for all variable expansions and
  conditional evaluations.  The variables_in dictionary will not be modified
  by this function.

  Args:
    the_dict: dict to process in place.
    phase: one of PHASE_EARLY, PHASE_LATE, PHASE_LATELATE; selects the
        expansion syntax (see ExpandVariables) and which conditions section
        applies (see ProcessConditionsInDict).
    variables_in: variables visible in this scope; copied, never mutated.
    build_file: path of the build file the_dict came from, used for error
        reporting and relative-path resolution.
    the_dict_key: key by which the_dict is accessed in its parent dict, or
        None (see LoadVariablesFromVariablesDict).
  """
  # Make a copy of the variables_in dict that can be modified during the
  # loading of automatics and the loading of the variables dict.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)

  if 'variables' in the_dict:
    # Make sure all the local variables are added to the variables
    # list before we process them so that you can reference one
    # variable from another.  They will be fully expanded by recursion
    # in ExpandVariables.
    for key, value in the_dict['variables'].iteritems():
      variables[key] = value

    # Handle the associated variables dict first, so that any variable
    # references within can be resolved prior to using them as variables.
    # Pass a copy of the variables dict to avoid having it be tainted.
    # Otherwise, it would have extra automatics added for everything that
    # should just be an ordinary variable in this scope.
    ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
                                        variables, build_file, 'variables')

  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  for key, value in the_dict.iteritems():
    # Skip "variables", which was already processed if present.
    if key != 'variables' and type(value) is str:
      expanded = ExpandVariables(value, phase, variables, build_file)
      if type(expanded) not in (str, int):
        raise ValueError(
            'Variable expansion in this context permits str and int ' + \
            'only, found ' + expanded.__class__.__name__ + ' for ' + key)
      the_dict[key] = expanded

  # Variable expansion may have resulted in changes to automatics.  Reload.
  # TODO(mark): Optimization: only reload if no changes were made.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Process conditions in this dict.  This is done after variable expansion
  # so that conditions may take advantage of expanded variables.  For example,
  # if the_dict contains:
  #   {'type':       '<(library_type)',
  #    'conditions': [['_type=="static_library"', { ... }]]},
  # _type, as used in the condition, will only be set to the value of
  # library_type if variable expansion is performed before condition
  # processing.  However, condition processing should occur prior to recursion
  # so that variables (both automatic and "variables" dict type) may be
  # adjusted by conditions sections, merged into the_dict, and have the
  # intended impact on contained dicts.
  #
  # This arrangement means that a "conditions" section containing a
  # "variables" section will only have those variables effective in subdicts,
  # not in the_dict.  The workaround is to put a "conditions" section within
  # a "variables" section.  For example:
  #   {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will not result in "IS_MAC" being appended to the "defines" list in the
  # current scope but would result in it being appended to the "defines" list
  # within "my_subdict".  By comparison:
  #   {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will append "IS_MAC" to both "defines" lists.

  # Evaluate conditions sections, allowing variable expansions within them
  # as well as nested conditionals.  This will process a 'conditions' or
  # 'target_conditions' section, perform appropriate merging and recursive
  # conditional and variable processing, and then remove the conditions
  # section from the_dict if it is present.
  ProcessConditionsInDict(the_dict, phase, variables, build_file)

  # Conditional processing may have resulted in changes to automatics or the
  # variables dict.  Reload.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Recurse into child dicts, or process child lists which may result in
  # further recursion into descendant dicts.
  for key, value in the_dict.iteritems():
    # Skip "variables" and string values, which were already processed if
    # present.
    if key == 'variables' or type(value) is str:
      continue
    if type(value) is dict:
      # Pass a copy of the variables dict so that subdicts can't influence
      # parents.
      ProcessVariablesAndConditionsInDict(value, phase, variables,
                                          build_file, key)
    elif type(value) is list:
      # The list itself can't influence the variables dict, and
      # ProcessVariablesAndConditionsInList will make copies of the variables
      # dict if it needs to pass it to something that can influence it.  No
      # copy is necessary here.
      ProcessVariablesAndConditionsInList(value, phase, variables,
                                          build_file)
    elif type(value) is not int:
      raise TypeError('Unknown type ' + value.__class__.__name__ + \
                      ' for ' + key)
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
                                        build_file):
  """Expands variables in the items of |the_list| in place.

  Dict items are recursed into via ProcessVariablesAndConditionsInDict, list
  items via this function, and string items are expanded with
  ExpandVariables.  A string that expands to a list is spliced into the_list
  at its position.
  """
  # Iterate using an index so that new values can be assigned into the_list.
  index = 0
  while index < len(the_list):
    item = the_list[index]
    if type(item) is dict:
      # Make a copy of the variables dict so that it won't influence anything
      # outside of its own scope.
      ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
    elif type(item) is list:
      ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
    elif type(item) is str:
      expanded = ExpandVariables(item, phase, variables, build_file)
      if type(expanded) in (str, int):
        the_list[index] = expanded
      elif type(expanded) is list:
        the_list[index:index+1] = expanded
        index += len(expanded)

        # index now identifies the next item to examine.  Continue right now
        # without falling into the index increment below.
        continue
      else:
        # Bug fix: the original concatenated the int |index| directly into
        # the message, raising TypeError instead of this ValueError.
        raise ValueError(
            'Variable expansion in this context permits strings and ' + \
            'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
            str(index))
    elif type(item) is not int:
      # Bug fix: str(index), as above.
      raise TypeError('Unknown type ' + item.__class__.__name__ + \
                      ' at index ' + str(index))
    index = index + 1
def BuildTargetsDict(data):
  """Builds a dict mapping fully-qualified target names to their target dicts.

  |data| is a dict mapping loaded build files by pathname relative to the
  current directory.  Values in |data| are build file contents.  For each
  |data| value with a "targets" key, the value of the "targets" key is taken
  as a list containing target dicts.  Each target's fully-qualified name is
  constructed from the pathname of the build file (|data| key) and its
  "target_name" property.  These fully-qualified names are used as the keys
  in the returned dict.  These keys provide access to the target dicts,
  the dicts in the "targets" lists.
  """
  targets = {}
  for build_file in data['target_build_files']:
    target_list = data[build_file].get('targets', [])
    for target in target_list:
      qualified_name = gyp.common.QualifiedTarget(build_file,
                                                  target['target_name'],
                                                  target['toolset'])
      # Two targets may not share one fully-qualified name.
      if qualified_name in targets:
        raise GypError('Duplicate target definitions for ' + qualified_name)
      targets[qualified_name] = target

  return targets
def QualifyDependencies(targets):
  """Make dependency links fully-qualified relative to the current directory.

  |targets| is a dict mapping fully-qualified target names to their target
  dicts.  For each target in this dict, keys known to contain dependency
  links are examined, and any dependencies referenced will be rewritten
  so that they are fully-qualified and relative to the current directory.
  All rewritten dependencies are suitable for use as keys to |targets| or a
  similar dict.
  """
  # Covers each dependency section plus its '!' (exclusion) and '/' (regex)
  # list-filter variants.
  all_dependency_sections = [dep + op
                             for dep in dependency_sections
                             for op in ('', '!', '/')]

  # items()/range() work on both Python 2 and 3; iteritems()/xrange() are
  # Python-2-only.
  for target, target_dict in targets.items():
    target_build_file = gyp.common.BuildFile(target)
    toolset = target_dict['toolset']
    for dependency_key in all_dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      for index in range(len(dependencies)):
        dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
            target_build_file, dependencies[index], toolset)
        if not multiple_toolsets:
          # Ignore toolset specification in the dependency if it is specified.
          dep_toolset = toolset
        dependency = gyp.common.QualifiedTarget(dep_file,
                                                dep_target,
                                                dep_toolset)
        dependencies[index] = dependency

        # Make sure anything appearing in a list other than "dependencies" also
        # appears in the "dependencies" list.
        if dependency_key != 'dependencies' and \
           dependency not in target_dict['dependencies']:
          raise GypError('Found ' + dependency + ' in ' + dependency_key +
                         ' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
  """Expands dependencies specified as build_file:*.

  For each target in |targets|, examines sections containing links to other
  targets.  If any such section contains a link of the form build_file:*, it
  is taken as a wildcard link, and is expanded to list each target in
  build_file.  The |data| dict provides access to build file dicts.

  Any target that does not wish to be included by wildcard can provide an
  optional "suppress_wildcard" key in its target dict.  When present and
  true, a wildcard dependency link will not include such targets.

  All dependency names, including the keys to |targets| and the values in each
  dependency list, must be qualified when this function is called.
  """
  for target, target_dict in targets.iteritems():
    toolset = target_dict['toolset']
    target_build_file = gyp.common.BuildFile(target)
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])

      # Loop this way instead of "for dependency in" or "for index in xrange"
      # because the dependencies list will be modified within the loop body.
      index = 0
      while index < len(dependencies):
        (dependency_build_file, dependency_target, dependency_toolset) = \
            gyp.common.ParseQualifiedTarget(dependencies[index])
        if dependency_target != '*' and dependency_toolset != '*':
          # Not a wildcard.  Keep it moving.
          index = index + 1
          continue

        if dependency_build_file == target_build_file:
          # It's an error for a target to depend on all other targets in
          # the same file, because a target cannot depend on itself.
          raise GypError('Found wildcard in ' + dependency_key + ' of ' +
                         target + ' referring to same build file')

        # Take the wildcard out and adjust the index so that the next
        # dependency in the list will be processed the next time through the
        # loop.
        del dependencies[index]
        index = index - 1

        # Loop through the targets in the other build file, adding them to
        # this target's list of dependencies in place of the removed
        # wildcard.
        dependency_target_dicts = data[dependency_build_file]['targets']
        for dependency_target_dict in dependency_target_dicts:
          if int(dependency_target_dict.get('suppress_wildcard', False)):
            continue
          dependency_target_name = dependency_target_dict['target_name']
          if (dependency_target != '*' and
              dependency_target != dependency_target_name):
            continue
          dependency_target_toolset = dependency_target_dict['toolset']
          if (dependency_toolset != '*' and
              dependency_toolset != dependency_target_toolset):
            continue
          dependency = gyp.common.QualifiedTarget(dependency_build_file,
                                                  dependency_target_name,
                                                  dependency_target_toolset)
          # Insert each expanded dependency just after the current position;
          # the final increment below then skips past all of the insertions,
          # so the already-qualified expansions are not re-examined.
          index = index + 1
          dependencies.insert(index, dependency)

        index = index + 1
def Unify(l):
  """Returns a copy of |l| with duplicates removed, keeping first-seen order.

  Equal elements are canonicalized to the first instance encountered.
  """
  encountered = {}
  result = []
  for element in l:
    if element in encountered:
      continue
    result.append(encountered.setdefault(element, element))
  return result
def RemoveDuplicateDependencies(targets):
  """Ensures that each dependency appears at most once in every dependency
  list of every target."""
  for _, target_dict in targets.iteritems():
    for key in dependency_sections:
      dependency_list = target_dict.get(key, [])
      if not dependency_list:
        continue
      target_dict[key] = Unify(dependency_list)
def Filter(l, item):
  """Returns a copy of |l| with every element equal to |item| removed.

  Other elements keep their order; equal elements are canonicalized to the
  first instance encountered.
  """
  canonical = {}
  kept = []
  for element in l:
    if element == item:
      continue
    kept.append(canonical.setdefault(element, element))
  return kept
def RemoveSelfDependencies(targets):
  """Removes a target's dependency on itself, for targets that set the
  prune_self_dependency variable."""
  for target_name, target_dict in targets.iteritems():
    for key in dependency_sections:
      dependency_list = target_dict.get(key, [])
      if not dependency_list:
        continue
      for dep in dependency_list:
        if dep != target_name:
          continue
        # Only prune when the target opts in via its 'variables' dict.
        if targets[dep].get('variables', {}).get('prune_self_dependency', 0):
          target_dict[key] = Filter(dependency_list, target_name)
def RemoveLinkDependenciesFromNoneTargets(targets):
  """Remove dependencies having the 'link_dependency' attribute from the
  'none' targets.

  A 'none'-type target produces no linkable output, so dependencies that
  exist only to be linked against are dropped from its dependency lists.
  """
  for target_name, target_dict in targets.iteritems():
    # The target's own type never changes inside the loops below; testing it
    # once here avoids re-evaluating it for every individual dependency.
    if target_dict.get('type', None) != 'none':
      continue
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      if dependencies:
        for t in dependencies:
          if targets[t].get('variables', {}).get('link_dependency', 0):
            # Filter the current (possibly already-filtered) list, not the
            # snapshot being iterated, matching the original update order.
            target_dict[dependency_key] = \
                Filter(target_dict[dependency_key], t)
class DependencyGraphNode(object):
  """
  Attributes:
    ref: A reference to an object that this DependencyGraphNode represents.
    dependencies: List of DependencyGraphNodes on which this one depends.
    dependents: List of DependencyGraphNodes that depend on this one.
  """

  class CircularException(GypError):
    # Raised when flattening the graph discovers a dependency cycle.
    pass

  def __init__(self, ref):
    # |ref| is opaque to this class; None marks the synthetic root node.
    self.ref = ref
    self.dependencies = []
    self.dependents = []

  def __repr__(self):
    return '<DependencyGraphNode: %r>' % self.ref

  def FlattenToList(self):
    """Returns the refs of all nodes reachable from this (root) node as a
    list, topologically sorted: every ref appears after all of its
    dependencies and before all of its dependents.  Nodes stuck in a cycle
    never reach in-degree zero and are simply absent from the result --
    callers detect cycles by comparing the result length to the expected
    node count."""
    # flat_list is the sorted list of dependencies - actually, the list items
    # are the "ref" attributes of DependencyGraphNodes. Every target will
    # appear in flat_list after all of its dependencies, and before all of its
    # dependents.
    flat_list = OrderedSet()

    # in_degree_zeros is the list of DependencyGraphNodes that have no
    # dependencies not in flat_list. Initially, it is a copy of the children
    # of this node, because when the graph was built, nodes with no
    # dependencies were made implicit dependents of the root node.
    in_degree_zeros = set(self.dependents[:])

    while in_degree_zeros:
      # Nodes in in_degree_zeros have no dependencies not in flat_list, so they
      # can be appended to flat_list. Take these nodes out of in_degree_zeros
      # as work progresses, so that the next node to process from the list can
      # always be accessed at a consistent position.
      node = in_degree_zeros.pop()
      flat_list.add(node.ref)

      # Look at dependents of the node just added to flat_list. Some of them
      # may now belong in in_degree_zeros.
      for node_dependent in node.dependents:
        is_in_degree_zero = True
        # TODO: We want to check through the
        # node_dependent.dependencies list but if it's long and we
        # always start at the beginning, then we get O(n^2) behaviour.
        for node_dependent_dependency in node_dependent.dependencies:
          if not node_dependent_dependency.ref in flat_list:
            # The dependent has one or more dependencies not in flat_list.
            # There will be more chances to add it to flat_list when examining
            # it again as a dependent of those other dependencies, provided
            # that there are no cycles.
            is_in_degree_zero = False
            break

        if is_in_degree_zero:
          # All of the dependent's dependencies are already in flat_list. Add
          # it to in_degree_zeros where it will be processed in a future
          # iteration of the outer loop.
          in_degree_zeros.add(node_dependent)

    return list(flat_list)

  def FindCycles(self):
    """
    Returns a list of cycles in the graph, where each cycle is its own list.
    """
    results = []
    visited = set()

    def Visit(node, path):
      # Depth-first walk over dependents; finding a child already on the
      # current |path| closes a cycle.  |path| is freshly copied on each
      # recursion, so sibling branches don't see each other's entries.
      for child in node.dependents:
        if child in path:
          results.append([child] + path[:path.index(child) + 1])
        elif not child in visited:
          visited.add(child)
          Visit(child, [child] + path)

    visited.add(self)
    Visit(self, [self])

    return results

  def DirectDependencies(self, dependencies=None):
    """Returns a list of just direct dependencies."""
    # |dependencies| doubles as accumulator and return value so that callers
    # can chain collection across several nodes.
    if dependencies == None:
      dependencies = []

    for dependency in self.dependencies:
      # Check for None, corresponding to the root node.
      if dependency.ref != None and dependency.ref not in dependencies:
        dependencies.append(dependency.ref)

    return dependencies

  def _AddImportedDependencies(self, targets, dependencies=None):
    """Given a list of direct dependencies, adds indirect dependencies that
    other dependencies have declared to export their settings.

    This method does not operate on self. Rather, it operates on the list
    of dependencies in the |dependencies| argument. For each dependency in
    that list, if any declares that it exports the settings of one of its
    own dependencies, those dependencies whose settings are "passed through"
    are added to the list. As new items are added to the list, they too will
    be processed, so it is possible to import settings through multiple levels
    of dependencies.

    This method is not terribly useful on its own, it depends on being
    "primed" with a list of direct dependencies such as one provided by
    DirectDependencies. DirectAndImportedDependencies is intended to be the
    public entry point.
    """

    if dependencies == None:
      dependencies = []

    index = 0
    while index < len(dependencies):
      dependency = dependencies[index]
      dependency_dict = targets[dependency]
      # Add any dependencies whose settings should be imported to the list
      # if not already present. Newly-added items will be checked for
      # their own imports when the list iteration reaches them.
      # Rather than simply appending new items, insert them after the
      # dependency that exported them. This is done to more closely match
      # the depth-first method used by DeepDependencies.
      add_index = 1
      for imported_dependency in \
          dependency_dict.get('export_dependent_settings', []):
        if imported_dependency not in dependencies:
          dependencies.insert(index + add_index, imported_dependency)
          add_index = add_index + 1
      index = index + 1

    return dependencies

  def DirectAndImportedDependencies(self, targets, dependencies=None):
    """Returns a list of a target's direct dependencies and all indirect
    dependencies that a dependency has advertised settings should be exported
    through the dependency for.
    """

    dependencies = self.DirectDependencies(dependencies)
    return self._AddImportedDependencies(targets, dependencies)

  def DeepDependencies(self, dependencies=None):
    """Returns an OrderedSet of all of a target's dependencies, recursively."""
    if dependencies is None:
      # Using a list to get ordered output and a set to do fast "is it
      # already added" checks.
      dependencies = OrderedSet()

    for dependency in self.dependencies:
      # Check for None, corresponding to the root node.
      if dependency.ref is None:
        continue
      if dependency.ref not in dependencies:
        # Recurse before adding, so a node's dependencies precede it in the
        # ordered output.
        dependency.DeepDependencies(dependencies)
        dependencies.add(dependency.ref)

    return dependencies

  def _LinkDependenciesInternal(self, targets, include_shared_libraries,
                                dependencies=None, initial=True):
    """Returns an OrderedSet of dependency targets that are linked
    into this target.

    This function has a split personality, depending on the setting of
    |initial|. Outside callers should always leave |initial| at its default
    setting.

    When adding a target to the list of dependencies, this function will
    recurse into itself with |initial| set to False, to collect dependencies
    that are linked into the linkable target for which the list is being built.

    If |include_shared_libraries| is False, the resulting dependencies will not
    include shared_library targets that are linked into this target.
    """
    if dependencies is None:
      # Using a list to get ordered output and a set to do fast "is it
      # already added" checks.
      dependencies = OrderedSet()

    # Check for None, corresponding to the root node.
    if self.ref is None:
      return dependencies

    # It's kind of sucky that |targets| has to be passed into this function,
    # but that's presently the easiest way to access the target dicts so that
    # this function can find target types.

    if 'target_name' not in targets[self.ref]:
      raise GypError("Missing 'target_name' field in target.")

    if 'type' not in targets[self.ref]:
      raise GypError("Missing 'type' field in target %s" %
                     targets[self.ref]['target_name'])

    target_type = targets[self.ref]['type']

    is_linkable = target_type in linkable_types

    if initial and not is_linkable:
      # If this is the first target being examined and it's not linkable,
      # return an empty list of link dependencies, because the link
      # dependencies are intended to apply to the target itself (initial is
      # True) and this target won't be linked.
      return dependencies

    # Don't traverse 'none' targets if explicitly excluded.
    if (target_type == 'none' and
        not targets[self.ref].get('dependencies_traverse', True)):
      dependencies.add(self.ref)
      return dependencies

    # Executables, mac kernel extensions and loadable modules are already fully
    # and finally linked. Nothing else can be a link dependency of them, there
    # can only be dependencies in the sense that a dependent target might run
    # an executable or load the loadable_module.

    if not initial and target_type in ('executable', 'loadable_module',
                                       'mac_kernel_extension'):
      return dependencies

    # Shared libraries are already fully linked. They should only be included
    # in |dependencies| when adjusting static library dependencies (in order to
    # link against the shared_library's import lib), but should not be included
    # in |dependencies| when propagating link_settings.
    # The |include_shared_libraries| flag controls which of these two cases we
    # are handling.
    if (not initial and target_type == 'shared_library' and
        not include_shared_libraries):
      return dependencies

    # The target is linkable, add it to the list of link dependencies.
    if self.ref not in dependencies:
      dependencies.add(self.ref)
      if initial or not is_linkable:
        # If this is a subsequent target and it's linkable, don't look any
        # further for linkable dependencies, as they'll already be linked into
        # this target linkable. Always look at dependencies of the initial
        # target, and always look at dependencies of non-linkables.
        for dependency in self.dependencies:
          dependency._LinkDependenciesInternal(targets,
                                               include_shared_libraries,
                                               dependencies, False)

    return dependencies

  def DependenciesForLinkSettings(self, targets):
    """
    Returns a list of dependency targets whose link_settings should be merged
    into this target.
    """

    # TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
    # link_settings are propagated. So for now, we will allow it, unless the
    # 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
    # False. Once chrome is fixed, we can remove this flag.
    include_shared_libraries = \
        targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
    return self._LinkDependenciesInternal(targets, include_shared_libraries)

  def DependenciesToLinkAgainst(self, targets):
    """
    Returns a list of dependency targets that are linked into this target.
    """
    return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
  """Builds the target-level dependency graph for |targets|.

  Returns a two-item list [dependency_nodes, flat_list], where
  dependency_nodes maps each qualified target name to its
  DependencyGraphNode, and flat_list is the topologically sorted list of
  target names (every target after all of its dependencies).  Raises
  DependencyGraphNode.CircularException if the graph has a cycle.
  """
  # Create a DependencyGraphNode for each target. Put it into a dict for easy
  # access.
  dependency_nodes = {}
  for target, spec in targets.iteritems():
    if target not in dependency_nodes:
      dependency_nodes[target] = DependencyGraphNode(target)

  # Set up the dependency links. Targets that have no dependencies are treated
  # as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for target, spec in targets.iteritems():
    target_node = dependency_nodes[target]
    # NOTE(review): target_build_file is computed but unused in this function.
    target_build_file = gyp.common.BuildFile(target)
    dependencies = spec.get('dependencies')
    if not dependencies:
      target_node.dependencies = [root_node]
      root_node.dependents.append(target_node)
    else:
      for dependency in dependencies:
        dependency_node = dependency_nodes.get(dependency)
        if not dependency_node:
          raise GypError("Dependency '%s' not found while "
                         "trying to load target %s" % (dependency, target))
        target_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(target_node)

  flat_list = root_node.FlattenToList()

  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).
  if len(flat_list) != len(targets):
    if not root_node.dependents:
      # If all targets have dependencies, add the first target as a dependent
      # of root_node so that the cycle can be discovered from root_node.
      target = targets.keys()[0]
      target_node = dependency_nodes[target]
      target_node.dependencies.append(root_node)
      root_node.dependents.append(target_node)

    cycles = []
    for cycle in root_node.FindCycles():
      paths = [node.ref for node in cycle]
      cycles.append('Cycle: %s' % ' -> '.join(paths))
    raise DependencyGraphNode.CircularException(
        'Cycles in dependency graph detected:\n' + '\n'.join(cycles))

  return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
  """Checks that the dependency graph between .gyp files is acyclic.

  Builds one DependencyGraphNode per .gyp file that contains a target
  (a file referring back to itself is permitted and ignored) and raises
  DependencyGraphNode.CircularException if flattening the graph leaves any
  file unvisited, which indicates a cycle.
  """
  # Create a DependencyGraphNode for each gyp file containing a target. Put
  # it into a dict for easy access.
  dependency_nodes = {}
  for target in targets.iterkeys():
    build_file = gyp.common.BuildFile(target)
    if not build_file in dependency_nodes:
      dependency_nodes[build_file] = DependencyGraphNode(build_file)

  # Set up the dependency links.
  for target, spec in targets.iteritems():
    build_file = gyp.common.BuildFile(target)
    build_file_node = dependency_nodes[build_file]
    target_dependencies = spec.get('dependencies', [])
    for dependency in target_dependencies:
      try:
        dependency_build_file = gyp.common.BuildFile(dependency)
      # 'as' form is valid since Python 2.6 and keeps this parseable by
      # Python 3 tooling, unlike the old comma form.
      except GypError as e:
        gyp.common.ExceptionAppend(
            e, 'while computing dependencies of .gyp file %s' % build_file)
        raise

      if dependency_build_file == build_file:
        # A .gyp file is allowed to refer back to itself.
        continue
      dependency_node = dependency_nodes.get(dependency_build_file)
      if not dependency_node:
        # Message spelling fixed ("Dependancy" -> "Dependency").
        raise GypError("Dependency '%s' not found" % dependency_build_file)
      if dependency_node not in build_file_node.dependencies:
        build_file_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(build_file_node)

  # Files that have no dependencies are treated as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for build_file_node in dependency_nodes.itervalues():
    if len(build_file_node.dependencies) == 0:
      build_file_node.dependencies.append(root_node)
      root_node.dependents.append(build_file_node)

  flat_list = root_node.FlattenToList()

  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).
  if len(flat_list) != len(dependency_nodes):
    if not root_node.dependents:
      # If all files have dependencies, add the first file as a dependent
      # of root_node so that the cycle can be discovered from root_node.
      file_node = dependency_nodes.values()[0]
      file_node.dependencies.append(root_node)
      root_node.dependents.append(file_node)

    cycles = []
    for cycle in root_node.FindCycles():
      paths = [node.ref for node in cycle]
      cycles.append('Cycle: %s' % ' -> '.join(paths))
    raise DependencyGraphNode.CircularException(
        'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
  """Merges the |key| settings dict from each target's dependencies into the
  target itself.

  |key| must be 'all_dependent_settings', 'direct_dependent_settings', or
  'link_settings'; the choice determines which set of dependencies is
  consulted.  Dependencies lacking a |key| dict are skipped.
  """
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    node = dependency_nodes[target]

    if key == 'all_dependent_settings':
      dependencies = node.DeepDependencies()
    elif key == 'direct_dependent_settings':
      dependencies = node.DirectAndImportedDependencies(targets)
    elif key == 'link_settings':
      dependencies = node.DependenciesForLinkSettings(targets)
    else:
      raise GypError("DoDependentSettings doesn't know how to determine "
                     'dependencies for ' + key)

    for dependency in dependencies:
      dependency_dict = targets[dependency]
      if key not in dependency_dict:
        continue
      dependency_build_file = gyp.common.BuildFile(dependency)
      MergeDicts(target_dict, dependency_dict[key],
                 build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    sort_dependencies):
  """Rewrites each target's 'dependencies' list for link correctness.

  Static libraries shed dependencies on other (non-hard) static libraries;
  linkable targets gain entries for everything that must be linked into
  them.  The original list is preserved in 'dependencies_original' for
  static libraries.  If |sort_dependencies| is true, linkable targets'
  lists are reordered from dependents to dependencies.
  """
  # Recompute target "dependencies" properties. For each static library
  # target, remove "dependencies" entries referring to other static libraries,
  # unless the dependency has the "hard_dependency" attribute set. For each
  # linkable target, add a "dependencies" entry referring to all of the
  # target's computed list of link dependencies (including static libraries)
  # if no such entry is already present.
  for target in flat_list:
    target_dict = targets[target]
    target_type = target_dict['type']

    if target_type == 'static_library':
      if not 'dependencies' in target_dict:
        continue

      # Keep a copy of the pre-adjustment list for generators that need it.
      target_dict['dependencies_original'] = target_dict.get(
          'dependencies', [])[:]

      # A static library should not depend on another static library unless
      # the dependency relationship is "hard," which should only be done when
      # a dependent relies on some side effect other than just the build
      # product, like a rule or action output. Further, if a target has a
      # non-hard dependency, but that dependency exports a hard dependency,
      # the non-hard dependency can safely be removed, but the exported hard
      # dependency must be added to the target to keep the same dependency
      # ordering.
      dependencies = \
          dependency_nodes[target].DirectAndImportedDependencies(targets)
      index = 0
      while index < len(dependencies):
        dependency = dependencies[index]
        dependency_dict = targets[dependency]

        # Remove every non-hard static library dependency and remove every
        # non-static library dependency that isn't a direct dependency.
        if (dependency_dict['type'] == 'static_library' and \
            not dependency_dict.get('hard_dependency', False)) or \
           (dependency_dict['type'] != 'static_library' and \
            not dependency in target_dict['dependencies']):
          # Take the dependency out of the list, and don't increment index
          # because the next dependency to analyze will shift into the index
          # formerly occupied by the one being removed.
          del dependencies[index]
        else:
          index = index + 1

      # Update the dependencies. If the dependencies list is empty, it's not
      # needed, so unhook it.
      if len(dependencies) > 0:
        target_dict['dependencies'] = dependencies
      else:
        del target_dict['dependencies']

    elif target_type in linkable_types:
      # Get a list of dependency targets that should be linked into this
      # target. Add them to the dependencies list if they're not already
      # present.

      link_dependencies = \
          dependency_nodes[target].DependenciesToLinkAgainst(targets)
      for dependency in link_dependencies:
        # A target never needs to link against itself.
        if dependency == target:
          continue
        if not 'dependencies' in target_dict:
          target_dict['dependencies'] = []
        if not dependency in target_dict['dependencies']:
          target_dict['dependencies'].append(dependency)
      # Sort the dependencies list in the order from dependents to dependencies.
      # e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
      # Note: flat_list is already sorted in the order from dependencies to
      # dependents.
      if sort_dependencies and 'dependencies' in target_dict:
        target_dict['dependencies'] = [dep for dep in reversed(flat_list)
                                       if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
# Matches any item that MakePathRelative must return unmodified: one of the
# special lead characters -, /, $, <, >, ^ optionally preceded by a quote
# (see the comment block inside MakePathRelative for what each one means).
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
  """Rebases relative path |item| from |fro_file|'s directory so that it is
  relative to |to_file|'s directory.

  |item| is returned unchanged when both build files are the same, or when it
  begins (optionally after a quote character) with one of these:
    /  already absolute (shortcut optimization; such paths would be
       returned as absolute anyway)
    $  build environment variable
    -  build environment flag (such as -lapr-1 in a "libraries" section)
    <, >, ^  gyp's own variable and command expansions (see ExpandVariables)
  """
  if to_file == fro_file or exception_re.match(item):
    return item
  # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
  # temporary measure. This should really be addressed by keeping all paths
  # in POSIX until actual project generation.
  prefix = gyp.common.RelativePath(os.path.dirname(fro_file),
                                   os.path.dirname(to_file))
  rebased = os.path.normpath(os.path.join(prefix, item)).replace('\\', '/')
  # normpath drops a trailing slash; restore it when the input carried one.
  if item[-1] == '/':
    rebased += '/'
  return rebased
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  """Merges list |fro| into list |to| in place, copying nested containers.

  If |is_paths| is true, string items are rebased with MakePathRelative from
  |fro_file| to |to_file|.  If |append| is true, new items go at the end of
  |to|; otherwise they are prepended in their original relative order.
  "Singleton" items (strings/ints, except strings starting with '-') appear
  at most once in |to|.
  """
  # Python documentation recommends objects which do not support hash
  # set this value to None. Python library objects follow this rule.
  is_hashable = lambda val: val.__hash__

  # If x is hashable, returns whether x is in s. Else returns whether x is in l.
  def is_in_set_or_list(x, s, l):
    if is_hashable(x):
      return x in s
    return x in l

  prepend_index = 0

  # Make membership testing of hashables in |to| (in particular, strings)
  # faster.
  hashable_to_set = set(x for x in to if is_hashable(x))
  for item in fro:
    singleton = False
    if type(item) in (str, int):
      # The cheap and easy case.
      if is_paths:
        to_item = MakePathRelative(to_file, fro_file, item)
      else:
        to_item = item

      if not (type(item) is str and item.startswith('-')):
        # Any string that doesn't begin with a "-" is a singleton - it can
        # only appear once in a list, to be enforced by the list merge append
        # or prepend.
        singleton = True
    elif type(item) is dict:
      # Make a copy of the dictionary, continuing to look for paths to fix.
      # The other intelligent aspects of merge processing won't apply because
      # item is being merged into an empty dict.
      to_item = {}
      MergeDicts(to_item, item, to_file, fro_file)
    elif type(item) is list:
      # Recurse, making a copy of the list. If the list contains any
      # descendant dicts, path fixing will occur. Note that here, custom
      # values for is_paths and append are dropped; those are only to be
      # applied to |to| and |fro|, not sublists of |fro|. append shouldn't
      # matter anyway because the new |to_item| list is empty.
      to_item = []
      MergeLists(to_item, item, to_file, fro_file)
    else:
      raise TypeError(
          'Attempt to merge list item of unsupported type ' + \
          item.__class__.__name__)

    if append:
      # If appending a singleton that's already in the list, don't append.
      # This ensures that the earliest occurrence of the item will stay put.
      if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
        to.append(to_item)
        if is_hashable(to_item):
          hashable_to_set.add(to_item)
    else:
      # If prepending a singleton that's already in the list, remove the
      # existing instance and proceed with the prepend. This ensures that the
      # item appears at the earliest possible position in the list.
      while singleton and to_item in to:
        to.remove(to_item)

      # Don't just insert everything at index 0. That would prepend the new
      # items to the list in reverse order, which would be an unwelcome
      # surprise.
      to.insert(prepend_index, to_item)
      if is_hashable(to_item):
        hashable_to_set.add(to_item)
      prepend_index = prepend_index + 1
def MergeDicts(to, fro, to_file, fro_file):
  """Merges dict |fro| into dict |to| in place, copying nested containers.

  Scalars overwrite, dicts merge recursively, and lists are merged by
  MergeLists with a policy chosen by the from-key's suffix ('=' replace,
  '+' prepend, '?' only if absent, none append).  Path-section values
  (per IsPathSection) are rebased from |fro_file| to |to_file|.  Raises
  TypeError on incompatible value types and GypError on conflicting list
  policies.
  """
  # I wanted to name the parameter "from" but it's a Python keyword...
  for k, v in fro.iteritems():
    # It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
    # copy semantics. Something else may want to merge from the |fro| dict
    # later, and having the same dict ref pointed to twice in the tree isn't
    # what anyone wants considering that the dicts may subsequently be
    # modified.
    if k in to:
      bad_merge = False
      if type(v) in (str, int):
        if type(to[k]) not in (str, int):
          bad_merge = True
      elif type(v) is not type(to[k]):
        bad_merge = True

      if bad_merge:
        raise TypeError(
            'Attempt to merge dict value of type ' + v.__class__.__name__ + \
            ' into incompatible type ' + to[k].__class__.__name__ + \
            ' for key ' + k)
    if type(v) in (str, int):
      # Overwrite the existing value, if any. Cheap and easy.
      is_path = IsPathSection(k)
      if is_path:
        to[k] = MakePathRelative(to_file, fro_file, v)
      else:
        to[k] = v
    elif type(v) is dict:
      # Recurse, guaranteeing copies will be made of objects that require it.
      if not k in to:
        to[k] = {}
      MergeDicts(to[k], v, to_file, fro_file)
    elif type(v) is list:
      # Lists in dicts can be merged with different policies, depending on
      # how the key in the "from" dict (k, the from-key) is written.
      #
      # If the from-key has          ...the to-list will have this action
      # this character appended:...  applied when receiving the from-list:
      #                           =  replace
      #                           +  prepend
      #                           ?  set, only if to-list does not yet exist
      #                      (none)  append
      #
      # This logic is list-specific, but since it relies on the associated
      # dict key, it's checked in this dict-oriented function.
      ext = k[-1]
      append = True
      if ext == '=':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '?']
        to[list_base] = []
      elif ext == '+':
        list_base = k[:-1]
        lists_incompatible = [list_base + '=', list_base + '?']
        append = False
      elif ext == '?':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '=', list_base + '+']
      else:
        list_base = k
        lists_incompatible = [list_base + '=', list_base + '?']

      # Some combinations of merge policies appearing together are meaningless.
      # It's stupid to replace and append simultaneously, for example. Append
      # and prepend are the only policies that can coexist.
      for list_incompatible in lists_incompatible:
        if list_incompatible in fro:
          raise GypError('Incompatible list policies ' + k + ' and ' +
                         list_incompatible)

      if list_base in to:
        if ext == '?':
          # If the key ends in "?", the list will only be merged if it doesn't
          # already exist.
          continue
        elif type(to[list_base]) is not list:
          # This may not have been checked above if merging in a list with an
          # extension character.
          raise TypeError(
              'Attempt to merge dict value of type ' + v.__class__.__name__ + \
              ' into incompatible type ' + to[list_base].__class__.__name__ + \
              ' for key ' + list_base + '(' + k + ')')
      else:
        to[list_base] = []

      # Call MergeLists, which will make copies of objects that require it.
      # MergeLists can recurse back into MergeDicts, although this will be
      # to make copies of dicts (with paths fixed), there will be no
      # subsequent dict "merging" once entering a list because lists are
      # always replaced, appended to, or prepended to.
      is_paths = IsPathSection(list_base)
      MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
    else:
      raise TypeError(
          'Attempt to merge dict value of unsupported type ' + \
          v.__class__.__name__ + ' for key ' + k)
def MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, visited):
  """Recursively merges |configuration| and everything it inherits from
  (via 'inherit_from') into |new_configuration_dict|.

  Configurations already present in |visited| are skipped, which stops
  infinite recursion on inheritance cycles.
  """
  if configuration in visited:
    return

  configuration_dict = target_dict['configurations'][configuration]

  # Merge parents first so that this configuration's own settings take
  # precedence in the final merge below.
  for parent in configuration_dict.get('inherit_from', []):
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, parent, visited + [configuration])

  MergeDicts(new_configuration_dict, configuration_dict,
             build_file, build_file)

  # 'abstract' marks inheritance-only configurations; it must not leak into
  # the merged result.
  new_configuration_dict.pop('abstract', None)
def SetUpConfigurations(target, target_dict):
  """Expands |target_dict|'s 'configurations' in place.

  Each concrete (non-abstract) configuration receives a copy of the target's
  configurable settings merged with its own (and inherited) settings;
  abstract configurations and the target-level copies of configurable keys
  are then removed.  Raises GypError if a configuration contains a key from
  invalid_configuration_keys.
  """
  # key_suffixes is a list of key suffixes that might appear on key names.
  # These suffixes are handled in conditional evaluations (for =, +, and ?)
  # and rules/exclude processing (for ! and /). Keys with these suffixes
  # should be treated the same as keys without.
  key_suffixes = ['=', '+', '?', '!', '/']

  build_file = gyp.common.BuildFile(target)

  # Provide a single configuration by default if none exists.
  # TODO(mark): Signal an error if default_configurations exists but
  # configurations does not.
  if not 'configurations' in target_dict:
    target_dict['configurations'] = {'Default': {}}
  if not 'default_configuration' in target_dict:
    # Fall back to the alphabetically first concrete configuration.
    concrete = [i for (i, config) in target_dict['configurations'].iteritems()
                if not config.get('abstract')]
    target_dict['default_configuration'] = sorted(concrete)[0]

  merged_configurations = {}
  configs = target_dict['configurations']
  for (configuration, old_configuration_dict) in configs.iteritems():
    # Skip abstract configurations (saves work only).
    if old_configuration_dict.get('abstract'):
      continue
    # Configurations inherit (most) settings from the enclosing target scope.
    # Get the inheritance relationship right by making a copy of the target
    # dict.
    new_configuration_dict = {}
    for (key, target_val) in target_dict.iteritems():
      key_ext = key[-1:]
      if key_ext in key_suffixes:
        key_base = key[:-1]
      else:
        key_base = key
      if not key_base in non_configuration_keys:
        new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)

    # Merge in configuration (with all its parents first).
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, [])

    merged_configurations[configuration] = new_configuration_dict

  # Put the new configurations back into the target dict as a configuration.
  for configuration in merged_configurations.keys():
    target_dict['configurations'][configuration] = (
        merged_configurations[configuration])

  # Now drop all the abstract ones.
  for configuration in target_dict['configurations'].keys():
    old_configuration_dict = target_dict['configurations'][configuration]
    if old_configuration_dict.get('abstract'):
      del target_dict['configurations'][configuration]

  # Now that all of the target's configurations have been built, go through
  # the target dict's keys and remove everything that's been moved into a
  # "configurations" section.
  delete_keys = []
  for key in target_dict:
    key_ext = key[-1:]
    if key_ext in key_suffixes:
      key_base = key[:-1]
    else:
      key_base = key
    if not key_base in non_configuration_keys:
      delete_keys.append(key)
  for key in delete_keys:
    del target_dict[key]

  # Check the configurations to see if they contain invalid keys.
  for configuration in target_dict['configurations'].keys():
    configuration_dict = target_dict['configurations'][configuration]
    for key in configuration_dict.keys():
      if key in invalid_configuration_keys:
        raise GypError('%s not allowed in the %s configuration, found in '
                       'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
"""Process regular expression and exclusion-based filters on lists.
An exclusion list is in a dict key named with a trailing "!", like
"sources!". Every item in such a list is removed from the associated
main list, which in this example, would be "sources". Removed items are
placed into a "sources_excluded" list in the dict.
Regular expression (regex) filters are contained in dict keys named with a
trailing "/", such as "sources/" to operate on the "sources" list. Regex
filters in a dict take the form:
'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
['include', '_mac\\.cc$'] ],
The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
_win.cc. The second filter then includes all files ending in _mac.cc that
are now or were once in the "sources" list. Items matching an "exclude"
filter are subject to the same processing as would occur if they were listed
by name in an exclusion list (ending in "!"). Items matching an "include"
filter are brought back into the main list if previously excluded by an
exclusion list or exclusion regex filter. Subsequent matching "exclude"
patterns can still cause items to be excluded after matching an "include".
"""
# Look through the dictionary for any lists whose keys end in "!" or "/".
# These are lists that will be treated as exclude lists and regular
# expression-based exclude/include lists. Collect the lists that are
# needed first, looking for the lists that they operate on, and assemble
# then into |lists|. This is done in a separate loop up front, because
# the _included and _excluded keys need to be added to the_dict, and that
# can't be done while iterating through it.
lists = []
del_lists = []
for key, value in the_dict.iteritems():
operation = key[-1]
if operation != '!' and operation != '/':
continue
if type(value) is not list:
raise ValueError(name + ' key ' + key + ' must be list, not ' + \
value.__class__.__name__)
list_key = key[:-1]
if list_key not in the_dict:
# This happens when there's a list like "sources!" but no corresponding
# "sources" list. Since there's nothing for it to operate on, queue up
# the "sources!" list for deletion now.
del_lists.append(key)
continue
if type(the_dict[list_key]) is not list:
value = the_dict[list_key]
raise ValueError(name + ' key ' + list_key + \
' must be list, not ' + \
value.__class__.__name__ + ' when applying ' + \
{'!': 'exclusion', '/': 'regex'}[operation])
if not list_key in lists:
lists.append(list_key)
# Delete the lists that are known to be unneeded at this point.
for del_list in del_lists:
del the_dict[del_list]
for list_key in lists:
the_list = the_dict[list_key]
# Initialize the list_actions list, which is parallel to the_list. Each
# item in list_actions identifies whether the corresponding item in
# the_list should be excluded, unconditionally preserved (included), or
# whether no exclusion or inclusion has been applied. Items for which
# no exclusion or inclusion has been applied (yet) have value -1, items
# excluded have value 0, and items included have value 1. Includes and
# excludes override previous actions. All items in list_actions are
# initialized to -1 because no excludes or includes have been processed
# yet.
list_actions = list((-1,) * len(the_list))
exclude_key = list_key + '!'
if exclude_key in the_dict:
for exclude_item in the_dict[exclude_key]:
for index in xrange(0, len(the_list)):
if exclude_item == the_list[index]:
# This item matches the exclude_item, so set its action to 0
# (exclude).
list_actions[index] = 0
# The "whatever!" list is no longer needed, dump it.
del the_dict[exclude_key]
regex_key = list_key + '/'
if regex_key in the_dict:
for regex_item in the_dict[regex_key]:
[action, pattern] = regex_item
pattern_re = re.compile(pattern)
if action == 'exclude':
# This item matches an exclude regex, so set its value to 0 (exclude).
action_value = 0
elif action == 'include':
# This item matches an include regex, so set its value to 1 (include).
action_value = 1
else:
# This is an action that doesn't make any sense.
raise ValueError('Unrecognized action ' + action + ' in ' + name + \
' key ' + regex_key)
for index in xrange(0, len(the_list)):
list_item = the_list[index]
if list_actions[index] == action_value:
# Even if the regex matches, nothing will change so continue (regex
# searches are expensive).
continue
if pattern_re.search(list_item):
# Regular expression match.
list_actions[index] = action_value
# The "whatever/" list is no longer needed, dump it.
del the_dict[regex_key]
# Add excluded items to the excluded list.
#
# Note that exclude_key ("sources!") is different from excluded_key
# ("sources_excluded"). The exclude_key list is input and it was already
# processed and deleted; the excluded_key list is output and it's about
# to be created.
excluded_key = list_key + '_excluded'
if excluded_key in the_dict:
raise GypError(name + ' key ' + excluded_key +
' must not be present prior '
' to applying exclusion/regex filters for ' + list_key)
excluded_list = []
# Go backwards through the list_actions list so that as items are deleted,
# the indices of items that haven't been seen yet don't shift. That means
# that things need to be prepended to excluded_list to maintain them in the
# same order that they existed in the_list.
for index in xrange(len(list_actions) - 1, -1, -1):
if list_actions[index] == 0:
# Dump anything with action 0 (exclude). Keep anything with action 1
# (include) or -1 (no include or exclude seen for the item).
excluded_list.insert(0, the_list[index])
del the_list[index]
# If anything was excluded, put the excluded list into the_dict at
# excluded_key.
if len(excluded_list) > 0:
the_dict[excluded_key] = excluded_list
# Now recurse into subdicts and lists that may contain dicts.
for key, value in the_dict.iteritems():
if type(value) is dict:
ProcessListFiltersInDict(key, value)
elif type(value) is list:
ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
  """Recursively apply exclusion/regex list filters inside |the_list|.

  Each dict element is handed to ProcessListFiltersInDict; nested lists are
  descended into.  Non-container elements are left untouched.
  """
  for element in the_list:
    element_type = type(element)
    if element_type is dict:
      ProcessListFiltersInDict(name, element)
    elif element_type is list:
      ProcessListFiltersInList(name, element)
def ValidateTargetType(target, target_dict):
  """Ensures the 'type' field on the target is one of the known types.

  Additionally rejects the 'standalone_static_library' flag on any target
  whose type is not 'static_library'.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec.

  Raises GypError on error; returns None otherwise.
  """
  valid_types = ('executable', 'loadable_module',
                 'static_library', 'shared_library',
                 'mac_kernel_extension', 'none')
  declared_type = target_dict.get('type', None)
  if declared_type not in valid_types:
    raise GypError("Target %s has an invalid target type '%s'. "
                   "Must be one of %s." %
                   (target, declared_type, '/'.join(valid_types)))
  standalone = target_dict.get('standalone_static_library', 0)
  if standalone and declared_type != 'static_library':
    raise GypError('Target %s has type %s but standalone_static_library flag is'
                   ' only valid for static_library type.' % (target,
                                                             declared_type))
def ValidateSourcesInTarget(target, target_dict, build_file,
                            duplicate_basename_check):
  """Reject static_library targets whose compiled sources share a basename.

  Mac libtool cannot archive two objects with the same name, so colliding
  basenames among compiled sources are fatal.  The check is a no-op when
  |duplicate_basename_check| is false or the target is not a static library.
  Raises GypError when collisions are found.
  """
  if not duplicate_basename_check:
    return
  if target_dict.get('type', None) != 'static_library':
    return
  compiled_exts = ('.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S')
  by_basename = {}
  for source in target_dict.get('sources', []):
    root, ext = os.path.splitext(source)
    if ext not in compiled_exts:
      # Headers, data files, etc. never become objects, so they can't clash.
      continue
    by_basename.setdefault(os.path.basename(root), []).append(source)
  error = ''
  for basename, files in by_basename.items():
    if len(files) > 1:
      error += ' %s: %s\n' % (basename, ' '.join(files))
  if error:
    print('static library %s has several files with the same basename:\n' %
          target + error + 'libtool on Mac cannot handle that. Use '
          '--no-duplicate-basename-check to disable this validation.')
    raise GypError('Duplicate basenames in sources section, see list above')
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
  """Ensures that the rules sections in target_dict are valid and consistent,
  and determines which sources they apply to.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec containing "rules" and "sources" lists.
    extra_sources_for_rules: a list of keys to scan for rule matches in
        addition to 'sources'.

  Each rule that matches at least one source gets a 'rule_sources' list
  added to it.  Raises GypError on duplicate rule names, extensions claimed
  by more than one rule, or a pre-existing 'rule_sources' key.
  """
  seen_names = {}       # maps rule_name -> rule dict
  seen_extensions = {}  # maps extension (without leading dot) -> rule dict
  for rule in target_dict.get('rules', []):
    # Make sure that there's no conflict among rule names and extensions.
    rule_name = rule['rule_name']
    if rule_name in seen_names:
      raise GypError('rule %s exists in duplicate, target %s' %
                     (rule_name, target))
    seen_names[rule_name] = rule

    extension = rule['extension']
    if extension.startswith('.'):
      extension = extension[1:]
    if extension in seen_extensions:
      raise GypError(('extension %s associated with multiple rules, ' +
                      'target %s rules %s and %s') %
                     (extension, target,
                      seen_extensions[extension]['rule_name'],
                      rule_name))
    seen_extensions[extension] = rule

    # 'rule_sources' is an output of this function; it must not be supplied.
    if 'rule_sources' in rule:
      raise GypError(
          'rule_sources must not exist in input, target %s rule %s' %
          (target, rule_name))

    # Collect every source (from 'sources' plus any generator-specified
    # extra lists) whose extension matches this rule.
    matching_sources = []
    source_keys = ['sources']
    source_keys.extend(extra_sources_for_rules)
    for source_key in source_keys:
      for source in target_dict.get(source_key, []):
        source_extension = os.path.splitext(source)[1]
        if source_extension.startswith('.'):
          source_extension = source_extension[1:]
        if source_extension == extension:
          matching_sources.append(source)
    if matching_sources:
      rule['rule_sources'] = matching_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
  """Type-checks the optional 'run_as' section of a target spec.

  A valid 'run_as' is a dict with a truthy list-valued 'action', plus an
  optional string 'working_directory' and an optional dict 'environment'.
  Raises GypError describing the first malformed field; returns silently
  when 'run_as' is absent or valid.
  """
  target_name = target_dict.get('target_name')
  run_as = target_dict.get('run_as')
  if not run_as:
    # No run_as section: nothing to validate.
    return
  where = (target_name, build_file)
  if type(run_as) is not dict:
    raise GypError(
        "The 'run_as' in target %s from file %s should be a dictionary." %
        where)
  action = run_as.get('action')
  if not action:
    raise GypError(
        "The 'run_as' in target %s from file %s must have an 'action' "
        "section." % where)
  if type(action) is not list:
    raise GypError(
        "The 'action' for 'run_as' in target %s from file %s must be a "
        "list." % where)
  working_directory = run_as.get('working_directory')
  if working_directory and type(working_directory) is not str:
    raise GypError(
        "The 'working_directory' for 'run_as' in target %s in file %s "
        "should be a string." % where)
  environment = run_as.get('environment')
  if environment and type(environment) is not dict:
    raise GypError(
        "The 'environment' for 'run_as' in target %s in file %s should be "
        "a dictionary." % where)
def ValidateActionsInTarget(target, target_dict, build_file):
  """Validates the inputs to the actions in a target.

  Every action must carry an 'action_name' and an 'inputs' key (an empty
  list is fine, absence is not); if an 'action' command is present its
  first element must be non-empty.  Raises GypError on violation.
  """
  target_name = target_dict.get('target_name')
  for action in target_dict.get('actions', []):
    if not action.get('action_name'):
      raise GypError("Anonymous action in target %s. "
                     "An action must have an 'action_name' field." %
                     target_name)
    if action.get('inputs', None) is None:
      raise GypError('Action in target %s has no inputs.' % target_name)
    command = action.get('action')
    # Only the first command element is checked for emptiness, matching the
    # long-standing behavior of this validation.
    if command and not command[0]:
      raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
  """Recursively converts every int key and int value in |the_dict| to str.

  Nested dicts and lists are converted in place through
  TurnIntIntoStrInDict / TurnIntIntoStrInList; values of any other type are
  left untouched.
  """
  # Snapshot the items so re-keying entries below cannot disturb iteration.
  for key, value in list(the_dict.items()):
    value_type = type(value)
    if value_type is int:
      value = str(value)
      the_dict[key] = value
    elif value_type is dict:
      TurnIntIntoStrInDict(value)
    elif value_type is list:
      TurnIntIntoStrInList(value)
    if type(key) is int:
      # Re-key the (possibly just converted) value under the string form.
      del the_dict[key]
      the_dict[str(key)] = value
def TurnIntIntoStrInList(the_list):
  """Recursively converts every int in |the_list| to str, in place.

  Nested lists and dicts are descended into via TurnIntIntoStrInList /
  TurnIntIntoStrInDict; other element types are kept as-is.
  """
  for index, item in enumerate(the_list):
    if type(item) is int:
      the_list[index] = str(item)
    elif type(item) is dict:
      TurnIntIntoStrInDict(item)
    elif type(item) is list:
      TurnIntIntoStrInList(item)
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
                         data):
  """Return only the targets that are deep dependencies of |root_targets|.

  Also prunes each build file's 'targets' list inside |data| so it agrees
  with the surviving set.  Returns (wanted_targets, wanted_flat_list).
  Raises GypError when a requested root target cannot be resolved.
  """
  # Resolve each (possibly unqualified) root name to fully qualified form(s).
  qualified_roots = []
  for root in root_targets:
    root = root.strip()
    matches = gyp.common.FindQualifiedTargets(root, flat_list)
    if not matches:
      raise GypError("Could not find target %s" % root)
    qualified_roots.extend(matches)

  # Keep each root plus everything it transitively depends on.
  wanted_targets = {}
  for root in qualified_roots:
    wanted_targets[root] = targets[root]
    for dependency in dependency_nodes[root].DeepDependencies():
      wanted_targets[dependency] = targets[dependency]

  # Preserve original flat_list ordering for the survivors.
  wanted_flat_list = [t for t in flat_list if t in wanted_targets]

  # Prune unwanted targets from each build_file's data dict.
  for build_file in data['target_build_files']:
    if not 'targets' in data[build_file]:
      continue
    surviving = []
    for target in data[build_file]['targets']:
      qualified_name = gyp.common.QualifiedTarget(build_file,
                                                  target['target_name'],
                                                  target['toolset'])
      if qualified_name in wanted_targets:
        surviving.append(target)
    data[build_file]['targets'] = surviving

  return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
  """Verify that no two targets in the same directory share the same name.

  Arguments:
    targets: A list of targets in the form 'path/to/file.gyp:target_name'.

  Raises GypError naming both offending .gyp files on the first collision.
  """
  # Maps 'subdirectory:target_name' to the .gyp file that first claimed it.
  used = {}
  for target in targets:
    # Split 'path/to/file.gyp:target_name' into its path and name halves.
    path, name = target.rsplit(':', 1)
    subdir, gyp_file = os.path.split(path)
    # Use '.' for the current directory '', so that the error messages make
    # more sense.
    subdir = subdir or '.'
    key = '%s:%s' % (subdir, name)
    if key in used:
      raise GypError('Duplicate target name "%s" in directory "%s" used both '
                     'in "%s" and "%s".' % (name, subdir, gyp_file, used[key]))
    used[key] = gyp_file
def SetGeneratorGlobals(generator_input_info):
  """Seeds this module's generator configuration globals.

  Combines the generator-specific values in |generator_input_info| with the
  module's base defaults for path_sections and non_configuration_keys, and
  records the generator's toolset and filelist capabilities.
  """
  global path_sections
  global non_configuration_keys
  global multiple_toolsets
  global generator_filelist_paths

  # Defaults first, then the generator-specific additions.
  path_sections = set(base_path_sections) | set(
      generator_input_info['path_sections'])
  non_configuration_keys = (base_non_configuration_keys +
                            generator_input_info['non_configuration_keys'])
  multiple_toolsets = generator_input_info[
      'generator_supports_multiple_toolsets']
  generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
         circular_check, duplicate_basename_check, parallel, root_targets):
  """Loads, processes, and finalizes targets from |build_files|.

  Orchestrates the whole input pipeline: build-file loading (optionally in
  parallel), dependency qualification/pruning, dependent-settings
  propagation, late/latelate variable expansion, configuration setup, list
  filtering, and per-target validation.

  Returns [flat_list, targets, data]: flat_list is the dependency-ordered
  list of qualified target names, targets maps qualified names to target
  dicts, and data maps build-file paths to their processed contents.
  """
  SetGeneratorGlobals(generator_input_info)
  # A generator can have other lists (in addition to sources) be processed
  # for rules.
  extra_sources_for_rules = generator_input_info['extra_sources_for_rules']

  # Load build files.  This loads every target-containing build file into
  # the |data| dictionary such that the keys to |data| are build file names,
  # and the values are the entire build file contents after "early" or "pre"
  # processing has been done and includes have been resolved.
  # NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
  # well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
  # track of the keys corresponding to "target" files.
  data = {'target_build_files': set()}
  # Normalize paths everywhere.  This is important because paths will be
  # used as keys to the data dict and for references between input files.
  build_files = set(map(os.path.normpath, build_files))
  if parallel:
    LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
                                 check, generator_input_info)
  else:
    aux_data = {}
    for build_file in build_files:
      try:
        LoadTargetBuildFile(build_file, data, aux_data,
                            variables, includes, depth, check, True)
      except Exception, e:
        # Annotate the exception with the file being loaded, then re-raise.
        gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
        raise

  # Build a dict to access each target's subdict by qualified name.
  targets = BuildTargetsDict(data)

  # Fully qualify all dependency links.
  QualifyDependencies(targets)

  # Remove self-dependencies from targets that have 'prune_self_dependencies'
  # set to 1.
  RemoveSelfDependencies(targets)

  # Expand dependencies specified as build_file:*.
  ExpandWildcardDependencies(targets, data)

  # Remove all dependencies marked as 'link_dependency' from the targets of
  # type 'none'.
  RemoveLinkDependenciesFromNoneTargets(targets)

  # Apply exclude (!) and regex (/) list filters only for dependency_sections.
  for target_name, target_dict in targets.iteritems():
    # Filter the dependency sections in isolation so other list keys are not
    # touched at this stage.
    tmp_dict = {}
    for key_base in dependency_sections:
      for op in ('', '!', '/'):
        key = key_base + op
        if key in target_dict:
          tmp_dict[key] = target_dict[key]
          del target_dict[key]
    ProcessListFiltersInDict(target_name, tmp_dict)
    # Write the results back to |target_dict|.
    for key in tmp_dict:
      target_dict[key] = tmp_dict[key]

  # Make sure every dependency appears at most once.
  RemoveDuplicateDependencies(targets)

  if circular_check:
    # Make sure that any targets in a.gyp don't contain dependencies in other
    # .gyp files that further depend on a.gyp.
    VerifyNoGYPFileCircularDependencies(targets)

  [dependency_nodes, flat_list] = BuildDependencyList(targets)

  if root_targets:
    # Remove, from |targets| and |flat_list|, the targets that are not deep
    # dependencies of the targets specified in |root_targets|.
    targets, flat_list = PruneUnwantedTargets(
        targets, flat_list, dependency_nodes, root_targets, data)

  # Check that no two targets in the same directory have the same name.
  VerifyNoCollidingTargets(flat_list)

  # Handle dependent settings of various types.
  for settings_type in ['all_dependent_settings',
                        'direct_dependent_settings',
                        'link_settings']:
    DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)

    # Take out the dependent settings now that they've been published to all
    # of the targets that require them.
    for target in flat_list:
      if settings_type in targets[target]:
        del targets[target][settings_type]

  # Make sure static libraries don't declare dependencies on other static
  # libraries, but that linkables depend on all unlinked static libraries
  # that they need so that their link steps will be correct.
  gii = generator_input_info
  if gii['generator_wants_static_library_dependencies_adjusted']:
    AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    gii['generator_wants_sorted_dependencies'])

  # Apply "post"/"late"/"target" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATE, variables, build_file)

  # Move everything that can go into a "configurations" section into one.
  for target in flat_list:
    target_dict = targets[target]
    SetUpConfigurations(target, target_dict)

  # Apply exclude (!) and regex (/) list filters.
  for target in flat_list:
    target_dict = targets[target]
    ProcessListFiltersInDict(target, target_dict)

  # Apply "latelate" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATELATE, variables, build_file)

  # Make sure that the rules make sense, and build up rule_sources lists as
  # needed.  Not all generators will need to use the rule_sources lists, but
  # some may, and it seems best to build the list in a common spot.
  # Also validate actions and run_as elements in targets.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ValidateTargetType(target, target_dict)
    ValidateSourcesInTarget(target, target_dict, build_file,
                            duplicate_basename_check)
    ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
    ValidateRunAsInTarget(target, target_dict, build_file)
    ValidateActionsInTarget(target, target_dict, build_file)

  # Generators might not expect ints.  Turn them into strs.
  TurnIntIntoStrInDict(data)

  # TODO(mark): Return |data| for now because the generator needs a list of
  # build files that came in.  In the future, maybe it should just accept
  # a list, and not the whole data dict.
  return [flat_list, targets, data]
| unlicense |
allanino/nupic | tests/swarming/nupic/swarming/experiments/delta/permutations.py | 38 | 3906 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by ExpGenerator to generate the actual
permutations.py file by replacing $XXXXXXXX tokens with desired values.
This permutations.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
import os
from nupic.swarming.permutationhelpers import *
# The name of the field being predicted. Any allowed permutation MUST contain
# the prediction field.
# (generated from PREDICTION_FIELD)
# Field whose future values the swarm predicts; every permutation evaluated
# by the swarm includes this field.
predictedField = 'value'

permutations = {
  'modelParams': {
    'sensorParams': {
      'encoders': {
        # Encoder for the predicted field; the swarm searches over delta vs.
        # absolute encoding and over the encoder resolution n.
        'value': PermuteEncoder(fieldName='value',
                  encoderClass='ScalarSpaceEncoder',
                  space=PermuteChoices(['delta', 'absolute']),
                  clipInput=True,
                  w=21,
                  n=PermuteInt(28, 521)),
        # Classifier-only copy of the same encoding: fed to the classifier
        # but not to the spatial pooler.
        '_classifierInput': dict(fieldname='value',
                  type='ScalarSpaceEncoder',
                  classifierOnly=True,
                  space=PermuteChoices(['delta', 'absolute']),
                  clipInput=True,
                  w=21,
                  n=PermuteInt(28, 521)),
      },
    },
    # Temporal pooler search ranges.
    'tpParams': {
      'minThreshold': PermuteInt(9, 12),
      'activationThreshold': PermuteInt(12, 16),
      'pamLength': PermuteInt(1, 5),
    },
    # CLA classifier learning-rate search range.
    'clParams': {
      'alpha': PermuteFloat(0.000100, 0.100000),
    },
  }
}

# Fields selected for final hypersearch report;
# NOTE: These values are used as regular expressions by RunPermutations.py's
# report generator
# (fieldname values generated from PERM_PREDICTED_FIELD_NAME)
report = [
  '.*value.*',
]

# Permutation optimization setting: either minimize or maximize metric
# used by RunPermutations.
# NOTE: The value is used as a regular expressions by RunPermutations.py's
# report generator
# (generated from minimize = "multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=1000:field=value")
# NOTE(review): the generated comment above says window=1000 but the actual
# metric below uses window=10 -- presumably hand-edited after generation.
minimize = "multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=10:field=value"

# No lower bound is forced on the number of particles per swarm.
minParticlesPerSwarm = None
def permutationFilter(perm):
  """Accept or reject one candidate permutation.

  Called by RunPermutations for every possible combination of the variables
  in the permutations dict.

  Parameters:
  ---------------------------------------------------------
  perm: dict of one possible combination of name:value
        pairs chosen from permutations.

  Returns True for a valid combination of permutation values and False for
  an invalid one.
  """
  # Example of rejecting part of the search space:
  #   if perm['__consumption_encoder']['maxval'] > 300:
  #     return False
  # Accept every combination by default.
  return True
| agpl-3.0 |
jaggu303619/asylum | openerp/addons/crm_partner_assign/report/crm_lead_report.py | 53 | 6327 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp import tools
from openerp.addons.crm import crm
# (value, label) pairs for the crm.lead 'state' selection column mirrored by
# the report model below.
AVAILABLE_STATES = [
    ('draft','Draft'),
    ('open','Open'),
    ('cancel', 'Cancelled'),
    ('done', 'Closed'),
    ('pending','Pending')
]
class crm_lead_report_assign(osv.osv):
    """ CRM Lead Report

    Read-only reporting model: no table is created (_auto = False).  Instead,
    init() (re)builds the backing SQL view over crm_lead joined with the
    assigned res_partner, keyed on the partner assignment date.
    """
    _name = "crm.lead.report.assign"
    _auto = False  # backed by the SQL view created in init(), not a table
    _description = "CRM Lead Report"
    _columns = {
        'year': fields.char('Year', size=64, required=False, readonly=True),
        'partner_assigned_id':fields.many2one('res.partner', 'Partner', readonly=True),
        'grade_id':fields.many2one('res.partner.grade', 'Grade', readonly=True),
        'user_id':fields.many2one('res.users', 'User', readonly=True),
        'country_id':fields.many2one('res.country', 'Country', readonly=True),
        'section_id':fields.many2one('crm.case.section', 'Sales Team', readonly=True),
        'state': fields.selection(AVAILABLE_STATES, 'Status', size=16, readonly=True),
        'month':fields.selection([('01', 'January'), ('02', 'February'), \
                                  ('03', 'March'), ('04', 'April'),\
                                  ('05', 'May'), ('06', 'June'), \
                                  ('07', 'July'), ('08', 'August'),\
                                  ('09', 'September'), ('10', 'October'),\
                                  ('11', 'November'), ('12', 'December')], 'Month', readonly=True),
        # Fix: 'company_id' was declared twice with byte-identical definitions
        # (a dict literal keeps only the last one anyway); the duplicate entry
        # has been removed.
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'date_assign': fields.date('Partner Date', readonly=True),
        'create_date': fields.datetime('Create Date', readonly=True),
        'day': fields.char('Day', size=128, readonly=True),
        'delay_open': fields.float('Delay to Open',digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to open the case"),
        'delay_close': fields.float('Delay to Close',digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to close the case"),
        'delay_expected': fields.float('Overpassed Deadline',digits=(16,2),readonly=True, group_operator="avg"),
        'probability': fields.float('Avg Probability',digits=(16,2),readonly=True, group_operator="avg"),
        'probability_max': fields.float('Max Probability',digits=(16,2),readonly=True, group_operator="max"),
        'planned_revenue': fields.float('Planned Revenue',digits=(16,2),readonly=True),
        'probable_revenue': fields.float('Probable Revenue', digits=(16,2),readonly=True),
        'stage_id': fields.many2one ('crm.case.stage', 'Stage', domain="[('section_ids', '=', section_id)]"),
        'partner_id': fields.many2one('res.partner', 'Customer' , readonly=True),
        'opening_date': fields.date('Opening Date', readonly=True),
        'creation_date': fields.date('Creation Date', readonly=True),
        'date_closed': fields.date('Close Date', readonly=True),
        'nbr': fields.integer('# of Cases', readonly=True),
        'priority': fields.selection(crm.AVAILABLE_PRIORITIES, 'Priority'),
        'type':fields.selection([
            ('lead','Lead'),
            ('opportunity','Opportunity'),
        ],'Type', help="Type is used to separate Leads and Opportunities"),
    }

    def init(self, cr):
        """
        (Re)create the crm_lead_report_assign SQL view backing this model.
        @param cr: the current row, from the database cursor
        """
        tools.drop_view_if_exists(cr, 'crm_lead_report_assign')
        cr.execute("""
            CREATE OR REPLACE VIEW crm_lead_report_assign AS (
                SELECT
                    c.id,
                    to_char(c.date_assign, 'YYYY') as year,
                    to_char(c.date_assign, 'MM') as month,
                    to_char(c.date_assign, 'YYYY-MM-DD') as day,
                    to_char(c.create_date, 'YYYY-MM-DD') as creation_date,
                    to_char(c.date_open, 'YYYY-MM-DD') as opening_date,
                    to_char(c.date_closed, 'YYYY-mm-dd') as date_closed,
                    c.state,
                    c.date_assign,
                    c.user_id,
                    c.probability,
                    c.probability as probability_max,
                    c.stage_id,
                    c.type,
                    c.company_id,
                    c.priority,
                    c.section_id,
                    c.partner_id,
                    c.country_id,
                    c.planned_revenue,
                    c.partner_assigned_id,
                    p.grade_id,
                    p.date as partner_date,
                    c.planned_revenue*(c.probability/100) as probable_revenue,
                    1 as nbr,
                    date_trunc('day',c.create_date) as create_date,
                    extract('epoch' from (c.date_closed-c.create_date))/(3600*24) as delay_close,
                    extract('epoch' from (c.date_deadline - c.date_closed))/(3600*24) as delay_expected,
                    extract('epoch' from (c.date_open-c.create_date))/(3600*24) as delay_open
                FROM
                    crm_lead c
                    left join res_partner p on (c.partner_assigned_id=p.id)
            )""")

crm_lead_report_assign()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
danielru/moose | framework/scripts/fixup_headers.py | 26 | 3483 | #!/usr/bin/env python
# This script checks and can optionally update MOOSE source files.
# You should always run this script without the "-u" option
# first to make sure there is a clean dry run of the files that should
# be updated
import os, string, re
from optparse import OptionParser
global_ignores = ['contrib', '.svn', '.git', 'libmesh']
moose_paths = ['framework', 'unit', 'examples', 'test', 'tutorials']
copyright_header = \
"""/****************************************************************/
/* DO NOT MODIFY THIS HEADER */
/* MOOSE - Multiphysics Object Oriented Simulation Environment */
/* */
/* (c) 2010 Battelle Energy Alliance, LLC */
/* ALL RIGHTS RESERVED */
/* */
/* Prepared by Battelle Energy Alliance, LLC */
/* Under Contract No. DE-AC07-05ID14517 */
/* With the U. S. Department of Energy */
/* */
/* See COPYRIGHT for full restrictions */
/****************************************************************/
"""
lgpl_header = \
"""/****************************************************************/
/* MOOSE - Multiphysics Object Oriented Simulation Environment */
/* */
/* All contents are licensed under LGPL V2.1 */
/* See LICENSE for full restrictions */
/****************************************************************/
"""
global_options = {}
def fixupHeader():
  """Walk the repository tree two levels above the CWD and run
  checkAndUpdate on every C++ source (.C) and header (.h) file found,
  pruning ignored directories along the way."""
  root = os.getcwd() + "/../../"
  for dirpath, dirnames, filenames in os.walk(root):
    # Prune ignored directories in place so os.walk never descends into them.
    for ignored in global_ignores:
      if ignored in dirnames:
        dirnames.remove(ignored)

    for filename in filenames:
      extension = os.path.splitext(filename)[-1]
      if extension in ('.C', '.h'):
        checkAndUpdate(os.path.abspath(dirpath + '/' + filename))
def checkAndUpdate(filename):
  # Check whether |filename| starts life with the expected license header;
  # in report mode print the offenders, in update mode (-u) rewrite the file
  # with the header prepended.  Runs under Python 2 (print statements,
  # string module functions).
  f = open(filename)
  text = f.read()
  f.close()

  # Use the copyright header for framework files, use the lgpl header
  # for everything else
  # NOTE(review): substring match on the whole path -- a file whose path
  # merely contains e.g. "test" anywhere is treated as a framework file.
  header = lgpl_header
  for dirname in moose_paths:
    if (string.find(filename, dirname) != -1):
      header = copyright_header
      break

  # Check (exact match only)
  if (string.find(text, header) == -1):
    # print the first 10 lines or so of the file
    if global_options.update == False: # Report only
      print filename + ' does not contain an up to date header'
      if global_options.verbose == True:
        print '>'*40, '\n', '\n'.join((text.split('\n', 10))[:10]), '\n'*5
    else:
      # Update: write header plus original text to a temp file, then swap it
      # into place so a crash mid-write cannot truncate the original.
      f = open(filename + '~tmp', 'w')
      f.write(header)

      # Make sure any previous header version is removed
      text = re.sub(r'^/\*+/$.*^/\*+/$', '', text, flags=re.S | re.M)
      f.write(text)
      f.close()
      os.rename(filename + '~tmp', filename)
if __name__ == '__main__':
  # Default is a report-only dry run; pass -u to rewrite headers in place and
  # -v to echo the top of each offending file.
  parser = OptionParser()
  parser.add_option("-u", "--update", action="store_true", dest="update", default=False)
  parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False)

  (global_options, args) = parser.parse_args()
  fixupHeader()
| lgpl-2.1 |
agentr13/python-phonenumbers | python/phonenumbers/data/region_AZ.py | 10 | 2209 | """Auto-generated file, do not edit by hand. AZ metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_AZ = PhoneMetadata(id='AZ', country_code=994, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[1-9]\\d{8}', possible_number_pattern='\\d{7,9}'),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:1[28]\\d|2(?:02|1[24]|2[2-4]|33|[45]2|6[23])|365)\\d{6}', possible_number_pattern='\\d{7,9}', example_number='123123456'),
mobile=PhoneNumberDesc(national_number_pattern='(?:4[04]|5[015]|60|7[07])\\d{7}', possible_number_pattern='\\d{9}', example_number='401234567'),
toll_free=PhoneNumberDesc(national_number_pattern='88\\d{7}', possible_number_pattern='\\d{9}', example_number='881234567'),
premium_rate=PhoneNumberDesc(national_number_pattern='900200\\d{3}', possible_number_pattern='\\d{9}', example_number='900200123'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['(?:1[28]|2(?:[45]2|[0-36])|365)'], national_prefix_formatting_rule='(0\\1)'),
NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['[4-8]'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{3})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['9'], national_prefix_formatting_rule='0\\1')],
mobile_number_portable_region=True)
| apache-2.0 |
flimshaw/three.js | utils/exporters/blender/addons/io_three/exporter/scene.py | 40 | 8768 | import os
from .. import constants, logger
from . import (
base_classes,
texture,
material,
geometry,
object as object_,
utilities,
io,
api
)
from bpy import context
class Scene(base_classes.BaseScene):
    """Class that handles the construction of a Three scene"""

    def __init__(self, filepath, options=None):
        """
        :param filepath: destination path for the exported scene
        :param options: export options (an empty dict when omitted)
        """
        logger.debug("Scene().__init__(%s, %s)", filepath, options)
        self._defaults = {
            constants.METADATA: constants.DEFAULT_METADATA.copy(),
            constants.GEOMETRIES: [],
            constants.MATERIALS: [],
            constants.IMAGES: [],
            constants.TEXTURES: [],
            constants.ANIMATION: []
        }
        base_classes.BaseScene.__init__(self, filepath, options or {})
        source_file = api.scene_name()
        if source_file:
            self[constants.METADATA][constants.SOURCE_FILE] = source_file
        self.__init_animation()

    def __init_animation(self):
        """Register the default animation entry, keyed to Blender's
        render FPS."""
        self[constants.ANIMATION].append({
            constants.NAME: "default",
            constants.FPS: context.scene.render.fps,
            constants.KEYFRAMES: []
        })

    @property
    def valid_types(self):
        """
        :return: list of valid node types
        """
        valid_types = [api.constants.MESH]
        if self.options.get(constants.HIERARCHY, False):
            valid_types.append(api.constants.EMPTY)
        if self.options.get(constants.CAMERAS):
            logger.info("Adding cameras to valid object types")
            valid_types.append(api.constants.CAMERA)
        if self.options.get(constants.LIGHTS):
            logger.info("Adding lights to valid object types")
            valid_types.append(api.constants.LAMP)
        return valid_types

    def geometry(self, value):
        """Find a geometry node that matches either a name
        or uuid value.

        :param value: name or uuid
        :type value: str
        """
        logger.debug("Scene().geometry(%s)", value)
        return _find_node(value, self[constants.GEOMETRIES])

    def image(self, value):
        """Find an image node that matches either a name
        or uuid value.

        :param value: name or uuid
        :type value: str
        """
        logger.debug("Scene().image(%s)", value)
        return _find_node(value, self[constants.IMAGES])

    def material(self, value):
        """Find a material node that matches either a name
        or uuid value.

        :param value: name or uuid
        :type value: str
        """
        logger.debug("Scene().material(%s)", value)
        return _find_node(value, self[constants.MATERIALS])

    def parse(self):
        """Execute the parsing of the scene"""
        logger.debug("Scene().parse()")
        if self.options.get(constants.MAPS):
            self._parse_textures()
        if self.options.get(constants.MATERIALS):
            self._parse_materials()
        self._parse_geometries()
        self._parse_objects()

    def texture(self, value):
        """Find a texture node that matches either a name
        or uuid value.

        :param value: name or uuid
        :type value: str
        """
        logger.debug("Scene().texture(%s)", value)
        return _find_node(value, self[constants.TEXTURES])

    def write(self):
        """Write the parsed scene to disk.

        Depending on the EMBED_GEOMETRY option, geometry payloads are
        either embedded in the scene file or written to sidecar
        ``geometry.<node><ext>`` files and referenced by URL.  Textures
        are optionally copied next to the scene.
        """
        logger.debug("Scene().write()")
        data = {}
        embed_anim = self.options.get(constants.EMBED_ANIMATION, True)
        embed = self.options.get(constants.EMBED_GEOMETRY, True)
        compression = self.options.get(constants.COMPRESSION)
        extension = constants.EXTENSIONS.get(
            compression,
            constants.EXTENSIONS[constants.JSON])
        export_dir = os.path.dirname(self.filepath)
        for key, value in self.items():
            if key == constants.GEOMETRIES:
                geometries = []
                for geom in value:
                    if not embed_anim:
                        geom.write_animation(export_dir)
                    geom_data = geom.copy()
                    if embed:
                        geometries.append(geom_data)
                        continue
                    # Not embedding: strip the heavy payload from the
                    # scene file and reference the sidecar file instead.
                    geo_type = geom_data[constants.TYPE].lower()
                    if geo_type == constants.GEOMETRY.lower():
                        geom_data.pop(constants.DATA)
                    elif geo_type == constants.BUFFER_GEOMETRY.lower():
                        geom_data.pop(constants.ATTRIBUTES)
                        geom_data.pop(constants.METADATA)
                    url = 'geometry.%s%s' % (geom.node, extension)
                    geometry_file = os.path.join(export_dir, url)
                    geom.write(filepath=geometry_file)
                    geom_data[constants.URL] = os.path.basename(url)
                    geometries.append(geom_data)
                data[key] = geometries
            elif isinstance(value, list):
                data[key] = []
                for each in value:
                    data[key].append(each.copy())
            elif isinstance(value, dict):
                data[key] = value.copy()
        io.dump(self.filepath, data, options=self.options)
        if self.options.get(constants.EXPORT_TEXTURES) and not self.options.get(constants.EMBED_TEXTURES):
            texture_folder = self.options.get(constants.TEXTURE_FOLDER)
            for geo in self[constants.GEOMETRIES]:
                logger.info("Copying textures from %s", geo.node)
                geo.copy_textures(texture_folder)

    def _parse_geometries(self):
        """Locate all geometry nodes and parse them"""
        logger.debug("Scene()._parse_geometries()")
        # this is an important step. please refer to the doc string
        # on the function for more information
        api.object.prep_meshes(self.options)
        geometries = []
        # now iterate over all the extracted mesh nodes and parse each one
        for mesh in api.object.extracted_meshes():
            logger.info("Parsing geometry %s", mesh)
            geo = geometry.Geometry(mesh, self)
            geo.parse()
            geometries.append(geo)
        logger.info("Added %d geometry nodes", len(geometries))
        self[constants.GEOMETRIES] = geometries

    def _parse_materials(self):
        """Locate all non-orphaned materials and parse them"""
        logger.debug("Scene()._parse_materials()")
        materials = []
        for material_name in api.material.used_materials():
            logger.info("Parsing material %s", material_name)
            materials.append(material.Material(material_name, parent=self))
        logger.info("Added %d material nodes", len(materials))
        self[constants.MATERIALS] = materials

    def _parse_objects(self):
        """Locate all valid objects in the scene and parse them"""
        logger.debug("Scene()._parse_objects()")
        try:
            scene_name = self[constants.METADATA][constants.SOURCE_FILE]
        except KeyError:
            scene_name = constants.SCENE
        # NOTE(review): scene_name is computed above but never used;
        # presumably it was meant to become the root object's name --
        # confirm intent before wiring it up or removing it.
        self[constants.OBJECT] = object_.Object(None, parent=self)
        self[constants.OBJECT][constants.TYPE] = constants.SCENE.title()
        self[constants.UUID] = utilities.id()
        objects = []
        if self.options.get(constants.HIERARCHY, False):
            nodes = api.object.assemblies(self.valid_types, self.options)
        else:
            nodes = api.object.nodes(self.valid_types, self.options)
        for node in nodes:
            logger.info("Parsing object %s", node)
            obj = object_.Object(node, parent=self[constants.OBJECT])
            objects.append(obj)
        logger.info("Added %d object nodes", len(objects))
        self[constants.OBJECT][constants.CHILDREN] = objects

    def _parse_textures(self):
        """Locate all non-orphaned textures and parse them"""
        logger.debug("Scene()._parse_textures()")
        textures = []
        for texture_name in api.texture.textures():
            logger.info("Parsing texture %s", texture_name)
            tex_inst = texture.Texture(texture_name, self)
            textures.append(tex_inst)
        logger.info("Added %d texture nodes", len(textures))
        self[constants.TEXTURES] = textures
def _find_node(value, manifest):
    """Return the first node in *manifest* whose uuid or name equals
    *value*, or ``None`` (after logging) when nothing matches.

    :param value: name or uuid
    :param manifest: manifest of nodes to search
    :type value: str
    :type manifest: list
    """
    for candidate in manifest:
        if candidate.get(constants.UUID) == value or candidate.node == value:
            return candidate
    logger.debug("No matching node for %s", value)
| mit |
ido-ran/ran-smart-frame2 | web/server/lib/oauth2client/__init__.py | 7 | 1029 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client library for using OAuth2, especially with Google APIs."""
# Package version string.
__version__ = '4.1.2'
# Default Google OAuth2 endpoint URIs used throughout the library.
GOOGLE_AUTH_URI = 'https://accounts.google.com/o/oauth2/v2/auth'
GOOGLE_DEVICE_URI = 'https://accounts.google.com/o/oauth2/device/code'
GOOGLE_REVOKE_URI = 'https://accounts.google.com/o/oauth2/revoke'
GOOGLE_TOKEN_URI = 'https://www.googleapis.com/oauth2/v4/token'
GOOGLE_TOKEN_INFO_URI = 'https://www.googleapis.com/oauth2/v3/tokeninfo'
| mit |
burdell/CS4464-Final-Project | django/contrib/localflavor/id/forms.py | 311 | 6834 | """
ID-specific Form helpers
"""
import re
import time
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, Select
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode
postcode_re = re.compile(r'^[1-9]\d{4}$')
phone_re = re.compile(r'^(\+62|0)[2-9]\d{7,10}$')
plate_re = re.compile(r'^(?P<prefix>[A-Z]{1,2}) ' + \
r'(?P<number>\d{1,5})( (?P<suffix>([A-Z]{1,3}|[1-9][0-9]{,2})))?$')
nik_re = re.compile(r'^\d{16}$')
class IDPostCodeField(Field):
    """
    An Indonesian post code field.
    http://id.wikipedia.org/wiki/Kode_pos
    """
    default_error_messages = {
        'invalid': _('Enter a valid post code'),
    }
    def clean(self, value):
        """Validate *value* and return it as a normalised unicode string."""
        super(IDPostCodeField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        stripped = value.strip()
        # Must be exactly five digits with a non-zero first digit.
        if not postcode_re.search(stripped):
            raise ValidationError(self.error_messages['invalid'])
        # Codes below 10110 are not accepted.
        if int(stripped) < 10110:
            raise ValidationError(self.error_messages['invalid'])
        # Codes starting with '1' must end with '0' (pattern 1xxx0).
        if stripped.startswith('1') and not stripped.endswith('0'):
            raise ValidationError(self.error_messages['invalid'])
        return u'%s' % (stripped,)
class IDProvinceSelect(Select):
    """
    A Select widget that uses a list of provinces of Indonesia as its
    choices.
    """
    def __init__(self, attrs=None):
        # Lazy import: the choices data is only loaded when the widget
        # is actually instantiated.
        from id_choices import PROVINCE_CHOICES
        super(IDProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class IDPhoneNumberField(Field):
    """
    An Indonesian telephone number field.
    http://id.wikipedia.org/wiki/Daftar_kode_telepon_di_Indonesia
    """
    default_error_messages = {
        'invalid': _('Enter a valid phone number'),
    }
    def clean(self, value):
        """Validate *value* against phone_re; return the original value
        (as unicode) on success."""
        super(IDPhoneNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # Strip separators (dashes, whitespace, parentheses) before matching.
        normalized = re.sub(r'[\-\s\(\)]', '', smart_unicode(value))
        if not phone_re.search(normalized):
            raise ValidationError(self.error_messages['invalid'])
        return smart_unicode(value)
class IDLicensePlatePrefixSelect(Select):
    """
    A Select widget that uses a list of vehicle license plate prefix code
    of Indonesia as its choices.
    http://id.wikipedia.org/wiki/Tanda_Nomor_Kendaraan_Bermotor
    """
    def __init__(self, attrs=None):
        # Lazy import: the choices data is only loaded when the widget
        # is actually instantiated.
        from id_choices import LICENSE_PLATE_PREFIX_CHOICES
        super(IDLicensePlatePrefixSelect, self).__init__(attrs,
            choices=LICENSE_PLATE_PREFIX_CHOICES)
class IDLicensePlateField(Field):
    """
    An Indonesian vehicle license plate field.
    http://id.wikipedia.org/wiki/Tanda_Nomor_Kendaraan_Bermotor
    Plus: "B 12345 12"
    """
    default_error_messages = {
        'invalid': _('Enter a valid vehicle license plate number'),
    }
    def clean(self, value):
        """Validate *value* and return the normalised (upper-cased,
        single-spaced) plate number, raising ValidationError on any
        malformed or unknown plate."""
        super(IDLicensePlateField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # Collapse runs of whitespace and upper-case before matching.
        plate_number = re.sub(r'\s+', ' ',
            smart_unicode(value.strip())).upper()
        matches = plate_re.search(plate_number)
        if matches is None:
            raise ValidationError(self.error_messages['invalid'])
        # Make sure prefix is in the list of known codes.
        from id_choices import LICENSE_PLATE_PREFIX_CHOICES
        prefix = matches.group('prefix')
        if prefix not in [choice[0] for choice in LICENSE_PLATE_PREFIX_CHOICES]:
            raise ValidationError(self.error_messages['invalid'])
        # Only Jakarta (prefix B) can have 3 letter suffix.
        suffix = matches.group('suffix')
        if suffix is not None and len(suffix) == 3 and prefix != 'B':
            raise ValidationError(self.error_messages['invalid'])
        # RI plates don't have suffix.
        if prefix == 'RI' and suffix is not None and suffix != '':
            raise ValidationError(self.error_messages['invalid'])
        # Number can't be zero.
        number = matches.group('number')
        if number == '0':
            raise ValidationError(self.error_messages['invalid'])
        # CD, CC and B 12345 12
        if len(number) == 5 or prefix in ('CD', 'CC'):
            # Suffix must be present, numeric and non-empty.  The None
            # guard fixes a crash: the original passed a missing suffix
            # (None) straight to re.match(), raising TypeError instead
            # of reporting a validation error for e.g. "B 12345".
            if suffix is None or re.match(r'^\d+$', suffix) is None:
                raise ValidationError(self.error_messages['invalid'])
            # Known codes range is 12-124
            if prefix in ('CD', 'CC') and not (12 <= int(number) <= 124):
                raise ValidationError(self.error_messages['invalid'])
            if len(number) == 5 and not (12 <= int(suffix) <= 124):
                raise ValidationError(self.error_messages['invalid'])
        else:
            # suffix must be non-numeric
            if suffix is not None and re.match(r'^[A-Z]{,3}$', suffix) is None:
                raise ValidationError(self.error_messages['invalid'])
        return plate_number
class IDNationalIdentityNumberField(Field):
    """
    An Indonesian national identity number (NIK/KTP#) field.
    http://id.wikipedia.org/wiki/Nomor_Induk_Kependudukan
    xx.xxxx.ddmmyy.xxxx - 16 digits (excl. dots)
    """
    default_error_messages = {
        'invalid': _('Enter a valid NIK/KTP number'),
    }
    def clean(self, value):
        """Validate a 16-digit NIK and return it dotted as
        xx.xxxx.ddmmyy.xxxx."""
        super(IDNationalIdentityNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # Strip dots and whitespace before validating the 16 digits.
        value = re.sub(r'[\s.]', '', smart_unicode(value))
        if not nik_re.search(value):
            raise ValidationError(self.error_messages['invalid'])
        if int(value) == 0:
            raise ValidationError(self.error_messages['invalid'])
        def valid_nik_date(year, month, day):
            # True when (year, month, day) is a real calendar date:
            # round-trip through mktime/localtime must preserve it.
            try:
                t1 = (int(year), int(month), int(day), 0, 0, 0, 0, 0, -1)
                d = time.mktime(t1)
                t2 = time.localtime(d)
                if t1[:3] != t2[:3]:
                    return False
                else:
                    return True
            except (OverflowError, ValueError):
                return False
        # Digits 6..11 encode the birth date as ddmmyy.
        year = int(value[10:12])
        month = int(value[8:10])
        day = int(value[6:8])
        current_year = time.localtime().tm_year
        # Two-digit years below the current year's last two digits are
        # taken as 20xx, everything else as 19xx.
        # NOTE(review): NIKs reportedly add 40 to the day field for
        # women; such values would be rejected here -- confirm intended.
        if year < int(str(current_year)[-2:]):
            if not valid_nik_date(2000 + int(year), month, day):
                raise ValidationError(self.error_messages['invalid'])
        elif not valid_nik_date(1900 + int(year), month, day):
            raise ValidationError(self.error_messages['invalid'])
        # Region (first 6) and serial (last 4) must not be all zeros.
        if value[:6] == '000000' or value[12:] == '0000':
            raise ValidationError(self.error_messages['invalid'])
        return '%s.%s.%s.%s' % (value[:2], value[2:6], value[6:12], value[12:])
| bsd-3-clause |
Pytwitcher/pytwitcherapi | test/test_session_getchatserver.py | 1 | 2569 | import mock
import pytest
import requests
from pytwitcherapi import session
@pytest.fixture(scope="function")
def ts(mock_session):
    """Return a :class:`session.TwitchSession`
    and mock the request of :class:`Session`
    """
    # mock_session (presumably a conftest fixture -- confirm) patches
    # requests.Session.request so no real HTTP traffic happens.
    return session.TwitchSession()
@pytest.fixture(scope='function')
def mock_chatpropresponse(servers, mock_session):
    """Mock channel-properties response whose JSON lists the chat server
    addresses taken from the ``servers`` fixture."""
    chatservers = [s.address for s in servers]
    channelprop = {"chat_servers": chatservers}
    chatpropresponse = mock.Mock()
    chatpropresponse.json.return_value = channelprop
    return chatpropresponse
@pytest.fixture(scope='function')
def mock_serverstatresponse(servers_json, mock_session):
    """Mock server-status response whose JSON is the raw ``servers_json``
    fixture data."""
    serverstatresponse = mock.Mock()
    serverstatresponse.json.return_value = servers_json
    return serverstatresponse
@pytest.fixture(scope='function')
def mock_chatserverresponse(mock_serverstatresponse, mock_chatpropresponse,
                            servers_json):
    """Queue a successful chat-property + server-status response pair and
    return the (ip, port) tuple get_chat_server() should pick."""
    requests.Session.request.side_effect = [mock_chatpropresponse,
                                            mock_serverstatresponse]
    # if serverstatresponse is successful return the best
    s = servers_json[2]
    return s['ip'], s['port']
@pytest.fixture(scope='function')
def mock_failchatserverresponse(mock_chatpropresponse, servers_json):
    """Queue a chat-property response followed by a failing server-status
    response (HTTPError) and return the expected fallback (ip, port)."""
    serverstatresponse = mock.Mock()
    serverstatresponse.raise_for_status.side_effect = requests.HTTPError()
    requests.Session.request.side_effect = [mock_chatpropresponse,
                                            serverstatresponse]
    # if serverstatresponse fails just return the first
    s = servers_json[0]
    return s['ip'], s['port']
@pytest.fixture(scope='function')
def mock_nochatserverresponse(mock_serverstatresponse):
    """Queue a chat-property response listing servers that do not appear
    in the server-status data; return the expected fallback (ip, port)."""
    # random server status that will not be in the available ones
    chatprop = {"chat_servers": ['0.16.64.11:80', '0.16.24.11:123']}
    chatpropresponse = mock.Mock()
    chatpropresponse.json.return_value = chatprop
    requests.Session.request.side_effect = [chatpropresponse,
                                            mock_serverstatresponse]
    # if no server stat for the chat servers can be found just return the first
    return '0.16.64.11', 80
@pytest.mark.parametrize('fix', ['mock_chatserverresponse',
                                 'mock_failchatserverresponse',
                                 'mock_nochatserverresponse'])
def test_get_chat_server(ts, channel1, fix, request):
    """get_chat_server() returns the expected (server, port) for the
    success, error-status and unknown-server scenarios."""
    # NOTE(review): request.getfuncargvalue() was renamed to
    # getfixturevalue() in pytest 3.0 and later removed; update when the
    # pytest pin allows.
    expected = request.getfuncargvalue(fix)
    server, port = ts.get_chat_server(channel1)
    assert (server, port) == expected
| bsd-3-clause |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Tasking/Mcl_Cmd_Services_Tasking.py | 1 | 1422 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: Mcl_Cmd_Services_Tasking.py
def TaskingMain(namespace):
    """Build and issue the 'services' survey RPC for *namespace*.

    Returns True on success, False when the RPC call fails (after
    recording the module error).  Note: ``mca`` is not imported
    lexically -- ImportWithNamespace injects it into this module's
    globals().
    """
    import mcl.imports
    import mcl.target
    import mcl.tasking
    from mcl.object.Message import MarshalMessage
    # Inject mca.survey.cmd.services(.tasking) into globals().
    mcl.imports.ImportWithNamespace(namespace, 'mca.survey.cmd.services', globals())
    mcl.imports.ImportWithNamespace(namespace, 'mca.survey.cmd.services.tasking', globals())
    lpParams = mcl.tasking.GetParameters()
    tgtParams = mca.survey.cmd.services.Params()
    if lpParams['serverName'] != None:
        tgtParams.target = lpParams['serverName']
    # Serialize the parameters into the info-query RPC.
    rpc = mca.survey.cmd.services.tasking.RPC_INFO_QUERY
    msg = MarshalMessage()
    tgtParams.Marshal(msg)
    rpc.SetData(msg.Serialize())
    rpc.SetMessagingType('message')
    # Target remotely when a server name was given, locally otherwise.
    taskXml = mcl.tasking.Tasking()
    if len(tgtParams.target) > 0:
        taskXml.SetTargetRemote(tgtParams.target)
    else:
        taskXml.SetTargetLocal()
    mcl.tasking.OutputXml(taskXml.GetXmlObject())
    res = mcl.tasking.RpcPerformCall(rpc)
    if res != mcl.target.CALL_SUCCEEDED:
        mcl.tasking.RecordModuleError(res, 0, mca.survey.cmd.services.errorStrings)
        return False
    else:
        return True
if __name__ == '__main__':
    import sys
    # TaskingMain returns True on success, False on failure; exit with a
    # non-zero status so callers can detect the failure.  (Replaces the
    # unidiomatic "!= True" comparison -- equivalent, since TaskingMain
    # only ever returns True or False.)
    if not TaskingMain(sys.argv[1]):
        sys.exit(-1)
denny820909/builder | lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/internet/udp.py | 6 | 11632 | # -*- test-case-name: twisted.test.test_udp -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Various asynchronous UDP classes.
Please do not use this module directly.
@var _sockErrReadIgnore: list of symbolic error constants (from the C{errno}
module) representing socket errors where the error is temporary and can be
ignored.
@var _sockErrReadRefuse: list of symbolic error constants (from the C{errno}
module) representing socket errors that indicate connection refused.
"""
# System Imports
import socket
import operator
import struct
import warnings
from zope.interface import implements
from twisted.python.runtime import platformType
if platformType == 'win32':
from errno import WSAEWOULDBLOCK
from errno import WSAEINTR, WSAEMSGSIZE, WSAETIMEDOUT
from errno import WSAECONNREFUSED, WSAECONNRESET, WSAENETRESET
from errno import WSAEINPROGRESS
# Classify read and write errors
_sockErrReadIgnore = [WSAEINTR, WSAEWOULDBLOCK, WSAEMSGSIZE, WSAEINPROGRESS]
_sockErrReadRefuse = [WSAECONNREFUSED, WSAECONNRESET, WSAENETRESET,
WSAETIMEDOUT]
# POSIX-compatible write errors
EMSGSIZE = WSAEMSGSIZE
ECONNREFUSED = WSAECONNREFUSED
EAGAIN = WSAEWOULDBLOCK
EINTR = WSAEINTR
else:
from errno import EWOULDBLOCK, EINTR, EMSGSIZE, ECONNREFUSED, EAGAIN
_sockErrReadIgnore = [EAGAIN, EINTR, EWOULDBLOCK]
_sockErrReadRefuse = [ECONNREFUSED]
# Twisted Imports
from twisted.internet import base, defer, address
from twisted.python import log, failure
from twisted.internet import abstract, error, interfaces
class Port(base.BasePort):
    """
    UDP port, listening for packets.

    NOTE: Python 2 source (``except socket.error, se:`` syntax).
    """
    implements(
        interfaces.IListeningPort, interfaces.IUDPTransport,
        interfaces.ISystemHandle)
    addressFamily = socket.AF_INET
    socketType = socket.SOCK_DGRAM
    maxThroughput = 256 * 1024 # max bytes we read in one eventloop iteration
    # Actual port number being listened on, only set to a non-None
    # value when we are actually listening.
    _realPortNumber = None
    def __init__(self, port, proto, interface='', maxPacketSize=8192, reactor=None):
        """
        Initialize with a numeric port to listen on.
        """
        base.BasePort.__init__(self, reactor)
        self.port = port
        self.protocol = proto
        self.maxPacketSize = maxPacketSize
        self.interface = interface
        self.setLogStr()
        # Set by connect(); None while in unconnected mode.
        self._connectedAddr = None
    def __repr__(self):
        # Shows the bound port only while actually listening.
        if self._realPortNumber is not None:
            return "<%s on %s>" % (self.protocol.__class__, self._realPortNumber)
        else:
            return "<%s not connected>" % (self.protocol.__class__,)
    def getHandle(self):
        """
        Return a socket object.
        """
        return self.socket
    def startListening(self):
        """
        Create and bind my socket, and begin listening on it.
        This is called on unserialization, and must be called after creating a
        server to begin listening on the specified port.
        """
        self._bindSocket()
        self._connectToProtocol()
    def _bindSocket(self):
        """Create the datagram socket and bind it, translating bind
        failures into CannotListenError."""
        try:
            skt = self.createInternetSocket()
            skt.bind((self.interface, self.port))
        except socket.error, le:
            raise error.CannotListenError, (self.interface, self.port, le)
        # Make sure that if we listened on port 0, we update that to
        # reflect what the OS actually assigned us.
        self._realPortNumber = skt.getsockname()[1]
        log.msg("%s starting on %s" % (
            self._getLogPrefix(self.protocol), self._realPortNumber))
        self.connected = 1
        self.socket = skt
        self.fileno = self.socket.fileno
    def _connectToProtocol(self):
        # Hand this transport to the protocol and start the read loop.
        self.protocol.makeConnection(self)
        self.startReading()
    def doRead(self):
        """
        Called when my socket is ready for reading.
        """
        # Read at most maxThroughput bytes per reactor iteration so one
        # busy port cannot starve the event loop.
        read = 0
        while read < self.maxThroughput:
            try:
                data, addr = self.socket.recvfrom(self.maxPacketSize)
            except socket.error, se:
                no = se.args[0]
                if no in _sockErrReadIgnore:
                    return
                if no in _sockErrReadRefuse:
                    # Connection refused is only meaningful in connected mode.
                    if self._connectedAddr:
                        self.protocol.connectionRefused()
                    return
                raise
            else:
                read += len(data)
                try:
                    self.protocol.datagramReceived(data, addr)
                except:
                    # Deliberately broad: errors in protocol code are
                    # logged and must not kill the read loop.
                    log.err()
    def write(self, datagram, addr=None):
        """
        Write a datagram.
        @type datagram: C{str}
        @param datagram: The datagram to be sent.
        @type addr: C{tuple} containing C{str} as first element and C{int} as
            second element, or C{None}
        @param addr: A tuple of (I{stringified dotted-quad IP address},
            I{integer port number}); can be C{None} in connected mode.
        """
        if self._connectedAddr:
            assert addr in (None, self._connectedAddr)
            try:
                return self.socket.send(datagram)
            except socket.error, se:
                no = se.args[0]
                if no == EINTR:
                    # Interrupted system call: retry the send.
                    return self.write(datagram)
                elif no == EMSGSIZE:
                    raise error.MessageLengthError, "message too long"
                elif no == ECONNREFUSED:
                    self.protocol.connectionRefused()
                else:
                    raise
        else:
            assert addr != None
            if not addr[0].replace(".", "").isdigit() and addr[0] != "<broadcast>":
                warnings.warn("Please only pass IPs to write(), not hostnames",
                        DeprecationWarning, stacklevel=2)
            try:
                return self.socket.sendto(datagram, addr)
            except socket.error, se:
                no = se.args[0]
                if no == EINTR:
                    # Interrupted system call: retry the send.
                    return self.write(datagram, addr)
                elif no == EMSGSIZE:
                    raise error.MessageLengthError, "message too long"
                elif no == ECONNREFUSED:
                    # in non-connected UDP ECONNREFUSED is platform dependent, I
                    # think and the info is not necessarily useful. Nevertheless
                    # maybe we should call connectionRefused? XXX
                    return
                else:
                    raise
    def writeSequence(self, seq, addr):
        # Concatenate and send as a single datagram.
        self.write("".join(seq), addr)
    def connect(self, host, port):
        """
        'Connect' to remote server.
        """
        if self._connectedAddr:
            raise RuntimeError, "already connected, reconnecting is not currently supported (talk to itamar if you want this)"
        if not abstract.isIPAddress(host):
            raise ValueError, "please pass only IP addresses, not domain names"
        self._connectedAddr = (host, port)
        self.socket.connect((host, port))
    def _loseConnection(self):
        self.stopReading()
        if self.connected: # actually means if we are *listening*
            self.reactor.callLater(0, self.connectionLost)
    def stopListening(self):
        # Returns a Deferred that fires in connectionLost(), or None if
        # we were not listening.
        if self.connected:
            result = self.d = defer.Deferred()
        else:
            result = None
        self._loseConnection()
        return result
    def loseConnection(self):
        # Deprecated alias for stopListening().
        warnings.warn("Please use stopListening() to disconnect port", DeprecationWarning, stacklevel=2)
        self.stopListening()
    def connectionLost(self, reason=None):
        """
        Cleans up my socket.
        """
        log.msg('(UDP Port %s Closed)' % self._realPortNumber)
        self._realPortNumber = None
        base.BasePort.connectionLost(self, reason)
        self.protocol.doStop()
        self.socket.close()
        del self.socket
        del self.fileno
        # Fire the Deferred handed out by stopListening(), if any.
        if hasattr(self, "d"):
            self.d.callback(None)
            del self.d
    def setLogStr(self):
        """
        Initialize the C{logstr} attribute to be used by C{logPrefix}.
        """
        logPrefix = self._getLogPrefix(self.protocol)
        self.logstr = "%s (UDP)" % logPrefix
    def logPrefix(self):
        """
        Return the prefix to log with.
        """
        return self.logstr
    def getHost(self):
        """
        Returns an IPv4Address.
        This indicates the address from which I am connecting.
        """
        return address.IPv4Address('UDP', *self.socket.getsockname())
class MulticastMixin:
    """
    Implement multicast functionality.

    Expects ``self.socket`` and ``self.reactor`` to be provided by the
    class this is mixed into (see MulticastPort below).
    """
    def getOutgoingInterface(self):
        # IP_MULTICAST_IF is returned as an int; render it dotted-quad.
        i = self.socket.getsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF)
        return socket.inet_ntoa(struct.pack("@i", i))
    def setOutgoingInterface(self, addr):
        """Returns Deferred of success."""
        return self.reactor.resolve(addr).addCallback(self._setInterface)
    def _setInterface(self, addr):
        # addr is a resolved dotted-quad string; pack for setsockopt.
        i = socket.inet_aton(addr)
        self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, i)
        return 1
    def getLoopbackMode(self):
        return self.socket.getsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP)
    def setLoopbackMode(self, mode):
        # Normalise to 0/1 and pack as a signed byte.
        mode = struct.pack("b", operator.truth(mode))
        self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, mode)
    def getTTL(self):
        return self.socket.getsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL)
    def setTTL(self, ttl):
        ttl = struct.pack("B", ttl)
        self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
    def joinGroup(self, addr, interface=""):
        """Join a multicast group. Returns Deferred of success."""
        return self.reactor.resolve(addr).addCallback(self._joinAddr1, interface, 1)
    def _joinAddr1(self, addr, interface, join):
        # Resolve the interface too before issuing the membership change.
        return self.reactor.resolve(interface).addCallback(self._joinAddr2, addr, join)
    def _joinAddr2(self, interface, addr, join):
        addr = socket.inet_aton(addr)
        interface = socket.inet_aton(interface)
        if join:
            cmd = socket.IP_ADD_MEMBERSHIP
        else:
            cmd = socket.IP_DROP_MEMBERSHIP
        try:
            self.socket.setsockopt(socket.IPPROTO_IP, cmd, addr + interface)
        except socket.error, e:
            # Return (not raise) the Failure so it propagates through the
            # Deferred chain started in joinGroup/leaveGroup.
            return failure.Failure(error.MulticastJoinError(addr, interface, *e.args))
    def leaveGroup(self, addr, interface=""):
        """Leave multicast group, return Deferred of success."""
        return self.reactor.resolve(addr).addCallback(self._joinAddr1, interface, 0)
class MulticastPort(MulticastMixin, Port):
    """
    UDP Port that supports multicasting.
    """
    implements(interfaces.IMulticastTransport)
    def __init__(self, port, proto, interface='', maxPacketSize=8192, reactor=None, listenMultiple=False):
        """
        @see: L{twisted.internet.interfaces.IReactorMulticast.listenMulticast}
        """
        Port.__init__(self, port, proto, interface, maxPacketSize, reactor)
        # When True, allow several processes/sockets to bind this port.
        self.listenMultiple = listenMultiple
    def createInternetSocket(self):
        skt = Port.createInternetSocket(self)
        if self.listenMultiple:
            skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            # SO_REUSEPORT is not available on every platform.
            if hasattr(socket, "SO_REUSEPORT"):
                skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
        return skt
| mit |
jeremiahmarks/sl4a | python-build/python-libs/python-twitter/simplejson/decoder.py | 135 | 12032 | """Implementation of JSONDecoder
"""
import re
import sys
import struct
from simplejson.scanner import make_scanner
try:
from simplejson._speedups import scanstring as c_scanstring
except ImportError:
c_scanstring = None
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
    """Return (nan, inf, -inf) built from raw IEEE-754 bit patterns.

    Python 2 only: relies on ``str.decode('hex')``.
    """
    # Big-endian byte patterns for NaN followed by +Infinity.
    _BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
    if sys.byteorder != 'big':
        # Byte-swap each 8-byte double for little-endian hosts.
        _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
    nan, inf = struct.unpack('dd', _BYTES)
    return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
    """Return the 1-based (line, column) of character index *pos* in *doc*.

    The column equals *pos* itself on the first line (0-based) and is
    measured from the preceding newline on later lines.
    """
    lineno = doc.count('\n', 0, pos) + 1
    colno = pos if lineno == 1 else pos - doc.rindex('\n', 0, pos)
    return lineno, colno
def errmsg(msg, doc, pos, end=None):
    """Format a parse error message with line/column info for *pos*
    (and *end*, when given)."""
    # Note that this function is called from _speedups
    start_line, start_col = linecol(doc, pos)
    if end is None:
        return '%s: line %d column %d (char %d)' % (msg, start_line, start_col, pos)
    end_line, end_col = linecol(doc, end)
    return '%s: line %d column %d - line %d column %d (char %d - %d)' % (
        msg, start_line, start_col, end_line, end_col, pos, end)
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
}
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
'"': u'"', '\\': u'\\', '/': u'/',
'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.
    Returns a tuple of the decoded string and the index of the character in s
    after the end quote.

    Python 2 implementation: uses ``unicode`` and ``unichr``.  The
    ``_b``/``_m`` defaults bind the escape table and chunk matcher as
    locals for speed.
    """
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content is contains zero or more unescaped string characters
        if content:
            if not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                msg = "Invalid control character %r at" % (terminator,)
                raise ValueError(msg, s, end)
            else:
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                raise ValueError(
                    errmsg("Invalid \\escape: %r" % (esc,), s, end))
            end += 1
        else:
            # Unicode escape sequence
            esc = s[end + 1:end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise ValueError(errmsg(msg, s, end))
            uni = int(esc, 16)
            # Check for surrogate pair on UCS-4 systems
            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                if not s[end + 5:end + 7] == '\\u':
                    raise ValueError(errmsg(msg, s, end))
                esc2 = s[end + 7:end + 11]
                if len(esc2) != 4:
                    raise ValueError(errmsg(msg, s, end))
                uni2 = int(esc2, 16)
                # Combine the pair into a single code point above U+FFFF.
                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                next_end += 6
            char = unichr(uni)
            end = next_end
        # Append the unescaped character
        _append(char)
    return u''.join(chunks), end
# Use speedup if available
# c_scanstring is the C-accelerated implementation (None when the
# speedup extension failed to import), py_scanstring the pure-Python one.
scanstring = c_scanstring or py_scanstring
# Matches a (possibly empty) run of the four JSON whitespace characters.
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    # Parse a JSON object from ``s``; ``end`` indexes the first character
    # after the opening '{'.  Returns ``(pairs, end)`` where ``pairs`` is
    # the decoded dict (or the result of ``object_hook``) and ``end`` is
    # the index one past the closing '}'.
    # NOTE: the ``(s, end)`` tuple parameter is Python 2-only syntax
    # (removed in Python 3 by PEP 3113).
    pairs = {}
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end:end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end:end + 1]
        # Trivial empty object
        if nextchar == '}':
            return pairs, end + 1
        elif nextchar != '"':
            raise ValueError(errmsg("Expecting property name", s, end))
    end += 1
    while True:
        key, end = scanstring(s, end, encoding, strict)
        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end:end + 1] != ':':
            end = _w(s, end).end()
            if s[end:end + 1] != ':':
                raise ValueError(errmsg("Expecting : delimiter", s, end))
        # Step past the ':' separator.
        end += 1
        # Skip whitespace after ':' cheaply: handle zero/one space inline,
        # fall back to the regex for longer runs.
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        pairs[key] = value
        # Look at the character after the value: must be ',' or '}'.
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar == '}':
            break
        elif nextchar != ',':
            raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
        # After ',': skip whitespace and require the next property name's
        # opening quote.
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end += 1
                nextchar = s[end]
                if nextchar in _ws:
                    end = _w(s, end + 1).end()
                    nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar != '"':
            raise ValueError(errmsg("Expecting property name", s, end - 1))
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    # Parse a JSON array from ``s``; ``end`` indexes the first character
    # after the opening '['.  Returns ``(values, end)`` with ``end`` one
    # past the closing ']'.
    # NOTE: the ``(s, end)`` tuple parameter is Python 2-only syntax.
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    # Bind the append method locally to avoid an attribute lookup per element.
    _append = values.append
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        _append(value)
        # After each value: skip whitespace, then expect ',' or ']'.
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise ValueError(errmsg("Expecting , delimiter", s, end))
        # Cheap inline whitespace skip after the comma (regex only for
        # runs longer than two characters).
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
    return values, end
class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder
    Performs the following translations in decoding by default:
    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | unicode           |
    +---------------+-------------------+
    | number (int)  | int, long         |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+
    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.
    """
    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True):
        """``encoding`` determines the encoding used to interpret any ``str``
        objects decoded by this instance (utf-8 by default). It has no
        effect when decoding ``unicode`` objects.
        Note that currently only encodings that are a superset of ASCII work,
        strings of other encodings should be passed in as ``unicode``.
        ``object_hook``, if specified, will be called with the result
        of every JSON object decoded and its return value will be used in
        place of the given ``dict``. This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).
        ``parse_float``, if specified, will be called with the string
        of every JSON float to be decoded. By default this is equivalent to
        float(num_str). This can be used to use another datatype or parser
        for JSON floats (e.g. decimal.Decimal).
        ``parse_int``, if specified, will be called with the string
        of every JSON int to be decoded. By default this is equivalent to
        int(num_str). This can be used to use another datatype or parser
        for JSON integers (e.g. float).
        ``parse_constant``, if specified, will be called with one of the
        following strings: -Infinity, Infinity, NaN.
        This can be used to raise an exception if invalid JSON numbers
        are encountered.
        """
        self.encoding = encoding
        self.object_hook = object_hook
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        # Default constant handler looks the literal up in the module-level
        # _CONSTANTS table (defined elsewhere in this module).
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        # make_scanner (defined elsewhere) presumably builds a scanner
        # closure specialized to this decoder's settings — TODO confirm.
        self.scan_once = make_scanner(self)
    def decode(self, s, _w=WHITESPACE.match):
        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
        instance containing a JSON document)

        Raises ValueError if anything other than trailing whitespace
        follows the first JSON document in ``s``.
        """
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        end = _w(s, end).end()
        if end != len(s):
            raise ValueError(errmsg("Extra data", s, end, len(s)))
        return obj
    def raw_decode(self, s, idx=0):
        """Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
        with a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.
        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.
        """
        try:
            obj, end = self.scan_once(s, idx)
        except StopIteration:
            # The scanner signals "no document here" via StopIteration.
            raise ValueError("No JSON object could be decoded")
        return obj, end
| apache-2.0 |
demis001/scikit-bio | skbio/stats/ordination/_principal_coordinate_analysis.py | 4 | 7264 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from warnings import warn
import numpy as np
from skbio.stats.distance import DistanceMatrix
from ._base import Ordination, OrdinationResults
from skbio.util._decorator import experimental
# - In cogent, after computing eigenvalues/vectors, the imaginary part
# is dropped, if any. We know for a fact that the eigenvalues are
# real, so that's not necessary, but eigenvectors can in principle
# be complex (see for example
# http://math.stackexchange.com/a/47807/109129 for details) and in
# that case dropping the imaginary part means they'd no longer be
# so, so I'm not doing that.
class PCoA(Ordination):
    r"""Perform Principal Coordinate Analysis.
    Principal Coordinate Analysis (PCoA) is a method similar to PCA
    that works from distance matrices, and so it can be used with
    ecologically meaningful distances like unifrac for bacteria.
    In ecology, the euclidean distance preserved by Principal
    Component Analysis (PCA) is often not a good choice because it
    deals poorly with double zeros (Species have unimodal
    distributions along environmental gradients, so if a species is
    absent from two sites at the same time, it can't be known if an
    environmental variable is too high in one of them and too low in
    the other, or too low in both, etc. On the other hand, if a
    species is present in two sites, that means that the sites are
    similar.).
    Parameters
    ==========
    distance_matrix : DistanceMatrix
        A distance matrix.
    Notes
    =====
    It is sometimes known as metric multidimensional scaling or
    classical scaling.
    .. note::
       If the distance is not euclidean (for example if it is a
       semimetric and the triangle inequality doesn't hold),
       negative eigenvalues can appear. There are different ways
       to deal with that problem (see Legendre & Legendre 1998, \S
       9.2.3), but none are currently implemented here.
       However, a warning is raised whenever negative eigenvalues
       appear, allowing the user to decide if they can be safely
       ignored.
    """
    short_method_name = 'PCoA'
    long_method_name = 'Principal Coordinate Analysis'
    @experimental(as_of="0.4.0")
    def __init__(self, distance_matrix):
        # Keep a float64 copy of the data and the sample ids; the full
        # eigendecomposition is performed eagerly at construction time.
        if isinstance(distance_matrix, DistanceMatrix):
            self.dm = np.asarray(distance_matrix.data, dtype=np.float64)
            self.ids = distance_matrix.ids
        else:
            raise TypeError("Input must be a DistanceMatrix.")
        self._pcoa()
    def _pcoa(self):
        # Classical-scaling pipeline: square/scale the distances (E),
        # double-centre (F), then eigendecompose the centred matrix.
        E_matrix = self._E_matrix(self.dm)
        # If the used distance was euclidean, pairwise distances
        # needn't be computed from the data table Y because F_matrix =
        # Y.dot(Y.T) (if Y has been centred).
        F_matrix = self._F_matrix(E_matrix)
        # If the eigendecomposition ever became a bottleneck, it could
        # be replaced with an iterative version that computes the
        # largest k eigenvectors.
        # eigh is appropriate because F is symmetric; it returns real
        # eigenvalues in ascending order and normalized eigenvectors.
        eigvals, eigvecs = np.linalg.eigh(F_matrix)
        # eigvals might not be ordered, so we order them (at least one
        # is zero). cogent makes eigenvalues positive by taking the
        # abs value, but that doesn't seem to be an approach accepted
        # by L&L to deal with negative eigenvalues. We raise a warning
        # in that case. First, we make values close to 0 equal to 0.
        # NOTE(review): despite its name, this mask flags *all*
        # eigenvalues close to zero, positive or negative.
        negative_close_to_zero = np.isclose(eigvals, 0)
        eigvals[negative_close_to_zero] = 0
        if np.any(eigvals < 0):
            warn(
                "The result contains negative eigenvalues."
                " Please compare their magnitude with the magnitude of some"
                " of the largest positive eigenvalues. If the negative ones"
                " are smaller, it's probably safe to ignore them, but if they"
                " are large in magnitude, the results won't be useful. See the"
                " Notes section for more details. The smallest eigenvalue is"
                " {0} and the largest is {1}.".format(eigvals.min(),
                                                     eigvals.max()),
                RuntimeWarning
                )
        # Re-sort into descending order (eigh yields ascending).
        idxs_descending = eigvals.argsort()[::-1]
        self.eigvals = eigvals[idxs_descending]
        self.eigvecs = eigvecs[:, idxs_descending]
    @experimental(as_of="0.4.0")
    def scores(self):
        """Compute coordinates in transformed space.
        Returns
        -------
        OrdinationResults
            Object that stores the computed eigenvalues, the
            proportion explained by each of them (per unit) and
            transformed coordinates, etc.
        See Also
        --------
        OrdinationResults
        """
        # Scale eigenvalues to have length = sqrt(eigenvalue). This
        # works because np.linalg.eigh returns normalized
        # eigenvectors. Each row contains the coordinates of the
        # objects in the space of principal coordinates. Note that at
        # least one eigenvalue is zero because only n-1 axes are
        # needed to represent n points in an euclidean space.
        # If we return only the coordinates that make sense (i.e., that have a
        # corresponding positive eigenvalue), then Jackknifed Beta Diversity
        # won't work as it expects all the OrdinationResults to have the same
        # number of coordinates. In order to solve this issue, we return the
        # coordinates that have a negative eigenvalue as 0
        num_positive = (self.eigvals >= 0).sum()
        # NOTE(review): ``eigvecs``/``eigvals`` below alias the instance
        # attributes, so the zero-assignments mutate ``self.eigvecs`` and
        # ``self.eigvals`` in place; repeated calls see the zeroed values.
        eigvecs = self.eigvecs
        eigvecs[:, num_positive:] = np.zeros(eigvecs[:, num_positive:].shape)
        eigvals = self.eigvals
        eigvals[num_positive:] = np.zeros(eigvals[num_positive:].shape)
        coordinates = eigvecs * np.sqrt(eigvals)
        proportion_explained = eigvals / eigvals.sum()
        return OrdinationResults(eigvals=eigvals, site=coordinates,
                                 proportion_explained=proportion_explained,
                                 site_ids=self.ids)
    @staticmethod
    def _E_matrix(distance_matrix):
        """Compute E matrix from a distance matrix.
        Squares and divides by -2 the input elementwise. Eq. 9.20 in
        Legendre & Legendre 1998."""
        return distance_matrix * distance_matrix / -2
    @staticmethod
    def _F_matrix(E_matrix):
        """Compute F matrix from E matrix.
        Centring step: for each element, the mean of the corresponding
        row and column are subtracted, and the mean of the whole
        matrix is added. Eq. 9.21 in Legendre & Legendre 1998."""
        row_means = E_matrix.mean(axis=1, keepdims=True)
        col_means = E_matrix.mean(axis=0, keepdims=True)
        matrix_mean = E_matrix.mean()
        return E_matrix - row_means - col_means + matrix_mean
| bsd-3-clause |
leighpauls/k2cro4 | third_party/python_26/Lib/telnetlib.py | 65 | 21808 | r"""TELNET client class.
Based on RFC 854: TELNET Protocol Specification, by J. Postel and
J. Reynolds
Example:
>>> from telnetlib import Telnet
>>> tn = Telnet('www.python.org', 79) # connect to finger port
>>> tn.write('guido\r\n')
>>> print tn.read_all()
Login Name TTY Idle When Where
guido Guido van Rossum pts/2 <Dec 2 11:10> snag.cnri.reston..
>>>
Note that read_all() won't read until eof -- it just reads some data
-- but it guarantees to read at least one byte unless EOF is hit.
It is possible to pass a Telnet object to select.select() in order to
wait until more data is available. Note that in this case,
read_eager() may return '' even if there was data on the socket,
because the protocol negotiation may have eaten the data. This is why
EOFError is needed in some cases to distinguish between "no data" and
"connection closed" (since the socket also appears ready for reading
when it is closed).
To do:
- option negotiation
- timeout should be intrinsic to the connection object instead of an
option on one of the read calls only
"""
# Imported modules
import sys
import socket
import select
__all__ = ["Telnet"]
# Tunable parameters
DEBUGLEVEL = 0
# Telnet protocol defaults
TELNET_PORT = 23
# Telnet protocol characters (don't change)
# NOTE: under Python 2, chr() yields one-byte str objects, so these
# constants compare directly against bytes read from the socket.
IAC = chr(255) # "Interpret As Command"
DONT = chr(254)
DO = chr(253)
WONT = chr(252)
WILL = chr(251)
theNULL = chr(0)
SE = chr(240) # Subnegotiation End
NOP = chr(241) # No Operation
DM = chr(242) # Data Mark
BRK = chr(243) # Break
IP = chr(244) # Interrupt process
AO = chr(245) # Abort output
AYT = chr(246) # Are You There
EC = chr(247) # Erase Character
EL = chr(248) # Erase Line
GA = chr(249) # Go Ahead
SB = chr(250) # Subnegotiation Begin
# Telnet protocol options code (don't change)
# These ones all come from arpa/telnet.h
BINARY = chr(0) # 8-bit data path
ECHO = chr(1) # echo
RCP = chr(2) # prepare to reconnect
SGA = chr(3) # suppress go ahead
NAMS = chr(4) # approximate message size
STATUS = chr(5) # give status
TM = chr(6) # timing mark
RCTE = chr(7) # remote controlled transmission and echo
NAOL = chr(8) # negotiate about output line width
NAOP = chr(9) # negotiate about output page size
NAOCRD = chr(10) # negotiate about CR disposition
NAOHTS = chr(11) # negotiate about horizontal tabstops
NAOHTD = chr(12) # negotiate about horizontal tab disposition
NAOFFD = chr(13) # negotiate about formfeed disposition
NAOVTS = chr(14) # negotiate about vertical tab stops
NAOVTD = chr(15) # negotiate about vertical tab disposition
NAOLFD = chr(16) # negotiate about output LF disposition
XASCII = chr(17) # extended ascii character set
LOGOUT = chr(18) # force logout
BM = chr(19) # byte macro
DET = chr(20) # data entry terminal
SUPDUP = chr(21) # supdup protocol
SUPDUPOUTPUT = chr(22) # supdup output
SNDLOC = chr(23) # send location
TTYPE = chr(24) # terminal type
EOR = chr(25) # end or record
TUID = chr(26) # TACACS user identification
OUTMRK = chr(27) # output marking
TTYLOC = chr(28) # terminal location number
VT3270REGIME = chr(29) # 3270 regime
X3PAD = chr(30) # X.3 PAD
NAWS = chr(31) # window size
TSPEED = chr(32) # terminal speed
LFLOW = chr(33) # remote flow control
LINEMODE = chr(34) # Linemode option
XDISPLOC = chr(35) # X Display Location
OLD_ENVIRON = chr(36) # Old - Environment variables
AUTHENTICATION = chr(37) # Authenticate
ENCRYPT = chr(38) # Encryption option
NEW_ENVIRON = chr(39) # New - Environment variables
# the following ones come from
# http://www.iana.org/assignments/telnet-options
# Unfortunately, that document does not assign identifiers
# to all of them, so we are making them up
TN3270E = chr(40) # TN3270E
XAUTH = chr(41) # XAUTH
CHARSET = chr(42) # CHARSET
RSP = chr(43) # Telnet Remote Serial Port
COM_PORT_OPTION = chr(44) # Com Port Control Option
SUPPRESS_LOCAL_ECHO = chr(45) # Telnet Suppress Local Echo
TLS = chr(46) # Telnet Start TLS
KERMIT = chr(47) # KERMIT
SEND_URL = chr(48) # SEND-URL
FORWARD_X = chr(49) # FORWARD_X
PRAGMA_LOGON = chr(138) # TELOPT PRAGMA LOGON
SSPI_LOGON = chr(139) # TELOPT SSPI LOGON
PRAGMA_HEARTBEAT = chr(140) # TELOPT PRAGMA HEARTBEAT
EXOPL = chr(255) # Extended-Options-List
NOOPT = chr(0)
class Telnet:
    """Telnet interface class.
    An instance of this class represents a connection to a telnet
    server. The instance is initially not connected; the open()
    method must be used to establish a connection. Alternatively, the
    host name and optional port number can be passed to the
    constructor, too.
    Don't try to reopen an already connected instance.
    This class has many read_*() methods. Note that some of them
    raise EOFError when the end of the connection is read, because
    they can return an empty string for other reasons. See the
    individual doc strings.
    read_until(expected, [timeout])
        Read until the expected string has been seen, or a timeout is
        hit (default is no timeout); may block.
    read_all()
        Read all data until EOF; may block.
    read_some()
        Read at least one byte or EOF; may block.
    read_very_eager()
        Read all data available already queued or on the socket,
        without blocking.
    read_eager()
        Read either data already queued or some data available on the
        socket, without blocking.
    read_lazy()
        Read all data in the raw queue (processing it first), without
        doing any socket I/O.
    read_very_lazy()
        Reads all data in the cooked queue, without doing any socket
        I/O.
    read_sb_data()
        Reads available data between SB ... SE sequence. Don't block.
    set_option_negotiation_callback(callback)
        Each time a telnet option is read on the input flow, this callback
        (if set) is called with the following parameters :
        callback(telnet socket, command, option)
        option will be chr(0) when there is no option.
        No other action is done afterwards by telnetlib.
    """
    def __init__(self, host=None, port=0,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        """Constructor.
        When called without arguments, create an unconnected instance.
        With a hostname argument, it connects the instance; port number
        and timeout are optional.
        """
        self.debuglevel = DEBUGLEVEL
        self.host = host
        self.port = port
        self.timeout = timeout
        self.sock = None
        self.rawq = ''          # raw bytes as received from the socket
        self.irawq = 0          # read cursor into rawq
        self.cookedq = ''       # bytes with telnet protocol stripped out
        self.eof = 0
        self.iacseq = '' # Buffer for IAC sequence.
        self.sb = 0 # flag for SB and SE sequence.
        self.sbdataq = ''
        self.option_callback = None
        if host is not None:
            self.open(host, port, timeout)
    def open(self, host, port=0, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        """Connect to a host.
        The optional second argument is the port number, which
        defaults to the standard telnet port (23).
        Don't try to reopen an already connected instance.
        """
        self.eof = 0
        if not port:
            port = TELNET_PORT
        self.host = host
        self.port = port
        self.timeout = timeout
        self.sock = socket.create_connection((host, port), timeout)
    def __del__(self):
        """Destructor -- close the connection."""
        self.close()
    def msg(self, msg, *args):
        """Print a debug message, when the debug level is > 0.
        If extra arguments are present, they are substituted in the
        message using the standard string formatting operator.
        """
        if self.debuglevel > 0:
            # Python 2 print statement; the trailing comma keeps the
            # host/port prefix on the same line as the message.
            print 'Telnet(%s,%d):' % (self.host, self.port),
            if args:
                print msg % args
            else:
                print msg
    def set_debuglevel(self, debuglevel):
        """Set the debug level.
        The higher it is, the more debug output you get (on sys.stdout).
        """
        self.debuglevel = debuglevel
    def close(self):
        """Close the connection."""
        if self.sock:
            self.sock.close()
        # NOTE(review): sock is reset to 0 here but initialized to None
        # in __init__; both are falsy, so the `if self.sock` tests work.
        self.sock = 0
        self.eof = 1
        self.iacseq = ''
        self.sb = 0
    def get_socket(self):
        """Return the socket object used internally."""
        return self.sock
    def fileno(self):
        """Return the fileno() of the socket object used internally."""
        # Makes a Telnet instance itself acceptable to select.select().
        return self.sock.fileno()
    def write(self, buffer):
        """Write a string to the socket, doubling any IAC characters.
        Can block if the connection is blocked. May raise
        socket.error if the connection is closed.
        """
        if IAC in buffer:
            buffer = buffer.replace(IAC, IAC+IAC)
        self.msg("send %r", buffer)
        self.sock.sendall(buffer)
    def read_until(self, match, timeout=None):
        """Read until a given string is encountered or until timeout.
        When no match is found, return whatever is available instead,
        possibly the empty string. Raise EOFError if the connection
        is closed and no cooked data is available.
        """
        n = len(match)
        self.process_rawq()
        i = self.cookedq.find(match)
        if i >= 0:
            i = i+n
            buf = self.cookedq[:i]
            self.cookedq = self.cookedq[i:]
            return buf
        s_reply = ([self], [], [])
        s_args = s_reply
        if timeout is not None:
            s_args = s_args + (timeout,)
            from time import time
            time_start = time()
        while not self.eof and select.select(*s_args) == s_reply:
            # Resume the search just before the tail that could still
            # contain a partial match, to avoid rescanning everything.
            i = max(0, len(self.cookedq)-n)
            self.fill_rawq()
            self.process_rawq()
            i = self.cookedq.find(match, i)
            if i >= 0:
                i = i+n
                buf = self.cookedq[:i]
                self.cookedq = self.cookedq[i:]
                return buf
            if timeout is not None:
                elapsed = time() - time_start
                if elapsed >= timeout:
                    break
                s_args = s_reply + (timeout-elapsed,)
        return self.read_very_lazy()
    def read_all(self):
        """Read all data until EOF; block until connection closed."""
        self.process_rawq()
        while not self.eof:
            self.fill_rawq()
            self.process_rawq()
        buf = self.cookedq
        self.cookedq = ''
        return buf
    def read_some(self):
        """Read at least one byte of cooked data unless EOF is hit.
        Return '' if EOF is hit. Block if no data is immediately
        available.
        """
        self.process_rawq()
        while not self.cookedq and not self.eof:
            self.fill_rawq()
            self.process_rawq()
        buf = self.cookedq
        self.cookedq = ''
        return buf
    def read_very_eager(self):
        """Read everything that's possible without blocking in I/O (eager).
        Raise EOFError if connection closed and no cooked data
        available. Return '' if no cooked data available otherwise.
        Don't block unless in the midst of an IAC sequence.
        """
        self.process_rawq()
        while not self.eof and self.sock_avail():
            self.fill_rawq()
            self.process_rawq()
        return self.read_very_lazy()
    def read_eager(self):
        """Read readily available data.
        Raise EOFError if connection closed and no cooked data
        available. Return '' if no cooked data available otherwise.
        Don't block unless in the midst of an IAC sequence.
        """
        self.process_rawq()
        while not self.cookedq and not self.eof and self.sock_avail():
            self.fill_rawq()
            self.process_rawq()
        return self.read_very_lazy()
    def read_lazy(self):
        """Process and return data that's already in the queues (lazy).
        Raise EOFError if connection closed and no data available.
        Return '' if no cooked data available otherwise. Don't block
        unless in the midst of an IAC sequence.
        """
        self.process_rawq()
        return self.read_very_lazy()
    def read_very_lazy(self):
        """Return any data available in the cooked queue (very lazy).
        Raise EOFError if connection closed and no data available.
        Return '' if no cooked data available otherwise. Don't block.
        """
        buf = self.cookedq
        self.cookedq = ''
        if not buf and self.eof and not self.rawq:
            raise EOFError, 'telnet connection closed'
        return buf
    def read_sb_data(self):
        """Return any data available in the SB ... SE queue.
        Return '' if no SB ... SE available. Should only be called
        after seeing a SB or SE command. When a new SB command is
        found, old unread SB data will be discarded. Don't block.
        """
        buf = self.sbdataq
        self.sbdataq = ''
        return buf
    def set_option_negotiation_callback(self, callback):
        """Provide a callback function called after each receipt of a telnet option."""
        self.option_callback = callback
    def process_rawq(self):
        """Transfer from raw queue to cooked queue.
        Set self.eof when connection is closed. Don't block unless in
        the midst of an IAC sequence.
        """
        # buf[0] accumulates ordinary data, buf[1] subnegotiation data;
        # self.sb selects which one incoming bytes go to.
        buf = ['', '']
        try:
            while self.rawq:
                c = self.rawq_getchar()
                if not self.iacseq:
                    if c == theNULL:
                        continue
                    # "\021" is DC1 (XON); silently dropped like NUL.
                    if c == "\021":
                        continue
                    if c != IAC:
                        buf[self.sb] = buf[self.sb] + c
                        continue
                    else:
                        self.iacseq += c
                elif len(self.iacseq) == 1:
                    # 'IAC: IAC CMD [OPTION only for WILL/WONT/DO/DONT]'
                    if c in (DO, DONT, WILL, WONT):
                        self.iacseq += c
                        continue
                    self.iacseq = ''
                    if c == IAC:
                        # Escaped IAC: a literal 255 byte of data.
                        buf[self.sb] = buf[self.sb] + c
                    else:
                        if c == SB: # SB ... SE start.
                            self.sb = 1
                            self.sbdataq = ''
                        elif c == SE:
                            self.sb = 0
                            self.sbdataq = self.sbdataq + buf[1]
                            buf[1] = ''
                        if self.option_callback:
                            # Callback is supposed to look into
                            # the sbdataq
                            self.option_callback(self.sock, c, NOOPT)
                        else:
                            # We can't offer automatic processing of
                            # suboptions. Alas, we should not get any
                            # unless we did a WILL/DO before.
                            self.msg('IAC %d not recognized' % ord(c))
                elif len(self.iacseq) == 2:
                    cmd = self.iacseq[1]
                    self.iacseq = ''
                    opt = c
                    if cmd in (DO, DONT):
                        self.msg('IAC %s %d',
                            cmd == DO and 'DO' or 'DONT', ord(opt))
                        if self.option_callback:
                            self.option_callback(self.sock, cmd, opt)
                        else:
                            # Refuse every option we are asked to enable.
                            self.sock.sendall(IAC + WONT + opt)
                    elif cmd in (WILL, WONT):
                        self.msg('IAC %s %d',
                            cmd == WILL and 'WILL' or 'WONT', ord(opt))
                        if self.option_callback:
                            self.option_callback(self.sock, cmd, opt)
                        else:
                            self.sock.sendall(IAC + DONT + opt)
        except EOFError: # raised by self.rawq_getchar()
            self.iacseq = '' # Reset on EOF
            self.sb = 0
            pass
        self.cookedq = self.cookedq + buf[0]
        self.sbdataq = self.sbdataq + buf[1]
    def rawq_getchar(self):
        """Get next char from raw queue.
        Block if no data is immediately available. Raise EOFError
        when connection is closed.
        """
        if not self.rawq:
            self.fill_rawq()
            if self.eof:
                raise EOFError
        c = self.rawq[self.irawq]
        self.irawq = self.irawq + 1
        if self.irawq >= len(self.rawq):
            self.rawq = ''
            self.irawq = 0
        return c
    def fill_rawq(self):
        """Fill raw queue from exactly one recv() system call.
        Block if no data is immediately available. Set self.eof when
        connection is closed.
        """
        if self.irawq >= len(self.rawq):
            self.rawq = ''
            self.irawq = 0
        # The buffer size should be fairly small so as to avoid quadratic
        # behavior in process_rawq() above
        buf = self.sock.recv(50)
        self.msg("recv %r", buf)
        self.eof = (not buf)
        self.rawq = self.rawq + buf
    def sock_avail(self):
        """Test whether data is available on the socket."""
        return select.select([self], [], [], 0) == ([self], [], [])
    def interact(self):
        """Interaction function, emulates a very dumb telnet client."""
        if sys.platform == "win32":
            # select() on stdin doesn't work on Windows; use threads.
            self.mt_interact()
            return
        while 1:
            rfd, wfd, xfd = select.select([self, sys.stdin], [], [])
            if self in rfd:
                try:
                    text = self.read_eager()
                except EOFError:
                    print '*** Connection closed by remote host ***'
                    break
                if text:
                    sys.stdout.write(text)
                    sys.stdout.flush()
            if sys.stdin in rfd:
                line = sys.stdin.readline()
                if not line:
                    break
                self.write(line)
    def mt_interact(self):
        """Multithreaded version of interact()."""
        import thread
        thread.start_new_thread(self.listener, ())
        while 1:
            line = sys.stdin.readline()
            if not line:
                break
            self.write(line)
    def listener(self):
        """Helper for mt_interact() -- this executes in the other thread."""
        while 1:
            try:
                data = self.read_eager()
            except EOFError:
                print '*** Connection closed by remote host ***'
                return
            if data:
                sys.stdout.write(data)
            else:
                # No new data: flush whatever was written so far.
                sys.stdout.flush()
    def expect(self, list, timeout=None):
        """Read until one from a list of a regular expressions matches.
        The first argument is a list of regular expressions, either
        compiled (re.RegexObject instances) or uncompiled (strings).
        The optional second argument is a timeout, in seconds; default
        is no timeout.
        Return a tuple of three items: the index in the list of the
        first regular expression that matches; the match object
        returned; and the text read up till and including the match.
        If EOF is read and no text was read, raise EOFError.
        Otherwise, when nothing matches, return (-1, None, text) where
        text is the text received so far (may be the empty string if a
        timeout happened).
        If a regular expression ends with a greedy match (e.g. '.*')
        or if more than one expression can match the same input, the
        results are undeterministic, and may depend on the I/O timing.
        """
        # The re module is imported lazily, only if some pattern still
        # needs compiling; a local copy of the list is mutated in place.
        re = None
        list = list[:]
        indices = range(len(list))
        for i in indices:
            if not hasattr(list[i], "search"):
                if not re: import re
                list[i] = re.compile(list[i])
        if timeout is not None:
            from time import time
            time_start = time()
        while 1:
            self.process_rawq()
            for i in indices:
                m = list[i].search(self.cookedq)
                if m:
                    e = m.end()
                    text = self.cookedq[:e]
                    self.cookedq = self.cookedq[e:]
                    return (i, m, text)
            if self.eof:
                break
            if timeout is not None:
                elapsed = time() - time_start
                if elapsed >= timeout:
                    break
                s_args = ([self.fileno()], [], [], timeout-elapsed)
                r, w, x = select.select(*s_args)
                if not r:
                    break
            self.fill_rawq()
        text = self.read_very_lazy()
        if not text and self.eof:
            raise EOFError
        return (-1, None, text)
def test():
    """Small interactive driver for telnetlib.

    Usage: python telnetlib.py [-d] ... [host [port]]
    Default host is localhost; default port is 23.
    """
    verbosity = 0
    # Consume any leading -d flags; each one raises the debug level.
    while sys.argv[1:] and sys.argv[1] == '-d':
        verbosity = verbosity + 1
        del sys.argv[1]
    host = sys.argv[1] if sys.argv[1:] else 'localhost'
    port = 0
    if sys.argv[2:]:
        spec = sys.argv[2]
        try:
            port = int(spec)
        except ValueError:
            # Not numeric: treat the argument as a TCP service name.
            port = socket.getservbyname(spec, 'tcp')
    session = Telnet()
    session.set_debuglevel(verbosity)
    session.open(host, port, timeout=0.5)
    session.interact()
    session.close()
# Script entry point: run the dumb interactive telnet client.
if __name__ == '__main__':
    test()
| bsd-3-clause |
shenzebang/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 138 | 14048 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
# Scorer names exercised by the tests below, grouped by task type.
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
                      'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
               'roc_auc', 'average_precision', 'precision',
               'precision_weighted', 'precision_macro', 'precision_micro',
               'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
               'log_loss',
               'adjusted_rand_score' # not really, but works
               ]
# Scorers that only make sense for multilabel targets.
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
    """Minimal stand-in estimator that implements no ``fit`` method.

    Used to verify that ``check_scoring`` rejects objects which do not
    follow the estimator protocol.
    """
class EstimatorWithFit(BaseEstimator):
    """Stand-in estimator that implements only ``fit``.

    Inherits from ``BaseEstimator`` so that cloning works, but learns
    nothing and exposes no ``score``/``predict``.
    """
    def fit(self, X, y):
        # Fitting is a no-op; return self to honour the estimator API.
        return self
class EstimatorWithFitAndScore(object):
    """Stand-in estimator exposing ``fit`` and a constant ``score``."""
    def fit(self, X, y):
        # No learning takes place; return self per the estimator API.
        return self
    def score(self, X, y):
        # Always report a perfect score regardless of the data.
        return 1.0
class EstimatorWithFitAndPredict(object):
    """Stand-in estimator whose predictions replay the training targets."""
    def fit(self, X, y):
        # Memorize the targets so predict() can echo them back verbatim.
        self.y = y
        return self
    def predict(self, X):
        # Ignores X entirely; returns the stored training targets.
        return self.y
class DummyScorer(object):
    """Callable scorer that ignores its inputs and always yields 1."""
    def __call__(self, est, X, y):
        # Constant result, independent of estimator and data.
        return 1
def test_check_scoring():
    # Test all branches of check_scoring
    # 1) An object without a fit method must be rejected outright.
    estimator = EstimatorWithoutFit()
    pattern = (r"estimator should a be an estimator implementing 'fit' method,"
               r" .* was passed")
    assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
    # 2) With no scoring given, an estimator that has a score method is
    #    scored through the passthrough scorer (calls estimator.score).
    estimator = EstimatorWithFitAndScore()
    estimator.fit([[1]], [1])
    scorer = check_scoring(estimator)
    assert_true(scorer is _passthrough_scorer)
    assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
    # 3) With no scoring given and no score method, a TypeError is raised;
    #    supplying an explicit scoring string still works via predict.
    estimator = EstimatorWithFitAndPredict()
    estimator.fit([[1]], [1])
    pattern = (r"If no scoring is specified, the estimator passed should have"
               r" a 'score' method\. The estimator .* does not\.")
    assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
    scorer = check_scoring(estimator, "accuracy")
    assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
    # 4) A scoring string on a not-yet-fitted estimator yields a
    #    _PredictScorer instance.
    estimator = EstimatorWithFit()
    scorer = check_scoring(estimator, "accuracy")
    assert_true(isinstance(scorer, _PredictScorer))
    # 5) allow_none=True returns None when no scoring is specified.
    estimator = EstimatorWithFit()
    scorer = check_scoring(estimator, allow_none=True)
    assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
    # test that check_scoring works on GridSearchCV and pipeline.
    # slightly redundant non-regression test.
    # Both meta-estimators should delegate to a _PredictScorer.
    grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
    scorer = check_scoring(grid, "f1")
    assert_true(isinstance(scorer, _PredictScorer))
    pipe = make_pipeline(LinearSVC())
    scorer = check_scoring(pipe, "f1")
    assert_true(isinstance(scorer, _PredictScorer))
    # check that cross_val_score definitely calls the scorer
    # and doesn't make any assumptions about the estimator apart from having a
    # fit.
    scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
                             scoring=DummyScorer())
    assert_array_equal(scores, 1)
def test_make_scorer():
    # Sanity check on the make_scorer factory function: asking for both
    # thresholds and probabilities at once is contradictory and must raise.
    # Use a def instead of assigning a lambda to a name (PEP 8, E731).
    def f(*args):
        return 0
    assert_raises(ValueError, make_scorer, f, needs_threshold=True,
                  needs_proba=True)
def test_classification_scores():
    # Test classification scorers.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LinearSVC(random_state=0)
    clf.fit(X_train, y_train)
    # Each averaged scorer variant must equal the metric called directly
    # with the corresponding ``average`` argument.
    for prefix, metric in [('f1', f1_score), ('precision', precision_score),
                           ('recall', recall_score)]:
        score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='weighted')
        assert_almost_equal(score1, score2)
        score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='macro')
        assert_almost_equal(score1, score2)
        score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='micro')
        assert_almost_equal(score1, score2)
        # The un-suffixed scorer uses the default binary pos_label=1.
        score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=1)
        assert_almost_equal(score1, score2)
    # test fbeta score that takes an argument
    scorer = make_scorer(fbeta_score, beta=2)
    score1 = scorer(clf, X_test, y_test)
    score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
    assert_almost_equal(score1, score2)
    # test that custom scorer can be pickled
    unpickled_scorer = pickle.loads(pickle.dumps(scorer))
    score3 = unpickled_scorer(clf, X_test, y_test)
    assert_almost_equal(score1, score3)
    # smoke test the repr:
    repr(fbeta_score)
def test_regression_scorers():
    # Test regression scorers: the 'r2' scorer must agree with calling
    # r2_score directly on the regressor's predictions.
    dataset = load_diabetes()
    X_train, X_test, y_train, y_test = train_test_split(
        dataset.data, dataset.target, random_state=0)
    regressor = Ridge()
    regressor.fit(X_train, y_train)
    scorer_value = get_scorer('r2')(regressor, X_test, y_test)
    direct_value = r2_score(y_test, regressor.predict(X_test))
    assert_almost_equal(scorer_value, direct_value)
def test_thresholded_scorers():
    # Test scorers that take thresholds.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(random_state=0)
    clf.fit(X_train, y_train)
    # For a binary classifier, roc_auc computed from decision values and
    # from positive-class probabilities must agree (ranking is the same).
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)
    assert_almost_equal(score1, score3)
    # log_loss is a loss, so the scorer sign-flips it (greater-is-better).
    logscore = get_scorer('log_loss')(clf, X_test, y_test)
    logloss = log_loss(y_test, clf.predict_proba(X_test))
    assert_almost_equal(-logscore, logloss)
    # same for an estimator without decision_function
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)
    # test with a regressor (no decision_function)
    reg = DecisionTreeRegressor()
    reg.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(reg, X_test, y_test)
    score2 = roc_auc_score(y_test, reg.predict(X_test))
    assert_almost_equal(score1, score2)
    # Test that an exception is raised on more than two classes
    X, y = make_blobs(random_state=0, centers=3)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf.fit(X_train, y_train)
    assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
    # Test that the scorer work with multilabel-indicator format
    # for multilabel and multi-output multi-class classifier
    X, y = make_multilabel_classification(allow_unlabeled=False,
                                          random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    # Multi-output multi-class predict_proba
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    y_proba = clf.predict_proba(X_test)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    # np.vstack requires a sequence of arrays; passing a generator is
    # deprecated and rejected by modern numpy, so build a list explicitly.
    score2 = roc_auc_score(y_test, np.vstack([p[:, -1] for p in y_proba]).T)
    assert_almost_equal(score1, score2)
    # Multi-output multi-class decision_function
    # TODO Is there any yet?
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    clf._predict_proba = clf.predict_proba
    clf.predict_proba = None
    clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
    y_proba = clf.decision_function(X_test)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    # y_proba is already a list of per-output arrays; stack it directly
    # instead of iterating it through a generator expression.
    score2 = roc_auc_score(y_test, np.vstack(y_proba).T)
    assert_almost_equal(score1, score2)
    # Multilabel predict_proba
    clf = OneVsRestClassifier(DecisionTreeClassifier())
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
    assert_almost_equal(score1, score2)
    # Multilabel decision function
    clf = OneVsRestClassifier(LinearSVC(random_state=0))
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
    # Test clustering scorers against gold standard labeling.
    # We don't have any real unsupervised Scorers yet.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    kmeans = KMeans(n_clusters=3)
    kmeans.fit(X_train)
    # The registered scorer and a direct metric call must agree.
    via_scorer = get_scorer('adjusted_rand_score')(kmeans, X_test, y_test)
    via_metric = adjusted_rand_score(y_test, kmeans.predict(X_test))
    assert_almost_equal(via_scorer, via_metric)
@ignore_warnings
def test_raises_on_score_list():
    # Test that when a list of scores is returned, we raise proper errors.
    # average=None makes f1_score return one value per class instead of a
    # scalar, which scorers cannot aggregate.
    X, y = make_blobs(random_state=0)
    f1_scorer_no_average = make_scorer(f1_score, average=None)
    clf = DecisionTreeClassifier()
    assert_raises(ValueError, cross_val_score, clf, X, y,
                  scoring=f1_scorer_no_average)
    grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
                               param_grid={'max_depth': [1, 2]})
    assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
    # Test that scorers support sample_weight or raise sensible errors
    # Unlike the metrics invariance test, in the scorer case it's harder
    # to ensure that, on the classifier output, weighted and unweighted
    # scores really should be unequal.
    X, y = make_classification(random_state=0)
    _, y_ml = make_multilabel_classification(n_samples=X.shape[0],
                                             random_state=0)
    split = train_test_split(X, y, y_ml, random_state=0)
    X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
    # Zero out the weight of the first ten test samples; scoring with these
    # weights must match scoring with those samples dropped entirely.
    sample_weight = np.ones_like(y_test)
    sample_weight[:10] = 0
    # get sensible estimators for each metric
    sensible_regr = DummyRegressor(strategy='median')
    sensible_regr.fit(X_train, y_train)
    sensible_clf = DecisionTreeClassifier(random_state=0)
    sensible_clf.fit(X_train, y_train)
    sensible_ml_clf = DecisionTreeClassifier(random_state=0)
    sensible_ml_clf.fit(X_train, y_ml_train)
    # Map every scorer name to the estimator type it can score.
    estimator = dict([(name, sensible_regr)
                      for name in REGRESSION_SCORERS] +
                     [(name, sensible_clf)
                      for name in CLF_SCORERS] +
                     [(name, sensible_ml_clf)
                      for name in MULTILABEL_ONLY_SCORERS])
    for name, scorer in SCORERS.items():
        if name in MULTILABEL_ONLY_SCORERS:
            target = y_ml_test
        else:
            target = y_test
        try:
            weighted = scorer(estimator[name], X_test, target,
                              sample_weight=sample_weight)
            ignored = scorer(estimator[name], X_test[10:], target[10:])
            unweighted = scorer(estimator[name], X_test, target)
            assert_not_equal(weighted, unweighted,
                             msg="scorer {0} behaves identically when "
                             "called with sample weights: {1} vs "
                             "{2}".format(name, weighted, unweighted))
            assert_almost_equal(weighted, ignored,
                                err_msg="scorer {0} behaves differently when "
                                "ignoring samples and setting sample_weight to"
                                " 0: {1} vs {2}".format(name, weighted,
                                                        ignored))
        except TypeError as e:
            # Scorers without sample_weight support must say so clearly.
            assert_true("sample_weight" in str(e),
                        "scorer {0} raises unhelpful exception when called "
                        "with sample weights: {1}".format(name, str(e)))
| bsd-3-clause |
Philippe12/external_chromium_org | tools/perf/metrics/speedindex.py | 23 | 12282 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
from metrics import Metric
class SpeedIndexMetric(Metric):
  """The speed index metric is one way of measuring page load speed.

  It is meant to approximate user perception of page load speed, and it
  is based on the amount of time that it takes to paint to the visual
  portion of the screen. It includes paint events that occur after the
  onload event, and it doesn't include time loading things off-screen.

  This speed index metric is based on WebPageTest.org (WPT).
  For more info see: http://goo.gl/e7AH5l
  """
  def __init__(self):
    super(SpeedIndexMetric, self).__init__()
    # Chosen in Start(): video capture if supported, else paint rects.
    self._impl = None
    # Whether speedindex.js has been injected for the current page.
    self._script_is_loaded = False
    # Caches the IsFinished() result so the check short-circuits once true.
    self._is_finished = False
    with open(os.path.join(os.path.dirname(__file__), 'speedindex.js')) as f:
      self._js = f.read()

  def Start(self, _, tab):
    """Start recording events.

    This method should be called in the WillNavigateToPage method of
    a PageMeasurement, so that all the events can be captured. If it's called
    in DidNavigateToPage, that will be too late.
    """
    self._impl = (VideoSpeedIndexImpl(tab) if tab.video_capture_supported else
                  PaintRectSpeedIndexImpl(tab))
    self._impl.Start()
    self._script_is_loaded = False
    self._is_finished = False

  def Stop(self, _, tab):
    """Stop timeline recording."""
    assert self._impl, 'Must call Start() before Stop()'
    assert self.IsFinished(tab), 'Must wait for IsFinished() before Stop()'
    self._impl.Stop()

  # Optional argument chart_name is not in base class Metric.
  # pylint: disable=W0221
  def AddResults(self, tab, results, chart_name=None):
    """Calculate the speed index and add it to the results."""
    index = self._impl.CalculateSpeedIndex()
    # Release the tab so that it can be disconnected.
    self._impl = None
    results.Add('speed_index', 'ms', index, chart_name=chart_name)

  def IsFinished(self, tab):
    """Decide whether the timeline recording should be stopped.

    When the timeline recording is stopped determines which paint events
    are used in the speed index metric calculation. In general, the recording
    should continue if there has just been some data received, because
    this suggests that painting may continue.

    A page may repeatedly request resources in an infinite loop; a timeout
    should be placed in any measurement that uses this metric, e.g.:
      def IsDone():
        return self._speedindex.IsFinished(tab)
      util.WaitFor(IsDone, 60)

    Returns:
      True if 2 seconds have passed since last resource received, false
      otherwise.
    """
    if self._is_finished:
      return True
    # The script that provides the function window.timeSinceLastResponseMs()
    # needs to be loaded for this function, but it must be loaded AFTER
    # the Start method is called, because if the Start method is called in
    # the PageMeasurement's WillNavigateToPage function, then it will
    # not be available here. The script should only be re-loaded once per page
    # so that variables in the script get reset only for a new page.
    if not self._script_is_loaded:
      tab.ExecuteJavaScript(self._js)
      self._script_is_loaded = True
    time_since_last_response_ms = tab.EvaluateJavaScript(
        "window.timeSinceLastResponseAfterLoadMs()")
    self._is_finished = time_since_last_response_ms > 2000
    return self._is_finished
class SpeedIndexImpl(object):

  def __init__(self, tab):
    """Constructor.

    Args:
      tab: The telemetry.core.Tab object for which to calculate SpeedIndex.
    """
    self.tab = tab

  def Start(self):
    raise NotImplementedError()

  def Stop(self):
    raise NotImplementedError()

  def GetTimeCompletenessList(self):
    """Returns a list of time to visual completeness tuples.

    In the WPT PHP implementation, this is also called 'visual progress'.
    """
    raise NotImplementedError()

  def CalculateSpeedIndex(self):
    """Calculate the speed index.

    The speed index number conceptually represents the number of milliseconds
    that the page was "visually incomplete". If the page were 0% complete for
    1000 ms, then the score would be 1000; if it were 0% complete for 100 ms
    then 90% complete (ie 10% incomplete) for 900 ms, then the score would be
    1.0*100 + 0.1*900 = 190.

    Returns:
      A single number, milliseconds of visual incompleteness.
    """
    samples = self.GetTimeCompletenessList()
    # Integrate (1 - completeness) over time, one interval at a time.
    # Before the first sample the elapsed time is zero, so it adds nothing.
    speed_index = 0.0
    last_time = samples[0][0]
    last_completeness = 0.0
    for current_time, current_completeness in samples:
      interval = current_time - last_time
      speed_index += interval * (1.0 - last_completeness)
      last_time = current_time
      last_completeness = current_completeness
    return speed_index
class VideoSpeedIndexImpl(SpeedIndexImpl):
  """Computes visual completeness from captured video frame histograms."""

  def __init__(self, tab):
    super(VideoSpeedIndexImpl, self).__init__(tab)
    assert self.tab.video_capture_supported
    # Filled in by Stop(): list of (time, completeness) tuples.
    self._time_completeness_list = None

  def Start(self):
    # TODO(tonyg): Bitrate is arbitrary here. Experiment with screen capture
    # overhead vs. speed index accuracy and set the bitrate appropriately.
    self.tab.StartVideoCapture(min_bitrate_mbps=4)

  def Stop(self):
    # Completeness of a frame is measured by how far its color histogram has
    # moved from the first frame's histogram toward the last frame's.
    histograms = [(time, bitmap.ColorHistogram())
                  for time, bitmap in self.tab.StopVideoCapture()]
    start_histogram = histograms[0][1]
    final_histogram = histograms[-1][1]
    def Difference(hist1, hist2):
      # Per-bucket absolute difference between two histograms (lazily).
      return (abs(a - b) for a, b in zip(hist1, hist2))
    full_difference = list(Difference(start_histogram, final_histogram))
    total = float(sum(full_difference))
    # NOTE(review): if the first and last frames are visually identical,
    # total is 0 and FrameProgress divides by zero -- presumably a capture
    # always changes visually; confirm.
    def FrameProgress(histogram):
      difference = Difference(start_histogram, histogram)
      # Each color bucket is capped at the full difference, so that progress
      # does not exceed 100%.
      return sum(min(a, b) for a, b in zip(difference, full_difference))
    self._time_completeness_list = [(time, FrameProgress(hist) / total)
                                    for time, hist in histograms]

  def GetTimeCompletenessList(self):
    assert self._time_completeness_list, 'Must call Stop() first.'
    return self._time_completeness_list
class PaintRectSpeedIndexImpl(SpeedIndexImpl):
  """Computes visual completeness from devtools timeline paint events."""

  def __init__(self, tab):
    super(PaintRectSpeedIndexImpl, self).__init__(tab)

  def Start(self):
    self.tab.StartTimelineRecording()

  def Stop(self):
    self.tab.StopTimelineRecording()

  def GetTimeCompletenessList(self):
    # Build (time, completeness) samples: each paint event contributes its
    # adjusted area, accumulated as a fraction of the total painted area.
    events = self.tab.timeline_model.GetAllEvents()
    viewport = self._GetViewportSize()
    paint_events = self._IncludedPaintEvents(events)
    time_area_dict = self._TimeAreaDict(paint_events, viewport)
    total_area = sum(time_area_dict.values())
    assert total_area > 0.0, 'Total paint event area must be greater than 0.'
    completeness = 0.0
    time_completeness_list = []
    # TODO(tonyg): This sets the start time to the start of the first paint
    # event. That can't be correct. The start time should be navigationStart.
    # Since the previous screen is not cleared at navigationStart, we should
    # probably assume the completeness is 0 until the first paint and add the
    # time of navigationStart as the start. We need to confirm what WPT does.
    time_completeness_list.append(
        (self.tab.timeline_model.GetAllEvents()[0].start, completeness))
    for time, area in sorted(time_area_dict.items()):
      completeness += float(area) / total_area
      # Visual progress is rounded to the nearest percentage point as in WPT.
      time_completeness_list.append((time, round(completeness, 2)))
    return time_completeness_list

  def _GetViewportSize(self):
    """Returns dimensions of the viewport."""
    return self.tab.EvaluateJavaScript(
        '[ window.innerWidth, window.innerHeight ]')

  def _IncludedPaintEvents(self, events):
    """Get all events that are counted in the calculation of the speed index.

    There's one category of paint event that's filtered out: paint events
    that occur before the first 'ResourceReceiveResponse' and 'Layout' events.

    Previously in the WPT speed index, paint events that contain children paint
    events were also filtered out.
    """
    def FirstLayoutTime(events):
      """Get the start time of the first layout after a resource received."""
      has_received_response = False
      for event in events:
        if event.name == 'ResourceReceiveResponse':
          has_received_response = True
        elif has_received_response and event.name == 'Layout':
          return event.start
      assert False, 'There were no layout events after resource receive events.'
    first_layout_time = FirstLayoutTime(events)
    paint_events = [e for e in events
                    if e.start >= first_layout_time and e.name == 'Paint']
    return paint_events

  def _TimeAreaDict(self, paint_events, viewport):
    """Make a dict from time to adjusted area value for events at that time.

    The adjusted area value of each paint event is determined by how many paint
    events cover the same rectangle, and whether it's a full-window paint event.
    "Adjusted area" can also be thought of as "points" of visual completeness --
    each rectangle has a certain number of points and these points are
    distributed amongst the paint events that paint that rectangle.

    Args:
      paint_events: A list of paint events
      viewport: A tuple (width, height) of the window.

    Returns:
      A dictionary of times of each paint event (in milliseconds) to the
      adjusted area that the paint event is worth.
    """
    width, height = viewport
    fullscreen_area = width * height
    def ClippedArea(rectangle):
      """Returns rectangle area clipped to viewport size."""
      _, x0, y0, x1, y1 = rectangle
      clipped_width = max(0, min(width, x1) - max(0, x0))
      clipped_height = max(0, min(height, y1) - max(0, y0))
      return clipped_width * clipped_height
    grouped = self._GroupEventByRectangle(paint_events)
    event_area_dict = collections.defaultdict(int)
    for rectangle, events in grouped.items():
      # The area points for each rectangle are divided up among the paint
      # events in that rectangle.
      area = ClippedArea(rectangle)
      update_count = len(events)
      adjusted_area = float(area) / update_count
      # Paint events for the largest-area rectangle are counted as 50%.
      if area == fullscreen_area:
        adjusted_area /= 2
      for event in events:
        # The end time for an event is used for that event's time.
        event_time = event.end
        event_area_dict[event_time] += adjusted_area
    return event_area_dict

  def _GetRectangle(self, paint_event):
    """Get the specific rectangle on the screen for a paint event.

    Each paint event belongs to a frame (as in html <frame> or <iframe>).
    This, together with location and dimensions, comprises a rectangle.
    In the WPT source, this 'rectangle' is also called a 'region'.
    """
    def GetBox(quad):
      """Gets top-left and bottom-right coordinates from paint event.

      In the timeline data from devtools, paint rectangle dimensions are
      represented x-y coordinates of four corners, clockwise from the top-left.
      See: function WebInspector.TimelinePresentationModel.quadFromRectData
      in file src/out/Debug/obj/gen/devtools/TimelinePanel.js.
      """
      x0, y0, _, _, x1, y1, _, _ = quad
      return (x0, y0, x1, y1)
    assert paint_event.name == 'Paint'
    frame = paint_event.args['frameId']
    return (frame,) + GetBox(paint_event.args['data']['clip'])

  def _GroupEventByRectangle(self, paint_events):
    """Group all paint events according to the rectangle that they update."""
    result = collections.defaultdict(list)
    for event in paint_events:
      assert event.name == 'Paint'
      result[self._GetRectangle(event)].append(event)
    return result
| bsd-3-clause |
ptcrypto/p2pool-adaptive | p2pool/bitcoin/networks/litecoin_testnet.py | 29 | 1198 | import os
import platform
from twisted.internet import defer
from .. import data, helper
from p2pool.util import pack
# Magic bytes prepended to every P2P message on the litecoin testnet network.
P2P_PREFIX = 'fcc1b7dc'.decode('hex')
P2P_PORT = 19333
# Base58 version byte for testnet pay-to-pubkey-hash addresses.
ADDRESS_VERSION = 111
RPC_PORT = 19332
# Sanity check that the RPC daemon is really litecoind running in testnet
# mode before p2pool starts using it.
RPC_CHECK = defer.inlineCallbacks(lambda bitcoind: defer.returnValue(
            'litecoinaddress' in (yield bitcoind.rpc_help()) and
            (yield bitcoind.rpc_getinfo())['testnet']
        ))
# Block subsidy in litoshis: 50 LTC, halving every 840000 blocks.
SUBSIDY_FUNC = lambda height: 50*100000000 >> (height + 1)//840000
# Litecoin uses scrypt for proof of work (ltc_scrypt is imported lazily so
# the module can load even when the extension is absent).
POW_FUNC = lambda data: pack.IntType(256).unpack(__import__('ltc_scrypt').getPoWHash(data))
BLOCK_PERIOD = 150 # s
SYMBOL = 'tLTC'
# Platform-dependent location of the litecoin.conf configuration file.
CONF_FILE_FUNC = lambda: os.path.join(os.path.join(os.environ['APPDATA'], 'Litecoin') if platform.system() == 'Windows' else os.path.expanduser('~/Library/Application Support/Litecoin/') if platform.system() == 'Darwin' else os.path.expanduser('~/.litecoin'), 'litecoin.conf')
BLOCK_EXPLORER_URL_PREFIX = 'http://nonexistent-litecoin-testnet-explorer/block/'
ADDRESS_EXPLORER_URL_PREFIX = 'https://chain.so/address/LTCTEST/'
TX_EXPLORER_URL_PREFIX = 'https://chain.so/tx/LTCTEST/'
SANE_TARGET_RANGE = (2**256//1000000000 - 1, 2**256 - 1)
DUMB_SCRYPT_DIFF = 2**16
DUST_THRESHOLD = 1e8
| gpl-3.0 |
javierlgroba/Eventer-gapp | django/contrib/localflavor/cl/forms.py | 101 | 3238 | """
Chile specific form helpers.
"""
from __future__ import absolute_import, unicode_literals
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import RegexField, Select
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_text
from .cl_regions import REGION_CHOICES
class CLRegionSelect(Select):
    """
    A Select widget that uses a list of Chilean Regions (Regiones)
    as its choices.
    """
    def __init__(self, attrs=None):
        # The choices come from the static REGION_CHOICES list in cl_regions.
        super(CLRegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class CLRutField(RegexField):
    """
    Chilean "Rol Unico Tributario" (RUT) field. This is the Chilean national
    identification number.

    Samples for testing are available from
    https://palena.sii.cl/cvc/dte/ee_empresas_emisoras.html
    """
    default_error_messages = {
        'invalid': _('Enter a valid Chilean RUT.'),
        'strict': _('Enter a valid Chilean RUT. The format is XX.XXX.XXX-X.'),
        'checksum': _('The Chilean RUT is not valid.'),
    }

    def __init__(self, *args, **kwargs):
        # 'strict' mode only accepts the fully punctuated XX.XXX.XXX-X form.
        if 'strict' in kwargs:
            del kwargs['strict']
            super(CLRutField, self).__init__(r'^(\d{1,2}\.)?\d{3}\.\d{3}-[\dkK]$',
                error_message=self.default_error_messages['strict'], *args, **kwargs)
        else:
            # In non-strict mode, accept RUTs that validate but do not exist in
            # the real world.
            super(CLRutField, self).__init__(r'^[\d\.]{1,11}-?[\dkK]$', *args, **kwargs)

    def clean(self, value):
        """
        Check and clean the Chilean RUT.

        Runs the regex validation from RegexField first, then verifies the
        checksum digit and returns the canonical XX.XXX.XXX-X formatting.
        """
        super(CLRutField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        rut, verificador = self._canonify(value)
        if self._algorithm(rut) == verificador:
            return self._format(rut, verificador)
        else:
            raise ValidationError(self.error_messages['checksum'])

    def _algorithm(self, rut):
        """
        Takes RUT in pure canonical form, calculates the verifier digit.
        """
        # Modulo-11 checksum: digits are weighted 2..7 cyclically starting
        # from the rightmost digit; a remainder of 10 maps to 'K'.
        suma = 0
        multi = 2
        for r in rut[::-1]:
            suma += int(r) * multi
            multi += 1
            if multi == 8:
                multi = 2
        return '0123456789K0'[11 - suma % 11]

    def _canonify(self, rut):
        """
        Turns the RUT into one normalized format. Returns a (rut, verifier)
        tuple.
        """
        # Strip spaces, dots and dashes; the last character is the verifier.
        rut = smart_text(rut).replace(' ', '').replace('.', '').replace('-', '')
        return rut[:-1], rut[-1].upper()

    def _format(self, code, verifier=None):
        """
        Formats the RUT from canonical form to the common string representation.
        If verifier=None, then the last digit in 'code' is the verifier.
        """
        if verifier is None:
            verifier = code[-1]
            code = code[:-1]
        # Insert thousand-separator dots right-to-left, three digits at a time.
        while len(code) > 3 and '.' not in code[:3]:
            pos = code.find('.')
            if pos == -1:
                new_dot = -3
            else:
                new_dot = pos - 3
            code = code[:new_dot] + '.' + code[new_dot:]
        return '%s-%s' % (code, verifier)
| lgpl-3.0 |
sdlovecraft/samhains | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/styles/autumn.py | 364 | 2144 | # -*- coding: utf-8 -*-
"""
pygments.styles.autumn
~~~~~~~~~~~~~~~~~~~~~~
A colorful style, inspired by the terminal highlighting style.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class AutumnStyle(Style):
    """
    A colorful style, inspired by the terminal highlighting style.
    """

    # No styling is applied to token types that are not listed below.
    default_style = ""

    # Mapping from token types to style definition strings
    # (color, bold/italic/underline, background).
    styles = {
        Whitespace:                 '#bbbbbb',

        Comment:                    'italic #aaaaaa',
        Comment.Preproc:            'noitalic #4c8317',
        Comment.Special:            'italic #0000aa',

        Keyword:                    '#0000aa',
        Keyword.Type:               '#00aaaa',

        Operator.Word:              '#0000aa',

        Name.Builtin:               '#00aaaa',
        Name.Function:              '#00aa00',
        Name.Class:                 'underline #00aa00',
        Name.Namespace:             'underline #00aaaa',
        Name.Variable:              '#aa0000',
        Name.Constant:              '#aa0000',
        Name.Entity:                'bold #800',
        Name.Attribute:             '#1e90ff',
        Name.Tag:                   'bold #1e90ff',
        Name.Decorator:             '#888888',

        String:                     '#aa5500',
        String.Symbol:              '#0000aa',
        String.Regex:               '#009999',

        Number:                     '#009999',

        Generic.Heading:            'bold #000080',
        Generic.Subheading:         'bold #800080',
        Generic.Deleted:            '#aa0000',
        Generic.Inserted:           '#00aa00',
        Generic.Error:              '#aa0000',
        Generic.Emph:               'italic',
        Generic.Strong:             'bold',
        Generic.Prompt:             '#555555',
        Generic.Output:             '#888888',
        Generic.Traceback:          '#aa0000',

        Error:                      '#F00 bg:#FAA'
    }
| mit |
t103z/interview | api_server/interview/problem_views.py | 1 | 7417 | #!/usr/bin/env python
# encoding: utf-8
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from django.conf import settings
import jsonschema
from . import permissions
from .schemas import swagger_schema
from . import sequences
import pymongo
@api_view(['GET', 'POST'])
def root(request, room_id, **kwargs):
    '''
    Get and set problems for a certain room.

    POST creates a new problem attached to the room; GET returns one page
    of the room's problems, selected by the ``offset``/``limit`` query
    parameters (defaults 0 and 1).  Only 'hr' and 'interviewer' users are
    allowed.
    '''
    # Permission check
    permitted_user_types = ['hr', 'interviewer']
    if permissions.check(request, permitted_user_types) != permissions.PASS:
        return Response(
            {'error': 'Permission Denied'},
            status.HTTP_403_FORBIDDEN
        )
    room_id = int(room_id)
    if request.method == 'POST':
        # Format check: payload must match the swagger Problem schema with
        # roomId/type/content required.
        problem_data = dict(request.data)
        try:
            tmp_schema = swagger_schema['definitions']['Problem'].copy()
            tmp_schema['required'] = [
                "roomId",
                "type",
                "content"
            ]
            jsonschema.validate(problem_data, tmp_schema)
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            return Response(
                {'error': "Key error"},
                status.HTTP_400_BAD_REQUEST
            )
        problem_id = sequences.get_next_sequence('problem_id')
        client = pymongo.MongoClient()
        db = client[settings.DB_NAME]
        problem_data['id'] = problem_id
        # Check that the target room exists before creating the problem.
        room_cursor = db.rooms.find({'id': room_id})
        if room_cursor.count() == 0:
            return Response(
                {'error': "roomId error"},
                status.HTTP_400_BAD_REQUEST
            )
        # Register the new problem id atomically with $push.  The old
        # read-modify-write of the whole 'problems' list lost concurrent
        # updates.
        db.rooms.update_one(
            {'id': room_id}, {'$push': {'problems': problem_id}}
        )
        db.problems.insert_one(problem_data)
        # insert_one mutates the dict with MongoDB's internal '_id';
        # strip it before serializing the response.
        if '_id' in problem_data:
            del problem_data['_id']
        return Response(
            problem_data,
            status.HTTP_200_OK
        )
    elif request.method == 'GET':
        client = pymongo.MongoClient()
        db = client[settings.DB_NAME]
        # Parse pagination parameters.  Missing or blank values fall back
        # to the defaults.  The old code only special-cased a single-space
        # offset (' '), so an empty offset string crashed int() with an
        # unhandled ValueError.
        raw_offset = request.GET.get('offset')
        raw_limit = request.GET.get('limit')
        offset = 0 if raw_offset is None or not raw_offset.strip() \
            else int(raw_offset)
        limit = 1 if raw_limit is None or not raw_limit.strip() \
            else int(raw_limit)
        # Check room existence
        room_cursor = db.rooms.find({'id': room_id})
        if room_cursor.count() == 0:
            return Response(
                {'error': "roomId error"},
                status.HTTP_400_BAD_REQUEST
            )
        room = room_cursor[0]
        problems_list = room['problems']
        if not problems_list or len(problems_list) < offset:
            # Nothing to return for this page.
            return Response(
                {'offset': offset, 'limit': limit, 'problems': []},
                status.HTTP_200_OK
            )
        sorted_problem_list = sorted(problems_list)
        # Clamp the page to the end of the list, but echo back the
        # requested limit (the old code echoed raw query strings here,
        # disagreeing with the parsed integers in the empty branch above).
        effective_limit = limit
        if len(sorted_problem_list) <= offset + effective_limit:
            effective_limit = len(sorted_problem_list) - offset
        response_problem_list = []
        for index in range(offset, offset + effective_limit):
            problem_cursor = db.problems.find(
                {'id': sorted_problem_list[index]}
            )
            # Exactly one record must exist per registered id; anything
            # else means the database is inconsistent.
            if problem_cursor.count() != 1:
                return Response(
                    {'error': "No such problem record."},
                    status.HTTP_404_NOT_FOUND
                )
            problem = problem_cursor[0]
            if '_id' in problem:
                del problem['_id']
            response_problem_list.append(problem)
        return Response(
            {
                'offset': offset,
                'limit': limit,
                'problems': response_problem_list,
            },
            status.HTTP_200_OK
        )
    else:
        return Response(
            {'error': "Illegal request method"},
            status.HTTP_400_BAD_REQUEST
        )
@api_view(['GET', 'PUT', 'DELETE'])
def manage(request, problem_id, **kwargs):
    '''
    Manage problems directly according to problem id.

    GET returns the problem, PUT replaces it (id and roomId are
    immutable), DELETE removes it and unregisters it from its room.
    Only 'hr' and 'interviewer' users are allowed.
    '''
    # Check permission
    permitted_user_types = ['hr', 'interviewer']
    if permissions.check(request, permitted_user_types) != permissions.PASS:
        return Response(
            {'error': 'Permission denied'},
            status.HTTP_403_FORBIDDEN
        )
    client = pymongo.MongoClient()
    db = client[settings.DB_NAME]
    problem_id = int(problem_id)
    problem_cursor = db.problems.find({'id': problem_id})
    if problem_cursor.count() == 0:
        return Response(
            {'error': 'Problem not found.'},
            status.HTTP_404_NOT_FOUND
        )
    elif problem_cursor.count() > 1:  # Which should never happen
        return Response(
            {'error': 'Problem id duplicated.'},
            status.HTTP_400_BAD_REQUEST
        )
    problem = problem_cursor[0]
    if request.method == 'DELETE':
        # Unregister the problem from its room with an atomic $pull.  The
        # old code $set the entire fetched room document back, which
        # included the immutable '_id' field (rejected by MongoDB) and
        # raised ValueError when the id was absent from the room's list.
        db.rooms.update_many(
            {'id': problem['roomId']},
            {'$pull': {'problems': problem_id}}
        )
        db.problems.delete_one({'id': problem_id})
        return Response(status=status.HTTP_200_OK)
    elif request.method == 'GET':
        # Strip MongoDB's internal '_id' before serializing.
        if '_id' in problem:
            del problem['_id']
        return Response(
            problem,
            status.HTTP_200_OK
        )
    elif request.method == 'PUT':
        update_data = dict(request.data)
        # Validate against the schema FIRST so a payload with missing keys
        # yields a 400 instead of an unhandled KeyError (500) below.
        try:
            jsonschema.validate(update_data,
                                swagger_schema['definitions']['Problem'])
        except Exception:
            return Response(
                {'error': 'Key error'},
                status.HTTP_400_BAD_REQUEST
            )
        # The problem id in the payload must match the URL.
        if update_data.get('id') != problem_id:
            return Response(
                {'error': 'Unknown error'},
                status.HTTP_400_BAD_REQUEST
            )
        # A problem may not be moved to another room.
        if update_data.get('roomId') != problem['roomId']:
            return Response(
                {'error': 'Unknown error(roomid doesn\'t match)'},
                status.HTTP_400_BAD_REQUEST
            )
        db.problems.update_one(
            {'id': problem_id},
            {'$set': update_data}
        )
        return Response(
            update_data,
            status.HTTP_200_OK
        )
akretion/odoo | addons/stock/models/stock_scrap.py | 10 | 7252 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.tools import float_compare
class StockScrap(models.Model):
    # A scrap order moves goods from an internal location into a
    # scrap-flagged location via a stock.move created on validation.
    _name = 'stock.scrap'
    _order = 'id desc'
    _description = 'Scrap'
    def _get_default_scrap_location_id(self):
        """Return the first scrap-flagged location owned by (or shared with)
        the current user's company."""
        return self.env['stock.location'].search([('scrap_location', '=', True), ('company_id', 'in', [self.env.user.company_id.id, False])], limit=1).id
    def _get_default_location_id(self):
        """Return the main stock location of the company's first warehouse,
        or None when the company has no warehouse."""
        company_user = self.env.user.company_id
        warehouse = self.env['stock.warehouse'].search([('company_id', '=', company_user.id)], limit=1)
        if warehouse:
            return warehouse.lot_stock_id.id
        return None
    name = fields.Char(
        'Reference', default=lambda self: _('New'),
        copy=False, readonly=True, required=True,
        states={'done': [('readonly', True)]})
    origin = fields.Char(string='Source Document')
    product_id = fields.Many2one(
        'product.product', 'Product', domain=[('type', 'in', ['product', 'consu'])],
        required=True, states={'done': [('readonly', True)]})
    product_uom_id = fields.Many2one(
        'uom.uom', 'Unit of Measure',
        required=True, states={'done': [('readonly', True)]})
    # NOTE(review): the first positional argument of fields.Selection is the
    # selection list; 'Product Tracking' looks intended as the label
    # (string=) -- confirm against the Odoo fields API.
    tracking = fields.Selection('Product Tracking', readonly=True, related="product_id.tracking")
    lot_id = fields.Many2one(
        'stock.production.lot', 'Lot',
        states={'done': [('readonly', True)]}, domain="[('product_id', '=', product_id)]")
    package_id = fields.Many2one(
        'stock.quant.package', 'Package',
        states={'done': [('readonly', True)]})
    owner_id = fields.Many2one('res.partner', 'Owner', states={'done': [('readonly', True)]})
    move_id = fields.Many2one('stock.move', 'Scrap Move', readonly=True)
    picking_id = fields.Many2one('stock.picking', 'Picking', states={'done': [('readonly', True)]})
    location_id = fields.Many2one(
        'stock.location', 'Location', domain="[('usage', '=', 'internal')]",
        required=True, states={'done': [('readonly', True)]}, default=_get_default_location_id)
    scrap_location_id = fields.Many2one(
        'stock.location', 'Scrap Location', default=_get_default_scrap_location_id,
        domain="[('scrap_location', '=', True)]", required=True, states={'done': [('readonly', True)]})
    scrap_qty = fields.Float('Quantity', default=1.0, required=True, states={'done': [('readonly', True)]})
    state = fields.Selection([
        ('draft', 'Draft'),
        ('done', 'Done')], string='Status', default="draft")
    date_expected = fields.Datetime('Expected Date', default=fields.Datetime.now)
    @api.onchange('picking_id')
    def _onchange_picking_id(self):
        # For a done picking the goods already sit at the destination, so
        # scrap from there; otherwise scrap from the picking's source.
        if self.picking_id:
            self.location_id = (self.picking_id.state == 'done') and self.picking_id.location_dest_id.id or self.picking_id.location_id.id
    @api.onchange('product_id')
    def onchange_product_id(self):
        # Keep the unit of measure in sync with the selected product.
        if self.product_id:
            self.product_uom_id = self.product_id.uom_id.id
    @api.model
    def create(self, vals):
        """Create a scrap order, drawing a sequence reference when none
        was supplied (or when the placeholder 'New' was kept)."""
        if 'name' not in vals or vals['name'] == _('New'):
            vals['name'] = self.env['ir.sequence'].next_by_code('stock.scrap') or _('New')
        scrap = super(StockScrap, self).create(vals)
        return scrap
    def unlink(self):
        """Forbid deleting scrap orders that have already been processed."""
        if 'done' in self.mapped('state'):
            raise UserError(_('You cannot delete a scrap which is done.'))
        return super(StockScrap, self).unlink()
    def _get_origin_moves(self):
        # Moves of the source picking concerning the scrapped product
        # (falsy when no picking is linked).
        return self.picking_id and self.picking_id.move_lines.filtered(lambda x: x.product_id == self.product_id)
    def _prepare_move_values(self):
        """Build the stock.move values (with a single pre-filled move line)
        that will carry out this scrap."""
        self.ensure_one()
        return {
            'name': self.name,
            'origin': self.origin or self.picking_id.name or self.name,
            'product_id': self.product_id.id,
            'product_uom': self.product_uom_id.id,
            'product_uom_qty': self.scrap_qty,
            'location_id': self.location_id.id,
            'scrapped': True,
            'location_dest_id': self.scrap_location_id.id,
            'move_line_ids': [(0, 0, {'product_id': self.product_id.id,
                                           'product_uom_id': self.product_uom_id.id, 
                                           'qty_done': self.scrap_qty,
                                           'location_id': self.location_id.id,
                                           'location_dest_id': self.scrap_location_id.id,
                                           'package_id': self.package_id.id, 
                                           'owner_id': self.owner_id.id,
                                           'lot_id': self.lot_id.id, })],
             # 'restrict_partner_id': self.owner_id.id,
            'picking_id': self.picking_id.id
        }
    @api.multi
    def do_scrap(self):
        """Create and process the scrap move, then mark the order done."""
        for scrap in self:
            move = self.env['stock.move'].create(scrap._prepare_move_values())
            # master: replace context by cancel_backorder
            move.with_context(is_scrap=True)._action_done()
            scrap.write({'move_id': move.id, 'state': 'done'})
        return True
    def action_get_stock_picking(self):
        """Window action restricted to the picking linked to this scrap."""
        action = self.env.ref('stock.action_picking_tree_all').read([])[0]
        action['domain'] = [('id', '=', self.picking_id.id)]
        return action
    def action_get_stock_move_lines(self):
        """Window action restricted to the move lines of the scrap move."""
        action = self.env.ref('stock.stock_move_line_action').read([])[0]
        action['domain'] = [('move_id', '=', self.move_id.id)]
        return action
    def action_validate(self):
        """Validate the scrap; when on-hand quantity is insufficient for a
        stockable product, open a confirmation wizard instead."""
        self.ensure_one()
        if self.product_id.type != 'product':
            # Consumables are not quantity-tracked: scrap unconditionally.
            return self.do_scrap()
        precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
        available_qty = sum(self.env['stock.quant']._gather(self.product_id,
                                                            self.location_id,
                                                            self.lot_id,
                                                            self.package_id,
                                                            self.owner_id,
                                                            strict=True).mapped('quantity'))
        # Compare in the product's reference UoM.
        scrap_qty = self.product_uom_id._compute_quantity(self.scrap_qty, self.product_id.uom_id)
        if float_compare(available_qty, scrap_qty, precision_digits=precision) >= 0:
            return self.do_scrap()
        else:
            return {
                'name': _('Insufficient Quantity'),
                'view_type': 'form',
                'view_mode': 'form',
                'res_model': 'stock.warn.insufficient.qty.scrap',
                'view_id': self.env.ref('stock.stock_warn_insufficient_qty_scrap_form_view').id,
                'type': 'ir.actions.act_window',
                'context': {
                    'default_product_id': self.product_id.id,
                    'default_location_id': self.location_id.id,
                    'default_scrap_id': self.id
                },
                'target': 'new'
            }
| agpl-3.0 |
basicthinker/Sexain-MemController | gem5-stable/src/arch/x86/isa/insts/simd64/floating_point/arithmetic/accumulation.py | 91 | 2169 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# Microcode for the 3DNow! accumulation instructions.  Every mnemonic below
# is commented out inside the string, so this file currently provides no
# implementations for them.
microcode = '''
# PFACC
# PFNACC
# PFPNACC
'''
| apache-2.0 |
unidesigner/microcircuit | doc/examples/showmatrix2.py | 1 | 1104 | import microcircuit.constants as const
from microcircuit.viz import show
import networkx as nx
from microcircuit.connectome import Connectome
a=nx.DiGraph()
a.add_edge(1,2, {const.CONNECTOME_CHEMICAL_SYNAPSE:1, const.CONNECTOME_ELECTRICAL_SYNAPSE:2})
a.add_edge(2,1, {const.CONNECTOME_ELECTRICAL_SYNAPSE:2})
a.add_edge(1,1, {const.CONNECTOME_ELECTRICAL_SYNAPSE:10})
a.add_edge(3,3, {const.CONNECTOME_ELECTRICAL_SYNAPSE:10})
me = {'name' : 'testcircuit00244',
'neuronmap': {
1: {'name':'A', 'type': 'Sensory neuron'},
2: {'name':'B', 'type': 'Interneuron'},
3: {'name':'C', 'type': 'Motorneuron'}
}}
connectome = Connectome(metadata=me['neuronmap'], graph=a)
para = {
const.CONNECTOME_CHEMICAL_SYNAPSE: {
'marker': 'o',
'c': 'r',
's': 100
},
const.CONNECTOME_ELECTRICAL_SYNAPSE: {
'marker': '^',
'c': 'b',
's': (lambda x:x*10.)
}
}
nodes = connectome.graph.nodes()
print "node", nodes
b=show(connectome, skeleton_order=nodes, use_label=True, display_parameters=para)
| bsd-3-clause |
QuanZag/tornado | tornado/concurrent.py | 12 | 18212 | #!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for working with threads and ``Futures``.
``Futures`` are a pattern for concurrent programming introduced in
Python 3.2 in the `concurrent.futures` package. This package defines
a mostly-compatible `Future` class designed for use from coroutines,
as well as some utility functions for interacting with the
`concurrent.futures` package.
"""
from __future__ import absolute_import, division, print_function, with_statement
import functools
import platform
import textwrap
import traceback
import sys
from tornado.log import app_log
from tornado.stack_context import ExceptionStackContext, wrap
from tornado.util import raise_exc_info, ArgReplacer
try:
from concurrent import futures
except ImportError:
futures = None
# Can the garbage collector handle cycles that include __del__ methods?
# This is true in cpython beginning with version 3.4 (PEP 442).
# When False, Future uses the _TracebackLogger helper below (instead of a
# __del__ of its own) to report unretrieved exceptions without creating an
# uncollectable cycle.
_GC_CYCLE_FINALIZERS = (platform.python_implementation() == 'CPython' and
                        sys.version_info >= (3, 4))
class ReturnValueIgnoredError(Exception):
    """Raised by ``@return_future`` when the wrapped function returns a
    value instead of delivering it through the callback."""
# This class and associated code in the future object is derived
# from the Trollius project, a backport of asyncio to Python 2.x - 3.x
class _TracebackLogger(object):
    """Helper to log a traceback upon destruction if not cleared.
    This solves a nasty problem with Futures and Tasks that have an
    exception set: if nobody asks for the exception, the exception is
    never logged. This violates the Zen of Python: 'Errors should
    never pass silently. Unless explicitly silenced.'
    However, we don't want to log the exception as soon as
    set_exception() is called: if the calling code is written
    properly, it will get the exception and handle it properly. But
    we *do* want to log it if result() or exception() was never called
    -- otherwise developers waste a lot of time wondering why their
    buggy code fails silently.
    An earlier attempt added a __del__() method to the Future class
    itself, but this backfired because the presence of __del__()
    prevents garbage collection from breaking cycles. A way out of
    this catch-22 is to avoid having a __del__() method on the Future
    class itself, but instead to have a reference to a helper object
    with a __del__() method that logs the traceback, where we ensure
    that the helper object doesn't participate in cycles, and only the
    Future has a reference to it.
    The helper object is added when set_exception() is called. When
    the Future is collected, and the helper is present, the helper
    object is also collected, and its __del__() method will log the
    traceback. When the Future's result() or exception() method is
    called (and a helper object is present), it removes the the helper
    object, after calling its clear() method to prevent it from
    logging.
    One downside is that we do a fair amount of work to extract the
    traceback from the exception, even when it is never logged. It
    would seem cheaper to just store the exception object, but that
    references the traceback, which references stack frames, which may
    reference the Future, which references the _TracebackLogger, and
    then the _TracebackLogger would be included in a cycle, which is
    what we're trying to avoid! As an optimization, we don't
    immediately format the exception; we only do the work when
    activate() is called, which call is delayed until after all the
    Future's callbacks have run. Since usually a Future has at least
    one callback (typically set by 'yield From') and usually that
    callback extracts the callback, thereby removing the need to
    format the exception.
    PS. I don't claim credit for this solution. I first heard of it
    in a discussion about closing files when they are collected.
    """
    # __slots__ removes the per-instance __dict__ and, crucially here,
    # helps keep this helper out of reference cycles.
    __slots__ = ('exc_info', 'formatted_tb')
    def __init__(self, exc_info):
        # exc_info: a (type, value, traceback) tuple as from sys.exc_info().
        self.exc_info = exc_info
        self.formatted_tb = None
    def activate(self):
        # Format the stored exc_info (and drop the raw tuple, so no frame
        # objects are kept alive) so __del__ can report it later.
        exc_info = self.exc_info
        if exc_info is not None:
            self.exc_info = None
            self.formatted_tb = traceback.format_exception(*exc_info)
    def clear(self):
        # The exception was observed; forget everything so __del__ stays
        # silent.
        self.exc_info = None
        self.formatted_tb = None
    def __del__(self):
        if self.formatted_tb:
            app_log.error('Future exception was never retrieved: %s',
                          ''.join(self.formatted_tb).rstrip())
class Future(object):
    """Placeholder for an asynchronous result.
    A ``Future`` encapsulates the result of an asynchronous
    operation. In synchronous applications ``Futures`` are used
    to wait for the result from a thread or process pool; in
    Tornado they are normally used with `.IOLoop.add_future` or by
    yielding them in a `.gen.coroutine`.
    `tornado.concurrent.Future` is similar to
    `concurrent.futures.Future`, but not thread-safe (and therefore
    faster for use with single-threaded event loops).
    In addition to ``exception`` and ``set_exception``, methods ``exc_info``
    and ``set_exc_info`` are supported to capture tracebacks in Python 2.
    The traceback is automatically available in Python 3, but in the
    Python 2 futures backport this information is discarded.
    This functionality was previously available in a separate class
    ``TracebackFuture``, which is now a deprecated alias for this class.
    .. versionchanged:: 4.0
       `tornado.concurrent.Future` is always a thread-unsafe ``Future``
       with support for the ``exc_info`` methods. Previously it would
       be an alias for the thread-safe `concurrent.futures.Future`
       if that package was available and fall back to the thread-unsafe
       implementation if it was not.
    .. versionchanged:: 4.1
       If a `.Future` contains an error but that error is never observed
       (by calling ``result()``, ``exception()``, or ``exc_info()``),
       a stack trace will be logged when the `.Future` is garbage collected.
       This normally indicates an error in the application, but in cases
       where it results in undesired logging it may be necessary to
       suppress the logging by ensuring that the exception is observed:
       ``f.add_done_callback(lambda f: f.exception())``.
    """
    def __init__(self):
        self._done = False
        self._result = None
        self._exc_info = None
        self._log_traceback = False  # Used for Python >= 3.4
        self._tb_logger = None  # Used for Python <= 3.3
        self._callbacks = []
    # Implement the Python 3.5 Awaitable protocol if possible
    # (we can't use return and yield together until py33).
    if sys.version_info >= (3, 3):
        # exec() keeps this module importable on Python 2, where a function
        # body containing both ``return`` and ``yield`` is a SyntaxError.
        exec(textwrap.dedent("""
        def __await__(self):
            return (yield self)
        """))
    def cancel(self):
        """Cancel the operation, if possible.
        Tornado ``Futures`` do not support cancellation, so this method always
        returns False.
        """
        return False
    def cancelled(self):
        """Returns True if the operation has been cancelled.
        Tornado ``Futures`` do not support cancellation, so this method
        always returns False.
        """
        return False
    def running(self):
        """Returns True if this operation is currently running."""
        return not self._done
    def done(self):
        """Returns True if the future has finished running."""
        return self._done
    def _clear_tb_log(self):
        # The exception has been observed: disarm both the 3.4+ finalizer
        # flag and the _TracebackLogger helper so nothing is logged.
        self._log_traceback = False
        if self._tb_logger is not None:
            self._tb_logger.clear()
            self._tb_logger = None
    def result(self, timeout=None):
        """If the operation succeeded, return its result. If it failed,
        re-raise its exception.
        This method takes a ``timeout`` argument for compatibility with
        `concurrent.futures.Future` but it is an error to call it
        before the `Future` is done, so the ``timeout`` is never used.
        """
        self._clear_tb_log()
        # Fast path; a result that is literally None falls through to the
        # _check_done() branch below and is returned there.
        if self._result is not None:
            return self._result
        if self._exc_info is not None:
            raise_exc_info(self._exc_info)
        self._check_done()
        return self._result
    def exception(self, timeout=None):
        """If the operation raised an exception, return the `Exception`
        object. Otherwise returns None.
        This method takes a ``timeout`` argument for compatibility with
        `concurrent.futures.Future` but it is an error to call it
        before the `Future` is done, so the ``timeout`` is never used.
        """
        self._clear_tb_log()
        if self._exc_info is not None:
            return self._exc_info[1]
        else:
            self._check_done()
            return None
    def add_done_callback(self, fn):
        """Attaches the given callback to the `Future`.
        It will be invoked with the `Future` as its argument when the Future
        has finished running and its result is available. In Tornado
        consider using `.IOLoop.add_future` instead of calling
        `add_done_callback` directly.
        """
        if self._done:
            fn(self)
        else:
            self._callbacks.append(fn)
    def set_result(self, result):
        """Sets the result of a ``Future``.
        It is undefined to call any of the ``set`` methods more than once
        on the same object.
        """
        self._result = result
        self._set_done()
    def set_exception(self, exception):
        """Sets the exception of a ``Future.``"""
        self.set_exc_info(
            (exception.__class__,
             exception,
             getattr(exception, '__traceback__', None)))
    def exc_info(self):
        """Returns a tuple in the same format as `sys.exc_info` or None.
        .. versionadded:: 4.0
        """
        self._clear_tb_log()
        return self._exc_info
    def set_exc_info(self, exc_info):
        """Sets the exception information of a ``Future.``
        Preserves tracebacks on Python 2.
        .. versionadded:: 4.0
        """
        self._exc_info = exc_info
        self._log_traceback = True
        if not _GC_CYCLE_FINALIZERS:
            self._tb_logger = _TracebackLogger(exc_info)
        try:
            self._set_done()
        finally:
            # Activate the logger after all callbacks have had a
            # chance to call result() or exception().
            if self._log_traceback and self._tb_logger is not None:
                self._tb_logger.activate()
        # NOTE(review): this repeats the assignment at the top of the method
        # and nothing in between clears _exc_info -- looks redundant; confirm
        # against upstream history before removing.
        self._exc_info = exc_info
    def _check_done(self):
        if not self._done:
            raise Exception("DummyFuture does not support blocking for results")
    def _set_done(self):
        # Mark done and fire the callbacks exactly once; the callback list
        # is dropped afterwards so it cannot run again.
        self._done = True
        for cb in self._callbacks:
            try:
                cb(self)
            except Exception:
                app_log.exception('Exception in callback %r for %r',
                                  cb, self)
        self._callbacks = None
    # On Python 3.3 or older, objects with a destructor part of a reference
    # cycle are never destroyed. It's no longer the case on Python 3.4 thanks to
    # the PEP 442.
    if _GC_CYCLE_FINALIZERS:
        def __del__(self):
            if not self._log_traceback:
                # set_exception() was not called, or result() or exception()
                # has consumed the exception
                return
            tb = traceback.format_exception(*self._exc_info)
            app_log.error('Future %r exception was never retrieved: %s',
                          self, ''.join(tb).rstrip())
# Deprecated alias kept for backwards compatibility (see the Future
# docstring above).
TracebackFuture = Future
# Tuple of types recognized by is_future(); includes the stdlib
# concurrent.futures.Future only when that package imported successfully.
if futures is None:
    FUTURES = Future
else:
    FUTURES = (futures.Future, Future)
def is_future(x):
    """Return True if ``x`` is one of the recognized ``Future`` types."""
    return isinstance(x, FUTURES)
class DummyExecutor(object):
    """Executor stand-in that runs submitted work synchronously on the
    calling thread, wrapping the outcome in a ``Future``."""
    def submit(self, fn, *args, **kwargs):
        """Run ``fn`` immediately; deliver its result or exception via the
        returned future."""
        future = TracebackFuture()
        try:
            value = fn(*args, **kwargs)
        except Exception:
            future.set_exc_info(sys.exc_info())
        else:
            future.set_result(value)
        return future
    def shutdown(self, wait=True):
        """No-op; present only for executor API compatibility."""
        pass
dummy_executor = DummyExecutor()
def run_on_executor(*args, **kwargs):
    """Decorator to run a synchronous method asynchronously on an executor.
    The decorated method may be called with a ``callback`` keyword
    argument and returns a future.
    The `.IOLoop` and executor to be used are determined by the ``io_loop``
    and ``executor`` attributes of ``self``. To use different attributes,
    pass keyword arguments to the decorator::
        @run_on_executor(executor='_thread_pool')
        def foo(self):
            pass
    .. versionchanged:: 4.2
       Added keyword arguments to use alternative attributes.
    """
    def run_on_executor_decorator(fn):
        # Names of the attributes on ``self`` that hold the executor and
        # the IOLoop.
        executor = kwargs.get("executor", "executor")
        io_loop = kwargs.get("io_loop", "io_loop")
        @functools.wraps(fn)
        def wrapper(self, *args, **kwargs):
            callback = kwargs.pop("callback", None)
            future = getattr(self, executor).submit(fn, self, *args, **kwargs)
            if callback:
                getattr(self, io_loop).add_future(
                    future, lambda future: callback(future.result()))
            return future
        return wrapper
    if args and kwargs:
        raise ValueError("cannot combine positional and keyword args")
    if len(args) == 1:
        # Bare usage: @run_on_executor without parentheses.
        return run_on_executor_decorator(args[0])
    elif len(args) != 0:
        # Bug fix: the message and the count were previously passed as two
        # separate arguments to ValueError, so the "%d" was never formatted.
        raise ValueError("expected 1 argument, got %d" % len(args))
    return run_on_executor_decorator
# Sentinel distinguishing "callback invoked with no arguments" from a
# callback invoked with an explicit value (which may itself be None).
_NO_RESULT = object()
def return_future(f):
    """Decorator to make a function that returns via callback return a
    `Future`.
    The wrapped function should take a ``callback`` keyword argument
    and invoke it with one argument when it has finished. To signal failure,
    the function can simply raise an exception (which will be
    captured by the `.StackContext` and passed along to the ``Future``).
    From the caller's perspective, the callback argument is optional.
    If one is given, it will be invoked when the function is complete
    with `Future.result()` as an argument. If the function fails, the
    callback will not be run and an exception will be raised into the
    surrounding `.StackContext`.
    If no callback is given, the caller should use the ``Future`` to
    wait for the function to complete (perhaps by yielding it in a
    `.gen.engine` function, or passing it to `.IOLoop.add_future`).
    Usage:
    .. testcode::
        @return_future
        def future_func(arg1, arg2, callback):
            # Do stuff (possibly asynchronous)
            callback(result)
        @gen.engine
        def caller(callback):
            yield future_func(arg1, arg2)
            callback()
    ..
    Note that ``@return_future`` and ``@gen.engine`` can be applied to the
    same function, provided ``@return_future`` appears first. However,
    consider using ``@gen.coroutine`` instead of this combination.
    """
    # Finds (and later substitutes) the ``callback`` argument in each call.
    replacer = ArgReplacer(f, 'callback')
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        future = TracebackFuture()
        callback, args, kwargs = replacer.replace(
            lambda value=_NO_RESULT: future.set_result(value),
            args, kwargs)
        def handle_error(typ, value, tb):
            # Route exceptions raised within the stack context into the
            # future instead of letting them propagate.
            future.set_exc_info((typ, value, tb))
            return True
        exc_info = None
        with ExceptionStackContext(handle_error):
            try:
                result = f(*args, **kwargs)
                if result is not None:
                    raise ReturnValueIgnoredError(
                        "@return_future should not be used with functions "
                        "that return values")
            except:
                exc_info = sys.exc_info()
                raise
        if exc_info is not None:
            # If the initial synchronous part of f() raised an exception,
            # go ahead and raise it to the caller directly without waiting
            # for them to inspect the Future.
            future.result()
        # If the caller passed in a callback, schedule it to be called
        # when the future resolves. It is important that this happens
        # just before we return the future, or else we risk confusing
        # stack contexts with multiple exceptions (one here with the
        # immediate exception, and again when the future resolves and
        # the callback triggers its exception by calling future.result()).
        if callback is not None:
            def run_callback(future):
                result = future.result()
                if result is _NO_RESULT:
                    callback()
                else:
                    callback(future.result())
            future.add_done_callback(wrap(run_callback))
        return future
    return wrapper
def chain_future(a, b):
    """Chain two futures together so that when one completes, so does the other.
    The result (success or failure) of ``a`` will be copied to ``b``, unless
    ``b`` has already been completed or cancelled by the time ``a`` finishes.
    """
    def copy(completed):
        assert completed is a
        if b.done():
            return
        both_traceback = (isinstance(a, TracebackFuture) and
                          isinstance(b, TracebackFuture))
        if both_traceback and a.exc_info() is not None:
            # Preserve the full traceback when both sides support it.
            b.set_exc_info(a.exc_info())
        elif a.exception() is not None:
            b.set_exception(a.exception())
        else:
            b.set_result(a.result())
    a.add_done_callback(copy)
| apache-2.0 |
Smart-Torvy/torvy-home-assistant | tests/test_core.py | 3 | 22168 | """Test to verify that Home Assistant core works."""
# pylint: disable=protected-access,too-many-public-methods
# pylint: disable=too-few-public-methods
import asyncio
import unittest
from unittest.mock import patch, MagicMock
from datetime import datetime, timedelta
import pytz
import homeassistant.core as ha
from homeassistant.exceptions import InvalidEntityFormatError
import homeassistant.util.dt as dt_util
from homeassistant.util.unit_system import (METRIC_SYSTEM)
from homeassistant.const import (
__version__, EVENT_STATE_CHANGED, ATTR_FRIENDLY_NAME, CONF_UNIT_SYSTEM)
from tests.common import get_test_home_assistant
PST = pytz.timezone('America/Los_Angeles')
def test_split_entity_id():
    """split_entity_id separates the domain from the object id."""
    parts = ha.split_entity_id('domain.object_id')
    assert parts == ['domain', 'object_id']
def test_async_add_job_schedule_callback():
    """A callback-decorated job is scheduled via loop.call_soon only."""
    hass, job = MagicMock(), MagicMock()
    ha.HomeAssistant.async_add_job(hass, ha.callback(job))
    assert len(hass.loop.call_soon.mock_calls) == 1
    assert not hass.loop.create_task.mock_calls
    assert not hass.add_job.mock_calls
@patch('asyncio.iscoroutinefunction', return_value=True)
def test_async_add_job_schedule_coroutinefunction(mock_iscoro):
    """A coroutine function is wrapped in a task on the event loop."""
    hass, job = MagicMock(), MagicMock()
    ha.HomeAssistant.async_add_job(hass, job)
    assert not hass.loop.call_soon.mock_calls
    assert len(hass.loop.create_task.mock_calls) == 1
    assert not hass.add_job.mock_calls
@patch('asyncio.iscoroutinefunction', return_value=False)
def test_async_add_job_add_threaded_job_to_pool(mock_iscoro):
    """A plain callable is delegated to the threaded job pool."""
    hass, job = MagicMock(), MagicMock()
    ha.HomeAssistant.async_add_job(hass, job)
    assert not hass.loop.call_soon.mock_calls
    assert not hass.loop.create_task.mock_calls
    assert len(hass.add_job.mock_calls) == 1
def test_async_run_job_calls_callback():
    """A callback-decorated job runs immediately, without delegation."""
    hass = MagicMock()
    calls = []
    def record():
        calls.append(1)
    ha.HomeAssistant.async_run_job(hass, ha.callback(record))
    assert calls == [1]
    assert not hass.async_add_job.mock_calls
def test_async_run_job_delegates_non_async():
    """An undecorated job is not run inline but handed to async_add_job."""
    hass = MagicMock()
    calls = []
    def record():
        calls.append(1)
    ha.HomeAssistant.async_run_job(hass, record)
    assert calls == []
    assert len(hass.async_add_job.mock_calls) == 1
class TestHomeAssistant(unittest.TestCase):
    """Test the Home Assistant core classes."""
    def setUp(self):  # pylint: disable=invalid-name
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant(0)
    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()
    # This test hangs on `loop.add_signal_handler`
    # NOTE(review): disabled test kept for reference; re-enable once the
    # signal-handler hang is resolved, or delete it.
    # def test_start_and_sigterm(self):
    #     """Start the test."""
    #     calls = []
    #     self.hass.bus.listen_once(EVENT_HOMEASSISTANT_START,
    #                               lambda event: calls.append(1))
    #     self.hass.start()
    #     self.assertEqual(1, len(calls))
    #     self.hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP,
    #                               lambda event: calls.append(1))
    #     os.kill(os.getpid(), signal.SIGTERM)
    #     self.hass.block_till_done()
    #     self.assertEqual(1, len(calls))
class TestEvent(unittest.TestCase):
    """A Test Event class."""
    def test_eq(self):
        """Events with identical type, data and fire time compare equal."""
        now = dt_util.utcnow()
        payload = {'some': 'attr'}
        first = ha.Event('some_type', payload, time_fired=now)
        second = ha.Event('some_type', payload, time_fired=now)
        self.assertEqual(first, second)
    def test_repr(self):
        """repr() shows the origin marker and any event data."""
        self.assertEqual(
            "<Event TestEvent[L]>",
            str(ha.Event("TestEvent")))
        self.assertEqual(
            "<Event TestEvent[R]: beer=nice>",
            str(ha.Event("TestEvent", {"beer": "nice"},
                         ha.EventOrigin.remote)))
    def test_as_dict(self):
        """as_dict() exposes type, data, origin and fire time."""
        now = dt_util.utcnow()
        data = {'some': 'attr'}
        event = ha.Event('some_type', data, ha.EventOrigin.local, now)
        self.assertEqual(
            {
                'event_type': 'some_type',
                'data': data,
                'origin': 'LOCAL',
                'time_fired': now,
            },
            event.as_dict())
class TestEventBus(unittest.TestCase):
    """Test EventBus methods."""
    def setUp(self):  # pylint: disable=invalid-name
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        self.bus = self.hass.bus
        # Pre-register one listener so the bookkeeping tests below start
        # from a non-empty bus.
        self.bus.listen('test_event', lambda x: len)
    def tearDown(self):  # pylint: disable=invalid-name
        """Stop down stuff we started."""
        self.hass.stop()
    def test_add_remove_listener(self):
        """Test remove_listener method."""
        old_count = len(self.bus.listeners)
        def listener(_): pass
        self.bus.listen('test', listener)
        self.assertEqual(old_count + 1, len(self.bus.listeners))
        # Try deleting a non registered listener, nothing should happen
        self.bus._remove_listener('test', lambda x: len)
        # Remove listener
        self.bus._remove_listener('test', listener)
        self.assertEqual(old_count, len(self.bus.listeners))
        # Try deleting listener while category doesn't exist either
        self.bus._remove_listener('test', listener)
    def test_unsubscribe_listener(self):
        """Test unsubscribe listener from returned function."""
        calls = []
        def listener(event):
            """Mock listener."""
            calls.append(event)
        unsub = self.bus.listen('test', listener)
        self.bus.fire('test')
        self.hass.block_till_done()
        assert len(calls) == 1
        # After unsubscribing, further events must not reach the listener.
        unsub()
        self.bus.fire('event')
        self.hass.block_till_done()
        assert len(calls) == 1
    def test_listen_once_event(self):
        """Test listen_once_event method."""
        runs = []
        self.bus.listen_once('test_event', lambda x: runs.append(1))
        self.bus.fire('test_event')
        # Second time it should not increase runs
        self.bus.fire('test_event')
        self.hass.block_till_done()
        self.assertEqual(1, len(runs))
    def test_thread_event_listener(self):
        """A plain (threaded) listener receives fired events."""
        thread_calls = []
        def thread_listener(event):
            thread_calls.append(event)
        self.bus.listen('test_thread', thread_listener)
        self.bus.fire('test_thread')
        self.hass.block_till_done()
        assert len(thread_calls) == 1
    def test_callback_event_listener(self):
        """A @ha.callback-decorated listener receives fired events."""
        callback_calls = []
        @ha.callback
        def callback_listener(event):
            callback_calls.append(event)
        self.bus.listen('test_callback', callback_listener)
        self.bus.fire('test_callback')
        self.hass.block_till_done()
        assert len(callback_calls) == 1
    def test_coroutine_event_listener(self):
        """A coroutine listener receives fired events."""
        coroutine_calls = []
        @asyncio.coroutine
        def coroutine_listener(event):
            coroutine_calls.append(event)
        self.bus.listen('test_coroutine', coroutine_listener)
        self.bus.fire('test_coroutine')
        self.hass.block_till_done()
        assert len(coroutine_calls) == 1
class TestState(unittest.TestCase):
    """Test State methods."""
    def test_init(self):
        """Test state.init."""
        # Entity ids must have the '<domain>.<object_id>' form.
        self.assertRaises(
            InvalidEntityFormatError, ha.State,
            'invalid_entity_format', 'test_state')
    def test_domain(self):
        """Test domain."""
        state = ha.State('some_domain.hello', 'world')
        self.assertEqual('some_domain', state.domain)
    def test_object_id(self):
        """Test object ID."""
        state = ha.State('domain.hello', 'world')
        self.assertEqual('hello', state.object_id)
    def test_name_if_no_friendly_name_attr(self):
        """Test if there is no friendly name."""
        # Without ATTR_FRIENDLY_NAME the name derives from the object id.
        state = ha.State('domain.hello_world', 'world')
        self.assertEqual('hello world', state.name)
    def test_name_if_friendly_name_attr(self):
        """Test if there is a friendly name."""
        name = 'Some Unique Name'
        state = ha.State('domain.hello_world', 'world',
                         {ATTR_FRIENDLY_NAME: name})
        self.assertEqual(name, state.name)
    def test_dict_conversion(self):
        """Test conversion of dict."""
        state = ha.State('domain.hello', 'world', {'some': 'attr'})
        # Round trip: as_dict() -> from_dict() yields an equal state.
        self.assertEqual(state, ha.State.from_dict(state.as_dict()))
    def test_dict_conversion_with_wrong_data(self):
        """Test conversion with wrong data."""
        self.assertIsNone(ha.State.from_dict(None))
        self.assertIsNone(ha.State.from_dict({'state': 'yes'}))
        self.assertIsNone(ha.State.from_dict({'entity_id': 'yes'}))
    def test_repr(self):
        """Test state.repr."""
        self.assertEqual("<state happy.happy=on @ 1984-12-08T12:00:00+00:00>",
                         str(ha.State(
                             "happy.happy", "on",
                             last_changed=datetime(1984, 12, 8, 12, 0, 0))))
        self.assertEqual(
            "<state happy.happy=on; brightness=144 @ "
            "1984-12-08T12:00:00+00:00>",
            str(ha.State("happy.happy", "on", {"brightness": 144},
                         datetime(1984, 12, 8, 12, 0, 0))))
class TestStateMachine(unittest.TestCase):
    """Test State machine methods."""
    def setUp(self):  # pylint: disable=invalid-name
        """Setup things to be run when tests are started."""
        # NOTE(review): the 0 argument presumably requests zero worker
        # threads from the test helper -- confirm against its signature.
        self.hass = get_test_home_assistant(0)
        self.states = self.hass.states
        self.states.set("light.Bowl", "on")
        self.states.set("switch.AC", "off")
    def tearDown(self):  # pylint: disable=invalid-name
        """Stop down stuff we started."""
        self.hass.stop()
    def test_is_state(self):
        """Test is_state method."""
        self.assertTrue(self.states.is_state('light.Bowl', 'on'))
        self.assertFalse(self.states.is_state('light.Bowl', 'off'))
        self.assertFalse(self.states.is_state('light.Non_existing', 'on'))
    def test_is_state_attr(self):
        """Test is_state_attr method."""
        self.states.set("light.Bowl", "on", {"brightness": 100})
        self.assertTrue(
            self.states.is_state_attr('light.Bowl', 'brightness', 100))
        self.assertFalse(
            self.states.is_state_attr('light.Bowl', 'friendly_name', 200))
        self.assertFalse(
            self.states.is_state_attr('light.Bowl', 'friendly_name', 'Bowl'))
        self.assertFalse(
            self.states.is_state_attr('light.Non_existing', 'brightness', 100))
    def test_entity_ids(self):
        """Test get_entity_ids method."""
        # Entity ids are normalized to lower case by the state machine.
        ent_ids = self.states.entity_ids()
        self.assertEqual(2, len(ent_ids))
        self.assertTrue('light.bowl' in ent_ids)
        self.assertTrue('switch.ac' in ent_ids)
        ent_ids = self.states.entity_ids('light')
        self.assertEqual(1, len(ent_ids))
        self.assertTrue('light.bowl' in ent_ids)
    def test_all(self):
        """Test everything."""
        states = sorted(state.entity_id for state in self.states.all())
        self.assertEqual(['light.bowl', 'switch.ac'], states)
    def test_remove(self):
        """Test remove method."""
        events = []
        self.hass.bus.listen(EVENT_STATE_CHANGED,
                             lambda event: events.append(event))
        self.assertIn('light.bowl', self.states.entity_ids())
        self.assertTrue(self.states.remove('light.bowl'))
        self.hass.block_till_done()
        self.assertNotIn('light.bowl', self.states.entity_ids())
        # Removal fires one state_changed event with old_state set and
        # new_state absent.
        self.assertEqual(1, len(events))
        self.assertEqual('light.bowl', events[0].data.get('entity_id'))
        self.assertIsNotNone(events[0].data.get('old_state'))
        self.assertEqual('light.bowl', events[0].data['old_state'].entity_id)
        self.assertIsNone(events[0].data.get('new_state'))
        # If it does not exist, we should get False
        self.assertFalse(self.states.remove('light.Bowl'))
        self.hass.block_till_done()
        self.assertEqual(1, len(events))
    def test_case_insensitivty(self):
        """Test that entity ids are matched case-insensitively."""
        runs = []
        self.hass.bus.listen(EVENT_STATE_CHANGED,
                             lambda event: runs.append(event))
        self.states.set('light.BOWL', 'off')
        self.hass.block_till_done()
        self.assertTrue(self.states.is_state('light.bowl', 'off'))
        self.assertEqual(1, len(runs))
    def test_last_changed_not_updated_on_same_state(self):
        """Test to not update the existing, same state."""
        state = self.states.get('light.Bowl')
        future = dt_util.utcnow() + timedelta(hours=10)
        # Even 10 hours later, setting the same state value must not touch
        # last_changed (only attributes changed here).
        with patch('homeassistant.util.dt.utcnow', return_value=future):
            self.states.set("light.Bowl", "on", {'attr': 'triggers_change'})
            self.hass.block_till_done()
        state2 = self.states.get('light.Bowl')
        assert state2 is not None
        assert state.last_changed == state2.last_changed
    def test_force_update(self):
        """Test force update option."""
        events = []
        self.hass.bus.listen(EVENT_STATE_CHANGED, lambda ev: events.append(ev))
        # Same state, no force: no event fired.
        self.states.set('light.bowl', 'on')
        self.hass.block_till_done()
        self.assertEqual(0, len(events))
        # Same state with force_update=True: event is fired anyway.
        self.states.set('light.bowl', 'on', None, True)
        self.hass.block_till_done()
        self.assertEqual(1, len(events))
class TestServiceCall(unittest.TestCase):
"""Test ServiceCall class."""
def test_repr(self):
"""Test repr method."""
self.assertEqual(
"<ServiceCall homeassistant.start>",
str(ha.ServiceCall('homeassistant', 'start')))
self.assertEqual(
"<ServiceCall homeassistant.start: fast=yes>",
str(ha.ServiceCall('homeassistant', 'start', {"fast": "yes"})))
class TestServiceRegistry(unittest.TestCase):
    """Test ServiceRegistry methods."""
    def setUp(self):  # pylint: disable=invalid-name
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        self.services = self.hass.services
        # Mixed case on purpose: the registry lower-cases domain/service.
        self.services.register("Test_Domain", "TEST_SERVICE", lambda x: None)
    def tearDown(self):  # pylint: disable=invalid-name
        """Stop down stuff we started."""
        self.hass.stop()
    def test_has_service(self):
        """Test has_service method."""
        # Lookups are case-insensitive.
        self.assertTrue(
            self.services.has_service("tesT_domaiN", "tesT_servicE"))
        self.assertFalse(
            self.services.has_service("test_domain", "non_existing"))
        self.assertFalse(
            self.services.has_service("non_existing", "test_service"))
    def test_services(self):
        """Test services."""
        expected = {
            'test_domain': {'test_service': {'description': '', 'fields': {}}}
        }
        self.assertEqual(expected, self.services.services)
    def test_call_with_blocking_done_in_time(self):
        """Test call with blocking."""
        calls = []
        def service_handler(call):
            """Service handler."""
            calls.append(call)
        self.services.register("test_domain", "register_calls",
                               service_handler)
        self.assertTrue(
            self.services.call('test_domain', 'REGISTER_CALLS', blocking=True))
        self.assertEqual(1, len(calls))
    def test_call_non_existing_with_blocking(self):
        """Test non-existing with blocking."""
        # Shrink the blocking timeout so the test fails fast; restore it in
        # the finally block regardless of outcome.
        prior = ha.SERVICE_CALL_LIMIT
        try:
            ha.SERVICE_CALL_LIMIT = 0.01
            assert not self.services.call('test_domain', 'i_do_not_exist',
                                          blocking=True)
        finally:
            ha.SERVICE_CALL_LIMIT = prior
    def test_async_service(self):
        """Test registering and calling an async service."""
        calls = []
        @asyncio.coroutine
        def service_handler(call):
            """Service handler coroutine."""
            calls.append(call)
        self.services.register('test_domain', 'register_calls',
                               service_handler)
        self.assertTrue(
            self.services.call('test_domain', 'REGISTER_CALLS', blocking=True))
        self.hass.block_till_done()
        self.assertEqual(1, len(calls))
    def test_callback_service(self):
        """Test registering and calling a ha.callback service."""
        calls = []
        @ha.callback
        def service_handler(call):
            """Service handler coroutine."""
            calls.append(call)
        self.services.register('test_domain', 'register_calls',
                               service_handler)
        self.assertTrue(
            self.services.call('test_domain', 'REGISTER_CALLS', blocking=True))
        self.hass.block_till_done()
        self.assertEqual(1, len(calls))
class TestConfig(unittest.TestCase):
    """Test configuration methods."""
    def setUp(self):  # pylint: disable=invalid-name
        """Setup things to be run when tests are started."""
        self.config = ha.Config()
        # A fresh Config has no config_dir until one is assigned.
        self.assertIsNone(self.config.config_dir)
    def test_path_with_file(self):
        """Test get_config_path method."""
        self.config.config_dir = '/tmp/ha-config'
        self.assertEqual("/tmp/ha-config/test.conf",
                         self.config.path("test.conf"))
    def test_path_with_dir_and_file(self):
        """Test get_config_path method."""
        # path() joins multiple components under config_dir.
        self.config.config_dir = '/tmp/ha-config'
        self.assertEqual("/tmp/ha-config/dir/test.conf",
                         self.config.path("dir", "test.conf"))
    def test_as_dict(self):
        """Test as dict."""
        self.config.config_dir = '/tmp/ha-config'
        expected = {
            'latitude': None,
            'longitude': None,
            CONF_UNIT_SYSTEM: METRIC_SYSTEM.as_dict(),
            'location_name': None,
            'time_zone': 'UTC',
            'components': [],
            'config_dir': '/tmp/ha-config',
            'version': __version__,
        }
        self.assertEqual(expected, self.config.as_dict())
class TestWorkerPool(unittest.TestCase):
    """Test WorkerPool methods."""
    def test_exception_during_job(self):
        """Test exception during a job."""
        # A job that raises must not kill the single worker; the job queued
        # after it still has to run.
        pool = ha.create_worker_pool(1)
        def malicious_job(_):
            raise Exception("Test breaking worker pool")
        calls = []
        def register_call(_):
            calls.append(1)
        pool.add_job(ha.JobPriority.EVENT_DEFAULT, (malicious_job, None))
        pool.add_job(ha.JobPriority.EVENT_DEFAULT, (register_call, None))
        pool.block_till_done()
        self.assertEqual(1, len(calls))
class TestWorkerPoolMonitor(object):
    """Test monitor_worker_pool."""
    @patch('homeassistant.core._LOGGER.warning')
    def test_worker_pool_monitor(self, mock_warning, event_loop):
        """Test we log an error and increase threshold."""
        hass = MagicMock()
        hass.pool.worker_count = 3
        schedule_handle = MagicMock()
        hass.loop.call_later.return_value = schedule_handle
        ha.async_monitor_worker_pool(hass)
        assert hass.loop.call_later.called
        assert hass.bus.async_listen_once.called
        assert not schedule_handle.called
        # Grab the periodic check callback that was scheduled on the loop.
        check_threshold = hass.loop.call_later.mock_calls[0][1][1]
        # Below the threshold: queue size 8 stays silent, 9 warns
        # (with worker_count=3 per the assertions below).
        hass.pool.queue_size = 8
        check_threshold()
        assert not mock_warning.called
        hass.pool.queue_size = 9
        check_threshold()
        assert mock_warning.called
        mock_warning.reset_mock()
        assert not mock_warning.called
        check_threshold()
        assert not mock_warning.called
        # After warning once the threshold doubles: 17 is quiet, 18 warns.
        hass.pool.queue_size = 17
        check_threshold()
        assert not mock_warning.called
        hass.pool.queue_size = 18
        check_threshold()
        assert mock_warning.called
        # Firing the registered stop listener must cancel the scheduled check.
        hass.bus.async_listen_once.mock_calls[0][1][1](None)
        assert schedule_handle.cancel.called
class TestAsyncCreateTimer(object):
    """Test create timer."""
    @patch('homeassistant.core.asyncio.Event')
    @patch('homeassistant.core.dt_util.utcnow')
    def test_create_timer(self, mock_utcnow, mock_event, event_loop):
        """Test create timer fires correctly."""
        hass = MagicMock()
        now = mock_utcnow()
        event = mock_event()
        now.second = 1
        mock_utcnow.reset_mock()
        ha.async_create_timer(hass)
        # Two one-shot listeners are registered: stop (index 0) and
        # start (index 1), per how they are unpacked below.
        assert len(hass.bus.async_listen_once.mock_calls) == 2
        start_timer = hass.bus.async_listen_once.mock_calls[1][1][1]
        event_loop.run_until_complete(start_timer(None))
        assert hass.loop.create_task.called
        timer = hass.loop.create_task.mock_calls[0][1][0]
        # Let the timer loop iterate, then report the stop event as set so
        # the coroutine exits.
        event.is_set.side_effect = False, False, True
        event_loop.run_until_complete(timer)
        assert len(mock_utcnow.mock_calls) == 1
        assert hass.loop.call_soon.called
        # The timer schedules a time_changed event carrying the mocked 'now'.
        event_type, event_data = hass.loop.call_soon.mock_calls[0][1][1:]
        assert ha.EVENT_TIME_CHANGED == event_type
        assert {ha.ATTR_NOW: now} == event_data
        stop_timer = hass.bus.async_listen_once.mock_calls[0][1][1]
        stop_timer(None)
        assert event.set.called
| mit |
Red--Code/Code-Red-honami | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
# futex(2) operation codes and flags, mirroring <linux/futex.h>.
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
# Mask that strips the flag bits to recover the bare futex command.
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
    # Arithmetic mean of a running total over n samples.
    # NOTE(review): this file is Python 2 (see the print statement below),
    # so '/' floors for int inputs -- presumably intended for counters.
    return total / n
def nsecs(secs, nsecs):
    # Combine a (seconds, nanoseconds) pair into one nanosecond count.
    # The second parameter deliberately shadows the function name.
    return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
    # Whole-seconds portion of a nanosecond count (integer division on
    # Python 2; would yield a float under Python 3).
    return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
    # Sub-second remainder of a nanosecond count.
    return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
    """Format a nanosecond count as 'seconds.nanoseconds'.

    Fixes two defects in the original: a stray trailing comma wrapped the
    formatted string in a 1-tuple (harmless for "%s" % nsecs_str(x)
    callers, wrong everywhere else), and the local variable shadowed the
    builtin 'str'.  Returning the plain string is backward compatible for
    %-formatting callers.
    """
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
    """Accumulate (min, max, smoothed-avg, count) statistics for 'key'.

    The parameter name 'dict' shadows the builtin but is kept so keyword
    callers keep working.
    """
    # 'key in dict' replaces the Python-2-only dict.has_key() and works on
    # both Python 2 and 3.
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        # Local names renamed so they no longer shadow min()/max() and the
        # module-level avg() helper.
        lo, hi, mean, count = dict[key]
        if value < lo:
            lo = value
        if value > hi:
            hi = value
        # NOTE: pairwise smoothing, not a true mean over all samples --
        # preserved from the original implementation.
        mean = (mean + value) / 2
        dict[key] = (lo, hi, mean, count + 1)
def clear_term():
    # ANSI escapes: ESC[H homes the cursor, ESC[2J erases the display.
    print("\x1b[H\x1b[2J")
# One-shot guard so the "install audit-libs-python" hint prints only once.
audit_package_warned = False
try:
    import audit
    # Map uname() machine strings onto audit machine-type constants so
    # syscall numbers can be translated for the running architecture.
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64'  : audit.MACH_IA64,
        'ppc'   : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390'  : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386'  : audit.MACH_X86,
        'i586'  : audit.MACH_X86,
        'i686'  : audit.MACH_X86,
    }
    try:
        # MACH_ARMEB is only present in newer audit bindings.
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except:
        pass
    machine_id = machine_to_id[os.uname()[4]]
except:
    # audit missing or unknown machine: syscall_name() falls back to ids.
    # (Python 2 print statement -- this module targets Python 2.)
    if not audit_package_warned:
        audit_package_warned = True
        print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
    # Resolve a syscall number to its name via audit-libs.  The bare
    # except deliberately also swallows the NameError raised when the
    # audit import above failed (audit/machine_id undefined), falling
    # back to the numeric id as a string.
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except:
        return str(id)
def strerror(nr):
    """Return the symbolic errno name (e.g. 'ENOENT') for +/-nr.

    Falls back to "Unknown <nr> errno" for numbers not in the errno table.
    The original bare 'except:' is narrowed to KeyError -- the only
    exception the table lookup raises for an unknown int.
    """
    try:
        return errno.errorcode[abs(nr)]
    except KeyError:
        return "Unknown %d errno" % nr
| gpl-2.0 |
pangeacake/CloudBot | plugins/history.py | 22 | 3867 | from collections import deque
import time
import asyncio
import re
from cloudbot import hook
from cloudbot.util import timeformat
from cloudbot.event import EventType
db_ready = []
def db_init(db, conn_name):
    """check to see that our db has the seen table (connection name is for caching the result per connection)
    :type db: sqlalchemy.orm.Session
    """
    # Membership test replaces the original linear db_ready.count(...); the
    # 'global' declaration was unnecessary since the list is only mutated,
    # never rebound.
    if conn_name not in db_ready:
        db.execute("create table if not exists seen_user(name, time, quote, chan, host, primary key(name, chan))")
        db.commit()
        db_ready.append(conn_name)
def track_seen(event, db, conn):
    """ Tracks messages for the .seen command
    :type event: cloudbot.event.Event
    :type db: sqlalchemy.orm.Session
    :type conn: cloudbot.client.Client
    """
    # Pass conn.name (not the conn object) so the db_init cache key matches
    # the seen() command below, which calls db_init(db, conn.name); the
    # original passed the object, defeating the per-connection cache.
    db_init(db, conn.name)
    # keep private messages private; also skip sed-style s/old/new/
    # correction messages so they don't overwrite the real last quote
    if event.chan[:1] == "#" and not re.findall('^s/.*/.*/$', event.content.lower()):
        db.execute(
            "insert or replace into seen_user(name, time, quote, chan, host) values(:name,:time,:quote,:chan,:host)",
            {'name': event.nick.lower(), 'time': time.time(), 'quote': event.content, 'chan': event.chan,
             'host': event.mask})
        db.commit()
def track_history(event, message_time, conn):
    """Append (nick, timestamp, message) to the per-channel history buffer.

    :type event: cloudbot.event.Event
    :type conn: cloudbot.client.Client
    """
    if event.chan not in conn.history:
        # Lazily create a bounded buffer so memory stays capped per channel.
        conn.history[event.chan] = deque(maxlen=100)
    entry = (event.nick, message_time, event.content)
    conn.history[event.chan].append(entry)
@hook.event([EventType.message, EventType.action], singlethread=True)
def chat_tracker(event, db, conn):
    """Record every message/action for both the .seen command and chat history.
    :type db: sqlalchemy.orm.Session
    :type event: cloudbot.event.Event
    :type conn: cloudbot.client.Client
    """
    if event.type is EventType.action:
        # Re-wrap /me actions in their CTCP ACTION envelope so they are
        # stored in the same on-the-wire form they arrived in (seen()
        # below detects this \x01 prefix when formatting output).
        event.content = "\x01ACTION {}\x01".format(event.content)
    message_time = time.time()
    track_seen(event, db, conn)
    track_history(event, message_time, conn)
@asyncio.coroutine
@hook.command(autohelp=False)
def resethistory(event, conn):
    """- resets chat history for the current channel
    :type event: cloudbot.event.Event
    :type conn: cloudbot.client.Client
    """
    # Membership check instead of the original try/except KeyError.
    channel_log = conn.history.get(event.chan)
    if channel_log is None:
        return "There is no history for this channel."
    channel_log.clear()
    return "Reset chat history for current channel."
@hook.command()
def seen(text, nick, chan, db, event, conn):
    """<nick> <channel> - tells when a nickname was last in active in one of my channels
    :type db: sqlalchemy.orm.Session
    :type event: cloudbot.event.Event
    :type conn: cloudbot.client.Client
    """
    if event.conn.nick.lower() == text.lower():
        return "You need to get your eyes checked."
    if text.lower() == nick.lower():
        return "Have you looked in a mirror lately?"
    # Only plausible IRC nick characters are allowed in the lookup.
    if not re.match("^[A-Za-z0-9_|.\-\]\[]*$", text.lower()):
        return "I can't look up that name, its impossible to use!"
    db_init(db, conn.name)
    # 'like' allows SQL glob matching on the stored (lower-cased) nick.
    last_seen = db.execute("select name, time, quote from seen_user where name like :name and chan = :chan",
                           {'name': text, 'chan': chan}).fetchone()
    if last_seen:
        reltime = timeformat.time_since(last_seen[1])
        if last_seen[0] != text.lower():  # for glob matching
            text = last_seen[0]
        if last_seen[2][0:1] == "\x01":
            # CTCP ACTION (/me): [8:-1] strips the "\x01ACTION " prefix and
            # trailing "\x01" added by chat_tracker above.
            return '{} was last seen {} ago: * {} {}'.format(text, reltime, text, last_seen[2][8:-1])
        else:
            return '{} was last seen {} ago saying: {}'.format(text, reltime, last_seen[2])
    else:
        return "I've never seen {} talking in this channel.".format(text)
| gpl-3.0 |
motion2015/edx-platform | openedx/core/lib/logsettings.py | 127 | 5765 | """Get log settings."""
import os
import platform
import sys
from logging.handlers import SysLogHandler
LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
def get_logger_config(log_dir,
                      logging_env="no_env",
                      tracking_filename="tracking.log",
                      edx_filename="edx.log",
                      dev_env=False,
                      syslog_addr=None,
                      debug=False,
                      local_loglevel='INFO',
                      console_loglevel=None,
                      service_variant=None):
    """
    Return the appropriate logging config dictionary (dictConfig schema).
    You should assign the result of this to the LOGGING var in your
    settings. The reason it's done this way instead of registering directly
    is because I didn't want to worry about resetting the logging state if
    this is called multiple times when settings are extended.
    If dev_env is set to true logging will not be done via local rsyslogd,
    instead, tracking and application logs will be dropped in log_dir.
    "tracking_filename" and "edx_filename" are ignored unless dev_env
    is set to true since otherwise logging is handled by rsyslogd.
    """
    # Revert to INFO if an invalid string is passed in
    if local_loglevel not in LOG_LEVELS:
        local_loglevel = 'INFO'
    if console_loglevel is None or console_loglevel not in LOG_LEVELS:
        console_loglevel = 'DEBUG' if debug else 'INFO'
    if service_variant is None:
        # default to a blank string so that if SERVICE_VARIANT is not
        # set we will not log to a sub directory
        service_variant = ''
    # Short hostname (first DNS label) is baked into the syslog format.
    hostname = platform.node().split(".")[0]
    syslog_format = ("[service_variant={service_variant}]"
                     "[%(name)s][env:{logging_env}] %(levelname)s "
                     "[{hostname} %(process)d] [%(filename)s:%(lineno)d] "
                     "- %(message)s").format(service_variant=service_variant,
                                             logging_env=logging_env,
                                             hostname=hostname)
    # Root-logger handlers; a remote syslog handler is appended on demand.
    handlers = ['console', 'local']
    if syslog_addr:
        handlers.append('syslogger-remote')
    logger_config = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'standard': {
                'format': '%(asctime)s %(levelname)s %(process)d '
                          '[%(name)s] %(filename)s:%(lineno)d - %(message)s',
            },
            'syslog_format': {'format': syslog_format},
            'raw': {'format': '%(message)s'},
        },
        'filters': {
            'require_debug_false': {
                '()': 'django.utils.log.RequireDebugFalse',
            }
        },
        'handlers': {
            'console': {
                'level': console_loglevel,
                'class': 'logging.StreamHandler',
                'formatter': 'standard',
                'stream': sys.stderr,
            },
            'mail_admins': {
                'level': 'ERROR',
                'filters': ['require_debug_false'],
                'class': 'django.utils.log.AdminEmailHandler'
            },
            'newrelic': {
                'level': 'ERROR',
                'class': 'lms.lib.newrelic_logging.NewRelicHandler',
                'formatter': 'raw',
            }
        },
        'loggers': {
            # Event-tracking log is kept separate from application logging.
            'tracking': {
                'handlers': ['tracking'],
                'level': 'DEBUG',
                'propagate': False,
            },
            '': {
                'handlers': handlers,
                'level': 'DEBUG',
                'propagate': False
            },
            'django.request': {
                'handlers': ['mail_admins'],
                'level': 'ERROR',
                'propagate': True,
            },
        }
    }
    if syslog_addr:
        logger_config['handlers'].update({
            'syslogger-remote': {
                'level': 'INFO',
                'class': 'logging.handlers.SysLogHandler',
                'address': syslog_addr,
                'formatter': 'syslog_format',
            },
        })
    if dev_env:
        # Dev: rotate plain files in log_dir instead of talking to rsyslogd.
        tracking_file_loc = os.path.join(log_dir, tracking_filename)
        edx_file_loc = os.path.join(log_dir, edx_filename)
        logger_config['handlers'].update({
            'local': {
                'class': 'logging.handlers.RotatingFileHandler',
                'level': local_loglevel,
                'formatter': 'standard',
                'filename': edx_file_loc,
                'maxBytes': 1024 * 1024 * 2,
                'backupCount': 5,
            },
            'tracking': {
                'level': 'DEBUG',
                'class': 'logging.handlers.RotatingFileHandler',
                'filename': tracking_file_loc,
                'formatter': 'raw',
                'maxBytes': 1024 * 1024 * 2,
                'backupCount': 5,
            },
        })
    else:
        # for production environments we will only
        # log INFO and up
        logger_config['loggers']['']['level'] = 'INFO'
        logger_config['handlers'].update({
            'local': {
                'level': local_loglevel,
                'class': 'logging.handlers.SysLogHandler',
                'address': '/dev/log',
                'formatter': 'syslog_format',
                'facility': SysLogHandler.LOG_LOCAL0,
            },
            'tracking': {
                'level': 'DEBUG',
                'class': 'logging.handlers.SysLogHandler',
                'address': '/dev/log',
                'facility': SysLogHandler.LOG_LOCAL1,
                'formatter': 'raw',
            },
        })
    return logger_config
| agpl-3.0 |
duhzecca/cinder | cinder/tests/unit/fake_hp_client_exceptions.py | 32 | 3077 | # (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Fake HP client exceptions to use when mocking HP clients."""
class UnsupportedVersion(Exception):
    """Raised when the client library version is not supported."""
class ClientException(Exception):
    """Base class for these fake HP client exceptions.

    Mirrors the real hpclient exception shape: subclasses provide
    ``message`` and ``http_status``; the optional ``error`` dict carries
    code / description / reference / debug details.
    """

    _error_code = None
    _error_desc = None
    _error_ref = None
    _debug1 = None
    _debug2 = None

    # Keys of the incoming error dict and the attributes they populate.
    _FIELD_MAP = (
        ('code', '_error_code'),
        ('desc', '_error_desc'),
        ('ref', '_error_ref'),
        ('debug1', '_debug1'),
        ('debug2', '_debug2'),
    )

    def __init__(self, error=None):
        if error:
            for key, attr in self._FIELD_MAP:
                if key in error:
                    setattr(self, attr, error[key])

    def get_code(self):
        """Return the numeric error code, if any."""
        return self._error_code

    def get_description(self):
        """Return the error description, if any."""
        return self._error_desc

    def get_ref(self):
        """Return the error reference, if any."""
        return self._error_ref

    def __str__(self):
        # 'message'/'http_status' come from the concrete subclass.
        parts = [self.message]
        if self.http_status:
            parts.append(" (HTTP %s)" % self.http_status)
        if self._error_code:
            parts.append(" %s" % self._error_code)
        if self._error_desc:
            parts.append(" - %s" % self._error_desc)
        if self._error_ref:
            parts.append(" - %s" % self._error_ref)
        if self._debug1:
            parts.append(" (1: '%s')" % self._debug1)
        if self._debug2:
            parts.append(" (2: '%s')" % self._debug2)
        return "".join(parts)
class HTTPConflict(Exception):
    """HTTP 409 Conflict."""

    http_status = 409
    message = "Conflict"

    def __init__(self, error=None):
        # Only set the attribute when a description is actually present,
        # matching the real client's behaviour.
        if error is not None and 'message' in error:
            self._error_desc = error['message']

    def get_description(self):
        """Return the description parsed from the error body."""
        return self._error_desc
class HTTPNotFound(Exception):
    """HTTP 404 Not Found."""

    message = "Not found"
    http_status = 404
class HTTPForbidden(ClientException):
    # NOTE(review): unlike its sibling fakes, this one derives from
    # ClientException rather than Exception -- this mirrors an
    # inconsistency in the mocked client; confirm before "fixing".
    http_status = 403
    message = "Forbidden"
class HTTPBadRequest(Exception):
    """HTTP 400 Bad Request."""

    message = "Bad request"
    http_status = 400
class HTTPServerError(Exception):
    """HTTP 500 Internal Server Error."""

    http_status = 500
    message = "Error"

    def __init__(self, error=None):
        # Attribute is set only when a description is present, matching
        # the real client's behaviour.
        if error is not None and 'message' in error:
            self._error_desc = error['message']

    def get_description(self):
        """Return the description parsed from the error body."""
        return self._error_desc
| apache-2.0 |
jeremypogue/ansible | lib/ansible/plugins/connection/jail.py | 8 | 8028 | # Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# and chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2013, Michael Scherer <misc@zarb.org>
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import distutils.spawn
import os
import os.path
import pipes
import subprocess
import traceback
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes
from ansible.plugins.connection import ConnectionBase, BUFSIZE
# Reuse the CLI's shared display object when running under the ansible
# executable; fall back to a private instance (e.g. under tests).
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
class Connection(ConnectionBase):
    ''' Local BSD Jail based connections

    Runs commands inside a FreeBSD jail on the controller host via
    jexec(8); jail discovery/path lookup is done with jls(8).
    '''
    transport = 'jail'
    # Pipelining may work. Someone needs to test by setting this to True and
    # having pipelining=True in their ansible.cfg
    has_pipelining = True
    # Some become_methods may work in v2 (sudo works for other chroot-based
    # plugins while su seems to be failing). If some work, check chroot.py to
    # see how to disable just some methods.
    become_methods = frozenset()
    def __init__(self, play_context, new_stdin, *args, **kwargs):
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
        # The inventory hostname doubles as the jail name.
        self.jail = self._play_context.remote_addr
        # jls/jexec require root privileges.
        if os.geteuid() != 0:
            raise AnsibleError("jail connection requires running as root")
        self.jls_cmd = self._search_executable('jls')
        self.jexec_cmd = self._search_executable('jexec')
        if self.jail not in self.list_jails():
            raise AnsibleError("incorrect jail name %s" % self.jail)
    @staticmethod
    def _search_executable(executable):
        # Locate an executable on the controller's PATH or fail loudly.
        cmd = distutils.spawn.find_executable(executable)
        if not cmd:
            raise AnsibleError("%s command not found in PATH" % executable)
        return cmd
    def list_jails(self):
        # 'jls -q name' prints one running jail name per line.
        p = subprocess.Popen([self.jls_cmd, '-q', 'name'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        return stdout.split()
    def get_jail_path(self):
        # 'jls -j <name> -q path' prints the jail's root directory.
        p = subprocess.Popen([self.jls_cmd, '-j', to_bytes(self.jail), '-q', 'path'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        # remove \n
        return stdout[:-1]
    def _connect(self):
        ''' connect to the jail; nothing to do here '''
        super(Connection, self)._connect()
        if not self._connected:
            display.vvv(u"ESTABLISH JAIL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self.jail)
            self._connected = True
    def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
        ''' run a command on the jail. This is only needed for implementing
        put_file() fetch_file() so that we don't have to read the whole file
        into memory.
        compared to exec_command() it loses some niceties like being able to
        return the process's exit code immediately.
        '''
        local_cmd = [self.jexec_cmd]
        set_env = ''
        if self._play_context.remote_user is not None:
            local_cmd += ['-U', self._play_context.remote_user]
            # update HOME since -U does not update the jail environment
            set_env = 'HOME=~' + self._play_context.remote_user + ' '
        local_cmd += [self.jail, self._play_context.executable, '-c', set_env + cmd]
        display.vvv("EXEC %s" % (local_cmd,), host=self.jail)
        # subprocess wants byte strings for argv on Python 2.
        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
        p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        return p
    def exec_command(self, cmd, in_data=None, sudoable=False):
        ''' run a command on the jail '''
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
        p = self._buffered_exec_command(cmd)
        stdout, stderr = p.communicate(in_data)
        return (p.returncode, stdout, stderr)
    def _prefix_login_path(self, remote_path):
        ''' Make sure that we put files into a standard path
        If a path is relative, then we need to choose where to put it.
        ssh chooses $HOME but we aren't guaranteed that a home dir will
        exist in any given chroot. So for now we're choosing "/" instead.
        This also happens to be the former default.
        Can revisit using $HOME instead if it's a problem
        '''
        if not remote_path.startswith(os.path.sep):
            remote_path = os.path.join(os.path.sep, remote_path)
        return os.path.normpath(remote_path)
    def put_file(self, in_path, out_path):
        ''' transfer a file from local to jail '''
        super(Connection, self).put_file(in_path, out_path)
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail)
        out_path = pipes.quote(self._prefix_login_path(out_path))
        try:
            with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
                try:
                    # Stream through dd inside the jail so the file is never
                    # held in memory in one piece.
                    p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), stdin=in_file)
                except OSError:
                    raise AnsibleError("jail connection requires dd command in the jail")
                try:
                    stdout, stderr = p.communicate()
                except:
                    traceback.print_exc()
                    raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
                if p.returncode != 0:
                    raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
        except IOError:
            raise AnsibleError("file or module does not exist at: %s" % in_path)
    def fetch_file(self, in_path, out_path):
        ''' fetch a file from jail to local '''
        super(Connection, self).fetch_file(in_path, out_path)
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail)
        in_path = pipes.quote(self._prefix_login_path(in_path))
        try:
            # Stream the file out of the jail via dd in BUFSIZE chunks.
            p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
        except OSError:
            raise AnsibleError("jail connection requires dd command in the jail")
        with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
            try:
                chunk = p.stdout.read(BUFSIZE)
                while chunk:
                    out_file.write(chunk)
                    chunk = p.stdout.read(BUFSIZE)
            except:
                traceback.print_exc()
                raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
            stdout, stderr = p.communicate()
            if p.returncode != 0:
                raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
    def close(self):
        ''' terminate the connection; nothing to do here '''
        super(Connection, self).close()
        self._connected = False
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.