repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
cchurch/ansible | lib/ansible/modules/cloud/scaleway/scaleway_organization_facts.py | 54 | 2937 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: scaleway_organization_facts
short_description: Gather facts about the Scaleway organizations available.
description:
- Gather facts about the Scaleway organizations available.
version_added: "2.7"
author:
- "Yanis Guenane (@Spredzy)"
- "Remy Leone (@sieben)"
options:
api_url:
description:
- Scaleway API URL
default: 'https://account.scaleway.com'
aliases: ['base_url']
extends_documentation_fragment: scaleway
'''
EXAMPLES = r'''
- name: Gather Scaleway organizations facts
scaleway_organization_facts:
'''
RETURN = r'''
---
scaleway_organization_facts:
description: Response from Scaleway API
returned: success
type: complex
contains:
"scaleway_organization_facts": [
{
"address_city_name": "Paris",
"address_country_code": "FR",
"address_line1": "42 Rue de l'univers",
"address_line2": null,
"address_postal_code": "75042",
"address_subdivision_code": "FR-75",
"creation_date": "2018-08-06T13:43:28.508575+00:00",
"currency": "EUR",
"customer_class": "individual",
"id": "3f709602-5e6c-4619-b80c-e8432ferewtr",
"locale": "fr_FR",
"modification_date": "2018-08-06T14:56:41.401685+00:00",
"name": "James Bond",
"support_id": "694324",
"support_level": "basic",
"support_pin": "9324",
"users": [],
"vat_number": null,
"warnings": []
}
]
'''
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.scaleway import (
Scaleway, ScalewayException, scaleway_argument_spec
)
class ScalewayOrganizationFacts(Scaleway):
    """Scaleway query object pinned to the ``organizations`` API resource."""

    def __init__(self, module):
        # Base class handles API connection setup; we only select the resource.
        super(ScalewayOrganizationFacts, self).__init__(module)
        self.name = 'organizations'
def main():
    """Module entry point: gather Scaleway organization facts and exit."""
    argument_spec = scaleway_argument_spec()
    argument_spec.update(
        api_url=dict(
            fallback=(env_fallback, ['SCW_API_URL']),
            default='https://account.scaleway.com',
            aliases=['base_url'],
        ),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    try:
        organizations = ScalewayOrganizationFacts(module).get_resources()
        module.exit_json(
            ansible_facts={'scaleway_organization_facts': organizations}
        )
    except ScalewayException as exc:
        module.fail_json(msg=exc.message)


if __name__ == '__main__':
    main()
| gpl-3.0 |
robbiet480/home-assistant | tests/components/homematicip_cloud/test_switch.py | 4 | 7136 | """Tests for HomematicIP Cloud switch."""
from homeassistant.components.homematicip_cloud import DOMAIN as HMIPC_DOMAIN
from homeassistant.components.homematicip_cloud.device import (
ATTR_GROUP_MEMBER_UNREACHABLE,
)
from homeassistant.components.switch import (
ATTR_CURRENT_POWER_W,
ATTR_TODAY_ENERGY_KWH,
DOMAIN as SWITCH_DOMAIN,
)
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.setup import async_setup_component
from .helper import async_manipulate_test_data, get_and_check_entity_basics
async def test_manually_configured_platform(hass):
    """Test that we do not set up an access point."""
    config = {SWITCH_DOMAIN: {"platform": HMIPC_DOMAIN}}
    assert await async_setup_component(hass, SWITCH_DOMAIN, config)
    # Manual platform configuration must not create HMIP data.
    assert not hass.data.get(HMIPC_DOMAIN)
async def test_hmip_switch(hass, default_mock_hap_factory):
    """Test HomematicipSwitch."""
    entity_id = "switch.schrank"
    entity_name = "Schrank"
    device_model = "HMIP-PS"
    mock_hap = await default_mock_hap_factory.async_get_mock_hap(
        test_devices=[entity_name]
    )

    ha_state, hmip_device = get_and_check_entity_basics(
        hass, mock_hap, entity_id, entity_name, device_model
    )
    assert ha_state.state == STATE_ON

    calls_before = len(hmip_device.mock_calls)

    # Turning off proxies to the device; the HA state follows once the
    # device reports "on" == False.
    await hass.services.async_call(
        "switch", "turn_off", {"entity_id": entity_id}, blocking=True
    )
    assert len(hmip_device.mock_calls) == calls_before + 1
    assert hmip_device.mock_calls[-1][0] == "turn_off"
    assert hmip_device.mock_calls[-1][1] == ()
    await async_manipulate_test_data(hass, hmip_device, "on", False)
    assert hass.states.get(entity_id).state == STATE_OFF

    # Turning back on mirrors the same flow.
    await hass.services.async_call(
        "switch", "turn_on", {"entity_id": entity_id}, blocking=True
    )
    assert len(hmip_device.mock_calls) == calls_before + 3
    assert hmip_device.mock_calls[-1][0] == "turn_on"
    assert hmip_device.mock_calls[-1][1] == ()
    await async_manipulate_test_data(hass, hmip_device, "on", True)
    assert hass.states.get(entity_id).state == STATE_ON
async def test_hmip_switch_measuring(hass, default_mock_hap_factory):
    """Test HomematicipSwitchMeasuring."""
    entity_id = "switch.pc"
    entity_name = "Pc"
    device_model = "HMIP-PSM"
    mock_hap = await default_mock_hap_factory.async_get_mock_hap(
        test_devices=[entity_name]
    )

    ha_state, hmip_device = get_and_check_entity_basics(
        hass, mock_hap, entity_id, entity_name, device_model
    )
    assert ha_state.state == STATE_ON

    calls_before = len(hmip_device.mock_calls)

    await hass.services.async_call(
        "switch", "turn_off", {"entity_id": entity_id}, blocking=True
    )
    assert len(hmip_device.mock_calls) == calls_before + 1
    assert hmip_device.mock_calls[-1][0] == "turn_off"
    assert hmip_device.mock_calls[-1][1] == ()
    await async_manipulate_test_data(hass, hmip_device, "on", False)
    assert hass.states.get(entity_id).state == STATE_OFF

    await hass.services.async_call(
        "switch", "turn_on", {"entity_id": entity_id}, blocking=True
    )
    assert len(hmip_device.mock_calls) == calls_before + 3
    assert hmip_device.mock_calls[-1][0] == "turn_on"
    assert hmip_device.mock_calls[-1][1] == ()
    await async_manipulate_test_data(hass, hmip_device, "on", True)

    # Power/energy readings are exposed as state attributes.
    await async_manipulate_test_data(hass, hmip_device, "currentPowerConsumption", 50)
    ha_state = hass.states.get(entity_id)
    assert ha_state.state == STATE_ON
    assert ha_state.attributes[ATTR_CURRENT_POWER_W] == 50
    assert ha_state.attributes[ATTR_TODAY_ENERGY_KWH] == 36

    # Without an energy counter the kWh attribute must disappear.
    await async_manipulate_test_data(hass, hmip_device, "energyCounter", None)
    ha_state = hass.states.get(entity_id)
    assert not ha_state.attributes.get(ATTR_TODAY_ENERGY_KWH)
async def test_hmip_group_switch(hass, default_mock_hap_factory):
    """Test HomematicipGroupSwitch."""
    entity_id = "switch.strom_group"
    entity_name = "Strom Group"
    device_model = None
    mock_hap = await default_mock_hap_factory.async_get_mock_hap(test_groups=["Strom"])

    ha_state, hmip_device = get_and_check_entity_basics(
        hass, mock_hap, entity_id, entity_name, device_model
    )
    assert ha_state.state == STATE_ON

    calls_before = len(hmip_device.mock_calls)

    await hass.services.async_call(
        "switch", "turn_off", {"entity_id": entity_id}, blocking=True
    )
    assert len(hmip_device.mock_calls) == calls_before + 1
    assert hmip_device.mock_calls[-1][0] == "turn_off"
    assert hmip_device.mock_calls[-1][1] == ()
    await async_manipulate_test_data(hass, hmip_device, "on", False)
    assert hass.states.get(entity_id).state == STATE_OFF

    await hass.services.async_call(
        "switch", "turn_on", {"entity_id": entity_id}, blocking=True
    )
    assert len(hmip_device.mock_calls) == calls_before + 3
    assert hmip_device.mock_calls[-1][0] == "turn_on"
    assert hmip_device.mock_calls[-1][1] == ()
    await async_manipulate_test_data(hass, hmip_device, "on", True)
    ha_state = hass.states.get(entity_id)
    assert ha_state.state == STATE_ON

    # Group switches also surface member reachability.
    assert not ha_state.attributes.get(ATTR_GROUP_MEMBER_UNREACHABLE)
    await async_manipulate_test_data(hass, hmip_device, "unreach", True)
    ha_state = hass.states.get(entity_id)
    assert ha_state.attributes[ATTR_GROUP_MEMBER_UNREACHABLE]
async def test_hmip_multi_switch(hass, default_mock_hap_factory):
    """Test HomematicipMultiSwitch."""
    entity_id = "switch.jalousien_1_kizi_2_schlazi_channel1"
    entity_name = "Jalousien - 1 KiZi, 2 SchlaZi Channel1"
    device_model = "HmIP-PCBS2"
    mock_hap = await default_mock_hap_factory.async_get_mock_hap(
        test_devices=[
            "Jalousien - 1 KiZi, 2 SchlaZi",
            "Multi IO Box",
            "Heizungsaktor",
            "ioBroker",
        ]
    )

    ha_state, hmip_device = get_and_check_entity_basics(
        hass, mock_hap, entity_id, entity_name, device_model
    )
    assert ha_state.state == STATE_OFF

    calls_before = len(hmip_device.mock_calls)

    # Multi-channel devices get the channel index (1) as a call argument.
    await hass.services.async_call(
        "switch", "turn_on", {"entity_id": entity_id}, blocking=True
    )
    assert len(hmip_device.mock_calls) == calls_before + 1
    assert hmip_device.mock_calls[-1][0] == "turn_on"
    assert hmip_device.mock_calls[-1][1] == (1,)
    await async_manipulate_test_data(hass, hmip_device, "on", True)
    assert hass.states.get(entity_id).state == STATE_ON

    await hass.services.async_call(
        "switch", "turn_off", {"entity_id": entity_id}, blocking=True
    )
    assert len(hmip_device.mock_calls) == calls_before + 3
    assert hmip_device.mock_calls[-1][0] == "turn_off"
    assert hmip_device.mock_calls[-1][1] == (1,)
    await async_manipulate_test_data(hass, hmip_device, "on", False)
    assert hass.states.get(entity_id).state == STATE_OFF
| apache-2.0 |
JioEducation/edx-platform | lms/djangoapps/badges/api/views.py | 17 | 5986 | """
API views for badges
"""
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from rest_framework import generics
from rest_framework.exceptions import APIException
from openedx.core.djangoapps.user_api.permissions import is_field_shared_factory
from openedx.core.lib.api.authentication import (
OAuth2AuthenticationAllowInactiveUser,
SessionAuthenticationAllowInactiveUser
)
from xmodule_django.models import CourseKeyField
from badges.models import BadgeAssertion
from .serializers import BadgeAssertionSerializer
class InvalidCourseKeyError(APIException):
    """
    Raised when the course key given isn't valid.
    """
    status_code = 400
    default_detail = "The course key provided was invalid."
class UserBadgeAssertions(generics.ListAPIView):
    """
    ** Use cases **

        Request a list of badge assertions for a user, optionally constrained
        to a course.

    ** Example Requests **

        GET /api/badges/v1/assertions/user/{username}/

    ** Response Values **

        Body comprised of a list of objects with the following fields:

        * badge_class: The badge class the assertion was awarded for, an
          object with: slug (identifier), issuing_component (software
          component responsible for issuing), display_name, course_id (or
          null if not course-scoped), description, criteria, image_url.
        * image_url: The baked assertion image derived from the badge_class
          icon -- contains metadata about the award in its headers.
        * assertion_url: The URL to the OpenBadges BadgeAssertion object,
          for verification by compatible tools and software.

    ** Params **

        * slug (optional): The identifier for a particular badge class to
          filter by.
        * issuing_component (optional): The issuing component for a
          particular badge class to filter by (requires slug to have been
          specified, or this will be ignored). If slug is provided and this
          is not, assumes the issuing_component should be empty.
        * course_id (optional): Returns assertions that were awarded as part
          of a particular course. If slug is provided, and this field is not
          specified, assumes that the target badge has an empty course_id
          field. '*' may be used to get all badges with the specified slug,
          issuing_component combination across all courses.

    ** Returns **

        * 200 on success, with a list of Badge Assertion objects.
        * 403 if a user who does not have permission to masquerade as
          another user specifies a username other than their own.
        * 404 if the specified user does not exist
    """
    serializer_class = BadgeAssertionSerializer
    authentication_classes = (
        OAuth2AuthenticationAllowInactiveUser,
        SessionAuthenticationAllowInactiveUser
    )
    permission_classes = (is_field_shared_factory("accomplishments_shared"),)

    def filter_queryset(self, queryset):
        """
        Return most recent to least recent badge.
        """
        return queryset.order_by('-created')

    def get_queryset(self):
        """
        Get all badges for the username specified.
        """
        params = self.request.query_params
        queryset = BadgeAssertion.objects.filter(user__username=self.kwargs['username'])

        provided_course_id = params.get('course_id')
        if provided_course_id == '*':
            # All matching course-scoped badges, e.g. to count how many
            # courses a user earned a specific award in.
            course_id = None
        elif provided_course_id:
            try:
                course_id = CourseKey.from_string(provided_course_id)
            except InvalidKeyError:
                raise InvalidCourseKeyError
        elif 'slug' not in params:
            # Neither course_id nor slug: return every badge for the user.
            course_id = None
        else:
            # Django won't let us use 'None' for querying a ForeignKey field.
            # The special 'Empty' value selects badges with no course key set.
            course_id = CourseKeyField.Empty

        if course_id is not None:
            queryset = queryset.filter(badge_class__course_id=course_id)

        if params.get('slug'):
            queryset = queryset.filter(
                badge_class__slug=params['slug'],
                badge_class__issuing_component=params.get('issuing_component', '')
            )
        return queryset
| agpl-3.0 |
oxc/Flexget | flexget/plugins/input/tail.py | 3 | 6597 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import io
import os
import re
import logging
from sqlalchemy import Column, Integer, Unicode
from flexget import options, plugin
from flexget.db_schema import versioned_base
from flexget.entry import Entry
from flexget.event import event
from flexget.manager import Session
log = logging.getLogger('tail')
Base = versioned_base('tail', 0)
class TailPosition(Base):
    # Persisted read offset for one (task, file) pair, so that subsequent
    # executions only parse lines appended since the last run.
    __tablename__ = 'tail'

    id = Column(Integer, primary_key=True)
    task = Column(Unicode)
    filename = Column(Unicode)
    position = Column(Integer)
class InputTail(object):
    """
    Parse any text for entries using regular expression.

    ::

      file: <file>
      entry:
        <field>: <regexp to match value>
      format:
        <field>: <python string formatting>

    Note: each entry must have at least two fields, title and url

    You may wish to specify encoding used by file so file can be properly
    decoded. List of encodings
    at http://docs.python.org/library/codecs.html#standard-encodings.

    Example::

      tail:
        file: ~/irclogs/some/log
        entry:
          title: 'TITLE: (.*) URL:'
          url: 'URL: (.*)'
        encoding: utf8
    """

    schema = {
        'type': 'object',
        'properties': {
            'file': {'type': 'string', 'format': 'file'},
            'encoding': {'type': 'string'},
            'entry': {
                'type': 'object',
                'properties': {
                    'url': {'type': 'string', 'format': 'regex'},
                    'title': {'type': 'string', 'format': 'regex'}
                },
                'required': ['url', 'title']
            },
            'format': {
                'type': 'object',
                'additionalProperties': {'type': 'string'}
            }
        },
        'required': ['file', 'entry'],
        'additionalProperties': False
    }

    def format_entry(self, entry, d):
        # Apply python %-style formatting to every configured field, using
        # the entry itself as the substitution mapping.
        for field, fmt in d.items():
            entry[field] = fmt % entry

    def on_task_input(self, task, config):
        """Read lines appended to the configured file and build entries."""
        # Producing no entries is fine for a tail input.
        task.no_entries_ok = True

        filename = os.path.expanduser(config['file'])
        encoding = config.get('encoding', 'utf-8')

        with Session() as session:
            db_pos = (session.query(TailPosition).
                      filter(TailPosition.task == task.name).
                      filter(TailPosition.filename == filename).first())
            last_pos = db_pos.position if db_pos else 0

            with io.open(filename, 'r', encoding=encoding, errors='replace') as handle:
                # --tail-reset FILE|TASK restarts reading from the beginning.
                if task.options.tail_reset == filename or task.options.tail_reset == task.name:
                    if last_pos == 0:
                        log.info('Task %s tail position is already zero' % task.name)
                    else:
                        log.info('Task %s tail position (%s) reset to zero' % (task.name, last_pos))
                        last_pos = 0

                # A truncated or rotated file makes the stored offset stale.
                if os.path.getsize(filename) < last_pos:
                    log.info('File size is smaller than in previous execution, resetting to beginning of the file')
                    last_pos = 0

                handle.seek(last_pos)
                log.debug('continuing from last position %s' % last_pos)

                entry_config = config.get('entry')
                format_config = config.get('format', {})

                used = {}       # fields captured for the entry in progress
                entries = []
                entry = Entry()

                # now parse text
                for line in handle:
                    if not line:
                        break
                    for field, regexp in entry_config.items():
                        match = re.search(regexp, line)
                        if not match:
                            continue
                        # Seeing a field a second time means a new entry
                        # started before the previous one completed.
                        if field in used:
                            if entry.isvalid():
                                log.info('Found field %s again before entry was completed. Adding current incomplete, but valid entry and moving to next.' % field)
                                self.format_entry(entry, format_config)
                                entries.append(entry)
                            else:
                                log.info(
                                    'Invalid data, entry field %s is already found once. Ignoring entry.' % field)
                            # start new entry
                            entry = Entry()
                            used = {}
                        # add field to entry
                        entry[field] = match.group(1)
                        used[field] = True
                        log.debug('found field: %s value: %s' % (field, entry[field]))

                    # All configured fields captured -> finalize this entry.
                    if len(used) == len(entry_config):
                        if not entry.isvalid():
                            log.info('Invalid data, constructed entry is missing mandatory fields (title or url)')
                        else:
                            self.format_entry(entry, format_config)
                            entries.append(entry)
                            log.debug('Added entry %s' % entry)
                        # start new entry
                        entry = Entry()
                        used = {}

                last_pos = handle.tell()

            if db_pos:
                db_pos.position = last_pos
            else:
                session.add(TailPosition(task=task.name, filename=filename, position=last_pos))
        return entries
@event('plugin.register')
def register_plugin():
    """Register the tail input plugin with FlexGet."""
    plugin.register(InputTail, 'tail', api_ver=2)
@event('options.register')
def register_parser_arguments():
    """Add the --tail-reset CLI option to the execute command."""
    options.get_parser('execute').add_argument(
        '--tail-reset', action='store', dest='tail_reset', default=False,
        metavar='FILE|TASK', help='reset tail position for a file')
| mit |
tswast/google-cloud-python | dlp/noxfile.py | 27 | 5051 | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by synthtool. DO NOT EDIT!
from __future__ import absolute_import
import os
import shutil
import nox
LOCAL_DEPS = (os.path.join("..", "api_core"), os.path.join("..", "core"))
BLACK_VERSION = "black==19.3b0"
BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"]
if os.path.exists("samples"):
BLACK_PATHS.append("samples")
@nox.session(python="3.7")
def lint(session):
    """Run linters.

    Returns a failure if the linters find linting errors or sufficiently
    serious code quality issues.
    """
    session.install("flake8", BLACK_VERSION, *LOCAL_DEPS)
    session.run("black", "--check", *BLACK_PATHS)
    session.run("flake8", "google", "tests")
@nox.session(python="3.6")
def blacken(session):
    """Run black.

    Format code to uniform standard.

    This currently uses Python 3.6 due to the automated Kokoro run of synthtool.
    That run uses an image that doesn't have 3.6 installed. Before updating this
    check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
    """
    session.install(BLACK_VERSION)
    session.run("black", *BLACK_PATHS)
@nox.session(python="3.7")
def lint_setup_py(session):
    """Verify that setup.py is valid (including RST check)."""
    session.install("docutils", "pygments")
    session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
def default(session):
    """Shared body of the unit sessions: install and run py.test."""
    # Install all test dependencies, then install this package in-place.
    session.install("mock", "pytest", "pytest-cov")
    for local_dep in LOCAL_DEPS:
        session.install("-e", local_dep)
    session.install("-e", ".")

    # Run py.test against the unit tests.
    session.run(
        "py.test",
        "--quiet",
        "--cov=google.cloud",
        "--cov=tests.unit",
        "--cov-append",
        "--cov-config=.coveragerc",
        "--cov-report=",
        "--cov-fail-under=0",
        os.path.join("tests", "unit"),
        *session.posargs,
    )
@nox.session(python=["2.7", "3.5", "3.6", "3.7"])
def unit(session):
    """Run the unit test suite."""
    default(session)
@nox.session(python=["2.7", "3.7"])
def system(session):
    """Run the system test suite."""
    test_file = os.path.join("tests", "system.py")
    test_dir = os.path.join("tests", "system")

    # Sanity check: Only run tests if the environment variable is set.
    if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
        session.skip("Credentials must be set via environment variable")

    have_test_file = os.path.exists(test_file)
    have_test_dir = os.path.exists(test_dir)

    # Sanity check: only run tests if found.
    if not have_test_file and not have_test_dir:
        session.skip("System tests were not found")

    # Use pre-release gRPC for system tests.
    session.install("--pre", "grpcio")

    # Install all test dependencies, then install this package into the
    # virtualenv's dist-packages.
    session.install("mock", "pytest")
    for local_dep in LOCAL_DEPS:
        session.install("-e", local_dep)
    session.install("-e", "../test_utils/")
    session.install("-e", ".")

    # Run py.test against the system tests.
    if have_test_file:
        session.run("py.test", "--quiet", test_file, *session.posargs)
    if have_test_dir:
        session.run("py.test", "--quiet", test_dir, *session.posargs)
@nox.session(python="3.7")
def cover(session):
    """Run the final coverage report.

    This outputs the coverage report aggregating coverage from the unit
    test runs (not system test runs), and then erases coverage data.
    """
    session.install("coverage", "pytest-cov")
    session.run("coverage", "report", "--show-missing", "--fail-under=100")
    session.run("coverage", "erase")
@nox.session(python="3.7")
def docs(session):
    """Build the docs for this library."""
    session.install("-e", ".")
    session.install("sphinx", "alabaster", "recommonmark")

    shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
    session.run(
        "sphinx-build",
        "-W",  # warnings as errors
        "-T",  # show full traceback on exception
        "-N",  # no colors
        "-b",
        "html",
        "-d",
        os.path.join("docs", "_build", "doctrees", ""),
        os.path.join("docs", ""),
        os.path.join("docs", "_build", "html", ""),
    )
| apache-2.0 |
benjamindeleener/odoo | addons/account_budget/report/analytic_account_budget_report.py | 43 | 6643 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from openerp.osv import osv
from openerp.report import report_sxw
class analytic_account_budget_report(report_sxw.rml_parse):
    """RML parser computing budget figures for an analytic account."""

    def __init__(self, cr, uid, name, context):
        super(analytic_account_budget_report, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'funct': self.funct,
            'funct_total': self.funct_total,
            'time': time,
        })
        self.context = context

    def funct(self, object, form, ids=None, done=None, level=1):
        """Build per-budget report rows for the given analytic account.

        Grand totals are accumulated in the module-global ``tot`` so that
        ``funct_total`` can report them afterwards.
        """
        if ids is None:
            ids = {}
        if not ids:
            ids = self.ids
        if not done:
            done = {}

        global tot
        tot = {
            'theo': 0.00,
            'pln': 0.00,
            'prac': 0.00,
            'perc': 0.00,
        }
        result = []

        accounts = self.pool.get('account.analytic.account').browse(
            self.cr, self.uid, [object.id], self.context.copy())
        c_b_lines_obj = self.pool.get('crossovered.budget.lines')
        obj_c_budget = self.pool.get('crossovered.budget')

        for account_id in accounts:
            b_line_ids = [line.id for line in account_id.crossovered_budget_line]
            if not b_line_ids:
                return []

            d_from = form['date_from']
            d_to = form['date_to']

            self.cr.execute('SELECT DISTINCT(crossovered_budget_id) FROM crossovered_budget_lines WHERE id =ANY(%s)', (b_line_ids,))
            budget_ids = self.cr.fetchall()
            context = {'wizard_date_from': d_from, 'wizard_date_to': d_to}

            for i in range(0, len(budget_ids)):
                budget_name = obj_c_budget.browse(self.cr, self.uid, [budget_ids[i][0]])
                # Header row for the budget (status == 1); amounts are
                # filled in after its detail lines are processed.
                result.append({
                    'b_id': '-1',
                    'a_id': '-1',
                    'name': budget_name[0].name,
                    'status': 1,
                    'theo': 0.00,
                    'pln': 0.00,
                    'prac': 0.00,
                    'perc': 0.00,
                })

                line_ids = c_b_lines_obj.search(
                    self.cr, self.uid,
                    [('id', 'in', b_line_ids), ('crossovered_budget_id', '=', budget_ids[i][0])])
                budget_lines = c_b_lines_obj.browse(self.cr, self.uid, line_ids)

                tot_theo = tot_pln = tot_prac = tot_perc = 0
                done_budget = []
                for line in budget_lines:
                    if line.id in b_line_ids:
                        theo = c_b_lines_obj._theo_amt(self.cr, self.uid, [line.id], context)[line.id]
                        pract = c_b_lines_obj._prac_amt(self.cr, self.uid, [line.id], context)[line.id]
                        if line.general_budget_id.id in done_budget:
                            # Merge into the detail row already emitted for
                            # this general budget / analytic account pair.
                            for record in result:
                                if record['b_id'] == line.general_budget_id.id and record['a_id'] == line.analytic_account_id.id:
                                    record['theo'] += theo
                                    record['pln'] += line.planned_amount
                                    record['prac'] += pract
                                    record['perc'] += line.percentage
                                    tot_theo += theo
                                    tot_pln += line.planned_amount
                                    tot_prac += pract
                                    tot_perc += line.percentage
                        else:
                            tot_theo += theo
                            tot_pln += line.planned_amount
                            tot_prac += pract
                            tot_perc += line.percentage
                            result.append({
                                'b_id': line.general_budget_id.id,
                                'a_id': line.analytic_account_id.id,
                                'name': line.general_budget_id.name,
                                'status': 2,
                                'theo': theo,
                                'pln': line.planned_amount,
                                'prac': pract,
                                'perc': line.percentage,
                            })
                            done_budget.append(line.general_budget_id.id)
                    else:
                        if line.general_budget_id.id in done_budget:
                            continue
                        # Budget line outside the account scope: emit an
                        # all-zero detail row once per general budget.
                        result.append({
                            'b_id': line.general_budget_id.id,
                            'a_id': line.analytic_account_id.id,
                            'name': line.general_budget_id.name,
                            'status': 2,
                            'theo': 0.00,
                            'pln': 0.00,
                            'prac': 0.00,
                            'perc': 0.00,
                        })
                        done_budget.append(line.general_budget_id.id)

                if tot_theo == 0.00:
                    tot_perc = 0.00
                else:
                    tot_perc = float(tot_prac / tot_theo) * 100

                # Write the accumulated amounts back onto the header row,
                # which sits len(done_budget) + 1 entries from the end.
                header = result[-(len(done_budget) + 1)]
                header['theo'] = tot_theo
                tot['theo'] += tot_theo
                header['pln'] = tot_pln
                tot['pln'] += tot_pln
                header['prac'] = tot_prac
                tot['prac'] += tot_prac
                header['perc'] = tot_perc

        if tot['theo'] == 0.00:
            tot['perc'] = 0.00
        else:
            tot['perc'] = float(tot['prac'] / tot['theo']) * 100
        return result

    def funct_total(self, form):
        """Return the grand totals accumulated by :meth:`funct`."""
        return [{
            'tot_theo': tot['theo'],
            'tot_pln': tot['pln'],
            'tot_prac': tot['prac'],
            'tot_perc': tot['perc'],
        }]
class report_analyticaccountbudget(osv.AbstractModel):
    # Thin wrapper registering the rml parser as a QWeb report.
    _name = 'report.account_budget.report_analyticaccountbudget'
    _inherit = 'report.abstract_report'
    _template = 'account_budget.report_analyticaccountbudget'
    _wrapped_report_class = analytic_account_budget_report
| gpl-3.0 |
yalewoosoft/shadowsocks | gnupg/_logger.py | 11 | 3556 | # -*- coding: utf-8 -*-
#
# This file is part of python-gnupg, a Python interface to GnuPG.
# Copyright © 2013 Isis Lovecruft, <isis@leap.se> 0xA3ADB67A2CDB8B35
# © 2013 Andrej B.
# © 2013 LEAP Encryption Access Project
# © 2008-2012 Vinay Sajip
# © 2005 Steve Traugott
# © 2004 A.M. Kuchling
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the included LICENSE file for details.
'''Logging module for python-gnupg.'''
from __future__ import absolute_import
from __future__ import print_function
from datetime import datetime
from functools import wraps
import logging
import sys
import os
try:
    # Python >= 2.7 / 3.1 ships logging.NullHandler; define a stub otherwise.
    from logging import NullHandler
except ImportError:  # narrow: a bare except would mask unrelated errors
    class NullHandler(logging.Handler):
        """No-op handler used when logging is disabled."""
        def handle(self, record):
            pass
from . import _ansistrm
GNUPG_STATUS_LEVEL = 9
def status(self, message, *args, **kwargs):
    """LogRecord for GnuPG internal status messages."""
    # Guard clause: emit only when the custom GNUPG level is enabled.
    if not self.isEnabledFor(GNUPG_STATUS_LEVEL):
        return
    self._log(GNUPG_STATUS_LEVEL, message, args, **kwargs)
@wraps(logging.Logger)
def create_logger(level=logging.NOTSET):
    """Create a logger for python-gnupg at a specific message level.

    :type level: :obj:`int` or :obj:`str`
    :param level: A string or an integer for the lowest level to include in
        logs.

    **Available levels:**

    ==== ======== ========================================
    int  str      description
    ==== ======== ========================================
    0    NOTSET   Disable all logging.
    9    GNUPG    Log GnuPG's internal status messages.
    10   DEBUG    Log module level debuging messages.
    20   INFO     Normal user-level messages.
    30   WARN     Warning messages.
    40   ERROR    Error messages and tracebacks.
    50   CRITICAL Unhandled exceptions and tracebacks.
    ==== ======== ========================================
    """
    _test = os.path.join(os.path.join(os.getcwd(), 'gnupg'), 'test')
    _now = datetime.now().strftime("%Y-%m-%d_%H%M%S")
    _fn = os.path.join(_test, "%s_test_gnupg.log" % _now)
    _fmt = "%(relativeCreated)-4d L%(lineno)-4d:%(funcName)-18.18s %(levelname)-7.7s %(message)s"

    # Add the GNUPG_STATUS_LEVEL LogRecord to all Loggers in the module.
    logging.addLevelName(GNUPG_STATUS_LEVEL, "GNUPG")
    logging.Logger.status = status

    if level > logging.NOTSET:
        logging.basicConfig(level=level, filename=_fn,
                            filemode="a", format=_fmt)
        logging.logThreads = True
        if hasattr(logging, 'captureWarnings'):
            logging.captureWarnings(True)

        # Colourised stderr output, with custom colours for the GNUPG and
        # DEBUG levels.
        colouriser = _ansistrm.ColorizingStreamHandler
        colouriser.level_map[9] = (None, 'blue', False)
        colouriser.level_map[10] = (None, 'cyan', False)
        handler = colouriser(sys.stderr)
        handler.setLevel(level)
        handler.setFormatter(logging.Formatter(_fmt))
    else:
        handler = NullHandler()

    log = logging.getLogger('gnupg')
    log.addHandler(handler)
    log.setLevel(level)
    log.info("Log opened: %s UTC" % datetime.ctime(datetime.utcnow()))
    return log
| apache-2.0 |
SeungGiJeong/SK_FastIR | registry/reg.py | 1 | 40863 | from __future__ import unicode_literals
import codecs
from utils.utils import convert_windate, dosdate, get_csv_writer, write_list_to_csv,process_hashes
import registry_obj
from win32com.shell import shell
import struct
import construct
import StringIO
import os
from csv import reader
from utils.vss import _VSS
import re
from utils.utils import regex_patern_path
import os
from filecatcher.archives import _Archives
KEY_VALUE_STR = 0
VALUE_NAME = 1
VALUE_DATA = 2
VALUE_TYPE = 3
VALUE_LAST_WRITE_TIME = 4
VALUE_PATH = 5
KEY_PATH = 1
KEY_LAST_WRITE_TIME = 2
def get_usb_key_info(key_name):
    """
    Extracts information about the USB keys from the registry
    :param key_name: a DeviceClasses key name such as
        ##?#USBSTOR#Disk&Ven_&Prod_USB_DISK_2.0&Rev_PMAP#07BC13025A3B03A1&0#{...}
    :return: the "VID&PID" identifier string for the matching USB device,
        or "" when no match is found
    """
    # HKEY_LOCAL_MACHINE\SYSTEM\ControlSet001\Control\DeviceClasses\{a5dcbf10-6530-11d2-901f-00c04fb951ed}
    str_reg_key_usbinfo = r"SYSTEM\ControlSet001\Control\DeviceClasses\{a5dcbf10-6530-11d2-901f-00c04fb951ed}"
    # The key name is "#"-separated; field 5 holds the USB id, and only the
    # part left of "&" matters (e.g. "07BC13025A3B03A1&0" -> "07BC13025A3B03A1").
    usb_id = key_name.split("#")[5].split("&")[0]
    key_ids = ""
    reg_key_info = registry_obj.get_registry_key(registry_obj.HKEY_LOCAL_MACHINE, str_reg_key_usbinfo)
    if reg_key_info:
        for sub_index in xrange(reg_key_info.get_number_of_sub_keys()):
            sub_name = reg_key_info.get_sub_key(sub_index).get_name()
            if usb_id not in sub_name:
                continue
            # Matching names look like:
            # ##?#USB#VID_26BD&PID_9917#0702313E309E0863#{a5dcbf10-6530-11d2-901f-00c04fb951ed}
            # -> same "#"-separated layout; field 4 is the VID&PID pair we want.
            key_ids = sub_name.split("#")[4]
            break
    return key_ids
def csv_user_assist_value_decode_win7_and_after(str_value_datatmp, count_offset):
    """
    Decode a UserAssist "Count" value for Windows 7 and later.

    The post-Vista layout differs from earlier Windows versions: the session
    id, run count and focus count are 32-bit little-endian fields at offsets
    0, 4 and 12, and a 64-bit timestamp of the last execution sits at
    offset 60.

    :return: [session, run_count, focus_count, last_exec_date_or_empty]
    """
    session = unicode(struct.unpack("<I", str_value_datatmp[0:4])[0])
    run_count = unicode(struct.unpack("<I", str_value_datatmp[4:8])[0] + count_offset + 1)
    focus_count = unicode(struct.unpack("<I", str_value_datatmp[12:16])[0])
    try:
        last_exec = convert_windate(struct.unpack("<Q", str_value_datatmp[60:68])[0])
    except ValueError:
        last_exec = None
    decoded = [session, run_count, focus_count]
    decoded.append(last_exec if last_exec else "")
    return decoded
def csv_user_assist_value_decode_before_win7(str_value_datatmp, count_offset):
    """
    Decode a pre-Windows-7 UserAssist "Count" value.

    Each 16-byte value is laid out as:
      - bytes 0-3:  session id
      - bytes 4-7:  number of times the program has been run
      - bytes 8-15: 64-bit timestamp of the last execution
    All fields are little-endian.

    :param str_value_datatmp: raw registry value data (at least 16 bytes)
    :param count_offset: correction added to the run counter (pre-Win7
                         counters start at an offset, supplied by the caller)
    :return: [session, run_count, last_exec_date_or_empty_string]
    """
    str_value_data_session = str_value_datatmp[0:4]
    str_value_data_session = unicode(struct.unpack("<I", str_value_data_session)[0])
    str_value_data_count = str_value_datatmp[4:8]
    str_value_data_count = unicode(struct.unpack("<I", str_value_data_count)[0] + count_offset + 1)
    str_value_data_timestamp = str_value_datatmp[8:16]
    try:
        # BUGFIX: the timestamp slice is 8 bytes but was unpacked with "<I"
        # (a 4-byte format), which raised struct.error on every call;
        # struct.error is not a ValueError, so it escaped the handler and
        # crashed the extraction. Unpack the full 64-bit value like the
        # Win7+ variant does, and also catch struct.error defensively.
        timestamp = struct.unpack("<Q", str_value_data_timestamp)[0]
        date_last_exec = convert_windate(timestamp)
    except (ValueError, struct.error):
        date_last_exec = None
    arr_data = [str_value_data_session, str_value_data_count]
    if date_last_exec:
        arr_data.append(date_last_exec)
    else:
        arr_data.append("")
    return arr_data
def decode_itempos(itempos):
    """
    Decodes a single itempos (shellbag item-position entry) and returns the
    extracted information as a list of strings:
    [itempos_size, filesize, modified_ts, short_name, created_ts,
     accessed_ts, unicode_filename].
    """
    itempos_io = StringIO.StringIO(itempos)
    # Fixed header: size, 4-byte file size, DOS date/time pair, attributes,
    # then the NUL-terminated 8.3 short file name.
    itempos_struct = construct.Struct("itempos",
                                      construct.ULInt16("itempos_size"),
                                      construct.Padding(2),
                                      construct.ULInt32("filesize"),
                                      construct.Bytes("dos_date", 2),
                                      construct.Bytes("dos_time", 2),
                                      construct.ULInt16("file_attr"),
                                      construct.CString("filename")
                                      )
    parse_res = itempos_struct.parse_stream(itempos_io)
    # The extension block is 2-byte aligned; skip the pad byte if needed.
    if itempos_io.pos % 2 == 1:
        itempos_io.read(1)
    ext_struct = construct.Struct("ext",
                                  construct.ULInt16("ext_size"),
                                  construct.ULInt16("ext_version")
                                  )
    parse_ext = ext_struct.parse_stream(itempos_io)
    if parse_ext["ext_version"] >= 0x3:
        # Version >= 3 adds creation and access DOS timestamps.
        itempos2_struct = construct.Struct("itempos2",
                                           construct.Padding(2),  # 0004
                                           construct.Padding(2),  # BEEF
                                           construct.Bytes("creation_dos_date", 2),
                                           construct.Bytes("creation_dos_time", 2),
                                           construct.Bytes("access_dos_date", 2),
                                           construct.Bytes("access_dos_time", 2),
                                           construct.Padding(4)
                                           )
        parse_res2 = itempos2_struct.parse_stream(itempos_io)
    # NOTE(review): parse_res2 is only bound when ext_version >= 0x3, yet it
    # is used unconditionally below - an entry with an older extension
    # version would raise NameError. Presumably such entries never occur in
    # practice; confirm against real shellbag samples before changing.
    unicode_filename = ""
    if parse_ext["ext_version"] >= 0x7:
        # Version >= 7 inserts an NTFS file reference before the long name.
        itempos3_struct = construct.Struct("itempos3",
                                           construct.ULInt64("file_ref"),
                                           construct.Padding(8),
                                           construct.Padding(2)
                                           )
        parse_res3 = itempos3_struct.parse_stream(itempos_io)
        if parse_ext["ext_version"] >= 0x8:
            itempos4_struct = construct.Struct("itempos4",
                                               construct.Padding(4)
                                               )
            itempos4_struct.parse_stream(itempos_io)
        tmp = itempos_io.read()
        unicode_filename = tmp.decode("utf16")
        # NOTE(review): this guard looks inverted - it trims the tail only
        # when the decoded name is NOT NUL-terminated, while the comment
        # says the intent is to drop the trailing bytes. Verify against the
        # shellbag format reference before touching it.
        if not unicode_filename.endswith("\0"):
            unicode_filename = unicode_filename[:-2]  # ditch last unused 2 bytes and \0 char
    elif parse_ext["ext_version"] >= 0x3:
        unicode_filename = itempos_io.read().decode("utf16")
        if not unicode_filename.endswith("\0"):
            unicode_filename = unicode_filename[:-2]  # ditch last unused 2 bytes and \0 char
    timestamp_modified = dosdate(parse_res["dos_date"], parse_res["dos_time"]).strftime("%d/%m/%Y %H:%M:%S")
    timestamp_created = dosdate(parse_res2["creation_dos_date"], parse_res2["creation_dos_time"]).strftime(
        "%d/%m/%Y %H:%M:%S")
    timestamp_access = dosdate(parse_res2["access_dos_date"], parse_res2["access_dos_time"]).strftime(
        "%d/%m/%Y %H:%M:%S")
    return [unicode(parse_res["itempos_size"]), unicode(parse_res["filesize"]), timestamp_modified,
            parse_res["filename"], timestamp_created, timestamp_access, unicode_filename]
def decode_shellbag_itempos_data(data):
    """
    @see: http://www.williballenthin.com/forensics/shellbags/
    :param data: The data of the registry key that needs decoding
    :return: A list of readable filenames
    """
    HEADER_LEN = 0x10   # fixed header preceding the first entry
    UNUSED_LEN = 0x14   # SHITEMID entries of this size are skipped
    PADDING_LEN = 0x8   # padding preceding every entry
    remaining = data[HEADER_LEN:]
    decoded_entries = []
    while True:
        remaining = remaining[PADDING_LEN:]  # skip per-entry padding
        entry_len = struct.unpack("<h", remaining[:2])[0]
        if entry_len == 0:
            # a zero length terminates the shellbag list
            break
        if entry_len == UNUSED_LEN:
            # SHITEMID entry, unknown usage - skip it
            remaining = remaining[entry_len:]
            continue
        decoded_entries.append(decode_itempos(remaining[:entry_len]))
        remaining = remaining[entry_len:]
    return decoded_entries
def append_reg_values(hive_list, key):
    """
    Append one ("VALUE", name, data, type, last_write_time, path) tuple to
    *hive_list* for every registry value stored directly under *key*.
    """
    for value_index in xrange(key.get_number_of_values()):
        reg_value = key.get_value(value_index)
        hive_list.append(("VALUE",
                          reg_value.get_name(),
                          reg_value.get_data(),
                          reg_value.get_type(),
                          key.get_last_written_time(),
                          reg_value.get_path()))
def decode_recent_docs_mru(value):
    """
    Decode one RecentDocs MRU entry.

    :return: a two-element list - the decoded document name first, then the
             raw symbolic link name.
    """
    terminator = value.find(b"\x00\x00")
    raw_name = value[0:terminator + 1]
    try:
        name = raw_name.decode("utf-16-le")
    except UnicodeDecodeError:
        try:
            name = raw_name.decode("utf-8")
        except UnicodeDecodeError:
            # last resort: keep the raw characters as-is
            name = "".join([c for c in raw_name])
    # terminator+3: the name's final char also ends with \x00 plus the
    # \x00\x00 pair; the link name starts 14 bytes after that.
    link_start = terminator + 3 + 14
    link_end = value.find(b"\x00", link_start)
    return [name, value[link_start:link_end]]
def construct_list_from_key(hive_list, key, is_recursive=True):
    """
    Constructs the hive list. Recursive method if is_recursive=True.
    Keyword arguments:
    hive_list -- (List) the list to append to
    key -- (RegistryKey) the key to dump in the list
    is_recursive -- (bool) whether sub keys are walked recursively
    """
    hive_list.append(("KEY", key.get_path(), key.get_last_written_time()))
    append_reg_values(hive_list, key)
    for i in xrange(key.get_number_of_sub_keys()):
        try:
            sub_key = key.get_sub_key(i)
        except TypeError:
            # hack for programs using unicode in registry: the sub key name
            # cannot be fetched normally, so patch the first VALUE tuple of
            # the current key with an escaped (\xNN) rendition of the
            # offending sub key name instead.
            for j in xrange(len(hive_list) - 1, 0, -1):
                # walk backwards to the most recent KEY marker...
                if hive_list[j][KEY_VALUE_STR] == "KEY":
                    # get the first VALUE item in the list
                    j += 1
                    break
            if hive_list[j][VALUE_NAME] == "":
                tmp = hive_list[j]
                list_names = key.get_sub_keys_names()
                value_name = ""
                for name in list_names:
                    if "\x00" in name:
                        # invalid registry name
                        value_name = "\\x" + "\\x".join("{:02x}".format(ord(c)) for c in name)
                # replace the name of the first VALUE item by the name of the invalid registry name
                hive_list[j] = (tmp[KEY_VALUE_STR], value_name, tmp[VALUE_DATA], tmp[VALUE_TYPE],
                                tmp[VALUE_LAST_WRITE_TIME], tmp[VALUE_PATH])
            # NOTE(review): the sub key is dropped entirely in this case -
            # its own values/sub keys are not walked. Presumably intended.
            sub_key = None
        if sub_key and is_recursive:
            construct_list_from_key(hive_list, sub_key, is_recursive)
class _Reg(object):
    """
    Registry artefact collector.

    Dumps forensically relevant registry keys (services, startup entries,
    MRU lists, shellbags, USB history, user assist, custom keys, ...) to
    CSV files named "<computer_name>_<artefact><rand_ext>" in the
    configured output directory. Logged-off user hives (NTUSER.DAT /
    UsrClass.dat) are opened directly; on Vista+ a shadow copy (VSS) is
    used to bypass the lock on the logged-on user's hive.
    """
    def __init__(self, params):
        # params: dict supplied by the caller; only the keys read below are
        # required.
        if params["output_dir"] and params["computer_name"]:
            self.computer_name = params["computer_name"]
            self.output_dir = params["output_dir"]
        if params["custom_registry_keys"]:
            self.exec_custom_registry_keys = True
            self.custom_registry_keys = params["custom_registry_keys"]
            self.registry_recursive = params["registry_recursive"]
        else:
            self.exec_custom_registry_keys = False
        self.logger = params["logger"]
        self.systemroot = params['system_root']
        # get logged off users hives
        self.user_hives = []
        self.vss = None
        self.rand_ext = params['rand_ext']
        self.get_autoruns = params['get_autoruns']

    def init_win_xp(self):
        """Open the NTUSER.DAT hive of every logged-off user (Windows XP)."""
        users = registry_obj.get_registry_key(registry_obj.HKEY_LOCAL_MACHINE,
                                              r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList")
        if users:
            for i in xrange(users.get_number_of_sub_keys()):
                user = users.get_sub_key(i)
                path = user.get_value_by_name("ProfileImagePath").get_data() + r"\NTUSER.DAT"
                try:
                    regf_file = registry_obj.RegfFile()
                    regf_file.open(path)
                    # NOTE(review): a 2-tuple is stored here, but
                    # _get_list_from_users_registry_key unpacks 3-tuples -
                    # the XP path looks broken; confirm with callers.
                    self.user_hives.append((user.get_name(), regf_file.get_root_key()))
                except IOError:  # user is logged on or not a user
                    pass

    def init_win_vista_and_above(self):
        """
        Open NTUSER.DAT and UsrClass.dat for every user profile through a
        volume shadow copy (Windows Vista and later).
        """
        users = registry_obj.get_registry_key(registry_obj.HKEY_LOCAL_MACHINE,
                                              r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList")
        drive, p = os.path.splitdrive(self.systemroot)
        params = {"logger": self.logger}
        self.vss = _VSS._get_instance(params, drive)
        if users:
            for i in xrange(users.get_number_of_sub_keys()):
                user = users.get_sub_key(i)
                tmp = user.get_value_by_name("ProfileImagePath").get_data()
                # remap the profile path onto the shadow copy root
                path = tmp.replace(drive, self.vss._return_root()) + r"\NTUSER.DAT"
                path_usrclass = tmp.replace(drive,
                                            self.vss._return_root()) + r"\AppData\Local\Microsoft\Windows\\UsrClass.dat"
                try:
                    regf_file = registry_obj.RegfFile()
                    regf_file.open(path)
                    regf_file_usrclass = registry_obj.RegfFile()
                    regf_file_usrclass.open(path_usrclass)
                    self.user_hives.append(
                        (user.get_name(), regf_file.get_root_key(), regf_file_usrclass.get_root_key()))
                except IOError:  # not a user
                    pass

    def _generate_hklm_csv_list(self, to_csv_list, csv_type, path, is_recursive=True):
        """
        Generates a generic list suitable for CSV output.
        Extracts information from HKEY_LOCAL_MACHINE hives.
        """
        hive_list = self._get_list_from_registry_key(registry_obj.HKEY_LOCAL_MACHINE, path, is_recursive=is_recursive)
        for item in hive_list:
            if item[KEY_VALUE_STR] in ("VALUE", "ROOT_KEY"):
                try:
                    # prefer UTF-16 decoded data, but only when it decodes
                    # cleanly (no embedded NULs)
                    value_data = item[VALUE_DATA].decode('UTF-16')
                    if '\x00' not in value_data:
                        value_data = item[VALUE_DATA]
                except:
                    value_data = item[VALUE_DATA]
                to_csv_list.append((self.computer_name,
                                    csv_type,
                                    item[VALUE_LAST_WRITE_TIME],
                                    "HKEY_LOCAL_MACHINE",
                                    item[VALUE_PATH],
                                    item[VALUE_NAME],
                                    item[KEY_VALUE_STR],
                                    registry_obj.get_str_type(item[VALUE_TYPE]),
                                    value_data))

    def _generate_hku_csv_list(self, to_csv_list, csv_type, path, is_recursive=True):
        """
        Generates a generic list suitable for CSV output.
        Extracts information from HKEY_USERS hives.
        """
        hive_list = self._get_list_from_registry_key(registry_obj.HKEY_USERS, path, is_recursive=is_recursive)
        for item in hive_list:
            if item[KEY_VALUE_STR] == "VALUE":
                try:
                    value_data = item[VALUE_DATA].decode('UTF-16')
                    if '\x00' not in value_data:
                        value_data = item[VALUE_DATA]
                except:
                    value_data = item[VALUE_DATA]
                to_csv_list.append((self.computer_name,
                                    csv_type,
                                    item[VALUE_LAST_WRITE_TIME],
                                    "HKEY_USERS",
                                    item[VALUE_PATH],
                                    item[VALUE_NAME],
                                    item[KEY_VALUE_STR],
                                    registry_obj.get_str_type(item[VALUE_TYPE]),
                                    value_data))

    def _get_list_from_users_registry_key(self, key_path, is_recursive=True, is_usrclass=False):
        """
        Extracts information from HKEY_USERS. Since logged off users hives are not mounted by Windows, it is necessary
        to open each NTUSER.DAT files, except for currently logged on users.
        On Windows Vista and later, HKEY_USERS\ID\Software\Classes is in UsrClass.dat.
        On Windows Vista and later, shadow copies are used in order to bypass the lock on HKCU.
        :param key_path: the registry key to list
        :param is_recursive: whether the function should also list subkeys
        :param is_usrclass: read from UsrClass.dat instead of NTUSER.DAT for
                            logged-off users
        :return: a list of all extracted keys/values
        """
        hive_list = []
        key_users = registry_obj.get_registry_key(registry_obj.HKEY_USERS)
        if key_users:
            for i in xrange(key_users.get_number_of_sub_keys()):
                key_user = key_users.get_sub_key(i)
                key_data = key_user.get_sub_key_by_path(key_path)
                if key_data:
                    construct_list_from_key(hive_list, key_data, is_recursive)
        # same thing for logged off users (NTUSER.DAT, UsrClass.dat)
        for sid, root_key_ntuser, root_key_usrclass in self.user_hives:
            if is_usrclass:
                cur_root_key = root_key_usrclass
            else:
                cur_root_key = root_key_ntuser
            key_data = cur_root_key.get_sub_key_by_path(key_path)
            if key_data:
                # prefix paths with the SID so output matches mounted hives
                key_data.prepend_path_with_sid(sid)
                construct_list_from_key(hive_list, key_data, is_recursive)
        return hive_list

    def _get_list_from_registry_key(self, hive, key_path, is_recursive=True, is_usrclass=False):
        """
        Creates a list of all nodes and values from a registry key path.
        Keyword arguments:
        hive -- (String) the hive name
        key_path -- (String) the path of the key from which the list should be created
        """
        if hive == registry_obj.HKEY_USERS:
            # HKEY_USERS requires the logged-off-user handling above
            return self._get_list_from_users_registry_key(key_path, is_recursive, is_usrclass)
        hive_list = []
        root_key = registry_obj.get_registry_key(hive, key_path)
        if root_key:
            hive_list.append(("ROOT_KEY", root_key.get_name(), "", "", root_key.get_last_written_time(),
                              root_key.get_path()))
            append_reg_values(hive_list, root_key)
            for i in xrange(root_key.get_number_of_sub_keys()):
                sub_key = root_key.get_sub_key(i)
                if sub_key:
                    construct_list_from_key(hive_list, sub_key, is_recursive)
        return hive_list

    def _csv_user_assist(self, count_offset, is_win7_or_further):
        """
        Extracts information from UserAssist registry key which contains information about executed programs
        The count offset is for Windows versions before 7, where it would start at 6
        """
        self.logger.info("Extracting user assist")
        path = r"Software\Microsoft\Windows\CurrentVersion\Explorer\\UserAssist"
        count = "\Count"
        # logged on users
        users = registry_obj.RegistryKey(registry_obj.HKEY_USERS)
        hive_list = []
        for i in xrange(users.get_number_of_sub_keys()):
            user = users.get_sub_key(i)
            user_assist_key = user.get_sub_key_by_path(path)
            if user_assist_key:
                for j in xrange(user_assist_key.get_number_of_sub_keys()):
                    # getting Software\Microsoft\Windows\CurrentVersion\Explorer\UserAssist\*\Count
                    path_no_sid = "\\".join(user_assist_key.get_sub_key(j).get_path().split("\\")[1:])
                    hive_list += self._get_list_from_registry_key(registry_obj.HKEY_USERS, path_no_sid + count)
        if is_win7_or_further:
            to_csv_list = [("COMPUTER_NAME", "TYPE", "LAST_WRITE_TIME", "HIVE", "KEY_PATH", "ATTR_NAME", "REG_TYPE",
                            "ATTR_TYPE", "ATTR_DATA", "DATA_SESSION", "DATA_COUNT", "DATA_FOCUS", "DATA_LAST_EXEC")]
        else:
            to_csv_list = [("COMPUTER_NAME", "TYPE", "LAST_WRITE_TIME", "HIVE", "KEY_PATH", "ATTR_NAME", "REG_TYPE",
                            "ATTR_TYPE", "ATTR_DATA", "DATA_SESSION", "DATA_COUNT", "DATA_LAST_EXEC")]
        for item in hive_list:
            if item[KEY_VALUE_STR] == "VALUE":
                # value names are ROT-13 obfuscated
                str_value_name = codecs.decode(item[VALUE_NAME], "rot_13")
                str_value_datatmp = item[VALUE_DATA]
                # some data are less than 16 bytes for some reason...
                if len(str_value_datatmp) < 16:
                    # NOTE(review): type tag "userassist" differs from the
                    # "user_assist" tag used in the branch below - confirm
                    # whether this inconsistency is intended.
                    to_csv_list.append((self.computer_name,
                                        "userassist",
                                        item[VALUE_LAST_WRITE_TIME],
                                        "HKEY_USERS",
                                        item[VALUE_PATH],
                                        item[VALUE_NAME],
                                        item[KEY_VALUE_STR],
                                        registry_obj.get_str_type(item[VALUE_TYPE]),
                                        str_value_name))
                else:
                    if is_win7_or_further:
                        data = csv_user_assist_value_decode_win7_and_after(str_value_datatmp, count_offset)
                    else:
                        data = csv_user_assist_value_decode_before_win7(str_value_datatmp, count_offset)
                    to_csv_list.append((self.computer_name,
                                        "user_assist",
                                        item[VALUE_LAST_WRITE_TIME],
                                        "HKEY_USERS",
                                        item[VALUE_PATH],
                                        item[VALUE_NAME],
                                        item[KEY_VALUE_STR],
                                        registry_obj.get_str_type(item[VALUE_TYPE]),
                                        str_value_name) + tuple(data))
        with open(self.output_dir + "\\" + self.computer_name + "_user_assist" + self.rand_ext, "wb") as output:
            csv_writer = get_csv_writer(output)
            write_list_to_csv(to_csv_list, csv_writer)

    def _get_files_and_hashes(self, csv_files):
        """
        For each CSV row whose data looks like a file path, hash the file
        (MD5/SHA1/SHA256), archive it into <computer>_autoruns.zip, and
        return the rows extended with the three hash columns.
        """
        csv_files_transform = []
        arch = _Archives(os.path.join(self.output_dir, self.computer_name + '_autoruns.zip'), self.logger)
        for COMPUTER_NAME, TYPE, LAST_WRITE_TIME, HIVE, KEY_PATH, \
                ATTR_NAME, REG_TYPE, ATTR_TYPE, ATTR_DATA in csv_files:
            m = re.match(regex_patern_path, ATTR_DATA)
            md5 = sha1 = sha256 = 'N\/A'
            if m:
                path = m.group(0).split('/')[0].strip()
                if os.path.isfile(path):
                    if self.vss:
                        # hash through the shadow copy to dodge file locks
                        path = self.vss._return_root() + os.path.splitdrive(path)[1]
                        md5, sha1, sha256 = self.vss.process_hash_value(path)
                        arch.record(path)
                    else:
                        try:
                            md5, sha1, sha256 = process_hashes(path)
                            arch.record(path)
                        except:
                            pass
            csv_files_transform.append((COMPUTER_NAME, TYPE, LAST_WRITE_TIME, HIVE, KEY_PATH, ATTR_NAME, REG_TYPE,
                                        ATTR_TYPE, ATTR_DATA, md5, sha1, sha256))
        return csv_files_transform

    def _csv_open_save_mru(self, str_opensave_mru):
        """Extracts OpenSaveMRU containing information about files selected in the Open and Save view"""
        # TODO : Win XP
        self.logger.info("Extracting open save MRU")
        hive_list = self._get_list_from_registry_key(registry_obj.HKEY_USERS, str_opensave_mru)
        to_csv_list = [("COMPUTER_NAME", "TYPE", "LAST_WRITE_TIME", "HIVE", "KEY_PATH", "ATTR_NAME", "REG_TYPE",
                        "ATTR_TYPE", "ATTR_DATA")]
        for item in hive_list:
            if item[KEY_VALUE_STR] == 'VALUE':
                if item[VALUE_NAME] != "MRUListEx":
                    # the value is a PIDL; resolve it to a filesystem path
                    pidl = shell.StringAsPIDL(item[VALUE_DATA])
                    path = shell.SHGetPathFromIDList(pidl)
                    to_csv_list.append((self.computer_name,
                                        "opensaveMRU",
                                        item[VALUE_LAST_WRITE_TIME],
                                        "HKEY_USERS",
                                        item[VALUE_PATH],
                                        item[VALUE_NAME],
                                        item[KEY_VALUE_STR],
                                        registry_obj.get_str_type(item[VALUE_TYPE]), path))
        with open(self.output_dir + "\\" + self.computer_name + "_opensaveMRU" + self.rand_ext, "wb") as output:
            csv_writer = get_csv_writer(output)
            write_list_to_csv(to_csv_list, csv_writer)

    def csv_registry_services(self):
        """Extracts services"""
        self.logger.info("Extracting services")
        path = r"System\CurrentControlSet\Services"
        to_csv_list = [("COMPUTER_NAME", "TYPE", "LAST_WRITE_TIME", "HIVE", "KEY_PATH", "ATTR_NAME", "REG_TYPE",
                        "ATTR_TYPE", "ATTR_DATA")]
        self._generate_hklm_csv_list(to_csv_list, "registry_services", path)
        with open(self.output_dir + "\\" + self.computer_name + "_registry_services" + self.rand_ext, "wb") as output:
            csv_writer = get_csv_writer(output)
            write_list_to_csv(to_csv_list, csv_writer)

    def csv_recent_docs(self):
        """Extracts information about recently opened files saved location and opened date"""
        self.logger.info("Extracting recent docs")
        path = r"Software\Microsoft\Windows\CurrentVersion\Explorer\RecentDocs"
        hive_list = self._get_list_from_registry_key(registry_obj.HKEY_USERS, path)
        to_csv_list = [("COMPUTER_NAME", "TYPE", "LAST_WRITE_TIME", "HIVE", "KEY_PATH", "ATTR_NAME", "REG_TYPE",
                        "ATTR_TYPE", "ATTR_DATA")]
        for item in hive_list:
            if item[KEY_VALUE_STR] == "VALUE":
                if item[VALUE_NAME] != "MRUListEx":
                    values_decoded = decode_recent_docs_mru(item[VALUE_DATA])
                    # one row per decoded element (document name, link name)
                    for value_decoded in values_decoded:
                        to_csv_list.append((self.computer_name,
                                            "recent_docs",
                                            item[VALUE_LAST_WRITE_TIME],
                                            "HKEY_USERS",
                                            item[VALUE_PATH],
                                            item[VALUE_NAME],
                                            item[KEY_VALUE_STR],
                                            registry_obj.get_str_type(item[VALUE_TYPE]),
                                            value_decoded))
        with open(self.output_dir + "\\" + self.computer_name + "_recent_docs" + self.rand_ext, "wb") as output:
            csv_writer = get_csv_writer(output)
            write_list_to_csv(to_csv_list, csv_writer)

    def csv_installer_folder(self):
        """Extracts information about folders which are created at installation"""
        self.logger.info("Extracting installer folders")
        path = r"Software\Microsoft\Windows\CurrentVersion\Installer\Folders"
        to_csv_list = [("COMPUTER_NAME", "TYPE", "LAST_WRITE_TIME", "HIVE", "KEY_PATH", "ATTR_NAME", "REG_TYPE",
                        "ATTR_TYPE", "ATTR_DATA")]
        self._generate_hklm_csv_list(to_csv_list, "installer_folder", path)
        with open(self.output_dir + "\\" + self.computer_name + "_installer_folder" + self.rand_ext, "wb") as output:
            csv_writer = get_csv_writer(output)
            write_list_to_csv(to_csv_list, csv_writer)

    def csv_shell_bags(self):
        """
        Extracts shellbags: size, view, icon and position of graphical windows
        In particular, executed graphical programs will leave a key here
        """
        self.logger.info("Extracting shell bags")
        paths = [r"Software\Microsoft\Windows\Shell\Bags",
                 r"Software\Microsoft\Windows\Shell\BagMRU"]
        # Vista+ stores these under UsrClass.dat
        paths_usrclass = [r"Local Settings\Software\Microsoft\Windows\Shell\Bags",
                          r"Local Settings\Software\Microsoft\Windows\Shell\BagMRU"]
        hive_list = []
        for path in paths:
            hive_list += self._get_list_from_registry_key(registry_obj.HKEY_USERS, path)
        for path in paths_usrclass:
            hive_list += self._get_list_from_registry_key(registry_obj.HKEY_USERS, path, is_usrclass=True)
        to_csv_list = [("COMPUTER_NAME", "TYPE", "LAST_WRITE_TIME", "HIVE", "KEY_PATH", "ATTR_NAME", "REG_TYPE",
                        "ATTR_TYPE", "ATTR_DATA")]
        for item in hive_list:
            if "ItemPos" in item[VALUE_NAME]:
                try:
                    data = decode_shellbag_itempos_data(item[VALUE_DATA])
                except IndexError:
                    self.logger.error("Error in shellbag data format for " + item[VALUE_NAME])
                    data = None
                if data:
                    if item[KEY_VALUE_STR] == "VALUE":
                        # one row per decoded field of each itempos entry
                        # (note: the loop variable shadows "data")
                        for data in data:
                            for d in data:
                                to_csv_list.append((self.computer_name,
                                                    "shellbags",
                                                    item[VALUE_LAST_WRITE_TIME],
                                                    "HKEY_USERS",
                                                    item[VALUE_PATH],
                                                    item[VALUE_NAME],
                                                    item[KEY_VALUE_STR],
                                                    registry_obj.get_str_type(item[VALUE_TYPE]),
                                                    d))
            else:
                if item[KEY_VALUE_STR] == "VALUE":
                    to_csv_list.append((self.computer_name,
                                        "shellbags",
                                        item[VALUE_LAST_WRITE_TIME],
                                        "HKEY_USERS",
                                        item[VALUE_PATH],
                                        item[VALUE_NAME],
                                        item[KEY_VALUE_STR],
                                        registry_obj.get_str_type(item[VALUE_TYPE]),
                                        item[VALUE_DATA]))
        with open(self.output_dir + "\\" + self.computer_name + "_shellbags" + self.rand_ext, "wb") as output:
            csv_writer = get_csv_writer(output)
            write_list_to_csv(to_csv_list, csv_writer)

    def csv_startup_programs(self):
        """Extracts programs running at startup from various keys"""
        self.logger.info("Extracting startup programs")
        software = "Software"
        wow = r"\Wow6432Node"
        ts_run = (r"\Microsoft\Windows NT\CurrentVersion\Terminal Server\Install\Software"
                  r"\Microsoft\Windows\CurrentVersion\Run")
        ts_run_once = (r"\Microsoft\Windows NT\CurrentVersion\Terminal Server\Install\Software"
                       r"\Microsoft\Windows\CurrentVersion\RunOnce")
        paths = [r"\Microsoft\Windows\CurrentVersion\Run",
                 r"\Microsoft\Windows\CurrentVersion\RunOnce",
                 r"\Microsoft\Windows\CurrentVersion\RunOnceEx",
                 r"\Microsoft\Windows\CurrentVersion\RunServices",
                 r"\Microsoft\Windows\CurrentVersion\RunServicesOnce",
                 r"\Microsoft\Windows\CurrentVersion\Policies\Explorer\Run",
                 ts_run,
                 ts_run_once]
        to_csv_list = [("COMPUTER_NAME", "TYPE", "LAST_WRITE_TIME", "HIVE", "KEY_PATH", "ATTR_NAME", "REG_TYPE",
                        "ATTR_TYPE", "ATTR_DATA")]
        for path in paths:
            full_path = software + path
            self._generate_hklm_csv_list(to_csv_list, "startup", full_path)
            full_path = software + wow + path
            self._generate_hklm_csv_list(to_csv_list, "startup", full_path)
        # NOTE(review): this list is identical to the one above - presumably
        # kept separate for readability; could be reused.
        paths = [r"\Microsoft\Windows\CurrentVersion\Run",
                 r"\Microsoft\Windows\CurrentVersion\RunOnce",
                 r"\Microsoft\Windows\CurrentVersion\RunOnceEx",
                 r"\Microsoft\Windows\CurrentVersion\RunServices",
                 r"\Microsoft\Windows\CurrentVersion\RunServicesOnce",
                 r"\Microsoft\Windows\CurrentVersion\Policies\Explorer\Run",
                 ts_run,
                 ts_run_once]
        for path in paths:
            full_path = software + path
            self._generate_hku_csv_list(to_csv_list, "startup", full_path)
            full_path = software + wow + path
            self._generate_hku_csv_list(to_csv_list, "startup", full_path)
        if self.get_autoruns:
            # hash and archive the referenced binaries; header is re-added
            # because _get_files_and_hashes consumes header-less rows
            to_csv_list = self._get_files_and_hashes(to_csv_list[1:])
            to_csv_list.insert(0,
                               ("COMPUTER_NAME", "TYPE", "LAST_WRITE_TIME", "HIVE", "KEY_PATH", "ATTR_NAME", "REG_TYPE",
                                "ATTR_TYPE", "ATTR_DATA", "MD5", "SHA1", "SHA256")
                               )
        with open(self.output_dir + "\\" + self.computer_name + "_startup" + self.rand_ext, "wb") as output:
            csv_writer = get_csv_writer(output)
            write_list_to_csv(to_csv_list, csv_writer)

    def csv_installed_components(self):
        """
        Extracts installed components key
        When an installed component key is in HKLM but not in HKCU, the path specified in HKLM will be added in HKCU
        and will be executed by the system
        """
        self.logger.info("Extracting installed components")
        path = r"Software\Microsoft\Active Setup\Installed Components"
        to_csv_list = [("COMPUTER_NAME", "TYPE", "LAST_WRITE_TIME", "HIVE", "KEY_PATH", "ATTR_NAME", "REG_TYPE",
                        "ATTR_TYPE", "ATTR_DATA")]
        self._generate_hklm_csv_list(to_csv_list, "installed_components", path)
        with open(self.output_dir + "\\" + self.computer_name + "_installed_components" + self.rand_ext, "wb") as output:
            csv_writer = get_csv_writer(output)
            write_list_to_csv(to_csv_list, csv_writer)

    def csv_winlogon_values(self):
        """
        Extracts winlogon values, in particular UserInit, where the specified executable will be executed at
        system startup
        """
        self.logger.info("Extracting winlogon values")
        path = r"Software\Microsoft\Windows NT\CurrentVersion\Winlogon"
        to_csv_list = [("COMPUTER_NAME", "TYPE", "LAST_WRITE_TIME", "HIVE", "KEY_PATH", "ATTR_NAME", "REG_TYPE",
                        "ATTR_TYPE", "ATTR_DATA")]
        self._generate_hklm_csv_list(to_csv_list, "winlogon_values", path)
        self._generate_hku_csv_list(to_csv_list, "winlogon_values", path)
        with open(self.output_dir + "\\" + self.computer_name + "_winlogon_values" + self.rand_ext, "wb") as output:
            csv_writer = get_csv_writer(output)
            write_list_to_csv(to_csv_list, csv_writer)

    def csv_windows_values(self):
        """
        Extracts windows values, in particular AppInit_DLLs, where any DLL specified here will be loaded by any
        application
        """
        self.logger.info("Extracting windows values")
        paths = [r"Software\Microsoft\Windows NT\CurrentVersion\Windows",
                 r"Software\Wow6432Node\Microsoft\Windows NT\CurrentVersion\Windows"]
        to_csv_list = [("COMPUTER_NAME", "TYPE", "LAST_WRITE_TIME", "HIVE", "KEY_PATH", "ATTR_NAME", "REG_TYPE",
                        "ATTR_TYPE", "ATTR_DATA")]
        for path in paths:
            self._generate_hklm_csv_list(to_csv_list, "windows_values", path)
            # self._generate_hku_csv_list(to_csv_list, "windows_values", path)
        with open(self.output_dir + "\\" + self.computer_name + "_windows_values" + self.rand_ext, "wb") as output:
            csv_writer = get_csv_writer(output)
            write_list_to_csv(to_csv_list, csv_writer)

    def csv_usb_history(self):
        """Extracts information about USB devices that have been connected since the system installation"""
        self.logger.info("Extracting USB history")
        hive_list = self._get_list_from_registry_key(
            registry_obj.HKEY_LOCAL_MACHINE,
            r"SYSTEM\CurrentControlSet\Control\DeviceClasses\{53f56307-b6bf-11d0-94f2-00a0c91efb8b}",
            is_recursive=False)
        to_csv_list = [("COMPUTER_NAME", "TYPE", "LAST_WRITE_TIME", "HIVE", "KEY_PATH", "KEY_VALUE", "USB_ID")]
        for item in hive_list:
            if item[KEY_VALUE_STR] == "KEY":
                usb_decoded = get_usb_key_info(item[KEY_PATH])
                to_csv_list.append((self.computer_name,
                                    "USBHistory",
                                    item[KEY_LAST_WRITE_TIME],
                                    "HKEY_LOCAL_MACHINE",
                                    item[KEY_PATH],
                                    item[KEY_VALUE_STR],
                                    usb_decoded))
        with open(self.output_dir + "\\" + self.computer_name + "_USBHistory" + self.rand_ext, "wb") as output:
            csv_writer = get_csv_writer(output)
            write_list_to_csv(to_csv_list, csv_writer)

    def csv_run_mru_start(self):
        """Extracts run MRU, containing the last 26 oommands executed using the RUN command"""
        self.logger.info("Extracting Run MRU")
        path = r"Software\Microsoft\Windows\CurrentVersion\Explorer\RunMRU"
        to_csv_list = [("COMPUTER_NAME", "TYPE", "LAST_WRITE_TIME", "HIVE", "KEY_PATH", "ATTR_NAME", "REG_TYPE",
                        "ATTR_TYPE", "ATTR_DATA")]
        self._generate_hku_csv_list(to_csv_list, "run_MRU_start", path)
        with open(self.output_dir + "\\" + self.computer_name + "_run_MRU_start" + self.rand_ext, "wb") as output:
            csv_writer = get_csv_writer(output)
            write_list_to_csv(to_csv_list, csv_writer)

    def csv_custom_registry_keys(self):
        """
        Extracts custom registry keys, the user specifies whether it should be recursive or not.
        The list of registry keys to extract should be comma-separated
        """
        if self.exec_custom_registry_keys:
            self.logger.info("Extracting custom registry keys")
            to_csv_list = [
                ("COMPUTER_NAME", "TYPE", "LAST_WRITE_TIME", "HIVE", "KEY_PATH", "ATTR_NAME", "REG_TYPE",
                 "ATTR_TYPE", "ATTR_DATA")]
            for paths in reader([self.custom_registry_keys]):  # used as a kind of unpack
                for path in paths:
                    # first path component selects the hive
                    temp = path.split("\\")
                    hive = temp[0].upper()
                    path = "\\".join(temp[1:])
                    if hive in ("HKLM", "HKEY_LOCAL_MACHINE"):
                        self._generate_hklm_csv_list(to_csv_list, "custom_registry_key", path,
                                                     is_recursive=self.registry_recursive)
                    elif hive in ("HKU", "HKEY_USERS"):
                        self._generate_hku_csv_list(to_csv_list, "custom_registry_key", path,
                                                    is_recursive=self.registry_recursive)
                    else:  # error
                        self.logger.warn("Must specify HKLM/HKEY_LOCAL_MACHINE or HKU/HKEY_USERS as hive")
                        return
            with open(self.output_dir + "\\" + self.computer_name + "_custom_registry_keys" + self.rand_ext, "wb") as output:
                csv_writer = get_csv_writer(output)
                write_list_to_csv(to_csv_list, csv_writer)
| gpl-3.0 |
n0trax/ansible | lib/ansible/module_utils/cnos_devicerules.py | 87 | 91037 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by
# Ansible still belong to the author of the module, and may assign their
# own license to the complete work.
#
# Copyright (C) 2017 Lenovo, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Contains device rule and methods
# Lenovo Networking
def getRuleString(deviceType, variableId):
    """Return the validation rule string for *variableId* on *deviceType*.

    :param deviceType: device model key (e.g. 'g8272_cnos', 'NE2572').
        Any unrecognised value falls back to the ``default_cnos`` rule set,
        matching the final ``else`` branch of the original implementation.
    :param variableId: name of the configuration variable to validate.
    :returns: ``"<variableId>:<rule>"`` when the variable is known for the
        device, otherwise the literal message
        ``"The variable <variableId> is not supported"`` (note: the message
        intentionally replaces, not appends to, the ``<variableId>:`` prefix).
    """
    # Map each supported device type to its rule dictionary.  The mapping is
    # built at call time, so rule dictionaries defined later in this module
    # resolve correctly.
    device_rules = {
        'g8272_cnos': g8272_cnos,
        'g8296_cnos': g8296_cnos,
        'g8332_cnos': g8332_cnos,
        'NE1072T': NE1072T,
        'NE1032': NE1032,
        'NE1032T': NE1032T,
        'NE10032': NE10032,
        'NE2572': NE2572,
    }
    # Unknown device types use the default CNOS rule set.
    rules = device_rules.get(deviceType, default_cnos)
    if variableId in rules:
        return variableId + ":" + rules[variableId]
    return "The variable " + variableId + " is not supported"
# EOM
# Fallback rule set used by getRuleString() for any device type without a
# dedicated dictionary.  Each entry maps a variable identifier to a rule of
# the form "<CHECK_TYPE>:<constraint>" (e.g. integer range, option list).
default_cnos = {
    'vlan_id': 'INTEGER_VALUE:1-3999',
    'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999',
    'vlan_name': 'TEXT:',
    'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6',
    'vlan_state': 'TEXT_OPTIONS:active,suspend',
    'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25',
    'vlan_querier': 'IPV4Address:',
    'vlan_querier_timeout': 'INTEGER_VALUE:1-65535',
    'vlan_query_interval': 'INTEGER_VALUE:1-18000',
    'vlan_query_max_response_time': 'INTEGER_VALUE:1-25',
    'vlan_report_suppression': 'INTEGER_VALUE:1-25',
    'vlan_robustness_variable': 'INTEGER_VALUE:1-7',
    'vlan_startup_query_count': 'INTEGER_VALUE:1-10',
    'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000',
    'vlan_snooping_version': 'INTEGER_VALUE:2-3',
    'vlan_access_map_name': 'TEXT: ',  # NOTE(review): value has a trailing space — confirm intentional
    'vlan_ethernet_interface': 'TEXT:',
    'vlan_portagg_number': 'INTEGER_VALUE:1-4096',
    'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect',
    'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only',
    'vlan_filter_name': 'TEXT:',
    'vlag_auto_recovery': 'INTEGER_VALUE:240-3600',
    'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict',
    'vlag_instance': 'INTEGER_VALUE:1-64',
    'vlag_port_aggregation': 'INTEGER_VALUE:1-4096',
    'vlag_priority': 'INTEGER_VALUE:0-65535',
    'vlag_startup_delay': 'INTEGER_VALUE:0-3600',
    'vlag_tier_id': 'INTEGER_VALUE:1-512',
    'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\
keepalive-interval,peer-ip,retry-interval',
    'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24',
    'vlag_keepalive_interval': 'INTEGER_VALUE:2-300',
    'vlag_retry_interval': 'INTEGER_VALUE:1-300',
    'vlag_peerip': 'IPV4Address:',
    'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management',
    'bgp_as_number': 'NO_VALIDATION:1-4294967295',
    'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
    'bgp_bgp_local_count': 'INTEGER_VALUE:2-64',
    'cluster_id_as_ip': 'IPV4Address:',
    'cluster_id_as_number': 'NO_VALIDATION:1-4294967295',
    'confederation_identifier': 'INTEGER_VALUE:1-65535',
    'condeferation_peers_as': 'INTEGER_VALUE:1-65535',  # NOTE(review): key misspelled ("condeferation") — kept for compatibility
    'stalepath_delay_value': 'INTEGER_VALUE:1-3600',
    'maxas_limit_as': 'INTEGER_VALUE:1-2000',
    'neighbor_ipaddress': 'IPV4Address:',
    'neighbor_as': 'NO_VALIDATION:1-4294967295',
    'router_id': 'IPV4Address:',
    'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600',
    'bgp_holdtime': 'INTEGER_VALUE:0-3600',
    'bgp_aggregate_prefix': 'IPV4AddressWithMask:',
    'addrfamily_routemap_name': 'TEXT:',
    'reachability_half_life': 'INTEGER_VALUE:1-45',
    'start_reuse_route_value': 'INTEGER_VALUE:1-20000',
    'start_suppress_route_value': 'INTEGER_VALUE:1-20000',
    'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255',
    'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45',
    'distance_external_AS': 'INTEGER_VALUE:1-255',
    'distance_internal_AS': 'INTEGER_VALUE:1-255',
    'distance_local_routes': 'INTEGER_VALUE:1-255',
    'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp',
    'maxpath_numbers': 'INTEGER_VALUE:2-32',
    'network_ip_prefix_with_mask': 'IPV4AddressWithMask:',
    'network_ip_prefix_value': 'IPV4Address:',
    'network_ip_prefix_mask': 'IPV4Address:',
    'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295',
    'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295',
    'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\
static',
    'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10',
    'bgp_neighbor_af_filtername': 'TEXT:',
    'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870',
    'bgp_neighbor_af_prefixname': 'TEXT:',
    'bgp_neighbor_af_routemap': 'TEXT:',
    'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
    'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535',
    'bgp_neighbor_description': 'TEXT:',
    'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255',
    'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295',
    'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96',
    'bgp_neighbor_password': 'TEXT:',
    'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600',
    'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600',
    'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254',
    'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\
vlan',
    'bgp_neighbor_update_ethernet': 'TEXT:',
    'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7',
    'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094',
    'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535',
    'ethernet_interface_value': 'INTEGER_VALUE:1-32',
    'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-32',
    'ethernet_interface_string': 'TEXT:',
    'loopback_interface_value': 'INTEGER_VALUE:0-7',
    'mgmt_interface_value': 'INTEGER_VALUE:0-0',
    'vlan_interface_value': 'INTEGER_VALUE:1-4094',
    'portchannel_interface_value': 'INTEGER_VALUE:1-4096',
    'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096',
    'portchannel_interface_string': 'TEXT:',
    'aggregation_group_no': 'INTEGER_VALUE:1-4096',
    'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive',
    'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\
ipv6,neighbor',
    'bfd_interval': 'INTEGER_VALUE:50-999',
    'bfd_minrx': 'INTEGER_VALUE:50-999',
    'bfd_ multiplier': 'INTEGER_VALUE:3-50',  # NOTE(review): key contains a space — kept for compatibility with callers
    'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,\
interval',
    'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\
meticulous-keyed-md5,meticulous-keyed-sha1,simple',
    'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id',
    'bfd_key_chain': 'TEXT:',
    'bfd_key_id': 'INTEGER_VALUE:0-255',
    'bfd_key_name': 'TEXT:',
    'bfd_neighbor_ip': 'TEXT:',
    'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\
non-persistent',
    'bfd_access_vlan': 'INTEGER_VALUE:1-3999',
    'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,\
trunk',
    'trunk_options': 'TEXT_OPTIONS:allowed,native',
    'trunk_vlanid': 'INTEGER_VALUE:1-3999',
    'portCh_description': 'TEXT:',
    'duplex_option': 'TEXT_OPTIONS:auto,full,half',
    'flowcontrol_options': 'TEXT_OPTIONS:receive,send',
    'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\
arp,dhcp,ospf,port,port-unreachable,redirects,router,\
unreachables',
    'accessgroup_name': 'TEXT:',
    'portchannel_ipv4': 'IPV4Address:',
    'portchannel_ipv4_mask': 'TEXT:',
    'arp_ipaddress': 'IPV4Address:',
    'arp_macaddress': 'TEXT:',
    'arp_timeout_value': 'INTEGER_VALUE:60-28800',
    'relay_ipaddress': 'IPV4Address:',
    'ip_ospf_options': 'TEXT_OPTIONS:authentication,\
authentication-key,bfd,cost,database-filter,dead-interval,\
hello-interval,message-digest-key,mtu,mtu-ignore,network,\
passive-interface,priority,retransmit-interval,shutdown,\
transmit-delay',
    'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295',
    'ospf_id_ipaddres_value': 'IPV4Address:',
    'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\
timeout',
    'port_priority': 'INTEGER_VALUE:1-65535',
    'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\
trap-notification',
    'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\
mac-phy-status,management-address,max-frame-size,\
port-description,port-protocol-vlan,port-vlan,power-mdi,\
protocol-identity,system-capabilities,system-description,\
system-name,vid-management,vlan-name',
    'load_interval_delay': 'INTEGER_VALUE:30-300',
    'load_interval_counter': 'INTEGER_VALUE:1-3',
    'mac_accessgroup_name': 'TEXT:',
    'mac_address': 'TEXT:',
    'microburst_threshold': 'NO_VALIDATION:1-4294967295',
    'mtu_value': 'INTEGER_VALUE:64-9216',
    'service_instance': 'NO_VALIDATION:1-4294967295',
    'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,\
input,output,type',
    'service_policy_name': 'TEXT:',
    'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\
cost,disable,enable,guard,link-type,mst,port,port-priority,\
vlan',
    'spanning_tree_cost': 'NO_VALIDATION:1-200000000',
    'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999',
    'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\
192,224',
    'portchannel_ipv6_neighbor_mac': 'TEXT:',
    'portchannel_ipv6_neighbor_address': 'IPV6Address:',
    'portchannel_ipv6_linklocal': 'IPV6Address:',
    'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094',
    'portchannel_ipv6_dhcp_ethernet': 'TEXT:',
    'portchannel_ipv6_dhcp': 'IPV6Address:',
    'portchannel_ipv6_address': 'IPV6Address:',
    'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\
link-local,nd,neighbor',
    'interface_speed': 'TEXT_OPTIONS:1000,10000,100000,25000,40000,50000,auto',
    'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\
unicast',
    'stormcontrol_level': 'FLOAT:',
    'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\
egress-only',
    'vrrp_id': 'INTEGER_VALUE:1-255',
}
# Rule set for deviceType 'NE2572'.  Largely mirrors default_cnos; notable
# differences visible here: Ethernet interface range 1-54 and an
# interface_speed option list without '1000'.
NE2572 = {
    'vlan_id': 'INTEGER_VALUE:1-3999',
    'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999',
    'vlan_name': 'TEXT:',
    'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6',
    'vlan_state': 'TEXT_OPTIONS:active,suspend',
    'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25',
    'vlan_querier': 'IPV4Address:',
    'vlan_querier_timeout': 'INTEGER_VALUE:1-65535',
    'vlan_query_interval': 'INTEGER_VALUE:1-18000',
    'vlan_query_max_response_time': 'INTEGER_VALUE:1-25',
    'vlan_report_suppression': 'INTEGER_VALUE:1-25',
    'vlan_robustness_variable': 'INTEGER_VALUE:1-7',
    'vlan_startup_query_count': 'INTEGER_VALUE:1-10',
    'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000',
    'vlan_snooping_version': 'INTEGER_VALUE:2-3',
    'vlan_access_map_name': 'TEXT: ',  # NOTE(review): value has a trailing space — confirm intentional
    'vlan_ethernet_interface': 'TEXT:',
    'vlan_portagg_number': 'INTEGER_VALUE:1-4096',
    'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect',
    'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only',
    'vlan_filter_name': 'TEXT:',
    'vlag_auto_recovery': 'INTEGER_VALUE:240-3600',
    'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict',
    'vlag_instance': 'INTEGER_VALUE:1-64',
    'vlag_port_aggregation': 'INTEGER_VALUE:1-4096',
    'vlag_priority': 'INTEGER_VALUE:0-65535',
    'vlag_startup_delay': 'INTEGER_VALUE:0-3600',
    'vlag_tier_id': 'INTEGER_VALUE:1-512',
    'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\
keepalive-interval,peer-ip,retry-interval',
    'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24',
    'vlag_keepalive_interval': 'INTEGER_VALUE:2-300',
    'vlag_retry_interval': 'INTEGER_VALUE:1-300',
    'vlag_peerip': 'IPV4Address:',
    'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management',
    'bgp_as_number': 'NO_VALIDATION:1-4294967295',
    'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
    'bgp_bgp_local_count': 'INTEGER_VALUE:2-64',
    'cluster_id_as_ip': 'IPV4Address:',
    'cluster_id_as_number': 'NO_VALIDATION:1-4294967295',
    'confederation_identifier': 'INTEGER_VALUE:1-65535',
    'condeferation_peers_as': 'INTEGER_VALUE:1-65535',  # NOTE(review): key misspelled ("condeferation") — kept for compatibility
    'stalepath_delay_value': 'INTEGER_VALUE:1-3600',
    'maxas_limit_as': 'INTEGER_VALUE:1-2000',
    'neighbor_ipaddress': 'IPV4Address:',
    'neighbor_as': 'NO_VALIDATION:1-4294967295',
    'router_id': 'IPV4Address:',
    'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600',
    'bgp_holdtime': 'INTEGER_VALUE:0-3600',
    'bgp_aggregate_prefix': 'IPV4AddressWithMask:',
    'addrfamily_routemap_name': 'TEXT:',
    'reachability_half_life': 'INTEGER_VALUE:1-45',
    'start_reuse_route_value': 'INTEGER_VALUE:1-20000',
    'start_suppress_route_value': 'INTEGER_VALUE:1-20000',
    'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255',
    'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45',
    'distance_external_AS': 'INTEGER_VALUE:1-255',
    'distance_internal_AS': 'INTEGER_VALUE:1-255',
    'distance_local_routes': 'INTEGER_VALUE:1-255',
    'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp',
    'maxpath_numbers': 'INTEGER_VALUE:2-32',
    'network_ip_prefix_with_mask': 'IPV4AddressWithMask:',
    'network_ip_prefix_value': 'IPV4Address:',
    'network_ip_prefix_mask': 'IPV4Address:',
    'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295',
    'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295',
    'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\
static',
    'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10',
    'bgp_neighbor_af_filtername': 'TEXT:',
    'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870',
    'bgp_neighbor_af_prefixname': 'TEXT:',
    'bgp_neighbor_af_routemap': 'TEXT:',
    'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
    'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535',
    'bgp_neighbor_description': 'TEXT:',
    'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255',
    'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295',
    'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96',
    'bgp_neighbor_password': 'TEXT:',
    'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600',
    'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600',
    'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254',
    'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\
vlan',
    'bgp_neighbor_update_ethernet': 'TEXT:',
    'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7',
    'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094',
    'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535',
    'ethernet_interface_value': 'INTEGER_VALUE:1-54',
    'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-54',
    'ethernet_interface_string': 'TEXT:',
    'loopback_interface_value': 'INTEGER_VALUE:0-7',
    'mgmt_interface_value': 'INTEGER_VALUE:0-0',
    'vlan_interface_value': 'INTEGER_VALUE:1-4094',
    'portchannel_interface_value': 'INTEGER_VALUE:1-4096',
    'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096',
    'portchannel_interface_string': 'TEXT:',
    'aggregation_group_no': 'INTEGER_VALUE:1-4096',
    'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive',
    'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\
ipv6,neighbor',
    'bfd_interval': 'INTEGER_VALUE:50-999',
    'bfd_minrx': 'INTEGER_VALUE:50-999',
    'bfd_ multiplier': 'INTEGER_VALUE:3-50',  # NOTE(review): key contains a space — kept for compatibility with callers
    'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval',
    'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\
meticulous-keyed-md5,meticulous-keyed-sha1,simple',
    'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id',
    'bfd_key_chain': 'TEXT:',
    'bfd_key_id': 'INTEGER_VALUE:0-255',
    'bfd_key_name': 'TEXT:',
    'bfd_neighbor_ip': 'TEXT:',
    'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\
non-persistent',
    'bfd_access_vlan': 'INTEGER_VALUE:1-3999',
    'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk',
    'trunk_options': 'TEXT_OPTIONS:allowed,native',
    'trunk_vlanid': 'INTEGER_VALUE:1-3999',
    'portCh_description': 'TEXT:',
    'duplex_option': 'TEXT_OPTIONS:auto,full,half',
    'flowcontrol_options': 'TEXT_OPTIONS:receive,send',
    'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\
arp,dhcp,ospf,port,port-unreachable,redirects,router,\
unreachables',
    'accessgroup_name': 'TEXT:',
    'portchannel_ipv4': 'IPV4Address:',
    'portchannel_ipv4_mask': 'TEXT:',
    'arp_ipaddress': 'IPV4Address:',
    'arp_macaddress': 'TEXT:',
    'arp_timeout_value': 'INTEGER_VALUE:60-28800',
    'relay_ipaddress': 'IPV4Address:',
    'ip_ospf_options': 'TEXT_OPTIONS:authentication,\
authentication-key,bfd,cost,database-filter,dead-interval,\
hello-interval,message-digest-key,mtu,mtu-ignore,network,\
passive-interface,priority,retransmit-interval,shutdown,\
transmit-delay',
    'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295',
    'ospf_id_ipaddres_value': 'IPV4Address:',
    'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\
timeout',
    'port_priority': 'INTEGER_VALUE:1-65535',
    'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\
trap-notification',
    'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\
mac-phy-status,management-address,max-frame-size,\
port-description,port-protocol-vlan,port-vlan,power-mdi,\
protocol-identity,system-capabilities,system-description,\
system-name,vid-management,vlan-name',
    'load_interval_delay': 'INTEGER_VALUE:30-300',
    'load_interval_counter': 'INTEGER_VALUE:1-3',
    'mac_accessgroup_name': 'TEXT:',
    'mac_address': 'TEXT:',
    'microburst_threshold': 'NO_VALIDATION:1-4294967295',
    'mtu_value': 'INTEGER_VALUE:64-9216',
    'service_instance': 'NO_VALIDATION:1-4294967295',
    'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,input,\
output,type',
    'service_policy_name': 'TEXT:',
    'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\
cost,disable,enable,guard,link-type,mst,port,port-priority,vlan',
    'spanning_tree_cost': 'NO_VALIDATION:1-200000000',
    'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999',
    'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\
192,224',
    'portchannel_ipv6_neighbor_mac': 'TEXT:',
    'portchannel_ipv6_neighbor_address': 'IPV6Address:',
    'portchannel_ipv6_linklocal': 'IPV6Address:',
    'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094',
    'portchannel_ipv6_dhcp_ethernet': 'TEXT:',
    'portchannel_ipv6_dhcp': 'IPV6Address:',
    'portchannel_ipv6_address': 'IPV6Address:',
    'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\
link-local,nd,neighbor',
    'interface_speed': 'TEXT_OPTIONS:10000,100000,25000,40000,50000,auto',
    'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\
unicast',
    'stormcontrol_level': 'FLOAT:',
    'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\
egress-only',
    'vrrp_id': 'INTEGER_VALUE:1-255',
}
# Rule set for deviceType 'NE1032T'.  Largely mirrors default_cnos with
# Ethernet interface range 1-32; interface_speed includes '1000'.
NE1032T = {
    'vlan_id': 'INTEGER_VALUE:1-3999',
    'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999',
    'vlan_name': 'TEXT:',
    'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6',
    'vlan_state': 'TEXT_OPTIONS:active,suspend',
    'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25',
    'vlan_querier': 'IPV4Address:',
    'vlan_querier_timeout': 'INTEGER_VALUE:1-65535',
    'vlan_query_interval': 'INTEGER_VALUE:1-18000',
    'vlan_query_max_response_time': 'INTEGER_VALUE:1-25',
    'vlan_report_suppression': 'INTEGER_VALUE:1-25',
    'vlan_robustness_variable': 'INTEGER_VALUE:1-7',
    'vlan_startup_query_count': 'INTEGER_VALUE:1-10',
    'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000',
    'vlan_snooping_version': 'INTEGER_VALUE:2-3',
    'vlan_access_map_name': 'TEXT: ',  # NOTE(review): value has a trailing space — confirm intentional
    'vlan_ethernet_interface': 'TEXT:',
    'vlan_portagg_number': 'INTEGER_VALUE:1-4096',
    'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect',
    'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only',
    'vlan_filter_name': 'TEXT:',
    'vlag_auto_recovery': 'INTEGER_VALUE:240-3600',
    'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict',
    'vlag_instance': 'INTEGER_VALUE:1-64',
    'vlag_port_aggregation': 'INTEGER_VALUE:1-4096',
    'vlag_priority': 'INTEGER_VALUE:0-65535',
    'vlag_startup_delay': 'INTEGER_VALUE:0-3600',
    'vlag_tier_id': 'INTEGER_VALUE:1-512',
    'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\
keepalive-interval,peer-ip,retry-interval',
    'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24',
    'vlag_keepalive_interval': 'INTEGER_VALUE:2-300',
    'vlag_retry_interval': 'INTEGER_VALUE:1-300',
    'vlag_peerip': 'IPV4Address:',
    'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management',
    'bgp_as_number': 'NO_VALIDATION:1-4294967295',
    'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
    'bgp_bgp_local_count': 'INTEGER_VALUE:2-64',
    'cluster_id_as_ip': 'IPV4Address:',
    'cluster_id_as_number': 'NO_VALIDATION:1-4294967295',
    'confederation_identifier': 'INTEGER_VALUE:1-65535',
    'condeferation_peers_as': 'INTEGER_VALUE:1-65535',  # NOTE(review): key misspelled ("condeferation") — kept for compatibility
    'stalepath_delay_value': 'INTEGER_VALUE:1-3600',
    'maxas_limit_as': 'INTEGER_VALUE:1-2000',
    'neighbor_ipaddress': 'IPV4Address:',
    'neighbor_as': 'NO_VALIDATION:1-4294967295',
    'router_id': 'IPV4Address:',
    'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600',
    'bgp_holdtime': 'INTEGER_VALUE:0-3600',
    'bgp_aggregate_prefix': 'IPV4AddressWithMask:',
    'addrfamily_routemap_name': 'TEXT:',
    'reachability_half_life': 'INTEGER_VALUE:1-45',
    'start_reuse_route_value': 'INTEGER_VALUE:1-20000',
    'start_suppress_route_value': 'INTEGER_VALUE:1-20000',
    'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255',
    'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45',
    'distance_external_AS': 'INTEGER_VALUE:1-255',
    'distance_internal_AS': 'INTEGER_VALUE:1-255',
    'distance_local_routes': 'INTEGER_VALUE:1-255',
    'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp',
    'maxpath_numbers': 'INTEGER_VALUE:2-32',
    'network_ip_prefix_with_mask': 'IPV4AddressWithMask:',
    'network_ip_prefix_value': 'IPV4Address:',
    'network_ip_prefix_mask': 'IPV4Address:',
    'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295',
    'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295',
    'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\
static',
    'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10',
    'bgp_neighbor_af_filtername': 'TEXT:',
    'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870',
    'bgp_neighbor_af_prefixname': 'TEXT:',
    'bgp_neighbor_af_routemap': 'TEXT:',
    'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
    'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535',
    'bgp_neighbor_description': 'TEXT:',
    'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255',
    'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295',
    'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96',
    'bgp_neighbor_password': 'TEXT:',
    'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600',
    'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600',
    'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254',
    'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\
vlan',
    'bgp_neighbor_update_ethernet': 'TEXT:',
    'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7',
    'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094',
    'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535',
    'ethernet_interface_value': 'INTEGER_VALUE:1-32',
    'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-32',
    'ethernet_interface_string': 'TEXT:',
    'loopback_interface_value': 'INTEGER_VALUE:0-7',
    'mgmt_interface_value': 'INTEGER_VALUE:0-0',
    'vlan_interface_value': 'INTEGER_VALUE:1-4094',
    'portchannel_interface_value': 'INTEGER_VALUE:1-4096',
    'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096',
    'portchannel_interface_string': 'TEXT:',
    'aggregation_group_no': 'INTEGER_VALUE:1-4096',
    'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive',
    'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\
ipv6,neighbor',
    'bfd_interval': 'INTEGER_VALUE:50-999',
    'bfd_minrx': 'INTEGER_VALUE:50-999',
    'bfd_ multiplier': 'INTEGER_VALUE:3-50',  # NOTE(review): key contains a space — kept for compatibility with callers
    'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval',
    'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\
meticulous-keyed-md5,meticulous-keyed-sha1,simple',
    'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id',
    'bfd_key_chain': 'TEXT:',
    'bfd_key_id': 'INTEGER_VALUE:0-255',
    'bfd_key_name': 'TEXT:',
    'bfd_neighbor_ip': 'TEXT:',
    'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\
non-persistent',
    'bfd_access_vlan': 'INTEGER_VALUE:1-3999',
    'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk',
    'trunk_options': 'TEXT_OPTIONS:allowed,native',
    'trunk_vlanid': 'INTEGER_VALUE:1-3999',
    'portCh_description': 'TEXT:',
    'duplex_option': 'TEXT_OPTIONS:auto,full,half',
    'flowcontrol_options': 'TEXT_OPTIONS:receive,send',
    'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\
arp,dhcp,ospf,port,port-unreachable,redirects,router,\
unreachables',
    'accessgroup_name': 'TEXT:',
    'portchannel_ipv4': 'IPV4Address:',
    'portchannel_ipv4_mask': 'TEXT:',
    'arp_ipaddress': 'IPV4Address:',
    'arp_macaddress': 'TEXT:',
    'arp_timeout_value': 'INTEGER_VALUE:60-28800',
    'relay_ipaddress': 'IPV4Address:',
    'ip_ospf_options': 'TEXT_OPTIONS:authentication,\
authentication-key,bfd,cost,database-filter,dead-interval,\
hello-interval,message-digest-key,mtu,mtu-ignore,network,\
passive-interface,priority,retransmit-interval,shutdown,\
transmit-delay',
    'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295',
    'ospf_id_ipaddres_value': 'IPV4Address:',
    'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\
timeout',
    'port_priority': 'INTEGER_VALUE:1-65535',
    'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\
trap-notification',
    'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\
mac-phy-status,management-address,max-frame-size,\
port-description,port-protocol-vlan,port-vlan,power-mdi,\
protocol-identity,system-capabilities,system-description,\
system-name,vid-management,vlan-name',
    'load_interval_delay': 'INTEGER_VALUE:30-300',
    'load_interval_counter': 'INTEGER_VALUE:1-3',
    'mac_accessgroup_name': 'TEXT:',
    'mac_address': 'TEXT:',
    'microburst_threshold': 'NO_VALIDATION:1-4294967295',
    'mtu_value': 'INTEGER_VALUE:64-9216',
    'service_instance': 'NO_VALIDATION:1-4294967295',
    'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,input,\
output,type',
    'service_policy_name': 'TEXT:',
    'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\
cost,disable,enable,guard,link-type,mst,port,port-priority,vlan',
    'spanning_tree_cost': 'NO_VALIDATION:1-200000000',
    'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999',
    'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\
192,224',
    'portchannel_ipv6_neighbor_mac': 'TEXT:',
    'portchannel_ipv6_neighbor_address': 'IPV6Address:',
    'portchannel_ipv6_linklocal': 'IPV6Address:',
    'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094',
    'portchannel_ipv6_dhcp_ethernet': 'TEXT:',
    'portchannel_ipv6_dhcp': 'IPV6Address:',
    'portchannel_ipv6_address': 'IPV6Address:',
    'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\
link-local,nd,neighbor',
    'interface_speed': 'TEXT_OPTIONS:1000,10000,100000,25000,40000,50000,auto',
    'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\
unicast',
    'stormcontrol_level': 'FLOAT:',
    'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\
egress-only',
    'vrrp_id': 'INTEGER_VALUE:1-255',
}
NE1032 = {
'vlan_id': 'INTEGER_VALUE:1-3999',
'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999',
'vlan_name': 'TEXT:',
'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6',
'vlan_state': 'TEXT_OPTIONS:active,suspend',
'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25',
'vlan_querier': 'IPV4Address:',
'vlan_querier_timeout': 'INTEGER_VALUE:1-65535',
'vlan_query_interval': 'INTEGER_VALUE:1-18000',
'vlan_query_max_response_time': 'INTEGER_VALUE:1-25',
'vlan_report_suppression': 'INTEGER_VALUE:1-25',
'vlan_robustness_variable': 'INTEGER_VALUE:1-7',
'vlan_startup_query_count': 'INTEGER_VALUE:1-10',
'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000',
'vlan_snooping_version': 'INTEGER_VALUE:2-3',
'vlan_access_map_name': 'TEXT: ',
'vlan_ethernet_interface': 'TEXT:',
'vlan_portagg_number': 'INTEGER_VALUE:1-4096',
'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect',
'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only',
'vlan_filter_name': 'TEXT:',
'vlag_auto_recovery': 'INTEGER_VALUE:240-3600',
'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict',
'vlag_instance': 'INTEGER_VALUE:1-64',
'vlag_port_aggregation': 'INTEGER_VALUE:1-4096',
'vlag_priority': 'INTEGER_VALUE:0-65535',
'vlag_startup_delay': 'INTEGER_VALUE:0-3600',
'vlag_tier_id': 'INTEGER_VALUE:1-512',
'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\
keepalive-interval,peer-ip,retry-interval',
'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24',
'vlag_keepalive_interval': 'INTEGER_VALUE:2-300',
'vlag_retry_interval': 'INTEGER_VALUE:1-300',
'vlag_peerip': 'IPV4Address:',
'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management',
'bgp_as_number': 'NO_VALIDATION:1-4294967295',
'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
'bgp_bgp_local_count': 'INTEGER_VALUE:2-64',
'cluster_id_as_ip': 'IPV4Address:',
'cluster_id_as_number': 'NO_VALIDATION:1-4294967295',
'confederation_identifier': 'INTEGER_VALUE:1-65535',
'condeferation_peers_as': 'INTEGER_VALUE:1-65535',
'stalepath_delay_value': 'INTEGER_VALUE:1-3600',
'maxas_limit_as': 'INTEGER_VALUE:1-2000',
'neighbor_ipaddress': 'IPV4Address:',
'neighbor_as': 'NO_VALIDATION:1-4294967295',
'router_id': 'IPV4Address:',
'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600',
'bgp_holdtime': 'INTEGER_VALUE:0-3600',
'bgp_aggregate_prefix': 'IPV4AddressWithMask:',
'addrfamily_routemap_name': 'TEXT:',
'reachability_half_life': 'INTEGER_VALUE:1-45',
'start_reuse_route_value': 'INTEGER_VALUE:1-20000',
'start_suppress_route_value': 'INTEGER_VALUE:1-20000',
'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255',
'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45',
'distance_external_AS': 'INTEGER_VALUE:1-255',
'distance_internal_AS': 'INTEGER_VALUE:1-255',
'distance_local_routes': 'INTEGER_VALUE:1-255',
'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp',
'maxpath_numbers': 'INTEGER_VALUE:2-32',
'network_ip_prefix_with_mask': 'IPV4AddressWithMask:',
'network_ip_prefix_value': 'IPV4Address:',
'network_ip_prefix_mask': 'IPV4Address:',
'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295',
'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295',
'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\
static',
'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10',
'bgp_neighbor_af_filtername': 'TEXT:',
'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870',
'bgp_neighbor_af_prefixname': 'TEXT:',
'bgp_neighbor_af_routemap': 'TEXT:',
'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535',
'bgp_neighbor_description': 'TEXT:',
'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255',
'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295',
'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96',
'bgp_neighbor_password': 'TEXT:',
'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600',
'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600',
'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254',
'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\
vlan',
'bgp_neighbor_update_ethernet': 'TEXT:',
'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7',
'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094',
'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535',
'ethernet_interface_value': 'INTEGER_VALUE:1-32',
'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-32',
'ethernet_interface_string': 'TEXT:',
'loopback_interface_value': 'INTEGER_VALUE:0-7',
'mgmt_interface_value': 'INTEGER_VALUE:0-0',
'vlan_interface_value': 'INTEGER_VALUE:1-4094',
'portchannel_interface_value': 'INTEGER_VALUE:1-4096',
'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096',
'portchannel_interface_string': 'TEXT:',
'aggregation_group_no': 'INTEGER_VALUE:1-4096',
'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive',
'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\
ipv6,neighbor',
'bfd_interval': 'INTEGER_VALUE:50-999',
'bfd_minrx': 'INTEGER_VALUE:50-999',
'bfd_ multiplier': 'INTEGER_VALUE:3-50',
'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval',
'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\
meticulous-keyed-md5,meticulous-keyed-sha1,simple',
'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id',
'bfd_key_chain': 'TEXT:',
'bfd_key_id': 'INTEGER_VALUE:0-255',
'bfd_key_name': 'TEXT:',
'bfd_neighbor_ip': 'TEXT:',
'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\
non-persistent',
'bfd_access_vlan': 'INTEGER_VALUE:1-3999',
'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk',
'trunk_options': 'TEXT_OPTIONS:allowed,native',
'trunk_vlanid': 'INTEGER_VALUE:1-3999',
'portCh_description': 'TEXT:',
'duplex_option': 'TEXT_OPTIONS:auto,full,half',
'flowcontrol_options': 'TEXT_OPTIONS:receive,send',
'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\
arp,dhcp,ospf,port,port-unreachable,redirects,router,\
unreachables',
'accessgroup_name': 'TEXT:',
'portchannel_ipv4': 'IPV4Address:',
'portchannel_ipv4_mask': 'TEXT:',
'arp_ipaddress': 'IPV4Address:',
'arp_macaddress': 'TEXT:',
'arp_timeout_value': 'INTEGER_VALUE:60-28800',
'relay_ipaddress': 'IPV4Address:',
'ip_ospf_options': 'TEXT_OPTIONS:authentication,\
authentication-key,bfd,cost,database-filter,dead-interval,\
hello-interval,message-digest-key,mtu,mtu-ignore,network,\
passive-interface,priority,retransmit-interval,shutdown,\
transmit-delay',
'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295',
'ospf_id_ipaddres_value': 'IPV4Address:',
'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\
timeout',
'port_priority': 'INTEGER_VALUE:1-65535',
'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\
trap-notification',
'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\
mac-phy-status,management-address,max-frame-size,\
port-description,port-protocol-vlan,port-vlan,power-mdi,\
protocol-identity,system-capabilities,system-description,\
system-name,vid-management,vlan-name',
'load_interval_delay': 'INTEGER_VALUE:30-300',
'load_interval_counter': 'INTEGER_VALUE:1-3',
'mac_accessgroup_name': 'TEXT:',
'mac_address': 'TEXT:',
'microburst_threshold': 'NO_VALIDATION:1-4294967295',
'mtu_value': 'INTEGER_VALUE:64-9216',
'service_instance': 'NO_VALIDATION:1-4294967295',
'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,input,\
output,type',
'service_policy_name': 'TEXT:',
'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\
cost,disable,enable,guard,link-type,mst,port,port-priority,vlan',
'spanning_tree_cost': 'NO_VALIDATION:1-200000000',
'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999',
'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\
192,224',
'portchannel_ipv6_neighbor_mac': 'TEXT:',
'portchannel_ipv6_neighbor_address': 'IPV6Address:',
'portchannel_ipv6_linklocal': 'IPV6Address:',
'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094',
'portchannel_ipv6_dhcp_ethernet': 'TEXT:',
'portchannel_ipv6_dhcp': 'IPV6Address:',
'portchannel_ipv6_address': 'IPV6Address:',
'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\
link-local,nd,neighbor',
'interface_speed': 'TEXT_OPTIONS:1000,10000,100000,25000,40000,50000,auto',
'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\
unicast',
'stormcontrol_level': 'FLOAT:',
'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\
egress-only',
'vrrp_id': 'INTEGER_VALUE:1-255',
}
# Validation-rule table for the Lenovo NE1072T (CNOS) switch.
# Each key names a configurable device field; each value is a spec string
# '<VALIDATOR>:<constraint>' interpreted by validation code elsewhere in this
# module. Apparent validator meanings (inferred from the spec names — confirm
# against the interpreting code):
#   INTEGER_VALUE:lo-hi        - integer bounded to the inclusive range
#   INTEGER_VALUE_RANGE:lo-hi  - an integer range expression within bounds
#   TEXT_OPTIONS:a,b,c         - one of the listed literal keywords
#   TEXT: / NO_VALIDATION:...  - free-form text / loosely-checked value
#   IPV4Address:/IPV6Address:/IPV4AddressWithMask: - address formats
#   MATCH_TEXT_OR_EMPTY:x      - the literal x or an empty value
#   FLOAT:                     - floating-point value
# NOTE(review): several keys look misspelled ('bfd_ multiplier' with an
# embedded space, 'condeferation_peers_as', 'nexthop_crtitical_delay'), but
# consumers look fields up by these exact strings, so they are kept verbatim —
# audit every call site before renaming.
NE1072T = {
# -- VLAN / IGMP snooping --
'vlan_id': 'INTEGER_VALUE:1-3999',
'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999',
'vlan_name': 'TEXT:',
'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6',
'vlan_state': 'TEXT_OPTIONS:active,suspend',
'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25',
'vlan_querier': 'IPV4Address:',
'vlan_querier_timeout': 'INTEGER_VALUE:1-65535',
'vlan_query_interval': 'INTEGER_VALUE:1-18000',
'vlan_query_max_response_time': 'INTEGER_VALUE:1-25',
'vlan_report_suppression': 'INTEGER_VALUE:1-25',
'vlan_robustness_variable': 'INTEGER_VALUE:1-7',
'vlan_startup_query_count': 'INTEGER_VALUE:1-10',
'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000',
'vlan_snooping_version': 'INTEGER_VALUE:2-3',
'vlan_access_map_name': 'TEXT: ',
'vlan_ethernet_interface': 'TEXT:',
'vlan_portagg_number': 'INTEGER_VALUE:1-4096',
'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect',
'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only',
'vlan_filter_name': 'TEXT:',
# -- VLAG (virtual link aggregation) --
'vlag_auto_recovery': 'INTEGER_VALUE:240-3600',
'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict',
'vlag_instance': 'INTEGER_VALUE:1-64',
'vlag_port_aggregation': 'INTEGER_VALUE:1-4096',
'vlag_priority': 'INTEGER_VALUE:0-65535',
'vlag_startup_delay': 'INTEGER_VALUE:0-3600',
'vlag_tier_id': 'INTEGER_VALUE:1-512',
'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\
keepalive-interval,peer-ip,retry-interval',
'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24',
'vlag_keepalive_interval': 'INTEGER_VALUE:2-300',
'vlag_retry_interval': 'INTEGER_VALUE:1-300',
'vlag_peerip': 'IPV4Address:',
'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management',
# -- BGP process and address-family --
'bgp_as_number': 'NO_VALIDATION:1-4294967295',
'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
'bgp_bgp_local_count': 'INTEGER_VALUE:2-64',
'cluster_id_as_ip': 'IPV4Address:',
'cluster_id_as_number': 'NO_VALIDATION:1-4294967295',
'confederation_identifier': 'INTEGER_VALUE:1-65535',
'condeferation_peers_as': 'INTEGER_VALUE:1-65535',
'stalepath_delay_value': 'INTEGER_VALUE:1-3600',
'maxas_limit_as': 'INTEGER_VALUE:1-2000',
'neighbor_ipaddress': 'IPV4Address:',
'neighbor_as': 'NO_VALIDATION:1-4294967295',
'router_id': 'IPV4Address:',
'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600',
'bgp_holdtime': 'INTEGER_VALUE:0-3600',
'bgp_aggregate_prefix': 'IPV4AddressWithMask:',
'addrfamily_routemap_name': 'TEXT:',
'reachability_half_life': 'INTEGER_VALUE:1-45',
'start_reuse_route_value': 'INTEGER_VALUE:1-20000',
'start_suppress_route_value': 'INTEGER_VALUE:1-20000',
'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255',
'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45',
'distance_external_AS': 'INTEGER_VALUE:1-255',
'distance_internal_AS': 'INTEGER_VALUE:1-255',
'distance_local_routes': 'INTEGER_VALUE:1-255',
'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp',
'maxpath_numbers': 'INTEGER_VALUE:2-32',
'network_ip_prefix_with_mask': 'IPV4AddressWithMask:',
'network_ip_prefix_value': 'IPV4Address:',
'network_ip_prefix_mask': 'IPV4Address:',
'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295',
'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295',
'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\
static',
# -- BGP neighbor --
'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10',
'bgp_neighbor_af_filtername': 'TEXT:',
'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870',
'bgp_neighbor_af_prefixname': 'TEXT:',
'bgp_neighbor_af_routemap': 'TEXT:',
'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535',
'bgp_neighbor_description': 'TEXT:',
'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255',
'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295',
'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96',
'bgp_neighbor_password': 'TEXT:',
'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600',
'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600',
'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254',
'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\
vlan',
'bgp_neighbor_update_ethernet': 'TEXT:',
'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7',
'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094',
'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535',
# -- Interface numbering (ethernet interface numbers capped at 54 here) --
'ethernet_interface_value': 'INTEGER_VALUE:1-54',
'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-54',
'ethernet_interface_string': 'TEXT:',
'loopback_interface_value': 'INTEGER_VALUE:0-7',
'mgmt_interface_value': 'INTEGER_VALUE:0-0',
'vlan_interface_value': 'INTEGER_VALUE:1-4094',
'portchannel_interface_value': 'INTEGER_VALUE:1-4096',
'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096',
'portchannel_interface_string': 'TEXT:',
'aggregation_group_no': 'INTEGER_VALUE:1-4096',
'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive',
# -- BFD --
'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\
ipv6,neighbor',
'bfd_interval': 'INTEGER_VALUE:50-999',
'bfd_minrx': 'INTEGER_VALUE:50-999',
# NOTE(review): embedded space in this key is kept verbatim; lookups
# elsewhere appear to use this exact string — confirm before fixing.
'bfd_ multiplier': 'INTEGER_VALUE:3-50',
'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval',
'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\
meticulous-keyed-md5,meticulous-keyed-sha1,simple',
'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id',
'bfd_key_chain': 'TEXT:',
'bfd_key_id': 'INTEGER_VALUE:0-255',
'bfd_key_name': 'TEXT:',
'bfd_neighbor_ip': 'TEXT:',
'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\
non-persistent',
'bfd_access_vlan': 'INTEGER_VALUE:1-3999',
'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk',
# -- L2 / port-channel interface settings --
'trunk_options': 'TEXT_OPTIONS:allowed,native',
'trunk_vlanid': 'INTEGER_VALUE:1-3999',
'portCh_description': 'TEXT:',
'duplex_option': 'TEXT_OPTIONS:auto,full,half',
'flowcontrol_options': 'TEXT_OPTIONS:receive,send',
'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\
arp,dhcp,ospf,port,port-unreachable,redirects,router,\
unreachables',
'accessgroup_name': 'TEXT:',
'portchannel_ipv4': 'IPV4Address:',
'portchannel_ipv4_mask': 'TEXT:',
'arp_ipaddress': 'IPV4Address:',
'arp_macaddress': 'TEXT:',
'arp_timeout_value': 'INTEGER_VALUE:60-28800',
'relay_ipaddress': 'IPV4Address:',
# -- OSPF / LACP / LLDP / spanning tree --
'ip_ospf_options': 'TEXT_OPTIONS:authentication,\
authentication-key,bfd,cost,database-filter,dead-interval,\
hello-interval,message-digest-key,mtu,mtu-ignore,network,\
passive-interface,priority,retransmit-interval,shutdown,\
transmit-delay',
'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295',
'ospf_id_ipaddres_value': 'IPV4Address:',
'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\
timeout',
'port_priority': 'INTEGER_VALUE:1-65535',
'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\
trap-notification',
'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\
mac-phy-status,management-address,max-frame-size,\
port-description,port-protocol-vlan,port-vlan,power-mdi,\
protocol-identity,system-capabilities,system-description,\
system-name,vid-management,vlan-name',
'load_interval_delay': 'INTEGER_VALUE:30-300',
'load_interval_counter': 'INTEGER_VALUE:1-3',
'mac_accessgroup_name': 'TEXT:',
'mac_address': 'TEXT:',
'microburst_threshold': 'NO_VALIDATION:1-4294967295',
'mtu_value': 'INTEGER_VALUE:64-9216',
'service_instance': 'NO_VALIDATION:1-4294967295',
'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,input,\
output,type',
'service_policy_name': 'TEXT:',
'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\
cost,disable,enable,guard,link-type,mst,port,port-priority,vlan',
'spanning_tree_cost': 'NO_VALIDATION:1-200000000',
'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999',
'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\
192,224',
# -- IPv6, port speed, storm control, misc --
'portchannel_ipv6_neighbor_mac': 'TEXT:',
'portchannel_ipv6_neighbor_address': 'IPV6Address:',
'portchannel_ipv6_linklocal': 'IPV6Address:',
'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094',
'portchannel_ipv6_dhcp_ethernet': 'TEXT:',
'portchannel_ipv6_dhcp': 'IPV6Address:',
'portchannel_ipv6_address': 'IPV6Address:',
'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\
link-local,nd,neighbor',
'interface_speed': 'TEXT_OPTIONS:1000,10000,100000,25000,40000,50000,auto',
'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\
unicast',
'stormcontrol_level': 'FLOAT:',
'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\
egress-only',
'vrrp_id': 'INTEGER_VALUE:1-255',
}
# Validation-rule table for the Lenovo NE10032 (CNOS) switch.
# Keys are device-configuration field names; values are validation specs
# '<VALIDATOR>:<constraint>' (INTEGER_VALUE / INTEGER_VALUE_RANGE bounded
# integers, TEXT_OPTIONS keyword lists, TEXT free text, NO_VALIDATION loosely
# checked numbers, IPV4Address / IPV6Address / IPV4AddressWithMask address
# formats, MATCH_TEXT_OR_EMPTY, FLOAT) — semantics inferred from the spec
# names; the interpreting code lives outside this table.
# Model-specific differences from sibling tables: ethernet interfaces are
# limited to 1-32 and 'interface_speed' omits the 1000 option.
# NOTE(review): misspelled-looking keys ('bfd_ multiplier',
# 'condeferation_peers_as', 'nexthop_crtitical_delay') are kept verbatim —
# consumers look fields up by these exact strings; audit call sites before
# renaming.
NE10032 = {
# -- VLAN / IGMP snooping --
'vlan_id': 'INTEGER_VALUE:1-3999',
'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999',
'vlan_name': 'TEXT:',
'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6',
'vlan_state': 'TEXT_OPTIONS:active,suspend',
'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25',
'vlan_querier': 'IPV4Address:',
'vlan_querier_timeout': 'INTEGER_VALUE:1-65535',
'vlan_query_interval': 'INTEGER_VALUE:1-18000',
'vlan_query_max_response_time': 'INTEGER_VALUE:1-25',
'vlan_report_suppression': 'INTEGER_VALUE:1-25',
'vlan_robustness_variable': 'INTEGER_VALUE:1-7',
'vlan_startup_query_count': 'INTEGER_VALUE:1-10',
'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000',
'vlan_snooping_version': 'INTEGER_VALUE:2-3',
'vlan_access_map_name': 'TEXT: ',
'vlan_ethernet_interface': 'TEXT:',
'vlan_portagg_number': 'INTEGER_VALUE:1-4096',
'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect',
'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only',
'vlan_filter_name': 'TEXT:',
# -- VLAG (virtual link aggregation) --
'vlag_auto_recovery': 'INTEGER_VALUE:240-3600',
'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict',
'vlag_instance': 'INTEGER_VALUE:1-64',
'vlag_port_aggregation': 'INTEGER_VALUE:1-4096',
'vlag_priority': 'INTEGER_VALUE:0-65535',
'vlag_startup_delay': 'INTEGER_VALUE:0-3600',
'vlag_tier_id': 'INTEGER_VALUE:1-512',
'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\
keepalive-interval,peer-ip,retry-interval',
'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24',
'vlag_keepalive_interval': 'INTEGER_VALUE:2-300',
'vlag_retry_interval': 'INTEGER_VALUE:1-300',
'vlag_peerip': 'IPV4Address:',
'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management',
# -- BGP process and address-family --
'bgp_as_number': 'NO_VALIDATION:1-4294967295',
'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
'bgp_bgp_local_count': 'INTEGER_VALUE:2-64',
'cluster_id_as_ip': 'IPV4Address:',
'cluster_id_as_number': 'NO_VALIDATION:1-4294967295',
'confederation_identifier': 'INTEGER_VALUE:1-65535',
'condeferation_peers_as': 'INTEGER_VALUE:1-65535',
'stalepath_delay_value': 'INTEGER_VALUE:1-3600',
'maxas_limit_as': 'INTEGER_VALUE:1-2000',
'neighbor_ipaddress': 'IPV4Address:',
'neighbor_as': 'NO_VALIDATION:1-4294967295',
'router_id': 'IPV4Address:',
'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600',
'bgp_holdtime': 'INTEGER_VALUE:0-3600',
'bgp_aggregate_prefix': 'IPV4AddressWithMask:',
'addrfamily_routemap_name': 'TEXT:',
'reachability_half_life': 'INTEGER_VALUE:1-45',
'start_reuse_route_value': 'INTEGER_VALUE:1-20000',
'start_suppress_route_value': 'INTEGER_VALUE:1-20000',
'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255',
'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45',
'distance_external_AS': 'INTEGER_VALUE:1-255',
'distance_internal_AS': 'INTEGER_VALUE:1-255',
'distance_local_routes': 'INTEGER_VALUE:1-255',
'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp',
'maxpath_numbers': 'INTEGER_VALUE:2-32',
'network_ip_prefix_with_mask': 'IPV4AddressWithMask:',
'network_ip_prefix_value': 'IPV4Address:',
'network_ip_prefix_mask': 'IPV4Address:',
'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295',
'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295',
'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\
static',
# -- BGP neighbor --
'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10',
'bgp_neighbor_af_filtername': 'TEXT:',
'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870',
'bgp_neighbor_af_prefixname': 'TEXT:',
'bgp_neighbor_af_routemap': 'TEXT:',
'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535',
'bgp_neighbor_description': 'TEXT:',
'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255',
'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295',
'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96',
'bgp_neighbor_password': 'TEXT:',
'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600',
'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600',
'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254',
'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\
vlan',
'bgp_neighbor_update_ethernet': 'TEXT:',
'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7',
'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094',
'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535',
# -- Interface numbering (ethernet interface numbers capped at 32 here) --
'ethernet_interface_value': 'INTEGER_VALUE:1-32',
'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-32',
'ethernet_interface_string': 'TEXT:',
'loopback_interface_value': 'INTEGER_VALUE:0-7',
'mgmt_interface_value': 'INTEGER_VALUE:0-0',
'vlan_interface_value': 'INTEGER_VALUE:1-4094',
'portchannel_interface_value': 'INTEGER_VALUE:1-4096',
'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096',
'portchannel_interface_string': 'TEXT:',
'aggregation_group_no': 'INTEGER_VALUE:1-4096',
'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive',
# -- BFD --
'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\
ipv6,neighbor',
'bfd_interval': 'INTEGER_VALUE:50-999',
'bfd_minrx': 'INTEGER_VALUE:50-999',
# NOTE(review): embedded space in this key is kept verbatim; lookups
# elsewhere appear to use this exact string — confirm before fixing.
'bfd_ multiplier': 'INTEGER_VALUE:3-50',
'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval',
'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\
meticulous-keyed-md5,meticulous-keyed-sha1,simple',
'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id',
'bfd_key_chain': 'TEXT:',
'bfd_key_id': 'INTEGER_VALUE:0-255',
'bfd_key_name': 'TEXT:',
'bfd_neighbor_ip': 'TEXT:',
'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\
non-persistent',
'bfd_access_vlan': 'INTEGER_VALUE:1-3999',
'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk',
# -- L2 / port-channel interface settings --
'trunk_options': 'TEXT_OPTIONS:allowed,native',
'trunk_vlanid': 'INTEGER_VALUE:1-3999',
'portCh_description': 'TEXT:',
'duplex_option': 'TEXT_OPTIONS:auto,full,half',
'flowcontrol_options': 'TEXT_OPTIONS:receive,send',
'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\
arp,dhcp,ospf,port,port-unreachable,redirects,router,\
unreachables',
'accessgroup_name': 'TEXT:',
'portchannel_ipv4': 'IPV4Address:',
'portchannel_ipv4_mask': 'TEXT:',
'arp_ipaddress': 'IPV4Address:',
'arp_macaddress': 'TEXT:',
'arp_timeout_value': 'INTEGER_VALUE:60-28800',
'relay_ipaddress': 'IPV4Address:',
# -- OSPF / LACP / LLDP / spanning tree --
'ip_ospf_options': 'TEXT_OPTIONS:authentication,\
authentication-key,bfd,cost,database-filter,dead-interval,\
hello-interval,message-digest-key,mtu,mtu-ignore,network,\
passive-interface,priority,retransmit-interval,shutdown,\
transmit-delay',
'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295',
'ospf_id_ipaddres_value': 'IPV4Address:',
'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\
timeout',
'port_priority': 'INTEGER_VALUE:1-65535',
'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\
trap-notification',
'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\
mac-phy-status,management-address,max-frame-size,\
port-description,port-protocol-vlan,port-vlan,power-mdi,\
protocol-identity,system-capabilities,system-description,\
system-name,vid-management,vlan-name',
'load_interval_delay': 'INTEGER_VALUE:30-300',
'load_interval_counter': 'INTEGER_VALUE:1-3',
'mac_accessgroup_name': 'TEXT:',
'mac_address': 'TEXT:',
'microburst_threshold': 'NO_VALIDATION:1-4294967295',
'mtu_value': 'INTEGER_VALUE:64-9216',
'service_instance': 'NO_VALIDATION:1-4294967295',
'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,input,\
output,type',
'service_policy_name': 'TEXT:',
'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\
cost,disable,enable,guard,link-type,mst,port,port-priority,vlan',
'spanning_tree_cost': 'NO_VALIDATION:1-200000000',
'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999',
'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\
192,224',
# -- IPv6, port speed, storm control, misc --
'portchannel_ipv6_neighbor_mac': 'TEXT:',
'portchannel_ipv6_neighbor_address': 'IPV6Address:',
'portchannel_ipv6_linklocal': 'IPV6Address:',
'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094',
'portchannel_ipv6_dhcp_ethernet': 'TEXT:',
'portchannel_ipv6_dhcp': 'IPV6Address:',
'portchannel_ipv6_address': 'IPV6Address:',
'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\
link-local,nd,neighbor',
'interface_speed': 'TEXT_OPTIONS:10000,100000,25000,40000,50000,auto',
'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\
unicast',
'stormcontrol_level': 'FLOAT:',
'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\
egress-only',
'vrrp_id': 'INTEGER_VALUE:1-255',
}
# Validation-rule table for the Lenovo RackSwitch G8272 running CNOS.
# Keys are device-configuration field names; values are validation specs
# '<VALIDATOR>:<constraint>' (INTEGER_VALUE / INTEGER_VALUE_RANGE bounded
# integers, TEXT_OPTIONS keyword lists, TEXT free text, NO_VALIDATION loosely
# checked numbers, IPV4Address / IPV6Address / IPV4AddressWithMask address
# formats, MATCH_TEXT_OR_EMPTY, FLOAT) — semantics inferred from the spec
# names; the interpreting code lives outside this table.
# Model-specific differences from sibling tables: ethernet interfaces are
# limited to 1-54 and 'interface_speed' allows only 1000/10000/40000/auto.
# NOTE(review): misspelled-looking keys ('bfd_ multiplier',
# 'condeferation_peers_as', 'nexthop_crtitical_delay') are kept verbatim —
# consumers look fields up by these exact strings; audit call sites before
# renaming.
g8272_cnos = {'vlan_id': 'INTEGER_VALUE:1-3999',
'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999',
'vlan_name': 'TEXT:',
'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6',
'vlan_state': 'TEXT_OPTIONS:active,suspend',
'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25',
'vlan_querier': 'IPV4Address:',
'vlan_querier_timeout': 'INTEGER_VALUE:1-65535',
'vlan_query_interval': 'INTEGER_VALUE:1-18000',
'vlan_query_max_response_time': 'INTEGER_VALUE:1-25',
'vlan_report_suppression': 'INTEGER_VALUE:1-25',
'vlan_robustness_variable': 'INTEGER_VALUE:1-7',
'vlan_startup_query_count': 'INTEGER_VALUE:1-10',
'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000',
'vlan_snooping_version': 'INTEGER_VALUE:2-3',
'vlan_access_map_name': 'TEXT: ',
'vlan_ethernet_interface': 'TEXT:',
'vlan_portagg_number': 'INTEGER_VALUE:1-4096',
'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect',
'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only',
'vlan_filter_name': 'TEXT:',
# -- VLAG (virtual link aggregation) --
'vlag_auto_recovery': 'INTEGER_VALUE:240-3600',
'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict',
'vlag_instance': 'INTEGER_VALUE:1-64',
'vlag_port_aggregation': 'INTEGER_VALUE:1-4096',
'vlag_priority': 'INTEGER_VALUE:0-65535',
'vlag_startup_delay': 'INTEGER_VALUE:0-3600',
'vlag_tier_id': 'INTEGER_VALUE:1-512',
'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\
keepalive-interval,peer-ip,retry-interval',
'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24',
'vlag_keepalive_interval': 'INTEGER_VALUE:2-300',
'vlag_retry_interval': 'INTEGER_VALUE:1-300',
'vlag_peerip': 'IPV4Address:',
'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management',
# -- BGP process and address-family --
'bgp_as_number': 'NO_VALIDATION:1-4294967295',
'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
'bgp_bgp_local_count': 'INTEGER_VALUE:2-64',
'cluster_id_as_ip': 'IPV4Address:',
'cluster_id_as_number': 'NO_VALIDATION:1-4294967295',
'confederation_identifier': 'INTEGER_VALUE:1-65535',
'condeferation_peers_as': 'INTEGER_VALUE:1-65535',
'stalepath_delay_value': 'INTEGER_VALUE:1-3600',
'maxas_limit_as': 'INTEGER_VALUE:1-2000',
'neighbor_ipaddress': 'IPV4Address:',
'neighbor_as': 'NO_VALIDATION:1-4294967295',
'router_id': 'IPV4Address:',
'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600',
'bgp_holdtime': 'INTEGER_VALUE:0-3600',
'bgp_aggregate_prefix': 'IPV4AddressWithMask:',
'addrfamily_routemap_name': 'TEXT:',
'reachability_half_life': 'INTEGER_VALUE:1-45',
'start_reuse_route_value': 'INTEGER_VALUE:1-20000',
'start_suppress_route_value': 'INTEGER_VALUE:1-20000',
'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255',
'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45',
'distance_external_AS': 'INTEGER_VALUE:1-255',
'distance_internal_AS': 'INTEGER_VALUE:1-255',
'distance_local_routes': 'INTEGER_VALUE:1-255',
'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp',
'maxpath_numbers': 'INTEGER_VALUE:2-32',
'network_ip_prefix_with_mask': 'IPV4AddressWithMask:',
'network_ip_prefix_value': 'IPV4Address:',
'network_ip_prefix_mask': 'IPV4Address:',
'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295',
'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295',
'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\
static',
# -- BGP neighbor --
'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10',
'bgp_neighbor_af_filtername': 'TEXT:',
'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870',
'bgp_neighbor_af_prefixname': 'TEXT:',
'bgp_neighbor_af_routemap': 'TEXT:',
'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535',
'bgp_neighbor_description': 'TEXT:',
'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255',
'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295',
'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96',
'bgp_neighbor_password': 'TEXT:',
'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600',
'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600',
'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254',
'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\
vlan',
'bgp_neighbor_update_ethernet': 'TEXT:',
'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7',
'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094',
'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535',
# -- Interface numbering (ethernet interface numbers capped at 54 here) --
'ethernet_interface_value': 'INTEGER_VALUE:1-54',
'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-54',
'ethernet_interface_string': 'TEXT:',
'loopback_interface_value': 'INTEGER_VALUE:0-7',
'mgmt_interface_value': 'INTEGER_VALUE:0-0',
'vlan_interface_value': 'INTEGER_VALUE:1-4094',
'portchannel_interface_value': 'INTEGER_VALUE:1-4096',
'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096',
'portchannel_interface_string': 'TEXT:',
'aggregation_group_no': 'INTEGER_VALUE:1-4096',
'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive',
# -- BFD --
'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\
ipv6,neighbor',
'bfd_interval': 'INTEGER_VALUE:50-999',
'bfd_minrx': 'INTEGER_VALUE:50-999',
# NOTE(review): embedded space in this key is kept verbatim; lookups
# elsewhere appear to use this exact string — confirm before fixing.
'bfd_ multiplier': 'INTEGER_VALUE:3-50',
'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval',
'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\
meticulous-keyed-md5,meticulous-keyed-sha1,simple',
'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id',
'bfd_key_chain': 'TEXT:',
'bfd_key_id': 'INTEGER_VALUE:0-255',
'bfd_key_name': 'TEXT:',
'bfd_neighbor_ip': 'TEXT:',
'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\
non-persistent',
'bfd_access_vlan': 'INTEGER_VALUE:1-3999',
'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk',
# -- L2 / port-channel interface settings --
'trunk_options': 'TEXT_OPTIONS:allowed,native',
'trunk_vlanid': 'INTEGER_VALUE:1-3999',
'portCh_description': 'TEXT:',
'duplex_option': 'TEXT_OPTIONS:auto,full,half',
'flowcontrol_options': 'TEXT_OPTIONS:receive,send',
'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\
arp,dhcp,ospf,port,port-unreachable,redirects,router,\
unreachables',
'accessgroup_name': 'TEXT:',
'portchannel_ipv4': 'IPV4Address:',
'portchannel_ipv4_mask': 'TEXT:',
'arp_ipaddress': 'IPV4Address:',
'arp_macaddress': 'TEXT:',
'arp_timeout_value': 'INTEGER_VALUE:60-28800',
'relay_ipaddress': 'IPV4Address:',
# -- OSPF / LACP / LLDP / spanning tree --
'ip_ospf_options': 'TEXT_OPTIONS:authentication,\
authentication-key,bfd,cost,database-filter,dead-interval,\
hello-interval,message-digest-key,mtu,mtu-ignore,network,\
passive-interface,priority,retransmit-interval,shutdown,\
transmit-delay',
'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295',
'ospf_id_ipaddres_value': 'IPV4Address:',
'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\
timeout',
'port_priority': 'INTEGER_VALUE:1-65535',
'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\
trap-notification',
'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\
mac-phy-status,management-address,max-frame-size,\
port-description,port-protocol-vlan,port-vlan,power-mdi,\
protocol-identity,system-capabilities,system-description,\
system-name,vid-management,vlan-name',
'load_interval_delay': 'INTEGER_VALUE:30-300',
'load_interval_counter': 'INTEGER_VALUE:1-3',
'mac_accessgroup_name': 'TEXT:',
'mac_address': 'TEXT:',
'microburst_threshold': 'NO_VALIDATION:1-4294967295',
'mtu_value': 'INTEGER_VALUE:64-9216',
'service_instance': 'NO_VALIDATION:1-4294967295',
'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,input,\
output,type',
'service_policy_name': 'TEXT:',
'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\
cost,disable,enable,guard,link-type,mst,port,port-priority,vlan',
'spanning_tree_cost': 'NO_VALIDATION:1-200000000',
'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999',
'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\
192,224',
# -- IPv6, port speed, storm control, misc --
'portchannel_ipv6_neighbor_mac': 'TEXT:',
'portchannel_ipv6_neighbor_address': 'IPV6Address:',
'portchannel_ipv6_linklocal': 'IPV6Address:',
'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094',
'portchannel_ipv6_dhcp_ethernet': 'TEXT:',
'portchannel_ipv6_dhcp': 'IPV6Address:',
'portchannel_ipv6_address': 'IPV6Address:',
'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\
link-local,nd,neighbor',
'interface_speed': 'TEXT_OPTIONS:1000,10000,40000,auto',
'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\
unicast',
'stormcontrol_level': 'FLOAT:',
'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\
egress-only',
'vrrp_id': 'INTEGER_VALUE:1-255',
}
g8296_cnos = {'vlan_id': 'INTEGER_VALUE:1-3999',
'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999',
'vlan_name': 'TEXT:',
'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6',
'vlan_state': 'TEXT_OPTIONS:active,suspend',
'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25',
'vlan_querier': 'IPV4Address:',
'vlan_querier_timeout': 'INTEGER_VALUE:1-65535',
'vlan_query_interval': 'INTEGER_VALUE:1-18000',
'vlan_query_max_response_time': 'INTEGER_VALUE:1-25',
'vlan_report_suppression': 'INTEGER_VALUE:1-25',
'vlan_robustness_variable': 'INTEGER_VALUE:1-7',
'vlan_startup_query_count': 'INTEGER_VALUE:1-10',
'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000',
'vlan_snooping_version': 'INTEGER_VALUE:2-3',
'vlan_access_map_name': 'TEXT: ',
'vlan_ethernet_interface': 'TEXT:',
'vlan_portagg_number': 'INTEGER_VALUE:1-4096',
'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect',
'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only',
'vlan_filter_name': 'TEXT:',
'vlag_auto_recovery': 'INTEGER_VALUE:240-3600',
'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict',
'vlag_instance': 'INTEGER_VALUE:1-128',
'vlag_port_aggregation': 'INTEGER_VALUE:1-4096',
'vlag_priority': 'INTEGER_VALUE:0-65535',
'vlag_startup_delay': 'INTEGER_VALUE:0-3600',
'vlag_tier_id': 'INTEGER_VALUE:1-512',
'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\
keepalive-interval,peer-ip,retry-interval',
'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24',
'vlag_keepalive_interval': 'INTEGER_VALUE:2-300',
'vlag_retry_interval': 'INTEGER_VALUE:1-300',
'vlag_peerip': 'IPV4Address:',
'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management',
'bgp_as_number': 'NO_VALIDATION:1-4294967295',
'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
'bgp_bgp_local_count': 'INTEGER_VALUE:2-64',
'cluster_id_as_ip': 'IPV4Address:',
'cluster_id_as_number': 'NO_VALIDATION:1-4294967295',
'confederation_identifier': 'INTEGER_VALUE:1-65535',
'condeferation_peers_as': 'INTEGER_VALUE:1-65535',
'stalepath_delay_value': 'INTEGER_VALUE:1-3600',
'maxas_limit_as': 'INTEGER_VALUE:1-2000',
'neighbor_ipaddress': 'IPV4Address:',
'neighbor_as': 'NO_VALIDATION:1-4294967295',
'router_id': 'IPV4Address:',
'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600',
'bgp_holdtime': 'INTEGER_VALUE:0-3600',
'bgp_aggregate_prefix': 'IPV4AddressWithMask:',
'addrfamily_routemap_name': 'TEXT:',
'reachability_half_life': 'INTEGER_VALUE:1-45',
'start_reuse_route_value': 'INTEGER_VALUE:1-20000',
'start_suppress_route_value': 'INTEGER_VALUE:1-20000',
'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255',
'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45',
'distance_external_AS': 'INTEGER_VALUE:1-255',
'distance_internal_AS': 'INTEGER_VALUE:1-255',
'distance_local_routes': 'INTEGER_VALUE:1-255',
'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp',
'maxpath_numbers': 'INTEGER_VALUE:2-32',
'network_ip_prefix_with_mask': 'IPV4AddressWithMask:',
'network_ip_prefix_value': 'IPV4Address:',
'network_ip_prefix_mask': 'IPV4Address:',
'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295',
'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295',
'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\
static',
'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10',
'bgp_neighbor_af_filtername': 'TEXT:',
'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870',
'bgp_neighbor_af_prefixname': 'TEXT:',
'bgp_neighbor_af_routemap': 'TEXT:',
'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535',
'bgp_neighbor_description': 'TEXT:',
'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255',
'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295',
'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96',
'bgp_neighbor_password': 'TEXT:',
'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600',
'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600',
'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254',
'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\
vlan',
'bgp_neighbor_update_ethernet': 'TEXT:',
'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7',
'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094',
'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535',
'ethernet_interface_value': 'INTEGER_VALUE:1-96',
'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-96',
'ethernet_interface_string': 'TEXT:',
'loopback_interface_value': 'INTEGER_VALUE:0-7',
'mgmt_interface_value': 'INTEGER_VALUE:0-0',
'vlan_interface_value': 'INTEGER_VALUE:1-4094',
'portchannel_interface_value': 'INTEGER_VALUE:1-4096',
'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096',
'portchannel_interface_string': 'TEXT:',
'aggregation_group_no': 'INTEGER_VALUE:1-4096',
'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive',
'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\
ipv6,neighbor',
'bfd_interval': 'INTEGER_VALUE:50-999',
'bfd_minrx': 'INTEGER_VALUE:50-999',
'bfd_ multiplier': 'INTEGER_VALUE:3-50',
'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval',
'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\
meticulous-keyed-md5,meticulous-keyed-sha1,simple',
'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id',
'bfd_key_chain': 'TEXT:',
'bfd_key_id': 'INTEGER_VALUE:0-255',
'bfd_key_name': 'TEXT:',
'bfd_neighbor_ip': 'TEXT:',
'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\
non-persistent',
'bfd_access_vlan': 'INTEGER_VALUE:1-3999',
'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk',
'trunk_options': 'TEXT_OPTIONS:allowed,native',
'trunk_vlanid': 'INTEGER_VALUE:1-3999',
'portCh_description': 'TEXT:',
'duplex_option': 'TEXT_OPTIONS:auto,full,half',
'flowcontrol_options': 'TEXT_OPTIONS:receive,send',
'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\
arp,dhcp,ospf,port,port-unreachable,redirects,router,\
unreachables',
'accessgroup_name': 'TEXT:',
'portchannel_ipv4': 'IPV4Address:',
'portchannel_ipv4_mask': 'TEXT:',
'arp_ipaddress': 'IPV4Address:',
'arp_macaddress': 'TEXT:',
'arp_timeout_value': 'INTEGER_VALUE:60-28800',
'relay_ipaddress': 'IPV4Address:',
'ip_ospf_options': 'TEXT_OPTIONS:authentication,\
authentication-key,bfd,cost,database-filter,dead-interval,\
hello-interval,message-digest-key,mtu,mtu-ignore,network,\
passive-interface,priority,retransmit-interval,shutdown,\
transmit-delay',
'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295',
'ospf_id_ipaddres_value': 'IPV4Address:',
'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\
timeout',
'port_priority': 'INTEGER_VALUE:1-65535',
'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\
trap-notification',
'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\
mac-phy-status,management-address,max-frame-size,\
port-description,port-protocol-vlan,port-vlan,power-mdi,\
protocol-identity,system-capabilities,system-description,\
system-name,vid-management,vlan-name',
'load_interval_delay': 'INTEGER_VALUE:30-300',
'load_interval_counter': 'INTEGER_VALUE:1-3',
'mac_accessgroup_name': 'TEXT:',
'mac_address': 'TEXT:',
'microburst_threshold': 'NO_VALIDATION:1-4294967295',
'mtu_value': 'INTEGER_VALUE:64-9216',
'service_instance': 'NO_VALIDATION:1-4294967295',
'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,\
input,output,type',
'service_policy_name': 'TEXT:',
'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\
cost,disable,enable,guard,link-type,mst,port,port-priority,vlan',
'spanning_tree_cost': 'NO_VALIDATION:1-200000000',
'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999',
'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\
192,224',
'portchannel_ipv6_neighbor_mac': 'TEXT:',
'portchannel_ipv6_neighbor_address': 'IPV6Address:',
'portchannel_ipv6_linklocal': 'IPV6Address:',
'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094',
'portchannel_ipv6_dhcp_ethernet': 'TEXT:',
'portchannel_ipv6_dhcp': 'IPV6Address:',
'portchannel_ipv6_address': 'IPV6Address:',
'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\
link-local,nd,neighbor',
'interface_speed': 'TEXT_OPTIONS:1000,10000,40000,auto',
'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\
unicast',
'stormcontrol_level': 'FLOAT:',
'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\
egress-only',
'vrrp_id': 'INTEGER_VALUE:1-255',
}
g8332_cnos = {'vlan_id': 'INTEGER_VALUE:1-3999',
'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999',
'vlan_name': 'TEXT:',
'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6',
'vlan_state': 'TEXT_OPTIONS:active,suspend',
'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25',
'vlan_querier': 'IPV4Address:',
'vlan_querier_timeout': 'INTEGER_VALUE:1-65535',
'vlan_query_interval': 'INTEGER_VALUE:1-18000',
'vlan_query_max_response_time': 'INTEGER_VALUE:1-25',
'vlan_report_suppression': 'INTEGER_VALUE:1-25',
'vlan_robustness_variable': 'INTEGER_VALUE:1-7',
'vlan_startup_query_count': 'INTEGER_VALUE:1-10',
'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000',
'vlan_snooping_version': 'INTEGER_VALUE:2-3',
'vlan_access_map_name': 'TEXT: ',
'vlan_ethernet_interface': 'TEXT:',
'vlan_portagg_number': 'INTEGER_VALUE:1-4096',
'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect',
'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only',
'vlan_filter_name': 'TEXT:',
'vlag_auto_recovery': 'INTEGER_VALUE:240-3600',
'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict',
'vlag_instance': 'INTEGER_VALUE:1-128',
'vlag_port_aggregation': 'INTEGER_VALUE:1-4096',
'vlag_priority': 'INTEGER_VALUE:0-65535',
'vlag_startup_delay': 'INTEGER_VALUE:0-3600',
'vlag_tier_id': 'INTEGER_VALUE:1-512',
'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\
keepalive-interval,peer-ip,retry-interval',
'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24',
'vlag_keepalive_interval': 'INTEGER_VALUE:2-300',
'vlag_retry_interval': 'INTEGER_VALUE:1-300',
'vlag_peerip': 'IPV4Address:',
'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management',
'bgp_as_number': 'NO_VALIDATION:1-4294967295',
'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
'bgp_bgp_local_count': 'INTEGER_VALUE:2-64',
'cluster_id_as_ip': 'IPV4Address:',
'cluster_id_as_number': 'NO_VALIDATION:1-4294967295',
'confederation_identifier': 'INTEGER_VALUE:1-65535',
'condeferation_peers_as': 'INTEGER_VALUE:1-65535',
'stalepath_delay_value': 'INTEGER_VALUE:1-3600',
'maxas_limit_as': 'INTEGER_VALUE:1-2000',
'neighbor_ipaddress': 'IPV4Address:',
'neighbor_as': 'NO_VALIDATION:1-4294967295',
'router_id': 'IPV4Address:',
'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600',
'bgp_holdtime': 'INTEGER_VALUE:0-3600',
'bgp_aggregate_prefix': 'IPV4AddressWithMask:',
'addrfamily_routemap_name': 'TEXT:',
'reachability_half_life': 'INTEGER_VALUE:1-45',
'start_reuse_route_value': 'INTEGER_VALUE:1-20000',
'start_suppress_route_value': 'INTEGER_VALUE:1-20000',
'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255',
'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45',
'distance_external_AS': 'INTEGER_VALUE:1-255',
'distance_internal_AS': 'INTEGER_VALUE:1-255',
'distance_local_routes': 'INTEGER_VALUE:1-255',
'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp',
'maxpath_numbers': 'INTEGER_VALUE:2-32',
'network_ip_prefix_with_mask': 'IPV4AddressWithMask:',
'network_ip_prefix_value': 'IPV4Address:',
'network_ip_prefix_mask': 'IPV4Address:',
'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295',
'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295',
'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\
static',
'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10',
'bgp_neighbor_af_filtername': 'TEXT:',
'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870',
'bgp_neighbor_af_prefixname': 'TEXT:',
'bgp_neighbor_af_routemap': 'TEXT:',
'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535',
'bgp_neighbor_description': 'TEXT:',
'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255',
'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295',
'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96',
'bgp_neighbor_password': 'TEXT:',
'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600',
'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600',
'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254',
'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\
vlan',
'bgp_neighbor_update_ethernet': 'TEXT:',
'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7',
'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094',
'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535',
'ethernet_interface_value': 'INTEGER_VALUE:1-32',
'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-32',
'ethernet_interface_string': 'TEXT:',
'loopback_interface_value': 'INTEGER_VALUE:0-7',
'mgmt_interface_value': 'INTEGER_VALUE:0-0',
'vlan_interface_value': 'INTEGER_VALUE:1-4094',
'portchannel_interface_value': 'INTEGER_VALUE:1-4096',
'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096',
'portchannel_interface_string': 'TEXT:',
'aggregation_group_no': 'INTEGER_VALUE:1-4096',
'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive',
'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\
ipv6,neighbor',
'bfd_interval': 'INTEGER_VALUE:50-999',
'bfd_minrx': 'INTEGER_VALUE:50-999',
'bfd_ multiplier': 'INTEGER_VALUE:3-50',
'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval',
'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\
meticulous-keyed-md5,meticulous-keyed-sha1,simple',
'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id',
'bfd_key_chain': 'TEXT:',
'bfd_key_id': 'INTEGER_VALUE:0-255',
'bfd_key_name': 'TEXT:',
'bfd_neighbor_ip': 'TEXT:',
'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\
non-persistent',
'bfd_access_vlan': 'INTEGER_VALUE:1-3999',
'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk',
'trunk_options': 'TEXT_OPTIONS:allowed,native',
'trunk_vlanid': 'INTEGER_VALUE:1-3999',
'portCh_description': 'TEXT:',
'duplex_option': 'TEXT_OPTIONS:auto,full,half',
'flowcontrol_options': 'TEXT_OPTIONS:receive,send',
'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,arp,\
dhcp,ospf,port,port-unreachable,redirects,router,unreachables',
'accessgroup_name': 'TEXT:',
'portchannel_ipv4': 'IPV4Address:',
'portchannel_ipv4_mask': 'TEXT:',
'arp_ipaddress': 'IPV4Address:',
'arp_macaddress': 'TEXT:',
'arp_timeout_value': 'INTEGER_VALUE:60-28800',
'relay_ipaddress': 'IPV4Address:',
'ip_ospf_options': 'TEXT_OPTIONS:authentication,\
authentication-key,bfd,cost,database-filter,dead-interval,\
hello-interval,message-digest-key,mtu,mtu-ignore,network,\
passive-interface,priority,retransmit-interval,shutdown,\
transmit-delay',
'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295',
'ospf_id_ipaddres_value': 'IPV4Address:',
'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\
timeout',
'port_priority': 'INTEGER_VALUE:1-65535',
'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\
trap-notification',
'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\
mac-phy-status,management-address,max-frame-size,\
port-description,port-protocol-vlan,port-vlan,power-mdi,\
protocol-identity,system-capabilities,system-description,\
system-name,vid-management,vlan-name',
'load_interval_delay': 'INTEGER_VALUE:30-300',
'load_interval_counter': 'INTEGER_VALUE:1-3',
'mac_accessgroup_name': 'TEXT:',
'mac_address': 'TEXT:',
'microburst_threshold': 'NO_VALIDATION:1-4294967295',
'mtu_value': 'INTEGER_VALUE:64-9216',
'service_instance': 'NO_VALIDATION:1-4294967295',
'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,\
input,output,type',
'service_policy_name': 'TEXT:',
'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\
cost,disable,enable,guard,link-type,mst,port,port-priority,vlan',
'spanning_tree_cost': 'NO_VALIDATION:1-200000000',
'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999',
'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\
192,224',
'portchannel_ipv6_neighbor_mac': 'TEXT:',
'portchannel_ipv6_neighbor_address': 'IPV6Address:',
'portchannel_ipv6_linklocal': 'IPV6Address:',
'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094',
'portchannel_ipv6_dhcp_ethernet': 'TEXT:',
'portchannel_ipv6_dhcp': 'IPV6Address:',
'portchannel_ipv6_address': 'IPV6Address:',
'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\
link-local,nd,neighbor',
'interface_speed': 'TEXT_OPTIONS:1000,10000,40000,50000,auto',
'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\
unicast',
'stormcontrol_level': 'FLOAT:',
'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\
egress-only',
'vrrp_id': 'INTEGER_VALUE:1-255',
}
| gpl-3.0 |
stoivo/GitSavvy | core/commands/git_add.py | 1 | 2967 | """
Implements a special view that displays an editable diff of unstaged changes.
"""
import os
import sys
import sublime
from sublime_plugin import WindowCommand, TextCommand
from ...common import util
from ..git_command import GitCommand
from ..exceptions import GitSavvyError
TITLE = "GIT-ADD: {}"
class GsAddEditCommand(WindowCommand, GitCommand):
"""
Create a new view to display the project's unstaged changes.
"""
def run(self, **kwargs):
sublime.set_timeout_async(lambda: self.run_async(**kwargs), 0)
def run_async(self):
git_add_view = util.view.get_scratch_view(self, "git_add", read_only=False)
git_add_view.set_name(TITLE.format(os.path.basename(self.repo_path)))
git_add_view.set_syntax_file("Packages/GitSavvy/syntax/diff.sublime_syntax")
git_add_view.settings().set("git_savvy.repo_path", self.repo_path)
git_add_view.settings().set("translate_tabs_to_spaces", False)
self.window.focus_view(git_add_view)
git_add_view.sel().clear()
git_add_view.run_command("gs_add_edit_refresh")
super_key = "SUPER" if sys.platform == "darwin" else "CTRL"
message = "Press {}-Enter to apply the diff. Close the window to cancel.".format(super_key)
sublime.message_dialog(message)
class GsAddEditRefreshCommand(TextCommand, GitCommand):
"""
Refresh the view with the latest unstaged changes.
"""
def run(self, edit, cursors=None):
if self.view.settings().get("git_savvy.disable_diff"):
return
try:
stdout = self.git("diff", "--no-color")
except GitSavvyError as err:
# When the output of the above Git command fails to correctly parse,
# the expected notification will be displayed to the user. However,
# once the userpresses OK, a new refresh event will be triggered on
# the view.
#
# This causes an infinite loop of increasingly frustrating error
# messages, ultimately resulting in psychosis and serious medical
# bills. This is a better, though somewhat cludgy, alternative.
#
if err.args and type(err.args[0]) == UnicodeDecodeError:
self.view.settings().set("git_savvy.disable_diff", True)
return
raise err
self.view.run_command("gs_replace_view_text", {"text": stdout})
class GsAddEditCommitCommand(TextCommand, GitCommand):
"""
Apply the commit as it is presented in the view to the index. Then close the view.
"""
def run(self, edit):
sublime.set_timeout_async(lambda: self.run_async(), 0)
def run_async(self):
diff_content = self.view.substr(sublime.Region(0, self.view.size()))
self.git("apply", "--cached", "-", stdin=diff_content)
self.view.window().focus_view(self.view)
self.view.window().run_command("close_file")
| mit |
CamelBackNotation/CarnotKE | jyhton/lib-python/2.7/test/test_httpservers.py | 36 | 18963 | """Unittests for the various HTTPServer modules.
Written by Cody A.W. Somerville <cody-somerville@ubuntu.com>,
Josip Dzolonga, and Michael Otteneder for the 2007/08 GHOP contest.
"""
import os
import sys
import re
import base64
import shutil
import urllib
import httplib
import tempfile
import unittest
import CGIHTTPServer
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
from CGIHTTPServer import CGIHTTPRequestHandler
from StringIO import StringIO
from test import test_support
threading = test_support.import_module('threading')
class NoLogRequestHandler:
def log_message(self, *args):
# don't write log messages to stderr
pass
class SocketlessRequestHandler(SimpleHTTPRequestHandler):
def __init__(self):
self.get_called = False
self.protocol_version = "HTTP/1.1"
def do_GET(self):
self.get_called = True
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(b'<html><body>Data</body></html>\r\n')
def log_message(self, fmt, *args):
pass
class TestServerThread(threading.Thread):
def __init__(self, test_object, request_handler):
threading.Thread.__init__(self)
self.request_handler = request_handler
self.test_object = test_object
def run(self):
self.server = HTTPServer(('', 0), self.request_handler)
self.test_object.PORT = self.server.socket.getsockname()[1]
self.test_object.server_started.set()
self.test_object = None
try:
self.server.serve_forever(0.05)
finally:
self.server.server_close()
def stop(self):
self.server.shutdown()
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test_support.threading_setup()
os.environ = test_support.EnvironmentVarGuard()
self.server_started = threading.Event()
self.thread = TestServerThread(self, self.request_handler)
self.thread.start()
self.server_started.wait()
def tearDown(self):
self.thread.stop()
os.environ.__exit__()
test_support.threading_cleanup(*self._threads)
def request(self, uri, method='GET', body=None, headers={}):
self.connection = httplib.HTTPConnection('localhost', self.PORT)
self.connection.request(method, uri, body, headers)
return self.connection.getresponse()
class BaseHTTPRequestHandlerTestCase(unittest.TestCase):
"""Test the functionality of the BaseHTTPServer focussing on
BaseHTTPRequestHandler.
"""
HTTPResponseMatch = re.compile('HTTP/1.[0-9]+ 200 OK')
def setUp (self):
self.handler = SocketlessRequestHandler()
def send_typical_request(self, message):
input_msg = StringIO(message)
output = StringIO()
self.handler.rfile = input_msg
self.handler.wfile = output
self.handler.handle_one_request()
output.seek(0)
return output.readlines()
def verify_get_called(self):
self.assertTrue(self.handler.get_called)
def verify_expected_headers(self, headers):
for fieldName in 'Server: ', 'Date: ', 'Content-Type: ':
self.assertEqual(sum(h.startswith(fieldName) for h in headers), 1)
def verify_http_server_response(self, response):
match = self.HTTPResponseMatch.search(response)
self.assertTrue(match is not None)
def test_http_1_1(self):
result = self.send_typical_request('GET / HTTP/1.1\r\n\r\n')
self.verify_http_server_response(result[0])
self.verify_expected_headers(result[1:-1])
self.verify_get_called()
self.assertEqual(result[-1], '<html><body>Data</body></html>\r\n')
def test_http_1_0(self):
result = self.send_typical_request('GET / HTTP/1.0\r\n\r\n')
self.verify_http_server_response(result[0])
self.verify_expected_headers(result[1:-1])
self.verify_get_called()
self.assertEqual(result[-1], '<html><body>Data</body></html>\r\n')
def test_http_0_9(self):
result = self.send_typical_request('GET / HTTP/0.9\r\n\r\n')
self.assertEqual(len(result), 1)
self.assertEqual(result[0], '<html><body>Data</body></html>\r\n')
self.verify_get_called()
def test_with_continue_1_0(self):
result = self.send_typical_request('GET / HTTP/1.0\r\nExpect: 100-continue\r\n\r\n')
self.verify_http_server_response(result[0])
self.verify_expected_headers(result[1:-1])
self.verify_get_called()
self.assertEqual(result[-1], '<html><body>Data</body></html>\r\n')
def test_request_length(self):
# Issue #10714: huge request lines are discarded, to avoid Denial
# of Service attacks.
result = self.send_typical_request(b'GET ' + b'x' * 65537)
self.assertEqual(result[0], b'HTTP/1.1 414 Request-URI Too Long\r\n')
self.assertFalse(self.handler.get_called)
class BaseHTTPServerTestCase(BaseTestCase):
class request_handler(NoLogRequestHandler, BaseHTTPRequestHandler):
protocol_version = 'HTTP/1.1'
default_request_version = 'HTTP/1.1'
def do_TEST(self):
self.send_response(204)
self.send_header('Content-Type', 'text/html')
self.send_header('Connection', 'close')
self.end_headers()
def do_KEEP(self):
self.send_response(204)
self.send_header('Content-Type', 'text/html')
self.send_header('Connection', 'keep-alive')
self.end_headers()
def do_KEYERROR(self):
self.send_error(999)
def do_CUSTOM(self):
self.send_response(999)
self.send_header('Content-Type', 'text/html')
self.send_header('Connection', 'close')
self.end_headers()
def setUp(self):
BaseTestCase.setUp(self)
self.con = httplib.HTTPConnection('localhost', self.PORT)
self.con.connect()
def test_command(self):
self.con.request('GET', '/')
res = self.con.getresponse()
self.assertEqual(res.status, 501)
def test_request_line_trimming(self):
self.con._http_vsn_str = 'HTTP/1.1\n'
self.con.putrequest('GET', '/')
self.con.endheaders()
res = self.con.getresponse()
self.assertEqual(res.status, 501)
def test_version_bogus(self):
self.con._http_vsn_str = 'FUBAR'
self.con.putrequest('GET', '/')
self.con.endheaders()
res = self.con.getresponse()
self.assertEqual(res.status, 400)
def test_version_digits(self):
self.con._http_vsn_str = 'HTTP/9.9.9'
self.con.putrequest('GET', '/')
self.con.endheaders()
res = self.con.getresponse()
self.assertEqual(res.status, 400)
def test_version_none_get(self):
self.con._http_vsn_str = ''
self.con.putrequest('GET', '/')
self.con.endheaders()
res = self.con.getresponse()
self.assertEqual(res.status, 501)
def test_version_none(self):
self.con._http_vsn_str = ''
self.con.putrequest('PUT', '/')
self.con.endheaders()
res = self.con.getresponse()
self.assertEqual(res.status, 400)
def test_version_invalid(self):
self.con._http_vsn = 99
self.con._http_vsn_str = 'HTTP/9.9'
self.con.putrequest('GET', '/')
self.con.endheaders()
res = self.con.getresponse()
self.assertEqual(res.status, 505)
def test_send_blank(self):
self.con._http_vsn_str = ''
self.con.putrequest('', '')
self.con.endheaders()
res = self.con.getresponse()
self.assertEqual(res.status, 400)
def test_header_close(self):
self.con.putrequest('GET', '/')
self.con.putheader('Connection', 'close')
self.con.endheaders()
res = self.con.getresponse()
self.assertEqual(res.status, 501)
def test_head_keep_alive(self):
self.con._http_vsn_str = 'HTTP/1.1'
self.con.putrequest('GET', '/')
self.con.putheader('Connection', 'keep-alive')
self.con.endheaders()
res = self.con.getresponse()
self.assertEqual(res.status, 501)
def test_handler(self):
self.con.request('TEST', '/')
res = self.con.getresponse()
self.assertEqual(res.status, 204)
def test_return_header_keep_alive(self):
self.con.request('KEEP', '/')
res = self.con.getresponse()
self.assertEqual(res.getheader('Connection'), 'keep-alive')
self.con.request('TEST', '/')
self.addCleanup(self.con.close)
def test_internal_key_error(self):
self.con.request('KEYERROR', '/')
res = self.con.getresponse()
self.assertEqual(res.status, 999)
def test_return_custom_status(self):
self.con.request('CUSTOM', '/')
res = self.con.getresponse()
self.assertEqual(res.status, 999)
class SimpleHTTPServerTestCase(BaseTestCase):
class request_handler(NoLogRequestHandler, SimpleHTTPRequestHandler):
pass
def setUp(self):
BaseTestCase.setUp(self)
self.cwd = os.getcwd()
basetempdir = tempfile.gettempdir()
os.chdir(basetempdir)
self.data = 'We are the knights who say Ni!'
self.tempdir = tempfile.mkdtemp(dir=basetempdir)
self.tempdir_name = os.path.basename(self.tempdir)
temp = open(os.path.join(self.tempdir, 'test'), 'wb')
temp.write(self.data)
temp.close()
def tearDown(self):
try:
os.chdir(self.cwd)
try:
shutil.rmtree(self.tempdir)
except OSError:
pass
finally:
BaseTestCase.tearDown(self)
def check_status_and_reason(self, response, status, data=None):
body = response.read()
self.assertTrue(response)
self.assertEqual(response.status, status)
self.assertIsNotNone(response.reason)
if data:
self.assertEqual(data, body)
def test_get(self):
#constructs the path relative to the root directory of the HTTPServer
response = self.request(self.tempdir_name + '/test')
self.check_status_and_reason(response, 200, data=self.data)
response = self.request(self.tempdir_name + '/')
self.check_status_and_reason(response, 200)
response = self.request(self.tempdir_name)
self.check_status_and_reason(response, 301)
response = self.request('/ThisDoesNotExist')
self.check_status_and_reason(response, 404)
response = self.request('/' + 'ThisDoesNotExist' + '/')
self.check_status_and_reason(response, 404)
f = open(os.path.join(self.tempdir_name, 'index.html'), 'w')
response = self.request('/' + self.tempdir_name + '/')
self.check_status_and_reason(response, 200)
# chmod() doesn't work as expected on Windows, and filesystem
# permissions are ignored by root on Unix.
if os.name == 'posix' and os.geteuid() != 0:
os.chmod(self.tempdir, 0)
response = self.request(self.tempdir_name + '/')
self.check_status_and_reason(response, 404)
os.chmod(self.tempdir, 0755)
def test_head(self):
response = self.request(
self.tempdir_name + '/test', method='HEAD')
self.check_status_and_reason(response, 200)
self.assertEqual(response.getheader('content-length'),
str(len(self.data)))
self.assertEqual(response.getheader('content-type'),
'application/octet-stream')
def test_invalid_requests(self):
response = self.request('/', method='FOO')
self.check_status_and_reason(response, 501)
# requests must be case sensitive,so this should fail too
response = self.request('/', method='get')
self.check_status_and_reason(response, 501)
response = self.request('/', method='GETs')
self.check_status_and_reason(response, 501)
cgi_file1 = """\
#!%s
print "Content-type: text/html"
print
print "Hello World"
"""
cgi_file2 = """\
#!%s
import cgi
print "Content-type: text/html"
print
form = cgi.FieldStorage()
print "%%s, %%s, %%s" %% (form.getfirst("spam"), form.getfirst("eggs"),
form.getfirst("bacon"))
"""
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"This test can't be run reliably as root (issue #13308).")
class CGIHTTPServerTestCase(BaseTestCase):
class request_handler(NoLogRequestHandler, CGIHTTPRequestHandler):
pass
def setUp(self):
BaseTestCase.setUp(self)
self.parent_dir = tempfile.mkdtemp()
self.cgi_dir = os.path.join(self.parent_dir, 'cgi-bin')
os.mkdir(self.cgi_dir)
# The shebang line should be pure ASCII: use symlink if possible.
# See issue #7668.
if hasattr(os, 'symlink'):
self.pythonexe = os.path.join(self.parent_dir, 'python')
os.symlink(sys.executable, self.pythonexe)
else:
self.pythonexe = sys.executable
self.file1_path = os.path.join(self.cgi_dir, 'file1.py')
with open(self.file1_path, 'w') as file1:
file1.write(cgi_file1 % self.pythonexe)
os.chmod(self.file1_path, 0777)
self.file2_path = os.path.join(self.cgi_dir, 'file2.py')
with open(self.file2_path, 'w') as file2:
file2.write(cgi_file2 % self.pythonexe)
os.chmod(self.file2_path, 0777)
self.cwd = os.getcwd()
os.chdir(self.parent_dir)
def tearDown(self):
try:
os.chdir(self.cwd)
if self.pythonexe != sys.executable:
os.remove(self.pythonexe)
os.remove(self.file1_path)
os.remove(self.file2_path)
os.rmdir(self.cgi_dir)
os.rmdir(self.parent_dir)
finally:
BaseTestCase.tearDown(self)
def test_url_collapse_path(self):
# verify tail is the last portion and head is the rest on proper urls
test_vectors = {
'': '//',
'..': IndexError,
'/.//..': IndexError,
'/': '//',
'//': '//',
'/\\': '//\\',
'/.//': '//',
'cgi-bin/file1.py': '/cgi-bin/file1.py',
'/cgi-bin/file1.py': '/cgi-bin/file1.py',
'a': '//a',
'/a': '//a',
'//a': '//a',
'./a': '//a',
'./C:/': '/C:/',
'/a/b': '/a/b',
'/a/b/': '/a/b/',
'/a/b/.': '/a/b/',
'/a/b/c/..': '/a/b/',
'/a/b/c/../d': '/a/b/d',
'/a/b/c/../d/e/../f': '/a/b/d/f',
'/a/b/c/../d/e/../../f': '/a/b/f',
'/a/b/c/../d/e/.././././..//f': '/a/b/f',
'../a/b/c/../d/e/.././././..//f': IndexError,
'/a/b/c/../d/e/../../../f': '/a/f',
'/a/b/c/../d/e/../../../../f': '//f',
'/a/b/c/../d/e/../../../../../f': IndexError,
'/a/b/c/../d/e/../../../../f/..': '//',
'/a/b/c/../d/e/../../../../f/../.': '//',
}
for path, expected in test_vectors.iteritems():
if isinstance(expected, type) and issubclass(expected, Exception):
self.assertRaises(expected,
CGIHTTPServer._url_collapse_path, path)
else:
actual = CGIHTTPServer._url_collapse_path(path)
self.assertEqual(expected, actual,
msg='path = %r\nGot: %r\nWanted: %r' %
(path, actual, expected))
def test_headers_and_content(self):
res = self.request('/cgi-bin/file1.py')
self.assertEqual(('Hello World\n', 'text/html', 200),
(res.read(), res.getheader('Content-type'), res.status))
def test_post(self):
params = urllib.urlencode({'spam' : 1, 'eggs' : 'python', 'bacon' : 123456})
headers = {'Content-type' : 'application/x-www-form-urlencoded'}
res = self.request('/cgi-bin/file2.py', 'POST', params, headers)
self.assertEqual(res.read(), '1, python, 123456\n')
def test_invaliduri(self):
res = self.request('/cgi-bin/invalid')
res.read()
self.assertEqual(res.status, 404)
def test_authorization(self):
headers = {'Authorization' : 'Basic %s' %
base64.b64encode('username:pass')}
res = self.request('/cgi-bin/file1.py', 'GET', headers=headers)
self.assertEqual(('Hello World\n', 'text/html', 200),
(res.read(), res.getheader('Content-type'), res.status))
def test_no_leading_slash(self):
# http://bugs.python.org/issue2254
res = self.request('cgi-bin/file1.py')
self.assertEqual(('Hello World\n', 'text/html', 200),
(res.read(), res.getheader('Content-type'), res.status))
def test_os_environ_is_not_altered(self):
signature = "Test CGI Server"
os.environ['SERVER_SOFTWARE'] = signature
res = self.request('/cgi-bin/file1.py')
self.assertEqual((b'Hello World\n', 'text/html', 200),
(res.read(), res.getheader('Content-type'), res.status))
self.assertEqual(os.environ['SERVER_SOFTWARE'], signature)
class SimpleHTTPRequestHandlerTestCase(unittest.TestCase):
""" Test url parsing """
def setUp(self):
self.translated = os.getcwd()
self.translated = os.path.join(self.translated, 'filename')
self.handler = SocketlessRequestHandler()
def test_query_arguments(self):
path = self.handler.translate_path('/filename')
self.assertEqual(path, self.translated)
path = self.handler.translate_path('/filename?foo=bar')
self.assertEqual(path, self.translated)
path = self.handler.translate_path('/filename?a=b&spam=eggs#zot')
self.assertEqual(path, self.translated)
def test_start_with_double_slash(self):
path = self.handler.translate_path('//filename')
self.assertEqual(path, self.translated)
path = self.handler.translate_path('//filename?foo=bar')
self.assertEqual(path, self.translated)
def test_main(verbose=None):
try:
cwd = os.getcwd()
test_support.run_unittest(BaseHTTPRequestHandlerTestCase,
SimpleHTTPRequestHandlerTestCase,
BaseHTTPServerTestCase,
SimpleHTTPServerTestCase,
CGIHTTPServerTestCase
)
finally:
os.chdir(cwd)
if __name__ == '__main__':
test_main()
| apache-2.0 |
Sidney84/pa-chromium | tools/telemetry/telemetry/core/chrome/inspector_memory.py | 33 | 1541 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
class InspectorMemoryException(Exception):
pass
class InspectorMemory(object):
"""Communicates with the remote inspector's Memory domain."""
def __init__(self, inspector_backend):
self._inspector_backend = inspector_backend
self._inspector_backend.RegisterDomain(
'Memory',
self._OnNotification,
self._OnClose)
def _OnNotification(self, msg):
pass
def _OnClose(self):
pass
def GetDOMCounters(self, timeout):
"""Retrieves DOM element counts.
Args:
timeout: The number of seconds to wait for the inspector backend to
service the request before timing out.
Returns:
A dictionary containing the counts associated with "nodes", "documents",
and "jsEventListeners".
"""
res = self._inspector_backend.SyncRequest({
'method': 'Memory.getDOMCounters'
}, timeout)
if ('result' not in res or
'nodes' not in res['result'] or
'documents' not in res['result'] or
'jsEventListeners' not in res['result']):
raise InspectorMemoryException(
'Inspector returned unexpected result for Memory.getDOMCounters:\n' +
json.dumps(res, indent=2))
return {
'nodes': res['result']['nodes'],
'documents': res['result']['documents'],
'jsEventListeners': res['result']['jsEventListeners']
}
| bsd-3-clause |
oasiswork/odoo | addons/l10n_ro/res_partner.py | 309 | 2255 | # -*- encoding: utf-8 -*-
##############################################################################
#
# @author - Fekete Mihai <feketemihai@gmail.com>
# Copyright (C) 2011 TOTAL PC SYSTEMS (http://www.www.erpsystems.ro).
# Copyright (C) 2009 (<http://www.filsystem.ro>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_partner(osv.osv):
    """Romanian localisation overlay for partners: adds the NRC field."""
    _name = "res.partner"
    _inherit = "res.partner"

    _columns = {
        # Registration number at the Romanian Registry of Commerce.
        'nrc' : fields.char('NRC', help='Registration number at the Registry of Commerce'),
    }

    def _auto_init(self, cr, context=None):
        result = super(res_partner, self)._auto_init(cr, context=context)
        # Drop the uniqueness indexes on vat/nrc for "commercial entities":
        # they are not mandatory under Romanian legislation. VAT numbers are
        # unique, but NRC numbers are not: some entities have no NRC at all
        # and the format has changed several times, so the only reliable
        # check is the Ministry of Finance's online service.
        cr.execute("""
            DROP INDEX IF EXISTS res_partner_vat_uniq_for_companies;
            DROP INDEX IF EXISTS res_partner_nrc_uniq_for_companies;
        """)
        return result

    def _commercial_fields(self, cr, uid, context=None):
        # Propagate NRC from the commercial partner down to its contacts.
        return super(res_partner, self)._commercial_fields(cr, uid, context=context) + ['nrc']
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
siddartha1992/cloud-custodian | c7n/resources/gamelift.py | 2 | 1452 | # Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from c7n.manager import resources
from c7n.query import QueryResourceManager
@resources.register('gamelift-build')
class GameLiftBuild(QueryResourceManager):
    """Custodian resource manager for AWS GameLift builds."""

    class resource_type(object):
        # boto3 service name.
        service = 'gamelift'
        # API call, response key, and extra args used to enumerate resources.
        enum_spec = ('list_builds', 'Builds', None)
        id = 'BuildId'
        name = 'Name'
        date = 'CreationTime'
        # No CloudWatch metrics dimension for this resource type.
        dimension = None
@resources.register('gamelift-fleet')
class GameLiftFleet(QueryResourceManager):
    """Custodian resource manager for AWS GameLift fleets."""

    class resource_type(object):
        # boto3 service name.
        service = 'gamelift'
        # list_fleets only returns ids; details are fetched in batch below.
        enum_spec = ('list_fleets', 'FleetIds', None)
        id = 'FleetId'
        name = 'Name'
        date = 'CreationTime'
        # No CloudWatch metrics dimension for this resource type.
        dimension = None
        # Batch call used to augment the enumerated ids with attributes.
        batch_detail_spec = (
            "describe_fleet_attributes", "FleetIds", None, "FleetAttributes")
| apache-2.0 |
grgbr/karn | script/slist_run_pt.py | 2 | 2438 | #!/usr/bin/python3
import sys
import os
import argparse as argp
import subprocess as subproc
if __name__ == '__main__':
    # Command line: BIN_FILE OUT_DIR KEY_NR PRESORT ALGORITHM LOOP_NR
    parser = argp.ArgumentParser(description='Sort input integer data file '
                                             'for performance assessment.')
    parser.add_argument('bin_path', metavar='BIN_FILE',
                        type=str, nargs=1,
                        help='binary file to execute')
    parser.add_argument('dir_path', metavar='OUT_DIR',
                        type=str, nargs=1,
                        help='output directory where to generate data')
    parser.add_argument('key_nr', metavar='KEY_NR',
                        type=int, nargs=1,
                        help='number of keys to generate')
    parser.add_argument('presort', metavar='PRESORT',
                        type=str, nargs=1,
                        choices=['fullrev', 'rarerev', 'even', 'rarein',
                                 'fullin', 'worstins', 'random'],
                        help='presorting scheme')
    parser.add_argument('algo', metavar='ALGORITHM',
                        type=str, nargs=1,
                        choices=['insertion', 'selection', 'bubble',
                                 'merge'],
                        help='sorting algorithm')
    parser.add_argument('loop_nr', metavar='LOOP_NR',
                        type=int, nargs=1,
                        help='number of measurement loops to run')
    args = parser.parse_args()

    nr_keys = args.key_nr[0]
    # Reject negative counts and anything above 2^30 keys.
    if nr_keys < 0 or nr_keys > (1 << 30):
        parser.error("argument KEY_NR: invalid number of keys specified")

    scheme = args.presort[0]

    nr_loops = args.loop_nr[0]
    if nr_loops < 0:
        parser.error("argument LOOP_NR: invalid number of measurement loops")

    # Input data file name encodes type, key count and presorting scheme.
    data_path = os.path.join(args.dir_path[0],
                             "type-int_keynr-" + str(nr_keys) +
                             "_presort-" + scheme + ".dat")

    try:
        raw = subproc.check_output([args.bin_path[0], data_path,
                                    args.algo[0], str(nr_loops)],
                                   timeout=120)
    except Exception as err:
        print(err)
        sys.exit(1)

    print("type=int keynr=%d presort=%s algo=%s" %
          (nr_keys, scheme, args.algo[0]))
    for line in raw.decode(sys.stdout.encoding).split('\n'):
        # Skip empty lines.
        if line:
            print(line)
| gpl-3.0 |
taknevski/tensorflow-xsmm | tensorflow/contrib/bayesflow/python/kernel_tests/entropy_test.py | 22 | 13119 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Monte Carlo Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions as distributions_lib
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.bayesflow.python.ops import entropy_impl as entropy_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
distributions = distributions_lib
layers = layers_lib
entropy = entropy_lib
class NormalNoEntropy(distributions.Normal):  # pylint: disable=no-init
  """Normal distribution without a `.entropy` method."""

  def entropy(self):
    # NOTE(review): this *returns* a NotImplementedError instance instead of
    # raising it. Confirm that entropy_shannon's fallback path treats a
    # returned error object as "analytic entropy unavailable"; if it relies
    # on catching the exception, this should be `raise NotImplementedError`.
    return NotImplementedError('Entropy removed by gremlins')
def get_train_op(scalar_loss, optimizer='SGD', learning_rate=1.0, decay=0.0):
  """Build a training op minimizing `scalar_loss` with a decaying rate."""
  global_step = variables.Variable(0)

  def decay_fn(rate, t):
    # Polynomial decay: rate * (1 + t)^(-decay).
    return rate * (1 + math_ops.to_float(t))**(-decay)

  return layers.optimize_loss(
      scalar_loss,
      global_step,
      learning_rate,
      optimizer,
      learning_rate_decay_fn=decay_fn)
def _assert_monotonic_decreasing(array, atol=1e-5):
  """Assert the flattened values never increase by more than `atol`."""
  values = np.asarray(array)
  # A sequence is decreasing iff its negation is increasing.
  _assert_monotonic_increasing(-values, atol=atol)
def _assert_monotonic_increasing(array, atol=1e-5):
array = np.asarray(array)
diff = np.diff(array.ravel())
np.testing.assert_array_less(-1 * atol, diff)
class ElboRatioTest(test.TestCase):
  """Show sampling converges to true KL values."""

  def setUp(self):
    # Fixed seed for reproducible random distribution parameters.
    self._rng = np.random.RandomState(0)

  def test_convergence_to_kl_using_sample_form_on_3dim_normal(self):
    # Test that the sample mean KL is the same as analytic when we use samples
    # to estimate every part of the KL divergence ratio.
    vector_shape = (2, 3)
    n_samples = 5000

    with self.test_session():
      q = distributions.MultivariateNormalDiag(
          loc=self._rng.rand(*vector_shape),
          scale_diag=self._rng.rand(*vector_shape))
      p = distributions.MultivariateNormalDiag(
          loc=self._rng.rand(*vector_shape),
          scale_diag=self._rng.rand(*vector_shape))

      # In this case, the log_ratio is the KL.
      sample_kl = -1 * entropy.elbo_ratio(
          log_p=p.log_prob,
          q=q,
          n=n_samples,
          form=entropy.ELBOForms.sample,
          seed=42)
      actual_kl = distributions.kl(q, p)

      # Relative tolerance (rtol) chosen 2 times as large as minimum needed to
      # pass.
      self.assertEqual((2,), sample_kl.get_shape())
      self.assertAllClose(actual_kl.eval(), sample_kl.eval(), rtol=0.05)

  def test_convergence_to_kl_using_analytic_entropy_form_on_3dim_normal(self):
    # Test that the sample mean KL is the same as analytic when we use an
    # analytic entropy combined with sampled cross-entropy.
    n_samples = 5000

    vector_shape = (2, 3)
    with self.test_session():
      q = distributions.MultivariateNormalDiag(
          loc=self._rng.rand(*vector_shape),
          scale_diag=self._rng.rand(*vector_shape))
      p = distributions.MultivariateNormalDiag(
          loc=self._rng.rand(*vector_shape),
          scale_diag=self._rng.rand(*vector_shape))

      # In this case, the log_ratio is the KL.
      sample_kl = -1 * entropy.elbo_ratio(
          log_p=p.log_prob,
          q=q,
          n=n_samples,
          form=entropy.ELBOForms.analytic_entropy,
          seed=42)
      actual_kl = distributions.kl(q, p)

      # Relative tolerance (rtol) chosen 2 times as large as minimum needed to
      # pass.
      self.assertEqual((2,), sample_kl.get_shape())
      self.assertAllClose(actual_kl.eval(), sample_kl.eval(), rtol=0.1)

  def test_sample_kl_zero_when_p_and_q_are_the_same_distribution(self):
    n_samples = 50

    vector_shape = (2, 3)
    with self.test_session():
      q = distributions.MultivariateNormalDiag(
          loc=self._rng.rand(*vector_shape),
          scale_diag=self._rng.rand(*vector_shape))

      # In this case, the log_ratio is the KL, which is identically zero.
      sample_kl = -1 * entropy.elbo_ratio(
          log_p=q.log_prob,
          q=q,
          n=n_samples,
          form=entropy.ELBOForms.sample,
          seed=42)

      self.assertEqual((2,), sample_kl.get_shape())
      self.assertAllClose(np.zeros(2), sample_kl.eval())
class EntropyShannonTest(test.TestCase):
  """Checks entropy_shannon against exact entropies and sample estimates."""

  def test_normal_entropy_default_form_uses_exact_entropy(self):
    with self.test_session():
      dist = distributions.Normal(loc=1.11, scale=2.22)
      mc_entropy = entropy.entropy_shannon(dist, n=11)
      exact_entropy = dist.entropy()
      self.assertEqual(exact_entropy.get_shape(), mc_entropy.get_shape())
      self.assertAllClose(exact_entropy.eval(), mc_entropy.eval())

  def test_normal_entropy_analytic_form_uses_exact_entropy(self):
    with self.test_session():
      dist = distributions.Normal(loc=1.11, scale=2.22)
      mc_entropy = entropy.entropy_shannon(
          dist, form=entropy.ELBOForms.analytic_entropy)
      exact_entropy = dist.entropy()
      self.assertEqual(exact_entropy.get_shape(), mc_entropy.get_shape())
      self.assertAllClose(exact_entropy.eval(), mc_entropy.eval())

  def test_normal_entropy_sample_form_gets_approximate_answer(self):
    # Tested by showing we get a good answer that is not exact.
    with self.test_session():
      dist = distributions.Normal(loc=1.11, scale=2.22)
      mc_entropy = entropy.entropy_shannon(
          dist, n=1000, form=entropy.ELBOForms.sample, seed=0)
      exact_entropy = dist.entropy()
      self.assertEqual(exact_entropy.get_shape(), mc_entropy.get_shape())

      # Relative tolerance (rtol) chosen 2 times as large as minimum needed to
      # pass.
      self.assertAllClose(exact_entropy.eval(), mc_entropy.eval(), rtol=0.01)

      # Make sure there is some error, proving we used samples
      self.assertLess(0.0001, math_ops.abs(exact_entropy - mc_entropy).eval())

  def test_default_entropy_falls_back_on_sample_if_analytic_not_available(self):
    # Tested by showing we get a good answer that is not exact.
    with self.test_session():
      # NormalNoEntropy is like a Normal, but does not have .entropy method, so
      # we are forced to fall back on sample entropy.
      dist_no_entropy = NormalNoEntropy(loc=1.11, scale=2.22)
      dist_yes_entropy = distributions.Normal(loc=1.11, scale=2.22)

      mc_entropy = entropy.entropy_shannon(
          dist_no_entropy, n=1000, form=entropy.ELBOForms.sample, seed=0)
      exact_entropy = dist_yes_entropy.entropy()
      self.assertEqual(exact_entropy.get_shape(), mc_entropy.get_shape())

      # Relative tolerance (rtol) chosen 2 times as large as minimum needed to
      # pass.
      self.assertAllClose(exact_entropy.eval(), mc_entropy.eval(), rtol=0.01)

      # Make sure there is some error, proving we used samples
      self.assertLess(0.0001, math_ops.abs(exact_entropy - mc_entropy).eval())
class RenyiRatioTest(test.TestCase):
  """Show renyi_ratio is minimized when the distributions match."""

  def setUp(self):
    # Fixed seed for reproducible random distribution parameters.
    self._rng = np.random.RandomState(0)

  def test_fitting_two_dimensional_normal_n_equals_1000(self):
    # Minimizing Renyi divergence should allow us to make one normal match
    # another one exactly.
    n = 1000
    mu_true = np.array([1.0, -1.0], dtype=np.float64)
    chol_true = np.array([[2.0, 0.0], [0.5, 1.0]], dtype=np.float64)
    with self.test_session() as sess:
      target = distributions.MultivariateNormalTriL(mu_true, chol_true)

      # Set up q distribution by defining mean/covariance as Variables
      mu = variables.Variable(
          np.zeros(mu_true.shape), dtype=mu_true.dtype, name='mu')
      mat = variables.Variable(
          np.zeros(chol_true.shape), dtype=chol_true.dtype, name='mat')
      # softplus keeps the learned Cholesky diagonal positive.
      chol = distributions.matrix_diag_transform(mat, transform=nn_ops.softplus)
      q = distributions.MultivariateNormalTriL(mu, chol)
      for alpha in [0.25, 0.75]:
        negative_renyi_divergence = entropy.renyi_ratio(
            log_p=target.log_prob, q=q, n=n, alpha=alpha, seed=0)
        train_op = get_train_op(
            math_ops.reduce_mean(-negative_renyi_divergence),
            optimizer='SGD',
            learning_rate=0.5,
            decay=0.1)
        variables.global_variables_initializer().run()
        renyis = []
        for step in range(1000):
          sess.run(train_op)
          if step in [1, 5, 100]:
            renyis.append(negative_renyi_divergence.eval())

        # This optimization should maximize the renyi divergence.
        _assert_monotonic_increasing(renyis, atol=0)

        # Relative tolerance (rtol) chosen 2 times as large as minimum needed
        # to pass.
        self.assertAllClose(target.loc.eval(), q.loc.eval(), rtol=0.06)
        self.assertAllClose(target.scale.to_dense().eval(),
                            q.scale.to_dense().eval(),
                            rtol=0.1)

  def test_divergence_between_identical_distributions_is_zero(self):
    n = 1000
    vector_shape = (2, 3)
    with self.test_session():
      q = distributions.MultivariateNormalDiag(
          loc=self._rng.rand(*vector_shape),
          scale_diag=self._rng.rand(*vector_shape))

      for alpha in [0.25, 0.75]:
        negative_renyi_divergence = entropy.renyi_ratio(
            log_p=q.log_prob, q=q, n=n, alpha=alpha, seed=0)
        self.assertEqual((2,), negative_renyi_divergence.get_shape())
        self.assertAllClose(np.zeros(2), negative_renyi_divergence.eval())
class RenyiAlphaTest(test.TestCase):
  """Checks the renyi_alpha decay schedule and its argument validation."""

  def test_with_three_alphas(self):
    with self.test_session():
      for dtype in (dtypes.float32, dtypes.float64):
        alpha_min = constant_op.constant(0.0, dtype=dtype)
        alpha_max = 0.5
        decay_time = 3

        alpha_0 = entropy.renyi_alpha(
            0, decay_time, alpha_min=alpha_min, alpha_max=alpha_max)
        alpha_1 = entropy.renyi_alpha(
            1, decay_time, alpha_min=alpha_min, alpha_max=alpha_max)
        alpha_2 = entropy.renyi_alpha(
            2, decay_time, alpha_min=alpha_min, alpha_max=alpha_max)
        alpha_3 = entropy.renyi_alpha(
            3, decay_time, alpha_min=alpha_min, alpha_max=alpha_max)

        # Alpha should start at alpha_max.
        self.assertAllClose(alpha_max, alpha_0.eval(), atol=1e-5)
        # Alpha should finish at alpha_min.
        self.assertAllClose(alpha_min.eval(), alpha_3.eval(), atol=1e-5)
        # In between, alpha should be monotonically decreasing.
        _assert_monotonic_decreasing(
            [alpha_0.eval(), alpha_1.eval(), alpha_2.eval(), alpha_3.eval()])

  def test_non_scalar_input_raises(self):
    with self.test_session():
      # Good values here
      step = 0
      alpha_min = 0.0
      alpha_max = 0.5
      decay_time = 3

      # Use one bad value inside each check.
      # The "bad" value is always the non-scalar one.
      with self.assertRaisesRegexp(ValueError, 'must be scalar'):
        entropy.renyi_alpha(
            [step], decay_time, alpha_min=alpha_min, alpha_max=alpha_max).eval()
      with self.assertRaisesRegexp(ValueError, 'must be scalar'):
        entropy.renyi_alpha(
            step, [decay_time], alpha_min=alpha_min, alpha_max=alpha_max).eval()
      with self.assertRaisesRegexp(ValueError, 'must be scalar'):
        entropy.renyi_alpha(
            step, decay_time, alpha_min=[alpha_min], alpha_max=alpha_max).eval()
      with self.assertRaisesRegexp(ValueError, 'must be scalar'):
        entropy.renyi_alpha(
            step, decay_time, alpha_min=alpha_min, alpha_max=[alpha_max]).eval()

  def test_input_with_wrong_sign_raises(self):
    with self.test_session():
      # Good values here
      step = 0
      alpha_min = 0.0
      alpha_max = 0.5
      decay_time = 3

      # Use one bad value inside each check.
      # The "bad" value is always the one with the invalid sign.
      with self.assertRaisesOpError('decay_time must be positive'):
        entropy.renyi_alpha(
            step, 0.0, alpha_min=alpha_min, alpha_max=alpha_max).eval()
      with self.assertRaisesOpError('step must be non-negative'):
        entropy.renyi_alpha(
            -1, decay_time, alpha_min=alpha_min, alpha_max=alpha_max).eval()
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
Fokko/incubator-airflow | airflow/contrib/hooks/azure_container_volume_hook.py | 2 | 2154 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from azure.mgmt.containerinstance.models import AzureFileVolume, Volume
from airflow.hooks.base_hook import BaseHook
class AzureContainerVolumeHook(BaseHook):
    """
    A hook which wraps an Azure Volume.

    :param wasb_conn_id: connection id of a Azure storage account of
        which file shares should be mounted
    :type wasb_conn_id: str
    """

    def __init__(self, wasb_conn_id='wasb_default'):
        self.conn_id = wasb_conn_id

    def get_storagekey(self):
        # Prefer an AccountKey embedded in a connection string, when one is
        # configured; otherwise fall back to the connection password.
        conn = self.get_connection(self.conn_id)
        extra = conn.extra_dejson
        if 'connection_string' in extra:
            for token in extra['connection_string'].split(";"):
                key, value = token.split("=", 1)
                if key == "AccountKey":
                    return value
        return conn.password

    def get_file_volume(self, mount_name, share_name,
                        storage_account_name, read_only=False):
        # Build an Azure file-share backed Volume mount definition.
        azure_file = AzureFileVolume(
            share_name=share_name,
            storage_account_name=storage_account_name,
            read_only=read_only,
            storage_account_key=self.get_storagekey())
        return Volume(name=mount_name, azure_file=azure_file)
| apache-2.0 |
cernops/neutron | neutron/db/l3_dvrscheduler_db.py | 4 | 14512 | # (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.orm import joinedload
from neutron.common import constants as q_const
from neutron.common import utils as n_utils
from neutron.db import agents_db
from neutron.db import l3_agentschedulers_db as l3agent_sch_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.openstack.common import log as logging
from neutron.plugins.ml2 import db as ml2_db
LOG = logging.getLogger(__name__)
class CentralizedSnatL3AgentBinding(model_base.BASEV2):
    """Represents binding between Neutron Centralized SNAT and L3 agents."""

    __tablename__ = "csnat_l3_agent_bindings"

    # Router being serviced; row disappears with the router (CASCADE).
    router_id = sa.Column(sa.String(36),
                          sa.ForeignKey("routers.id", ondelete='CASCADE'),
                          primary_key=True)
    # L3 agent hosting the centralized SNAT for this router.
    l3_agent_id = sa.Column(sa.String(36),
                            sa.ForeignKey("agents.id", ondelete='CASCADE'),
                            nullable=False)
    host_id = sa.Column(sa.String(255))
    # Gateway port used by the centralized SNAT.
    csnat_gw_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'))
    l3_agent = orm.relationship(agents_db.Agent)
    csnat_gw_port = orm.relationship(models_v2.Port)
class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
    """Mixin class for L3 DVR scheduler.

    DVR currently supports the following use cases:

    - East/West (E/W) traffic between VMs: this is handled in a
      distributed manner across Compute Nodes without a centralized element.
      This includes E/W traffic between VMs on the same Compute Node.
    - North/South traffic for Floating IPs (FIP N/S): this is supported on the
      distributed routers on Compute Nodes without any centralized element.
    - North/South traffic for SNAT (SNAT N/S): this is supported via a
      centralized element that handles the SNAT traffic.

    To support these use cases, DVR routers rely on an L3 agent that runs on a
    central node (also known as Network Node or Service Node), as well as, L3
    agents that run individually on each Compute Node of an OpenStack cloud.

    Each L3 agent creates namespaces to route traffic according to the use
    cases outlined above. The mechanism adopted for creating and managing
    these namespaces is via (Router, Agent) binding and Scheduling in general.

    The main difference between distributed routers and centralized ones is
    that in the distributed case, multiple bindings will exist, one for each
    of the agents participating in the routed topology for the specific router.

    These bindings are created in the following circumstances:

    - A subnet is added to a router via router-interface-add, and that subnet
      has running VM's deployed in it. A binding will be created between the
      router and any L3 agent whose Compute Node is hosting the VM(s).
    - An external gateway is set to a router via router-gateway-set. A binding
      will be created between the router and the L3 agent running centrally
      on the Network Node.

    Therefore, any time a router operation occurs (create, update or delete),
    scheduling will determine whether the router needs to be associated to an
    L3 agent, just like a regular centralized router, with the difference that,
    in the distributed case, the bindings required are established based on
    the state of the router and the Compute Nodes.
    """

    def dvr_update_router_addvm(self, context, port):
        """Notify distributed routers servicing a new VM port's subnets."""
        ips = port['fixed_ips']
        for ip in ips:
            subnet = ip['subnet_id']
            filter_sub = {'fixed_ips': {'subnet_id': [subnet]},
                          'device_owner':
                          [q_const.DEVICE_OWNER_DVR_INTERFACE]}
            router_id = None
            ports = self._core_plugin.get_ports(context, filters=filter_sub)
            # NOTE(review): the loop variable shadows the `port` argument;
            # harmless today because the argument is not used past this point.
            for port in ports:
                router_id = port['device_id']
                router_dict = self.get_router(context, router_id)
                if router_dict.get('distributed', False):
                    payload = {'subnet_id': subnet}
                    self.l3_rpc_notifier.routers_updated(
                        context, [router_id], None, payload)
                    break
            LOG.debug('DVR: dvr_update_router_addvm %s ', router_id)

    def get_dvr_routers_by_portid(self, context, port_id):
        """Gets the dvr routers on vmport subnets."""
        router_ids = set()
        port_dict = self._core_plugin.get_port(context, port_id)
        fixed_ips = port_dict['fixed_ips']
        for fixedip in fixed_ips:
            vm_subnet = fixedip['subnet_id']
            filter_sub = {'fixed_ips': {'subnet_id': [vm_subnet]},
                          'device_owner':
                          [q_const.DEVICE_OWNER_DVR_INTERFACE]}
            subnet_ports = self._core_plugin.get_ports(
                context, filters=filter_sub)
            for subnet_port in subnet_ports:
                router_ids.add(subnet_port['device_id'])
        return router_ids

    def get_subnet_ids_on_router(self, context, router_id):
        """Return subnet IDs for interfaces attached to the given router."""
        subnet_ids = set()
        filter_rtr = {'device_id': [router_id]}
        int_ports = self._core_plugin.get_ports(context, filters=filter_rtr)
        for int_port in int_ports:
            int_ips = int_port['fixed_ips']
            # Router interface ports carry a single fixed IP.
            int_subnet = int_ips[0]['subnet_id']
            subnet_ids.add(int_subnet)
        return subnet_ids

    def check_ports_active_on_host_and_subnet(self, context, host,
                                              port_id, subnet_id):
        """Check if there is any dvr serviceable port on the subnet_id."""
        filter_sub = {'fixed_ips': {'subnet_id': [subnet_id]}}
        ports = self._core_plugin.get_ports(context, filters=filter_sub)
        for port in ports:
            # Ignore the port being removed itself (port_id).
            if (n_utils.is_dvr_serviced(port['device_owner'])
                and port['status'] == 'ACTIVE'
                and port['binding:host_id'] == host
                and port['id'] != port_id):
                LOG.debug('DVR: Active port exists for subnet %(subnet_id)s '
                          'on host %(host)s', {'subnet_id': subnet_id,
                                               'host': host})
                return True
        return False

    def dvr_deletens_if_no_port(self, context, port_id):
        """Delete the DVR namespace if no dvr serviced port exists."""
        router_ids = self.get_dvr_routers_by_portid(context, port_id)
        port_host = ml2_db.get_port_binding_host(port_id)
        if not router_ids:
            LOG.debug('No namespaces available for this DVR port %(port)s '
                      'on host %(host)s', {'port': port_id,
                                           'host': port_host})
            return []
        removed_router_info = []
        for router_id in router_ids:
            subnet_ids = self.get_subnet_ids_on_router(context, router_id)
            port_exists_on_subnet = False
            for subnet in subnet_ids:
                if self.check_ports_active_on_host_and_subnet(context,
                                                              port_host,
                                                              port_id,
                                                              subnet):
                    port_exists_on_subnet = True
                    break

            # Another active serviceable port still needs the namespace.
            if port_exists_on_subnet:
                continue
            filter_rtr = {'device_id': [router_id],
                          'device_owner':
                          [q_const.DEVICE_OWNER_DVR_INTERFACE]}
            int_ports = self._core_plugin.get_ports(
                context, filters=filter_rtr)
            for prt in int_ports:
                dvr_binding = (ml2_db.
                               get_dvr_port_binding_by_host(context.session,
                                                            prt['id'],
                                                            port_host))
                if dvr_binding:
                    # unbind this port from router
                    dvr_binding['router_id'] = None
                    dvr_binding.update(dvr_binding)
            agent = self._get_agent_by_type_and_host(context,
                                                     q_const.AGENT_TYPE_L3,
                                                     port_host)
            info = {'router_id': router_id, 'host': port_host,
                    'agent_id': str(agent.id)}
            removed_router_info.append(info)
            LOG.debug('Router namespace %(router_id)s on host %(host)s '
                      'to be deleted', info)
        return removed_router_info

    def bind_snat_router(self, context, router_id, chosen_agent):
        """Bind the router to the chosen l3 agent."""
        with context.session.begin(subtransactions=True):
            binding = CentralizedSnatL3AgentBinding()
            binding.l3_agent = chosen_agent
            binding.router_id = router_id
            context.session.add(binding)
            LOG.debug('SNAT Router %(router_id)s is scheduled to L3 agent '
                      '%(agent_id)s', {'router_id': router_id,
                                       'agent_id': chosen_agent.id})

    def bind_dvr_router_servicenode(self, context, router_id,
                                    chosen_snat_agent):
        """Bind the IR router to service node if not already hosted."""
        query = (context.session.query(l3agent_sch_db.RouterL3AgentBinding).
                 filter_by(router_id=router_id))
        for bind in query:
            if bind.l3_agent_id == chosen_snat_agent.id:
                LOG.debug('Distributed Router %(router_id)s already hosted '
                          'on snat l3_agent %(snat_id)s',
                          {'router_id': router_id,
                           'snat_id': chosen_snat_agent.id})
                return
        with context.session.begin(subtransactions=True):
            binding = l3agent_sch_db.RouterL3AgentBinding()
            binding.l3_agent = chosen_snat_agent
            binding.router_id = router_id
            context.session.add(binding)
            LOG.debug('Binding the distributed router %(router_id)s to '
                      'the snat agent %(snat_id)s',
                      {'router_id': router_id,
                       'snat_id': chosen_snat_agent.id})

    def bind_snat_servicenode(self, context, router_id, snat_candidates):
        """Bind the snat router to the chosen l3 service agent."""
        # Simple random scheduling among the candidate agents.
        chosen_snat_agent = random.choice(snat_candidates)
        self.bind_snat_router(context, router_id, chosen_snat_agent)
        return chosen_snat_agent

    def unbind_snat_servicenode(self, context, router_id):
        """Unbind the snat router to the chosen l3 service agent."""
        vm_ports = []
        with context.session.begin(subtransactions=True):
            query = (context.session.
                     query(CentralizedSnatL3AgentBinding).
                     filter_by(router_id=router_id))
            try:
                binding = query.one()
            except exc.NoResultFound:
                LOG.debug('no snat router binding found for %s', router_id)
                return

            host = binding.l3_agent.host
            subnet_ids = self.get_subnet_ids_on_router(context, router_id)
            for subnet in subnet_ids:
                vm_ports = (
                    self._core_plugin.get_ports_on_host_by_subnet(
                        context, host, subnet))
                if vm_ports:
                    LOG.debug('One or more ports exist on the snat enabled '
                              'l3_agent host %(host)s and router_id %(id)s',
                              {'host': host, 'id': router_id})
                    break
            agent_id = binding.l3_agent_id
            LOG.debug('Delete binding of the SNAT router %(router_id)s '
                      'from agent %(id)s', {'router_id': router_id,
                                            'id': agent_id})
            context.session.delete(binding)

            # Only drop the router/agent binding when no VM ports remain on
            # the agent's host; otherwise the agent still serves the router.
            if not vm_ports:
                query = (context.session.
                         query(l3agent_sch_db.RouterL3AgentBinding).
                         filter_by(router_id=router_id,
                                   l3_agent_id=agent_id).
                         delete(synchronize_session=False))
        self.l3_rpc_notifier.router_removed_from_agent(
            context, router_id, host)
        LOG.debug('Removed binding for router %(router_id)s and '
                  'agent %(id)s', {'router_id': router_id, 'id': agent_id})

    def get_snat_bindings(self, context, router_ids):
        """Retrieves the dvr snat bindings for a router."""
        if not router_ids:
            return []
        query = context.session.query(CentralizedSnatL3AgentBinding)
        query = query.options(joinedload('l3_agent')).filter(
            CentralizedSnatL3AgentBinding.router_id.in_(router_ids))
        return query.all()

    def schedule_snat_router(self, context, router_id, sync_router):
        """Schedule the snat router on l3 service agent."""
        active_l3_agents = self.get_l3_agents(context, active=True)
        if not active_l3_agents:
            LOG.warn(_('No active L3 agents found for SNAT'))
            return
        snat_candidates = self.get_snat_candidates(sync_router,
                                                   active_l3_agents)
        if snat_candidates:
            chosen_agent = self.bind_snat_servicenode(
                context, router_id, snat_candidates)
            self.bind_dvr_router_servicenode(
                context, router_id, chosen_agent)
| apache-2.0 |
GaZ3ll3/numpy | numpy/core/code_generators/genapi.py | 63 | 16719 | """
Get API information encoded in C files.
See ``find_function`` for how functions should be formatted, and
``read_order`` for how the order of the functions should be
specified.
"""
from __future__ import division, absolute_import, print_function
import sys, os, re
try:
import hashlib
md5new = hashlib.md5
except ImportError:
import md5
md5new = md5.new
import textwrap
from os.path import join
__docformat__ = 'restructuredtext'
# The files under src/ that are scanned for API functions
API_FILES = [join('multiarray', 'alloc.c'),
join('multiarray', 'array_assign_array.c'),
join('multiarray', 'array_assign_scalar.c'),
join('multiarray', 'arrayobject.c'),
join('multiarray', 'arraytypes.c.src'),
join('multiarray', 'buffer.c'),
join('multiarray', 'calculation.c'),
join('multiarray', 'conversion_utils.c'),
join('multiarray', 'convert.c'),
join('multiarray', 'convert_datatype.c'),
join('multiarray', 'ctors.c'),
join('multiarray', 'datetime.c'),
join('multiarray', 'datetime_busday.c'),
join('multiarray', 'datetime_busdaycal.c'),
join('multiarray', 'datetime_strings.c'),
join('multiarray', 'descriptor.c'),
join('multiarray', 'einsum.c.src'),
join('multiarray', 'flagsobject.c'),
join('multiarray', 'getset.c'),
join('multiarray', 'item_selection.c'),
join('multiarray', 'iterators.c'),
join('multiarray', 'mapping.c'),
join('multiarray', 'methods.c'),
join('multiarray', 'multiarraymodule.c'),
join('multiarray', 'nditer_api.c'),
join('multiarray', 'nditer_constr.c'),
join('multiarray', 'nditer_pywrap.c'),
join('multiarray', 'nditer_templ.c.src'),
join('multiarray', 'number.c'),
join('multiarray', 'refcount.c'),
join('multiarray', 'scalartypes.c.src'),
join('multiarray', 'scalarapi.c'),
join('multiarray', 'sequence.c'),
join('multiarray', 'shape.c'),
join('multiarray', 'usertypes.c'),
join('umath', 'loops.c.src'),
join('umath', 'ufunc_object.c'),
join('umath', 'ufunc_type_resolution.c'),
join('umath', 'reduction.c'),
]
THIS_DIR = os.path.dirname(__file__)
API_FILES = [os.path.join(THIS_DIR, '..', 'src', a) for a in API_FILES]
def file_in_this_dir(filename):
    """Return *filename* joined onto this script's directory."""
    return os.path.join(THIS_DIR, filename)
def remove_whitespace(s):
    """Return ``s`` with every whitespace character removed."""
    return ''.join(ch for ch in s if not ch.isspace())
def _repl(str):
return str.replace('Bool', 'npy_bool')
class StealRef:
    """Marker for arguments whose reference is stolen by the C function."""

    def __init__(self, arg):
        self.arg = arg  # counting from 1

    def __str__(self):
        fmt = 'NPY_STEALS_REF_TO_ARG(%d)'
        try:
            decorations = [fmt % x for x in self.arg]
        except TypeError:
            # A single argument index rather than an iterable of indices.
            return fmt % self.arg
        return ' '.join(decorations)
class NonNull:
    """Marker for arguments that must never be NULL (GCC nonnull)."""

    def __init__(self, arg):
        self.arg = arg  # counting from 1

    def __str__(self):
        fmt = 'NPY_GCC_NONNULL(%d)'
        try:
            decorations = [fmt % x for x in self.arg]
        except TypeError:
            # A single argument index rather than an iterable of indices.
            return fmt % self.arg
        return ' '.join(decorations)
class Function(object):
    """A C API function signature parsed from the source files."""

    def __init__(self, name, return_type, args, doc=''):
        self.name = name
        self.return_type = _repl(return_type)
        self.args = args
        self.doc = doc

    def _format_arg(self, typename, name):
        # Pointer types bind the '*' to the name: "int *x", not "int * x".
        if typename.endswith('*'):
            return typename + name
        else:
            return typename + ' ' + name

    def __str__(self):
        """Render the C prototype, preceded by a /* doc */ comment if any."""
        argstr = ', '.join([self._format_arg(*a) for a in self.args])
        if self.doc:
            doccomment = '/* %s */\n' % self.doc
        else:
            doccomment = ''
        return '%s%s %s(%s)' % (doccomment, self.return_type, self.name, argstr)

    def to_ReST(self):
        """Render the signature as a reStructuredText literal block."""
        lines = ['::', '', ' ' + self.return_type]
        # \000 is a placeholder so wrapped lines break only at commas.
        argstr = ',\000'.join([self._format_arg(*a) for a in self.args])
        name = ' %s' % (self.name,)
        s = textwrap.wrap('(%s)' % (argstr,), width=72,
                          initial_indent=name,
                          subsequent_indent=' ' * (len(name)+1),
                          break_long_words=False)
        for l in s:
            lines.append(l.replace('\000', ' ').rstrip())
        lines.append('')
        if self.doc:
            lines.append(textwrap.dedent(self.doc))
        return '\n'.join(lines)

    def api_hash(self):
        """Return a short, stable hash of the normalized signature."""
        m = md5new()
        # hashlib's update() requires bytes on Python 3; the signature text
        # is plain ASCII, so encoding does not change the resulting digest.
        m.update(remove_whitespace(self.return_type).encode('ascii'))
        m.update(b'\000')
        m.update(self.name.encode('ascii'))
        m.update(b'\000')
        for typename, name in self.args:
            m.update(remove_whitespace(typename).encode('ascii'))
            m.update(b'\000')
        return m.hexdigest()[:8]
class ParseError(Exception):
    """Raised when a tagged declaration cannot be parsed from a source file."""

    def __init__(self, filename, lineno, msg):
        self.filename = filename
        self.lineno = lineno
        self.msg = msg

    def __str__(self):
        # Compiler-style "file:line:message" rendering
        return '{0}:{1}:{2}'.format(self.filename, self.lineno, self.msg)
def skip_brackets(s, lbrac, rbrac):
    """Return the index in *s* where the bracket depth first returns to zero.

    When *s* starts with *lbrac*, this is the index of the matching *rbrac*.
    Raises ValueError if the brackets never balance.
    """
    depth = 0
    for pos, ch in enumerate(s):
        if ch == lbrac:
            depth += 1
        elif ch == rbrac:
            depth -= 1
        if depth == 0:
            return pos
    raise ValueError("no match '%s' for '%s' (%r)" % (lbrac, rbrac, s))
def split_arguments(argstr):
    """Split a C argument-list string into a list of (typename, name) pairs."""
    arguments = []
    # NOTE(review): bracket_counts and state are never read below -- leftovers.
    bracket_counts = {'(': 0, '[': 0}
    current_argument = []
    state = 0
    i = 0
    def finish_arg():
        # Flush the accumulated characters as one (typename, name) pair
        if current_argument:
            argstr = ''.join(current_argument).strip()
            # Split the trailing identifier off the type, e.g. 'int *x'
            m = re.match(r'(.*(\s+|[*]))(\w+)$', argstr)
            if m:
                typename = m.group(1).strip()
                name = m.group(3)
            else:
                # No identifier found; treat the whole text as the type
                typename = argstr
                name = ''
            arguments.append((typename, name))
            del current_argument[:]
    while i < len(argstr):
        c = argstr[i]
        if c == ',':
            finish_arg()
        elif c == '(':
            # Copy a parenthesized group (e.g. function-pointer arguments)
            # wholesale so commas inside it do not split the argument.
            p = skip_brackets(argstr[i:], '(', ')')
            current_argument += argstr[i:i+p]
            i += p-1
        else:
            current_argument += c
        i += 1
    finish_arg()
    return arguments
def find_functions(filename, tag='API'):
    """
    Scan the file, looking for tagged functions.

    Assuming ``tag=='API'``, a tagged function looks like::

        /*API*/
        static returntype*
        function_name(argtype1 arg1, argtype2 arg2)
        {
        }

    where the return type must be on a separate line, the function
    name must start the line, and the opening ``{`` must start the line.
    An optional documentation comment in ReST format may follow the tag,
    as in::

        /*API
        This function does foo...
        */

    Returns a list of Function objects.
    """
    fo = open(filename, 'r')
    functions = []
    return_type = None
    function_name = None
    function_args = []
    doclist = []
    # Line-oriented state machine over the C source
    SCANNING, STATE_DOC, STATE_RETTYPE, STATE_NAME, STATE_ARGS = list(range(5))
    state = SCANNING
    tagcomment = '/*' + tag
    for lineno, line in enumerate(fo):
        try:
            line = line.strip()
            if state == SCANNING:
                if line.startswith(tagcomment):
                    if line.endswith('*/'):
                        state = STATE_RETTYPE
                    else:
                        state = STATE_DOC
            elif state == STATE_DOC:
                if line.startswith('*/'):
                    state = STATE_RETTYPE
                else:
                    # Strip leading comment decoration before collecting doc
                    line = line.lstrip(' *')
                    doclist.append(line)
            elif state == STATE_RETTYPE:
                # first line of declaration with return type
                m = re.match(r'NPY_NO_EXPORT\s+(.*)$', line)
                if m:
                    line = m.group(1)
                return_type = line
                state = STATE_NAME
            elif state == STATE_NAME:
                # second line, with function name
                m = re.match(r'(\w+)\s*\(', line)
                if m:
                    function_name = m.group(1)
                else:
                    raise ParseError(filename, lineno+1,
                                     'could not find function name')
                function_args.append(line[m.end():])
                state = STATE_ARGS
            elif state == STATE_ARGS:
                if line.startswith('{'):
                    # finished
                    fargs_str = ' '.join(function_args).rstrip(' )')
                    fargs = split_arguments(fargs_str)
                    f = Function(function_name, return_type, fargs,
                                 '\n'.join(doclist))
                    functions.append(f)
                    return_type = None
                    function_name = None
                    function_args = []
                    doclist = []
                    state = SCANNING
                else:
                    # Declaration continues onto the next line
                    function_args.append(line)
        except:
            # Report where parsing failed before re-raising
            print(filename, lineno + 1)
            raise
    fo.close()
    return functions
def should_rebuild(targets, source_files):
    """Return True if any target is missing or older than the API sources."""
    from distutils.dep_util import newer_group
    if any(not os.path.exists(t) for t in targets):
        return True
    # Regenerate when any parsed source, extra input, or this script changed
    sources = API_FILES + list(source_files) + [__file__]
    return bool(newer_group(sources, targets[0], missing='newer'))
# Those *Api classes instances know how to output strings for the generated code
class TypeApi(object):
    """Emits C code for exposing a PyTypeObject through the API table."""
    def __init__(self, name, index, ptr_cast, api_name):
        self.index = index        # slot in the API pointer array
        self.name = name          # C name of the PyTypeObject
        self.ptr_cast = ptr_cast  # cast applied when reading the slot
        self.api_name = api_name  # C name of the API pointer array
    def define_from_array_api_string(self):
        """#define used by client code to reach the object via the table."""
        return "#define %s (*(%s *)%s[%d])" % (self.name,
                                               self.ptr_cast,
                                               self.api_name,
                                               self.index)
    def array_api_define(self):
        """Initializer entry for the API pointer array."""
        return "        (void *) &%s" % self.name
    def internal_define(self):
        """Declaration used when building numpy itself."""
        astr = """\
#ifdef NPY_ENABLE_SEPARATE_COMPILATION
    extern NPY_NO_EXPORT PyTypeObject %(type)s;
#else
    NPY_NO_EXPORT PyTypeObject %(type)s;
#endif
""" % {'type': self.name}
        return astr
class GlobalVarApi(object):
    """Emits C code for exposing a global variable through the API table."""
    def __init__(self, name, index, type, api_name):
        self.name = name          # C name of the global
        self.index = index        # slot in the API pointer array
        self.type = type          # C type of the global
        self.api_name = api_name  # C name of the API pointer array
    def define_from_array_api_string(self):
        """#define used by client code to reach the variable via the table."""
        return "#define %s (*(%s *)%s[%d])" % (self.name,
                                               self.type,
                                               self.api_name,
                                               self.index)
    def array_api_define(self):
        """Initializer entry for the API pointer array."""
        return "        (%s *) &%s" % (self.type, self.name)
    def internal_define(self):
        """Declaration used when building numpy itself."""
        astr = """\
#ifdef NPY_ENABLE_SEPARATE_COMPILATION
    extern NPY_NO_EXPORT %(type)s %(name)s;
#else
    NPY_NO_EXPORT %(type)s %(name)s;
#endif
""" % {'type': self.type, 'name': self.name}
        return astr
# Dummy to be able to consistently use *Api instances for all items in the
# array api
class BoolValuesApi(object):
    """Emits C code for the _PyArrayScalar_BoolValues pair in the API table."""
    def __init__(self, name, index, api_name):
        self.name = name          # C name of the bool-scalar array
        self.index = index        # slot in the API pointer array
        self.type = 'PyBoolScalarObject'
        self.api_name = api_name  # C name of the API pointer array
    def define_from_array_api_string(self):
        """#define used by client code; note: cast only, no dereference."""
        return "#define %s ((%s *)%s[%d])" % (self.name,
                                              self.type,
                                              self.api_name,
                                              self.index)
    def array_api_define(self):
        """Initializer entry for the API pointer array."""
        return "        (void *) &%s" % self.name
    def internal_define(self):
        """Declaration used when building numpy itself."""
        astr = """\
#ifdef NPY_ENABLE_SEPARATE_COMPILATION
extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
#else
NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
#endif
"""
        return astr
class FunctionApi(object):
    """Emits C code for exposing a function through the API table."""
    def __init__(self, name, index, annotations, return_type, args, api_name):
        self.name = name                # C name of the function
        self.index = index              # slot in the API pointer array
        self.annotations = annotations  # StealRef/NonNull markers
        self.return_type = return_type
        self.args = args                # list of (typename, argname) pairs
        self.api_name = api_name        # C name of the API pointer array
    def _argtypes_string(self):
        # C uses 'void' for an empty parameter list
        if not self.args:
            return 'void'
        argstr = ', '.join([_repl(a[0]) for a in self.args])
        return argstr
    def define_from_array_api_string(self):
        """#define turning a table slot into a callable function pointer."""
        define = """\
#define %s \\\n        (*(%s (*)(%s)) \\
         %s[%d])""" % (self.name,
                       self.return_type,
                       self._argtypes_string(),
                       self.api_name,
                       self.index)
        return define
    def array_api_define(self):
        """Initializer entry for the API pointer array."""
        return "        (void *) %s" % self.name
    def internal_define(self):
        """Prototype (with annotations) used when building numpy itself."""
        annstr = []
        for a in self.annotations:
            annstr.append(str(a))
        annstr = ' '.join(annstr)
        astr = """\
NPY_NO_EXPORT %s %s %s \\\n       (%s);""" % (annstr, self.return_type,
                                              self.name,
                                              self._argtypes_string())
        return astr
def order_dict(d):
    """Return d's items sorted by value; each value (a tuple) is extended
    with the key so ties sort deterministically."""
    def sort_key(item):
        key, value = item
        return value + (key,)
    return sorted(d.items(), key=sort_key)
def merge_api_dicts(dicts):
    """Merge the dicts left to right; later dicts win on duplicate keys."""
    merged = {}
    for d in dicts:
        merged.update(d)
    return merged
def check_api_dict(d):
    """Check that an api dict is valid (does not use the same index twice)."""
    # We have if a same index is used twice: we 'revert' the dict so that index
    # become keys. If the length is different, it means one index has been used
    # at least twice
    revert_dict = dict([(v, k) for k, v in d.items()])
    if not len(revert_dict) == len(d):
        # We compute a dict index -> list of associated items
        doubled = {}
        for name, index in d.items():
            try:
                doubled[index].append(name)
            except KeyError:
                doubled[index] = [name]
        # NOTE(review): '%d' % index assumes the values are plain ints, but
        # the hole-check below indexes v[0] as if they were tuples -- confirm
        # which shape the api dicts actually use.
        msg = """\
Same index has been used twice in api definition: %s
""" % ['index %d -> %s' % (index, names) for index, names in doubled.items() \
                                          if len(names) != 1]
        raise ValueError(msg)
    # No 'hole' in the indexes may be allowed, and it must starts at 0
    indexes = set(v[0] for v in d.values())
    expected = set(range(len(indexes)))
    if not indexes == expected:
        diff = expected.symmetric_difference(indexes)
        msg = "There are some holes in the API indexing: " \
              "(symmetric diff is %s)" % diff
        raise ValueError(msg)
def get_api_functions(tagname, api_dict):
    """Parse the API source files and return the functions carrying *tagname*,
    ordered by their index in *api_dict* (keyed by function name)."""
    found = []
    for src in API_FILES:
        found.extend(find_functions(src, tagname))
    # Pair each function with its API slot, then sort on the slot number
    indexed = [(api_dict[func.name][0], func) for func in found]
    indexed.sort()
    return [func for _, func in indexed]
def fullapi_hash(api_dicts):
    """Given a list of api dicts defining the numpy C API, compute a checksum
    of the list of items in the API (as a string)."""
    a = []
    for d in api_dicts:
        # order_dict gives a deterministic (value-sorted) traversal, so the
        # digest is stable across runs and dict orderings
        for name, data in order_dict(d):
            a.extend(name)
            a.extend(','.join(map(str, data)))
    return md5new(''.join(a).encode('ascii')).hexdigest()
# To parse strings like 'hex = checksum' where hex is e.g. 0x1234567F and
# checksum a 128 bits md5 checksum (hex format as well)
# Raw string: '\d'/'\s' in a plain literal are invalid escape sequences and
# warn on modern Python.
VERRE = re.compile(r'(^0x[\da-f]{8})\s*=\s*([\da-f]{32})')
def get_versions_hash():
    """Read cversions.txt and map each API version number to its checksum."""
    versions = {}
    fname = os.path.join(os.path.dirname(__file__), 'cversions.txt')
    fid = open(fname, 'r')
    try:
        for line in fid:
            m = VERRE.match(line)
            if m:
                versions[int(m.group(1), 16)] = m.group(2)
    finally:
        fid.close()
    return versions
def main():
    """Print each tagged function, its hash, and a combined digest.

    Usage: genapi.py <tagname> <order-file>
    """
    tagname = sys.argv[1]
    order_file = sys.argv[2]
    # NOTE(review): get_api_functions expects an api dict (name -> index
    # tuple) as its second argument, but is handed the order-file *path*
    # here -- verify before relying on this entry point.
    functions = get_api_functions(tagname, order_file)
    # NOTE(review): md5new(tagname)/m.update(ah) pass str objects -- this
    # assumes Python 2 md5 semantics; confirm for Python 3.
    m = md5new(tagname)
    for func in functions:
        print(func)
        ah = func.api_hash()
        m.update(ah)
        print(hex(int(ah, 16)))
    print(hex(int(m.hexdigest()[:8], 16)))
if __name__ == '__main__':
    main()
| bsd-3-clause |
Johnzero/erp | openerp/addons/account/wizard/account_fiscalyear_close.py | 9 | 15383 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
from tools.translate import _
class account_fiscalyear_close(osv.osv_memory):
    """
    Closes Account Fiscalyear and Generate Opening entries for New Fiscalyear
    """
    _name = "account.fiscalyear.close"
    _description = "Fiscalyear Close"
    # Transient wizard fields: the year being closed, the year being opened,
    # the 'situation' journal/period receiving the opening entries, and the
    # label used for the generated move lines.
    _columns = {
       'fy_id': fields.many2one('account.fiscalyear', \
                                 'Fiscal Year to close', required=True, help="Select a Fiscal year to close"),
       'fy2_id': fields.many2one('account.fiscalyear', \
                          'New Fiscal Year', required=True),
       'journal_id': fields.many2one('account.journal', 'Opening Entries Journal', domain="[('type','=','situation')]", required=True, help='The best practice here is to use a journal dedicated to contain the opening entries of all fiscal years. Note that you should define it with default debit/credit accounts, of type \'situation\' and with a centralized counterpart.'),
       'period_id': fields.many2one('account.period', 'Opening Entries Period', required=True),
       'report_name': fields.char('Name of new entries',size=64, required=True, help="Give name of the new entries"),
    }
    _defaults = {
        'report_name': _('End of Fiscal Year Entry'),
    }
    def data_save(self, cr, uid, ids, context=None):
        """
        This function close account fiscalyear and create entries in new fiscalyear
        @param cr: the current row, from the database cursor,
        @param uid: the current user’s ID for security checks,
        @param ids: List of Account fiscalyear close state’s IDs
        """
        def _reconcile_fy_closing(cr, uid, ids, context=None):
            """
            This private function manually do the reconciliation on the account_move_line given as `ids´, and directly
            through psql. It's necessary to do it this way because the usual `reconcile()´ function on account.move.line
            object is really resource greedy (not supposed to work on reconciliation between thousands of records) and
            it does a lot of different computation that are useless in this particular case.
            """
            #check that the reconcilation concern journal entries from only one company
            cr.execute('select distinct(company_id) from account_move_line where id in %s',(tuple(ids),))
            if len(cr.fetchall()) > 1:
                raise osv.except_osv(_('Warning !'), _('The entries to reconcile should belong to the same company'))
            r_id = self.pool.get('account.move.reconcile').create(cr, uid, {'type': 'auto'})
            cr.execute('update account_move_line set reconcile_id = %s where id in %s',(r_id, tuple(ids),))
            return r_id
        obj_acc_period = self.pool.get('account.period')
        obj_acc_fiscalyear = self.pool.get('account.fiscalyear')
        obj_acc_journal = self.pool.get('account.journal')
        obj_acc_move = self.pool.get('account.move')
        obj_acc_move_line = self.pool.get('account.move.line')
        obj_acc_account = self.pool.get('account.account')
        obj_acc_journal_period = self.pool.get('account.journal.period')
        currency_obj = self.pool.get('res.currency')
        data = self.browse(cr, uid, ids, context=context)
        if context is None:
            context = {}
        fy_id = data[0].fy_id.id
        # Period sets: periods strictly before the new year / after the old
        # year, used to find cross-year reconciliations further down.
        cr.execute("SELECT id FROM account_period WHERE date_stop < (SELECT date_start FROM account_fiscalyear WHERE id = %s)", (str(data[0].fy2_id.id),))
        fy_period_set = ','.join(map(lambda id: str(id[0]), cr.fetchall()))
        cr.execute("SELECT id FROM account_period WHERE date_start > (SELECT date_stop FROM account_fiscalyear WHERE id = %s)", (str(fy_id),))
        fy2_period_set = ','.join(map(lambda id: str(id[0]), cr.fetchall()))
        if not fy_period_set or not fy2_period_set:
            raise osv.except_osv(_('UserError'), _('The periods to generate opening entries were not found'))
        period = obj_acc_period.browse(cr, uid, data[0].period_id.id, context=context)
        new_fyear = obj_acc_fiscalyear.browse(cr, uid, data[0].fy2_id.id, context=context)
        old_fyear = obj_acc_fiscalyear.browse(cr, uid, fy_id, context=context)
        new_journal = data[0].journal_id.id
        new_journal = obj_acc_journal.browse(cr, uid, new_journal, context=context)
        company_id = new_journal.company_id.id
        if not new_journal.default_credit_account_id or not new_journal.default_debit_account_id:
            raise osv.except_osv(_('UserError'),
                    _('The journal must have default credit and debit account'))
        if (not new_journal.centralisation) or new_journal.entry_posted:
            raise osv.except_osv(_('UserError'),
                    _('The journal must have centralised counterpart without the Skipping draft state option checked!'))
        #delete existing move and move lines if any
        move_ids = obj_acc_move.search(cr, uid, [
            ('journal_id', '=', new_journal.id), ('period_id', '=', period.id)])
        if move_ids:
            move_line_ids = obj_acc_move_line.search(cr, uid, [('move_id', 'in', move_ids)])
            obj_acc_move_line._remove_move_reconcile(cr, uid, move_line_ids, context=context)
            obj_acc_move_line.unlink(cr, uid, move_line_ids, context=context)
            obj_acc_move.unlink(cr, uid, move_ids, context=context)
        cr.execute("SELECT id FROM account_fiscalyear WHERE date_stop < %s", (str(new_fyear.date_start),))
        result = cr.dictfetchall()
        fy_ids = ','.join([str(x['id']) for x in result])
        query_line = obj_acc_move_line._query_get(cr, uid,
                obj='account_move_line', context={'fiscalyear': fy_ids})
        #create the opening move
        vals = {
            'name': '/',
            'ref': '',
            'period_id': period.id,
            'date': period.date_start,
            'journal_id': new_journal.id,
        }
        move_id = obj_acc_move.create(cr, uid, vals, context=context)
        #1. report of the accounts with defferal method == 'unreconciled'
        cr.execute('''
           SELECT a.id
           FROM account_account a
           LEFT JOIN account_account_type t ON (a.user_type = t.id)
           WHERE a.active
             AND a.type != 'view'
             AND a.company_id = %s
             AND t.close_method = %s''', (company_id, 'unreconciled', ))
        account_ids = map(lambda x: x[0], cr.fetchall())
        if account_ids:
            # Copy every still-open (unreconciled) line into the opening move
            cr.execute('''
               INSERT INTO account_move_line (
                    name, create_uid, create_date, write_uid, write_date,
                    statement_id, journal_id, currency_id, date_maturity,
                    partner_id, blocked, credit, state, debit,
                    ref, account_id, period_id, date, move_id, amount_currency,
                    quantity, product_id, company_id)
                 (SELECT name, create_uid, create_date, write_uid, write_date,
                    statement_id, %s,currency_id, date_maturity, partner_id,
                    blocked, credit, 'draft', debit, ref, account_id,
                    %s, (%s) AS date, %s, amount_currency, quantity, product_id, company_id
                  FROM account_move_line
                  WHERE account_id IN %s
                    AND ''' + query_line + '''
                    AND reconcile_id IS NULL)''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
            #We have also to consider all move_lines that were reconciled
            #on another fiscal year, and report them too
            cr.execute('''
                INSERT INTO account_move_line (
                     name, create_uid, create_date, write_uid, write_date,
                     statement_id, journal_id, currency_id, date_maturity,
                     partner_id, blocked, credit, state, debit,
                     ref, account_id, period_id, date, move_id, amount_currency,
                     quantity, product_id, company_id)
                  (SELECT
                     b.name, b.create_uid, b.create_date, b.write_uid, b.write_date,
                     b.statement_id, %s, b.currency_id, b.date_maturity,
                     b.partner_id, b.blocked, b.credit, 'draft', b.debit,
                     b.ref, b.account_id, %s, (%s) AS date, %s, b.amount_currency,
                     b.quantity, b.product_id, b.company_id
                   FROM account_move_line b
                   WHERE b.account_id IN %s
                     AND b.reconcile_id IS NOT NULL
                     AND b.period_id IN ('''+fy_period_set+''')
                     AND b.reconcile_id IN (SELECT DISTINCT(reconcile_id)
                                        FROM account_move_line a
                                        WHERE a.period_id IN ('''+fy2_period_set+''')))''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
        #2. report of the accounts with defferal method == 'detail'
        cr.execute('''
           SELECT a.id
           FROM account_account a
           LEFT JOIN account_account_type t ON (a.user_type = t.id)
           WHERE a.active
             AND a.type != 'view'
             AND a.company_id = %s
             AND t.close_method = %s''', (company_id, 'detail', ))
        account_ids = map(lambda x: x[0], cr.fetchall())
        if account_ids:
            # 'detail' accounts carry over every line, reconciled or not
            cr.execute('''
               INSERT INTO account_move_line (
                    name, create_uid, create_date, write_uid, write_date,
                    statement_id, journal_id, currency_id, date_maturity,
                    partner_id, blocked, credit, state, debit,
                    ref, account_id, period_id, date, move_id, amount_currency,
                    quantity, product_id, company_id)
                 (SELECT name, create_uid, create_date, write_uid, write_date,
                    statement_id, %s,currency_id, date_maturity, partner_id,
                    blocked, credit, 'draft', debit, ref, account_id,
                    %s, (%s) AS date, %s, amount_currency, quantity, product_id, company_id
                  FROM account_move_line
                  WHERE account_id IN %s
                    AND ''' + query_line + ''')
                    ''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
        #3. report of the accounts with defferal method == 'balance'
        cr.execute('''
           SELECT a.id
           FROM account_account a
           LEFT JOIN account_account_type t ON (a.user_type = t.id)
           WHERE a.active
             AND a.type != 'view'
             AND a.company_id = %s
             AND t.close_method = %s''', (company_id, 'balance', ))
        account_ids = map(lambda x: x[0], cr.fetchall())
        query_1st_part = """
                INSERT INTO account_move_line (
                     debit, credit, name, date, move_id, journal_id, period_id,
                     account_id, currency_id, amount_currency, company_id, state) VALUES
"""
        query_2nd_part = ""
        query_2nd_part_args = []
        # 'balance' accounts carry over a single aggregated line per account
        for account in obj_acc_account.browse(cr, uid, account_ids, context={'fiscalyear': fy_id}):
            balance_in_currency = 0.0
            if account.currency_id:
                cr.execute('SELECT sum(amount_currency) as balance_in_currency FROM account_move_line ' \
                        'WHERE account_id = %s ' \
                            'AND ' + query_line + ' ' \
                            'AND currency_id = %s', (account.id, account.currency_id.id))
                balance_in_currency = cr.dictfetchone()['balance_in_currency']
            company_currency_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id
            if not currency_obj.is_zero(cr, uid, company_currency_id, abs(account.balance)):
                if query_2nd_part:
                    query_2nd_part += ','
                query_2nd_part += "(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
                query_2nd_part_args += (account.balance > 0 and account.balance or 0.0,
                       account.balance < 0 and -account.balance or 0.0,
                       data[0].report_name,
                       period.date_start,
                       move_id,
                       new_journal.id,
                       period.id,
                       account.id,
                       account.currency_id and account.currency_id.id or None,
                       balance_in_currency,
                       account.company_id.id,
                       'draft')
        if query_2nd_part:
            cr.execute(query_1st_part + query_2nd_part, tuple(query_2nd_part_args))
        #validate and centralize the opening move
        obj_acc_move.validate(cr, uid, [move_id], context=context)
        #reconcile all the move.line of the opening move
        ids = obj_acc_move_line.search(cr, uid, [('journal_id', '=', new_journal.id),
            ('period_id.fiscalyear_id','=',new_fyear.id)])
        if ids:
            reconcile_id = _reconcile_fy_closing(cr, uid, ids, context=context)
            #set the creation date of the reconcilation at the first day of the new fiscalyear, in order to have good figures in the aged trial balance
            self.pool.get('account.move.reconcile').write(cr, uid, [reconcile_id], {'create_date': new_fyear.date_start}, context=context)
        #create the journal.period object and link it to the old fiscalyear
        new_period = data[0].period_id.id
        ids = obj_acc_journal_period.search(cr, uid, [('journal_id', '=', new_journal.id), ('period_id', '=', new_period)])
        if not ids:
            ids = [obj_acc_journal_period.create(cr, uid, {
                   'name': (new_journal.name or '') + ':' + (period.code or ''),
                   'journal_id': new_journal.id,
                   'period_id': period.id
               })]
        cr.execute('UPDATE account_fiscalyear ' \
                    'SET end_journal_period_id = %s ' \
                    'WHERE id = %s', (ids[0], old_fyear.id))
        return {'type': 'ir.actions.act_window_close'}
account_fiscalyear_close()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
JT5D/Alfred-Popclip-Sublime | Sublime Text 2/PhpBeautifier/php_beautifier.py | 3 | 2335 | import re
import os
import subprocess
import sublime
import sublime_plugin
class PhpBeautifierCommand(sublime_plugin.TextCommand):
    """Sublime Text 2 command piping the current PHP file through php_beautifier."""
    def run(self, edit):
        # Test environment
        if self.view.is_scratch():
            return
        if self.view.is_dirty():
            return sublime.status_message("Please save the file.")
        FILE = self.view.file_name()
        if not FILE or not os.path.exists(FILE):
            return self.status("File does not exist.")
        # NOTE(review): TextCommand has no `status` method -- these two
        # branches look like they should call sublime.status_message; verify.
        if not FILE[-3:] == 'php':
            return self.status("File does not have php extension.")
        # Start doing stuff
        cmd = "php_beautifier"
        indent = "-s4"
        filters = "ArrayNested() NewLines(before=switch:while:for:foreach:T_CLASS:return:break) Pear(add-header=false)"
        allFile = sublime.Region(0, self.view.size())
        AllFileText = self.view.substr(allFile).encode('utf-8')
        if os.name == 'nt':
            # Suppress the console window that Popen would otherwise flash
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            startupinfo.wShowWindow = subprocess.SW_HIDE
            p = subprocess.Popen([cmd, indent, "-l", filters, "-f", "-", "-o", "-"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, startupinfo=startupinfo)
        else:
            p = subprocess.Popen([cmd, indent, "-l", filters, "-f", "-", "-o", "-"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # Feed the buffer on stdin; "-f - -o -" makes the tool use stdio
        stdout, stderr = p.communicate(AllFileText)
        if len(stderr) == 0:
            self.view.replace(edit, allFile, self.fixup(stdout))
        else:
            self.show_error_panel(self.fixup(stderr))
    # Error panel & fixup from external command
    # https://github.com/technocoreai/SublimeExternalCommand
    def show_error_panel(self, stderr):
        """Show beautifier stderr in a read-only output panel."""
        panel = self.view.window().get_output_panel("php_beautifier_errors")
        panel.set_read_only(False)
        edit = panel.begin_edit()
        panel.erase(edit, sublime.Region(0, panel.size()))
        panel.insert(edit, panel.size(), stderr)
        panel.set_read_only(True)
        self.view.window().run_command("show_panel", {"panel": "output.php_beautifier_errors"})
        panel.end_edit(edit)
    def fixup(self, string):
        """Decode subprocess output and normalize line endings to \\n."""
        return re.sub(r'\r\n|\r', '\n', string.decode('utf-8'))
| gpl-2.0 |
dmnfarrell/epitopepredict | epitopepredict/utilities.py | 2 | 6016 | #!/usr/bin/env python
"""
Utilities for epitopepredict
Created March 2013
Copyright (C) Damien Farrell
"""
from __future__ import absolute_import, print_function
import os, math, csv, string
import shutil
import numpy as np
import pandas as pd
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio import PDB
home = os.path.expanduser("~")
def venndiagram(names, labels, ax=None, colors=('r','g','b'), **kwargs):
    """Plot a 2- or 3-set venn diagram.

    names: two or three iterables (converted to sets); labels: set labels.
    Returns the created Figure, or None when *ax* was supplied.
    """
    from matplotlib_venn import venn2,venn3
    import pylab as plt
    f=None
    if ax==None:
        f=plt.figure(figsize=(4,4))
        ax=f.add_subplot(111)
    if len(names)==2:
        n1,n2=names
        v = venn2([set(n1), set(n2)], set_labels=labels, set_colors=colors, **kwargs)
    elif len(names)==3:
        n1,n2,n3=names
        v = venn3([set(n1), set(n2), set(n3)], set_labels=labels, set_colors=colors, **kwargs)
    # Other lengths of *names* silently draw nothing
    ax.axis('off')
    #f.patch.set_visible(False)
    ax.set_axis_off()
    return f
def compress(filename, remove=False):
    """Compress a file with gzip, writing ``<filename>.gz`` alongside it.

    Parameters
    ----------
    filename : str
        Path of the file to compress.
    remove : bool
        If True, delete the uncompressed original after compressing.
    """
    import gzip
    # Context managers close both handles even if reading/writing fails,
    # fixing the handle leak in the previous implementation.
    with open(filename, 'rb') as fin:
        with gzip.open(filename + '.gz', 'wb') as fout:
            shutil.copyfileobj(fin, fout)
    if remove == True:
        os.remove(filename)
    return
def rmse(ar1, ar2):
    """Root mean squared error between two equal-length sequences."""
    a = np.asarray(ar1)
    b = np.asarray(ar2)
    squared = (a - b) ** 2
    return np.sqrt(squared.sum() / len(a))
def add_dicts(a, b):
    """Key-wise sum of two numeric dicts; missing keys count as 0."""
    all_keys = set(a) | set(b)
    return {k: a.get(k, 0) + b.get(k, 0) for k in all_keys}
def copyfile(source, dest, newname=None):
    """Copy *source* into directory *dest* under the name *newname*.

    Returns False if *source* does not exist, True otherwise.
    NOTE(review): the file is first copied into the current working
    directory as *newname* and then moved into *dest*; and the
    ``newname=None`` default would make shutil.copy fail -- callers
    presumably always pass newname. Verify before changing.
    """
    if not os.path.exists(source):
        #print 'no such file %s' %source
        return False
    shutil.copy(source, newname)
    dest = os.path.join(dest, newname)
    # Overwrite any existing file at the destination
    if os.path.exists(dest):
        os.remove(dest)
    shutil.move(newname, dest)
    return True
def copyfiles(path, files):
    """Copy each named file from *path* into the current directory.

    Returns False as soon as one source file is missing (files copied
    before that point are kept), True when all copies succeed.
    """
    for f in files:
        src = os.path.join(path, f)
        print (src)
        if not os.path.exists(src):
            return False
        shutil.copy(src, f)
    return True
def symmetrize(m, lower=True):
    """Mirror one triangle of DataFrame *m* across the diagonal.

    NaNs are treated as 0. Returns a symmetric numpy array built from the
    lower triangle (default) or the upper triangle of *m*.
    """
    filled = m.fillna(0)
    diagonal = np.diag(np.diag(filled))
    if lower:
        return np.tril(filled) + np.triu(filled.T) - diagonal
    return np.triu(filled) + np.tril(filled.T) - diagonal
def get_symmetric_data_frame(m):
    """Return *m* symmetrized (lower triangle mirrored) as a DataFrame,
    preserving the original index and columns."""
    x = symmetrize(m)
    return pd.DataFrame(x, columns=m.columns,index=m.index)
def find_filefrom_string(files, string):
    """Return the first path whose extension-stripped form contains *string*.

    Returns '' when no path matches. The directory part is included in the
    match, only the extension is excluded.
    """
    for candidate in files:
        stem = os.path.splitext(candidate)[0]
        if string in stem:
            return candidate
    return ''
def find_files(path, ext='txt'):
    """Recursively list files under *path* whose names end with *ext*."""
    if not os.path.exists(path):
        print ('no such directory: %s' %path)
        return []
    matched = []
    for dirpath, _dirnames, filenames in os.walk(path):
        matched.extend(os.path.join(dirpath, fname)
                       for fname in filenames if fname.endswith(ext))
    return matched
def find_folders(path):
    """Return *path* and every directory below it (walk order)."""
    if not os.path.exists(path):
        print ('no such directory: %s' %path)
        return []
    return [dirpath for dirpath, _dirs, _files in os.walk(path)]
def reorder_filenames(files, order):
"""reorder filenames by another list order(seqs)"""
new = []
for i in order:
found=False
for f in files:
if i in f:
new.append(f)
found=True
if found==False:
new.append('')
return new
def read_iedb(filename, key='Epitope ID'):
    """Load an IEDB peptidic csv file into a dict of row-dicts keyed by *key*.

    Header field names are whitespace-stripped before use.
    """
    reader = csv.DictReader(open(filename,'r'), quotechar='"')
    reader.fieldnames = [field.strip() for field in reader.fieldnames]
    records = {}
    for row in reader:
        records[row[key]] = row
    return records
def get_sequencefrom_pdb(pdbfile, chain='C', index=0):
    """Get the amino-acid sequence of one chain of a PDB file.

    Concatenates the sequences of all peptides built from *chain* of the
    first model. NOTE(review): the *index* parameter is never used -- verify
    whether it was meant to select a peptide.
    """
    parser = PDB.PDBParser(QUIET=True)
    struct = parser.get_structure(pdbfile,pdbfile)
    ppb = PDB.PPBuilder()
    model = struct[0]
    peptides = ppb.build_peptides(model[chain])
    seq=''
    for i,pep in enumerate(peptides):
        seq+=str(pep.get_sequence())
    return seq
def filter_iedb_file(filename, field, search):
    """Filter an IEDB csv file to rows where *field* contains *search*.

    Prints the selected reference columns, writes the matching rows to
    'filtered.csv' in the current directory, and returns them as a DataFrame.
    """
    X = pd.read_csv(filename)
    cols = ['PubMed ID','Author','Journal','Year','T Cell ID','MHC Allele Name',
            'Epitope Linear Sequence','Epitope Source Organism Name']
    y = X[X[field].str.contains(search)]
    print (y[cols])
    # `cols=` was an old pandas alias that has been removed; `columns=` is
    # the supported keyword for selecting the output columns.
    y.to_csv('filtered.csv', columns=cols)
    return y
def search_pubmed(term, max_count=100):
    """Search PubMed via Entrez and print the titles of matching papers.

    Requires network access. NOTE(review): the Entrez.email placeholder
    ('your.email@example.com') should be replaced with a real address per
    NCBI usage policy.
    """
    from Bio import Entrez
    from Bio import Medline
    def fetch_details(id_list):
        # Fetch full records for a list of PubMed ids in one request
        ids = ','.join(id_list)
        Entrez.email = 'your.email@example.com'
        handle = Entrez.efetch(db='pubmed',
                               retmode='xml',
                               id=ids)
        results = Entrez.read(handle)
        return results
    def search(query):
        Entrez.email = 'your.email@example.com'
        handle = Entrez.esearch(db='pubmed',
                                sort='relevance',
                                retmax=max_count,
                                retmode='xml',
                                term=query)
        results = Entrez.read(handle)
        return results
    results = search(term)
    id_list = results['IdList']
    papers = fetch_details(id_list)
    for i, paper in enumerate(papers):
        print("%d) %s" % (i+1, paper['MedlineCitation']['Article']['ArticleTitle']))
    # Pretty print the first paper in full to observe its structure
    #import json
    #print(json.dumps(papers[0], indent=2, separators=(',', ':')))
def test():
    """Ad-hoc smoke test.

    NOTE(review): findClosestStructures and fetchPDBList are not defined in
    this module -- this test looks stale; confirm before running.
    """
    sourcefasta = os.path.join(home,'dockingdata/fastafiles/1KLU.fasta')
    findClosestStructures(sourcefasta)
    #fetchPDBList('MHCII_homologs.csv')
if __name__ == '__main__':
    test()
| apache-2.0 |
4rado/RepositoryForProject | Lib/xml/dom/minicompat.py | 209 | 3330 | """Python version compatibility support for minidom."""
# This module should only be imported using "import *".
#
# The following names are defined:
#
# NodeList -- lightest possible NodeList implementation
#
# EmptyNodeList -- lightest possible NodeList that is guaranteed to
# remain empty (immutable)
#
# StringTypes -- tuple of defined string types
#
# defproperty -- function used in conjunction with GetattrMagic;
# using these together is needed to make them work
# as efficiently as possible in both Python 2.2+
# and older versions. For example:
#
# class MyClass(GetattrMagic):
# def _get_myattr(self):
# return something
#
# defproperty(MyClass, "myattr",
# "return some value")
#
# For Python 2.2 and newer, this will construct a
# property object on the class, which avoids
# needing to override __getattr__(). It will only
# work for read-only attributes.
#
# For older versions of Python, inheriting from
# GetattrMagic will use the traditional
# __getattr__() hackery to achieve the same effect,
# but less efficiently.
#
# defproperty() should be used for each version of
# the relevant _get_<property>() function.
__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"]
import xml.dom
try:
    # Python 2 has both str and unicode; Python 3 raises NameError here
    unicode
except NameError:
    StringTypes = type(''),
else:
    StringTypes = type(''), type(unicode(''))
class NodeList(list):
    """List-backed DOM NodeList with ``item()`` and a read-only ``length``."""

    __slots__ = ()

    def item(self, index):
        """Return the node at *index*, or None when out of range (DOM style)."""
        return self[index] if 0 <= index < len(self) else None

    def _get_length(self):
        return len(self)

    def _set_length(self, value):
        raise xml.dom.NoModificationAllowedErr(
            "attempt to modify read-only attribute 'length'")

    length = property(_get_length, _set_length,
                      doc="The number of nodes in the NodeList.")

    def __getstate__(self):
        # Pickle as a plain list; __slots__ leaves no instance dict to save.
        return list(self)

    def __setstate__(self, state):
        self[:] = state
class EmptyNodeList(tuple):
    """Immutable, always-empty NodeList; adding to it yields a real NodeList."""

    __slots__ = ()

    def __add__(self, other):
        result = NodeList()
        result.extend(other)
        return result

    def __radd__(self, other):
        result = NodeList()
        result.extend(other)
        return result

    def item(self, index):
        """Always None: there are no nodes to return."""
        return None

    def _get_length(self):
        return 0

    def _set_length(self, value):
        raise xml.dom.NoModificationAllowedErr(
            "attempt to modify read-only attribute 'length'")

    length = property(_get_length, _set_length,
                      doc="The number of nodes in the NodeList.")
def defproperty(klass, name, doc):
    """Attach a read-only property *name* to *klass* backed by _get_<name>.

    NOTE(review): `.im_func` exists only on Python 2 unbound methods --
    confirm this module is not expected to run unmodified on Python 3.
    """
    get = getattr(klass, ("_get_" + name)).im_func
    def set(self, value, name=name):
        # Writes always fail: these DOM attributes are read-only
        raise xml.dom.NoModificationAllowedErr(
            "attempt to modify read-only attribute " + repr(name))
    assert not hasattr(klass, "_set_" + name), \
           "expected not to find _set_" + name
    prop = property(get, set, doc=doc)
    setattr(klass, name, prop)
| gpl-3.0 |
candlepin/subscription-manager | src/subscription_manager/cli_command/redeem.py | 1 | 3313 | #
# Subscription manager command line utility.
#
# Copyright (c) 2021 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import rhsm.connection as connection
import subscription_manager.injection as inj
from subscription_manager.cli import system_exit
from subscription_manager.cli_command.cli import CliCommand, handle_exception
from subscription_manager.i18n import ugettext as _
class RedeemCommand(CliCommand):
    """CLI command: redeem a subscription for a preconfigured system.

    Sends an ``activateMachine`` request to the entitlement server for the
    currently registered consumer; the server notifies the given email
    address when redemption completes.
    """

    def __init__(self):
        shortdesc = _("Attempt to redeem a subscription for a preconfigured system")
        super(RedeemCommand, self).__init__("redeem", shortdesc, False)
        # --email is mandatory (enforced in _validate_options); --locale is optional.
        self.parser.add_argument("--email", dest="email", action='store',
                                 help=_("email address to notify when "
                                        "subscription redemption is complete"))
        self.parser.add_argument("--locale", dest="locale", action='store',
                                 help=_("optional language to use for email "
                                        "notification when subscription redemption is "
                                        "complete (Examples: en-us, de-de)"))

    def _validate_options(self):
        # Redemption completion is reported by email, so the address is required.
        if not self.options.email:
            system_exit(os.EX_USAGE, _("Error: This command requires that you specify an email address with --email."))

    def _do_command(self):
        """
        Executes the command: updates facts and the package profile, then
        asks the server to activate (redeem) this machine.
        """
        self.assert_should_be_registered()
        self._validate_options()
        try:
            # FIXME: why just facts and package profile update here?
            # update facts first, if we need to
            facts = inj.require(inj.FACTS)
            facts.update_check(self.cp, self.identity.uuid)
            profile_mgr = inj.require(inj.PROFILE_MANAGER)
            profile_mgr.update_check(self.cp, self.identity.uuid)
            # BZ 1248833 Ensure we print out the display message if we get any back
            response = self.cp.activateMachine(self.identity.uuid, self.options.email, self.options.locale)
            if response and response.get('displayMessage'):
                system_exit(0, response.get('displayMessage'))
        except connection.GoneException as ge:
            # Consumer no longer exists server-side; re-raise for the
            # generic handler rather than masking it below.
            raise ge
        except connection.RestlibException as e:
            # candlepin throws an exception during activateMachine, even for
            # 200's. We need to look at the code in the RestlibException and proceed
            # accordingly
            if 200 <= e.code <= 210:
                system_exit(0, e)
            else:
                handle_exception(u"Unable to redeem: {e}".format(e=e), e)
        except Exception as e:
            handle_exception(u"Unable to redeem: {e}".format(e=e), e)
        self._request_validity_check()
| gpl-2.0 |
folti/subuser | logic/subuserCommands/subuser-version.py | 1 | 1512 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
try:
import pathConfig
except ImportError:
pass
#external imports
import json
import sys
import optparse
#internal imports
import subuserlib.version
import subuserlib.commandLineArguments
import subuserlib.profile
from subuserlib.classes.user import User
def parseCliArgs(realArgs):
  """Parse the command line arguments of ``subuser version``.

  Returns the ``(options, args)`` pair produced by optparse; the only
  recognized option is ``--json``.
  """
  usage = "usage: subuser version"
  # Typo fix in user-visible help text: "usefull" -> "useful".
  description = """Prints subuser's version and other useful debugging info.
"""
  parser = optparse.OptionParser(usage=usage,description=description,formatter=subuserlib.commandLineArguments.HelpFormatterThatDoesntReformatDescription())
  parser.add_option("--json",dest="json",action="store_true",default=False,help="Display results in JSON format.")
  return parser.parse_args(args=realArgs)
@subuserlib.profile.do_cprofile
def printVersion(realArgs):
  """
  >>> version = __import__("subuser-version") #import self
  >>> version.printVersion([])
  Subuser version: 0.5
  Docker info:
   Foo: bar
  """
  user = User()
  (options,args) = parseCliArgs(realArgs)
  if options.json:
    # Machine-readable output: the full version-info dict as JSON.
    print(json.dumps(subuserlib.version.getInfo(user),indent=1,separators=(",",": ")))
  else:
    # Human-readable output: subuser version plus one line per Docker info key.
    print("Subuser version: " + subuserlib.version.getSubuserVersion(user))
    print("Docker info:")
    for key,value in subuserlib.version.getDockerInfo(user).items():
      print(" "+key+": "+str(value))
#################################################################################################
# Script entry point: forward all CLI arguments (minus the program name).
if __name__ == "__main__":
  printVersion(sys.argv[1:])
| lgpl-3.0 |
coreycb/horizon | openstack_dashboard/dashboards/project/loadbalancers/views.py | 3 | 15662 | # Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tabs
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.loadbalancers \
import forms as project_forms
from openstack_dashboard.dashboards.project.loadbalancers \
import tables as project_tables
from openstack_dashboard.dashboards.project.loadbalancers \
import tabs as project_tabs
from openstack_dashboard.dashboards.project.loadbalancers import utils
from openstack_dashboard.dashboards.project.loadbalancers \
import workflows as project_workflows
class IndexView(tabs.TabbedTableView):
    """Landing page for LBaaS: tabbed tables of pools, members, monitors."""

    tab_group_class = (project_tabs.LoadBalancerTabs)
    template_name = 'project/loadbalancers/details_tabs.html'
    page_title = _("Load Balancer")
class AddPoolView(workflows.WorkflowView):
    """Workflow view rendering the multi-step "Add Pool" form."""

    workflow_class = project_workflows.AddPool
class AddVipView(workflows.WorkflowView):
    """Workflow view for attaching a VIP to an existing pool."""

    workflow_class = project_workflows.AddVip

    def get_initial(self):
        """Seed the workflow with the pool id and the pool's subnet CIDR."""
        initial = super(AddVipView, self).get_initial()
        initial['pool_id'] = self.kwargs['pool_id']
        try:
            pool = api.lbaas.pool_get(self.request, initial['pool_id'])
            initial['subnet'] = api.neutron.subnet_get(
                self.request, pool.subnet_id).cidr
        except Exception as e:
            # Fall back to an empty subnet so the form can still render.
            initial['subnet'] = ''
            msg = _('Unable to retrieve pool subnet. %s') % e
            exceptions.handle(self.request, msg)
        return initial
class AddMemberView(workflows.WorkflowView):
    """Workflow view rendering the "Add Member" form."""

    workflow_class = project_workflows.AddMember
class AddMonitorView(workflows.WorkflowView):
    """Workflow view rendering the "Add Monitor" form."""

    workflow_class = project_workflows.AddMonitor
class PoolDetailsView(tabs.TabView):
    """Tabbed detail page for a single load balancer pool."""

    tab_group_class = project_tabs.PoolDetailsTabs
    template_name = 'horizon/common/_detail.html'
    page_title = "{{ pool.name|default:pool.id }}"

    @memoized.memoized_method
    def get_data(self):
        """Fetch the pool and decorate its health monitors for display."""
        pid = self.kwargs['pool_id']
        try:
            pool = api.lbaas.pool_get(self.request, pid)
        except Exception:
            # NOTE(review): on failure pool stays [], yet later code accesses
            # pool attributes — presumably exceptions.handle aborts the
            # request before that happens; confirm.
            pool = []
            exceptions.handle(self.request,
                              _('Unable to retrieve pool details.'))
        else:
            # Give each monitor a human-friendly name for the template.
            for monitor in pool.health_monitors:
                display_name = utils.get_monitor_display_name(monitor)
                setattr(monitor, 'display_name', display_name)
        return pool

    def get_context_data(self, **kwargs):
        context = super(PoolDetailsView, self).get_context_data(**kwargs)
        pool = self.get_data()
        context['pool'] = pool
        table = project_tables.PoolsTable(self.request)
        context["url"] = self.get_redirect_url()
        context["actions"] = table.render_row_actions(pool)
        return context

    def get_tabs(self, request, *args, **kwargs):
        pool = self.get_data()
        return self.tab_group_class(self.request, pool=pool, **kwargs)

    @staticmethod
    def get_redirect_url():
        return reverse_lazy("horizon:project:loadbalancers:index")
class VipDetailsView(tabs.TabView):
    """Tabbed detail page for a single VIP, including any floating IP."""

    tab_group_class = project_tabs.VipDetailsTabs
    template_name = 'horizon/common/_detail.html'
    page_title = "{{ vip.name|default:vip_id }}"

    @memoized.memoized_method
    def get_data(self):
        """Fetch the VIP and attach an associated floating IP, if any."""
        vid = self.kwargs['vip_id']
        vip = []
        try:
            vip = api.lbaas.vip_get(self.request, vid)
            fips = api.network.tenant_floating_ip_list(self.request)
            # A floating IP is linked to the VIP through the VIP's port.
            vip_fip = [fip for fip in fips
                       if fip.port_id == vip.port.id]
            if vip_fip:
                vip.fip = vip_fip[0]
        except Exception:
            # NOTE(review): vip stays [] on failure while get_context_data
            # dereferences vip.pool — verify exceptions.handle aborts first.
            exceptions.handle(self.request,
                              _('Unable to retrieve VIP details.'))
        return vip

    def get_context_data(self, **kwargs):
        context = super(VipDetailsView, self).get_context_data(**kwargs)
        vip = self.get_data()
        context['vip'] = vip
        # Breadcrumb: owning pool -> "VIP".
        vip_nav = vip.pool.name_or_id
        breadcrumb = [
            (vip_nav,
             reverse('horizon:project:loadbalancers:vipdetails',
                     args=(vip.id,))),
            (_("VIP"),), ]
        context["custom_breadcrumb"] = breadcrumb
        return context

    def get_tabs(self, request, *args, **kwargs):
        vip = self.get_data()
        return self.tab_group_class(request, vip=vip, **kwargs)

    @staticmethod
    def get_redirect_url():
        return reverse("horizon:project:loadbalancers:index")
class MemberDetailsView(tabs.TabView):
    """Tabbed detail page for a single pool member."""

    tab_group_class = project_tabs.MemberDetailsTabs
    template_name = 'horizon/common/_detail.html'
    page_title = "{{ member.name|default:member.id }}"

    @memoized.memoized_method
    def get_data(self):
        """Fetch the member; flashes an error (returning None) on failure."""
        mid = self.kwargs['member_id']
        try:
            return api.lbaas.member_get(self.request, mid)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve member details.'))

    def get_context_data(self, **kwargs):
        context = super(MemberDetailsView, self).get_context_data(**kwargs)
        member = self.get_data()
        context['member'] = member
        # Breadcrumb: owning pool -> members list.
        member_nav = member.pool.name_or_id
        breadcrumb = [
            (member_nav,
             reverse('horizon:project:loadbalancers:pooldetails',
                     args=(member.pool.id,))),
            (_("Members"), reverse('horizon:project:loadbalancers:members')),
        ]
        context["custom_breadcrumb"] = breadcrumb
        table = project_tables.MembersTable(self.request)
        context["url"] = self.get_redirect_url()
        context["actions"] = table.render_row_actions(member)
        return context

    def get_tabs(self, request, *args, **kwargs):
        member = self.get_data()
        return self.tab_group_class(request, member=member, **kwargs)

    @staticmethod
    def get_redirect_url():
        return reverse_lazy("horizon:project:loadbalancers:index")
class MonitorDetailsView(tabs.TabView):
    """Tabbed detail page for a single health monitor."""

    tab_group_class = project_tabs.MonitorDetailsTabs
    template_name = 'horizon/common/_detail.html'
    page_title = "{{ monitor.name|default:monitor.id }}"

    @memoized.memoized_method
    def get_data(self):
        """Fetch the monitor; flashes an error (returning None) on failure."""
        mid = self.kwargs['monitor_id']
        try:
            return api.lbaas.pool_health_monitor_get(self.request, mid)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve monitor details.'))

    def get_context_data(self, **kwargs):
        context = super(MonitorDetailsView, self).get_context_data(**kwargs)
        monitor = self.get_data()
        context['monitor'] = monitor
        breadcrumb = [
            (_("Monitors"), reverse('horizon:project:loadbalancers:monitors')),
        ]
        context["custom_breadcrumb"] = breadcrumb
        table = project_tables.MonitorsTable(self.request)
        context["url"] = self.get_redirect_url()
        context["actions"] = table.render_row_actions(monitor)
        return context

    def get_tabs(self, request, *args, **kwargs):
        monitor = self.get_data()
        return self.tab_group_class(request, monitor=monitor, **kwargs)

    @staticmethod
    def get_redirect_url():
        return reverse_lazy("horizon:project:loadbalancers:index")
class UpdatePoolView(forms.ModalFormView):
    """Modal form for editing an existing pool."""

    form_class = project_forms.UpdatePool
    form_id = "update_pool_form"
    modal_header = _("Edit Pool")
    template_name = "project/loadbalancers/updatepool.html"
    context_object_name = 'pool'
    submit_label = _("Save Changes")
    submit_url = "horizon:project:loadbalancers:updatepool"
    success_url = reverse_lazy("horizon:project:loadbalancers:index")
    page_title = _("Edit Pool")

    def get_context_data(self, **kwargs):
        context = super(UpdatePoolView, self).get_context_data(**kwargs)
        context["pool_id"] = self.kwargs['pool_id']
        args = (self.kwargs['pool_id'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        return context

    @memoized.memoized_method
    def _get_object(self, *args, **kwargs):
        # Fetch the pool being edited; redirect to the index on failure.
        pool_id = self.kwargs['pool_id']
        try:
            return api.lbaas.pool_get(self.request, pool_id)
        except Exception as e:
            redirect = self.success_url
            msg = _('Unable to retrieve pool details. %s') % e
            exceptions.handle(self.request, msg, redirect=redirect)

    def get_initial(self):
        # Pre-populate the form with the pool's current editable fields.
        pool = self._get_object()
        return {'name': pool['name'],
                'pool_id': pool['id'],
                'description': pool['description'],
                'lb_method': pool['lb_method'],
                'admin_state_up': pool['admin_state_up']}
class UpdateVipView(forms.ModalFormView):
    """Modal form for editing an existing VIP."""

    form_class = project_forms.UpdateVip
    form_id = "update_vip_form"
    modal_header = _("Edit VIP")
    template_name = "project/loadbalancers/updatevip.html"
    context_object_name = 'vip'
    submit_label = _("Save Changes")
    submit_url = "horizon:project:loadbalancers:updatevip"
    success_url = reverse_lazy("horizon:project:loadbalancers:index")
    page_title = _("Edit VIP")

    def get_context_data(self, **kwargs):
        context = super(UpdateVipView, self).get_context_data(**kwargs)
        context["vip_id"] = self.kwargs['vip_id']
        args = (self.kwargs['vip_id'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        return context

    @memoized.memoized_method
    def _get_object(self, *args, **kwargs):
        # Fetch the VIP being edited; redirect to the index on failure.
        vip_id = self.kwargs['vip_id']
        try:
            return api.lbaas.vip_get(self.request, vip_id)
        except Exception as e:
            redirect = self.success_url
            msg = _('Unable to retrieve VIP details. %s') % e
            exceptions.handle(self.request, msg, redirect=redirect)

    def get_initial(self):
        vip = self._get_object()
        # Session persistence is optional; only APP_COOKIE carries a
        # cookie name alongside the persistence type.
        persistence = getattr(vip, 'session_persistence', None)
        if persistence:
            stype = persistence['type']
            if stype == 'APP_COOKIE':
                cookie = persistence['cookie_name']
            else:
                cookie = ''
        else:
            stype = ''
            cookie = ''
        return {'name': vip['name'],
                'vip_id': vip['id'],
                'description': vip['description'],
                'pool_id': vip['pool_id'],
                'session_persistence': stype,
                'cookie_name': cookie,
                'connection_limit': vip['connection_limit'],
                'admin_state_up': vip['admin_state_up']}
class UpdateMemberView(forms.ModalFormView):
    """Modal form for editing an existing pool member."""

    form_class = project_forms.UpdateMember
    form_id = "update_pool_form"
    modal_header = _("Edit Member")
    template_name = "project/loadbalancers/updatemember.html"
    context_object_name = 'member'
    submit_label = _("Save Changes")
    submit_url = "horizon:project:loadbalancers:updatemember"
    success_url = reverse_lazy("horizon:project:loadbalancers:index")
    page_title = _("Edit Member")

    def get_context_data(self, **kwargs):
        context = super(UpdateMemberView, self).get_context_data(**kwargs)
        context["member_id"] = self.kwargs['member_id']
        args = (self.kwargs['member_id'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        return context

    @memoized.memoized_method
    def _get_object(self, *args, **kwargs):
        # Fetch the member being edited; redirect to the index on failure.
        member_id = self.kwargs['member_id']
        try:
            return api.lbaas.member_get(self.request, member_id)
        except Exception as e:
            redirect = self.success_url
            msg = _('Unable to retrieve member details. %s') % e
            exceptions.handle(self.request, msg, redirect=redirect)

    def get_initial(self):
        # Pre-populate the form with the member's current editable fields.
        member = self._get_object()
        return {'member_id': member['id'],
                'pool_id': member['pool_id'],
                'weight': member['weight'],
                'admin_state_up': member['admin_state_up']}
class UpdateMonitorView(forms.ModalFormView):
    """Modal form for editing an existing health monitor."""

    form_class = project_forms.UpdateMonitor
    form_id = "update_monitor_form"
    modal_header = _("Edit Monitor")
    template_name = "project/loadbalancers/updatemonitor.html"
    context_object_name = 'monitor'
    submit_label = _("Save Changes")
    submit_url = "horizon:project:loadbalancers:updatemonitor"
    success_url = reverse_lazy("horizon:project:loadbalancers:index")
    page_title = _("Edit Monitor")

    def get_context_data(self, **kwargs):
        context = super(UpdateMonitorView, self).get_context_data(**kwargs)
        context["monitor_id"] = self.kwargs['monitor_id']
        args = (self.kwargs['monitor_id'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        return context

    @memoized.memoized_method
    def _get_object(self, *args, **kwargs):
        # Fetch the monitor being edited; redirect to the index on failure.
        monitor_id = self.kwargs['monitor_id']
        try:
            return api.lbaas.pool_health_monitor_get(self.request, monitor_id)
        except Exception as e:
            redirect = self.success_url
            msg = _('Unable to retrieve health monitor details. %s') % e
            exceptions.handle(self.request, msg, redirect=redirect)

    def get_initial(self):
        # Pre-populate the form with the monitor's current editable fields.
        monitor = self._get_object()
        return {'monitor_id': monitor['id'],
                'delay': monitor['delay'],
                'timeout': monitor['timeout'],
                'max_retries': monitor['max_retries'],
                'admin_state_up': monitor['admin_state_up']}
class AddPMAssociationView(workflows.WorkflowView):
    """Workflow view for associating a health monitor with a pool."""

    workflow_class = project_workflows.AddPMAssociation

    def get_initial(self):
        """Seed the workflow with the pool's name and current monitors."""
        initial = super(AddPMAssociationView, self).get_initial()
        initial['pool_id'] = self.kwargs['pool_id']
        try:
            pool = api.lbaas.pool_get(self.request, initial['pool_id'])
            initial['pool_name'] = pool.name
            initial['pool_monitors'] = pool.health_monitors
        except Exception as e:
            msg = _('Unable to retrieve pool. %s') % e
            exceptions.handle(self.request, msg)
        return initial
class DeletePMAssociationView(workflows.WorkflowView):
    """Workflow view for removing a monitor association from a pool."""

    workflow_class = project_workflows.DeletePMAssociation

    def get_initial(self):
        """Seed the workflow with the pool's name and current monitors."""
        initial = super(DeletePMAssociationView, self).get_initial()
        initial['pool_id'] = self.kwargs['pool_id']
        try:
            pool = api.lbaas.pool_get(self.request, initial['pool_id'])
            initial['pool_name'] = pool.name
            initial['pool_monitors'] = pool.health_monitors
        except Exception as e:
            msg = _('Unable to retrieve pool. %s') % e
            exceptions.handle(self.request, msg)
        return initial
| apache-2.0 |
apark263/tensorflow | tensorflow/python/eager/execute.py | 10 | 8360 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions called by the generated code to execute an eager-mode op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import compat
def quick_execute(op_name, num_outputs, inputs, attrs, ctx, name=None):
  """Execute a TensorFlow operation.

  Args:
    op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to
      execute.
    num_outputs: The number of outputs of the operation to fetch.
      (Explicitly provided instead of being inferred for performance
      reasons).
    inputs: A list of inputs to the operation. Each entry should be a Tensor, or
      a value which can be passed to the Tensor constructor to create one.
    attrs: A tuple with alternating string attr names and attr values for this
      operation.
    ctx: The value of context.context().
    name: Customized name for the operation.

  Returns:
    List of output Tensor objects. The list is empty if there are no outputs

  Raises:
    An exception on error.
  """
  device_name = ctx.device_name
  # pylint: disable=protected-access
  try:
    tensors = pywrap_tensorflow.TFE_Py_Execute(ctx._handle, device_name,
                                               op_name, inputs, attrs,
                                               num_outputs)
  except core._NotOkStatusException as e:
    # Map the C-level status into a Python exception, appending the op
    # name for easier debugging when one was provided.
    if name is not None:
      message = e.message + " name: " + name
    else:
      message = e.message
    six.raise_from(core._status_to_exception(e.code, message), None)
  except TypeError as e:
    # A Keras symbolic tensor slipped into an eager execution path; signal
    # it distinctly so callers can fall back to graph building.
    if any(ops._is_keras_symbolic_tensor(x) for x in inputs):
      raise core._SymbolicException
    raise e
  # pylint: enable=protected-access
  return tensors
def execute_with_callbacks(op_name, num_outputs, inputs, attrs, ctx, name=None):
  """Monkey-patch replacement for execute that runs post-execution callbacks."""
  results = quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
  for callback in ctx.post_execution_callbacks:
    callback(op_name, inputs, attrs, results, name)
  return results
# Default execution path; replaced with execute_with_callbacks when
# post-execution callbacks are enabled (see its docstring).
execute = quick_execute
def record_gradient(unused_op_name, unused_inputs, unused_attrs, unused_results,
                    unused_name):
  """Import backprop if you want gradients recorded."""
  # Intentional no-op placeholder; per the docstring, importing the
  # backprop module presumably swaps in a real implementation — confirm.
  pass
def make_float(v, arg_name):
  """Coerce v to float; raise TypeError naming *arg_name* for non-reals."""
  if isinstance(v, compat.real_types):
    return float(v)
  raise TypeError("Expected float for argument '%s' not %s." %
                  (arg_name, repr(v)))
def make_int(v, arg_name):
  """Coerce v to int; strings and unconvertible values raise TypeError."""
  # Strings are rejected outright even though int("3") would succeed.
  if isinstance(v, six.string_types):
    raise TypeError("Expected int for argument '%s' not %s." %
                    (arg_name, repr(v)))
  try:
    return int(v)
  except (ValueError, TypeError):
    raise TypeError("Expected int for argument '%s' not %s." %
                    (arg_name, repr(v)))
def make_str(v, arg_name):
  """Validate v is text or bytes and return it encoded as bytes."""
  if isinstance(v, compat.bytes_or_text_types):
    # Convert unicode strings to bytes.
    return compat.as_bytes(v)
  raise TypeError("Expected string for argument '%s' not %s." %
                  (arg_name, repr(v)))
def make_bool(v, arg_name):
  """Return v unchanged when it is a bool; otherwise raise TypeError."""
  if isinstance(v, bool):
    return v
  raise TypeError("Expected bool for argument '%s' not %s." %
                  (arg_name, repr(v)))
def make_type(v, arg_name):
  """Convert v to a DataType enum value, naming *arg_name* on failure."""
  try:
    base = dtypes.as_dtype(v).base_dtype
  except TypeError:
    raise TypeError("Expected DataType for argument '%s' not %s." %
                    (arg_name, repr(v)))
  return base.as_datatype_enum
def make_shape(v, arg_name):
  """Convert v into a list.

  Args:
    v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape.
    arg_name: String, for error messages.

  Returns:
    None if the rank is unknown, otherwise a list of ints (or Nones in the
    position where the dimension is unknown).
  """
  try:
    shape = tensor_shape.as_shape(v)
  except TypeError as e:
    raise TypeError("Error converting %s to a TensorShape: %s." % (arg_name, e))
  except ValueError as e:
    raise ValueError("Error converting %s to a TensorShape: %s." % (arg_name,
                                                                    e))
  if shape.ndims is None:
    return None
  else:
    return shape.as_list()
def make_tensor(v, arg_name):
  """Ensure v is a TensorProto, parsing text-format protos from strings."""
  if isinstance(v, tensor_pb2.TensorProto):
    return v
  if isinstance(v, six.string_types):
    proto = tensor_pb2.TensorProto()
    text_format.Merge(v, proto)
    return proto
  raise TypeError(
      "Don't know how to convert %s to a TensorProto for argument '%s'." %
      (repr(v), arg_name))
def args_to_matching_eager(l, ctx, default_dtype=None):
  """Convert sequence `l` to eager same-type Tensors.

  Args:
    l: Sequence of values to convert; may already contain EagerTensors.
    ctx: Eager context used for the conversions.
    default_dtype: Preferred dtype used only while inferring from the
      first convertible value.

  Returns:
    A (datatype_enum, tensors) pair where every tensor shares one dtype.
  """
  EagerTensor = ops.EagerTensor  # pylint: disable=invalid-name
  # Fast path: if everything is already an EagerTensor there is nothing
  # to convert.
  for x in l:
    if not isinstance(x, EagerTensor):
      break
  else:  # note: intentional for-else
    return l[0]._datatype_enum(), l  # pylint: disable=protected-access
  # TODO(josh11b): Could we do a better job if we also passed in the
  # allowed dtypes when that was known?

  # Is some input already a Tensor with a dtype?
  dtype = None
  for t in l:
    if isinstance(t, EagerTensor):
      dtype = t.dtype
      break

  internal_convert_to_tensor = ops.internal_convert_to_tensor
  if dtype is None:
    # Infer a dtype based on the first value, and use that dtype for the
    # remaining values.
    ret = []
    for t in l:
      ret.append(internal_convert_to_tensor(
          t, dtype,
          preferred_dtype=default_dtype,
          ctx=ctx,
          accept_symbolic_tensors=False))
      if dtype is None:
        # Lock in the dtype of the first converted value.
        dtype = ret[-1].dtype
  else:
    ret = [internal_convert_to_tensor(t, dtype, ctx=ctx) for t in l]

  return dtype.as_datatype_enum, ret
def convert_to_mixed_eager_tensors(values, ctx):
  """Convert each value to an eager tensor; return (dtype enums, tensors)."""
  tensors = [ops.internal_convert_to_tensor(t, ctx=ctx) for t in values]
  enums = [t._datatype_enum() for t in tensors]  # pylint: disable=protected-access
  return enums, tensors
def args_to_mixed_eager_tensors(lists, ctx):
  """Converts a list of same-length lists of values to eager tensors.

  Column i of every list is converted to a common dtype, chosen from the
  first EagerTensor found in that column (or inferred from the first
  list's value otherwise).

  Args:
    lists: Two or more equal-length lists of convertible values.
    ctx: Eager context used for the conversions.

  Returns:
    A (types, lists_ret) pair: per-column datatype enums and the
    converted lists.

  Raises:
    ValueError: If the lists do not all have the same length.
  """
  assert len(lists) > 1

  # Generate an error if len(lists[i]) is not the same for all i.
  lists_ret = []
  for l in lists[1:]:
    if len(l) != len(lists[0]):
      raise ValueError(
          "Expected list arguments to be the same length: %d != %d (%r vs. %r)."
          % (len(lists[0]), len(l), lists[0], l))
    lists_ret.append([])

  # Convert the first element of each list first, then the second element, etc.
  types = []
  for i in range(len(lists[0])):
    dtype = None
    # If any list has a Tensor, use that dtype
    for l in lists:
      if isinstance(l[i], ops.EagerTensor):
        dtype = l[i].dtype
        break
    if dtype is None:
      # Convert the first one and use its dtype.
      lists_ret[0].append(ops.internal_convert_to_tensor(lists[0][i], ctx=ctx))
      dtype = lists_ret[0][i].dtype
      for j in range(1, len(lists)):
        lists_ret[j].append(
            ops.internal_convert_to_tensor(lists[j][i], dtype=dtype, ctx=ctx))
    else:
      # Convert everything to the found dtype.
      for j in range(len(lists)):
        lists_ret[j].append(
            ops.internal_convert_to_tensor(lists[j][i], dtype=dtype, ctx=ctx))
    types.append(dtype.as_datatype_enum)
  return types, lists_ret
| apache-2.0 |
kalahbrown/HueBigSQL | desktop/core/ext-py/Django-1.6.10/django/contrib/auth/models.py | 40 | 18337 | from __future__ import unicode_literals
import re
import warnings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import send_mail
from django.core import validators
from django.db import models
from django.db.models.manager import EmptyManager
from django.utils.crypto import get_random_string
from django.utils.http import urlquote
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib import auth
from django.contrib.auth.hashers import (
check_password, make_password, is_password_usable)
from django.contrib.auth.signals import user_logged_in
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import python_2_unicode_compatible
def update_last_login(sender, user, **kwargs):
    """
    A signal receiver which updates the last_login date for
    the user logging in.
    """
    user.last_login = timezone.now()
    # update_fields limits the UPDATE to the single changed column.
    user.save(update_fields=['last_login'])
# Keep last_login fresh: run the receiver on every successful login.
user_logged_in.connect(update_last_login)
class SiteProfileNotAvailable(Exception):
    # Raised when a user's site profile model cannot be loaded.
    pass
class PermissionManager(models.Manager):
    """Manager providing natural-key lookup for Permission objects."""

    def get_by_natural_key(self, codename, app_label, model):
        # A permission is uniquely identified by its codename plus the
        # content type (app_label, model) it applies to.
        return self.get(
            codename=codename,
            content_type=ContentType.objects.get_by_natural_key(app_label,
                                                                model),
        )
@python_2_unicode_compatible
class Permission(models.Model):
    """
    The permissions system provides a way to assign permissions to specific
    users and groups of users.

    The permission system is used by the Django admin site, but may also be
    useful in your own code. The Django admin site uses permissions as follows:

        - The "add" permission limits the user's ability to view the "add" form
          and add an object.
        - The "change" permission limits a user's ability to view the change
          list, view the "change" form and change an object.
        - The "delete" permission limits the ability to delete an object.

    Permissions are set globally per type of object, not per specific object
    instance. It is possible to say "Mary may change news stories," but it's
    not currently possible to say "Mary may change news stories, but only the
    ones she created herself" or "Mary may only change news stories that have a
    certain status or publication date."

    Three basic permissions -- add, change and delete -- are automatically
    created for each Django model.
    """
    # Human-readable name; codename is what has_perm() checks against.
    name = models.CharField(_('name'), max_length=50)
    content_type = models.ForeignKey(ContentType)
    codename = models.CharField(_('codename'), max_length=100)
    objects = PermissionManager()

    class Meta:
        verbose_name = _('permission')
        verbose_name_plural = _('permissions')
        unique_together = (('content_type', 'codename'),)
        ordering = ('content_type__app_label', 'content_type__model',
                    'codename')

    def __str__(self):
        return "%s | %s | %s" % (
            six.text_type(self.content_type.app_label),
            six.text_type(self.content_type),
            six.text_type(self.name))

    def natural_key(self):
        return (self.codename,) + self.content_type.natural_key()
    # Serialization must emit ContentType objects before permissions.
    natural_key.dependencies = ['contenttypes.contenttype']
class GroupManager(models.Manager):
    """
    The manager for the auth's Group model.
    """
    def get_by_natural_key(self, name):
        # Group names are unique, so the name alone is the natural key.
        return self.get(name=name)
@python_2_unicode_compatible
class Group(models.Model):
    """
    Groups are a generic way of categorizing users to apply permissions, or
    some other label, to those users. A user can belong to any number of
    groups.

    A user in a group automatically has all the permissions granted to that
    group. For example, if the group Site editors has the permission
    can_edit_home_page, any user in that group will have that permission.

    Beyond permissions, groups are a convenient way to categorize users to
    apply some label, or extended functionality, to them. For example, you
    could create a group 'Special users', and you could write code that would
    do special things to those users -- such as giving them access to a
    members-only portion of your site, or sending them members-only email
    messages.
    """
    name = models.CharField(_('name'), max_length=80, unique=True)
    permissions = models.ManyToManyField(Permission,
        verbose_name=_('permissions'), blank=True)

    objects = GroupManager()

    class Meta:
        verbose_name = _('group')
        verbose_name_plural = _('groups')

    def __str__(self):
        return self.name

    def natural_key(self):
        # The unique name doubles as the serialization natural key.
        return (self.name,)
class BaseUserManager(models.Manager):
    """Base manager for user models; email/password helpers only."""

    @classmethod
    def normalize_email(cls, email):
        """
        Normalize the address by lowercasing the domain part of the email
        address.
        """
        email = email or ''
        try:
            email_name, domain_part = email.strip().rsplit('@', 1)
        except ValueError:
            # No '@' present; leave the value as-is.
            pass
        else:
            email = '@'.join([email_name, domain_part.lower()])
        return email

    def make_random_password(self, length=10,
                             allowed_chars='abcdefghjkmnpqrstuvwxyz'
                                           'ABCDEFGHJKLMNPQRSTUVWXYZ'
                                           '23456789'):
        """
        Generates a random password with the given length and given
        allowed_chars. Note that the default value of allowed_chars does not
        have "I" or "O" or letters and digits that look similar -- just to
        avoid confusion.
        """
        return get_random_string(length, allowed_chars)

    def get_by_natural_key(self, username):
        # USERNAME_FIELD is declared on the concrete user model.
        return self.get(**{self.model.USERNAME_FIELD: username})
class UserManager(BaseUserManager):
    """Default manager for the User model: user/superuser creation."""

    def _create_user(self, username, email, password,
                     is_staff, is_superuser, **extra_fields):
        """
        Creates and saves a User with the given username, email and password.
        """
        now = timezone.now()
        if not username:
            raise ValueError('The given username must be set')
        email = self.normalize_email(email)
        user = self.model(username=username, email=email,
                          is_staff=is_staff, is_active=True,
                          is_superuser=is_superuser, last_login=now,
                          date_joined=now, **extra_fields)
        # set_password hashes; never store the raw password.
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_user(self, username, email=None, password=None, **extra_fields):
        # Regular users: neither staff nor superuser.
        return self._create_user(username, email, password, False, False,
                                 **extra_fields)

    def create_superuser(self, username, email, password, **extra_fields):
        # Superusers: both is_staff and is_superuser set.
        return self._create_user(username, email, password, True, True,
                                 **extra_fields)
@python_2_unicode_compatible
class AbstractBaseUser(models.Model):
    """Abstract base for user models: password handling and identity API."""

    password = models.CharField(_('password'), max_length=128)
    last_login = models.DateTimeField(_('last login'), default=timezone.now)

    # Plain class attribute default; presumably shadowed by a model field
    # on concrete subclasses -- confirm against the subclass definition.
    is_active = True

    REQUIRED_FIELDS = []

    class Meta:
        abstract = True

    def get_username(self):
        "Return the identifying username for this User"
        return getattr(self, self.USERNAME_FIELD)

    def __str__(self):
        return self.get_username()

    def natural_key(self):
        return (self.get_username(),)

    def is_anonymous(self):
        """
        Always returns False. This is a way of comparing User objects to
        anonymous users.
        """
        return False

    def is_authenticated(self):
        """
        Always return True. This is a way to tell if the user has been
        authenticated in templates.
        """
        return True

    def set_password(self, raw_password):
        # Stores a salted hash, never the raw password.
        self.password = make_password(raw_password)

    def check_password(self, raw_password):
        """
        Returns a boolean of whether the raw_password was correct. Handles
        hashing formats behind the scenes.
        """
        def setter(raw_password):
            # Called by check_password to upgrade outdated hashes in place.
            self.set_password(raw_password)
            self.save(update_fields=["password"])
        return check_password(raw_password, self.password, setter)

    def set_unusable_password(self):
        # Sets a value that will never be a valid hash
        self.password = make_password(None)

    def has_usable_password(self):
        return is_password_usable(self.password)

    def get_full_name(self):
        # Concrete subclasses must implement.
        raise NotImplementedError()

    def get_short_name(self):
        # Concrete subclasses must implement.
        raise NotImplementedError()
# A few helper functions for common logic between User and AnonymousUser.
def _user_get_all_permissions(user, obj):
    """Union of permissions reported by every backend that supports them."""
    collected = set()
    for backend in auth.get_backends():
        if hasattr(backend, "get_all_permissions"):
            collected |= set(backend.get_all_permissions(user, obj))
    return collected
def _user_has_perm(user, perm, obj):
    """True as soon as one capable backend grants *perm* (optionally on *obj*)."""
    return any(
        backend.has_perm(user, perm, obj)
        for backend in auth.get_backends()
        if hasattr(backend, "has_perm")
    )
def _user_has_module_perms(user, app_label):
    """True as soon as one capable backend grants any permission in *app_label*."""
    return any(
        backend.has_module_perms(user, app_label)
        for backend in auth.get_backends()
        if hasattr(backend, "has_module_perms")
    )
class PermissionsMixin(models.Model):
    """
    Model mixin providing the group/permission relations and the
    permission-checking API used by django.contrib.auth's ModelBackend.
    """
    is_superuser = models.BooleanField(_('superuser status'), default=False,
        help_text=_('Designates that this user has all permissions without '
                    'explicitly assigning them.'))
    groups = models.ManyToManyField(Group, verbose_name=_('groups'),
        blank=True, help_text=_('The groups this user belongs to. A user will '
                                'get all permissions granted to each of '
                                'his/her group.'),
        related_name="user_set", related_query_name="user")
    user_permissions = models.ManyToManyField(Permission,
        verbose_name=_('user permissions'), blank=True,
        help_text=_('Specific permissions for this user.'),
        related_name="user_set", related_query_name="user")

    class Meta:
        abstract = True

    def get_group_permissions(self, obj=None):
        """
        Permission strings granted through the user's groups, unioned over
        every backend implementing get_group_permissions. When *obj* is
        given, only permissions matching that object are returned.
        """
        perms = set()
        for backend in auth.get_backends():
            if hasattr(backend, "get_group_permissions"):
                perms |= set(backend.get_group_permissions(self, obj))
        return perms

    def get_all_permissions(self, obj=None):
        return _user_get_all_permissions(self, obj)

    def has_perm(self, perm, obj=None):
        """
        True when any backend grants *perm* (optionally on *obj*). One
        granting backend is enough to grant the permission overall.
        """
        # Active superusers have all permissions.
        if self.is_active and self.is_superuser:
            return True
        # Otherwise consult the auth backends.
        return _user_has_perm(self, perm, obj)

    def has_perms(self, perm_list, obj=None):
        """True only if every permission in *perm_list* is granted."""
        return all(self.has_perm(perm, obj) for perm in perm_list)

    def has_module_perms(self, app_label):
        """True when the user has any permission in the app *app_label*."""
        # Active superusers have all permissions.
        if self.is_active and self.is_superuser:
            return True
        return _user_has_module_perms(self, app_label)
class AbstractUser(AbstractBaseUser, PermissionsMixin):
    """
    An abstract base class implementing a fully featured User model with
    admin-compliant permissions.

    Username, password and email are required. Other fields are optional.
    """
    username = models.CharField(_('username'), max_length=30, unique=True,
        help_text=_('Required. 30 characters or fewer. Letters, numbers and '
                    '@/./+/-/_ characters'),
        validators=[
            # Raw string: '\w' is a regex token, not a Python string escape.
            # A non-raw literal here raises DeprecationWarning on Python 3.6+
            # (and SyntaxWarning/error in later versions).
            validators.RegexValidator(re.compile(r'^[\w.@+-]+$'),
                                      _('Enter a valid username.'), 'invalid')
        ])
    first_name = models.CharField(_('first name'), max_length=30, blank=True)
    last_name = models.CharField(_('last name'), max_length=30, blank=True)
    email = models.EmailField(_('email address'), blank=True)
    is_staff = models.BooleanField(_('staff status'), default=False,
        help_text=_('Designates whether the user can log into this admin '
                    'site.'))
    is_active = models.BooleanField(_('active'), default=True,
        help_text=_('Designates whether this user should be treated as '
                    'active. Unselect this instead of deleting accounts.'))
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)

    objects = UserManager()

    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ['email']

    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')
        abstract = True

    def get_absolute_url(self):
        """Return the canonical URL for this user's page."""
        return "/users/%s/" % urlquote(self.username)

    def get_full_name(self):
        """
        Returns the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()

    def get_short_name(self):
        "Returns the short name for the user."
        return self.first_name

    def email_user(self, subject, message, from_email=None):
        """
        Sends an email to this User.
        """
        send_mail(subject, message, from_email, [self.email])

    def get_profile(self):
        """
        Returns site-specific profile for this user. Raises
        SiteProfileNotAvailable if this site does not allow profiles.

        Deprecated: AUTH_PROFILE_MODULE-based profiles are on their way out;
        prefer a custom user model.
        """
        warnings.warn("The use of AUTH_PROFILE_MODULE to define user profiles has been deprecated.",
            DeprecationWarning, stacklevel=2)
        if not hasattr(self, '_profile_cache'):
            from django.conf import settings
            if not getattr(settings, 'AUTH_PROFILE_MODULE', False):
                raise SiteProfileNotAvailable(
                    'You need to set AUTH_PROFILE_MODULE in your project '
                    'settings')
            try:
                app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
            except ValueError:
                raise SiteProfileNotAvailable(
                    'app_label and model_name should be separated by a dot in '
                    'the AUTH_PROFILE_MODULE setting')
            try:
                model = models.get_model(app_label, model_name)
                if model is None:
                    raise SiteProfileNotAvailable(
                        'Unable to load the profile model, check '
                        'AUTH_PROFILE_MODULE in your project settings')
                # Query against the same database this instance came from,
                # and cache the profile for subsequent calls.
                self._profile_cache = model._default_manager.using(
                    self._state.db).get(user__id__exact=self.id)
                self._profile_cache.user = self
            except (ImportError, ImproperlyConfigured):
                raise SiteProfileNotAvailable
        return self._profile_cache
class User(AbstractUser):
    """
    Concrete user model for the Django authentication system.

    Username, password and email are required. Other fields are optional.
    """
    class Meta(AbstractUser.Meta):
        swappable = 'AUTH_USER_MODEL'
@python_2_unicode_compatible
class AnonymousUser(object):
    """
    Stateless stand-in for a not-logged-in user.

    Mirrors the User API but never touches the database: permission checks
    are delegated to the auth backends, and persistence is forbidden.
    """
    id = None
    pk = None
    username = ''
    is_staff = False
    is_active = False
    is_superuser = False
    _groups = EmptyManager(Group)
    _user_permissions = EmptyManager(Permission)

    def __init__(self):
        pass

    def __str__(self):
        return 'AnonymousUser'

    def __eq__(self, other):
        # All anonymous users compare equal to one another.
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return 1  # instances always return the same hash value

    def save(self):
        raise NotImplementedError

    def delete(self):
        raise NotImplementedError

    def set_password(self, raw_password):
        raise NotImplementedError

    def check_password(self, raw_password):
        raise NotImplementedError

    def _get_groups(self):
        return self._groups
    groups = property(_get_groups)

    def _get_user_permissions(self):
        return self._user_permissions
    user_permissions = property(_get_user_permissions)

    def get_group_permissions(self, obj=None):
        return set()

    def get_all_permissions(self, obj=None):
        return _user_get_all_permissions(self, obj=obj)

    def has_perm(self, perm, obj=None):
        return _user_has_perm(self, perm, obj=obj)

    def has_perms(self, perm_list, obj=None):
        return all(self.has_perm(perm, obj) for perm in perm_list)

    def has_module_perms(self, module):
        return _user_has_module_perms(self, module)

    def is_anonymous(self):
        return True

    def is_authenticated(self):
        return False
| apache-2.0 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from telemetry.value import scalar
from telemetry.web_perf.metrics import mainthread_jank_stats
from telemetry.web_perf.metrics import timeline_based_metric
from telemetry.web_perf import timeline_interaction_record as tir_module
class ResponsivenessMetric(timeline_based_metric.TimelineBasedMetric):
  """Reports main-thread jank statistics over the interaction records.

  Values emitted (all measured in milliseconds of thread time):
    responsive-total_big_jank_thread_time: summed thread duration of every
      top slice whose thread-time range overlaps a record's by at least
      USER_PERCEIVABLE_DELAY_THRESHOLD_MS.
    responsive-biggest_jank_thread_time: the largest single such thread
      duration.
  """

  def __init__(self):
    super(ResponsivenessMetric, self).__init__()

  def AddResults(self, _, renderer_thread, interaction_records, results):
    self.VerifyNonOverlappedRecords(interaction_records)
    try:
      stats = mainthread_jank_stats.MainthreadJankStats(
          renderer_thread, interaction_records)
      # TODO(nednguyen): maybe fall back to use wall-time for computing the
      # metrics.
    except tir_module.NoThreadTimeDataException as exc:
      # TODO(nednguyen): Report the warning with page_results system.
      logging.warning(
          'Main thread jank metrics cannot be computed for records %s since '
          'trace does not contain thread time data. %s',
          repr(interaction_records), repr(exc))
      return
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'responsive-total_big_jank_thread_time', 'ms',
        stats.total_big_jank_thread_time))
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'responsive-biggest_jank_thread_time', 'ms',
        stats.biggest_jank_thread_time))
| apache-2.0 |
# flasky extensions. flasky pygments style based on tango style
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class FlaskyStyle(Style):
    """Pygments highlighting style for the Flask docs theme.

    A tango-derived palette; each entry maps a pygments token type to a
    style string, with the emitted CSS class noted alongside.
    """
    background_color = "#f8f8f8"
    default_style = ""

    styles = {
        # No corresponding class for the following:
        #Text: "", # class: ''
        Whitespace: "underline #f8f8f8", # class: 'w'
        Error: "#a40000 border:#ef2929", # class: 'err'
        Other: "#000000", # class 'x'

        Comment: "italic #8f5902", # class: 'c'
        Comment.Preproc: "noitalic", # class: 'cp'

        Keyword: "bold #004461", # class: 'k'
        Keyword.Constant: "bold #004461", # class: 'kc'
        Keyword.Declaration: "bold #004461", # class: 'kd'
        Keyword.Namespace: "bold #004461", # class: 'kn'
        Keyword.Pseudo: "bold #004461", # class: 'kp'
        Keyword.Reserved: "bold #004461", # class: 'kr'
        Keyword.Type: "bold #004461", # class: 'kt'

        Operator: "#582800", # class: 'o'
        Operator.Word: "bold #004461", # class: 'ow' - like keywords

        Punctuation: "bold #000000", # class: 'p'

        # because special names such as Name.Class, Name.Function, etc.
        # are not recognized as such later in the parsing, we choose them
        # to look the same as ordinary variables.
        Name: "#000000", # class: 'n'
        Name.Attribute: "#c4a000", # class: 'na' - to be revised
        Name.Builtin: "#004461", # class: 'nb'
        Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
        Name.Class: "#000000", # class: 'nc' - to be revised
        Name.Constant: "#000000", # class: 'no' - to be revised
        Name.Decorator: "#888", # class: 'nd' - to be revised
        Name.Entity: "#ce5c00", # class: 'ni'
        Name.Exception: "bold #cc0000", # class: 'ne'
        Name.Function: "#000000", # class: 'nf'
        Name.Property: "#000000", # class: 'py'
        Name.Label: "#f57900", # class: 'nl'
        Name.Namespace: "#000000", # class: 'nn' - to be revised
        Name.Other: "#000000", # class: 'nx'
        Name.Tag: "bold #004461", # class: 'nt' - like a keyword
        Name.Variable: "#000000", # class: 'nv' - to be revised
        Name.Variable.Class: "#000000", # class: 'vc' - to be revised
        Name.Variable.Global: "#000000", # class: 'vg' - to be revised
        Name.Variable.Instance: "#000000", # class: 'vi' - to be revised

        Number: "#990000", # class: 'm'

        Literal: "#000000", # class: 'l'
        Literal.Date: "#000000", # class: 'ld'

        String: "#4e9a06", # class: 's'
        String.Backtick: "#4e9a06", # class: 'sb'
        String.Char: "#4e9a06", # class: 'sc'
        String.Doc: "italic #8f5902", # class: 'sd' - like a comment
        String.Double: "#4e9a06", # class: 's2'
        String.Escape: "#4e9a06", # class: 'se'
        String.Heredoc: "#4e9a06", # class: 'sh'
        String.Interpol: "#4e9a06", # class: 'si'
        String.Other: "#4e9a06", # class: 'sx'
        String.Regex: "#4e9a06", # class: 'sr'
        String.Single: "#4e9a06", # class: 's1'
        String.Symbol: "#4e9a06", # class: 'ss'

        Generic: "#000000", # class: 'g'
        Generic.Deleted: "#a40000", # class: 'gd'
        Generic.Emph: "italic #000000", # class: 'ge'
        Generic.Error: "#ef2929", # class: 'gr'
        Generic.Heading: "bold #000080", # class: 'gh'
        Generic.Inserted: "#00A000", # class: 'gi'
        Generic.Output: "#888", # class: 'go'
        Generic.Prompt: "#745334", # class: 'gp'
        Generic.Strong: "bold #000000", # class: 'gs'
        Generic.Subheading: "bold #800080", # class: 'gu'
        Generic.Traceback: "bold #a40000", # class: 'gt'
    }
| bsd-3-clause |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply: create the sentry_grouprulestatus table backing GroupRuleStatus.

    Auto-generated South migration step; the column specs below must match
    the frozen ORM state in ``models`` and should not be edited by hand.
    """
    # Adding model 'GroupRuleStatus'
    db.create_table('sentry_grouprulestatus', (
        ('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
        ('project', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Project'])),
        ('rule', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Rule'])),
        ('group', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Group'])),
        ('status', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
        ('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
    ))
    db.send_create_signal('sentry', ['GroupRuleStatus'])
def backwards(self, orm):
    """Revert: drop the sentry_grouprulestatus table (destroys its data)."""
    # Deleting model 'GroupRuleStatus'
    db.delete_table('sentry_grouprulestatus')
models = {
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.TeamMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
}, 'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry']
| bsd-3-clause |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools import float_compare
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class stock_move_consume(osv.osv_memory):
    """Wizard model: consume raw-material stock moves of a manufacturing order.

    Pre-fills product / quantity / UoM / location from the stock.move the
    wizard was launched on, then consumes the requested quantity, creating
    an extra consumption line when more than planned is consumed.
    """
    _name = "stock.move.consume"
    _description = "Consume Products"
    _columns = {
        'product_id': fields.many2one('product.product', 'Product', required=True, select=True),
        'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
        'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
        'location_id': fields.many2one('stock.location', 'Location', required=True),
        'restrict_lot_id': fields.many2one('stock.production.lot', 'Lot'),
    }
    #TOFIX: product_uom should not have different category of default UOM of product. Qty should be convert into UOM of original move line before going in consume and scrap
    def default_get(self, cr, uid, fields, context=None):
        # Seed the wizard fields from the stock.move whose id is passed
        # through context['active_id'] (set by the client action).
        if context is None:
            context = {}
        res = super(stock_move_consume, self).default_get(cr, uid, fields, context=context)
        move = self.pool.get('stock.move').browse(cr, uid, context['active_id'], context=context)
        if 'product_id' in fields:
            res.update({'product_id': move.product_id.id})
        if 'product_uom' in fields:
            res.update({'product_uom': move.product_uom.id})
        if 'product_qty' in fields:
            res.update({'product_qty': move.product_uom_qty})
        if 'location_id' in fields:
            res.update({'location_id': move.location_id.id})
        return res
    def do_move_consume(self, cr, uid, ids, context=None):
        # Consume the quantity entered in the wizard against the moves in
        # context['active_ids'].  If more than the planned quantity is
        # consumed, the surplus is pushed onto a brand-new consume line.
        if context is None:
            context = {}
        move_obj = self.pool.get('stock.move')
        uom_obj = self.pool.get('product.uom')
        production_obj = self.pool.get('mrp.production')
        move_ids = context['active_ids']
        move = move_obj.browse(cr, uid, move_ids[0], context=context)
        production_id = move.raw_material_production_id.id
        production = production_obj.browse(cr, uid, production_id, context=context)
        precision = self.pool['decimal.precision'].precision_get(cr, uid, 'Product Unit of Measure')
        for data in self.browse(cr, uid, ids, context=context):
            # Convert the wizard quantity into the product's default UoM
            # before comparing it with the planned move quantity.
            qty = uom_obj._compute_qty(cr, uid, data['product_uom'].id, data.product_qty, data.product_id.uom_id.id)
            remaining_qty = move.product_qty - qty
            #check for product quantity is less than previously planned
            if float_compare(remaining_qty, 0, precision_digits=precision) >= 0:
                move_obj.action_consume(cr, uid, move_ids, qty, data.location_id.id, restrict_lot_id=data.restrict_lot_id.id, context=context)
            else:
                consumed_qty = min(move.product_qty, qty)
                new_moves = move_obj.action_consume(cr, uid, move_ids, consumed_qty, data.location_id.id, restrict_lot_id=data.restrict_lot_id.id, context=context)
                #consumed more in wizard than previously planned
                extra_more_qty = qty - consumed_qty
                #create new line for a remaining qty of the product
                extra_move_id = production_obj._make_consume_line_from_data(cr, uid, production, data.product_id, data.product_id.uom_id.id, extra_more_qty, False, 0, context=context)
                move_obj.write(cr, uid, [extra_move_id], {'restrict_lot_id': data.restrict_lot_id.id}, context=context)
                move_obj.action_done(cr, uid, [extra_move_id], context=context)
        return {'type': 'ir.actions.act_window_close'}
hflynn/openmicroscopy | components/tools/OmeroPy/src/omero/plugins/upload.py | 4 | 2168 | #!/usr/bin/env python
"""
upload plugin
Plugin read by omero.cli.Cli during initialization. The method(s)
defined here will be added to the Cli class for later use.
Copyright 2007 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import sys, re
from omero.cli import BaseControl, CLI
import omero
import omero.rtypes
import omero.util.originalfileutils
from omero.rtypes import rlong
from omero.rtypes import rint
from omero.rtypes import rstring
from omero.rtypes import rdouble
from omero.rtypes import rfloat
try:
import hashlib
hash_sha1 = hashlib.sha1
except:
import sha
hash_sha1 = sha.new
HELP = """Upload local files to the OMERO server"""
RE = re.compile("\s*upload\s*")
class UploadControl(BaseControl):
    """CLI control implementing the ``omero upload`` command."""

    def _complete(self, text, line, begidx, endidx):
        """
        Returns a file after "upload" and otherwise delegates to the BaseControl
        """
        m = RE.match(line)
        if m:
            return self._complete_file(RE.sub('', line))
        else:
            return BaseControl._complete(self, text, line, begidx, endidx)

    def _configure(self, parser):
        # Wire up the argparse sub-parser for "upload".
        parser.add_argument("--pytable", action="store_true", help="If set, the following files are interpreted as pytable files")
        parser.add_argument("file", nargs="+")
        parser.set_defaults(func=self.upload)

    def upload(self, args):
        """
        Upload each named file to the server as an OriginalFile.

        The id of the last uploaded object is remembered under the CLI
        key "last.upload.id".
        """
        client = self.ctx.conn(args)
        for file in args.file:
            is_importer, omero_format = omero.util.originalfileutils.getFormat(file)
            if (is_importer == omero.util.originalfileutils.IMPORTER):
                # Fix: abort with exit code 493 via ctx.die(); the original
                # called the non-existent ctx.dir(), which would raise an
                # AttributeError instead of reporting the error to the user.
                self.ctx.die(493, "This file should be imported using omero import")
            else:
                obj = client.upload(file, type=omero_format)
                self.ctx.out("Uploaded %s as " % file + str(obj.id.val))
                self.ctx.set("last.upload.id", obj.id.val)
# When loaded as a CLI plugin, omero.cli injects a module-level register()
# function; when this file is executed directly, register is undefined and
# the NameError branch runs the control standalone.
try:
    register("upload", UploadControl, HELP)
except NameError:
    if __name__ == "__main__":
        cli = CLI()
        cli.register("upload", UploadControl, HELP)
        cli.invoke(sys.argv[1:])
| gpl-2.0 |
exoanalytic/python-skyfield | skyfield/functions.py | 2 | 1910 | from numpy import arcsin, arctan2, array, cos, sin, sqrt
from skyfield.constants import tau
def dots(v, u):
    """Return the dot product(s) of the vectors in `v` and `u`.

    Works both when `v` and `u` are single vectors of shape ``(3,)``
    and when they are whole series of x, y, z coordinates of shape
    ``(3, N)``, in which case an array of N dot products is returned.
    """
    products = v * u
    return products.sum(axis=0)
def length_of(xyz):
    """Return the Euclidean length of the 3-element array `[x y z]`.

    The elements may be scalars, or `xyz` may be two-dimensional and
    carry whole series of x, y, and z coordinates, in which case a
    series of lengths is returned.
    """
    squared = (xyz * xyz).sum(axis=0)
    return sqrt(squared)
def to_polar(xyz):
    """Convert ``[x y z]`` into spherical coordinates ``(r, theta, phi)``.

    ``r`` is the vector length, ``theta`` the angle above (+) or below
    (-) the xy-plane, and ``phi`` the angle around the z-axis, wrapped
    into ``[0, tau)``.  The return order follows ISO 31-11.
    """
    x, y, z = xyz
    # Inline the length computation so the function is self-contained.
    r = sqrt((xyz * xyz).sum(axis=0))
    theta = arcsin(z / r)
    phi = arctan2(y, x) % tau
    return r, theta, phi
def from_polar(r, theta, phi):
    """Convert ``(r, theta, phi)`` to Cartesian coordinates ``[x y z]``.

    ``r`` is the vector length, ``theta`` the angle above (+) or below
    (-) the xy-plane, and ``phi`` the angle around the z-axis.  The
    argument order follows ISO 31-11.
    """
    # Project the vector onto the xy-plane first, then resolve it.
    xy_projection = r * cos(theta)
    x = xy_projection * cos(phi)
    y = xy_projection * sin(phi)
    z = r * sin(theta)
    return array((x, y, z))
def rot_x(theta):
    """Return the 3x3 matrix rotating vectors by `theta` radians around the x-axis."""
    cos_t = cos(theta)
    sin_t = sin(theta)
    return array([(1.0, 0.0, 0.0),
                  (0.0, cos_t, -sin_t),
                  (0.0, sin_t, cos_t)])
def rot_y(theta):
    """Return the 3x3 matrix rotating vectors by `theta` radians around the y-axis."""
    cos_t = cos(theta)
    sin_t = sin(theta)
    return array([(cos_t, 0.0, sin_t),
                  (0.0, 1.0, 0.0),
                  (-sin_t, 0.0, cos_t)])
def rot_z(theta):
    """Return the 3x3 matrix rotating vectors by `theta` radians around the z-axis.

    Unlike rot_x/rot_y, the constant entries are built from ``theta``
    itself, so an array argument yields an array-valued result.
    """
    cos_t = cos(theta)
    sin_t = sin(theta)
    zeros = theta * 0.0
    ones = zeros + 1.0
    return array(((cos_t, -sin_t, zeros),
                  (sin_t, cos_t, zeros),
                  (zeros, zeros, ones)))
| mit |
ZuluPro/moto | moto/packages/httpretty/compat.py | 12 | 2914 | # #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# <HTTPretty - HTTP client mock for Python>
# Copyright (C) <2011-2013> Gabriel Falcão <gabriel@nacaolivre.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
import sys
import types
# True when running under Python 3; selects which set of 2/3
# compatibility aliases below gets defined.
PY3 = sys.version_info[0] == 3
if PY3: # pragma: no cover
    text_type = str
    byte_type = bytes
    import io
    # NOTE(review): the "StringIO" alias points at io.BytesIO (bytes,
    # not text) — presumably deliberate, for raw HTTP payloads.
    StringIO = io.BytesIO
    basestring = (str, bytes)
    class BaseClass(object):
        # Mirror __str__ in __repr__ for debug output.
        def __repr__(self):
            return self.__str__()
else: # pragma: no cover
    # Python 2 equivalents of the aliases above.
    text_type = unicode
    byte_type = str
    import StringIO
    StringIO = StringIO.StringIO
    basestring = basestring
    class BaseClass(object):
        def __repr__(self):
            ret = self.__str__()
            # On Python 2 __repr__ must return bytes, not unicode.
            if PY3: # pragma: no cover
                return ret
            else:
                return ret.encode('utf-8')
# URL helpers: Python 3 keeps them in urllib.parse; on Python 2 they are
# split across urlparse/urllib and unquote does not handle unicode, so a
# small UTF-8-aware wrapper is provided instead.
try: # pragma: no cover
    from urllib.parse import urlsplit, urlunsplit, parse_qs, quote, quote_plus, unquote
    unquote_utf8 = unquote
except ImportError: # pragma: no cover
    from urlparse import urlsplit, urlunsplit, parse_qs, unquote
    from urllib import quote, quote_plus
    def unquote_utf8(qs):
        # Encode unicode input to UTF-8 bytes before unquoting, then
        # decode the result back to unicode.
        if isinstance(qs, text_type):
            qs = qs.encode('utf-8')
        s = unquote(qs)
        if isinstance(s, byte_type):
            return s.decode("utf-8")
        else:
            return s
# BaseHTTPRequestHandler moved from BaseHTTPServer (py2) to http.server (py3).
try: # pragma: no cover
    from http.server import BaseHTTPRequestHandler
except ImportError: # pragma: no cover
    from BaseHTTPServer import BaseHTTPRequestHandler
# Tuple of "class-like" types for isinstance checks: Python 2 also has
# old-style classes (types.ClassType) alongside new-style `type`.
ClassTypes = (type,)
if not PY3: # pragma: no cover
    ClassTypes = (type, types.ClassType)
# Explicit public API of this compatibility module.
__all__ = [
    'PY3',
    'StringIO',
    'text_type',
    'byte_type',
    'BaseClass',
    'BaseHTTPRequestHandler',
    'quote',
    'quote_plus',
    'urlunsplit',
    'urlsplit',
    'parse_qs',
    'ClassTypes',
]
| apache-2.0 |
arun6582/django | tests/gis_tests/test_data.py | 46 | 2588 | """
This module has the mock object definitions used to hold reference geometry
for the GEOS and GDAL tests.
"""
import json
import os
from django.utils.functional import cached_property
# Path where reference test data is located.
TEST_DATA = os.path.join(os.path.dirname(__file__), 'data')
def tuplize(seq):
    """Recursively convert every nested list/tuple in `seq` to a tuple."""
    if not isinstance(seq, (list, tuple)):
        return seq
    return tuple(tuplize(element) for element in seq)
def strconvert(d):
    """Return a copy of mapping `d` whose keys are coerced to str."""
    converted = {}
    for key, value in d.items():
        converted[str(key)] = value
    return converted
def get_ds_file(name, ext):
    """Return the path of test dataset `name` with extension `ext` under TEST_DATA."""
    filename = '%s.%s' % (name, ext)
    return os.path.join(TEST_DATA, name, filename)
class TestObj:
    """
    Base testing object; every keyword argument becomes an attribute.
    """
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
class TestDS(TestObj):
    """
    Object for testing GDAL data sources.
    """
    def __init__(self, name, *, ext='shp', **kwargs):
        # Shapefile is default extension, unless specified otherwise.
        # self.ds holds the full path TEST_DATA/<name>/<name>.<ext>.
        self.ds = get_ds_file(name, ext)
        super().__init__(**kwargs)
class TestGeom(TestObj):
    """
    Wraps reference geometry data for GEOS/GDAL tests.

    JSON has no tuple type, so list-based coordinate data is converted
    to nested tuples before being stored, ensuring coordinate test
    cases compare equal.
    """
    def __init__(self, *, coords=None, centroid=None, ext_ring_cs=None, **kwargs):
        if coords:
            self.coords = tuplize(coords)
        if centroid:
            self.centroid = tuple(centroid)
        if ext_ring_cs:
            ext_ring_cs = tuplize(ext_ring_cs)
        # Note: stored unconditionally, so the attribute always exists.
        self.ext_ring_cs = ext_ring_cs
        super().__init__(**kwargs)
class TestGeomSet:
    """
    Namespace object whose attributes are lists of `TestGeom` instances.
    """
    def __init__(self, **kwargs):
        for name, entries in kwargs.items():
            geoms = [TestGeom(**strconvert(entry)) for entry in entries]
            setattr(self, name, geoms)
class TestDataMixin:
    """
    Mixin used for GEOS/GDAL test cases that defines a `geometries`
    property, which returns and/or loads the reference geometry data.
    """
    @cached_property
    def geometries(self):
        # Load up the test geometry data from fixture into global.
        # cached_property ensures the JSON file is only parsed once
        # per test-case instance.
        with open(os.path.join(TEST_DATA, 'geometries.json')) as f:
            geometries = json.load(f)
        return TestGeomSet(**strconvert(geometries))
| bsd-3-clause |
maohongyuan/kbengine | kbe/res/scripts/common/Lib/test/test_codecencodings_kr.py | 88 | 3102 | #
# test_codecencodings_kr.py
# Codec encoding tests for ROK encodings.
#
from test import support
from test import multibytecodec_support
import unittest
class Test_CP949(multibytecodec_support.TestBase, unittest.TestCase):
    """Round-trip and error-handling tests for the CP949 codec."""
    encoding = 'cp949'
    tstring = multibytecodec_support.load_teststring('cp949')
    # Each entry: (input bytes, error handler, expected text or None for error).
    codectests = (
        # invalid bytes
        (b"abc\x80\x80\xc1\xc4", "strict", None),
        (b"abc\xc8", "strict", None),
        (b"abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\ufffd\uc894"),
        (b"abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\ufffd\uc894\ufffd"),
        (b"abc\x80\x80\xc1\xc4", "ignore", "abc\uc894"),
    )
class Test_EUCKR(multibytecodec_support.TestBase, unittest.TestCase):
    """Round-trip and error-handling tests for the EUC-KR codec."""
    encoding = 'euc_kr'
    tstring = multibytecodec_support.load_teststring('euc_kr')
    # Each entry: (input bytes, error handler, expected text or None for error).
    codectests = (
        # invalid bytes
        (b"abc\x80\x80\xc1\xc4", "strict", None),
        (b"abc\xc8", "strict", None),
        (b"abc\x80\x80\xc1\xc4", "replace", 'abc\ufffd\ufffd\uc894'),
        (b"abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\ufffd\uc894\ufffd"),
        (b"abc\x80\x80\xc1\xc4", "ignore", "abc\uc894"),
        # composed make-up sequence errors
        (b"\xa4\xd4", "strict", None),
        (b"\xa4\xd4\xa4", "strict", None),
        (b"\xa4\xd4\xa4\xb6", "strict", None),
        (b"\xa4\xd4\xa4\xb6\xa4", "strict", None),
        (b"\xa4\xd4\xa4\xb6\xa4\xd0", "strict", None),
        (b"\xa4\xd4\xa4\xb6\xa4\xd0\xa4", "strict", None),
        (b"\xa4\xd4\xa4\xb6\xa4\xd0\xa4\xd4", "strict", "\uc4d4"),
        (b"\xa4\xd4\xa4\xb6\xa4\xd0\xa4\xd4x", "strict", "\uc4d4x"),
        (b"a\xa4\xd4\xa4\xb6\xa4", "replace", 'a\ufffd'),
        (b"\xa4\xd4\xa3\xb6\xa4\xd0\xa4\xd4", "strict", None),
        (b"\xa4\xd4\xa4\xb6\xa3\xd0\xa4\xd4", "strict", None),
        (b"\xa4\xd4\xa4\xb6\xa4\xd0\xa3\xd4", "strict", None),
        (b"\xa4\xd4\xa4\xff\xa4\xd0\xa4\xd4", "replace", '\ufffd\u6e21\ufffd\u3160\ufffd'),
        (b"\xa4\xd4\xa4\xb6\xa4\xff\xa4\xd4", "replace", '\ufffd\u6e21\ub544\ufffd\ufffd'),
        (b"\xa4\xd4\xa4\xb6\xa4\xd0\xa4\xff", "replace", '\ufffd\u6e21\ub544\u572d\ufffd'),
        (b"\xa4\xd4\xff\xa4\xd4\xa4\xb6\xa4\xd0\xa4\xd4", "replace", '\ufffd\ufffd\ufffd\uc4d4'),
        (b"\xc1\xc4", "strict", "\uc894"),
    )
class Test_JOHAB(multibytecodec_support.TestBase, unittest.TestCase):
    """Round-trip and error-handling tests for the Johab codec."""
    encoding = 'johab'
    tstring = multibytecodec_support.load_teststring('johab')
    # Each entry: (input bytes, error handler, expected text or None for error).
    codectests = (
        # invalid bytes
        (b"abc\x80\x80\xc1\xc4", "strict", None),
        (b"abc\xc8", "strict", None),
        (b"abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\ufffd\ucd27"),
        (b"abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\ufffd\ucd27\ufffd"),
        (b"abc\x80\x80\xc1\xc4", "ignore", "abc\ucd27"),
        (b"\xD8abc", "replace", "\uFFFDabc"),
        (b"\xD8\xFFabc", "replace", "\uFFFD\uFFFDabc"),
        (b"\x84bxy", "replace", "\uFFFDbxy"),
        (b"\x8CBxy", "replace", "\uFFFDBxy"),
    )
def test_main():
    # Run every TestCase defined in this module via the test.support helper.
    support.run_unittest(__name__)
if __name__ == "__main__":
    test_main()
| lgpl-3.0 |
fbagirov/scikit-learn | sklearn/metrics/cluster/tests/test_unsupervised.py | 230 | 2823 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.metrics.cluster.unsupervised import silhouette_score
from sklearn.metrics import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
    # Tests the Silhouette Coefficient.
    # Uses the iris dataset with its true labels, so the score should be
    # strictly positive, and checks that precomputed-distance, direct,
    # sampled and sparse inputs all agree.
    dataset = datasets.load_iris()
    X = dataset.data
    y = dataset.target
    D = pairwise_distances(X, metric='euclidean')
    # Given that the actual labels are used, we can assume that S would be
    # positive.
    silhouette = silhouette_score(D, y, metric='precomputed')
    assert(silhouette > 0)
    # Test without calculating D
    silhouette_metric = silhouette_score(X, y, metric='euclidean')
    assert_almost_equal(silhouette, silhouette_metric)
    # Test with sampling
    silhouette = silhouette_score(D, y, metric='precomputed',
                                  sample_size=int(X.shape[0] / 2),
                                  random_state=0)
    silhouette_metric = silhouette_score(X, y, metric='euclidean',
                                         sample_size=int(X.shape[0] / 2),
                                         random_state=0)
    assert(silhouette > 0)
    assert(silhouette_metric > 0)
    assert_almost_equal(silhouette_metric, silhouette)
    # Test with sparse X
    X_sparse = csr_matrix(X)
    D = pairwise_distances(X_sparse, metric='euclidean')
    silhouette = silhouette_score(D, y, metric='precomputed')
    assert(silhouette > 0)
def test_no_nan():
    # Assert Silhouette Coefficient != nan when there is 1 sample in a class.
    # This tests for the condition that caused issue 960.
    # Note that there is only one sample in cluster 0. This used to cause the
    # silhouette_score to return nan (see bug #960).
    labels = np.array([1, 0, 1, 1, 1])
    # The distance matrix doesn't actually matter.
    D = np.random.RandomState(0).rand(len(labels), len(labels))
    silhouette = silhouette_score(D, labels, metric='precomputed')
    assert_false(np.isnan(silhouette))
def test_correct_labelsize():
    # Assert 1 < n_labels < n_samples
    # silhouette_score must reject degenerate labelings with a clear error.
    dataset = datasets.load_iris()
    X = dataset.data
    # n_labels = n_samples
    y = np.arange(X.shape[0])
    assert_raises_regexp(ValueError,
                         'Number of labels is %d\. Valid values are 2 '
                         'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
                         silhouette_score, X, y)
    # n_labels = 1
    y = np.zeros(X.shape[0])
    assert_raises_regexp(ValueError,
                         'Number of labels is %d\. Valid values are 2 '
                         'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
                         silhouette_score, X, y)
| bsd-3-clause |
alsrgv/tensorflow | tensorflow/contrib/cloud/__init__.py | 29 | 1534 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for cloud ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# pylint: disable=line-too-long,wildcard-import,g-import-not-at-top
from tensorflow.contrib.cloud.python.ops.bigquery_reader_ops import *
from tensorflow.contrib.cloud.python.ops.gcs_config_ops import *
# The Bigtable API is only re-exported off Windows ('nt') — presumably
# the ops are not built there; verify before relying on this.
if os.name != 'nt':
  from tensorflow.contrib.bigtable.python.ops.bigtable_api import BigtableClient
  from tensorflow.contrib.bigtable.python.ops.bigtable_api import BigtableTable
# os was only needed for the platform check above; keep the namespace clean.
del os
from tensorflow.python.util.all_util import remove_undocumented
# Public API of tensorflow.contrib.cloud; all other names are stripped below.
_allowed_symbols = [
    'BigQueryReader',
    'BigtableClient',
    'BigtableTable',
    'BlockCacheParams',
    'configure_colab_session',
    'configure_gcs',
    'ConfigureGcsHook',
]
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
hernad/frappe | frappe/patches/v5_0/fix_text_editor_file_urls.py | 49 | 1354 | from __future__ import unicode_literals
import frappe
import re
def execute():
	"""Fix relative urls for image src="files/" to src="/files/" in DocTypes with text editor fields"""
	# Find every (doctype, fieldname) pair that uses a Text Editor field.
	doctypes_with_text_fields = frappe.get_all("DocField", fields=["parent", "fieldname"],
		filters={"fieldtype": "Text Editor"})
	done = []
	for opts in doctypes_with_text_fields:
		# Skip duplicate (parent, fieldname) combinations.
		if opts in done:
			continue
		try:
			result = frappe.get_all(opts.parent, fields=["name", opts.fieldname])
		except frappe.SQLError, e:
			# bypass single tables
			continue
		for data in result:
			old_value = data[opts.fieldname]
			if not old_value:
				continue
			html = scrub_relative_urls(old_value)
			if html != old_value:
				# print_diff(html, old_value)
				# update_modified=False keeps document timestamps untouched.
				frappe.db.set_value(opts.parent, data.name, opts.fieldname, html, update_modified=False)
		done.append(opts)
def scrub_relative_urls(html):
	"""prepend a slash before a relative url"""
	# Rewrites src="files/..." to src="/files/..." so images resolve from
	# the site root regardless of the current page path.
	try:
		return re.sub("""src[\s]*=[\s]*['"]files/([^'"]*)['"]""", 'src="/files/\g<1>"', html)
		# return re.sub("""(src|href)[^\w'"]*['"](?!http|ftp|mailto|/|#|%|{|cid:|\.com/www\.)([^'" >]+)['"]""", '\g<1>="/\g<2>"', html)
	except:
		print "Error", html
		raise
def print_diff(html, old_value):
	# Debug helper: print a unified diff between the old and scrubbed HTML.
	import difflib
	diff = difflib.unified_diff(old_value.splitlines(1), html.splitlines(1), lineterm='')
	print '\n'.join(list(diff))
| mit |
hvanwyk/drifter | src/grid/mesh.py | 1 | 15658 | from grid.cell import Cell
from grid.vertex import Vertex
from grid.triangle import Triangle
import numpy
import matplotlib.pyplot as plt
class Mesh(object):
'''
Description: (Quad) Mesh object
Attributes:
bounding_box: [xmin, xmax, ymin, ymax]
children: Cell, list of cells contained in mesh
vertex_list: Vertex, list of vertices (run number_vertices)
connectivity: int, numpy array - element connectivity matrix (run build_connectivity)
max_depth: int, maximum number of times each of the mesh's cell can be refined
balanced: bool, true if mesh is balanced.
Methods:
'''
def __init__(self, box=[0.,1.,0.,1.], nx=2, ny=2):
'''
Description: Constructor, initialize rectangular grid
Inputs:
box: double, boundary vertices of rectangular grid, box = [x_min, x_max, y_min, y_max]
nx: int, number of cells in x-direction
ny: int, number of cells in y-direction
type: 'MESH'
'''
self.bounding_box = box
self.type = 'MESH'
self.children_array_size = (nx,ny)
#
# Define cells in mesh
#
xmin, xmax, ymin, ymax = box
x = numpy.linspace(xmin, xmax, nx+1)
y = numpy.linspace(ymin, ymax, ny+1)
mesh_cells = {}
for i in range(nx):
for j in range(ny):
if i == 0 and j == 0:
v_sw = Vertex((x[i] ,y[j] ))
v_se = Vertex((x[i+1],y[j] ))
v_ne = Vertex((x[i+1],y[j+1]))
v_nw = Vertex((x[i] ,y[j+1]))
elif i > 0 and j == 0:
v_se = Vertex((x[i+1],y[j] ))
v_ne = Vertex((x[i+1],y[j+1]))
v_sw = mesh_cells[i-1,j].vertices['SE']
v_nw = mesh_cells[i-1,j].vertices['NE']
elif i == 0 and j > 0:
v_ne = Vertex((x[i+1],y[j+1]))
v_nw = Vertex((x[i] ,y[j+1]))
v_sw = mesh_cells[i,j-1].vertices['NW']
v_se = mesh_cells[i,j-1].vertices['NE']
elif i > 0 and j > 0:
v_ne = Vertex((x[i+1],y[j+1]))
v_nw = mesh_cells[i-1,j].vertices['NE']
v_sw = mesh_cells[i,j-1].vertices['NW']
v_se = mesh_cells[i,j-1].vertices['NE']
cell_vertices = {'SW': v_sw, 'SE': v_se, 'NE': v_ne, 'NW': v_nw}
cell_address = [i,j]
mesh_cells[i,j] = Cell(cell_vertices, self, cell_address)
self.children = mesh_cells
self.vertex_list = []
self.connectivity = None
self.max_depth = 0
self.__num_vertices = 0
self.__num_cells = 0
self.__balanced = False
self.__triangles = []
    def leaves(self):
        """
        Return a flat list of all LEAF sub-cells of the mesh.

        Side effect: caches the leaf count in self.__num_cells.

        NOTE(review): the previous docstring advertised an optional
        'group' sorting parameter that was never implemented.
        """
        #
        # All leaves go in a long list
        #
        leaves = []
        for child in self.children.itervalues():
            leaves.extend(child.find_leaves())
        self.__num_cells = len(leaves)
        return leaves
def triangles(self):
"""
Returns a list of triangles
"""
if len(self.__triangles) == 0:
#
# Mesh has not been triangulated yet
#
self.triangulate()
return self.__triangles
else:
#
# Mesh triangulated
#
return self.__triangles
def vertices(self):
"""
Returns a list of vertices.
POSSIBLE BUG: if vertex has been marked outside of
this function, it will not show up in the list.
"""
n_vertices = -1
vertices = []
for leaf in self.leaves():
for v in leaf.vertices.itervalues():
if not v.is_marked():
n_vertices += 1
vertices.append(v)
v.set_node_number(n_vertices)
#
# Mark vertices in the list
#
v.mark()
self.__num_vertices = n_vertices
#
# Unmark all vertices again
#
for v in vertices:
v.unmark()
    def cells_at_depth(self, depth):
        """
        Return all cells at a given depth > 0

        Delegates the recursive search to each top-level child cell.
        """
        cells = []
        for child in self.children.itervalues():
            cells.extend(child.cells_at_depth(depth))
        return cells
def has_children(self):
"""
Determine whether the mesh has children
"""
return any(child != None for child in self.children.itervalues())
def get_max_depth(self):
"""
Determine the maximum depth of the mesh
"""
    def unmark_all(self):
        """
        Unmark all cells in mesh

        Recurses into the child cells; a mesh without children has
        nothing to unmark.
        """
        if self.has_children():
            for child in self.children.itervalues():
                child.unmark_all()
    def refine(self):
        """
        Refine mesh by splitting marked cells.

        Each flagged leaf is split into children and unflagged; the
        mesh is then no longer guaranteed to satisfy the 2:1 rule.
        """
        leaves = self.leaves()
        for leaf in leaves:
            if leaf.flag:
                leaf.split()
                leaf.unmark()
        self.__balanced = False
    def coarsen(self):
        """
        Coarsen mesh by collapsing marked cells

        Children of every flagged parent are removed, support cells are
        cleaned up, and the balanced flag is invalidated.
        """
        leaves = self.leaves()
        for leaf in leaves:
            parent = leaf.parent
            if parent.flag:
                parent.children.clear()
        self.remove_supports()
        self.__balanced = False
def balance_tree(self):
"""
Ensure the 2:1 rule holds
"""
leaves = self.leaves()
leaf_dict = {'N': ['SE', 'SW'], 'S': ['NE', 'NW'],
'E': ['NW', 'SW'], 'W': ['NE', 'SE']}
while len(leaves) > 0:
leaf = leaves.pop()
flag = False
#
# Check if leaf needs to be split
#
for direction in ['N', 'S', 'E', 'W']:
nb = leaf.find_neighbor(direction)
if nb == None:
pass
elif nb.type == 'LEAF':
pass
else:
for pos in leaf_dict[direction]:
#
# If neighor's children nearest to you aren't LEAVES,
# then split and add children to list of leaves!
#
if nb.children[pos].type != 'LEAF':
leaf.mark()
leaf.split()
for child in leaf.children.itervalues():
child.mark_support_cell()
leaves.append(child)
#
# Check if there are any neighbors that should
# now also be split.
#
for direction in ['N', 'S', 'E', 'W']:
nb = leaf.find_neighbor(direction)
if nb != None and nb.depth < leaf.depth:
leaves.append(nb)
flag = True
break
if flag:
break
self.__balanced = True
def remove_supports(self):
"""
Remove the supporting cells
"""
leaves = self.leaves()
while len(leaves) > 0:
leaf = leaves.pop()
if leaf.support_cell:
#
# Check whether its safe to delete the support cell
#
safe_to_coarsen = True
for direction in ['N', 'S', 'E', 'W']:
nb = leaf.find_neighbor(direction)
if nb.has_children():
safe_to_coarsen = False
break
if safe_to_coarsen:
parent = leaf.parent
for child in parent.children.itervalues():
#
# Delete cells individually
#
del child
parent.children.clear()
leaves.append(parent)
self.__balanced = False
def triangulate(self):
"""
Generate triangulation of mesh:
balance if necessary
populate cells with triangles
generate connectivity matrix.
#TODO: unfinished
"""
triangles = []
if not self.__balanced:
#
# Balance mesh first
#
self.balance_tree()
for leaf in self.leaves():
v = leaf.vertices
#
# Determine whether Steiner Point is necessary
#
if any([v.has_key(direction) for direction in ['N','S','E','W']]):
#
# Add Steiner vertex
#
x0, x1, y0, y1 = leaf.box()
vm = Vertex((0.5*(x0 + x1), 0.5*(y0 + y1)))
leaf.vertices['M'] = vm
sub_edge_dict = {'S': ['SW','S','SE'], \
'E': ['NE','E','SE'], \
'N': ['NE','N','NW'], \
'W': ['NW','W','SW']}
for direction in ['S','E','N','W']:
se = sub_edge_dict[direction]
if v.has_key(direction):
#
# Midpoint on this edge
#
tri = [Triangle([v[se[0]],v[se[1]],vm],parent_cell=leaf),
Triangle([v[se[1]],v[se[2]],vm],parent_cell=leaf)]
else:
#
# No midpoint
#
tri = [Triangle([v[se[0]],v[se[2]],vm],parent_cell=leaf)]
triangles.extend(tri)
else:
#
# No Steiner vertex - simple triangulation
#
tri = [Triangle([v['SW'],v['SE'],v['NE']], parent_cell=leaf), \
Triangle([v['NE'],v['NW'],v['SW']], parent_cell=leaf)]
triangles.extend(tri)
self.__triangles = triangles
def build_connectivity(self):
"""
Returns the connectivity matrix for the tree
"""
# TODO: FIX build_connectivity
econn = []
num_vertices = len(self.vertex_list)
#
# Balance tree first
#
#self.balance_tree()
for leaf in self.leaves():
add_steiner_pt = False
#
# Get global indices for each corner vertex
#
gi = {}
for pos in ['NW', 'SW', 'NE', 'SE']:
gi[pos] = leaf.vertices[pos].node_number
edges = {'S': [[gi['SW'], gi['SE']]], 'N': [[gi['NE'], gi['NW']]],
'W': [[gi['NW'], gi['SW']]], 'E': [[gi['SE'], gi['NE']]] }
opposite_direction = {'N': 'S', 'S': 'N', 'W': 'E', 'E': 'W'}
for direction in ['S', 'N', 'E', 'W']:
neighbor = leaf.find_neighbor(direction)
if neighbor != None and neighbor.type != 'LEAF':
# If neighbor has children, then add the midpoint to
# your list of vertices, update the list of edges and
# remember to add the Steiner point later on.
#
od = opposite_direction[direction]
leaf.vertices[direction] = neighbor.vertices[od]
gi[direction] = leaf.vertices[direction].node_number
add_steiner_pt = True
edges[direction] = [[edges[direction][0][0], gi[direction]],
[gi[direction], edges[direction][0][1]]]
#
# Add the Triangles to connectivity
#
if not add_steiner_pt:
#
# Simple Triangulation
#
econn.extend([[gi['SW'], gi['SE'], gi['NE']],
[gi['NE'], gi['NW'], gi['SW']]] )
elif not leaf.vertices.has_key('M') or leaf.vertices['M'] == None:
#
# Add Steiner Vertex
#
x0, x1, y0, y1 = leaf.box()
vm = Vertex((0.5*(x0 + x1), 0.5*(y0 + y1)), node_number=num_vertices)
leaf.vertices['M'] = vm
gi['M'] = vm.node_number
self.vertex_list.append(vm)
num_vertices += 1
for direction in ['N', 'S', 'E', 'W']:
for sub_edge in edges[direction]:
econn.append([sub_edge[0], sub_edge[1], gi['M']])
return econn
def plot_quadmesh(self, ax, name=None, show=True, set_axis=True):
'''
Plot the current quadmesh
'''
if self.has_children():
if set_axis:
x0, x1, y0, y1 = self.bounding_box
hx = x1 - x0
hy = y1 - y0
ax.set_xlim(x0-0.1*hx, x1+0.1*hx)
ax.set_ylim(y0-0.1*hy, y1+0.1*hy)
for child in self.children.itervalues():
ax = child.plot(ax, set_axis=False)
else:
x0, y0 = self.vertices['SW'].coordinate
x1, y1 = self.vertices['NE'].coordinate
# Plot current cell
plt.plot([x0, x0, x1, x1],[y0, y1, y0, y1],'r.')
points = [[x0, y0], [x1, y0], [x1, y1], [x0, y1]]
if self.flag:
rect = plt.Polygon(points, fc='r', edgecolor='k')
else:
rect = plt.Polygon(points, fc='w', edgecolor='k')
ax.add_patch(rect)
return ax
def plot_trimesh(self, ax):
"""
Plot triangular mesh
"""
e_conn = self.build_connectivity()
for element in e_conn:
points = []
for node_num in element:
x, y = self.vertex_list[node_num].coordinate
points.append([x,y])
triangle = plt.Polygon(points, fc='w', ec='k')
ax.add_patch(triangle)
| mit |
peiyuwang/pants | tests/python/pants_test/backend/codegen/antlr/java/test_java_antlr_library.py | 8 | 1499 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.backend.codegen.antlr.java.java_antlr_library import JavaAntlrLibrary
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants_test.base_test import BaseTest
class JavaAntlrLibraryTest(BaseTest):
  """Validates BUILD-file parsing of java_antlr_library targets."""
  @property
  def alias_groups(self):
    # Expose only the java_antlr_library alias to the test build graph.
    return BuildFileAliases(targets={'java_antlr_library': JavaAntlrLibrary})
  def test_empty(self):
    # An empty sources list must be rejected at target instantiation.
    with self.assertRaisesRegexp(ValueError, "Missing required 'sources' parameter"):
      self.add_to_build_file('BUILD', dedent('''
          java_antlr_library(name='foo',
            sources=[],
          )'''))
      self.foo = self.target('//:foo')
  def test_valid(self):
    # A non-empty sources list produces a JavaAntlrLibrary target.
    self.add_to_build_file('BUILD', dedent('''
        java_antlr_library(name='foo',
          sources=['foo'],
        )'''))
    self.foo = self.target('//:foo')
    self.assertIsInstance(self.foo, JavaAntlrLibrary)
  def test_invalid_compiler(self):
    # Unknown compiler names must be rejected.
    with self.assertRaisesRegexp(ValueError, "Illegal value for 'compiler'"):
      self.add_to_build_file('BUILD', dedent('''
          java_antlr_library(name='foo',
            sources=['foo'],
            compiler='antlr9'
          )'''))
      self.foo = self.target('//:foo')
| apache-2.0 |
SANBI-SA/tools-iuc | data_managers/data_manager_build_kraken_database/data_manager/make_json.py | 8 | 1044 | import argparse
import json
import os
import shutil
def main(args):
    """Build the Galaxy data-manager JSON for a freshly built Kraken database.

    Reads the Galaxy parameter file named by ``args.output``, moves the
    contents of ``./kraken-database`` into the job's extra-files directory,
    and overwrites ``args.output`` with the data-table entry describing the
    new database.

    :param args: parsed arguments carrying ``database`` (display name) and
        ``output`` (path of the Galaxy JSON parameter/result file)
    """
    data_manager_entry = {}
    data_manager_entry['value'] = args.database.lower()
    data_manager_entry['name'] = args.database
    data_manager_entry['path'] = '.'
    data_manager_json = dict(data_tables=dict(kraken_databases=data_manager_entry))
    # The file Galaxy hands us on the way in doubles as the result file on
    # the way out: read the parameters first, overwrite it at the end.
    with open(args.output) as params_file:
        params = json.loads(params_file.read())
    target_directory = params['output_data'][0]['extra_files_path']
    os.mkdir(target_directory)
    output_path = os.path.join(os.getcwd(), 'kraken-database')
    for filename in os.listdir(output_path):
        shutil.move(os.path.join(output_path, filename), target_directory)
    # Fix: the original used the Python-2-only file() builtin and left the
    # handle open; open() in a context manager works on both 2 and 3 and
    # guarantees the JSON is flushed to disk.
    with open(args.output, 'w') as output_file:
        output_file.write(json.dumps(data_manager_json))
if __name__ == '__main__':
    # Command-line entry point: --db names the database, --out names the
    # Galaxy JSON parameter/result file handed to main().
    parser = argparse.ArgumentParser(description='Create data manager json.')
    parser.add_argument('--db', dest='database', action='store', help='Database name')
    parser.add_argument('--out', dest='output', action='store', help='JSON filename')
    args = parser.parse_args()
    main(args)
| mit |
lthurlow/Boolean-Constrained-Routing | networkx-1.8.1/build/lib/networkx/readwrite/tests/test_shp.py | 31 | 4545 | """Unit tests for shp.
"""
import os
import tempfile
from nose import SkipTest
from nose.tools import assert_equal
import networkx as nx
class TestShp(object):
    """Round-trip tests for nx.read_shp/nx.write_shp using a temporary
    ESRI Shapefile built with the GDAL/OGR Python bindings."""

    @classmethod
    def setupClass(cls):
        # Bind ogr at module scope; skip the whole suite when the
        # GDAL/OGR bindings are not installed.
        global ogr
        try:
            from osgeo import ogr
        except ImportError:
            raise SkipTest('ogr not available.')

    def deletetmp(self, drv, *paths):
        """Delete any leftover OGR data sources from a previous run."""
        for p in paths:
            if os.path.exists(p):
                drv.DeleteDataSource(p)

    def setUp(self):
        def createlayer(driver):
            # NOTE(review): uses `shp` from the enclosing scope rather than
            # the `driver` argument; works because it is only called after
            # `shp` is bound below.
            lyr = shp.CreateLayer("edges", None, ogr.wkbLineString)
            namedef = ogr.FieldDefn("Name", ogr.OFTString)
            namedef.SetWidth(32)
            lyr.CreateField(namedef)
            return lyr
        drv = ogr.GetDriverByName("ESRI Shapefile")
        testdir = os.path.join(tempfile.gettempdir(), 'shpdir')
        shppath = os.path.join(tempfile.gettempdir(), 'tmpshp.shp')
        self.deletetmp(drv, testdir, shppath)
        os.mkdir(testdir)
        shp = drv.CreateDataSource(shppath)
        lyr = createlayer(shp)
        # Fixture: three named line segments.
        self.names = ['a', 'b', 'c']  # edgenames
        self.paths = ( [(1.0, 1.0), (2.0, 2.0)],
                       [(2.0, 2.0), (3.0, 3.0)],
                       [(0.9, 0.9), (4.0, 2.0)]
                     )
        for path, name in zip(self.paths, self.names):
            feat = ogr.Feature(lyr.GetLayerDefn())
            g = ogr.Geometry(ogr.wkbLineString)
            # NOTE(review): relies on Python 2's eager map() for its side
            # effect; under Python 3 the lazy map would add no points.
            map(lambda xy: g.AddPoint_2D(*xy), path)
            feat.SetGeometry(g)
            feat.SetField("Name", name)
            lyr.CreateFeature(feat)
        self.shppath = shppath
        self.testdir = testdir
        self.drv = drv

    def testload(self):
        """read_shp reproduces the fixture's nodes, edges and attributes."""
        expected = nx.DiGraph()
        # Eager Python 2 map(), used for its add_path side effect.
        map(expected.add_path, self.paths)
        G = nx.read_shp(self.shppath)
        assert_equal(sorted(expected.node), sorted(G.node))
        assert_equal(sorted(expected.edges()), sorted(G.edges()))
        names = [G.get_edge_data(s, e)['Name'] for s, e in G.edges()]
        assert_equal(self.names, sorted(names))

    def checkgeom(self, lyr, expected):
        """Assert layer `lyr` contains exactly the WKT strings `expected`."""
        feature = lyr.GetNextFeature()
        actualwkt = []
        while feature:
            actualwkt.append(feature.GetGeometryRef().ExportToWkt())
            feature = lyr.GetNextFeature()
        assert_equal(sorted(expected), sorted(actualwkt))

    def test_geometryexport(self):
        """write_shp emits the expected POINT and LINESTRING geometries."""
        expectedpoints = (
            "POINT (1 1)",
            "POINT (2 2)",
            "POINT (3 3)",
            "POINT (0.9 0.9)",
            "POINT (4 2)"
        )
        expectedlines = (
            "LINESTRING (1 1,2 2)",
            "LINESTRING (2 2,3 3)",
            "LINESTRING (0.9 0.9,4 2)"
        )
        tpath = os.path.join(tempfile.gettempdir(), 'shpdir')
        G = nx.read_shp(self.shppath)
        nx.write_shp(G, tpath)
        shpdir = ogr.Open(tpath)
        self.checkgeom(shpdir.GetLayerByName("nodes"), expectedpoints)
        self.checkgeom(shpdir.GetLayerByName("edges"), expectedlines)

    def test_attributeexport(self):
        """Edge attributes survive a read_shp -> write_shp round trip."""
        def testattributes(lyr, graph):
            feature = lyr.GetNextFeature()
            while feature:
                coords = []
                ref = feature.GetGeometryRef()
                # NOTE(review): xrange is Python 2 only.
                for i in xrange(ref.GetPointCount()):
                    coords.append(ref.GetPoint_2D(i))
                name = feature.GetFieldAsString('Name')
                assert_equal(graph.get_edge_data(*coords)['Name'], name)
                feature = lyr.GetNextFeature()
        tpath = os.path.join(tempfile.gettempdir(), 'shpdir')
        G = nx.read_shp(self.shppath)
        nx.write_shp(G, tpath)
        shpdir = ogr.Open(tpath)
        edges = shpdir.GetLayerByName("edges")
        testattributes(edges, G)

    def test_wkt_export(self):
        """Nodes/edges carrying explicit Wkt attributes export verbatim."""
        G = nx.DiGraph()
        tpath = os.path.join(tempfile.gettempdir(), 'shpdir')
        points = (
            "POINT (0.9 0.9)",
            "POINT (4 2)"
        )
        line = (
            "LINESTRING (0.9 0.9,4 2)",
        )
        G.add_node(1, Wkt=points[0])
        G.add_node(2, Wkt=points[1])
        G.add_edge(1, 2, Wkt=line[0])
        try:
            nx.write_shp(G, tpath)
        except Exception as e:
            assert False, e
        shpdir = ogr.Open(tpath)
        self.checkgeom(shpdir.GetLayerByName("nodes"), points)
        self.checkgeom(shpdir.GetLayerByName("edges"), line)

    def tearDown(self):
        self.deletetmp(self.drv, self.testdir, self.shppath)
| mit |
adminneyk/codificacionproyectando | application/views/Generacion/Generacion/lib/openoffice/openoffice.org/basis3.4/program/python-core-2.6.1/lib/encodings/cp856.py | 593 | 12679 | """ Python Character Mapping Codec cp856 generated from 'MAPPINGS/VENDORS/MISC/CP856.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless charmap codec for code page 856 (PC Hebrew),
    delegating to the module-level encoding/decoding tables."""
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Chunk-at-a-time encoder; charmap codecs keep no state between calls."""
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Chunk-at-a-time decoder; charmap codecs keep no state between calls."""
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits encode() from Codec; no stream-specific behavior needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits decode() from Codec; no stream-specific behavior needed.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo the codecs registry uses for 'cp856'."""
    return codecs.CodecInfo(
        name='cp856',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u05d0' # 0x80 -> HEBREW LETTER ALEF
u'\u05d1' # 0x81 -> HEBREW LETTER BET
u'\u05d2' # 0x82 -> HEBREW LETTER GIMEL
u'\u05d3' # 0x83 -> HEBREW LETTER DALET
u'\u05d4' # 0x84 -> HEBREW LETTER HE
u'\u05d5' # 0x85 -> HEBREW LETTER VAV
u'\u05d6' # 0x86 -> HEBREW LETTER ZAYIN
u'\u05d7' # 0x87 -> HEBREW LETTER HET
u'\u05d8' # 0x88 -> HEBREW LETTER TET
u'\u05d9' # 0x89 -> HEBREW LETTER YOD
u'\u05da' # 0x8A -> HEBREW LETTER FINAL KAF
u'\u05db' # 0x8B -> HEBREW LETTER KAF
u'\u05dc' # 0x8C -> HEBREW LETTER LAMED
u'\u05dd' # 0x8D -> HEBREW LETTER FINAL MEM
u'\u05de' # 0x8E -> HEBREW LETTER MEM
u'\u05df' # 0x8F -> HEBREW LETTER FINAL NUN
u'\u05e0' # 0x90 -> HEBREW LETTER NUN
u'\u05e1' # 0x91 -> HEBREW LETTER SAMEKH
u'\u05e2' # 0x92 -> HEBREW LETTER AYIN
u'\u05e3' # 0x93 -> HEBREW LETTER FINAL PE
u'\u05e4' # 0x94 -> HEBREW LETTER PE
u'\u05e5' # 0x95 -> HEBREW LETTER FINAL TSADI
u'\u05e6' # 0x96 -> HEBREW LETTER TSADI
u'\u05e7' # 0x97 -> HEBREW LETTER QOF
u'\u05e8' # 0x98 -> HEBREW LETTER RESH
u'\u05e9' # 0x99 -> HEBREW LETTER SHIN
u'\u05ea' # 0x9A -> HEBREW LETTER TAV
u'\ufffe' # 0x9B -> UNDEFINED
u'\xa3' # 0x9C -> POUND SIGN
u'\ufffe' # 0x9D -> UNDEFINED
u'\xd7' # 0x9E -> MULTIPLICATION SIGN
u'\ufffe' # 0x9F -> UNDEFINED
u'\ufffe' # 0xA0 -> UNDEFINED
u'\ufffe' # 0xA1 -> UNDEFINED
u'\ufffe' # 0xA2 -> UNDEFINED
u'\ufffe' # 0xA3 -> UNDEFINED
u'\ufffe' # 0xA4 -> UNDEFINED
u'\ufffe' # 0xA5 -> UNDEFINED
u'\ufffe' # 0xA6 -> UNDEFINED
u'\ufffe' # 0xA7 -> UNDEFINED
u'\ufffe' # 0xA8 -> UNDEFINED
u'\xae' # 0xA9 -> REGISTERED SIGN
u'\xac' # 0xAA -> NOT SIGN
u'\xbd' # 0xAB -> VULGAR FRACTION ONE HALF
u'\xbc' # 0xAC -> VULGAR FRACTION ONE QUARTER
u'\ufffe' # 0xAD -> UNDEFINED
u'\xab' # 0xAE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xAF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0xB0 -> LIGHT SHADE
u'\u2592' # 0xB1 -> MEDIUM SHADE
u'\u2593' # 0xB2 -> DARK SHADE
u'\u2502' # 0xB3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0xB4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\ufffe' # 0xB5 -> UNDEFINED
u'\ufffe' # 0xB6 -> UNDEFINED
u'\ufffe' # 0xB7 -> UNDEFINED
u'\xa9' # 0xB8 -> COPYRIGHT SIGN
u'\u2563' # 0xB9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0xBA -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0xBB -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0xBC -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\xa2' # 0xBD -> CENT SIGN
u'\xa5' # 0xBE -> YEN SIGN
u'\u2510' # 0xBF -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0xC0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0xC1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0xC2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0xC3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0xC4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0xC5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\ufffe' # 0xC6 -> UNDEFINED
u'\ufffe' # 0xC7 -> UNDEFINED
u'\u255a' # 0xC8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0xC9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0xCA -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0xCB -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0xCC -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0xCD -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0xCE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa4' # 0xCF -> CURRENCY SIGN
u'\ufffe' # 0xD0 -> UNDEFINED
u'\ufffe' # 0xD1 -> UNDEFINED
u'\ufffe' # 0xD2 -> UNDEFINED
u'\ufffe' # 0xD3 -> UNDEFINED
u'\ufffe' # 0xD4 -> UNDEFINED
u'\ufffe' # 0xD5 -> UNDEFINED
u'\ufffe' # 0xD6 -> UNDEFINED
u'\ufffe' # 0xD7 -> UNDEFINED
u'\ufffe' # 0xD8 -> UNDEFINED
u'\u2518' # 0xD9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0xDA -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0xDB -> FULL BLOCK
u'\u2584' # 0xDC -> LOWER HALF BLOCK
u'\xa6' # 0xDD -> BROKEN BAR
u'\ufffe' # 0xDE -> UNDEFINED
u'\u2580' # 0xDF -> UPPER HALF BLOCK
u'\ufffe' # 0xE0 -> UNDEFINED
u'\ufffe' # 0xE1 -> UNDEFINED
u'\ufffe' # 0xE2 -> UNDEFINED
u'\ufffe' # 0xE3 -> UNDEFINED
u'\ufffe' # 0xE4 -> UNDEFINED
u'\ufffe' # 0xE5 -> UNDEFINED
u'\xb5' # 0xE6 -> MICRO SIGN
u'\ufffe' # 0xE7 -> UNDEFINED
u'\ufffe' # 0xE8 -> UNDEFINED
u'\ufffe' # 0xE9 -> UNDEFINED
u'\ufffe' # 0xEA -> UNDEFINED
u'\ufffe' # 0xEB -> UNDEFINED
u'\ufffe' # 0xEC -> UNDEFINED
u'\ufffe' # 0xED -> UNDEFINED
u'\xaf' # 0xEE -> MACRON
u'\xb4' # 0xEF -> ACUTE ACCENT
u'\xad' # 0xF0 -> SOFT HYPHEN
u'\xb1' # 0xF1 -> PLUS-MINUS SIGN
u'\u2017' # 0xF2 -> DOUBLE LOW LINE
u'\xbe' # 0xF3 -> VULGAR FRACTION THREE QUARTERS
u'\xb6' # 0xF4 -> PILCROW SIGN
u'\xa7' # 0xF5 -> SECTION SIGN
u'\xf7' # 0xF6 -> DIVISION SIGN
u'\xb8' # 0xF7 -> CEDILLA
u'\xb0' # 0xF8 -> DEGREE SIGN
u'\xa8' # 0xF9 -> DIAERESIS
u'\xb7' # 0xFA -> MIDDLE DOT
u'\xb9' # 0xFB -> SUPERSCRIPT ONE
u'\xb3' # 0xFC -> SUPERSCRIPT THREE
u'\xb2' # 0xFD -> SUPERSCRIPT TWO
u'\u25a0' # 0xFE -> BLACK SQUARE
u'\xa0' # 0xFF -> NO-BREAK SPACE
)
### Encoding table
# Inverse mapping (unicode -> byte) built from the decoding table above.
encoding_table=codecs.charmap_build(decoding_table)
| mit |
zettrider/learning_python | 02_Week2/02_03_exercise.py | 1 | 1606 | '''
III. You have the following four lines from 'show ip bgp':
entry1 = "* 1.0.192.0/18 157.130.10.233 0 701 38040 9737 i"
entry2 = "* 1.1.1.0/24 157.130.10.233 0 701 1299 15169 i"
entry3 = "* 1.1.42.0/24 157.130.10.233 0 701 9505 17408 2.1465 i"
entry4 = "* 1.0.192.0/19 157.130.10.233 0 701 6762 6762 6762 6762 38040 9737 i"
Note, in each case the AS_PATH starts with '701'.
Using split() and a list slice, how could you process each of these such that--for each entry, you return an ip_prefix
and the AS_PATH (the ip_prefix should be a string; the AS_PATH should be a list):
Your output should look like this:
ip_prefix as_path
1.0.192.0/18 ['701', '38040', '9737']
1.1.1.0/24 ['701', '1299', '15169']
1.1.42.0/24 ['701', '9505', '17408', '2.1465']
1.0.192.0/19 ['701', '6762', '6762', '6762', '6762', '38040', '9737']
'''
# Each 'show ip bgp' line: flag, prefix, next-hop, metric, AS_PATH..., origin.
entry1 = "* 1.0.192.0/18 157.130.10.233 0 701 38040 9737 i"
entry2 = "* 1.1.1.0/24 157.130.10.233 0 701 1299 15169 i"
entry3 = "* 1.1.42.0/24 157.130.10.233 0 701 9505 17408 2.1465 i"
entry4 = "* 1.0.192.0/19 157.130.10.233 0 701 6762 6762 6762 6762 38040 9737 i"
entry_combined = [entry1, entry2, entry3, entry4]
print("%-20s %s" % ("ip_prefix", "as_path"))
for item in entry_combined:
    entry_list = item.split()        # idiomatic instance call (was str.split(item))
    ip_prefix = entry_list[1]        # field 1 is the prefix
    as_path = entry_list[4:-1]       # drop flag/prefix/next-hop/metric and trailing 'i'
    print("%-20s %s" % (ip_prefix, as_path))
| gpl-3.0 |
jbzdarkid/Random | witness.py | 1 | 18046 | # Work isValidSolution() into stage 1 not stage 0... May not work because I need to know which blue_paths are a valid start -- which involves doing all the work anyways.
from collections import deque
from time import time
class Path():
    """A sequence of lattice points (x, y) traced on the board.

    Equality and hashing are order-independent (based on the sorted point
    list) so two traversals of the same point set compare equal and are
    interchangeable as dict keys.
    """
    def __init__(self, path):
        if isinstance(path, list):
            self.path = path
        elif isinstance(path, tuple):
            # A single point: wrap it into a one-element path.
            self.path = [path]
        else:
            raise TypeError('Path.__init__ called with: %s' % str(path))
        self.sortedpath = sorted(self.path)
    def __hash__(self):
        # Bug fix: the old code's 'hash << 5' was a discarded no-op
        # statement, 'point[1]<<3 + point[0]' shifted by (3 + x) due to
        # operator precedence, and hashing the order-dependent point list
        # let equal Paths (same sortedpath, different traversal order) hash
        # differently -- violating the __eq__/__hash__ contract for dict
        # keys.  Hash the canonical sorted form instead.
        return hash(tuple(self.sortedpath))
    def __eq__(self, other):
        # Order-independent comparison via the canonical sorted form.
        return self.sortedpath == other.sortedpath
    def __ne__(self, other):
        # Python 2 does not derive != from ==; define it explicitly.
        return not self.__eq__(other)
    # Called with len()
    def __len__(self):
        return len(self.path)
    # Called when trying to print.
    def __repr__(self):
        return 'Path(%s)' % repr(self.path)
    def collides(self, other):
        """Return True if the two paths share any point."""
        # Points are hashable tuples, so a set-disjointness test is O(n+m)
        # and equivalent to the old sorted two-pointer scan.
        return not set(self.sortedpath).isdisjoint(other.sortedpath)
def plus(a, b, c):
    """Translate the lattice point a = (x, y) by the offset (b, c)."""
    x, y = a
    return (x + b, y + c)
# Checks if a path is valid. Considerations are:
# 1. Steps forward should avoid forming loops (pictured in the comments, with o as the head and .. as the potential next step)
# 1a. Loops are OK when capturing an interesting point, such as a star or a square.
# 1b. Loops are OK when they hit the edge of the board, as this can divide stars.
# 2. Steps forward need to not collide with other paths
# 3. Steps forward need to be in bounds
# 4. Steps forward need to respect the breaks in the board
def isValidConnection(color, blue_path, orange_path, dir):
    """Return True if the head of the `color` path may take one step `dir`.

    blue_path / orange_path are raw lists of (x, y) points (not Path
    objects); `dir` is one of 'left'/'right'/'up'/'down'.
    """
    # Board edges that are physically broken: a head sitting on one of
    # these nodes may not leave in the listed direction(s)  (issue 4).
    banned_connections = {
        (0,2):['down'],
        (0,3):['up', 'down'],
        (0,4):['up'],
        (1,0):['right'],
        (2,0):['left'],
        (4,0):['right'],
        (5,0):['left']
    }
    # head is the current tip; head2 (two steps back) is what the loop
    # checks below compare against.
    head2 = None
    if color == 'blue':
        head = blue_path[-1]
        if len(blue_path) > 2:
            head2 = blue_path[-3]
    elif color == 'orange':
        head = orange_path[-1]
        if len(orange_path) > 2:
            head2 = orange_path[-3]
    if head in banned_connections and dir in banned_connections[head]:
        return False
    # 6 stars, 2 squares
    interesting_points = [(2, 0), (3, 1), (0, 2), (0, 3), (5, 2), (5, 3), (0, 1), (5, 1)]
    if dir == 'right':
        if head[0] == 6: # Issue 3
            return False
        if head[0] != 0: # Issue 1b
            # o..
            # |
            # +--
            if plus(head, 1, 1) == head2 and plus(head, 0, 0) not in interesting_points:
                return False # Issues 1 and 1a
            # +--
            # |
            # o..
            if plus(head, 1, -1) == head2 and plus(head, 0, -1) not in interesting_points:
                return False # Issues 1 and 1a
        if plus(head, 1, 0) in blue_path:
            return False # Issue 2
        if plus(head, 1, 0) in orange_path:
            return False # Issue 2
    elif dir == 'down':
        if head[1] == 4:
            return False
        if head[1] != 0:
            # +-o
            # | .
            # | .
            if plus(head, -1, 1) == head2 and plus(head, -1, 0) not in interesting_points:
                return False
            # o-+
            # . |
            # . |
            if plus(head, 1, 1) == head2 and plus(head, 0, 0) not in interesting_points:
                return False
        if plus(head, 0, 1) in blue_path:
            return False
        elif plus(head, 0, 1) in orange_path:
            return False
    elif dir == 'left':
        if head[0] == 0:
            return False
        if head[0] != 6:
            # --+
            #   |
            # ..o
            if plus(head, -1, -1) == head2 and plus(head, -1, -1) not in interesting_points:
                return False
            # ..o
            #   |
            # --+
            if plus(head, -1, 1) == head2 and plus(head, -1, 0) not in interesting_points:
                return False
        if plus(head, -1, 0) in blue_path:
            return False
        if plus(head, -1, 0) in orange_path:
            return False
    elif dir == 'up':
        if head[1] == 0:
            return False
        if head[1] != 4:
            # . |
            # . |
            # o-+
            if plus(head, 1, -1) == head2 and plus(head, 0, -1) not in interesting_points:
                return False
            # | .
            # | .
            # +-o
            if plus(head, -1, -1) == head2 and plus(head, -1, -1) not in interesting_points:
                return False
        if plus(head, 0, -1) in blue_path:
            return False
        if plus(head, 0, -1) in orange_path:
            return False
    return True
def format_time(seconds):
    """Convert a duration in seconds into a human-readable string.

    Sub-second durations are rendered in milliseconds; anything longer is
    rendered as comma-separated hour/minute/second parts, with correct
    singular/plural forms.
    """
    from math import floor
    if seconds < 1:
        return '%0.2f milliseconds' % (seconds * 1000)
    seconds = int(floor(seconds))
    parts = []
    if seconds >= 3600:  # boundary fix: 3600 is exactly 1 hour, not 60 minutes
        hours = seconds // 3600
        parts.append('%d hour%s' % (hours, '' if hours == 1 else 's'))
        # Bug fix: the original did 'seconds %= 60' here, destroying the
        # minutes component (e.g. 3661s printed as '1 hour1 second').
        seconds %= 3600
    if seconds >= 60:    # boundary fix: 60 is exactly 1 minute, not 60 seconds
        minutes = seconds // 60
        parts.append('%d minute%s' % (minutes, '' if minutes == 1 else 's'))
        seconds %= 60
    if seconds > 0:
        parts.append('%d second%s' % (seconds, '' if seconds == 1 else 's'))
    # Joining also fixes the missing ', ' after the hours part.
    return ', '.join(parts)
def findSolutions(base):
    """Breadth-first enumeration of all valid paths for one color.

    `base` is a (color, blue_path, orange_path) state; the `color` path is
    grown one step at a time via isValidConnection, and every state whose
    head reaches one of the board's exit nodes is recorded.  Returns a
    list of Path objects.
    """
    to_visit = deque([base])
    solutions = []
    # Direction offsets, in the same order the original branches tried
    # them, so BFS order (and thus the result order) is unchanged.
    steps = (('left', -1, 0), ('right', 1, 0), ('up', 0, -1), ('down', 0, 1))
    while len(to_visit) > 0:
        color, blue_path, orange_path = to_visit.popleft()
        # Only the active color's path grows; the other is fixed context.
        active = blue_path if color == 'blue' else orange_path
        head = active[-1]
        if head in [(0, 0), (6, 0), (0, 3), (0, 4), (6, 4)]: # Valid exits
            solutions.append(Path(active))
        for dir, dx, dy in steps:
            if isValidConnection(color, blue_path, orange_path, dir):
                extended = active + [plus(head, dx, dy)]
                if color == 'blue':
                    to_visit.append((color, extended, orange_path))
                else:
                    to_visit.append((color, blue_path, extended))
    return solutions
def isConnected(blue_path, orange_path, square, dir):
    """Return True if `square` is contiguous with its neighbor toward `dir`.

    A move between two squares is blocked when either Path traverses the
    board edge separating them, i.e. when the two lattice endpoints of
    that edge appear consecutively (in either order) in a path.  Board
    borders (x==0/5, y==0/3) also block.

    This replaces four near-identical copy-pasted branches probing
    `path.index()` with a single adjacency check; it is also robust to a
    point occurring more than once in a path (the original only inspected
    the first occurrence).
    """
    x, y = square
    # For each direction: the border test, then the two lattice endpoints
    # of the edge that would wall off the move.
    if dir == 'left':
        if x == 0:
            return False
        a, b = (x, y), (x, y + 1)
    elif dir == 'up':
        if y == 0:
            return False
        a, b = (x, y), (x + 1, y)
    elif dir == 'right':
        if x == 5:
            return False
        a, b = (x + 1, y + 1), (x + 1, y)
    elif dir == 'down':
        if y == 3:
            return False
        a, b = (x + 1, y + 1), (x, y + 1)
    for path in (blue_path, orange_path):
        points = path.path
        for p, q in zip(points, points[1:]):
            if (p == a and q == b) or (p == b and q == a):
                return False
    return True
def isValidSolution(blue_path, orange_path):
    """Return True if the blue/orange Path pair solves the puzzle.

    The paths must not overlap, every star must share its region with
    exactly one other star, and the black square (0, 1) must be cut off
    from the white square (5, 1).  Regions are identified by square
    centers, so stars live in [0-5] x [0-3].
    """
    # Given n paths of length m each:
    # Comparing naively is O(n^2 * m^2)
    # Sorting first costs O(n * log(m)) and comparing is then O(n^2 * m)
    if blue_path.collides(orange_path):
        return False

    def _region(start):
        # Flood-fill the squares contiguous with `start`, treating both
        # paths as walls (via isConnected).  Extracted to remove the two
        # duplicated fill loops of the original.
        visited = [start]
        i = 0
        while i < len(visited):
            square = visited[i]
            for dir, dx, dy in (('left', -1, 0), ('up', 0, -1),
                                ('right', 1, 0), ('down', 0, 1)):
                if isConnected(blue_path, orange_path, square, dir):
                    neighbor = plus(square, dx, dy)
                    if neighbor not in visited:
                        visited.append(neighbor)
            i += 1
        return visited

    stars = [(2, 0), (3, 1), (0, 2), (0, 3), (5, 2), (5, 3)]
    # There are 6 stars; each iteration consumes one star plus its pair.
    for _ in range(3):
        pair_star = None
        for square in _region(stars.pop()):
            if square in stars:
                if pair_star is not None:
                    return False  # Three or more stars share a region.
                pair_star = square
        if pair_star is None:
            return False  # A star with no partner in its region.
        stars.remove(pair_star)
    # All stars verified; the black square's region must not reach the
    # white square.
    return (5, 1) not in _region((0, 1))
# Stage 0: Calculate all valid blue and orange paths.
stageStart = time()
blue_paths = findSolutions(('blue', [(3, 4)], [(3, 0)]))
blue_paths.append(Path((3, 4))) # Added in for the start point
orange_paths = findSolutions(('orange', [(3, 4)], [(3, 0)]))
orange_paths.append(Path((3, 0))) # Added in for the start point
# path_combos maps bPath -> oPath -> node data: 'parents' (combos that can
# reach this one), 'pCost' (cost from the start), and 'bCost'/'oCost'
# (cost to a blue/orange exit), each stored as (cost, predecessor).
# These are initialized separately because A. they aren't valid paths, and B. they need to have a children array defined (as a base case).
path_combos = {Path((3, 4)):{Path((3, 0)):{'parents':[], 'bCost':None, 'oCost':None, 'pCost': (0, None)}}}
# Keep only (blue, orange) pairs that actually solve the puzzle together.
for bPath in blue_paths:
    for oPath in orange_paths:
        if isValidSolution(bPath, oPath):
            if bPath not in path_combos:
                path_combos[bPath] = {}
            # if oPath in path_combos[bPath]:
            #     raise Exception
            path_combos[bPath][oPath] = {'parents':[], 'bCost':None, 'oCost':None, 'pCost':None}
stageEnd = time()
print 'Stage 0 done in', format_time(stageEnd-stageStart)
stageStart = stageEnd
# Stage 1: Create a tree through combos which connects all the possible paths (from start)
exits_b = []  # combos whose blue path ends on an exit node
exits_o = []  # combos whose orange path ends on an exit node
to_visit = deque([(Path((3, 4)), Path((3, 0)), 'blue')]) # Base starting point: Each path is length 1, and we start on the blue side.
while len(to_visit) > 0:
    bPath, oPath, color = to_visit.popleft()
    if color == 'blue' or oPath.path[-1][1] == 0: # Orange path connects to blue side or we're on the blue side, look for a new blue path.
        for new_bPath in blue_paths:
            if new_bPath not in path_combos:
                continue
            if new_bPath == bPath:
                continue
            if oPath in path_combos[new_bPath]: # Valid path
                path_combos[new_bPath][oPath]['parents'].append((bPath, oPath))
                # First discovery: record cost-from-start and enqueue.
                if path_combos[new_bPath][oPath]['pCost'] == None:
                    path_combos[new_bPath][oPath]['pCost'] = (path_combos[bPath][oPath]['pCost'][0]+len(bPath), (bPath, oPath))
                    to_visit.append((new_bPath, oPath, 'blue'))
                # Relaxation: keep the cheaper way of reaching this combo.
                if path_combos[bPath][oPath]['pCost'][0] + len(bPath) < path_combos[new_bPath][oPath]['pCost'][0]:
                    path_combos[new_bPath][oPath]['pCost'] = (path_combos[bPath][oPath]['pCost'][0]+len(bPath), (bPath, oPath))
                if new_bPath.path[-1] == (0, 3) and len(oPath) > 1: # Found a solution!
                    path_combos[new_bPath][oPath]['bCost'] = (0, None)
                    exits_b.append((new_bPath, oPath))
    if color == 'orange' or bPath.path[-1][1] == 4: # Blue path connects to orange side or we're on the orange side, look for a new orange path.
        for new_oPath in orange_paths:
            if new_oPath == oPath:
                continue
            if new_oPath in path_combos[bPath]: # Valid path
                path_combos[bPath][new_oPath]['parents'].append((bPath, oPath))
                if path_combos[bPath][new_oPath]['pCost'] == None:
                    path_combos[bPath][new_oPath]['pCost'] = (path_combos[bPath][oPath]['pCost'][0]+len(oPath), (bPath, oPath))
                    to_visit.append((bPath, new_oPath, 'orange'))
                if path_combos[bPath][oPath]['pCost'][0] + len(oPath) < path_combos[bPath][new_oPath]['pCost'][0]:
                    path_combos[bPath][new_oPath]['pCost'] = (path_combos[bPath][oPath]['pCost'][0]+len(oPath), (bPath, oPath))
                if new_oPath.path[-1] == (0, 3) and len(bPath) > 1: # Found a solution!
                    path_combos[bPath][new_oPath]['oCost'] = (0, None)
                    exits_o.append((bPath, new_oPath))
stageEnd = time()
print 'Stage 1 done in', format_time(stageEnd-stageStart)
stageStart = stageEnd
# Stage 2: Calculate distance to exit at each node
def update_cost(cost, parent, child):
    """Relax `parent`'s cached exit cost ('bCost' or 'oCost') via `child`.

    `parent` and `child` are (bPath, oPath) combo keys into the global
    path_combos table.  Returns True when the parent's cost was created or
    improved, so the caller re-enqueues the parent and the change keeps
    propagating upward.
    """
    parent_cost = path_combos[parent[0]][parent[1]][cost]
    child_cost = path_combos[child[0]][child[1]][cost]
    # Edge weight: the length of whichever path changed between parent and
    # child (same blue path => the orange path was the one extended).
    if parent[0] == child[0]:
        step = len(child[1])
    else:
        step = len(child[0])
    if parent_cost is None or parent_cost[0] > child_cost[0] + step:
        path_combos[parent[0]][parent[1]][cost] = (child_cost[0] + step, child)
        # Bug fix: the original returned True only on the first (None ->
        # value) assignment; a later cost *improvement* fell through and
        # returned False, so the cheaper cost was never re-propagated to
        # the parent's own parents.
        return True
    return False
# Calculates cost at each node to reach a blue solution
# (backward propagation from every blue exit through the 'parents' links).
to_visit = deque(exits_b)
while len(to_visit) > 0:
    bPath, oPath = to_visit.popleft()
    for parent in path_combos[bPath][oPath]['parents']:
        if update_cost('bCost', parent, (bPath, oPath)):
            to_visit.append(parent)
# Calculates cost at each node to reach an orange solution
to_visit = deque(exits_o)
while len(to_visit) > 0:
    bPath, oPath = to_visit.popleft()
    for parent in path_combos[bPath][oPath]['parents']:
        if update_cost('oCost', parent, (bPath, oPath)):
            to_visit.append(parent)
stageEnd = time()
print 'Stage 2 done in', format_time(stageEnd-stageStart)
stageStart = stageEnd
# Stage 3: Find and print the optimal solutions.
# NOTE(review): 'exit' shadows the builtin of the same name here.
min_both = (999, None)    # cheapest combo that can still reach BOTH exits
min_single = (999, None)  # cheapest combo reaching at least one exit
for exit in exits_b:
    bPath, oPath = exit
    cost_single = path_combos[bPath][oPath]['pCost'][0]
    # NOTE(review): 'oCost' may still be None if the orange exit is
    # unreachable from this combo, which would raise TypeError -- confirm
    # every blue-exit combo also reaches an orange exit.
    cost_both = cost_single+path_combos[bPath][oPath]['oCost'][0]
    if cost_single < min_single[0]:
        min_single = (cost_single, exit)
    if cost_both < min_both[0]:
        min_both = (cost_both, exit)
for exit in exits_o:
    bPath, oPath = exit
    cost_single = path_combos[bPath][oPath]['pCost'][0]
    cost_both = cost_single+path_combos[bPath][oPath]['bCost'][0]
    if cost_single < min_single[0]:
        min_single = (cost_single, exit)
    if cost_both < min_both[0]:
        min_both = (cost_both, exit)
# Walk the 'pCost' predecessor links back to the start, printing the chain.
print 'Minimum cost for a single exit:', min_single[0]
node = min_single[1]
while node is not None:
    print node
    node = path_combos[node[0]][node[1]]['pCost'][1]
print 'Minimum cost for a both exits:', min_both[0]
node = min_both[1]
while node is not None:
    print node
    node = path_combos[node[0]][node[1]]['pCost'][1]
| apache-2.0 |
michelts/lettuce | tests/integration/lib/Django-1.3/tests/regressiontests/model_formsets_regress/tests.py | 51 | 15361 | from django import forms
from django.forms.formsets import BaseFormSet, DELETION_FIELD_NAME
from django.forms.util import ErrorDict, ErrorList
from django.forms.models import modelform_factory, inlineformset_factory, modelformset_factory, BaseModelFormSet
from django.test import TestCase
from models import User, UserSite, Restaurant, Manager, Network, Host
class InlineFormsetTests(TestCase):
    """Regression tests for inline/model formsets (vendored Django 1.3 suite).

    Each test drives a full bound-form lifecycle -- create, update, then add
    a second related object -- asserting the saved database state after each
    POST-style data dict is validated and saved.
    """

    def test_formset_over_to_field(self):
        "A formset over a ForeignKey with a to_field can be saved. Regression for #10243"
        Form = modelform_factory(User)
        FormSet = inlineformset_factory(User, UserSite)
        # Instantiate the Form and FormSet to prove
        # you can create a form with no data
        form = Form()
        form_set = FormSet(instance=User())
        # Now create a new User and UserSite instance
        data = {
            'serial': u'1',
            'username': u'apollo13',
            'usersite_set-TOTAL_FORMS': u'1',
            'usersite_set-INITIAL_FORMS': u'0',
            'usersite_set-MAX_NUM_FORMS': u'0',
            'usersite_set-0-data': u'10',
            'usersite_set-0-user': u'apollo13'
        }
        user = User()
        form = Form(data)
        if form.is_valid():
            user = form.save()
        else:
            # NOTE(review): '%s' interpolates form_set here (and in the
            # sibling test below) -- form.errors was probably intended.
            self.fail('Errors found on form:%s' % form_set)
        form_set = FormSet(data, instance=user)
        if form_set.is_valid():
            form_set.save()
            usersite = UserSite.objects.all().values()
            self.assertEqual(usersite[0]['data'], 10)
            # user_id stores the username: the FK points at a to_field.
            self.assertEqual(usersite[0]['user_id'], u'apollo13')
        else:
            self.fail('Errors found on formset:%s' % form_set.errors)
        # Now update the UserSite instance
        data = {
            'usersite_set-TOTAL_FORMS': u'1',
            'usersite_set-INITIAL_FORMS': u'1',
            'usersite_set-MAX_NUM_FORMS': u'0',
            'usersite_set-0-id': unicode(usersite[0]['id']),
            'usersite_set-0-data': u'11',
            'usersite_set-0-user': u'apollo13'
        }
        form_set = FormSet(data, instance=user)
        if form_set.is_valid():
            form_set.save()
            usersite = UserSite.objects.all().values()
            self.assertEqual(usersite[0]['data'], 11)
            self.assertEqual(usersite[0]['user_id'], u'apollo13')
        else:
            self.fail('Errors found on formset:%s' % form_set.errors)
        # Now add a new UserSite instance
        data = {
            'usersite_set-TOTAL_FORMS': u'2',
            'usersite_set-INITIAL_FORMS': u'1',
            'usersite_set-MAX_NUM_FORMS': u'0',
            'usersite_set-0-id': unicode(usersite[0]['id']),
            'usersite_set-0-data': u'11',
            'usersite_set-0-user': u'apollo13',
            'usersite_set-1-data': u'42',
            'usersite_set-1-user': u'apollo13'
        }
        form_set = FormSet(data, instance=user)
        if form_set.is_valid():
            form_set.save()
            usersite = UserSite.objects.all().values().order_by('data')
            self.assertEqual(usersite[0]['data'], 11)
            self.assertEqual(usersite[0]['user_id'], u'apollo13')
            self.assertEqual(usersite[1]['data'], 42)
            self.assertEqual(usersite[1]['user_id'], u'apollo13')
        else:
            self.fail('Errors found on formset:%s' % form_set.errors)

    def test_formset_over_inherited_model(self):
        "A formset over a ForeignKey with a to_field can be saved. Regression for #11120"
        Form = modelform_factory(Restaurant)
        FormSet = inlineformset_factory(Restaurant, Manager)
        # Instantiate the Form and FormSet to prove
        # you can create a form with no data
        form = Form()
        form_set = FormSet(instance=Restaurant())
        # Now create a new Restaurant and Manager instance
        data = {
            'name': u"Guido's House of Pasta",
            'manager_set-TOTAL_FORMS': u'1',
            'manager_set-INITIAL_FORMS': u'0',
            'manager_set-MAX_NUM_FORMS': u'0',
            'manager_set-0-name': u'Guido Van Rossum'
        }
        # NOTE(review): User() looks like a copy/paste slip (Restaurant()
        # expected); harmless since the valid path rebinds via form.save().
        restaurant = User()
        form = Form(data)
        if form.is_valid():
            restaurant = form.save()
        else:
            self.fail('Errors found on form:%s' % form_set)
        form_set = FormSet(data, instance=restaurant)
        if form_set.is_valid():
            form_set.save()
            manager = Manager.objects.all().values()
            self.assertEqual(manager[0]['name'], 'Guido Van Rossum')
        else:
            self.fail('Errors found on formset:%s' % form_set.errors)
        # Now update the Manager instance
        data = {
            'manager_set-TOTAL_FORMS': u'1',
            'manager_set-INITIAL_FORMS': u'1',
            'manager_set-MAX_NUM_FORMS': u'0',
            'manager_set-0-id': unicode(manager[0]['id']),
            'manager_set-0-name': u'Terry Gilliam'
        }
        form_set = FormSet(data, instance=restaurant)
        if form_set.is_valid():
            form_set.save()
            manager = Manager.objects.all().values()
            self.assertEqual(manager[0]['name'], 'Terry Gilliam')
        else:
            self.fail('Errors found on formset:%s' % form_set.errors)
        # Now add a new Manager instance
        data = {
            'manager_set-TOTAL_FORMS': u'2',
            'manager_set-INITIAL_FORMS': u'1',
            'manager_set-MAX_NUM_FORMS': u'0',
            'manager_set-0-id': unicode(manager[0]['id']),
            'manager_set-0-name': u'Terry Gilliam',
            'manager_set-1-name': u'John Cleese'
        }
        form_set = FormSet(data, instance=restaurant)
        if form_set.is_valid():
            form_set.save()
            manager = Manager.objects.all().values().order_by('name')
            self.assertEqual(manager[0]['name'], 'John Cleese')
            self.assertEqual(manager[1]['name'], 'Terry Gilliam')
        else:
            self.fail('Errors found on formset:%s' % form_set.errors)

    def test_formset_with_none_instance(self):
        "A formset with instance=None can be created. Regression for #11872"
        Form = modelform_factory(User)
        FormSet = inlineformset_factory(User, UserSite)
        # Instantiate the Form and FormSet to prove
        # you can create a formset with an instance of None
        form = Form(instance=None)
        formset = FormSet(instance=None)

    def test_empty_fields_on_modelformset(self):
        "No fields passed to modelformset_factory should result in no fields on returned forms except for the id. See #14119."
        UserFormSet = modelformset_factory(User, fields=())
        formset = UserFormSet()
        for form in formset.forms:
            # Only the implicit pk field remains.
            self.assertTrue('id' in form.fields)
            self.assertEqual(len(form.fields), 1)

    def test_save_as_new_with_new_inlines(self):
        """
        Existing and new inlines are saved with save_as_new.
        Regression for #14938.
        """
        efnet = Network.objects.create(name="EFNet")
        host1 = Host.objects.create(hostname="irc.he.net", network=efnet)
        HostFormSet = inlineformset_factory(Network, Host)
        # Add a new host, modify previous host, and save-as-new
        data = {
            'host_set-TOTAL_FORMS': u'2',
            'host_set-INITIAL_FORMS': u'1',
            'host_set-MAX_NUM_FORMS': u'0',
            'host_set-0-id': unicode(host1.id),
            'host_set-0-hostname': u'tranquility.hub.dal.net',
            'host_set-1-hostname': u'matrix.de.eu.dal.net'
        }
        # To save a formset as new, it needs a new hub instance
        dalnet = Network.objects.create(name="DALnet")
        formset = HostFormSet(data, instance=dalnet, save_as_new=True)
        self.assertTrue(formset.is_valid())
        formset.save()
        # Both rows must now hang off the new network; EFNet's host is untouched.
        self.assertQuerysetEqual(
            dalnet.host_set.order_by("hostname"),
            ["<Host: matrix.de.eu.dal.net>", "<Host: tranquility.hub.dal.net>"]
        )
class FormsetTests(TestCase):
    def test_error_class(self):
        """Formset and form error attributes expose the documented types."""
        UserFormSet = modelformset_factory(User)
        # POST-style payload describing two new (unsaved) users.
        payload = {
            'form-TOTAL_FORMS': u'2',
            'form-INITIAL_FORMS': u'0',
            'form-MAX_NUM_FORMS': u'0',
            'form-0-id': '',
            'form-0-username': u'apollo13',
            'form-0-serial': u'1',
            'form-1-id': '',
            'form-1-username': u'apollo13',
            'form-1-serial': u'2',
        }
        bound = UserFormSet(payload)
        # Formset level: errors is a plain list (one dict per form, as
        # documented); non-form errors arrive wrapped in an ErrorList.
        self.assertTrue(isinstance(bound.errors, list))
        self.assertTrue(isinstance(bound.non_form_errors(), ErrorList))
        # Form level: ErrorDict keyed by field name, ErrorList otherwise.
        for member in bound.forms:
            self.assertTrue(isinstance(member.errors, ErrorDict))
            self.assertTrue(isinstance(member.non_field_errors(), ErrorList))
class CustomWidget(forms.CharField):
    # Marker class installed via UserSiteForm.Meta.widgets so the factory
    # tests can isinstance-check that Meta widget overrides are honoured.
    # NOTE(review): it subclasses forms.CharField (a Field), not forms.Widget;
    # the tests only inspect form['data'].field.widget and never render it,
    # so this works -- confirm it is intentional.
    pass
class UserSiteForm(forms.ModelForm):
    # ModelForm whose Meta overrides the widget for the 'data' field; used
    # to verify that the formset factories honour Meta.widgets (#13095).
    class Meta:
        model = UserSite
        widgets = {'data': CustomWidget}
class Callback(object):
    """Recording formfield_callback: remembers every (db_field, kwargs) call
    before delegating to the field's default formfield() factory."""

    def __init__(self):
        # One (db_field, kwargs) tuple per invocation, in call order.
        self.log = []

    def __call__(self, db_field, **kwargs):
        entry = (db_field, kwargs)
        self.log.append(entry)
        return db_field.formfield(**kwargs)
class FormfieldCallbackTests(TestCase):
    """
    Regression for #13095: Using base forms with widgets
    defined in Meta should not raise errors.
    """

    def test_inlineformset_factory_default(self):
        # The factory must pick up CustomWidget from UserSiteForm.Meta.widgets.
        FormSet = inlineformset_factory(User, UserSite, form=UserSiteForm)
        first_form = FormSet().forms[0]
        self.assertTrue(isinstance(first_form['data'].field.widget, CustomWidget))

    def test_modelformset_factory_default(self):
        FormSet = modelformset_factory(UserSite, form=UserSiteForm)
        first_form = FormSet().forms[0]
        self.assertTrue(isinstance(first_form['data'].field.widget, CustomWidget))

    def assertCallbackCalled(self, callback):
        # The callback must have fired once per model field, in declaration
        # order, receiving the Meta widget override only for 'data'.
        model_fields = UserSite._meta.fields
        overrides = {model_fields[2]: {'widget': CustomWidget}}
        expected_log = [(f, overrides.get(f, {})) for f in model_fields]
        self.assertEqual(callback.log, expected_log)

    def test_inlineformset_custom_callback(self):
        callback = Callback()
        inlineformset_factory(User, UserSite, form=UserSiteForm,
                              formfield_callback=callback)
        self.assertCallbackCalled(callback)

    def test_modelformset_custom_callback(self):
        callback = Callback()
        modelformset_factory(UserSite, form=UserSiteForm,
                             formfield_callback=callback)
        self.assertCallbackCalled(callback)
class BaseCustomDeleteFormSet(BaseFormSet):
    """
    A formset mix-in that lets a form decide if it's to be deleted.
    Works for BaseFormSets. Also works for ModelFormSets with #14099 fixed.
    form.should_delete() is called. The formset delete field is also suppressed.
    """

    def add_fields(self, form, index):
        super(BaseCustomDeleteFormSet, self).add_fields(form, index)
        # Deletion stays enabled, but the user-visible DELETE checkbox is
        # stripped -- each form decides for itself via should_delete().
        self.can_delete = True
        form.fields.pop(DELETION_FIELD_NAME, None)

    def _should_delete_form(self, form):
        # Delegate the decision to the form when it offers should_delete().
        decide = getattr(form, 'should_delete', None)
        return decide is not None and decide()
class FormfieldShouldDeleteFormTests(TestCase):
    """
    Regression for #14099: BaseModelFormSet should use ModelFormSet method _should_delete_form
    """

    class BaseCustomDeleteModelFormSet(BaseModelFormSet, BaseCustomDeleteFormSet):
        """ Model FormSet with CustomDelete MixIn """

    class CustomDeleteUserForm(forms.ModelForm):
        """ A model form with a 'should_delete' method """
        class Meta:
            model = User
        def should_delete(self):
            """ delete form if odd PK """
            return self.instance.id % 2 != 0

    # Built once at class-creation time: NormalFormset honours the standard
    # DELETE checkbox, DeleteFormset routes the decision through
    # CustomDeleteUserForm.should_delete() via the mix-in.
    NormalFormset = modelformset_factory(User, form=CustomDeleteUserForm, can_delete=True)
    DeleteFormset = modelformset_factory(User, form=CustomDeleteUserForm, formset=BaseCustomDeleteModelFormSet)

    # POST payload describing four new users.
    data = {
        'form-TOTAL_FORMS': '4',
        'form-INITIAL_FORMS': '0',
        'form-MAX_NUM_FORMS': '4',
        'form-0-username': 'John',
        'form-0-serial': '1',
        'form-1-username': 'Paul',
        'form-1-serial': '2',
        'form-2-username': 'George',
        'form-2-serial': '3',
        'form-3-username': 'Ringo',
        'form-3-serial': '5',
    }

    # Extra keys that tick the DELETE checkbox on every form.
    delete_all_ids = {
        'form-0-DELETE': '1',
        'form-1-DELETE': '1',
        'form-2-DELETE': '1',
        'form-3-DELETE': '1',
    }

    def test_init_database(self):
        """ Add test data to database via formset """
        # Also called by the other tests to (re)populate the table.
        formset = self.NormalFormset(self.data)
        self.assertTrue(formset.is_valid())
        self.assertEqual(len(formset.save()), 4)

    def test_no_delete(self):
        """ Verify base formset doesn't modify database """
        # reload database
        self.test_init_database()
        # pass standard data dict & see none updated
        data = dict(self.data)
        data['form-INITIAL_FORMS'] = 4
        data.update(dict(
            ('form-%d-id' % i, user.id)
            for i,user in enumerate(User.objects.all())
        ))
        formset = self.NormalFormset(data, queryset=User.objects.all())
        self.assertTrue(formset.is_valid())
        self.assertEqual(len(formset.save()), 0)
        self.assertEqual(len(User.objects.all()), 4)

    def test_all_delete(self):
        """ Verify base formset honors DELETE field """
        # reload database
        self.test_init_database()
        # create data dict with all fields marked for deletion
        data = dict(self.data)
        data['form-INITIAL_FORMS'] = 4
        data.update(dict(
            ('form-%d-id' % i, user.id)
            for i,user in enumerate(User.objects.all())
        ))
        data.update(self.delete_all_ids)
        formset = self.NormalFormset(data, queryset=User.objects.all())
        self.assertTrue(formset.is_valid())
        self.assertEqual(len(formset.save()), 0)
        self.assertEqual(len(User.objects.all()), 0)

    def test_custom_delete(self):
        """ Verify DeleteFormset ignores DELETE field and uses form method """
        # reload database
        self.test_init_database()
        # Create formset with custom Delete function
        # create data dict with all fields marked for deletion
        data = dict(self.data)
        data['form-INITIAL_FORMS'] = 4
        data.update(dict(
            ('form-%d-id' % i, user.id)
            for i,user in enumerate(User.objects.all())
        ))
        data.update(self.delete_all_ids)
        formset = self.DeleteFormset(data, queryset=User.objects.all())
        # verify two were deleted
        self.assertTrue(formset.is_valid())
        self.assertEqual(len(formset.save()), 0)
        self.assertEqual(len(User.objects.all()), 2)
        # verify no "odd" PKs left
        odd_ids = [user.id for user in User.objects.all() if user.id % 2]
        self.assertEqual(len(odd_ids), 0)
| gpl-3.0 |
idrogeno/enigma2 | lib/python/Components/Timeshift.py | 3 | 75117 | # -*- coding: utf-8 -*-
# InfoBarTimeshift requires InfoBarSeek, instantiated BEFORE!
# Hrmf.
#
# Timeshift works the following way:
# demux0 demux1 "TimeshiftActions" "TimeshiftActivateActions" "SeekActions"
# - normal playback TUNER unused PLAY enable disable disable
# - user presses "yellow" button. FILE record PAUSE enable disable enable
# - user presess pause again FILE record PLAY enable disable enable
# - user fast forwards FILE record FF enable disable enable
# - end of timeshift buffer reached TUNER record PLAY enable enable disable
# - user backwards FILE record BACK # !! enable disable enable
#
# in other words:
# - when a service is playing, pressing the "timeshiftStart" button ("yellow") enables recording ("enables timeshift"),
# freezes the picture (to indicate timeshift), sets timeshiftMode ("activates timeshift")
# now, the service becomes seekable, so "SeekActions" are enabled, "TimeshiftEnableActions" are disabled.
# - the user can now PVR around
# - if it hits the end, the service goes into live mode ("deactivates timeshift", it's of course still "enabled")
# the service looses it's "seekable" state. It can still be paused, but just to activate timeshift right
# after!
# the seek actions will be disabled, but the timeshiftActivateActions will be enabled
# - if the user rewinds, or press pause, timeshift will be activated again
# note that a timeshift can be enabled ("recording") and
# activated (currently time-shifting).
from Components.ActionMap import ActionMap, HelpableActionMap
from Components.ServiceEventTracker import ServiceEventTracker
from Components.config import config
from Components.SystemInfo import SystemInfo
from Components.Task import job_manager as JobManager
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
import Screens.Standby
from ServiceReference import ServiceReference
from RecordTimer import RecordTimerEntry, parseEvent
from timer import TimerEntry
from Tools import ASCIItranslit, Notifications
from Tools.BoundFunction import boundFunction
from Tools.Directories import pathExists, fileExists, getRecordingFilename, copyfile, resolveFilename, SCOPE_TIMESHIFT, SCOPE_AUTORECORD
from Tools.TimeShift import CopyTimeshiftJob, MergeTimeshiftJob, CreateAPSCFilesJob
from enigma import eBackgroundFileEraser, eTimer, eServiceCenter, iServiceInformation, iPlayableService, eEPGCache
from boxbranding import getBoxType, getBrandOEM
from time import time, localtime, strftime
from random import randint
import os
class InfoBarTimeshift:
ts_disabled = False
    def __init__(self):
        """Wire up timeshift key maps, timers, PTS state and service hooks.

        Mixed into the InfoBar; many handlers referenced here (instantRecord,
        restartTimeshift, ptsSeekPointer*, SaveTimeshift, ...) are defined by
        sibling mixins or later in this class.
        """
        # Key maps start disabled; __serviceStarted/__seekableStatusChanged
        # toggle them as the timeshift state machine moves.
        self["TimeshiftActions"] = HelpableActionMap(self, "InfobarTimeshiftActions",
            {
                "timeshiftStart": (self.startTimeshift, _("Start timeshift")), # the "yellow key"
                "timeshiftStop": (self.stopTimeshift, _("Stop timeshift")), # currently undefined :), probably 'TV'
                "instantRecord": self.instantRecord,
                "restartTimeshift": self.restartTimeshift
            }, prio=1)
        self["TimeshiftActivateActions"] = ActionMap(["InfobarTimeshiftActivateActions"],
            {
                "timeshiftActivateEnd": self.activateTimeshiftEnd, # something like "rewind key"
                "timeshiftActivateEndAndPause": self.activateTimeshiftEndAndPause # something like "pause key"
            }, prio=-1) # priority over record
        self["TimeshiftSeekPointerActions"] = ActionMap(["InfobarTimeshiftSeekPointerActions"],
            {
                "SeekPointerOK": self.ptsSeekPointerOK,
                "SeekPointerLeft": self.ptsSeekPointerLeft,
                "SeekPointerRight": self.ptsSeekPointerRight
            }, prio=-1)
        self["TimeshiftFileActions"] = ActionMap(["InfobarTimeshiftActions"],
            {
                #"jumpPreviousFile": self.__evSOF,
                "jumpPreviousFile": self.__evSOFjump,
                "jumpNextFile": self.__evEOF
            }, prio=-1) # priority over history
        self["TimeshiftActions"].setEnabled(False)
        self["TimeshiftActivateActions"].setEnabled(False)
        self["TimeshiftSeekPointerActions"].setEnabled(False)
        self["TimeshiftFileActions"].setEnabled(False)
        self.switchToLive = True
        self.ptsStop = False
        self.ts_rewind_timer = eTimer()
        self.ts_rewind_timer.callback.append(self.rewindService)
        self.save_timeshift_file = False
        self.saveTimeshiftEventPopupActive = False
        # Route player events into the PTS state machine.
        self.__event_tracker = ServiceEventTracker(screen = self, eventmap =
            {
                iPlayableService.evStart: self.__serviceStarted,
                iPlayableService.evSeekableStatusChanged: self.__seekableStatusChanged,
                iPlayableService.evEnd: self.__serviceEnd,
                iPlayableService.evSOF: self.__evSOF,
                iPlayableService.evUpdatedInfo: self.__evInfoChanged,
                iPlayableService.evUpdatedEventInfo: self.__evEventInfoChanged,
                iPlayableService.evUser+1: self.ptsTimeshiftFileChanged
            })
        # PTS bookkeeping: which pts_livebuffer_<n> file is playing/next and
        # the bounds of the playable range.
        self.pts_begintime = 0
        self.pts_switchtolive = False
        self.pts_firstplayable = 1
        self.pts_lastposition = 0
        self.pts_lastplaying = 1
        self.pts_currplaying = 1
        self.pts_nextplaying = 0
        self.pts_lastseekspeed = 0
        self.pts_service_changed = False
        self.pts_file_changed = False
        self.pts_record_running = self.session.nav.RecordTimer.isRecording()
        self.save_current_timeshift = False
        self.save_timeshift_postaction = None
        self.service_changed = 0
        self.event_changed = False
        self.checkEvents_value = int(config.timeshift.timeshiftCheckEvents.value)
        self.pts_starttime = time()
        self.ptsAskUser_wait = False
        self.posDiff = 0
        # Init Global Variables
        self.session.ptsmainloopvalue = 0
        config.timeshift.isRecording.value = False
        # Init eBackgroundFileEraser
        self.BgFileEraser = eBackgroundFileEraser.getInstance()
        # Init PTS Delay-Timer
        self.pts_delay_timer = eTimer()
        self.pts_delay_timer.callback.append(self.autostartAutorecordTimeshift)
        # Init PTS MergeRecords-Timer
        self.pts_mergeRecords_timer = eTimer()
        self.pts_mergeRecords_timer.callback.append(self.ptsMergeRecords)
        # Init PTS Merge Cleanup-Timer
        self.pts_mergeCleanUp_timer = eTimer()
        self.pts_mergeCleanUp_timer.callback.append(self.ptsMergePostCleanUp)
        # Init PTS QuitMainloop-Timer
        self.pts_QuitMainloop_timer = eTimer()
        self.pts_QuitMainloop_timer.callback.append(self.ptsTryQuitMainloop)
        # Init PTS CleanUp-Timer
        self.pts_cleanUp_timer = eTimer()
        self.pts_cleanUp_timer.callback.append(self.ptsCleanTimeshiftFolder)
        # Init PTS CleanEvent-Timer
        self.pts_cleanEvent_timer = eTimer()
        self.pts_cleanEvent_timer.callback.append(self.ptsEventCleanTimeshiftFolder)
        # Init PTS SeekBack-Timer
        self.pts_SeekBack_timer = eTimer()
        self.pts_SeekBack_timer.callback.append(self.ptsSeekBackTimer)
        self.pts_StartSeekBackTimer = eTimer()
        self.pts_StartSeekBackTimer.callback.append(self.ptsStartSeekBackTimer)
        # Init PTS SeekToPos-Timer
        self.pts_SeekToPos_timer = eTimer()
        self.pts_SeekToPos_timer.callback.append(self.ptsSeekToPos)
        # Init PTS CheckFileChanged-Timer
        self.pts_CheckFileChanged_counter = 1
        self.pts_CheckFileChanged_timer = eTimer()
        self.pts_CheckFileChanged_timer.callback.append(self.ptsCheckFileChanged)
        # Init Block-Zap Timer
        self.pts_blockZap_timer = eTimer()
        # Init PTS FileJump-Timer
        self.pts_FileJump_timer = eTimer()
        # Record Event Tracker
        self.session.nav.RecordTimer.on_state_change.append(self.ptsTimerEntryStateChange)
        # Keep Current Event Info for recordings
        self.pts_eventcount = 0
        self.pts_curevent_begin = int(time())
        self.pts_curevent_end = 0
        self.pts_curevent_name = _("Timeshift")
        self.pts_curevent_description = ""
        self.pts_curevent_servicerefname = ""
        self.pts_curevent_station = ""
        self.pts_curevent_eventid = None
        # Init PTS Infobar
    def __seekableStatusChanged(self):
        """Sync action maps and playback state when seekability flips
        (i.e. when we move between live viewing and timeshift playback)."""
        # print '__seekableStatusChanged'
        self["TimeshiftActivateActions"].setEnabled(not self.isSeekable() and self.timeshiftEnabled())
        state = self.getSeek() is not None and self.timeshiftEnabled()
        self["SeekActionsPTS"].setEnabled(state)
        self["TimeshiftFileActions"].setEnabled(state)
        # print ('__seekableStatusChanged - state %s, seekstate %s' % (state, self.seekstate))
        # Lost seekability while on the newest buffer: resume normal play.
        if not state and self.pts_currplaying == self.pts_eventcount and self.timeshiftEnabled() and not self.event_changed:
            self.setSeekState(self.SEEK_STATE_PLAY)
            self.restartSubtitle()
        # print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
        # Back at live: reset the seek pointer and repoint playback at the
        # newest pts_livebuffer file.
        if self.timeshiftEnabled() and not self.isSeekable():
            self.ptsSeekPointerReset()
            if int(config.timeshift.startdelay.value):
                # Guard against immediate re-zap right after timeshift start.
                if self.pts_starttime <= (time()-5):
                    self.pts_blockZap_timer.start(3000, True)
                self.pts_lastplaying = self.pts_currplaying = self.pts_eventcount
                self.pts_nextplaying = 0
                self.pts_file_changed = True
                self.ptsSetNextPlaybackFile("pts_livebuffer_%s" % self.pts_eventcount)
        # print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
    def __serviceStarted(self):
        """evStart hook: note the service change and (re)arm the delayed
        autostart of timeshift when a start delay is configured."""
        # print '__serviceStarted'
        self.service_changed = 1
        self.pts_service_changed = True
        # print 'self.timeshiftEnabled1',self.timeshiftEnabled()
        if self.pts_delay_timer.isActive():
            # print 'TS AUTO START TEST1'
            self.pts_delay_timer.stop()
        if int(config.timeshift.startdelay.value):
            # print 'TS AUTO START TEST2'
            self.pts_delay_timer.start(int(config.timeshift.startdelay.value) * 1000, True)
        self["TimeshiftActions"].setEnabled(True)
        #self.__seekableStatusChanged()
    def __serviceEnd(self):
        """evEnd hook: persist a pending 'save timeshift' request before the
        service goes away, then drop the timeshift action map."""
        # print '!!!!! __serviceEnd'
        if self.save_current_timeshift:
            if self.pts_curevent_end > time():
                # Event still running: save what we have and keep recording
                # the remainder as a regular recording (merged later).
                self.SaveTimeshift("pts_livebuffer_%s" % self.pts_eventcount, mergelater=True)
                self.ptsRecordCurrentEvent()
            else:
                self.SaveTimeshift("pts_livebuffer_%s" % self.pts_eventcount)
        self.service_changed = 0
        if not config.timeshift.isRecording.value:
            self.__seekableStatusChanged()
            self["TimeshiftActions"].setEnabled(False)
    def __evSOFjump(self):
        """'jump previous file' key: first press rewinds to the start of the
        current buffer; a second press within 5s jumps to the previous one."""
        # Ignore while disabled or while any seek/file-change timer is busy.
        if not self.timeshiftEnabled() or self.pts_CheckFileChanged_timer.isActive() or self.pts_SeekBack_timer.isActive() or self.pts_StartSeekBackTimer.isActive() or self.pts_SeekToPos_timer.isActive():
            return
        if self.pts_FileJump_timer.isActive():
            # Double press: really move to the previous timeshift file.
            self.__evSOF()
        else:
            self.pts_FileJump_timer.start(5000, True)
            self.setSeekState(self.SEEK_STATE_PLAY)
            self.doSeek(0)
            self.posDiff = 0
    def evSOF(self, posDiff = 0): #called from InfoBarGenerics.py
        """Public entry for start-of-file handling; posDiff is a seek offset
        carried over into the previous buffer."""
        self.posDiff = posDiff
        self.__evSOF()
    def __evSOF(self):
        """Start-of-file: step playback back to the previous pts_livebuffer
        file, or clamp at the first playable buffer."""
        # print '!!!!! jumpToPrevTimeshiftedEvent'
        if not self.timeshiftEnabled() or self.pts_CheckFileChanged_timer.isActive() or self.pts_SeekBack_timer.isActive() or self.pts_StartSeekBackTimer.isActive() or self.pts_SeekToPos_timer.isActive():
            return
        # print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
        self.pts_switchtolive = False
        self.pts_lastplaying = self.pts_currplaying
        self.pts_nextplaying = 0
        if self.pts_currplaying > self.pts_firstplayable:
            self.pts_currplaying -= 1
        else:
            # Already at the oldest buffer: just restart it from position 0.
            self.setSeekState(self.SEEK_STATE_PLAY)
            self.doSeek(0)
            self.posDiff = 0
            if self.pts_FileJump_timer.isActive():
                self.pts_FileJump_timer.stop()
                Notifications.AddNotification(MessageBox, _("First playable timeshift file!"), MessageBox.TYPE_INFO, timeout=3)
            if not self.pts_FileJump_timer.isActive():
                self.pts_FileJump_timer.start(5000, True)
            return
        # Switch to previous TS file by seeking forward to next file
        # print 'self.pts_currplaying2',self.pts_currplaying
        # print ("'!!!!! %spts_livebuffer_%s" % (config.usage.timeshift_path.value, self.pts_currplaying))
        if fileExists("%spts_livebuffer_%s" % (config.usage.timeshift_path.value, self.pts_currplaying), 'r'):
            self.ptsSetNextPlaybackFile("pts_livebuffer_%s" % self.pts_currplaying)
            self.setSeekState(self.SEEK_STATE_PLAY)
            # Seek far past the end to trigger the file change, then poll
            # for it via the CheckFileChanged timer.
            self.doSeek(3600 * 24 * 90000)
            self.pts_CheckFileChanged_counter = 1
            self.pts_CheckFileChanged_timer.start(1000, False)
            self.pts_file_changed = False
        else:
            # Buffer file vanished (already cleaned up): clamp the playable
            # window forward and restart the current buffer.
            print ('[TIMESHIFT] - "pts_livebuffer_%s" file was not found -> put pointer to the first (current) "pts_livebuffer_%s" file' % (self.pts_currplaying, self.pts_currplaying + 1))
            self.pts_currplaying += 1
            self.pts_firstplayable += 1
            self.setSeekState(self.SEEK_STATE_PLAY)
            self.doSeek(0)
            self.posDiff = 0
        # print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
    def evEOF(self, posDiff = 0): #called from InfoBarGenerics.py
        """Public entry for end-of-file handling; posDiff is a seek offset
        carried over into the next buffer."""
        self.posDiff = posDiff
        self.__evEOF()
    def __evEOF(self):
        """End-of-file: advance playback to the next pts_livebuffer file,
        or hand over to live TV when there is no newer buffer."""
        # print '!!!!! jumpToNextTimeshiftedEvent'
        if not self.timeshiftEnabled() or self.pts_CheckFileChanged_timer.isActive() or self.pts_SeekBack_timer.isActive() or self.pts_StartSeekBackTimer.isActive() or self.pts_SeekToPos_timer.isActive():
            return
        # print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
        self.pts_switchtolive = False
        self.pts_lastposition = self.ptsGetPosition()
        self.pts_lastplaying = self.pts_currplaying
        self.pts_nextplaying = 0
        self.pts_currplaying += 1
        # Switch to next TS file by seeking forward to next file
        # print 'self.pts_currplaying2',self.pts_currplaying
        # print ("'!!!!! %spts_livebuffer_%s" % (config.usage.timeshift_path.value, self.pts_currplaying))
        if fileExists("%spts_livebuffer_%s" % (config.usage.timeshift_path.value, self.pts_currplaying), 'r'):
            self.ptsSetNextPlaybackFile("pts_livebuffer_%s" % self.pts_currplaying)
            self.setSeekState(self.SEEK_STATE_PLAY)
            # Seek far past the end to trigger the file change, then poll
            # for it via the CheckFileChanged timer.
            self.doSeek(3600 * 24 * 90000)
            self.pts_CheckFileChanged_counter = 1
            self.pts_CheckFileChanged_timer.start(1000, False)
            self.pts_file_changed = False
        else:
            # No newer buffer: go back to live (timeshift keeps recording).
            if not int(config.timeshift.startdelay.value) and config.timeshift.showlivetvmsg.value:
                Notifications.AddNotification(MessageBox, _("Switching to live TV - timeshift is still active!"), MessageBox.TYPE_INFO, timeout=3)
            self.posDiff = 0
            self.pts_lastposition = 0
            self.pts_currplaying -= 1
            self.pts_switchtolive = True
            self.ptsSetNextPlaybackFile("")
            self.setSeekState(self.SEEK_STATE_PLAY)
            self.doSeek(3600 * 24 * 90000)
            self.pts_CheckFileChanged_counter = 1
            self.pts_CheckFileChanged_timer.start(1000, False)
            self.pts_file_changed = False
        # print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
    def __evInfoChanged(self):
        """evUpdatedInfo hook: after a zap, flush any pending timeshift save
        and optionally discard old buffers."""
        # print '__evInfoChanged'
        # print 'service_changed',self.service_changed
        # print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
        if self.service_changed:
            self.service_changed = 0
            # We zapped away before saving the file, save it now!
            if self.save_current_timeshift:
                self.SaveTimeshift("pts_livebuffer_%s" % self.pts_eventcount)
            # Delete Timeshift Records on zap
            if config.timeshift.deleteAfterZap.value:
                self.ptsEventCleanTimerSTOP()
                # Old buffers become unplayable once deletion is scheduled.
                self.pts_firstplayable = self.pts_eventcount + 1
                if self.pts_eventcount == 0 and not int(config.timeshift.startdelay.value):
                    self.pts_cleanUp_timer.start(1000, True)
        # print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
    def __evEventInfoChanged(self):
        """evUpdatedEventInfo hook: on an EPG event boundary, finalize a
        pending save and schedule the timeshift (re)start."""
        # print '__evEventInfoChanged'
        # Get Current Event Info
        service = self.session.nav.getCurrentService()
        old_begin_time = self.pts_begintime
        info = service and service.info()
        ptr = info and info.getEvent(0)
        self.pts_begintime = ptr and ptr.getBeginTime() or 0
        # Save current TimeShift permanently now ...
        # Only for services carrying video (sVideoPID != -1).
        if info.getInfo(iServiceInformation.sVideoPID) != -1:
            # Take care of Record Margin Time ...
            if self.save_current_timeshift and self.timeshiftEnabled():
                if config.recording.margin_after.value > 0 and len(self.recording) == 0:
                    # Save the buffer and keep recording the configured
                    # post-margin as a hidden timer entry, merged later.
                    self.SaveTimeshift(mergelater=True)
                    recording = RecordTimerEntry(ServiceReference(self.session.nav.getCurrentlyPlayingServiceOrGroup()), time(), time()+(config.recording.margin_after.value * 60), self.pts_curevent_name, self.pts_curevent_description, self.pts_curevent_eventid, dirname = config.usage.autorecord_path.value)
                    recording.dontSave = True
                    self.session.nav.RecordTimer.record(recording)
                    self.recording.append(recording)
                else:
                    self.SaveTimeshift()
                if not config.timeshift.filesplitting.value:
                    self.stopTimeshiftcheckTimeshiftRunningCallback(True)
            #(Re)Start TimeShift
            # print 'self.pts_delay_timer.isActive',self.pts_delay_timer.isActive()
            if not self.pts_delay_timer.isActive():
                # print 'TS AUTO START TEST4'
                if old_begin_time != self.pts_begintime or old_begin_time == 0:
                    # print 'TS AUTO START TEST5'
                    if int(config.timeshift.startdelay.value) or self.timeshiftEnabled():
                        self.event_changed = True
                        self.pts_delay_timer.start(1000, True)
def getTimeshift(self):
if self.ts_disabled:
return None
service = self.session.nav.getCurrentService()
return service and service.timeshift()
def timeshiftEnabled(self):
ts = self.getTimeshift()
return ts and ts.isTimeshiftEnabled()
    def startTimeshift(self):
        """'yellow key' handler: start timeshift and jump into paused
        playback at the (almost) live position."""
        ts = self.getTimeshift()
        if ts is None:
            # self.session.open(MessageBox, _("Timeshift not possible!"), MessageBox.TYPE_ERROR, timeout=5)
            return 0
        if ts.isTimeshiftEnabled():
            print "hu, timeshift already enabled?"
        else:
            self.activateAutorecordTimeshift()
            self.activateTimeshiftEndAndPause()
    def stopTimeshift(self):
        """Stop-timeshift key: route through checkTimeshiftRunning so the
        user can decide whether to save the buffer first."""
        # print 'stopTimeshift'
        ts = self.getTimeshift()
        if ts and ts.isTimeshiftEnabled():
            # print 'TEST1'
            if int(config.timeshift.startdelay.value) and self.isSeekable():
                # print 'TEST2'
                # Permanent (auto) timeshift: leave playback, keep recording.
                self.switchToLive = True
                self.ptsStop = True
                self.checkTimeshiftRunning(self.stopTimeshiftcheckTimeshiftRunningCallback)
            elif not int(config.timeshift.startdelay.value):
                # print 'TEST2b'
                self.checkTimeshiftRunning(self.stopTimeshiftcheckTimeshiftRunningCallback)
            else:
                # print 'TES2c'
                return 0
        else:
            # print 'TEST3'
            return 0
    def stopTimeshiftcheckTimeshiftRunningCallback(self, answer):
        """Continuation of stopTimeshift after the optional user prompt.

        answer -- truthy when the user confirmed leaving timeshift.
        """
        # print 'stopTimeshiftcheckTimeshiftRunningCallback'
        # print ' answer', answer
        if answer and int(config.timeshift.startdelay.value) and self.switchToLive and self.isSeekable():
            # print 'TEST4'
            # Auto-timeshift mode: just jump back to live; the recorder
            # itself keeps running.
            self.posDiff = 0
            self.pts_lastposition = 0
            if self.pts_currplaying != self.pts_eventcount:
                # Remember where we were so playback can be resumed later.
                self.pts_lastposition = self.ptsGetPosition()
                self.pts_lastplaying = self.pts_currplaying
            self.ptsStop = False
            self.pts_nextplaying = 0
            self.pts_switchtolive = True
            self.setSeekState(self.SEEK_STATE_PLAY)
            self.ptsSetNextPlaybackFile("")
            self.doSeek(3600 * 24 * 90000)
            self.pts_CheckFileChanged_counter = 1
            self.pts_CheckFileChanged_timer.start(1000, False)
            self.pts_file_changed = False
            #self.__seekableStatusChanged()
            return 0
        ts = self.getTimeshift()
        if answer and ts:
            # print 'TEST6'
            # Manual mode: actually stop the timeshift recorder.
            if int(config.timeshift.startdelay.value):
                # print 'TEST7'
                ts.stopTimeshift(self.switchToLive)
            else:
                # print 'TEST8', str(self.event_changed)
                ts.stopTimeshift(not self.event_changed)
            self.__seekableStatusChanged()
    # activates timeshift, and seeks to (almost) the end
    def activateTimeshiftEnd(self, back = True):
        """Enter timeshift playback near the live position.

        back -- when True, start rewinding shortly afterwards (delay differs
        per hardware brand).
        """
        ts = self.getTimeshift()
        if ts is None:
            return
        if ts.isTimeshiftActive():
            self.pauseService()
        else:
            ts.activateTimeshift() # activate timeshift will automatically pause
            self.setSeekState(self.SEEK_STATE_PAUSE)
            seekable = self.getSeek()
            if seekable is not None:
                seekable.seekTo(-90000) # seek approx. 1 sec before end
        if back:
            if getBrandOEM() == 'xtrend':
                self.ts_rewind_timer.start(1000, 1)
            else:
                self.ts_rewind_timer.start(500, 1)
    def rewindService(self):
        """ts_rewind_timer callback: start rewinding at the configured
        speed (some brands need an explicit PLAY first)."""
        if getBrandOEM() in ('gigablue', 'xp'):
            self.setSeekState(self.SEEK_STATE_PLAY)
        self.setSeekState(self.makeStateBackward(int(config.seek.enter_backward.value)))
    def callServiceStarted(self):
        """Public wrapper for the name-mangled __serviceStarted handler."""
        self.__serviceStarted()
    # same as activateTimeshiftEnd, but pauses afterwards.
    def activateTimeshiftEndAndPause(self):
        """Enter timeshift at the live edge and stay paused (no rewind)."""
        self.activateTimeshiftEnd(False)
	def checkTimeshiftRunning(self, returnFunction):
		"""Decide whether leaving timeshift needs user confirmation.

		Depending on config.usage.check_timeshift and
		config.timeshift.favoriteSaveAction, either asks the user what to do
		with the running timeshift (save / discard / stay) or silently applies
		the configured action.  The decision reaches *returnFunction* through
		checkTimeshiftRunningCallback().
		"""
		# print 'checkTimeshiftRunning'
		# print 'self.switchToLive',self.switchToLive
		if self.ptsStop:
			# a stop was already decided elsewhere - nothing to ask
			returnFunction(True)
		elif (self.isSeekable() or (self.timeshiftEnabled() and not int(config.timeshift.startdelay.value)) or self.save_current_timeshift) and config.usage.check_timeshift.value:
			# print 'TEST1'
			if config.timeshift.favoriteSaveAction.value == "askuser":
				# print 'TEST2'
				if self.save_current_timeshift:
					# print 'TEST3'
					message = _("You have chosen to save the current timeshift event, but the event has not yet finished\nWhat do you want to do ?")
					choice = [(_("Save timeshift as movie and continue recording"), "savetimeshiftandrecord"),
						(_("Save timeshift as movie and stop recording"), "savetimeshift"),
						(_("Cancel save timeshift as movie"), "noSave"),
						(_("Nothing, just leave this menu"), "no")]
					self.session.openWithCallback(boundFunction(self.checkTimeshiftRunningCallback, returnFunction), MessageBox, message, simple = True, list = choice, timeout=30)
				else:
					# print 'TEST4'
					message = _("You seem to be in timeshift, Do you want to leave timeshift ?")
					choice = [(_("Yes, but don't save timeshift as movie"), "noSave"),
						(_("Yes, but save timeshift as movie and continue recording"), "savetimeshiftandrecord"),
						(_("Yes, but save timeshift as movie and stop recording"), "savetimeshift"),
						(_("No"), "no")]
					self.session.openWithCallback(boundFunction(self.checkTimeshiftRunningCallback, returnFunction), MessageBox, message, simple = True, list = choice, timeout=30)
			else:
				# print 'TEST5'
				if self.save_current_timeshift:
					# print 'TEST6'
					# the user has previously activated "Timeshift save recording" of current event - so must be necessarily saved of the timeshift!
					# workaround - without the message box can the box no longer be operated when goes in standby(no freezing - no longer can use - unhandled key screen comes when key press -)
					message = _("You have chosen to save the current timeshift")
					choice = [(_("Now save timeshift as movie and continues recording"), "savetimeshiftandrecord")]
					self.session.openWithCallback(boundFunction(self.checkTimeshiftRunningCallback, returnFunction), MessageBox, message, simple = True, list = choice, timeout=1)
					#InfoBarTimeshift.saveTimeshiftActions(self, "savetimeshiftandrecord", returnFunction)
				else:
					# print 'TEST7'
					message = _("You seem to be in timeshift, Do you want to leave timeshift ?")
					choice = [(_("Yes"), config.timeshift.favoriteSaveAction.value), (_("No"), "no")]
					self.session.openWithCallback(boundFunction(self.checkTimeshiftRunningCallback, returnFunction), MessageBox, message, simple = True, list = choice, timeout=30)
		elif self.save_current_timeshift:
			# the user has chosen "no warning" when timeshift is stopped (config.usage.check_timeshift=False)
			# but the user has previously activated "Timeshift save recording" of current event
			# so we silently do "savetimeshiftandrecord" when switching channel independent of config.timeshift.favoriteSaveAction
			# workaround - without the message box can the box no longer be operated when goes in standby(no freezing - no longer can use - unhandled key screen comes when key press -)
			message = _("You have chosen to save the current timeshift")
			choice = [(_("Now save timeshift as movie and continues recording"), "savetimeshiftandrecord")]
			self.session.openWithCallback(boundFunction(self.checkTimeshiftRunningCallback, returnFunction), MessageBox, message, simple = True, list = choice, timeout=1)
			#InfoBarTimeshift.saveTimeshiftActions(self, "savetimeshiftandrecord", returnFunction)
		else:
			# nothing to confirm - proceed immediately
			returnFunction(True)
def checkTimeshiftRunningCallback(self, returnFunction, answer):
# print 'checkTimeshiftRunningCallback'
# print 'returnFunction',returnFunction
# print 'answer',answer
if answer:
if answer == "savetimeshift" or answer == "savetimeshiftandrecord":
self.save_current_timeshift = True
elif answer == "noSave":
self.save_current_timeshift = False
elif answer == "no":
pass
InfoBarTimeshift.saveTimeshiftActions(self, answer, returnFunction)
def eraseTimeshiftFile(self):
for filename in os.listdir(config.usage.timeshift_path.value):
if filename.startswith("timeshift.") and not filename.endswith(".del") and not filename.endswith(".copy"):
self.BgFileEraser.erase("%s%s" % (config.usage.timeshift_path.value,filename))
def autostartAutorecordTimeshift(self):
# print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!autostartAutorecordTimeshift'
#self["TimeshiftActions"].setEnabled(True)
ts = self.getTimeshift()
if ts is None:
# print '[TimeShift] tune lock failed, so could not start.'
return 0
if self.pts_delay_timer.isActive():
self.pts_delay_timer.stop()
if (int(config.timeshift.startdelay.value) and not self.timeshiftEnabled()) or self.event_changed:
self.activateAutorecordTimeshift()
	def activateAutorecordTimeshift(self):
		"""(Re)start the autorecord timeshift buffer for the current event.

		Cleans the timeshift folder, starts a new timeshift recording, updates
		the event counter and creates the pts_livebuffer_* hardlink/meta files.
		Aborts early when the timeshift path is unusable, the box is in
		standby, no live TV is playing, or a recording blocks timeshift.
		"""
		# print 'activateAutorecordTimeshift'
		# print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
		self.createTimeshiftFolder()
		if self.pts_eventcount == 0: #only cleanup folder after switching channels, not when a new event starts, to allow saving old events from timeshift buffer
			self.ptsCleanTimeshiftFolder(justZapped = True) #remove all timeshift files
		else:
			self.ptsCleanTimeshiftFolder(justZapped = False) #only delete very old timeshift files based on config.usage.timeshiftMaxHours
		if self.ptsCheckTimeshiftPath() is False or self.session.screen["Standby"].boolean is True or self.ptsLiveTVStatus() is False or (config.timeshift.stopwhilerecording.value and self.pts_record_running):
			return
		# (Re)start Timeshift now
		if config.timeshift.filesplitting.value:
			# setNextPlaybackFile() on event change while timeshifting
			if self.isSeekable():
				self.pts_nextplaying = self.pts_currplaying + 1
				self.ptsSetNextPlaybackFile("pts_livebuffer_%s" % self.pts_nextplaying)
				# Do not switch back to LiveTV while timeshifting
				self.switchToLive = False
			else:
				self.switchToLive = True
			self.stopTimeshiftcheckTimeshiftRunningCallback(True)
		else:
			if self.pts_currplaying < self.pts_eventcount:
				self.pts_nextplaying = self.pts_currplaying + 1
				self.ptsSetNextPlaybackFile("pts_livebuffer_%s" % self.pts_nextplaying)
			else:
				self.pts_nextplaying = 0
				self.ptsSetNextPlaybackFile("")
		self.event_changed = False
		ts = self.getTimeshift()
		if ts and (not ts.startTimeshift() or self.pts_eventcount == 0):
			# Update internal Event Counter
			self.pts_eventcount += 1
			if (getBoxType() == 'vuuno' or getBoxType() == 'vuduo') and os.path.exists("/proc/stb/lcd/symbol_timeshift"):
				if self.session.nav.RecordTimer.isRecording():
					f = open("/proc/stb/lcd/symbol_timeshift", "w")
					f.write("0")
					f.close()
			self.pts_starttime = time()
			self.save_timeshift_postaction = None
			self.ptsGetEventInfo()
			self.ptsCreateHardlink()
			self.__seekableStatusChanged()
			self.ptsEventCleanTimerSTART()
		elif ts and ts.startTimeshift():
			# timeshift already running: refresh event info and metadata only
			self.ptsGetEventInfo()
			try:
				# rewrite .meta and .eit files
				metafile = open("%spts_livebuffer_%s.meta" % (config.usage.timeshift_path.value,self.pts_eventcount), "w")
				metafile.write("%s\n%s\n%s\n%i\n" % (self.pts_curevent_servicerefname,self.pts_curevent_name.replace("\n", ""),self.pts_curevent_description.replace("\n", ""),int(self.pts_starttime)))
				metafile.close()
				self.ptsCreateEITFile("%spts_livebuffer_%s" % (config.usage.timeshift_path.value,self.pts_eventcount))
			except:
				print "[Timeshift] - failure rewrite meta and eit files."
			self.ptsEventCleanTimerSTART()
		else:
			# starting timeshift failed entirely
			self.ptsEventCleanTimerSTOP()
			try:
				self.session.open(MessageBox, _("Timeshift not possible!"), MessageBox.TYPE_ERROR, timeout=2)
			except:
				print '[TIMESHIFT] Failed to open MessageBox, Timeshift not possible, probably another MessageBox was active.'
		if self.pts_eventcount < self.pts_firstplayable:
			self.pts_firstplayable = self.pts_eventcount
		# print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
def createTimeshiftFolder(self):
timeshiftdir = resolveFilename(SCOPE_TIMESHIFT)
if not pathExists(timeshiftdir):
try:
os.makedirs(timeshiftdir)
except:
print "[TimeShift] Failed to create %s !!" %timeshiftdir
	def restartTimeshift(self):
		# Start a fresh autorecord timeshift buffer and tell the user about it.
		self.activateAutorecordTimeshift()
		Notifications.AddNotification(MessageBox, _("[TimeShift] Restarting Timeshift!"), MessageBox.TYPE_INFO, timeout=5)
	def saveTimeshiftEventPopup(self):
		"""Open a ChoiceBox listing the current event plus all stored
		pts_livebuffer_* events, so the user can pick one to save permanently.

		The selection is handled by recordQuestionCallback().
		"""
		self.saveTimeshiftEventPopupActive = True
		filecount = 0
		entrylist = [(_("Current Event:") + " %s" % self.pts_curevent_name, "savetimeshift")]
		filelist = os.listdir(config.usage.timeshift_path.value)
		if filelist is not None:
			try:
				# NOTE(review): the key mixes int and str values, which only
				# sorts in Python 2 - confirm before any Python 3 migration
				filelist = sorted(filelist, key=lambda x: int(x.split('pts_livebuffer_')[1]) if x.startswith("pts_livebuffer") and not os.path.splitext(x)[1] else x)
			except:
				print '[TIMESHIFT] - file sorting error, use standard sorting method'
				filelist.sort()
		# print filelist
		for filename in filelist:
			# only plain buffer files (no .meta/.eit/.sc extension)
			if filename.startswith("pts_livebuffer") and not os.path.splitext(filename)[1]:
				# print "TRUE"
				statinfo = os.stat("%s%s" % (config.usage.timeshift_path.value,filename))
				# skip files still being written (modified within the last 5s)
				if statinfo.st_mtime < (time()-5.0):
					# Get Event Info from meta file
					readmetafile = open("%s%s.meta" % (config.usage.timeshift_path.value,filename), "r")
					servicerefname = readmetafile.readline()[0:-1]
					eventname = readmetafile.readline()[0:-1]
					description = readmetafile.readline()[0:-1]
					begintime = readmetafile.readline()[0:-1]
					readmetafile.close()
					# Add Event to list
					filecount += 1
					if config.timeshift.deleteAfterZap.value and servicerefname == self.pts_curevent_servicerefname:
						entrylist.append((_("Record") + " #%s (%s): %s" % (filecount,strftime("%H:%M",localtime(int(begintime))),eventname), "%s" % filename))
					else:
						servicename = ServiceReference(servicerefname).getServiceName()
						#entrylist.append((_("Record") + " #%s (%s,%s): %s" % (filecount,strftime("%H:%M",localtime(int(begintime))),servicename,eventname), "%s" % filename))
						entrylist.append(("[%s] %s : %s" % (strftime("%H:%M",localtime(int(begintime))),servicename,eventname), "%s" % filename))
		self.session.openWithCallback(self.recordQuestionCallback, ChoiceBox, title=_("Which event do you want to save permanently?"), list=entrylist)
def saveTimeshiftActions(self, action=None, returnFunction=None):
# print 'saveTimeshiftActions'
# print 'action',action
if action == "savetimeshift":
self.SaveTimeshift()
elif action == "savetimeshiftandrecord":
if self.pts_curevent_end > time():
self.SaveTimeshift(mergelater=True)
self.ptsRecordCurrentEvent()
else:
self.SaveTimeshift()
elif action == "noSave":
config.timeshift.isRecording.value = False
self.save_current_timeshift = False
elif action == "no":
pass
# Get rid of old timeshift file before E2 truncates its filesize
if returnFunction is not None and action != "no":
self.eraseTimeshiftFile()
# print 'action returnFunction'
returnFunction(action and action != "no")
	def SaveTimeshift(self, timeshiftfile=None, mergelater=False):
		"""Save a timeshift buffer as a regular recording.

		timeshiftfile -- a stored "pts_livebuffer_*" file to save, or None to
			save the currently-written timeshift.* buffer.
		mergelater -- tag the recording so pts_mergeRecords_timer merges it
			with a follow-up recording of the same event.

		First tries to hardlink the buffer into the recordings path; if that
		fails (typically a different filesystem), falls back to a background
		copy via CopyTimeshiftJob after a free-space check.
		"""
		# print 'SaveTimeshift'
		self.save_current_timeshift = False
		savefilename = None
		if timeshiftfile is not None:
			savefilename = timeshiftfile
		# print 'savefilename',savefilename
		if savefilename is None:
			# locate the buffer file currently being written (mtime within 5s)
			# print 'TEST1'
			for filename in os.listdir(config.usage.timeshift_path.value):
				# print 'filename',filename
				if filename.startswith("timeshift.") and not filename.endswith(".del") and not filename.endswith(".copy") and not filename.endswith(".sc"):
					statinfo = os.stat("%s%s" % (config.usage.timeshift_path.value,filename))
					if statinfo.st_mtime > (time()-5.0):
						savefilename=filename
		# print 'savefilename',savefilename
		if savefilename is None:
			Notifications.AddNotification(MessageBox, _("No Timeshift found to save as recording!"), MessageBox.TYPE_ERROR, timeout=30)
		else:
			timeshift_saved = True
			timeshift_saveerror1 = ""
			timeshift_saveerror2 = ""
			metamergestring = ""
			config.timeshift.isRecording.value = True
			if mergelater:
				self.pts_mergeRecords_timer.start(120000, True)
				metamergestring = "pts_merge\n"
			try:
				if timeshiftfile is None:
					# Save Current Event by creating hardlink to ts file
					if self.pts_starttime >= (time()-60):
						self.pts_starttime -= 60
					ptsfilename = "%s - %s - %s" % (strftime("%Y%m%d %H%M",localtime(self.pts_starttime)),self.pts_curevent_station,self.pts_curevent_name.replace("\n", ""))
					try:
						# honour the configured recording filename composition
						if config.usage.setup_level.index >= 2:
							if config.recording.filename_composition.value == "long" and self.pts_curevent_name.replace("\n", "") != self.pts_curevent_description.replace("\n", ""):
								ptsfilename = "%s - %s - %s - %s" % (strftime("%Y%m%d %H%M",localtime(self.pts_starttime)),self.pts_curevent_station,self.pts_curevent_name.replace("\n", ""),self.pts_curevent_description.replace("\n", ""))
							elif config.recording.filename_composition.value == "short":
								ptsfilename = "%s - %s" % (strftime("%Y%m%d",localtime(self.pts_starttime)),self.pts_curevent_name.replace("\n", ""))
							elif config.recording.filename_composition.value == "veryshort":
								ptsfilename = "%s - %s" % (self.pts_curevent_name.replace("\n", ""),strftime("%Y%m%d %H%M",localtime(self.pts_starttime)))
							elif config.recording.filename_composition.value == "veryveryshort":
								ptsfilename = "%s - %s" % (self.pts_curevent_name.replace("\n", ""),strftime("%Y%m%d %H%M",localtime(self.pts_starttime)))
					except Exception, errormsg:
						print "[TimeShift] Using default filename"
					if config.recording.ascii_filenames.value:
						ptsfilename = ASCIItranslit.legacyEncode(ptsfilename)
					# print 'ptsfilename',ptsfilename
					fullname = getRecordingFilename(ptsfilename,config.usage.autorecord_path.value)
					# print 'fullname',fullname
					os.link("%s%s" % (config.usage.timeshift_path.value,savefilename), "%s.ts" % fullname)
					metafile = open("%s.ts.meta" % fullname, "w")
					metafile.write("%s\n%s\n%s\n%i\n%s" % (self.pts_curevent_servicerefname,self.pts_curevent_name.replace("\n", ""),self.pts_curevent_description.replace("\n", ""),int(self.pts_starttime),metamergestring))
					metafile.close()
					self.ptsCreateEITFile(fullname)
				elif timeshiftfile.startswith("pts_livebuffer"):
					# Save stored timeshift by creating hardlink to ts file
					readmetafile = open("%s%s.meta" % (config.usage.timeshift_path.value,timeshiftfile), "r")
					servicerefname = readmetafile.readline()[0:-1]
					eventname = readmetafile.readline()[0:-1]
					description = readmetafile.readline()[0:-1]
					begintime = readmetafile.readline()[0:-1]
					readmetafile.close()
					if config.timeshift.deleteAfterZap.value and servicerefname == self.pts_curevent_servicerefname:
						servicename = self.pts_curevent_station
					else:
						servicename = ServiceReference(servicerefname).getServiceName()
					ptsfilename = "%s - %s - %s" % (strftime("%Y%m%d %H%M",localtime(int(begintime))),servicename,eventname)
					try:
						# honour the configured recording filename composition
						if config.usage.setup_level.index >= 2:
							if config.recording.filename_composition.value == "long" and eventname != description:
								ptsfilename = "%s - %s - %s - %s" % (strftime("%Y%m%d %H%M",localtime(int(begintime))),servicename,eventname,description)
							elif config.recording.filename_composition.value == "short":
								ptsfilename = "%s - %s" % (strftime("%Y%m%d",localtime(int(begintime))),eventname)
							elif config.recording.filename_composition.value == "veryshort":
								ptsfilename = "%s - %s" % (eventname,strftime("%Y%m%d %H%M",localtime(int(begintime))))
							elif config.recording.filename_composition.value == "veryveryshort":
								ptsfilename = "%s - %s" % (eventname,strftime("%Y%m%d %H%M",localtime(int(begintime))))
					except Exception, errormsg:
						print "[TimeShift] Using default filename"
					if config.recording.ascii_filenames.value:
						ptsfilename = ASCIItranslit.legacyEncode(ptsfilename)
					fullname=getRecordingFilename(ptsfilename,config.usage.autorecord_path.value)
					os.link("%s%s" % (config.usage.timeshift_path.value,timeshiftfile),"%s.ts" % fullname)
					os.link("%s%s.meta" % (config.usage.timeshift_path.value,timeshiftfile),"%s.ts.meta" % fullname)
					if os.path.exists("%s%s.eit" % (config.usage.timeshift_path.value,timeshiftfile)):
						os.link("%s%s.eit" % (config.usage.timeshift_path.value,timeshiftfile),"%s.eit" % fullname)
					# Add merge-tag to metafile
					if mergelater:
						metafile = open("%s.ts.meta" % fullname, "a")
						metafile.write("%s\n" % metamergestring)
						metafile.close()
				# Create AP and SC Files when not merging
				if not mergelater:
					self.ptsCreateAPSCFiles(fullname+".ts")
			except Exception, errormsg:
				timeshift_saved = False
				timeshift_saveerror1 = errormsg
			# Hmpppf! Saving Timeshift via Hardlink-Method failed. Probably other device?
			# Let's try to copy the file in background now! This might take a while ...
			if not timeshift_saved:
				try:
					stat = os.statvfs(config.usage.autorecord_path.value)
					freespace = stat.f_bfree / 1000 * stat.f_bsize / 1000
					randomint = randint(1, 999)
					if timeshiftfile is None:
						# Get Filesize for Free Space Check
						filesize = int(os.path.getsize("%s%s" % (config.usage.timeshift_path.value,savefilename)) / (1024*1024))
						# Save Current Event by copying it to the other device
						if filesize <= freespace:
							os.link("%s%s" % (config.usage.timeshift_path.value,savefilename), "%s%s.%s.copy" % (config.usage.timeshift_path.value,savefilename,randomint))
							copy_file = savefilename
							metafile = open("%s.ts.meta" % fullname, "w")
							metafile.write("%s\n%s\n%s\n%i\n%s" % (self.pts_curevent_servicerefname,self.pts_curevent_name.replace("\n", ""),self.pts_curevent_description.replace("\n", ""),int(self.pts_starttime),metamergestring))
							metafile.close()
							self.ptsCreateEITFile(fullname)
					elif timeshiftfile.startswith("pts_livebuffer"):
						# Get Filesize for Free Space Check
						filesize = int(os.path.getsize("%s%s" % (config.usage.timeshift_path.value, timeshiftfile)) / (1024*1024))
						# Save stored timeshift by copying it to the other device
						if filesize <= freespace:
							os.link("%s%s" % (config.usage.timeshift_path.value,timeshiftfile), "%s%s.%s.copy" % (config.usage.timeshift_path.value,timeshiftfile,randomint))
							copyfile("%s%s.meta" % (config.usage.timeshift_path.value,timeshiftfile),"%s.ts.meta" % fullname)
							if os.path.exists("%s%s.eit" % (config.usage.timeshift_path.value,timeshiftfile)):
								copyfile("%s%s.eit" % (config.usage.timeshift_path.value,timeshiftfile),"%s.eit" % fullname)
							copy_file = timeshiftfile
						# Add merge-tag to metafile
						if mergelater:
							metafile = open("%s.ts.meta" % fullname, "a")
							metafile.write("%s\n" % metamergestring)
							metafile.close()
					# Only copy file when enough disk-space available!
					if filesize <= freespace:
						timeshift_saved = True
						copy_file = copy_file+"."+str(randomint)
						# Get Event Info from meta file
						if os.path.exists("%s.ts.meta" % fullname):
							readmetafile = open("%s.ts.meta" % fullname, "r")
							servicerefname = readmetafile.readline()[0:-1]
							eventname = readmetafile.readline()[0:-1]
							readmetafile.close()
						else:
							eventname = ""
						JobManager.AddJob(CopyTimeshiftJob(self, "mv \"%s%s.copy\" \"%s.ts\"" % (config.usage.timeshift_path.value,copy_file,fullname), copy_file, fullname, eventname))
						if not Screens.Standby.inTryQuitMainloop and not Screens.Standby.inStandby and not mergelater and self.save_timeshift_postaction != "standby":
							Notifications.AddNotification(MessageBox, _("Saving timeshift as movie now. This might take a while!"), MessageBox.TYPE_INFO, timeout=30)
					else:
						timeshift_saved = False
						timeshift_saveerror1 = ""
						timeshift_saveerror2 = _("Not enough free Diskspace!\n\nFilesize: %sMB\nFree Space: %sMB\nPath: %s" % (filesize,freespace,config.usage.autorecord_path.value))
				except Exception, errormsg:
					timeshift_saved = False
					timeshift_saveerror2 = errormsg
			if not timeshift_saved:
				# both hardlink and copy failed - report the collected errors
				config.timeshift.isRecording.value = False
				self.save_timeshift_postaction = None
				errormessage = str(timeshift_saveerror1) + "\n" + str(timeshift_saveerror2)
				Notifications.AddNotification(MessageBox, _("Timeshift save failed!")+"\n\n%s" % errormessage, MessageBox.TYPE_ERROR, timeout=30)
		# print 'SAVE COMPLETED'
def ptsAskUser(self, what):
if self.ptsAskUser_wait:
return
message_time = _("The buffer time for timeshift exceeds the specified limit in the settings.\nWhat do you want to do ?")
message_space = _("The available disk space for timeshift is less than specified in the settings.\nWhat do you want to do ?")
message_livetv = _("Can't going to live TV!\nSwitch to live TV and restart Timeshift ?")
message_nextfile = _("Can't play the next Timeshift file!\nSwitch to live TV and restart Timeshift ?")
choice_restart =[(_("Delete the current timeshift buffer and restart timeshift"), "restarttimeshift"),
(_("Nothing, just leave this menu"), "no")]
choice_save = [(_("Stop timeshift and save timeshift buffer as movie and start recording of current event"), "savetimeshiftandrecord"),
(_("Stop timeshift and save timeshift buffer as movie"), "savetimeshift"),
(_("Stop timeshift"), "noSave"),
(_("Nothing, just leave this menu"), "no")]
choice_livetv = [(_("No"), "nolivetv"),
(_("Yes"), "golivetv")]
if what == "time":
message = message_time
choice = choice_restart
elif what == "space":
message = message_space
choice = choice_restart
elif what == "time_and_save":
message = message_time
choice = choice_save
elif what == "space_and_save":
message = message_space
choice = choice_save
elif what == "livetv":
message = message_livetv
choice = choice_livetv
elif what == "nextfile":
message = message_nextfile
choice = choice_livetv
else:
return
self.ptsAskUser_wait = True
self.session.openWithCallback(self.ptsAskUserCallback, MessageBox, message, simple = True, list = choice, timeout=30)
def ptsAskUserCallback(self, answer):
self.ptsAskUser_wait = False
if answer:
if answer == "restarttimeshift":
self.ptsEventCleanTimerSTOP()
self.save_current_timeshift = False
self.stopTimeshiftAskUserCallback(True)
self.restartTimeshift()
elif answer == "noSave":
self.ptsEventCleanTimerSTOP()
self.save_current_timeshift = False
self.stopTimeshiftAskUserCallback(True)
elif answer == "savetimeshift" or answer == "savetimeshiftandrecord":
self.ptsEventCleanTimerSTOP()
self.save_current_timeshift = True
InfoBarTimeshift.saveTimeshiftActions(self, answer, self.stopTimeshiftAskUserCallback)
elif answer == "golivetv":
self.ptsEventCleanTimerSTOP(True)
self.stopTimeshiftAskUserCallback(True)
self.restartTimeshift()
elif answer == "nolivetv":
if self.pts_lastposition:
self.setSeekState(self.SEEK_STATE_PLAY)
self.doSeek(self.pts_lastposition)
def stopTimeshiftAskUserCallback(self, answer):
ts = self.getTimeshift()
if answer and ts:
ts.stopTimeshift(True)
self.__seekableStatusChanged()
def ptsEventCleanTimerSTOP(self, justStop = False):
if justStop is False:
self.pts_eventcount = 0
if self.pts_cleanEvent_timer.isActive():
self.pts_cleanEvent_timer.stop()
print "[TIMESHIFT] - 'cleanEvent_timer' is stopped"
def ptsEventCleanTimerSTART(self):
if not self.pts_cleanEvent_timer.isActive() and int(config.timeshift.timeshiftCheckEvents.value):
self.pts_cleanEvent_timer.start(60000*int(config.timeshift.timeshiftCheckEvents.value), False)
print "[TIMESHIFT] - 'cleanEvent_timer' is starting"
	def ptsEventCleanTimeshiftFolder(self):
		# Timer callback: prune old timeshift files without touching the live buffer.
		print "[TIMESHIFT] - 'cleanEvent_timer' is running"
		self.ptsCleanTimeshiftFolder(justZapped = False)
	def ptsCleanTimeshiftFolder(self, justZapped = True):
		"""Delete stale timeshift files from the timeshift folder.

		justZapped=True (right after a channel change) removes all regular
		timeshift files.  justZapped=False (periodic cleanup) removes complete
		pts_livebuffer_* sets that are too old, exceed timeshiftMaxEvents, or
		must go because free disk space fell below timeshiftCheckFreeSpace -
		while never touching files that are locked (currently playing up to
		the newest event) or about to be saved via the save popup.  May prompt
		the user (ptsAskUser) when space/time limits are still exceeded.
		"""
		# print '!!!!!!!!!!!!!!!!!!!!! ptsCleanTimeshiftFolder'
		if self.ptsCheckTimeshiftPath() is False or self.session.screen["Standby"].boolean is True:
			self.ptsEventCleanTimerSTOP()
			return
		freespace = int(config.timeshift.timeshiftCheckFreeSpace.value)
		timeshiftEnabled = self.timeshiftEnabled()
		isSeekable = self.isSeekable()
		filecounter = 0
		filesize = 0
		lockedFiles = []
		removeFiles = []
		if timeshiftEnabled:
			if isSeekable:
				# everything from the playing position to the newest event is in use
				for i in range(self.pts_currplaying,self.pts_eventcount + 1):
					lockedFiles.append(("pts_livebuffer_%s") % i)
			else:
				if not self.event_changed:
					lockedFiles.append(("pts_livebuffer_%s") % self.pts_currplaying)
		if freespace:
			try:
				stat = os.statvfs(config.usage.timeshift_path.value)
				freespace = stat.f_bavail * stat.f_bsize / 1024 / 1024
			except:
				print "[TIMESHIFT] - error reading disk space - function 'checking for free space' can't used"
		if freespace < int(config.timeshift.timeshiftCheckFreeSpace.value):
			for i in range(1,self.pts_eventcount + 1):
				removeFiles.append(("pts_livebuffer_%s") % i)
			print "[TIMESHIFT] - less than %s MByte disk space available - try to the deleting all unused timeshift files" % config.timeshift.timeshiftCheckFreeSpace.value
		elif self.pts_eventcount - config.timeshift.timeshiftMaxEvents.value >= 0:
			if self.event_changed or len(lockedFiles) == 0:
				for i in range(1,self.pts_eventcount - config.timeshift.timeshiftMaxEvents.value + 2):
					removeFiles.append(("pts_livebuffer_%s") % i)
			else:
				for i in range(1,self.pts_eventcount - config.timeshift.timeshiftMaxEvents.value + 1):
					removeFiles.append(("pts_livebuffer_%s") % i)
		for filename in os.listdir(config.usage.timeshift_path.value):
			if (os.path.exists("%s%s" % (config.usage.timeshift_path.value,filename))) and ((filename.startswith("timeshift.") or filename.startswith("pts_livebuffer_"))):
				# print 'filename:',filename
				statinfo = os.stat("%s%s" % (config.usage.timeshift_path.value,filename))
				if (justZapped is True) and (filename.endswith(".del") is False) and (filename.endswith(".copy") is False):
					# after zapping, remove all regular timeshift files
					# print "[TimeShift] Erasing stranded timeshift file %s" % filename
					filesize += os.path.getsize("%s%s" % (config.usage.timeshift_path.value,filename))
					self.BgFileEraser.erase("%s%s" % (config.usage.timeshift_path.value,filename))
				elif (filename.endswith(".eit") is False) and (filename.endswith(".meta") is False) and (filename.endswith(".sc") is False) and (filename.endswith(".del") is False) and (filename.endswith(".copy") is False):
					# remove old files, but only complete sets of files (base file, .eit, .meta, .sc),
					# and not while saveTimeshiftEventPopup is active (avoid deleting files about to be saved)
					# and don't delete files from currently playing up to the last event
					if not filename.startswith("timeshift."):
						filecounter += 1
					# NOTE(review): "filename in s" is substring matching, so e.g.
					# "pts_livebuffer_1" also matches list entry "pts_livebuffer_15" -
					# confirm this over-matching is intended before changing
					if ((statinfo.st_mtime < (time()-3600*config.timeshift.timeshiftMaxHours.value)) or any(filename in s for s in removeFiles)) and (self.saveTimeshiftEventPopupActive is False) and not any(filename in s for s in lockedFiles):
						# print "[TimeShift] Erasing set of old timeshift files (base file, .eit, .meta, .sc) %s" % filename
						filesize += os.path.getsize("%s%s" % (config.usage.timeshift_path.value,filename))
						self.BgFileEraser.erase("%s%s" % (config.usage.timeshift_path.value,filename))
						if os.path.exists("%s%s.eit" % (config.usage.timeshift_path.value,filename)):
							filesize += os.path.getsize("%s%s.eit" % (config.usage.timeshift_path.value,filename))
							self.BgFileEraser.erase("%s%s.eit" % (config.usage.timeshift_path.value,filename))
						if os.path.exists("%s%s.meta" % (config.usage.timeshift_path.value,filename)):
							filesize += os.path.getsize("%s%s.meta" % (config.usage.timeshift_path.value,filename))
							self.BgFileEraser.erase("%s%s.meta" % (config.usage.timeshift_path.value,filename))
						if os.path.exists("%s%s.sc" % (config.usage.timeshift_path.value,filename)):
							filesize += os.path.getsize("%s%s.sc" % (config.usage.timeshift_path.value,filename))
							self.BgFileEraser.erase("%s%s.sc" % (config.usage.timeshift_path.value,filename))
						if not filename.startswith("timeshift."):
							filecounter -= 1
				else:
					# remove anything still left over another 24h later
					if statinfo.st_mtime < (time()-3600*(24+config.timeshift.timeshiftMaxHours.value)):
						# print "[TimeShift] Erasing very old timeshift file %s" % filename
						if filename.endswith(".del") is True:
							filesize += os.path.getsize("%s%s" % (config.usage.timeshift_path.value,filename))
							try:
								os.rename("%s%s" % (config.usage.timeshift_path.value,filename), "%s%s.del_again" % (config.usage.timeshift_path.value,filename))
								self.BgFileEraser.erase("%s%s.del_again" % (config.usage.timeshift_path.value,filename))
							except:
								print "[TimeShift] - can't rename %s%s." % (config.usage.timeshift_path.value,filename)
								self.BgFileEraser.erase("%s%s" % (config.usage.timeshift_path.value,filename))
						else:
							filesize += os.path.getsize("%s%s" % (config.usage.timeshift_path.value,filename))
							self.BgFileEraser.erase("%s%s" % (config.usage.timeshift_path.value,filename))
		if filecounter == 0:
			self.ptsEventCleanTimerSTOP()
		else:
			# limits still exceeded even after the cleanup - ask the user
			if timeshiftEnabled and not isSeekable:
				if freespace + (filesize / 1024 / 1024) < int(config.timeshift.timeshiftCheckFreeSpace.value):
					self.ptsAskUser("space")
				elif time() - self.pts_starttime > 3600 * config.timeshift.timeshiftMaxHours.value:
					self.ptsAskUser("time")
			elif isSeekable:
				if freespace + (filesize / 1024 / 1024) < int(config.timeshift.timeshiftCheckFreeSpace.value):
					self.ptsAskUser("space_and_save")
				elif time() - self.pts_starttime > 3600 * config.timeshift.timeshiftMaxHours.value:
					self.ptsAskUser("time_and_save")
		if self.checkEvents_value != int(config.timeshift.timeshiftCheckEvents.value):
			# the check interval setting changed - rearm or disable the timer
			if self.pts_cleanEvent_timer.isActive():
				print "[TIMESHIFT] - 'cleanEvent_timer' was changed"
				self.pts_cleanEvent_timer.stop()
			if int(config.timeshift.timeshiftCheckEvents.value):
				self.ptsEventCleanTimerSTART()
			else:
				print "[TIMESHIFT] - 'cleanEvent_timer' is deactivated"
			self.checkEvents_value = int(config.timeshift.timeshiftCheckEvents.value)
def ptsGetEventInfo(self):
event = None
try:
serviceref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
serviceHandler = eServiceCenter.getInstance()
info = serviceHandler.info(serviceref)
self.pts_curevent_servicerefname = serviceref.toString()
self.pts_curevent_station = info.getName(serviceref)
service = self.session.nav.getCurrentService()
info = service and service.info()
event = info and info.getEvent(0)
except Exception, errormsg:
Notifications.AddNotification(MessageBox, _("Getting Event Info failed!")+"\n\n%s" % errormsg, MessageBox.TYPE_ERROR, timeout=10)
if event is not None:
curEvent = parseEvent(event)
self.pts_curevent_begin = int(curEvent[0])
self.pts_curevent_end = int(curEvent[1])
self.pts_curevent_name = curEvent[2]
self.pts_curevent_description = curEvent[3]
self.pts_curevent_eventid = curEvent[4]
def ptsFrontpanelActions(self, action=None):
if self.session.nav.RecordTimer.isRecording() or SystemInfo.get("NumFrontpanelLEDs", 0) == 0:
return
if action == "start":
if os.path.exists("/proc/stb/fp/led_set_pattern"):
f = open("/proc/stb/fp/led_set_pattern", "w")
f.write("0xa7fccf7a")
f.close()
elif os.path.exists("/proc/stb/fp/led0_pattern"):
f = open("/proc/stb/fp/led0_pattern", "w")
f.write("0x55555555")
f.close()
if os.path.exists("/proc/stb/fp/led_pattern_speed"):
f = open("/proc/stb/fp/led_pattern_speed", "w")
f.write("20")
f.close()
elif os.path.exists("/proc/stb/fp/led_set_speed"):
f = open("/proc/stb/fp/led_set_speed", "w")
f.write("20")
f.close()
elif action == "stop":
if os.path.exists("/proc/stb/fp/led_set_pattern"):
f = open("/proc/stb/fp/led_set_pattern", "w")
f.write("0")
f.close()
elif os.path.exists("/proc/stb/fp/led0_pattern"):
f = open("/proc/stb/fp/led0_pattern", "w")
f.write("0")
f.close()
	def ptsCreateHardlink(self):
		"""Hardlink the live timeshift buffer into numbered pts_livebuffer files.

		For every raw "timeshift*" file in the timeshift path, replace the
		pts_livebuffer_<n> slot (n = self.pts_eventcount): erase stale
		.eit/.meta/.sc companions, hardlink the buffer and its .sc file,
		write a fresh .meta file and create an .eit file.  With autorecord
		enabled the buffer is additionally hardlinked into the autorecord
		path as a normal recording.  Requires a hardlink-capable filesystem;
		failures raise a user-visible error box.
		"""
		# print 'ptsCreateHardlink'
		for filename in os.listdir(config.usage.timeshift_path.value):
			# if filename.startswith("timeshift") and not os.path.splitext(filename)[1]:
			if filename.startswith("timeshift") and not filename.endswith(".sc") and not filename.endswith(".del") and not filename.endswith(".copy"):
				# Clear out any leftovers occupying this buffer slot first.
				if os.path.exists("%spts_livebuffer_%s.eit" % (config.usage.timeshift_path.value,self.pts_eventcount)):
					self.BgFileEraser.erase("%spts_livebuffer_%s.eit" % (config.usage.timeshift_path.value,self.pts_eventcount))
				if os.path.exists("%spts_livebuffer_%s.meta" % (config.usage.timeshift_path.value,self.pts_eventcount)):
					self.BgFileEraser.erase("%spts_livebuffer_%s.meta" % (config.usage.timeshift_path.value,self.pts_eventcount))
				if os.path.exists("%spts_livebuffer_%s" % (config.usage.timeshift_path.value,self.pts_eventcount)):
					self.BgFileEraser.erase("%spts_livebuffer_%s" % (config.usage.timeshift_path.value,self.pts_eventcount))
				if os.path.exists("%spts_livebuffer_%s.sc" % (config.usage.timeshift_path.value,self.pts_eventcount)):
					self.BgFileEraser.erase("%spts_livebuffer_%s.sc" % (config.usage.timeshift_path.value,self.pts_eventcount))
				try:
					# Create link to pts_livebuffer file
					os.link("%s%s" % (config.usage.timeshift_path.value,filename), "%spts_livebuffer_%s" % (config.usage.timeshift_path.value,self.pts_eventcount))
					os.link("%s%s.sc" % (config.usage.timeshift_path.value,filename), "%spts_livebuffer_%s.sc" % (config.usage.timeshift_path.value,self.pts_eventcount))
					# Create a Meta File (serviceref, name, description, start time)
					metafile = open("%spts_livebuffer_%s.meta" % (config.usage.timeshift_path.value,self.pts_eventcount), "w")
					metafile.write("%s\n%s\n%s\n%i\n" % (self.pts_curevent_servicerefname,self.pts_curevent_name.replace("\n", ""),self.pts_curevent_description.replace("\n", ""),int(self.pts_starttime)))
					metafile.close()
				except Exception, errormsg:
					Notifications.AddNotification(MessageBox, _("Creating Hardlink to Timeshift file failed!")+"\n"+_("The Filesystem on your Timeshift-Device does not support hardlinks.\nMake sure it is formatted in EXT2 or EXT3!")+"\n\n%s" % errormsg, MessageBox.TYPE_ERROR, timeout=30)
				# Create EIT File
				self.ptsCreateEITFile("%spts_livebuffer_%s" % (config.usage.timeshift_path.value,self.pts_eventcount))
				# Autorecord
				if config.timeshift.autorecord.value:
					try:
						fullname = getRecordingFilename("%s - %s - %s" % (strftime("%Y%m%d %H%M",localtime(self.pts_starttime)),self.pts_curevent_station,self.pts_curevent_name),config.usage.autorecord_path.value)
						os.link("%s%s" % (config.usage.timeshift_path.value,filename), "%s.ts" % fullname)
						# Create a Meta File; the extra "autosaved" tag marks this
						# as an automatic recording.
						metafile = open("%s.ts.meta" % fullname, "w")
						metafile.write("%s\n%s\n%s\n%i\nautosaved\n" % (self.pts_curevent_servicerefname,self.pts_curevent_name.replace("\n", ""),self.pts_curevent_description.replace("\n", ""),int(self.pts_starttime)))
						metafile.close()
					except Exception, errormsg:
						print "[Timeshift] %s" % errormsg
	def ptsRecordCurrentEvent(self):
		"""Start an instant recording of the currently playing event.

		Uses the cached pts_curevent_* data (see ptsGetEventInfo) to build
		a RecordTimerEntry that runs from now until the event's end, and
		keeps a reference in self.recording.
		"""
		recording = RecordTimerEntry(ServiceReference(self.session.nav.getCurrentlyPlayingServiceOrGroup()), time(), self.pts_curevent_end, self.pts_curevent_name, self.pts_curevent_description, self.pts_curevent_eventid, dirname = config.usage.autorecord_path.value)
		# One-shot timer: keep it out of the persistent timer list.
		recording.dontSave = True
		self.session.nav.RecordTimer.record(recording)
		self.recording.append(recording)
	def ptsMergeRecords(self):
		"""Merge split autorecord files that belong to the same event.

		Scans the autorecord path for .meta files; a meta tagged
		"pts_merge" marks a destination recording, and the next file with
		the same (transliterated) event name is appended to it via a
		background MergeTimeshiftJob.  Retries in 2 minutes while a
		recording runs or the destination file is still being written.
		"""
		if self.session.nav.RecordTimer.isRecording():
			self.pts_mergeRecords_timer.start(120000, True)
			return
		ptsmergeSRC = ""
		ptsmergeDEST = ""
		ptsmergeeventname = ""
		ptsgetnextfile = False
		ptsfilemerged = False
		filelist = os.listdir(config.usage.autorecord_path.value)
		if filelist is not None:
			filelist.sort()
		for filename in filelist:
			if filename.endswith(".meta"):
				# Get Event Info from meta file (one value per line).
				readmetafile = open("%s%s" % (config.usage.autorecord_path.value,filename), "r")
				servicerefname = readmetafile.readline()[0:-1]
				eventname = readmetafile.readline()[0:-1]
				eventtitle = readmetafile.readline()[0:-1]
				eventtime = readmetafile.readline()[0:-1]
				eventtag = readmetafile.readline()[0:-1]
				readmetafile.close()
				if ptsgetnextfile:
					ptsgetnextfile = False
					# Strip the ".meta" suffix to get the .ts file name.
					ptsmergeSRC = filename[0:-5]
					if ASCIItranslit.legacyEncode(eventname) == ASCIItranslit.legacyEncode(ptsmergeeventname):
						# Copy EIT File
						if fileExists("%s%s.eit" % (config.usage.autorecord_path.value, ptsmergeSRC[0:-3])):
							copyfile("%s%s.eit" % (config.usage.autorecord_path.value, ptsmergeSRC[0:-3]),"%s%s.eit" % (config.usage.autorecord_path.value, ptsmergeDEST[0:-3]))
						# Delete AP and SC Files (stale after appending data)
						if os.path.exists("%s%s.ap" % (config.usage.autorecord_path.value, ptsmergeDEST)):
							self.BgFileEraser.erase("%s%s.ap" % (config.usage.autorecord_path.value, ptsmergeDEST))
						if os.path.exists("%s%s.sc" % (config.usage.autorecord_path.value, ptsmergeDEST)):
							self.BgFileEraser.erase("%s%s.sc" % (config.usage.autorecord_path.value, ptsmergeDEST))
						# Add Merge Job to JobManager
						JobManager.AddJob(MergeTimeshiftJob(self, "cat \"%s%s\" >> \"%s%s\"" % (config.usage.autorecord_path.value,ptsmergeSRC,config.usage.autorecord_path.value,ptsmergeDEST), ptsmergeSRC, ptsmergeDEST, eventname))
						config.timeshift.isRecording.value = True
						ptsfilemerged = True
					else:
						# Name mismatch: keep looking for the matching source.
						ptsgetnextfile = True
				if eventtag == "pts_merge" and not ptsgetnextfile:
					ptsgetnextfile = True
					ptsmergeDEST = filename[0:-5]
					ptsmergeeventname = eventname
					ptsfilemerged = False
					# If still recording or transfering, try again later ...
					if fileExists("%s%s" % (config.usage.autorecord_path.value,ptsmergeDEST)):
						statinfo = os.stat("%s%s" % (config.usage.autorecord_path.value,ptsmergeDEST))
						if statinfo.st_mtime > (time()-10.0):
							self.pts_mergeRecords_timer.start(120000, True)
							return
					# Rewrite Meta File to get rid of pts_merge tag
					metafile = open("%s%s.meta" % (config.usage.autorecord_path.value,ptsmergeDEST), "w")
					metafile.write("%s\n%s\n%s\n%i\n" % (servicerefname,eventname.replace("\n", ""),eventtitle.replace("\n", ""),int(eventtime)))
					metafile.close()
		# Merging failed :(
		if not ptsfilemerged and ptsgetnextfile:
			Notifications.AddNotification(MessageBox,_("[Timeshift] Merging records failed!"), MessageBox.TYPE_ERROR, timeout=30)
	def ptsCreateAPSCFiles(self, filename):
		"""Queue a background job that builds .ap/.sc index files for 'filename'.

		The event name for the job label is read from the recording's
		.meta file when present.  If the recording itself is missing the
		save sequence is finished immediately instead.
		"""
		if fileExists(filename, 'r'):
			if fileExists(filename+".meta", 'r'):
				# Get Event Info from meta file.  The first line (service
				# reference) is read only to skip to the event name.
				readmetafile = open(filename+".meta", "r")
				servicerefname = readmetafile.readline()[0:-1]
				eventname = readmetafile.readline()[0:-1]
				readmetafile.close()
			else:
				eventname = ""
			JobManager.AddJob(CreateAPSCFilesJob(self, "/usr/lib/enigma2/python/Components/createapscfiles \"%s\" > /dev/null" % filename, eventname))
		else:
			self.ptsSaveTimeshiftFinished()
	def ptsCreateEITFile(self, filename):
		"""Save the cached current event's EPG data as '<filename>.eit'.

		No-op when ptsGetEventInfo did not find an event id.  Errors are
		only logged.
		"""
		if self.pts_curevent_eventid is not None:
			try:
				serviceref = ServiceReference(self.session.nav.getCurrentlyPlayingServiceOrGroup()).ref.toString()
				eEPGCache.getInstance().saveEventToFile(filename+".eit", serviceref, self.pts_curevent_eventid, -1, -1)
			except Exception, errormsg:
				print "[Timeshift] %s" % errormsg
def ptsCopyFilefinished(self, srcfile, destfile):
# Erase Source File
if fileExists(srcfile):
self.BgFileEraser.erase(srcfile)
# Restart Merge Timer
if self.pts_mergeRecords_timer.isActive():
self.pts_mergeRecords_timer.stop()
self.pts_mergeRecords_timer.start(15000, True)
else:
# Create AP and SC Files
self.ptsCreateAPSCFiles(destfile)
	def ptsMergeFilefinished(self, srcfile, destfile):
		"""Callback run when a merge job appended 'srcfile' to 'destfile'.

		While recordings or other jobs are active the source is only
		marked with a .pts.del file for later deletion; otherwise it and
		all its companion files are erased now.  Finally the AP/SC index
		files for the destination are rebuilt and the merge scan is rerun.
		"""
		if self.session.nav.RecordTimer.isRecording() or len(JobManager.getPendingJobs()) >= 1:
			# Rename files and delete them later ...
			self.pts_mergeCleanUp_timer.start(120000, True)
			os.system("echo \"\" > \"%s.pts.del\"" % (srcfile[0:-3]))
		else:
			# Delete Instant Record permanently now ... R.I.P.
			self.BgFileEraser.erase("%s" % srcfile)
			self.BgFileEraser.erase("%s.ap" % srcfile)
			self.BgFileEraser.erase("%s.sc" % srcfile)
			self.BgFileEraser.erase("%s.meta" % srcfile)
			self.BgFileEraser.erase("%s.cuts" % srcfile)
			self.BgFileEraser.erase("%s.eit" % (srcfile[0:-3]))
		# Create AP and SC Files
		self.ptsCreateAPSCFiles(destfile)
		# Run Merge-Process one more time to check if there are more records to merge
		self.pts_mergeRecords_timer.start(10000, True)
	def ptsSaveTimeshiftFinished(self):
		"""Finish the 'save timeshift' sequence once no merge cleanup is pending.

		Stops the front panel LED pattern, clears the recording flag and
		either schedules the deferred shutdown (when the box is trying to
		quit the main loop) or notifies the user of success.
		"""
		if not self.pts_mergeCleanUp_timer.isActive():
			self.ptsFrontpanelActions("stop")
			config.timeshift.isRecording.value = False
			if Screens.Standby.inTryQuitMainloop:
				self.pts_QuitMainloop_timer.start(30000, True)
			else:
				Notifications.AddNotification(MessageBox, _("Timeshift saved to your harddisk!"), MessageBox.TYPE_INFO, timeout=30)
	def ptsMergePostCleanUp(self):
		"""Erase merge leftovers that were only marked with .pts.del files.

		Postponed (2 min) while recordings or background jobs are running.
		For every *.pts.del marker the corresponding .ts file and all its
		companion files are handed to the background eraser.
		"""
		if self.session.nav.RecordTimer.isRecording() or len(JobManager.getPendingJobs()) >= 1:
			config.timeshift.isRecording.value = True
			self.pts_mergeCleanUp_timer.start(120000, True)
			return
		self.ptsFrontpanelActions("stop")
		config.timeshift.isRecording.value = False
		filelist = os.listdir(config.usage.autorecord_path.value)
		for filename in filelist:
			if filename.endswith(".pts.del"):
				# "<name>.pts.del" marks "<name>.ts" for deletion.
				srcfile = config.usage.autorecord_path.value + "/" + filename[0:-8] + ".ts"
				self.BgFileEraser.erase("%s" % srcfile)
				self.BgFileEraser.erase("%s.ap" % srcfile)
				self.BgFileEraser.erase("%s.sc" % srcfile)
				self.BgFileEraser.erase("%s.meta" % srcfile)
				self.BgFileEraser.erase("%s.cuts" % srcfile)
				self.BgFileEraser.erase("%s.eit" % (srcfile[0:-3]))
				self.BgFileEraser.erase("%s.pts.del" % (srcfile[0:-3]))
		# Restart QuitMainloop Timer to give BgFileEraser enough time
		if Screens.Standby.inTryQuitMainloop and self.pts_QuitMainloop_timer.isActive():
			self.pts_QuitMainloop_timer.start(60000, True)
	def ptsTryQuitMainloop(self):
		"""Retry or perform the deferred shutdown once timeshift work is done.

		While jobs or merge cleanup are still pending, re-arm the timer
		for another 60 s; otherwise reopen TryQuitMainloop with the value
		stashed in session.ptsmainloopvalue.
		"""
		if Screens.Standby.inTryQuitMainloop and (len(JobManager.getPendingJobs()) >= 1 or self.pts_mergeCleanUp_timer.isActive()):
			self.pts_QuitMainloop_timer.start(60000, True)
			return
		if Screens.Standby.inTryQuitMainloop and self.session.ptsmainloopvalue:
			# Clear dialog stacks so the quit screen comes up cleanly.
			self.session.dialog_stack = []
			self.session.summary_stack = [None]
			self.session.open(Screens.Standby.TryQuitMainloop, self.session.ptsmainloopvalue)
def ptsGetSeekInfo(self):
s = self.session.nav.getCurrentService()
return s and s.seek()
def ptsGetPosition(self):
seek = self.ptsGetSeekInfo()
if seek is None:
return None
pos = seek.getPlayPosition()
if pos[0]:
return 0
return pos[1]
def ptsGetLength(self):
seek = self.ptsGetSeekInfo()
if seek is None:
return None
length = seek.getLength()
if length[0]:
return 0
return length[1]
def ptsGetTimeshiftStatus(self):
if (self.isSeekable() and self.timeshiftEnabled() or self.save_current_timeshift) and config.usage.check_timeshift.value:
return True
else:
return False
	def ptsSeekPointerOK(self):
		"""OK pressed on the PVR state dialog: jump to the seek pointer.

		With the dialog hidden, pressing OK only resumes playback and
		shows the dialog.  Otherwise the pointer's pixel offset inside the
		seek bar is converted to a percentage of the total length and the
		playback position is moved there with a relative seek.
		"""
		if self.pvrStateDialog.has_key("PTSSeekPointer") and self.timeshiftEnabled() and self.isSeekable():
			if not self.pvrStateDialog.shown:
				if self.seekstate != self.SEEK_STATE_PLAY or self.seekstate == self.SEEK_STATE_PAUSE:
					self.setSeekState(self.SEEK_STATE_PLAY)
				self.doShow()
				return
			length = self.ptsGetLength()
			position = self.ptsGetPosition()
			if length is None or position is None:
				return
			# Pointer x relative to the seek bar start (+8 px bar margin).
			cur_pos = self.pvrStateDialog["PTSSeekPointer"].position
			jumptox = int(cur_pos[0]) - (int(self.pvrStateDialog["PTSSeekBack"].instance.position().x())+8)
			jumptoperc = round((jumptox / float(self.pvrStateDialog["PTSSeekBack"].instance.size().width())) * 100, 0)
			jumptotime = int((length / 100) * jumptoperc)
			jumptodiff = position - jumptotime
			self.doSeekRelative(-jumptodiff)
		else:
			return
	def ptsSeekPointerLeft(self):
		"""Move the seek pointer left while the PVR state dialog is shown."""
		if self.pvrStateDialog.has_key("PTSSeekPointer") and self.pvrStateDialog.shown and self.timeshiftEnabled() and self.isSeekable():
			self.ptsMoveSeekPointer(direction="left")
		else:
			return
	def ptsSeekPointerRight(self):
		"""Move the seek pointer right while the PVR state dialog is shown."""
		if self.pvrStateDialog.has_key("PTSSeekPointer") and self.pvrStateDialog.shown and self.timeshiftEnabled() and self.isSeekable():
			self.ptsMoveSeekPointer(direction="right")
		else:
			return
	def ptsSeekPointerReset(self):
		"""Snap the seek pointer back to the start of the seek bar (+8 px margin)."""
		if self.pvrStateDialog.has_key("PTSSeekPointer") and self.timeshiftEnabled():
			self.pvrStateDialog["PTSSeekPointer"].setPosition(int(self.pvrStateDialog["PTSSeekBack"].instance.position().x())+8,self.pvrStateDialog["PTSSeekPointer"].position[1])
	def ptsSeekPointerSetCurrentPos(self):
		"""Place the seek pointer at the current playback position.

		Converts position/length into a pixel offset within the seek bar.
		Skipped when the pointer widget is absent, timeshift is off, the
		service is not seekable, or length is unusable (< 1).
		"""
		if not self.pvrStateDialog.has_key("PTSSeekPointer") or not self.timeshiftEnabled() or not self.isSeekable():
			return
		position = self.ptsGetPosition()
		length = self.ptsGetLength()
		if length >= 1:
			# Integer percentage of the bar width; +8 px bar margin.
			tpixels = int((float(int((position*100)/length))/100)*self.pvrStateDialog["PTSSeekBack"].instance.size().width())
			self.pvrStateDialog["PTSSeekPointer"].setPosition(int(self.pvrStateDialog["PTSSeekBack"].instance.position().x())+8+tpixels, self.pvrStateDialog["PTSSeekPointer"].position[1])
	def ptsMoveSeekPointer(self, direction=None):
		"""Nudge the seek pointer 15 px left or right, clamped to the bar.

		direction: "left" or "right"; anything else (or a missing pointer
		widget) is a no-op.  When the move would leave the bar, the
		pointer is set to the boundary instead.
		"""
		if direction is None or not self.pvrStateDialog.has_key("PTSSeekPointer"):
			return
		isvalidjump = False
		cur_pos = self.pvrStateDialog["PTSSeekPointer"].position
		self.doShow()
		if direction == "left":
			minmaxval = int(self.pvrStateDialog["PTSSeekBack"].instance.position().x())+8
			movepixels = -15
			if cur_pos[0]+movepixels > minmaxval:
				isvalidjump = True
		elif direction == "right":
			# Right limit at 96% of the bar width.
			minmaxval = int(self.pvrStateDialog["PTSSeekBack"].instance.size().width()*0.96)
			movepixels = 15
			if cur_pos[0]+movepixels < minmaxval:
				isvalidjump = True
		else:
			return 0
		if isvalidjump:
			self.pvrStateDialog["PTSSeekPointer"].setPosition(cur_pos[0]+movepixels,cur_pos[1])
		else:
			self.pvrStateDialog["PTSSeekPointer"].setPosition(minmaxval,cur_pos[1])
	def ptsCheckFileChanged(self):
		"""Timer poll: has playback switched to the requested buffer file?

		After 5 polls without a file change the switch is considered
		failed: either ask the user (live TV / next file) or fall back to
		the last playing buffer.  Once the change is seen, any pending
		position seek, file jump or seek-back is carried out.
		"""
		# print '!!!!! ptsCheckFileChanged'
		# print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
		# print 'self.pts_file_changed',self.pts_file_changed
		if not self.timeshiftEnabled():
			self.pts_CheckFileChanged_timer.stop()
			return
		if self.pts_CheckFileChanged_counter >= 5 and not self.pts_file_changed:
			if self.pts_switchtolive:
				if config.timeshift.showlivetvmsg.value:
					self.ptsAskUser("livetv")
			elif self.pts_lastplaying <= self.pts_currplaying:
				self.ptsAskUser("nextfile")
			else:
				Notifications.AddNotification(MessageBox, _("Can't play the previous timeshift file! You can try again."), MessageBox.TYPE_INFO, timeout=3)
				self.doSeek(0)
				self.setSeekState(self.SEEK_STATE_PLAY)
				#self.pts_firstplayable = self.pts_lastplaying
				self.pts_currplaying = self.pts_lastplaying
			self.pts_CheckFileChanged_timer.stop()
			return
		self.pts_CheckFileChanged_counter += 1
		if self.pts_file_changed:
			self.pts_CheckFileChanged_timer.stop()
			if self.posDiff:
				self.pts_SeekToPos_timer.start(1000, True)
			elif self.pts_FileJump_timer.isActive():
				self.pts_FileJump_timer.stop()
			elif self.pts_lastplaying > self.pts_currplaying:
				self.pts_SeekBack_timer.start(1000, True)
			else:
				# Jump far beyond any real position, i.e. to the end of file
				# (24 h expressed in 90 kHz PTS ticks).
				self.doSeek(3600 * 24 * 90000)
	def ptsTimeshiftFileChanged(self):
		"""Callback fired when playback actually switched buffer files.

		Marks the change for ptsCheckFileChanged, resets the seek pointer,
		advances the currplaying/nextplaying counters and pre-queues the
		following pts_livebuffer file — or arms switch-to-live when there
		is none.
		"""
		# print '!!!!! ptsTimeshiftFileChanged'
		# print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
		self.pts_file_changed = True
		# Reset Seek Pointer
		self.ptsSeekPointerReset()
		# print 'self.pts_switchtolive',self.pts_switchtolive
		if self.pts_switchtolive:
			self.pts_switchtolive = False
			self.pts_nextplaying = 0
			self.pts_currplaying = self.pts_eventcount
			return
		if self.pts_nextplaying:
			self.pts_currplaying = self.pts_nextplaying
		self.pts_nextplaying = self.pts_currplaying + 1
		# Get next pts file ...
		# print ("!!! %spts_livebuffer_%s" % (config.usage.timeshift_path.value,self.pts_nextplaying))
		if fileExists("%spts_livebuffer_%s" % (config.usage.timeshift_path.value,self.pts_nextplaying), 'r'):
			# print '!!!!! TEST1'
			self.ptsSetNextPlaybackFile("pts_livebuffer_%s" % self.pts_nextplaying)
			self.pts_switchtolive = False
		else:
			self.ptsSetNextPlaybackFile("")
			self.pts_switchtolive = True
		# print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
def ptsSetNextPlaybackFile(self, nexttsfile):
# print '!!!!! ptsSetNextPlaybackFile'
ts = self.getTimeshift()
if ts is None:
return
# print ("!!! SET NextPlaybackFile%s%s" % (config.usage.timeshift_path.value,nexttsfile))
if nexttsfile:
ts.setNextPlaybackFile("%s%s" % (config.usage.timeshift_path.value,nexttsfile))
else:
ts.setNextPlaybackFile("")
	def ptsSeekToPos(self):
		"""Seek to the absolute position stored in self.posDiff, clamped.

		A negative target beyond the buffer start becomes 0; a target
		beyond the end is clamped to ~10 s (10 * 90000 PTS ticks) before
		the end.  posDiff is reset to 0 afterwards.
		"""
		#print '!!!!! ptsSeekToPos', self.posDiff
		length = self.ptsGetLength()
		if length is None:
			return
		if self.posDiff < 0:
			if length <= abs(self.posDiff):
				self.posDiff = 0
		else:
			if length <= abs(self.posDiff):
				tmp = length - 90000*10
				if tmp < 0: tmp = 0
				self.posDiff = tmp
		self.setSeekState(self.SEEK_STATE_PLAY)
		self.doSeek(self.posDiff)
		self.posDiff = 0
	def ptsSeekBackTimer(self):
		"""Jump ~10 s before the end of the buffer, pause, then schedule
		the actual backward winding via pts_StartSeekBackTimer."""
		# print '!!!!! ptsSeekBackTimer RUN'
		self.doSeek(-90000*10) # seek ~10s before end
		self.setSeekState(self.SEEK_STATE_PAUSE)
		self.pts_StartSeekBackTimer.start(1000, True)
	def ptsStartSeekBackTimer(self):
		"""Resume rewinding: either at the configured default speed or at
		the speed that was active before the file change (negated, since
		pts_lastseekspeed stores the signed value)."""
		# print '!!!!! ptsStartSeekBackTimer RUN'
		if self.pts_lastseekspeed == 0:
			self.setSeekState(self.makeStateBackward(int(config.seek.enter_backward.value)))
		else:
			self.setSeekState(self.makeStateBackward(int(-self.pts_lastseekspeed)))
	def ptsCheckTimeshiftPath(self):
		"""Return True when the configured timeshift path is writable.

		On failure the delay and cleanup timers are stopped so timeshift
		does not keep trying, and False is returned.
		"""
		if fileExists(config.usage.timeshift_path.value, 'w'):
			return True
		else:
			# Notifications.AddNotification(MessageBox, _("Could not activate Autorecord-Timeshift!\nTimeshift-Path does not exist"), MessageBox.TYPE_ERROR, timeout=15)
			if self.pts_delay_timer.isActive():
				self.pts_delay_timer.stop()
			if self.pts_cleanUp_timer.isActive():
				self.pts_cleanUp_timer.stop()
			return False
	def ptsTimerEntryStateChange(self, timer):
		"""React to RecordTimer state changes ('stop timeshift while
		recording' option).

		When a record starts, timeshift is stopped; when the last record
		ends, timeshift is restarted and, if merge/save jobs are still
		running, the merge timer and front panel LED are restarted.
		"""
		# print 'ptsTimerEntryStateChange'
		if not config.timeshift.stopwhilerecording.value:
			return
		self.pts_record_running = self.session.nav.RecordTimer.isRecording()
		# Abort here when box is in standby mode
		if self.session.screen["Standby"].boolean is True:
			return
		# Stop Timeshift when Record started ...
		if timer.state == TimerEntry.StateRunning and self.timeshiftEnabled() and self.pts_record_running:
			if self.seekstate != self.SEEK_STATE_PLAY:
				self.setSeekState(self.SEEK_STATE_PLAY)
			if self.isSeekable():
				Notifications.AddNotification(MessageBox,_("Record started! Stopping timeshift now ..."), MessageBox.TYPE_INFO, timeout=30)
			self.switchToLive = False
			self.stopTimeshiftcheckTimeshiftRunningCallback(True)
		if timer.state == TimerEntry.StateEnded:
			# Restart Timeshift when all records stopped
			if not self.timeshiftEnabled() and not self.pts_record_running:
				self.autostartAutorecordTimeshift()
			if self.pts_mergeRecords_timer.isActive():
				# Restart Merge-Timer when all records stopped
				self.pts_mergeRecords_timer.stop()
				self.pts_mergeRecords_timer.start(15000, True)
				# Restart FrontPanel LED when still copying or merging files
				self.ptsFrontpanelActions("start")
				config.timeshift.isRecording.value = True
			else:
				# Restart FrontPanel LED when still copying or merging files
				jobs = JobManager.getPendingJobs()
				if len(jobs) >= 1:
					for job in jobs:
						jobname = str(job.name)
						if jobname == _("Saving Timeshift files") or jobname == _("Creating AP and SC Files") or jobname == _("Merging Timeshift files"):# or jobname != _("Cleaning Trashes"):
							self.ptsFrontpanelActions("start")
							config.timeshift.isRecording.value = True
							break
def ptsLiveTVStatus(self):
service = self.session.nav.getCurrentService()
info = service and service.info()
sTSID = info and info.getInfo(iServiceInformation.sTSID) or -1
if sTSID is None or sTSID == -1:
return False
else:
return True
| gpl-2.0 |
nan86150/ImageFusion | lib/python2.7/site-packages/scipy/optimize/__init__.py | 12 | 6043 | """
=====================================================
Optimization and root finding (:mod:`scipy.optimize`)
=====================================================
.. currentmodule:: scipy.optimize
Optimization
============
Local Optimization
------------------
.. autosummary::
:toctree: generated/
minimize - Unified interface for minimizers of multivariate functions
minimize_scalar - Unified interface for minimizers of univariate functions
OptimizeResult - The optimization result returned by some optimizers
The `minimize` function supports the following methods:
.. toctree::
optimize.minimize-neldermead
optimize.minimize-powell
optimize.minimize-cg
optimize.minimize-bfgs
optimize.minimize-newtoncg
optimize.minimize-lbfgsb
optimize.minimize-tnc
optimize.minimize-cobyla
optimize.minimize-slsqp
optimize.minimize-dogleg
optimize.minimize-trustncg
The `minimize_scalar` function supports the following methods:
.. toctree::
optimize.minimize_scalar-brent
optimize.minimize_scalar-bounded
optimize.minimize_scalar-golden
The specific optimization method interfaces below in this subsection are
not recommended for use in new scripts; all of these methods are accessible
via a newer, more consistent interface provided by the functions above.
General-purpose multivariate methods:
.. autosummary::
:toctree: generated/
fmin - Nelder-Mead Simplex algorithm
fmin_powell - Powell's (modified) level set method
fmin_cg - Non-linear (Polak-Ribiere) conjugate gradient algorithm
fmin_bfgs - Quasi-Newton method (Broydon-Fletcher-Goldfarb-Shanno)
fmin_ncg - Line-search Newton Conjugate Gradient
Constrained multivariate methods:
.. autosummary::
:toctree: generated/
fmin_l_bfgs_b - Zhu, Byrd, and Nocedal's constrained optimizer
fmin_tnc - Truncated Newton code
fmin_cobyla - Constrained optimization by linear approximation
fmin_slsqp - Minimization using sequential least-squares programming
differential_evolution - stochastic minimization using differential evolution
Univariate (scalar) minimization methods:
.. autosummary::
:toctree: generated/
fminbound - Bounded minimization of a scalar function
brent - 1-D function minimization using Brent method
golden - 1-D function minimization using Golden Section method
Equation (Local) Minimizers
---------------------------
.. autosummary::
:toctree: generated/
leastsq - Minimize the sum of squares of M equations in N unknowns
nnls - Linear least-squares problem with non-negativity constraint
Global Optimization
-------------------
.. autosummary::
:toctree: generated/
basinhopping - Basinhopping stochastic optimizer
brute - Brute force searching optimizer
differential_evolution - stochastic minimization using differential evolution
Rosenbrock function
-------------------
.. autosummary::
:toctree: generated/
rosen - The Rosenbrock function.
rosen_der - The derivative of the Rosenbrock function.
rosen_hess - The Hessian matrix of the Rosenbrock function.
rosen_hess_prod - Product of the Rosenbrock Hessian with a vector.
Fitting
=======
.. autosummary::
:toctree: generated/
curve_fit -- Fit curve to a set of points
Root finding
============
Scalar functions
----------------
.. autosummary::
:toctree: generated/
brentq - quadratic interpolation Brent method
brenth - Brent method, modified by Harris with hyperbolic extrapolation
ridder - Ridder's method
bisect - Bisection method
newton - Secant method or Newton's method
Fixed point finding:
.. autosummary::
:toctree: generated/
fixed_point - Single-variable fixed-point solver
Multidimensional
----------------
General nonlinear solvers:
.. autosummary::
:toctree: generated/
root - Unified interface for nonlinear solvers of multivariate functions
fsolve - Non-linear multi-variable equation solver
broyden1 - Broyden's first method
broyden2 - Broyden's second method
The `root` function supports the following methods:
.. toctree::
optimize.root-hybr
optimize.root-lm
optimize.root-broyden1
optimize.root-broyden2
optimize.root-anderson
optimize.root-linearmixing
optimize.root-diagbroyden
optimize.root-excitingmixing
optimize.root-krylov
optimize.root-dfsane
Large-scale nonlinear solvers:
.. autosummary::
:toctree: generated/
newton_krylov
anderson
Simple iterations:
.. autosummary::
:toctree: generated/
excitingmixing
linearmixing
diagbroyden
:mod:`Additional information on the nonlinear solvers <scipy.optimize.nonlin>`
Linear Programming
==================
Simplex Algorithm:
.. autosummary::
:toctree: generated/
linprog -- Linear programming using the simplex algorithm
The `linprog` function supports the following methods:
.. toctree::
optimize.linprog-simplex
Utilities
=========
.. autosummary::
:toctree: generated/
approx_fprime - Approximate the gradient of a scalar function
bracket - Bracket a minimum, given two starting points
check_grad - Check the supplied derivative using finite differences
line_search - Return a step that satisfies the strong Wolfe conditions
show_options - Show specific options optimization solvers
LbfgsInvHessProduct - Linear operator for L-BFGS approximate inverse Hessian
"""
from __future__ import division, print_function, absolute_import
from .optimize import *
from ._minimize import *
from ._root import *
from .minpack import *
from .zeros import *
from .lbfgsb import fmin_l_bfgs_b, LbfgsInvHessProduct
from .tnc import fmin_tnc
from .cobyla import fmin_cobyla
from .nonlin import *
from .slsqp import fmin_slsqp
from .nnls import nnls
from ._basinhopping import basinhopping
from ._linprog import linprog, linprog_verbose_callback
from ._differentialevolution import differential_evolution
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
| mit |
nharraud/b2share | invenio/modules/indexer/tokenizers/BibIndexFilenameTokenizer.py | 12 | 2319 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2010, 2011, 2012, 2013, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibIndexFilenameTokenizer: 'tokenizes' finds file names.
Tokenizer is adapted to work with bibfield and its get_record function.
"""
from invenio.modules.indexer.tokenizers.BibIndexRecJsonTokenizer import BibIndexRecJsonTokenizer
class BibIndexFilenameTokenizer(BibIndexRecJsonTokenizer):

    """Tokenizer producing file-name tokens from a recjson record.

    Works with bibfield records as returned by ``get_record``::

        from bibfield import get_record
        record16 = get_record(16)
        tokenizer = BibIndexFilenameTokenizer()
        new_words = tokenizer.tokenize(record16)

    Example of resulting tokens:
    'thesis.ps.gz' -> ['thesis', 'thesis.ps', 'thesis.ps.gz']
    """

    def __init__(self, stemming_language=None,
                 remove_stopwords=False,
                 remove_html_markup=False,
                 remove_latex_markup=False):
        # The parameters exist only for interface compatibility with the
        # other tokenizers; none of them affect file-name tokenization.
        pass

    def tokenize(self, record):
        """Return the derived 'filenames' field of a recjson record.

        :param record: recjson record (bibfield); a non-mapping input
            yields an empty list.
        """
        try:
            if 'filenames' in record:
                return record['filenames']
        except KeyError:
            pass
        except TypeError:
            return []
        return []

    def get_tokenizing_function(self, wordtable_type):
        # All word table types use the same tokenization.
        return self.tokenize
| gpl-2.0 |
sanzinger/pyflakes | pyflakes/test/test_imports.py | 39 | 16828 |
from sys import version_info
from pyflakes import messages as m
from pyflakes.test import harness
class Test(harness.Test):
    def test_unusedImport(self):
        """Each imported-but-unused name warns, for plain and from-imports."""
        self.flakes('import fu, bar', m.UnusedImport, m.UnusedImport)
        self.flakes('from baz import fu, bar', m.UnusedImport, m.UnusedImport)
    def test_aliasedImport(self):
        """Two imports aliased to the same name: redefinition plus unused."""
        self.flakes('import fu as FU, bar as FU', m.RedefinedWhileUnused, m.UnusedImport)
        self.flakes('from moo import fu as FU, bar as FU', m.RedefinedWhileUnused, m.UnusedImport)
    def test_usedImport(self):
        """An import referenced afterwards produces no warning."""
        self.flakes('import fu; print fu')
        self.flakes('from baz import fu; print fu')
    def test_redefinedWhileUnused(self):
        """Rebinding or deleting an unused import warns, including via
        tuple/list unpacking targets."""
        self.flakes('import fu; fu = 3', m.RedefinedWhileUnused)
        self.flakes('import fu; del fu', m.RedefinedWhileUnused)
        self.flakes('import fu; fu, bar = 3', m.RedefinedWhileUnused)
        self.flakes('import fu; [fu, bar] = 3', m.RedefinedWhileUnused)
    def test_redefinedByFunction(self):
        """A function definition shadowing an unused import warns."""
        self.flakes('''
        import fu
        def fu():
            pass
        ''', m.RedefinedWhileUnused)
    def test_redefinedInNestedFunction(self):
        """
        Shadowing a global name with a nested function definition generates
        a redefinition warning (plus unused-import for the never-used import).
        """
        self.flakes('''
        import fu
        def bar():
            def baz():
                def fu():
                    pass
        ''', m.RedefinedWhileUnused, m.UnusedImport)
    def test_redefinedByClass(self):
        """A class definition shadowing an unused import warns."""
        self.flakes('''
        import fu
        class fu:
            pass
        ''', m.RedefinedWhileUnused)
    def test_redefinedBySubclass(self):
        """
        If an imported name is redefined by a class statement which also uses
        that name in the bases list, no warning is emitted: the import counts
        as used by the base-class reference.
        """
        self.flakes('''
        from fu import bar
        class bar(bar):
            pass
        ''')
    def test_redefinedInClass(self):
        """
        Shadowing a global with a class attribute does not produce a warning:
        class scope does not hide the module-level import.
        """
        self.flakes('''
        import fu
        class bar:
            fu = 1
        print fu
        ''')
    def test_usedInFunction(self):
        """Using an import inside a function body counts as a use."""
        self.flakes('''
        import fu
        def fun():
            print fu
        ''')
    def test_shadowedByParameter(self):
        """A parameter shadowing an import: warns only when the module-level
        import is never used; a later module-level use suppresses it."""
        self.flakes('''
        import fu
        def fun(fu):
            print fu
        ''', m.UnusedImport)

        self.flakes('''
        import fu
        def fun(fu):
            print fu
        print fu
        ''')
    def test_newAssignment(self):
        """A plain assignment to a fresh name produces no warning."""
        self.flakes('fu = None')
    def test_usedInGetattr(self):
        """Attribute access on the imported name is a use; an attribute
        merely *named* like the import is not."""
        self.flakes('import fu; fu.bar.baz')
        self.flakes('import fu; "bar".fu.baz', m.UnusedImport)
    def test_usedInSlice(self):
        """Slicing an attribute of the import counts as a use."""
        self.flakes('import fu; print fu.bar[1:]')
    def test_usedInIfBody(self):
        """Use inside an if body counts."""
        self.flakes('''
        import fu
        if True: print fu
        ''')
    def test_usedInIfConditional(self):
        """Use in an if condition counts."""
        self.flakes('''
        import fu
        if fu: pass
        ''')
    def test_usedInElifConditional(self):
        """Use in an elif condition counts."""
        self.flakes('''
        import fu
        if False: pass
        elif fu: pass
        ''')
    def test_usedInElse(self):
        """Use in an else body counts."""
        self.flakes('''
        import fu
        if False: pass
        else: print fu
        ''')
    def test_usedInCall(self):
        """Calling a method of the import counts as a use."""
        self.flakes('import fu; fu.bar()')
    def test_usedInClass(self):
        """Use in a class-body assignment counts."""
        self.flakes('''
        import fu
        class bar:
            bar = fu
        ''')
    def test_usedInClassBase(self):
        """Use in a class bases list counts."""
        self.flakes('''
        import fu
        class bar(object, fu.baz):
            pass
        ''')
    def test_notUsedInNestedScope(self):
        """A nested scope that never references the import does not hide a
        later module-level use — no warning."""
        self.flakes('''
        import fu
        def bleh():
            pass
        print fu
        ''')
    def test_usedInFor(self):
        """Use inside a for-loop body counts."""
        self.flakes('''
        import fu
        for bar in range(9):
            print fu
        ''')
    def test_usedInForElse(self):
        """Use inside a for-else clause counts."""
        self.flakes('''
        import fu
        for bar in range(10):
            pass
        else:
            print fu
        ''')
    def test_redefinedByFor(self):
        """A for-loop variable rebinding a never-used import warns."""
        self.flakes('''
        import fu
        for fu in range(2):
            pass
        ''', m.RedefinedWhileUnused)
    def test_shadowedByFor(self):
        """
        Shadowing a global name with a for loop variable generates a warning
        (ImportShadowedByLoopVar, since the import was used before the loop).
        """
        self.flakes('''
        import fu
        fu.bar()
        for fu in ():
            pass
        ''', m.ImportShadowedByLoopVar)
    def test_shadowedByForDeep(self):
        """
        Shadowing a global name with a for loop variable nested deep inside a
        tuple unpack generates the same warning as a direct loop variable.
        """
        self.flakes('''
        import fu
        fu.bar()
        for (x, y, z, (a, b, c, (fu,))) in ():
            pass
        ''', m.ImportShadowedByLoopVar)
    def test_usedInReturn(self):
        """Use in a return expression counts."""
        self.flakes('''
        import fu
        def fun():
            return fu
        ''')
    def test_usedInOperators(self):
        """Use as an operand of any binary/unary/comparison operator counts."""
        self.flakes('import fu; 3 + fu.bar')
        self.flakes('import fu; 3 % fu.bar')
        self.flakes('import fu; 3 - fu.bar')
        self.flakes('import fu; 3 * fu.bar')
        self.flakes('import fu; 3 ** fu.bar')
        self.flakes('import fu; 3 / fu.bar')
        self.flakes('import fu; 3 // fu.bar')
        self.flakes('import fu; -fu.bar')
        self.flakes('import fu; ~fu.bar')
        self.flakes('import fu; 1 == fu.bar')
        self.flakes('import fu; 1 | fu.bar')
        self.flakes('import fu; 1 & fu.bar')
        self.flakes('import fu; 1 ^ fu.bar')
        self.flakes('import fu; 1 >> fu.bar')
        self.flakes('import fu; 1 << fu.bar')
    def test_usedInAssert(self):
        """Use in an assert statement counts."""
        self.flakes('import fu; assert fu.bar')
    def test_usedInSubscript(self):
        """Use in a subscript expression counts."""
        self.flakes('import fu; fu.bar[1]')
    def test_usedInLogic(self):
        """Use in and/or/not expressions counts."""
        self.flakes('import fu; fu and False')
        self.flakes('import fu; fu or False')
        self.flakes('import fu; not fu.bar')
    def test_usedInList(self):
        """Use inside a list literal counts."""
        self.flakes('import fu; [fu]')
def test_usedInTuple(self):
self.flakes('import fu; (fu,)')
def test_usedInTry(self):
self.flakes('''
import fu
try: fu
except: pass
''')
def test_usedInExcept(self):
self.flakes('''
import fu
try: fu
except: pass
''')
def test_redefinedByExcept(self):
self.flakes('''
import fu
try: pass
except Exception, fu: pass
''', m.RedefinedWhileUnused)
def test_usedInRaise(self):
self.flakes('''
import fu
raise fu.bar
''')
def test_usedInYield(self):
self.flakes('''
import fu
def gen():
yield fu
''')
def test_usedInDict(self):
self.flakes('import fu; {fu:None}')
self.flakes('import fu; {1:fu}')
def test_usedInParameterDefault(self):
self.flakes('''
import fu
def f(bar=fu):
pass
''')
def test_usedInAttributeAssign(self):
self.flakes('import fu; fu.bar = 1')
def test_usedInKeywordArg(self):
self.flakes('import fu; fu.bar(stuff=fu)')
def test_usedInAssignment(self):
self.flakes('import fu; bar=fu')
self.flakes('import fu; n=0; n+=fu')
def test_usedInListComp(self):
self.flakes('import fu; [fu for _ in range(1)]')
self.flakes('import fu; [1 for _ in range(1) if fu]')
def test_redefinedByListComp(self):
self.flakes('import fu; [1 for fu in range(1)]', m.RedefinedWhileUnused)
def test_usedInTryFinally(self):
self.flakes('''
import fu
try: pass
finally: fu
''')
self.flakes('''
import fu
try: fu
finally: pass
''')
def test_usedInWhile(self):
self.flakes('''
import fu
while 0:
fu
''')
self.flakes('''
import fu
while fu: pass
''')
def test_usedInGlobal(self):
self.flakes('''
import fu
def f(): global fu
''', m.UnusedImport)
def test_usedInBackquote(self):
self.flakes('import fu; `fu`')
def test_usedInExec(self):
self.flakes('import fu; exec "print 1" in fu.bar')
def test_usedInLambda(self):
self.flakes('import fu; lambda: fu')
def test_shadowedByLambda(self):
self.flakes('import fu; lambda fu: fu', m.UnusedImport)
def test_usedInSliceObj(self):
self.flakes('import fu; "meow"[::fu]')
def test_unusedInNestedScope(self):
self.flakes('''
def bar():
import fu
fu
''', m.UnusedImport, m.UndefinedName)
def test_methodsDontUseClassScope(self):
self.flakes('''
class bar:
import fu
def fun(self):
fu
''', m.UnusedImport, m.UndefinedName)
def test_nestedFunctionsNestScope(self):
self.flakes('''
def a():
def b():
fu
import fu
''')
def test_nestedClassAndFunctionScope(self):
self.flakes('''
def a():
import fu
class b:
def c(self):
print fu
''')
def test_importStar(self):
self.flakes('from fu import *', m.ImportStarUsed)
def test_packageImport(self):
"""
If a dotted name is imported and used, no warning is reported.
"""
self.flakes('''
import fu.bar
fu.bar
''')
def test_unusedPackageImport(self):
"""
If a dotted name is imported and not used, an unused import warning is
reported.
"""
self.flakes('import fu.bar', m.UnusedImport)
def test_duplicateSubmoduleImport(self):
"""
If a submodule of a package is imported twice, an unused import warning
and a redefined while unused warning are reported.
"""
self.flakes('''
import fu.bar, fu.bar
fu.bar
''', m.RedefinedWhileUnused)
self.flakes('''
import fu.bar
import fu.bar
fu.bar
''', m.RedefinedWhileUnused)
def test_differentSubmoduleImport(self):
"""
If two different submodules of a package are imported, no duplicate
import warning is reported for the package.
"""
self.flakes('''
import fu.bar, fu.baz
fu.bar, fu.baz
''')
self.flakes('''
import fu.bar
import fu.baz
fu.bar, fu.baz
''')
def test_assignRHSFirst(self):
self.flakes('import fu; fu = fu')
self.flakes('import fu; fu, bar = fu')
self.flakes('import fu; [fu, bar] = fu')
self.flakes('import fu; fu += fu')
def test_tryingMultipleImports(self):
self.flakes('''
try:
import fu
except ImportError:
import bar as fu
''')
test_tryingMultipleImports.todo = ''
def test_nonGlobalDoesNotRedefine(self):
self.flakes('''
import fu
def a():
fu = 3
return fu
fu
''')
def test_functionsRunLater(self):
self.flakes('''
def a():
fu
import fu
''')
def test_functionNamesAreBoundNow(self):
self.flakes('''
import fu
def fu():
fu
fu
''', m.RedefinedWhileUnused)
def test_ignoreNonImportRedefinitions(self):
self.flakes('a = 1; a = 2')
def test_importingForImportError(self):
self.flakes('''
try:
import fu
except ImportError:
pass
''')
test_importingForImportError.todo = ''
def test_importedInClass(self):
'''Imports in class scope can be used through self'''
self.flakes('''
class c:
import i
def __init__(self):
self.i
''')
test_importedInClass.todo = 'requires evaluating attribute access'
def test_futureImport(self):
'''__future__ is special'''
self.flakes('from __future__ import division')
self.flakes('''
"docstring is allowed before future import"
from __future__ import division
''')
def test_futureImportFirst(self):
"""
__future__ imports must come before anything else.
"""
self.flakes('''
x = 5
from __future__ import division
''', m.LateFutureImport)
self.flakes('''
from foo import bar
from __future__ import division
bar
''', m.LateFutureImport)
class TestSpecialAll(harness.Test):
    """
    Tests for suppression of unused import warnings by C{__all__}.
    """

    def test_ignoredInFunction(self):
        """
        An C{__all__} definition does not suppress unused import warnings in a
        function scope.
        """
        self.flakes('''
        def foo():
            import bar
            __all__ = ["bar"]
        ''', m.UnusedImport, m.UnusedVariable)

    def test_ignoredInClass(self):
        """
        An C{__all__} definition does not suppress unused import warnings in a
        class scope.
        """
        self.flakes('''
        class foo:
            import bar
            __all__ = ["bar"]
        ''', m.UnusedImport)

    def test_warningSuppressed(self):
        """
        If a name is imported and unused but is named in C{__all__}, no warning
        is reported.
        """
        self.flakes('''
        import foo
        __all__ = ["foo"]
        ''')

    def test_unrecognizable(self):
        """
        If C{__all__} is defined in a way that can't be recognized statically,
        it is ignored.
        """
        # String concatenation and list arithmetic cannot be evaluated
        # without running the code, so the checker ignores these __all__s.
        self.flakes('''
        import foo
        __all__ = ["f" + "oo"]
        ''', m.UnusedImport)
        self.flakes('''
        import foo
        __all__ = [] + ["foo"]
        ''', m.UnusedImport)

    def test_unboundExported(self):
        """
        If C{__all__} includes a name which is not bound, a warning is emitted.
        """
        self.flakes('''
        __all__ = ["foo"]
        ''', m.UndefinedExport)
        # Skip this in __init__.py though, since the rules there are a little
        # different.
        for filename in ["foo/__init__.py", "__init__.py"]:
            self.flakes('''
            __all__ = ["foo"]
            ''', filename=filename)
def test_usedInGenExp(self):
    """
    Using a global in a generator expression results in no warnings.
    """
    # Both the element expression and the `if` filter count as uses.
    self.flakes('import fu; (fu for _ in range(1))')
    self.flakes('import fu; (1 for _ in range(1) if fu)')
def test_redefinedByGenExp(self):
    """
    Re-using a global name as the loop variable for a generator
    expression results in a redefinition warning.
    """
    # The genexp target rebinds `fu` before the import is ever used.
    self.flakes('import fu; (1 for fu in range(1))', m.RedefinedWhileUnused)
def test_usedAsDecorator(self):
    """
    Using a global name in a decorator statement results in no warnings,
    but using an undefined name in a decorator statement results in an
    undefined name warning.
    """
    self.flakes('''
    from interior import decorate
    @decorate
    def f():
        return "hello"
    ''')
    self.flakes('''
    from interior import decorate
    @decorate('value')
    def f():
        return "hello"
    ''')
    # No import of `decorate` here, so the decorator reference is undefined.
    self.flakes('''
    @decorate
    def f():
        return "hello"
    ''', m.UndefinedName)
class Python26Tests(harness.Test):
    """
    Tests for checking of syntax which is valid in Python 2.6 and newer.
    """
    # Class decorators ("@decorate" above a `class` statement) first
    # appeared in Python 2.6, so these tests cannot run on older versions.
    if version_info < (2, 6):
        skip = "Python 2.6 required for class decorator tests."

    def test_usedAsClassDecorator(self):
        """
        Using an imported name as a class decorator results in no warnings,
        but using an undefined name as a class decorator results in an
        undefined name warning.
        """
        self.flakes('''
        from interior import decorate
        @decorate
        class foo:
            pass
        ''')
        self.flakes('''
        from interior import decorate
        @decorate("foo")
        class bar:
            pass
        ''')
        self.flakes('''
        @decorate
        class foo:
            pass
        ''', m.UndefinedName)
| mit |
JoepDriesen/Cardgame-Generator | create_preview.py | 1 | 3469 | import argparse, os, random
from generator.parser import parse_types, parse_cards
from generator.utils import get_image_size
from PIL import Image
def generate():
    """Command-line entry point: assemble a preview sheet of rendered cards.

    Parses the CLI options, loads the card type and card definitions, and
    pastes a random selection of the previously generated card images into
    a single rows x columns preview PNG under the output folder.

    Raises:
        SystemExit: with status 1 when the output path is unusable or no
            cards were found.
    """
    parser = argparse.ArgumentParser(
            description='Generate game cards.',
            formatter_class=argparse.ArgumentDefaultsHelpFormatter )

    parser.add_argument( '-t', '--types-folder', action='store', default='./assets/types/', help='The location of the folder containing the xml property files of the available game card types.' )
    parser.add_argument( '-f', '--fonts-folder', action='store', default='./assets/fonts/', help='The location of the folder containing the font files used by the card types.' )
    parser.add_argument( '-i', '--images-folder', action='store', default='./assets/images/', help='The location of the folder containing the global images used by the card types.' )
    parser.add_argument( '-c', '--cards-folder', action='store', default='./assets/cards/', help='The location of the folder containing the xml property files of the game cards to be generated.' )
    parser.add_argument( '-o', '--output-folder', action='store', default='./output/', help='The location of the folder in which to store generated cards.' )
    parser.add_argument( '-l', '--language', action='store', default='en', help='The language to use when generating cards.' )
    # type=int makes argparse reject non-numeric values up front instead of
    # crashing later in the int() casts.
    parser.add_argument( '-r', '--rows', action='store', type=int, default=3, help='The amount of card rows in the preview image. Minimum 1.' )
    parser.add_argument( '-m', '--columns', action='store', type=int, default=10, help='The amount of columns in the preview image. Minimum 1.' )
    parser.add_argument( '-d', '--debug', action='store_true', help='Show debug output' )

    args = parser.parse_args()

    types_folder = os.path.abspath( args.types_folder )
    fonts_folder = os.path.abspath( args.fonts_folder )
    images_folder = os.path.abspath( args.images_folder )
    cards_folder = os.path.abspath( args.cards_folder )
    output_folder = os.path.abspath( args.output_folder )

    if os.path.exists( output_folder ) and not os.path.isdir( output_folder ):
        print( "Error: Could not write to {} because it exists and is not a directory.".format( output_folder ) )
        # Exit with a non-zero status: a bare exit() reports success (0) to
        # the shell even though nothing was generated.
        raise SystemExit( 1 )

    card_types = parse_types( types_folder, fonts_folder, images_folder, debug=args.debug )
    cards = parse_cards( cards_folder, args.language, card_types, debug=args.debug )

    if not cards:
        # Guard against an IndexError on cards[0] below.
        print( "Error: No cards found in {}.".format( cards_folder ) )
        raise SystemExit( 1 )

    for card in cards:
        card.image_file = os.path.join( output_folder, '{}.png'.format( card.name ) )

    if not os.path.exists( output_folder ):
        # makedirs also creates any missing parent directories.
        os.makedirs( output_folder )

    print( 'Creating preview image...' )

    # All cards are assumed to share the dimensions of the first card image.
    image_width, image_height = get_image_size( cards[0].image_file )
    columns = max( 1, args.columns )
    rows = max( 1, args.rows )

    output_file = os.path.join( output_folder, 'preview.png' )
    im = Image.new( 'RGB', ( image_width * columns, image_height * rows ) )

    i, j = 0, 0
    random.shuffle( cards )
    for card in cards:
        if i >= columns:
            # Row full: wrap to the first column of the next row.
            i = 0
            j += 1
        if j >= rows:
            # The preview grid is full; ignore the remaining cards.
            break

        # Add card image file
        card_im = Image.open( card.image_file )
        im.paste( card_im, ( i * image_width, j * image_height ) )
        if args.debug:
            print( '    Pasted image {}'.format( j * columns + i + 1 ) )

        i += 1

    im.save( output_file )
if __name__ == '__main__':
generate()
| gpl-3.0 |
deput/leetcode | length_of_last_word.py | 2 | 1186 | # Since Mon Mar 2 17:00:43 CST 2015
"""
https://oj.leetcode.com/problems/length-of-last-word/
Given a string s consists of upper/lower-case alphabets and empty space characters ' ', return the length of last word in the string.
If the last word does not exist, return 0.
Note: A word is defined as a character sequence consists of non-space characters only.
For example,
Given s = "Hello World",
return 5.
"""
class Solution:
    # @param s, a string
    # @return an integer
    def lengthOfLastWord(self, s):
        """Return the length of the last word in s, or 0 if there is none.

        A word is a maximal run of non-space characters; only the space
        character ' ' delimits words (per the problem statement), so only
        spaces -- not arbitrary whitespace -- are stripped.
        """
        # Drop trailing spaces, then measure from the last remaining space.
        # rfind returns -1 when no space is left, so the whole (trimmed)
        # string counts as the last word; an all-space input trims to ''
        # and yields 0.
        trimmed = s.rstrip(' ')
        return len(trimmed) - trimmed.rfind(' ') - 1
if __name__ == '__main__':
    # Smoke tests: (input string, expected length of the last word).
    cases = [
        ('   ', 0),
        (' ', 0),
        ('s', 1),
        ('s ', 1),
        (' s ', 1),
        ('Hello world', 5),
        ('Hello   ', 5),
    ]
    solver = Solution()
    for text, expected in cases:
        assert solver.lengthOfLastWord(text) == expected
| mit |
ashengwang/node-gyp | gyp/pylib/gyp/xml_fix.py | 2767 | 2174 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Applies a fix to CR LF TAB handling in xml.dom.
Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293
Working around this: http://bugs.python.org/issue5752
TODO(bradnelson): Consider dropping this when we drop XP support.
"""
import xml.dom.minidom
def _Replacement_write_data(writer, data, is_attrib=False):
"""Writes datachars to writer."""
data = data.replace("&", "&").replace("<", "<")
data = data.replace("\"", """).replace(">", ">")
if is_attrib:
data = data.replace(
"\r", "
").replace(
"\n", "
").replace(
"\t", "	")
writer.write(data)
def _Replacement_writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = attrs.keys()
a_names.sort()
for a_name in a_names:
writer.write(" %s=\"" % a_name)
_Replacement_write_data(writer, attrs[a_name].value, is_attrib=True)
writer.write("\"")
if self.childNodes:
writer.write(">%s" % newl)
for node in self.childNodes:
node.writexml(writer, indent + addindent, addindent, newl)
writer.write("%s</%s>%s" % (indent, self.tagName, newl))
else:
writer.write("/>%s" % newl)
class XmlFix(object):
  """Object to manage temporary patching of xml.dom.minidom.

  Constructing an instance installs the replacement serializer functions
  globally; Cleanup() (also run from __del__) restores the originals.
  """

  def __init__(self):
    # Preserve current xml.dom.minidom functions.
    self.write_data = xml.dom.minidom._write_data
    self.writexml = xml.dom.minidom.Element.writexml
    # Inject replacement versions of a function and a method.
    xml.dom.minidom._write_data = _Replacement_write_data
    xml.dom.minidom.Element.writexml = _Replacement_writexml

  def Cleanup(self):
    # self.write_data doubles as the "currently patched" flag, so a second
    # Cleanup() call (e.g. explicit call followed by __del__) is a no-op.
    if self.write_data:
      xml.dom.minidom._write_data = self.write_data
      xml.dom.minidom.Element.writexml = self.writexml
      self.write_data = None

  def __del__(self):
    # Safety net: undo the monkey-patch when the fixer is garbage-collected.
    self.Cleanup()
| mit |
tudorvio/nova | nova/tests/functional/v3/test_servers_ips.py | 28 | 1782 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.v3 import test_servers
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
class ServersIpsJsonTest(test_servers.ServersSampleBase):
    """API sample tests for the server IPs endpoints (servers/<id>/ips)."""
    extends_name = 'core_only'
    sample_dir = 'server-ips'
    extra_extensions_to_load = ["os-access-ips"]
    # TODO(park): Overriding '_api_version' till all functional tests
    # are merged between v2 and v2.1. After that base class variable
    # itself can be changed to 'v2'
    _api_version = 'v2'

    def test_get(self):
        # Test getting a server's IP information.
        uuid = self._post_server()
        response = self._do_get('servers/%s/ips' % uuid)
        subs = self._get_regexes()
        # Compares the response body against the stored sample template.
        self._verify_response('server-ips-resp', subs, response, 200)

    def test_get_by_network(self):
        # Test getting a server's IP information by network id.
        uuid = self._post_server()
        response = self._do_get('servers/%s/ips/private' % uuid)
        subs = self._get_regexes()
        self._verify_response('server-ips-network-resp', subs, response, 200)
| apache-2.0 |
MBeyens/pyAmpli | pyampli/make_vcf.py | 1 | 4588 | #!/usr/bin/python
import os, logging, pysam, vcf
def open_new_vcf(new_vcf_name):
    """Re-open an existing VCF and return a PyVCF Writer bound to it.

    The Reader is constructed first so the Writer can copy its header
    metadata. NOTE(review): opening the same path in 'w' mode truncates the
    file, so this assumes the Reader has already parsed the header it
    needs -- confirm against PyVCF's header-parsing behavior.
    """
    vcf_out_reader = vcf.Reader(open(new_vcf_name, 'r'))
    vcf_out_writer = vcf.Writer(open(new_vcf_name, 'w'), vcf_out_reader)
    return vcf_out_writer
def create_tmp_vcf(filter_modus, filename, outdir, vcf_path, variant, variant_number):
    """Write a single-variant temporary VCF under <outdir>/tmp/.

    The header of the input VCF (vcf_path) is copied and extended with the
    amplicon INFO fields for *filter_modus* ('germline' or 'somatic');
    *variant* (a PyVCF record) is then written as the only record.
    Returns the path of the temporary VCF.
    """
    tmp_results_dir = os.path.abspath(outdir) + '/tmp/'
    vcf_file = pysam.VariantFile(vcf_path)
    new_header = vcf_file.header
    if filter_modus == 'germline':
        new_header = create_germline_vcf(new_header)
    if filter_modus == 'somatic':
        new_header = create_somatic_vcf(new_header)
    # The name encodes source VCF, filter tag and variant number so
    # per-variant temp files do not collide.
    new_vcf_name = tmp_results_dir + os.path.basename(vcf_path)[:-3] + filename + '_' + str(variant_number) + '.vcf'
    new_vcf_file = pysam.VariantFile(new_vcf_name, 'w', header=new_header)
    new_vcf_file.close()
    logging.debug('Created tmp vcf file (%s)', new_vcf_name)
    # Re-open with PyVCF (header written by pysam above) to append the record.
    vcf_out_reader = vcf.Reader(open(new_vcf_name, 'r'))
    vcf_out_writer = vcf.Writer(open(new_vcf_name, 'w'), vcf_out_reader)
    vcf_out_writer.write_record(variant)
    logging.debug('Tmp vcf file ready for writing')
    return new_vcf_name
def create_germline_vcf(new_header):
    """Register the germline amplicon INFO fields on a VCF header.

    Mutates *new_header* (a pysam header object) in place and returns it.
    """
    logging.debug('Created germline file header')
    germline_fields = (
        ('AmpFA', 1, "Float",
         "Amplicon Fraction for Alternate allele. Combined for multiallelic variants"),
        ('AmpFR', 1, "Float", "Amplicon Fraction for Reference allele."),
        ('AmpCR', 1, "Integer", "Amplicon Count for Reference allele."),
        ('AmpCA', 1, "Integer", "Amplicon Count for Alternate allele."),
        ('AmpC', 1, "Integer", "Total Amplicon Count"),
        ('AmpF_OA', 1, "Integer",
         "Amplicon Count offset compared to AD field, for alternate allele. Combined for multiallelic variants"),
        ('AmpF_OR', 1, "Integer",
         "Amplicon Count offset compared to AD field, for reference allele. Combined for multiallelic variants"),
    )
    for field_id, number, field_type, description in germline_fields:
        new_header.info.add(field_id, number, field_type, description)
    return new_header
def create_somatic_vcf(new_header):
    """Register the somatic (normal/tumor) amplicon INFO fields on a VCF header.

    Mutates *new_header* (a pysam header object) in place and returns it.
    Fields carry an _n (normal) or _t (tumor) suffix.
    """
    logging.debug('Created somatic vcf file header')
    somatic_fields = (
        ('AmpFA_n', 1, "Float",
         "Amplicon Fraction for Alternate allele of normal. Combined for multiallelic variants"),
        ('AmpFR_n', 1, "Float", "Amplicon Fraction for Reference allele of normal."),
        ('AmpCR_n', 1, "Integer", "Amplicon Count for Reference allele of normal."),
        ('AmpCA_n', 1, "Integer", "Amplicon Count for Alternate allele of normal."),
        ('AmpC_n', 1, "Integer", "Total Amplicon Count"),
        ('AmpFA_t', 1, "Float",
         "Amplicon Fraction for Alternate allele of tumor. Combined for multiallelic variants"),
        ('AmpFR_t', 1, "Float", "Amplicon Fraction for Reference allele of tumor."),
        ('AmpCR_t', 1, "Integer", "Amplicon Count for Reference allele of tumor."),
        ('AmpCA_t', 1, "Integer", "Amplicon Count for Alternate allele of tumor."),
        ('AmpC_t', 1, "Integer", "Total Amplicon Count of tumor"),
        ('AmpF_OA', 1, "Integer",
         "Amplicon Count offset compared to AD field, for alternate allele. Combined for multiallelic variants"),
        ('AmpF_OR', 1, "Integer",
         "Amplicon Count offset compared to AD field, for reference allele. Combined for multiallelic variants"),
    )
    for field_id, number, field_type, description in somatic_fields:
        new_header.info.add(field_id, number, field_type, description)
    return new_header
def create_vcf(filter_modus, filename, outdir, vcf_path, input_arguments):
    """Create the (initially empty) output VCF for filtered variants.

    Copies the input VCF header, extends it with the amplicon INFO fields
    for *filter_modus*, writes it to <outdir>/<input-stem><filename>.vcf,
    stores the path under input_arguments['filter_vcf'] and returns the
    updated input_arguments dict.
    """
    vcf_file = pysam.VariantFile(vcf_path)
    new_header = vcf_file.header
    if filter_modus == 'germline':
        new_header = create_germline_vcf(new_header)
    if filter_modus == 'somatic':
        new_header = create_somatic_vcf(new_header)
    new_vcf_name = outdir + os.path.basename(vcf_path)[:-3] + filename + '.vcf'
    # Writing the header is all that happens here; records are appended later.
    new_vcf_file = pysam.VariantFile(new_vcf_name, 'w', header=new_header)
    new_vcf_file.close()
    input_arguments['filter_vcf'] = new_vcf_name
    logging.info('Created new %s vcf for filtered variants (%s)', filter_modus, new_vcf_name)
    # vcf_out_writer = open_new_vcf(new_vcf_name)
    logging.debug('New vcf ready for processing')
    return input_arguments
def open_unfiltered_vcf(vcf_path):
    """Open *vcf_path* with PyVCF and return the Reader for iteration."""
    unfiltered_vcf_file = vcf.Reader(open(vcf_path, 'r'))
    logging.debug('Opened unfiltered VCF')
    return unfiltered_vcf_file
| gpl-3.0 |
wunderlins/learning | python/django/lib/python2.7/site-packages/django/contrib/gis/maps/google/overlays.py | 117 | 11950 | from __future__ import unicode_literals
from functools import total_ordering
from django.contrib.gis.geos import (
LinearRing, LineString, Point, Polygon, fromstr,
)
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import html_safe
@html_safe
@python_2_unicode_compatible
class GEvent(object):
    """
    A Python wrapper for the Google GEvent object.

    Events can be attached to any object derived from GOverlayBase with the
    add_event() call.

    For more information please see the Google Maps API Reference:
    https://developers.google.com/maps/documentation/javascript/reference#event

    Example:

      from django.shortcuts import render
      from django.contrib.gis.maps.google import GoogleMap, GEvent, GPolyline

      def sample_request(request):
          polyline = GPolyline('LINESTRING(101 26, 112 26, 102 31)')
          event = GEvent('click',
                         'function() { location.href = "http://www.google.com"}')
          polyline.add_event(event)
          return render(request, 'mytemplate.html', {
              'google': GoogleMap(polylines=[polyline]),
          })
    """

    def __init__(self, event, action):
        """
        Initializes a GEvent object.

        Parameters:

        event:
          string for the event, such as 'click'. The event must be a valid
          event for the object in the Google Maps API.
          There is no validation of the event type within Django.

        action:
          string containing a Javascript function, such as
          'function() { location.href = "newurl";}'
          The string must be a valid Javascript function. Again there is no
          validation of the function within Django.
        """
        self.event = event
        self.action = action

    def __str__(self):
        "Returns the parameter part of a GEvent."
        # Interpolated into the GEvent.addListener(...) JS call by templates.
        return '"%s", %s' % (self.event, self.action)
@html_safe
@python_2_unicode_compatible
class GOverlayBase(object):
    """Shared behavior for the Google Maps overlay wrappers."""

    def __init__(self):
        # Each overlay keeps its own list of attached GEvent instances.
        self.events = []

    def latlng_from_coords(self, coords):
        "Generates a JavaScript array of GLatLng objects for the given coordinates."
        pairs = ['new GLatLng(%s,%s)' % (y, x) for x, y in coords]
        return '[%s]' % ','.join(pairs)

    def add_event(self, event):
        "Attaches a GEvent to the overlay object."
        self.events.append(event)

    def __str__(self):
        "The string representation is the JavaScript API call."
        return '%s(%s)' % (self.__class__.__name__, self.js_params)
class GPolygon(GOverlayBase):
    """
    A Python wrapper for the Google GPolygon object. For more information
    please see the Google Maps API Reference:
    https://developers.google.com/maps/documentation/javascript/reference#Polygon
    """

    def __init__(self, poly,
                 stroke_color='#0000ff', stroke_weight=2, stroke_opacity=1,
                 fill_color='#0000ff', fill_opacity=0.4):
        """
        The GPolygon object initializes on a GEOS Polygon or a parameter that
        may be instantiated into GEOS Polygon. Please note that this will not
        depict a Polygon's internal rings.

        Keyword Options:

          stroke_color:
            The color of the polygon outline. Defaults to '#0000ff' (blue).

          stroke_weight:
            The width of the polygon outline, in pixels. Defaults to 2.

          stroke_opacity:
            The opacity of the polygon outline, between 0 and 1. Defaults to 1.

          fill_color:
            The color of the polygon fill. Defaults to '#0000ff' (blue).

          fill_opacity:
            The opacity of the polygon fill. Defaults to 0.4.
        """
        # Coerce WKT strings and coordinate sequences into a GEOS Polygon.
        if isinstance(poly, six.string_types):
            poly = fromstr(poly)
        if isinstance(poly, (tuple, list)):
            poly = Polygon(poly)
        if not isinstance(poly, Polygon):
            raise TypeError('GPolygon may only initialize on GEOS Polygons.')

        # Getting the envelope of the input polygon (used for automatically
        # determining the zoom level).
        self.envelope = poly.envelope

        # Translating the coordinates into a JavaScript array of
        # Google `GLatLng` objects.
        self.points = self.latlng_from_coords(poly.shell.coords)

        # Stroke settings.
        self.stroke_color, self.stroke_opacity, self.stroke_weight = stroke_color, stroke_opacity, stroke_weight

        # Fill settings.
        self.fill_color, self.fill_opacity = fill_color, fill_opacity

        super(GPolygon, self).__init__()

    @property
    def js_params(self):
        # Argument order matches the GPolygon JavaScript constructor.
        return '%s, "%s", %s, %s, "%s", %s' % (self.points, self.stroke_color, self.stroke_weight, self.stroke_opacity,
                                               self.fill_color, self.fill_opacity)
class GPolyline(GOverlayBase):
    """
    A Python wrapper for the Google GPolyline object. For more information
    please see the Google Maps API Reference:
    https://developers.google.com/maps/documentation/javascript/reference#Polyline
    """

    def __init__(self, geom, color='#0000ff', weight=2, opacity=1):
        """
        The GPolyline object may be initialized on GEOS LineString, LinearRing,
        and Polygon objects (internal rings not supported) or a parameter that
        may be instantiated into one of the above geometries.

        Keyword Options:

          color:
            The color to use for the polyline. Defaults to '#0000ff' (blue).

          weight:
            The width of the polyline, in pixels. Defaults to 2.

          opacity:
            The opacity of the polyline, between 0 and 1. Defaults to 1.
        """
        # If a GEOS geometry isn't passed in, try to construct one.
        if isinstance(geom, six.string_types):
            geom = fromstr(geom)
        if isinstance(geom, (tuple, list)):
            geom = Polygon(geom)

        # Generating the lat/lng coordinate pairs.
        if isinstance(geom, (LineString, LinearRing)):
            self.latlngs = self.latlng_from_coords(geom.coords)
        elif isinstance(geom, Polygon):
            # Only the exterior ring of a polygon is rendered.
            self.latlngs = self.latlng_from_coords(geom.shell.coords)
        else:
            raise TypeError('GPolyline may only initialize on GEOS LineString, LinearRing, and/or Polygon geometries.')

        # Getting the envelope for automatic zoom determination.
        self.envelope = geom.envelope
        self.color, self.weight, self.opacity = color, weight, opacity
        super(GPolyline, self).__init__()

    @property
    def js_params(self):
        return '%s, "%s", %s, %s' % (self.latlngs, self.color, self.weight, self.opacity)
@total_ordering
class GIcon(object):
    """
    Creates a GIcon object to pass into a Gmarker object.

    The keyword arguments map to instance attributes of the same name. These,
    in turn, correspond to a subset of the attributes of the official GIcon
    javascript object:

    https://developers.google.com/maps/documentation/javascript/reference#Icon

    Because a Google map often uses several different icons, a name field has
    been added to the required arguments.

    Required Arguments:
        varname:
            A string which will become the basis for the js variable name of
            the marker, for this reason, your code should assign a unique
            name for each GIcon you instantiate, otherwise there will be
            name space collisions in your javascript.

    Keyword Options:
        image:
            The url of the image to be used as the icon on the map defaults
            to 'G_DEFAULT_ICON'

        iconsize:
            a tuple representing the pixel size of the foreground (not the
            shadow) image of the icon, in the format: (width, height) ex.:

            GIcon('fast_food',
                  image="/media/icon/star.png",
                  iconsize=(15,10))

            Would indicate your custom icon was 15px wide and 10px height.

        shadow:
            the url of the image of the icon's shadow

        shadowsize:
            a tuple representing the pixel size of the shadow image, format is
            the same as ``iconsize``

        iconanchor:
            a tuple representing the pixel coordinate relative to the top left
            corner of the icon image at which this icon is anchored to the map.
            In (x, y) format.  x increases to the right in the Google Maps
            coordinate system and y increases downwards in the Google Maps
            coordinate system.)

        infowindowanchor:
            The pixel coordinate relative to the top left corner of the icon
            image at which the info window is anchored to this icon.
    """
    def __init__(self, varname, image=None, iconsize=None,
                 shadow=None, shadowsize=None, iconanchor=None,
                 infowindowanchor=None):
        self.varname = varname
        self.image = image
        self.iconsize = iconsize
        self.shadow = shadow
        self.shadowsize = shadowsize
        self.iconanchor = iconanchor
        self.infowindowanchor = infowindowanchor

    def __eq__(self, other):
        # Return NotImplemented for non-GIcon operands so Python can fall
        # back to its default handling instead of raising AttributeError
        # on a missing `varname` attribute.
        if not isinstance(other, GIcon):
            return NotImplemented
        return self.varname == other.varname

    def __lt__(self, other):
        # Same guard as __eq__; @total_ordering derives the remaining
        # comparison methods from __eq__ and __lt__.
        if not isinstance(other, GIcon):
            return NotImplemented
        return self.varname < other.varname

    def __hash__(self):
        # XOR with hash of GIcon type so that hash('varname') won't
        # equal hash(GIcon('varname')).
        return hash(self.__class__) ^ hash(self.varname)
class GMarker(GOverlayBase):
    """
    A Python wrapper for the Google GMarker object. For more information
    please see the Google Maps API Reference:
    https://developers.google.com/maps/documentation/javascript/reference#Marker

    Example:

      from django.shortcuts import render
      from django.contrib.gis.maps.google.overlays import GMarker, GEvent

      def sample_request(request):
          marker = GMarker('POINT(101 26)')
          event = GEvent('click',
                         'function() { location.href = "http://www.google.com"}')
          marker.add_event(event)
          return render(request, 'mytemplate.html', {
              'google': GoogleMap(markers=[marker]),
          })
    """
    def __init__(self, geom, title=None, draggable=False, icon=None):
        """
        The GMarker object may initialize on GEOS Points or a parameter
        that may be instantiated into a GEOS point. Keyword options map to
        GMarkerOptions -- so far only the title option is supported.

        Keyword Options:
         title:
           Title option for GMarker, will be displayed as a tooltip.

         draggable:
           Draggable option for GMarker, disabled by default.
        """
        # If a GEOS geometry isn't passed in, try to construct one.
        if isinstance(geom, six.string_types):
            geom = fromstr(geom)
        if isinstance(geom, (tuple, list)):
            geom = Point(geom)
        if isinstance(geom, Point):
            self.latlng = self.latlng_from_coords(geom.coords)
        else:
            raise TypeError('GMarker may only initialize on GEOS Point geometry.')
        # Getting the envelope for automatic zoom determination.
        self.envelope = geom.envelope
        # TODO: Add support for more GMarkerOptions
        self.title = title
        self.draggable = draggable
        self.icon = icon
        super(GMarker, self).__init__()

    def latlng_from_coords(self, coords):
        # Overrides the base implementation: a marker is a single GLatLng,
        # not a JavaScript array of them.
        return 'new GLatLng(%s,%s)' % (coords[1], coords[0])

    def options(self):
        # Builds the GMarkerOptions object literal for the JS constructor.
        result = []
        if self.title:
            result.append('title: "%s"' % self.title)
        if self.icon:
            result.append('icon: %s' % self.icon.varname)
        if self.draggable:
            result.append('draggable: true')
        return '{%s}' % ','.join(result)

    @property
    def js_params(self):
        return '%s, %s' % (self.latlng, self.options())
| gpl-2.0 |
rizumu/django | tests/view_tests/models.py | 281 | 1329 | """
Regression tests for Django built-in views.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
    """Author model used by the built-in view regression tests."""
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # Hard-coded URL pattern matched by the tests' URLconf.
        return '/authors/%s/' % self.id
@python_2_unicode_compatible
class BaseArticle(models.Model):
    """
    An abstract article Model so that we can create article models with and
    without a get_absolute_url method (for create_update generic views tests).
    """
    title = models.CharField(max_length=100)
    slug = models.SlugField()
    author = models.ForeignKey(Author, models.CASCADE)

    class Meta:
        # Abstract: no table is created for this model; subclasses inherit
        # the fields above.
        abstract = True

    def __str__(self):
        return self.title
class Article(BaseArticle):
    # Concrete article WITHOUT a get_absolute_url method (contrast with
    # UrlArticle below).
    date_created = models.DateTimeField()
class UrlArticle(BaseArticle):
    """
    An Article class with a get_absolute_url defined.
    """
    date_created = models.DateTimeField()

    def get_absolute_url(self):
        return '/urlarticles/%s/' % self.slug
    # NOTE(review): `purge` looks like a flag consumed by cache-related
    # tests -- confirm against the test suite before relying on it.
    get_absolute_url.purge = True
class DateArticle(BaseArticle):
    """
    An article Model with a DateField instead of DateTimeField,
    for testing #7602
    """
    date_created = models.DateField()
| bsd-3-clause |
shinfan/api-client-staging | generated/python/gapic-google-cloud-speech-v1/google/cloud/gapic/speech/v1/speech_client.py | 7 | 12642 | # Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# EDITING INSTRUCTIONS
# This file was generated from the file
# https://github.com/google/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto,
# and updates to that file get reflected here through a refresh process.
# For the short term, the refresh process will only be runnable by Google engineers.
#
# The only allowed edits are to method and file documentation. A 3-way
# merge preserves those additions if the generated source changes.
"""Accesses the google.cloud.speech.v1 Speech API."""
import collections
import json
import os
import pkg_resources
import platform
from google.gapic.longrunning import operations_client
from google.gax import api_callable
from google.gax import config
from google.gax import path_template
from google.gax.utils import oneof
import google.gax
from google.cloud.gapic.speech.v1 import enums
from google.cloud.proto.speech.v1 import cloud_speech_pb2
class SpeechClient(object):
    """Service that implements Google Cloud Speech API."""

    SERVICE_ADDRESS = 'speech.googleapis.com'
    """The default address of the service."""

    DEFAULT_SERVICE_PORT = 443
    """The default port of the service."""

    # The scopes needed to make gRPC calls to all of the methods defined in
    # this service
    _ALL_SCOPES = ('https://www.googleapis.com/auth/cloud-platform', )

    def __init__(self,
                 service_path=SERVICE_ADDRESS,
                 port=DEFAULT_SERVICE_PORT,
                 channel=None,
                 credentials=None,
                 ssl_credentials=None,
                 scopes=None,
                 client_config=None,
                 app_name=None,
                 app_version='',
                 lib_name=None,
                 lib_version='',
                 metrics_headers=()):
        """Constructor.

        Args:
          service_path (string): The domain name of the API remote host.
          port (int): The port on which to connect to the remote host.
          channel (:class:`grpc.Channel`): A ``Channel`` instance through
            which to make calls.
          credentials (object): The authorization credentials to attach to
            requests. These credentials identify this application to the
            service.
          ssl_credentials (:class:`grpc.ChannelCredentials`): A
            ``ChannelCredentials`` instance for use with an SSL-enabled
            channel.
          scopes (list[string]): A list of OAuth2 scopes to attach to requests.
          client_config (dict):
            A dictionary for call options for each method. See
            :func:`google.gax.construct_settings` for the structure of
            this data. Falls back to the default config if not specified
            or the specified config is missing data points.
          app_name (string): The name of the application calling
            the service. Recommended for analytics purposes.
          app_version (string): The version of the application calling
            the service. Recommended for analytics purposes.
          lib_name (string): The API library software used for calling
            the service. (Unless you are writing an API client itself,
            leave this as default.)
          lib_version (string): The API library software version used
            for calling the service. (Unless you are writing an API client
            itself, leave this as default.)
          metrics_headers (dict): A dictionary of values for tracking
            client library metrics. Ultimately serializes to a string
            (e.g. 'foo/1.2.3 bar/3.14.1'). This argument should be
            considered private.

        Returns:
          A SpeechClient object.
        """
        # NOTE(review): app_name/app_version are not referenced in this body;
        # presumably kept for backward compatibility of the signature.

        # Unless the calling application specifically requested
        # OAuth scopes, request everything.
        if scopes is None:
            scopes = self._ALL_SCOPES

        # Initialize an empty client config, if none is set.
        if client_config is None:
            client_config = {}

        # Initialize metrics_headers as an ordered dictionary
        # (cuts down on cardinality of the resulting string slightly).
        metrics_headers = collections.OrderedDict(metrics_headers)
        metrics_headers['gl-python'] = platform.python_version()

        # The library may or may not be set, depending on what is
        # calling this client. Newer client libraries set the library name
        # and version.
        if lib_name:
            metrics_headers[lib_name] = lib_version

        # Finally, track the GAPIC package version.
        metrics_headers['gapic'] = pkg_resources.get_distribution(
            'gapic-google-cloud-speech-v1', ).version

        # Load the configuration defaults (shipped alongside this module).
        default_client_config = json.loads(
            pkg_resources.resource_string(
                __name__, 'speech_client_config.json').decode())
        defaults = api_callable.construct_settings(
            'google.cloud.speech.v1.Speech',
            default_client_config,
            client_config,
            config.STATUS_CODE_NAMES,
            metrics_headers=metrics_headers, )
        self.speech_stub = config.create_stub(
            cloud_speech_pb2.SpeechStub,
            channel=channel,
            service_path=service_path,
            service_port=port,
            credentials=credentials,
            scopes=scopes,
            ssl_credentials=ssl_credentials)

        # Auxiliary client used by long_running_recognize() to wrap the
        # returned google.longrunning Operation; configured identically.
        self.operations_client = operations_client.OperationsClient(
            service_path=service_path,
            port=port,
            channel=channel,
            credentials=credentials,
            ssl_credentials=ssl_credentials,
            scopes=scopes,
            client_config=client_config,
            metrics_headers=metrics_headers, )

        # Bind each RPC to its retry/timeout settings from the config.
        self._recognize = api_callable.create_api_call(
            self.speech_stub.Recognize, settings=defaults['recognize'])
        self._long_running_recognize = api_callable.create_api_call(
            self.speech_stub.LongRunningRecognize,
            settings=defaults['long_running_recognize'])
        self._streaming_recognize = api_callable.create_api_call(
            self.speech_stub.StreamingRecognize,
            settings=defaults['streaming_recognize'])

    # Service calls
    def recognize(self, config, audio, options=None):
        """
        Performs synchronous speech recognition: receive results after all audio
        has been sent and processed.

        Example:
          >>> from google.cloud.gapic.speech.v1 import speech_client
          >>> from google.cloud.gapic.speech.v1 import enums
          >>> from google.cloud.proto.speech.v1 import cloud_speech_pb2
          >>> client = speech_client.SpeechClient()
          >>> encoding = enums.RecognitionConfig.AudioEncoding.FLAC
          >>> sample_rate_hertz = 44100
          >>> language_code = 'en-US'
          >>> config = cloud_speech_pb2.RecognitionConfig(encoding=encoding, sample_rate_hertz=sample_rate_hertz, language_code=language_code)
          >>> uri = 'gs://bucket_name/file_name.flac'
          >>> audio = cloud_speech_pb2.RecognitionAudio(uri=uri)
          >>> response = client.recognize(config, audio)

        Args:
          config (:class:`google.cloud.proto.speech.v1.cloud_speech_pb2.RecognitionConfig`): *Required* Provides information to the recognizer that specifies how to
            process the request.
          audio (:class:`google.cloud.proto.speech.v1.cloud_speech_pb2.RecognitionAudio`): *Required* The audio data to be recognized.
          options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g, timeout, retries etc.

        Returns:
          A :class:`google.cloud.proto.speech.v1.cloud_speech_pb2.RecognizeResponse` instance.

        Raises:
          :exc:`google.gax.errors.GaxError` if the RPC is aborted.
          :exc:`ValueError` if the parameters are invalid.
        """
        # Create the request object.
        request = cloud_speech_pb2.RecognizeRequest(config=config, audio=audio)
        return self._recognize(request, options)

    def long_running_recognize(self, config, audio, options=None):
        """
        Performs asynchronous speech recognition: receive results via the
        google.longrunning.Operations interface. Returns either an
        ``Operation.error`` or an ``Operation.response`` which contains
        a ``LongRunningRecognizeResponse`` message.

        Example:
          >>> from google.cloud.gapic.speech.v1 import speech_client
          >>> from google.cloud.gapic.speech.v1 import enums
          >>> from google.cloud.proto.speech.v1 import cloud_speech_pb2
          >>> client = speech_client.SpeechClient()
          >>> encoding = enums.RecognitionConfig.AudioEncoding.FLAC
          >>> sample_rate_hertz = 44100
          >>> language_code = 'en-US'
          >>> config = cloud_speech_pb2.RecognitionConfig(encoding=encoding, sample_rate_hertz=sample_rate_hertz, language_code=language_code)
          >>> uri = 'gs://bucket_name/file_name.flac'
          >>> audio = cloud_speech_pb2.RecognitionAudio(uri=uri)
          >>> response = client.long_running_recognize(config, audio)
          >>>
          >>> def callback(operation_future):
          >>>     # Handle result.
          >>>     result = operation_future.result()
          >>>
          >>> response.add_done_callback(callback)
          >>>
          >>> # Handle metadata.
          >>> metadata = response.metadata()

        Args:
          config (:class:`google.cloud.proto.speech.v1.cloud_speech_pb2.RecognitionConfig`): *Required* Provides information to the recognizer that specifies how to
            process the request.
          audio (:class:`google.cloud.proto.speech.v1.cloud_speech_pb2.RecognitionAudio`): *Required* The audio data to be recognized.
          options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g, timeout, retries etc.

        Returns:
          A :class:`google.gax._OperationFuture` instance.

        Raises:
          :exc:`google.gax.errors.GaxError` if the RPC is aborted.
          :exc:`ValueError` if the parameters are invalid.
        """
        # Create the request object.
        request = cloud_speech_pb2.LongRunningRecognizeRequest(
            config=config, audio=audio)
        # Wrap the raw Operation in a future that polls via operations_client
        # and decodes response/metadata with the given protobuf types.
        return google.gax._OperationFuture(
            self._long_running_recognize(request,
                                         options), self.operations_client,
            cloud_speech_pb2.LongRunningRecognizeResponse,
            cloud_speech_pb2.LongRunningRecognizeMetadata, options)

    def streaming_recognize(self, requests, options=None):
        """
        Performs bidirectional streaming speech recognition: receive results while
        sending audio. This method is only available via the gRPC API (not REST).

        EXPERIMENTAL: This method interface might change in the future.

        Example:
          >>> from google.cloud.gapic.speech.v1 import speech_client
          >>> from google.cloud.proto.speech.v1 import cloud_speech_pb2
          >>> client = speech_client.SpeechClient()
          >>> request = cloud_speech_pb2.StreamingRecognizeRequest()
          >>> requests = [request]
          >>> for element in client.streaming_recognize(requests):
          >>>     # process element
          >>>     pass

        Args:
          requests (iterator[:class:`google.cloud.proto.speech.v1.cloud_speech_pb2.StreamingRecognizeRequest`]): The input objects.
          options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g, timeout, retries etc.

        Returns:
          iterator[:class:`google.cloud.proto.speech.v1.cloud_speech_pb2.StreamingRecognizeResponse`].

        Raises:
          :exc:`google.gax.errors.GaxError` if the RPC is aborted.
          :exc:`ValueError` if the parameters are invalid.
        """
        return self._streaming_recognize(requests, options)
| bsd-3-clause |
philipl/xbmc | tools/EventClients/examples/python/example_button1.py | 192 | 2715 | #!/usr/bin/python
# This is a simple example showing how you can send 2 button events
# to XBMC in a queued fashion to shut it down.
# Queued button events are not repeatable.
# The basic idea is to create single packets and shoot them to XBMC
# The provided library implements some of the support commands and
# takes care of creating the actual packet. Using it is as simple
# as creating an object with the required constructor arguments and
# sending it through a socket.
# Currently, only keyboard keys are supported so the key codes used
# below are the same key codes used in guilib/common/SDLKeyboard.cpp
# In effect, anything that can be done with the keyboard can be done
# using the event client.
# import the XBMC client library
# NOTE: The library is not complete yet but is usable at this stage.
import sys
sys.path.append("../../lib/python")
from xbmcclient import *
from socket import *
def main():
    """Send a queued shutdown sequence ('S', then Enter) to a local XBMC."""
    import time
    import sys

    # XBMC listens for event clients on UDP port 9777 by default; one
    # datagram socket is reused for every packet below, and the connection
    # only needs to be set up once.
    host = "localhost"
    port = 9777
    addr = (host, port)
    sock = socket(AF_INET, SOCK_DGRAM)

    # The first packet must be HELO; it may carry an icon that XBMC shows
    # in the notification ('icon_type' is one of ICON_NONE, ICON_PNG,
    # ICON_JPG or ICON_GIF).
    hello = PacketHELO(devicename="Example Remote",
                       icon_type=ICON_PNG,
                       icon_file="../../icons/bluetooth.png")
    hello.send(sock, addr)

    # IMPORTANT: after HELO the client has to send some valid packet (or an
    # explicit PacketPING) at least once every 60 seconds, otherwise XBMC
    # drops the session and a fresh HELO would be required.  The protocol is
    # unidirectional, so a timed-out client has no way to detect the timeout.

    # Give the notification window inside XBMC time to close.
    time.sleep(5)

    # Queued button events are delivered one after another and never repeat:
    # press 'S' to bring up the shutdown prompt...
    PacketBUTTON(code='S', queue=1).send(sock, addr)

    # ...pause briefly...
    time.sleep(2)

    # ...then confirm with the enter key (keycode 13).
    PacketBUTTON(code=13, queue=1).send(sock, addr)

    # BYE is not strictly required since XBMC is shutting down anyway.
    PacketBYE().send(sock, addr)  # PacketPING if you want to ping


if __name__ == "__main__":
    main()
| gpl-2.0 |
insiderr/insiderr-app | ios-patches/basemodules/twisted/names/common.py | 55 | 7726 | # -*- test-case-name: twisted.names.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Base functionality useful to various parts of Twisted Names.
"""
from __future__ import division, absolute_import
import socket
from zope.interface import implementer
from twisted.names import dns
from twisted.names.error import DNSFormatError, DNSServerError, DNSNameError
from twisted.names.error import DNSNotImplementedError, DNSQueryRefusedError
from twisted.names.error import DNSUnknownError
from twisted.internet import defer, error, interfaces
from twisted.python import failure
# Helpers for indexing the three-tuples that get thrown around by this code a
# lot.
_ANS, _AUTH, _ADD = range(3)
EMPTY_RESULT = (), (), ()
@implementer(interfaces.IResolver)
class ResolverBase:
    """
    L{ResolverBase} is a base class for implementations of
    L{interfaces.IResolver} which deals with a lot
    of the boilerplate of implementing all of the lookup methods.

    @cvar _errormap: A C{dict} mapping DNS protocol failure response codes
        to exception classes which will be used to represent those failures.
    """
    # rCode values missing from this map fall back to DNSUnknownError
    # (see exceptionForCode below).
    _errormap = {
        dns.EFORMAT: DNSFormatError,
        dns.ESERVER: DNSServerError,
        dns.ENAME: DNSNameError,
        dns.ENOTIMP: DNSNotImplementedError,
        dns.EREFUSED: DNSQueryRefusedError}

    # Per-instance map filled in by __init__; None at class level.
    typeToMethod = None

    def __init__(self):
        self.typeToMethod = {}
        # `typeToMethod` here is the *module-level* dict (defined at the
        # bottom of this module) mapping record types to method names; bind
        # each name to the corresponding bound method of this instance.
        for (k, v) in typeToMethod.items():
            self.typeToMethod[k] = getattr(self, v)

    def exceptionForCode(self, responseCode):
        """
        Convert a response code (one of the possible values of
        L{dns.Message.rCode} to an exception instance representing it.

        @since: 10.0
        """
        return self._errormap.get(responseCode, DNSUnknownError)

    def query(self, query, timeout=None):
        """
        Dispatch *query* to the lookup method registered for its record
        type; fail with NotImplementedError for unknown types.
        """
        try:
            method = self.typeToMethod[query.type]
        except KeyError:
            return defer.fail(failure.Failure(NotImplementedError(
                str(self.__class__) + " " + str(query.type))))
        else:
            return defer.maybeDeferred(method, query.name.name, timeout)

    def _lookup(self, name, cls, type, timeout):
        # Subclasses must override this; the base implementation always fails.
        return defer.fail(NotImplementedError("ResolverBase._lookup"))

    # Each lookupXxx method below simply delegates to _lookup with the IN
    # class and the matching DNS record type.
    def lookupAddress(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.A, timeout)

    def lookupIPV6Address(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.AAAA, timeout)

    def lookupAddress6(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.A6, timeout)

    def lookupMailExchange(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.MX, timeout)

    def lookupNameservers(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.NS, timeout)

    def lookupCanonicalName(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.CNAME, timeout)

    def lookupMailBox(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.MB, timeout)

    def lookupMailGroup(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.MG, timeout)

    def lookupMailRename(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.MR, timeout)

    def lookupPointer(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.PTR, timeout)

    def lookupAuthority(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.SOA, timeout)

    def lookupNull(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.NULL, timeout)

    def lookupWellKnownServices(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.WKS, timeout)

    def lookupService(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.SRV, timeout)

    def lookupHostInfo(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.HINFO, timeout)

    def lookupMailboxInfo(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.MINFO, timeout)

    def lookupText(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.TXT, timeout)

    def lookupSenderPolicy(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.SPF, timeout)

    def lookupResponsibility(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.RP, timeout)

    def lookupAFSDatabase(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.AFSDB, timeout)

    def lookupZone(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.AXFR, timeout)

    def lookupNamingAuthorityPointer(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.NAPTR, timeout)

    def lookupAllRecords(self, name, timeout=None):
        return self._lookup(name, dns.IN, dns.ALL_RECORDS, timeout)

    # IResolverSimple
    def getHostByName(self, name, timeout=None, effort=10):
        """
        Resolve *name* to a single address string by querying all records
        and extracting a suitable one (see extractRecord); *effort* bounds
        the CNAME/NS chasing depth.
        """
        # XXX - respect timeout
        return self.lookupAllRecords(name, timeout
            ).addCallback(self._cbRecords, name, effort
            )

    def _cbRecords(self, records, name, effort):
        (ans, auth, add) = records
        result = extractRecord(self, dns.Name(name), ans + auth + add, effort)
        if not result:
            raise error.DNSLookupError(name)
        return result
def extractRecord(resolver, name, answers, level=10):
    """
    Pick an address for C{name} out of a list of answer records.

    Preference order: A6, then AAAA (both only when the platform supports
    C{socket.inet_ntop}), then A.  CNAME chains are followed recursively
    within the given answers and, as a last resort, an NS referral triggers
    a fresh lookup against that nameserver (returning a Deferred).

    @param level: remaining recursion budget; the function returns C{None}
        once it reaches zero.
    """
    if not level:
        return None
    if hasattr(socket, 'inet_ntop'):
        # Prefer IPv6 answers: A6 records first, then AAAA.
        for r in answers:
            if r.name == name and r.type == dns.A6:
                return socket.inet_ntop(socket.AF_INET6, r.payload.address)
        for r in answers:
            if r.name == name and r.type == dns.AAAA:
                return socket.inet_ntop(socket.AF_INET6, r.payload.address)
    for r in answers:
        if r.name == name and r.type == dns.A:
            return socket.inet_ntop(socket.AF_INET, r.payload.address)
    for r in answers:
        if r.name == name and r.type == dns.CNAME:
            # Follow the alias within the answers we already have; fall back
            # to a full resolution of the alias target if nothing matches.
            result = extractRecord(
                resolver, r.payload.name, answers, level - 1)
            if not result:
                return resolver.getHostByName(
                    str(r.payload.name), effort=level - 1)
            return result
    # No answers, but maybe there's a hint at who we should be asking about
    # this
    for r in answers:
        if r.type == dns.NS:
            from twisted.names import client
            r = client.Resolver(servers=[(str(r.payload.name), dns.PORT)])
            return r.lookupAddress(str(name)
                ).addCallback(
                    lambda records: extractRecord(
                        r, name,
                        records[_ANS] + records[_AUTH] + records[_ADD],
                        level - 1))
# Maps each DNS record type to the name of the ResolverBase method that
# looks it up; ResolverBase.__init__ resolves these names to bound methods.
typeToMethod = {
    dns.A: 'lookupAddress',
    dns.AAAA: 'lookupIPV6Address',
    dns.A6: 'lookupAddress6',
    dns.NS: 'lookupNameservers',
    dns.CNAME: 'lookupCanonicalName',
    dns.SOA: 'lookupAuthority',
    dns.MB: 'lookupMailBox',
    dns.MG: 'lookupMailGroup',
    dns.MR: 'lookupMailRename',
    dns.NULL: 'lookupNull',
    dns.WKS: 'lookupWellKnownServices',
    dns.PTR: 'lookupPointer',
    dns.HINFO: 'lookupHostInfo',
    dns.MINFO: 'lookupMailboxInfo',
    dns.MX: 'lookupMailExchange',
    dns.TXT: 'lookupText',
    dns.SPF: 'lookupSenderPolicy',
    dns.RP: 'lookupResponsibility',
    dns.AFSDB: 'lookupAFSDatabase',
    dns.SRV: 'lookupService',
    dns.NAPTR: 'lookupNamingAuthorityPointer',
    dns.AXFR: 'lookupZone',
    dns.ALL_RECORDS: 'lookupAllRecords',
}
| gpl-3.0 |
joberreiter/pyload | module/plugins/hoster/TwoSharedCom.py | 15 | 1037 | # -*- coding: utf-8 -*-
import re
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class TwoSharedCom(SimpleHoster):
    """Download hoster plugin for 2shared.com free links."""
    __name__ = "TwoSharedCom"
    __type__ = "hoster"
    __version__ = "0.14"
    __status__ = "testing"

    __pattern__ = r'http://(?:www\.)?2shared\.com/(account/)?(download|get|file|document|photo|video|audio)/.+'
    __config__ = [("use_premium", "bool", "Use premium account if available", True)]

    __description__ = """2Shared.com hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]

    # Scrape patterns -- presumably consumed by the SimpleHoster base class:
    # NAME/SIZE feed the file info, OFFLINE detects dead links, LINK_FREE
    # extracts the direct download URL from the inline redirect script.
    NAME_PATTERN = r'<h1>(?P<N>.*)</h1>'
    SIZE_PATTERN = r'<span class="dtitle">File size:</span>\s*(?P<S>[\d.,]+) (?P<U>[\w^_]+)'
    OFFLINE_PATTERN = r'The file link that you requested is not valid\.|This file was deleted\.'
    LINK_FREE_PATTERN = r'window.location =\'(.+?)\';'

    def setup(self):
        # Allow resuming interrupted transfers and parallel downloads.
        self.resume_download = True
        self.multiDL = True


getInfo = create_getInfo(TwoSharedCom)
| gpl-3.0 |
Cosmius/jpype | test/jpypetest/jvmfinder.py | 4 | 4289 | # part of JPype1; author Martin K. Scherer; 2014
try:
import unittest2 as unittest
except ImportError:
import unittest
import mock
from jpype._jvmfinder import *
from jpype._linux import *
from jpype._darwin import *
import sys
class JVMFinderTest(unittest.TestCase):
    """
    test some methods to obtain a jvm.

    TODO: add windows (need to mock registry)
    """

    def test_find_libjvm(self):
        """
        test JVMFinder.find_libjvm does not return broken jvm implementation.
        """
        # Fake os.walk() output describing a JRE that ships three JVM
        # variants; cacao and jamvm are the "broken" ones, server works.
        walk_fake = [('jre', ('lib',), ()),
                     ('jre/lib', ('amd64',), ()),
                     ('jre/lib/amd64',
                      ('cacao', 'jamvm', 'server'), ()),
                     ('jre/lib/amd64/cacao',
                      ('',), ('libjvm.so',)),
                     ('jre/lib/amd64/jamvm',
                      ('',), ('libjvm.so',)),
                     ('jre/lib/amd64/server',
                      ('',), ('libjvm.so',)),
                     ]
        with mock.patch('os.walk') as mockwalk:
            # contains broken and working jvms
            mockwalk.return_value = walk_fake
            finder = LinuxJVMFinder()
            p = finder.find_libjvm('arbitrary java home')
            self.assertEqual(
                p, 'jre/lib/amd64/server/libjvm.so', 'wrong jvm returned')

        with mock.patch('os.walk') as mockwalk:
            # contains only broken jvms, since server impl is removed
            walk_fake[-1] = ((), (), (),)
            mockwalk.return_value = walk_fake
            finder = LinuxJVMFinder()
            with self.assertRaises(JVMNotSupportedException) as context:
                finder.find_libjvm('arbitrary java home')

    @mock.patch('os.walk')
    @mock.patch('os.path.exists')
    @mock.patch('os.path.realpath')
    def test_get_from_bin(self, mock_real_path, mock_path_exists, mock_os_walk):
        """
        test _get_from_bin method (used in linux and darwin)
        '/usr/bin/java' => some jre/jdk path
        """
        # NOTE(review): java_path is unused; kept from the original test.
        java_path = '/usr/lib/jvm/java-6-openjdk-amd64/bin/java'
        mock_os_walk.return_value = [
            ('/usr/lib/jvm/java-6-openjdk-amd64/jre/lib/amd64/server', ('',), ('libjvm.so',))]
        mock_path_exists.return_value = True
        mock_real_path.return_value = '/usr/lib/jvm/java-6-openjdk-amd64/bin/java'

        finder = LinuxJVMFinder()
        p = finder._get_from_bin()

        self.assertEqual(
            p, '/usr/lib/jvm/java-6-openjdk-amd64/jre/lib/amd64/server/libjvm.so')

    @unittest.skipIf(sys.version_info[:2] == (2, 6), "skip on py26")
    @mock.patch('platform.mac_ver')
    def test_javahome_binary_py27(self, mock_mac_ver):
        """/usr/libexec/java_home is only consulted on OS X >= 10.6."""
        # this version has java_home binary
        mock_mac_ver.return_value = ('10.6.8', '', '')
        expected = '/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Home\n'

        finder = DarwinJVMFinder()

        # fake check_output
        with mock.patch('subprocess.check_output') as mock_checkoutput:
            mock_checkoutput.return_value = expected
            p = finder._javahome_binary()
            self.assertEqual(p, expected.strip())

        # this version has no java_home binary
        mock_mac_ver.return_value = ('10.5', '', '')
        p = finder._javahome_binary()
        self.assertEqual(p, None)

    @unittest.skipUnless(sys.version_info[:2] == (2, 6), "only py26")
    @mock.patch('platform.mac_ver')
    def test_javahome_binary_py26(self, mock_mac_ver):
        """Same as above, but via subprocess.Popen (py2.6 lacks check_output)."""
        # this version has java_home binary
        mock_mac_ver.return_value = ('10.6.8', '', '')
        expected = '/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Home\n'

        finder = DarwinJVMFinder()

        # fake check_output
        with mock.patch('subprocess.Popen') as mock_popen:
            class proc:
                def communicate(self):
                    return (expected, )
            mock_popen.return_value = proc()
            p = finder._javahome_binary()
            self.assertEqual(p.strip(), expected.strip())

        # this version has no java_home binary
        mock_mac_ver.return_value = ('10.5', '', '')
        p = finder._javahome_binary()
        self.assertEqual(p, None)


if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
rubenlorenzo/fab-city-dashboard | app/views.py | 1 | 1785 | # -*- encoding: utf-8 -*-
from app import app
from flask import Flask, render_template, jsonify
import pandas as pd
import makerlabs.fablabs_io as fio
from werkzeug.routing import Rule
# import global variables for Z2N
from .scripts.app_vars import title, metas, description, subtitle, version, authors, license, static_dir, URLroot_, fabcities
# Template context shared by every rendered page (see index()).
global_names = {
    'titleApp': title,  # name/brand of the app
    'subtitleApp': subtitle,  # explanation of what the app does
    'metas': metas,  # meta for referencing
    'description': description,  # description of the app
    'version': version,  # version string of the app
    'authors': authors,  # authors in metas
    'license': license
}
@app.route('/')
@app.route('/index')
def index():
    """Render the dashboard landing page with the shared template context."""
    print '-' * 10, 'VIEW INDEX', '-' * 50  # debug trace (Python 2 print statement)
    return render_template(
        "index.html",
        index=True,
        glob=global_names, )
# Tests by massimo
@app.route("/api/cities")
def fabicites_list():
    """Return the configured fab cities list as a JSON response."""
    # NOTE(review): name looks like a typo for `fabcities_list`; left
    # unchanged because Flask derives the endpoint name from it.
    return jsonify(fabcities)
@app.route("/api/labs")
def labs_map():
labs_geojson = fio.get_labs(format="geojson")
return labs_geojson
@app.route("/oecd/regional-data")
def regional_data():
regional_data = pd.read_csv(
app.static_folder + "/data_custom/json_stats/OECD/regional.csv",
encoding="utf-8")
# return regional_data.to_html()
return regional_data.to_json(orient='records')
@app.route("/oecd/national-data")
def national_data():
national_data = pd.read_csv(
app.static_folder + "/data_custom/json_stats/OECD/national.csv",
encoding="utf-8")
# return national_data.to_html()
return national_data.to_json(orient='records')
@app.route('/viz_format')
def info():
    """Render the bare visualisation module template."""
    return render_template('modules/mod_viz.html')
| agpl-3.0 |
DerekYangYC/edx-dl | edx_dl/edx_dl.py | 1 | 36894 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Main module for the edx-dl downloader.
It corresponds to the cli interface
"""
import argparse
import getpass
import json
import logging
import os
import pickle
import re
import sys
from functools import partial
from multiprocessing.dummy import Pool as ThreadPool
from six.moves.http_cookiejar import CookieJar
from six.moves.urllib.error import HTTPError, URLError
from six.moves.urllib.parse import urlencode
from six.moves.urllib.request import (
urlopen,
build_opener,
install_opener,
HTTPCookieProcessor,
Request,
urlretrieve,
)
from ._version import __version__
from .common import (
YOUTUBE_DL_CMD,
DEFAULT_CACHE_FILENAME,
Unit,
Video,
ExitCode,
DEFAULT_FILE_FORMATS,
)
from .parsing import (
edx_json2srt,
get_page_extractor,
is_youtube_url,
)
from .utils import (
clean_filename,
directory_name,
execute_command,
get_filename_from_prefix,
get_page_contents,
get_page_contents_as_json,
mkdir_p,
remove_duplicates,
)
# Known Open edX deployments: maps a short platform key to its base URL and
# to the (tag, attrs) selector that locates the course navigation element
# in the courseware HTML.
OPENEDX_SITES = {
    'edx': {
        'url': 'https://courses.edx.org',
        'courseware-selector': ('nav', {'aria-label': 'Course Navigation'}),
    },
    'stanford': {
        'url': 'https://lagunita.stanford.edu',
        'courseware-selector': ('nav', {'aria-label': 'Course Navigation'}),
    },
    'usyd-sit': {
        'url': 'http://online.it.usyd.edu.au',
        'courseware-selector': ('nav', {'aria-label': 'Course Navigation'}),
    },
    'fun': {
        'url': 'https://www.fun-mooc.fr',
        'courseware-selector': ('section', {'aria-label': 'Menu du cours'}),
    },
    'gwu-seas': {
        'url': 'http://openedx.seas.gwu.edu',
        'courseware-selector': ('nav', {'aria-label': 'Course Navigation'}),
    },
    'gwu-open': {
        'url': 'http://mooc.online.gwu.edu',
        'courseware-selector': ('nav', {'aria-label': 'Course Navigation'}),
    },
    'mitprox': {
        'url': 'https://mitprofessionalx.mit.edu',
        'courseware-selector': ('nav', {'aria-label': 'Course Navigation'}),
    },
    'bits': {
        'url': 'http://any-learn.bits-pilani.ac.in',
        'courseware-selector': ('nav', {'aria-label': 'Course Navigation'}),
    }
}

# Module-wide endpoints; initialized for edx.org and re-pointed at runtime
# by change_openedx_site().
BASE_URL = OPENEDX_SITES['edx']['url']
EDX_HOMEPAGE = BASE_URL + '/login_ajax'
LOGIN_API = BASE_URL + '/login_ajax'
DASHBOARD = BASE_URL + '/dashboard'
COURSEWARE_SEL = OPENEDX_SITES['edx']['courseware-selector']
def change_openedx_site(site_name):
    """
    Re-point the module-level endpoint globals at the platform registered
    under *site_name* in OPENEDX_SITES; exits the process with
    ExitCode.UNKNOWN_PLATFORM when the key is unknown.
    """
    global BASE_URL, EDX_HOMEPAGE, LOGIN_API, DASHBOARD, COURSEWARE_SEL

    known_sites = sorted(OPENEDX_SITES.keys())
    if site_name not in known_sites:
        logging.error("OpenEdX platform should be one of: %s", ', '.join(known_sites))
        sys.exit(ExitCode.UNKNOWN_PLATFORM)

    site_conf = OPENEDX_SITES[site_name]
    BASE_URL = site_conf['url']
    EDX_HOMEPAGE = BASE_URL + '/login_ajax'
    LOGIN_API = BASE_URL + '/login_ajax'
    DASHBOARD = BASE_URL + '/dashboard'
    COURSEWARE_SEL = site_conf['courseware-selector']
def _display_courses(courses):
    """
    Log the courses the user has enrolled in, one numbered entry per course.
    """
    logging.info('You can access %d courses', len(courses))

    position = 1
    for course in courses:
        logging.info('%2d - %s [%s]', position, course.name, course.id)
        logging.info(' %s', course.url)
        position += 1
def get_courses_info(url, headers):
    """
    Extracts the courses information from the dashboard.

    Args:
        url: dashboard URL to fetch.
        headers: HTTP headers (including auth cookies/CSRF token) for the request.

    Returns:
        The list of courses scraped from the dashboard HTML by the
        site-specific page extractor.
    """
    logging.info('Extracting course information from dashboard.')

    page = get_page_contents(url, headers)
    page_extractor = get_page_extractor(url)
    courses = page_extractor.extract_courses_from_html(page, BASE_URL)

    logging.debug('Data extracted: %s', courses)

    return courses
def _get_initial_token(url):
    """
    Create initial connection to get authentication token for future
    requests.

    Returns a string to be used in subsequent connections with the
    X-CSRFToken header or the empty string if we didn't find any token in
    the cookies.
    """
    logging.info('Getting initial CSRF token.')

    cookiejar = CookieJar()
    opener = build_opener(HTTPCookieProcessor(cookiejar))
    # Side effect: installs the cookie-aware opener globally, so later
    # urlopen() calls in this module share the same cookie jar.
    install_opener(opener)
    opener.open(url)

    for cookie in cookiejar:
        if cookie.name == 'csrftoken':
            logging.info('Found CSRF token.')
            return cookie.value

    logging.warn('Did not find the CSRF token.')
    return ''
def get_available_sections(url, headers):
    """
    Extracts the sections and subsections from a given url

    Args:
        url: courseware page URL to fetch.
        headers: HTTP headers (including auth cookies) for the request.

    Returns:
        The sections extracted from the page HTML by the site-specific
        page extractor.
    """
    logging.debug("Extracting sections for :" + url)

    page = get_page_contents(url, headers)
    page_extractor = get_page_extractor(url)
    sections = page_extractor.extract_sections_from_html(page, BASE_URL)

    logging.debug("Extracted sections: " + str(sections))
    return sections
def edx_get_subtitle(url, headers, get_page_contents=get_page_contents, get_page_contents_as_json=get_page_contents_as_json):
    """
    Return a string with the subtitles content from the url or None if no
    subtitles are available.

    Args:
        url: subtitle resource URL.  URLs containing ';' are fetched as raw
            (non-JSON) subtitle documents (e.g. Stanford); anything else is
            fetched as edX JSON subtitles and converted to SRT.
        headers: HTTP headers to send with the request.
        get_page_contents / get_page_contents_as_json: fetcher callables,
            overridable defaults to allow injection in tests.
    """
    try:
        if ';' in url:  # non-JSON format (e.g. Stanford)
            return get_page_contents(url, headers)
        else:
            json_object = get_page_contents_as_json(url, headers)
            return edx_json2srt(json_object)
    except URLError as exception:
        logging.warn('edX subtitles (error: %s)', exception)
        return None
    except ValueError as exception:
        # FIX: the original logged `exception.message`, which no longer
        # exists on Python 3 (removed per PEP 352) and raised
        # AttributeError; logging the exception object works on both 2 and 3.
        logging.warn('edX subtitles (error: %s)', exception)
        return None
def edx_login(url, headers, username, password):
    """
    Log in user into the openedx website.

    Args:
        url: login endpoint (the platform's login_ajax URL).
        headers: HTTP headers, including the X-CSRFToken obtained earlier.
        username: edX account email.
        password: edX account password.

    Returns:
        The JSON response from the login endpoint, decoded into a dict.
    """
    logging.info('Logging into Open edX site: %s', url)

    post_data = urlencode({'email': username,
                           'password': password,
                           'remember': False}).encode('utf-8')

    request = Request(url, post_data, headers)
    response = urlopen(request)
    resp = json.loads(response.read().decode('utf-8'))

    return resp
def parse_args():
    """
    Parse the arguments/options passed to the program on the command line.

    Side effects: configures the root logger (level selected by --debug /
    --quiet) and exits the process immediately when --version is given.
    """
    parser = argparse.ArgumentParser(prog='edx-dl',
                                     description='Get videos from the OpenEdX platform',
                                     epilog='For further use information,'
                                     'see the file README.md',)
    # positional
    parser.add_argument('course_urls',
                        nargs='*',
                        action='store',
                        default=[],
                        help='target course urls '
                        '(e.g., https://courses.edx.org/courses/BerkeleyX/CS191x/2013_Spring/info)')
    # optional
    parser.add_argument('-u',
                        '--username',
                        required=True,
                        action='store',
                        help='your edX username (email)')
    parser.add_argument('-p',
                        '--password',
                        action='store',
                        help='your edX password, '
                        'beware: it might be visible to other users on your system')
    parser.add_argument('-f',
                        '--format',
                        dest='format',
                        action='store',
                        default=None,
                        help='format of videos to download')
    parser.add_argument('-s',
                        '--with-subtitles',
                        dest='subtitles',
                        action='store_true',
                        default=False,
                        help='download subtitles with the videos')
    parser.add_argument('-o',
                        '--output-dir',
                        action='store',
                        dest='output_dir',
                        help='store the files to the specified directory',
                        default='Downloaded')
    parser.add_argument('-i',
                        '--ignore-errors',
                        dest='ignore_errors',
                        action='store_true',
                        default=False,
                        help='continue on download errors, to avoid stopping large downloads')
    # platform choices come from the module-level OPENEDX_SITES table
    sites = sorted(OPENEDX_SITES.keys())
    parser.add_argument('-x',
                        '--platform',
                        action='store',
                        dest='platform',
                        help='OpenEdX platform, one of: %s' % ', '.join(sites),
                        default='edx')
    parser.add_argument('--list-courses',
                        dest='list_courses',
                        action='store_true',
                        default=False,
                        help='list available courses')
    parser.add_argument('--filter-section',
                        dest='filter_section',
                        action='store',
                        default=None,
                        help='filters sections to be downloaded')
    parser.add_argument('--list-sections',
                        dest='list_sections',
                        action='store_true',
                        default=False,
                        help='list available sections')
    parser.add_argument('--youtube-dl-options',
                        dest='youtube_dl_options',
                        action='store',
                        default='',
                        help='set extra options to pass to youtube-dl')
    parser.add_argument('--prefer-cdn-videos',
                        dest='prefer_cdn_videos',
                        action='store_true',
                        default=False,
                        help='prefer CDN video downloads over youtube (BETA)')
    parser.add_argument('--export-filename',
                        dest='export_filename',
                        default=None,
                        help='filename where to put an exported list of urls. '
                        'Use dash "-" to output to stdout. '
                        'Download will not be performed if this option is '
                        'present')
    parser.add_argument('--export-format',
                        dest='export_format',
                        default='%(url)s',
                        help='export format string. Old-style python formatting '
                        'is used. Available variables: %%(url)s. Default: '
                        '"%%(url)s"')
    parser.add_argument('--list-file-formats',
                        dest='list_file_formats',
                        action='store_true',
                        default=False,
                        help='list the default file formats extracted')
    parser.add_argument('--file-formats',
                        dest='file_formats',
                        action='store',
                        default=None,
                        help='appends file formats to be extracted (comma '
                        'separated)')
    parser.add_argument('--overwrite-file-formats',
                        dest='overwrite_file_formats',
                        action='store_true',
                        default=False,
                        help='if active overwrites the file formats to be '
                        'extracted')
    parser.add_argument('--cache',
                        dest='cache',
                        action='store_true',
                        default=False,
                        help='create and use a cache of extracted resources')
    parser.add_argument('--dry-run',
                        dest='dry_run',
                        action='store_true',
                        default=False,
                        help='makes a dry run, only lists the resources')
    parser.add_argument('--sequential',
                        dest='sequential',
                        action='store_true',
                        default=False,
                        help='extracts the resources from the pages sequentially')
    parser.add_argument('--quiet',
                        dest='quiet',
                        action='store_true',
                        default=False,
                        help='omit as many messages as possible, only printing errors')
    parser.add_argument('--debug',
                        dest='debug',
                        action='store_true',
                        default=False,
                        help='print lots of debug information')
    parser.add_argument('--version',
                        dest='version',
                        action='store_true',
                        default=False,
                        help='display version and exit')
    args = parser.parse_args()

    # --version short-circuits everything else.
    if args.version:
        print(__version__)
        sys.exit(ExitCode.OK)

    # Initialize the logging system first so that other functions
    # can use it right away.
    if args.debug:
        logging.basicConfig(level=logging.DEBUG,
                            format='%(name)s[%(funcName)s] %(message)s')
    elif args.quiet:
        logging.basicConfig(level=logging.ERROR,
                            format='%(name)s: %(message)s')
    else:
        logging.basicConfig(level=logging.INFO,
                            format='%(message)s')

    return args
def edx_get_headers():
    """
    Build the Open edX headers to create future requests.
    """
    logging.info('Building initial headers for future requests.')

    csrf_token = _get_initial_token(EDX_HOMEPAGE)
    headers = {
        'User-Agent': 'edX-downloader/0.01',
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
        'Referer': EDX_HOMEPAGE,
        'X-Requested-With': 'XMLHttpRequest',
        'X-CSRFToken': csrf_token,
    }

    logging.debug('Headers built: %s', headers)
    return headers
def extract_units(url, headers, file_formats):
    """
    Parses a webpage and extracts its resources e.g. video_url, sub_url, etc.
    """
    logging.info("Processing '%s'", url)

    html = get_page_contents(url, headers)
    extractor = get_page_extractor(url)
    return extractor.extract_units_from_html(html, BASE_URL, file_formats)
def extract_all_units_in_sequence(urls, headers, file_formats):
    """
    Returns a dict of all the units in the selected_sections: {url, units}
    sequentially, this is clearer for debug purposes
    """
    logging.info('Extracting all units information in sequentially.')
    logging.debug('urls: ' + str(urls))

    return {url: extract_units(url, headers, file_formats) for url in urls}
def extract_all_units_in_parallel(urls, headers, file_formats):
    """
    Returns a dict of all the units in the selected_sections: {url, units}
    in parallel
    """
    logging.info('Extracting all units information in parallel.')
    logging.debug('urls: ' + str(urls))

    # Bind the fixed arguments so the pool only maps over the urls.
    extract = partial(extract_units, file_formats=file_formats,
                      headers=headers)
    pool = ThreadPool(16)
    results = pool.map(extract, urls)
    pool.close()
    pool.join()

    return dict(zip(urls, results))
def _display_sections_menu(course, sections):
"""
List the weeks for the given course.
"""
num_sections = len(sections)
logging.info('%s [%s] has %d sections so far', course.name, course.id, num_sections)
for i, section in enumerate(sections, 1):
logging.info('%2d - Download %s videos', i, section.name)
def _filter_sections(index, sections):
"""
Get the sections for the given index.
If the index is not valid (that is, None, a non-integer, a negative
integer, or an integer above the number of the sections), we choose all
sections.
"""
num_sections = len(sections)
logging.info('Filtering sections')
if index is not None:
try:
index = int(index)
if index > 0 and index <= num_sections:
logging.info('Sections filtered to: %d', index)
return [sections[index - 1]]
else:
pass # log some info here
except ValueError:
pass # log some info here
else:
pass # log some info here
return sections
def _display_sections(sections):
"""
Displays a tree of section(s) and subsections
"""
logging.info('Downloading %d section(s)', len(sections))
for section in sections:
logging.info('Section %2d: %s', section.position, section.name)
for subsection in section.subsections:
logging.info(' %s', subsection.name)
def parse_courses(args, available_courses):
    """
    Parses courses options and returns the selected_courses.
    """
    if args.list_courses:
        _display_courses(available_courses)
        exit(ExitCode.OK)

    if len(args.course_urls) == 0:
        logging.error('You must pass the URL of at least one course, check the correct url with --list-courses')
        exit(ExitCode.MISSING_COURSE_URL)

    # Keep the courses whose url matches one of the requested urls,
    # preserving the order of available_courses.
    selected_courses = []
    for course in available_courses:
        for url in args.course_urls:
            if course.url == url:
                selected_courses.append(course)

    if len(selected_courses) == 0:
        logging.error('You have not passed a valid course url, check the correct url with --list-courses')
        exit(ExitCode.INVALID_COURSE_URL)
    return selected_courses
def parse_sections(args, selections):
    """
    Parses sections options and returns selections filtered by
    selected_sections
    """
    if args.list_sections:
        for course, sections in selections.items():
            _display_sections_menu(course, sections)
        exit(ExitCode.OK)

    if not args.filter_section:
        return selections

    return {course: _filter_sections(args.filter_section, sections)
            for course, sections in selections.items()}
def parse_file_formats(args):
    """
    parse options for file formats and builds the array to be used
    """
    # BUG FIX: copy the module-level default list; the original aliased
    # DEFAULT_FILE_FORMATS directly, so the extend() below mutated the
    # shared default and repeated calls kept appending to it.
    file_formats = list(DEFAULT_FILE_FORMATS)
    if args.list_file_formats:
        logging.info(file_formats)
        exit(ExitCode.OK)
    if args.overwrite_file_formats:
        file_formats = []
    if args.file_formats:
        new_file_formats = args.file_formats.split(",")
        file_formats.extend(new_file_formats)
    logging.debug("file_formats: %s", file_formats)
    return file_formats
def _display_selections(selections):
    """
    Displays the course, sections and subsections to be downloaded
    """
    for course, sections in selections.items():
        logging.info('Downloading %s [%s]', course.name, course.id)
        _display_sections(sections)
def parse_units(all_units):
    """
    Parses units options and corner cases
    """
    # Abort when no unit at all was extracted from any subsection.
    num_units = sum(len(units) for units in all_units.values())
    if num_units < 1:
        logging.warn('No downloadable video found.')
        exit(ExitCode.NO_DOWNLOADABLE_VIDEO)
def get_subtitles_urls(available_subs_url, sub_template_url, headers):
    """
    Request the available subs and builds the urls to download subs
    """
    # Without a template there is nothing we can build.
    if sub_template_url is None:
        return {}

    if available_subs_url is not None:
        try:
            available_subs = get_page_contents_as_json(available_subs_url,
                                                       headers)
        except HTTPError:
            available_subs = ['en']
        return {sub_lang: sub_template_url % sub_lang
                for sub_lang in available_subs}

    # No list of languages: probe the template url and fall back to English.
    try:
        get_page_contents(sub_template_url, headers)
    except HTTPError:
        pass
    return {'en': sub_template_url}
def _build_subtitles_downloads(video, target_dir, filename_prefix, headers):
    """
    Builds a dict {url: filename} for the subtitles, based on the
    filename_prefix of the video
    """
    filename = get_filename_from_prefix(target_dir, filename_prefix)
    if filename is None:
        logging.warn('No video downloaded for %s', filename_prefix)
        return {}
    if video.sub_template_url is None:
        logging.warn('No subtitles downloaded for %s', filename_prefix)
        return {}

    # This is a fix for the case of retrials because the extension would be
    # .lang (from .lang.srt), so the matching does not detect correctly the
    # subtitles name
    stripped = re.match(r'(.*)(?:\.[a-z]{2})', filename)
    if stripped:
        filename = stripped.group(1)

    sub_urls = get_subtitles_urls(video.available_subs_url,
                                  video.sub_template_url,
                                  headers)
    return {sub_url: os.path.join(target_dir,
                                  filename + '.' + sub_lang + '.srt')
            for sub_lang, sub_url in sub_urls.items()}
def _build_url_downloads(urls, target_dir, filename_prefix):
    """
    Builds a dict {url: filename} for the given urls
    If it is a youtube url it uses the valid template for youtube-dl
    otherwise just takes the name of the file from the url
    """
    downloads = {}
    for url in urls:
        downloads[url] = _build_filename_from_url(url, target_dir,
                                                  filename_prefix)
    return downloads
def _build_filename_from_url(url, target_dir, filename_prefix):
    """
    Builds the appropriate filename for the given args
    """
    if is_youtube_url(url):
        # youtube-dl output template: it fills in title/id/extension itself.
        basename = filename_prefix + "-%(title)s-%(id)s.%(ext)s"
    else:
        basename = filename_prefix + '-' + url.rsplit('/', 1)[1]
    return os.path.join(target_dir, basename)
def download_url(url, filename, headers, args):
    """
    Downloads the given url in filename.

    Youtube urls are delegated to youtube-dl; everything else goes through
    urlretrieve. Errors are re-raised unless args.ignore_errors is set.
    """
    if is_youtube_url(url):
        download_youtube_url(url, filename, headers, args)
    else:
        # IMPROVEMENT: removed an unused local `import ssl` — nothing in
        # this function referenced the ssl module.
        #
        # FIXME: Ugly hack for coping with broken SSL sites:
        # https://www.cs.duke.edu/~angl/papers/imc10-cloudcmp.pdf
        #
        # We should really ask the user if they want to stop the downloads
        # or if they are OK proceeding without verification.
        #
        # Note that skipping verification by default could be a problem for
        # people's lives if they happen to live ditatorial countries.
        #
        # Note: The mess with various exceptions being caught (and their
        # order) is due to different behaviors in different Python versions
        # (e.g., 2.7 vs. 3.4).
        try:
            urlretrieve(url, filename)
        except Exception as e:
            logging.warn('Got SSL/Connection error: %s', e)
            if not args.ignore_errors:
                logging.warn('Hint: if you want to ignore this error, add '
                             '--ignore-errors option to the command line')
                raise e
            else:
                logging.warn('SSL/Connection error ignored: %s', e)
def download_youtube_url(url, filename, headers, args):
    """
    Downloads a youtube URL and applies the filters from args
    """
    logging.info('Downloading video with URL %s from YouTube.', url)

    fmt = (args.format + '/mp4') if args.format else 'mp4'
    cmd = YOUTUBE_DL_CMD + ['-o', filename, '-f', fmt]
    if args.subtitles:
        cmd.append('--all-subs')
    cmd += args.youtube_dl_options.split()
    cmd += [url]

    execute_command(cmd, args)
def download_subtitle(url, filename, headers, args):
    """
    Downloads the subtitle from the url and transforms it to the srt format
    """
    subs = edx_get_subtitle(url, headers)
    if not subs:
        return
    target = os.path.join(os.getcwd(), filename)
    with open(target, 'wb+') as f:
        f.write(subs.encode('utf-8'))
def skip_or_download(downloads, headers, args, f=download_url):
    """
    downloads url into filename using download function f,
    if filename exists it skips
    """
    for url, filename in downloads.items():
        if os.path.exists(filename):
            logging.info('[skipping] %s => %s', url, filename)
            continue
        logging.info('[download] %s => %s', url, filename)
        if args.dry_run:
            continue
        f(url, filename, headers, args)
def download_video(video, args, target_dir, filename_prefix, headers):
    # Choose CDN mp4 downloads when preferred or when no youtube url exists;
    # otherwise download via youtube-dl.
    if args.prefer_cdn_videos or video.video_youtube_url is None:
        mp4_downloads = _build_url_downloads(video.mp4_urls, target_dir,
                                             filename_prefix)
        skip_or_download(mp4_downloads, headers, args)
    elif video.video_youtube_url is not None:
        youtube_downloads = _build_url_downloads([video.video_youtube_url],
                                                 target_dir,
                                                 filename_prefix)
        skip_or_download(youtube_downloads, headers, args)

    # the behavior with subtitles is different, since the subtitles don't know
    # the destination name until the video is downloaded with youtube-dl
    # also, subtitles must be transformed from the raw data to the srt format
    if args.subtitles:
        sub_downloads = _build_subtitles_downloads(video, target_dir,
                                                   filename_prefix, headers)
        skip_or_download(sub_downloads, headers, args, download_subtitle)
def download_unit(unit, args, target_dir, filename_prefix, headers):
    """
    Downloads the urls in unit based on args in the given target_dir
    with filename_prefix
    """
    if len(unit.videos) == 1:
        download_video(unit.videos[0], args, target_dir, filename_prefix,
                       headers)
    else:
        # extend the prefix per video to avoid filename clashes between the
        # videos (and their subtitles) of the same unit
        for index, video in enumerate(unit.videos, 1):
            download_video(video, args, target_dir,
                           filename_prefix + ('-%02d' % index), headers)

    res_downloads = _build_url_downloads(unit.resources_urls, target_dir,
                                         filename_prefix)
    skip_or_download(res_downloads, headers, args)
def download(args, selections, all_units, headers):
    """
    Downloads all the resources based on the selections
    """
    logging.info("Output directory: " + args.output_dir)

    # We iterate over sections/subsections instead of all_units directly so
    # that directory names and file prefixes follow the course structure.
    for course, sections in selections.items():
        course_dirname = directory_name(course.name)
        for section in sections:
            section_dirname = "%02d-%s" % (section.position, section.name)
            target_dir = os.path.join(args.output_dir, course_dirname,
                                      clean_filename(section_dirname))
            mkdir_p(target_dir)
            counter = 0
            for subsection in section.subsections:
                for unit in all_units.get(subsection.url, []):
                    counter += 1
                    download_unit(unit, args, target_dir,
                                  "%02d" % counter, headers)
def remove_repeated_urls(all_units):
    """
    Removes repeated urls from the units, it does not consider subtitles.
    This is done to avoid repeated downloads.

    Returns a new {url: units} dict in which every youtube/mp4/resource url
    appears at most once across all units (first occurrence wins, in dict
    iteration order); videos and units left with nothing to download are
    dropped.
    """
    existing_urls = set()
    filtered_units = {}
    for url, units in all_units.items():
        reduced_units = []
        for unit in units:
            videos = []
            for video in unit.videos:
                # we don't analyze the subtitles for repetition since
                # their size is negligible for the goal of this function
                video_youtube_url = None
                if video.video_youtube_url not in existing_urls:
                    video_youtube_url = video.video_youtube_url
                    existing_urls.add(video_youtube_url)
                # remove_duplicates returns the not-yet-seen urls plus the
                # updated "seen" set, which we thread through the loop.
                mp4_urls, existing_urls = remove_duplicates(video.mp4_urls, existing_urls)
                # keep the video only if it still has something to download
                if video_youtube_url is not None or len(mp4_urls) > 0:
                    videos.append(Video(video_youtube_url=video_youtube_url,
                                        available_subs_url=video.available_subs_url,
                                        sub_template_url=video.sub_template_url,
                                        mp4_urls=mp4_urls))
            resources_urls, existing_urls = remove_duplicates(unit.resources_urls, existing_urls)
            # keep the unit only if it still has videos or resources
            if len(videos) > 0 or len(resources_urls) > 0:
                reduced_units.append(Unit(videos=videos,
                                          resources_urls=resources_urls))
        filtered_units[url] = reduced_units
    return filtered_units
def num_urls_in_units_dict(units_dict):
    """
    Counts the number of urls in a all_units dict, it ignores subtitles from
    its counting.
    """
    total = 0
    for units in units_dict.values():
        for unit in units:
            for video in unit.videos:
                # count each present (non-None) single-url attribute once
                total += sum(1 for attr in (video.video_youtube_url,
                                            video.available_subs_url,
                                            video.sub_template_url)
                             if attr is not None)
                total += len(video.mp4_urls)
            total += len(unit.resources_urls)
    return total
def extract_all_units_with_cache(all_urls, headers, file_formats,
                                 filename=DEFAULT_CACHE_FILENAME,
                                 extractor=extract_all_units_in_parallel):
    """
    Extracts the units which are not in the cache and extract their resources
    returns the full list of units (cached+new)

    The cache is used to improve speed because it avoids requesting already
    known (and extracted) objects from URLs. This is useful to follow courses
    week by week since we won't parse the already known subsections/units,
    additionally it speeds development of code unrelated to extraction.
    """
    cached_units = {}
    if os.path.exists(filename):
        # NOTE(review): pickle.load on an attacker-controlled cache file can
        # execute arbitrary code; the cache is assumed local and trusted.
        with open(filename, 'rb') as f:
            cached_units = pickle.load(f)

    # only the urls missing from the cache need to be extracted
    uncached_urls = [url for url in all_urls if url not in cached_units]
    logging.info('loading %d urls from cache [%s]', len(cached_units.keys()),
                 filename)

    all_units = dict(cached_units)
    all_units.update(extractor(uncached_urls, headers, file_formats))
    return all_units
def write_units_to_cache(units, filename=DEFAULT_CACHE_FILENAME):
    """
    writes units to cache
    """
    logging.info('writing %d urls to cache [%s]', len(units.keys()),
                 filename)
    with open(filename, 'wb') as cache_file:
        pickle.dump(units, cache_file)
def extract_urls_from_units(all_units, format_):
    """
    Extract urls from units into a set of strings. Format is specified by
    the user. The original purpose of this function is to export urls into
    a file for external downloader.

    Raises TypeError when an unexpected unit/video type is encountered.
    """
    all_urls = set()

    # Collect all urls into a set to remove duplicates
    for units in all_units.values():
        for unit in units:
            if isinstance(unit, Unit):
                for video in unit.videos:
                    if isinstance(video, Video):
                        for url in video.mp4_urls:
                            all_urls.add('%s\n' % (format_ % {'url': url}))
                    else:
                        # FIX: corrected the 'occured' typo in both messages
                        raise TypeError('Unknown unit video type (%s) occurred '
                                        'while exporting urls' % type(video))
                for url in unit.resources_urls:
                    all_urls.add('%s\n' % (format_ % {'url': url}))
            else:
                raise TypeError('Unknown unit type (%s) occurred while '
                                'exporting urls' % type(unit))
    return list(all_urls)
def save_urls_to_file(urls, filename):
    """
    Save urls to file. Filename is specified by the user. The original
    purpose of this function is to export urls into a file for external
    downloader.

    A filename of '-' writes to stdout, which is left open.
    """
    if filename == '-':
        # BUG FIX: the original closed sys.stdout after writing to it,
        # breaking any later output from the process.
        sys.stdout.writelines(urls)
        return
    with open(filename, 'w') as output_file:
        output_file.writelines(urls)
def main():
    """
    Main program function
    """
    args = parse_args()
    logging.info('edx_dl version %s' % __version__)
    file_formats = parse_file_formats(args)

    change_openedx_site(args.platform)

    # Query password, if not already passed by command line.
    if not args.password:
        args.password = getpass.getpass(stream=sys.stderr)

    if not args.username or not args.password:
        logging.error("You must supply username and password to log-in")
        exit(ExitCode.MISSING_CREDENTIALS)

    # Prepare Headers
    headers = edx_get_headers()

    # Login
    resp = edx_login(LOGIN_API, headers, args.username, args.password)
    if not resp.get('success', False):
        logging.error(resp.get('value', "Wrong Email or Password."))
        exit(ExitCode.WRONG_EMAIL_OR_PASSWORD)

    # Parse and select the available courses
    courses = get_courses_info(DASHBOARD, headers)
    available_courses = [course for course in courses if course.state == 'Started']
    selected_courses = parse_courses(args, available_courses)

    # Parse the sections and build the selections dict filtered by sections
    all_selections = {selected_course:
                      get_available_sections(selected_course.url.replace('info', 'courseware'), headers)
                      for selected_course in selected_courses}
    selections = parse_sections(args, all_selections)
    _display_selections(selections)

    # Extract the unit information (downloadable resources)
    # This parses the HTML of all the subsection.url and extracts
    # the URLs of the resources as Units.
    all_urls = [subsection.url
                for selected_sections in selections.values()
                for selected_section in selected_sections
                for subsection in selected_section.subsections]

    extractor = extract_all_units_in_parallel
    if args.sequential:
        extractor = extract_all_units_in_sequence

    if args.cache:
        all_units = extract_all_units_with_cache(all_urls, headers,
                                                 file_formats,
                                                 extractor=extractor)
    else:
        all_units = extractor(all_urls, headers, file_formats)

    # BUG FIX: parse_units expects the {url: units} dict; the original passed
    # `selections` (courses -> sections), which made the "no downloadable
    # video found" check a no-op.
    parse_units(all_units)

    if args.cache:
        write_units_to_cache(all_units)

    # This removes all repeated important urls
    # FIXME: This is not the best way to do it but it is the simplest, a
    # better approach will be to create symbolic or hard links for the repeated
    # units to avoid losing information
    filtered_units = remove_repeated_urls(all_units)
    num_all_urls = num_urls_in_units_dict(all_units)
    num_filtered_urls = num_urls_in_units_dict(filtered_units)
    logging.warn('Removed %d duplicated urls from %d in total',
                 (num_all_urls - num_filtered_urls), num_all_urls)

    # finally we download or export all the resources
    if args.export_filename is not None:
        logging.info('exporting urls to file %s' % args.export_filename)
        urls = extract_urls_from_units(all_units, args.export_format)
        save_urls_to_file(urls, args.export_filename)
    else:
        download(args, selections, all_units, headers)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Exit with a success status when the user aborts with CTRL-C.
        logging.warn("\n\nCTRL-C detected, shutting down....")
        sys.exit(ExitCode.OK)
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
run_module_suite, assert_equal, assert_array_equal,
assert_raises, assert_
)
from numpy.lib.stride_tricks import (
as_strided, broadcast_arrays, _broadcast_shape, broadcast_to
)
def assert_shapes_correct(input_shapes, expected_shape):
    # Broadcast a list of arrays with the given input shapes and check the
    # common output shape.
    arrays = [np.zeros(shape) for shape in input_shapes]
    broadcasted = broadcast_arrays(*arrays)
    actual_shapes = [b.shape for b in broadcasted]
    assert_equal(actual_shapes, [expected_shape] * len(arrays))
def assert_incompatible_shapes_raise(input_shapes):
    # Broadcast a list of arrays with the given (incompatible) input shapes
    # and check that they raise a ValueError.
    arrays = [np.zeros(shape) for shape in input_shapes]
    assert_raises(ValueError, broadcast_arrays, *arrays)
def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False):
    # Broadcast two shapes against each other and check that the data layout
    # is the same as if a ufunc did the broadcasting.
    x0 = np.zeros(shape0, dtype=int)
    # Note that multiply.reduce's identity element is 1.0, so when shape1==(),
    # this gives the desired n==1.
    count = int(np.multiply.reduce(shape1))
    x1 = np.arange(count).reshape(shape1)
    if transposed:
        x0, x1 = x0.T, x1.T
    if flipped:
        x0, x1 = x0[::-1], x1[::-1]
    # Use the add ufunc to do the broadcasting. Since we're adding 0s to x1,
    # the result should be exactly the same as the broadcasted view of x1.
    summed = x0 + x1
    _, b1 = broadcast_arrays(x0, x1)
    assert_array_equal(summed, b1)
def test_same():
    # Broadcasting two identically-shaped arrays must not change either one.
    first = np.arange(10)
    second = np.arange(10)
    b_first, b_second = broadcast_arrays(first, second)
    assert_array_equal(first, b_first)
    assert_array_equal(second, b_second)
def test_one_off():
    # A row vector and a column vector broadcast to a square.
    row = np.array([[1, 2, 3]])
    col = np.array([[1], [2], [3]])
    b_row, b_col = broadcast_arrays(row, col)
    expected_row = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
    assert_array_equal(expected_row, b_row)
    assert_array_equal(expected_row.T, b_col)
def test_same_input_shapes():
    # Check that the final shape is just the input shape.
    data = [
        (),
        (1,),
        (3,),
        (0, 1),
        (0, 3),
        (1, 0),
        (3, 0),
        (1, 3),
        (3, 1),
        (3, 3),
    ]
    for shape in data:
        # Broadcasting one, two, or three identical shapes is a no-op.
        for copies in (1, 2, 3):
            assert_shapes_correct([shape] * copies, shape)
def test_two_compatible_by_ones_input_shapes():
    # Check that two different input shapes of the same length, but some have
    # ones, broadcast to the correct shape.
    data = [
        [[(1,), (3,)], (3,)],
        [[(1, 3), (3, 3)], (3, 3)],
        [[(3, 1), (3, 3)], (3, 3)],
        [[(1, 3), (3, 1)], (3, 3)],
        [[(1, 1), (3, 3)], (3, 3)],
        [[(1, 1), (1, 3)], (1, 3)],
        [[(1, 1), (3, 1)], (3, 1)],
        [[(1, 0), (0, 0)], (0, 0)],
        [[(0, 1), (0, 0)], (0, 0)],
        [[(1, 0), (0, 1)], (0, 0)],
        [[(1, 1), (0, 0)], (0, 0)],
        [[(1, 1), (1, 0)], (1, 0)],
        [[(1, 1), (0, 1)], (0, 1)],
    ]
    for shapes, expected in data:
        assert_shapes_correct(shapes, expected)
        # Broadcasting is symmetric, so the reversed order must agree.
        assert_shapes_correct(shapes[::-1], expected)
def test_two_compatible_by_prepending_ones_input_shapes():
    # Check that two different input shapes (of different lengths) broadcast
    # to the correct shape.
    data = [
        [[(), (3,)], (3,)],
        [[(3,), (3, 3)], (3, 3)],
        [[(3,), (3, 1)], (3, 3)],
        [[(1,), (3, 3)], (3, 3)],
        [[(), (3, 3)], (3, 3)],
        [[(1, 1), (3,)], (1, 3)],
        [[(1,), (3, 1)], (3, 1)],
        [[(1,), (1, 3)], (1, 3)],
        [[(), (1, 3)], (1, 3)],
        [[(), (3, 1)], (3, 1)],
        [[(), (0,)], (0,)],
        [[(0,), (0, 0)], (0, 0)],
        [[(0,), (0, 1)], (0, 0)],
        [[(1,), (0, 0)], (0, 0)],
        [[(), (0, 0)], (0, 0)],
        [[(1, 1), (0,)], (1, 0)],
        [[(1,), (0, 1)], (0, 1)],
        [[(1,), (1, 0)], (1, 0)],
        [[(), (1, 0)], (1, 0)],
        [[(), (0, 1)], (0, 1)],
    ]
    for shapes, expected in data:
        assert_shapes_correct(shapes, expected)
        # Broadcasting is symmetric, so the reversed order must agree.
        assert_shapes_correct(shapes[::-1], expected)
def test_incompatible_shapes_raise_valueerror():
    # Check that a ValueError is raised for incompatible shapes.
    data = [
        [(3,), (4,)],
        [(2, 3), (2,)],
        [(3,), (3,), (4,)],
        [(1, 3, 4), (2, 3, 3)],
    ]
    for shapes in data:
        assert_incompatible_shapes_raise(shapes)
        # Broadcasting is symmetric, so the reversed order must fail too.
        assert_incompatible_shapes_raise(shapes[::-1])
def test_same_as_ufunc():
    # Check that the data layout is the same as if a ufunc did the operation.
    data = [
        [[(1,), (3,)], (3,)],
        [[(1, 3), (3, 3)], (3, 3)],
        [[(3, 1), (3, 3)], (3, 3)],
        [[(1, 3), (3, 1)], (3, 3)],
        [[(1, 1), (3, 3)], (3, 3)],
        [[(1, 1), (1, 3)], (1, 3)],
        [[(1, 1), (3, 1)], (3, 1)],
        [[(1, 0), (0, 0)], (0, 0)],
        [[(0, 1), (0, 0)], (0, 0)],
        [[(1, 0), (0, 1)], (0, 0)],
        [[(1, 1), (0, 0)], (0, 0)],
        [[(1, 1), (1, 0)], (1, 0)],
        [[(1, 1), (0, 1)], (0, 1)],
        [[(), (3,)], (3,)],
        [[(3,), (3, 3)], (3, 3)],
        [[(3,), (3, 1)], (3, 3)],
        [[(1,), (3, 3)], (3, 3)],
        [[(), (3, 3)], (3, 3)],
        [[(1, 1), (3,)], (1, 3)],
        [[(1,), (3, 1)], (3, 1)],
        [[(1,), (1, 3)], (1, 3)],
        [[(), (1, 3)], (1, 3)],
        [[(), (3, 1)], (3, 1)],
        [[(), (0,)], (0,)],
        [[(0,), (0, 0)], (0, 0)],
        [[(0,), (0, 1)], (0, 0)],
        [[(1,), (0, 0)], (0, 0)],
        [[(), (0, 0)], (0, 0)],
        [[(1, 1), (0,)], (1, 0)],
        [[(1,), (0, 1)], (0, 1)],
        [[(1,), (1, 0)], (1, 0)],
        [[(), (1, 0)], (1, 0)],
        [[(), (0, 1)], (0, 1)],
    ]
    for input_shapes, expected_shape in data:
        # BUG FIX: a message string used to be passed as the third positional
        # argument here; it landed in the (boolean) `transposed` parameter
        # and, being truthy, silently transposed the operands.
        assert_same_as_ufunc(input_shapes[0], input_shapes[1])
        # Reverse the input shapes since broadcasting should be symmetric.
        assert_same_as_ufunc(input_shapes[1], input_shapes[0])
        # Try them transposed, too.
        assert_same_as_ufunc(input_shapes[0], input_shapes[1], True)
        # ... and flipped for non-rank-0 inputs in order to test negative
        # strides.
        if () not in input_shapes:
            assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True)
            assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True)
def test_broadcast_to_succeeds():
    # (input array, target shape, expected broadcast result)
    data = [
        [np.array(0), (0,), np.array(0)],
        [np.array(0), (1,), np.zeros(1)],
        [np.array(0), (3,), np.zeros(3)],
        [np.ones(1), (1,), np.ones(1)],
        [np.ones(1), (2,), np.ones(2)],
        [np.ones(1), (1, 2, 3), np.ones((1, 2, 3))],
        [np.arange(3), (3,), np.arange(3)],
        [np.arange(3), (1, 3), np.arange(3).reshape(1, -1)],
        [np.arange(3), (2, 3), np.array([[0, 1, 2], [0, 1, 2]])],
        # test if shape is not a tuple
        [np.ones(0), 0, np.ones(0)],
        [np.ones(1), 1, np.ones(1)],
        [np.ones(1), 2, np.ones(2)],
        # these cases with size 0 are strange, but they reproduce the behavior
        # of broadcasting with ufuncs (see test_same_as_ufunc above)
        [np.ones(1), (0,), np.ones(0)],
        [np.ones((1, 2)), (0, 2), np.ones((0, 2))],
        [np.ones((2, 1)), (2, 0), np.ones((2, 0))],
    ]
    for arr, shape, expected in data:
        assert_array_equal(expected, broadcast_to(arr, shape))
def test_broadcast_to_raises():
    # (original shape, incompatible target shape) pairs
    data = [
        [(0,), ()],
        [(1,), ()],
        [(3,), ()],
        [(3,), (1,)],
        [(3,), (2,)],
        [(3,), (4,)],
        [(1, 2), (2, 1)],
        [(1, 1), (1,)],
        [(1,), -1],
        [(1,), (-1,)],
        [(1, 2), (-1, 2)],
    ]
    for orig_shape, target_shape in data:
        src = np.zeros(orig_shape)
        assert_raises(ValueError, lambda: broadcast_to(src, target_shape))
def test_broadcast_shape():
    # broadcast_shape is already exercized indirectly by broadcast_arrays
    assert_raises(ValueError, _broadcast_shape)
    assert_equal(_broadcast_shape([1, 2]), (2,))
    assert_equal(_broadcast_shape(np.ones((1, 1))), (1, 1))
    assert_equal(_broadcast_shape(np.ones((1, 1)), np.ones((3, 4))), (3, 4))
    # exercise both the 32-argument fast path and the >32 fallback
    for count in (32, 100):
        assert_equal(_broadcast_shape(*([np.ones((1, 2))] * count)), (1, 2))
    # regression tests for gh-5862
    assert_equal(_broadcast_shape(*([np.ones(2)] * 32 + [1])), (2,))
    bad_args = [np.ones(2)] * 32 + [np.ones(3)] * 32
    assert_raises(ValueError, lambda: _broadcast_shape(*bad_args))
def test_as_strided():
    # as_strided on an object array keeps the original contents.
    a = np.array([None])
    a_view = as_strided(a)
    # NOTE(review): `expected` is assigned but unused here; the assertion
    # below compares against a fresh literal instead.
    expected = np.array([None])
    assert_array_equal(a_view, np.array([None]))

    # Strides of 2*itemsize select every other element.
    a = np.array([1, 2, 3, 4])
    a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
    expected = np.array([1, 3])
    assert_array_equal(a_view, expected)

    # A zero stride along the first axis repeats the row.
    a = np.array([1, 2, 3, 4])
    a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize))
    expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
    assert_array_equal(a_view, expected)

    # Regression test for gh-5081
    dt = np.dtype([('num', 'i4'), ('obj', 'O')])
    a = np.empty((4,), dtype=dt)
    a['num'] = np.arange(1, 5)
    a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
    expected_num = [[1, 2, 3, 4]] * 3
    expected_obj = [[None]*4]*3
    assert_equal(a_view.dtype, dt)
    assert_array_equal(expected_num, a_view['num'])
    assert_array_equal(expected_obj, a_view['obj'])

    # Make sure that void types without fields are kept unchanged
    a = np.empty((4,), dtype='V4')
    a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
    assert_equal(a.dtype, a_view.dtype)

    # Make sure that the only type that could fail is properly handled
    dt = np.dtype({'names': [''], 'formats': ['V4']})
    a = np.empty((4,), dtype=dt)
    a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
    assert_equal(a.dtype, a_view.dtype)
def as_strided_writeable():
    # NOTE(review): name lacks the usual ``test_`` prefix, so automatic
    # test collectors will not pick this up -- confirm that is intended.
    data = np.ones(10)

    # Requesting a read-only view must clear the writeable flag.
    strided = as_strided(data, writeable=False)
    assert_(not strided.flags.writeable)

    # A writeable view is allowed and writes through to the base buffer.
    strided = as_strided(data, writeable=True)
    assert_(strided.flags.writeable)
    strided[...] = 3
    assert_array_equal(data, np.full_like(data, 3))

    # Once the base is read-only, even writeable=True yields a
    # read-only view rather than raising.
    data.flags.writeable = False
    strided = as_strided(data, writeable=False)
    strided = as_strided(data, writeable=True)
    assert_(not strided.flags.writeable)
class VerySimpleSubClass(np.ndarray):
    """ndarray subclass whose constructor always forces ``subok=True``."""

    def __new__(cls, *args, **kwargs):
        kwargs['subok'] = True
        base = np.array(*args, **kwargs)
        return base.view(cls)
class SimpleSubClass(VerySimpleSubClass):
    """Subclass with an ``info`` attribute maintained by __array_finalize__."""

    def __new__(cls, *args, **kwargs):
        kwargs['subok'] = True
        instance = np.array(*args, **kwargs).view(cls)
        instance.info = 'simple'
        return instance

    def __array_finalize__(self, obj):
        # Every view/copy appends ' finalized' to the source's info string,
        # letting tests verify that finalization actually ran.
        self.info = getattr(obj, 'info', '') + ' finalized'
def test_subclasses():
    # test that subclass is preserved only if subok=True
    a = VerySimpleSubClass([1, 2, 3, 4])
    assert_(type(a) is VerySimpleSubClass)
    a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
    assert_(type(a_view) is np.ndarray)
    a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
    assert_(type(a_view) is VerySimpleSubClass)
    # test that if a subclass has __array_finalize__, it is used
    a = SimpleSubClass([1, 2, 3, 4])
    a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
    assert_(type(a_view) is SimpleSubClass)
    assert_(a_view.info == 'simple finalized')
    # similar tests for broadcast_arrays: without subok the subclass is
    # dropped, with subok=True it is preserved and finalized
    b = np.arange(len(a)).reshape(-1, 1)
    a_view, b_view = broadcast_arrays(a, b)
    assert_(type(a_view) is np.ndarray)
    assert_(type(b_view) is np.ndarray)
    assert_(a_view.shape == b_view.shape)
    a_view, b_view = broadcast_arrays(a, b, subok=True)
    assert_(type(a_view) is SimpleSubClass)
    assert_(a_view.info == 'simple finalized')
    assert_(type(b_view) is np.ndarray)
    assert_(a_view.shape == b_view.shape)
    # and for broadcast_to
    shape = (2, 4)
    a_view = broadcast_to(a, shape)
    assert_(type(a_view) is np.ndarray)
    assert_(a_view.shape == shape)
    a_view = broadcast_to(a, shape, subok=True)
    assert_(type(a_view) is SimpleSubClass)
    assert_(a_view.info == 'simple finalized')
    assert_(a_view.shape == shape)
def test_writeable():
    # broadcast_to should return a readonly array
    original = np.array([1, 2, 3])
    result = broadcast_to(original, (2, 3))
    assert_equal(result.flags.writeable, False)
    assert_raises(ValueError, result.__setitem__, slice(None), 0)
    # but the result of broadcast_arrays needs to be writeable (for now), to
    # preserve backwards compatibility
    for results in [broadcast_arrays(original),
                    broadcast_arrays(0, original)]:
        for result in results:
            assert_equal(result.flags.writeable, True)
    # keep readonly input readonly
    original.flags.writeable = False
    _, result = broadcast_arrays(0, original)
    assert_equal(result.flags.writeable, False)
    # regression test for GH6491: a 0-d base with an explicit zero stride
    # must still broadcast against an ordinary array
    shape = (2,)
    strides = [0]
    tricky_array = as_strided(np.array(0), shape, strides)
    other = np.zeros((1,))
    first, second = broadcast_arrays(tricky_array, other)
    assert_(first.shape == second.shape)
def test_reference_types():
    # Object-dtype (reference-counted) scalars must broadcast like any
    # other dtype, through both broadcast_to and broadcast_arrays.
    source = np.array('a', dtype=object)
    expected = np.array(['a'] * 3, dtype=object)

    assert_array_equal(expected, broadcast_to(source, (3,)))

    broadcasted, _ = broadcast_arrays(source, np.ones(3))
    assert_array_equal(expected, broadcasted)
# Allow running this test module directly via the legacy numpy.testing runner.
if __name__ == "__main__":
    run_module_suite()
| mit |
rgom/Pydev | plugins/org.python.pydev.parser/src/org/python/pydev/parser/jython/ast/asdl.py | 9 | 10621 | """An implementation of the Zephyr Abstract Syntax Definition Language.
See http://asdl.sourceforge.net/ and
http://www.cs.princeton.edu/~danwang/Papers/dsl97/dsl97-abstract.html.
Only supports top level module decl, not view. I'm guessing that view
is intended to support the browser and I'm not interested in the
browser.
"""
#__metaclass__ = type
import os
import traceback
import spark
class Token:
    """A lexical token.

    The spark-generated parser dispatches on each token's ``type``
    attribute, so that attribute name is part of the interface.
    """

    def __init__(self, type, lineno):
        self.type = type
        self.lineno = lineno

    def __str__(self):
        return self.type

    def __repr__(self):
        # Delegate through str() so subclasses overriding __str__
        # (e.g. Id) automatically get a matching repr.
        return str(self)
class Id(Token):
    """An identifier token; ``value`` holds the identifier text."""

    def __init__(self, value, lineno):
        self.type = 'Id'
        self.value = value
        self.lineno = lineno

    def __str__(self):
        # str()/repr() show the identifier itself, not the token type.
        return self.value
class ASDLSyntaxError(Exception):
    """Syntax error raised while parsing an ASDL spec.

    Carries the offending line number plus either the unexpected token
    or an explicit message.

    BUG FIX: this was a plain (old-style) class, yet instances are
    raised by ASDLParser.error(); raising non-Exception instances only
    worked on historical Python 2 and fails outright on Python 3, so
    the class now derives from Exception.
    """

    def __init__(self, lineno, token=None, msg=None):
        self.lineno = lineno
        self.token = token
        self.msg = msg

    def __str__(self):
        if self.msg is None:
            return "Error at '%s', line %d" % (self.token, self.lineno)
        else:
            return "%s, line %d" % (self.msg, self.lineno)
class ASDLScanner(spark.GenericScanner, object):
    # Tokenizer for ASDL specs.  spark uses each t_* method's docstring
    # as the regular expression for that token class; the methods append
    # Token/Id objects to self.rv.  (Python 2 only: see t_default.)
    def tokenize(self, input):
        self.rv = []
        self.lineno = 1
        super(ASDLScanner, self).tokenize(input)
        return self.rv
    def t_id(self, s):
        r"[\w\.]+"
        # XXX doesn't distinguish upper vs. lower, which is
        # significant for ASDL.
        self.rv.append(Id(s, self.lineno))
    def t_xxx(self, s): # not sure what this production means
        r"<="
        self.rv.append(Token(s, self.lineno))
    def t_punctuation(self, s):
        r"[\{\}\*\=\|\(\)\,\?\:]"
        self.rv.append(Token(s, self.lineno))
    def t_comment(self, s):
        r"\-\-[^\n]*"
        pass
    def t_newline(self, s):
        r"\n"
        # Only newlines advance the line counter used in error messages.
        self.lineno += 1
    def t_whitespace(self, s):
        r"[ \t]+"
        pass
    def t_default(self, s):
        r" . +"
        raise ValueError, "unmatched input: %s" % `s`
class ASDLParser(spark.GenericParser, object):
    # Grammar for ASDL.  spark reads each p_* method's docstring as a
    # production rule and calls the method with the matched symbols as a
    # tuple (Python 2 tuple-parameter syntax).  Start symbol: "module".
    def __init__(self):
        super(ASDLParser, self).__init__("module")
    def typestring(self, tok):
        return tok.type
    def error(self, tok):
        raise ASDLSyntaxError(tok.lineno, tok)
    def p_module_0(self, (module, name, _0, _1)):
        " module ::= Id Id { } "
        if module.value != "module":
            raise ASDLSyntaxError(module.lineno,
                                  msg="expected 'module', found %s" % module)
        return Module(name, None)
    def p_module(self, (module, name, _0, definitions, _1)):
        " module ::= Id Id { definitions } "
        if module.value != "module":
            raise ASDLSyntaxError(module.lineno,
                                  msg="expected 'module', found %s" % module)
        return Module(name, definitions)
    def p_definition_0(self, (definition,)):
        " definitions ::= definition "
        return definition
    def p_definition_1(self, (definitions, definition)):
        " definitions ::= definition definitions "
        return definitions + definition
    def p_definition(self, (id, _, type)):
        " definition ::= Id = type "
        return [Type(id, type)]
    def p_type_0(self, (product,)):
        " type ::= product "
        return product
    def p_type_1(self, (sum,)):
        " type ::= sum "
        return Sum(sum)
    def p_type_2(self, (sum, id, _0, attributes, _1)):
        " type ::= sum Id ( fields ) "
        # The Id here must literally be the keyword "attributes".
        if id.value != "attributes":
            raise ASDLSyntaxError(id.lineno,
                                  msg="expected attributes, found %s" % id)
        return Sum(sum, attributes)
    def p_product(self, (_0, fields, _1)):
        " product ::= ( fields ) "
        # XXX can't I just construct things in the right order?
        fields.reverse()
        return Product(fields)
    def p_sum_0(self, (constructor,)):
        " sum ::= constructor """
        return [constructor]
    def p_sum_1(self, (constructor, _, sum)):
        " sum ::= constructor | sum "
        return [constructor] + sum
    def p_sum_2(self, (constructor, _, sum)):
        # NOTE(review): identical rule and body to p_sum_1 -- presumably a
        # leftover duplicate; confirm whether spark needs it for ambiguity.
        " sum ::= constructor | sum "
        return [constructor] + sum
    def p_constructor_0(self, (id,)):
        " constructor ::= Id "
        return Constructor(id)
    def p_constructor_1(self, (id, _0, fields, _1)):
        " constructor ::= Id ( fields ) "
        # XXX can't I just construct things in the right order?
        fields.reverse()
        return Constructor(id, fields)
    def p_fields_0(self, (field,)):
        " fields ::= field "
        return [field]
    def p_fields_1(self, (field, _, fields)):
        " fields ::= field , fields "
        return fields + [field]
    def p_field_0(self, (type,)):
        " field ::= Id "
        return Field(type)
    def p_field_1(self, (type, name)):
        " field ::= Id Id "
        return Field(type, name)
    def p_field_2(self, (type, _, name)):
        " field ::= Id * Id "
        return Field(type, name, seq=1)
    def p_field_3(self, (type, _, name)):
        " field ::= Id ? Id "
        return Field(type, name, opt=1)
    def p_field_4(self, (type, _)):
        " field ::= Id * "
        return Field(type, seq=1)
    def p_field_5(self, (type, _)):
        " field ::= Id ? "
        return Field(type, opt=1)
# Primitive ASDL types that need no definition inside a spec; check()
# accepts references to these without complaint.
builtin_types = ("identifier", "string", "int", "bool", "object")
# below is a collection of classes to capture the AST of an AST :-)
# not sure if any of the methods are useful yet, but I'm adding them
# piecemeal as they seem helpful
class AST:
    # Abstract base for every node class below; VisitorBase._dispatch
    # asserts isinstance(obj, AST) before dispatching.
    pass # a marker class
class Module(AST):
    # Root node: module name plus its list of Type definitions.
    def __init__(self, name, dfns):
        self.name = name
        self.dfns = dfns
        self.types = {} # maps type name to value (from dfns)
        for type in dfns:
            self.types[type.name.value] = type.value
    def __repr__(self):
        return "Module(%s, %s)" % (self.name, self.dfns)
class Type(AST):
    # A named type definition: name = value, where value is a Sum or Product.
    def __init__(self, name, value):
        self.name = name
        self.value = value
    def __repr__(self):
        return "Type(%s, %s)" % (self.name, self.value)
class Constructor(AST):
    # One alternative of a Sum type: a name and optional field list.
    def __init__(self, name, fields=None):
        self.name = name
        self.fields = fields or []
    def __repr__(self):
        return "Constructor(%s, %s)" % (self.name, self.fields)
class Field(AST):
    # A typed field; seq=1 for "type*", opt=1 for "type?", name optional.
    def __init__(self, type, name=None, seq=0, opt=0):
        self.type = type
        self.name = name
        self.seq = seq
        self.opt = opt
    def __repr__(self):
        # seq and opt are mutually exclusive in practice; seq wins if both set.
        if self.seq:
            extra = ", seq=1"
        elif self.opt:
            extra = ", opt=1"
        else:
            extra = ""
        if self.name is None:
            return "Field(%s%s)" % (self.type, extra)
        else:
            return "Field(%s, %s,%s)" % (self.type, self.name, extra)
class Sum(AST):
    """A sum type: a list of Constructors plus optional shared attributes."""

    def __init__(self, types, attributes=None):
        self.types = types
        # Normalized to a list, so self.attributes is never None.
        self.attributes = attributes or []

    def __repr__(self):
        # BUG FIX: __init__ normalizes attributes to a list, so the old
        # ``self.attributes is None`` test could never be true and empty
        # attribute lists were always printed.  Test emptiness instead,
        # which is what the two-branch repr clearly intended.
        if not self.attributes:
            return "Sum(%s)" % self.types
        else:
            return "Sum(%s, %s)" % (self.types, self.attributes)
class Product(AST):
    # A product type: just an ordered list of Fields.
    def __init__(self, fields):
        self.fields = fields
    def __repr__(self):
        return "Product(%s)" % self.fields
class VisitorBase(object):
    # Dispatches visit(obj) to self.visit<ClassName>(obj), caching the
    # lookup per class.  skip=1 silently ignores missing visit methods.
    # (Python 2 only: "except Exception, err" syntax below.)
    def __init__(self, skip=0):
        self.cache = {}
        self.skip = skip
    def visit(self, object, *args):
        meth = self._dispatch(object)
        if meth is None:
            return
        try:
            meth(object, *args)
        except Exception, err:
            print "Error visiting", repr(object)
            print err
            traceback.print_exc()
            # XXX hack
            if hasattr(self, 'file'):
                self.file.flush()
            os._exit(1)
    def _dispatch(self, object):
        assert isinstance(object, AST), repr(object)
        klass = object.__class__
        meth = self.cache.get(klass)
        if meth is None:
            methname = "visit" + klass.__name__
            if self.skip:
                meth = getattr(self, methname, None)
            else:
                # Without skip, a missing visitor method is a hard error.
                meth = getattr(self, methname)
            self.cache[klass] = meth
        return meth
class Check(VisitorBase):
    # Semantic checker: walks a Module, recording constructor names (to
    # detect redefinitions) and every field type used (for the undefined-
    # type check performed afterwards by check()).
    def __init__(self):
        super(Check, self).__init__(skip=1)
        self.cons = {}    # constructor name -> defining type name
        self.errors = 0
        self.types = {}   # field type name -> list of uses
    def visitModule(self, mod):
        for dfn in mod.dfns:
            self.visit(dfn)
    def visitType(self, type):
        self.visit(type.value, str(type.name))
    def visitSum(self, sum, name):
        for t in sum.types:
            self.visit(t, name)
    def visitConstructor(self, cons, name):
        key = str(cons.name)
        conflict = self.cons.get(key)
        if conflict is None:
            self.cons[key] = name
        else:
            print "Redefinition of constructor %s" % key
            print "Defined in %s and %s" % (conflict, name)
            self.errors += 1
        for f in cons.fields:
            self.visit(f, key)
    def visitField(self, field, name):
        key = str(field.type)
        l = self.types.setdefault(key, [])
        l.append(name)
    def visitProduct(self, prod, name):
        for f in prod.fields:
            self.visit(f, name)
def check(mod):
    # Run the Check visitor over a parsed Module, then verify that every
    # referenced field type is either defined in the module or a builtin.
    # Returns true when no errors were found.  (Python 2: has_key/print.)
    v = Check()
    v.visit(mod)
    for t in v.types:
        if not mod.types.has_key(t) and not t in builtin_types:
            v.errors += 1
            uses = ", ".join(v.types[t])
            print "Undefined type %s, used in %s" % (t, uses)
    return not v.errors
def parse(file):
    # Tokenize and parse one ASDL file; on a syntax error, print the
    # error plus the offending source line and return None implicitly.
    scanner = ASDLScanner()
    parser = ASDLParser()
    buf = open(file).read()
    tokens = scanner.tokenize(buf)
    try:
        return parser.parse(tokens)
    except ASDLSyntaxError, err:
        print err
        lines = buf.split("\n")
        print lines[err.lineno - 1] # lines starts at 0, files at 1
# Driver: parse and check each .asdl file given on the command line,
# defaulting to tests/*.asdl.  (Python 2 print statements.)
if __name__ == "__main__":
    import glob
    import sys
    if len(sys.argv) > 1:
        files = sys.argv[1:]
    else:
        testdir = "tests"
        files = glob.glob(testdir + "/*.asdl")
    for file in files:
        print file
        mod = parse(file)
        print "module", mod.name
        print len(mod.dfns), "definitions"
        if not check(mod):
            print "Check failed"
        else:
            for dfn in mod.dfns:
                print dfn.type
| epl-1.0 |
fqul/scrapy | scrapy/item.py | 133 | 2478 | """
Scrapy Item
See documentation in docs/topics/item.rst
"""
from pprint import pformat
from collections import MutableMapping
from abc import ABCMeta
import six
from scrapy.utils.trackref import object_ref
class BaseItem(object_ref):
    """Base class for all scraped items."""
    # object_ref (scrapy.utils.trackref) gives live-object tracking.
    pass
class Field(dict):
    """Container of field metadata"""
    # Just a dict; ItemMeta detects class attributes of this type and
    # collects them into the item's ``fields`` mapping.
class ItemMeta(ABCMeta):
    """Metaclass that collects Field class attributes into ``fields``.

    It first builds a shadow class ('x_' + name) from the declared attrs,
    scans it for Field instances (including inherited ones via the
    ``_class`` chain), then creates the real class whose attrs contain
    only non-Field declarations plus the aggregated ``fields`` dict.
    """
    def __new__(mcs, class_name, bases, attrs):
        new_bases = tuple(base._class for base in bases if hasattr(base, '_class'))
        _class = super(ItemMeta, mcs).__new__(mcs, 'x_' + class_name, new_bases, attrs)
        fields = getattr(_class, 'fields', {})
        new_attrs = {}
        for n in dir(_class):
            v = getattr(_class, n)
            if isinstance(v, Field):
                fields[n] = v
            elif n in attrs:
                new_attrs[n] = attrs[n]
        new_attrs['fields'] = fields
        new_attrs['_class'] = _class
        return super(ItemMeta, mcs).__new__(mcs, class_name, bases, new_attrs)
class DictItem(MutableMapping, BaseItem):
    """Dict-like item whose keys are restricted to the declared fields.

    Values live in self._values; attribute access to field names is
    deliberately blocked so users go through item[key] instead.
    """
    fields = {}
    def __init__(self, *args, **kwargs):
        self._values = {}
        if args or kwargs: # avoid creating dict for most common case
            for k, v in six.iteritems(dict(*args, **kwargs)):
                self[k] = v
    def __getitem__(self, key):
        return self._values[key]
    def __setitem__(self, key, value):
        # Only keys declared as Fields may be set.
        if key in self.fields:
            self._values[key] = value
        else:
            raise KeyError("%s does not support field: %s" %
                (self.__class__.__name__, key))
    def __delitem__(self, key):
        del self._values[key]
    def __getattr__(self, name):
        # Reading a field as an attribute is an error by design.
        if name in self.fields:
            raise AttributeError("Use item[%r] to get field value" % name)
        raise AttributeError(name)
    def __setattr__(self, name, value):
        # Only private attributes (e.g. _values) may be set directly.
        if not name.startswith('_'):
            raise AttributeError("Use item[%r] = %r to set field value" %
                (name, value))
        super(DictItem, self).__setattr__(name, value)
    def __len__(self):
        return len(self._values)
    def __iter__(self):
        return iter(self._values)
    __hash__ = BaseItem.__hash__
    def keys(self):
        return self._values.keys()
    def __repr__(self):
        return pformat(dict(self))
    def copy(self):
        return self.__class__(self)
# Public base class for user-defined items; ItemMeta turns Field class
# attributes into entries of the ``fields`` mapping.
@six.add_metaclass(ItemMeta)
class Item(DictItem):
    pass
| bsd-3-clause |
nagyistoce/odoo-dev-odoo | addons/mrp/wizard/mrp_workcenter_load.py | 381 | 2222 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class mrp_workcenter_load(osv.osv_memory):
    # Transient wizard model: asks for a period granularity and a measure
    # unit, then launches the "mrp.workcenter.load" report.
    _name = 'mrp.workcenter.load'
    _description = 'Work Center Load'
    _columns = {
        'time_unit': fields.selection([('day', 'Day by day'),('week', 'Per week'),('month', 'Per month')],'Type of period', required=True),
        'measure_unit': fields.selection([('hours', 'Amount in hours'),('cycles', 'Amount in cycles')],'Amount measuring unit', required=True),
    }
    def print_report(self, cr, uid, ids, context=None):
        """ To print the report of Work Center Load
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param context: A standard dictionary
        @return : Report
        """
        if context is None:
            context = {}
        # Work centers selected in the UI arrive via context['active_ids'].
        datas = {'ids' : context.get('active_ids',[])}
        res = self.read(cr, uid, ids, ['time_unit','measure_unit'])
        res = res and res[0] or {}
        datas['form'] = res
        return {
            'type' : 'ir.actions.report.xml',
            'report_name':'mrp.workcenter.load',
            'datas' : datas,
         }
| agpl-3.0 |
YongseopKim/crosswalk-test-suite | webapi/tct-indexeddb-w3c-tests/inst.wgt.py | 294 | 6758 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
# Package name is derived from the directory containing this script.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None    # parsed command-line options; set in main()
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = ""         # remote content dir; set in main()
PKG_SRC_DIR = ""     # remote package dir; set in main()
def doCMD(cmd):
    # Run a shell command, echoing its output line by line; returns
    # (exit_code, list_of_output_lines).  (Python 2 print statements.)
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # poll() stays None until the process exits; keep draining stdout.
        if output_line == '' and cmd_return_code != None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)
    return (cmd_return_code, output)
def updateCMD(cmd=None):
    """Wrap pkgcmd invocations so they run as the target user with XW_ENV set."""
    if "pkgcmd" not in cmd:
        return cmd
    return "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
def getUSERID():
    # Resolve the numeric uid of PARAMETERS.user on the target device,
    # over sdb or ssh depending on the selected mode.
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell id -u %s" % (
            PARAMETERS.device, PARAMETERS.user)
    else:
        cmd = "ssh %s \"id -u %s\"" % (
            PARAMETERS.device, PARAMETERS.user )
    return doCMD(cmd)
def getPKGID(pkg_name=None):
    # Look up the installed package id for pkg_name by scanning
    # "pkgcmd -l" output on the device; None if not installed.
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell %s" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    else:
        cmd = "ssh %s \"%s\"" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    (return_code, output) = doCMD(cmd)
    if return_code != 0:
        return None
    test_pkg_id = None
    for line in output:
        if line.find("[" + pkg_name + "]") != -1:
            # The token after "pkgid" holds the id, wrapped in [].
            pkgidIndex = line.split().index("pkgid")
            test_pkg_id = line.split()[pkgidIndex+1].strip("[]")
            break
    return test_pkg_id
def doRemoteCMD(cmd=None):
    # Execute cmd on the device (sdb shell or ssh) and return doCMD's result.
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
    else:
        cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
    return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
    # Push a file/dir to the device via sdb push or scp, then sync.
    # NOTE(review): returns True on FAILURE and False on success
    # (inverted vs. shell convention); callers rely on this.
    if PARAMETERS.mode == "SDB":
        cmd_prefix = "sdb -s %s push" % PARAMETERS.device
        cmd = "%s %s %s" % (cmd_prefix, src, dest)
    else:
        cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
    (return_code, output) = doCMD(cmd)
    doRemoteCMD("sync")
    if return_code != 0:
        return True
    else:
        return False
def uninstPKGs():
    # Uninstall every .wgt package found under SCRIPT_DIR and remove the
    # remote package dir; returns False if any step failed.
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if file.endswith(".wgt"):
                pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
                if not pkg_id:
                    action_status = False
                    continue
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -u -t wgt -q -n %s" % pkg_id)
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    (return_code, output) = doRemoteCMD(
        "rm -rf %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    return action_status
def instPKGs():
    # Push every .wgt under SCRIPT_DIR to the device and install it with
    # pkgcmd; returns False if any push/install failed.
    action_status = True
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    for root, dirs, files in os.walk(SCRIPT_DIR):
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if file.endswith(".wgt"):
                # doRemoteCopy returns True on failure (inverted).
                if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
                    action_status = False
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
                doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    # Do some special copy/delete... steps
    '''
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s/tests" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
        action_status = False
    '''
    return action_status
def main():
    # Parse options, detect the device, resolve the target user's uid
    # (needed for the DBus session address), then install or uninstall.
    # (Python 2 only: "except Exception, e" and print statements.)
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-m", dest="mode", action="store", help="Specify mode")
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        opts_parser.add_option(
            "-a", dest="user", action="store", help="User name")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception, e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)
    if not PARAMETERS.user:
        PARAMETERS.user = "app"
    global SRC_DIR, PKG_SRC_DIR
    SRC_DIR = "/home/%s/content" % PARAMETERS.user
    PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
    if not PARAMETERS.mode:
        PARAMETERS.mode = "SDB"
    if PARAMETERS.mode == "SDB":
        if not PARAMETERS.device:
            # Auto-pick the first device listed by "sdb devices".
            (return_code, output) = doCMD("sdb devices")
            for line in output:
                if str.find(line, "\tdevice") != -1:
                    PARAMETERS.device = line.split("\t")[0]
                    break
    else:
        PARAMETERS.mode = "SSH"
    if not PARAMETERS.device:
        print "No device provided"
        sys.exit(1)
    user_info = getUSERID()
    re_code = user_info[0]
    if re_code == 0 :
        global XW_ENV
        userid = user_info[1][0]
        XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
    else:
        print "[Error] cmd commands error : %s"%str(user_info[1])
        sys.exit(1)
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)
    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        if not instPKGs():
            sys.exit(1)
# Script entry point; exit code 0 only when main() did not sys.exit(1).
if __name__ == "__main__":
    main()
    sys.exit(0)
| bsd-3-clause |
mrquim/repository.mrquim | script.module.pycryptodome/lib/Crypto/SelfTest/Cipher/__init__.py | 12 | 3500 | # -*- coding: utf-8 -*-
#
# SelfTest/Cipher/__init__.py: Self-test for cipher modules
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test for cipher modules"""
__revision__ = "$Id$"
def get_tests(config={}):
    # Aggregate every cipher self-test suite into a single list.
    # Imports are done lazily here (not at module top) so a broken
    # sub-suite only fails when tests are collected.
    # NOTE(review): the mutable default ``config={}`` is shared across
    # calls; harmless here since it is only read and passed through.
    tests = []
    from Crypto.SelfTest.Cipher import test_AES;      tests += test_AES.get_tests(config=config)
    from Crypto.SelfTest.Cipher import test_ARC2;     tests += test_ARC2.get_tests(config=config)
    from Crypto.SelfTest.Cipher import test_ARC4;     tests += test_ARC4.get_tests(config=config)
    from Crypto.SelfTest.Cipher import test_Blowfish; tests += test_Blowfish.get_tests(config=config)
    from Crypto.SelfTest.Cipher import test_CAST;     tests += test_CAST.get_tests(config=config)
    from Crypto.SelfTest.Cipher import test_DES3;     tests += test_DES3.get_tests(config=config)
    from Crypto.SelfTest.Cipher import test_DES;      tests += test_DES.get_tests(config=config)
    from Crypto.SelfTest.Cipher import test_Salsa20;  tests += test_Salsa20.get_tests(config=config)
    from Crypto.SelfTest.Cipher import test_ChaCha20; tests += test_ChaCha20.get_tests(config=config)
    from Crypto.SelfTest.Cipher import test_pkcs1_15; tests += test_pkcs1_15.get_tests(config=config)
    from Crypto.SelfTest.Cipher import test_pkcs1_oaep; tests += test_pkcs1_oaep.get_tests(config=config)
    from Crypto.SelfTest.Cipher import test_OCB;      tests += test_OCB.get_tests(config=config)
    from Crypto.SelfTest.Cipher import test_CBC;      tests += test_CBC.get_tests(config=config)
    from Crypto.SelfTest.Cipher import test_CFB;      tests += test_CFB.get_tests(config=config)
    from Crypto.SelfTest.Cipher import test_OpenPGP;  tests += test_OpenPGP.get_tests(config=config)
    from Crypto.SelfTest.Cipher import test_OFB;      tests += test_OFB.get_tests(config=config)
    from Crypto.SelfTest.Cipher import test_CTR;      tests += test_CTR.get_tests(config=config)
    from Crypto.SelfTest.Cipher import test_CCM;      tests += test_CCM.get_tests(config=config)
    from Crypto.SelfTest.Cipher import test_EAX;      tests += test_EAX.get_tests(config=config)
    from Crypto.SelfTest.Cipher import test_GCM;      tests += test_GCM.get_tests(config=config)
    from Crypto.SelfTest.Cipher import test_SIV;      tests += test_SIV.get_tests(config=config)
    return tests
# Allow running the cipher self-tests directly as a unittest suite.
if __name__ == '__main__':
    import unittest
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
| gpl-2.0 |
mezz64/home-assistant | homeassistant/components/dweet/sensor.py | 21 | 3239 | """Support for showing values from Dweet.io."""
from datetime import timedelta
import json
import logging
import dweepy
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_DEVICE,
CONF_NAME,
CONF_UNIT_OF_MEASUREMENT,
CONF_VALUE_TEMPLATE,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Dweet.io Sensor"
# Poll Dweet.io at most once per minute.
SCAN_INTERVAL = timedelta(minutes=1)
# device + value_template are mandatory; name/unit are cosmetic extras.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_DEVICE): cv.string,
        vol.Required(CONF_VALUE_TEMPLATE): cv.template,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Dweet sensor."""
    name = config.get(CONF_NAME)
    device = config.get(CONF_DEVICE)
    value_template = config.get(CONF_VALUE_TEMPLATE)
    unit = config.get(CONF_UNIT_OF_MEASUREMENT)
    if value_template is not None:
        value_template.hass = hass
    # Probe the device once up front so misconfiguration aborts setup
    # instead of producing a permanently broken entity.
    try:
        content = json.dumps(dweepy.get_latest_dweet_for(device)[0]["content"])
    except dweepy.DweepyError:
        _LOGGER.error("Device/thing %s could not be found", device)
        return
    # An empty render means the template matches nothing in the dweet.
    if value_template.render_with_possible_json_value(content) == "":
        _LOGGER.error("%s was not found", value_template)
        return
    dweet = DweetData(device)
    add_entities([DweetSensor(hass, dweet, name, value_template, unit)], True)
class DweetSensor(Entity):
    """Representation of a Dweet sensor."""

    def __init__(self, hass, dweet, name, value_template, unit_of_measurement):
        """Initialize the sensor."""
        self.hass = hass
        self.dweet = dweet          # shared DweetData poller
        self._name = name
        self._value_template = value_template
        self._state = None
        self._unit_of_measurement = unit_of_measurement

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit_of_measurement

    @property
    def state(self):
        """Return the state."""
        return self._state

    def update(self):
        """Get the latest data from REST API."""
        self.dweet.update()
        if self.dweet.data is None:
            self._state = None
        else:
            # Extract the state by rendering the template over the
            # latest dweet's "content" payload.
            values = json.dumps(self.dweet.data[0]["content"])
            self._state = self._value_template.render_with_possible_json_value(
                values, None
            )
class DweetData:
    """The class for handling the data retrieval."""

    def __init__(self, device):
        """Initialize the sensor."""
        self._device = device
        self.data = None    # latest dweet list, or None when unavailable

    def update(self):
        """Get the latest data from Dweet.io."""
        try:
            self.data = dweepy.get_latest_dweet_for(self._device)
        except dweepy.DweepyError:
            # Keep the entity alive but mark data as unavailable.
            _LOGGER.warning("Device %s doesn't contain any data", self._device)
            self.data = None
| apache-2.0 |
ikappas/SublimeLinter3 | lint/queue.py | 13 | 4121 | #
# queue.py
# Part of SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Ryan Hileman and Aparajita Fishman
#
# Project: https://github.com/SublimeLinter/SublimeLinter3
# License: MIT
#
"""This module provides a threaded queue for lint requests."""
from queue import Queue, Empty
import threading
import traceback
import time
from . import persist, util
class Daemon:
    """
    This class provides a threaded queue that dispatches lints.

    The following operations can be added to the queue:

    hit - Queue a lint for a given view
    delay - Queue a delay for a number of milliseconds
    reload - Indicates the main plugin was reloaded

    """

    # Queue poll timeout and fallback lint delay, in seconds.
    MIN_DELAY = 0.1
    running = False
    callback = None
    # NOTE: these are class attributes, shared by all instances.
    q = Queue()
    last_runs = {}

    def start(self, callback):
        """Start the daemon thread that runs loop."""
        self.callback = callback

        if self.running:
            # The plugin was reloaded while the thread is alive;
            # tell the loop so it can reset its bookkeeping.
            self.q.put('reload')
        else:
            self.running = True
            threading.Thread(target=self.loop).start()

    def loop(self):
        """Continually check the queue for new items and process them."""

        # Pending lints: view_id -> (request timestamp, delay in seconds).
        last_runs = {}

        while True:
            try:
                try:
                    item = self.q.get(block=True, timeout=self.MIN_DELAY)
                except Empty:
                    for view_id, (timestamp, delay) in last_runs.copy().items():
                        # Lint the view if we have gone past the time
                        # at which the lint wants to run.
                        if time.monotonic() > timestamp + delay:
                            self.last_runs[view_id] = time.monotonic()
                            del last_runs[view_id]
                            self.lint(view_id, timestamp)
                    continue

                if isinstance(item, tuple):
                    view_id, timestamp, delay = item

                    # Drop the request if a newer lint of this view already ran.
                    if view_id in self.last_runs and timestamp < self.last_runs[view_id]:
                        continue

                    last_runs[view_id] = timestamp, delay
                elif isinstance(item, (int, float)):
                    time.sleep(item)
                elif isinstance(item, str):
                    if item == 'reload':
                        persist.printf('daemon detected a reload')
                        self.last_runs.clear()
                        last_runs.clear()
                    else:
                        persist.printf('unknown message sent to daemon:', item)
            except Exception:
                # BUG FIX: this was a bare ``except:``, which also caught
                # SystemExit/KeyboardInterrupt and could keep the daemon
                # thread spinning during interpreter shutdown.  Catch only
                # ordinary exceptions; log them and keep the loop alive.
                persist.printf('error in SublimeLinter daemon:')
                persist.printf('-' * 20)
                persist.printf(traceback.format_exc())
                persist.printf('-' * 20)

    def hit(self, view):
        """Add a lint request to the queue, return the time at which the request was enqueued."""
        timestamp = time.monotonic()
        self.q.put((view.id(), timestamp, self.get_delay(view)))
        return timestamp

    def delay(self, milliseconds=100):
        """Add a millisecond delay to the queue."""
        self.q.put(milliseconds / 1000.0)

    def lint(self, view_id, timestamp):
        """
        Call back into the main plugin to lint the given view.

        timestamp is used to determine if the view has been modified
        since the lint was requested.

        """
        self.callback(view_id, timestamp)

    def get_delay(self, view):
        """
        Return the delay between a lint request and when it will be processed.

        If the lint mode is not background, there is no delay. Otherwise, if
        a "delay" setting is not available in any of the settings, MIN_DELAY is used.

        """
        if persist.settings.get('lint_mode') != 'background':
            return 0

        limit = persist.settings.get('rc_search_limit', None)
        rc_settings = util.get_view_rc_settings(view, limit=limit)
        delay = (rc_settings or {}).get('delay')

        if delay is None:
            delay = persist.settings.get('delay', self.MIN_DELAY)

        return delay
# Module-level singleton daemon shared by the whole plugin.
queue = Daemon()
| mit |
divio/djangocms-installer | djangocms_installer/config/internal.py | 4 | 1270 | # -*- coding: utf-8 -*-
from __future__ import print_function
from argparse import Action
import keyword
import sys
import dj_database_url
from .data import DRIVERS
class DbAction(Action):
    """argparse action that validates and decomposes a database URL.

    On success it stores the raw URL in ``dest`` and adds two derived
    attributes on the namespace: ``<dest>_parsed`` (the dict returned by
    dj_database_url) and ``<dest>_driver`` (the DRIVERS lookup for the
    engine).  Raises ValueError when the URL cannot be parsed.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        # BUG FIX: argparse invokes actions for *positional* arguments
        # without passing option_string, so it must default to None as in
        # argparse.Action.__call__; omitting the default raised TypeError.
        parsed = dj_database_url.parse(values)
        if parsed.get('ENGINE', None):
            if DRIVERS[parsed['ENGINE']] == 'postgis':
                sys.stdout.write("postgis installation is not supported at the moment.\nYou need to install and configure the backend.\n")
            setattr(namespace, self.dest, values)
            setattr(namespace, "%s_parsed" % self.dest, parsed)
            setattr(namespace, "%s_driver" % self.dest, DRIVERS[parsed['ENGINE']])
        else:
            raise ValueError("Database URL not recognized, try again")
def validate_project(project_name):
    """
    Check the defined project name against keywords, builtins and existing
    modules to avoid name clashing.

    Returns ``project_name`` if it is a usable project name, ``None``
    otherwise (invalid identifier, keyword, builtin, or importable module).
    """
    # Hyphens are not valid in Python identifiers / module names.
    if '-' in project_name:
        return None
    if keyword.iskeyword(project_name):
        return None
    # BUG FIX: outside ``__main__``, ``__builtins__`` is a dict, so the
    # old ``dir(__builtins__)`` membership test listed dict methods and
    # never matched real builtin names.  Use the builtins module instead.
    try:
        import builtins
    except ImportError:  # Python 2
        import __builtin__ as builtins
    if project_name in dir(builtins):
        return None
    # An importable name would clash with an existing module/package.
    try:
        __import__(project_name)
        return None
    except ImportError:
        return project_name
| bsd-3-clause |
pigshell/nhnick | src/qt/qtwebkit/Source/ThirdParty/gtest/test/gtest_nc_test.py | 277 | 3758 | #!/usr/bin/env python
#
# Copyright 2007, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Negative compilation test for Google Test."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
import unittest
# Negative compilation tests rely on matching specific compiler error
# messages, which is only supported on Linux here; elsewhere we exit
# successfully at import time so the test is effectively skipped.
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
if not IS_LINUX:
  sys.exit(0)  # Negative compilation tests are not supported on Windows & Mac.
class GTestNCTest(unittest.TestCase):
  """Negative compilation test for Google Test."""

  def testCompilerError(self):
    """Verifies that erroneous code leads to expected compiler
    messages."""

    # Diagnostic shared by all the SetUp-vs-Setup typo checks below.
    setup_typo = [r'Setup_should_be_spelled_SetUp']

    # Each spec is a tuple (test name, list of regexes the resulting
    # compiler errors must match); None means no errors are expected.
    test_specs = [
      ('CANNOT_IGNORE_RUN_ALL_TESTS_RESULT',
       [r'ignoring return value']),

      ('USER_CANNOT_INCLUDE_GTEST_INTERNAL_INL_H',
       [r'must not be included except by Google Test itself']),

      ('CATCHES_DECLARING_SETUP_IN_TEST_FIXTURE_WITH_TYPO', setup_typo),
      ('CATCHES_CALLING_SETUP_IN_TEST_WITH_TYPO', setup_typo),
      ('CATCHES_DECLARING_SETUP_IN_ENVIRONMENT_WITH_TYPO', setup_typo),
      ('CATCHES_CALLING_SETUP_IN_ENVIRONMENT_WITH_TYPO', setup_typo),

      ('CATCHES_WRONG_CASE_IN_TYPED_TEST_P',
       [r'BarTest.*was not declared']),

      ('CATCHES_WRONG_CASE_IN_REGISTER_TYPED_TEST_CASE_P',
       [r'BarTest.*was not declared']),

      ('CATCHES_WRONG_CASE_IN_INSTANTIATE_TYPED_TEST_CASE_P',
       [r'BarTest.*not declared']),

      ('CATCHES_INSTANTIATE_TYPED_TESET_CASE_P_WITH_SAME_NAME_PREFIX',
       [r'redefinition of.*My.*FooTest']),

      ('STATIC_ASSERT_TYPE_EQ_IS_NOT_A_TYPE',
       [r'StaticAssertTypeEq.* does not name a type']),

      ('STATIC_ASSERT_TYPE_EQ_WORKS_IN_NAMESPACE',
       [r'StaticAssertTypeEq.*int.*const int']),

      ('STATIC_ASSERT_TYPE_EQ_WORKS_IN_CLASS',
       [r'StaticAssertTypeEq.*int.*bool']),

      ('STATIC_ASSERT_TYPE_EQ_WORKS_IN_FUNCTION',
       [r'StaticAssertTypeEq.*const int.*int']),

      ('SANITY',
       None)
    ]

    # TODO(wan@google.com): verify that the test specs are satisfied.
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
metabrainz/listenbrainz-server | relations/get_relations.py | 2 | 2089 | #!/usr/bin/env python3
import sys
import psycopg2
import config
# All artist-artist relation rows that involve the given artist id on
# either side, with both artists' names resolved; most frequently
# co-occurring pairs first.  Takes the artist id twice as parameters.
SELECT_ARTIST_RELATIONS_QUERY = '''
SELECT count, arr.artist_0, a0.name AS artist_name_0, arr.artist_1, a1.name AS artist_name_1
FROM relations.artist_artist_relations arr
JOIN artist a0 ON arr.artist_0 = a0.id
JOIN artist a1 ON arr.artist_1 = a1.id
WHERE (arr.artist_0 = %s OR arr.artist_1 = %s)
ORDER BY count desc
'''

# Same shape as above, but over artist-credit pairs.  Takes the
# artist-credit id twice as parameters.
SELECT_ARTIST_CREDIT_RELATIONS_QUERY = '''
SELECT count, arr.artist_credit_0, a0.name AS artist_name_0, arr.artist_credit_1, a1.name AS artist_name_1
FROM relations.artist_credit_artist_credit_relations arr
JOIN artist a0 ON arr.artist_credit_0 = a0.id
JOIN artist a1 ON arr.artist_credit_1 = a1.id
WHERE (arr.artist_credit_0 = %s OR arr.artist_credit_1 = %s)
ORDER BY count desc
'''
def get_artist_credit_similarities(artist_credit_id):
    """Return artist-credit relations involving *artist_credit_id*."""
    return _get_similarities(SELECT_ARTIST_CREDIT_RELATIONS_QUERY,
                             artist_credit_id)
def get_artist_similarities(artist_id):
    """Return artist relations involving *artist_id*."""
    return _get_similarities(SELECT_ARTIST_RELATIONS_QUERY, artist_id)
def _get_similarities(query, id):
    """Run *query* for the given row id and collect the related entities.

    Each relation row is (count, id_0, name_0, id_1, name_1); for every
    row we return the entity on the *other* side of the pair from the id
    that was asked about.

    Args:
        query: one of the SELECT_*_RELATIONS_QUERY statements above.
        id: artist or artist-credit row id to look up.

    Returns:
        list of dicts with keys 'count', 'id' and 'artist_name', ordered
        by descending count (the ordering comes from the query itself).
    """
    with psycopg2.connect(config.DB_CONNECT) as conn:
        with conn.cursor() as curs:
            curs.execute(query, (id, id))
            relations = []
            # Iterate the cursor directly instead of the manual
            # fetchone()/break loop, and build the dict once instead of
            # duplicating it in both branches.
            for row in curs:
                if id == row[1]:
                    other_id, other_name = row[3], row[4]
                else:
                    other_id, other_name = row[1], row[2]
                relations.append({
                    'count': row[0],
                    'id': other_id,
                    'artist_name': other_name
                })
            return relations
| gpl-2.0 |
KodiColdkeys/coldkeys-addons | repository/plugin.video.white.devil/resources/lib/sources/watchfree_mv_tv.py | 6 | 8281 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,base64
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import proxy
class source:
    """watchfree.to scraper for the Exodus resolver pipeline.

    movie()/tvshow()/episode() resolve metadata to a site-relative page
    URL; sources() extracts hoster links from such a page; resolve() is
    a pass-through.

    NOTE(review): every method swallows all errors with a bare except
    and returns None (or a partial list), which matches the add-on's
    best-effort scraper convention -- do not "fix" this in isolation.
    """
    def __init__(self):
        self.language = ['en']
        self.domains = ['watchfree.to']
        self.base_link = 'http://www.watchfree.to'
        # %s takes the url-quoted search keyword; search_section selects
        # movies (1) or TV shows (2).
        self.moviesearch_link = '/?keyword=%s&search_section=1'
        self.tvsearch_link = '/?keyword=%s&search_section=2'

    def movie(self, imdb, title, year):
        """Return the site-relative URL of the movie page, or None.

        Searches the site for the title, keeps results whose displayed
        title mentions the expected year (+/- 1), then confirms a match
        either by normalized title+year or by finding the IMDB id on a
        candidate page.
        """
        try:
            query = self.moviesearch_link % urllib.quote_plus(cleantitle.query(title))
            query = urlparse.urljoin(self.base_link, query)
            result = str(proxy.request(query, 'item'))
            # Pull in the second results page too, when present.
            if 'page=2' in result or 'page%3D2' in result: result += str(proxy.request(query + '&page=2', 'item'))
            result = client.parseDOM(result, 'div', attrs = {'class': 'item'})
            # Site titles look like "Watch <Title> (<Year>)".
            title = 'watch' + cleantitle.get(title)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if any(x in i[1] for x in years)]
            r = [(proxy.parse(i[0]), i[1]) for i in result]
            # Exact matches on normalized title + year.
            match = [i[0] for i in r if title == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]
            # All candidates, de-duplicated with original order preserved.
            match2 = [i[0] for i in r]
            match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return
            for i in match2[:5]:
                try:
                    # Prefer an exact title/year match; otherwise verify a
                    # candidate by finding the IMDB id on its page.
                    if len(match) > 0: url = match[0] ; break
                    r = proxy.request(urlparse.urljoin(self.base_link, i), 'link_ite')
                    r = re.findall('(tt\d+)', r)
                    if imdb in r: url = i ; break
                except:
                    pass
            # Keep only the path portion of the winning URL.
            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        """Return the site-relative URL of the show page, or None.

        Same strategy as movie(), but searches the TV section and
        verifies candidates against the episode listing marker.
        """
        try:
            query = self.tvsearch_link % urllib.quote_plus(cleantitle.query(tvshowtitle))
            query = urlparse.urljoin(self.base_link, query)
            result = str(proxy.request(query, 'item'))
            # Pull in the second results page too, when present.
            if 'page=2' in result or 'page%3D2' in result: result += str(proxy.request(query + '&page=2', 'item'))
            result = client.parseDOM(result, 'div', attrs = {'class': 'item'})
            tvshowtitle = 'watch' + cleantitle.get(tvshowtitle)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if any(x in i[1] for x in years)]
            r = [(proxy.parse(i[0]), i[1]) for i in result]
            match = [i[0] for i in r if tvshowtitle == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]
            match2 = [i[0] for i in r]
            match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return
            for i in match2[:5]:
                try:
                    if len(match) > 0: url = match[0] ; break
                    r = proxy.request(urlparse.urljoin(self.base_link, i), 'tv_episode_item')
                    r = re.findall('(tt\d+)', r)
                    if imdb in r: url = i ; break
                except:
                    pass
            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Return the site-relative URL of an episode page, or None.

        Matches by episode title + air date first, then air date alone,
        then the season-N-episode-M URL pattern as a last resort.
        """
        try:
            if url == None: return
            url = urlparse.urljoin(self.base_link, url)
            result = proxy.request(url, 'tv_episode_item')
            result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})
            title = cleantitle.get(title)
            # Reformat the ISO air date to the site's "Month D YYYY" form.
            premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
            premiered = '%s %01d %s' % (premiered[1].replace('01','January').replace('02','February').replace('03','March').replace('04','April').replace('05','May').replace('06','June').replace('07','July').replace('08','August').replace('09','September').replace('10','October').replace('11','November').replace('12','December'), int(premiered[2]), premiered[0])
            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}), client.parseDOM(i, 'span', attrs = {'class': 'tv_num_versions'})) for i in result]
            # Normalize to (href, name-or-None, date-or-None) triples.
            result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
            result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
            result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]
            # Match by title + air date, then air date, then URL pattern.
            url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
            if len(url) == 0: url = [i for i in result if premiered == i[2]]
            if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]
            url = url[0][0]
            url = proxy.parse(url)
            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """Extract hoster links from an episode/movie page.

        Returns a list of source dicts; links are base64-encoded in the
        page's "gtfo" query parameter.  Unknown hosts are skipped.
        """
        try:
            sources = []
            if url == None: return sources
            url = urlparse.urljoin(self.base_link, url)
            result = proxy.request(url, 'link_ite')
            links = client.parseDOM(result, 'table', attrs = {'class': 'link_ite.+?'})
            for i in links:
                try:
                    url = client.parseDOM(i, 'a', ret='href')
                    url = [x for x in url if 'gtfo' in x][-1]
                    url = proxy.parse(url)
                    # The real target is base64-encoded in ?gtfo=...
                    url = urlparse.parse_qs(urlparse.urlparse(url).query)['gtfo'][0]
                    url = base64.b64decode(url)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = host.encode('utf-8')
                    # CAM/TS releases are flagged; everything else is SD.
                    quality = client.parseDOM(i, 'div', attrs = {'class': 'quality'})
                    if any(x in ['[CAM]', '[TS]'] for x in quality): quality = 'CAM'
                    else: quality = 'SD'
                    quality = quality.encode('utf-8')
                    sources.append({'source': host, 'quality': quality, 'provider': 'Watchfree', 'url': url, 'direct': False, 'debridonly': False})
                except:
                    pass
            return sources
        except:
            return sources

    def resolve(self, url):
        """Hoster URLs are already final; return them unchanged."""
        return url
| gpl-2.0 |
flh/odoo | addons/knowledge/res_config.py | 378 | 1436 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class knowledge_config_settings(osv.osv_memory):
    # Transient settings model: OpenERP's res.config.settings machinery
    # applies each ``module_*`` boolean by (un)installing the matching
    # addon when the wizard is saved.
    _name = 'knowledge.config.settings'
    _inherit = 'res.config.settings'
    _columns = {
        'module_document': fields.boolean('Manage documents',
            help='Document indexation, full text search of attachements.\n'
                '-This installs the module document.'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
anoopcs9/samba | python/samba/tests/password_hash_gpgme.py | 5 | 8988 | # Tests for Tests for source4/dsdb/samdb/ldb_modules/password_hash.c
#
# Copyright (C) Catalyst IT Ltd. 2017
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for source4/dsdb/samdb/ldb_modules/password_hash.c
These tests need to be run in an environment in which
io->ac->gpg_key_ids != NULL, so that the gpg supplemental credentials
are generated. The functional level needs to be >= 2008 so that the
kerberos newer keys are generated.
"""
from samba.tests.password_hash import (
PassWordHashTests,
get_package,
USER_PASS
)
from samba.ndr import ndr_unpack
from samba.dcerpc import drsblobs
import binascii
class PassWordHashGpgmeTests(PassWordHashTests):
    """Checks the supplemental credentials generated with gpg key ids set.

    Each test creates a user and then verifies that the expected
    supplemental credential packages were generated, in the expected
    order, and that selected package payloads decode correctly.
    """

    def setUp(self):
        super(PassWordHashGpgmeTests, self).setUp()

    def _assert_packages(self, sc, names):
        """Assert *sc* holds exactly the packages *names*, in that order.

        Returns a dict mapping package name to package object so callers
        can inspect individual payloads.  Factors out the size/position/
        name checks that were previously copy-pasted into every test.
        """
        self.assertEquals(len(names), len(sc.sub.packages))
        found = {}
        for expected_pos, name in enumerate(names, 1):
            (pos, package) = get_package(sc, name)
            self.assertEquals(expected_pos, pos)
            self.assertEquals(name, package.name)
            found[name] = package
        return found

    def _check_wdigest_package(self, package):
        """Unpack a Primary:WDigest package and verify its digest values."""
        digests = ndr_unpack(drsblobs.package_PrimaryWDigestBlob,
                             binascii.a2b_hex(package.data))
        self.check_wdigests(digests)

    def test_default_supplementalCredentials(self):
        self.add_user()
        if not self.lp.get("password hash gpg key ids"):
            self.skipTest("No password hash gpg key ids, " +
                          "Primary:SambaGPG will not be generated")

        sc = self.get_supplemental_creds()
        packages = self._assert_packages(sc, ["Primary:Kerberos-Newer-Keys",
                                              "Primary:Kerberos",
                                              "Primary:WDigest",
                                              "Packages",
                                              "Primary:SambaGPG"])
        self._check_wdigest_package(packages["Primary:WDigest"])

    def test_supplementalCredentials_cleartext(self):
        self.add_user(clear_text=True)
        if not self.lp.get("password hash gpg key ids"):
            self.skipTest("No password hash gpg key ids, " +
                          "Primary:SambaGPG will not be generated")

        sc = self.get_supplemental_creds()
        packages = self._assert_packages(sc, ["Primary:Kerberos-Newer-Keys",
                                              "Primary:Kerberos",
                                              "Primary:WDigest",
                                              "Primary:CLEARTEXT",
                                              "Packages",
                                              "Primary:SambaGPG"])
        self._check_wdigest_package(packages["Primary:WDigest"])

        # Check the clear text value is correct.
        ct = ndr_unpack(drsblobs.package_PrimaryCLEARTEXTBlob,
                        binascii.a2b_hex(packages["Primary:CLEARTEXT"].data))
        self.assertEquals(USER_PASS.encode('utf-16-le'), ct.cleartext)

    def test_userPassword_multiple_hashes(self):
        self.add_user(options=[(
            "password hash userPassword schemes",
            "CryptSHA512 CryptSHA256 CryptSHA512")])

        sc = self.get_supplemental_creds()
        packages = self._assert_packages(sc, ["Primary:Kerberos-Newer-Keys",
                                              "Primary:Kerberos",
                                              "Primary:WDigest",
                                              "Primary:userPassword",
                                              "Packages",
                                              "Primary:SambaGPG"])
        self._check_wdigest_package(packages["Primary:WDigest"])

        # Check that the userPassword hashes are computed correctly.
        # Expect three hashes to be calculated.
        up = ndr_unpack(drsblobs.package_PrimaryUserPasswordBlob,
                        binascii.a2b_hex(packages["Primary:userPassword"].data))
        self.checkUserPassword(up, [
            ("{CRYPT}", "6", None),
            ("{CRYPT}", "5", None),
            ("{CRYPT}", "6", None)
        ])
        self.checkNtHash(USER_PASS, up.current_nt_hash.hash)

    def test_userPassword_multiple_hashes_rounds_specified(self):
        self.add_user(options=[(
            "password hash userPassword schemes",
            "CryptSHA512:rounds=5120 CryptSHA256:rounds=2560 CryptSHA512:rounds=5122")])

        sc = self.get_supplemental_creds()
        packages = self._assert_packages(sc, ["Primary:Kerberos-Newer-Keys",
                                              "Primary:Kerberos",
                                              "Primary:WDigest",
                                              "Primary:userPassword",
                                              "Packages",
                                              "Primary:SambaGPG"])
        self._check_wdigest_package(packages["Primary:WDigest"])

        # Check that the userPassword hashes are computed correctly,
        # honouring the per-scheme rounds counts.
        up = ndr_unpack(drsblobs.package_PrimaryUserPasswordBlob,
                        binascii.a2b_hex(packages["Primary:userPassword"].data))
        self.checkUserPassword(up, [
            ("{CRYPT}", "6", 5120),
            ("{CRYPT}", "5", 2560),
            ("{CRYPT}", "6", 5122)
        ])
        self.checkNtHash(USER_PASS, up.current_nt_hash.hash)
| gpl-3.0 |
timothycrosley/webelements_site | pygments/lexers/sql.py | 16 | 23617 | # -*- coding: utf-8 -*-
"""
pygments.lexers.sql
~~~~~~~~~~~~~~~~~~~
Lexers for various SQL dialects and related interactive sessions.
Postgres specific lexers:
`PostgresLexer`
A SQL lexer for the PostgreSQL dialect. Differences w.r.t. the SQL
lexer are:
- keywords and data types list parsed from the PG docs (run the
`_postgres_builtins` module to update them);
- Content of $-strings parsed using a specific lexer, e.g. the content
of a PL/Python function is parsed using the Python lexer;
- parse PG specific constructs: E-strings, $-strings, U&-strings,
different operators and punctuation.
`PlPgsqlLexer`
A lexer for the PL/pgSQL language. Adds a few specific construct on
top of the PG SQL lexer (such as <<label>>).
`PostgresConsoleLexer`
A lexer to highlight an interactive psql session:
- identifies the prompt and does its best to detect the end of command
in multiline statement where not all the lines are prefixed by a
prompt, telling them apart from the output;
- highlights errors in the output and notification levels;
- handles psql backslash commands.
The ``tests/examplefiles`` contains a few test files with data to be
parsed by these lexers.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from copy import deepcopy
from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups
from pygments.token import Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Generic
from pygments.lexers import get_lexer_by_name, ClassNotFound
from pygments.lexers._postgres_builtins import KEYWORDS, DATATYPES, \
PSEUDO_TYPES, PLPGSQL_KEYWORDS
__all__ = ['PostgresLexer', 'PlPgsqlLexer', 'PostgresConsoleLexer',
'SqlLexer', 'MySqlLexer', 'SqliteConsoleLexer']
line_re = re.compile('.*?\n')
language_re = re.compile(r"\s+LANGUAGE\s+'?(\w+)'?", re.IGNORECASE)
def language_callback(lexer, match):
    """Parse the content of a $-string using a lexer

    The lexer is chosen looking for a nearby LANGUAGE clause.

    Note: this function should have been a `PostgresBase` method, but the
    rules deepcopy fails in this case.
    """
    sublexer = None

    # Look for a LANGUAGE clause right after the $-string first...
    after = language_re.match(lexer.text[match.end():match.end()+100])
    if after is not None:
        sublexer = lexer._get_lexer(after.group(1))
    else:
        # ...otherwise take the closest LANGUAGE clause before it.
        before = list(language_re.finditer(
            lexer.text[max(0, match.start()-100):match.start()]))
        if before:
            sublexer = lexer._get_lexer(before[-1].group(1))

    if not sublexer:
        # No (known) language: emit the whole $-string as a plain string.
        yield (match.start(), String, match.group())
        return

    # Delimiters as strings, body tokenized by the chosen sub-lexer.
    yield (match.start(1), String, match.group(1))
    for token in sublexer.get_tokens_unprocessed(match.group(2)):
        yield token
    yield (match.start(3), String, match.group(3))
class PostgresBase(object):
    """Base class for Postgres-related lexers.

    This is implemented as a mixin to avoid the Lexer metaclass kicking in.
    this way the different lexer don't have a common Lexer ancestor. If they
    had, _tokens could be created on this ancestor and not updated for the
    other classes, resulting e.g. in PL/pgSQL parsed as SQL. This shortcoming
    seem to suggest that regexp lexers are not really subclassable.

    `language_callback` should really be our method, but this breaks deepcopy.
    """
    def get_tokens_unprocessed(self, text, *args):
        # Keep the full text around: `language_callback` needs it to look
        # for the LANGUAGE clause surrounding a $-string.
        self.text = text
        for item in super(PostgresBase, self).get_tokens_unprocessed(
                text, *args):
            yield item

    def _get_lexer(self, lang):
        if lang.lower() == 'sql':
            return get_lexer_by_name('postgresql', **self.options)

        # Try the name as-is, then without a leading "pl" and/or a
        # trailing "u" (e.g. "plpythonu" -> "python").
        candidates = [lang]
        if lang.startswith('pl'):
            candidates.append(lang[2:])
        if lang.endswith('u'):
            candidates.append(lang[:-1])
        if lang.startswith('pl') and lang.endswith('u'):
            candidates.append(lang[2:-1])

        for candidate in candidates:
            try:
                return get_lexer_by_name(candidate, **self.options)
            except ClassNotFound:
                pass
        # TODO: better logging when no lexer matches the language name.
        return None
class PostgresLexer(PostgresBase, RegexLexer):
    """
    Lexer for the PostgreSQL dialect of SQL.

    *New in Pygments 1.5.*
    """

    name = 'PostgreSQL SQL dialect'
    aliases = ['postgresql', 'postgres']
    mimetypes = ['text/x-postgresql']

    flags = re.IGNORECASE
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'--.*?\n', Comment.Single),
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            # Data type names can be multi-word ("double precision"),
            # hence the space -> \s+ substitution.
            (r'(' + '|'.join([s.replace(" ", "\s+")
                              for s in DATATYPES + PSEUDO_TYPES])
                  + r')\b', Name.Builtin),
            (r'(' + '|'.join(KEYWORDS) + r')\b', Keyword),
            (r'[+*/<>=~!@#%^&|`?-]+', Operator),
            (r'::', Operator),  # cast
            (r'\$\d+', Name.Variable),  # positional query parameter
            (r'([0-9]*\.[0-9]*|[0-9]+)(e[+-]?[0-9]+)?', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r"(E|U&)?'(''|[^'])*'", String.Single),
            (r'(U&)?"(""|[^"])*"', String.Name),  # quoted identifier
            # $-quoted strings: the body is handed to a sub-lexer chosen
            # by language_callback from a nearby LANGUAGE clause.
            (r'(?s)(\$[^\$]*\$)(.*?)(\1)', language_callback),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name),

            # psql variable in SQL
            (r""":(['"]?)[a-z][a-z0-9_]*\b\1""", Name.Variable),

            (r'[;:()\[\]\{\},\.]', Punctuation),
        ],
        'multiline-comments': [
            # PostgreSQL block comments nest, hence the state push/pop.
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[^/\*]+', Comment.Multiline),
            (r'[/*]', Comment.Multiline)
        ],
    }
class PlPgsqlLexer(PostgresBase, RegexLexer):
    """
    Handle the extra syntax in Pl/pgSQL language.

    *New in Pygments 1.5.*
    """
    name = 'PL/pgSQL'
    aliases = ['plpgsql']
    mimetypes = ['text/x-plpgsql']

    flags = re.IGNORECASE
    # Start from a copy of the SQL rules; the class body below patches
    # them in place (copied so PostgresLexer itself is untouched).
    tokens = deepcopy(PostgresLexer.tokens)

    # extend the keywords list: replace the SQL keyword rule with one
    # that also knows the PL/pgSQL keywords.
    for i, pattern in enumerate(tokens['root']):
        if pattern[1] == Keyword:
            tokens['root'][i] = (
                r'(' + '|'.join(KEYWORDS + PLPGSQL_KEYWORDS) + r')\b',
                Keyword)
            del i
            break
    else:
        # for/else: only reached if no Keyword rule was found above.
        assert 0, "SQL keywords not found"

    # Add specific PL/pgSQL rules (before the SQL ones)
    tokens['root'][:0] = [
        (r'\%[a-z][a-z0-9_]*\b', Name.Builtin), # actually, a datatype
        (r':=', Operator),
        (r'\<\<[a-z][a-z0-9_]*\>\>', Name.Label),
        (r'\#[a-z][a-z0-9_]*\b', Keyword.Pseudo), # #variable_conflict
    ]
class PsqlRegexLexer(PostgresBase, RegexLexer):
    """
    Extend the PostgresLexer adding support specific for psql commands.

    This is not a complete psql lexer yet as it lacks prompt support
    and output rendering.
    """
    name = 'PostgreSQL console - regexp based lexer'
    aliases = []    # not public

    flags = re.IGNORECASE
    # Reuse the SQL rules and add a rule that enters the 'psql-command'
    # state on a backslash command.
    tokens = deepcopy(PostgresLexer.tokens)

    tokens['root'].append(
        (r'\\[^\s]+', Keyword.Pseudo, 'psql-command'))
    tokens['psql-command'] = [
        (r'\n', Text, 'root'),  # command ends at end of line
        (r'\s+', Text),
        (r'\\[^\s]+', Keyword.Pseudo),
        # psql variable reference, optionally quoted: :name, :'name', :"name"
        (r""":(['"]?)[a-z][a-z0-9_]*\b\1""", Name.Variable),
        (r"'(''|[^'])*'", String.Single),
        (r"`([^`])*`", String.Backtick),
        (r"[^\s]+", String.Symbol),
    ]
re_prompt = re.compile(r'^(\S.*?)??[=\-\(\$\'\"][#>]')
re_psql_command = re.compile(r'\s*\\')
re_end_command = re.compile(r';\s*(--.*?)?$')
re_psql_command = re.compile(r'(\s*)(\\.+?)(\s+)$')
re_error = re.compile(r'(ERROR|FATAL):')
re_message = re.compile(
r'((?:DEBUG|INFO|NOTICE|WARNING|ERROR|'
r'FATAL|HINT|DETAIL|CONTEXT|LINE [0-9]+):)(.*?\n)')
class lookahead(object):
"""Wrap an iterator and allow pushing back an item."""
def __init__(self, x):
self.iter = iter(x)
self._nextitem = None
def __iter__(self):
return self
def send(self, i):
self._nextitem = i
return i
def next(self):
if self._nextitem is not None:
ni = self._nextitem
self._nextitem = None
return ni
return self.iter.next()
class PostgresConsoleLexer(Lexer):
    """
    Lexer for psql sessions.

    The input is split into lines which are consumed in a
    prompt/command/output cycle; `lookahead.send` is used to push an
    output line back when it turns out to start a new prompt.

    NOTE(review): the generator terminates via StopIteration escaping
    from `lines.next()` in the output loop -- fine on the Python 2
    Pygments this was written for, but a RuntimeError under PEP 479
    (Python 3.7+); confirm before porting.

    *New in Pygments 1.5.*
    """

    name = 'PostgreSQL console (psql)'
    aliases = ['psql', 'postgresql-console', 'postgres-console']
    mimetypes = ['text/x-postgresql-psql']

    def get_tokens_unprocessed(self, data):
        sql = PsqlRegexLexer(**self.options)

        lines = lookahead(line_re.findall(data))

        # prompt-output cycle
        while 1:

            # consume the lines of the command: start with an optional prompt
            # and continue until the end of command is detected
            curcode = ''
            insertions = []
            while 1:
                try:
                    line = lines.next()
                except StopIteration:
                    # allow the emission of partially collected items
                    # the repl loop will be broken below
                    break

                # Identify a shell prompt in case of psql commandline example
                if line.startswith('$') and not curcode:
                    lexer = get_lexer_by_name('console', **self.options)
                    for x in lexer.get_tokens_unprocessed(line):
                        yield x
                    break

                # Identify a psql prompt
                mprompt = re_prompt.match(line)
                if mprompt is not None:
                    # Record the prompt as an insertion so it can be
                    # interleaved with the SQL tokens afterwards.
                    insertions.append((len(curcode),
                                       [(0, Generic.Prompt, mprompt.group())]))
                    curcode += line[len(mprompt.group()):]
                else:
                    curcode += line

                # Check if this is the end of the command
                # TODO: better handle multiline comments at the end with
                # a lexer with an external state?
                if re_psql_command.match(curcode) \
                   or re_end_command.search(curcode):
                    break

            # Emit the combined stream of command and prompt(s)
            for item in do_insertions(insertions,
                                      sql.get_tokens_unprocessed(curcode)):
                yield item

            # Emit the output lines
            out_token = Generic.Output
            while 1:
                line = lines.next()
                mprompt = re_prompt.match(line)
                if mprompt is not None:
                    # push the line back to have it processed by the prompt
                    lines.send(line)
                    break

                mmsg = re_message.match(line)
                if mmsg is not None:
                    # Error/fatal notices switch the rest of this output
                    # block to the error token.
                    if mmsg.group(1).startswith("ERROR") \
                       or mmsg.group(1).startswith("FATAL"):
                        out_token = Generic.Error
                    yield (mmsg.start(1), Generic.Strong, mmsg.group(1))
                    yield (mmsg.start(2), out_token, mmsg.group(2))
                else:
                    yield (0, out_token, line)
class SqlLexer(RegexLexer):
    """
    Lexer for Structured Query Language. Currently, this lexer does
    not recognize any special syntax except ANSI SQL.
    """
    name = 'SQL'
    aliases = ['sql']
    filenames = ['*.sql']
    mimetypes = ['text/x-sql']
    flags = re.IGNORECASE
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'--.*?\n', Comment.Single),
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            # NOTE(review): several entries below look like typos inherited
            # from upstream (e.g. COALSECE, CORRESPONTING, ESCEPTION,
            # INDITCATOR, PARAMATER_*, SQLWARNINIG, TRANSATION); they are kept
            # as-is to avoid changing token output -- confirm before fixing.
            # FIX: 'TEMP' previously appeared as '| TEMP|' with a stray
            # leading space; since the r'\s+' rule consumes whitespace first,
            # that alternative could never match and TEMP was never
            # highlighted as a keyword.
            (r'(ABORT|ABS|ABSOLUTE|ACCESS|ADA|ADD|ADMIN|AFTER|AGGREGATE|'
             r'ALIAS|ALL|ALLOCATE|ALTER|ANALYSE|ANALYZE|AND|ANY|ARE|AS|'
             r'ASC|ASENSITIVE|ASSERTION|ASSIGNMENT|ASYMMETRIC|AT|ATOMIC|'
             r'AUTHORIZATION|AVG|BACKWARD|BEFORE|BEGIN|BETWEEN|BITVAR|'
             r'BIT_LENGTH|BOTH|BREADTH|BY|C|CACHE|CALL|CALLED|CARDINALITY|'
             r'CASCADE|CASCADED|CASE|CAST|CATALOG|CATALOG_NAME|CHAIN|'
             r'CHARACTERISTICS|CHARACTER_LENGTH|CHARACTER_SET_CATALOG|'
             r'CHARACTER_SET_NAME|CHARACTER_SET_SCHEMA|CHAR_LENGTH|CHECK|'
             r'CHECKED|CHECKPOINT|CLASS|CLASS_ORIGIN|CLOB|CLOSE|CLUSTER|'
             r'COALSECE|COBOL|COLLATE|COLLATION|COLLATION_CATALOG|'
             r'COLLATION_NAME|COLLATION_SCHEMA|COLUMN|COLUMN_NAME|'
             r'COMMAND_FUNCTION|COMMAND_FUNCTION_CODE|COMMENT|COMMIT|'
             r'COMMITTED|COMPLETION|CONDITION_NUMBER|CONNECT|CONNECTION|'
             r'CONNECTION_NAME|CONSTRAINT|CONSTRAINTS|CONSTRAINT_CATALOG|'
             r'CONSTRAINT_NAME|CONSTRAINT_SCHEMA|CONSTRUCTOR|CONTAINS|'
             r'CONTINUE|CONVERSION|CONVERT|COPY|CORRESPONTING|COUNT|'
             r'CREATE|CREATEDB|CREATEUSER|CROSS|CUBE|CURRENT|CURRENT_DATE|'
             r'CURRENT_PATH|CURRENT_ROLE|CURRENT_TIME|CURRENT_TIMESTAMP|'
             r'CURRENT_USER|CURSOR|CURSOR_NAME|CYCLE|DATA|DATABASE|'
             r'DATETIME_INTERVAL_CODE|DATETIME_INTERVAL_PRECISION|DAY|'
             r'DEALLOCATE|DECLARE|DEFAULT|DEFAULTS|DEFERRABLE|DEFERRED|'
             r'DEFINED|DEFINER|DELETE|DELIMITER|DELIMITERS|DEREF|DESC|'
             r'DESCRIBE|DESCRIPTOR|DESTROY|DESTRUCTOR|DETERMINISTIC|'
             r'DIAGNOSTICS|DICTIONARY|DISCONNECT|DISPATCH|DISTINCT|DO|'
             r'DOMAIN|DROP|DYNAMIC|DYNAMIC_FUNCTION|DYNAMIC_FUNCTION_CODE|'
             r'EACH|ELSE|ENCODING|ENCRYPTED|END|END-EXEC|EQUALS|ESCAPE|EVERY|'
             r'EXCEPT|ESCEPTION|EXCLUDING|EXCLUSIVE|EXEC|EXECUTE|EXISTING|'
             r'EXISTS|EXPLAIN|EXTERNAL|EXTRACT|FALSE|FETCH|FINAL|FIRST|FOR|'
             r'FORCE|FOREIGN|FORTRAN|FORWARD|FOUND|FREE|FREEZE|FROM|FULL|'
             r'FUNCTION|G|GENERAL|GENERATED|GET|GLOBAL|GO|GOTO|GRANT|GRANTED|'
             r'GROUP|GROUPING|HANDLER|HAVING|HIERARCHY|HOLD|HOST|IDENTITY|'
             r'IGNORE|ILIKE|IMMEDIATE|IMMUTABLE|IMPLEMENTATION|IMPLICIT|IN|'
             r'INCLUDING|INCREMENT|INDEX|INDITCATOR|INFIX|INHERITS|INITIALIZE|'
             r'INITIALLY|INNER|INOUT|INPUT|INSENSITIVE|INSERT|INSTANTIABLE|'
             r'INSTEAD|INTERSECT|INTO|INVOKER|IS|ISNULL|ISOLATION|ITERATE|JOIN|'
             r'KEY|KEY_MEMBER|KEY_TYPE|LANCOMPILER|LANGUAGE|LARGE|LAST|'
             r'LATERAL|LEADING|LEFT|LENGTH|LESS|LEVEL|LIKE|LIMIT|LISTEN|LOAD|'
             r'LOCAL|LOCALTIME|LOCALTIMESTAMP|LOCATION|LOCATOR|LOCK|LOWER|'
             r'MAP|MATCH|MAX|MAXVALUE|MESSAGE_LENGTH|MESSAGE_OCTET_LENGTH|'
             r'MESSAGE_TEXT|METHOD|MIN|MINUTE|MINVALUE|MOD|MODE|MODIFIES|'
             r'MODIFY|MONTH|MORE|MOVE|MUMPS|NAMES|NATIONAL|NATURAL|NCHAR|'
             r'NCLOB|NEW|NEXT|NO|NOCREATEDB|NOCREATEUSER|NONE|NOT|NOTHING|'
             r'NOTIFY|NOTNULL|NULL|NULLABLE|NULLIF|OBJECT|OCTET_LENGTH|OF|OFF|'
             r'OFFSET|OIDS|OLD|ON|ONLY|OPEN|OPERATION|OPERATOR|OPTION|OPTIONS|'
             r'OR|ORDER|ORDINALITY|OUT|OUTER|OUTPUT|OVERLAPS|OVERLAY|OVERRIDING|'
             r'OWNER|PAD|PARAMETER|PARAMETERS|PARAMETER_MODE|PARAMATER_NAME|'
             r'PARAMATER_ORDINAL_POSITION|PARAMETER_SPECIFIC_CATALOG|'
             r'PARAMETER_SPECIFIC_NAME|PARAMATER_SPECIFIC_SCHEMA|PARTIAL|'
             r'PASCAL|PENDANT|PLACING|PLI|POSITION|POSTFIX|PRECISION|PREFIX|'
             r'PREORDER|PREPARE|PRESERVE|PRIMARY|PRIOR|PRIVILEGES|PROCEDURAL|'
             r'PROCEDURE|PUBLIC|READ|READS|RECHECK|RECURSIVE|REF|REFERENCES|'
             r'REFERENCING|REINDEX|RELATIVE|RENAME|REPEATABLE|REPLACE|RESET|'
             r'RESTART|RESTRICT|RESULT|RETURN|RETURNED_LENGTH|'
             r'RETURNED_OCTET_LENGTH|RETURNED_SQLSTATE|RETURNS|REVOKE|RIGHT|'
             r'ROLE|ROLLBACK|ROLLUP|ROUTINE|ROUTINE_CATALOG|ROUTINE_NAME|'
             r'ROUTINE_SCHEMA|ROW|ROWS|ROW_COUNT|RULE|SAVE_POINT|SCALE|SCHEMA|'
             r'SCHEMA_NAME|SCOPE|SCROLL|SEARCH|SECOND|SECURITY|SELECT|SELF|'
             r'SENSITIVE|SERIALIZABLE|SERVER_NAME|SESSION|SESSION_USER|SET|'
             r'SETOF|SETS|SHARE|SHOW|SIMILAR|SIMPLE|SIZE|SOME|SOURCE|SPACE|'
             r'SPECIFIC|SPECIFICTYPE|SPECIFIC_NAME|SQL|SQLCODE|SQLERROR|'
             r'SQLEXCEPTION|SQLSTATE|SQLWARNINIG|STABLE|START|STATE|STATEMENT|'
             r'STATIC|STATISTICS|STDIN|STDOUT|STORAGE|STRICT|STRUCTURE|STYPE|'
             r'SUBCLASS_ORIGIN|SUBLIST|SUBSTRING|SUM|SYMMETRIC|SYSID|SYSTEM|'
             r'SYSTEM_USER|TABLE|TABLE_NAME|TEMP|TEMPLATE|TEMPORARY|TERMINATE|'
             r'THAN|THEN|TIMESTAMP|TIMEZONE_HOUR|TIMEZONE_MINUTE|TO|TOAST|'
             r'TRAILING|TRANSATION|TRANSACTIONS_COMMITTED|'
             r'TRANSACTIONS_ROLLED_BACK|TRANSATION_ACTIVE|TRANSFORM|'
             r'TRANSFORMS|TRANSLATE|TRANSLATION|TREAT|TRIGGER|TRIGGER_CATALOG|'
             r'TRIGGER_NAME|TRIGGER_SCHEMA|TRIM|TRUE|TRUNCATE|TRUSTED|TYPE|'
             r'UNCOMMITTED|UNDER|UNENCRYPTED|UNION|UNIQUE|UNKNOWN|UNLISTEN|'
             r'UNNAMED|UNNEST|UNTIL|UPDATE|UPPER|USAGE|USER|'
             r'USER_DEFINED_TYPE_CATALOG|USER_DEFINED_TYPE_NAME|'
             r'USER_DEFINED_TYPE_SCHEMA|USING|VACUUM|VALID|VALIDATOR|VALUES|'
             r'VARIABLE|VERBOSE|VERSION|VIEW|VOLATILE|WHEN|WHENEVER|WHERE|'
             r'WITH|WITHOUT|WORK|WRITE|YEAR|ZONE)\b', Keyword),
            (r'(ARRAY|BIGINT|BINARY|BIT|BLOB|BOOLEAN|CHAR|CHARACTER|DATE|'
             r'DEC|DECIMAL|FLOAT|INT|INTEGER|INTERVAL|NUMBER|NUMERIC|REAL|'
             r'SERIAL|SMALLINT|VARCHAR|VARYING|INT8|SERIAL8|TEXT)\b',
             Name.Builtin),
            (r'[+*/<>=~!@#%^&|`?-]', Operator),
            (r'[0-9]+', Number.Integer),
            # TODO: Backslash escapes?
            (r"'(''|[^'])*'", String.Single),
            (r'"(""|[^"])*"', String.Symbol), # not a real string literal in ANSI SQL
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
            (r'[;:()\[\],\.]', Punctuation)
        ],
        'multiline-comments': [
            # Nested /* ... */ comments push another copy of this state.
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[^/\*]+', Comment.Multiline),
            (r'[/*]', Comment.Multiline)
        ]
    }
class MySqlLexer(RegexLexer):
    """
    Special lexer for MySQL.
    """
    name = 'MySQL'
    aliases = ['mysql']
    mimetypes = ['text/x-mysql']
    flags = re.IGNORECASE
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(#|--\s+).*?\n', Comment.Single),
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            (r'[0-9]+', Number.Integer),
            # FIX: the exponent used to be mandatory and its sign required
            # (r'[0-9]*\.[0-9]+(e[+-][0-9]+)'), so plain floats like 1.5 or
            # 1.5e3 never matched and decomposed into Integer/Punctuation.
            (r'[0-9]*\.[0-9]+(e[+-]?[0-9]+)?', Number.Float),
            # TODO: add backslash escapes
            (r"'(''|[^'])*'", String.Single),
            (r'"(""|[^"])*"', String.Double),
            (r"`(``|[^`])*`", String.Symbol),
            (r'[+*/<>=~!@#%^&|`?-]', Operator),
            (r'\b(tinyint|smallint|mediumint|int|integer|bigint|date|'
             r'datetime|time|bit|bool|tinytext|mediumtext|longtext|text|'
             r'tinyblob|mediumblob|longblob|blob|float|double|double\s+'
             r'precision|real|numeric|dec|decimal|timestamp|year|char|'
             r'varchar|varbinary|varcharacter|enum|set)(\b\s*)(\()?',
             bygroups(Keyword.Type, Text, Punctuation)),
            (r'\b(add|all|alter|analyze|and|as|asc|asensitive|before|between|'
             r'bigint|binary|blob|both|by|call|cascade|case|change|char|'
             r'character|check|collate|column|condition|constraint|continue|'
             r'convert|create|cross|current_date|current_time|'
             r'current_timestamp|current_user|cursor|database|databases|'
             r'day_hour|day_microsecond|day_minute|day_second|dec|decimal|'
             r'declare|default|delayed|delete|desc|describe|deterministic|'
             r'distinct|distinctrow|div|double|drop|dual|each|else|elseif|'
             r'enclosed|escaped|exists|exit|explain|fetch|float|float4|float8'
             r'|for|force|foreign|from|fulltext|grant|group|having|'
             r'high_priority|hour_microsecond|hour_minute|hour_second|if|'
             r'ignore|in|index|infile|inner|inout|insensitive|insert|int|'
             r'int1|int2|int3|int4|int8|integer|interval|into|is|iterate|'
             r'join|key|keys|kill|leading|leave|left|like|limit|lines|load|'
             r'localtime|localtimestamp|lock|long|loop|low_priority|match|'
             r'minute_microsecond|minute_second|mod|modifies|natural|'
             r'no_write_to_binlog|not|numeric|on|optimize|option|optionally|'
             r'or|order|out|outer|outfile|precision|primary|procedure|purge|'
             r'raid0|read|reads|real|references|regexp|release|rename|repeat|'
             r'replace|require|restrict|return|revoke|right|rlike|schema|'
             r'schemas|second_microsecond|select|sensitive|separator|set|'
             r'show|smallint|soname|spatial|specific|sql|sql_big_result|'
             r'sql_calc_found_rows|sql_small_result|sqlexception|sqlstate|'
             r'sqlwarning|ssl|starting|straight_join|table|terminated|then|'
             r'to|trailing|trigger|undo|union|unique|unlock|unsigned|update|'
             r'usage|use|using|utc_date|utc_time|utc_timestamp|values|'
             r'varying|when|where|while|with|write|x509|xor|year_month|'
             r'zerofill)\b', Keyword),
            # TODO: this list is not complete
            (r'\b(auto_increment|engine|charset|tables)\b', Keyword.Pseudo),
            (r'(true|false|null)', Name.Constant),
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(\()',
             bygroups(Name.Function, Text, Punctuation)),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
            (r'@[A-Za-z0-9]*[._]*[A-Za-z0-9]*', Name.Variable),
            (r'[;:()\[\],\.]', Punctuation)
        ],
        'multiline-comments': [
            # Nested /* ... */ comments push another copy of this state.
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[^/\*]+', Comment.Multiline),
            (r'[/*]', Comment.Multiline)
        ]
    }
class SqliteConsoleLexer(Lexer):
    """
    Lexer for example sessions using sqlite3.

    *New in Pygments 0.11.*
    """
    name = 'sqlite3con'
    aliases = ['sqlite3']
    filenames = ['*.sqlite3-console']
    mimetypes = ['text/x-sqlite3-console']
    def get_tokens_unprocessed(self, data):
        sql_lexer = SqlLexer(**self.options)
        pending = ''        # SQL text gathered from prompt lines, not yet lexed
        prompts = []        # insertion records for the prompts of `pending`
        for match in line_re.finditer(data):
            line = match.group()
            if line.startswith('sqlite> ') or line.startswith(' ...> '):
                # A prompt line: remember the prompt token and buffer the SQL.
                prompts.append((len(pending),
                                [(0, Generic.Prompt, line[:8])]))
                pending += line[8:]
                continue
            # A non-prompt line closes any buffered command: emit it first.
            if pending:
                for token_tuple in do_insertions(
                        prompts, sql_lexer.get_tokens_unprocessed(pending)):
                    yield token_tuple
                pending = ''
                prompts = []
            ttype = Generic.Traceback if line.startswith('SQL error: ') \
                else Generic.Output
            yield (match.start(), ttype, line)
        # Flush a command that ran to the end of the input.
        if pending:
            for token_tuple in do_insertions(
                    prompts, sql_lexer.get_tokens_unprocessed(pending)):
                yield token_tuple
| mit |
imply/chuu | chrome/common/extensions/docs/server2/rietveld_patcher_test.py | 32 | 2898 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
from appengine_url_fetcher import AppEngineUrlFetcher
from fake_fetchers import ConfigureFakeFetchers
from file_system import FileNotFoundError
from rietveld_patcher import RietveldPatcher
from svn_constants import EXTENSIONS_PATH
import url_constants
class RietveldPatcherTest(unittest.TestCase):
  '''Exercises RietveldPatcher against canned (fake) codereview fetches.'''

  def setUp(self):
    ConfigureFakeFetchers()
    self._patcher = RietveldPatcher(
        EXTENSIONS_PATH,
        '14096030',
        AppEngineUrlFetcher(url_constants.CODEREVIEW_SERVER))

  def _ReadLocalFile(self, filename):
    # Golden files live in test_data/rietveld_patcher/expected/.
    expected_path = os.path.join(sys.path[0],
                                 'test_data',
                                 'rietveld_patcher',
                                 'expected',
                                 filename)
    with open(expected_path, 'r') as f:
      return f.read()

  def _ApplySingle(self, path, binary=False):
    # Apply() returns a Future of {path: patched content}; unwrap one entry.
    return self._patcher.Apply([path], None, binary).Get()[path]

  def testGetVersion(self):
    self.assertEqual(self._patcher.GetVersion(), '22002')

  def testGetPatchedFiles(self):
    added, deleted, modified = self._patcher.GetPatchedFiles()
    expected_added = [
        u'docs/templates/articles/test_foo.html',
        u'docs/templates/public/extensions/test_foo.html']
    expected_deleted = [
        u'docs/templates/public/extensions/runtime.html']
    expected_modified = [
        u'api/test.json',
        u'docs/templates/json/extensions_sidenav.json']
    self.assertEqual(sorted(added), sorted(expected_added))
    self.assertEqual(sorted(deleted), sorted(expected_deleted))
    self.assertEqual(sorted(modified), sorted(expected_modified))

  def testApply(self):
    article_path = 'docs/templates/articles/test_foo.html'
    # |binary| toggles between str (binary) and unicode (text) results.
    self.assertTrue(isinstance(self._ApplySingle(article_path, True), str),
                    'Expected result is binary. It was text.')
    self.assertTrue(isinstance(self._ApplySingle(article_path), unicode),
                    'Expected result is text. It was binary.')
    # An added file patches to its full expected content.
    self.assertEqual(
        self._ReadLocalFile('test_foo.html'),
        self._ApplySingle(
            'docs/templates/public/extensions/test_foo.html'))
    # A modified file patches to the expected merged content.
    self.assertEqual(
        self._ReadLocalFile('extensions_sidenav.json'),
        self._ApplySingle(
            'docs/templates/json/extensions_sidenav.json'))
    # Applying to a deleted file doesn't throw exceptions. It just returns
    # empty content.
    # self.assertRaises(FileNotFoundError, self._ApplySingle,
    #                   'docs/templates/public/extensions/runtime.html')
    # An unknown path raises.
    self.assertRaises(FileNotFoundError, self._ApplySingle, 'not_existing')
# Allow running this file directly as well as through a test runner.
if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
tpsatish95/Python-Workshop | Python Environment Setup/Alternate/1. Python/1. Installer/Python-3.4.0(Linux)/Tools/i18n/pygettext.py | 41 | 22370 | #! /usr/bin/env python3
# -*- coding: iso-8859-1 -*-
# Originally written by Barry Warsaw <barry@python.org>
#
# Minimally patched to make it even more xgettext compatible
# by Peter Funk <pf@artcom-gmbh.de>
#
# 2002-11-22 Jürgen Hermann <jh@web.de>
# Added checks that _() only contains string literals, and
# command line args are resolved to module lists, i.e. you
# can now pass a filename, a module or package name, or a
# directory (including globbing chars, important for Win32).
# Made docstring fit in 80 chars wide displays using pydoc.
#
# for selftesting
try:
import fintl
_ = fintl.gettext
except ImportError:
_ = lambda s: s
__doc__ = _("""pygettext -- Python equivalent of xgettext(1)
Many systems (Solaris, Linux, Gnu) provide extensive tools that ease the
internationalization of C programs. Most of these tools are independent of
the programming language and can be used from within Python programs.
Martin von Loewis' work[1] helps considerably in this regard.
There's one problem though; xgettext is the program that scans source code
looking for message strings, but it groks only C (or C++). Python
introduces a few wrinkles, such as dual quoting characters, triple quoted
strings, and raw strings. xgettext understands none of this.
Enter pygettext, which uses Python's standard tokenize module to scan
Python source code, generating .pot files identical to what GNU xgettext[2]
generates for C and C++ code. From there, the standard GNU tools can be
used.
A word about marking Python strings as candidates for translation. GNU
xgettext recognizes the following keywords: gettext, dgettext, dcgettext,
and gettext_noop. But those can be a lot of text to include all over your
code. C and C++ have a trick: they use the C preprocessor. Most
internationalized C source includes a #define for gettext() to _() so that
what has to be written in the source is much less. Thus these are both
translatable strings:
gettext("Translatable String")
_("Translatable String")
Python of course has no preprocessor so this doesn't work so well. Thus,
pygettext searches only for _() by default, but see the -k/--keyword flag
below for how to augment this.
[1] http://www.python.org/workshops/1997-10/proceedings/loewis.html
[2] http://www.gnu.org/software/gettext/gettext.html
NOTE: pygettext attempts to be option and feature compatible with GNU
xgettext where ever possible. However some options are still missing or are
not fully implemented. Also, xgettext's use of command line switches with
option arguments is broken, and in these cases, pygettext just defines
additional switches.
Usage: pygettext [options] inputfile ...
Options:
-a
--extract-all
Extract all strings.
-d name
--default-domain=name
Rename the default output file from messages.pot to name.pot.
-E
--escape
Replace non-ASCII characters with octal escape sequences.
-D
--docstrings
Extract module, class, method, and function docstrings. These do
not need to be wrapped in _() markers, and in fact cannot be for
Python to consider them docstrings. (See also the -X option).
-h
--help
Print this help message and exit.
-k word
--keyword=word
Keywords to look for in addition to the default set, which are:
%(DEFAULTKEYWORDS)s
You can have multiple -k flags on the command line.
-K
--no-default-keywords
Disable the default set of keywords (see above). Any keywords
explicitly added with the -k/--keyword option are still recognized.
--no-location
Do not write filename/lineno location comments.
-n
--add-location
Write filename/lineno location comments indicating where each
extracted string is found in the source. These lines appear before
each msgid. The style of comments is controlled by the -S/--style
option. This is the default.
-o filename
--output=filename
Rename the default output file from messages.pot to filename. If
filename is `-' then the output is sent to standard out.
-p dir
--output-dir=dir
Output files will be placed in directory dir.
-S stylename
--style stylename
Specify which style to use for location comments. Two styles are
supported:
Solaris # File: filename, line: line-number
GNU #: filename:line
The style name is case insensitive. GNU style is the default.
-v
--verbose
Print the names of the files being processed.
-V
--version
Print the version of pygettext and exit.
-w columns
--width=columns
Set width of output to columns.
-x filename
--exclude-file=filename
Specify a file that contains a list of strings that are not be
extracted from the input files. Each string to be excluded must
appear on a line by itself in the file.
-X filename
--no-docstrings=filename
Specify a file that contains a list of files (one per line) that
should not have their docstrings extracted. This is only useful in
conjunction with the -D option above.
If `inputfile' is -, standard input is read.
""")
import os
import imp
import sys
import glob
import time
import getopt
import token
import tokenize
__version__ = '1.5'

# Keywords searched for by default; -k/--keyword adds more and
# -K/--no-default-keywords clears this list.
default_keywords = ['_']
DEFAULTKEYWORDS = ', '.join(default_keywords)

# Used to join collected string fragments from adjacent literals.
EMPTYSTRING = ''

# The normal pot-file header. msgmerge and Emacs's po-mode work better if it's
# there.
# (Filled in by TokenEater.write() with time/version/charset/encoding.)
pot_header = _('''\
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR ORGANIZATION
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\\n"
"POT-Creation-Date: %(time)s\\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"
"Language-Team: LANGUAGE <LL@li.org>\\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=%(charset)s\\n"
"Content-Transfer-Encoding: %(encoding)s\\n"
"Generated-By: pygettext.py %(version)s\\n"
''')
def usage(code, msg=''):
    """Print the module help text (and an optional message) to stderr, then exit."""
    stream = sys.stderr
    print(__doc__ % globals(), file=stream)
    if msg:
        print(msg, file=stream)
    sys.exit(code)
def make_escapes(pass_nonascii):
    """Build the module-global escape table and pick the escape function.

    With pass_nonascii true, only codes 0..127 are tabulated (so e.g. 'Höhe'
    keeps its non-ASCII characters); otherwise all 256 byte values get an
    octal escape unless printable ASCII.
    """
    global escapes, escape
    table_size = 128 if pass_nonascii else 256
    escape = escape_ascii if pass_nonascii else escape_nonascii
    escapes = [r"\%03o" % code for code in range(table_size)]
    # Printable ASCII passes through unchanged...
    for code in range(32, 127):
        escapes[code] = chr(code)
    # ...except the characters that must be escaped inside a PO string.
    for ch, replacement in (('\\', r'\\'), ('\t', r'\t'), ('\r', r'\r'),
                            ('\n', r'\n'), ('\"', r'\"')):
        escapes[ord(ch)] = replacement
def escape_ascii(s, encoding):
    """Escape *s* via the global table, passing non-ASCII characters through."""
    pieces = []
    for ch in s:
        code = ord(ch)
        pieces.append(escapes[code] if code < 128 else ch)
    return ''.join(pieces)
def escape_nonascii(s, encoding):
    """Escape every byte of *s* (encoded with *encoding*) via the global table."""
    raw = s.encode(encoding)
    return ''.join(map(escapes.__getitem__, raw))
def safe_eval(s):
    """Evaluate *s* with no builtins in scope -- safely unwraps string literals."""
    empty_globals = {'__builtins__': {}}
    return eval(s, empty_globals, {})
def normalize(s, encoding):
    """Return *s* formatted as a .po msgid value (C-style, possibly multiline)."""
    parts = s.split('\n')
    if len(parts) == 1:
        # Single-line message: a plain quoted string.
        return '"' + escape(s, encoding) + '"'
    # Fold a trailing newline back onto the last real line.
    if not parts[-1]:
        del parts[-1]
        parts[-1] = parts[-1] + '\n'
    escaped = [escape(part, encoding) for part in parts]
    # Multiline form: empty first string, then one quoted string per line.
    return '""\n"' + '\\n"\n"'.join(escaped) + '"'
def containsAny(str, set):
    """Check whether 'str' contains ANY of the chars in 'set'."""
    # any() short-circuits and returns a bool, replacing the original
    # list-materializing idiom "1 in [c in str for c in set]".
    # (Parameter names shadow builtins but are kept for interface stability.)
    return any(c in str for c in set)
def _visit_pyfiles(list, dirname, names):
    """Helper for getFilesForName().

    Signature matches the Python 2 os.path.walk() visitor protocol:
    (accumulator, directory, entry names).  NOTE(review): os.walk() in
    getFilesForName() never invokes a callback like this -- confirm whether
    this helper is still reachable.
    """
    # get extension for python source files
    # (cached lazily in the module-global _py_ext via the imp module;
    # NOTE(review): imp is deprecated/removed in modern Python)
    if '_py_ext' not in globals():
        global _py_ext
        _py_ext = [triple[0] for triple in imp.get_suffixes()
                   if triple[2] == imp.PY_SOURCE][0]
    # don't recurse into CVS directories
    if 'CVS' in names:
        names.remove('CVS')
    # add all *.py files to list
    list.extend(
        [os.path.join(dirname, file) for file in names
         if os.path.splitext(file)[1] == _py_ext]
        )
def _get_modpkg_path(dotted_name, pathlist=None):
    """Get the filesystem path for a module or a package.

    Return the file system path to a file for a module, and to a directory for
    a package. Return None if the name is not found, or is a builtin or
    extension module.

    NOTE(review): relies on the imp module, which is deprecated (removed in
    Python 3.12) -- a port would use importlib.util.find_spec.
    """
    # split off top-most name
    parts = dotted_name.split('.', 1)
    if len(parts) > 1:
        # we have a dotted path, import top-level package
        try:
            file, pathname, description = imp.find_module(parts[0], pathlist)
            if file: file.close()
        except ImportError:
            return None
        # check if it's indeed a package
        if description[2] == imp.PKG_DIRECTORY:
            # recursively handle the remaining name parts,
            # searching only inside the package directory
            pathname = _get_modpkg_path(parts[1], [pathname])
        else:
            # dotted prefix resolved to a plain module: no sub-names possible
            pathname = None
    else:
        # plain name
        try:
            file, pathname, description = imp.find_module(
                dotted_name, pathlist)
            if file:
                file.close()
            # only source modules and packages have useful paths here
            if description[2] not in [imp.PY_SOURCE, imp.PKG_DIRECTORY]:
                pathname = None
        except ImportError:
            pathname = None
    return pathname
def getFilesForName(name):
    """Get a list of module files for a filename, a module or package name,
    or a directory.

    Returns [] when nothing matches.
    """
    if not os.path.exists(name):
        # check for glob chars
        if containsAny(name, "*?[]"):
            files = glob.glob(name)
            list = []
            for file in files:
                list.extend(getFilesForName(file))
            return list
        # try to find module or package
        name = _get_modpkg_path(name)
        if not name:
            return []
    if os.path.isdir(name):
        # find all python files in directory
        list = []
        # BUG FIX: the original called os.walk(name, _visit_pyfiles, list),
        # which is the Python 2 os.path.walk() callback API.  In Python 3,
        # os.walk() is a generator and simply ignored those arguments, so the
        # (never-iterated) walk found no files at all.  Iterate it properly.
        for dirpath, dirnames, filenames in os.walk(name):
            # don't recurse into CVS directories
            if 'CVS' in dirnames:
                dirnames.remove('CVS')
            list.extend(os.path.join(dirpath, f) for f in filenames
                        if os.path.splitext(f)[1] == '.py')
        return list
    elif os.path.exists(name):
        # a single file
        return [name]
    return []
class TokenEater:
    """Token sink for tokenize: a small state machine that collects
    translatable strings (and, optionally, docstrings), keyed by message
    text, recording every (filename, lineno) where each one occurs.
    """
    def __init__(self, options):
        self.__options = options
        self.__messages = {}            # msg -> {(filename, lineno): isdocstring}
        self.__state = self.__waiting   # current state handler
        self.__data = []                # string fragments of the _(...) in progress
        self.__lineno = -1              # line where the current _(...) started
        self.__freshmodule = 1          # still before the first real module token?
        self.__curfile = None           # file currently being scanned
    def __call__(self, ttype, tstring, stup, etup, line):
        # dispatch to the current state handler, passing the token's start line
        ## import token
        ## print >> sys.stderr, 'ttype:', token.tok_name[ttype], \
        ##     'tstring:', tstring
        self.__state(ttype, tstring, stup[0])
    def __waiting(self, ttype, tstring, lineno):
        # Default state: watch for a keyword call (e.g. _) or, if enabled,
        # a module/class/function docstring.
        opts = self.__options
        # Do docstring extractions, if enabled
        if opts.docstrings and not opts.nodocstrings.get(self.__curfile):
            # module docstring?
            if self.__freshmodule:
                if ttype == tokenize.STRING:
                    self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
                    self.__freshmodule = 0
                elif ttype not in (tokenize.COMMENT, tokenize.NL):
                    # any other significant token means the module has no docstring
                    self.__freshmodule = 0
                return
            # class docstring?
            if ttype == tokenize.NAME and tstring in ('class', 'def'):
                self.__state = self.__suiteseen
                return
        if ttype == tokenize.NAME and tstring in opts.keywords:
            self.__state = self.__keywordseen
    def __suiteseen(self, ttype, tstring, lineno):
        # After 'class'/'def': ignore anything until we see the colon
        if ttype == tokenize.OP and tstring == ':':
            self.__state = self.__suitedocstring
    def __suitedocstring(self, ttype, tstring, lineno):
        # First statement of a suite: a STRING here is the docstring.
        # ignore any intervening noise
        if ttype == tokenize.STRING:
            self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
            self.__state = self.__waiting
        elif ttype not in (tokenize.NEWLINE, tokenize.INDENT,
                           tokenize.COMMENT):
            # there was no class docstring
            self.__state = self.__waiting
    def __keywordseen(self, ttype, tstring, lineno):
        # A keyword (e.g. _) was seen; only an opening paren starts extraction.
        if ttype == tokenize.OP and tstring == '(':
            self.__data = []
            self.__lineno = lineno
            self.__state = self.__openseen
        else:
            self.__state = self.__waiting
    def __openseen(self, ttype, tstring, lineno):
        # Inside _(...): accumulate adjacent string literals until ')'.
        if ttype == tokenize.OP and tstring == ')':
            # We've seen the last of the translatable strings.  Record the
            # line number of the first line of the strings and update the list
            # of messages seen.  Reset state for the next batch.  If there
            # were no strings inside _(), then just ignore this entry.
            if self.__data:
                self.__addentry(EMPTYSTRING.join(self.__data))
            self.__state = self.__waiting
        elif ttype == tokenize.STRING:
            self.__data.append(safe_eval(tstring))
        elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                           token.NEWLINE, tokenize.NL]:
            # warn if we see anything else than STRING or whitespace
            print(_(
                '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
                ) % {
                'token': tstring,
                'file': self.__curfile,
                'lineno': self.__lineno
                }, file=sys.stderr)
            self.__state = self.__waiting
    def __addentry(self, msg, lineno=None, isdocstring=0):
        # Record one occurrence of *msg*, unless it is excluded via -x.
        if lineno is None:
            lineno = self.__lineno
        if not msg in self.__options.toexclude:
            entry = (self.__curfile, lineno)
            self.__messages.setdefault(msg, {})[entry] = isdocstring
    def set_filename(self, filename):
        # Called before each input file; resets the module-docstring flag.
        self.__curfile = filename
        self.__freshmodule = 1
    def write(self, fp):
        """Emit all collected messages to *fp* in .pot format."""
        options = self.__options
        timestamp = time.strftime('%Y-%m-%d %H:%M+%Z')
        # The time stamp in the header doesn't have the same format as that
        # generated by xgettext...
        encoding = fp.encoding if fp.encoding else 'UTF-8'
        print(pot_header % {'time': timestamp, 'version': __version__,
                            'charset': encoding,
                            'encoding': '8bit'}, file=fp)
        # Sort the entries.  First sort each particular entry's keys, then
        # sort all the entries by their first item.
        reverse = {}
        for k, v in self.__messages.items():
            keys = sorted(v.keys())
            reverse.setdefault(tuple(keys), []).append((k, v))
        rkeys = sorted(reverse.keys())
        for rkey in rkeys:
            rentries = reverse[rkey]
            rentries.sort()
            for k, v in rentries:
                # If the entry was gleaned out of a docstring, then add a
                # comment stating so.  This is to aid translators who may wish
                # to skip translating some unimportant docstrings.
                isdocstring = any(v.values())
                # k is the message string, v is a dictionary-set of (filename,
                # lineno) tuples.  We want to sort the entries in v first by
                # file name and then by line number.
                v = sorted(v.keys())
                if not options.writelocations:
                    pass
                # location comments are different b/w Solaris and GNU:
                elif options.locationstyle == options.SOLARIS:
                    for filename, lineno in v:
                        d = {'filename': filename, 'lineno': lineno}
                        print(_(
                            '# File: %(filename)s, line: %(lineno)d') % d, file=fp)
                elif options.locationstyle == options.GNU:
                    # fit as many locations on one line, as long as the
                    # resulting line length doesn't exceeds 'options.width'
                    locline = '#:'
                    for filename, lineno in v:
                        d = {'filename': filename, 'lineno': lineno}
                        s = _(' %(filename)s:%(lineno)d') % d
                        if len(locline) + len(s) <= options.width:
                            locline = locline + s
                        else:
                            print(locline, file=fp)
                            locline = "#:" + s
                    if len(locline) > 2:
                        print(locline, file=fp)
                if isdocstring:
                    print('#, docstring', file=fp)
                print('msgid', normalize(k, encoding), file=fp)
                print('msgstr ""\n', file=fp)
def main():
    """Command-line entry point: parse options, scan the input files with a
    TokenEater fed by tokenize, and write the aggregated .pot output."""
    global default_keywords
    try:
        opts, args = getopt.getopt(
            sys.argv[1:],
            'ad:DEhk:Kno:p:S:Vvw:x:X:',
            ['extract-all', 'default-domain=', 'escape', 'help',
             'keyword=', 'no-default-keywords',
             'add-location', 'no-location', 'output=', 'output-dir=',
             'style=', 'verbose', 'version', 'width=', 'exclude-file=',
             'docstrings', 'no-docstrings',
             ])
    except getopt.error as msg:
        usage(1, msg)
    # for holding option values
    class Options:
        # constants
        GNU = 1
        SOLARIS = 2
        # defaults
        extractall = 0 # FIXME: currently this option has no effect at all.
        escape = 0
        keywords = []
        outpath = ''
        outfile = 'messages.pot'
        writelocations = 1
        locationstyle = GNU
        verbose = 0
        width = 78
        excludefilename = ''
        docstrings = 0
        nodocstrings = {}
    options = Options()
    locations = {'gnu' : options.GNU,
                 'solaris' : options.SOLARIS,
                 }
    # parse options
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-a', '--extract-all'):
            options.extractall = 1
        elif opt in ('-d', '--default-domain'):
            options.outfile = arg + '.pot'
        elif opt in ('-E', '--escape'):
            options.escape = 1
        elif opt in ('-D', '--docstrings'):
            options.docstrings = 1
        elif opt in ('-k', '--keyword'):
            options.keywords.append(arg)
        elif opt in ('-K', '--no-default-keywords'):
            default_keywords = []
        elif opt in ('-n', '--add-location'):
            options.writelocations = 1
        elif opt in ('--no-location',):
            options.writelocations = 0
        elif opt in ('-S', '--style'):
            options.locationstyle = locations.get(arg.lower())
            if options.locationstyle is None:
                usage(1, _('Invalid value for --style: %s') % arg)
        elif opt in ('-o', '--output'):
            options.outfile = arg
        elif opt in ('-p', '--output-dir'):
            options.outpath = arg
        elif opt in ('-v', '--verbose'):
            options.verbose = 1
        elif opt in ('-V', '--version'):
            print(_('pygettext.py (xgettext for Python) %s') % __version__)
            sys.exit(0)
        elif opt in ('-w', '--width'):
            try:
                options.width = int(arg)
            except ValueError:
                usage(1, _('--width argument must be an integer: %s') % arg)
        elif opt in ('-x', '--exclude-file'):
            options.excludefilename = arg
        elif opt in ('-X', '--no-docstrings'):
            # each line of the file names one input file whose docstrings
            # should NOT be extracted
            fp = open(arg)
            try:
                while 1:
                    line = fp.readline()
                    if not line:
                        break
                    options.nodocstrings[line[:-1]] = 1
            finally:
                fp.close()
    # calculate escapes
    make_escapes(not options.escape)
    # calculate all keywords
    options.keywords.extend(default_keywords)
    # initialize list of strings to exclude
    if options.excludefilename:
        try:
            fp = open(options.excludefilename)
            options.toexclude = fp.readlines()
            fp.close()
        except IOError:
            print(_(
                "Can't read --exclude-file: %s") % options.excludefilename, file=sys.stderr)
            sys.exit(1)
    else:
        options.toexclude = []
    # resolve args to module lists
    expanded = []
    for arg in args:
        if arg == '-':
            expanded.append(arg)
        else:
            expanded.extend(getFilesForName(arg))
    args = expanded
    # slurp through all the files
    eater = TokenEater(options)
    for filename in args:
        if filename == '-':
            if options.verbose:
                print(_('Reading standard input'))
            fp = sys.stdin.buffer
            closep = 0
        else:
            if options.verbose:
                print(_('Working on %s') % filename)
            fp = open(filename, 'rb')
            closep = 1
        try:
            eater.set_filename(filename)
            try:
                # tokenize drives the TokenEater state machine token by token
                tokens = tokenize.tokenize(fp.readline)
                for _token in tokens:
                    eater(*_token)
            except tokenize.TokenError as e:
                print('%s: %s, line %d, column %d' % (
                    e.args[0], filename, e.args[1][0], e.args[1][1]),
                    file=sys.stderr)
        finally:
            if closep:
                fp.close()
    # write the output
    if options.outfile == '-':
        fp = sys.stdout
        closep = 0
    else:
        if options.outpath:
            options.outfile = os.path.join(options.outpath, options.outfile)
        fp = open(options.outfile, 'w')
        closep = 1
    try:
        eater.write(fp)
    finally:
        if closep:
            fp.close()
if __name__ == '__main__':
    main()
# some more test strings
# (self-test fodder for running pygettext on its own source)
# this one creates a warning
_('*** Seen unexpected token "%(token)s"') % {'token': 'test'}
_('more' 'than' 'one' 'string')
| apache-2.0 |
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.3/django/contrib/gis/sitemaps/georss.py | 291 | 2156 | from django.core import urlresolvers
from django.contrib.sitemaps import Sitemap
class GeoRSSSitemap(Sitemap):
    """
    A minimal hook to produce sitemaps for GeoRSS feeds.
    """
    def __init__(self, feed_dict, slug_dict=None):
        """
        Initialize from a feed dictionary (as would be passed to
        `django.contrib.syndication.views.feed`) and an optional slug
        dictionary.

        Without a slug dictionary, each feed-dict key is assumed to be the
        URL parameter for that feed.  For a complex feed (e.g. one that
        overrides `get_object`), supply a slug dictionary keyed like the
        feed dictionary, where each value is a sequence of valid slugs.
        For example, for a feed serving objects per ZIP code:

            feed_dict = {'zipcode' : ZipFeed}

        the slug dictionary lists the ZIP slugs to expose in the sitemap:

            slug_dict = {'zipcode' : ['77002', '77054']}
        """
        self.feed_dict = feed_dict
        self.locations = []
        slugs = slug_dict or {}
        # Build the location list: "<section>/<slug>" entries when slugs are
        # configured for a section, otherwise the bare section name.
        for section in feed_dict.keys():
            section_slugs = slugs.get(section)
            if section_slugs:
                self.locations.extend(
                    '%s/%s' % (section, slug) for slug in section_slugs)
            else:
                self.locations.append(section)
    def get_urls(self, page=1, site=None):
        """
        Overridden so every URL element carries the 'georss' geo_format.
        """
        urls = Sitemap.get_urls(self, page=page, site=site)
        for url in urls:
            url['geo_format'] = 'georss'
        return urls
    def items(self):
        return self.locations
    def location(self, obj):
        return urlresolvers.reverse('django.contrib.syndication.views.feed', args=(obj,))
| bsd-3-clause |
globocom/tapioca | tests/unit/test_generate_swagger_specification.py | 1 | 3973 | from json import loads
from unittest import TestCase
from tapioca.spec import SwaggerSpecification, APISpecification, \
Resource, Path, Method, Param
class SwaggerGenerationTestCase(TestCase):
    """Tests for SwaggerSpecification: rendering an APISpecification
    into a Swagger 1.1 JSON document, both the top-level resource
    listing and per-resource API declarations."""
    def apply_generation(self, api, resource=None):
        """Generate the swagger document (optionally scoped to one
        resource) and return it parsed from JSON."""
        result = SwaggerSpecification(api).generate(resource)
        return loads(result)
    def test_general_info(self):
        """Top-level fields are derived from the API version and base URL."""
        api = APISpecification(version='v1', base_url='http://api.globo.com')
        result = self.apply_generation(api)
        assert result['apiVersion'] == 'v1'
        assert result['swaggerVersion'] == '1.1'
        assert result['basePath'] == 'http://api.globo.com/v1'
        assert result['apis'] == []
        assert result['models'] == []
    def test_gen_spec_generic_with_resource(self):
        """The resource listing points at a discovery URL per resource."""
        api = APISpecification(version='v1', base_url='http://api.globo.com')
        api.add_resource(
            Resource('comments',
                paths=[
                    Path('/comments', methods=[Method('GET')])
                ]))
        result = self.apply_generation(api)
        assert len(result['apis']) == 1
        assert result['apis'][0]['path'] == '/discovery/comments.swagger'
        assert result['apis'][0]['description'] == ''
    def test_gen_spec_for_a_resource(self):
        """A per-resource declaration describes its paths and operations,
        with nicknames of the form <method>_<resource>."""
        api = APISpecification(version='v1', base_url='http://api.globo.com')
        resource_name = 'comments'
        api.add_resource(
            Resource(resource_name,
                paths=[
                    Path('/comments', methods=[Method('GET')])
                ]))
        result = self.apply_generation(api, resource_name)
        assert result['resourcePath'] == '/comments'
        assert len(result['apis']) == 1
        assert result['apis'][0]['path'] == '/comments'
        assert result['apis'][0]['description'] == ''
        assert len(result['apis'][0]['operations']) == 1
        operation = result['apis'][0]['operations'][0]
        assert operation['httpMethod'] == 'GET'
        assert operation['nickname'] == 'get_comments'
        assert operation['parameters'] == []
        assert operation['summary'] == ''
        assert operation['notes'] == ''
        assert operation['errorResponses'] == []
    def test_gen_spec_for_put_method(self):
        """Scoping to one resource ignores the others; PUT is supported."""
        api = APISpecification(version='v1', base_url='http://api.globo.com')
        api.add_resource(Resource('dogs',
            paths=[
                Path('/dogs', methods=[
                    Method('PUT')])]))
        api.add_resource(Resource('cats',
            paths=[
                Path('/cats', methods=[
                    Method('PUT')])]))
        result = self.apply_generation(api, 'cats')
        assert result['resourcePath'] == '/cats'
        assert result['apis'][0]['path'] == '/cats'
        operation = result['apis'][0]['operations'][0]
        assert operation['httpMethod'] == 'PUT'
        assert operation['nickname'] == 'put_cats'
    def test_gen_spec_with_params(self):
        """Path params become swagger parameters of paramType 'path',
        required, non-multiple, typed String."""
        api = APISpecification(version='v1', base_url='http://api.globo.com')
        api.add_resource(Resource('dogs',
            paths=[
                Path('/dogs/{key}',
                    params=[
                        Param('key')
                    ],
                    methods=[
                        Method('GET')
                    ]
                ),
            ])
        )
        result = self.apply_generation(api, 'dogs')
        assert result['apis'][0]['path'] == '/dogs/{key}'
        operation = result['apis'][0]['operations'][0]
        assert len(operation['parameters']) == 1
        assert operation['parameters'][0]['name'] == 'key'
        assert operation['parameters'][0]['paramType'] == 'path'
        assert operation['parameters'][0]['description'] == ''
        assert operation['parameters'][0]['dataType'] == 'String'
        assert operation['parameters'][0]['required'] == True
        assert operation['parameters'][0]['allowMultiple'] == False
| mit |
RPGOne/Skynet | numpy-master/numpy/random/info.py | 128 | 5199 | """
========================
Random Number Generation
========================
==================== =========================================================
Utility functions
==============================================================================
random_sample Uniformly distributed floats over ``[0, 1)``.
random Alias for `random_sample`.
bytes Uniformly distributed random bytes.
random_integers Uniformly distributed integers in a given range.
permutation Randomly permute a sequence / generate a random sequence.
shuffle Randomly permute a sequence in place.
seed Seed the random number generator.
choice Random sample from 1-D array.
==================== =========================================================
==================== =========================================================
Compatibility functions
==============================================================================
rand Uniformly distributed values.
randn Normally distributed values.
ranf Uniformly distributed floating point numbers.
randint Uniformly distributed integers in a given range.
==================== =========================================================
==================== =========================================================
Univariate distributions
==============================================================================
beta Beta distribution over ``[0, 1]``.
binomial Binomial distribution.
chisquare :math:`\\chi^2` distribution.
exponential Exponential distribution.
f F (Fisher-Snedecor) distribution.
gamma Gamma distribution.
geometric Geometric distribution.
gumbel Gumbel distribution.
hypergeometric Hypergeometric distribution.
laplace Laplace distribution.
logistic Logistic distribution.
lognormal Log-normal distribution.
logseries Logarithmic series distribution.
negative_binomial Negative binomial distribution.
noncentral_chisquare Non-central chi-square distribution.
noncentral_f Non-central F distribution.
normal Normal / Gaussian distribution.
pareto Pareto distribution.
poisson Poisson distribution.
power Power distribution.
rayleigh Rayleigh distribution.
triangular Triangular distribution.
uniform Uniform distribution.
vonmises Von Mises circular distribution.
wald Wald (inverse Gaussian) distribution.
weibull Weibull distribution.
zipf Zipf's distribution over ranked data.
==================== =========================================================
==================== =========================================================
Multivariate distributions
==============================================================================
dirichlet Multivariate generalization of Beta distribution.
multinomial Multivariate generalization of the binomial distribution.
multivariate_normal Multivariate generalization of the normal distribution.
==================== =========================================================
==================== =========================================================
Standard distributions
==============================================================================
standard_cauchy Standard Cauchy-Lorentz distribution.
standard_exponential Standard exponential distribution.
standard_gamma Standard Gamma distribution.
standard_normal Standard normal distribution.
standard_t Standard Student's t-distribution.
==================== =========================================================
==================== =========================================================
Internal functions
==============================================================================
get_state Get tuple representing internal state of generator.
set_state Set state of generator.
==================== =========================================================
"""
from __future__ import division, absolute_import, print_function
# Consumed by numpy's build/setup machinery: numpy.random depends on numpy.core.
depends = ['core']
# Public names re-exported from the numpy.random package.
__all__ = [
    'beta',
    'binomial',
    'bytes',
    'chisquare',
    'choice',
    'dirichlet',
    'exponential',
    'f',
    'gamma',
    'geometric',
    'get_state',
    'gumbel',
    'hypergeometric',
    'laplace',
    'logistic',
    'lognormal',
    'logseries',
    'multinomial',
    'multivariate_normal',
    'negative_binomial',
    'noncentral_chisquare',
    'noncentral_f',
    'normal',
    'pareto',
    'permutation',
    'poisson',
    'power',
    'rand',
    'randint',
    'randn',
    'random_integers',
    'random_sample',
    'rayleigh',
    'seed',
    'set_state',
    'shuffle',
    'standard_cauchy',
    'standard_exponential',
    'standard_gamma',
    'standard_normal',
    'standard_t',
    'triangular',
    'uniform',
    'vonmises',
    'wald',
    'weibull',
    'zipf'
]
| bsd-3-clause |
songyi199111/sentry | tests/sentry/api/endpoints/test_organization_index.py | 26 | 1622 | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from exam import fixture
from sentry.models import Organization
from sentry.testutils import APITestCase
class OrganizationsListTest(APITestCase):
    """GET /api/0/organizations/ lists the organizations visible to the
    authenticated user."""
    @fixture
    def path(self):
        return reverse('sentry-api-0-organizations')
    def test_simple(self):
        """An organization owner sees their org in the listing."""
        org = self.create_organization(owner=self.user)
        self.login_as(user=self.user)
        response = self.client.get(self.path)
        assert response.status_code == 200
        assert len(response.data) == 1
        assert response.data[0]['id'] == str(org.id)
class OrganizationsCreateTest(APITestCase):
    """POST /api/0/organizations/ creates an organization; the slug is
    derived from the name when not provided."""
    @fixture
    def path(self):
        return reverse('sentry-api-0-organizations')
    def test_missing_params(self):
        """A request without a name is rejected with 400."""
        self.login_as(user=self.user)
        resp = self.client.post(self.path)
        assert resp.status_code == 400
    def test_valid_params(self):
        """Name and explicit slug are persisted verbatim."""
        self.login_as(user=self.user)
        resp = self.client.post(self.path, data={
            'name': 'hello world',
            'slug': 'foobar',
        })
        assert resp.status_code == 201, resp.content
        org = Organization.objects.get(id=resp.data['id'])
        assert org.name == 'hello world'
        assert org.slug == 'foobar'
    def test_without_slug(self):
        """When the slug is omitted it is slugified from the name."""
        self.login_as(user=self.user)
        resp = self.client.post(self.path, data={
            'name': 'hello world',
        })
        assert resp.status_code == 201, resp.content
        org = Organization.objects.get(id=resp.data['id'])
        assert org.slug == 'hello-world'
| bsd-3-clause |
binoculars/osf.io | osf/management/commands/backfill_date_retracted.py | 28 | 3634 | # -*- coding: utf-8 -*-
# This is a management command, rather than a migration script, for two primary reasons:
# 1. It makes no changes to database structure (e.g. AlterField), only database content.
# 2. It may need to be ran more than once, as it skips failed registrations.
from __future__ import unicode_literals
from datetime import timedelta
import logging
import django
django.setup()
from django.core.management.base import BaseCommand
from django.db import transaction
from osf.models import Registration, Retraction, Sanction
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def set_date_retracted(*args):
    """Backfill ``Retraction.date_retracted`` from each registration's
    ``retraction_approved`` node log.

    Skips registrations whose ``registered_from`` node is missing (failed
    registrations).  Raises if no matching log exists, or if multiple logs
    exist that are more than 5 seconds apart (ambiguous source of truth).
    """
    registrations = (
        Registration.objects.filter(retraction__state=Sanction.APPROVED, retraction__date_retracted=None)
        .select_related('retraction')
        .include('registered_from__logs')
        .include('registered_from__guids')
    )
    total = registrations.count()
    logger.info('Migrating {} retractions.'.format(total))
    for registration in registrations:
        if not registration.registered_from:
            logger.warn('Skipping failed registration {}'.format(registration._id))
            continue
        retraction_logs = registration.registered_from.logs.filter(action='retraction_approved', params__retraction_id=registration.retraction._id)
        log_count = retraction_logs.count()
        # BUG FIX: the previous combined check evaluated
        # `retraction_logs.first().date` even when no log existed, crashing
        # with AttributeError on None instead of raising the intended message.
        if log_count == 0:
            raise Exception(
                'There should be a retraction_approved log for retraction {} on node {}. No retraction_approved log found.'.format(
                    registration.retraction._id, registration.registered_from._id
                )
            )
        # Multiple logs are tolerated only when they were written within a
        # 5-second window (duplicate writes of the same event).
        if log_count > 1 and retraction_logs.first().date - retraction_logs.last().date > timedelta(seconds=5):
            raise Exception(
                'There should only be one retraction_approved log for retraction {} on node {}. Multiple logs found.'.format(
                    registration.retraction._id, registration.registered_from._id
                )
            )
        date_retracted = retraction_logs[0].date
        logger.info(
            'Setting date_retracted for retraction {} to be {}, from retraction_approved node log {}.'.format(
                registration.retraction._id, date_retracted, retraction_logs[0]._id
            )
        )
        registration.retraction.date_retracted = date_retracted
        registration.retraction.save()
def unset_date_retracted(*args):
    """Reverse migration: clear date_retracted on all approved retractions."""
    retractions = Retraction.objects.filter(state=Sanction.APPROVED).exclude(date_retracted=None)
    logger.info('Migrating {} retractions.'.format(retractions.count()))
    for retraction in retractions:
        retraction.date_retracted = None
        retraction.save()
class Command(BaseCommand):
    """
    Backfill Retraction.date_retracted with `RETRACTION_APPROVED` log date.
    """
    def add_arguments(self, parser):
        # --dry runs inside the transaction and rolls everything back;
        # --reverse clears date_retracted instead of setting it.
        super(Command, self).add_arguments(parser)
        parser.add_argument(
            '--dry',
            action='store_true',
            dest='dry_run',
            help='Run migration and roll back changes to db',
        )
        parser.add_argument(
            '--reverse',
            action='store_true',
            dest='reverse',
            help='Unsets date_retraction'
        )
    def handle(self, *args, **options):
        reverse = options.get('reverse', False)
        dry_run = options.get('dry_run', False)
        if not dry_run:
            # Non-dry runs also log changes to a file for auditability.
            script_utils.add_file_logger(logger, __file__)
        with transaction.atomic():
            if reverse:
                unset_date_retracted()
            else:
                set_date_retracted()
            if dry_run:
                # Raising aborts the atomic block, rolling back all writes.
                raise RuntimeError('Dry run, transaction rolled back.')
| apache-2.0 |
Tillerino/oppai | pyoppai/example.py | 2 | 4228 | import sys
import os
import pyoppai
def print_pp(acc, pp, aim_pp, speed_pp, acc_pp):
    """Print a pp breakdown (aim/speed/acc/total) for the given accuracy."""
    template = "\n%.17g aim\n%.17g speed\n%.17g acc\n%.17g pp\nfor %.17g%%"
    print(template % (aim_pp, speed_pp, acc_pp, pp, acc))
def print_diff(stars, aim, speed):
    """Print the total, aim and speed star ratings."""
    template = "\n%.17g stars\n%.17g aim stars\n%.17g speed stars"
    print(template % (stars, aim, speed))
def chk(ctx):
    """Exit the process (status 1) with the error message if the last
    pyoppai call on ctx failed; no-op otherwise."""
    err = pyoppai.err(ctx)
    if err:
        print(err)
        sys.exit(1)
def main():
    """Example driver: parse the .osu file given on the command line, then
    demonstrate difficulty and pp calculation, including overriding OD/AR/CS
    and applying mods (each override is restored afterwards)."""
    if len(sys.argv) != 2:
        print("Usage: " + sys.argv[0] + " file.osu")
        sys.exit(1)
    # if you need to multithread, create one ctx and buffer for each thread
    ctx = pyoppai.new_ctx()
    # parse beatmap ------------------------------------------------------------
    b = pyoppai.new_beatmap(ctx)
    BUFSIZE = 2000000 # should be big enough to hold the .osu file
    buf = pyoppai.new_buffer(BUFSIZE)
    pyoppai.parse(
        sys.argv[1],
        b,
        buf,
        BUFSIZE,
        # don't disable caching and use python script's folder for caching
        False,
        os.path.dirname(os.path.realpath(__file__))
    );
    chk(ctx)
    print("Cache folder: " + os.path.dirname(os.path.realpath(__file__)) + "\n")
    cs, od, ar, hp = pyoppai.stats(b)
    print(
        "%s - %s [%s] (by %s)\n"
        "CS%g OD%g AR%g HP%g\n"
        "%d objects (%d circles, %d sliders, %d spinners)\n"
        "max combo: %d" %
        (
            pyoppai.artist(b),
            pyoppai.title(b),
            pyoppai.version(b),
            pyoppai.creator(b),
            cs, od, ar, hp,
            pyoppai.num_objects(b),
            pyoppai.num_circles(b),
            pyoppai.num_sliders(b),
            pyoppai.num_spinners(b),
            pyoppai.max_combo(b)
        )
    )
    # diff calc ----------------------------------------------------------------
    dctx = pyoppai.new_d_calc_ctx(ctx)
    stars, aim, speed, _, _, _, _ = pyoppai.d_calc(dctx, b)
    chk(ctx)
    print_diff(stars, aim, speed)
    # pp calc ------------------------------------------------------------------
    acc, pp, aim_pp, speed_pp, acc_pp = \
        pyoppai.pp_calc(ctx, aim, speed, b)
    chk(ctx)
    print_pp(acc, pp, aim_pp, speed_pp, acc_pp)
    # pp calc (with acc %) -----------------------------------------------------
    acc, pp, aim_pp, speed_pp, acc_pp = \
        pyoppai.pp_calc_acc(ctx, aim, speed, b, 90.0)
    chk(ctx)
    print_pp(acc, pp, aim_pp, speed_pp, acc_pp)
    # override OD example ------------------------------------------------------
    # OD only affects pp, not star rating, so no diff recalculation is done.
    print("\n----\nIf the map was od10:")
    pyoppai.set_od(b, 10)
    acc, pp, aim_pp, speed_pp, acc_pp = \
        pyoppai.pp_calc(ctx, aim, speed, b)
    chk(ctx)
    print_pp(acc, pp, aim_pp, speed_pp, acc_pp)
    pyoppai.set_od(b, od)
    # override AR example ------------------------------------------------------
    print("\n----\nIf the map was ar11:")
    pyoppai.set_ar(b, 11)
    acc, pp, aim_pp, speed_pp, acc_pp = \
        pyoppai.pp_calc(ctx, aim, speed, b)
    chk(ctx)
    print_pp(acc, pp, aim_pp, speed_pp, acc_pp)
    pyoppai.set_ar(b, ar)
    # override CS example ------------------------------------------------------
    print("\n----\nIf the map was cs6.5:")
    pyoppai.set_cs(b, 6.5)
    # remember that CS is map-changing so difficulty must be recomputed
    stars, aim, speed, _, _, _, _ = pyoppai.d_calc(dctx, b)
    chk(ctx)
    print_diff(stars, aim, speed)
    acc, pp, aim_pp, speed_pp, acc_pp = \
        pyoppai.pp_calc(ctx, aim, speed, b)
    chk(ctx)
    print_pp(acc, pp, aim_pp, speed_pp, acc_pp)
    pyoppai.set_cs(b, cs)
    # mods example -------------------------------------------------------------
    print("\n----\nWith HDHR:")
    # mods are a bitmask, same as what the osu! api uses
    mods = pyoppai.hd | pyoppai.hr
    pyoppai.apply_mods(b, mods)
    # mods are map-changing, recompute diff
    stars, aim, speed, _, _, _, _ = pyoppai.d_calc(dctx, b)
    chk(ctx)
    print_diff(stars, aim, speed)
    acc, pp, aim_pp, speed_pp, acc_pp = \
        pyoppai.pp_calc(ctx, aim, speed, b, mods)
    chk(ctx)
    print_pp(acc, pp, aim_pp, speed_pp, acc_pp)
# This file is a runnable example, executed on import as well.
main()
| gpl-3.0 |
chipaca/snapcraft | snapcraft/plugins/v2/qmake.py | 2 | 3297 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2020 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The qmake plugin is useful for building qmake-based parts.
These are projects that are built using .pro files.
This plugin uses the common plugin keywords as well as those for "sources".
For more information check the 'plugins' topic for the former and the
'sources' topic for the latter.
Additionally, this plugin uses the following plugin-specific keywords:
- qmake-parameters:
(list of strings)
additional options to pass to the qmake invocation.
- qmake-project-file:
(string)
the qmake project file to use. This is usually only needed if
qmake can not determine what project file to use on its own.
"""
from typing import Any, Dict, List, Set
from snapcraft.plugins.v2 import PluginV2
class QMakePlugin(PluginV2):
    """Build support for qmake (.pro file) based parts.

    Plugin-specific part properties:
      qmake-parameters: extra options appended to the qmake invocation.
      qmake-project-file: explicit project file, for when qmake cannot
        determine which one to use on its own.
    """

    @classmethod
    def get_schema(cls) -> Dict[str, Any]:
        """Return the JSON schema validating this plugin's properties."""
        return {
            "$schema": "http://json-schema.org/draft-04/schema#",
            "type": "object",
            "additionalProperties": False,
            "properties": {
                "qmake-parameters": {
                    "type": "array",
                    "uniqueItems": True,
                    "items": {"type": "string"},
                    "default": [],
                },
                "qmake-project-file": {"type": "string", "default": ""},
            },
        }

    def get_build_snaps(self) -> Set[str]:
        """No snaps are needed at build time."""
        return set()

    def get_build_packages(self) -> Set[str]:
        """Debian packages required to run qmake and compile."""
        return {"g++", "make", "qt5-qmake"}

    def get_build_environment(self) -> Dict[str, str]:
        """Make the qtchooser wrappers select Qt 5."""
        return {"QT_SELECT": "qt5"}

    @property
    def out_of_source_build(self):
        """qmake parts always build outside the source tree."""
        return True

    def _get_qmake_configure_command(self) -> str:
        """Assemble the configure-step command line, propagating the part's
        C/C++/linker flags into the generated Makefile."""
        command = [
            "qmake",
            'QMAKE_CFLAGS+="${CFLAGS:-}"',
            'QMAKE_CXXFLAGS+="${CXXFLAGS:-}"',
            'QMAKE_LFLAGS+="${LDFLAGS:-}"',
        ]
        command.extend(self.options.qmake_parameters)
        project_file = self.options.qmake_project_file
        if project_file:
            command.append(
                '"${{SNAPCRAFT_PART_SRC_WORK}}/{}"'.format(project_file)
            )
        else:
            command.append('"${SNAPCRAFT_PART_SRC_WORK}"')
        return " ".join(command)

    def get_build_commands(self) -> List[str]:
        """Shell commands for the build step: configure, build, install."""
        return [
            self._get_qmake_configure_command(),
            # Avoid overriding the CFLAGS and CXXFLAGS environment
            # variables qmake sets in the generated Makefile
            'env -u CFLAGS -u CXXFLAGS make -j"${SNAPCRAFT_PARALLEL_BUILD_COUNT}"',
            'make install INSTALL_ROOT="${SNAPCRAFT_PART_INSTALL}"',
        ]
| gpl-3.0 |
toshywoshy/ansible | lib/ansible/module_utils/network/ios/providers/module.py | 20 | 2106 | #
# (c) 2019, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.ios.providers import providers
from ansible.module_utils._text import to_text
class NetworkModule(AnsibleModule):
    """AnsibleModule specialization that lazily resolves a platform
    provider implementation over a persistent network connection."""
    # When True, a missing provider aborts the module; subclasses may set
    # False to only warn instead.
    fail_on_missing_provider = True
    def __init__(self, connection=None, *args, **kwargs):
        # Accept a pre-built Connection (e.g. for tests); otherwise attach
        # to the task's persistent connection socket.
        super(NetworkModule, self).__init__(*args, **kwargs)
        if connection is None:
            connection = Connection(self._socket_path)
        self.connection = connection
    @property
    def provider(self):
        """Resolve (once, then cache on self._provider) the provider class
        matching the remote device's network_os and connection type."""
        if not hasattr(self, '_provider'):
            capabilities = self.from_json(self.connection.get_capabilities())
            network_os = capabilities['device_info']['network_os']
            network_api = capabilities['network_api']
            if network_api == 'cliconf':
                connection_type = 'network_cli'
            # NOTE(review): connection_type is only bound when network_api is
            # 'cliconf'; any other API would raise NameError on the next line.
            # Presumably callers guarantee cliconf here -- confirm.
            cls = providers.get(network_os, self._name.split('.')[-1], connection_type)
            if not cls:
                msg = 'unable to find suitable provider for network os %s' % network_os
                if self.fail_on_missing_provider:
                    self.fail_json(msg=msg)
                else:
                    self.warn(msg)
            obj = cls(self.params, self.connection, self.check_mode)
            setattr(self, '_provider', obj)
        return getattr(self, '_provider')
    def get_facts(self, subset=None):
        """Delegate fact collection to the provider; failures are reported
        through fail_json."""
        try:
            self.provider.get_facts(subset)
        except Exception as exc:
            self.fail_json(msg=to_text(exc))
    def edit_config(self, config_filter=None):
        """Diff desired state against the device's current config and return
        the resulting commands plus a changed flag."""
        current_config = self.connection.get_config(flags=config_filter)
        try:
            commands = self.provider.edit_config(current_config)
            changed = bool(commands)
            return {'commands': commands, 'changed': changed}
        except Exception as exc:
            self.fail_json(msg=to_text(exc))
| gpl-3.0 |
Royal-Society-of-New-Zealand/NZ-ORCID-Hub | orcid_api_v3/models/employment_v20.py | 1 | 11230 | # coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.created_date_v20 import CreatedDateV20 # noqa: F401,E501
from orcid_api_v3.models.fuzzy_date_v20 import FuzzyDateV20 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v20 import LastModifiedDateV20 # noqa: F401,E501
from orcid_api_v3.models.organization_v20 import OrganizationV20 # noqa: F401,E501
from orcid_api_v3.models.source_v20 import SourceV20 # noqa: F401,E501
class EmploymentV20(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared model type per python attribute (consumed by to_dict()).
    swagger_types = {
        'created_date': 'CreatedDateV20',
        'last_modified_date': 'LastModifiedDateV20',
        'source': 'SourceV20',
        'put_code': 'int',
        'path': 'str',
        'department_name': 'str',
        'role_title': 'str',
        'start_date': 'FuzzyDateV20',
        'end_date': 'FuzzyDateV20',
        'organization': 'OrganizationV20',
        'visibility': 'str'
    }
    # JSON key in the ORCID API payload per python attribute.
    attribute_map = {
        'created_date': 'created-date',
        'last_modified_date': 'last-modified-date',
        'source': 'source',
        'put_code': 'put-code',
        'path': 'path',
        'department_name': 'department-name',
        'role_title': 'role-title',
        'start_date': 'start-date',
        'end_date': 'end-date',
        'organization': 'organization',
        'visibility': 'visibility'
    }

    def __init__(self, created_date=None, last_modified_date=None, source=None, put_code=None, path=None, department_name=None, role_title=None, start_date=None, end_date=None, organization=None, visibility=None):  # noqa: E501
        """EmploymentV20 - a model defined in Swagger"""  # noqa: E501
        self._created_date = None
        self._last_modified_date = None
        self._source = None
        self._put_code = None
        self._path = None
        self._department_name = None
        self._role_title = None
        self._start_date = None
        self._end_date = None
        self._organization = None
        self._visibility = None
        self.discriminator = None
        if created_date is not None:
            self.created_date = created_date
        if last_modified_date is not None:
            self.last_modified_date = last_modified_date
        if source is not None:
            self.source = source
        if put_code is not None:
            self.put_code = put_code
        if path is not None:
            self.path = path
        if department_name is not None:
            self.department_name = department_name
        if role_title is not None:
            self.role_title = role_title
        if start_date is not None:
            self.start_date = start_date
        if end_date is not None:
            self.end_date = end_date
        # organization is required: assigned unconditionally so the setter
        # can reject None.
        self.organization = organization
        if visibility is not None:
            self.visibility = visibility

    @property
    def created_date(self):
        """Gets the created_date of this EmploymentV20.  # noqa: E501

        :return: The created_date of this EmploymentV20.  # noqa: E501
        :rtype: CreatedDateV20
        """
        return self._created_date

    @created_date.setter
    def created_date(self, created_date):
        """Sets the created_date of this EmploymentV20.

        :param created_date: The created_date of this EmploymentV20.  # noqa: E501
        :type: CreatedDateV20
        """
        self._created_date = created_date

    @property
    def last_modified_date(self):
        """Gets the last_modified_date of this EmploymentV20.  # noqa: E501

        :return: The last_modified_date of this EmploymentV20.  # noqa: E501
        :rtype: LastModifiedDateV20
        """
        return self._last_modified_date

    @last_modified_date.setter
    def last_modified_date(self, last_modified_date):
        """Sets the last_modified_date of this EmploymentV20.

        :param last_modified_date: The last_modified_date of this EmploymentV20.  # noqa: E501
        :type: LastModifiedDateV20
        """
        self._last_modified_date = last_modified_date

    @property
    def source(self):
        """Gets the source of this EmploymentV20.  # noqa: E501

        :return: The source of this EmploymentV20.  # noqa: E501
        :rtype: SourceV20
        """
        return self._source

    @source.setter
    def source(self, source):
        """Sets the source of this EmploymentV20.

        :param source: The source of this EmploymentV20.  # noqa: E501
        :type: SourceV20
        """
        self._source = source

    @property
    def put_code(self):
        """Gets the put_code of this EmploymentV20.  # noqa: E501

        :return: The put_code of this EmploymentV20.  # noqa: E501
        :rtype: int
        """
        return self._put_code

    @put_code.setter
    def put_code(self, put_code):
        """Sets the put_code of this EmploymentV20.

        :param put_code: The put_code of this EmploymentV20.  # noqa: E501
        :type: int
        """
        self._put_code = put_code

    @property
    def path(self):
        """Gets the path of this EmploymentV20.  # noqa: E501

        :return: The path of this EmploymentV20.  # noqa: E501
        :rtype: str
        """
        return self._path

    @path.setter
    def path(self, path):
        """Sets the path of this EmploymentV20.

        :param path: The path of this EmploymentV20.  # noqa: E501
        :type: str
        """
        self._path = path

    @property
    def department_name(self):
        """Gets the department_name of this EmploymentV20.  # noqa: E501

        :return: The department_name of this EmploymentV20.  # noqa: E501
        :rtype: str
        """
        return self._department_name

    @department_name.setter
    def department_name(self, department_name):
        """Sets the department_name of this EmploymentV20.

        :param department_name: The department_name of this EmploymentV20.  # noqa: E501
        :type: str
        """
        self._department_name = department_name

    @property
    def role_title(self):
        """Gets the role_title of this EmploymentV20.  # noqa: E501

        :return: The role_title of this EmploymentV20.  # noqa: E501
        :rtype: str
        """
        return self._role_title

    @role_title.setter
    def role_title(self, role_title):
        """Sets the role_title of this EmploymentV20.

        :param role_title: The role_title of this EmploymentV20.  # noqa: E501
        :type: str
        """
        self._role_title = role_title

    @property
    def start_date(self):
        """Gets the start_date of this EmploymentV20.  # noqa: E501

        :return: The start_date of this EmploymentV20.  # noqa: E501
        :rtype: FuzzyDateV20
        """
        return self._start_date

    @start_date.setter
    def start_date(self, start_date):
        """Sets the start_date of this EmploymentV20.

        :param start_date: The start_date of this EmploymentV20.  # noqa: E501
        :type: FuzzyDateV20
        """
        self._start_date = start_date

    @property
    def end_date(self):
        """Gets the end_date of this EmploymentV20.  # noqa: E501

        :return: The end_date of this EmploymentV20.  # noqa: E501
        :rtype: FuzzyDateV20
        """
        return self._end_date

    @end_date.setter
    def end_date(self, end_date):
        """Sets the end_date of this EmploymentV20.

        :param end_date: The end_date of this EmploymentV20.  # noqa: E501
        :type: FuzzyDateV20
        """
        self._end_date = end_date

    @property
    def organization(self):
        """Gets the organization of this EmploymentV20.  # noqa: E501

        :return: The organization of this EmploymentV20.  # noqa: E501
        :rtype: OrganizationV20
        """
        return self._organization

    @organization.setter
    def organization(self, organization):
        """Sets the organization of this EmploymentV20.

        :param organization: The organization of this EmploymentV20.  # noqa: E501
        :type: OrganizationV20
        """
        # Required field: None is rejected.
        if organization is None:
            raise ValueError("Invalid value for `organization`, must not be `None`")  # noqa: E501
        self._organization = organization

    @property
    def visibility(self):
        """Gets the visibility of this EmploymentV20.  # noqa: E501

        :return: The visibility of this EmploymentV20.  # noqa: E501
        :rtype: str
        """
        return self._visibility

    @visibility.setter
    def visibility(self, visibility):
        """Sets the visibility of this EmploymentV20.

        :param visibility: The visibility of this EmploymentV20.  # noqa: E501
        :type: str
        """
        allowed_values = ["LIMITED", "REGISTERED_ONLY", "PUBLIC", "PRIVATE"]  # noqa: E501
        if visibility not in allowed_values:
            raise ValueError(
                "Invalid value for `visibility` ({0}), must be one of {1}"  # noqa: E501
                .format(visibility, allowed_values)
            )
        self._visibility = visibility

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(EmploymentV20, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, EmploymentV20):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| mit |
nateGeorge/IDmyDog | scrape_ims/scrapy/scrape_dogs/scrape_dogs/settings.py | 1 | 3579 | # -*- coding: utf-8 -*-
# Scrapy settings for scrape_dogs project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
import logging
import json
# Deployment-specific paths (e.g. where scraped images are stored) live in
# config.json next to the scrapy project.
with open('config.json', 'rb') as f:
    config = json.load(f)
mainImPath = config['image_dir']  # base directory for downloaded dog images
# Core project identity: bot name and where scrapy discovers spiders.
BOT_NAME = 'scrape_dogs'
SPIDER_MODULES = ['scrape_dogs.spiders']
NEWSPIDER_MODULE = 'scrape_dogs.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'scrape_dogs (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# http://doc.scrapy.org/en/latest/topics/logging.html#topics-logging-settings
#LOG_LEVEL = logging.WARNING
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'scrape_dogs.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'scrape_dogs.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'scrape_dogs.pipelines.SomePipeline': 300,
#}
# Route every scraped item through the image-download pipeline (priority 1).
ITEM_PIPELINES = {'scrape_dogs.pipelines.DogImagePipeline': 1}
# Image pipeline destination; value comes from config.json ('image_dir').
IMAGES_STORE = mainImPath
IMAGES_EXPIRES = 100 # don't download new images unless it's been x days since last dl
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| mit |
barbour-em/osf.io | scripts/migrate_github_oauth_settings.py | 55 | 7419 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to migrate addongithubusersettings and create and attach addongithuboauthsettings.
Log:
Executed on production by SL on 2014-10-05 at 23:11 EST. 269 AddonGithubUserSettings records
were successfully migrated. 3 records with invalidated credentials were skipped.
Script was modified by @chennan47 to handle records with invalidated credentials by unsetting
the oauth_access_token, oauth_token_type, and github_user fields. Run on production by @sloria
on 2014-10-07 at 12:34 EST. 3 records with invalidated credentials were migrated.
"""
import sys
import mock
from nose.tools import *
import github3
from framework.mongo import database
from website.app import init_app
from tests.base import OsfTestCase
from website.addons.github.api import GitHub
from website.addons.github.model import AddonGitHubOauthSettings, AddonGitHubUserSettings
def do_migration(records, dry=True):
    """Migrate raw addongithubusersettings documents to AddonGitHubOauthSettings.

    For each record that still carries inline credentials, create an
    AddonGitHubOauthSettings document, move the credentials onto it, and unset
    the old inline fields.  Records whose credentials GitHub rejects only get
    their stale fields unset.  Returns (migrated_count, invalid_creds_count).
    When ``dry`` is true no writes are performed.
    """
    count, inval_cred_handled = 0, 0
    for raw_user_settings in records:
        # False if missing, None if field exists
        access_token = raw_user_settings.get('oauth_access_token', False)
        token_type = raw_user_settings.get('oauth_token_type', False)
        github_user_name = raw_user_settings.get('github_user', False)
        if access_token and token_type and github_user_name:
            if not dry:
                # Validate the stored credentials against the GitHub API.
                gh = GitHub(access_token, token_type)
                try:
                    github_user = gh.user()
                except github3.models.GitHubError:
                    # Credentials were revoked/invalidated: just drop the stale
                    # fields and skip creating an oauth settings document.
                    AddonGitHubUserSettings._storage[0].store.update(
                        {'_id': raw_user_settings['_id']},
                        {
                            '$unset': {
                                "oauth_access_token" : True,
                                "oauth_token_type" : True,
                                "github_user" : True,
                            },
                        }
                    )
                    inval_cred_handled += 1
                    print('invalidated credentials handled record: {}'.format(raw_user_settings['_id']))
                    continue
                # Two saves: github_user_id must exist (it is the primary key)
                # before the credential fields are attached.
                oauth_settings = AddonGitHubOauthSettings()
                oauth_settings.github_user_id = str(github_user.id)
                oauth_settings.save()
                oauth_settings.oauth_access_token = access_token
                oauth_settings.oauth_token_type = token_type
                oauth_settings.github_user_name = github_user_name
                oauth_settings.save()
                # Point the user settings at the new document and remove the
                # now-duplicated inline credential fields.
                AddonGitHubUserSettings._storage[0].store.update(
                    {'_id': raw_user_settings['_id']},
                    {
                        '$unset': {
                            'oauth_access_token': True,
                            'oauth_token_type': True,
                            'github_user': True,
                        },
                        '$set': {
                            'oauth_settings': oauth_settings.github_user_id,
                        }
                    }
                )
                # Maintain the modular-odm backref from oauth settings back to
                # the user settings document.
                AddonGitHubOauthSettings._storage[0].store.update(
                    {'github_user_id': oauth_settings.github_user_id},
                    {
                        '$push': {
                            '__backrefs.accessed.addongithubusersettings.oauth_settings': raw_user_settings['_id'],
                        }
                    }
                )
                print('Finished migrating AddonGithubUserSettings record: {}'.format(raw_user_settings['_id']))
            count += 1
        # Old fields have not yet been unset
        elif None in set([access_token, token_type, github_user_name]):
            if not dry:
                AddonGitHubUserSettings._storage[0].store.update(
                    {'_id': raw_user_settings['_id']},
                    {
                        '$unset': {
                            'oauth_access_token': True,
                            'oauth_token_type': True,
                            'github_user': True,
                        },
                    }
                )
                print('Unset oauth_access_token and oauth_token_type: {0}'.format(raw_user_settings['_id']))
            count += 1
    return count, inval_cred_handled
def get_user_settings():
    """Return a raw Mongo cursor over every addongithubusersettings document."""
    # ... return the StoredObjects to migrate ...
    return database.addongithubusersettings.find()
def main():
    """Run the migration; pass 'dry' on the command line for a no-op dry run."""
    init_app('website.settings', set_backends=True, routes=True)  # Sets the storage backends on all models
    user_settings = get_user_settings()
    n_migrated, n_inval_cred_handled = do_migration(user_settings, dry='dry' in sys.argv)
    print("Total migrated records: {}".format(n_migrated))
    print("Total invalidated credentials handled records: {}".format(n_inval_cred_handled))
class TestMigrateGitHubOauthSettings(OsfTestCase):
    """Integration tests: insert one raw user-settings document and verify that
    do_migration() moves its credentials onto an AddonGitHubOauthSettings."""

    def setUp(self):
        super(TestMigrateGitHubOauthSettings, self).setUp()
        self.mongo_collection = database.addongithubusersettings
        # A minimal raw document in the pre-migration shape (inline credentials).
        self.user_settings = {
            "__backrefs" : {
                "authorized" : {
                    "addongithubnodesettings" : {
                        "user_settings" : [
                            "678910",
                        ]
                    }
                }
            },
            "_id" : "123456",
            "_version" : 1,
            "deletedAddonGitHubUserSettings" : False,
            "github_user" : "testing user",
            "oauth_access_token" : "testing acess token",
            "oauth_state" : "no state",
            "oauth_token_type" : "testing token type",
            "owner" : "abcde"
        }
        self.mongo_collection.insert(self.user_settings)

    def test_get_user_settings(self):
        # The query helper must surface the inserted document unchanged.
        records = list(get_user_settings())
        assert_equal(1, len(records))
        assert_equal(
            records[0]['github_user'],
            self.user_settings['github_user']
        )
        assert_equal(
            records[0]['oauth_state'],
            self.user_settings['oauth_state']
        )
        assert_equal(
            records[0]['oauth_access_token'],
            self.user_settings['oauth_access_token']
        )
        assert_equal(
            records[0]['oauth_token_type'],
            self.user_settings['oauth_token_type']
        )

    @mock.patch('website.addons.github.api.GitHub.user')
    def test_do_migration(self, mock_github_user):
        # Stub the GitHub API so the stored credentials always "validate".
        user = mock.Mock()
        user.id = "testing user id"
        mock_github_user.return_value = user
        do_migration(get_user_settings())
        user_settings = AddonGitHubUserSettings.find()[0]
        assert_true(user_settings.oauth_settings)
        assert_true(user_settings.oauth_state)
        assert_equal(
            user_settings.oauth_settings.github_user_name,
            "testing user"
        )
        assert_equal(
            user_settings.oauth_settings.oauth_access_token,
            "testing acess token"
        )
        assert_equal(
            user_settings.oauth_settings.oauth_token_type,
            "testing token type"
        )
        assert_equal(
            user_settings.oauth_settings.github_user_id,
            "testing user id"
        )

    def tearDown(self):
        # Drop everything we inserted so tests stay independent.
        self.mongo_collection.remove()
# Entry point: run the migration (add 'dry' to the command line for a dry run).
if __name__ == '__main__':
    main()
| apache-2.0 |
devs1991/test_edx_docmode | lms/djangoapps/lti_provider/tests/test_signature_validator.py | 139 | 3804 | """
Tests for the SignatureValidator class.
"""
import ddt
from django.test import TestCase
from django.test.client import RequestFactory
from mock import patch
from lti_provider.models import LtiConsumer
from lti_provider.signature_validator import SignatureValidator
def get_lti_consumer():
    """
    Helper method for all Signature Validator tests to get an LtiConsumer object.
    """
    # Unsaved model instance: these tests never need the database.
    return LtiConsumer(
        consumer_name='Consumer Name',
        consumer_key='Consumer Key',
        consumer_secret='Consumer Secret'
    )
@ddt.ddt
class ClientKeyValidatorTest(TestCase):
    """
    Tests for the check_client_key method in the SignatureValidator class.
    """

    def setUp(self):
        super(ClientKeyValidatorTest, self).setUp()
        self.lti_consumer = get_lti_consumer()

    def test_valid_client_key(self):
        """
        Verify that check_client_key succeeds with a valid key
        """
        key = self.lti_consumer.consumer_key
        self.assertTrue(SignatureValidator(self.lti_consumer).check_client_key(key))

    # Disallowed keys: over-length (40 chars), empty, and missing.
    # NOTE(review): the exact length limit is enforced inside
    # SignatureValidator.check_client_key, not visible here — confirm there.
    @ddt.data(
        ('0123456789012345678901234567890123456789',),
        ('',),
        (None,),
    )
    @ddt.unpack
    def test_invalid_client_key(self, key):
        """
        Verify that check_client_key fails with a disallowed key
        """
        self.assertFalse(SignatureValidator(self.lti_consumer).check_client_key(key))
@ddt.ddt
class NonceValidatorTest(TestCase):
    """
    Tests for the check_nonce method in the SignatureValidator class.
    """

    def setUp(self):
        super(NonceValidatorTest, self).setUp()
        self.lti_consumer = get_lti_consumer()

    def test_valid_nonce(self):
        """
        Verify that check_nonce succeeds with a key of maximum length
        """
        # 64 characters: the boundary case that must still be accepted.
        nonce = '0123456789012345678901234567890123456789012345678901234567890123'
        self.assertTrue(SignatureValidator(self.lti_consumer).check_nonce(nonce))

    # 65 characters (one past the limit), empty, and missing must all fail.
    @ddt.data(
        ('01234567890123456789012345678901234567890123456789012345678901234',),
        ('',),
        (None,),
    )
    @ddt.unpack
    def test_invalid_nonce(self, nonce):
        """
        Verify that check_nonce fails with badly formatted nonce
        """
        self.assertFalse(SignatureValidator(self.lti_consumer).check_nonce(nonce))
class SignatureValidatorTest(TestCase):
    """
    Tests for the custom SignatureValidator class that uses the oauthlib library
    to check message signatures. Note that these tests mock out the library
    itself, since we assume it to be correct.
    """

    def setUp(self):
        super(SignatureValidatorTest, self).setUp()
        self.lti_consumer = get_lti_consumer()

    def test_get_existing_client_secret(self):
        """
        Verify that get_client_secret returns the right value for the correct
        key
        """
        key = self.lti_consumer.consumer_key
        secret = SignatureValidator(self.lti_consumer).get_client_secret(key, None)
        self.assertEqual(secret, self.lti_consumer.consumer_secret)

    @patch('oauthlib.oauth1.SignatureOnlyEndpoint.validate_request',
           return_value=(True, None))
    def test_verification_parameters(self, verify_mock):
        """
        Verify that the signature validaton library method is called using the
        correct parameters derived from the HttpRequest.
        """
        body = 'oauth_signature_method=HMAC-SHA1&oauth_version=1.0'
        content_type = 'application/x-www-form-urlencoded'
        request = RequestFactory().post('/url', body, content_type=content_type)
        # verify() must forward the absolute URI, method, raw body, and the
        # relevant headers to oauthlib.
        headers = {'Content-Type': content_type}
        SignatureValidator(self.lti_consumer).verify(request)
        verify_mock.assert_called_once_with(
            request.build_absolute_uri(), 'POST', body, headers)
| agpl-3.0 |
vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/desktop/core/src/desktop/auth/backend.py | 2 | 18238 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Authentication backend classes for Desktop.
These classes should implement the interface described at:
http://docs.djangoproject.com/en/1.0/topics/auth/#writing-an-authentication-backend
In addition, the User classes they return must support:
- get_groups() (returns a list of strings)
- get_home_directory() (returns None or a string)
- has_hue_permission(action, app) -> boolean
Because Django's models are sometimes unfriendly, you'll want
User to remain a django.contrib.auth.models.User object.
In Desktop, only one authentication backend may be specified.
"""
from django.contrib.auth.models import User
import django.contrib.auth.backends
import logging
import desktop.conf
from desktop import metrics
from django.utils.importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from useradmin.models import get_profile, get_default_user_group, UserProfile
from useradmin.views import import_ldap_users
from useradmin import ldap_access
import pam
from django_auth_ldap.backend import LDAPBackend
import ldap
from django_auth_ldap.config import LDAPSearch
LOG = logging.getLogger(__name__)
def load_augmentation_class():
    """
    Loads the user augmentation class named by desktop.conf.AUTH.USER_AUGMENTOR.

    Similar in spirit to django.contrib.auth.load_backend.

    Raises ImproperlyConfigured when the class cannot be imported.
    """
    # Read the configured name outside the try block: previously a failure in
    # the config read itself left `class_name` unbound, so the except clause
    # crashed with a NameError while building the ImproperlyConfigured message.
    class_name = desktop.conf.AUTH.USER_AUGMENTOR.get()
    try:
        i = class_name.rfind('.')
        module, attr = class_name[:i], class_name[i+1:]
        mod = import_module(module)
        klass = getattr(mod, attr)
        LOG.info("Augmenting users with class: %s" % (klass,))
        return klass
    # `except Exception` instead of a bare except: don't swallow
    # SystemExit/KeyboardInterrupt.
    except Exception:
        LOG.exception('failed to augment class')
        raise ImproperlyConfigured("Could not find user_augmentation_class: %s" % (class_name,))
# Module-level cache: the augmentation class is resolved at most once.
_user_augmentation_class = None

def get_user_augmentation_class():
    """Return the configured augmentation class, loading it on first use."""
    global _user_augmentation_class

    if _user_augmentation_class is None:
        _user_augmentation_class = load_augmentation_class()
    return _user_augmentation_class
def rewrite_user(user):
    """
    Rewrites the user according to the augmentation class.

    Only the attributes listed below are re-bound onto the user object;
    this could be generalized if more are ever needed.
    """
    augmentor = get_user_augmentation_class()(user)
    delegated = ("get_groups", "get_home_directory", "has_hue_permission")
    for method_name in delegated:
        setattr(user, method_name, getattr(augmentor, method_name))
    return user
class DefaultUserAugmentor(object):
    """Default augmentation: delegate group/home-dir/permission queries to the
    user's UserProfile (see rewrite_user)."""

    def __init__(self, parent):
        # `parent` is the Django User being augmented.
        self._parent = parent

    def _get_profile(self):
        return get_profile(self._parent)

    def get_groups(self):
        return self._get_profile().get_groups()

    def get_home_directory(self):
        return self._get_profile().home_directory

    def has_hue_permission(self, action, app):
        return self._get_profile().has_hue_permission(action=action, app=app)
def find_user(username):
    """Return the Django User with this username, or None when absent."""
    try:
        existing = User.objects.get(username=username)
    except User.DoesNotExist:
        return None
    LOG.debug("Found user %s in the db" % username)
    return existing
def create_user(username, password):
    """Materialize a new Django User row.

    A None password marks the account as having no usable password (external
    auth).  Note the new user is created as a superuser; callers that don't
    want that (AllowAllBackend, DemoBackend, ...) flip is_superuser off
    immediately after.
    """
    LOG.info("Materializing user %s in the database" % username)
    user = User(username=username)
    if password is None:
        user.set_unusable_password()
    else:
        user.set_password(password)
    user.is_superuser = True
    user.save()
    return user
def find_or_create_user(username, password=None):
    """Look up *username*, materializing a new account when none exists."""
    existing = find_user(username)
    if existing is not None:
        return existing
    return create_user(username, password)
class DesktopBackendBase(object):
    """
    Abstract base class for providing external authentication schemes.

    Extend this class and implement check_auth.
    """

    def authenticate(self, username, password):
        """Return a rewritten User when check_auth accepts the credentials,
        otherwise None (Django's contract for a failed backend)."""
        if self.check_auth(username, password):
            user = find_or_create_user(username)
            user = rewrite_user(user)
            return user
        else:
            return None

    def get_user(self, user_id):
        """Fetch the user by primary key, or None when it no longer exists."""
        try:
            user = User.objects.get(pk=user_id)
            user = rewrite_user(user)
            return user
        except User.DoesNotExist:
            return None

    def check_auth(self, username, password):
        """
        Implementors should return a boolean value which determines
        whether the given username and password pair is valid.
        """
        # Bug fix: the original `raise NotImplemented(...)` crashed with
        # "TypeError: 'NotImplementedType' object is not callable" because
        # NotImplemented is a sentinel value, not an exception class.
        raise NotImplementedError("Abstract class - must implement check_auth")
class AllowFirstUserDjangoBackend(django.contrib.auth.backends.ModelBackend):
    """
    Allows the first user in, but otherwise delegates to Django's
    ModelBackend.
    """

    def authenticate(self, username=None, password=None):
        user = super(AllowFirstUserDjangoBackend, self).authenticate(username, password)

        if user is not None:
            # Known account: only augment it when active; inactive users are
            # returned as-is and rejected further up the stack.
            if user.is_active:
                user = rewrite_user(user)
                return user
            return user

        if self.is_first_login_ever():
            # Bootstrap path: the very first login creates the account
            # (create_user makes it a superuser) and clears the first-login flag.
            user = find_or_create_user(username, password)
            user = rewrite_user(user)

            userprofile = get_profile(user)
            userprofile.first_login = False
            userprofile.save()

            default_group = get_default_user_group()
            if default_group is not None:
                user.groups.add(default_group)
                user.save()

            return user

        return None

    def get_user(self, user_id):
        user = super(AllowFirstUserDjangoBackend, self).get_user(user_id)
        user = rewrite_user(user)
        return user

    def is_first_login_ever(self):
        """ Return true if no one has ever logged in to Desktop yet. """
        return User.objects.count() == 0
class OAuthBackend(DesktopBackendBase):
    """
    Deprecated, use liboauth.backend.OAuthBackend instead

    Heavily based on Twitter Oauth: https://github.com/simplegeo/python-oauth2#logging-into-django-w-twitter
    Requires: python-oauth2 and httplib2

    build/env/bin/python setup.py install https://github.com/simplegeo/python-oauth2
    build/env/bin/pip install httplib2
    """

    @metrics.oauth_authentication_time
    def authenticate(self, access_token):
        # `access_token` is the token dict from the OAuth dance; the Twitter
        # screen name becomes the Hue username and the token secret is stored
        # as the password.
        username = access_token['screen_name']
        password = access_token['oauth_token_secret']

        # Could save oauth_token detail in the user profile here
        user = find_or_create_user(username, password)
        # Downgrade: create_user marks new users as superusers.
        user.is_superuser = False
        user.save()

        default_group = get_default_user_group()
        if default_group is not None:
            user.groups.add(default_group)

        return user

    @classmethod
    def manages_passwords_externally(cls):
        return True
class AllowAllBackend(DesktopBackendBase):
    """
    Authentication backend that allows any user to login as long
    as they have a username. The users will be added to the 'default_user_group'.

    We want to ensure that already created users (e.g., from other backends)
    retain their superuser status, and any new users are not super users by
    default.
    """

    def check_auth(self, username, password):
        user = find_user(username)
        if user is None:
            # New account: create it, strip the superuser flag create_user
            # sets, and place it in the default group.
            user = create_user(username, password)
            user.is_superuser = False
            user.save()

            default_group = get_default_user_group()
            if default_group is not None:
                user.groups.add(default_group)

        # Any truthy user object means authentication succeeds.
        return user

    @classmethod
    def manages_passwords_externally(cls):
        return True
class DemoBackend(django.contrib.auth.backends.ModelBackend):
    """
    Log automatically users without a session with a new user account.
    """

    def authenticate(self, username, password):
        user = super(DemoBackend, self).authenticate(username, password)

        if not user:
            # No matching account: fabricate a throwaway demo user with a
            # random name (non-superuser, default group).
            username = self._random_name()
            user = find_or_create_user(username, None)
            user.is_superuser = False
            user.save()

            default_group = get_default_user_group()
            if default_group is not None:
                user.groups.add(default_group)

        user = rewrite_user(user)
        return user

    def get_user(self, user_id):
        user = super(DemoBackend, self).get_user(user_id)
        user = rewrite_user(user)
        return user

    def _random_name(self):
        # `random` (not `secrets`) is fine here: the name is only a label for a
        # demo account, not a security token.
        import string
        import random

        N = 7
        return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(N))
class PamBackend(DesktopBackendBase):
    """
    Authentication backend that uses PAM to authenticate logins. The first user to
    login will become the superuser.
    """

    @metrics.pam_authentication_time
    def check_auth(self, username, password):
        if pam.authenticate(username, password, desktop.conf.AUTH.PAM_SERVICE.get()):
            # Decide superuser status before any account is created below.
            is_super = False
            if User.objects.count() == 0:
                is_super = True

            try:
                user = User.objects.get(username=username)
            except User.DoesNotExist:
                # First PAM login for this username: materialize the account
                # and mark it as externally created.
                user = find_or_create_user(username, None)
                if user is not None and user.is_active:
                    profile = get_profile(user)
                    profile.creation_method = UserProfile.CreationMethod.EXTERNAL
                    profile.save()
                    user.is_superuser = is_super

                    default_group = get_default_user_group()
                    if default_group is not None:
                        user.groups.add(default_group)

                    user.save()

            user = rewrite_user(user)
            return user

        return None

    @classmethod
    def manages_passwords_externally(cls):
        return True
class LdapBackend(object):
    """
    Authentication backend that uses LDAP to authenticate logins.
    The first user to login will become the superuser.

    NOTE: this module uses Python 2 `except X, e` syntax (see authenticate).
    """

    def __init__(self):
        # Delegate to django_auth_ldap.LDAPBackend
        class _LDAPBackend(LDAPBackend):
            def get_or_create_user(self, username, ldap_user):
                # Optionally normalize case before hitting the user table.
                username = desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.get() and username.lower() or username

                if desktop.conf.LDAP.IGNORE_USERNAME_CASE.get():
                    try:
                        return User.objects.get(username__iexact=username), False
                    except User.DoesNotExist:
                        return User.objects.get_or_create(username=username)
                else:
                    return User.objects.get_or_create(username=username)

        self._backend = _LDAPBackend()

    def add_ldap_config(self, ldap_config):
        """Push one LDAP server configuration onto the django_auth_ldap settings."""
        if ldap_config.LDAP_URL.get() is None:
            LOG.warn("Could not find LDAP URL required for authentication.")
            return None
        else:
            setattr(self._backend.settings, 'SERVER_URI', ldap_config.LDAP_URL.get())

        if ldap_config.SEARCH_BIND_AUTHENTICATION.get():
            # New Search/Bind Auth
            base_dn = ldap_config.BASE_DN.get()
            user_name_attr = ldap_config.USERS.USER_NAME_ATTR.get()
            user_filter = ldap_config.USERS.USER_FILTER.get()

            # NOTE(review): this dereferences user_filter unconditionally, yet
            # a `user_filter is None` branch exists below — a None filter would
            # already have crashed here. Confirm whether USER_FILTER can be None.
            if not user_filter.startswith('('):
                user_filter = '(' + user_filter + ')'

            if ldap_config.BIND_DN.get():
                bind_dn = ldap_config.BIND_DN.get()
                setattr(self._backend.settings, 'BIND_DN', bind_dn)

                bind_password = ldap_config.BIND_PASSWORD.get()
                if not bind_password:
                    # NOTE(review): assigns `password`, which is never used —
                    # this looks like it should be `bind_password = ...` so the
                    # script-provided secret is actually applied; confirm.
                    password = ldap_config.BIND_PASSWORD_SCRIPT.get()
                setattr(self._backend.settings, 'BIND_PASSWORD', bind_password)

            if user_filter is None:
                search_bind_results = LDAPSearch(base_dn,
                    ldap.SCOPE_SUBTREE, "(" + user_name_attr + "=%(user)s)")
            else:
                search_bind_results = LDAPSearch(base_dn,
                    ldap.SCOPE_SUBTREE, "(&(" + user_name_attr + "=%(user)s)" + user_filter + ")")

            setattr(self._backend.settings, 'USER_SEARCH', search_bind_results)
        else:
            # Direct-bind auth: build a DN template from a pattern or NT domain.
            nt_domain = ldap_config.NT_DOMAIN.get()
            if nt_domain is None:
                pattern = ldap_config.LDAP_USERNAME_PATTERN.get()
                pattern = pattern.replace('<username>', '%(user)s')
                setattr(self._backend.settings, 'USER_DN_TEMPLATE', pattern)
            else:
                # %(user)s is a special string that will get replaced during the authentication process
                setattr(self._backend.settings, 'USER_DN_TEMPLATE', "%(user)s@" + nt_domain)

        # Certificate-related config settings
        if ldap_config.LDAP_CERT.get():
            setattr(self._backend.settings, 'START_TLS', ldap_config.USE_START_TLS.get())
            ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
            ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, ldap_config.LDAP_CERT.get())
        else:
            setattr(self._backend.settings, 'START_TLS', False)
            ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)

        if ldap_config.FOLLOW_REFERRALS.get():
            ldap.set_option(ldap.OPT_REFERRALS, 1)
        else:
            ldap.set_option(ldap.OPT_REFERRALS, 0)

    def add_ldap_config_for_server(self, server):
        """Select the per-server config block when multiple servers are defined."""
        if desktop.conf.LDAP.LDAP_SERVERS.get():
            # Choose from multiple server configs
            if server in desktop.conf.LDAP.LDAP_SERVERS.get():
                self.add_ldap_config(desktop.conf.LDAP.LDAP_SERVERS.get()[server])
        else:
            self.add_ldap_config(desktop.conf.LDAP)

    @metrics.ldap_authentication_time
    def authenticate(self, username=None, password=None, server=None):
        self.add_ldap_config_for_server(server)

        username_filter_kwargs = ldap_access.get_ldap_user_kwargs(username)

        # Do this check up here, because the auth call creates a django user upon first login per user
        is_super = False
        if not UserProfile.objects.filter(creation_method=str(UserProfile.CreationMethod.EXTERNAL)).exists():
            # If there are no LDAP users already in the system, the first one will
            # become a superuser
            is_super = True
        elif User.objects.filter(**username_filter_kwargs).exists():
            # If the user already exists, we shouldn't change its superuser
            # privileges. However, if there's a naming conflict with a non-external
            # user, we should do the safe thing and turn off superuser privs.
            existing_user = User.objects.get(**username_filter_kwargs)
            existing_profile = get_profile(existing_user)
            if existing_profile.creation_method == str(UserProfile.CreationMethod.EXTERNAL):
                is_super = User.objects.get(**username_filter_kwargs).is_superuser
        elif not desktop.conf.LDAP.CREATE_USERS_ON_LOGIN.get():
            # Unknown user and auto-creation disabled: refuse the login.
            return None

        try:
            user = self._backend.authenticate(username, password)
        except ImproperlyConfigured, detail:
            LOG.warn("LDAP was not properly configured: %s", detail)
            return None

        if user is not None and user.is_active:
            profile = get_profile(user)
            profile.creation_method = UserProfile.CreationMethod.EXTERNAL
            profile.save()
            user.is_superuser = is_super
            user = rewrite_user(user)

            default_group = get_default_user_group()
            if default_group is not None:
                user.groups.add(default_group)
                user.save()

            if desktop.conf.LDAP.SYNC_GROUPS_ON_LOGIN.get():
                self.import_groups(server, user)

        return user

    def get_user(self, user_id):
        user = self._backend.get_user(user_id)
        user = rewrite_user(user)
        return user

    def import_groups(self, server, user):
        # Pull the user's LDAP group memberships into Hue's group tables.
        connection = ldap_access.get_connection_from_server(server)
        import_ldap_users(connection, user.username, sync_groups=True, import_by_dn=False, server=server)

    @classmethod
    def manages_passwords_externally(cls):
        return True
class SpnegoDjangoBackend(django.contrib.auth.backends.ModelBackend):
    """
    A note about configuration:

    The HTTP/_HOST@REALM principal (where _HOST is the fully qualified domain
    name of the server running Hue) needs to be exported to a keytab file.
    The keytab file can either be located in /etc/krb5.keytab or you can set
    the KRB5_KTNAME environment variable to point to another location
    (e.g. /etc/hue/hue.keytab).
    """

    @metrics.spnego_authentication_time
    def authenticate(self, username=None):
        # Kerberos already authenticated the principal; strip the realm and
        # map it to a local account.
        username = self.clean_username(username)
        is_super = False
        if User.objects.count() == 0:
            is_super = True

        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            # First login for this principal: create the account and mark it
            # externally managed.
            user = find_or_create_user(username, None)
            if user is not None and user.is_active:
                profile = get_profile(user)
                profile.creation_method = UserProfile.CreationMethod.EXTERNAL
                profile.save()
                user.is_superuser = is_super

                default_group = get_default_user_group()
                if default_group is not None:
                    user.groups.add(default_group)

                user.save()

        user = rewrite_user(user)
        return user

    def clean_username(self, username):
        # Drop the Kerberos realm: user@REALM -> user.
        if '@' in username:
            return username.split('@')[0]
        return username

    def get_user(self, user_id):
        user = super(SpnegoDjangoBackend, self).get_user(user_id)
        user = rewrite_user(user)
        return user
class RemoteUserDjangoBackend(django.contrib.auth.backends.RemoteUserBackend):
    """
    Delegates to Django's RemoteUserBackend and requires HueRemoteUserMiddleware
    """

    def authenticate(self, remote_user=None):
        # The upstream proxy/web server asserted the identity; normalize it.
        username = self.clean_username(remote_user)
        username = desktop.conf.AUTH.FORCE_USERNAME_LOWERCASE.get() and username.lower() or username
        is_super = False
        if User.objects.count() == 0:
            is_super = True

        try:
            if desktop.conf.AUTH.IGNORE_USERNAME_CASE.get():
                user = User.objects.get(username__iexact=username)
            else:
                user = User.objects.get(username=username)
        except User.DoesNotExist:
            # First login via the remote-user header: create the account and
            # mark it externally managed.
            user = find_or_create_user(username, None)
            if user is not None and user.is_active:
                profile = get_profile(user)
                profile.creation_method = UserProfile.CreationMethod.EXTERNAL
                profile.save()
                user.is_superuser = is_super

                default_group = get_default_user_group()
                if default_group is not None:
                    user.groups.add(default_group)

                user.save()

        user = rewrite_user(user)
        return user

    def get_user(self, user_id):
        user = super(RemoteUserDjangoBackend, self).get_user(user_id)
        user = rewrite_user(user)
        return user
| gpl-2.0 |
cdsteinkuehler/linuxcnc | gladevcp/templates/classhandler.py | 10 | 1761 | #!/usr/bin/env python
# vim: sts=4 sw=4 et
# This is a component of EMC
# classhandler.py Copyright 2010 Michael Haberler
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
'''
example gladevcp handler class to start your own
no persistence support
'''
import glib
class HandlerClass:
    '''
    Example GladeVCP handler: GTK signal callbacks plus a periodic glib timer.
    (Python 2 module: print statements below are intentional.)
    '''

    def on_button_press(self,widget,data=None):
        # GTK "pressed" signal callback wired up via the Glade file.
        print "on_button_press"

    def on_destroy(self,obj,data=None):
        # Window/widget teardown callback.
        print "on_destroy"

    def _on_timer_tick(self,userdata=None):
        # Returning True keeps the glib timeout source alive.
        print "timer tick"
        return True

    def __init__(self, halcomp,builder,useropts):
        self.halcomp = halcomp
        self.builder = builder
        self.useropts = useropts
        # demonstrate a slow background timer - granularity is one second
        # for a faster timer, use this:
        # glib.timeout_add(5000, self._on_timer_tick)
        glib.timeout_add_seconds(1, self._on_timer_tick)
def get_handlers(halcomp,builder,useropts):
    # gladevcp entry point: returns the handler instances for this panel.
    # Each -U option from the command line is executed as Python in this
    # module's globals (Python 2 exec statement).  NOTE: this runs arbitrary
    # user-supplied code by design — only pass trusted options.
    for cmd in useropts:
        exec cmd in globals()
    return [HandlerClass(halcomp,builder,useropts)]
| lgpl-2.1 |
testalt/electrum-nmc-server | src/storage.py | 1 | 18189 | import plyvel
import ast
import hashlib
import os
import sys
from processor import print_log, logger
from utils import bc_address_to_hash_160, hash_160_to_pubkey_address, hex_to_int, int_to_hex, Hash
# `global` at module scope is a no-op; kept for fidelity.
global GENESIS_HASH
# NOTE(review): this is 65 hex characters (Bitcoin's genesis hash with an
# extra leading zero). For an electrum-nmc (Namecoin) server the Namecoin
# genesis hash would be expected here — confirm before relying on it.
GENESIS_HASH = '0000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f'

"""
Patricia tree for hashing unspents
"""

DEBUG = 0
# UTXO key layout: hash160(address)=20 bytes + txid=32 bytes + vout index=4 bytes.
KEYLENGTH = 20 + 32 + 4 #56
class Storage(object):
    def __init__(self, config, shared, test_reorgs):
        """Open (or initialize) the four LevelDB databases backing the UTXO
        Patricia tree, address index, history, and undo information."""
        self.dbpath = config.get('leveldb', 'path')
        if not os.path.exists(self.dbpath):
            os.mkdir(self.dbpath)
        self.pruning_limit = config.getint('leveldb', 'pruning_limit')
        self.shared = shared
        self.hash_list = {}
        self.parents = {}
        self.test_reorgs = test_reorgs
        try:
            self.db_utxo = plyvel.DB(os.path.join(self.dbpath,'utxo'), create_if_missing=True, compression=None)
            self.db_addr = plyvel.DB(os.path.join(self.dbpath,'addr'), create_if_missing=True, compression=None)
            self.db_hist = plyvel.DB(os.path.join(self.dbpath,'hist'), create_if_missing=True, compression=None)
            self.db_undo = plyvel.DB(os.path.join(self.dbpath,'undo'), create_if_missing=True, compression=None)
        # NOTE(review): bare except — any failure (including KeyboardInterrupt)
        # is logged and triggers shutdown; execution still falls through below.
        except:
            logger.error('db init', exc_info=True)
            self.shared.stop()

        self.db_version = 3 # increase this when database needs to be updated
        try:
            # Stored as repr((last_hash, height, version)); literal_eval parses it.
            self.last_hash, self.height, db_version = ast.literal_eval(self.db_undo.get('height'))
            print_log("Database version", self.db_version)
            print_log("Blockchain height", self.height)
        except:
            # Missing/unreadable height record => brand-new database.
            print_log('initializing database')
            self.height = 0
            self.last_hash = GENESIS_HASH
            db_version = self.db_version
            # write root
            self.put_node('', {})

        # check version
        if self.db_version != db_version:
            print_log("Your database '%s' is deprecated. Please create a new database"%self.dbpath)
            self.shared.stop()
            return

        # compute root hash
        d = self.get_node('')
        self.root_hash, v = self.get_node_hash('',d,None)
        print_log("UTXO tree root hash:", self.root_hash.encode('hex'))
        print_log("Coins in database:", v)
    # convert between bitcoin addresses and 20 bytes keys used for storage.
    def address_to_key(self, addr):
        # hash160 of the address; None when the address is malformed.
        return bc_address_to_hash_160(addr)
    def key_to_address(self, addr):
        # inverse of address_to_key: 20-byte hash160 -> base58 address.
        return hash_160_to_pubkey_address(addr)
    def get_proof(self, addr):
        """Return the Merkle-style path (list of (key, value) hex pairs) from
        the Patricia tree root down to the first UTXO key of this address."""
        key = self.address_to_key(addr)
        i = self.db_utxo.iterator(start=key)
        k, _ = i.next()

        p = self.get_path(k)
        p.append(k)

        out = []
        for item in p:
            v = self.db_utxo.get(item)
            out.append((item.encode('hex'), v.encode('hex')))

        return out
    def get_balance(self, addr):
        """Return the confirmed balance of *addr* (sum cached on the parent
        node's edge), or 0 when the address has no UTXOs."""
        key = self.address_to_key(addr)
        i = self.db_utxo.iterator(start=key)
        k, _ = i.next()
        if not k.startswith(key):
            # Iterator landed on a different prefix: address not in the tree.
            return 0
        p = self.get_parent(k)
        d = self.get_node(p)
        # Edge label from parent to this subtree; [1] is the cached value sum.
        letter = k[len(p)]
        return d[letter][1]
    def listunspent(self, addr):
        """Return this address's UTXOs as dicts sorted by block height.

        Keys are hash160(20) + txid(32) + vout(4); full-length keys are leaves.
        """
        key = self.address_to_key(addr)
        if key is None:
            raise BaseException('Invalid Namecoin address', addr)

        out = []
        for k, v in self.db_utxo.iterator(start=key):
            if not k.startswith(key):
                break
            if len(k) == KEYLENGTH:
                # Leaf entry: decode txid/vout from the key, value/height from
                # the stored 12-byte record.
                txid = k[20:52].encode('hex')
                txpos = hex_to_int(k[52:56])
                h = hex_to_int(v[8:12])
                v = hex_to_int(v[0:8])
                out.append({'tx_hash': txid, 'tx_pos':txpos, 'height': h, 'value':v})

        out.sort(key=lambda x:x['height'])
        return out
def get_history(self, addr):
out = []
o = self.listunspent(addr)
for item in o:
out.append((item['tx_hash'], item['height']))
h = self.db_hist.get(addr)
while h:
item = h[0:80]
h = h[80:]
txi = item[0:32].encode('hex')
hi = hex_to_int(item[36:40])
txo = item[40:72].encode('hex')
ho = hex_to_int(item[76:80])
out.append((txi, hi))
out.append((txo, ho))
# sort
out.sort(key=lambda x:x[1])
# uniqueness
out = set(out)
return map(lambda x: {'tx_hash':x[0], 'height':x[1]}, out)
    def get_address(self, txi):
        # Reverse index: serialized outpoint -> address that owns it.
        return self.db_addr.get(txi)
    def get_undo_info(self, height):
        """Return the reorg undo record for *height* (kept modulo 100 blocks).

        The record was written with repr(), so eval() here only ever sees our
        own trusted database contents.  NOTE(review): when the record is
        missing, `s` is None and eval(None) raises TypeError after the log
        line — confirm callers never request pruned heights.
        """
        s = self.db_undo.get("undo_info_%d" % (height % 100))
        if s is None: print_log("no undo info for ", height)
        return eval(s)
    def write_undo_info(self, height, bitcoind_height, undo_info):
        # Only keep undo data for the last ~100 blocks (the reorg window),
        # unless reorg testing forces it; slot index wraps modulo 100.
        if height > bitcoind_height - 100 or self.test_reorgs:
            self.db_undo.put("undo_info_%d" % (height % 100), repr(undo_info))
def common_prefix(self, word1, word2):
        """Return the longest common leading substring of *word1* and *word2*."""
        index = 0
        for c1, c2 in zip(word1, word2):
            if c1 != c2:
                break
            index += 1
        return word1[0:index]
def put_node(self, key, d, batch=None):
        """Serialize node dict *d* ({byte-char: (hash, value)}) and store it
        under *key*, either in *batch* or directly in db_utxo.

        Wire format: a 256-bit bitmap (32 bytes, one bit per possible child
        byte) followed by one 40-byte (32-byte hash + 8-byte value) record
        per present child, in ascending byte order.
        """
        k = 0
        serialized = ''
        for i in range(256):
            if chr(i) in d.keys():
                k += 1<<i
                h, v = d[chr(i)]
                # children whose hash is not yet computed are stored as zeros
                if h is None: h = chr(0)*32
                vv = int_to_hex(v, 8).decode('hex')
                item = h + vv
                assert len(item) == 40
                serialized += item
        k = "0x%0.64X" % k # 32 bytes
        k = k[2:].decode('hex')
        assert len(k) == 32
        out = k + serialized
        if batch:
            batch.put(key, out)
        else:
            self.db_utxo.put(key, out)
def get_node(self, key):
        """Deserialize and return the node stored at *key* as a dict
        {byte-char: (hash, value)}, or None when absent.

        Inverse of put_node: reads the 256-bit child bitmap, then one
        40-byte record per set bit, low bits first.
        """
        s = self.db_utxo.get(key)
        if s is None:
            return
        #print "get node", key.encode('hex'), len(key), s.encode('hex')
        k = int(s[0:32].encode('hex'), 16)
        s = s[32:]
        d = {}
        for i in range(256):
            # bit i of the bitmap marks a child starting with byte chr(i)
            if k % 2 == 1:
                _hash = s[0:32]
                value = hex_to_int(s[32:40])
                d[chr(i)] = (_hash, value)
                s = s[40:]
            k = k/2
        #cache
        return d
def add_address(self, target, value, height):
        """Insert leaf *target* (full-length key) with *value*/*height* into
        the patricia trie, creating or splitting internal nodes as needed,
        and queue the hash updates along the insertion path.
        """
        assert len(target) == KEYLENGTH
        word = target      # remaining suffix still to be matched
        key = ''           # current node key (matched prefix)
        path = [ '' ]      # node keys visited from the root
        i = self.db_utxo.iterator()
        while key != target:
            items = self.get_node(key)
            if word[0] in items.keys():
                # a child edge exists for the next byte: find the actual child key
                i.seek(key + word[0])
                new_key, _ = i.next()
                if target.startswith(new_key):
                    # add value to the child node
                    key = new_key
                    word = target[len(key):]
                    if key == target:
                        break
                    else:
                        assert key not in path
                        path.append(key)
                else:
                    # prune current node and add new node
                    # (target diverges inside the edge: split at the common prefix)
                    prefix = self.common_prefix(new_key, target)
                    index = len(prefix)
                    ## get hash and value of new_key from parent (if it's a leaf)
                    if len(new_key) == KEYLENGTH:
                        parent_key = self.get_parent(new_key)
                        parent = self.get_node(parent_key)
                        z = parent[ new_key[len(parent_key)] ]
                        self.put_node(prefix, { target[index]:(None,0), new_key[index]:z } )
                    else:
                        # if it is not a leaf, update the hash of new_key because skip_string changed
                        h, v = self.get_node_hash(new_key, self.get_node(new_key), prefix)
                        self.put_node(prefix, { target[index]:(None,0), new_key[index]:(h,v) } )
                    path.append(prefix)
                    self.parents[new_key] = prefix
                    break
            else:
                # no child for this byte: register target directly on this node
                assert key in path
                items[ word[0] ] = (None,0)
                self.put_node(key,items)
                break
        # write
        s = (int_to_hex(value, 8) + int_to_hex(height,4)).decode('hex')
        self.db_utxo.put(target, s)
        # the hash of a node is the txid
        _hash = target[20:52]
        self.update_node_hash(target, path, _hash, value)
def update_node_hash(self, node, path, _hash, value):
        """Queue (hash, value) for *node* and record the parent link of every
        step along *path*, so update_hashes() can walk back up to the root."""
        child = node
        for ancestor in reversed(path):
            self.parents[child] = ancestor
            child = ancestor
        self.hash_list[node] = (_hash, value)
def update_hashes(self):
        """Propagate all pending (hash, value) entries in self.hash_list from
        the leaves up to the root, then batch-write the modified nodes.

        Processes nodes by decreasing key length so every child is resolved
        before its parent; updates self.root_hash/self.root_value at the top.
        """
        nodes = {} # nodes to write
        for i in range(KEYLENGTH, -1, -1):
            for node in self.hash_list.keys():
                if len(node) != i: continue
                node_hash, node_value = self.hash_list.pop(node)
                # for each node, compute its hash, send it to the parent
                if node == '':
                    self.root_hash = node_hash
                    self.root_value = node_value
                    break
                parent = self.parents[node]
                # read parent.. do this in add_address
                d = nodes.get(parent)
                if d is None:
                    d = self.get_node(parent)
                    assert d is not None
                letter = node[len(parent)]
                assert letter in d.keys()
                # internal node without a precomputed hash: derive it from its children
                if i != KEYLENGTH and node_hash is None:
                    d2 = self.get_node(node)
                    node_hash, node_value = self.get_node_hash(node, d2, parent)
                    assert node_hash is not None
                # write new value
                d[letter] = (node_hash, node_value)
                nodes[parent] = d
                # iterate
                grandparent = self.parents[parent] if parent != '' else None
                parent_hash, parent_value = self.get_node_hash(parent, d, grandparent)
                self.hash_list[parent] = (parent_hash, parent_value)
        # batch write modified nodes
        batch = self.db_utxo.write_batch()
        for k, v in nodes.items():
            self.put_node(k, v, batch)
        batch.write()
        # cleanup
        assert self.hash_list == {}
        self.parents = {}
def get_node_hash(self, x, d, parent):
        """Return (hash, value) of node *x* given its children dict *d* and
        its *parent* key.

        The value is the sum of the children's values; the hash covers the
        skip string (the edge label from parent to x, minus the one byte
        already encoded by the parent's child slot) followed by the
        concatenated child hashes, sorted by child byte.
        """
        # final hash
        if x != '':
            skip_string = x[len(parent)+1:]
        else:
            skip_string = ''
        d2 = sorted(d.items())
        values = map(lambda x: x[1][1], d2)
        hashes = map(lambda x: x[1][0], d2)
        value = sum( values )
        _hash = self.hash( skip_string + ''.join(hashes) )
        return _hash, value
def get_path(self, target):
        """Return the list of node keys from the root down to (excluding)
        *target*, or False if *target* is not present in the trie."""
        word = target      # remaining suffix still to be matched
        key = ''           # current node key (matched prefix)
        path = [ '' ]
        i = self.db_utxo.iterator(start='')
        while key != target:
            i.seek(key + word[0])
            try:
                new_key, _ = i.next()
                is_child = new_key.startswith(key + word[0])
            except StopIteration:
                # seeked past the end of the database: no such child
                is_child = False
            if is_child:
                if target.startswith(new_key):
                    # add value to the child node
                    key = new_key
                    word = target[len(key):]
                    if key == target:
                        break
                    else:
                        assert key not in path
                        path.append(key)
                else:
                    print_log('not in tree', self.db_utxo.get(key+word[0]), new_key.encode('hex'))
                    return False
            else:
                assert key in path
                break
        return path
def delete_address(self, leaf):
        """Remove leaf *leaf* from the trie, collapsing its parent node when
        it is left with a single child, and queue the hash updates.

        Returns the serialized leaf value (8-byte value + 4-byte height).
        """
        path = self.get_path(leaf)
        if path is False:
            print_log("addr not in tree", leaf.encode('hex'), self.key_to_address(leaf[0:20]), self.db_utxo.get(leaf))
            # NOTE(review): bare `raise` with no active exception is itself an
            # error in Python 2 (TypeError); consider raising an explicit one.
            raise
        s = self.db_utxo.get(leaf)
        self.db_utxo.delete(leaf)
        if leaf in self.hash_list:
            self.hash_list.pop(leaf)
        parent = path[-1]
        letter = leaf[len(parent)]
        items = self.get_node(parent)
        items.pop(letter)
        # remove key if it has a single child
        if len(items) == 1:
            letter, v = items.items()[0]
            self.db_utxo.delete(parent)
            if parent in self.hash_list:
                self.hash_list.pop(parent)
            # we need the exact length for the iteration
            i = self.db_utxo.iterator()
            i.seek(parent+letter)
            k, v = i.next()
            # note: k is not necessarily a leaf
            if len(k) == KEYLENGTH:
                # leaf: its hash is the txid embedded in the key
                _hash, value = k[20:52], hex_to_int(v[0:8])
            else:
                # internal node: hash will be recomputed in update_hashes()
                _hash, value = None, None
            self.update_node_hash(k, path[:-1], _hash, value)
        else:
            self.put_node(parent, items)
            _hash, value = None, None
            self.update_node_hash(parent, path[:-1], _hash, value)
        return s
def get_children(self, x):
        """Yield the (key, value) children of node *x*, one per distinct
        first byte after the prefix, by seeking once per candidate byte."""
        i = self.db_utxo.iterator()
        l = 0
        while l <256:
            i.seek(x+chr(l))
            k, v = i.next()
            if k.startswith(x+chr(l)):
                # exact child for byte l; try the next byte
                yield k, v
                l += 1
            elif k.startswith(x):
                # landed on a child with a later first byte: yield it and
                # jump past that byte
                yield k, v
                l = ord(k[len(x)]) + 1
            else:
                # left the subtree of x: done
                break
def get_parent(self, x):
        """Return the key of x's parent node: the longest proper prefix of
        *x* present in db_utxo.  Raises if no prefix is found."""
        i = self.db_utxo.iterator()
        for j in range(len(x)):
            # try prefixes of decreasing length
            p = x[0:-j-1]
            i.seek(p)
            k, v = i.next()
            if x.startswith(k) and x!=k:
                break
        else: raise
        return k
def hash(self, x):
        """Hash *x* with the global Hash() helper (presumably the coin's
        double-SHA256 — confirm); in DEBUG mode return a readable
        placeholder string instead, for tracing the trie."""
        if DEBUG: return "hash("+x+")"
        return Hash(x)
def get_root_hash(self):
        """Return the cached root hash of the trie (set by update_hashes)."""
        return self.root_hash
def close(self):
        """Close all four underlying databases."""
        for database in (self.db_utxo, self.db_addr, self.db_hist, self.db_undo):
            database.close()
def add_to_history(self, addr, tx_hash, tx_pos, value, tx_height):
        """Record a new output (tx_hash:tx_pos, value) for *addr* at
        *tx_height*: insert the leaf into the trie and write the
        outpoint -> address backlink."""
        key = self.address_to_key(addr)
        txo = (tx_hash + int_to_hex(tx_pos, 4)).decode('hex')
        # write the new history
        self.add_address(key + txo, value, tx_height)
        # backlink
        self.db_addr.put(txo, addr)
def revert_add_to_history(self, addr, tx_hash, tx_pos, value, tx_height):
        """Undo add_to_history during a reorg: remove the leaf from the trie
        and delete the outpoint -> address backlink."""
        key = self.address_to_key(addr)
        txo = (tx_hash + int_to_hex(tx_pos, 4)).decode('hex')
        # delete
        self.delete_address(key + txo)
        # backlink
        self.db_addr.delete(txo)
def get_utxo_value(self, addr, txi):
        """Return the value (in base units) of outpoint *txi* owned by *addr*,
        read from the first 8 bytes of the stored leaf."""
        key = self.address_to_key(addr)
        leaf = key + txi
        s = self.db_utxo.get(leaf)
        value = hex_to_int(s[0:8])
        return value
def set_spent(self, addr, txi, txid, index, height, undo):
        """Mark outpoint *txi* of *addr* as spent by *txid*:*index* at
        *height*: remove the UTXO leaf, record the removal in *undo* (for
        reorgs), drop the backlink, and append an 80-byte record to the
        pruned spent-history of the address."""
        key = self.address_to_key(addr)
        leaf = key + txi
        s = self.delete_address(leaf)
        value = hex_to_int(s[0:8])
        in_height = hex_to_int(s[8:12])
        # remember (value, height) so revert_set_spent can restore the leaf
        undo[leaf] = value, in_height
        # delete backlink txi-> addr
        self.db_addr.delete(txi)
        # add to history
        s = self.db_hist.get(addr)
        if s is None: s = ''
        # 80-byte record: spent outpoint (36) + in_height (4) + spending outpoint+height (40)
        txo = (txid + int_to_hex(index,4) + int_to_hex(height,4)).decode('hex')
        s += txi + int_to_hex(in_height,4).decode('hex') + txo
        # keep only the newest pruning_limit records
        s = s[ -80*self.pruning_limit:]
        self.db_hist.put(addr, s)
def revert_set_spent(self, addr, txi, undo):
        """Undo set_spent during a reorg: restore the backlink and the UTXO
        leaf from *undo*, and drop the last spent-history record (when it was
        not already pruned away)."""
        key = self.address_to_key(addr)
        leaf = key + txi
        # restore backlink
        self.db_addr.put(txi, addr)
        v, height = undo.pop(leaf)
        self.add_address(leaf, v, height)
        # revert add to history
        s = self.db_hist.get(addr)
        # s might be empty if pruning limit was reached
        if not s:
            return
        # the last 80-byte record must be the one set_spent appended for txi
        assert s[-80:-44] == txi
        s = s[:-80]
        self.db_hist.put(addr, s)
def import_transaction(self, txid, tx, block_height, touched_addr):
        """Apply transaction *tx* (parsed dict with 'inputs'/'outputs') at
        *block_height*: spend its inputs and add its outputs to the store.

        Adds every affected address to *touched_addr* and returns the undo
        dict needed by revert_transaction.
        """
        undo = { 'prev_addr':[] } # contains the list of pruned items for each address in the tx; also, 'prev_addr' is a list of prev addresses
        prev_addr = []
        for i, x in enumerate(tx.get('inputs')):
            txi = (x.get('prevout_hash') + int_to_hex(x.get('prevout_n'), 4)).decode('hex')
            addr = self.get_address(txi)
            if addr is not None:
                self.set_spent(addr, txi, txid, i, block_height, undo)
                touched_addr.add(addr)
            # keep one slot per input (possibly None) so revert can index by position
            prev_addr.append(addr)
        undo['prev_addr'] = prev_addr
        # here I add only the outputs to history; maybe I want to add inputs too (that's in the other loop)
        for x in tx.get('outputs'):
            addr = x.get('address')
            if addr is None: continue
            self.add_to_history(addr, txid, x.get('index'), x.get('value'), block_height)
            touched_addr.add(addr)
        return undo
def revert_transaction(self, txid, tx, block_height, touched_addr, undo):
        """Undo import_transaction during a reorg, in reverse order: remove
        the outputs first, then un-spend the inputs using *undo*.

        The undo dict must be fully consumed by the end (asserted)."""
        #print_log("revert tx", txid)
        for x in reversed(tx.get('outputs')):
            addr = x.get('address')
            if addr is None: continue
            self.revert_add_to_history(addr, txid, x.get('index'), x.get('value'), block_height)
            touched_addr.add(addr)
        prev_addr = undo.pop('prev_addr')
        for i, x in reversed(list(enumerate(tx.get('inputs')))):
            addr = prev_addr[i]
            if addr is not None:
                txi = (x.get('prevout_hash') + int_to_hex(x.get('prevout_n'), 4)).decode('hex')
                self.revert_set_spent(addr, txi, undo)
                touched_addr.add(addr)
        assert undo == {}
| agpl-3.0 |
BeenzSyed/tempest | tempest/services/compute/xml/aggregates_client.py | 1 | 5282 | # Copyright 2013 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from tempest.common.rest_client import RestClientXML
from tempest import exceptions
from tempest.services.compute.xml.common import Document
from tempest.services.compute.xml.common import Element
from tempest.services.compute.xml.common import Text
from tempest.services.compute.xml.common import xml_to_json
class AggregatesClientXML(RestClientXML):
    """XML REST client for the Nova os-aggregates API (host aggregates):
    CRUD on aggregates, host membership, and aggregate metadata."""
def __init__(self, config, username, password, auth_url, tenant_name=None):
        super(AggregatesClientXML, self).__init__(config, username, password,
                                                  auth_url, tenant_name)
        self.service = self.config.compute.catalog_type
def _format_aggregate(self, g):
        """Convert an aggregate XML element into a plain dict, flattening
        the hosts list and normalizing the 'None' availability zone."""
        agg = xml_to_json(g)
        aggregate = {}
        for key, value in agg.items():
            if key == 'hosts':
                aggregate['hosts'] = []
                for k, v in value.items():
                    aggregate['hosts'].append(v)
            elif key == 'availability_zone':
                # the XML serializes a null AZ as the string 'None'
                aggregate[key] = None if value == 'None' else value
            else:
                aggregate[key] = value
        return aggregate
def _parse_array(self, node):
        """Format every child element of *node* as an aggregate dict."""
        return [self._format_aggregate(x) for x in node]
def list_aggregates(self):
        """Get aggregate list."""
        resp, body = self.get("os-aggregates", self.headers)
        aggregates = self._parse_array(etree.fromstring(body))
        return resp, aggregates
def get_aggregate(self, aggregate_id):
        """Get details of the given aggregate."""
        resp, body = self.get("os-aggregates/%s" % str(aggregate_id),
                              self.headers)
        aggregate = self._format_aggregate(etree.fromstring(body))
        return resp, aggregate
def create_aggregate(self, name, availability_zone=None):
        """Creates a new aggregate."""
        post_body = Element("aggregate",
                            name=name,
                            availability_zone=availability_zone)
        resp, body = self.post('os-aggregates',
                               str(Document(post_body)),
                               self.headers)
        aggregate = self._format_aggregate(etree.fromstring(body))
        return resp, aggregate
def update_aggregate(self, aggregate_id, name, availability_zone=None):
        """Update an aggregate's name and availability zone."""
        put_body = Element("aggregate",
                           name=name,
                           availability_zone=availability_zone)
        resp, body = self.put('os-aggregates/%s' % str(aggregate_id),
                              str(Document(put_body)),
                              self.headers)
        aggregate = self._format_aggregate(etree.fromstring(body))
        return resp, aggregate
def delete_aggregate(self, aggregate_id):
        """Deletes the given aggregate."""
        return self.delete("os-aggregates/%s" % str(aggregate_id),
                           self.headers)
def is_resource_deleted(self, id):
        """Return True once the aggregate no longer exists (used by waiters)."""
        try:
            self.get_aggregate(id)
        except exceptions.NotFound:
            return True
        return False
def add_host(self, aggregate_id, host):
        """Adds a host to the given aggregate."""
        post_body = Element("add_host", host=host)
        resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                               str(Document(post_body)),
                               self.headers)
        aggregate = self._format_aggregate(etree.fromstring(body))
        return resp, aggregate
def remove_host(self, aggregate_id, host):
        """Removes a host from the given aggregate."""
        post_body = Element("remove_host", host=host)
        resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                               str(Document(post_body)),
                               self.headers)
        aggregate = self._format_aggregate(etree.fromstring(body))
        return resp, aggregate
def set_metadata(self, aggregate_id, meta):
        """Replaces the aggregate's existing metadata with new metadata."""
        post_body = Element("set_metadata")
        metadata = Element("metadata")
        post_body.append(metadata)
        # each metadata key becomes its own element with the value as text
        for k, v in meta.items():
            meta = Element(k)
            meta.append(Text(v))
            metadata.append(meta)
        resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                               str(Document(post_body)),
                               self.headers)
        aggregate = self._format_aggregate(etree.fromstring(body))
        return resp, aggregate
| apache-2.0 |
wanghuafeng/spider_tools | utils.py | 1 | 3177 | #!-*- coding:utf-8 -*-
__author__= 'huafeng'
import re
import sys
from urlparse import urlparse
def format_str(unformat_str, split_by=":"):
    """Format a raw header/parameter dump into a dict-like string.

    :param unformat_str: raw text, one "key<split_by>value" pair per line
        (typically copied request headers or POST parameters).
    :param split_by: separator between key and value (default ":").
    :return: a string that looks like a dict literal, e.g.
        '{\\n"Host":"example.com",\\n}'.  Lines without the separator
        are skipped.
    """
    # re.escape: the original interpolated split_by into the patterns
    # unescaped, so separators such as "|" or "." acted as regex
    # metacharacters and matched the wrong thing.
    escaped_sep = re.escape(split_by)
    formatted_pairs = []
    for kv_param in unformat_str.split("\n"):
        if not kv_param or not re.search(escaped_sep, kv_param):
            continue
        # split only on the first separator so values may contain it too
        kv_tuple = tuple(re.split(r"\s*%s\s*" % escaped_sep, kv_param.strip(), 1))
        formatted_pairs.append('"%s":"%s",' % kv_tuple)
    return """{\n%s\n}""" % "\n".join(formatted_pairs)
def select_proxy(url, proxies):
    """Select a proxy for the url, if applicable.

    :param url: The url being for the request
    :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
    """
    proxies = proxies or {}
    parts = urlparse(url)
    if parts.hostname is None:
        return proxies.get(parts.scheme, proxies.get('all'))
    # most-specific key wins: scheme://host, then scheme, then all://host, then all
    candidates = (
        parts.scheme + '://' + parts.hostname,
        parts.scheme,
        'all://' + parts.hostname,
        'all',
    )
    for candidate in candidates:
        if candidate in proxies:
            return proxies[candidate]
    return None
ip_pattern = r'((?:\d{1,3}\.){3}\d{1,3}\:\d+)'
def import_string(import_name, silent=False):
    """Imports an object based on a string. This is useful if you want to
    use import paths as endpoints or something similar. An import path can
    be specified either in dotted notation (``xml.sax.saxutils.escape``)
    or with a colon as object delimiter (``xml.sax.saxutils:escape``).
    If `silent` is True the return value will be `None` if the import fails.
    :param import_name: the dotted name for the object to import.
    :param silent: if set to `True` import errors are ignored and
                   `None` is returned instead.
    :return: imported object
    """
    # force the import name to automatically convert to strings
    # __import__ is not able to handle unicode strings in the fromlist
    # if the module is a package
    import_name = str(import_name).replace(':', '.')
    try:
        try:
            # first, try the whole name as a module
            __import__(import_name)
        except ImportError:
            if '.' not in import_name:
                raise
        else:
            return sys.modules[import_name]
        # not a module: treat the name as module.attribute
        module_name, obj_name = import_name.rsplit('.', 1)
        try:
            module = __import__(module_name, None, None, [obj_name])
        except ImportError:
            # support importing modules not yet set up by the parent module
            # (or package for that matter)
            module = import_string(module_name)
        try:
            return getattr(module, obj_name)
        except AttributeError as e:
            # surface a missing attribute as an import failure for the caller
            raise ImportError(e)
    except ImportError as e:
        if not silent:
            raise
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.