| repo_name (string, 5–100 chars) | path (string, 4–375 chars) | copies (991 classes) | size (string, 4–7 chars) | content (string, 666–1M chars) | license (15 classes) |
|---|---|---|---|---|---|
troepman/MarQtChips | bluechips/config/middleware.py | 2 | 2833 | """Pylons middleware initialization"""
from beaker.middleware import CacheMiddleware, SessionMiddleware
from paste.cascade import Cascade
from paste.registry import RegistryManager
from paste.urlparser import StaticURLParser
from paste.auth.basic import AuthBasicHandler
from paste.deploy.converters import asbool
from pylons import config
from pylons.middleware import ErrorHandler, StatusCodeRedirect
from pylons.wsgiapp import PylonsApp
from routes.middleware import RoutesMiddleware
import authkit.authorize
from bluechips.config.environment import load_environment
from bluechips.lib.permissions import (BlueChipUser, DummyAuthenticate,
authenticate)
def make_app(global_conf, full_stack=True, **app_conf):
    """Create a Pylons WSGI application and return it

    ``global_conf``
        The inherited configuration for this application. Normally from
        the [DEFAULT] section of the Paste ini file.

    ``full_stack``
        Whether or not this application provides a full WSGI stack (by
        default, meaning it handles its own exceptions and errors).
        Disable full_stack when this application is "managed" by
        another WSGI middleware.

    ``app_conf``
        The application's local configuration. Normally specified in the
        [app:<name>] section of the Paste ini file (where <name>
        defaults to main).
    """
    # Configure the Pylons environment
    load_environment(global_conf, app_conf)

    # The Pylons WSGI app
    app = PylonsApp()

    # CUSTOM MIDDLEWARE HERE (filtered by error handling middlewares)
    # Authorization sits innermost so it runs last, after routing/session
    # middleware have populated the request; skipped entirely when the
    # 'noauth' option is set in the app config.
    if not asbool(app_conf.get('noauth')):
        app = authkit.authorize.middleware(app, BlueChipUser())

    # Routing/Session/Cache Middleware
    app = RoutesMiddleware(app, config['routes.map'])
    app = SessionMiddleware(app, config)
    app = CacheMiddleware(app, config)

    if asbool(full_stack):
        # Handle Python exceptions
        app = ErrorHandler(app, global_conf, **config['pylons.errorware'])

        # Display error documents for 401, 403, 404 status codes (and
        # 500 when debug is disabled); note 400 is also included below.
        status_codes = [400, 401, 403, 404]
        if not asbool(config.get('debug')):
            status_codes.append(500)
        app = StatusCodeRedirect(app, status_codes)

    # Establish the Registry for this application
    app = RegistryManager(app)

    # Static files (If running in production, and Apache or another web
    # server is handling this static content, remove the following 3 lines)
    static_app = StaticURLParser(config['pylons.paths']['static_files'])
    app = Cascade([static_app, app])

    # HTTP Basic auth wraps the whole stack (outermost) so credentials are
    # established before any inner middleware runs; DummyAuthenticate
    # substitutes fake authkit environ values when auth is disabled.
    if not asbool(app_conf.get('noauth')):
        app = AuthBasicHandler(app, 'BlueChips', authenticate)
    app = DummyAuthenticate(app, app_conf)

    return app
| gpl-2.0 |
azunite/chrome_build | git_reparent_branch.py | 9 | 2519 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Change the upstream of the current branch."""
import argparse
import sys
import subprocess2
from git_common import upstream, current_branch, run, tags, set_branch_config
from git_common import get_or_create_merge_base, root, manual_merge_base
import git_rebase_update
def main(args):
    """Reparent the current branch onto a new upstream branch or tag.

    Returns a process exit code (0 on success, 1 on invalid reference).
    Note: this file is Python 2 (uses the print statement below).
    """
    root_ref = root()

    parser = argparse.ArgumentParser()
    g = parser.add_mutually_exclusive_group()
    g.add_argument('new_parent', nargs='?',
                   help='New parent branch (or tag) to reparent to.')
    g.add_argument('--root', action='store_true',
                   help='Reparent to the configured root branch (%s).' % root_ref)
    g.add_argument('--lkgr', action='store_true',
                   help='Reparent to the lkgr tag.')
    opts = parser.parse_args(args)

    # TODO(iannucci): Allow specification of the branch-to-reparent

    branch = current_branch()
    if opts.root:
        new_parent = root_ref
    elif opts.lkgr:
        new_parent = 'lkgr'
    else:
        if not opts.new_parent:
            parser.error('Must specify new parent somehow')
        new_parent = opts.new_parent
    cur_parent = upstream(branch)
    if branch == 'HEAD' or not branch:
        parser.error('Must be on the branch you want to reparent')
    if new_parent == cur_parent:
        parser.error('Cannot reparent a branch to its existing parent')

    # Capture the merge base against the OLD parent before changing the
    # upstream, so it can be re-pinned against the new parent afterwards.
    mbase = get_or_create_merge_base(branch, cur_parent)

    all_tags = tags()
    if cur_parent in all_tags:
        cur_parent += ' [tag]'

    # Verify the target exists as a git ref before touching any config.
    try:
        run('show-ref', new_parent)
    except subprocess2.CalledProcessError:
        print >> sys.stderr, 'fatal: invalid reference: %s' % new_parent
        return 1

    if new_parent in all_tags:
        # Tags cannot be set via `git branch --set-upstream-to`; emulate
        # by writing the branch's remote/merge config directly.
        print ("Reparenting %s to track %s [tag] (was %s)"
               % (branch, new_parent, cur_parent))
        set_branch_config(branch, 'remote', '.')
        set_branch_config(branch, 'merge', new_parent)
    else:
        print ("Reparenting %s to track %s (was %s)"
               % (branch, new_parent, cur_parent))
        run('branch', '--set-upstream-to', new_parent, branch)

    manual_merge_base(branch, mbase, new_parent)

    # TODO(iannucci): ONLY rebase-update the branch which moved (and dependants)
    return git_rebase_update.main(['--no-fetch'])
if __name__ == '__main__':  # pragma: no cover
    try:
        sys.exit(main(sys.argv[1:]))
    except KeyboardInterrupt:
        # Exit quietly with a conventional failure code on Ctrl-C.
        sys.stderr.write('interrupted\n')
        sys.exit(1)
| bsd-3-clause |
maartenq/ansible | lib/ansible/module_utils/digital_ocean.py | 104 | 5769 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Ansible Project 2017
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import os
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
class Response(object):
    """Thin wrapper around a ``fetch_url`` result pair ``(resp, info)``."""

    def __init__(self, resp, info):
        # A failed request yields a falsy resp object; leave body as None.
        self.body = resp.read() if resp else None
        self.info = info

    @property
    def json(self):
        """Decoded JSON payload, or None when no valid JSON is available."""
        payload = self.body
        if not payload:
            # Error responses carry their payload inside the info dict.
            if "body" not in self.info:
                return None
            return json.loads(to_text(self.info["body"]))
        try:
            return json.loads(to_text(payload))
        except ValueError:
            return None

    @property
    def status_code(self):
        """HTTP status code reported by fetch_url."""
        return self.info["status"]
class DigitalOceanHelper:
    """Small helper around the DigitalOcean v2 REST API for Ansible modules.

    The constructor performs a live `account` request to validate the
    OAuth token and fails the module on a 401.
    """
    def __init__(self, module):
        self.module = module
        self.baseurl = 'https://api.digitalocean.com/v2'
        self.timeout = module.params.get('timeout', 30)
        self.oauth_token = module.params.get('oauth_token')
        self.headers = {'Authorization': 'Bearer {0}'.format(self.oauth_token),
                        'Content-type': 'application/json'}

        # Check if api_token is valid or not
        response = self.get('account')
        if response.status_code == 401:
            self.module.fail_json(msg='Failed to login using API token, please verify validity of API token.')

    def _url_builder(self, path):
        # Join base URL and path without producing a double slash.
        if path[0] == '/':
            path = path[1:]
        return '%s/%s' % (self.baseurl, path)

    def send(self, method, path, data=None):
        """Issue one API request and wrap the result in a Response."""
        url = self._url_builder(path)
        data = self.module.jsonify(data)

        resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method, timeout=self.timeout)

        return Response(resp, info)

    # Convenience wrappers around send() for each HTTP verb.
    def get(self, path, data=None):
        return self.send('GET', path, data)

    def put(self, path, data=None):
        return self.send('PUT', path, data)

    def post(self, path, data=None):
        return self.send('POST', path, data)

    def delete(self, path, data=None):
        return self.send('DELETE', path, data)

    @staticmethod
    def digital_ocean_argument_spec():
        """Argument spec shared by all DigitalOcean modules."""
        return dict(
            validate_certs=dict(type='bool', required=False, default=True),
            oauth_token=dict(
                no_log=True,
                # Support environment variable for DigitalOcean OAuth Token
                fallback=(env_fallback, ['DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN', 'OAUTH_TOKEN']),
                required=False,
                aliases=['api_token'],
            ),
            timeout=dict(type='int', default=30),
        )

    def get_paginated_data(self, base_url=None, data_key_name=None, data_per_page=40, expected_status_code=200):
        """
        Function to get all paginated data from given URL
        Args:
            base_url: Base URL to get data from
            data_key_name: Name of data key value
            data_per_page: Number results per page (Default: 40)
            expected_status_code: Expected returned code from DigitalOcean (Default: 200)
        Returns: List of data
        """
        page = 1
        has_next = True
        ret_data = []
        status_code = None
        response = None
        # NOTE(review): pagination params are appended directly, so base_url
        # is expected to already end in '?' or '&' -- confirm with callers.
        while has_next or status_code != expected_status_code:
            required_url = "{0}page={1}&per_page={2}".format(base_url, page, data_per_page)
            response = self.get(required_url)
            status_code = response.status_code
            # stop if any error during pagination
            if status_code != expected_status_code:
                break
            page += 1
            ret_data.extend(response.json[data_key_name])
            has_next = "pages" in response.json["links"] and "next" in response.json["links"]["pages"]

        if status_code != expected_status_code:
            msg = "Failed to fetch %s from %s" % (data_key_name, base_url)
            if response:
                # NOTE(review): assumes the error payload has a 'message'
                # key; a missing key would raise here -- confirm.
                msg += " due to error : %s" % response.json['message']
            self.module.fail_json(msg=msg)

        return ret_data
| gpl-3.0 |
pabloborrego93/edx-platform | common/djangoapps/course_action_state/tests/test_managers.py | 126 | 7219 | # pylint: disable=invalid-name, attribute-defined-outside-init
"""
Tests for basic common operations related to Course Action State managers
"""
from ddt import ddt, data
from django.test import TestCase
from collections import namedtuple
from opaque_keys.edx.locations import CourseLocator
from course_action_state.models import CourseRerunState
from course_action_state.managers import CourseActionStateItemNotFoundError
# Sequence of Action models to be tested with ddt.
COURSE_ACTION_STATES = (CourseRerunState, )
class TestCourseActionStateManagerBase(TestCase):
    """
    Base class for testing Course Action State Managers.
    """
    def setUp(self):
        super(TestCourseActionStateManagerBase, self).setUp()
        # Course key shared by all manager tests in the subclasses.
        self.course_key = CourseLocator("test_org", "test_course_num", "test_run")
@ddt
class TestCourseActionStateManager(TestCourseActionStateManagerBase):
    """
    Test class for testing the CourseActionStateManager.
    """
    @data(*COURSE_ACTION_STATES)
    def test_update_state_allow_not_found_is_false(self, action_class):
        # Updating a nonexistent entry must raise when allow_not_found=False.
        with self.assertRaises(CourseActionStateItemNotFoundError):
            action_class.objects.update_state(self.course_key, "fake_state", allow_not_found=False)

    @data(*COURSE_ACTION_STATES)
    def test_update_state_allow_not_found(self, action_class):
        # With allow_not_found=True the entry is created on the fly.
        action_class.objects.update_state(self.course_key, "initial_state", allow_not_found=True)
        self.assertIsNotNone(
            action_class.objects.find_first(course_key=self.course_key)
        )

    @data(*COURSE_ACTION_STATES)
    def test_delete(self, action_class):
        obj = action_class.objects.update_state(self.course_key, "initial_state", allow_not_found=True)
        action_class.objects.delete(obj.id)
        # After deletion, lookups for the course must raise not-found.
        with self.assertRaises(CourseActionStateItemNotFoundError):
            action_class.objects.find_first(course_key=self.course_key)
@ddt
class TestCourseActionUIStateManager(TestCourseActionStateManagerBase):
    """
    Test class for testing the CourseActionUIStateManager.
    """
    def init_course_action_states(self, action_class):
        """
        Creates course action state entries with different states for the given action model class.
        Creates both displayable (should_display=True) and non-displayable (should_display=False) entries.
        """
        def create_course_states(starting_course_num, ending_course_num, state, should_display=True):
            """
            Creates a list of course state tuples by creating unique course locators with course-numbers
            from starting_course_num to ending_course_num.
            """
            CourseState = namedtuple('CourseState', 'course_key, state, should_display')
            return [
                CourseState(CourseLocator("org", "course", "run" + str(num)), state, should_display)
                for num in range(starting_course_num, ending_course_num)
            ]

        NUM_COURSES_WITH_STATE1 = 3
        NUM_COURSES_WITH_STATE2 = 3
        NUM_COURSES_WITH_STATE3 = 3
        NUM_COURSES_NON_DISPLAYABLE = 3

        # courses with state1 and should_display=True
        self.courses_with_state1 = create_course_states(
            0,
            NUM_COURSES_WITH_STATE1,
            'state1'
        )
        # courses with state2 and should_display=True
        self.courses_with_state2 = create_course_states(
            NUM_COURSES_WITH_STATE1,
            NUM_COURSES_WITH_STATE1 + NUM_COURSES_WITH_STATE2,
            'state2'
        )
        # courses with state3 and should_display=True
        self.courses_with_state3 = create_course_states(
            NUM_COURSES_WITH_STATE1 + NUM_COURSES_WITH_STATE2,
            NUM_COURSES_WITH_STATE1 + NUM_COURSES_WITH_STATE2 + NUM_COURSES_WITH_STATE3,
            'state3'
        )
        # all courses with should_display=True
        self.course_actions_displayable_states = (
            self.courses_with_state1 + self.courses_with_state2 + self.courses_with_state3
        )
        # courses with state3 and should_display=False
        self.courses_with_state3_non_displayable = create_course_states(
            NUM_COURSES_WITH_STATE1 + NUM_COURSES_WITH_STATE2 + NUM_COURSES_WITH_STATE3,
            NUM_COURSES_WITH_STATE1 + NUM_COURSES_WITH_STATE2 + NUM_COURSES_WITH_STATE3 + NUM_COURSES_NON_DISPLAYABLE,
            'state3',
            should_display=False,
        )

        # create course action states for all courses
        # NOTE(review): the loop variable shadows the CourseState namedtuple
        # name used inside create_course_states; harmless but confusing.
        for CourseState in self.course_actions_displayable_states + self.courses_with_state3_non_displayable:
            action_class.objects.update_state(
                CourseState.course_key,
                CourseState.state,
                should_display=CourseState.should_display,
                allow_not_found=True
            )

    def assertCourseActionStatesEqual(self, expected, found):
        """Asserts that the set of course keys in the expected state equal those that are found"""
        self.assertSetEqual(
            set(course_action_state.course_key for course_action_state in expected),
            set(course_action_state.course_key for course_action_state in found))

    @data(*COURSE_ACTION_STATES)
    def test_find_all_for_display(self, action_class):
        self.init_course_action_states(action_class)
        self.assertCourseActionStatesEqual(
            self.course_actions_displayable_states,
            action_class.objects.find_all(should_display=True),
        )

    @data(*COURSE_ACTION_STATES)
    def test_find_all_for_display_filter_exclude(self, action_class):
        self.init_course_action_states(action_class)
        # Each tuple is (expected result, state filter, state to exclude).
        for course_action_state, filter_state, exclude_state in (
            (self.courses_with_state1, 'state1', None),  # filter for state1
            (self.courses_with_state2, 'state2', None),  # filter for state2
            (self.courses_with_state2 + self.courses_with_state3, None, 'state1'),  # exclude state1
            (self.courses_with_state1 + self.courses_with_state3, None, 'state2'),  # exclude state2
            (self.courses_with_state1, 'state1', 'state2'),  # filter for state1, exclude state2
            ([], 'state1', 'state1'),  # filter for state1, exclude state1
        ):
            self.assertCourseActionStatesEqual(
                course_action_state,
                action_class.objects.find_all(
                    exclude_args=({'state': exclude_state} if exclude_state else None),
                    should_display=True,
                    **({'state': filter_state} if filter_state else {})
                )
            )

    def test_kwargs_in_update_state(self):
        # Extra kwargs (source_course_key) must be persisted on the model.
        destination_course_key = CourseLocator("org", "course", "run")
        source_course_key = CourseLocator("source_org", "source_course", "source_run")
        CourseRerunState.objects.update_state(
            course_key=destination_course_key,
            new_state='state1',
            allow_not_found=True,
            source_course_key=source_course_key,
        )
        found_action_state = CourseRerunState.objects.find_first(course_key=destination_course_key)
        self.assertEquals(source_course_key, found_action_state.source_course_key)
| agpl-3.0 |
jbeezley/girder | plugins/sentry/setup.py | 1 | 1755 | # -*- coding: utf-8 -*-
import os
from setuptools import find_packages, setup
def prerelease_local_scheme(version):
    """Return local scheme version unless building on master in CircleCI.

    This function returns the local scheme version number
    (e.g. 0.0.0.dev<N>+g<HASH>) unless building on CircleCI for a
    pre-release in which case it ignores the hash and produces a
    PEP440 compliant pre-release version number (e.g. 0.0.0.dev<N>).
    """
    if os.getenv('CIRCLE_BRANCH') == 'master':
        # Pre-release build: drop the local (+g<HASH>) component entirely.
        return ''
    # Imported lazily so the master/pre-release path does not require
    # setuptools_scm to be importable when it is not actually used.
    from setuptools_scm.version import get_local_node_and_date
    return get_local_node_and_date(version)
# perform the install
setup(
    name='girder-sentry',
    # Version is derived from git tags; pre-release builds on master drop
    # the local (+g<HASH>) component via prerelease_local_scheme above.
    use_scm_version={'root': '../..', 'local_scheme': prerelease_local_scheme},
    setup_requires=['setuptools-scm', 'setuptools-git'],
    description='Allow the automatic tracking of issues using Sentry.',
    maintainer='Kitware, Inc.',
    maintainer_email='kitware@kitware.com',
    license='Apache 2.0',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6'
    ],
    include_package_data=True,
    packages=find_packages(exclude=['plugin_tests']),
    zip_safe=False,
    install_requires=[
        'girder>=3',
        'sentry-sdk'
    ],
    # Register the plugin with girder's plugin discovery mechanism.
    entry_points={
        'girder.plugin': [
            'sentry = girder_sentry:SentryPlugin'
        ]
    }
)
| apache-2.0 |
yashsharan/sympy | sympy/plotting/pygletplot/plot_rotation.py | 94 | 1478 | from __future__ import print_function, division
try:
from pyglet.gl.gl import c_float
except ImportError:
pass
from pyglet.gl import *
from math import sqrt as _sqrt, acos as _acos
def cross(a, b):
    """Return the cross product of 3-vectors ``a`` and ``b`` as a tuple."""
    ax, ay, az = a[0], a[1], a[2]
    bx, by, bz = b[0], b[1], b[2]
    return (ay * bz - az * by,
            az * bx - ax * bz,
            ax * by - ay * bx)
def dot(a, b):
    """Return the dot product of two equal-length vectors.

    Generalized from the original fixed 3-component form so it also works
    for 2D or nD vectors; for 3-vectors the result is identical to
    ``a[0]*b[0] + a[1]*b[1] + a[2]*b[2]``.
    """
    return sum(x * y for x, y in zip(a, b))
def mag(a):
    """Euclidean length of the 3-vector ``a``."""
    x, y, z = a[0], a[1], a[2]
    return _sqrt(x**2 + y**2 + z**2)
def norm(a):
    """Return ``a`` scaled to unit length, as a 3-tuple."""
    m = mag(a)
    return tuple(c / m for c in a)
def get_sphere_mapping(x, y, width, height):
    """Map a window coordinate (x, y) onto the virtual trackball sphere."""
    # Clamp the point to the window rectangle.
    x = max(0, min(x, width))
    y = max(0, min(y, height))

    # Radius of the sphere circumscribing the window.
    radius = _sqrt((width / 2) ** 2 + (height / 2) ** 2)
    sx = (x - width / 2) / radius
    sy = (y - height / 2) / radius

    z_sq = 1.0 - sx ** 2 - sy ** 2
    if z_sq > 0.0:
        # The point lies on the front hemisphere.
        return (sx, sy, _sqrt(z_sq))
    # Outside the sphere: project onto its silhouette edge.
    return norm((sx, sy, 0))
# Radians -> degrees conversion factor (uses an approximate value of pi).
rad2deg = 180.0 / 3.141592


def get_spherical_rotatation(p1, p2, width, height, theta_multiplier):
    """Return a GL modelview matrix rotating trackball point p1 to p2.

    p1 and p2 are (x, y) window coordinates. Returns a (c_float*16)
    column-major matrix, or None when the two points map to (nearly)
    the same point on the sphere.
    """
    v1 = get_sphere_mapping(p1[0], p1[1], width, height)
    v2 = get_sphere_mapping(p2[0], p2[1], width, height)

    # Clamp the cosine into [-1, 1] to guard against rounding drift.
    d = min(max([dot(v1, v2), -1]), 1)

    if abs(d - 1.0) < 0.000001:
        # Vectors are (almost) identical; no rotation needed.
        return None

    raxis = norm( cross(v1, v2) )
    rtheta = theta_multiplier * rad2deg * _acos(d)

    # Build the rotation matrix on the GL stack without disturbing the
    # caller's current modelview matrix.
    glPushMatrix()
    glLoadIdentity()
    glRotatef(rtheta, *raxis)
    mat = (c_float*16)()
    glGetFloatv(GL_MODELVIEW_MATRIX, mat)
    glPopMatrix()

    return mat
| bsd-3-clause |
illicitonion/givabit | lib/sdks/google_appengine_1.7.1/google_appengine/lib/django_1_3/tests/modeltests/model_package/tests.py | 92 | 2570 | from django.contrib.sites.models import Site
from django.db import models
from django.test import TestCase
from models.publication import Publication
from models.article import Article
class Advertisment(models.Model):
    # NOTE(review): class name is misspelled ("Advertisment"); renaming
    # would change the table name, so it is left as-is.
    customer = models.CharField(max_length=100)
    # Referenced via "app_label.Model" string because Publication lives in
    # a sub-package of the models package.
    publications = models.ManyToManyField(
        "model_package.Publication", null=True, blank=True
    )

    class Meta:
        app_label = 'model_package'
class ModelPackageTests(TestCase):
    """Regression tests for models split across a models/ sub-package."""

    def test_model_packages(self):
        p = Publication.objects.create(title="FooBar")
        current_site = Site.objects.get_current()
        self.assertEqual(current_site.domain, "example.com")

        # Regression for #12168: models split into subpackages still get M2M
        # tables
        a = Article.objects.create(headline="a foo headline")
        a.publications.add(p)
        a.sites.add(current_site)

        a = Article.objects.get(id=a.pk)
        self.assertEqual(a.id, a.pk)
        self.assertEqual(a.sites.count(), 1)

        # Regression for #12245 - Models can exist in the test package, too
        ad = Advertisment.objects.create(customer="Lawrence Journal-World")
        ad.publications.add(p)

        ad = Advertisment.objects.get(id=ad.pk)
        self.assertEqual(ad.publications.count(), 1)

        # Regression for #12386 - field names on the autogenerated intermediate
        # class that are specified as dotted strings don't retain any path
        # component for the field or column name
        self.assertEqual(
            Article.publications.through._meta.fields[1].name, 'article'
        )
        self.assertEqual(
            Article.publications.through._meta.fields[1].get_attname_column(),
            ('article_id', 'article_id')
        )
        self.assertEqual(
            Article.publications.through._meta.fields[2].name, 'publication'
        )
        self.assertEqual(
            Article.publications.through._meta.fields[2].get_attname_column(),
            ('publication_id', 'publication_id')
        )

        # The oracle backend truncates the name to 'model_package_article_publ233f'.
        self.assertTrue(
            Article._meta.get_field('publications').m2m_db_table() in ('model_package_article_publications', 'model_package_article_publ233f')
        )
        self.assertEqual(
            Article._meta.get_field('publications').m2m_column_name(), 'article_id'
        )
        self.assertEqual(
            Article._meta.get_field('publications').m2m_reverse_name(),
            'publication_id'
        )
| apache-2.0 |
AmbientSensorsEvaluation/Ambient-Sensors-Proximity-Evaluation | Python Code/analyser.py | 1 | 10257 |
import re
import sys
import sqlite3
import numpy as np
import multiprocessing as mp
from lib.data import *
from lib.utils import *
from lib.retriever import DataRetriever
from lib.grapher import *
from lib.stats import compute_eer, find_eer
# Command-line usage string printed on bad invocations.
usage = "Usage: python analyser.py <path to card DB> <path to reader db> <path to output file>"

# Sensors seen so far during threshold computation.
seen = []
# NOTE(review): appears unused in this module -- confirm before removing.
msg_sens = []
# Maps sensor name -> list of [card_times, card_vals, reader_times,
# reader_vals] per transaction; filled during analysis and reused later
# for the EER computation.
sensor_place_data = dict()
def compute_threshold(place, sensor, c_ts, c_ms, r_ts, r_ms, loc=False):
    """ Returns the average threshold for a set of measurements, along with max and min vals """
    global seen
    global sensor_place_data

    # Process geolocations differently to rest
    if loc:
        # Almost always only a single location is retrieved in time.
        # In case more are returned, err on the side of caution by
        # keeping only the first measurement from each device.
        if len(c_ts) > 1 and len(c_ms) > 1:
            del c_ms[1]
            del c_ts[1]
        if len(r_ts) > 1 and len(r_ms) > 1:
            del r_ms[1]
            del r_ts[1]
        sensor_place_data[sensor].append([c_ts, c_ms, r_ts, r_ms])
        return compute_avg_threshold_loc(mean_loc(c_ms), mean_loc(r_ms))

    # Remove dud data values not removed in the apps:
    # single-valued sensors keep only the first component, others are
    # collapsed to a scalar magnitude per measurement.
    if sensor in single_vals:
        c_ms = np.array([ m[0] for m in c_ms ])
        r_ms = np.array([ m[0] for m in r_ms ])
    elif not loc:
        c_ms = np.array([ magnitude(m) for m in c_ms ])
        r_ms = np.array([ magnitude(m) for m in r_ms ])

    if not loc:
        # UNCOMMENT!
        #if sensor == 'Gyroscope' and count > 1:
        #    graph_sensor(place, sensor, c_ts, c_ms, r_ts, r_ms)
        #    exit()
        #count += 1
        seen.append(sensor)
        # Store card and reader values for future analysis
        sensor_place_data[sensor].append([c_ts, c_ms, r_ts, r_ms])
        t = compute_avg_threshold(c_ts, c_ms, r_ts, r_ms)
        # Get threshold, min and max values for card and reader
        return (t, c_ms.min(), c_ms.max(), r_ms.min(), r_ms.max())
def analyse(c_data, r_data, sensor, place):
    """ Analyses all card and reader transactions for a given sensor and place

    Returns (good, thresholds, c_mins, c_maxs, r_mins, r_maxs) where
    'good' counts transactions in which both devices produced values.
    """
    # Analyse shared card and reader measurements if values exist
    # and track 'good' values: those containing measurements
    thresholds = []
    c_mins, c_maxs = [], []
    r_mins, r_maxs = [], []
    good = 0
    loc = (sensor == 'NetworkLocation')

    # For each transaction
    for c, r in zip(c_data, r_data):
        # Same shared id
        assert c[0] == r[0]
        # NetworkLocation is parsed differently to rest
        c_times, c_vals = parse_measurement_data(c[1], loc)
        r_times, r_vals = parse_measurement_data(r[1], loc)

        # For location, we do not use Numpy arrays
        if len(c_vals) > 0 and len(r_vals) > 0:
            good += 1
            if loc:
                # Process locations differently: only the threshold is
                # produced, no per-device min/max values.
                thresholds.append(compute_threshold(place, sensor, c_times, c_vals, r_times, r_vals, loc))
            else:
                thresh, c_min, c_max, r_min, r_max = compute_threshold(place, sensor, c_times, c_vals, r_times, r_vals, loc)
                thresholds.append(thresh)
                c_mins = np.append(c_mins, c_min)
                c_maxs = np.append(c_maxs, c_max)
                r_mins = np.append(r_mins, r_min)
                r_maxs = np.append(r_maxs, r_max)

    return (good, np.array(thresholds), c_mins, c_maxs, r_mins, r_maxs)
def store_data(thresholds, val_ratio, sensor, place, output_dir):
    """Persist the analysis summary for one (place, sensor) pair to a file.

    Args:
        thresholds: numpy array of per-transaction thresholds (may be empty).
        val_ratio: (successful, total) measurement-count tuple.
        sensor: sensor name, used in the output filename.
        place: place name, used in the output filename.
        output_dir: directory the <place>_<sensor>.txt file is written into.
    """
    path_str = "{0}/{1}_{2}.txt".format(output_dir, place, sensor)
    print("Storing data at {0}...".format(path_str))

    # Proceed only if we have any thresholds; for Bluetooth and Wi-Fi,
    # this will not be the case usually.
    # NOTE(review): the threshold values themselves are never written out,
    # only a presence marker and the ratio -- confirm this is intended.
    if thresholds.any():
        msg = ""
    else:
        msg = "<No data recorded>"
    msg += "\n(Successful, total) measurements: {0}".format(val_ratio)
    try:
        with open(path_str, 'w') as f:
            f.write(msg)
    except Exception as e:
        print("Failed to write to database!")
        # Fixed: Exception.message does not exist on Python 3 (and was
        # deprecated on Python 2); print the exception itself instead.
        print(e)
    print("Done!")
    # Fixed: a bare `print` is a no-op expression on Python 3; call it to
    # emit the intended blank separator line.
    print()
def run(card_conn, reader_conn, output_dir):
    """ Fetch and pre-process data from card and reader databases

    Iterates every (place, sensor) pair, analyses the shared transactions
    and stores the per-pair summaries, then computes EERs.
    """
    # Get database objects for card and reader
    card_db = DataRetriever(card_conn)
    reader_db = DataRetriever(reader_conn)
    skip_sensors = []
    card_ids, reader_ids = {}, {}
    common_ids = {}

    # Get shared IDs for all DBs
    for sensor in sensors.keys():
        card_ids[sensor] = card_db.get_measurement_ids(sensor)
        reader_ids[sensor] = reader_db.get_measurement_ids(sensor)
        common_ids[sensor] = find_shared_ids(card_ids[sensor], reader_ids[sensor])

    #for sensor in sensors.keys():
    #    try:
    #
    #    except:
    #        Track unavailable sensors; exception is thrown
    #        when a particular sensor isn't in database
    #        skip_sensors.append(sensor)

    # Initialise per-sensor storage for the later EER computation.
    global sensor_place_data
    for sensor in sensors.keys():
        if not sensor in skip_sensors:
            sensor_place_data[sensor] = []

    for place in places:
        for sensor in sensors.keys():
            # Skip unavailable sensors
            if sensor in skip_sensors:
                print("Skipping {0} in {1}".format(sensor, place))
                continue
            c_total, c_data = card_db.get_measurements(sensor, place, common_ids[sensor])
            r_total, r_data = reader_db.get_measurements(sensor, place, common_ids[sensor])
            print(len(c_data))
            print(len(r_data))
            print
            if not c_data and not r_data:
                print("Skipping {0} in {1}".format(sensor, place))
                continue
            print("Processing data for {0} in {1}...".format(sensor, place))
            # Remove cancelled data if necessary
            #if c_canc_ids or r_canc_ids:
            #    c_data, r_data = remove_cancelled_data(c_canc_ids, c_data, r_canc_ids, r_data)
            # Sanity check: check number of transactions are same across devices
            assert len(c_data) == len(r_data)
            # Analyse all measurements for a given sensor and place
            good, thresholds, c_mins, c_maxs, r_mins, r_maxs = analyse(c_data, r_data, sensor, place)
            # Tuple/ratio of successful vals:total vals
            val_ratio = (good, len(c_data))
            # Store results
            store_data(thresholds, val_ratio, sensor, place, output_dir)

    do_eers()
def pre_compute_ts(data, sensor, lock):
    """ Pre-computes thresholds for a given sensor and its associated transactions

    Builds an NxN matrix pairing every transaction's card data with every
    transaction's reader data and saves it to pre/<sensor>_pre.npy.
    The lock serializes console output and the file write across the
    worker processes spawned by start_pre_compute().
    """
    pre_ts = np.zeros((len(data), len(data)))
    print("Pre-computing thresholds...")
    # Pre compute thresholds
    # Data is in the form [card_xs, card_ys, reader_xs, reader_ys]
    if sensor == 'NetworkLocation':
        for i, t1 in enumerate(data):
            for j, t2 in enumerate(data):
                pre_ts[i][j] = compute_avg_threshold_loc(
                    mean_loc(t2[1]),
                    mean_loc(t1[3]))
            # Progress report, once per completed row.
            lock.acquire()
            print("{0}: {1}".format(sensor, i))
            lock.release()
    else:
        for i, t1 in enumerate(data):
            for j, t2 in enumerate(data):
                pre_ts[i][j] = compute_avg_threshold(
                    t2[0], t2[1],
                    t1[2], t1[3])
            # Progress report, once per completed row.
            lock.acquire()
            print("{0}: {1}".format(sensor, i))
            lock.release()
    print("Done!")
    lock.acquire()
    np.save('pre/{0}_pre.npy'.format(sensor), pre_ts)
    lock.release()
def start_pre_compute():
    """Spawn one worker process per sensor to pre-compute threshold matrices."""
    global sensor_place_data
    # Single shared lock serializes printing and .npy writes across workers.
    l = mp.Lock()
    pool = []
    for sensor in sensor_place_data.keys():
        data = sensor_place_data[sensor]
        if data != []:
            p = mp.Process(target=pre_compute_ts, args=(data, sensor, l),)
            pool.append(p)
    for p in pool: p.start()
    for p in pool: p.join()
def do_eers():
    """Compute the equal error rate per sensor and append results to eers/eer.csv."""
    # Graph EERSs
    print("Computing EERs...")
    # Erase existing EER file
    with open('eers/eer.csv', 'w') as f:
        pass
    # Pre-compute thresholds
    #print("Starting pre-compute...")
    #start_pre_compute()
    global sensor_place_data
    for sensor in sensor_place_data.keys():
        false_negs, false_poss = [], []
        true_negs, true_poss = [], []
        if sensor_place_data[sensor] != []:
            data = sensor_place_data[sensor]
            # Load pre_ts (NOTE: assumes pre/<sensor>_pre.npy was produced
            # earlier, e.g. by start_pre_compute()).
            print("Loading pre-computed matrix...")
            pre_ts = np.load('pre/{0}_pre.npy'.format(sensor))
            # Data stored as [card_xs, card_ys, reader_xs, reader_ys]
            # for each transaction
            min_t = np.amin(pre_ts)
            max_t = np.amax(pre_ts)
            # Sweep 50 candidate thresholds across the observed range.
            ts = np.linspace(min_t, max_t, num=50)
            print("Computing EER for {0}...".format(sensor))
            for i, t in enumerate(ts):
                tpr, tnr, fpr, fnr = compute_eer(t, pre_ts)
                true_poss.append(tpr)
                true_negs.append(tnr)
                false_poss.append(fpr)
                false_negs.append(fnr)
            # Graph
            print("Finding EER for {0}...".format(sensor))
            eer = find_eer(false_poss, false_negs, ts)
            eer_str = '{0},{1},{2}\n'.format(sensor, eer[1], eer[0])
            with open('eers/eer.csv', 'a') as f:
                f.write(eer_str)
            #graph_eer(sensor, ts, false_negs, false_poss)
    print("Done!")
if __name__ == '__main__':
    # Process args and run
    args = sys.argv
    if len(args) < 4:
        print("Insufficient arguments!")
        print(usage)
        exit(1)
    else:
        card_path = sys.argv[1]
        reader_path = sys.argv[2]
        output_dir = sys.argv[3]
        # Cheap sanity check on argument order based on expected filenames.
        if not ("Card" in sys.argv[1] and "Reader" in sys.argv[2]):
            print("Incorrect arguments: are you sure they're in the right order?")
            print(usage)
            exit(1)
        else:
            print("Running...")
            # Establish database connections
            card_conn = sqlite3.connect(card_path)
            reader_conn = sqlite3.connect(reader_path)
            run(card_conn, reader_conn, output_dir)
| gpl-3.0 |
jmartinm/InvenioAuthorLists | modules/webaccess/lib/access_control_firerole_tests.py | 9 | 8093 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the access_control_firerole library."""
__revision__ = "$Id$"
import unittest
from invenio.access_control_firerole import compile_role_definition, \
serialize, deserialize, acc_firerole_check_user
from invenio.access_control_config import InvenioWebAccessFireroleError, \
CFG_ACC_EMPTY_ROLE_DEFINITION_SER
from invenio.testutils import make_test_suite, run_test_suite
from invenio.webuser import collect_user_info
class AccessControlFireRoleTest(unittest.TestCase):
"""Test functions related to the firewall like role definitions."""
    def setUp(self):
        """setting up helper variables for tests"""
        # A logged-in CERN user belonging to two groups, plus a guest.
        self.user_info = {'email' : 'foo.bar@cern.ch', 'uid': 1000,
            'group' : ['patata', 'cetriolo'], 'remote_ip' : '127.0.0.1'}
        self.guest = collect_user_info({})

    def test_compile_role_definition_empty(self):
        """firerole - compiling empty role definitions"""
        # None must compile to the serialized empty-role sentinel.
        self.assertEqual(compile_role_definition(None),
            deserialize(CFG_ACC_EMPTY_ROLE_DEFINITION_SER))

    def test_compile_role_definition_allow_any(self):
        """firerole - compiling allow any role definitions"""
        self.failUnless(serialize(compile_role_definition("allow any")))

    def test_compile_role_definition_deny_any(self):
        """firerole - compiling deny any role definitions"""
        # "deny any" serializes to a falsy value.
        self.failIf(serialize(compile_role_definition("deny any")))

    def test_compile_role_definition_literal_field(self):
        """firerole - compiling literal field role definitions"""
        self.failUnless(serialize(compile_role_definition(
            "allow email 'info@invenio-software.org'")))

    def test_compile_role_definition_not(self):
        """firerole - compiling not role definitions"""
        self.failUnless(serialize(compile_role_definition(
            "allow not email 'info@invenio-software.org'")))

    def test_compile_role_definition_group_field(self):
        """firerole - compiling group field role definitions"""
        self.failUnless(serialize(compile_role_definition(
            "allow groups 'patata'")))

    def test_compile_role_definition_regexp_field(self):
        """firerole - compiling regexp field role definitions"""
        self.failUnless(serialize(compile_role_definition(
            "allow email /.*@cern.ch/")))

    def test_compile_role_definition_literal_list(self):
        """firerole - compiling literal list role definitions"""
        self.failUnless(serialize(compile_role_definition(
            "allow email 'info@invenio-software.org', 'foo.bar@cern.ch'")))

    def test_compile_role_definition_more_rows(self):
        """firerole - compiling more rows role definitions"""
        self.failUnless(serialize(compile_role_definition(
            "allow email /.*@cern.ch/\nallow groups 'patata' "
            "# a comment\ndeny any")))

    def test_compile_role_definition_complex(self):
        """firerole - compiling complex role definitions"""
        self.failUnless(serialize(compile_role_definition(
            "allow email /.*@cern.ch/\nallow groups 'patata' "
            "# a comment\ndeny remote_ip '127.0.0.0/24'\ndeny any")))

    def test_compile_role_definition_with_date(self):
        """firerole - compiling date based role definitions"""
        self.failUnless(serialize(compile_role_definition(
            "allow from '2010-11-11'")))
        self.failUnless(serialize(compile_role_definition(
            "allow until '2010-11-11'")))
        # Date lists and malformed date strings must be rejected.
        self.assertRaises(InvenioWebAccessFireroleError,
            compile_role_definition, "allow from '2010-11-11','2010-11-23'")
        self.assertRaises(InvenioWebAccessFireroleError,
            compile_role_definition, "allow from '2010-11'")
def test_compile_role_definition_wrong(self):
"""firerole - compiling wrong role definitions"""
self.assertRaises(InvenioWebAccessFireroleError,
compile_role_definition, "allow al")
self.assertRaises(InvenioWebAccessFireroleError,
compile_role_definition, "fgdfglk g fgk")
def test_deserialize(self):
"""firerole - deserializing"""
self.assertEqual(compile_role_definition("allow any"),
(True, ()))
def test_firerole_literal_email(self):
"""firerole - firerole core testing literal email matching"""
self.failUnless(acc_firerole_check_user(self.user_info,
compile_role_definition("allow email 'info@invenio-software.org',"
"'foo.bar@cern.ch'\ndeny any")))
def test_firerole_regexp_email(self):
"""firerole - firerole core testing regexp email matching"""
self.failUnless(acc_firerole_check_user(self.user_info,
compile_role_definition("allow email /.*@cern.ch/\ndeny any")))
def test_firerole_literal_group(self):
"""firerole - firerole core testing literal group matching"""
self.failUnless(acc_firerole_check_user(self.user_info,
compile_role_definition("allow groups 'patata'\ndeny any")))
def test_firerole_ip_mask(self):
"""firerole - firerole core testing ip mask matching"""
self.failUnless(acc_firerole_check_user(self.user_info,
compile_role_definition("allow remote_ip '127.0.0.0/24'"
"\ndeny any")))
def test_firerole_non_existant_group(self):
"""firerole - firerole core testing non existant group matching"""
self.failIf(acc_firerole_check_user(self.user_info,
compile_role_definition("allow groups 'patat'\ndeny any")))
def test_firerole_with_future_date(self):
"""firerole - firerole core testing with future date"""
import time
future_date = time.strftime('%Y-%m-%d', time.gmtime(time.time() + 24 * 3600 * 2))
self.failUnless(acc_firerole_check_user(self.user_info,
compile_role_definition("allow until '%s'\nallow any" % future_date)))
self.failIf(acc_firerole_check_user(self.user_info,
compile_role_definition("allow from '%s'\nallow any" % future_date)))
def test_firerole_with_past_date(self):
"""firerole - firerole core testing with past date"""
import time
past_date = time.strftime('%Y-%m-%d', time.gmtime(time.time() - 24 * 3600 * 2))
self.failIf(acc_firerole_check_user(self.user_info,
compile_role_definition("allow until '%s'\nallow any" % past_date)))
self.failUnless(acc_firerole_check_user(self.user_info,
compile_role_definition("allow from '%s'\nallow any" % past_date)))
def test_firerole_empty(self):
"""firerole - firerole core testing empty matching"""
self.assertEqual(False, acc_firerole_check_user(self.user_info,
compile_role_definition(None)))
def test_firerole_uid(self):
"""firerole - firerole core testing with integer uid"""
self.assertEqual(False, acc_firerole_check_user(self.guest,
compile_role_definition("deny uid '-1'\nallow all")))
self.assertEqual(True, acc_firerole_check_user(self.user_info,
compile_role_definition("deny uid '-1'\nallow all")))
# Aggregate the test case into a suite so it can be run either standalone
# (below) or by the Invenio test harness.
TEST_SUITE = make_test_suite(AccessControlFireRoleTest,)
if __name__ == "__main__":
    run_test_suite(TEST_SUITE)
| gpl-2.0 |
beniwohli/apm-agent-python | tests/handlers/logging/logging_tests.py | 1 | 16422 | # BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import logging.handlers
import sys
import warnings
from logging import LogRecord
import ecs_logging
import pytest
import structlog
from elasticapm.conf import Config
from elasticapm.conf.constants import ERROR
from elasticapm.handlers.logging import Formatter, LoggingFilter, LoggingHandler
from elasticapm.handlers.structlog import structlog_processor
from elasticapm.traces import Tracer, capture_span
from elasticapm.utils import compat
from elasticapm.utils.stacks import iter_stack_frames
from tests.fixtures import TempStoreClient
@pytest.fixture()
def logger(elasticapm_client):
    # Fixture: a stdlib logger wired to an in-memory APM client (via
    # LoggingHandler) so tests can inspect the error events it reports.
    # The client is exposed as ``logger.client`` for assertions.
    elasticapm_client.config.include_paths = ["tests", "elasticapm"]
    handler = LoggingHandler(elasticapm_client)
    logger = logging.getLogger(__name__)
    logger.handlers = []
    logger.addHandler(handler)
    logger.client = elasticapm_client
    logger.level = logging.INFO
    return logger
def test_logger_basic(logger):
    # A plain error record becomes exactly one APM error event carrying
    # log metadata but no exception.
    logger.error("This is a test error")
    assert len(logger.client.events) == 1
    event = logger.client.events[ERROR][0]
    assert event["log"]["logger_name"] == __name__
    assert event["log"]["level"] == "error"
    assert event["log"]["message"] == "This is a test error"
    assert "stacktrace" in event["log"]
    assert "exception" not in event
    assert "param_message" in event["log"]
    assert event["log"]["param_message"] == "This is a test error"


def test_logger_warning(logger):
    # Warning-level records are reported too, with level "warning".
    logger.warning("This is a test warning")
    assert len(logger.client.events) == 1
    event = logger.client.events[ERROR][0]
    assert event["log"]["logger_name"] == __name__
    assert event["log"]["level"] == "warning"
    assert "exception" not in event
    assert "param_message" in event["log"]
    assert event["log"]["param_message"] == "This is a test warning"


def test_logger_extra_data(logger):
    # extra["data"] ends up under context.custom on the event.
    logger.info("This is a test info with a url", extra=dict(data=dict(url="http://example.com")))
    assert len(logger.client.events) == 1
    event = logger.client.events[ERROR][0]
    assert event["context"]["custom"]["url"] == "http://example.com"
    assert "stacktrace" in event["log"]
    assert "exception" not in event
    assert "param_message" in event["log"]
    assert event["log"]["param_message"] == "This is a test info with a url"


def test_logger_exc_info(logger):
    # exc_info=True attaches the active exception (type/message/stacktrace).
    try:
        raise ValueError("This is a test ValueError")
    except ValueError:
        logger.info("This is a test info with an exception", exc_info=True)
    assert len(logger.client.events) == 1
    event = logger.client.events[ERROR][0]
    # assert event['message'] == 'This is a test info with an exception'
    assert "exception" in event
    assert "stacktrace" in event["exception"]
    exc = event["exception"]
    assert exc["type"] == "ValueError"
    assert exc["message"] == "ValueError: This is a test ValueError"
    assert "param_message" in event["log"]
    assert event["log"]["message"] == "This is a test info with an exception"


def test_message_params(logger):
    # %-style args: message is interpolated, param_message keeps the template.
    logger.info("This is a test of %s", "args")
    assert len(logger.client.events) == 1
    event = logger.client.events[ERROR][0]
    assert "exception" not in event
    assert "param_message" in event["log"]
    assert event["log"]["message"] == "This is a test of args"
    assert event["log"]["param_message"] == "This is a test of %s"


def test_record_stack(logger):
    # extra={"stack": True} collects a stacktrace and derives the culprit.
    logger.info("This is a test of stacks", extra={"stack": True})
    assert len(logger.client.events) == 1
    event = logger.client.events[ERROR][0]
    frames = event["log"]["stacktrace"]
    assert len(frames) != 1
    frame = frames[0]
    assert frame["module"] == __name__
    assert "exception" not in event
    assert "param_message" in event["log"]
    assert event["log"]["param_message"] == "This is a test of stacks"
    assert event["culprit"] == "tests.handlers.logging.logging_tests.test_record_stack"
    assert event["log"]["message"] == "This is a test of stacks"
def test_no_record_stack(logger):
    # With extra={"stack": False} no stacktrace is collected, so no
    # culprit can be derived either.
    logger.info("This is a test of no stacks", extra={"stack": False})
    assert len(logger.client.events) == 1
    event = logger.client.events[ERROR][0]
    # Identity check for None (PEP 8) instead of "== None".
    assert event.get("culprit") is None
    assert event["log"]["message"] == "This is a test of no stacks"
    assert "stacktrace" not in event["log"]
    assert "exception" not in event
    assert "param_message" in event["log"]
    assert event["log"]["param_message"] == "This is a test of no stacks"
def test_no_record_stack_via_config(logger):
    # Same as test_no_record_stack, but stack collection is disabled via
    # the client config instead of the per-call ``extra`` dict.
    logger.client.config.auto_log_stacks = False
    logger.info("This is a test of no stacks")
    assert len(logger.client.events) == 1
    event = logger.client.events[ERROR][0]
    # Identity check for None (PEP 8) instead of "== None".
    assert event.get("culprit") is None
    assert event["log"]["message"] == "This is a test of no stacks"
    assert "stacktrace" not in event["log"]
    assert "exception" not in event
    assert "param_message" in event["log"]
    assert event["log"]["param_message"] == "This is a test of no stacks"
def test_explicit_stack(logger):
    # A caller-supplied frame iterator is accepted for "stack".
    logger.info("This is a test of stacks", extra={"stack": iter_stack_frames()})
    assert len(logger.client.events) == 1
    event = logger.client.events[ERROR][0]
    assert "culprit" in event, event
    assert event["culprit"] == "tests.handlers.logging.logging_tests.test_explicit_stack"
    assert "message" in event["log"], event
    assert event["log"]["message"] == "This is a test of stacks"
    assert "exception" not in event
    assert "param_message" in event["log"]
    assert event["log"]["param_message"] == "This is a test of stacks"
    assert "stacktrace" in event["log"]


def test_extra_culprit(logger):
    # An explicit "culprit" in extra overrides derivation and is removed
    # from the custom context.
    logger.info("This is a test of stacks", extra={"culprit": "foo.bar"})
    assert len(logger.client.events) == 1
    event = logger.client.events[ERROR][0]
    assert event["culprit"] == "foo.bar"
    assert "culprit" not in event["context"]["custom"]


def test_logger_exception(logger):
    # logger.exception() inside an except block attaches the exception.
    try:
        raise ValueError("This is a test ValueError")
    except ValueError:
        logger.exception("This is a test with an exception", extra={"stack": True})
    assert len(logger.client.events) == 1
    event = logger.client.events[ERROR][0]
    assert event["log"]["message"] == "This is a test with an exception"
    assert "stacktrace" in event["log"]
    assert "exception" in event
    exc = event["exception"]
    assert exc["type"] == "ValueError"
    assert exc["message"] == "ValueError: This is a test ValueError"
    assert "param_message" in event["log"]
    assert event["log"]["message"] == "This is a test with an exception"


def test_client_arg(elasticapm_client):
    # Client may be passed positionally ...
    handler = LoggingHandler(elasticapm_client)
    assert handler.client == elasticapm_client


def test_client_kwarg(elasticapm_client):
    # ... or as a keyword argument.
    handler = LoggingHandler(client=elasticapm_client)
    assert handler.client == elasticapm_client


def test_logger_setup():
    # Config kwargs are forwarded to the client the handler constructs.
    handler = LoggingHandler(
        server_url="foo", service_name="bar", secret_token="baz", metrics_interval="0ms", client_cls=TempStoreClient
    )
    client = handler.client
    assert client.config.server_url == "foo"
    assert client.config.service_name == "bar"
    assert client.config.secret_token == "baz"
    assert handler.level == logging.NOTSET


def test_logging_handler_emit_error(capsys, elasticapm_client):
    # A crash inside _emit must not propagate; it is reported on stderr.
    handler = LoggingHandler(elasticapm_client)
    handler._emit = lambda: 1 / 0
    handler.emit(LogRecord("x", 1, "/ab/c/", 10, "Oops", [], None))
    out, err = capsys.readouterr()
    assert "Top level ElasticAPM exception caught" in err
    assert "Oops" in err


def test_logging_handler_dont_emit_elasticapm(capsys, elasticapm_client):
    # Records from the "elasticapm.errors" logger are not re-captured
    # (avoids feedback loops); they still reach stderr.
    handler = LoggingHandler(elasticapm_client)
    handler.emit(LogRecord("elasticapm.errors", 1, "/ab/c/", 10, "Oops", [], None))
    out, err = capsys.readouterr()
    assert "Oops" in err


def test_arbitrary_object(logger):
    # Non-string messages are stringified into param_message.
    logger.error(["a", "list", "of", "strings"])
    assert len(logger.client.events) == 1
    event = logger.client.events[ERROR][0]
    assert "param_message" in event["log"]
    assert event["log"]["param_message"] == "['a', 'list', 'of', 'strings']"
def test_logging_filter_no_span(logger_client := None) if False else None  # noqa: placeholder removed
@pytest.mark.skipif(not compat.PY3, reason="Log record factories are only 3.2+")
def test_automatic_log_record_factory_install(elasticapm_client):
    """
    Use the elasticapm_client fixture to load the client, which in turn installs
    the log_record_factory. Check to make sure it happened.
    """
    transaction = elasticapm_client.begin_transaction("test")
    with capture_span("test") as span:
        # The installed factory stamps trace context onto every record it
        # creates, without any explicit filter/handler.
        record_factory = logging.getLogRecordFactory()
        record = record_factory(__name__, logging.DEBUG, __file__, 252, "dummy_msg", [], None)
        assert record.elasticapm_transaction_id == transaction.id
        assert record.elasticapm_service_name == transaction.tracer.config.service_name
        assert record.elasticapm_trace_id == transaction.trace_parent.trace_id
        assert record.elasticapm_span_id == span.id
        assert record.elasticapm_labels


def test_formatter():
    # The Formatter both renders the record and back-fills the elasticapm_*
    # attributes so format strings referencing them never raise.
    record = logging.LogRecord(__name__, logging.DEBUG, __file__, 252, "dummy_msg", [], None)
    formatter = Formatter()
    formatted_record = formatter.format(record)
    assert "| elasticapm" in formatted_record
    assert hasattr(record, "elasticapm_transaction_id")
    assert hasattr(record, "elasticapm_service_name")
    record = logging.LogRecord(__name__, logging.DEBUG, __file__, 252, "dummy_msg", [], None)
    formatted_time = formatter.formatTime(record)
    assert formatted_time
    assert hasattr(record, "elasticapm_transaction_id")
    assert hasattr(record, "elasticapm_service_name")
def test_logging_handler_no_client(recwarn):
    # Constructing a LoggingHandler without a client must emit a
    # PendingDeprecationWarning mentioning the missing Client instance.
    # In 6.0, this should be changed to expect a ValueError instead of a log
    warnings.simplefilter("always")
    LoggingHandler()
    while True:
        # If we never find our desired warning this will eventually throw an
        # AssertionError
        w = recwarn.pop(PendingDeprecationWarning)
        if "LoggingHandler requires a Client instance" in w.message.args[0]:
            # break instead of "return True": pytest warns on tests that
            # return non-None values (PytestReturnNotNoneWarning).
            break
@pytest.mark.parametrize(
    "elasticapm_client,expected",
    [
        ({}, logging.NOTSET),
        ({"log_level": "off"}, 1000),
        ({"log_level": "trace"}, 5),
        ({"log_level": "debug"}, logging.DEBUG),
        ({"log_level": "info"}, logging.INFO),
        ({"log_level": "WARNING"}, logging.WARNING),
        ({"log_level": "errOr"}, logging.ERROR),
        ({"log_level": "CRITICAL"}, logging.CRITICAL),
    ],
    indirect=["elasticapm_client"],
)
def test_log_level_config(elasticapm_client, expected):
    # log_level strings are case-insensitive and mapped onto the stdlib
    # numeric levels ("off" -> 1000, "trace" -> 5).
    logger = logging.getLogger("elasticapm")
    assert logger.level == expected


def test_log_file(elasticapm_client_log_file):
    # Configuring a log file attaches a RotatingFileHandler to the
    # "elasticapm" logger.
    logger = logging.getLogger("elasticapm")
    found = False
    for handler in logger.handlers:
        if isinstance(handler, logging.handlers.RotatingFileHandler):
            found = True
    assert found


@pytest.mark.parametrize("elasticapm_client_log_file", [{"auto_ecs_logging": True}], indirect=True)
def test_auto_ecs_logging(elasticapm_client_log_file):
    # auto_ecs_logging installs ECS formatters for both stdlib logging
    # and structlog.
    logger = logging.getLogger()
    assert isinstance(logger.handlers[0].formatter, ecs_logging.StdlibFormatter)
    assert isinstance(structlog.get_config()["processors"][-1], ecs_logging.StructlogFormatter)
| bsd-3-clause |
abloomston/sympy | sympy/core/sympify.py | 60 | 13856 | """sympify -- convert objects SymPy internal format"""
from __future__ import print_function, division
from inspect import getmro
from .core import all_classes as sympy_classes
from .compatibility import iterable, string_types, range
from .evaluate import global_evaluate
class SympifyError(ValueError):
    """Raised when an object cannot be converted to SymPy's internal format.

    ``expr`` is the offending object; ``base_exc`` optionally carries the
    underlying exception that caused the failure.
    """

    def __init__(self, expr, base_exc=None):
        self.expr = expr
        self.base_exc = base_exc

    def __str__(self):
        # With a causing exception, report its class name and message;
        # otherwise fall back to a plain repr of the expression.
        if self.base_exc is not None:
            return ("Sympify of expression '%s' failed, because of exception being "
                    "raised:\n%s: %s" % (self.expr, self.base_exc.__class__.__name__,
                    str(self.base_exc)))
        return "SympifyError: %r" % (self.expr,)
converter = {} # See sympify docstring.
class CantSympify(object):
    """
    Mix in this trait to a class to disallow sympification of its instances.

    Examples
    ========

    >>> from sympy.core.sympify import sympify, CantSympify

    >>> class Something(dict):
    ...     pass
    ...
    >>> sympify(Something())
    {}

    >>> class Something(dict, CantSympify):
    ...     pass
    ...
    >>> sympify(Something())
    Traceback (most recent call last):
    ...
    SympifyError: SympifyError: {}

    """
    # Pure marker mix-in: sympify() does an isinstance(a, CantSympify)
    # check and raises SympifyError, so no behaviour is needed here.
    pass
def sympify(a, locals=None, convert_xor=True, strict=False, rational=False,
        evaluate=None):
    """Converts an arbitrary expression to a type that can be used inside SymPy.

    For example, it will convert Python ints into instance of sympy.Rational,
    floats into instances of sympy.Float, etc. It is also able to coerce symbolic
    expressions which inherit from Basic. This can be useful in cooperation
    with SAGE.

    It currently accepts as arguments:
       - any object defined in sympy
       - standard numeric python types: int, long, float, Decimal
       - strings (like "0.09" or "2e-19")
       - booleans, including ``None`` (will leave ``None`` unchanged)
       - lists, sets or tuples containing any of the above

    If the argument is already a type that SymPy understands, it will do
    nothing but return that value. This can be used at the beginning of a
    function to ensure you are working with the correct type.

    >>> from sympy import sympify

    >>> sympify(2).is_integer
    True
    >>> sympify(2).is_real
    True

    >>> sympify(2.0).is_real
    True
    >>> sympify("2.0").is_real
    True
    >>> sympify("2e-45").is_real
    True

    If the expression could not be converted, a SympifyError is raised.

    >>> sympify("x***2")
    Traceback (most recent call last):
    ...
    SympifyError: SympifyError: "could not parse u'x***2'"

    Locals
    ------

    The sympification happens with access to everything that is loaded
    by ``from sympy import *``; anything used in a string that is not
    defined by that import will be converted to a symbol. In the following,
    the ``bitcount`` function is treated as a symbol and the ``O`` is
    interpreted as the Order object (used with series) and it raises
    an error when used improperly:

    >>> s = 'bitcount(42)'
    >>> sympify(s)
    bitcount(42)
    >>> sympify("O(x)")
    O(x)
    >>> sympify("O + 1")
    Traceback (most recent call last):
    ...
    TypeError: unbound method...

    In order to have ``bitcount`` be recognized it can be imported into a
    namespace dictionary and passed as locals:

    >>> from sympy.core.compatibility import exec_
    >>> ns = {}
    >>> exec_('from sympy.core.evalf import bitcount', ns)
    >>> sympify(s, locals=ns)
    6

    In order to have the ``O`` interpreted as a Symbol, identify it as such
    in the namespace dictionary. This can be done in a variety of ways; all
    three of the following are possibilities:

    >>> from sympy import Symbol
    >>> ns["O"] = Symbol("O")  # method 1
    >>> exec_('from sympy.abc import O', ns)  # method 2
    >>> ns.update(dict(O=Symbol("O")))  # method 3
    >>> sympify("O + 1", locals=ns)
    O + 1

    If you want *all* single-letter and Greek-letter variables to be symbols
    then you can use the clashing-symbols dictionaries that have been defined
    there as private variables: _clash1 (single-letter variables), _clash2
    (the multi-letter Greek names) or _clash (both single and multi-letter
    names that are defined in abc).

    >>> from sympy.abc import _clash1
    >>> _clash1
    {'C': C, 'E': E, 'I': I, 'N': N, 'O': O, 'Q': Q, 'S': S}
    >>> sympify('I & Q', _clash1)
    And(I, Q)

    Strict
    ------

    If the option ``strict`` is set to ``True``, only the types for which an
    explicit conversion has been defined are converted. In the other
    cases, a SympifyError is raised.

    >>> print(sympify(None))
    None
    >>> sympify(None, strict=True)
    Traceback (most recent call last):
    ...
    SympifyError: SympifyError: None

    Evaluation
    ----------

    If the option ``evaluate`` is set to ``False``, then arithmetic and
    operators will be converted into their SymPy equivalents and the
    ``evaluate=False`` option will be added. Nested ``Add`` or ``Mul`` will
    be denested first. This is done via an AST transformation that replaces
    operators with their SymPy equivalents, so if an operand redefines any
    of those operations, the redefined operators will not be used.

    >>> sympify('2**2 / 3 + 5')
    19/3
    >>> sympify('2**2 / 3 + 5', evaluate=False)
    2**2/3 + 5

    Extending
    ---------

    To extend ``sympify`` to convert custom objects (not derived from ``Basic``),
    just define a ``_sympy_`` method to your class. You can do that even to
    classes that you do not own by subclassing or adding the method at runtime.

    >>> from sympy import Matrix
    >>> class MyList1(object):
    ...     def __iter__(self):
    ...         yield 1
    ...         yield 2
    ...         raise StopIteration
    ...     def __getitem__(self, i): return list(self)[i]
    ...     def _sympy_(self): return Matrix(self)
    >>> sympify(MyList1())
    Matrix([
    [1],
    [2]])

    If you do not have control over the class definition you could also use the
    ``converter`` global dictionary. The key is the class and the value is a
    function that takes a single argument and returns the desired SymPy
    object, e.g. ``converter[MyList] = lambda x: Matrix(x)``.

    >>> class MyList2(object):   # XXX Do not do this if you control the class!
    ...     def __iter__(self):  #     Use _sympy_!
    ...         yield 1
    ...         yield 2
    ...         raise StopIteration
    ...     def __getitem__(self, i): return list(self)[i]
    >>> from sympy.core.sympify import converter
    >>> converter[MyList2] = lambda x: Matrix(x)
    >>> sympify(MyList2())
    Matrix([
    [1],
    [2]])

    Notes
    =====

    Sometimes autosimplification during sympification results in expressions
    that are very different in structure than what was entered. Until such
    autosimplification is no longer done, the ``kernS`` function might be of
    some use. In the example below you can see how an expression reduces to
    -1 by autosimplification, but does not do so when ``kernS`` is used.

    >>> from sympy.core.sympify import kernS
    >>> from sympy.abc import x
    >>> -2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1
    -1
    >>> s = '-2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1'
    >>> sympify(s)
    -1
    >>> kernS(s)
    -2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1

    """
    if evaluate is None:
        evaluate = global_evaluate[0]
    # Fast path: already a SymPy class.  Guarded because the ``in`` test
    # hashes ``a`` and unhashable objects raise TypeError.
    try:
        if a in sympy_classes:
            return a
    except TypeError: # Type of a is unhashable
        pass
    try:
        cls = a.__class__
    except AttributeError: # a is probably an old-style class object
        cls = type(a)
    if cls in sympy_classes:
        return a
    # None is passed through unless strict conversion was requested.
    if cls is type(None):
        if strict:
            raise SympifyError(a)
        else:
            return a
    # Registered converters: exact class first, then the MRO so that a
    # converter for a base class also handles subclasses.
    try:
        return converter[cls](a)
    except KeyError:
        for superclass in getmro(cls):
            try:
                return converter[superclass](a)
            except KeyError:
                continue
    if isinstance(a, CantSympify):
        raise SympifyError(a)
    # Objects may define their own conversion hook.
    try:
        return a._sympy_()
    except AttributeError:
        pass
    # Non-strings that quack like numbers: retry via float/int coercion.
    if not isinstance(a, string_types):
        for coerce in (float, int):
            try:
                return sympify(coerce(a))
            except (TypeError, ValueError, AttributeError, SympifyError):
                continue
    if strict:
        raise SympifyError(a)
    # Containers are rebuilt with their own type, sympifying each element.
    if iterable(a):
        try:
            return type(a)([sympify(x, locals=locals, convert_xor=convert_xor,
                rational=rational) for x in a])
        except TypeError:
            # Not all iterables are rebuildable with their type.
            pass
    if isinstance(a, dict):
        try:
            return type(a)([sympify(x, locals=locals, convert_xor=convert_xor,
                rational=rational) for x in a.items()])
        except TypeError:
            # Not all iterables are rebuildable with their type.
            pass
    # At this point we were given an arbitrary expression
    # which does not inherit from Basic and doesn't implement
    # _sympy_ (which is a canonical and robust way to convert
    # anything to SymPy expression).
    #
    # As a last chance, we try to take "a"'s normal form via unicode()
    # and try to parse it. If it fails, then we have no luck and
    # return an exception
    try:
        from .compatibility import unicode
        a = unicode(a)
    except Exception as exc:
        raise SympifyError(a, exc)
    # Imported lazily here to avoid a circular import at module load time.
    from sympy.parsing.sympy_parser import (parse_expr, TokenError,
                                            standard_transformations)
    from sympy.parsing.sympy_parser import convert_xor as t_convert_xor
    from sympy.parsing.sympy_parser import rationalize as t_rationalize
    transformations = standard_transformations
    if rational:
        transformations += (t_rationalize,)
    if convert_xor:
        transformations += (t_convert_xor,)
    try:
        a = a.replace('\n', '')
        expr = parse_expr(a, local_dict=locals, transformations=transformations, evaluate=evaluate)
    except (TokenError, SyntaxError) as exc:
        raise SympifyError('could not parse %r' % a, exc)
    return expr
def _sympify(a):
    """
    Short version of sympify for internal usage for __add__ and __eq__ methods
    where it is ok to allow some things (like Python integers and floats) in
    the expression. This excludes things (like strings) that are unwise to
    allow into such an expression.

    >>> from sympy import Integer
    >>> Integer(1) == 1
    True

    >>> Integer(1) == '1'
    False

    >>> from sympy.abc import x
    >>> x + 1
    x + 1

    >>> x + '1'
    Traceback (most recent call last):
    ...
    TypeError: unsupported operand type(s) for +: 'Symbol' and 'str'

    see: sympify
    """
    # strict=True rejects anything without an explicit converter
    # (notably strings), raising SympifyError for the dunder to turn
    # into NotImplemented/TypeError.
    return sympify(a, strict=True)
def kernS(s):
    """Use a hack to try keep autosimplification from joining Integer or
    minus sign into an Add of a Mul; this modification doesn't
    prevent the 2-arg Mul from becoming an Add, however.

    Examples
    ========

    >>> from sympy.core.sympify import kernS
    >>> from sympy.abc import x, y, z

    The 2-arg Mul allows a leading Integer to be distributed but kernS will
    prevent that:

    >>> 2*(x + y)
    2*x + 2*y
    >>> kernS('2*(x + y)')
    2*(x + y)

    If use of the hack fails, the un-hacked string will be passed to sympify...
    and you get what you get.

    XXX This hack should not be necessary once issue 4596 has been resolved.
    """
    import re
    from sympy.core.symbol import Symbol

    hit = False
    if '(' in s:
        if s.count('(') != s.count(")"):
            raise SympifyError('unmatched left parenthesis')
        # Pick a symbol name guaranteed not to occur in the input.
        kern = '_kern'
        while kern in s:
            kern += "_"
        olds = s
        # digits*( -> digits*kern*(
        s = re.sub(r'(\d+)( *\* *)\(', r'\1*%s\2(' % kern, s)
        # negated parenthetical
        kern2 = kern + "2"
        while kern2 in s:
            kern2 += "_"
        # step 1: -(...) --> kern-kern*(...)
        target = r'%s-%s*(' % (kern, kern)
        s = re.sub(r'- *\(', target, s)
        # step 2: double the matching closing parenthesis
        # kern-kern*(...) --> kern-kern*(...)kern2
        i = nest = 0
        while True:
            j = s.find(target, i)
            if j == -1:
                break
            # NOTE(review): this restarts the search for '(' from the start
            # of the string rather than from position j (s.find('(', j)),
            # so with more than one negated parenthetical the wrong group
            # may be doubled -- verify with inputs like '-(x) - (-(y))'.
            j = s.find('(')
            for j in range(j, len(s)):
                if s[j] == "(":
                    nest += 1
                elif s[j] == ")":
                    nest -= 1
                if nest == 0:
                    break
            s = s[:j] + kern2 + s[j:]
            i = j
        # step 3: put in the parentheses
        # kern-kern*(...)kern2 --> (-kern*(...))
        s = s.replace(target, target.replace(kern, "(", 1))
        s = s.replace(kern2, ')')
        hit = kern in s
    for i in range(2):
        try:
            expr = sympify(s)
            break
        except: # the kern might cause unknown errors, so use bare except
            if hit:
                s = olds # maybe it didn't like the kern; use un-kerned s
                hit = False
                continue
            expr = sympify(s) # let original error raise
    if not hit:
        return expr
    # The kern symbol multiplies by 1, so substituting it with 1 restores
    # the mathematical value while keeping the protected structure.
    rep = {Symbol(kern): 1}
    def _clear(expr):
        # Recurse into containers; anything with .subs gets the kern removed.
        if isinstance(expr, (list, tuple, set)):
            return type(expr)([_clear(e) for e in expr])
        if hasattr(expr, 'subs'):
            return expr.subs(rep, hack2=True)
        return expr
    expr = _clear(expr)
    # hope that kern is not there anymore
    return expr
| bsd-3-clause |
BMJHayward/django | django/db/migrations/topological_sort.py | 538 | 1129 | def topological_sort_as_sets(dependency_graph):
"""Variation of Kahn's algorithm (1962) that returns sets.
Takes a dependency graph as a dictionary of node => dependencies.
Yields sets of items in topological order, where the first set contains
all nodes without dependencies, and each following set contains all
nodes that depend on the nodes in the previously yielded sets.
"""
todo = dependency_graph.copy()
while todo:
current = {node for node, deps in todo.items() if len(deps) == 0}
if not current:
raise ValueError('Cyclic dependency in graph: {}'.format(
', '.join(repr(x) for x in todo.items())))
yield current
# remove current from todo's nodes & dependencies
todo = {node: (dependencies - current) for node, dependencies in
todo.items() if node not in current}
def stable_topological_sort(l, dependency_graph):
    """Return the nodes of *l* in topological order.

    Within each dependency layer, nodes keep the relative order they
    have in *l* (hence "stable").
    """
    ordered = []
    for layer in topological_sort_as_sets(dependency_graph):
        ordered.extend(node for node in l if node in layer)
    return ordered
| bsd-3-clause |
dgarros/ansible | test/units/module_utils/test_basic.py | 24 | 43983 | # -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import errno
import json
import os
import sys
from io import BytesIO, StringIO
from units.mock.procenv import ModuleTestCase, swap_stdin_and_argv
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock, mock_open, Mock, call
from ansible.module_utils.six.moves import builtins
realimport = builtins.__import__
class TestModuleUtilsBasic(ModuleTestCase):
    """Unit tests for ansible.module_utils.basic.

    Exercises the module's optional-import fallbacks (syslog, selinux,
    json, systemd journal), the platform/distribution detection helpers,
    AnsibleModule argument-spec validation, and the file-attribute and
    SELinux-related instance methods.
    """
    def clear_modules(self, mods):
        # Drop cached modules so the next __import__ call re-runs the
        # module-level import logic under test.
        for mod in mods:
            if mod in sys.modules:
                del sys.modules[mod]
    @patch.object(builtins, '__import__')
    def test_module_utils_basic_import_syslog(self, mock_import):
        def _mock_import(name, *args, **kwargs):
            if name == 'syslog':
                raise ImportError
            return realimport(name, *args, **kwargs)
        self.clear_modules(['syslog', 'ansible.module_utils.basic'])
        mod = builtins.__import__('ansible.module_utils.basic')
        self.assertTrue(mod.module_utils.basic.HAS_SYSLOG)
        self.clear_modules(['syslog', 'ansible.module_utils.basic'])
        mock_import.side_effect = _mock_import
        mod = builtins.__import__('ansible.module_utils.basic')
        self.assertFalse(mod.module_utils.basic.HAS_SYSLOG)
    @patch.object(builtins, '__import__')
    def test_module_utils_basic_import_selinux(self, mock_import):
        def _mock_import(name, *args, **kwargs):
            if name == 'selinux':
                raise ImportError
            return realimport(name, *args, **kwargs)
        try:
            self.clear_modules(['selinux', 'ansible.module_utils.basic'])
            mod = builtins.__import__('ansible.module_utils.basic')
            self.assertTrue(mod.module_utils.basic.HAVE_SELINUX)
        except ImportError:
            # no selinux on test system, so skip
            pass
        self.clear_modules(['selinux', 'ansible.module_utils.basic'])
        mock_import.side_effect = _mock_import
        mod = builtins.__import__('ansible.module_utils.basic')
        self.assertFalse(mod.module_utils.basic.HAVE_SELINUX)
    @patch.object(builtins, '__import__')
    def test_module_utils_basic_import_json(self, mock_import):
        def _mock_import(name, *args, **kwargs):
            if name == 'json':
                raise ImportError
            elif name == 'simplejson':
                return MagicMock()
            return realimport(name, *args, **kwargs)
        self.clear_modules(['json', 'ansible.module_utils.basic'])
        mod = builtins.__import__('ansible.module_utils.basic')
        self.clear_modules(['json', 'ansible.module_utils.basic'])
        mock_import.side_effect = _mock_import
        mod = builtins.__import__('ansible.module_utils.basic')
        # FIXME: doesn't work yet
    # @patch.object(builtins, 'bytes')
    # def test_module_utils_basic_bytes(self, mock_bytes):
    #     mock_bytes.side_effect = NameError()
    #     from ansible.module_utils import basic
    @patch.object(builtins, '__import__')
    @unittest.skipIf(sys.version_info[0] >= 3, "literal_eval is available in every version of Python3")
    def test_module_utils_basic_import_literal_eval(self, mock_import):
        def _mock_import(name, *args, **kwargs):
            try:
                fromlist = kwargs.get('fromlist', args[2])
            except IndexError:
                fromlist = []
            if name == 'ast' and 'literal_eval' in fromlist:
                raise ImportError
            return realimport(name, *args, **kwargs)
        mock_import.side_effect = _mock_import
        self.clear_modules(['ast', 'ansible.module_utils.basic'])
        mod = builtins.__import__('ansible.module_utils.basic')
        self.assertEqual(mod.module_utils.basic.literal_eval("'1'"), "1")
        self.assertEqual(mod.module_utils.basic.literal_eval("1"), 1)
        self.assertEqual(mod.module_utils.basic.literal_eval("-1"), -1)
        self.assertEqual(mod.module_utils.basic.literal_eval("(1,2,3)"), (1, 2, 3))
        self.assertEqual(mod.module_utils.basic.literal_eval("[1]"), [1])
        self.assertEqual(mod.module_utils.basic.literal_eval("True"), True)
        self.assertEqual(mod.module_utils.basic.literal_eval("False"), False)
        self.assertEqual(mod.module_utils.basic.literal_eval("None"), None)
        # self.assertEqual(mod.module_utils.basic.literal_eval('{"a": 1}'), dict(a=1))
        self.assertRaises(ValueError, mod.module_utils.basic.literal_eval, "asdfasdfasdf")
    @patch.object(builtins, '__import__')
    def test_module_utils_basic_import_systemd_journal(self, mock_import):
        def _mock_import(name, *args, **kwargs):
            try:
                fromlist = kwargs.get('fromlist', args[2])
            except IndexError:
                fromlist = []
            if name == 'systemd' and 'journal' in fromlist:
                raise ImportError
            return realimport(name, *args, **kwargs)
        self.clear_modules(['systemd', 'ansible.module_utils.basic'])
        mod = builtins.__import__('ansible.module_utils.basic')
        self.assertTrue(mod.module_utils.basic.has_journal)
        self.clear_modules(['systemd', 'ansible.module_utils.basic'])
        mock_import.side_effect = _mock_import
        mod = builtins.__import__('ansible.module_utils.basic')
        self.assertFalse(mod.module_utils.basic.has_journal)
    def test_module_utils_basic_get_platform(self):
        with patch('platform.system', return_value='foo'):
            from ansible.module_utils.basic import get_platform
            self.assertEqual(get_platform(), 'foo')
    def test_module_utils_basic_get_distribution(self):
        from ansible.module_utils.basic import get_distribution
        with patch('platform.system', return_value='Foo'):
            self.assertEqual(get_distribution(), None)
        with patch('platform.system', return_value='Linux'):
            with patch('platform.linux_distribution', return_value=["foo"]):
                self.assertEqual(get_distribution(), "Foo")
            with patch('os.path.isfile', return_value=True):
                with patch('platform.linux_distribution', side_effect=[("AmazonFooBar", )]):
                    self.assertEqual(get_distribution(), "Amazonfoobar")
                with patch('platform.linux_distribution', side_effect=(("", ), ("AmazonFooBam",))):
                    self.assertEqual(get_distribution(), "Amazon")
                with patch('platform.linux_distribution', side_effect=[("", ), ("", )]):
                    self.assertEqual(get_distribution(), "OtherLinux")
                def _dist(distname='', version='', id='', supported_dists=(), full_distribution_name=1):
                    if supported_dists != ():
                        return ("Bar", "2", "Two")
                    else:
                        return ("", "", "")
                with patch('platform.linux_distribution', side_effect=_dist):
                    self.assertEqual(get_distribution(), "Bar")
            with patch('platform.linux_distribution', side_effect=Exception("boo")):
                with patch('platform.dist', return_value=("bar", "2", "Two")):
                    self.assertEqual(get_distribution(), "Bar")
    def test_module_utils_basic_get_distribution_version(self):
        from ansible.module_utils.basic import get_distribution_version
        with patch('platform.system', return_value='Foo'):
            self.assertEqual(get_distribution_version(), None)
        with patch('platform.system', return_value='Linux'):
            with patch('platform.linux_distribution', return_value=("foo", "1", "One")):
                self.assertEqual(get_distribution_version(), "1")
            with patch('os.path.isfile', return_value=True):
                def _dist(distname='', version='', id='', supported_dists=(), full_distribution_name=1):
                    if supported_dists != ():
                        return ("AmazonFooBar", "2", "")
                    else:
                        return ("", "", "")
                with patch('platform.linux_distribution', side_effect=_dist):
                    self.assertEqual(get_distribution_version(), "2")
            with patch('platform.linux_distribution', side_effect=Exception("boo")):
                with patch('platform.dist', return_value=("bar", "3", "Three")):
                    self.assertEqual(get_distribution_version(), "3")
    def test_module_utils_basic_load_platform_subclass(self):
        class LinuxTest:
            pass
        class Foo(LinuxTest):
            platform = "Linux"
            distribution = None
        class Bar(LinuxTest):
            platform = "Linux"
            distribution = "Bar"
        from ansible.module_utils.basic import load_platform_subclass
        # match just the platform class, not a specific distribution
        with patch('ansible.module_utils.basic.get_platform', return_value="Linux"):
            with patch('ansible.module_utils.basic.get_distribution', return_value=None):
                self.assertIs(type(load_platform_subclass(LinuxTest)), Foo)
        # match both the distribution and platform class
        with patch('ansible.module_utils.basic.get_platform', return_value="Linux"):
            with patch('ansible.module_utils.basic.get_distribution', return_value="Bar"):
                self.assertIs(type(load_platform_subclass(LinuxTest)), Bar)
        # if neither match, the fallback should be the top-level class
        with patch('ansible.module_utils.basic.get_platform', return_value="Foo"):
            with patch('ansible.module_utils.basic.get_distribution', return_value=None):
                self.assertIs(type(load_platform_subclass(LinuxTest)), LinuxTest)
    def test_module_utils_basic_json_dict_converters(self):
        from ansible.module_utils.basic import json_dict_unicode_to_bytes, json_dict_bytes_to_unicode
        test_data = dict(
            item1=u"Fóo",
            item2=[u"Bár", u"Bam"],
            item3=dict(sub1=u"Súb"),
            item4=(u"föo", u"bär", u"©"),
            item5=42,
        )
        # round-trip: encoding to bytes then decoding must restore the original
        res = json_dict_unicode_to_bytes(test_data)
        res2 = json_dict_bytes_to_unicode(res)
        self.assertEqual(test_data, res2)
    def test_module_utils_basic_get_module_path(self):
        from ansible.module_utils.basic import get_module_path
        with patch('os.path.realpath', return_value='/path/to/foo/'):
            self.assertEqual(get_module_path(), '/path/to/foo')
    def test_module_utils_basic_ansible_module_creation(self):
        from ansible.module_utils import basic
        am = basic.AnsibleModule(
            argument_spec=dict(),
        )
        arg_spec = dict(
            foo=dict(required=True),
            bar=dict(),
            bam=dict(),
            baz=dict(),
        )
        mut_ex = (('bar', 'bam'),)
        req_to = (('bam', 'baz'),)
        # should test ok
        args = json.dumps(dict(ANSIBLE_MODULE_ARGS={"foo": "hello"}))
        with swap_stdin_and_argv(stdin_data=args):
            basic._ANSIBLE_ARGS = None
            am = basic.AnsibleModule(
                argument_spec=arg_spec,
                mutually_exclusive=mut_ex,
                required_together=req_to,
                no_log=True,
                check_invalid_arguments=False,
                add_file_common_args=True,
                supports_check_mode=True,
            )
        # FIXME: add asserts here to verify the basic config
        # fail, because a required param was not specified
        args = json.dumps(dict(ANSIBLE_MODULE_ARGS={}))
        with swap_stdin_and_argv(stdin_data=args):
            basic._ANSIBLE_ARGS = None
            self.assertRaises(
                SystemExit,
                basic.AnsibleModule,
                argument_spec=arg_spec,
                mutually_exclusive=mut_ex,
                required_together=req_to,
                no_log=True,
                check_invalid_arguments=False,
                add_file_common_args=True,
                supports_check_mode=True,
            )
        # fail because of mutually exclusive parameters
        args = json.dumps(dict(ANSIBLE_MODULE_ARGS={"foo": "hello", "bar": "bad", "bam": "bad"}))
        with swap_stdin_and_argv(stdin_data=args):
            basic._ANSIBLE_ARGS = None
            self.assertRaises(
                SystemExit,
                basic.AnsibleModule,
                argument_spec=arg_spec,
                mutually_exclusive=mut_ex,
                required_together=req_to,
                no_log=True,
                check_invalid_arguments=False,
                add_file_common_args=True,
                supports_check_mode=True,
            )
        # fail because a param required due to another param was not specified
        args = json.dumps(dict(ANSIBLE_MODULE_ARGS={"bam": "bad"}))
        with swap_stdin_and_argv(stdin_data=args):
            basic._ANSIBLE_ARGS = None
            self.assertRaises(
                SystemExit,
                basic.AnsibleModule,
                argument_spec=arg_spec,
                mutually_exclusive=mut_ex,
                required_together=req_to,
                no_log=True,
                check_invalid_arguments=False,
                add_file_common_args=True,
                supports_check_mode=True,
            )
    def test_module_utils_basic_ansible_module_type_check(self):
        from ansible.module_utils import basic
        arg_spec = dict(
            foo=dict(type='float'),
            foo2=dict(type='float'),
            foo3=dict(type='float'),
            bar=dict(type='int'),
            bar2=dict(type='int'),
        )
        # should test ok
        args = json.dumps(dict(ANSIBLE_MODULE_ARGS={
            "foo": 123.0,  # float
            "foo2": 123,  # int
            "foo3": "123",  # string
            "bar": 123,  # int
            "bar2": "123",  # string
        }))
        with swap_stdin_and_argv(stdin_data=args):
            basic._ANSIBLE_ARGS = None
            am = basic.AnsibleModule(
                argument_spec=arg_spec,
                no_log=True,
                check_invalid_arguments=False,
                add_file_common_args=True,
                supports_check_mode=True,
            )
        # fail, because bar does not accept floating point numbers
        args = json.dumps(dict(ANSIBLE_MODULE_ARGS={"bar": 123.0}))
        with swap_stdin_and_argv(stdin_data=args):
            basic._ANSIBLE_ARGS = None
            self.assertRaises(
                SystemExit,
                basic.AnsibleModule,
                argument_spec=arg_spec,
                no_log=True,
                check_invalid_arguments=False,
                add_file_common_args=True,
                supports_check_mode=True,
            )
    def test_module_utils_basic_ansible_module_load_file_common_arguments(self):
        from ansible.module_utils import basic
        basic._ANSIBLE_ARGS = None
        am = basic.AnsibleModule(
            argument_spec=dict(),
        )
        am.selinux_mls_enabled = MagicMock()
        am.selinux_mls_enabled.return_value = True
        am.selinux_default_context = MagicMock()
        am.selinux_default_context.return_value = 'unconfined_u:object_r:default_t:s0'.split(':', 3)
        # with no params, the result should be an empty dict
        res = am.load_file_common_arguments(params=dict())
        self.assertEqual(res, dict())
        base_params = dict(
            path='/path/to/file',
            mode=0o600,
            owner='root',
            group='root',
            seuser='_default',
            serole='_default',
            setype='_default',
            selevel='_default',
        )
        extended_params = base_params.copy()
        extended_params.update(dict(
            follow=True,
            foo='bar',
        ))
        final_params = base_params.copy()
        final_params.update(dict(
            path='/path/to/real_file',
            secontext=['unconfined_u', 'object_r', 'default_t', 's0'],
            attributes=None,
        ))
        # with the proper params specified, the returned dictionary should represent
        # only those params which have something to do with the file arguments, excluding
        # other params and updated as required with proper values which may have been
        # massaged by the method
        with patch('os.path.islink', return_value=True):
            with patch('os.path.realpath', return_value='/path/to/real_file'):
                res = am.load_file_common_arguments(params=extended_params)
                self.assertEqual(res, final_params)
    def test_module_utils_basic_ansible_module_selinux_mls_enabled(self):
        from ansible.module_utils import basic
        basic._ANSIBLE_ARGS = None
        am = basic.AnsibleModule(
            argument_spec=dict(),
        )
        basic.HAVE_SELINUX = False
        self.assertEqual(am.selinux_mls_enabled(), False)
        basic.HAVE_SELINUX = True
        basic.selinux = Mock()
        with patch.dict('sys.modules', {'selinux': basic.selinux}):
            with patch('selinux.is_selinux_mls_enabled', return_value=0):
                self.assertEqual(am.selinux_mls_enabled(), False)
            with patch('selinux.is_selinux_mls_enabled', return_value=1):
                self.assertEqual(am.selinux_mls_enabled(), True)
        delattr(basic, 'selinux')
    def test_module_utils_basic_ansible_module_selinux_initial_context(self):
        from ansible.module_utils import basic
        basic._ANSIBLE_ARGS = None
        am = basic.AnsibleModule(
            argument_spec=dict(),
        )
        am.selinux_mls_enabled = MagicMock()
        am.selinux_mls_enabled.return_value = False
        self.assertEqual(am.selinux_initial_context(), [None, None, None])
        am.selinux_mls_enabled.return_value = True
        self.assertEqual(am.selinux_initial_context(), [None, None, None, None])
    def test_module_utils_basic_ansible_module_selinux_enabled(self):
        from ansible.module_utils import basic
        basic._ANSIBLE_ARGS = None
        am = basic.AnsibleModule(
            argument_spec=dict(),
        )
        # we first test the cases where the python selinux lib is
        # not installed, which has two paths: one in which the system
        # does have selinux installed (and the selinuxenabled command
        # is present and returns 0 when run), or selinux is not installed
        basic.HAVE_SELINUX = False
        am.get_bin_path = MagicMock()
        am.get_bin_path.return_value = '/path/to/selinuxenabled'
        am.run_command = MagicMock()
        am.run_command.return_value = (0, '', '')
        self.assertRaises(SystemExit, am.selinux_enabled)
        am.get_bin_path.return_value = None
        self.assertEqual(am.selinux_enabled(), False)
        # finally we test the case where the python selinux lib is installed,
        # and both possibilities there (enabled vs. disabled)
        basic.HAVE_SELINUX = True
        basic.selinux = Mock()
        with patch.dict('sys.modules', {'selinux': basic.selinux}):
            with patch('selinux.is_selinux_enabled', return_value=0):
                self.assertEqual(am.selinux_enabled(), False)
            with patch('selinux.is_selinux_enabled', return_value=1):
                self.assertEqual(am.selinux_enabled(), True)
        delattr(basic, 'selinux')
    def test_module_utils_basic_ansible_module_selinux_default_context(self):
        from ansible.module_utils import basic
        basic._ANSIBLE_ARGS = None
        am = basic.AnsibleModule(
            argument_spec=dict(),
        )
        am.selinux_initial_context = MagicMock(return_value=[None, None, None, None])
        am.selinux_enabled = MagicMock(return_value=True)
        # we first test the cases where the python selinux lib is not installed
        basic.HAVE_SELINUX = False
        self.assertEqual(am.selinux_default_context(path='/foo/bar'), [None, None, None, None])
        # all following tests assume the python selinux bindings are installed
        basic.HAVE_SELINUX = True
        basic.selinux = Mock()
        with patch.dict('sys.modules', {'selinux': basic.selinux}):
            # next, we test with a mocked implementation of selinux.matchpathcon to simulate
            # an actual context being found
            with patch('selinux.matchpathcon', return_value=[0, 'unconfined_u:object_r:default_t:s0']):
                self.assertEqual(am.selinux_default_context(path='/foo/bar'), ['unconfined_u', 'object_r', 'default_t', 's0'])
            # we also test the case where matchpathcon returned a failure
            with patch('selinux.matchpathcon', return_value=[-1, '']):
                self.assertEqual(am.selinux_default_context(path='/foo/bar'), [None, None, None, None])
            # finally, we test where an OSError occurred during matchpathcon's call
            with patch('selinux.matchpathcon', side_effect=OSError):
                self.assertEqual(am.selinux_default_context(path='/foo/bar'), [None, None, None, None])
        delattr(basic, 'selinux')
    def test_module_utils_basic_ansible_module_selinux_context(self):
        from ansible.module_utils import basic
        basic._ANSIBLE_ARGS = None
        am = basic.AnsibleModule(
            argument_spec=dict(),
        )
        am.selinux_initial_context = MagicMock(return_value=[None, None, None, None])
        am.selinux_enabled = MagicMock(return_value=True)
        # we first test the cases where the python selinux lib is not installed
        basic.HAVE_SELINUX = False
        self.assertEqual(am.selinux_context(path='/foo/bar'), [None, None, None, None])
        # all following tests assume the python selinux bindings are installed
        basic.HAVE_SELINUX = True
        basic.selinux = Mock()
        with patch.dict('sys.modules', {'selinux': basic.selinux}):
            # next, we test with a mocked implementation of selinux.lgetfilecon_raw to simulate
            # an actual context being found
            with patch('selinux.lgetfilecon_raw', return_value=[0, 'unconfined_u:object_r:default_t:s0']):
                self.assertEqual(am.selinux_context(path='/foo/bar'), ['unconfined_u', 'object_r', 'default_t', 's0'])
            # we also test the case where matchpathcon returned a failure
            with patch('selinux.lgetfilecon_raw', return_value=[-1, '']):
                self.assertEqual(am.selinux_context(path='/foo/bar'), [None, None, None, None])
            # finally, we test where an OSError occurred during matchpathcon's call
            e = OSError()
            e.errno = errno.ENOENT
            with patch('selinux.lgetfilecon_raw', side_effect=e):
                self.assertRaises(SystemExit, am.selinux_context, path='/foo/bar')
            e = OSError()
            with patch('selinux.lgetfilecon_raw', side_effect=e):
                self.assertRaises(SystemExit, am.selinux_context, path='/foo/bar')
        delattr(basic, 'selinux')
    def test_module_utils_basic_ansible_module_is_special_selinux_path(self):
        """Paths on filesystems named in _ansible_selinux_special_fs are special."""
        from ansible.module_utils import basic
        args = json.dumps(dict(ANSIBLE_MODULE_ARGS={'_ansible_selinux_special_fs': "nfs,nfsd,foos"}))
        with swap_stdin_and_argv(stdin_data=args):
            basic._ANSIBLE_ARGS = None
            am = basic.AnsibleModule(
                argument_spec=dict(),
            )
            def _mock_find_mount_point(path):
                if path.startswith('/some/path'):
                    return '/some/path'
                elif path.startswith('/weird/random/fstype'):
                    return '/weird/random/fstype'
                return '/'
            am.find_mount_point = MagicMock(side_effect=_mock_find_mount_point)
            am.selinux_context = MagicMock(return_value=['foo_u', 'foo_r', 'foo_t', 's0'])
            m = mock_open()
            m.side_effect = OSError
            with patch.object(builtins, 'open', m, create=True):
                self.assertEqual(am.is_special_selinux_path('/some/path/that/should/be/nfs'), (False, None))
            mount_data = [
                '/dev/disk1 / ext4 rw,seclabel,relatime,data=ordered 0 0\n',
                '1.1.1.1:/path/to/nfs /some/path nfs ro 0 0\n',
                'whatever /weird/random/fstype foos rw 0 0\n',
            ]
            # mock_open has a broken readlines() implementation apparently...
            # this should work by default but doesn't, so we fix it
            m = mock_open(read_data=''.join(mount_data))
            m.return_value.readlines.return_value = mount_data
            with patch.object(builtins, 'open', m, create=True):
                self.assertEqual(am.is_special_selinux_path('/some/random/path'), (False, None))
                self.assertEqual(am.is_special_selinux_path('/some/path/that/should/be/nfs'), (True, ['foo_u', 'foo_r', 'foo_t', 's0']))
                self.assertEqual(am.is_special_selinux_path('/weird/random/fstype/path'), (True, ['foo_u', 'foo_r', 'foo_t', 's0']))
    def test_module_utils_basic_ansible_module_user_and_group(self):
        from ansible.module_utils import basic
        basic._ANSIBLE_ARGS = None
        am = basic.AnsibleModule(
            argument_spec=dict(),
        )
        mock_stat = MagicMock()
        mock_stat.st_uid = 0
        mock_stat.st_gid = 0
        with patch('os.lstat', return_value=mock_stat):
            self.assertEqual(am.user_and_group('/path/to/file'), (0, 0))
    def test_module_utils_basic_ansible_module_find_mount_point(self):
        from ansible.module_utils import basic
        basic._ANSIBLE_ARGS = None
        am = basic.AnsibleModule(
            argument_spec=dict(),
        )
        def _mock_ismount(path):
            if path == b'/':
                return True
            return False
        with patch('os.path.ismount', side_effect=_mock_ismount):
            self.assertEqual(am.find_mount_point('/root/fs/../mounted/path/to/whatever'), '/')
        def _mock_ismount(path):
            if path == b'/subdir/mount':
                return True
            if path == b'/':
                return True
            return False
        with patch('os.path.ismount', side_effect=_mock_ismount):
            self.assertEqual(am.find_mount_point('/subdir/mount/path/to/whatever'), '/subdir/mount')
    def test_module_utils_basic_ansible_module_set_context_if_different(self):
        from ansible.module_utils import basic
        basic._ANSIBLE_ARGS = None
        am = basic.AnsibleModule(
            argument_spec=dict(),
        )
        basic.HAVE_SELINUX = False
        am.selinux_enabled = MagicMock(return_value=False)
        self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], True), True)
        self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), False)
        basic.HAVE_SELINUX = True
        am.selinux_enabled = MagicMock(return_value=True)
        am.selinux_context = MagicMock(return_value=['bar_u', 'bar_r', None, None])
        am.is_special_selinux_path = MagicMock(return_value=(False, None))
        basic.selinux = Mock()
        with patch.dict('sys.modules', {'selinux': basic.selinux}):
            with patch('selinux.lsetfilecon', return_value=0) as m:
                self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), True)
                m.assert_called_with('/path/to/file', 'foo_u:foo_r:foo_t:s0')
                m.reset_mock()
                am.check_mode = True
                self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), True)
                self.assertEqual(m.called, False)
            am.check_mode = False
            with patch('selinux.lsetfilecon', return_value=1) as m:
                self.assertRaises(SystemExit, am.set_context_if_different, '/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], True)
            with patch('selinux.lsetfilecon', side_effect=OSError) as m:
                self.assertRaises(SystemExit, am.set_context_if_different, '/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], True)
            am.is_special_selinux_path = MagicMock(return_value=(True, ['sp_u', 'sp_r', 'sp_t', 's0']))
            with patch('selinux.lsetfilecon', return_value=0) as m:
                self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), True)
                m.assert_called_with('/path/to/file', 'sp_u:sp_r:sp_t:s0')
        delattr(basic, 'selinux')
    def test_module_utils_basic_ansible_module_set_owner_if_different(self):
        from ansible.module_utils import basic
        basic._ANSIBLE_ARGS = None
        am = basic.AnsibleModule(
            argument_spec=dict(),
        )
        self.assertEqual(am.set_owner_if_different('/path/to/file', None, True), True)
        self.assertEqual(am.set_owner_if_different('/path/to/file', None, False), False)
        am.user_and_group = MagicMock(return_value=(500, 500))
        with patch('os.lchown', return_value=None) as m:
            self.assertEqual(am.set_owner_if_different('/path/to/file', 0, False), True)
            m.assert_called_with(b'/path/to/file', 0, -1)
            def _mock_getpwnam(*args, **kwargs):
                mock_pw = MagicMock()
                mock_pw.pw_uid = 0
                return mock_pw
            m.reset_mock()
            with patch('pwd.getpwnam', side_effect=_mock_getpwnam):
                self.assertEqual(am.set_owner_if_different('/path/to/file', 'root', False), True)
                m.assert_called_with(b'/path/to/file', 0, -1)
            with patch('pwd.getpwnam', side_effect=KeyError):
                self.assertRaises(SystemExit, am.set_owner_if_different, '/path/to/file', 'root', False)
            m.reset_mock()
            am.check_mode = True
            self.assertEqual(am.set_owner_if_different('/path/to/file', 0, False), True)
            self.assertEqual(m.called, False)
            am.check_mode = False
        with patch('os.lchown', side_effect=OSError) as m:
            self.assertRaises(SystemExit, am.set_owner_if_different, '/path/to/file', 'root', False)
    def test_module_utils_basic_ansible_module_set_group_if_different(self):
        from ansible.module_utils import basic
        basic._ANSIBLE_ARGS = None
        am = basic.AnsibleModule(
            argument_spec=dict(),
        )
        self.assertEqual(am.set_group_if_different('/path/to/file', None, True), True)
        self.assertEqual(am.set_group_if_different('/path/to/file', None, False), False)
        am.user_and_group = MagicMock(return_value=(500, 500))
        with patch('os.lchown', return_value=None) as m:
            self.assertEqual(am.set_group_if_different('/path/to/file', 0, False), True)
            m.assert_called_with(b'/path/to/file', -1, 0)
            def _mock_getgrnam(*args, **kwargs):
                mock_gr = MagicMock()
                mock_gr.gr_gid = 0
                return mock_gr
            m.reset_mock()
            with patch('grp.getgrnam', side_effect=_mock_getgrnam):
                self.assertEqual(am.set_group_if_different('/path/to/file', 'root', False), True)
                m.assert_called_with(b'/path/to/file', -1, 0)
            with patch('grp.getgrnam', side_effect=KeyError):
                self.assertRaises(SystemExit, am.set_group_if_different, '/path/to/file', 'root', False)
            m.reset_mock()
            am.check_mode = True
            self.assertEqual(am.set_group_if_different('/path/to/file', 0, False), True)
            self.assertEqual(m.called, False)
            am.check_mode = False
        with patch('os.lchown', side_effect=OSError) as m:
            self.assertRaises(SystemExit, am.set_group_if_different, '/path/to/file', 'root', False)
    @patch('tempfile.mkstemp')
    @patch('os.umask')
    @patch('shutil.copyfileobj')
    @patch('shutil.move')
    @patch('shutil.copy2')
    @patch('os.rename')
    @patch('pwd.getpwuid')
    @patch('os.getuid')
    @patch('os.environ')
    @patch('os.getlogin')
    @patch('os.chown')
    @patch('os.chmod')
    @patch('os.stat')
    @patch('os.path.exists')
    @patch('os.close')
    def test_module_utils_basic_ansible_module_atomic_move(
            self,
            _os_close,
            _os_path_exists,
            _os_stat,
            _os_chmod,
            _os_chown,
            _os_getlogin,
            _os_environ,
            _os_getuid,
            _pwd_getpwuid,
            _os_rename,
            _shutil_copy2,
            _shutil_move,
            _shutil_copyfileobj,
            _os_umask,
            _tempfile_mkstemp):
        """Walk atomic_move() through its rename/copy fallback paths with all OS calls mocked."""
        from ansible.module_utils import basic
        basic._ANSIBLE_ARGS = None
        am = basic.AnsibleModule(
            argument_spec=dict(),
        )
        environ = dict()
        _os_environ.__getitem__ = environ.__getitem__
        _os_environ.__setitem__ = environ.__setitem__
        am.selinux_enabled = MagicMock()
        am.selinux_context = MagicMock()
        am.selinux_default_context = MagicMock()
        am.set_context_if_different = MagicMock()
        # test destination does not exist, no selinux, login name = 'root',
        # no environment, os.rename() succeeds
        _os_path_exists.side_effect = [False, False]
        _os_getlogin.return_value = 'root'
        _os_getuid.return_value = 0
        _pwd_getpwuid.return_value = ('root', '', 0, 0, '', '', '')
        _os_rename.return_value = None
        _os_umask.side_effect = [18, 0]
        am.selinux_enabled.return_value = False
        _os_chmod.reset_mock()
        _os_chown.reset_mock()
        am.set_context_if_different.reset_mock()
        am.atomic_move('/path/to/src', '/path/to/dest')
        _os_rename.assert_called_with(b'/path/to/src', b'/path/to/dest')
        self.assertEqual(_os_chmod.call_args_list, [call(b'/path/to/dest', basic.DEFAULT_PERM & ~18)])
        # same as above, except selinux_enabled
        _os_path_exists.side_effect = [False, False]
        _os_getlogin.return_value = 'root'
        _os_getuid.return_value = 0
        _pwd_getpwuid.return_value = ('root', '', 0, 0, '', '', '')
        _os_rename.return_value = None
        _os_umask.side_effect = [18, 0]
        mock_context = MagicMock()
        am.selinux_default_context.return_value = mock_context
        am.selinux_enabled.return_value = True
        _os_chmod.reset_mock()
        _os_chown.reset_mock()
        am.set_context_if_different.reset_mock()
        am.selinux_default_context.reset_mock()
        am.atomic_move('/path/to/src', '/path/to/dest')
        _os_rename.assert_called_with(b'/path/to/src', b'/path/to/dest')
        self.assertEqual(_os_chmod.call_args_list, [call(b'/path/to/dest', basic.DEFAULT_PERM & ~18)])
        self.assertEqual(am.selinux_default_context.call_args_list, [call('/path/to/dest')])
        self.assertEqual(am.set_context_if_different.call_args_list, [call('/path/to/dest', mock_context, False)])
        # now with dest present, no selinux, also raise OSError when using
        # os.getlogin() to test corner case with no tty
        _os_path_exists.side_effect = [True, True]
        _os_getlogin.side_effect = OSError()
        _os_getuid.return_value = 0
        _pwd_getpwuid.return_value = ('root', '', 0, 0, '', '', '')
        _os_rename.return_value = None
        _os_umask.side_effect = [18, 0]
        environ['LOGNAME'] = 'root'
        stat1 = MagicMock()
        stat1.st_mode = 0o0644
        stat1.st_uid = 0
        stat1.st_gid = 0
        _os_stat.side_effect = [stat1, ]
        am.selinux_enabled.return_value = False
        _os_chmod.reset_mock()
        _os_chown.reset_mock()
        am.set_context_if_different.reset_mock()
        am.atomic_move('/path/to/src', '/path/to/dest')
        _os_rename.assert_called_with(b'/path/to/src', b'/path/to/dest')
        # dest missing, selinux enabled
        _os_path_exists.side_effect = [True, True]
        _os_getlogin.return_value = 'root'
        _os_getuid.return_value = 0
        _pwd_getpwuid.return_value = ('root', '', 0, 0, '', '', '')
        _os_rename.return_value = None
        _os_umask.side_effect = [18, 0]
        stat1 = MagicMock()
        stat1.st_mode = 0o0644
        stat1.st_uid = 0
        stat1.st_gid = 0
        _os_stat.side_effect = [stat1, ]
        mock_context = MagicMock()
        am.selinux_context.return_value = mock_context
        am.selinux_enabled.return_value = True
        _os_chmod.reset_mock()
        _os_chown.reset_mock()
        am.set_context_if_different.reset_mock()
        am.selinux_default_context.reset_mock()
        am.atomic_move('/path/to/src', '/path/to/dest')
        _os_rename.assert_called_with(b'/path/to/src', b'/path/to/dest')
        self.assertEqual(am.selinux_context.call_args_list, [call('/path/to/dest')])
        self.assertEqual(am.set_context_if_different.call_args_list, [call('/path/to/dest', mock_context, False)])
        # now testing with exceptions raised
        # have os.stat raise OSError which is not EPERM
        _os_stat.side_effect = OSError()
        _os_path_exists.side_effect = [True, True]
        _os_getlogin.return_value = 'root'
        _os_getuid.return_value = 0
        _pwd_getpwuid.return_value = ('root', '', 0, 0, '', '', '')
        _os_rename.return_value = None
        _os_umask.side_effect = [18, 0]
        self.assertRaises(OSError, am.atomic_move, '/path/to/src', '/path/to/dest')
        # and now have os.stat return EPERM, which should not fail
        _os_stat.side_effect = OSError(errno.EPERM, 'testing os stat with EPERM')
        _os_path_exists.side_effect = [True, True]
        _os_getlogin.return_value = 'root'
        _os_getuid.return_value = 0
        _pwd_getpwuid.return_value = ('root', '', 0, 0, '', '', '')
        _os_rename.return_value = None
        _os_umask.side_effect = [18, 0]
        # FIXME: we don't assert anything here yet
        am.atomic_move('/path/to/src', '/path/to/dest')
        # now we test os.rename() raising errors...
        # first we test with a bad errno to verify it bombs out
        _os_path_exists.side_effect = [False, False]
        _os_getlogin.return_value = 'root'
        _os_getuid.return_value = 0
        _pwd_getpwuid.return_value = ('root', '', 0, 0, '', '', '')
        _os_umask.side_effect = [18, 0]
        _os_rename.side_effect = OSError(errno.EIO, 'failing with EIO')
        self.assertRaises(SystemExit, am.atomic_move, '/path/to/src', '/path/to/dest')
        # next we test with EPERM so it continues to the alternate code for moving
        # test with mkstemp raising an error first
        _os_path_exists.side_effect = [False, False]
        _os_getlogin.return_value = 'root'
        _os_getuid.return_value = 0
        _os_close.return_value = None
        _pwd_getpwuid.return_value = ('root', '', 0, 0, '', '', '')
        _os_umask.side_effect = [18, 0]
        _os_rename.side_effect = [OSError(errno.EPERM, 'failing with EPERM'), None]
        _tempfile_mkstemp.return_value = None
        _tempfile_mkstemp.side_effect = OSError()
        am.selinux_enabled.return_value = False
        self.assertRaises(SystemExit, am.atomic_move, '/path/to/src', '/path/to/dest')
        # then test with it creating a temp file
        _os_path_exists.side_effect = [False, False, False]
        _os_getlogin.return_value = 'root'
        _os_getuid.return_value = 0
        _pwd_getpwuid.return_value = ('root', '', 0, 0, '', '', '')
        _os_umask.side_effect = [18, 0]
        _os_rename.side_effect = [OSError(errno.EPERM, 'failing with EPERM'), None]
        mock_stat1 = MagicMock()
        mock_stat2 = MagicMock()
        mock_stat3 = MagicMock()
        _os_stat.return_value = [mock_stat1, mock_stat2, mock_stat3]
        _os_stat.side_effect = None
        _tempfile_mkstemp.return_value = (None, '/path/to/tempfile')
        _tempfile_mkstemp.side_effect = None
        am.selinux_enabled.return_value = False
        # FIXME: we don't assert anything here yet
        am.atomic_move('/path/to/src', '/path/to/dest')
        # same as above, but with selinux enabled
        _os_path_exists.side_effect = [False, False, False]
        _os_getlogin.return_value = 'root'
        _os_getuid.return_value = 0
        _pwd_getpwuid.return_value = ('root', '', 0, 0, '', '', '')
        _os_umask.side_effect = [18, 0]
        _os_rename.side_effect = [OSError(errno.EPERM, 'failing with EPERM'), None]
        _tempfile_mkstemp.return_value = (None, None)
        mock_context = MagicMock()
        am.selinux_default_context.return_value = mock_context
        am.selinux_enabled.return_value = True
        am.atomic_move('/path/to/src', '/path/to/dest')
    def test_module_utils_basic_ansible_module__symbolic_mode_to_octal(self):
        """Spot-check symbolic chmod strings (+, -, =) against their octal results."""
        from ansible.module_utils import basic
        basic._ANSIBLE_ARGS = None
        am = basic.AnsibleModule(
            argument_spec=dict(),
        )
        mock_stat = MagicMock()
        # FIXME: trying many more combinations here would be good
        # directory, give full perms to all, then one group at a time
        mock_stat.st_mode = 0o040000
        self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'a+rwx'), 0o0777)
        self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'u+rwx,g+rwx,o+rwx'), 0o0777)
        self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'o+rwx'), 0o0007)
        self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'g+rwx'), 0o0070)
        self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'u+rwx'), 0o0700)
        # same as above, but in reverse so removing permissions
        mock_stat.st_mode = 0o040777
        self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'a-rwx'), 0o0000)
        self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'u-rwx,g-rwx,o-rwx'), 0o0000)
        self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'o-rwx'), 0o0770)
        self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'g-rwx'), 0o0707)
        self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'u-rwx'), 0o0077)
        # now using absolute assignment
        mock_stat.st_mode = 0o040000
        self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'a=rwx'), 0o0777)
        self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'u=rwx,g=rwx,o=rwx'), 0o0777)
        self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'o=rwx'), 0o0007)
        self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'g=rwx'), 0o0070)
        self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'u=rwx'), 0o0700)
        # invalid modes
        mock_stat.st_mode = 0o040000
        self.assertRaises(ValueError, am._symbolic_mode_to_octal, mock_stat, 'a=foo')
| gpl-3.0 |
yhli365/YHiBench | src/sparkbench/src/main/python/IOCommon/IOCommon.py | 16 | 3270 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, re
class IOCommon(object):
    """Helper for loading/saving Spark RDDs in the configured sparkbench format.

    Format selection is driven by a properties file whose path comes from the
    SPARKBENCH_PROPERTIES_FILES environment variable (see
    getPropertiesFromFile); the parsed properties are shared by all instances
    through the class-level ``conf`` dict.
    """

    # Class-level property store, populated by getPropertiesFromFile().
    conf = {}

    def __init__(self, sc):
        # sc: the SparkContext used for all load/save operations.
        self.sc = sc

    @classmethod
    def getProperty(cls, key, default=None):
        """Return the configured value for ``key``, or ``default`` if unset."""
        return cls.conf.get(key, default)

    @classmethod
    def getPropertiesFromFile(cls):
        """(Re)load ``cls.conf`` from the SPARKBENCH_PROPERTIES_FILES file.

        Each non-comment, non-blank line is split on the first whitespace
        character into a (key, value) pair; a key with no value maps to ''.
        """
        def split(x):
            # Split "key value" on the first whitespace character only.
            # (raw string: "\s" was an invalid/deprecated escape sequence)
            ret = re.split(r"\s", x.strip(), 1)
            if len(ret) < 2:
                return (ret[0], '')
            return tuple(ret)
        prop_file = os.environ.get("SPARKBENCH_PROPERTIES_FILES", None)
        assert prop_file, "SPARKBENCH_PROPERTIES_FILES undefined!"
        with open(prop_file) as f:
            cls.conf = dict([split(x.strip()) for x in f.readlines()
                             if x.strip() and x.strip()[0] != "#"])

    def load(self, filename, force_format=None):
        """Load ``filename`` as an RDD of strings.

        ``force_format`` overrides the configured ``sparkbench.inputformat``
        ("Text" or "Sequence"); raises on any other format name.
        """
        input_format = force_format if force_format else IOCommon.getProperty("sparkbench.inputformat", "Text")
        if input_format == "Text":
            return self.sc.textFile(filename)
        elif input_format == "Sequence":
            # Sequence files are (NullWritable, Text) pairs; keep the values.
            return self.sc.sequenceFile(filename, "org.apache.hadoop.io.NullWritable, org.apache.hadoop.io.Text")\
                          .map(lambda x: x[1])
        else:
            raise Exception("Unknown input format: %s" % input_format)

    def save(self, filename, data, PropPrefix="sparkbench.outputformat"):
        """Save the RDD ``data`` to ``filename``.

        The output format ("Text" or "Sequence") and optional compression
        codec are looked up under ``PropPrefix`` / ``PropPrefix + ".codec"``.
        (Bug fixes: the first parameter was misspelled ``selfself``, and the
        Sequence branch saved the raw ``data`` instead of the keyed
        ``sequence_data`` it had just built.)
        """
        output_format = IOCommon.getProperty(PropPrefix, "Text")
        output_format_codec = IOCommon.getProperty(PropPrefix + ".codec")
        if output_format == "Text":
            if not output_format_codec:  # isEmpty
                data.saveAsTextFile(filename)
            else:
                # Codec-compressed text output is unsupported in the Python API.
                print("Warning, save as text file with a format codec is unsupported in python api")
                data.saveAsTextFile(filename)
                #data.saveAsTextFile(filename, output_format_codec)
        elif output_format == "Sequence":
            # Sequence output needs (key, value) pairs; use a null key so the
            # values line up with the NullWritable/Text classes below.
            sequence_data = data.map(lambda x: (None, x))
            if not output_format_codec:  # isEmpty
                sequence_data.saveAsHadoopFile(filename, "org.apache.hadoop.mapred.SequenceFileOutputFormat",
                                               "org.apache.hadoop.io.NullWritable", "org.apache.hadoop.io.Text")
            else:
                sequence_data.saveAsHadoopFile(filename, "org.apache.hadoop.mapred.SequenceFileOutputFormat",
                                               "org.apache.hadoop.io.NullWritable", "org.apache.hadoop.io.Text",
                                               compressionCodecClass=output_format_codec)
IOCommon.getPropertiesFromFile() | apache-2.0 |
soldag/home-assistant | tests/components/accuweather/test_init.py | 8 | 1796 | """Test init of AccuWeather integration."""
from homeassistant.components.accuweather.const import DOMAIN
from homeassistant.config_entries import (
ENTRY_STATE_LOADED,
ENTRY_STATE_NOT_LOADED,
ENTRY_STATE_SETUP_RETRY,
)
from homeassistant.const import STATE_UNAVAILABLE
from tests.async_mock import patch
from tests.common import MockConfigEntry
from tests.components.accuweather import init_integration
async def test_async_setup_entry(hass):
    """Test a successful setup entry."""
    await init_integration(hass)

    weather = hass.states.get("weather.home")
    assert weather is not None
    assert weather.state != STATE_UNAVAILABLE
    assert weather.state == "sunny"
async def test_config_not_ready(hass):
    """Test for setup failure if connection to AccuWeather is missing."""
    config_data = {
        "api_key": "32-character-string-1234567890qw",
        "latitude": 55.55,
        "longitude": 122.12,
        "name": "Home",
    }
    entry = MockConfigEntry(
        domain=DOMAIN, title="Home", unique_id="0123456", data=config_data
    )
    entry.add_to_hass(hass)

    # Simulate an unreachable AccuWeather API during setup.
    with patch(
        "homeassistant.components.accuweather.AccuWeather._async_get_data",
        side_effect=ConnectionError(),
    ):
        await hass.config_entries.async_setup(entry.entry_id)

    assert entry.state == ENTRY_STATE_SETUP_RETRY
async def test_unload_entry(hass):
    """Test successful unload of entry."""
    entry = await init_integration(hass)

    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
    assert entry.state == ENTRY_STATE_LOADED

    unloaded = await hass.config_entries.async_unload(entry.entry_id)
    await hass.async_block_till_done()

    assert unloaded
    assert entry.state == ENTRY_STATE_NOT_LOADED
    assert not hass.data.get(DOMAIN)
| apache-2.0 |
juanantoniofm/accesible-moodle | fabtools/system.py | 1 | 6823 | """
System settings
===============
"""
from __future__ import with_statement
from fabric.api import hide, run, settings
from fabtools.files import is_file
from fabtools.utils import run_as_root
class UnsupportedFamily(Exception):
    """
    Raised when an operation is not supported on this system family.

    ::

        from fabtools.system import UnsupportedFamily, distrib_family

        family = distrib_family()
        if family == 'debian':
            do_some_stuff()
        elif family == 'redhat':
            do_other_stuff()
        else:
            raise UnsupportedFamily(supported=['debian', 'redhat'])

    """

    def __init__(self, supported):
        self.supported = supported
        # Detect the remote distribution for the error message.
        self.distrib = distrib_id()
        super(UnsupportedFamily, self).__init__(
            "Unsupported system %s (supported families: %s)"
            % (self.distrib, ', '.join(supported)))
def distrib_id():
    """
    Identify the OS distribution of the remote host.

    Returns a string such as ``"Debian"``, ``"Ubuntu"``, ``"RHEL"``,
    ``"CentOS"``, ``"SLES"``, ``"Fedora"``, ``"Archlinux"``, ``"Gentoo"``,
    ``"SunOS"``...

    Example::

        from fabtools.system import distrib_id

        if distrib_id() != 'Debian':
            abort(u"Distribution is not supported")

    """
    with settings(hide('running', 'stdout')):
        kernel = run('uname -s')
        if kernel == 'SunOS':
            return 'SunOS'
        if kernel == 'Linux':
            # lsb_release works on Ubuntu and Debian >= 6.0, but is not
            # always shipped on other distros (e.g. Gentoo).
            if is_file('/usr/bin/lsb_release'):
                return run('lsb_release --id --short')
            # Fall back to well-known release marker files, in priority order.
            for marker, name in (('/etc/debian_version', 'Debian'),
                                 ('/etc/fedora-release', 'Fedora'),
                                 ('/etc/arch-release', 'Archlinux')):
                if is_file(marker):
                    return name
            if is_file('/etc/redhat-release'):
                release = run('cat /etc/redhat-release')
                if release.startswith('Red Hat Enterprise Linux'):
                    return 'RHEL'
                if release.startswith('CentOS'):
                    return 'CentOS'
                # NOTE(review): Scientific Linux is reported as 'SLES' here,
                # mirroring the original mapping — looks odd but preserved.
                if release.startswith('Scientific Linux'):
                    return 'SLES'
            elif is_file('/etc/gentoo-release'):
                return 'Gentoo'
def distrib_release():
    """
    Return the release number of the distribution.

    Example::

        from fabtools.system import distrib_id, distrib_release

        if distrib_id() == 'CentOS' and distrib_release() == '6.1':
            print(u"CentOS 6.2 has been released. Please upgrade.")

    """
    with settings(hide('running', 'stdout')):
        kernel = run('uname -s')
        if kernel == 'SunOS':
            return run('uname -v')
        if kernel == 'Linux':
            return run('lsb_release -r --short')
def distrib_codename():
    """
    Return the codename of the Linux distribution.

    Example::

        from fabtools.deb import distrib_codename

        if distrib_codename() == 'precise':
            print(u"Ubuntu 12.04 LTS detected")

    """
    with settings(hide('running', 'stdout')):
        codename = run('lsb_release --codename --short')
    return codename
def distrib_desc():
    """
    Return a description of the Linux distribution,
    e.g. ``Debian GNU/Linux 6.0.7 (squeeze)``.
    """
    with settings(hide('running', 'stdout')):
        # Red Hat family systems may lack lsb_release; read the file directly.
        if is_file('/etc/redhat-release'):
            return run('cat /etc/redhat-release')
        return run('lsb_release --desc --short')
def distrib_family():
    """
    Return the distribution family.

    One of ``debian``, ``redhat``, ``arch``, ``gentoo``, ``sun``, ``other``.
    """
    # Map each known distribution id to its family; anything else is 'other'.
    families = {
        'Debian': 'debian',
        'Ubuntu': 'debian',
        'LinuxMint': 'debian',
        'RHEL': 'redhat',
        'CentOS': 'redhat',
        'SLES': 'redhat',
        'Fedora': 'redhat',
        'SunOS': 'sun',
        'Gentoo': 'gentoo',
        'Archlinux': 'arch',
    }
    return families.get(distrib_id(), 'other')
def get_hostname():
    """
    Return the fully qualified hostname of the remote host.
    """
    with settings(hide('running', 'stdout')):
        fqdn = run('hostname --fqdn')
    return fqdn
def set_hostname(hostname, persist=True):
    """
    Set the hostname, optionally persisting it to /etc/hostname.
    """
    run_as_root('hostname {0}'.format(hostname))
    if persist:
        run_as_root('echo {0} >/etc/hostname'.format(hostname))
def get_sysctl(key):
    """
    Read a kernel parameter.

    Example::

        from fabtools.system import get_sysctl

        print "Max number of open files:", get_sysctl('fs.file-max')

    """
    with settings(hide('running', 'stdout')):
        return run_as_root('/sbin/sysctl -n -e %s' % key)
def set_sysctl(key, value):
    """
    Set a kernel parameter.

    Example::

        import fabtools

        # Protect from SYN flooding attack
        fabtools.system.set_sysctl('net.ipv4.tcp_syncookies', 1)

    """
    run_as_root('/sbin/sysctl -n -e -w %s=%s' % (key, value))
def supported_locales():
    """
    Return the list of supported locales, each as a
    ``[locale, charset]`` pair.
    """
    with settings(hide('running', 'stdout')):
        # Arch keeps the list in /etc/locale.gen; other distros ship it
        # with the i18n data.
        if distrib_id() == "Archlinux":
            listing = run("cat /etc/locale.gen")
        else:
            listing = run('cat /usr/share/i18n/SUPPORTED')
    return [line.strip().split(' ') for line in listing.splitlines()
            if not line.startswith('#')]
def get_arch():
    """
    Return the CPU architecture of the remote host.

    Example::

        from fabtools.system import get_arch

        if get_arch() == 'x86_64':
            print(u"Running on a 64-bit Intel/AMD system")

    """
    with settings(hide('running', 'stdout')):
        return run('uname -m')
def cpus():
    """
    Return the number of CPU cores on the remote host.

    Example::

        from fabtools.system import cpus

        nb_workers = 2 * cpus() + 1

    """
    with settings(hide('running', 'stdout')):
        output = run('python -c "import multiprocessing; '
                     'print(multiprocessing.cpu_count())"')
    return int(output)
def using_systemd():
    """
    Return True if the remote host uses systemd.

    Example::

        from fabtools.system import using_systemd

        if using_systemd():
            # do stuff with fabtools.systemd ...
            pass

    """
    result = run('which systemctl', quiet=True)
    return result.succeeded
def time():
    """
    Return the current time on the remote host, in seconds since the
    Epoch.  Same as :py:func:`time.time()`.
    """
    with settings(hide('running', 'stdout')):
        seconds = run('date +%s')
    return int(seconds)
| gpl-2.0 |
majidaldo/ansible-modules-core | network/basics/get_url.py | 23 | 14185 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# see examples/playbooks/get_url.yml
import shutil
import datetime
import re
import tempfile
DOCUMENTATION = '''
---
module: get_url
short_description: Downloads files from HTTP, HTTPS, or FTP to node
description:
- Downloads files from HTTP, HTTPS, or FTP to the remote server. The remote
server I(must) have direct access to the remote resource.
- By default, if an environment variable C(<protocol>_proxy) is set on
the target host, requests will be sent through that proxy. This
behaviour can be overridden by setting a variable for this task
(see `setting the environment
<http://docs.ansible.com/playbooks_environment.html>`_),
or by using the use_proxy option.
- HTTP redirects can redirect from HTTP to HTTPS so you should be sure that
your proxy environment for both protocols is correct.
version_added: "0.6"
options:
url:
description:
- HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
required: true
dest:
description:
- absolute path of where to download the file to.
- If C(dest) is a directory, either the server provided filename or, if
none provided, the base name of the URL on the remote server will be
used. If a directory, C(force) has no effect.
If C(dest) is a directory, the file will always be
downloaded (regardless of the force option), but replaced only if the contents changed.
required: true
force:
description:
- If C(yes) and C(dest) is not a directory, will download the file every
time and replace the file if the contents change. If C(no), the file
will only be downloaded if the destination does not exist. Generally
should be C(yes) only for small local files. Prior to 0.6, this module
behaved as if C(yes) was the default.
version_added: "0.7"
required: false
choices: [ "yes", "no" ]
default: "no"
aliases: [ "thirsty" ]
sha256sum:
description:
- If a SHA-256 checksum is passed to this parameter, the digest of the
destination file will be calculated after it is downloaded to ensure
its integrity and verify that the transfer completed successfully.
This option is deprecated. Use 'checksum'.
version_added: "1.3"
required: false
default: null
checksum:
description:
- 'If a checksum is passed to this parameter, the digest of the
destination file will be calculated after it is downloaded to ensure
its integrity and verify that the transfer completed successfully.
Format: <algorithm>:<checksum>, e.g.: checksum="sha256:d98291acbedd510e3dbd36dbfdd83cbca8415220af43b327c0a0c574b6dc7b97"
If you worry about portability, only the sha1 algorithm is available
on all platforms and python versions. The third party hashlib
library can be installed for access to additional algorithms.'
version_added: "2.0"
required: false
default: null
use_proxy:
description:
- if C(no), it will not use a proxy, even if one is defined in
an environment variable on the target hosts.
required: false
default: 'yes'
choices: ['yes', 'no']
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
timeout:
description:
- Timeout for URL request
required: false
default: 10
version_added: '1.8'
headers:
description:
- 'Add custom HTTP headers to a request in the format "key:value,key:value"'
required: false
default: null
version_added: '2.0'
url_username:
description:
- The username for use in HTTP basic authentication. This parameter can be used
without C(url_password) for sites that allow empty passwords.
required: false
version_added: '1.6'
url_password:
description:
- The password for use in HTTP basic authentication. If the C(url_username)
parameter is not specified, the C(url_password) parameter will not be used.
required: false
version_added: '1.6'
force_basic_auth:
version_added: '2.0'
description:
- httplib2, the library used by the uri module only sends authentication information when a webservice
responds to an initial request with a 401 status. Since some basic auth services do not properly
send a 401, logins will fail. This option forces the sending of the Basic authentication header
upon initial request.
required: false
choices: [ "yes", "no" ]
default: "no"
others:
description:
- all arguments accepted by the M(file) module also work here
required: false
# informational: requirements for nodes
requirements: [ ]
author: "Jan-Piet Mens (@jpmens)"
'''
EXAMPLES='''
- name: download foo.conf
get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf mode=0440
- name: download file and force basic auth
get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf force_basic_auth=yes
- name: download file with custom HTTP headers
get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf headers: 'key:value,key:value'
- name: download file with check
get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf checksum=sha256:b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c
get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf checksum=md5:66dffb5228a211e61d6d7ef4a86f5758
'''
import urlparse
# ==============================================================
# url handling
def url_filename(url):
    """Return the base filename of the URL's path, or 'index.html' if the
    path has no filename component."""
    path = urlparse.urlsplit(url)[2]
    name = os.path.basename(path)
    return name or 'index.html'
def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, headers=None):
    """
    Download data from the url and store in a temporary file.

    Return (tempfile, info about the request)
    """
    rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, headers=headers)
    # 304 Not Modified: nothing to download; report "no change" and exit.
    if info['status'] == 304:
        module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', ''))
    # create a temporary file and copy content to do checksum-based replacement
    # Any status other than 200 at this point is a hard failure.
    if info['status'] != 200:
        module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], url=url, dest=dest)
    fd, tempname = tempfile.mkstemp()
    f = os.fdopen(fd, 'wb')
    try:
        shutil.copyfileobj(rsp, f)
    except Exception, err:
        # Clean up the temp file before failing so nothing is left behind.
        os.remove(tempname)
        module.fail_json(msg="failed to create temporary content file: %s" % str(err))
    f.close()
    rsp.close()
    return tempname, info
def extract_filename_from_headers(headers):
    """
    Extract a filename from the given dict of HTTP headers, using the
    content-disposition header.  Return the filename, or None if it
    cannot be determined.
    """
    pattern = 'attachment; ?filename="?([^"]+)'
    cont_disp = headers.get('content-disposition')
    if cont_disp is None:
        return None
    match = re.match(pattern, cont_disp)
    if not match:
        return None
    # basename() guards against path components in the served filename.
    return os.path.basename(match.group(1))
# ==============================================================
# main
def main():
    """Module entry point: download the URL to dest, honoring the
    force/checksum semantics, apply file attributes, and report results."""
    argument_spec = url_argument_spec()

    # Extend the shared URL options with get_url-specific arguments.
    argument_spec.update(
        url = dict(required=True),
        dest = dict(required=True),
        sha256sum = dict(default=''),
        checksum = dict(default=''),
        timeout = dict(required=False, type='int', default=10),
        headers = dict(required=False, default=None),
    )

    module = AnsibleModule(
        # not checking because of daisy chain to file module
        argument_spec = argument_spec,
        add_file_common_args=True
    )

    url = module.params['url']
    dest = os.path.expanduser(module.params['dest'])
    force = module.params['force']
    sha256sum = module.params['sha256sum']
    checksum = module.params['checksum']
    use_proxy = module.params['use_proxy']
    timeout = module.params['timeout']

    # Parse headers to dict (input format is "key:value,key:value").
    if module.params['headers']:
        try:
            headers = dict(item.split(':') for item in module.params['headers'].split(','))
        except:
            module.fail_json(msg="The header parameter requires a key:value,key:value syntax to be properly parsed.")
    else:
        headers = None

    dest_is_dir = os.path.isdir(dest)
    last_mod_time = None

    # workaround for usage of deprecated sha256sum parameter
    if sha256sum != '':
        checksum = 'sha256:%s' % (sha256sum)

    # checksum specified, parse for algorithm and checksum
    if checksum != '':
        try:
            algorithm, checksum = checksum.rsplit(':', 1)
            # Remove any non-alphanumeric characters, including the infamous
            # Unicode zero-width space
            checksum = re.sub(r'\W+', '', checksum).lower()
            # Ensure the checksum portion is a hexdigest
            int(checksum, 16)
        except ValueError:
            module.fail_json(msg="The checksum parameter has to be in format <algorithm>:<checksum>")

    if not dest_is_dir and os.path.exists(dest):
        checksum_mismatch = False

        # If the download is not forced and there is a checksum, allow
        # checksum match to skip the download.
        if not force and checksum != '':
            destination_checksum = module.digest_from_file(dest, algorithm)

            if checksum == destination_checksum:
                module.exit_json(msg="file already exists", dest=dest, url=url, changed=False)

            checksum_mismatch = True

        # Not forcing redownload, unless checksum does not match
        if not force and not checksum_mismatch:
            module.exit_json(msg="file already exists", dest=dest, url=url, changed=False)

        # If the file already exists, prepare the last modified time for the
        # request (enables a 304 Not Modified short-circuit in url_get).
        mtime = os.path.getmtime(dest)
        last_mod_time = datetime.datetime.utcfromtimestamp(mtime)

    # download to tmpsrc
    tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers)

    # Now the request has completed, we can finally generate the final
    # destination file name from the info dict.
    if dest_is_dir:
        filename = extract_filename_from_headers(info)
        if not filename:
            # Fall back to extracting the filename from the URL.
            # Pluck the URL from the info, since a redirect could have changed
            # it.
            filename = url_filename(info['url'])
        dest = os.path.join(dest, filename)

    checksum_src = None
    checksum_dest = None

    # raise an error if there is no tmpsrc file
    if not os.path.exists(tmpsrc):
        os.remove(tmpsrc)
        module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'])
    if not os.access(tmpsrc, os.R_OK):
        os.remove(tmpsrc)
        module.fail_json( msg="Source %s not readable" % (tmpsrc))
    checksum_src = module.sha1(tmpsrc)

    # check if there is no dest file
    if os.path.exists(dest):
        # raise an error if copy has no permission on dest
        if not os.access(dest, os.W_OK):
            os.remove(tmpsrc)
            module.fail_json( msg="Destination %s not writable" % (dest))
        if not os.access(dest, os.R_OK):
            os.remove(tmpsrc)
            module.fail_json( msg="Destination %s not readable" % (dest))
        checksum_dest = module.sha1(dest)
    else:
        # Destination does not exist yet; its parent directory must be writable.
        if not os.access(os.path.dirname(dest), os.W_OK):
            os.remove(tmpsrc)
            module.fail_json( msg="Destination %s not writable" % (os.path.dirname(dest)))

    # Only replace the destination when the downloaded content differs.
    if checksum_src != checksum_dest:
        try:
            shutil.copyfile(tmpsrc, dest)
        except Exception, err:
            os.remove(tmpsrc)
            module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err)))
        changed = True
    else:
        changed = False

    # Verify the final file against the requested checksum, if any.
    if checksum != '':
        destination_checksum = module.digest_from_file(dest, algorithm)

        if checksum != destination_checksum:
            os.remove(dest)
            module.fail_json(msg="The checksum for %s did not match %s; it was %s." % (dest, checksum, destination_checksum))

    os.remove(tmpsrc)

    # allow file attribute changes
    module.params['path'] = dest
    file_args = module.load_file_common_arguments(module.params)
    file_args['path'] = dest
    changed = module.set_fs_attributes_if_different(file_args, changed)

    # Backwards compat only.  We'll return None on FIPS enabled systems
    try:
        md5sum = module.md5(dest)
    except ValueError:
        md5sum = None

    # Mission complete
    module.exit_json(url=url, dest=dest, src=tmpsrc, md5sum=md5sum, checksum_src=checksum_src,
        checksum_dest=checksum_dest, changed=changed, msg=info.get('msg', ''))
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 |
T2DREAM/t2dream-portal | src/encoded/tests/test_upgrade_genetic_modification.py | 1 | 3897 | import pytest
@pytest.fixture
def genetic_modification_1(lab, award):
    """Schema v1 payload.  'modifiction_description' is the historical,
    misspelled field name that the 1->2 upgrade renames (see
    test_genetic_modification_upgrade_1_2)."""
    return dict(
        modification_type='deletion',
        award=award['uuid'],
        lab=lab['uuid'],
        modifiction_description='some description',
    )
@pytest.fixture
def genetic_modification_2(lab, award):
    """Schema v2 payload carrying the 'modification_'-prefixed fields that
    the 2->3 upgrade renames or removes."""
    coordinates = dict(chromosome='11', start=5309435, end=5309451)
    return dict(
        modification_type='deletion',
        award=award['uuid'],
        lab=lab['uuid'],
        modification_description='some description',
        modification_zygocity='homozygous',
        modification_purpose='tagging',
        modification_treatments=[],
        modification_genome_coordinates=[coordinates],
    )
@pytest.fixture
def crispr(lab, award, source):
    """Pre-resolved Crispr modification-technique object (including @id,
    @type and uuid), as embedded into genetic_modification_5."""
    technique = {
        'lab': lab['uuid'],
        'award': award['uuid'],
        'source': source['uuid'],
        'guide_rna_sequences': [
            "ACA",
            "GCG"
        ],
        'insert_sequence': 'TCGA',
        'aliases': ['encode:crispr_technique1'],
    }
    # JSON-LD bookkeeping fields (keys are not valid identifiers).
    technique['@type'] = ['Crispr', 'ModificationTechnique', 'Item']
    technique['@id'] = '/crisprs/79c1ec08-c878-4419-8dba-66aa4eca156b/'
    technique['uuid'] = '79c1ec08-c878-4419-8dba-66aa4eca156b'
    return technique
@pytest.fixture
def genetic_modification_5(lab, award, crispr):
    """Schema v5 payload using a modification_techniques list; exercised by
    the (currently disabled) 5->6 upgrade test."""
    return dict(
        modification_type='deletion',
        award=award['uuid'],
        lab=lab['uuid'],
        description='blah blah description blah',
        zygosity='homozygous',
        treatments=[],
        source='sigma',
        product_id='12345',
        modification_techniques=[crispr],
        modified_site=[dict(
            assembly='GRCh38',
            chromosome='11',
            start=5309435,
            end=5309451,
        )],
    )
def test_genetic_modification_upgrade_1_2(upgrader, genetic_modification_1):
    """The 1->2 upgrade renames the misspelled description field."""
    upgraded = upgrader.upgrade('genetic_modification', genetic_modification_1,
                                current_version='1', target_version='2')
    assert upgraded['schema_version'] == '2'
    assert upgraded.get('modification_description') == 'some description'
def test_genetic_modification_upgrade_2_3(upgrader, genetic_modification_2):
    """The 2->3 upgrade renames the 'modification_'-prefixed fields and
    drops the coordinates/treatments properties."""
    upgraded = upgrader.upgrade('genetic_modification', genetic_modification_2,
                                current_version='2', target_version='3')
    assert upgraded['schema_version'] == '3'
    assert upgraded.get('description') == 'some description'
    assert upgraded.get('zygosity') == 'homozygous'
    assert upgraded.get('purpose') == 'tagging'
    assert 'modification_genome_coordinates' not in upgraded
    assert 'modification_treatments' not in upgraded
'''
Commented this test out because the linked technique objects are not embedded for the upgrade
but are for the test so it fails when it's trying to resolve the linked object by UUID. In
the former case, it's a link, in the latter case it's the embedded object. I can make the test
work but then the upgrade doesn't do what it should do.
def test_genetic_modification_upgrade_5_6(upgrader, genetic_modification_5, crispr, registry):
value = upgrader.upgrade('genetic_modification', genetic_modification_5, registry=registry,
current_version='5', target_version='6')
assert value['schema_version'] == '6'
assert 'modification_techniques' not in value
assert value['method'] == 'CRISPR'
assert 'modified_site' not in value
assert 'target' not in value
assert 'purpose' in value
assert value['purpose'] == 'analysis'
assert len(value['guide_rna_sequences']) == 2
assert value['aliases'][0] == 'encode:crispr_technique1-CRISPR'
assert value['introduced_sequence'] == 'TCGA'
assert 'reagents' in value
assert value['reagents'][0]['source'] == 'sigma'
assert value['reagents'][0]['identifier'] == '12345'
'''
| mit |
rwl/godot | godot/ui/graph_view_model.py | 1 | 17340 | #------------------------------------------------------------------------------
# Copyright (c) 2008 Richard W. Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#------------------------------------------------------------------------------
""" Defines a view model for Graphs. """
#------------------------------------------------------------------------------
# Imports:
#------------------------------------------------------------------------------
import sys
from os.path import join, dirname, expanduser, isfile
import logging
import pickle
from enthought.traits.api import \
HasTraits, Instance, File, Bool, Str, List, on_trait_change, \
Float, Tuple, Property, Delegate, Code, Button
from enthought.traits.ui.api import \
View, Handler, UIInfo, Group, Item, TableEditor, InstanceEditor, \
Label, Tabbed, HGroup, VGroup, ModelView, FileEditor, StatusItem, \
spring, TextEditor
from enthought.traits.ui.menu import NoButtons, OKCancelButtons, Separator
from enthought.pyface.api import error, confirm, YES, FileDialog, OK
from enthought.pyface.image_resource import ImageResource
from enthought.naming.unique_name import make_unique_name
from enthought.logger.api import add_log_queue_handler
from enthought.logger.log_queue_handler import LogQueueHandler
#------------------------------------------------------------------------------
# Local imports:
#------------------------------------------------------------------------------
from godot.base_graph import BaseGraph
from godot.api import Graph, Cluster, Node, Edge, GodotDataParser, Subgraph
from godot.ui.graph_menu import menubar, toolbar
from godot.ui.graph_view import nodes_view, edges_view, attr_view, about_view
from godot.ui.graph_tree import graph_tree_editor
#------------------------------------------------------------------------------
# Constants:
#------------------------------------------------------------------------------
frame_icon = ImageResource("dot.ico")
#------------------------------------------------------------------------------
# "GraphViewModel" class:
#------------------------------------------------------------------------------
class GraphViewModel(ModelView):
    """ Defines a view model for Graphs. """

    #--------------------------------------------------------------------------
    #  Trait definitions:
    #--------------------------------------------------------------------------

    # File path to use for saving.
    save_file = File

    # Is the tree view of the network displayed?
    show_tree = Bool(True, desc="that the network tree view is visible")

    # All graphs, subgraphs and clusters.
    # all_graphs = Property(List(Instance(HasTraits)))
    all_graphs = Delegate("model")

    # Select graph when adding to the graph?
    select_graph = Bool(True)

    # Working graph instance.
    selected_graph = Instance(BaseGraph, allow_none=False)

    # Exit confirmation.
    prompt_on_exit = Bool(False, desc="exit confirmation request")

    # Representation of the graph in the Dot language.
    dot_code = Code

    # Parse the dot_code and replace the existing model.
    parse_dot_code = Button("Parse", desc="dot code parsing action that "
                            "replaces the existing model.")

    #--------------------------------------------------------------------------
    #  Views:
    #--------------------------------------------------------------------------

    # Default model view.
    traits_view = View(
        HGroup(
            Item(
                name="model", editor=graph_tree_editor,
                show_label=False, id=".tree_editor",
                visible_when="show_tree==True", width=.14
            ),
            Item("model", show_label=False),
        ),
        id="graph_view_model.graph_view", title="Godot", icon=frame_icon,
        resizable=True, style="custom", width=.81, height=.81, kind="live",
        buttons=NoButtons, menubar=menubar,
        # toolbar=toolbar,
        dock="vertical",
        # statusbar=[StatusItem(name="status", width=0.5),
        #            StatusItem(name="versions", width=200)]
    )

    # File selection view.
    # file_view = View(
    #     Item(name="file", id="file"),#, editor=FileEditor(entries=6)),
    #     id="graph_view_model.file_view", title="Select a file",
    #     icon=frame_icon, resizable=True, width=.3, kind="livemodal",
    #     buttons=OKCancelButtons
    # )

    # Graph selection view.
    all_graphs_view = View(
        Item(name = "selected_graph",
             editor = InstanceEditor( name = "all_graphs",
                                      editable = False),
             label = "Graph"),
        Item("select_graph", label="Always ask?"),
        icon = frame_icon, kind = "livemodal", title = "Select a graph",
        buttons = OKCancelButtons, close_result = False
    )

    # Model view options view.
    options_view = View(
        Item("prompt_on_exit"),
        "_",
        Item("select_graph"),
        Item("selected_graph",
             enabled_when = "not select_graph",
             editor = InstanceEditor( name = "all_graphs",
                                      editable = False ),
             label = "Graph" ),
        icon = frame_icon, kind = "livemodal", title = "Options",
        buttons = OKCancelButtons, close_result = True
    )

    # Text representation of the graph viewed in a text editor
    dot_code_view = View(
        Item("dot_code", show_label=False, style="custom"),
        Item("parse_dot_code", show_label=False),
        id="godot.view_model.dot_code",
        icon = frame_icon, kind = "livemodal",
        title = "Dot Code", resizable = True,
        buttons = [], height = .3, width = .3
    )

    #--------------------------------------------------------------------------
    #  Trait intialisers:
    #--------------------------------------------------------------------------

    def _selected_graph_default(self):
        """ Trait intialiser.  The working graph defaults to the model root.
        """
        return self.model

    def _parse_dot_code_fired(self):
        """ Parses the dot_code string and replaces the existing model.
        """
        parser = GodotDataParser()
        graph = parser.parse_dot_data(self.dot_code)
        if graph is not None:
            self.model = graph

    #--------------------------------------------------------------------------
    #  Event handlers:
    #--------------------------------------------------------------------------

    def _model_changed(self, old, new):
        """ Handles the model changing.  Keeps the working graph in sync.
        """
        self.selected_graph = new

    #--------------------------------------------------------------------------
    #  Action handlers:
    #--------------------------------------------------------------------------

    def new_model(self, info):
        """ Handles the new Graph action. """
        if info.initialized:
            retval = confirm(parent  = info.ui.control,
                             message = "Replace existing graph?",
                             title   = "New Graph",
                             default = YES)
            if retval == YES:
                self.model = Graph()

    def open_file(self, info):
        """ Handles the open action. """
        if not info.initialized: return # Escape.

        # retval = self.edit_traits(parent=info.ui.control, view="file_view")
        dlg = FileDialog( action = "open",
                          wildcard = "Graphviz Files (*.dot, *.xdot, *.txt)|"
                                     "*.dot;*.xdot;*.txt|Dot Files (*.dot)|*.dot|"
                                     "All Files (*.*)|*.*|")
        if dlg.open() == OK:
            parser = GodotDataParser()
            model = parser.parse_dot_file(dlg.path)
            if model is not None:
                self.model = model
            else:
                # Parenthesized form works on both Python 2 and 3.
                print("error parsing: %s" % dlg.path)
            self.save_file = dlg.path
        del dlg

        # fd = None
        # try:
        #     fd = open(self.file, "rb")
        #     parser = DotParser()
        #     self.model = parser.parse_dot_file(self.file)
        ## except:
        ##     error(parent=info.ui.control, title="Load Error",
        ##           message="An error was encountered when loading\nfrom %s"
        ##           % self.file)
        # finally:
        #     if fd is not None:
        #         fd.close()

    def save(self, info):
        """ Handles saving the current model to the last file.
        """
        save_file = self.save_file
        if not isfile(save_file):
            self.save_as(info)
        else:
            fd = None
            try:
                fd = open(save_file, "wb")
                dot_code = str(self.model)
                fd.write(dot_code)
            finally:
                if fd is not None:
                    fd.close()

    def save_as(self, info):
        """ Handles saving the current model to file.
        """
        if not info.initialized:
            return

        # retval = self.edit_traits(parent=info.ui.control, view="file_view")
        dlg = FileDialog( action = "save as",
                          wildcard = "Graphviz Files (*.dot, *.xdot, *.txt)|" \
                                     "*.dot;*.xdot;*.txt|Dot Files (*.dot)|*.dot|" \
                                     "All Files (*.*)|*.*|")
        if dlg.open() == OK:
            fd = None
            try:
                fd = open(dlg.path, "wb")
                dot_code = str(self.model)
                fd.write(dot_code)
                self.save_file = dlg.path
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are not swallowed by the error dialog.
            except Exception:
                # BUG FIX: this formatted '% self.file', but the class has no
                # 'file' trait (only 'save_file'); report the dialog path.
                error(parent=info.ui.control, title="Save Error",
                      message="An error was encountered when saving\nto %s"
                      % dlg.path)
            finally:
                if fd is not None:
                    fd.close()
        del dlg

    def configure_graph(self, info):
        """ Handles display of the graph dot traits.
        """
        if info.initialized:
            self.model.edit_traits(parent=info.ui.control,
                                   kind="live", view=attr_view)

    def configure_nodes(self, info):
        """ Handles display of the nodes editor.
        """
        if info.initialized:
            self.model.edit_traits(parent=info.ui.control,
                                   kind="live", view=nodes_view)

    def configure_edges(self, info):
        """ Handles display of the edges editor.
        """
        if info.initialized:
            self.model.edit_traits(parent=info.ui.control,
                                   kind="live", view=edges_view)

    def about_godot(self, info):
        """ Handles displaying a view about Godot.
        """
        if info.initialized:
            self.edit_traits(parent=info.ui.control,
                             kind="livemodal", view=about_view)

    def add_node(self, info):
        """ Handles adding a Node to the graph.
        """
        if not info.initialized:
            return

        graph = self._request_graph(info.ui.control)
        if graph is None:
            return

        IDs = [v.ID for v in graph.nodes]
        node = Node(ID=make_unique_name("node", IDs))
        graph.nodes.append(node)

        retval = node.edit_traits(parent=info.ui.control, kind="livemodal")
        # Roll back the append if the user cancels the editor.
        if not retval.result:
            graph.nodes.remove(node)

    def add_edge(self, info):
        """ Handles adding an Edge to the graph.
        """
        if not info.initialized:
            return

        graph = self._request_graph(info.ui.control)
        if graph is None:
            return

        # Ensure at least two endpoint nodes exist for the new edge.
        n_nodes = len(graph.nodes)
        IDs = [v.ID for v in graph.nodes]
        if n_nodes == 0:
            tail_node = Node(ID=make_unique_name("node", IDs))
            head_name = make_unique_name("node", IDs + [tail_node.ID])
            head_node = Node(ID=head_name)
        elif n_nodes == 1:
            tail_node = graph.nodes[0]
            head_node = Node(ID=make_unique_name("node", IDs))
        else:
            tail_node = graph.nodes[0]
            head_node = graph.nodes[1]

        edge = Edge(tail_node, head_node, _nodes=graph.nodes)

        retval = edge.edit_traits(parent=info.ui.control, kind="livemodal")
        if retval.result:
            graph.edges.append(edge)

    def add_subgraph(self, info):
        """ Handles adding a Subgraph to the main graph.
        """
        if not info.initialized:
            return

        graph = self._request_graph(info.ui.control)
        if graph is not None:
            subgraph = Subgraph()#root=graph, parent=graph)
            retval = subgraph.edit_traits(parent = info.ui.control,
                                          kind = "livemodal")
            if retval.result:
                graph.subgraphs.append(subgraph)

    def add_cluster(self, info):
        """ Handles adding a Cluster to the main graph. """
        if not info.initialized:
            return

        graph = self._request_graph(info.ui.control)
        if graph is not None:
            cluster = Cluster()#root=graph, parent=graph)
            retval = cluster.edit_traits(parent = info.ui.control,
                                         kind = "livemodal")
            if retval.result:
                graph.clusters.append(cluster)

    def _request_graph(self, parent=None):
        """ Displays a dialog for graph selection if more than one exists.
            Returns None if the dialog is canceled.
        """
        if (len(self.all_graphs) > 1) and (self.select_graph):
            retval = self.edit_traits(parent = parent,
                                      view = "all_graphs_view")
            if not retval.result:
                return None
        if self.selected_graph is not None:
            return self.selected_graph
        else:
            return self.model

    def toggle_tree(self, info):
        """ Handles displaying the tree view """
        if info.initialized:
            self.show_tree = not self.show_tree

    def godot_options(self, info):
        """ Handles display of the options menu. """
        if info.initialized:
            self.edit_traits( parent = info.ui.control,
                              kind = "livemodal",
                              view = "options_view" )

    def configure_dot_code(self, info):
        """ Handles display of the dot code in a text editor.
        """
        if not info.initialized:
            return

        self.dot_code = str(self.model)
        retval = self.edit_traits( parent = info.ui.control,
                                   kind = "livemodal",
                                   view = "dot_code_view" )
        # if retval.result:
        #     parser = DotParser()
        #     graph = parser.parse_dot_data(self.dot_code)
        #     if graph is not None:
        #         self.model = graph

    #---------------------------------------------------------------------------
    #  Handle the user attempting to exit Godot:
    #---------------------------------------------------------------------------

    def on_exit(self, info):
        """ Handles the user attempting to exit Godot.
        """
        if self.prompt_on_exit:# and (not is_ok):
            retval = confirm(parent  = info.ui.control,
                             message = "Exit Godot?",
                             title   = "Confirm exit",
                             default = YES)
            if retval == YES:
                self._on_close( info )
        else:
            self._on_close( info )
#------------------------------------------------------------------------------
# Stand-alone call:
#------------------------------------------------------------------------------
if __name__ == "__main__":
    import sys

    # Verbose logging to stdout for interactive runs.
    root_logger = logging.getLogger()
    root_logger.addHandler(logging.StreamHandler(sys.stdout))
    root_logger.setLevel(logging.DEBUG)

    # Build a tiny two-node demo graph and open it in the view model.
    demo_graph = Graph(ID="G")
    demo_graph.add_node("node1")
    demo_graph.add_node("node2")

    view_model = GraphViewModel(model=demo_graph)
    view_model.configure_traits()
# EOF -------------------------------------------------------------------------
| mit |
jhutar/spacewalk | backend/server/test/misc_functions.py | 8 | 9311 | #!/usr/bin/python
#
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
#
#
import os
import sys
import time
from spacewalk.common import usix
import server.importlib.headerSource
import server.importlib.packageImport
import server.importlib.backendOracle
import server.xmlrpc.up2date
from spacewalk.server import rhnSQL, rhnChannel, rhnServer, rhnUser, rhnServerGroup, rhnActivationKey
from spacewalk.server.xmlrpc import registration
def create_channel_family():
    """Create and persist a channel family built from default test data."""
    family = rhnChannel.ChannelFamily()
    family.load_from_dict(new_channel_family_dict())
    family.save()
    return family
def create_channel(label, channel_family, org_id=None, channel_arch=None):
    """Create and persist a channel with the given label, family, org and arch."""
    channel = rhnChannel.Channel()
    channel.load_from_dict(
        new_channel_dict(label=label, channel_family=channel_family,
                         org_id=org_id, channel_arch=channel_arch))
    channel.save()
    return channel
def create_new_org():
    """Create a brand new org; return (org_id, org_name, org_password)."""
    name = "unittest-org-%.3f" % time.time()
    password = "unittest-password-%.3f" % time.time()
    new_org_id = rhnServerGroup.create_new_org(name, password)
    rhnSQL.commit()
    return (new_org_id, name, password)
def _create_server_group(org_id, name, description):
    """Create, save and commit a server group; return the new object."""
    group = rhnServerGroup.ServerGroup()
    group.set_org_id(org_id)
    group.set_name(name)
    group.set_description(description)
    group.save()
    rhnSQL.commit()
    return group
def create_server_group(params):
    """Create a server group from a dict of keyword params (org_id, name, ...)."""
    return _create_server_group(**params)
def fetch_server_group(org_id, name):
    """Load and return the server group identified by org id and name."""
    group = rhnServerGroup.ServerGroup()
    group.load(org_id, name)
    return group
_query_fetch_server_groups = rhnSQL.Statement("""
select sgm.server_group_id
from rhnServerGroupMembers sgm,
rhnServerGroup sg
where sgm.server_id = :server_id
and sgm.server_group_id = sg.id
and sg.group_type is null
""")
def fetch_server_groups(server_id):
    """Return the sorted list of plain server-group ids the server belongs to."""
    cursor = rhnSQL.prepare(_query_fetch_server_groups)
    cursor.execute(server_id=server_id)
    rows = cursor.fetchall_dict() or []
    return sorted(row['server_group_id'] for row in rows)
def build_server_group_params(**kwargs):
    """Build a params dict for server-group creation; kwargs override defaults."""
    defaults = {
        'org_id': 'no such org',
        'name': "unittest group name %.3f" % time.time(),
        'description': "unittest group description %.3f" % time.time(),
    }
    defaults.update(kwargs)
    return defaults
def create_new_user(org_id=None, username=None, password=None, roles=None):
    """Create and save a new user; return the rhnUser.User object.

    When org_id is None a brand-new org is created; a string org_id is
    resolved through lookup_org_id().  Each entry in *roles* must be a
    valid role label for the org, otherwise InvalidRoleError is raised.
    """
    if org_id is None:
        # BUG FIX: create_new_org() returns (org_id, name, password); the
        # original stored the whole tuple in org_id and passed it on to
        # set_org_id().  Keep only the numeric id.
        org_id = create_new_org()[0]
    else:
        org_id = lookup_org_id(org_id)
    if username is None:
        username = "unittest-user-%.3f" % time.time()
    if password is None:
        password = "unittest-password-%.3f" % time.time()
    if roles is None:
        roles = []

    u = rhnUser.User(username, password)
    u.set_org_id(org_id)
    u.save()
    # The password is scrambled now - re-set it
    u.contact['password'] = password
    u.save()
    user_id = u.getid()

    # Set roles: map each role label to the org's user group of that type.
    h = rhnSQL.prepare("""
        select ug.id
          from rhnUserGroupType ugt, rhnUserGroup ug
         where ug.org_id = :org_id
           and ug.group_type = ugt.id
           and ugt.label = :role
    """)
    create_ugm = rhnSQL.Procedure("rhn_user.add_to_usergroup")
    for role in roles:
        h.execute(org_id=org_id, role=role)
        row = h.fetchone_dict()
        if not row:
            # No such role label defined for this org.
            raise InvalidRoleError(org_id, role)
        user_group_id = row['id']
        create_ugm(user_id, user_group_id)

    rhnSQL.commit()
    return u
def lookup_org_id(org_id):
    "Look up the org id by user name"
    # A string argument is treated as a username and resolved to that
    # user's org; anything else is assumed to already be an org id and is
    # validated against the web_customer table.
    if isinstance(org_id, usix.StringType):
        # Is it a user?
        u = rhnUser.search(org_id)
        if not u:
            raise rhnServerGroup.InvalidUserError(org_id)
        return u.contact['org_id']
    t = rhnSQL.Table('web_customer', 'id')
    row = t[org_id]
    if not row:
        raise rhnServerGroup.InvalidOrgError(org_id)
    return row['id']
# class InvalidEntitlementError(Exception):
# pass
class InvalidRoleError(Exception):
    """Raised when a requested role label does not exist for the org."""
def listdir(directory):
    """Return 'dir/name' paths for every entry of *directory*.

    The directory is normalized to an absolute path first; the process
    exits with status 1 if it is not an accessible directory.
    """
    directory = os.path.abspath(os.path.normpath(directory))
    if not os.access(directory, os.R_OK | os.X_OK):
        print("Can't access %s." % (directory))
        sys.exit(1)
    if not os.path.isdir(directory):
        print("%s not valid." % (directory))
        sys.exit(1)
    return ["%s/%s" % (directory, entry) for entry in os.listdir(directory)]
# stolen from backend/server/test/unit-test/test_rhnChannel
# stolen from backend/server/test/unit-test/test_rhnChannel
def new_channel_dict(**kwargs):
    """Build a channel dict suitable for rhnChannel.Channel.load_from_dict().

    Keyword arguments override the generated defaults; a unique label is
    generated from the current time when none is supplied.
    """
    label = kwargs.get('label')
    if label is None:
        # The '-0' suffix preserves the original label format (the original
        # kept a local counter that was always 0 when used); uniqueness
        # comes from the timestamp.
        label = 'rhn-unittest-%.3f-%s' % (time.time(), 0)
    release = kwargs.get('release') or 'release-' + label
    # Renamed from 'os' so the local no longer shadows the os module.
    os_name = kwargs.get('os') or 'Unittest Distro'
    # NOTE(review): the original computed a 'rhn-noc' org_id fallback with
    # kwargs.has_key() (Python 2-only) but never used it -- vdict reads
    # kwargs.get('org_id') directly.  Dead code removed; behavior preserved.
    # Confirm whether the fallback was originally intended.
    vdict = {
        'label': label,
        'name': kwargs.get('name') or label,
        'summary': kwargs.get('summary') or label,
        'description': kwargs.get('description') or label,
        'basedir': kwargs.get('basedir') or '/',
        'channel_arch': kwargs.get('channel_arch') or 'i386',
        'channel_families': [kwargs.get('channel_family') or label],
        'org_id': kwargs.get('org_id'),
        'gpg_key_url': kwargs.get('gpg_key_url'),
        'gpg_key_id': kwargs.get('gpg_key_id'),
        'gpg_key_fp': kwargs.get('gpg_key_fp'),
        'end_of_life': kwargs.get('end_of_life'),
        'dists': [{
            'release': release,
            'os': os_name,
        }],
    }
    return vdict
# stolen from backend/server/tests/unit-test/test_rhnChannel
# stolen from backend/server/tests/unit-test/test_rhnChannel
def new_channel_family_dict(**kwargs):
    """Build a channel-family dict; kwargs override the generated defaults."""
    counter = 0
    label = kwargs.get('label')
    if label is None:
        label = 'rhn-unittest-%.3f-%s' % (time.time(), counter)
        counter = counter + 1
    return {
        'label': label,
        'name': kwargs.get('name') or label,
        'product_url': kwargs.get('product_url') or 'http://rhn.redhat.com',
    }
def new_server(user, org_id):
    """Create, populate and save a test server owned by *user*; return it."""
    serv = rhnServer.Server(user, org_id=org_id)
    # serv.default_description()
    params = build_sys_params_with_username(username=user.contact['login'])
    # print params
    serv.server['release'] = params['os_release']
    serv.server['os'] = "Unittest Distro"
    serv.server['name'] = params['profile_name']
    serv.set_arch('i386')
    serv.default_description()
    # Force id allocation and secret generation before the save.
    serv.getid()
    serv.gen_secret()
    serv.save()
    return serv
class Counter:
    """Monotonically increasing counter shared by all instances.

    The count is kept on the class: the original implementation assigned
    to ``self._counter``, which created a per-instance attribute and left
    the shared count permanently at zero -- every fresh ``Counter()``
    returned 0, defeating the uniqueness suffix below.
    """
    _counter = 0

    def value(self):
        """Return the current count and advance the shared counter."""
        val = Counter._counter
        Counter._counter = val + 1
        return val


def build_sys_params_with_username(**kwargs):
    """Build registration params for a test system; kwargs override defaults.

    Any 'token' entry is removed: token and username/password
    authentication are mutually exclusive in registration calls.
    """
    val = Counter().value()
    rnd_string = "%s%s" % (int(time.time()), val)
    params = {
        'os_release': '9',
        'architecture': 'i386',
        'profile_name': "unittest server " + rnd_string,
        'username': 'no such user',
        'password': 'no such password',
    }
    params.update(kwargs)
    # dict.has_key() was removed in Python 3; 'in' works on both versions.
    if 'token' in params:
        del params['token']
    return params
def create_activation_key(org_id=None, user_id=None, groups=None,
                          channels=None, entitlement_level=None, note=None,
                          server_id=None):
    """Create, save and commit a test activation key; return it.

    Missing pieces (org, user, server groups, channels, entitlement,
    note) are created or defaulted on the fly.
    """
    if org_id is None:
        need_user = 1
        # BUG FIX: create_new_org() returns (org_id, name, password); the
        # original stored the whole tuple in org_id, which then leaked into
        # set_org_id() and create_new_user().  Keep only the id.
        org_id = create_new_org()[0]
    else:
        need_user = 0
    if user_id is None:
        if need_user:
            u = create_new_user(org_id=org_id)
            user_id = u.getid()
    else:
        u = rhnUser.User("", "")
        u.reload(user_id)
    if groups is None:
        # Default to three fresh server groups in the org.
        groups = []
        for i in range(3):
            params = build_server_group_params(org_id=org_id)
            sg = create_server_group(params)
            groups.append(sg.get_id())
    if channels is None:
        channels = ['rhel-i386-as-3-beta', 'rhel-i386-as-2.1-beta']
    if entitlement_level is None:
        entitlement_level = 'enterprise_entitled'
    if note is None:
        note = "Test activation key %d" % int(time.time())

    a = rhnActivationKey.ActivationKey()
    a.set_user_id(user_id)
    a.set_org_id(org_id)
    a.set_entitlement_level(entitlement_level)
    a.set_note(note)
    a.set_server_groups(groups)
    a.set_channels(channels)
    a.set_server_id(server_id)
    a.save()
    rhnSQL.commit()
    return a
| gpl-2.0 |
corakwue/ftrace | ftrace/parsers/workqueue_execute_end.py | 1 | 1783 | #!/usr/bin/python
# Copyright 2015 Huawei Devices USA Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Authors:
# Chuk Orakwue <chuk.orakwue@huawei.com>
import re
from ftrace.common import ParserError
from .register import register_parser
from collections import namedtuple
#from ftrace.third_party.cnamedtuple import namedtuple
# Name of the tracepoint handled by this parser module.
TRACEPOINT = 'workqueue_execute_end'

__all__ = [TRACEPOINT]

# Base type carrying the single field captured from the event payload.
WorkqueueQueueExecuteEndBase = namedtuple(TRACEPOINT, ['work_struct'])
class WorkqueueQueueExecuteEnd(WorkqueueQueueExecuteEndBase):
    """Parsed `workqueue_execute_end` event payload."""
    __slots__ = ()

    def __new__(cls, work_struct):
        # BUG FIX: the original called super(cls, WorkqueueQueueExecuteEnd)
        # with the arguments swapped, which raises TypeError as soon as the
        # class is subclassed.  super(ThisClass, cls) is the correct form.
        return super(WorkqueueQueueExecuteEnd, cls).__new__(
            cls,
            work_struct=work_struct
        )
workqueue_execute_end_pattern = re.compile(
r"""
work struct (?P<work_struct>.+)
""",
re.X|re.M
)
@register_parser
def workqueue_execute_end(payload):
    """Parser for `workqueue_execute_end` tracepoint.

    Returns a WorkqueueQueueExecuteEnd tuple, or None when the payload
    does not match the expected format.  Unexpected errors are re-raised
    as ParserError.
    """
    try:
        match = re.match(workqueue_execute_end_pattern, payload)
        if match:
            match_group_dict = match.groupdict()
            return WorkqueueQueueExecuteEnd(**match_group_dict)
    except Exception as e:
        # 'except Exception, e' and 'e.message' are Python 2-only; use the
        # portable spelling so the parser also works under Python 3.
        raise ParserError(str(e))
| apache-2.0 |
Mj258/weiboapi | srapyDemo/envs/Lib/site-packages/scrapy/downloadermiddlewares/ajaxcrawl.py | 116 | 3427 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import re
import logging
import six
from w3lib import html
from scrapy.exceptions import NotConfigured
from scrapy.http import HtmlResponse
from scrapy.utils.response import _noscript_re, _script_re
logger = logging.getLogger(__name__)
class AjaxCrawlMiddleware(object):
    """
    Handle 'AJAX crawlable' pages marked as crawlable via meta tag.
    For more info see https://developers.google.com/webmasters/ajax-crawling/docs/getting-started.
    """

    def __init__(self, settings):
        if not settings.getbool('AJAXCRAWL_ENABLED'):
            raise NotConfigured

        # XXX: Google parses at least first 100k bytes; scrapy's redirect
        # middleware parses first 4k. 4k turns out to be insufficient
        # for this middleware, and parsing 100k could be slow.
        # We use something in between (32K) by default.
        self.lookup_bytes = settings.getint('AJAXCRAWL_MAXSIZE', 32768)

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler.settings)

    def process_response(self, request, response, spider):
        # Only 200 HTML responses to GET requests are candidates; the
        # 'ajax_crawlable' meta flag guards against redirect loops.
        if (not isinstance(response, HtmlResponse)
                or response.status != 200
                or request.method != 'GET'
                or 'ajax_crawlable' in request.meta):
            return response

        if not self._has_ajax_crawlable_variant(response):
            return response

        # scrapy already handles #! links properly
        ajax_crawl_request = request.replace(url=request.url + '#!')
        logger.debug("Downloading AJAX crawlable %(ajax_crawl_request)s instead of %(request)s",
                     {'ajax_crawl_request': ajax_crawl_request, 'request': request},
                     extra={'spider': spider})

        ajax_crawl_request.meta['ajax_crawlable'] = True
        return ajax_crawl_request

    def _has_ajax_crawlable_variant(self, response):
        """
        Return True if a page without hash fragment could be "AJAX crawlable"
        according to https://developers.google.com/webmasters/ajax-crawling/docs/getting-started.
        """
        head = response.body_as_unicode()[:self.lookup_bytes]
        return _has_ajaxcrawlable_meta(head)
# XXX: move it to w3lib?
# Matches the <meta name="fragment" content="!"> tag (single or double
# quotes, optionally self-closing) that marks a page as AJAX crawlable.
_ajax_crawlable_re = re.compile(six.u(r'<meta\s+name=["\']fragment["\']\s+content=["\']!["\']/?>'))


def _has_ajaxcrawlable_meta(text):
    """
    Return True if *text* contains a real (non-commented, non-scripted)
    AJAX-crawlable meta tag.

    >>> _has_ajaxcrawlable_meta('<html><head><meta name="fragment" content="!"/></head><body></body></html>')
    True
    >>> _has_ajaxcrawlable_meta("<html><head><meta name='fragment' content='!'></head></html>")
    True
    >>> _has_ajaxcrawlable_meta('<html><head><!--<meta name="fragment" content="!"/>--></head><body></body></html>')
    False
    >>> _has_ajaxcrawlable_meta('<html></html>')
    False
    """
    # Stripping scripts and comments is slow (about 20x slower than
    # just checking if a string is in text); this is a quick fail-fast
    # path that should work for most pages.
    if 'fragment' not in text:
        return False
    if 'content' not in text:
        return False

    # Strip script/noscript bodies and comments first so a commented-out
    # meta tag does not count, then search for the real tag.
    text = _script_re.sub(u'', text)
    text = _noscript_re.sub(u'', text)
    text = html.remove_comments(html.replace_entities(text))
    return _ajax_crawlable_re.search(text) is not None
| mit |
SimVascular/VTK | Rendering/Core/Testing/Python/CamBlur.py | 20 | 1969 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

# create the pipeline: a coarse sphere (ball) plus cone glyphs (spikes)
# placed on its surface normals
sphere = vtk.vtkSphereSource()
sphere.SetThetaResolution(7)
sphere.SetPhiResolution(7)
sphereMapper = vtk.vtkPolyDataMapper()
sphereMapper.SetInputConnection(sphere.GetOutputPort())
sphereActor = vtk.vtkActor()
sphereActor.SetMapper(sphereMapper)
sphereActor2 = vtk.vtkActor()
sphereActor2.SetMapper(sphereMapper)
cone = vtk.vtkConeSource()
cone.SetResolution(5)
# Glyph cones along the sphere normals, scaled by the normal vectors.
glyph = vtk.vtkGlyph3D()
glyph.SetInputConnection(sphere.GetOutputPort())
glyph.SetSourceConnection(cone.GetOutputPort())
glyph.SetVectorModeToUseNormal()
glyph.SetScaleModeToScaleByVector()
glyph.SetScaleFactor(0.25)
spikeMapper = vtk.vtkPolyDataMapper()
spikeMapper.SetInputConnection(glyph.GetOutputPort())
spikeActor = vtk.vtkActor()
spikeActor.SetMapper(spikeMapper)
spikeActor2 = vtk.vtkActor()
spikeActor2.SetMapper(spikeMapper)

# set the actors position and scale: one near object, one farther away
# (the far one should appear blurred by the focal-disk rendering below)
spikeActor.SetPosition(0,0.7,0)
sphereActor.SetPosition(0,0.7,0)
spikeActor2.SetPosition(0,-1,-10)
sphereActor2.SetPosition(0,-1,-10)
spikeActor2.SetScale(1.5,1.5,1.5)
sphereActor2.SetScale(1.5,1.5,1.5)
ren1.AddActor(sphereActor)
ren1.AddActor(spikeActor)
ren1.AddActor(sphereActor2)
ren1.AddActor(spikeActor2)
ren1.SetBackground(0.1,0.2,0.4)
renWin.SetSize(200,200)

# do the first render and then zoom in a little
renWin.Render()
ren1.GetActiveCamera().SetFocalPoint(0,0,0)
ren1.GetActiveCamera().Zoom(1.8)
# Non-zero focal disk + multiple FD frames -> depth-of-field camera blur
# (presumably 11 jittered frames are accumulated; see vtkRenderWindow docs).
ren1.GetActiveCamera().SetFocalDisk(0.05)
renWin.SetFDFrames(11)
renWin.Render()
iren.Initialize()
#renWin SetFileName CamBlur.tcl.ppm
#renWin SaveImageAsPPM
# prevent the tk window from showing up then start the event loop
| bsd-3-clause |
boompieman/iim_project | project_python2/lib/python2.7/site-packages/traitlets/utils/getargspec.py | 8 | 2997 | # -*- coding: utf-8 -*-
"""
getargspec excerpted from:
sphinx.util.inspect
~~~~~~~~~~~~~~~~~~~
Helpers for inspecting Python modules.
:copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import inspect
from ipython_genutils.py3compat import PY3
# Unmodified from sphinx below this line
if PY3:
    from functools import partial

    def getargspec(func):
        """Like inspect.getargspec but supports functools.partial as well."""
        if inspect.ismethod(func):
            func = func.__func__
        if type(func) is partial:
            # Recurse into the wrapped callable, then strip the arguments
            # already bound by the partial from the resulting spec.
            orig_func = func.func
            argspec = getargspec(orig_func)
            args = list(argspec[0])
            defaults = list(argspec[3] or ())
            kwoargs = list(argspec[4])
            kwodefs = dict(argspec[5] or {})
            if func.args:
                # Positional args bound by the partial drop off the front.
                args = args[len(func.args):]
            for arg in func.keywords or ():
                try:
                    # Negative index so the matching entry in 'defaults'
                    # (which aligns with the tail of 'args') can be removed.
                    i = args.index(arg) - len(args)
                    del args[i]
                    try:
                        del defaults[i]
                    except IndexError:
                        pass  # the removed arg had no default
                except ValueError:   # must be a kwonly arg
                    i = kwoargs.index(arg)
                    del kwoargs[i]
                    del kwodefs[arg]
            return inspect.FullArgSpec(args, argspec[1], argspec[2],
                                       tuple(defaults), kwoargs,
                                       kwodefs, argspec[6])
        # Unwrap decorated functions (functools.wraps sets __wrapped__).
        while hasattr(func, '__wrapped__'):
            func = func.__wrapped__
        if not inspect.isfunction(func):
            raise TypeError('%r is not a Python function' % func)
        return inspect.getfullargspec(func)
else:  # 2.6, 2.7
    from functools import partial

    def getargspec(func):
        """Like inspect.getargspec but supports functools.partial as well."""
        if inspect.ismethod(func):
            func = func.__func__
        # parts = (number of bound positional args, bound keyword names)
        parts = 0, ()
        if type(func) is partial:
            keywords = func.keywords
            if keywords is None:
                keywords = {}
            parts = len(func.args), keywords.keys()
            func = func.func
        if not inspect.isfunction(func):
            raise TypeError('%r is not a Python function' % func)
        args, varargs, varkw = inspect.getargs(func.__code__)
        func_defaults = func.__defaults__
        if func_defaults is None:
            func_defaults = []
        else:
            func_defaults = list(func_defaults)
        if parts[0]:
            # Drop positional args already bound by the partial.
            args = args[parts[0]:]
        if parts[1]:
            for arg in parts[1]:
                # Negative index keeps 'args' and 'func_defaults' aligned.
                i = args.index(arg) - len(args)
                del args[i]
                try:
                    del func_defaults[i]
                except IndexError:
                    pass  # the removed arg had no default
        return inspect.ArgSpec(args, varargs, varkw, func_defaults)
| gpl-3.0 |
tensorflow/moonlight | moonlight/glyphs/neural.py | 1 | 11877 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""1-D convolutional neural network glyph classifier model.
Convolves a filter horizontally along a staffline, to classify glyphs at each
x position.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from moonlight.protobuf import musicscore_pb2
# Count every glyph type except for UNKNOWN_TYPE.  Used as the size of the
# per-column softmax over glyph classes below.
NUM_GLYPHS = len(musicscore_pb2.Glyph.Type.values()) - 1
# TODO(ringw): Make this extend BaseGlyphClassifier.
class NeuralNetworkGlyphClassifier(object):
"""Holds a TensorFlow NN model used for classifying glyphs on staff lines."""
def __init__(self,
input_placeholder,
hidden_layer,
reconstruction_layer=None,
autoencoder_vars=None,
labels_placeholder=None,
prediction_layer=None,
prediction_vars=None):
"""Builds the NeuralNetworkGlyphClassifier that holds the TensorFlow model.
Args:
input_placeholder: A tf.placeholder representing the input staffline
image. Dtype float32 and shape (batch_size, target_height, None).
hidden_layer: An inner layer in the model. Should be the last layer in the
autoencoder model before reconstructing the input, and/or an
intermediate layer in the prediction network. self is intended to be the
last common ancestor of the reconstruction_layer output and the
prediction_layer output, if both are present.
reconstruction_layer: The reconstruction of the input, for an autoencoder
model. If non-None, should have the same shape as input_placeholder.
autoencoder_vars: The variables for the autoencoder model (parameters
affecting hidden_layer and reconstruction_layer), or None. If non-None,
a dict mapping variable name to tf.Variable object.
labels_placeholder: The labels tensor. A placeholder will be created if
None is given. Dtype int32 and shape (batch_size, width). Values are
between 0 and NUM_GLYPHS - 1 (where each value is the Glyph.Type enum
value minus one, to skip UNKNOWN_TYPE).
prediction_layer: The logit probability of each glyph for each column.
Must be able to be passed to tf.nn.softmax to produce the probability of
each glyph. 2D (width, NUM_GLYPHS). May be None if the model is not
being used for classification.
prediction_vars: The variables for the classification model (parameters
affecting hidden_layer and prediction_layer), or None. If non-None, a
dict mapping variable name to tf.Variable object.
"""
self.input_placeholder = input_placeholder
self.hidden_layer = hidden_layer
self.reconstruction_layer = reconstruction_layer
self.autoencoder_vars = autoencoder_vars or {}
# Calculate the loss that will be minimized for the autoencoder model.
self.autoencoder_loss = None
if self.reconstruction_layer is not None:
self.autoencoder_loss = (
tf.reduce_mean(
tf.squared_difference(self.input_placeholder,
self.reconstruction_layer)))
self.prediction_layer = prediction_layer
self.prediction_vars = prediction_vars or {}
self.labels_placeholder = (
labels_placeholder if labels_placeholder is not None else
tf.placeholder(tf.int32, (None, None)))
# Calculate the loss that will be minimized for the prediction model.
self.prediction_loss = None
if self.prediction_layer is not None:
self.prediction_loss = (
tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits=self.prediction_layer,
labels=tf.one_hot(self.labels_placeholder, NUM_GLYPHS))))
# The probabilities of each glyph for each column.
self.prediction = tf.nn.softmax(self.prediction_layer)
def get_autoencoder_initializers(self):
"""Gets the autoencoder initializer ops.
Returns:
The list of TensorFlow ops which initialize the autoencoder model.
"""
return [var.initializer for var in self.autoencoder_vars.values()]
def get_classifier_initializers(self):
"""Gets the classifier initializer ops.
Returns:
The list of TensorFlow ops which initialize the classifier model.
"""
return [var.initializer for var in self.prediction_vars.values()]
  @staticmethod
  def semi_supervised_model(batch_size,
                            target_height,
                            input_placeholder=None,
                            labels_placeholder=None):
    """Constructs the semi-supervised model.

    Consists of an autoencoder and classifier, sharing a hidden layer.

    Args:
      batch_size: The number of staffline images in a batch, which must be known
        at model definition time. int.
      target_height: The height of each scaled staffline image. int.
      input_placeholder: The input layer. A placeholder will be created if None
        is given. Dtype float32 and shape (batch_size, target_height,
        any_width).
      labels_placeholder: The labels tensor. A placeholder will be created if
        None is given. Dtype int32 and shape (batch_size, width).

    Returns:
      A NeuralNetworkGlyphClassifier instance holding the model.
    """
    if input_placeholder is None:
      input_placeholder = tf.placeholder(tf.float32,
                                         (batch_size, target_height, None))
    autoencoder_vars = {}
    prediction_vars = {}
    # The input conv layer and the first hidden layer are shared between the
    # autoencoder and the classifier, so their variables go in both dicts.
    hidden, layer_vars = InputConvLayer(input_placeholder, 10).get()
    autoencoder_vars.update(layer_vars)
    prediction_vars.update(layer_vars)
    hidden, layer_vars = HiddenLayer(hidden, 10, 10).get()
    autoencoder_vars.update(layer_vars)
    prediction_vars.update(layer_vars)
    # The reconstruction head belongs only to the autoencoder.
    # NOTE(review): target_height is passed as both filter_size and out_height
    # here -- presumably intentional, but confirm.
    reconstruction, layer_vars = ReconstructionLayer(hidden, target_height,
                                                     target_height).get()
    autoencoder_vars.update(layer_vars)
    # A second hidden layer and the prediction head belong only to the
    # classifier; their variables are not part of the autoencoder.
    hidden, layer_vars = HiddenLayer(hidden, 10, 10, name="hidden_2").get()
    prediction_vars.update(layer_vars)
    prediction, layer_vars = PredictionLayer(hidden).get()
    prediction_vars.update(layer_vars)
    return NeuralNetworkGlyphClassifier(
        input_placeholder,
        hidden,
        reconstruction_layer=reconstruction,
        autoencoder_vars=autoencoder_vars,
        labels_placeholder=labels_placeholder,
        prediction_layer=prediction,
        prediction_vars=prediction_vars)
class BaseLayer(object):
  """Base class for 1D-conv layers: owns the filter weights and bias.

  Subclasses are expected to set self.output in their __init__.
  """

  def __init__(self, filter_size, n_in, n_out, name):
    """Creates the weight and bias variables for the layer.

    Args:
      filter_size: Width of the 1D convolution filter.
      n_in: Number of input channels.
      n_out: Number of output channels.
      name: Prefix for the TensorFlow variable names.
    """
    self.weights = tf.Variable(
        tf.truncated_normal((filter_size, n_in, n_out)), name=name + "_W")
    self.bias = tf.Variable(tf.zeros(n_out), name=name + "_bias")
    # Map each variable's TensorFlow name to the variable itself.
    self.vars = {param.name: param for param in (self.weights, self.bias)}

  def get(self):
    """Gets the layer output and variables.

    Returns:
      The output tensor of the layer.
      The dict of variables (parameters) for the layer.
    """
    return self.output, self.vars
class InputConvLayer(BaseLayer):
  """Convolves the input image strip, producing multiple outputs per column."""

  def __init__(self, image, n_hidden, activation=tf.nn.sigmoid, name="input"):
    """Creates the InputConvLayer.

    Args:
      image: The input image (height, width). Should be wider than it is tall.
        NOTE(review): the transpose below permutes axes [0, 2, 1], which
        implies a leading batch axis, i.e. shape (batch, height, width) --
        confirm against callers.
      n_hidden: The number of output nodes of the layer.
      activation: Callable applied to the convolved image. Applied to the 1D
        convolution result to produce the activation of the layer.
      name: The prefix for variable names for the layer. Produces self.output
        with shape (width, n_hidden).
    """
    # The filter spans the full image height (filter_size == n_in == height),
    # so each output column is a function of a full-height window of columns.
    height = int(image.get_shape()[1])
    super(InputConvLayer, self).__init__(
        filter_size=height, n_in=height, n_out=n_hidden, name=name)
    self.input = image
    # Transpose the image, so that the rows are "channels" in a 1D input.
    self.output = activation(
        tf.nn.conv1d(
            tf.transpose(image, [0, 2, 1]),
            self.weights,
            stride=1,
            padding="SAME") + self.bias[None, None, :])
class HiddenLayer(BaseLayer):
  """Performs a 1D convolution between hidden layers in the model."""

  def __init__(self,
               layer_in,
               filter_size,
               n_out,
               activation=tf.nn.sigmoid,
               name="hidden"):
    """Performs a 1D convolution between hidden layers in the model.

    Args:
      layer_in: The input layer (width, num_channels).
      filter_size: The width of the convolution filter.
      n_out: The number of output channels.
      activation: Callable applied to the convolved image. Applied to the 1D
        convolution result to produce the activation of the layer.
      name: The prefix for variable names for the layer. Produces self.output
        with shape (width, n_out).
    """
    num_channels = int(layer_in.get_shape()[2])
    super(HiddenLayer, self).__init__(filter_size, num_channels, n_out, name)
    # Convolve, add the broadcast bias, then apply the nonlinearity.
    convolved = tf.nn.conv1d(layer_in, self.weights, stride=1, padding="SAME")
    self.output = activation(convolved + self.bias[None, None, :])
class ReconstructionLayer(BaseLayer):
  """Outputs a reconstructed layer."""

  def __init__(self,
               layer_in,
               filter_size,
               out_height,
               activation=tf.nn.sigmoid,
               name="reconstruction"):
    """Outputs a reconstructed image of shape (out_height, width).

    Args:
      layer_in: The input layer (width, num_channels).
      filter_size: The width of the convolution filter.
      out_height: The height of the output image.
      activation: Callable applied to the convolved image. Applied to the 1D
        convolution result to produce the activation of the output.
      name: The prefix for variable names for the layer. Produces self.output
        shaped like an image: the convolution yields (batch, width,
        out_height) and the transpose below swaps the last two axes.
    """
    n_in = int(layer_in.get_shape()[2])
    super(ReconstructionLayer, self).__init__(filter_size, n_in, out_height,
                                              name)
    output = activation(
        tf.nn.conv1d(layer_in, self.weights, stride=1, padding="SAME") +
        self.bias[None, None, :])
    # Swap the channel (out_height) and width axes so the reconstruction is
    # laid out like the original input image.
    self.output = tf.transpose(output, [0, 2, 1])
class PredictionLayer(BaseLayer):
  """Classifies each column from a hidden layer."""

  def __init__(self, layer_in, name="prediction"):
    """Outputs logit predictions for each column from a hidden layer.

    Args:
      layer_in: The input layer (width, num_channels).
      name: The prefix for variable names for the layer. Produces the logits
        for each class in self.output. Shape (width, NUM_GLYPHS)
    """
    num_channels = int(layer_in.get_shape()[2])
    super(PredictionLayer, self).__init__(1, num_channels, NUM_GLYPHS, name)
    shape = tf.shape(layer_in)
    # Flatten (batch, width) into a single axis of columns.
    flattened = tf.reshape(layer_in, [shape[0] * shape[1], shape[2]])
    # The convolutional filter dimension is 1 here, so drop it and apply the
    # weights with a plain matmul per column.
    logits = tf.matmul(flattened, self.weights[0, :, :]) + self.bias
    self.output = tf.reshape(logits, [shape[0], shape[1], NUM_GLYPHS])
| apache-2.0 |
tinloaf/home-assistant | homeassistant/components/camera/zoneminder.py | 1 | 2223 | """
Support for ZoneMinder camera streaming.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/camera.zoneminder/
"""
import logging
from homeassistant.const import CONF_NAME, CONF_VERIFY_SSL
from homeassistant.components.camera.mjpeg import (
CONF_MJPEG_URL, CONF_STILL_IMAGE_URL, MjpegCamera)
from homeassistant.components.zoneminder import DOMAIN as ZONEMINDER_DOMAIN
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['zoneminder']
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the ZoneMinder cameras."""
    client = hass.data[ZONEMINDER_DOMAIN]
    found = client.get_monitors()
    if not found:
        _LOGGER.warning("Could not fetch monitors from ZoneMinder")
        return

    entities = []
    for monitor in found:
        _LOGGER.info("Initializing camera %s", monitor.id)
        entities.append(ZoneMinderCamera(monitor, client.verify_ssl))
    add_entities(entities)
class ZoneMinderCamera(MjpegCamera):
    """Representation of a ZoneMinder Monitor Stream."""

    def __init__(self, monitor, verify_ssl):
        """Initialize as a subclass of MjpegCamera."""
        super().__init__({
            CONF_NAME: monitor.name,
            CONF_MJPEG_URL: monitor.mjpeg_image_url,
            CONF_STILL_IMAGE_URL: monitor.still_image_url,
            CONF_VERIFY_SSL: verify_ssl,
        })
        # Recording/availability state is unknown until the first update().
        self._is_recording = None
        self._is_available = None
        self._monitor = monitor

    @property
    def should_poll(self):
        """Update the recording state periodically."""
        return True

    def update(self):
        """Update our recording state from the ZM API."""
        monitor = self._monitor
        _LOGGER.debug("Updating camera state for monitor %i", monitor.id)
        self._is_recording = monitor.is_recording
        self._is_available = monitor.is_available

    @property
    def is_recording(self):
        """Return whether the monitor is in alarm mode."""
        return self._is_recording

    @property
    def available(self):
        """Return True if entity is available."""
        return self._is_available
| apache-2.0 |
nmGit/MView | MDevice.py | 1 | 16221 | # Copyright (C) 2016 Noah Meltzer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__author__ = "Noah Meltzer"
__copyright__ = "Copyright 2016, McDermott Group"
__license__ = "GPL"
__version__ = "1.0.2"
__maintainer__ = "Noah Meltzer"
__status__ = "Beta"
from MFrame import MFrame
from PyQt4 import QtCore
from PyQt4.QtCore import QObject, pyqtSignal, pyqtSlot, QThread
import threading
from MDataBase.MDataBase import MDataBase
from MDataBase.MDataBaseWrapper import MDataBaseWrapper
from MWeb import web
import traceback
import time
class MDevice(QThread):
'''
MView uses the MDevice class to give all sources of data a common
interface with which to interact in the context of MView. These
sources of data can be anything including but not limited to LabRad
servers, RS232 devices, GPIB Devices, they can even represent the
contents of .hdf5 files. Devices in MView are created by instantiating
their device drivers. For example, if there are two RS232 devices,
we create two instances of the RS232 device driver. This means that
only one generic device driver needs to be created for one interface
(RS232, LabRad Servers, HDF5 files, etc.) and it can then be applied
to all devices that use the same interface.
'''
updateSignal = pyqtSignal()
addParameterSignal = pyqtSignal(str)
lock = threading.Lock()
begin_signal = pyqtSignal(name = "begin_signal")
def __init__(self, name, *args, **kwargs):
'''Initializes the device:
1. Sets the frame title. 1.
2. Sets the refresh rate. 2.
Function arguments:
:param name: The name of the device
'''
super(MDevice, self).__init__()
self.lockLoggingSettings = kwargs.get("lock_logging_settings", False)
self.defaultLogLocation = kwargs.get("default_log_location", None)
self.dataType = kwargs.get("data_type", "float32")
# Create a new MFrame
web.devices.append(self)
self.frame = MFrame()
# print "Setting title to:", name, args
self.frame.setTitle(name)
self.name = name
self.refreshRate = 1
self.container = None
self.datachest = None
self.keepGoing = True
self.settingResultIndices = []
self.notifier_mailing_lists = []
self.doneLoading = False
#self.memory_tracker = tracker.SummaryTracker()
def log(self, log):
""" Tell the device whether to log data or not
:param log: Boolean
"""
if log == False:
for p in self.getParameters():
self.disableDataLogging(p)
else:
if(self.frame.getDataChestWrapper() == None):
self.configureDataLogging()
self.frame.masterEnableDataLogging(log)
def isLogging(self):
'''Getter for whether or not datalogging is enabled for this device.
:rtype: boolean
'''
return self.frame.isDataLogging()
def setContainer(self, container):
# traceback.print_stack()
self.container = container
self.frame.setContainer(container)
def getContainer(self):
return self.container
def updateContainer(self):
'''Refresh the devices container (Tile) on the GUI
by emitting an update signal
'''
if self.container != None:
self.updateSignal.emit()
def addButton(self, *args):
pass
def setTitle(self, title):
self.frame.setTitle(title)
def query(self, *args):
pass
def setYLabel(self, *args):
pass
def setRefreshRate(self, *args):
pass
def setPlotRefreshRate(self, *args):
pass
def addButtonToGui(self, button):
self.frame.appendButton(button)
def addReadout(self, name, units):
self.nicknames.append(name)
self.units.append(units)
def addPlot(self, length=None, *args):
if(self.isLogging()):
self.frame.addPlot(length)
# Datalogging must be enabled if we want to plot data.
return self.frame.getPlot()
else:
raise Exception("Cannot add plot before enabling data logging.")
return None
def getFrame(self):
"""Return the device's frame."""
return self.frame
def stop(self):
# print "stopping device thread..."
self.keepGoing = False
# print "device thread stopped."
#self.device_stop_signal.emit()
if self.frame.DataLoggingInfo()['chest']:
self.frame.DataLoggingInfo()['chest'].close()
self.close()
def __threadSafeClose(self):
if self.frame.DataLoggingInfo()['chest']:
self.frame.DataLoggingInfo()['chest'].close()
def plot(self, plot):
self.frame.setHasPlot(plot)
def begin(self, **kwargs):
'''Start the device.
'''
# Automatically refresh node data in callQuery
self.refreshNodeDataInCallQuery = kwargs.get('auto_refresh_node', True)
# if not self.refreshNodeDataInCallQuery:
# print self, "will not automatically refresh node data"
# traceback.print_stack()
self.onBegin()
# self.frame.setReadingIndex(self.settingResultIndices)
#self.configureDataLogging()
# Each device NEEDS to run on a different thread
# than the main thread (which ALWAYS runs the GUI).
# This thread is responsible for querying the devices.
# self.deviceThread = threading.Thread(target=self.callQuery, args=[])
# # If the main thread stops, stop the child thread.
# self.deviceThread.daemon = True
# # Start the thread.
self.start()
#self.callQuery()
def __threadSafeBegin(self):
self.configureDataLogging()
def configureDataLogging(self):
# print self, "is datalogging"
self.frame.DataLoggingInfo()['name'] = self.name
self.frame.DataLoggingInfo()[
'lock_logging_settings'] = self.lockLoggingSettings
if self.defaultLogLocation != None:
# If the current directory is a subdirectory of the default,
# then that is ok and the current directory should not be
# changed.
print "current location:", self.frame.DataLoggingInfo()['location']
print "default:", self.defaultLogLocation
if not(self.defaultLogLocation in self.frame.DataLoggingInfo()['location']):
print "Paths not ok"
self.frame.DataLoggingInfo()[
'location'] = self.defaultLogLocation
self.frame.DataLoggingInfo()['chest'] = MDataBaseWrapper(self)
self.datachest = self.frame.DataLoggingInfo()['chest']
def onBegin(self):
'''Called at the end of MDevice.begin(). This is called before
MView starts. This allows us to configure settings that
MView might use while starting. This might include datalog
locations or device-specific information.'''
pass
def loaded(self):
print self, "loaded."
self.onLoad()
self.doneLoading = True
def isLoaded(self):
return self.doneLoading
def onLoad(self):
'''Called at the end of MGui.startGui(), when the main
MView GUI has finished loading. This allows the
MDevice to configure pieces of MView only available
once the program has fully loaded.'''
pass
def onAddParameter(self, *args, **kwargs):
'''Called when when a new parameter is added.
It is passed whatever MDevice.addParameter() is passed.
(Note: MDevice.onAddParameter() and MDevice.addParameter()
are different). This function must return a tuple in
the form ((str) Parameter Name, (int)Precision, (str) units)
'''
return
def setPrecisions(self, precisions):
self.frame.setPrecisions(precisions)
def setSigFigs(self, parameter, sigfigs):
self.frame.setSigFigs(parameter, sigfigs)
def _setReadings(self, readings, update=True):
'''Tell the frame what the readings are so that they can be logged.
:param readings: Type: list
'''
# readings = self.frame.getReadings()
# print "set readings called"
# traceback.print_stack()
# if readings != None:
# node = self.frame.getNode()
# anchorData = []
# # # print "HERE A"
# if node is not None:
# for input in node.getAnchors():
# if input.getType() == 'input':
# print "INPUT ANCHOR", input
# data = input.getData()
# if data != None and type(data) is list:
# anchorData.append(data[-1])
# elif data != None:
# anchorData.append(None)
# print "readigns:", readings
# print "anchordata:", anchorData
# readings.extend(anchorData)
# print "comb readigns:", readings
# else:
def isOutOfRange(self, key):
return self.frame.getOutOfRangeStatus(key)
def setOutOfRange(self, key):
# print self, key, "is out of range"
self.frame.setOutOfRange(key)
def setInRange(self, key):
self.frame.setInRange(key)
def disableRange(self):
self.frame.disableRange()
def setReading(self, parameter, reading):
self.frame.setReading(parameter, reading)
def setMailingLists(self, lists):
self.notifier_mailing_lists = lists
def getMailingLists(self):
return self.notifier_mailing_lists;
def getUnit(self, parameter):
return self.frame.getUnit(parameter)
def setUnit(self, parameter, unit):
self.frame.setUnit(parameter, unit)
def setPrecision(self, parameter, precision):
self.frame.setPrecision(parameter, precision)
def getPrecision(self, parameter):
return self.frame.getPrecision(parameter)
def getSigFigs(self, parameter):
return self.frame.getSigFigs(parameter)
def getReading(self, parameter):
return self.frame.getReading(parameter)
def setReadingIndex(self, parameter, index):
self.frame.setReadingIndex(parameter, index)
def getReadingIndex(self, parameter):
return self.frame.getReadingIndex(parameter)
def setCommand(self, parameter, command):
# print "Setting command for", parameter, "is", command
self.frame.setCommand(parameter, command)
def getCommand(self, parameter):
# print "Getting Parameter:",self.frame.getCommand(parameter)
return self.frame.getCommand(parameter)
def getParameters(self):
return self.frame.getParameters()
def getNicknames(self):
return self.frame.getNicknames()
def setParamVisibility(self, parameter, visible):
self.frame.setParamVisibility(parameter, True)
def getParamVisibility(self, paramteter):
return self.frame.getParamVisibility(parameter)
def getReadingIndex(self, parameter):
return self.frame.getReadingIndex(parameter)
def enableDataLogging(self, parameter):
self.frame.DataLoggingInfo()['channels'][parameter] = True
def disableDataLogging(self, parameter):
self.frame.DataLoggingInfo()['channels'][parameter] = False
def isDataLoggingEnabled(self, parameter):
return self.frame.DataLoggingInfo()['channels'][parameter]
def getParameterType(self, parameter):
if type(parameter) is dict:
parameter = parameter.keys()[0]
return self.frame.getNode().getAnchorByName(parameter).getType()
def disableAllDataLogging(self):
self.frame.masterEnableDataLogging(False)
for p in self.getParameters():
self.disableDataLogging(p)
def enableAllDataLogging(self):
self.frame.masterEnableDataLogging(True)
def run(self):
'''Automatically called periodically,
determined by MDevice.Mframe.getRefreshRate().
There is also a MDevice.Mframe.setRefreshRate()
function with which the refresh rate can be configured.
'''
while True:
#t1 = time.time()
self.query()
node = self.frame.getNode()
if node is not None and self.refreshNodeDataInCallQuery:
self.frame.getNode().refreshData()
if self.datachest is not None and self.doneLoading:
try:
if self.frame.isDataLogging():
#print "MDevice:", str(self),"thread id:",int(QThread.currentThreadId())
self.datachest.save()
pass
except:
traceback.print_exc()
if web.gui != None and web.gui.MAlert != None:
web.gui.MAlert.monitorReadings(self)
self.updateContainer()
#t2 = time.time()
#print self, "time to run:", t2 - t1
if self.keepGoing:
self.msleep(int(self.frame.getRefreshRate()*1000))
else:
return
#threading.Timer(self.frame.getRefreshRate(),
# self.callQuery).start()
def prompt(self, button):
'''Called when
a device's button is pushed. Button is an array which
is associated with the button. The array is constructed
in the device driver code, and the PyQT button is then appended
to the end by MView. The array associated with the button is passed
to prompt() in the device driver. The device driver then determines
what to do based on the button pushed.
'''
pass
def close(self):
return
def addParameter(self, *args, **kwargs):
"""Adds a parameter to the GUI. The first argument is the name,
the other arguments are specific to the device driver.
"""
try:
name = args[0]
except:
raise AttributError(
"The first argument of addParameter() must be a name")
show = kwargs.get("show", True)
units = kwargs.get('units', None)
sigfigs = kwargs.get('significant_figures', None)
precision = kwargs.get('precision', None)
if sigfigs is None and precision is None:
precision = 2
index = kwargs.get('index', None)
log = kwargs.get("log", self.isLogging())
self.frame.addParameter((name, units, precision))
self.setReadingIndex(name, index)
self.setPrecision(name, precision)
self.setSigFigs(name, sigfigs)
self.setUnit(name, units)
self.onAddParameter(*args, **kwargs)
self.frame.setParamVisibility(name, show)
self.frame.DataLoggingInfo()['channels'][name] = log
self.addParameterSignal.emit(name)
# def logData(self, b):
# """Enable or disable datalogging for the device."""
# # if channels!= None:
# # self.frame.DataLoggingInfo['channels'] = channels
# self.frame.enableDataLogging(b)
def __str__(self):
if self.frame.getTitle() is None:
return "Unnamed Device"
return self.frame.getTitle()
| gpl-3.0 |
JazzeYoung/VeryDeepAutoEncoder | pylearn2/pylearn2/datasets/iris.py | 37 | 5565 | """
.. todo::
WRITEME
"""
__author__ = "Ian Goodfellow"
# TODO: add citation
import numpy as np
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
class Iris(DenseDesignMatrix):
    """
    The UCI Iris dataset as a DenseDesignMatrix: 150 examples with 4
    real-valued features each and one of three class labels (stored as
    integer targets 0-2).
    """

    def __init__(self, preprocessor=None):
        """
        Parse the embedded CSV data and build the design matrix.

        Parameters
        ----------
        preprocessor : object, optional
            Preprocessor forwarded to DenseDesignMatrix.
        """
        self.class_names = ['Iris-setosa', 'Iris-versicolor', 'Iris-virginica']
        rows = [line.split(',') for line in iris_data.split('\n')]
        X = np.array([[float(value) for value in row[:-1]] for row in rows])
        assert X.shape == (150, 4)
        labels = [self.class_names.index(row[-1]) for row in rows]
        assert len(labels) == 150
        # Targets are stored as a column vector of class indices.
        y = np.array([[label] for label in labels])
        assert min(y) == 0
        assert max(y) == 2
        super(Iris, self).__init__(X=X, y=y, y_labels=3,
                                   preprocessor=preprocessor)
# Raw UCI Iris dataset: 150 CSV rows of
# "sepal_length,sepal_width,petal_length,petal_width,species"
# (measurements in cm; 50 rows per species).
iris_data = \
"""5.1,3.5,1.4,0.2,Iris-setosa
4.9,3.0,1.4,0.2,Iris-setosa
4.7,3.2,1.3,0.2,Iris-setosa
4.6,3.1,1.5,0.2,Iris-setosa
5.0,3.6,1.4,0.2,Iris-setosa
5.4,3.9,1.7,0.4,Iris-setosa
4.6,3.4,1.4,0.3,Iris-setosa
5.0,3.4,1.5,0.2,Iris-setosa
4.4,2.9,1.4,0.2,Iris-setosa
4.9,3.1,1.5,0.1,Iris-setosa
5.4,3.7,1.5,0.2,Iris-setosa
4.8,3.4,1.6,0.2,Iris-setosa
4.8,3.0,1.4,0.1,Iris-setosa
4.3,3.0,1.1,0.1,Iris-setosa
5.8,4.0,1.2,0.2,Iris-setosa
5.7,4.4,1.5,0.4,Iris-setosa
5.4,3.9,1.3,0.4,Iris-setosa
5.1,3.5,1.4,0.3,Iris-setosa
5.7,3.8,1.7,0.3,Iris-setosa
5.1,3.8,1.5,0.3,Iris-setosa
5.4,3.4,1.7,0.2,Iris-setosa
5.1,3.7,1.5,0.4,Iris-setosa
4.6,3.6,1.0,0.2,Iris-setosa
5.1,3.3,1.7,0.5,Iris-setosa
4.8,3.4,1.9,0.2,Iris-setosa
5.0,3.0,1.6,0.2,Iris-setosa
5.0,3.4,1.6,0.4,Iris-setosa
5.2,3.5,1.5,0.2,Iris-setosa
5.2,3.4,1.4,0.2,Iris-setosa
4.7,3.2,1.6,0.2,Iris-setosa
4.8,3.1,1.6,0.2,Iris-setosa
5.4,3.4,1.5,0.4,Iris-setosa
5.2,4.1,1.5,0.1,Iris-setosa
5.5,4.2,1.4,0.2,Iris-setosa
4.9,3.1,1.5,0.1,Iris-setosa
5.0,3.2,1.2,0.2,Iris-setosa
5.5,3.5,1.3,0.2,Iris-setosa
4.9,3.1,1.5,0.1,Iris-setosa
4.4,3.0,1.3,0.2,Iris-setosa
5.1,3.4,1.5,0.2,Iris-setosa
5.0,3.5,1.3,0.3,Iris-setosa
4.5,2.3,1.3,0.3,Iris-setosa
4.4,3.2,1.3,0.2,Iris-setosa
5.0,3.5,1.6,0.6,Iris-setosa
5.1,3.8,1.9,0.4,Iris-setosa
4.8,3.0,1.4,0.3,Iris-setosa
5.1,3.8,1.6,0.2,Iris-setosa
4.6,3.2,1.4,0.2,Iris-setosa
5.3,3.7,1.5,0.2,Iris-setosa
5.0,3.3,1.4,0.2,Iris-setosa
7.0,3.2,4.7,1.4,Iris-versicolor
6.4,3.2,4.5,1.5,Iris-versicolor
6.9,3.1,4.9,1.5,Iris-versicolor
5.5,2.3,4.0,1.3,Iris-versicolor
6.5,2.8,4.6,1.5,Iris-versicolor
5.7,2.8,4.5,1.3,Iris-versicolor
6.3,3.3,4.7,1.6,Iris-versicolor
4.9,2.4,3.3,1.0,Iris-versicolor
6.6,2.9,4.6,1.3,Iris-versicolor
5.2,2.7,3.9,1.4,Iris-versicolor
5.0,2.0,3.5,1.0,Iris-versicolor
5.9,3.0,4.2,1.5,Iris-versicolor
6.0,2.2,4.0,1.0,Iris-versicolor
6.1,2.9,4.7,1.4,Iris-versicolor
5.6,2.9,3.6,1.3,Iris-versicolor
6.7,3.1,4.4,1.4,Iris-versicolor
5.6,3.0,4.5,1.5,Iris-versicolor
5.8,2.7,4.1,1.0,Iris-versicolor
6.2,2.2,4.5,1.5,Iris-versicolor
5.6,2.5,3.9,1.1,Iris-versicolor
5.9,3.2,4.8,1.8,Iris-versicolor
6.1,2.8,4.0,1.3,Iris-versicolor
6.3,2.5,4.9,1.5,Iris-versicolor
6.1,2.8,4.7,1.2,Iris-versicolor
6.4,2.9,4.3,1.3,Iris-versicolor
6.6,3.0,4.4,1.4,Iris-versicolor
6.8,2.8,4.8,1.4,Iris-versicolor
6.7,3.0,5.0,1.7,Iris-versicolor
6.0,2.9,4.5,1.5,Iris-versicolor
5.7,2.6,3.5,1.0,Iris-versicolor
5.5,2.4,3.8,1.1,Iris-versicolor
5.5,2.4,3.7,1.0,Iris-versicolor
5.8,2.7,3.9,1.2,Iris-versicolor
6.0,2.7,5.1,1.6,Iris-versicolor
5.4,3.0,4.5,1.5,Iris-versicolor
6.0,3.4,4.5,1.6,Iris-versicolor
6.7,3.1,4.7,1.5,Iris-versicolor
6.3,2.3,4.4,1.3,Iris-versicolor
5.6,3.0,4.1,1.3,Iris-versicolor
5.5,2.5,4.0,1.3,Iris-versicolor
5.5,2.6,4.4,1.2,Iris-versicolor
6.1,3.0,4.6,1.4,Iris-versicolor
5.8,2.6,4.0,1.2,Iris-versicolor
5.0,2.3,3.3,1.0,Iris-versicolor
5.6,2.7,4.2,1.3,Iris-versicolor
5.7,3.0,4.2,1.2,Iris-versicolor
5.7,2.9,4.2,1.3,Iris-versicolor
6.2,2.9,4.3,1.3,Iris-versicolor
5.1,2.5,3.0,1.1,Iris-versicolor
5.7,2.8,4.1,1.3,Iris-versicolor
6.3,3.3,6.0,2.5,Iris-virginica
5.8,2.7,5.1,1.9,Iris-virginica
7.1,3.0,5.9,2.1,Iris-virginica
6.3,2.9,5.6,1.8,Iris-virginica
6.5,3.0,5.8,2.2,Iris-virginica
7.6,3.0,6.6,2.1,Iris-virginica
4.9,2.5,4.5,1.7,Iris-virginica
7.3,2.9,6.3,1.8,Iris-virginica
6.7,2.5,5.8,1.8,Iris-virginica
7.2,3.6,6.1,2.5,Iris-virginica
6.5,3.2,5.1,2.0,Iris-virginica
6.4,2.7,5.3,1.9,Iris-virginica
6.8,3.0,5.5,2.1,Iris-virginica
5.7,2.5,5.0,2.0,Iris-virginica
5.8,2.8,5.1,2.4,Iris-virginica
6.4,3.2,5.3,2.3,Iris-virginica
6.5,3.0,5.5,1.8,Iris-virginica
7.7,3.8,6.7,2.2,Iris-virginica
7.7,2.6,6.9,2.3,Iris-virginica
6.0,2.2,5.0,1.5,Iris-virginica
6.9,3.2,5.7,2.3,Iris-virginica
5.6,2.8,4.9,2.0,Iris-virginica
7.7,2.8,6.7,2.0,Iris-virginica
6.3,2.7,4.9,1.8,Iris-virginica
6.7,3.3,5.7,2.1,Iris-virginica
7.2,3.2,6.0,1.8,Iris-virginica
6.2,2.8,4.8,1.8,Iris-virginica
6.1,3.0,4.9,1.8,Iris-virginica
6.4,2.8,5.6,2.1,Iris-virginica
7.2,3.0,5.8,1.6,Iris-virginica
7.4,2.8,6.1,1.9,Iris-virginica
7.9,3.8,6.4,2.0,Iris-virginica
6.4,2.8,5.6,2.2,Iris-virginica
6.3,2.8,5.1,1.5,Iris-virginica
6.1,2.6,5.6,1.4,Iris-virginica
7.7,3.0,6.1,2.3,Iris-virginica
6.3,3.4,5.6,2.4,Iris-virginica
6.4,3.1,5.5,1.8,Iris-virginica
6.0,3.0,4.8,1.8,Iris-virginica
6.9,3.1,5.4,2.1,Iris-virginica
6.7,3.1,5.6,2.4,Iris-virginica
6.9,3.1,5.1,2.3,Iris-virginica
5.8,2.7,5.1,1.9,Iris-virginica
6.8,3.2,5.9,2.3,Iris-virginica
6.7,3.3,5.7,2.5,Iris-virginica
6.7,3.0,5.2,2.3,Iris-virginica
6.3,2.5,5.0,1.9,Iris-virginica
6.5,3.0,5.2,2.0,Iris-virginica
6.2,3.4,5.4,2.3,Iris-virginica
5.9,3.0,5.1,1.8,Iris-virginica"""
| bsd-3-clause |
googledatalab/pydatalab | solutionbox/ml_workbench/test_tensorflow/test_feature_transforms.py | 2 | 14526 | from __future__ import absolute_import
from __future__ import print_function
import base64
import cStringIO
from PIL import Image
import json
import math
import numpy as np
import os
import shutil
import sys
import tempfile
import unittest
import tensorflow as tf
from tensorflow.python.lib.io import file_io
# To make 'import analyze' work without installing it.
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'tensorflow', 'trainer')))
import feature_transforms # noqa: E303
# Some tests put files in GCS or use BigQuery. If HAS_CREDENTIALS is false,
# those tests will not run.
HAS_CREDENTIALS = True
try:
  import google.datalab as dl
  dl.Context.default().project_id
except Exception:
  # Reading the default project id raises when no credentials/project are
  # configured (or the datalab package is unavailable); in that case the
  # cloud-dependent tests are skipped.
  HAS_CREDENTIALS = False
class TestGraphBuilding(unittest.TestCase):
"""Test the TITO functions work and can produce a working TF graph."""
  def _run_graph(self, analysis_path, features, schema, stats, predict_data):
    """Runs the preprocessing graph.

    Args:
      analysis_path: path to folder containing analysis output. Should contain
        the stats file.
      features: features dict
      schema: schema list
      stats: stats dict
      predict_data: list of csv strings

    Returns:
      The fetched transform outputs from session.run.
    """
    # NOTE(review): the passed-in `stats` argument is immediately replaced by
    # an empty dict here, so callers' stats values are ignored by the serving
    # graph -- confirm this is intentional.
    stats = {'column_stats': {}}
    with tf.Graph().as_default():
      with tf.Session().as_default() as session:
        # `labels` is unused here because keep_target=False.
        outputs, labels, inputs = feature_transforms.build_csv_serving_tensors_for_transform_step(
            analysis_path, features, schema, stats, keep_target=False)
        feed_inputs = {inputs['csv_example']: predict_data}

        # Lookup tables (e.g. vocabularies) must be initialized before run.
        session.run(tf.tables_initializer())

        result = session.run(outputs, feed_dict=feed_inputs)
    return result
  def test_make_transform_graph_numerics(self):
    """Checks 'identity' and 'scale' transforms over numeric CSV columns."""
    output_folder = tempfile.mkdtemp()
    stats_file_path = os.path.join(output_folder, feature_transforms.STATS_FILE)
    try:
      stats = {'column_stats':
                   {'num1': {'max': 10.0, 'mean': 9.5, 'min': 0.0},  # noqa
                    'num2': {'max': 1.0, 'mean': 2.0, 'min': -1.0},
                    'num3': {'max': 10.0, 'mean': 2.0, 'min': 5.0}}}
      schema = [{'name': 'num1', 'type': 'FLOAT'},
                {'name': 'num2', 'type': 'FLOAT'},
                {'name': 'num3', 'type': 'INTEGER'}]
      features = {'num1': {'transform': 'identity', 'source_column': 'num1'},
                  'num2': {'transform': 'scale', 'value': 10, 'source_column': 'num2'},
                  'num3': {'transform': 'scale', 'source_column': 'num3'}}
      input_data = ['5.0,-1.0,10',
                    '10.0,1.0,5',
                    '15.0,0.5,7']
      file_io.write_string_to_file(
          stats_file_path,
          json.dumps(stats))

      results = self._run_graph(output_folder, features, schema, stats, input_data)

      # 'identity' passes input values through unchanged.
      for result, expected_result in zip(results['num1'].flatten().tolist(),
                                         [5, 10, 15]):
        self.assertAlmostEqual(result, expected_result)

      # 'scale' with value=10 maps [min, max] = [-1, 1] linearly onto
      # [-10, 10].
      for result, expected_result in zip(results['num2'].flatten().tolist(),
                                         [-10, 10, 5]):
        self.assertAlmostEqual(result, expected_result)

      # Default 'scale' maps [min, max] = [5, 10] onto [-1, 1].
      for result, expected_result in zip(results['num3'].flatten().tolist(),
                                         [1, -1, (7.0 - 5) * 2.0 / 5.0 - 1]):
        self.assertAlmostEqual(result, expected_result)

    finally:
      shutil.rmtree(output_folder)
  def test_make_transform_graph_category(self):
    """Checks 'one_hot' and 'embedding' transforms over string columns."""
    output_folder = tempfile.mkdtemp()
    try:
      # Vocab files list "token,count" rows; the row order defines token ids.
      file_io.write_string_to_file(
          os.path.join(output_folder, feature_transforms.VOCAB_ANALYSIS_FILE % 'cat1'),
          '\n'.join(['red,300', 'blue,200', 'green,100']))

      file_io.write_string_to_file(
          os.path.join(output_folder, feature_transforms.VOCAB_ANALYSIS_FILE % 'cat2'),
          '\n'.join(['pizza,300', 'ice_cream,200', 'cookies,100']))

      stats = {'column_stats': {}}  # stats file needed but unused.
      file_io.write_string_to_file(
          os.path.join(output_folder, feature_transforms.STATS_FILE),
          json.dumps(stats))

      schema = [{'name': 'cat1', 'type': 'STRING'}, {'name': 'cat2', 'type': 'STRING'}]
      features = {'cat1': {'transform': 'one_hot', 'source_column': 'cat1'},
                  'cat2': {'transform': 'embedding', 'source_column': 'cat2'}}
      input_data = ['red,pizza',
                    'blue,',
                    'green,extra']
      results = self._run_graph(output_folder, features, schema, stats, input_data)

      # red/blue/green map to vocab ids 0/1/2.
      for result, expected_result in zip(results['cat1'].flatten().tolist(), [0, 1, 2]):
        self.assertEqual(result, expected_result)

      # pizza -> 0; the empty value and the out-of-vocab token both map to
      # the OOV id (vocab size = 3).
      for result, expected_result in zip(results['cat2'].flatten().tolist(),
                                         [0, 3, 3]):
        self.assertEqual(result, expected_result)

    finally:
      shutil.rmtree(output_folder)
  def test_make_transform_graph_text_tfidf(self):
    """Checks the 'tfidf' transform: sparse token ids and TF-IDF weights."""
    output_folder = tempfile.mkdtemp()
    try:
      # vocab  id
      # red    0
      # blue   1
      # green  2
      # oov    3 (out of vocab)
      # corpus size aka num_examples = 4
      # IDF: log(num_examples/(1+number of examples that have this token))
      # red: log(4/3)
      # blue: log(4/3)
      # green: log(4/2)
      # oov: log(4/1)
      file_io.write_string_to_file(
          os.path.join(output_folder, feature_transforms.VOCAB_ANALYSIS_FILE % 'cat1'),
          '\n'.join(['red,2', 'blue,2', 'green,1']))

      stats = {'column_stats': {}, 'num_examples': 4}
      file_io.write_string_to_file(
          os.path.join(output_folder, feature_transforms.STATS_FILE),
          json.dumps(stats))

      # decode_csv does not like 1 column files with an empty row, so add
      # a key column
      schema = [{'name': 'key', 'type': 'STRING'},
                {'name': 'cat1', 'type': 'STRING'}]
      features = {'key': {'transform': 'key', 'source_column': 'key'},
                  'cat1': {'transform': 'tfidf', 'source_column': 'cat1'}}
      input_data = ['0,red red red',    # doc 0
                    '1,red green red',  # doc 1
                    '2,blue',           # doc 2
                    '3,blue blue',      # doc 3
                    '4,',               # doc 4
                    '5,brown',          # doc 5
                    '6,brown blue']     # doc 6
      results = self._run_graph(output_folder, features, schema, stats, input_data)

      # indices are in the form [doc id, vocab id]
      expected_indices = [[0, 0], [0, 1], [0, 2],
                          [1, 0], [1, 1], [1, 2],
                          [2, 0],
                          [3, 0], [3, 1],
                          [5, 0],
                          [6, 0], [6, 1]]
      expected_ids = [0, 0, 0, 0, 2, 0, 1, 1, 1, 3, 3, 1]
      self.assertEqual(results['cat1_ids'].indices.tolist(), expected_indices)
      self.assertEqual(results['cat1_ids'].dense_shape.tolist(), [7, 3])
      self.assertEqual(results['cat1_ids'].values.tolist(), expected_ids)

      # Note, these are natural logs.
      # Each weight is term-frequency (count / doc length) times IDF.
      log_4_3 = math.log(4.0 / 3.0)
      expected_weights = [
          1.0 / 3.0 * log_4_3, 1.0 / 3.0 * log_4_3, 1.0 / 3.0 * log_4_3,        # doc 0
          1.0 / 3.0 * log_4_3, 1.0 / 3.0 * math.log(2.0), 1.0 / 3.0 * log_4_3,  # doc 1
          math.log(4.0 / 3.0),                                                  # doc 2
          1.0 / 2.0 * log_4_3, 1.0 / 2.0 * log_4_3,                             # doc 3
          math.log(4.0),                                                        # doc 5
          1.0 / 2.0 * math.log(4.0), 1.0 / 2.0 * log_4_3]                       # doc 6

      self.assertEqual(results['cat1_weights'].indices.tolist(), expected_indices)
      self.assertEqual(results['cat1_weights'].dense_shape.tolist(), [7, 3])
      self.assertEqual(results['cat1_weights'].values.size, len(expected_weights))
      for weight, expected_weight in zip(results['cat1_weights'].values.tolist(), expected_weights):
        self.assertAlmostEqual(weight, expected_weight)

    finally:
      shutil.rmtree(output_folder)
def test_make_transform_graph_text_multi_hot(self):
output_folder = tempfile.mkdtemp()
try:
# vocab id
# red 0
# blue 1
# green 2
# oov 3 (out of vocab)
file_io.write_string_to_file(
os.path.join(output_folder,
feature_transforms.VOCAB_ANALYSIS_FILE % 'cat1'),
'\n'.join(['red,2', 'blue,2', 'green,1']))
stats = {'column_stats': {}}
file_io.write_string_to_file(
os.path.join(output_folder, feature_transforms.STATS_FILE),
json.dumps(stats)) # Stats file needed but unused.
# decode_csv does not like 1 column files with an empty row, so add
# a key column
schema = [{'name': 'key', 'type': 'STRING'},
{'name': 'cat1', 'type': 'STRING'}]
features = {'key': {'transform': 'key', 'source_column': 'key'},
'cat1': {'transform': 'multi_hot', 'source_column': 'cat1', 'separator': '|'}}
input_data = ['0,red', # doc 0
'1,red|green', # doc 1
'2,blue', # doc 2
'3,red|blue|green', # doc 3
'4,'] # doc 4
results = self._run_graph(output_folder, features, schema, stats, input_data)
# indices are in the form [doc id, vocab id]
expected_indices = [[0, 0],
[1, 0], [1, 1],
[2, 0],
[3, 0], [3, 1], [3, 2]]
# doc id 0 1 1 2 3 3 3
expected_ids = [0, 0, 2, 1, 0, 1, 2] # noqa
self.assertEqual(results['cat1'].indices.tolist(), expected_indices)
self.assertEqual(results['cat1'].dense_shape.tolist(), [5, 3])
self.assertEqual(results['cat1'].values.tolist(), expected_ids)
finally:
shutil.rmtree(output_folder)
def test_make_transform_graph_text_bag_of_words(self):
output_folder = tempfile.mkdtemp()
try:
# vocab id
# red 0
# blue 1
# green 2
# oov 3 (out of vocab)
file_io.write_string_to_file(
os.path.join(output_folder,
feature_transforms.VOCAB_ANALYSIS_FILE % 'cat1'),
'\n'.join(['red,2', 'blue,2', 'green,1']))
stats = {'column_stats': {}}
file_io.write_string_to_file(
os.path.join(output_folder, feature_transforms.STATS_FILE),
json.dumps(stats)) # Stats file needed but unused.
# decode_csv does not like 1 column files with an empty row, so add
# a key column
schema = [{'name': 'key', 'type': 'STRING'},
{'name': 'cat1', 'type': 'STRING'}]
features = {'key': {'transform': 'key', 'source_column': 'key'},
'cat1': {'transform': 'bag_of_words', 'source_column': 'cat1'}}
input_data = ['0,red red red', # doc 0
'1,red green red', # doc 1
'2,blue', # doc 2
'3,blue blue', # doc 3
'4,', # doc 4
'5,brown', # doc 5
'6,brown blue'] # doc 6
results = self._run_graph(output_folder, features, schema, stats, input_data)
# indices are in the form [doc id, vocab id]
expected_indices = [[0, 0], [0, 1], [0, 2],
[1, 0], [1, 1], [1, 2],
[2, 0],
[3, 0], [3, 1],
[5, 0],
[6, 0], [6, 1]]
# Note in doc 6, is is blue, then brown.
# doc id 0 0 0 1 1 1 2 3 3 5 6 6
expected_ids = [0, 0, 0, 0, 2, 0, 1, 1, 1, 3, 3, 1] # noqa
expected_weights = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
self.assertEqual(results['cat1_ids'].indices.tolist(), expected_indices)
self.assertEqual(results['cat1_ids'].dense_shape.tolist(), [7, 3])
self.assertEqual(results['cat1_ids'].values.tolist(), expected_ids)
self.assertEqual(results['cat1_weights'].indices.tolist(),
expected_indices)
self.assertEqual(results['cat1_weights'].dense_shape.tolist(), [7, 3])
self.assertEqual(results['cat1_weights'].values.size,
len(expected_weights))
for weight, exp_weight in zip(results['cat1_weights'].values.tolist(),
expected_weights):
self.assertAlmostEqual(weight, exp_weight)
finally:
shutil.rmtree(output_folder)
@unittest.skipIf(not HAS_CREDENTIALS, 'GCS access missing')
def test_make_transform_graph_images(self):
print('Testing make_transform_graph with image_to_vec. ' +
'It may take a few minutes because it needs to download a large inception checkpoint.')
def _open_and_encode_image(img_url):
with file_io.FileIO(img_url, 'r') as f:
img = Image.open(f).convert('RGB')
output = cStringIO.StringIO()
img.save(output, 'jpeg')
return base64.urlsafe_b64encode(output.getvalue())
try:
output_folder = tempfile.mkdtemp()
stats_file_path = os.path.join(output_folder, feature_transforms.STATS_FILE)
stats = {'column_stats': {}}
file_io.write_string_to_file(stats_file_path, json.dumps(stats))
schema = [{'name': 'img', 'type': 'STRING'}]
features = {'img': {'transform': 'image_to_vec', 'source_column': 'img'}}
# Test transformation with encoded image content.
img_string1 = _open_and_encode_image(
'gs://cloud-ml-data/img/flower_photos/daisy/15207766_fc2f1d692c_n.jpg')
img_string2 = _open_and_encode_image(
'gs://cloud-ml-data/img/flower_photos/dandelion/8980164828_04fbf64f79_n.jpg')
# Test transformation with direct file path.
img_string3 = 'gs://cloud-ml-data/img/flower_photos/daisy/15207766_fc2f1d692c_n.jpg'
img_string4 = 'gs://cloud-ml-data/img/flower_photos/dandelion/8980164828_04fbf64f79_n.jpg'
input_data = [img_string1, img_string2, img_string3, img_string4]
results = self._run_graph(output_folder, features, schema, stats, input_data)
embeddings = results['img']
self.assertEqual(len(embeddings), 4)
self.assertEqual(len(embeddings[0]), 2048)
self.assertEqual(embeddings[0].dtype, np.float32)
self.assertTrue(any(x != 0.0 for x in embeddings[1]))
self.assertTrue(any(x != 0.0 for x in embeddings[3]))
finally:
shutil.rmtree(output_folder)
# Standard unittest CLI entry point.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
dmacvicar/spacewalk | client/tools/rhncfg/config_common/handler_base.py | 9 | 2908 | #
# Copyright (c) 2008--2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import sys
import string
import getpass
from optparse import OptionParser, Option
import rhn_log
import cfg_exceptions
import local_config
class HandlerBase:
_options_table = []
_option_parser_class = OptionParser
_usage_options = "[options]"
_option_class = Option
def __init__(self, args, repository, mode=None, exec_name=None):
self.repository = repository
self.set_mode(mode)
self.set_exec_name(exec_name)
self.options, self.args = self._parse_args(args)
def set_mode(self, mode):
self.mode = mode
def set_exec_name(self, exec_name):
self.exec_name = exec_name
def _prog(self):
return "%s %s" % (sys.argv[0], self.mode or "<unknown>")
def _parse_args(self, args):
# Parses the arguments and returns a tuple (options, args)
usage = string.join(["%prog", self.mode, self._usage_options])
self._parser = self._option_parser_class(
option_list=self._options_table,
usage=usage)
return self._parser.parse_args(args)
def usage(self):
return self._parser.print_help()
def authenticate(self, username=None, password=None):
# entry point for repository authentication
try:
self.repository.login()
except cfg_exceptions.InvalidSession:
if not username :
username=local_config.get('username')
if not password :
(username, password) = self.get_auth_info(username)
try:
self.repository.login(username=username, password=password)
except cfg_exceptions.InvalidSession, e:
rhn_log.die(1, "Session error: %s\n" % e)
def get_auth_info(self, username=None):
if username is None:
username = self._read_username()
password = getpass.getpass()
return (username, password)
def _read_username(self):
tty = open("/dev/tty", "r+")
tty.write("Username: ")
try:
username = tty.readline()
except KeyboardInterrupt:
tty.write("\n")
sys.exit(0)
if username is None:
# EOF
tty.write("\n")
sys.exit(0)
return string.strip(username)
| gpl-2.0 |
thnee/ansible | test/units/modules/remote_management/oneview/test_oneview_network_set_info.py | 21 | 3617 | # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from units.compat import unittest
from oneview_module_loader import NetworkSetInfoModule
from hpe_test_utils import FactsParamsTestCase
ERROR_MSG = 'Fake message error'

# Module parameter fixtures assigned to the mocked AnsibleModule.params
# in the tests below.
PARAMS_GET_ALL = dict(
    config='config.json',
    name=None
)

# NOTE(review): this fixture appears unused by the tests below -- verify
# whether the without-ethernet get-all test was meant to use it.
PARAMS_GET_ALL_WITHOUT_ETHERNET = dict(
    config='config.json',
    name=None,
    options=['withoutEthernet']
)

PARAMS_GET_BY_NAME = dict(
    config='config.json',
    name='Network Set 1'
)

PARAMS_GET_BY_NAME_WITHOUT_ETHERNET = dict(
    config='config.json',
    name='Network Set 1',
    options=['withoutEthernet']
)
class NetworkSetInfoSpec(unittest.TestCase,
                         FactsParamsTestCase):
    """Unit tests for NetworkSetInfoModule against a mocked OneView client."""

    def setUp(self):
        self.configure_mocks(self, NetworkSetInfoModule)
        self.network_sets = self.mock_ov_client.network_sets
        FactsParamsTestCase.configure_client_mock(self, self.network_sets)

    def test_should_get_all_network_sets(self):
        network_sets = [{
            "name": "Network Set 1",
            "networkUris": ['/rest/ethernet-networks/aaa-bbb-ccc']
        }, {
            "name": "Network Set 2",
            "networkUris": ['/rest/ethernet-networks/ddd-eee-fff', '/rest/ethernet-networks/ggg-hhh-fff']
        }]

        self.network_sets.get_all.return_value = network_sets
        self.mock_ansible_module.params = PARAMS_GET_ALL

        NetworkSetInfoModule().run()

        self.network_sets.get_all.assert_called_once_with()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            network_sets=network_sets)

    def test_should_get_all_network_sets_without_ethernet(self):
        network_sets = [{
            "name": "Network Set 1",
            "networkUris": []
        }, {
            "name": "Network Set 2",
            "networkUris": []
        }]

        self.network_sets.get_all.return_value = network_sets
        # NOTE(review): this uses PARAMS_GET_ALL rather than
        # PARAMS_GET_ALL_WITHOUT_ETHERNET, so the 'withoutEthernet' option
        # path is never exercised here; the mock/assertion pair targets
        # get_all. Looks unintentional -- confirm the intended params and
        # client method for this scenario.
        self.mock_ansible_module.params = PARAMS_GET_ALL

        NetworkSetInfoModule().run()

        self.network_sets.get_all.assert_called_once_with()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            network_sets=network_sets)

    def test_should_get_network_set_by_name(self):
        network_sets = [{
            "name": "Network Set 1",
            "networkUris": ['/rest/ethernet-networks/aaa-bbb-ccc']
        }]

        self.network_sets.get_by.return_value = network_sets
        self.mock_ansible_module.params = PARAMS_GET_BY_NAME

        NetworkSetInfoModule().run()

        self.network_sets.get_by.assert_called_once_with('name', 'Network Set 1')
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            network_sets=network_sets)

    def test_should_get_network_set_by_name_without_ethernet(self):
        network_sets = [{
            "name": "Network Set 1",
            "networkUris": []
        }]

        self.network_sets.get_all_without_ethernet.return_value = network_sets
        self.mock_ansible_module.params = PARAMS_GET_BY_NAME_WITHOUT_ETHERNET

        NetworkSetInfoModule().run()

        # The by-name lookup is expressed as a filter on the without-ethernet
        # endpoint rather than a get_by call.
        expected_filter = "\"'name'='Network Set 1'\""
        self.network_sets.get_all_without_ethernet.assert_called_once_with(filter=expected_filter)
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            network_sets=network_sets)


if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
collinjackson/mojo | testing/scripts/get_compile_targets.py | 76 | 1285 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import os
import sys
import common
def main(argv):
  """Run the 'compile_targets' subcommand of every sibling test script.

  Collects each script's JSON output into one map keyed by script filename
  and writes it to --output.

  Returns 0 on success, or the first non-zero exit code of a child script.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--output', required=True)
  parser.add_argument('args', nargs=argparse.REMAINDER)

  args = parser.parse_args(argv)

  passthrough_args = args.args
  # REMAINDER may legitimately be empty; guard the '--' strip so an empty
  # argument list no longer raises IndexError.
  if passthrough_args and passthrough_args[0] == '--':
    passthrough_args = passthrough_args[1:]

  results = {}

  # Sort for a deterministic iteration order across platforms/filesystems.
  for filename in sorted(os.listdir(common.SCRIPT_DIR)):
    if not filename.endswith('.py'):
      continue
    if filename in ('common.py', 'get_compile_targets.py'):
      continue

    with common.temporary_file() as tempfile_path:
      rc = common.run_command(
          [sys.executable, os.path.join(common.SCRIPT_DIR, filename)] +
          passthrough_args +
          [
            'compile_targets',
            '--output', tempfile_path
          ]
      )
      if rc != 0:
        return rc

      with open(tempfile_path) as f:
        results[filename] = json.load(f)

  with open(args.output, 'w') as f:
    json.dump(results, f)

  return 0
# Script entry point: forward CLI args (minus argv[0]) and propagate the
# exit code.
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| bsd-3-clause |
explosion/spaCy | spacy/lang/te/lex_attrs.py | 2 | 1263 | from ...attrs import LIKE_NUM
# Telugu cardinal number words (zero through crore) recognized by like_num().
_num_words = [
    "సున్నా",
    "శూన్యం",
    "ఒకటి",
    "రెండు",
    "మూడు",
    "నాలుగు",
    "ఐదు",
    "ఆరు",
    "ఏడు",
    "ఎనిమిది",
    "తొమ్మిది",
    "పది",
    "పదకొండు",
    "పన్నెండు",
    "పదమూడు",
    "పద్నాలుగు",
    "పదిహేను",
    "పదహారు",
    "పదిహేడు",
    "పద్దెనిమిది",
    "పందొమ్మిది",
    "ఇరవై",
    "ముప్పై",
    "నలభై",
    "యాభై",
    "అరవై",
    "డెబ్బై",
    "ఎనభై",
    "తొంబై",
    "వంద",
    "నూరు",
    "వెయ్యి",
    "లక్ష",
    "కోటి",
]
def like_num(text):
    """Return True when *text* looks like a number: digits (commas and dots
    stripped), a simple fraction, or a Telugu number word."""
    cleaned = text.replace(",", "").replace(".", "")
    if cleaned.isdigit():
        return True
    if cleaned.count("/") == 1:
        numerator, denominator = cleaned.split("/")
        if numerator.isdigit() and denominator.isdigit():
            return True
    return cleaned.lower() in _num_words
LEX_ATTRS = {LIKE_NUM: like_num}
| mit |
secondfoundation/Second-Foundation-Src | src/haruspex/python/rabbithole/optionshouse.py | 2 | 8188 | import sys, json, httplib, time, datetime, mysqlutil, couchdb
class OH:
    """Client for the OptionsHouse trading API (Python 2).

    Fetches stock quotes and option chains over HTTPS and persists them to
    both MySQL (via mysqlutil) and CouchDB.
    """
    def __init__(self):
        # only works for one session
        # need to look at how the login is being done
        # SECURITY NOTE(review): auth token and account number are
        # hard-coded credentials; they should be loaded from config or
        # the environment, not committed in source.
        self.authToken = "502294fa-10b1-4cd5-ae2c-c46f13a440d6"
        self.account = "1880009"
        self.host = "trading.optionshouse.com"
        s = couchdb.Server()
        self.couch = s['options_test']

        # maps from json structure name to db column name
        self.quote_columns = {}
        self.quote_columns['symbolId'] = 'symbol_sid'
        self.quote_columns['symbol'] = 'symbol'
        self.quote_columns['last'] = 'last_price'
        self.quote_columns['open'] = 'open_price'
        self.quote_columns['prevClose'] = 'prev_close_price'
        self.quote_columns['volume'] = 'volume'
        self.quote_columns['avg10dayVolume'] = 'av_10'
        self.quote_columns['peRatio'] = 'pe_ratio'
        self.quote_columns['pbRatio'] = 'pb_ratio'
        self.quote_columns['beta'] = 'beta'
        self.quote_columns['marketCap'] = 'market_cap'
        self.quote_columns['day200movingAvg'] = 'ma_200'
        self.quote_columns['day50movingAvg'] = 'ma_50'
        self.quote_columns['day21movingAvg'] = 'ma_21'

        # symbol master-table columns
        self.symbol_columns = {}
        self.symbol_columns['symbol'] = 'symbol'
        self.symbol_columns['shortDescription'] = 'name'
        self.symbol_columns['industryName'] = 'industry'
        self.symbol_columns['exchange'] = 'exchange'
        self.symbol_columns['symbolType'] = 'type'

        # option-series columns; call_* and put_* legs share one row
        self.series_columns = {}
        self.series_columns['symbol'] = 'symbol'
        self.series_columns['symbolId'] = 'symbol_sid'
        self.series_columns['symbolLast'] = 'symbol_last_price'
        self.series_columns['series'] = 'series_name'
        self.series_columns['id'] = 'series_id'
        self.series_columns['expDate'] = 'exp_date_str'
        self.series_columns['expDay'] = 'exp_date_day'
        self.series_columns['expMonth'] = 'exp_date_month'
        self.series_columns['expYear'] = 'exp_date_year'
        self.series_columns['strikePrice'] = 'strike_raw'
        self.series_columns['strikeString'] = 'strike_decimal'
        self.series_columns['cbid'] = 'call_bid'
        self.series_columns['cask'] = 'call_ask'
        self.series_columns['clast'] = 'call_last'
        self.series_columns['cchange'] = 'call_change'
        self.series_columns['callAskSize'] = 'call_ask_size'
        self.series_columns['callBidSize'] = 'call_bid_size'
        self.series_columns['cvol'] = 'call_volume'
        self.series_columns['coi'] = 'call_open_interest'
        self.series_columns['cdelt'] = 'call_delta'
        self.series_columns['civ'] = 'call_implied_volatility'
        self.series_columns['cthet'] = 'call_theta'
        self.series_columns['cgam'] = 'call_gamma'
        self.series_columns['cveg'] = 'call_vega'
        self.series_columns['pbid'] = 'put_bid'
        self.series_columns['pask'] = 'put_ask'
        self.series_columns['plast'] = 'put_last'
        self.series_columns['pchange'] = 'put_change'
        self.series_columns['putAskSize'] = 'put_ask_size'
        self.series_columns['putBidSize'] = 'put_bid_size'
        self.series_columns['pvol'] = 'put_volume'
        self.series_columns['poi'] = 'put_open_interest'
        self.series_columns['pdelt'] = 'put_delta'
        self.series_columns['piv'] = 'put_implied_volatility'
        self.series_columns['pthet'] = 'put_theta'
        self.series_columns['pgam'] = 'put_gamma'
        self.series_columns['pveg'] = 'put_vega'

    def is_market_open(self):
        # Hard-coded 8:30-16:00 window on the host clock -- presumably US
        # Central market hours; TODO confirm timezone assumptions.
        now = datetime.datetime.now()
        return (9 <= now.hour <= 15) or (now.hour == 8 and now.minute >= 30)

    def get_moneyness(self, series_json):
        # True when the stock trades at or above the strike (call side in
        # the money).
        strike = float(series_json['strikeString'])
        stock_price = series_json['symbolLast']
        return stock_price >= strike

    def get_response(self, action_string, data):
        # Wrap the action in the EZList envelope the API expects, POST it,
        # and return the decoded JSON response.
        action = {}
        action['action'] = action_string
        action['data'] = data
        ezList = {}
        ezList['EZList'] = [action]
        req_payload = json.dumps(ezList)
        conn = httplib.HTTPSConnection(self.host)
        conn.request("POST", "/m", req_payload)
        return json.loads(conn.getresponse().read())

    def get_stock_quote(self, symbol):
        # Fetch a full quote (fundamentals included) for one symbol.
        data = {}
        data['authToken'] = self.authToken
        data['account'] = self.account
        data['symbol'] = symbol
        data['description'] = True
        data['fundamentals'] = True
        data['bs'] = True
        data['showDivEarnDetails'] = True
        quote_json = self.get_response('view.quote', data)
        print quote_json
        return quote_json['EZMessage']['data']['quote']

    def get_chain(self, symbol, ntm):
        # Fetch the option chain; ntm = number of near-the-money strikes.
        data = {}
        data['authToken'] = self.authToken
        data['account'] = self.account
        data['symbol'] = symbol
        data['weeklies'] = False
        data['quarterlies'] = False
        data['nonstandard'] = False
        data['greeks'] = True
        data['bs'] = True
        data['quotesAfter'] = 0
        data['ntm'] = ntm
        data['nextGen'] = True
        chain_json = self.get_response('view.chain', data)
        # print chain_json
        return chain_json['EZMessage']['data']['optionQuote']

    def save_chain_quote(self, symbol, db):
        # Persist the stock quote plus every option series for the symbol
        # to both MySQL and CouchDB.
        quote_json = self.get_stock_quote(symbol)
        # look up primary key for this symbol
        symbol_row = self.get_symbol(symbol, db)
        symbolId = symbol_row[0][0]
        # add primary key field to json map
        quote_json['symbolId'] = symbolId
        mysqlutil.insert_dict(quote_json, self.quote_columns, 'quote', db)
        self.couch.save(quote_json)
        chain_json = self.get_chain(symbol, 10)
        # last_exp = ""
        for series_json in chain_json:
            quote = quote_json['last']
            timestamp = str(datetime.datetime.now())
            # exp = series_json['exp']
            # add symbol foreign key
            series_json['symbolId'] = symbolId
            # add symbol last quote info for convenience
            series_json['symbol'] = symbol
            series_json['symbolLast'] = quote
            series_json['timestamp'] = timestamp
            # save in couchdb and mysql
            mysqlutil.insert_dict(series_json, self.series_columns, 'series', db)
            series_json['docType'] = 'chain'
            series_json['_id'] = timestamp + ':' + symbol + ':' + quote
            self.couch.save(series_json)

    def get_symbol(self, symbol, db):
        # Return the symbol master row, inserting it on first sight.
        # SECURITY NOTE(review): SQL is built by string concatenation;
        # a parameterized query would prevent injection if symbol ever
        # comes from untrusted input.
        # check if symbol is saved
        query = "select * from symbol where symbol = '" + symbol + "'"
        results = mysqlutil.db_get(query, db)
        # if not saved, insert it
        if (len(results) == 0):
            quote_json = self.get_stock_quote(symbol)
            mysqlutil.insert_dict(quote_json, self.symbol_columns, 'symbol', db)
            query = "select * from symbol where symbol = '" + symbol + "'"
            results = mysqlutil.db_get(query, db)
        return results

    def save_stock_quote(self, symbol, db):
        # Persist a single stock quote to MySQL and CouchDB.
        symbol_row = self.get_symbol(symbol, db)
        quote_json = self.get_stock_quote(symbol)
        quote_json['symbolId'] = symbol_row[0][0]
        mysqlutil.insert_dict(quote_json, self.quote_columns, 'quote', db)
        # also save in couch db
        quote_json['docType'] = 'quote'
        self.couch.save(quote_json)

    def save_chain_quotes(self, symbols, db, throttle):
        # Batched chain saver (see save_symbol_data).
        self.save_symbol_data(self.save_chain_quote, symbols, db, throttle)

    def save_stock_quotes(self, symbols, db, throttle):
        # Batched quote saver (see save_symbol_data).
        self.save_symbol_data(self.save_stock_quote, symbols, db, throttle)

    def save_symbol_data(self, symbol_save_func, symbols, db, throttle):
        # Apply symbol_save_func to each symbol, sleeping `throttle`
        # seconds between requests and retrying transient IOErrors up to
        # the per-symbol and overall limits below.
        max_symbol_retries = 3
        # sanity
        max_total_retries = 30
        total_retries = 0
        for symbol in symbols:
            symbol_retries = 0
            retry = True
            while (retry and symbol_retries <= max_symbol_retries and total_retries <= max_total_retries):
                # don't pound the server
                time.sleep(throttle)
                # false unless a network-related error occurs
                # these could be intermittent
                retry = False
                try:
                    symbol_save_func(symbol, db)
                except IOError, e:
                    print str(e)
                    print "Network Error: will retry symbol " + symbol
                    symbol_retries += 1
                    total_retries += 1
                    retry = True
                except Exception, e:
                    print str(e)
                    print "Unexpected Error: will skip symbol " + symbol
if (len(sys.argv) == 1):
print "Commands: chain [symbol], quote [symbol]"
else:
oh = OH()
command = sys.argv[1]
symbol_file = sys.argv[2]
try:
# try to treat arg as a file name
f = open(symbol_file, 'r')
symbols = f.read()
symbols = symbols.split(',')
except:
# treat arg as a single symbol name
symbols = [symbol_file]
# substitute your own db creds
db = mysqlutil.db_connect("root", "root", "opt")
if (command == 'quote'):
oh.save_stock_quotes(symbols, db, 0.6)
elif (command == 'chain'):
oh.save_chain_quotes(symbols, db, 0.6)
| lgpl-2.1 |
smi96/django-blog_website | lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/_base.py | 436 | 7014 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type, string_types
__all__ = ["DOCUMENT", "DOCTYPE", "TEXT", "ELEMENT", "COMMENT", "ENTITY", "UNKNOWN",
"TreeWalker", "NonRecursiveTreeWalker"]
from xml.dom import Node
# Token-type constants reuse the xml.dom node-type codes where one exists;
# UNKNOWN is a sentinel for anything else.
DOCUMENT = Node.DOCUMENT_NODE
DOCTYPE = Node.DOCUMENT_TYPE_NODE
TEXT = Node.TEXT_NODE
ELEMENT = Node.ELEMENT_NODE
COMMENT = Node.COMMENT_NODE
ENTITY = Node.ENTITY_NODE
UNKNOWN = "<#UNKNOWN#>"

from ..constants import voidElements, spaceCharacters
# Collapse the space-character set into a single string usable with
# str.lstrip()/str.rstrip().
spaceCharacters = "".join(spaceCharacters)
def to_text(s, blank_if_none=True):
    """Coerce *s* to text; None becomes "" (or stays None when
    blank_if_none is false)."""
    if s is None:
        return "" if blank_if_none else None
    return s if isinstance(s, text_type) else text_type(s)
def is_text_or_none(string):
    """Return True when *string* is None or a text instance."""
    if string is None:
        return True
    return isinstance(string, string_types)
class TreeWalker(object):
    """Base class for tree walkers: iterating one yields serializer tokens
    (dicts keyed by "type") describing the wrapped tree."""

    def __init__(self, tree):
        self.tree = tree

    def __iter__(self):
        raise NotImplementedError

    def error(self, msg):
        # Errors are reported in-band as SerializeError tokens.
        return {"type": "SerializeError", "data": msg}

    def emptyTag(self, namespace, name, attrs, hasChildren=False):
        """Yield an EmptyTag token for a void element, plus an error token
        if the element unexpectedly has children."""
        assert namespace is None or isinstance(namespace, string_types), type(namespace)
        assert isinstance(name, string_types), type(name)
        # The generator expression intentionally re-binds (namespace, name)
        # to each attribute key while validating attrs.
        assert all((namespace is None or isinstance(namespace, string_types)) and
                   isinstance(name, string_types) and
                   isinstance(value, string_types)
                   for (namespace, name), value in attrs.items())

        yield {"type": "EmptyTag", "name": to_text(name, False),
               "namespace": to_text(namespace),
               "data": attrs}
        if hasChildren:
            yield self.error("Void element has children")

    def startTag(self, namespace, name, attrs):
        """Return a StartTag token with text-normalized attribute data."""
        assert namespace is None or isinstance(namespace, string_types), type(namespace)
        assert isinstance(name, string_types), type(name)
        assert all((namespace is None or isinstance(namespace, string_types)) and
                   isinstance(name, string_types) and
                   isinstance(value, string_types)
                   for (namespace, name), value in attrs.items())

        return {"type": "StartTag",
                "name": text_type(name),
                "namespace": to_text(namespace),
                "data": dict(((to_text(namespace, False), to_text(name)),
                              to_text(value, False))
                             for (namespace, name), value in attrs.items())}

    def endTag(self, namespace, name):
        """Return an EndTag token."""
        assert namespace is None or isinstance(namespace, string_types), type(namespace)
        # NOTE(review): the assert message below uses type(namespace);
        # it looks like type(name) was intended.
        assert isinstance(name, string_types), type(namespace)

        return {"type": "EndTag",
                "name": to_text(name, False),
                "namespace": to_text(namespace),
                "data": {}}

    def text(self, data):
        """Yield tokens for character data, splitting leading and trailing
        whitespace into separate SpaceCharacters tokens."""
        assert isinstance(data, string_types), type(data)

        data = to_text(data)
        middle = data.lstrip(spaceCharacters)
        left = data[:len(data) - len(middle)]
        if left:
            yield {"type": "SpaceCharacters", "data": left}
        data = middle
        middle = data.rstrip(spaceCharacters)
        right = data[len(middle):]
        if middle:
            yield {"type": "Characters", "data": middle}
        if right:
            yield {"type": "SpaceCharacters", "data": right}

    def comment(self, data):
        """Return a Comment token."""
        assert isinstance(data, string_types), type(data)

        return {"type": "Comment", "data": text_type(data)}

    def doctype(self, name, publicId=None, systemId=None, correct=True):
        """Return a Doctype token."""
        assert is_text_or_none(name), type(name)
        assert is_text_or_none(publicId), type(publicId)
        assert is_text_or_none(systemId), type(systemId)

        return {"type": "Doctype",
                "name": to_text(name),
                "publicId": to_text(publicId),
                "systemId": to_text(systemId),
                "correct": to_text(correct)}

    def entity(self, name):
        """Return an Entity token."""
        assert isinstance(name, string_types), type(name)

        return {"type": "Entity", "name": text_type(name)}

    def unknown(self, nodeType):
        """Report an unrecognized node type as an in-band error token."""
        return self.error("Unknown node type: " + nodeType)
class NonRecursiveTreeWalker(TreeWalker):
    """TreeWalker that performs an iterative (stack-free) depth-first
    traversal via subclass-provided navigation primitives."""

    def getNodeDetails(self, node):
        raise NotImplementedError

    def getFirstChild(self, node):
        raise NotImplementedError

    def getNextSibling(self, node):
        raise NotImplementedError

    def getParentNode(self, node):
        raise NotImplementedError

    def __iter__(self):
        # Depth-first walk: descend to the first child when there is one,
        # otherwise advance to the next sibling, climbing back through
        # parents (emitting EndTag tokens) when a subtree is exhausted.
        currentNode = self.tree
        while currentNode is not None:
            details = self.getNodeDetails(currentNode)
            type, details = details[0], details[1:]
            hasChildren = False

            if type == DOCTYPE:
                yield self.doctype(*details)

            elif type == TEXT:
                for token in self.text(*details):
                    yield token

            elif type == ELEMENT:
                namespace, name, attributes, hasChildren = details
                if name in voidElements:
                    for token in self.emptyTag(namespace, name, attributes,
                                               hasChildren):
                        yield token
                    # Children of void elements are reported as an error by
                    # emptyTag and then skipped.
                    hasChildren = False
                else:
                    yield self.startTag(namespace, name, attributes)

            elif type == COMMENT:
                yield self.comment(details[0])

            elif type == ENTITY:
                yield self.entity(details[0])

            elif type == DOCUMENT:
                hasChildren = True

            else:
                yield self.unknown(details[0])

            if hasChildren:
                firstChild = self.getFirstChild(currentNode)
            else:
                firstChild = None

            if firstChild is not None:
                currentNode = firstChild
            else:
                # No children: emit pending EndTags while climbing until a
                # next sibling is found or the traversal root is reached.
                while currentNode is not None:
                    details = self.getNodeDetails(currentNode)
                    type, details = details[0], details[1:]
                    if type == ELEMENT:
                        namespace, name, attributes, hasChildren = details
                        if name not in voidElements:
                            yield self.endTag(namespace, name)
                    if self.tree is currentNode:
                        currentNode = None
                        break
                    nextSibling = self.getNextSibling(currentNode)
                    if nextSibling is not None:
                        currentNode = nextSibling
                        break
                    else:
                        currentNode = self.getParentNode(currentNode)
beagles/neutron_hacking | neutron/tests/unit/test_agent_rpc.py | 1 | 4858 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from contextlib import nested
import mock
from oslo.config import cfg
from neutron.agent import rpc
from neutron.openstack.common import context
from neutron.tests import base
class AgentRPCPluginApi(base.BaseTestCase):
    """Checks that PluginApi methods forward through the RPC client as
    `call` invocations and propagate the returned value."""

    def _test_rpc_call(self, method):
        plugin_api = rpc.PluginApi('fake_topic')
        ctxt = context.RequestContext('fake_user', 'fake_project')
        expected = 'foo'
        with mock.patch('oslo.messaging.RPCClient.call') as mocked_call:
            mocked_call.return_value = expected
            bound_method = getattr(plugin_api, method)
            # tunnel_sync has a different signature from the device methods.
            if method == 'tunnel_sync':
                result = bound_method(ctxt, 'fake_tunnel_ip')
            else:
                result = bound_method(ctxt, 'fake_device', 'fake_agent_id')
            self.assertEqual(result, expected)

    def test_get_device_details(self):
        self._test_rpc_call('get_device_details')

    def test_update_device_down(self):
        self._test_rpc_call('update_device_down')

    def test_tunnel_sync(self):
        self._test_rpc_call('tunnel_sync')
class AgentPluginReportState(base.BaseTestCase):
    """Checks that report_state uses RPC `call` or `cast` as requested."""

    strtime = 'neutron.openstack.common.timeutils.strtime'

    def _assert_report_state(self, client_method, use_call):
        # Shared body for the call/cast variants: both tests previously
        # duplicated this verbatim except for the patched client method.
        topic = 'test'
        reportStateAPI = rpc.PluginReportStateAPI(topic)
        expected_agent_state = {'agent': 'test'}
        with nested(mock.patch.object(reportStateAPI.client, client_method),
                    mock.patch(self.strtime)) as (rpc_mock, time):
            time.return_value = 'TESTTIME'
            ctxt = context.RequestContext('fake_user', 'fake_project')
            if use_call:
                reportStateAPI.report_state(ctxt, expected_agent_state,
                                            use_call=True)
            else:
                reportStateAPI.report_state(ctxt, expected_agent_state)
            expected_args = mock.call(
                ctxt, 'report_state',
                agent_state={'agent_state': expected_agent_state},
                time='TESTTIME')
            self.assertEqual(rpc_mock.call_args, expected_args)

    def test_plugin_report_state_use_call(self):
        self._assert_report_state('call', use_call=True)

    def test_plugin_report_state_cast(self):
        self._assert_report_state('cast', use_call=False)
class AgentRPCMethods(base.BaseTestCase):
    """Checks rpc.create_servers wiring of topics, servers and endpoints."""

    def _assert_create_servers(self, consumer_spec, expected_server):
        # Shared body for the two consumer tests, which previously
        # duplicated everything except the consumer tuple and server name.
        endpoint = mock.Mock()
        expected_get_server = [
            mock.call(mock.ANY, endpoints=[endpoint]),
            mock.call().start(),
        ]
        expected_target = [
            mock.call(topic='foo-topic-op', server=expected_server),
        ]
        get_server_call = 'neutron.common.rpc.get_server'
        target_call = 'oslo.messaging.Target'
        with nested(mock.patch(get_server_call),
                    mock.patch(target_call)) as (get_server, target):
            rpc.create_servers([endpoint], 'foo', [consumer_spec])
            target.assert_has_calls(expected_target)
            get_server.assert_has_calls(expected_get_server)

    def test_create_consumers(self):
        # Without an explicit node name the local host is used as server.
        self._assert_create_servers(('topic', 'op'), cfg.CONF.host)

    def test_create_consumers_with_node_name(self):
        self._assert_create_servers(('topic', 'op', 'node1'), 'node1')
duyetdev/openerp-6.1.1 | openerp/addons/base_calendar/base_calendar.py | 7 | 78671 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta, date
from dateutil import parser
from dateutil import rrule
from dateutil.relativedelta import relativedelta
from osv import fields, osv
from service import web_services
from tools.translate import _
import pytz
import re
import time
import tools
# Month number -> English month name. Deliberately hard-coded rather than
# derived from calendar.month_name, which is locale-dependent.
months = {
    1: "January", 2: "February", 3: "March", 4: "April", \
    5: "May", 6: "June", 7: "July", 8: "August", 9: "September", \
    10: "October", 11: "November", 12: "December"
}
def get_recurrent_dates(rrulestring, exdate, startdate=None, exrule=None):
    """
    Get recurrent dates based on Rule string considering exdate and start date
    @param rrulestring: Rulestring
    @param exdate: List of exception dates for rrule
    @param startdate: Startdate for computing recurrent dates
    @param exrule: Optional exception rule string applied on top of rrulestring
    @return: List of Recurrent dates
    """
    def todate(date_str):
        # Keep only the digits (iCal values may carry 'Z', separators,
        # 'TZID=...' prefixes) and let dateutil parse the remainder.
        val = parser.parse(''.join((re.compile('\d')).findall(date_str)))
        return val

    if not startdate:
        startdate = datetime.now()
    if not exdate:
        exdate = []
    rset1 = rrule.rrulestr(str(rrulestring), dtstart=startdate, forceset=True)
    # Renamed the loop variable: it previously shadowed the `date` class
    # imported at module level. Also register exceptions through the public
    # rruleset.exdate() API instead of appending to the private _exdate list.
    for exdate_str in exdate:
        rset1.exdate(todate(exdate_str))
    if exrule:
        rset1.exrule(rrule.rrulestr(str(exrule), dtstart=startdate))
    return list(rset1)
def base_calendar_id2real_id(base_calendar_id=None, with_date=False):
    """
    Convert a virtual event id ("<real_id>-<YYYYMMDDHHMMSS>") back into the
    real database id of the underlying event.
    @param base_calendar_id: virtual or real calendar id
    @param with_date: falsy -> return only the real id; otherwise it is the
           event duration in hours and a (real_id, start, end) tuple is
           returned instead
    """
    if base_calendar_id and isinstance(base_calendar_id, (str, unicode)):
        parts = base_calendar_id.split('-')
        if len(parts) >= 2:
            real_id = parts[0]
            if not with_date:
                return int(real_id)
            real_date = time.strftime("%Y-%m-%d %H:%M:%S",
                                      time.strptime(parts[1], "%Y%m%d%H%M%S"))
            start = datetime.strptime(real_date, "%Y-%m-%d %H:%M:%S")
            end = start + timedelta(hours=with_date)
            return (int(real_id), real_date, end.strftime("%Y-%m-%d %H:%M:%S"))
    # Plain (non-virtual) id: coerce truthy values to int, pass through falsy.
    return base_calendar_id and int(base_calendar_id) or base_calendar_id
def real_id2base_calendar_id(real_id, recurrent_date):
    """
    Convert a real record id into a virtual id by appending the recurrence
    timestamp, e.g. (1, '2009-12-01 10:00:00') -> '1-20091201100000'.
    @return: the virtual id string, or the unchanged real_id when either
             argument is missing
    """
    if not (real_id and recurrent_date):
        return real_id
    stamp = time.strptime(recurrent_date, "%Y-%m-%d %H:%M:%S")
    return '%d-%s' % (real_id, time.strftime("%Y%m%d%H%M%S", stamp))
def _links_get(self, cr, uid, context=None):
    """
    Selection helper: list every res.request.link as (model, label) pairs,
    used as the selection of reference fields.
    @param cr: the current row, from the database cursor,
    @param uid: the current user's ID for security checks,
    @param context: A standard dictionary for contextual values
    @return: list of (object, name) tuples
    """
    link_obj = self.pool.get('res.request.link')
    link_ids = link_obj.search(cr, uid, [])
    links = link_obj.read(cr, uid, link_ids, ['object', 'name'], context=context)
    return [(link['object'], link['name']) for link in links]
html_invitation = """
<html>
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<title>%(name)s</title>
</head>
<body>
<table border="0" cellspacing="10" cellpadding="0" width="100%%"
style="font-family: Arial, Sans-serif; font-size: 14">
<tr>
<td width="100%%">Hello,</td>
</tr>
<tr>
<td width="100%%">You are invited for <i>%(company)s</i> Event.</td>
</tr>
<tr>
<td width="100%%">Below are the details of event:</td>
</tr>
</table>
<table cellspacing="0" cellpadding="5" border="0" summary=""
style="width: 90%%; font-family: Arial, Sans-serif; border: 1px Solid #ccc; background-color: #f6f6f6">
<tr valign="center" align="center">
<td bgcolor="DFDFDF">
<h3>%(name)s</h3>
</td>
</tr>
<tr>
<td>
<table cellpadding="8" cellspacing="0" border="0"
style="font-size: 14" summary="Eventdetails" bgcolor="f6f6f6"
width="90%%">
<tr>
<td width="21%%">
<div><b>Start Date</b></div>
</td>
<td><b>:</b></td>
<td>%(start_date)s</td>
<td width="15%%">
<div><b>End Date</b></div>
</td>
<td><b>:</b></td>
<td width="25%%">%(end_date)s</td>
</tr>
<tr valign="top">
<td><b>Description</b></td>
<td><b>:</b></td>
<td colspan="3">%(description)s</td>
</tr>
<tr valign="top">
<td>
<div><b>Location</b></div>
</td>
<td><b>:</b></td>
<td colspan="3">%(location)s</td>
</tr>
<tr valign="top">
<td>
<div><b>Event Attendees</b></div>
</td>
<td><b>:</b></td>
<td colspan="3">
<div>
<div>%(attendees)s</div>
</div>
</td>
</tr>
</table>
</td>
</tr>
</table>
<table border="0" cellspacing="10" cellpadding="0" width="100%%"
style="font-family: Arial, Sans-serif; font-size: 14">
<tr>
<td width="100%%">From:</td>
</tr>
<tr>
<td width="100%%">%(user)s</td>
</tr>
<tr valign="top">
<td width="100%%">-<font color="a7a7a7">-------------------------</font></td>
</tr>
<tr>
<td width="100%%"> <font color="a7a7a7">%(sign)s</font></td>
</tr>
</table>
</body>
</html>
"""
class calendar_attendee(osv.osv):
    """
    Calendar Attendee Information
    """
    _name = 'calendar.attendee'
    _description = 'Attendee information'
    _rec_name = 'cutype'  # records are named after their invite type
    __attribute__ = {}    # extension hook for CalDAV attribute mapping
def _get_address(self, name=None, email=None):
"""
Gives email information in ical CAL-ADDRESS type format
@param name: Name for CAL-ADDRESS value
@param email: Email address for CAL-ADDRESS value
"""
if name and email:
name += ':'
return (name or '') + (email and ('MAILTO:' + email) or '')
    def _compute_data(self, cr, uid, ids, name, arg, context=None):
        """
        Function-field getter computing one attendee field at a time.
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: List of calendar attendee's IDs.
        @param name: list of field names to compute; only the first is used
        @param context: A standard dictionary for contextual values
        @return: Dictionary of form {id: {'field name': value}}.
        """
        # Only one field is computed per call despite the multi= grouping.
        name = name[0]
        result = {}
        for attdata in self.browse(cr, uid, ids, context=context):
            id = attdata.id  # NOTE: shadows the builtin `id` (kept as-is)
            result[id] = {}
            if name == 'sent_by':
                if not attdata.sent_by_uid:
                    result[id][name] = ''
                    continue
                else:
                    result[id][name] = self._get_address(attdata.sent_by_uid.name, \
                                        attdata.sent_by_uid.user_email)
            if name == 'cn':
                # Common name: prefer linked user, then contact, then raw email.
                if attdata.user_id:
                    result[id][name] = attdata.user_id.name
                elif attdata.partner_address_id:
                    result[id][name] = attdata.partner_address_id.name or attdata.partner_id.name
                else:
                    result[id][name] = attdata.email or ''
            if name == 'delegated_to':
                # Comma-separated MAILTO list of child attendees with an email.
                todata = []
                for child in attdata.child_ids:
                    if child.email:
                        todata.append('MAILTO:' + child.email)
                result[id][name] = ', '.join(todata)
            if name == 'delegated_from':
                fromdata = []
                for parent in attdata.parent_ids:
                    if parent.email:
                        fromdata.append('MAILTO:' + parent.email)
                result[id][name] = ', '.join(fromdata)
            if name == 'event_date':
                # attdata.ref is the reference to the related event record.
                if attdata.ref:
                    result[id][name] = attdata.ref.date
                else:
                    result[id][name] = False
            if name == 'event_end_date':
                if attdata.ref:
                    result[id][name] = attdata.ref.date_deadline
                else:
                    result[id][name] = False
            if name == 'sent_by_uid':
                if attdata.ref:
                    result[id][name] = (attdata.ref.user_id.id, attdata.ref.user_id.name)
                else:
                    result[id][name] = uid
            if name == 'language':
                user_obj = self.pool.get('res.users')
                lang = user_obj.read(cr, uid, uid, ['context_lang'], context=context)['context_lang']
                # iCal LANGUAGE parameter uses '-' instead of '_'.
                result[id][name] = lang.replace('_', '-')
        return result
def _links_get(self, cr, uid, context=None):
"""
Get request link for ref field in calendar attendee.
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param context: A standard dictionary for contextual values
@return: list of dictionary which contain object and name and id.
"""
obj = self.pool.get('res.request.link')
ids = obj.search(cr, uid, [])
res = obj.read(cr, uid, ids, ['object', 'name'], context=context)
return [(r['object'], r['name']) for r in res]
def _lang_get(self, cr, uid, context=None):
"""
Get language for language selection field.
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param context: A standard dictionary for contextual values
@return: list of dictionary which contain code and name and id.
"""
obj = self.pool.get('res.lang')
ids = obj.search(cr, uid, [])
res = obj.read(cr, uid, ids, ['code', 'name'], context=context)
res = [((r['code']).replace('_', '-').lower(), r['name']) for r in res]
return res
_columns = {
'cutype': fields.selection([('individual', 'Individual'), \
('group', 'Group'), ('resource', 'Resource'), \
('room', 'Room'), ('unknown', 'Unknown') ], \
'Invite Type', help="Specify the type of Invitation"),
'member': fields.char('Member', size=124,
help="Indicate the groups that the attendee belongs to"),
'role': fields.selection([('req-participant', 'Participation required'), \
('chair', 'Chair Person'), \
('opt-participant', 'Optional Participation'), \
('non-participant', 'For information Purpose')], 'Role', \
help='Participation role for the calendar user'),
'state': fields.selection([('tentative', 'Tentative'),
('needs-action', 'Needs Action'),
('accepted', 'Accepted'),
('declined', 'Declined'),
('delegated', 'Delegated')], 'State', readonly=True, \
help="Status of the attendee's participation"),
'rsvp': fields.boolean('Required Reply?',
help="Indicats whether the favor of a reply is requested"),
'delegated_to': fields.function(_compute_data, \
string='Delegated To', type="char", size=124, store=True, \
multi='delegated_to', help="The users that the original \
request was delegated to"),
'delegated_from': fields.function(_compute_data, string=\
'Delegated From', type="char", store=True, size=124, multi='delegated_from'),
'parent_ids': fields.many2many('calendar.attendee', 'calendar_attendee_parent_rel', \
'attendee_id', 'parent_id', 'Delegrated From'),
'child_ids': fields.many2many('calendar.attendee', 'calendar_attendee_child_rel', \
'attendee_id', 'child_id', 'Delegrated To'),
'sent_by': fields.function(_compute_data, string='Sent By', \
type="char", multi='sent_by', store=True, size=124, \
help="Specify the user that is acting on behalf of the calendar user"),
'sent_by_uid': fields.function(_compute_data, string='Sent By User', \
type="many2one", relation="res.users", multi='sent_by_uid'),
'cn': fields.function(_compute_data, string='Common name', \
type="char", size=124, multi='cn', store=True),
'dir': fields.char('URI Reference', size=124, help="Reference to the URI\
that points to the directory information corresponding to the attendee."),
'language': fields.function(_compute_data, string='Language', \
type="selection", selection=_lang_get, multi='language', \
store=True, help="To specify the language for text values in a\
property or property parameter."),
'user_id': fields.many2one('res.users', 'User'),
'partner_address_id': fields.many2one('res.partner.address', 'Contact'),
'partner_id': fields.related('partner_address_id', 'partner_id', type='many2one', \
relation='res.partner', string='Partner', help="Partner related to contact"),
'email': fields.char('Email', size=124, help="Email of Invited Person"),
'event_date': fields.function(_compute_data, string='Event Date', \
type="datetime", multi='event_date'),
'event_end_date': fields.function(_compute_data, \
string='Event End Date', type="datetime", \
multi='event_end_date'),
'ref': fields.reference('Event Ref', selection=_links_get, size=128),
'availability': fields.selection([('free', 'Free'), ('busy', 'Busy')], 'Free/Busy', readonly="True"),
}
_defaults = {
'state': 'needs-action',
'role': 'req-participant',
'rsvp': True,
'cutype': 'individual',
}
    def copy(self, cr, uid, id, default=None, context=None):
        # Duplicating an attendee makes no sense: always refuse.
        raise osv.except_osv(_('Warning!'), _('You cannot duplicate a calendar attendee.'))
    def get_ics_file(self, cr, uid, event_obj, context=None):
        """
        Returns iCalendar (.ics) file content for the event invitation.
        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param event_obj: Event object (browse record)
        @param context: A standard dictionary for contextual values
        @return: .ics file content as a string, or None when the optional
                 `vobject` library is not installed
        """
        res = None
        def ics_datetime(idate, short=False):
            # Date-only strings (or short=True) become a date, full strings
            # a datetime; empty values map to False.
            if idate:
                if short or len(idate)<=10:
                    return date.fromtimestamp(time.mktime(time.strptime(idate, '%Y-%m-%d')))
                else:
                    return datetime.strptime(idate, '%Y-%m-%d %H:%M:%S')
            else:
                return False
        try:
            # FIXME: why isn't this in CalDAV?
            import vobject
        except ImportError:
            return res
        cal = vobject.iCalendar()
        event = cal.add('vevent')
        if not event_obj.date_deadline or not event_obj.date:
            raise osv.except_osv(_('Warning !'),_("Couldn't Invite because date is not specified!"))
        event.add('created').value = ics_datetime(time.strftime('%Y-%m-%d %H:%M:%S'))
        event.add('dtstart').value = ics_datetime(event_obj.date)
        event.add('dtend').value = ics_datetime(event_obj.date_deadline)
        event.add('summary').value = event_obj.name
        if event_obj.description:
            event.add('description').value = event_obj.description
        if event_obj.location:
            event.add('location').value = event_obj.location
        if event_obj.rrule:
            event.add('rrule').value = event_obj.rrule
        # ORGANIZER: an explicit organizer string wins over organizer_id/user_id.
        if event_obj.organizer:
            event_org = event.add('organizer')
            event_org.params['CN'] = [event_obj.organizer]
            event_org.value = 'MAILTO:' + (event_obj.organizer)
        elif event_obj.user_id or event_obj.organizer_id:
            event_org = event.add('organizer')
            organizer = event_obj.organizer_id
            if not organizer:
                organizer = event_obj.user_id
            event_org.params['CN'] = [organizer.name]
            event_org.value = 'MAILTO:' + (organizer.user_email or organizer.name)
        if event_obj.alarm_id:
            # computes alarm data
            valarm = event.add('valarm')
            alarm_object = self.pool.get('res.alarm')
            alarm_data = alarm_object.read(cr, uid, event_obj.alarm_id.id, context=context)
            # Compute trigger data: negative duration means "before".
            interval = alarm_data['trigger_interval']
            occurs = alarm_data['trigger_occurs']
            duration = (occurs == 'after' and alarm_data['trigger_duration']) \
                                            or -(alarm_data['trigger_duration'])
            related = alarm_data['trigger_related']
            trigger = valarm.add('TRIGGER')
            trigger.params['related'] = [related.upper()]
            if interval == 'days':
                delta = timedelta(days=duration)
            if interval == 'hours':
                delta = timedelta(hours=duration)
            if interval == 'minutes':
                delta = timedelta(minutes=duration)
            trigger.value = delta
            # Compute other details
            valarm.add('DESCRIPTION').value = alarm_data['name'] or 'OpenERP'
        # One ATTENDEE entry per invitee, carrying CUTYPE/ROLE/RSVP params.
        for attendee in event_obj.attendee_ids:
            attendee_add = event.add('attendee')
            attendee_add.params['CUTYPE'] = [str(attendee.cutype)]
            attendee_add.params['ROLE'] = [str(attendee.role)]
            attendee_add.params['RSVP'] = [str(attendee.rsvp)]
            attendee_add.value = 'MAILTO:' + (attendee.email or '')
        res = cal.serialize()
        return res
    def _send_mail(self, cr, uid, ids, mail_to, email_from=tools.config.get('email_from', False), context=None):
        """
        Send the HTML invitation mail (with .ics attachment) for the events
        the given attendees are invited to.
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: List of attendee's IDs.
        @param mail_to: list of recipient e-mail addresses
        @param email_from: sender address.
               NOTE(review): this default is evaluated once at import time,
               so later changes to the 'email_from' config value are not
               picked up by callers relying on the default — confirm.
        @param context: A standard dictionary for contextual values
        @return: True
        """
        if context is None:
            context = {}
        company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.name
        mail_message = self.pool.get('mail.message')
        for att in self.browse(cr, uid, ids, context=context):
            # Signature of the inviting user, converted to HTML line breaks.
            sign = att.sent_by_uid and att.sent_by_uid.signature or ''
            sign = '<br>'.join(sign and sign.split('\n') or [])
            res_obj = att.ref
            if res_obj:
                att_infos = []
                sub = res_obj.name
                # Every other invitation pointing at the same document.
                other_invitation_ids = self.search(cr, uid, [('ref', '=', res_obj._name + ',' + str(res_obj.id))])
                for att2 in self.browse(cr, uid, other_invitation_ids):
                    # One "name - Status: X" line per invitee of the event.
                    att_infos.append(((att2.user_id and att2.user_id.name) or \
                                 (att2.partner_id and att2.partner_id.name) or \
                                 att2.email) + ' - Status: ' + att2.state.title())
                body_vals = {'name': res_obj.name,
                             'start_date': res_obj.date,
                             'end_date': res_obj.date_deadline or False,
                             'description': res_obj.description or '-',
                             'location': res_obj.location or '-',
                             'attendees': '<br>'.join(att_infos),
                             'user': res_obj.user_id and res_obj.user_id.name or 'OpenERP User',
                             'sign': sign,
                             'company': company
                }
                body = html_invitation % body_vals
                if mail_to and email_from:
                    # Attach the .ics file when vobject is available.
                    attach = self.get_ics_file(cr, uid, res_obj, context=context)
                    mail_message.schedule_with_attach(cr, uid,
                        email_from,
                        mail_to,
                        sub,
                        body,
                        attachments=attach and {'invitation.ics': attach} or None,
                        subtype='html',
                        reply_to=email_from,
                        context=context
                    )
        return True
def onchange_user_id(self, cr, uid, ids, user_id, *args, **argv):
"""
Make entry on email and availbility on change of user_id field.
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of calendar attendee’s IDs.
@param user_id: Changed value of User id
@return: dictionary of value. which put value in email and availability fields.
"""
if not user_id:
return {'value': {'email': ''}}
usr_obj = self.pool.get('res.users')
user = usr_obj.browse(cr, uid, user_id, *args)
return {'value': {'email': user.user_email, 'availability':user.availability}}
def do_tentative(self, cr, uid, ids, context=None, *args):
""" Makes event invitation as Tentative
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of calendar attendee’s IDs
@param *args: Get Tupple value
@param context: A standard dictionary for contextual values
"""
return self.write(cr, uid, ids, {'state': 'tentative'}, context)
def do_accept(self, cr, uid, ids, context=None, *args):
"""
Update state of invitation as Accepted and
if the invited user is other then event user it will make a copy of this event for invited user
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of calendar attendee’s IDs.
@param context: A standard dictionary for contextual values
@return: True
"""
if context is None:
context = {}
for vals in self.browse(cr, uid, ids, context=context):
if vals.ref and vals.ref.user_id:
mod_obj = self.pool.get(vals.ref._name)
defaults = {'user_id': vals.user_id.id, 'organizer_id': vals.ref.user_id.id}
mod_obj.copy(cr, uid, vals.ref.id, default=defaults, context=context)
self.write(cr, uid, vals.id, {'state': 'accepted'}, context)
return True
def do_decline(self, cr, uid, ids, context=None, *args):
""" Marks event invitation as Declined
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of calendar attendee’s IDs
@param *args: Get Tupple value
@param context: A standard dictionary for contextual values """
if context is None:
context = {}
return self.write(cr, uid, ids, {'state': 'declined'}, context)
def create(self, cr, uid, vals, context=None):
""" Overrides orm create method.
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param vals: Get Values
@param context: A standard dictionary for contextual values """
if context is None:
context = {}
if not vals.get("email") and vals.get("cn"):
cnval = vals.get("cn").split(':')
email = filter(lambda x:x.__contains__('@'), cnval)
vals['email'] = email and email[0] or ''
vals['cn'] = vals.get("cn")
res = super(calendar_attendee, self).create(cr, uid, vals, context)
return res
calendar_attendee()
class res_alarm(osv.osv):
    """Resource Alarm """
    # Reusable alarm definition (trigger offset + repetition) shared by
    # calendar events; concrete per-event alarms are calendar.alarm records.
    _name = 'res.alarm'
    _description = 'Basic Alarm Information'
    _columns = {
        'name':fields.char('Name', size=256, required=True),
        # Whether the trigger offset counts before or after the related moment
        'trigger_occurs': fields.selection([('before', 'Before'), \
                                            ('after', 'After')], \
                                        'Triggers', required=True),
        'trigger_interval': fields.selection([('minutes', 'Minutes'), \
                                                ('hours', 'Hours'), \
                                                ('days', 'Days')], 'Interval', \
                                                required=True),
        'trigger_duration': fields.integer('Duration', required=True),
        'trigger_related': fields.selection([('start', 'The event starts'), \
                                            ('end', 'The event ends')], \
                                            'Related to', required=True),
        'duration': fields.integer('Duration', help="""Duration' and 'Repeat' \
are both optional, but if one occurs, so MUST the other"""),
        'repeat': fields.integer('Repeat'),
        'active': fields.boolean('Active', help="If the active field is set to \
true, it will allow you to hide the event alarm information without removing it.")
    }
    # Default alarm: 5 minutes before the event starts.
    _defaults = {
        'trigger_interval': 'minutes',
        'trigger_duration': 5,
        'trigger_occurs': 'before',
        'trigger_related': 'start',
        'active': 1,
    }
    def do_alarm_create(self, cr, uid, ids, model, date, context=None):
        """
        (Re)create the calendar.alarm records backing the alarm settings of
        the given ``model`` records.
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: List of ``model`` record IDs.
        @param model: Model name of the records carrying the alarm fields.
        @param date: name of the field on ``model`` holding the event date
        @param context: A standard dictionary for contextual values
        @return: True
        """
        if context is None:
            context = {}
        alarm_obj = self.pool.get('calendar.alarm')
        res_alarm_obj = self.pool.get('res.alarm')
        ir_obj = self.pool.get('ir.model')
        model_id = ir_obj.search(cr, uid, [('model', '=', model)])[0]
        model_obj = self.pool.get(model)
        for data in model_obj.browse(cr, uid, ids, context=context):
            basic_alarm = data.alarm_id
            cal_alarm = data.base_calendar_alarm_id
            # When a calendar.alarm exists, make sure an equivalent reusable
            # res.alarm is linked on the record as well.
            if (not basic_alarm and cal_alarm) or (basic_alarm and cal_alarm):
                new_res_alarm = None
                # Find for existing res.alarm
                duration = cal_alarm.trigger_duration
                interval = cal_alarm.trigger_interval
                occurs = cal_alarm.trigger_occurs
                related = cal_alarm.trigger_related
                domain = [('trigger_duration', '=', duration), ('trigger_interval', '=', interval), ('trigger_occurs', '=', occurs), ('trigger_related', '=', related)]
                alarm_ids = res_alarm_obj.search(cr, uid, domain, context=context)
                if not alarm_ids:
                    # No matching reusable alarm: create one on the fly.
                    val = {
                        'trigger_duration': duration,
                        'trigger_interval': interval,
                        'trigger_occurs': occurs,
                        'trigger_related': related,
                        'name': str(duration) + ' ' + str(interval) + ' ' + str(occurs)
                    }
                    new_res_alarm = res_alarm_obj.create(cr, uid, val, context=context)
                else:
                    new_res_alarm = alarm_ids[0]
                # Table name comes from the ORM's _table, not user input.
                cr.execute('UPDATE %s ' % model_obj._table + \
                            ' SET base_calendar_alarm_id=%s, alarm_id=%s ' \
                            ' WHERE id=%s',
                            (cal_alarm.id, new_res_alarm, data.id))
            # Drop any previously generated calendar.alarm before re-creating.
            self.do_alarm_unlink(cr, uid, [data.id], model)
            if basic_alarm:
                # Materialize the reusable alarm into a concrete
                # calendar.alarm bound to this record.
                vals = {
                    'action': 'display',
                    'description': data.description,
                    'name': data.name,
                    'attendee_ids': [(6, 0, map(lambda x:x.id, data.attendee_ids))],
                    'trigger_related': basic_alarm.trigger_related,
                    'trigger_duration': basic_alarm.trigger_duration,
                    'trigger_occurs': basic_alarm.trigger_occurs,
                    'trigger_interval': basic_alarm.trigger_interval,
                    'duration': basic_alarm.duration,
                    'repeat': basic_alarm.repeat,
                    'state': 'run',
                    'event_date': data[date],
                    'res_id': data.id,
                    'model_id': model_id,
                    'user_id': uid
                }
                alarm_id = alarm_obj.create(cr, uid, vals)
                cr.execute('UPDATE %s ' % model_obj._table + \
                            ' SET base_calendar_alarm_id=%s, alarm_id=%s '
                            ' WHERE id=%s', \
                            ( alarm_id, basic_alarm.id, data.id) )
        return True
def do_alarm_unlink(self, cr, uid, ids, model, context=None):
"""
Delete alarm specified in ids
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of res alarm’s IDs.
@param model: Model name for which alarm is to be cleared.
@return: True
"""
if context is None:
context = {}
alarm_obj = self.pool.get('calendar.alarm')
ir_obj = self.pool.get('ir.model')
model_id = ir_obj.search(cr, uid, [('model', '=', model)])[0]
model_obj = self.pool.get(model)
for datas in model_obj.browse(cr, uid, ids, context=context):
alarm_ids = alarm_obj.search(cr, uid, [('model_id', '=', model_id), ('res_id', '=', datas.id)])
if alarm_ids:
alarm_obj.unlink(cr, uid, alarm_ids)
cr.execute('Update %s set base_calendar_alarm_id=NULL, alarm_id=NULL\
where id=%%s' % model_obj._table,(datas.id,))
return True
res_alarm()
class calendar_alarm(osv.osv):
    # Concrete alarm bound to one calendar record; extends the reusable
    # res.alarm definition with the target document and scheduling state.
    _name = 'calendar.alarm'
    _description = 'Event alarm information'
    _inherit = 'res.alarm'
    __attribute__ = {}  # extension hook for CalDAV attribute mapping
    _columns = {
        'alarm_id': fields.many2one('res.alarm', 'Basic Alarm', ondelete='cascade'),
        'name': fields.char('Summary', size=124, help="""Contains the text to be \
used as the message subject for email \
or contains the text to be used for display"""),
        'action': fields.selection([('audio', 'Audio'), ('display', 'Display'), \
                ('procedure', 'Procedure'), ('email', 'Email') ], 'Action', \
                required=True, help="Defines the action to be invoked when an alarm is triggered"),
        'description': fields.text('Description', help='Provides a more complete \
description of the calendar component, than that \
provided by the "SUMMARY" property'),
        'attendee_ids': fields.many2many('calendar.attendee', 'alarm_attendee_rel', \
                                      'alarm_id', 'attendee_id', 'Attendees', readonly=True),
        'attach': fields.binary('Attachment', help="""* Points to a sound resource,\
 which is rendered when the alarm is triggered for audio,
* File which is intended to be sent as message attachments for email,
* Points to a procedure resource, which is invoked when\
 the alarm is triggered for procedure."""),
        # Target document: (model_id, res_id) pair
        'res_id': fields.integer('Resource ID'),
        'model_id': fields.many2one('ir.model', 'Model'),
        'user_id': fields.many2one('res.users', 'Owner'),
        'event_date': fields.datetime('Event Date'),
        'event_end_date': fields.datetime('Event End Date'),
        # Computed in create() from event_date and the trigger definition
        'trigger_date': fields.datetime('Trigger Date', readonly="True"),
        'state':fields.selection([
                    ('draft', 'Draft'),
                    ('run', 'Run'),
                    ('stop', 'Stop'),
                    ('done', 'Done'),
                ], 'State', select=True, readonly=True),
    }
    _defaults = {
        'action': 'email',
        'state': 'run',
    }
def create(self, cr, uid, vals, context=None):
"""
Overrides orm create method.
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param vals: dictionary of fields value.{‘name_of_the_field’: value, ...}
@param context: A standard dictionary for contextual values
@return: new record id for calendar_alarm.
"""
if context is None:
context = {}
event_date = vals.get('event_date', False)
if event_date:
dtstart = datetime.strptime(vals['event_date'], "%Y-%m-%d %H:%M:%S")
if vals['trigger_interval'] == 'days':
delta = timedelta(days=vals['trigger_duration'])
if vals['trigger_interval'] == 'hours':
delta = timedelta(hours=vals['trigger_duration'])
if vals['trigger_interval'] == 'minutes':
delta = timedelta(minutes=vals['trigger_duration'])
trigger_date = dtstart + (vals['trigger_occurs'] == 'after' and delta or -delta)
vals['trigger_date'] = trigger_date
res = super(calendar_alarm, self).create(cr, uid, vals, context=context)
return res
    def do_run_scheduler(self, cr, uid, automatic=False, use_new_cursor=False, \
                       context=None):
        """Scheduler for event reminders: for every alarm not yet 'done',
        send the due reminders (internal request and/or e-mail) and either
        advance the trigger date to the next occurrence or mark it done.
        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param automatic: scheduler boilerplate flag (unused here)
        @param use_new_cursor: False or the dbname
        @param context: A standard dictionary for contextual values
        """
        if context is None:
            context = {}
        mail_message = self.pool.get('mail.message')
        current_datetime = datetime.now()
        request_obj = self.pool.get('res.request')
        alarm_ids = self.search(cr, uid, [('state', '!=', 'done')], context=context)
        mail_to = []
        for alarm in self.browse(cr, uid, alarm_ids, context=context):
            next_trigger_date = None
            update_vals = {}
            model_obj = self.pool.get(alarm.model_id.model)
            res_obj = model_obj.browse(cr, uid, alarm.res_id, context=context)
            re_dates = []
            if res_obj.rrule:
                # Recurring event: collect every occurrence whose alarm is due.
                event_date = datetime.strptime(res_obj.date, '%Y-%m-%d %H:%M:%S')
                recurrent_dates = get_recurrent_dates(res_obj.rrule, res_obj.exdate, event_date, res_obj.exrule)
                trigger_interval = alarm.trigger_interval
                if trigger_interval == 'days':
                    delta = timedelta(days=alarm.trigger_duration)
                if trigger_interval == 'hours':
                    delta = timedelta(hours=alarm.trigger_duration)
                if trigger_interval == 'minutes':
                    delta = timedelta(minutes=alarm.trigger_duration)
                delta = alarm.trigger_occurs == 'after' and delta or -delta
                for rdate in recurrent_dates:
                    # recurrent_dates is ordered: stop at the first future one
                    if rdate + delta > current_datetime:
                        break
                    if rdate + delta <= current_datetime:
                        re_dates.append(rdate.strftime("%Y-%m-%d %H:%M:%S"))
                rest_dates = recurrent_dates[len(re_dates):]
                next_trigger_date = rest_dates and rest_dates[0] or None
            else:
                re_dates = [alarm.trigger_date]
            for r_date in re_dates:
                ref = alarm.model_id.model + ',' + str(alarm.res_id)
                # search for already sent requests to avoid duplicates
                if request_obj.search(cr, uid, [('trigger_date', '=', r_date), ('ref_doc1', '=', ref)], context=context):
                    continue
                if alarm.action == 'display':
                    # Internal reminder: one res.request for the owner and
                    # one per attendee with a linked user.
                    value = {
                        'name': alarm.name,
                        'act_from': alarm.user_id.id,
                        'act_to': alarm.user_id.id,
                        'body': alarm.description,
                        'trigger_date': r_date,
                        'ref_doc1': ref
                    }
                    request_id = request_obj.create(cr, uid, value)
                    request_ids = [request_id]
                    for attendee in res_obj.attendee_ids:
                        if attendee.user_id:
                            value['act_to'] = attendee.user_id.id
                            request_id = request_obj.create(cr, uid, value)
                            request_ids.append(request_id)
                    request_obj.request_send(cr, uid, request_ids)
                if alarm.action == 'email':
                    sub = '[Openobject Reminder] %s' % (alarm.name)
                    body = """
Event: %s
Event Date: %s
Description: %s

From:
      %s

----
%s
""" % (alarm.name, alarm.trigger_date, alarm.description, \
                        alarm.user_id.name, alarm.user_id.signature)
                    mail_to = [alarm.user_id.user_email]
                    # NOTE(review): attendees without a linked user append
                    # False here -- presumably filtered downstream; verify.
                    for att in alarm.attendee_ids:
                        mail_to.append(att.user_id.user_email)
                    if mail_to:
                        mail_message.schedule_with_attach(cr, uid,
                            tools.config.get('email_from', False),
                            mail_to,
                            sub,
                            body,
                            context=context
                        )
            # Advance to the next occurrence, or finish the alarm.
            if next_trigger_date:
                update_vals.update({'trigger_date': next_trigger_date})
            else:
                update_vals.update({'state': 'done'})
            self.write(cr, uid, [alarm.id], update_vals)
        return True
calendar_alarm()
class calendar_event(osv.osv):
    """Recurrence-aware calendar event model."""
    _name = "calendar.event"
    _description = "Calendar Event"
    __attribute__ = {}  # extension hook for CalDAV attribute mapping
def _tz_get(self, cr, uid, context=None):
return [(x.lower(), x) for x in pytz.all_timezones]
    def onchange_dates(self, cr, uid, ids, start_date, duration=False, end_date=False, allday=False, context=None):
        """Returns duration and/or end date based on values passed
        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: List of calendar event's IDs.
        @param start_date: Starting date
        @param duration: Duration between start date and end date
        @param end_date: Ending Date
        @param allday: True for a whole-day event (duration forced to 24h)
        @param context: A standard dictionary for contextual values
        """
        if context is None:
            context = {}
        value = {}
        if not start_date:
            return value
        if not end_date and not duration:
            # Neither given: default to a one-hour event.
            duration = 1.00
            value['duration'] = duration
        if allday: # For all day event
            # Force 24h duration and snap the start to midnight.
            value = {'duration': 24.0}
            duration = 24.0
            if start_date:
                start = datetime.strptime(start_date, "%Y-%m-%d %H:%M:%S")
                start_date = datetime.strftime(datetime(start.year, start.month, start.day, 0,0,0), "%Y-%m-%d %H:%M:%S")
                value['date'] = start_date
        start = datetime.strptime(start_date, "%Y-%m-%d %H:%M:%S")
        if end_date and not duration:
            # Derive duration (in hours) from the given end date.
            end = datetime.strptime(end_date, "%Y-%m-%d %H:%M:%S")
            diff = end - start
            duration = float(diff.days)* 24 + (float(diff.seconds) / 3600)
            value['duration'] = round(duration, 2)
        elif not end_date:
            # Derive end date from the duration.
            end = start + timedelta(hours=duration)
            value['date_deadline'] = end.strftime("%Y-%m-%d %H:%M:%S")
        elif end_date and duration and not allday:
            # we have both, keep them synchronized:
            # set duration based on end_date (arbitrary decision: this avoid
            # getting dates like 06:31:48 instead of 06:32:00)
            end = datetime.strptime(end_date, "%Y-%m-%d %H:%M:%S")
            diff = end - start
            duration = float(diff.days)* 24 + (float(diff.seconds) / 3600)
            value['duration'] = round(duration, 2)
        return {'value': value}
def unlink_events(self, cr, uid, ids, context=None):
"""
This function deletes event which are linked with the event with recurrent_uid
(Removes the events which refers to the same UID value)
"""
if context is None:
context = {}
for event_id in ids:
cr.execute("select id from %s where recurrent_uid=%%s" % (self._table), (event_id,))
r_ids = map(lambda x: x[0], cr.fetchall())
self.unlink(cr, uid, r_ids, context=context)
return True
def _get_rulestring(self, cr, uid, ids, name, arg, context=None):
"""
Gets Recurrence rule string according to value type RECUR of iCalendar from the values given.
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param id: List of calendar event's ids.
@param context: A standard dictionary for contextual values
@return: dictionary of rrule value.
"""
result = {}
if not isinstance(ids, list):
ids = [ids]
for datas in self.read(cr, uid, ids, ['id','byday','recurrency', 'month_list','end_date', 'rrule_type', 'select1', 'interval', 'count', 'end_type', 'mo', 'tu', 'we', 'th', 'fr', 'sa', 'su', 'exrule', 'day', 'week_list' ], context=context):
event = datas['id']
if datas.get('interval', 0) < 0:
raise osv.except_osv(_('Warning!'), _('Interval cannot be negative'))
if datas.get('count', 0) < 0:
raise osv.except_osv(_('Warning!'), _('Count cannot be negative'))
if datas['recurrency']:
result[event] = self.compute_rule_string(datas)
else:
result[event] = ""
return result
def _rrule_write(self, obj, cr, uid, ids, field_name, field_value, args, context=None):
    """Function-field inverse: store a user-supplied RRULE string.

    Parses ``field_value`` (an iCalendar RECUR rule) into the discrete
    recurrence columns (rrule_type, interval, count, weekday flags, ...)
    via _parse_rrule() and writes them on every event in ``ids``.

    @return: True (standard osv write convention)
    """
    data = self._get_empty_rrule_data()
    if field_value:
        data['recurrency'] = True
        for event in self.browse(cr, uid, ids, context=context):
            # BUG FIX: the previous code referenced an undefined name
            # `rule_date`, raising NameError whenever a rule was written.
            # The rule is anchored on the event's own start date.
            rdate = event.date
            update_data = self._parse_rrule(field_value, dict(data), rdate)
            data.update(update_data)
    super(calendar_event, obj).write(cr, uid, ids, data, context=context)
    return True
# ORM column definitions.  Most fields become read-only once the event
# reaches the 'done' state.
_columns = {
    # Basic event description
    'id': fields.integer('ID', readonly=True),
    'sequence': fields.integer('Sequence'),
    'name': fields.char('Description', size=64, required=False, states={'done': [('readonly', True)]}),
    'date': fields.datetime('Date', states={'done': [('readonly', True)]}),
    'date_deadline': fields.datetime('Deadline', states={'done': [('readonly', True)]}),
    'create_date': fields.datetime('Created', readonly=True),
    'duration': fields.float('Duration', states={'done': [('readonly', True)]}),
    'description': fields.text('Description', states={'done': [('readonly', True)]}),
    # iCalendar visibility (CLASS property)
    'class': fields.selection([('public', 'Public'), ('private', 'Private'), \
        ('confidential', 'Public for Employees')], 'Mark as', states={'done': [('readonly', True)]}),
    'location': fields.char('Location', size=264, help="Location of Event", states={'done': [('readonly', True)]}),
    'show_as': fields.selection([('free', 'Free'), ('busy', 'Busy')], \
        'Show as', states={'done': [('readonly', True)]}),
    'base_calendar_url': fields.char('Caldav URL', size=264),
    'state': fields.selection([('tentative', 'Tentative'),
        ('confirmed', 'Confirmed'),
        ('cancelled', 'Cancelled')], 'State', readonly=True),
    # Recurrence exceptions (iCalendar EXDATE / EXRULE)
    'exdate': fields.text('Exception Date/Times', help="This property \
defines the list of date/time exceptions for a recurring calendar component."),
    'exrule': fields.char('Exception Rule', size=352, help="Defines a \
rule or repeating pattern of time to exclude from the recurring rule."),
    # Computed RRULE string (see _get_rulestring / _rrule_write)
    'rrule': fields.function(_get_rulestring, type='char', size=124, \
        fnct_inv=_rrule_write, store=True, string='Recurrent Rule'),
    'rrule_type': fields.selection([('none', ''), ('daily', 'Daily'), \
        ('weekly', 'Weekly'), ('monthly', 'Monthly'), \
        ('yearly', 'Yearly'),],
        'Recurrency', states={'done': [('readonly', True)]},
        help="Let the event automatically repeat at that interval"),
    'alarm_id': fields.many2one('res.alarm', 'Alarm', states={'done': [('readonly', True)]},
        help="Set an alarm at this time, before the event occurs" ),
    'base_calendar_alarm_id': fields.many2one('calendar.alarm', 'Alarm'),
    # Link between a detached occurrence and its recurring parent
    'recurrent_uid': fields.integer('Recurrent ID'),
    'recurrent_id': fields.datetime('Recurrent ID date'),
    'vtimezone': fields.selection(_tz_get, size=64, string='Timezone'),
    'user_id': fields.many2one('res.users', 'Responsible', states={'done': [('readonly', True)]}),
    'organizer': fields.char("Organizer", size=256, states={'done': [('readonly', True)]}), # Map with Organizer Attribure of VEvent.
    'organizer_id': fields.many2one('res.users', 'Organizer', states={'done': [('readonly', True)]}),
    # Recurrence-definition widgets (mapped to/from the RRULE string)
    'end_type' : fields.selection([('count', 'Number of repetitions'), ('end_date','End date')], 'Recurrence termination'),
    'interval': fields.integer('Repeat every', help="Repeat every (Days/Week/Month/Year)"),
    'count': fields.integer('Repeat', help="Repeat x times"),
    'mo': fields.boolean('Mon'),
    'tu': fields.boolean('Tue'),
    'we': fields.boolean('Wed'),
    'th': fields.boolean('Thu'),
    'fr': fields.boolean('Fri'),
    'sa': fields.boolean('Sat'),
    'su': fields.boolean('Sun'),
    'select1': fields.selection([('date', 'Date of month'),
        ('day', 'Day of month')], 'Option'),
    'day': fields.integer('Date of month'),
    'week_list': fields.selection([('MO', 'Monday'), ('TU', 'Tuesday'), \
        ('WE', 'Wednesday'), ('TH', 'Thursday'), \
        ('FR', 'Friday'), ('SA', 'Saturday'), \
        ('SU', 'Sunday')], 'Weekday'),
    'byday': fields.selection([('1', 'First'), ('2', 'Second'), \
        ('3', 'Third'), ('4', 'Fourth'), \
        ('5', 'Fifth'), ('-1', 'Last')], 'By day'),
    'month_list': fields.selection(months.items(), 'Month'),
    'end_date': fields.date('Repeat Until'),
    'attendee_ids': fields.many2many('calendar.attendee', 'event_attendee_rel', \
        'event_id', 'attendee_id', 'Attendees'),
    'allday': fields.boolean('All Day', states={'done': [('readonly', True)]}),
    'active': fields.boolean('Active', help="If the active field is set to \
true, it will allow you to hide the event alarm information without removing it."),
    'recurrency': fields.boolean('Recurrent', help="Recurrent Meeting"),
}
def default_organizer(self, cr, uid, context=None):
    """Default for the 'organizer' column: the current user's name,
    formatted as "Name <email>" when an e-mail address is configured."""
    user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
    organizer = user.name
    if user.user_email:
        organizer += " <%s>" % (user.user_email)
    return organizer
# Column defaults; 'organizer' is resolved per-user at create time via
# default_organizer() above.
_defaults = {
    'end_type' : 'count',
    'count' : 1,
    'rrule_type' : 'none',
    'state': 'tentative',
    'class': 'public',
    'show_as': 'busy',
    'select1': 'date',
    'interval': 1,
    'active': 1,
    'user_id': lambda self, cr, uid, ctx: uid,
    'organizer': default_organizer,
}
def get_recurrent_ids(self, cr, uid, select, domain, limit=100, context=None):
    """Expand recurring events into virtual ids for calendar views.

    For every real event id in ``select`` whose rrule is set, generate
    one virtual id per occurrence date that satisfies the date
    conditions found in ``domain``.  Non-recurrent events keep their
    real id.

    @param select: a real event id or a list of ids
    @param domain: search domain; only date/date_deadline terms are applied here
    @param limit: kept for API compatibility (not applied in this method)
    @return: a single id when ``select`` was a scalar, else a list of ids
    """
    if not context:
        context = {}
    result = []
    for data in super(calendar_event, self).read(cr, uid, select, context=context):
        if not data['rrule']:
            result.append(data['id'])
            continue
        event_date = datetime.strptime(data['date'], "%Y-%m-%d %H:%M:%S")
        exdate = data['exdate'] and data['exdate'].split(',') or []
        # Normalize any UNTIL component to the compact iCalendar form
        # (YYYYMMDDHHMMSS) so dateutil can parse the rule.
        new_rrule_str = []
        for rule in data['rrule'].split(';'):
            name, value = rule.split('=')
            if name == "UNTIL":
                value = parser.parse(value).strftime("%Y%m%d%H%M%S")
            new_rrule_str.append('%s=%s' % (name, value))
        new_rrule_str = ';'.join(new_rrule_str)
        rdates = get_recurrent_dates(str(new_rrule_str), exdate, event_date, data['exrule'])
        for r_date in rdates:
            # Re-apply the caller's date restrictions per occurrence.
            r_day = r_date.strftime('%Y-%m-%d')
            ok = True
            for arg in domain:
                if arg[0] not in ('date', 'date_deadline'):
                    continue
                if arg[1] == '=':
                    ok = ok and r_day == arg[2]
                elif arg[1] == '>':
                    ok = ok and r_day > arg[2]
                elif arg[1] == '<':
                    ok = ok and r_day < arg[2]
                elif arg[1] == '>=':
                    ok = ok and r_day >= arg[2]
                elif arg[1] == '<=':
                    ok = ok and r_day <= arg[2]
            if not ok:
                continue
            idval = real_id2base_calendar_id(data['id'], r_date.strftime("%Y-%m-%d %H:%M:%S"))
            result.append(idval)
    # BUG FIX: `ids` was previously computed only in the list branch, so a
    # scalar `select` hit the return statement with `ids` unbound (NameError).
    ids = list(set(result))
    if isinstance(select, (str, int, long)):
        return ids and ids[0] or False
    return ids
def compute_rule_string(self, datas):
    """Build an iCalendar RECUR rule string from the recurrence columns.

    @param datas: dict of recurrence values (rrule_type, interval,
                  weekday flags, end_type/count/end_date, select1, ...)
    @return: RRULE value such as 'FREQ=WEEKLY;BYDAY=MO;INTERVAL=1;COUNT=3',
             or '' when rrule_type is 'none'
    """
    def weekday_part(freq, datas):
        # Weekday flags only matter for weekly rules; iterate the dict's
        # own key order to keep the historical BYDAY ordering.
        weekdays = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su']
        if freq != 'weekly':
            return ''
        selected = [key.upper() for key in datas if datas.get(key) and key in weekdays]
        return (';BYDAY=' + ','.join(selected)) if selected else ''

    def month_part(freq, datas):
        if freq != 'monthly':
            return ''
        if datas.get('select1') == 'date' and (datas.get('day') < 1 or datas.get('day') > 31):
            raise osv.except_osv(_('Error!'), ("Please select proper Day of month"))
        if datas.get('select1') == 'day':
            return ';BYDAY=' + datas.get('byday') + datas.get('week_list')
        if datas.get('select1') == 'date':
            return ';BYMONTHDAY=' + str(datas.get('day'))
        return ''

    def end_part(datas):
        # Normalize the end date to the compact UNTIL form, keeping the
        # side effect on datas['end_date_new'] for compatibility.
        if datas.get('end_date'):
            datas['end_date_new'] = ''.join((re.compile('\d')).findall(datas.get('end_date'))) + 'T235959Z'
        if datas.get('end_type') == 'count':
            return ';COUNT=' + str(datas.get('count'))
        if datas.get('end_date_new') and datas.get('end_type') == 'end_date':
            return ';UNTIL=' + datas.get('end_date_new')
        return ''

    freq = datas.get('rrule_type')
    if freq == 'none':
        return ''
    interval_part = ';INTERVAL=' + str(datas.get('interval')) if datas.get('interval') else ''
    return ('FREQ=' + freq.upper() + weekday_part(freq, datas)
            + interval_part + end_part(datas) + month_part(freq, datas))
def _get_empty_rrule_data(self):
    """Return a dict resetting every recurrence column to its neutral value
    (False for flags/selections, 0 for the interval)."""
    empty = dict.fromkeys(
        ['byday', 'recurrency', 'end_date', 'rrule_type', 'select1',
         'count', 'end_type', 'mo', 'tu', 'we', 'th', 'fr', 'sa', 'su',
         'exrule', 'day', 'week_list'],
        False)
    empty['interval'] = 0
    return empty
#def _write_rrule(self, cr, uid, ids, field_value, rule_date=False, context=None):
# data = self._get_empty_rrule_data()
#
# if field_value:
# data['recurrency'] = True
# for event in self.browse(cr, uid, ids, context=context):
# rdate = rule_date or event.date
# update_data = self._parse_rrule(field_value, dict(data), rdate)
# data.update(update_data)
# #parse_rrule
# self.write(cr, uid, event.id, data, context=context)
def _parse_rrule(self, rule, data, date_start):
    """Decode an iCalendar RRULE string into the discrete recurrence columns.

    Inverse of compute_rule_string(): fills ``data`` (normally produced
    by _get_empty_rrule_data()) from the parsed rule and returns it.

    @param rule: RRULE value, e.g. 'FREQ=WEEKLY;BYDAY=MO;COUNT=3'
    @param data: dict of recurrence columns, updated in place
    @param date_start: event start as a '%Y-%m-%d %H:%M:%S' string (DTSTART)
    @return: the updated ``data`` dict
    """
    day_list = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su']
    rrule_type = ['yearly', 'monthly', 'weekly', 'daily']
    r = rrule.rrulestr(rule, dtstart=datetime.strptime(date_start, "%Y-%m-%d %H:%M:%S"))
    # NOTE(review): relies on dateutil.rrule private attributes (_freq,
    # _count, _interval, ...).  dateutil freq constants run YEARLY=0 ..
    # DAILY=3, hence the direct index into rrule_type below.
    if r._freq > 0 and r._freq < 4:
        data['rrule_type'] = rrule_type[r._freq]
    data['count'] = r._count
    data['interval'] = r._interval
    data['end_date'] = r._until and r._until.strftime("%Y-%m-%d %H:%M:%S")
    # Weekly: BYDAY weekdays (0=Monday .. 6=Sunday) map onto the boolean flags.
    #repeat weekly
    if r._byweekday:
        for i in xrange(0, 7):
            if i in r._byweekday:
                data[day_list[i]] = True
        data['rrule_type'] = 'weekly'
    #repeat monthly bynweekday ((weekday, weeknumber), )
    if r._bynweekday:
        data['week_list'] = day_list[r._bynweekday[0][0]].upper()
        data['byday'] = r._bynweekday[0][1]
        data['select1'] = 'day'
        data['rrule_type'] = 'monthly'
    if r._bymonthday:
        data['day'] = r._bymonthday[0]
        data['select1'] = 'date'
        data['rrule_type'] = 'monthly'
    #yearly but for openerp it's monthly, take same information as monthly but interval is 12 times
    if r._bymonth:
        data['interval'] = data['interval'] * 12
    #FIXEME handle forever case
    #end of recurrence
    #in case of repeat for ever that we do not support right now
    if not (data.get('count') or data.get('end_date')):
        data['count'] = 100
    if data.get('count'):
        data['end_type'] = 'count'
    else:
        data['end_type'] = 'end_date'
    return data
def remove_virtual_id(self, ids):
    """Map virtual calendar id(s) ('<real_id>-<YYYYMMDDHHMMSS>') back to
    real ids.

    A scalar input yields a scalar, a list/tuple yields a list; any
    other type falls through (returns None), as before.
    """
    if isinstance(ids, (str, int, long)):
        return base_calendar_id2real_id(ids)
    if isinstance(ids, (list, tuple)):
        return [base_calendar_id2real_id(virtual_id) for virtual_id in ids]
def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
    """Search that transparently expands recurrent events.

    Date conditions are OR-ed with recurrency=1 so that recurrent events
    (whose stored 'date' is only the first occurrence) survive the SQL
    pass; get_recurrent_ids() then re-applies those conditions per
    occurrence and returns virtual ids.  Set context['virtual_id'] to
    False to disable the expansion.  limit/offset are applied only after
    the expansion.
    """
    context = context or {}
    args_without_date = []
    filter_date = []
    for arg in args:
        if arg[0] == "id":
            # Callers may pass virtual ids; convert them to real ids first.
            new_id = self.remove_virtual_id(arg[2])
            new_arg = (arg[0], arg[1], new_id)
            args_without_date.append(new_arg)
        elif arg[0] not in ('date', unicode('date'), 'date_deadline', unicode('date_deadline')):
            args_without_date.append(arg)
        else:
            # Keep the date term but OR it with recurrency=1 (prefix
            # '|' operator) so recurring events are not filtered out.
            if context.get('virtual_id', True):
                args_without_date.append('|')
            args_without_date.append(arg)
            if context.get('virtual_id', True):
                args_without_date.append(('recurrency','=',1))
            filter_date.append(arg)
    # First pass: unlimited, uncounted search on the rewritten domain.
    res = super(calendar_event, self).search(cr, uid, args_without_date, \
        0, 0, order, context, count=False)
    if context.get('virtual_id', True):
        res = self.get_recurrent_ids(cr, uid, res, args, limit, context=context)
    if count:
        return len(res)
    elif limit:
        return res[offset:offset+limit]
    else:
        return res
def _get_data(self, cr, uid, id, context=None):
    """Return the 'date' and 'date_deadline' values of one event as a dict."""
    return self.read(cr, uid, [id], ['date', 'date_deadline'])[0]
def need_to_update(self, event_id, vals):
    """Tell whether a virtual id still matches the start date in ``vals``.

    Virtual ids look like '<real_id>-<YYYYMMDDHHMMSS>'.  Plain real ids
    return False; for virtual ids the encoded occurrence date is
    compared against vals['date'] (True is also returned when that date
    cannot be parsed).
    """
    parts = str(event_id).split("-")
    if len(parts) < 2:
        return False
    try:
        encoded = datetime.strptime(vals.get('date', ''),
                                    '%Y-%m-%d %H:%M:%S').strftime("%Y%m%d%H%M%S")
    except Exception:
        return True
    return encoded == parts[1]
def write(self, cr, uid, ids, vals, context=None, check=True, update_check=True):
    """Write values; a virtual id detaches that single occurrence.

    Writing to a virtual id ('realid-YYYYMMDDHHMMSS') copies the parent
    into a standalone, non-recurrent event carrying the changes, and
    adds the occurrence date to the parent's exdate list.  Real ids are
    written normally; alarms are recreated when timing fields change.
    """
    context = context or {}
    if isinstance(ids, (str, int, long)):
        ids = [ids]
    res = False
    # Special write of complex IDS: iterate a copy, since virtual ids
    # are removed from `ids` inside the loop.
    for event_id in ids[:]:
        if len(str(event_id).split('-')) == 1:
            continue
        ids.remove(event_id)
        real_event_id = base_calendar_id2real_id(event_id)
        if not vals.get('recurrency', True):
            # Recurrency is being switched off: write on the real record.
            ids.append(real_event_id)
            continue
        #if edit one instance of a reccurrent id
        data = self.read(cr, uid, event_id, ['date', 'date_deadline', \
            'rrule', 'duration', 'exdate'])
        if data.get('rrule'):
            data.update(vals)
            # Detach this occurrence as its own non-recurrent event.
            data.update({
                'recurrent_uid': real_event_id,
                'recurrent_id': data.get('date'),
                'rrule_type': 'none',
                'rrule': '',
                'recurrency' : False,
            })
            new_id = self.copy(cr, uid, real_event_id, default=data, context=context)
            # Exclude the detached occurrence from the parent's rule.
            date_new = event_id.split('-')[1]
            date_new = time.strftime("%Y%m%dT%H%M%S", \
                time.strptime(date_new, "%Y%m%d%H%M%S"))
            exdate = (data['exdate'] and (data['exdate'] + ',') or '') + date_new
            res = self.write(cr, uid, [real_event_id], {'exdate': exdate})
            context.update({'active_id': new_id, 'active_ids': [new_id]})
            continue
    if vals.get('vtimezone', '') and vals.get('vtimezone', '').startswith('/freeassociation.sourceforge.net/tzfile/'):
        # Strip the 40-character CalDAV tzfile prefix some clients send.
        vals['vtimezone'] = vals['vtimezone'][40:]
    res = super(calendar_event, self).write(cr, uid, ids, vals, context=context)
    if ('alarm_id' in vals or 'base_calendar_alarm_id' in vals)\
            or ('date' in vals or 'duration' in vals or 'date_deadline' in vals):
        alarm_obj = self.pool.get('res.alarm')
        alarm_obj.do_alarm_create(cr, uid, ids, self._name, 'date', context=context)
    # NOTE(review): `res or True and False` parses as `res or (True and
    # False)`, i.e. it returns res when truthy and False otherwise —
    # presumably `return res` was intended; confirm against callers.
    return res or True and False
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False):
    """read_group that disables virtual-id expansion while grouping.

    Grouping by 'date' is rejected because occurrences of recurrent
    events only exist virtually.  Per-group record counts are dropped:
    they come from the raw table and would disagree with the
    virtually-expanded search() results shown when a group is expanded.
    """
    if not context:
        context = {}
    if 'date' in groupby:
        raise osv.except_osv(_('Warning !'), _('Group by date not supported, use the calendar view instead'))
    virtual_id = context.get('virtual_id', True)
    context.update({'virtual_id': False})
    res = super(calendar_event, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby)
    # BUG FIX (idiom): the loop variable was named `re`, shadowing the
    # stdlib `re` module used elsewhere in this module.
    for group in res:
        #remove the count, since the value is not consistent with the result of the search when expand the group
        for groupname in groupby:
            if group.get(groupname + "_count"):
                del group[groupname + "_count"]
        # Restore the caller's virtual_id preference for drill-down.
        group.get('__context', {}).update({'virtual_id': virtual_id})
    return res
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
    # FIXME This whole id mangling has to go!
    """Read events, accepting virtual ids ('realid-YYYYMMDDHHMMSS').

    Virtual ids are resolved to the real record, then 'date' and
    'date_deadline' are replaced with the occurrence's own dates.
    Private events belonging to other users are blanked out (name shown
    as 'Busy').  Return shape follows the input: scalar in, dict out.
    """
    if context is None:
        context = {}
    fields2 = fields and fields[:] or None
    # Always fetch these: needed below for privacy filtering and for
    # recomputing the occurrence deadline from the duration.
    EXTRAFIELDS = ('class','user_id','duration')
    for f in EXTRAFIELDS:
        if fields and (f not in fields):
            fields2.append(f)
    if isinstance(ids, (str, int, long)):
        select = [ids]
    else:
        select = ids
    # Pair each requested (possibly virtual) id with its real id.
    select = map(lambda x: (x, base_calendar_id2real_id(x)), select)
    result = []
    real_data = super(calendar_event, self).read(cr, uid,
        [real_id for base_calendar_id, real_id in select],
        fields=fields2, context=context, load=load)
    real_data = dict(zip([x['id'] for x in real_data], real_data))
    for base_calendar_id, real_id in select:
        res = real_data[real_id].copy()
        # For a virtual id, ls is (real_id, occurrence_start, occurrence_end).
        ls = base_calendar_id2real_id(base_calendar_id, with_date=res and res.get('duration', 0) or 0)
        if not isinstance(ls, (str, int, long)) and len(ls) >= 2:
            res['date'] = ls[1]
            res['date_deadline'] = ls[2]
        res['id'] = base_calendar_id
        result.append(res)
    # Privacy: hide the details of other users' private events.
    for r in result:
        if r['user_id']:
            # many2one values may read back as an (id, name) tuple.
            user_id = type(r['user_id']) in (tuple, list) and r['user_id'][0] or r['user_id']
            if user_id == uid:
                continue
        if r['class'] == 'private':
            for f in r.keys():
                if f not in ('id','date','date_deadline','duration','user_id','state'):
                    r[f] = False
                if f == 'name':
                    r[f] = _('Busy')
    # Strip the internally-added fields the caller did not ask for.
    for r in result:
        for k in EXTRAFIELDS:
            if (k in r) and ((not fields) or (k not in fields)):
                del r[k]
    if isinstance(ids, (str, int, long)):
        return result and result[0] or False
    return result
def copy(self, cr, uid, id, default=None, context=None):
    """Duplicate an event (virtual ids are resolved to the real record)
    and recreate its alarm on the new copy."""
    if context is None:
        context = {}
    new_id = super(calendar_event, self).copy(cr, uid, base_calendar_id2real_id(id), default, context)
    self.pool.get('res.alarm').do_alarm_create(cr, uid, [new_id], self._name, 'date', context=context)
    return new_id
def unlink(self, cr, uid, ids, context=None):
    """Delete events; a virtual id only excludes that single occurrence.

    Virtual ids ('realid-YYYYMMDDHHMMSS') are not deleted: the
    occurrence date is appended to the parent's exdate list instead.
    Real ids are deleted together with their attendees, alarms and
    detached recurrent children.
    """
    if not isinstance(ids, list):
        ids = [ids]
    res = False
    attendee_obj=self.pool.get('calendar.attendee')
    # Iterate over a copy: virtual ids are removed from `ids` inside.
    for event_id in ids[:]:
        if len(str(event_id).split('-')) == 1:
            continue
        real_event_id = base_calendar_id2real_id(event_id)
        data = self.read(cr, uid, real_event_id, ['exdate'], context=context)
        # Exclude this occurrence from the parent's recurrence rule.
        date_new = event_id.split('-')[1]
        date_new = time.strftime("%Y%m%dT%H%M%S", \
            time.strptime(date_new, "%Y%m%d%H%M%S"))
        exdate = (data['exdate'] and (data['exdate'] + ',') or '') + date_new
        self.write(cr, uid, [real_event_id], {'exdate': exdate})
        ids.remove(event_id)
    # Remaining (real) ids: cascade-delete attendees, the records
    # themselves, their alarms and their detached children.
    for event in self.browse(cr, uid, ids, context=context):
        if event.attendee_ids:
            attendee_obj.unlink(cr, uid, [x.id for x in event.attendee_ids], context=context)
    res = super(calendar_event, self).unlink(cr, uid, ids, context=context)
    self.pool.get('res.alarm').do_alarm_unlink(cr, uid, ids, self._name)
    self.unlink_events(cr, uid, ids, context=context)
    return res
def create(self, cr, uid, vals, context=None):
    """Create an event, stripping the CalDAV
    '/freeassociation.sourceforge.net/tzfile/' timezone prefix some
    clients send, then set up the event's alarm."""
    if context is None:
        context = {}
    tz = vals.get('vtimezone', '')
    if tz and tz.startswith('/freeassociation.sourceforge.net/tzfile/'):
        # Drop the 40-character prefix, keeping the bare timezone name.
        vals['vtimezone'] = tz[40:]
    new_id = super(calendar_event, self).create(cr, uid, vals, context)
    self.pool.get('res.alarm').do_alarm_create(cr, uid, [new_id], self._name, 'date', context=context)
    return new_id
def do_tentative(self, cr, uid, ids, context=None, *args):
    """Mark the given event invitations as tentative.

    @param ids: list of event ids
    @return: result of write() (True on success)
    """
    return self.write(cr, uid, ids, {'state': 'tentative'}, context)
def do_cancel(self, cr, uid, ids, context=None, *args):
    """Mark the given event invitations as cancelled.

    @param ids: list of event ids
    @return: result of write() (True on success)
    """
    return self.write(cr, uid, ids, {'state': 'cancelled'}, context)
def do_confirm(self, cr, uid, ids, context=None, *args):
    """Mark the given event invitations as confirmed.

    @param ids: list of event ids
    @return: result of write() (True on success)
    """
    return self.write(cr, uid, ids, {'state': 'confirmed'}, context)
calendar_event()
class calendar_todo(osv.osv):
    """Calendar Task: an event whose 'date' is an alias of 'date_start'."""
    _name = "calendar.todo"
    _inherit = "calendar.event"
    _description = "Calendar Task"

    def _get_date(self, cr, uid, ids, name, arg, context=None):
        """Function-field getter: expose date_start as 'date'.

        @return: {todo_id: date_start}
        """
        res = {}
        for event in self.browse(cr, uid, ids, context=context):
            res[event.id] = event.date_start
        return res

    def _set_date(self, cr, uid, id, name, value, arg, context=None):
        """Function-field inverse: writing 'date' stores date_start."""
        assert name == 'date'
        return self.write(cr, uid, id, { 'date_start': value }, context=context)

    _columns = {
        # BUG FIX: the label used to read 'Duration' (copy-pasted from
        # the field below); this field is the task's date.
        'date': fields.function(_get_date, fnct_inv=_set_date, \
            string='Date', store=True, type='datetime'),
        'duration': fields.integer('Duration'),
    }
    __attribute__ = {}
calendar_todo()
class ir_attachment(osv.osv):
    """ir.attachment override: resolve virtual calendar ids found in
    'res_id' search terms (and in default_res_id) to real event ids."""
    _name = 'ir.attachment'
    _inherit = 'ir.attachment'

    def _convert_res_id_args(self, args):
        """Return ``args`` with any ('res_id', op, virtual_id) term
        rewritten to use the real event id."""
        new_args = []
        for domain_item in args:
            if isinstance(domain_item, (list, tuple)) and len(domain_item) == 3 and domain_item[0] == 'res_id':
                new_args.append((domain_item[0], domain_item[1], base_calendar_id2real_id(domain_item[2])))
            else:
                new_args.append(domain_item)
        return new_args

    def search_count(self, cr, user, args, context=None):
        """Count attachments after translating virtual ids in the domain."""
        return super(ir_attachment, self).search_count(cr, user, self._convert_res_id_args(args), context)

    def create(self, cr, uid, vals, context=None):
        """Create an attachment, mapping a virtual default_res_id to its real id."""
        if context:
            id = context.get('default_res_id', False)
            context.update({'default_res_id' : base_calendar_id2real_id(id)})
        return super(ir_attachment, self).create(cr, uid, vals, context=context)

    def search(self, cr, uid, args, offset=0, limit=None, order=None,
               context=None, count=False):
        """Search attachments after translating virtual ids in the domain.

        BUG FIX: `count` used to be hard-coded to False, so callers
        requesting a count silently got a list of ids back.
        """
        return super(ir_attachment, self).search(cr, uid, self._convert_res_id_args(args), offset=offset,
                                                 limit=limit, order=order, context=context, count=count)
ir_attachment()
class ir_values(osv.osv):
    """ir.values override translating (model, virtual_id) pairs to real ids."""
    _inherit = 'ir.values'

    def set(self, cr, uid, key, key2, name, models, value, replace=True, \
            isobject=False, meta=False, preserve_user=False, company=False):
        """Store values, converting any virtual calendar id in ``models``."""
        real_models = []
        for item in models:
            if type(item) in (list, tuple):
                real_models.append((item[0], base_calendar_id2real_id(item[1])))
            else:
                real_models.append(item)
        return super(ir_values, self).set(cr, uid, key, key2, name, real_models, \
            value, replace, isobject, meta, preserve_user, company)

    def get(self, cr, uid, key, key2, models, meta=False, context=None, \
            res_id_req=False, without_user=True, key2_req=True):
        """Read values, converting any virtual calendar id in ``models``."""
        if context is None:
            context = {}
        real_models = []
        for item in models:
            if type(item) in (list, tuple):
                real_models.append((item[0], base_calendar_id2real_id(item[1])))
            else:
                real_models.append(item)
        return super(ir_values, self).get(cr, uid, key, key2, real_models, \
            meta, context, res_id_req, without_user, key2_req)
ir_values()
class ir_model(osv.osv):
    """ir.model override mapping virtual ids back to real ids in read()."""
    _inherit = 'ir.model'

    def read(self, cr, uid, ids, fields=None, context=None,
             load='_classic_read'):
        """Standard read; every returned 'id' is converted to its real id.

        Accepts a scalar or a list of ids; the return shape follows the
        input (single dict for a scalar, list of dicts otherwise).
        """
        scalar = isinstance(ids, (str, int, long))
        new_ids = [ids] if scalar else ids
        if context is None:
            context = {}
        data = super(ir_model, self).read(cr, uid, new_ids, fields=fields, \
            context=context, load=load)
        if data:
            for record in data:
                record['id'] = base_calendar_id2real_id(record['id'])
        return data[0] if scalar else data
ir_model()
class virtual_report_spool(web_services.report_spool):
    """Report service override translating virtual calendar ids before printing."""

    def exp_report(self, db, uid, object, ids, datas=None, context=None):
        """Run a report after mapping virtual ids (and datas['id']) to real ids.

        'printscreen.list' reports are passed through untouched.
        """
        if object == 'printscreen.list':
            return super(virtual_report_spool, self).exp_report(db, uid, \
                object, ids, datas, context)
        real_ids = [base_calendar_id2real_id(virtual_id) for virtual_id in ids]
        if datas.get('id', False):
            datas['id'] = base_calendar_id2real_id(datas['id'])
        return super(virtual_report_spool, self).exp_report(db, uid, object, real_ids, datas, context)
virtual_report_spool()
class res_users(osv.osv):
    """res.users override exposing a computed free/busy availability field."""
    _inherit = 'res.users'

    def _get_user_avail(self, cr, uid, ids, context=None):
        """Compute free/busy status for the given user ids, based on
        accepted calendar attendances that are in progress right now.

        @return: {user_id: 'busy' | 'free'}
        """
        current_datetime = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        res = {}
        attendee_obj = self.pool.get('calendar.attendee')
        # BUG FIX: an ongoing event starts before now and *ends after*
        # now.  The previous domain used event_end_date <= now, which
        # marked users busy forever once any past event existed.
        attendee_ids = attendee_obj.search(cr, uid, [
            ('event_date', '<=', current_datetime), ('event_end_date', '>=', current_datetime),
            ('state', '=', 'accepted'), ('user_id', 'in', ids)
        ])
        for attendee_data in attendee_obj.read(cr, uid, attendee_ids, ['user_id']):
            user_id = attendee_data['user_id']
            # BUG FIX: many2one values read back as (id, name) tuples;
            # keying the result on the tuple meant the membership test
            # below never matched and everyone reported 'free'.
            if isinstance(user_id, (tuple, list)):
                user_id = user_id[0]
            res[user_id] = 'busy'
        #TOCHECK: Delegated Event
        for user_id in ids:
            if user_id not in res:
                res[user_id] = 'free'
        return res

    def _get_user_avail_fun(self, cr, uid, ids, name, args, context=None):
        """Function-field wrapper around _get_user_avail()."""
        return self._get_user_avail(cr, uid, ids, context=context)

    _columns = {
        'availability': fields.function(_get_user_avail_fun, type='selection', \
            selection=[('free', 'Free'), ('busy', 'Busy')], \
            string='Free/Busy'),
    }
res_users()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
giacomov/XtDac | XtDac/FixedBinSearch/fitsRegions.py | 1 | 2417 | from astropy.io import fits as pyfits
import pyregion
import warnings
import numpy
class FitsRegionFile(object):
    """Event list (X, Y, TIME) read from a FITS file together with the
    elliptical regions stored in its REGION extension.

    Regions that fail to parse, or whose radii are below ``minimumSize``,
    are skipped with a warning.
    """

    def __init__(self, filename, minimumSize=0):
        with pyfits.open(filename) as f:
            header = f['EVENTS'].header
            self.X = f['EVENTS'].data.field("X")
            self.Y = f['EVENTS'].data.field("Y")
            self.t = f['EVENTS'].data.field("TIME")
            self.regions = []
            for i, row in enumerate(f['REGION'].data):
                X = row.field("X")[0]
                Y = row.field("Y")[0]
                r1, r2 = row.field("R")[:2]
                rot = row.field("ROTANG")[0]
                reg = "physical;ellipse({X},{Y},{r1},{r2},{rot})".format(**locals())
                try:
                    r = pyregion.parse(reg).as_imagecoord(header)
                except Exception:
                    # BUG FIX: without this `continue`, a parse failure
                    # fell through to the append below with `r` undefined
                    # (NameError) or stale from the previous row.  The
                    # bare `except:` was also narrowed.
                    warnings.warn("Could not parse region %s" % (i + 1))
                    continue
                if r1 >= minimumSize and r2 >= minimumSize:
                    self.regions.append(r)
                else:
                    warnings.warn("Removing region %s because is too small" % (i + 1))

    def iteritems(self):
        '''Loop over the non-empty regions returning the corresponding events. Use like this:
        rr = FitsRegionFile( filename )
        for x,y,t,filt,reg in rr.iteritems():
            ...
        '''
        for i in range(len(self.regions)):
            filt = self.regions[i].get_filter()
            res = filt.inside(self.X, self.Y)
            if self.X[res].shape[0] == 0:
                # Empty region: skip to the next one
                continue
            yield self.X[res], self.Y[res], self.t[res], filt, self.regions[i]
class FitsRegion(FitsRegionFile):
    """A FitsRegionFile built from in-memory arrays and a single region
    definition string, instead of a file on disk."""

    def __init__(self, X, Y, t, header, regionDefinition):
        self.X = X
        self.Y = Y
        self.t = t
        parsed = pyregion.parse(regionDefinition)
        self.regions = [parsed.as_imagecoord(header)]
| bsd-3-clause |
pfmoore/pip | tests/functional/test_new_resolver_target.py | 4 | 2049 | import pytest
from pip._internal.cli.status_codes import ERROR, SUCCESS
from tests.lib.path import Path
from tests.lib.wheel import make_wheel
@pytest.fixture()
def make_fake_wheel(script):
    """Factory fixture: build a minimal 'fake-1.0' wheel carrying an
    arbitrary compatibility tag and return its path."""
    def _make_fake_wheel(wheel_tag):
        house = script.scratch_path.joinpath("wheelhouse")
        house.mkdir()
        builder = make_wheel(
            name="fake",
            version="1.0",
            # Drop the auto-generated Tag metadata entries; the tag
            # lives only in the file name.
            wheel_metadata_updates={"Tag": []},
        )
        wheel_path = house.joinpath(f"fake-1.0-{wheel_tag}.whl")
        builder.save_to(wheel_path)
        return wheel_path

    return _make_fake_wheel
@pytest.mark.parametrize("implementation", [None, "fakepy"])
@pytest.mark.parametrize("python_version", [None, "1"])
@pytest.mark.parametrize("abi", [None, "fakeabi"])
@pytest.mark.parametrize("platform", [None, "fakeplat"])
def test_new_resolver_target_checks_compatibility_failure(
    script,
    make_fake_wheel,
    implementation,
    python_version,
    abi,
    platform,
):
    """Installing into --target succeeds only when the requested
    implementation/version/abi/platform exactly match the wheel's tag."""
    fake_wheel_tag = "fakepy1-fakeabi-fakeplat"
    args = [
        "install",
        "--only-binary=:all:",
        "--no-cache-dir", "--no-index",
        "--target", str(script.scratch_path.joinpath("target")),
        make_fake_wheel(fake_wheel_tag),
    ]
    for flag, value in (
        ("--implementation", implementation),
        ("--python-version", python_version),
        ("--abi", abi),
        ("--platform", platform),
    ):
        if value:
            args += [flag, value]

    # Only the all-specified combination reproduces the wheel's tag.
    args_tag = "{}{}-{}-{}".format(implementation, python_version, abi, platform)
    wheel_tag_matches = args_tag == fake_wheel_tag

    result = script.pip(*args, expect_error=(not wheel_tag_matches))
    dist_info = Path("scratch", "target", "fake-1.0.dist-info")
    if wheel_tag_matches:
        assert result.returncode == SUCCESS
        result.did_create(dist_info)
    else:
        assert result.returncode == ERROR
        result.did_not_create(dist_info)
| mit |
rosemead/namebench | libnamebench/benchmark.py | 173 | 6727 | # Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple DNS server comparison benchmarking tool.
Designed to assist system administrators in selection and prioritization.
"""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import Queue
import random
import threading
import time
class BenchmarkThreads(threading.Thread):
    """Worker thread: pull (nameserver, type, hostname) jobs from a queue,
    time each DNS request, and push the outcome onto a results queue."""

    def __init__(self, input_queue, results_queue):
        threading.Thread.__init__(self)
        self.input = input_queue
        self.results = results_queue

    def run(self):
        """Drain the input queue, timing one request per item."""
        while not self.input.empty():
            try:
                (ns, request_type, hostname) = self.input.get_nowait()
            except Queue.Empty:
                return
            # Substitute __RANDOM__ here (after all queueing/selection is
            # done) so each request gets its own random hostname.
            if '__RANDOM__' in hostname:
                hostname = hostname.replace('__RANDOM__', str(random.random() * random.randint(0, 99999)))
            (response, duration, error_msg) = ns.TimedRequest(request_type, hostname)
            self.results.put((ns, request_type, hostname, response, duration, error_msg))
class Benchmark(object):
"""The main benchmarking class."""
def __init__(self, nameservers, run_count=2, query_count=30, thread_count=1,
             status_callback=None):
    """Constructor.

    Args:
      nameservers: a list of NameServerData objects
      run_count: How many test-runs to perform on each nameserver (int)
      query_count: How many DNS lookups to test in each test-run (int)
      thread_count: How many benchmark threads to use (int)
      status_callback: Where to send msg() updates to.
    """
    self.nameservers = nameservers
    self.run_count = run_count
    self.query_count = query_count
    self.thread_count = thread_count
    self.status_callback = status_callback
    self.results = {}
def msg(self, msg, **kwargs):
    """Forward a status message to the status callback, when one is set."""
    callback = self.status_callback
    if callback:
        callback(msg, **kwargs)
def _CheckForIndexHostsInResults(self, test_records):
"""Check if we have already tested index hosts.
Args:
test_records: List of tuples of test records (type, record)
Returns:
A list of results that have already been tested
A list of records that still need to be tested.
"""
needs_test = []
index_results = {}
for test in test_records:
matched = False
for ns in self.results:
for result in self.results[ns][0]:
hostname, request_type = result[0:2]
if (request_type, hostname) == test:
matched = True
index_results.setdefault(ns, []).append(result)
# So that we don't include the second results if duplicates exist.
break
if not matched:
needs_test.append(test)
return (index_results, needs_test)
def RunIndex(self, test_records):
"""Run index tests using the same mechanism as a standard benchmark."""
if not test_records:
print 'No records to test.'
return None
index_results, pending_tests = self._CheckForIndexHostsInResults(test_records)
run_results = self._SingleTestRun(pending_tests)
for ns in run_results:
index_results.setdefault(ns, []).extend(run_results[ns])
return index_results
def Run(self, test_records=None):
"""Run all test runs for all nameservers."""
# We don't want to keep stats on how many queries timed out from previous runs.
for ns in self.nameservers.enabled_servers:
ns.ResetErrorCounts()
for _ in range(self.run_count):
run_results = self._SingleTestRun(test_records)
for ns in run_results:
self.results.setdefault(ns, []).append(run_results[ns])
return self.results
def _SingleTestRun(self, test_records):
"""Manage and execute a single test-run on all nameservers.
We used to run all tests for a nameserver, but the results proved to be
unfair if the bandwidth was suddenly constrained. We now run a test on
each server before moving on to the next.
Args:
test_records: a list of tuples in the form of (request_type, hostname)
Returns:
results: A dictionary of tuples, keyed by nameserver.
"""
input_queue = Queue.Queue()
shuffled_records = {}
results = {}
# Pre-compute the shuffled test records per-nameserver to avoid thread
# contention.
for ns in self.nameservers.enabled_servers:
random.shuffle(test_records)
shuffled_records[ns.ip] = list(test_records)
# Feed the pre-computed records into the input queue.
for i in range(len(test_records)):
for ns in self.nameservers.enabled_servers:
(request_type, hostname) = shuffled_records[ns.ip][i]
input_queue.put((ns, request_type, hostname))
results_queue = self._LaunchBenchmarkThreads(input_queue)
errors = []
while results_queue.qsize():
(ns, request_type, hostname, response, duration, error_msg) = results_queue.get()
if error_msg:
duration = ns.timeout * 1000
errors.append((ns, error_msg))
results.setdefault(ns, []).append((hostname, request_type, duration, response, error_msg))
for (ns, error_msg) in errors:
self.msg('Error querying %s: %s' % (ns, error_msg))
return results
def _LaunchBenchmarkThreads(self, input_queue):
"""Launch and manage the benchmark threads."""
results_queue = Queue.Queue()
expected_total = input_queue.qsize()
threads = []
for unused_thread_num in range(0, self.thread_count):
thread = BenchmarkThreads(input_queue, results_queue)
thread.start()
threads.append(thread)
query_count = expected_total / len(self.nameservers.enabled_servers)
status_message = ('Sending %s queries to %s servers' %
(query_count, len(self.nameservers.enabled_servers)))
while results_queue.qsize() != expected_total:
self.msg(status_message, count=results_queue.qsize(), total=expected_total)
time.sleep(0.5)
self.msg(status_message, count=results_queue.qsize(), total=expected_total)
for thread in threads:
thread.join()
return results_queue
| apache-2.0 |
taxipp/ipp-macro-series-parser | ipp_macro_series_parser/demographie/dependance.py | 2 | 2651 | # -*- coding:utf-8 -*-
from __future__ import division
import logging
import numpy as np
import os
import pandas as pd
from liam2.importer import array_to_disk_array
from ipp_macro_series_parser.scripts.utils import line_prepender
log = logging.getLogger(__name__)
def check_directory_existence(directory):
    """Create ``directory`` (including parents) when it does not exist yet."""
    directory_is_missing = not os.path.exists(directory)
    if directory_is_missing:
        log.info('Creating directory {}'.format(directory))
        os.makedirs(directory)
def build_prevalence_2010(input_dir = None, output_dir = None, uniform_weight = None,
        drees_filename = 'dss43_horizon_2060.xls', output_filename = 'dependance_prevalence_2010.csv'):
    """Extract the 2010 dependency prevalence by age category from the DREES
    workbook and write it as a one-row CSV scaled by the simulation weight.

    Args:
        input_dir: directory containing the DREES Excel file.
        output_dir: directory for the CSV output (created if missing).
        uniform_weight: weight of one simulated individual; head counts are
            divided by it and rounded.
        drees_filename: name of the source Excel workbook.
        output_filename: name of the CSV file produced.
    """
    data_path = os.path.join(input_dir, drees_filename)
    # NOTE(review): `sheetname`/`parse_cols`/`skip_footer` are the old pandas
    # spellings (now sheet_name/usecols/skipfooter) — presumably matching the
    # pandas version pinned by this project; confirm before upgrading pandas.
    data = pd.read_excel(
        data_path,
        sheetname ='Tab2',
        header = 3,
        parse_cols = 'B:O',
        skip_footer = 4
        )
    # Drop the spreadsheet's filler and total columns.
    columns_to_delete = [
        column for column in data.columns if column.startswith('Unnamed') or column.startswith('Ensemble')]
    for column in columns_to_delete:
        del data[column]
    # Index by calendar year; remaining columns become age categories 1..6.
    data.index = [index.year for index in data.index]
    data.columns = range(1, 7)
    check_directory_existence(output_dir)
    csv_file_path = os.path.join(output_dir, output_filename)
    # Keep only the 2010 row, as a single-row frame.
    data = pd.DataFrame(data.xs(2010)).T
    data = np.round(data / uniform_weight)
    data.astype(int).to_csv(csv_file_path, index = False)
    # Prepend the header line expected by the downstream consumer.
    line_prepender(csv_file_path, 'age_category')
def build_prevalence_all_years(globals_node = None, output_dir = None, input_dir = None, to_csv = None,
        drees_filename = 'dss43_horizon_2060.xls'):
    """Build the 2009-2059 dependency prevalence series from the DREES workbook.

    Writes the interpolated series either into a LIAM2 globals node (as a disk
    array) or to a CSV file, depending on which of ``globals_node``/``to_csv``
    is given (exactly one must be truthy).

    Args:
        globals_node: LIAM2 globals node to receive the array, or None.
        output_dir: directory for the CSV output (only used with ``to_csv``).
        input_dir: directory containing the DREES Excel file.
        to_csv: when truthy, write a CSV instead of the globals array.
        drees_filename: name of the source Excel workbook.
    """
    assert globals_node or to_csv
    data_path = os.path.join(
        input_dir,
        drees_filename)
    # NOTE(review): old pandas keyword spellings, kept for the pinned version.
    data = pd.read_excel(
        data_path,
        sheetname ='Tab6A',
        header = 3,
        parse_cols = 'B:E',
        skip_footer = 3
        )
    # "Au 1er janvier"
    data.columns = ['year', 'dependants_optimiste', 'DEPENDANTS', 'dependants_pessimiste']
    data.set_index('year', inplace = True)
    # Fill the missing years with a degree-7 polynomial interpolation.
    data = data.reindex(index = range(2010, 2061)).interpolate(method = 'polynomial', order = 7)
    # Shift to the previous year since source values are "as of January 1st".
    data.index = [int(str(year - 1)) for year in data.index]
    data.index.name = "PERIOD"
    if globals_node:
        array_to_disk_array(
            globals_node,
            'dependance_prevalence_all_years',
            data.DEPENDANTS.values
            )
    elif to_csv:
        check_directory_existence(output_dir)
        csv_file_path = os.path.join(output_dir, 'dependance_prevalence_all_years.csv')
        data = data.reset_index()[['PERIOD', 'DEPENDANTS']]
        data.astype(int) \
            .to_csv(csv_file_path, index = False)
| gpl-3.0 |
skoslowski/gnuradio | gnuradio-runtime/python/gnuradio/gru/freqz.py | 3 | 10887 | #!/usr/bin/env python
#
# Copyright 2005,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
# This code lifted from various parts of www.scipy.org -eb 2005-01-24
# Copyright (c) 2001, 2002 Enthought, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# a. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# b. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# c. Neither the name of the Enthought nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
from __future__ import division
from __future__ import unicode_literals
__all__ = ['freqz']
import numpy
from numpy import *
Num=numpy
def atleast_1d(*arys):
    """ Force a sequence of arrays to each be at least 1D.
    Description:
        Force an array to be at least 1D. If an array is 0D, the
        array is converted to a single row of values. Otherwise,
        the array is unaltered.
    Arguments:
        *arys -- arrays to be converted to 1 or more dimensional array.
    Returns:
        input array converted to at least 1D array. A single input is
        returned unwrapped; multiple inputs come back as a list.
    """
    res = []
    for ary in arys:
        ary = asarray(ary)
        if len(ary.shape) == 0:
            # A 0-d array cannot be indexed with [0] on modern numpy, so the
            # previous `numpy.array([ary[0]])` raised IndexError; reshape the
            # scalar into a one-element 1-d array instead.
            result = ary.reshape(1)
        else:
            result = ary
        res.append(result)
    if len(res) == 1:
        return res[0]
    else:
        return res
def polyval(p, x):
    """Evaluate the polynomial p at x. If x is a polynomial then composition.
    Description:
        If p is of length N, this function returns the value:
        p[0]*(x**N-1) + p[1]*(x**N-2) + ... + p[N-2]*x + p[N-1]
        x can be a sequence and p(x) will be returned for all elements of x.
        or x can be another polynomial and the composite polynomial p(x) will be
        returned.
    """
    coeffs = asarray(p)
    if isinstance(x, poly1d):
        # Composition: start the accumulator as the zero polynomial.
        acc = 0
    else:
        x = asarray(x)
        acc = numpy.zeros(x.shape, x.dtype)
    # Horner's scheme, highest-order coefficient first.
    for coeff in coeffs:
        acc = x * acc + coeff
    return acc
class poly1d(object):
    """A one-dimensional polynomial class.
    p = poly1d([1,2,3]) constructs the polynomial x**2 + 2 x + 3
    p(0.5) evaluates the polynomial at the location
    p.r is a list of roots
    p.c is the coefficient array [1,2,3]
    p.order is the polynomial order (after leading zeros in p.c are removed)
    p[k] is the coefficient on the kth power of x (backwards from
    sequencing the coefficient array.
    polynomials can be added, subtracted, multiplied and divided (returns
    quotient and remainder).
    asarray(p) will also give the coefficient array, so polynomials can
    be used in all functions that accept arrays.

    NOTE(review): this class was lifted from an old scipy/numpy release and
    still carries Python-2-era protocols (__coerce__, __div__/__rdiv__) and a
    Numeric-era API call (.typecode() in __setitem__) — confirm before relying
    on those paths under Python 3 / modern numpy.
    """
    def __init__(self, c_or_r, r=0):
        # Copy-construct when given another poly1d.
        if isinstance(c_or_r, poly1d):
            for key in list(c_or_r.__dict__.keys()):
                self.__dict__[key] = c_or_r.__dict__[key]
            return
        # When r is truthy, c_or_r is interpreted as a sequence of roots.
        if r:
            c_or_r = poly(c_or_r)
        c_or_r = atleast_1d(c_or_r)
        if len(c_or_r.shape) > 1:
            raise ValueError("Polynomial must be 1d only.")
        # Strip leading ("front") zero coefficients; keep at least [0].
        c_or_r = trim_zeros(c_or_r, trim='f')
        if len(c_or_r) == 0:
            c_or_r = numpy.array([0])
        # Stored via __dict__ directly because __setattr__ is locked down.
        self.__dict__['coeffs'] = c_or_r
        self.__dict__['order'] = len(c_or_r) - 1
    def __array__(self, t=None):
        # Lets asarray(p) yield the coefficient array (optionally typed).
        if t:
            return asarray(self.coeffs, t)
        else:
            return asarray(self.coeffs)
    def __coerce__(self, other):
        # Python 2 numeric-coercion protocol; None means "no coercion".
        return None
    def __repr__(self):
        vals = repr(self.coeffs)
        # Strip the surrounding "array(" and ")" from the numpy repr.
        vals = vals[6:-1]
        return "poly1d(%s)" % vals
    def __len__(self):
        # Length is the polynomial order, not the coefficient count.
        return self.order
    def __str__(self):
        # Pretty-print as "a x**2 + b x + c", skipping zero terms.
        # NOTE(review): relies on _raise_power, which is not defined in this
        # file chunk — str(p) will raise NameError unless it exists elsewhere.
        N = self.order
        thestr = "0"
        for k in range(len(self.coeffs)):
            coefstr = '%.4g' % abs(self.coeffs[k])
            # Trim trailing "0000" noise from the %.4g formatting.
            if coefstr[-4:] == '0000':
                coefstr = coefstr[:-5]
            power = (N - k)
            if power == 0:
                if coefstr != '0':
                    newstr = '%s' % (coefstr,)
                else:
                    if k == 0:
                        newstr = '0'
                    else:
                        newstr = ''
            elif power == 1:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == '1':
                    newstr = 'x'
                else:
                    newstr = '%s x' % (coefstr,)
            else:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == '1':
                    newstr = 'x**%d' % (power,)
                else:
                    newstr = '%s x**%d' % (coefstr, power)
            if k > 0:
                if newstr != '':
                    # Use the coefficient sign to pick "+" or "-" joiners.
                    if self.coeffs[k] < 0:
                        thestr = "%s - %s" % (thestr, newstr)
                    else:
                        thestr = "%s + %s" % (thestr, newstr)
            elif (k == 0) and (newstr != '') and (self.coeffs[k] < 0):
                thestr = "-%s" % (newstr,)
            else:
                thestr = newstr
        return _raise_power(thestr)
    def __call__(self, val):
        # p(val) evaluates the polynomial (or composes, if val is a poly1d).
        return polyval(self.coeffs, val)
    def __mul__(self, other):
        if isscalar(other):
            return poly1d(self.coeffs * other)
        else:
            other = poly1d(other)
            return poly1d(polymul(self.coeffs, other.coeffs))
    def __rmul__(self, other):
        if isscalar(other):
            return poly1d(other * self.coeffs)
        else:
            other = poly1d(other)
            return poly1d(polymul(self.coeffs, other.coeffs))
    def __add__(self, other):
        other = poly1d(other)
        return poly1d(polyadd(self.coeffs, other.coeffs))
    def __radd__(self, other):
        other = poly1d(other)
        return poly1d(polyadd(self.coeffs, other.coeffs))
    def __pow__(self, val):
        # Only non-negative integer powers make sense for polynomials.
        if not isscalar(val) or int(val) != val or val < 0:
            raise ValueError("Power to non-negative integers only.")
        res = [1]
        for k in range(val):
            res = polymul(self.coeffs, res)
        return poly1d(res)
    def __sub__(self, other):
        other = poly1d(other)
        return poly1d(polysub(self.coeffs, other.coeffs))
    def __rsub__(self, other):
        other = poly1d(other)
        return poly1d(polysub(other.coeffs, self.coeffs))
    def __div__(self, other):
        # Python 2 division protocol; polynomial division returns the
        # (quotient, remainder) pair as poly1d objects.
        if isscalar(other):
            return poly1d(self.coeffs / other)
        else:
            other = poly1d(other)
            return list(map(poly1d, polydiv(self.coeffs, other.coeffs)))
    def __rdiv__(self, other):
        if isscalar(other):
            return poly1d(other / self.coeffs)
        else:
            other = poly1d(other)
            return list(map(poly1d, polydiv(other.coeffs, self.coeffs)))
    def __setattr__(self, key, val):
        # Coefficients/order are managed internally; direct writes forbidden.
        raise ValueError("Attributes cannot be changed this way.")
    def __getattr__(self, key):
        # Aliases: p.r/p.roots, p.c/p.coef/p.coefficients, p.o.
        if key in ['r', 'roots']:
            return roots(self.coeffs)
        elif key in ['c', 'coef', 'coefficients']:
            return self.coeffs
        elif key in ['o']:
            return self.order
        else:
            return self.__dict__[key]
    def __getitem__(self, val):
        # p[k] is the coefficient of x**k; out-of-range powers read as 0.
        ind = self.order - val
        if val > self.order:
            return 0
        if val < 0:
            return 0
        return self.coeffs[ind]
    def __setitem__(self, key, val):
        ind = self.order - key
        if key < 0:
            raise ValueError("Does not support negative powers.")
        if key > self.order:
            # Grow the coefficient array with leading zeros.
            # NOTE(review): .typecode() is a Numeric-era method that numpy
            # arrays lack (use .dtype.char) — this path likely raises
            # AttributeError on modern numpy.
            zr = numpy.zeros(key - self.order, self.coeffs.typecode())
            self.__dict__['coeffs'] = numpy.concatenate((zr, self.coeffs))
            self.__dict__['order'] = key
            ind = 0
        self.__dict__['coeffs'][ind] = val
        return
    def integ(self, m=1, k=0):
        # Antiderivative of order m with integration constant(s) k.
        return poly1d(polyint(self.coeffs, m=m, k=k))
    def deriv(self, m=1):
        # m-th derivative.
        return poly1d(polyder(self.coeffs, m=m))
def freqz(b, a, worN=None, whole=0, plot=None):
    """Compute frequency response of a digital filter.
    Description:
        Given the numerator (b) and denominator (a) of a digital filter compute
        its frequency response.
        jw               -jw            -jmw
        jw      B(e)    b[0] + b[1]e + .... + b[m]e
        H(e) = ---- = ------------------------------------
        jw               -jw            -jnw
        A(e)    a[0] + a[2]e + .... + a[n]e
    Inputs:
        b, a --- the numerator and denominator of a linear filter.
        worN --- If None, then compute at 512 frequencies around the unit circle.
        If a single integer, the compute at that many frequencies.
        Otherwise, compute the response at frequencies given in worN
        whole -- Normally, frequencies are computed from 0 to pi (upper-half of
        unit-circle. If whole is non-zero compute frequencies from 0
        to 2*pi.
    Outputs: (h,w)
        h -- The frequency response.
        w -- The frequencies at which h was computed.
    """
    b, a = list(map(atleast_1d, (b, a)))
    if whole:
        lastpoint = 2 * pi
    else:
        lastpoint = pi
    if worN is None:
        N = 512
        w = Num.arange(0, lastpoint, lastpoint / N)
    elif isinstance(worN, int):
        N = worN
        w = Num.arange(0, lastpoint, lastpoint / N)
    else:
        w = worN
    w = atleast_1d(w)
    # Evaluate B and A at z^-1 = e^{-jw} and divide.  The previous code
    # computed polyval(b[::-1] / zm1, polyval(a[::-1], zm1)), which is not
    # the transfer function (it evaluated the polynomial with coefficients
    # b/zm1 at the point A(zm1)); scipy's freqz divides the two polynomial
    # evaluations instead.
    zm1 = exp(-1j * w)
    h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
    # if not plot is None:
    #    plot(w, h)
    return h, w
| gpl-3.0 |
gvb/odoo | addons/procurement/wizard/schedulers_all.py | 306 | 3456 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import threading
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import osv
from openerp.api import Environment
_logger = logging.getLogger(__name__)
class procurement_compute_all(osv.osv_memory):
    _name = 'procurement.order.compute.all'
    _description = 'Compute all schedulers'
    def _procure_calculation_all(self, cr, uid, ids, context=None):
        """Run the procurement scheduler for every company of the user.

        Executed in a background thread (see procure_calculation), hence the
        dedicated environment and a fresh cursor.

        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        """
        with Environment.manage():
            proc_obj = self.pool.get('procurement.order')
            #As this function is in a new thread, i need to open a new cursor, because the old one may be closed
            new_cr = self.pool.cursor()
            scheduler_cron_id = self.pool['ir.model.data'].get_object_reference(new_cr, SUPERUSER_ID, 'procurement', 'ir_cron_scheduler_action')[1]
            # Avoid to run the scheduler multiple times in the same time
            # (lock the cron row without waiting; bail out if already held).
            try:
                with tools.mute_logger('openerp.sql_db'):
                    new_cr.execute("SELECT id FROM ir_cron WHERE id = %s FOR UPDATE NOWAIT", (scheduler_cron_id,))
            except Exception:
                _logger.info('Attempt to run procurement scheduler aborted, as already running')
                new_cr.rollback()
                new_cr.close()
                return {}
            user = self.pool.get('res.users').browse(new_cr, uid, uid, context=context)
            comps = [x.id for x in user.company_ids]
            # Run the scheduler once per company the user belongs to.
            for comp in comps:
                proc_obj.run_scheduler(new_cr, uid, use_new_cursor=new_cr.dbname, company_id = comp, context=context)
            #close the new cursor
            new_cr.close()
            return {}
    def procure_calculation(self, cr, uid, ids, context=None):
        """Start the scheduler computation in a background thread and close
        the wizard window immediately.

        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        """
        threaded_calculation = threading.Thread(target=self._procure_calculation_all, args=(cr, uid, ids, context))
        threaded_calculation.start()
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
HLFH/CouchPotatoServer | couchpotato/core/media/_base/providers/torrent/sceneaccess.py | 44 | 5755 | import traceback
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
log = CPLog(__name__)
class Base(TorrentProvider):
    """SceneAccess torrent provider: scrapes search results from the site's
    HTML browse pages and fetches extra release details on demand."""
    urls = {
        'test': 'https://www.sceneaccess.eu/',
        'login': 'https://www.sceneaccess.eu/login',
        'login_check': 'https://www.sceneaccess.eu/inbox',
        'detail': 'https://www.sceneaccess.eu/details?id=%s',
        'search': 'https://www.sceneaccess.eu/browse?c%d=%d',
        'archive': 'https://www.sceneaccess.eu/archive?&c%d=%d',
        'download': 'https://www.sceneaccess.eu/%s',
    }
    http_time_between_calls = 1  # Seconds
    def _searchOnTitle(self, title, media, quality, results):
        """Search the site for a title and append release dicts to results."""
        url = self.buildUrl(title, media, quality)
        data = self.getHTMLData(url)
        if data:
            html = BeautifulSoup(data)
            try:
                resultsTable = html.find('table', attrs = {'id': 'torrents-table'})
                # No result table means the search returned nothing.
                if resultsTable is None:
                    return
                entries = resultsTable.find_all('tr', attrs = {'class': 'tt_row'})
                for result in entries:
                    link = result.find('td', attrs = {'class': 'ttr_name'}).find('a')
                    url = result.find('td', attrs = {'class': 'td_dl'}).find('a')
                    seeders = result.find('td', attrs = {'class': 'ttr_seeders'}).find('a')
                    leechers = result.find('td', attrs = {'class': 'ttr_leechers'}).find('a')
                    torrent_id = link['href'].replace('details?id=', '')
                    results.append({
                        'id': torrent_id,
                        'name': link['title'],
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['detail'] % torrent_id,
                        'size': self.parseSize(result.find('td', attrs = {'class': 'ttr_size'}).contents[0]),
                        'seeders': tryInt(seeders.string) if seeders else 0,
                        'leechers': tryInt(leechers.string) if leechers else 0,
                        'get_more_info': self.getMoreInfo,
                    })
            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
    def getMoreInfo(self, item):
        """Fetch the release's detail page (cached ~300 days) and attach its
        description text to the item."""
        full_description = self.getCache('sceneaccess.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
        html = BeautifulSoup(full_description)
        nfo_pre = html.find('div', attrs = {'id': 'details_table'})
        description = toUnicode(nfo_pre.text) if nfo_pre else ''
        item['description'] = description
        return item
    # Login
    def getLoginParams(self):
        # Form fields posted to the login URL.
        return {
            'username': self.conf('username'),
            'password': self.conf('password'),
            'submit': 'come on in',
        }
    def loginSuccess(self, output):
        # Logged-in pages link to the user's inbox.
        return '/inbox' in output.lower()
    loginCheckSuccess = loginSuccess
# CouchPotato provider configuration: declares the SceneAccess settings shown
# in the searcher tab (credentials, seed requirements, score bonus).
config = [{
    'name': 'sceneaccess',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'SceneAccess',
            'description': '<a href="https://sceneaccess.eu/">SceneAccess</a>',
            'wizard': True,
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAACT0lEQVR4AYVQS0sbURidO3OTmajJ5FElTTOkPmZ01GhHrIq0aoWAj1Vc+A/cuRMXbl24V9SlCGqrLhVFCrooEhCp2BAx0mobTY2kaR7qmOm87EXL1EWxh29xL+c7nPMdgGHYO5bF/gdbefnr6WlbWRnxluMwAB4Z0uEgXa7nwaDL7+/RNPzxbYvb/XJ0FBYVfd/ayh0fQ4qCGEHcm0KLRZUk7Pb2YRJPRwcsKMidnKD3t9VVT3s7BDh+z5FOZ3Vfn3h+Hltfx00mRRSRWFcUmmVNhYVqPn8dj3va2oh+txvcQRVF9ebm1fi4k+dRFbosY5rm4Hk7xxULQnJnx93S4g0EIEEQRoDLo6PrWEw8Pc0eHLwYGopMTDirqlJ7eyhYYGHhfgfHCcKYksZGVB/NcXI2mw6HhZERqrjYTNPHi4tFPh8aJIYIhgPlcCRDoZLW1s75+Z/7+59nZ/OJhLWigqAoKZX6Mjf3dXkZ3pydGYLc4aEoCCkInzQ1fRobS2xuvllaonkedfArnY5OTdGVldBkOADgqq2Nr6z8CIWaJietDHOhKB+HhwFKC6Gnq4ukKJvP9zcSbjYDXbeVlkKzuZBhnnV3e3t6UOmaJO0ODibW1hB1GYkg8R/gup7Z3TVZLJ5AILW9LcZiVpYtYBhw16O3t7cauckyeF9Tgz0ATpL2+nopmWycmbnY2LiKRjFk6/d7+/vRJfl4HGzV1T0UIM43MGBvaIBWK/YvwM5w+IMgGH8tkyEgvIpE7M3Nt6qqZrNyOq1kMmouh455Ggz+BhKY4GEc2CfwAAAAAElFTkSuQmCC',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False,
                },
                {
                    'name': 'username',
                    'default': '',
                },
                {
                    'name': 'password',
                    'default': '',
                    'type': 'password',
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 20,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]
| gpl-3.0 |
phenoxim/nova | nova/scheduler/filters/aggregate_multitenancy_isolation.py | 7 | 2103 | # Copyright (c) 2011-2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from nova.scheduler import filters
from nova.scheduler.filters import utils
LOG = logging.getLogger(__name__)
class AggregateMultiTenancyIsolation(filters.BaseHostFilter):
    """Isolate tenants in specific aggregates."""

    # Aggregate data and tenant do not change within a request
    run_filter_once_per_request = True

    RUN_ON_REBUILD = False

    def host_passes(self, host_state, spec_obj):
        """If a host is in an aggregate that has the metadata key
        "filter_tenant_id" it can only create instances from that tenant(s).
        A host can be in different aggregates.
        If a host doesn't belong to an aggregate with the metadata key
        "filter_tenant_id" it can create instances from all tenants.
        """
        requesting_tenant = spec_obj.project_id
        metadata = utils.aggregate_metadata_get_by_host(host_state,
                                                        key="filter_tenant_id")
        # Host belongs to no aggregate carrying the key: accept any tenant.
        if metadata == {}:
            return True
        allowed_tenants = metadata.get("filter_tenant_id")
        if not allowed_tenants:
            LOG.debug("No tenant id's defined on host. Host passes.")
            return True
        if requesting_tenant not in allowed_tenants:
            LOG.debug("%s fails tenant id on aggregate", host_state)
            return False
        LOG.debug("Host tenant id %s matched", requesting_tenant)
        return True
| apache-2.0 |
Yong-Lee/decode-Django | Django-1.5.1/django/contrib/gis/tests/geoapp/test_feeds.py | 111 | 4189 | from __future__ import absolute_import
from xml.dom import minidom
from django.conf import settings
from django.contrib.sites.models import Site
from django.test import TestCase
from .models import City
class GeoFeedTest(TestCase):
    """Exercises the GeoRSS/W3C Geo syndication feed views wired up in the
    geoapp test URLconf, checking namespaces and per-item geo elements."""
    urls = 'django.contrib.gis.tests.geoapp.urls'
    def setUp(self):
        # The feeds framework needs a Site matching settings.SITE_ID; force
        # the sites app to look installed for the duration of the test.
        Site(id=settings.SITE_ID, domain="example.com", name="example.com").save()
        self.old_Site_meta_installed = Site._meta.installed
        Site._meta.installed = True
    def tearDown(self):
        # Restore the original "installed" flag changed in setUp.
        Site._meta.installed = self.old_Site_meta_installed
    def assertChildNodes(self, elem, expected):
        "Taken from regressiontests/syndication/tests.py."
        # Compares child node names as sets (order-insensitive).
        actual = set([n.nodeName for n in elem.childNodes])
        expected = set(expected)
        self.assertEqual(actual, expected)
    def test_geofeed_rss(self):
        "Tests geographic feeds using GeoRSS over RSSv2."
        # Uses `GEOSGeometry` in `item_geometry`
        doc1 = minidom.parseString(self.client.get('/feeds/rss1/').content)
        # Uses a 2-tuple in `item_geometry`
        doc2 = minidom.parseString(self.client.get('/feeds/rss2/').content)
        feed1, feed2 = doc1.firstChild, doc2.firstChild
        # Making sure the box got added to the second GeoRSS feed.
        self.assertChildNodes(feed2.getElementsByTagName('channel')[0],
                              ['title', 'link', 'description', 'language',
                               'lastBuildDate', 'item', 'georss:box', 'atom:link']
                              )
        # Incrementing through the feeds.
        for feed in [feed1, feed2]:
            # Ensuring the georss namespace was added to the <rss> element.
            self.assertEqual(feed.getAttribute('xmlns:georss'), 'http://www.georss.org/georss')
            chan = feed.getElementsByTagName('channel')[0]
            items = chan.getElementsByTagName('item')
            self.assertEqual(len(items), City.objects.count())
            # Ensuring the georss element was added to each item in the feed.
            for item in items:
                self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'georss:point'])
    def test_geofeed_atom(self):
        "Testing geographic feeds using GeoRSS over Atom."
        doc1 = minidom.parseString(self.client.get('/feeds/atom1/').content)
        doc2 = minidom.parseString(self.client.get('/feeds/atom2/').content)
        feed1, feed2 = doc1.firstChild, doc2.firstChild
        # Making sure the box got added to the second GeoRSS feed.
        self.assertChildNodes(feed2, ['title', 'link', 'id', 'updated', 'entry', 'georss:box'])
        for feed in [feed1, feed2]:
            # Ensuring the georsss namespace was added to the <feed> element.
            self.assertEqual(feed.getAttribute('xmlns:georss'), 'http://www.georss.org/georss')
            entries = feed.getElementsByTagName('entry')
            self.assertEqual(len(entries), City.objects.count())
            # Ensuring the georss element was added to each entry in the feed.
            for entry in entries:
                self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'georss:point'])
    def test_geofeed_w3c(self):
        "Testing geographic feeds using W3C Geo."
        doc = minidom.parseString(self.client.get('/feeds/w3cgeo1/').content)
        feed = doc.firstChild
        # Ensuring the geo namespace was added to the <feed> element.
        self.assertEqual(feed.getAttribute('xmlns:geo'), 'http://www.w3.org/2003/01/geo/wgs84_pos#')
        chan = feed.getElementsByTagName('channel')[0]
        items = chan.getElementsByTagName('item')
        self.assertEqual(len(items), City.objects.count())
        # Ensuring the geo:lat and geo:lon element was added to each item in the feed.
        for item in items:
            self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'geo:lat', 'geo:lon'])
        # Boxes and Polygons aren't allowed in W3C Geo feeds.
        self.assertRaises(ValueError, self.client.get, '/feeds/w3cgeo2/')  # Box in <channel>
        self.assertRaises(ValueError, self.client.get, '/feeds/w3cgeo3/')  # Polygons in <entry>
6WIND/scapy | scapy/sendrecv.py | 1 | 40789 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# This program is published under a GPLv2 license
"""
Functions to send and receive packets.
"""
from __future__ import absolute_import, print_function
import itertools
from threading import Thread, Event
import os
import re
import subprocess
import time
import types
from scapy.compat import plain_str
from scapy.data import ETH_P_ALL
from scapy.config import conf
from scapy.error import warning
from scapy.packet import Gen, Packet
from scapy.utils import get_temp_file, tcpdump, wrpcap, \
ContextManagerSubprocess, PcapReader
from scapy.plist import PacketList, SndRcvList
from scapy.error import log_runtime, log_interactive, Scapy_Exception
from scapy.base_classes import SetGen
from scapy.modules import six
from scapy.modules.six.moves import map
from scapy.sessions import DefaultSession
from scapy.supersocket import SuperSocket
if conf.route is None:
# unused import, only to initialize conf.route
import scapy.route # noqa: F401
#################
# Debug class #
#################
class debug:
    """Module-level debug storage, populated by sndrcv() when
    conf.debug_match is enabled."""
    # Received packets that did not match any sent stimulus.
    recv = []
    # Sent packets that remained unanswered (stored as a "Sent" PacketList).
    sent = []
    # Matched (sent, received) couples.
    match = []
    # NOTE(review): not written anywhere in this module — presumably set by a
    # caller when sniffing crashes; confirm at the use sites.
    crashed_on = None
####################
# Send / Receive #
####################
_DOC_SNDRCV_PARAMS = """
:param pks: SuperSocket instance to send/receive packets
:param pkt: the packet to send
:param rcv_pks: if set, will be used instead of pks to receive packets.
packets will still be sent through pks
:param nofilter: put 1 to avoid use of BPF filters
:param retry: if positive, how many times to resend unanswered packets
if negative, how many times to retry when no more packets
are answered
:param timeout: how much time to wait after the last packet has been sent
:param verbose: set verbosity level
:param multi: whether to accept multiple answers for the same stimulus
:param store_unanswered: whether to store not-answered packets or not.
setting it to False will increase speed, and will return
None as the unans list.
:param process: if specified, only result from process(pkt) will be stored.
the function should follow the following format:
``lambda sent, received: (func(sent), func2(received))``
if the packet is unanswered, `received` will be None.
if `store_unanswered` is False, the function won't be called on
un-answered packets.
:param prebuild: pre-build the packets before starting to send them.
Automatically enabled when a generator is passed as the packet
"""
class SndRcvHandler(object):
"""
Util to send/receive packets, used by sr*().
Do not use directly.
This matches the requests and answers.
Notes::
- threaded mode: enabling threaded mode will likely
break packet timestamps, but might result in a speedup
when sending a big amount of packets. Disabled by default
- DEVS: store the outgoing timestamp right BEFORE sending the packet
to avoid races that could result in negative latency. We aren't Stadia
"""
def __init__(self, pks, pkt,
timeout=None, inter=0, verbose=None,
chainCC=False,
retry=0, multi=False, rcv_pks=None,
prebuild=False, _flood=None,
threaded=False,
session=None):
# Instantiate all arguments
if verbose is None:
verbose = conf.verb
if conf.debug_match:
debug.recv = PacketList([], "Received")
debug.sent = PacketList([], "Sent")
debug.match = SndRcvList([], "Matched")
self.nbrecv = 0
self.ans = []
self.pks = pks
self.rcv_pks = rcv_pks or pks
self.inter = inter
self.verbose = verbose
self.chainCC = chainCC
self.multi = multi
self.timeout = timeout
self.session = session
# Instantiate packet holders
if _flood:
self.tobesent = pkt
self.notans = _flood[0]
else:
if isinstance(pkt, types.GeneratorType) or prebuild:
self.tobesent = [p for p in pkt]
self.notans = len(self.tobesent)
else:
self.tobesent = (
SetGen(pkt) if not isinstance(pkt, Gen) else pkt
)
self.notans = self.tobesent.__iterlen__()
if retry < 0:
autostop = retry = -retry
else:
autostop = 0
if timeout is not None and timeout < 0:
self.timeout = None
while retry >= 0:
self.hsent = {}
if threaded or _flood:
# Send packets in thread.
# https://github.com/secdev/scapy/issues/1791
snd_thread = Thread(
target=self._sndrcv_snd
)
snd_thread.setDaemon(True)
# Start routine with callback
self._sndrcv_rcv(snd_thread.start)
# Ended. Let's close gracefully
if _flood:
# Flood: stop send thread
_flood[1]()
snd_thread.join()
else:
self._sndrcv_rcv(self._sndrcv_snd)
if multi:
remain = [
p for p in itertools.chain(*six.itervalues(self.hsent))
if not hasattr(p, '_answered')
]
else:
remain = list(itertools.chain(*six.itervalues(self.hsent)))
if autostop and len(remain) > 0 and \
len(remain) != len(self.tobesent):
retry = autostop
self.tobesent = remain
if len(self.tobesent) == 0:
break
retry -= 1
if conf.debug_match:
debug.sent = PacketList(remain[:], "Sent")
debug.match = SndRcvList(self.ans[:])
# Clean the ans list to delete the field _answered
if multi:
for snd, _ in self.ans:
if hasattr(snd, '_answered'):
del snd._answered
if verbose:
print(
"\nReceived %i packets, got %i answers, "
"remaining %i packets" % (
self.nbrecv + len(self.ans), len(self.ans), self.notans
)
)
self.ans_result = SndRcvList(self.ans)
self.unans_result = PacketList(remain, "Unanswered")
    def results(self):
        # Return the final (answered, unanswered) pair that __init__
        # builds once the send/receive loop has finished.
        return self.ans_result, self.unans_result
    def _sndrcv_snd(self):
        """Function used in the sending thread of sndrcv().

        Iterates over ``self.tobesent``, registering each packet in
        ``self.hsent`` (keyed by ``hashret()``) *before* sending it, so
        the receiving side can match answers even for packets sent a
        moment ago.
        """
        try:
            if self.verbose:
                print("Begin emission:")
            i = 0
            for p in self.tobesent:
                # Populate the dictionary of _sndrcv_rcv
                # _sndrcv_rcv won't miss the answer of a packet that
                # has not been sent
                self.hsent.setdefault(p.hashret(), []).append(p)
                # Send packet
                self.pks.send(p)
                time.sleep(self.inter)
                i += 1
            if self.verbose:
                print("Finished sending %i packets." % i)
        except SystemExit:
            # A SystemExit simply ends this sending thread quietly.
            pass
        except Exception:
            # Log (rather than propagate) so the receiving side keeps running.
            log_runtime.exception("--- Error sending packets")
    def _process_packet(self, r):
        """Internal function used to process each packet.

        Matches a received packet ``r`` against the sent packets stored
        in ``self.hsent`` (same ``hashret()`` bucket + ``answers()``).
        Matched pairs go to ``self.ans``; unmatched packets only bump
        ``self.nbrecv``.
        """
        if r is None:
            return
        ok = False
        h = r.hashret()
        if h in self.hsent:
            hlst = self.hsent[h]
            for i, sentpkt in enumerate(hlst):
                if r.answers(sentpkt):
                    self.ans.append((sentpkt, r))
                    if self.verbose > 1:
                        os.write(1, b"*")
                    ok = True
                    if not self.multi:
                        # Single-answer mode: forget the sent packet so it
                        # cannot be matched twice.
                        del hlst[i]
                        self.notans -= 1
                    else:
                        # Multi mode: keep the packet but count it as
                        # answered only once (flagged via _answered).
                        if not hasattr(sentpkt, '_answered'):
                            self.notans -= 1
                        sentpkt._answered = 1
                    break
        if self.notans <= 0 and not self.multi:
            # Every sent packet got an answer: stop sniffing early.
            self.sniffer.stop(join=False)
        if not ok:
            if self.verbose > 1:
                os.write(1, b".")
            self.nbrecv += 1
            if conf.debug_match:
                debug.recv.append(r)
    def _sndrcv_rcv(self, callback):
        """Function used to receive packets and check their hashret.

        Runs an AsyncSniffer *synchronously* on ``self.pks``;
        ``callback`` (typically the send routine or the send-thread
        start) is invoked as soon as sniffing has started, so no answer
        can arrive before we are listening.
        """
        self.sniffer = None
        try:
            self.sniffer = AsyncSniffer()
            self.sniffer._run(
                prn=self._process_packet,
                timeout=self.timeout,
                store=False,
                opened_socket=self.pks,
                session=self.session,
                started_callback=callback
            )
        except KeyboardInterrupt:
            # Only propagate Ctrl-C when the caller asked for it.
            if self.chainCC:
                raise
def sndrcv(*args, **kwargs):
    """Scapy raw function to send a packet and receive its answer.
    WARNING: This is an internal function. Using sr/srp/sr1/srp is
    more appropriate in many cases.
    """
    # Delegate everything to the handler object and hand back its
    # (answered, unanswered) result pair.
    return SndRcvHandler(*args, **kwargs).results()
def __gen_send(s, x, inter=0, loop=0, count=None, verbose=None, realtime=None, return_packets=False, *args, **kargs):  # noqa: E501
    """Internal worker shared by send() and sendp().

    Sends the packets of ``x`` over the already-open socket ``s``.

    ``loop`` semantics (after normalization below): 0 becomes -1 (one
    pass), ``count`` becomes ``loop = -count`` (that many passes), and a
    positive ``loop`` never decrements, i.e. sends forever until Ctrl-C.
    When ``realtime`` is truthy, inter-packet gaps are derived from the
    packets' ``time`` attributes.
    Returns a PacketList of the sent packets when ``return_packets`` is
    true, otherwise None.
    """
    if isinstance(x, str):
        x = conf.raw_layer(load=x)
    if not isinstance(x, Gen):
        x = SetGen(x)
    if verbose is None:
        verbose = conf.verb
    n = 0
    if count is not None:
        loop = -count
    elif not loop:
        loop = -1
    if return_packets:
        sent_packets = PacketList()
    try:
        while loop:
            # dt0 anchors the first packet's timestamp to "now" for the
            # realtime pacing below.
            dt0 = None
            for p in x:
                if realtime:
                    ct = time.time()
                    if dt0:
                        st = dt0 + float(p.time) - ct
                        if st > 0:
                            time.sleep(st)
                    else:
                        dt0 = ct - float(p.time)
                s.send(p)
                if return_packets:
                    sent_packets.append(p)
                n += 1
                if verbose:
                    os.write(1, b".")
                time.sleep(inter)
            if loop < 0:
                loop += 1
    except KeyboardInterrupt:
        # Ctrl-C ends the (possibly infinite) loop gracefully.
        pass
    if verbose:
        print("\nSent %i packets." % n)
    if return_packets:
        return sent_packets
@conf.commands.register
def send(x, inter=0, loop=0, count=None, verbose=None, realtime=None,
         return_packets=False, socket=None, iface=None, *args, **kargs):
    """
    Send packets at layer 3

    :param x: the packets
    :param inter: time (in s) between two packets (default 0)
    :param loop: send packet indefinetly (default 0)
    :param count: number of packets to send (default None=1)
    :param verbose: verbose mode (default None=conf.verbose)
    :param realtime: check that a packet was sent before sending the next one
    :param return_packets: return the sent packets
    :param socket: the socket to use (default is conf.L3socket(kargs))
    :param iface: the interface to send the packets on
    :param monitor: (not on linux) send in monitor mode
    :returns: None
    """
    # Only close the socket if this function created it; a caller-provided
    # socket stays open so the caller can keep using it.
    need_closing = socket is None
    kargs["iface"] = _interface_selection(iface, x)
    socket = socket or conf.L3socket(*args, **kargs)
    try:
        return __gen_send(socket, x, inter=inter, loop=loop,
                          count=count, verbose=verbose,
                          realtime=realtime, return_packets=return_packets)
    finally:
        # Close even if sending raised, so a locally created socket
        # cannot leak (the original only closed on the success path).
        if need_closing:
            socket.close()
@conf.commands.register
def sendp(x, inter=0, loop=0, iface=None, iface_hint=None, count=None,
          verbose=None, realtime=None,
          return_packets=False, socket=None, *args, **kargs):
    """
    Send packets at layer 2

    :param x: the packets
    :param inter: time (in s) between two packets (default 0)
    :param loop: send packet indefinetly (default 0)
    :param count: number of packets to send (default None=1)
    :param verbose: verbose mode (default None=conf.verbose)
    :param realtime: check that a packet was sent before sending the next one
    :param return_packets: return the sent packets
    :param socket: the socket to use (default is conf.L2socket(kargs))
    :param iface: the interface to send the packets on
    :param iface_hint: destination address used to choose an interface when
        neither ``iface`` nor ``socket`` is given
    :param monitor: (not on linux) send in monitor mode
    :returns: None
    """
    if iface is None and iface_hint is not None and socket is None:
        iface = conf.route.route(iface_hint)[0]
    # Only close the socket if this function created it.
    need_closing = socket is None
    socket = socket or conf.L2socket(iface=iface, *args, **kargs)
    try:
        return __gen_send(socket, x, inter=inter, loop=loop,
                          count=count, verbose=verbose,
                          realtime=realtime, return_packets=return_packets)
    finally:
        # Close even if sending raised, so a locally created socket
        # cannot leak (the original only closed on the success path).
        if need_closing:
            socket.close()
@conf.commands.register
def sendpfast(x, pps=None, mbps=None, realtime=None, loop=0, file_cache=False, iface=None, replay_args=None,  # noqa: E501
              parse_results=False):
    """Send packets at layer 2 using tcpreplay for performance

    :param pps: packets per second
    :param mbps: MBits per second
    :param realtime: use packet's timestamp, bending time with real-time value
    :param loop: number of times to process the packet list
    :param file_cache: cache packets in RAM instead of reading from
        disk at each iteration
    :param iface: output interface
    :param replay_args: List of additional tcpreplay args (List[str])
    :param parse_results: Return a dictionary of information
        outputted by tcpreplay (default=False)
    :returns: stdout, stderr, command used
    """
    if iface is None:
        iface = conf.iface
    # Build the tcpreplay command line; the rate options are mutually
    # exclusive, falling back to --topspeed when none is given.
    argv = [conf.prog.tcpreplay, "--intf1=%s" % iface]
    if pps is not None:
        argv.append("--pps=%i" % pps)
    elif mbps is not None:
        argv.append("--mbps=%f" % mbps)
    elif realtime is not None:
        argv.append("--multiplier=%f" % realtime)
    else:
        argv.append("--topspeed")
    if loop:
        argv.append("--loop=%i" % loop)
    if file_cache:
        argv.append("--preload-pcap")
    # Check for any additional args we didn't cover.
    if replay_args is not None:
        argv.extend(replay_args)
    # The packets are handed to tcpreplay through a temporary pcap file,
    # removed again on every exit path below.
    f = get_temp_file()
    argv.append(f)
    wrpcap(f, x)
    results = None
    with ContextManagerSubprocess(conf.prog.tcpreplay):
        try:
            cmd = subprocess.Popen(argv, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        except KeyboardInterrupt:
            log_interactive.info("Interrupted by user")
        except Exception:
            os.unlink(f)
            raise
        else:
            stdout, stderr = cmd.communicate()
            if stderr:
                log_runtime.warning(stderr.decode())
            if parse_results:
                results = _parse_tcpreplay_result(stdout, stderr, argv)
            elif conf.verb > 2:
                log_runtime.info(stdout.decode())
    os.unlink(f)
    return results
def _parse_tcpreplay_result(stdout, stderr, argv):
    """
    Parse the output of tcpreplay and modify the results_dict to populate output information.  # noqa: E501
    Tested with tcpreplay v3.4.4
    Tested with tcpreplay v4.1.2
    :param stdout: stdout of tcpreplay subprocess call
    :param stderr: stderr of tcpreplay subprocess call
    :param argv: the command used in the subprocess call
    :return: dictionary containing the results
    """
    try:
        results = {}
        stdout = plain_str(stdout).lower()
        stderr = plain_str(stderr).strip().split("\n")
        # For each recognized stat line prefix, the tuple lists the types
        # of the numbers expected on that line, in order.
        elements = {
            "actual": (int, int, float),
            "rated": (float, float, float),
            "flows": (int, float, int, int),
            "attempted": (int,),
            "successful": (int,),
            "failed": (int,),
            "truncated": (int,),
            "retried packets (eno": (int,),
            "retried packets (eag": (int,),
        }
        # Lines yielding several numbers map to these result-dict keys;
        # single-number lines reuse the prefix itself as key.
        multi = {
            "actual": ("packets", "bytes", "time"),
            "rated": ("bps", "mbps", "pps"),
            "flows": ("flows", "fps", "flow_packets", "non_flow"),
            "retried packets (eno": ("retried_enobufs",),
            "retried packets (eag": ("retried_eagain",),
        }
        float_reg = r"([0-9]*\.[0-9]+|[0-9]+)"
        int_reg = r"([0-9]+)"
        any_reg = r"[^0-9]*"
        r_types = {int: int_reg, float: float_reg}
        for line in stdout.split("\n"):
            line = line.strip()
            for elt, _types in elements.items():
                if line.startswith(elt):
                    # Build a regex with one capture group per expected
                    # number, separated by arbitrary non-digit text.
                    regex = any_reg.join([r_types[x] for x in _types])
                    matches = re.search(regex, line)
                    for i, typ in enumerate(_types):
                        name = multi.get(elt, [elt])[i]
                        results[name] = typ(matches.group(i + 1))
        results["command"] = " ".join(argv)
        results["warnings"] = stderr[:-1]
        return results
    except Exception as parse_exception:
        # In interactive use, a parse failure is reported, not fatal.
        if not conf.interactive:
            raise
        log_runtime.error("Error parsing output: " + str(parse_exception))
        return {}
@conf.commands.register
def sr(x, promisc=None, filter=None, iface=None, nofilter=0, *args, **kargs):
    """
    Send and receive packets at layer 3
    """
    s = conf.L3socket(promisc=promisc, filter=filter,
                      iface=iface, nofilter=nofilter)
    try:
        return sndrcv(s, x, *args, **kargs)
    finally:
        # Release the socket even if sndrcv raises (e.g. chainCC=True);
        # the original leaked it on the error path.
        s.close()
def _interface_selection(iface, packet):
"""
Select the network interface according to the layer 3 destination
"""
if iface is None:
try:
iff = packet.route()[0]
except AttributeError:
iff = None
return iff or conf.iface
return iface
@conf.commands.register
def sr1(x, promisc=None, filter=None, iface=None, nofilter=0, *args, **kargs):
    """
    Send packets at layer 3 and return only the first answer
    """
    iface = _interface_selection(iface, x)
    s = conf.L3socket(promisc=promisc, filter=filter,
                      nofilter=nofilter, iface=iface)
    try:
        ans, _ = sndrcv(s, x, *args, **kargs)
    finally:
        # Release the socket even if sndrcv raises; the original leaked
        # it on the error path.
        s.close()
    if len(ans) > 0:
        return ans[0][1]
    # No answer received within the timeout.
    return None
@conf.commands.register
def srp(x, promisc=None, iface=None, iface_hint=None, filter=None,
        nofilter=0, type=ETH_P_ALL, *args, **kargs):
    """
    Send and receive packets at layer 2
    """
    # A destination hint can pick the interface when none was given.
    if iface is None and iface_hint is not None:
        iface = conf.route.route(iface_hint)[0]
    s = conf.L2socket(promisc=promisc, iface=iface,
                      filter=filter, nofilter=nofilter, type=type)
    try:
        return sndrcv(s, x, *args, **kargs)
    finally:
        # Release the socket even if sndrcv raises; the original leaked
        # it on the error path.
        s.close()
@conf.commands.register
def srp1(*args, **kargs):
    """
    Send and receive packets at layer 2 and return only the first answer
    """
    # Delegate to srp() and keep only the first answered pair's reply.
    answered = srp(*args, **kargs)[0]
    if len(answered) > 0:
        return answered[0][1]
    return None
# Append doc
# Share the common sndrcv parameter documentation across the sr* family.
# Docstrings are None under ``python -OO``, hence the guard.
for sr_func in [srp, srp1, sr, sr1]:
    if sr_func.__doc__ is not None:
        sr_func.__doc__ += _DOC_SNDRCV_PARAMS
# SEND/RECV LOOP METHODS
def __sr_loop(srfunc, pkts, prn=lambda x: x[1].summary(),
              prnfail=lambda x: x.summary(),
              inter=1, timeout=None, count=None, verbose=None, store=1,
              *args, **kargs):
    """Internal worker for srloop()/srploop().

    Calls ``srfunc(pkts, ...)`` repeatedly (``count`` times, or until
    Ctrl-C when ``count`` is None), printing answered packets with
    ``prn`` and unanswered ones with ``prnfail`` at verbosity > 1, and
    pacing iterations so they are at least ``inter`` seconds apart.
    Returns an accumulated (SndRcvList, PacketList) pair (empty when
    ``store`` is falsy).
    """
    n = 0
    r = 0
    ct = conf.color_theme
    if verbose is None:
        verbose = conf.verb
    # parity alternates the display color between iterations.
    parity = 0
    ans = []
    unans = []
    if timeout is None:
        timeout = min(2 * inter, 5)
    try:
        while True:
            parity ^= 1
            col = [ct.even, ct.odd][parity]
            if count is not None:
                if count == 0:
                    break
                count -= 1
            start = time.time()
            if verbose > 1:
                print("\rsend...\r", end=' ')
            res = srfunc(pkts, timeout=timeout, verbose=0, chainCC=True, *args, **kargs)  # noqa: E501
            n += len(res[0]) + len(res[1])
            r += len(res[0])
            if verbose > 1 and prn and len(res[0]) > 0:
                msg = "RECV %i:" % len(res[0])
                print("\r" + ct.success(msg), end=' ')
                for p in res[0]:
                    print(col(prn(p)))
                    print(" " * len(msg), end=' ')
            if verbose > 1 and prnfail and len(res[1]) > 0:
                msg = "fail %i:" % len(res[1])
                print("\r" + ct.fail(msg), end=' ')
                for p in res[1]:
                    print(col(prnfail(p)))
                    print(" " * len(msg), end=' ')
            if verbose > 1 and not (prn or prnfail):
                print("recv:%i fail:%i" % tuple(map(len, res[:2])))
            if store:
                ans += res[0]
                unans += res[1]
            end = time.time()
            # Keep at least ``inter`` seconds between iterations.
            if end - start < inter:
                time.sleep(inter + start - end)
    except KeyboardInterrupt:
        pass
    if verbose and n > 0:
        print(ct.normal("\nSent %i packets, received %i packets. %3.1f%% hits." % (n, r, 100.0 * r / n)))  # noqa: E501
    return SndRcvList(ans), PacketList(unans)
@conf.commands.register
def srloop(pkts, *args, **kargs):
    """Send a packet at layer 3 in loop and print the answer each time
    srloop(pkts, [prn], [inter], [count], ...) --> None"""
    # Thin layer-3 wrapper around the shared loop worker.
    return __sr_loop(sr, pkts, *args, **kargs)
@conf.commands.register
def srploop(pkts, *args, **kargs):
    """Send a packet at layer 2 in loop and print the answer each time
    srploop(pkts, [prn], [inter], [count], ...) --> None"""
    # Thin layer-2 wrapper around the shared loop worker.
    return __sr_loop(srp, pkts, *args, **kargs)
# SEND/RECV FLOOD METHODS
def sndrcvflood(pks, pkt, inter=0, verbose=None, chainCC=False, timeout=None):
    """sndrcv equivalent for flooding.

    Repeats ``pkt`` endlessly through an infinite generator until every
    original packet has been answered (or ``timeout`` expires); the
    generator is stopped via ``stopevent``.

    :param pks: the socket to send/receive on
    :param pkt: the packet (or iterable of packets) to flood
    :param inter: time (in s) between two sends
    :param verbose: verbosity level
    :param chainCC: propagate KeyboardInterrupt to the caller
    :param timeout: stop flooding after this many seconds
    """
    stopevent = Event()

    def send_in_loop(tobesent, stopevent):
        """Infinite generator that produces the same
        packet until stopevent is triggered."""
        while True:
            for p in tobesent:
                if stopevent.is_set():
                    return
                yield p

    infinite_gen = send_in_loop(pkt, stopevent)
    # _flood carries (number of distinct packets, stop-callback) into
    # SndRcvHandler, which reads _flood[0] and calls _flood[1]().
    _flood_len = pkt.__iterlen__() if isinstance(pkt, Gen) else len(pkt)
    _flood = [_flood_len, stopevent.set]
    return sndrcv(
        pks, infinite_gen,
        inter=inter, verbose=verbose,
        # BUG FIX: the timeout parameter was accepted but discarded
        # (a literal ``timeout=None`` was passed here).
        chainCC=chainCC, timeout=timeout,
        _flood=_flood
    )
@conf.commands.register
def srflood(x, promisc=None, filter=None, iface=None, nofilter=None, *args, **kargs):  # noqa: E501
    """Flood and receive packets at layer 3

    :param prn: function applied to packets received
    :param unique: only consider packets whose print
    :param nofilter: put 1 to avoid use of BPF filters
    :param filter: provide a BPF filter
    :param iface: listen answers only on the given interface
    """
    s = conf.L3socket(promisc=promisc, filter=filter, iface=iface, nofilter=nofilter)  # noqa: E501
    try:
        return sndrcvflood(s, x, *args, **kargs)
    finally:
        # Release the socket even if the flood is interrupted or raises;
        # the original leaked it on the error path.
        s.close()
@conf.commands.register
def sr1flood(x, promisc=None, filter=None, iface=None, nofilter=0, *args, **kargs):  # noqa: E501
    """Flood and receive packets at layer 3 and return only the first answer

    :param prn: function applied to packets received
    :param verbose: set verbosity level
    :param nofilter: put 1 to avoid use of BPF filters
    :param filter: provide a BPF filter
    :param iface: listen answers only on the given interface
    """
    s = conf.L3socket(promisc=promisc, filter=filter, nofilter=nofilter, iface=iface)  # noqa: E501
    try:
        ans, _ = sndrcvflood(s, x, *args, **kargs)
    finally:
        # Release the socket even on the error path (leaked before).
        s.close()
    if len(ans) > 0:
        return ans[0][1]
    return None
@conf.commands.register
def srpflood(x, promisc=None, filter=None, iface=None, iface_hint=None, nofilter=None, *args, **kargs):  # noqa: E501
    """Flood and receive packets at layer 2

    :param prn: function applied to packets received
    :param unique: only consider packets whose print
    :param nofilter: put 1 to avoid use of BPF filters
    :param filter: provide a BPF filter
    :param iface: listen answers only on the given interface
    """
    # A destination hint can pick the interface when none was given.
    if iface is None and iface_hint is not None:
        iface = conf.route.route(iface_hint)[0]
    s = conf.L2socket(promisc=promisc, filter=filter, iface=iface, nofilter=nofilter)  # noqa: E501
    try:
        return sndrcvflood(s, x, *args, **kargs)
    finally:
        # Release the socket even on the error path (leaked before).
        s.close()
@conf.commands.register
def srp1flood(x, promisc=None, filter=None, iface=None, nofilter=0, *args, **kargs):  # noqa: E501
    """Flood and receive packets at layer 2 and return only the first answer

    :param prn: function applied to packets received
    :param verbose: set verbosity level
    :param nofilter: put 1 to avoid use of BPF filters
    :param filter: provide a BPF filter
    :param iface: listen answers only on the given interface
    """
    s = conf.L2socket(promisc=promisc, filter=filter, nofilter=nofilter, iface=iface)  # noqa: E501
    try:
        ans, _ = sndrcvflood(s, x, *args, **kargs)
    finally:
        # Release the socket even on the error path (leaked before).
        s.close()
    if len(ans) > 0:
        return ans[0][1]
    return None
# SNIFF METHODS
class AsyncSniffer(object):
    """
    Sniff packets and return a list of packets.
    Args:
        count: number of packets to capture. 0 means infinity.
        store: whether to store sniffed packets or discard them
        prn: function to apply to each packet. If something is returned, it
            is displayed.
            --Ex: prn = lambda x: x.summary()
        session: a session = a flow decoder used to handle stream of packets.
            --Ex: session=TCPSession
            See below for more details.
        filter: BPF filter to apply.
        lfilter: Python function applied to each packet to determine if
            further action may be done.
            --Ex: lfilter = lambda x: x.haslayer(Padding)
        offline: PCAP file (or list of PCAP files) to read packets from,
            instead of sniffing them
        quiet: when set to True, the process stderr is discarded
            (default: False).
        timeout: stop sniffing after a given time (default: None).
        L2socket: use the provided L2socket (default: use conf.L2listen).
        opened_socket: provide an object (or a list of objects) ready to use
            .recv() on.
        stop_filter: Python function applied to each packet to determine if
            we have to stop the capture after this packet.
            --Ex: stop_filter = lambda x: x.haslayer(TCP)
        iface: interface or list of interfaces (default: None for sniffing
            on all interfaces).
        monitor: use monitor mode. May not be available on all OS
        started_callback: called as soon as the sniffer starts sniffing
            (default: None).
    The iface, offline and opened_socket parameters can be either an
    element, a list of elements, or a dict object mapping an element to a
    label (see examples below).
    For more information about the session argument, see
    https://scapy.rtfd.io/en/latest/usage.html#advanced-sniffing-sniffing-sessions
    Examples: synchronous
      >>> sniff(filter="arp")
      >>> sniff(filter="tcp",
      ...       session=IPSession,  # defragment on-the-flow
      ...       prn=lambda x: x.summary())
      >>> sniff(lfilter=lambda pkt: ARP in pkt)
      >>> sniff(iface="eth0", prn=Packet.summary)
      >>> sniff(iface=["eth0", "mon0"],
      ...       prn=lambda pkt: "%s: %s" % (pkt.sniffed_on,
      ...                                   pkt.summary()))
      >>> sniff(iface={"eth0": "Ethernet", "mon0": "Wifi"},
      ...       prn=lambda pkt: "%s: %s" % (pkt.sniffed_on,
      ...                                   pkt.summary()))
    Examples: asynchronous
      >>> t = AsyncSniffer(iface="enp0s3")
      >>> t.start()
      >>> time.sleep(1)
      >>> print("nice weather today")
      >>> t.stop()
    """
    def __init__(self, *args, **kwargs):
        # Store keyword arguments
        self.args = args
        self.kwargs = kwargs
        self.running = False
        self.thread = None
        self.results = None
    def _setup_thread(self):
        # Prepare sniffing thread
        self.thread = Thread(
            target=self._run,
            args=self.args,
            kwargs=self.kwargs
        )
        self.thread.setDaemon(True)
    # NOTE(review): session_args/session_kwargs use mutable defaults; they
    # are only splatted into the session constructor below, never mutated,
    # so this is safe in practice.
    def _run(self,
             count=0, store=True, offline=None,
             quiet=False, prn=None, lfilter=None,
             L2socket=None, timeout=None, opened_socket=None,
             stop_filter=None, iface=None, started_callback=None,
             session=None, session_args=[], session_kwargs={},
             *arg, **karg):
        self.running = True
        # Start main thread
        # instantiate session
        if not isinstance(session, DefaultSession):
            session = session or DefaultSession
            session = session(prn=prn, store=store,
                              *session_args, **session_kwargs)
        else:
            session.prn = prn
            session.store = store
        # sniff_sockets follows: {socket: label}
        sniff_sockets = {}
        if opened_socket is not None:
            # Caller-supplied socket(s): single object, list, or
            # {socket: label} dict.
            if isinstance(opened_socket, list):
                sniff_sockets.update(
                    (s, "socket%d" % i)
                    for i, s in enumerate(opened_socket)
                )
            elif isinstance(opened_socket, dict):
                sniff_sockets.update(
                    (s, label)
                    for s, label in six.iteritems(opened_socket)
                )
            else:
                sniff_sockets[opened_socket] = "socket0"
        if offline is not None:
            flt = karg.get('filter')
            if isinstance(offline, list) and \
                    all(isinstance(elt, str) for elt in offline):
                # List of pcap filenames; optionally filtered via tcpdump.
                sniff_sockets.update((PcapReader(
                    fname if flt is None else
                    tcpdump(fname, args=["-w", "-"], flt=flt, getfd=True)
                ), fname) for fname in offline)
            elif isinstance(offline, dict):
                sniff_sockets.update((PcapReader(
                    fname if flt is None else
                    tcpdump(fname, args=["-w", "-"], flt=flt, getfd=True)
                ), label) for fname, label in six.iteritems(offline))
            else:
                # Write Scapy Packet objects to a pcap file
                # NOTE(review): the parameter is unused; the closure
                # writes ``offline`` directly.
                def _write_to_pcap(packets_list):
                    filename = get_temp_file(autoext=".pcap")
                    wrpcap(filename, offline)
                    return filename, filename
                if isinstance(offline, Packet):
                    tempfile_written, offline = _write_to_pcap([offline])
                elif isinstance(offline, list) and \
                        all(isinstance(elt, Packet) for elt in offline):
                    tempfile_written, offline = _write_to_pcap(offline)
                sniff_sockets[PcapReader(
                    offline if flt is None else
                    tcpdump(offline,
                            args=["-w", "-"],
                            flt=flt,
                            getfd=True,
                            quiet=quiet)
                )] = offline
        if not sniff_sockets or iface is not None:
            # No socket built yet (or an interface was requested):
            # open live capture socket(s).
            if L2socket is None:
                L2socket = conf.L2listen
            if isinstance(iface, list):
                sniff_sockets.update(
                    (L2socket(type=ETH_P_ALL, iface=ifname, *arg, **karg),
                     ifname)
                    for ifname in iface
                )
            elif isinstance(iface, dict):
                sniff_sockets.update(
                    (L2socket(type=ETH_P_ALL, iface=ifname, *arg, **karg),
                     iflabel)
                    for ifname, iflabel in six.iteritems(iface)
                )
            else:
                sniff_sockets[L2socket(type=ETH_P_ALL, iface=iface,
                                       *arg, **karg)] = iface
        # Get select information from the sockets
        _main_socket = next(iter(sniff_sockets))
        select_func = _main_socket.select
        _backup_read_func = _main_socket.__class__.recv
        nonblocking_socket = _main_socket.nonblocking_socket
        # We check that all sockets use the same select(), or raise a warning
        if not all(select_func == sock.select for sock in sniff_sockets):
            warning("Warning: inconsistent socket types ! "
                    "The used select function "
                    "will be the one of the first socket")
        if nonblocking_socket:
            # select is non blocking
            def stop_cb():
                self.continue_sniff = False
            self.stop_cb = stop_cb
            close_pipe = None
        else:
            # select is blocking: Add special control socket
            from scapy.automaton import ObjectPipe
            close_pipe = ObjectPipe()
            sniff_sockets[close_pipe] = "control_socket"
            def stop_cb():
                if self.running:
                    close_pipe.send(None)
                self.continue_sniff = False
            self.stop_cb = stop_cb
        try:
            if started_callback:
                started_callback()
            self.continue_sniff = True
            # Start timeout
            if timeout is not None:
                stoptime = time.time() + timeout
            remain = None
            while sniff_sockets and self.continue_sniff:
                if timeout is not None:
                    remain = stoptime - time.time()
                    if remain <= 0:
                        break
                sockets, read_func = select_func(sniff_sockets, remain)
                read_func = read_func or _backup_read_func
                dead_sockets = []
                for s in sockets:
                    if s is close_pipe:
                        # stop() was called: leave the read loop.
                        break
                    try:
                        p = read_func(s)
                    except EOFError:
                        # End of stream
                        try:
                            s.close()
                        except Exception:
                            pass
                        dead_sockets.append(s)
                        continue
                    except Exception as ex:
                        msg = " It was closed."
                        try:
                            # Make sure it's closed
                            s.close()
                        except Exception as ex:
                            msg = " close() failed with '%s'" % ex
                        warning(
                            "Socket %s failed with '%s'." % (s, ex) + msg
                        )
                        dead_sockets.append(s)
                        if conf.debug_dissector >= 2:
                            raise
                        continue
                    if p is None:
                        continue
                    if lfilter and not lfilter(p):
                        continue
                    p.sniffed_on = sniff_sockets[s]
                    # on_packet_received handles the prn/storage
                    session.on_packet_received(p)
                    # check
                    if (stop_filter and stop_filter(p)) or \
                            (0 < count <= session.count):
                        self.continue_sniff = False
                        break
                # Removed dead sockets
                for s in dead_sockets:
                    del sniff_sockets[s]
        except KeyboardInterrupt:
            pass
        self.running = False
        # Only close sockets we opened ourselves; caller-provided
        # opened_socket objects stay open.
        if opened_socket is None:
            for s in sniff_sockets:
                s.close()
        elif close_pipe:
            close_pipe.close()
        self.results = session.toPacketList()
    def start(self):
        """Starts AsyncSniffer in async mode"""
        self._setup_thread()
        self.thread.start()
    def stop(self, join=True):
        """Stops AsyncSniffer if not in async mode"""
        if self.running:
            try:
                self.stop_cb()
            except AttributeError:
                raise Scapy_Exception(
                    "Unsupported (offline or unsupported socket)"
                )
            if join:
                self.join()
                return self.results
        else:
            raise Scapy_Exception("Not started !")
    def join(self, *args, **kwargs):
        # Wait for the sniffing thread (if one was started).
        if self.thread:
            self.thread.join(*args, **kwargs)
@conf.commands.register
def sniff(*args, **kwargs):
    # Run an AsyncSniffer synchronously (in the calling thread) and
    # hand back whatever it collected.
    runner = AsyncSniffer()
    runner._run(*args, **kwargs)
    return runner.results


# Share the extensive parameter documentation with AsyncSniffer.
sniff.__doc__ = AsyncSniffer.__doc__
@conf.commands.register
def bridge_and_sniff(if1, if2, xfrm12=None, xfrm21=None, prn=None, L2socket=None,  # noqa: E501
                     *args, **kargs):
    """Forward traffic between interfaces if1 and if2, sniff and return
    the exchanged packets.
    :param if1: the interfaces to use (interface names or opened sockets).
    :param if2:
    :param xfrm12: a function to call when forwarding a packet from if1 to
        if2. If it returns True, the packet is forwarded as it. If it
        returns False or None, the packet is discarded. If it returns a
        packet, this packet is forwarded instead of the original packet
        one.
    :param xfrm21: same as xfrm12 for packets forwarded from if2 to if1.
    The other arguments are the same than for the function sniff(),
    except for offline, opened_socket and iface that are ignored.
    See help(sniff) for more.
    """
    # These sniff() arguments would conflict with the bridging setup.
    for arg in ['opened_socket', 'offline', 'iface']:
        if arg in kargs:
            log_runtime.warning("Argument %s cannot be used in "
                                "bridge_and_sniff() -- ignoring it.", arg)
            del kargs[arg]
    def _init_socket(iface, count):
        # Accept either an opened SuperSocket or an interface name.
        if isinstance(iface, SuperSocket):
            return iface, "iface%d" % count
        else:
            return (L2socket or conf.L2socket)(iface=iface), iface
    sckt1, if1 = _init_socket(if1, 1)
    sckt2, if2 = _init_socket(if2, 2)
    # peers maps the label a packet was sniffed on to the *other* socket.
    peers = {if1: sckt2, if2: sckt1}
    xfrms = {}
    if xfrm12 is not None:
        xfrms[if1] = xfrm12
    if xfrm21 is not None:
        xfrms[if2] = xfrm21
    def prn_send(pkt):
        # Forward each sniffed packet out of the opposite socket,
        # applying the direction's transformation function if any.
        try:
            sendsock = peers[pkt.sniffed_on]
        except KeyError:
            return
        if pkt.sniffed_on in xfrms:
            try:
                newpkt = xfrms[pkt.sniffed_on](pkt)
            except Exception:
                log_runtime.warning(
                    'Exception in transformation function for packet [%s] '
                    'received on %s -- dropping',
                    pkt.summary(), pkt.sniffed_on, exc_info=True
                )
                return
            else:
                if newpkt is True:
                    newpkt = pkt.original
                elif not newpkt:
                    return
        else:
            newpkt = pkt.original
        try:
            sendsock.send(newpkt)
        except Exception:
            log_runtime.warning('Cannot forward packet [%s] received on %s',
                                pkt.summary(), pkt.sniffed_on, exc_info=True)
    if prn is None:
        prn = prn_send
    else:
        # Chain the user's prn after the forwarding step.
        prn_orig = prn
        def prn(pkt):
            prn_send(pkt)
            return prn_orig(pkt)
    return sniff(opened_socket={sckt1: if1, sckt2: if2}, prn=prn,
                 *args, **kargs)
@conf.commands.register
def tshark(*args, **kargs):
    """Sniff packets and print them calling pkt.summary().
    This tries to replicate what text-wireshark (tshark) would look like"""
    if 'iface' in kargs:
        iface = kargs.get('iface')
    elif 'opened_socket' in kargs:
        # NOTE(review): assumes a single socket object exposing .iface;
        # a list of opened sockets would fail here -- confirm callers.
        iface = kargs.get('opened_socket').iface
    else:
        iface = conf.iface
    print("Capturing on '%s'" % iface)

    # This should be a nonlocal variable, using a mutable object
    # for Python 2 compatibility
    i = [0]

    def _cb(pkt):
        print("%5d\t%s" % (i[0], pkt.summary()))
        i[0] += 1

    sniff(prn=_cb, store=False, *args, **kargs)
    # BUG FIX: pluralize on "!= 1" -- the old "> 1" test printed
    # "0 packet" for an empty capture.
    print("\n%d packet%s captured" % (i[0], 's' if i[0] != 1 else ''))
| gpl-2.0 |
postrational/django | django/views/generic/list.py | 110 | 7014 | from __future__ import unicode_literals
from django.core.paginator import Paginator, InvalidPage
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateResponseMixin, ContextMixin, View
class MultipleObjectMixin(ContextMixin):
    """
    A mixin for views manipulating multiple objects.
    """
    allow_empty = True  # if False, an empty list raises Http404 in BaseListView
    queryset = None
    model = None
    paginate_by = None  # page size; None disables pagination
    paginate_orphans = 0
    context_object_name = None
    paginator_class = Paginator
    page_kwarg = 'page'  # URLconf kwarg / GET parameter carrying the page number
    def get_queryset(self):
        """
        Get the list of items for this view. This must be an iterable, and may
        be a queryset (in which qs-specific behavior will be enabled).
        """
        if self.queryset is not None:
            queryset = self.queryset
            # Clone so per-request filtering never mutates the class attribute.
            if hasattr(queryset, '_clone'):
                queryset = queryset._clone()
        elif self.model is not None:
            queryset = self.model._default_manager.all()
        else:
            raise ImproperlyConfigured("'%s' must define 'queryset' or 'model'"
                                       % self.__class__.__name__)
        return queryset
    def paginate_queryset(self, queryset, page_size):
        """
        Paginate the queryset, if needed.
        """
        paginator = self.get_paginator(
            queryset, page_size, orphans=self.get_paginate_orphans(),
            allow_empty_first_page=self.get_allow_empty())
        page_kwarg = self.page_kwarg
        # Page number comes from the URLconf kwargs, then the query string,
        # then defaults to 1.
        page = self.kwargs.get(page_kwarg) or self.request.GET.get(page_kwarg) or 1
        try:
            page_number = int(page)
        except ValueError:
            # The literal 'last' jumps to the final page.
            if page == 'last':
                page_number = paginator.num_pages
            else:
                raise Http404(_("Page is not 'last', nor can it be converted to an int."))
        try:
            page = paginator.page(page_number)
            return (paginator, page, page.object_list, page.has_other_pages())
        except InvalidPage as e:
            raise Http404(_('Invalid page (%(page_number)s): %(message)s') % {
                'page_number': page_number,
                'message': str(e)
            })
    def get_paginate_by(self, queryset):
        """
        Get the number of items to paginate by, or ``None`` for no pagination.
        """
        return self.paginate_by
    def get_paginator(self, queryset, per_page, orphans=0,
                      allow_empty_first_page=True, **kwargs):
        """
        Return an instance of the paginator for this view.
        """
        return self.paginator_class(
            queryset, per_page, orphans=orphans,
            allow_empty_first_page=allow_empty_first_page, **kwargs)
    def get_paginate_orphans(self):
        """
        Returns the maximum number of orphans extend the last page by when
        paginating.
        """
        return self.paginate_orphans
    def get_allow_empty(self):
        """
        Returns ``True`` if the view should display empty lists, and ``False``
        if a 404 should be raised instead.
        """
        return self.allow_empty
    def get_context_object_name(self, object_list):
        """
        Get the name of the item to be used in the context.
        """
        if self.context_object_name:
            return self.context_object_name
        elif hasattr(object_list, 'model'):
            # Derive e.g. 'article_list' from the queryset's model name.
            return '%s_list' % object_list.model._meta.model_name
        else:
            return None
    def get_context_data(self, **kwargs):
        """
        Get the context for this view.
        """
        queryset = kwargs.pop('object_list', self.object_list)
        page_size = self.get_paginate_by(queryset)
        context_object_name = self.get_context_object_name(queryset)
        if page_size:
            paginator, page, queryset, is_paginated = self.paginate_queryset(queryset, page_size)
            context = {
                'paginator': paginator,
                'page_obj': page,
                'is_paginated': is_paginated,
                'object_list': queryset
            }
        else:
            context = {
                'paginator': None,
                'page_obj': None,
                'is_paginated': False,
                'object_list': queryset
            }
        # Expose the list under its friendly name too (e.g. 'article_list').
        if context_object_name is not None:
            context[context_object_name] = queryset
        context.update(kwargs)
        return super(MultipleObjectMixin, self).get_context_data(**context)
class BaseListView(MultipleObjectMixin, View):
    """
    A base view for displaying a list of objects.
    """
    def get(self, request, *args, **kwargs):
        # Fetch the list, optionally 404 on emptiness, then render.
        self.object_list = self.get_queryset()
        allow_empty = self.get_allow_empty()
        if not allow_empty:
            # When pagination is enabled and object_list is a queryset,
            # it's better to do a cheap query than to load the unpaginated
            # queryset in memory.
            if (self.get_paginate_by(self.object_list) is not None
                    and hasattr(self.object_list, 'exists')):
                is_empty = not self.object_list.exists()
            else:
                is_empty = len(self.object_list) == 0
            if is_empty:
                raise Http404(_("Empty list and '%(class_name)s.allow_empty' is False.")
                              % {'class_name': self.__class__.__name__})
        context = self.get_context_data()
        return self.render_to_response(context)
class MultipleObjectTemplateResponseMixin(TemplateResponseMixin):
    """
    Mixin for responding with a template and list of objects.
    """
    template_name_suffix = '_list'  # appended to '<app>/<model>' below
    def get_template_names(self):
        """
        Return a list of template names to be used for the request. Must return
        a list. May not be called if render_to_response is overridden.
        """
        try:
            names = super(MultipleObjectTemplateResponseMixin, self).get_template_names()
        except ImproperlyConfigured:
            # If template_name isn't specified, it's not a problem --
            # we just start with an empty list.
            names = []
        # If the list is a queryset, we'll invent a template name based on the
        # app and model name. This name gets put at the end of the template
        # name list so that user-supplied names override the automatically-
        # generated ones.
        if hasattr(self.object_list, 'model'):
            opts = self.object_list.model._meta
            names.append("%s/%s%s.html" % (opts.app_label, opts.model_name, self.template_name_suffix))
        return names
# Concrete, ready-to-use view: combines automatic template selection with
# the object-list / pagination machinery of BaseListView.
class ListView(MultipleObjectTemplateResponseMixin, BaseListView):
    """
    Render some list of objects, set by `self.model` or `self.queryset`.
    `self.queryset` can actually be any iterable of items, not just a queryset.
    """
| bsd-3-clause |
davidyezsetz/kuma | vendor/packages/Werkzeug/examples/cupoftee/pages.py | 8 | 2424 | # -*- coding: utf-8 -*-
"""
cupoftee.pages
~~~~~~~~~~~~~~
The pages.
:copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import operator
from werkzeug import redirect
from werkzeug.exceptions import NotFound
from cupoftee.application import Page
from cupoftee.utils import unicodecmp
class ServerList(Page):
    # Front page: sortable list of game servers plus all online players.
    url_rule = '/'
    def order_link(self, name, title):
        """Build the <a> tag for a sortable column header, toggling the
        sort direction when the column is already the active one."""
        cls = ''
        link = '?order_by=' + name
        desc = False
        if name == self.order_by:
            desc = not self.order_desc
            cls = ' class="%s"' % (desc and 'down' or 'up')
        if desc:
            link += '&dir=desc'
        return '<a href="%s"%s>%s</a>' % (link, cls, title)
    def process(self):
        self.order_by = self.request.args.get('order_by') or 'name'
        # NOTE(review): 'name' sorts by the server objects themselves
        # (lambda x: x); presumably the server type orders by name --
        # confirm against the Server class.
        sort_func = {
            'name': lambda x: x,
            'map': lambda x: x.map,
            'gametype': lambda x: x.gametype,
            'players': lambda x: x.player_count,
            'progression': lambda x: x.progression,
        }.get(self.order_by)
        if sort_func is None:
            # Unknown column: redirect back to the default ordering.
            return redirect(self.url_for('serverlist'))
        self.servers = self.cup.master.servers.values()
        self.servers.sort(key=sort_func)
        if self.request.args.get('dir') == 'desc':
            self.servers.reverse()
            self.order_desc = True
        else:
            self.order_desc = False
        # Flatten all player lists, then sort case-insensitively
        # (Python 2 cmp-style sort).
        self.players = reduce(lambda a, b: a + b.players, self.servers, [])
        self.players.sort(lambda a, b: unicodecmp(a.name, b.name))
class Server(Page):
    """Detail page for a single game server, looked up by its id."""

    url_rule = '/server/<id>'

    def process(self, id):
        servers = self.cup.master.servers
        try:
            self.server = servers[id]
        except KeyError:
            # Unknown server id -> 404.
            raise NotFound()
class Search(Page):
    """Find every server a player with the given name is currently on."""

    url_rule = '/search'

    def process(self):
        self.user = self.request.args.get('user')
        if not self.user:
            # No query -> leave self.results unset, exactly like before.
            return
        # One entry per matching *player*, so a server hosting two players
        # with the same name appears twice (original behavior preserved).
        self.results = [
            server
            for server in self.cup.master.servers.itervalues()
            for player in server.players
            if player.name == self.user
        ]
class MissingPage(Page):
    """Fallback page rendered with an HTTP 404 status."""

    def get_response(self):
        resp = super(MissingPage, self).get_response()
        resp.status_code = 404
        return resp
| mpl-2.0 |
houghb/HDSAviz | savvy/tests/test_sensitivity_tools.py | 2 | 3009 | import unittest
import os
import numpy as np
from ..sensitivity_tools import gen_params
cwd = os.getcwd()
class TestGenParams(unittest.TestCase):
    """Tests for gen_params."""

    def test_num_vars_not_integer(self):
        """Is an error raised if num_vars is not an integer?"""
        names = ['para1', 'para2', 'para3']
        bounds = [[0, 1], [2, 6], [0, 2.3]]
        self.assertRaises(TypeError, gen_params, 9.6, names, bounds, 10,
                          cwd, True)
        self.assertRaises(TypeError, gen_params, '10', names, bounds, 10,
                          cwd, True)

    def test_missing_bounds(self):
        """Is an error raised when there are different numbers
        of bounds than num_vars?"""
        names = ['para1', 'para2', 'para3']
        bounds = [[0, 1], [2, 6], [0, 2.3]]
        self.assertRaises(ValueError, gen_params, 5, names, bounds, 10,
                          cwd, True)

    def test_names_exist_for_all_params(self):
        """Is an error raised if the length of names != num_vars"""
        names = ['para1', 'para2', 'para3']
        bounds = [[0, 1], [2, 6], [0, 2.3], [0, 1], [0, 2]]
        self.assertRaises(ValueError, gen_params, 5, names, bounds, 10,
                          cwd, True)

    def test_gen_params_gives_expected_sets(self):
        """Does gen_params return the expected parameter sets?"""
        names = ['para1', 'para2', 'para3']
        bounds = [[0, 1], [2, 6], [0, 2.3]]
        expectedt = np.array([[0.21972656, 2.38671875, 1.19267578],
                              [0.67675781, 2.38671875, 1.19267578],
                              [0.21972656, 3.12109375, 1.19267578],
                              [0.21972656, 2.38671875, 2.08662109],
                              [0.21972656, 3.12109375, 2.08662109],
                              [0.67675781, 2.38671875, 2.08662109],
                              [0.67675781, 3.12109375, 1.19267578],
                              [0.67675781, 3.12109375, 2.08662109]])
        expectedf = np.array([[0.21972656, 2.38671875, 1.19267578],
                              [0.67675781, 2.38671875, 1.19267578],
                              [0.21972656, 3.12109375, 1.19267578],
                              [0.21972656, 2.38671875, 2.08662109],
                              [0.67675781, 3.12109375, 2.08662109]])
        # BUG FIX: the original asserted `result.all() == expected.all()`,
        # which collapses each array to a single boolean and therefore
        # passes for almost any output.  Compare element-wise instead.
        np.testing.assert_allclose(
            gen_params(3, names, bounds, 1, cwd, False), expectedf,
            atol=1e-6)
        np.testing.assert_allclose(
            gen_params(3, names, bounds, 1, cwd, True), expectedt,
            atol=1e-6)

    def tearDown(self):
        # Remove the 'saparams*' scratch files gen_params writes into cwd.
        # (A plain loop replaces the original list comprehension that was
        # used only for its side effects.)
        for name in os.listdir(cwd):
            if name.startswith('saparams'):
                os.remove(cwd + '/' + name)
# The function `analyze_sensitivity` is not being tested because this
# function does not do anything that has not already been tested in SALib
# (`analyze_sensitivity` just formats the bash command that runs the analysis).

if __name__ == '__main__':
    # Allow running this test module directly with `python test_...py`.
    unittest.main()
| bsd-2-clause |
Slezhuk/ansible | lib/ansible/parsing/splitter.py | 80 | 10658 | # (c) 2014 James Cammarata, <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import codecs
from ansible.errors import AnsibleParserError
from ansible.module_utils._text import to_text
from ansible.parsing.quoting import unquote
# Decode escapes adapted from rspeer's answer here:
# http://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python
_HEXCHAR = '[a-fA-F0-9]'
_ESCAPE_SEQUENCE_RE = re.compile(r'''
( \\U{0} # 8-digit hex escapes
| \\u{1} # 4-digit hex escapes
| \\x{2} # 2-digit hex escapes
| \\N\{{[^}}]+\}} # Unicode characters by name
| \\[\\'"abfnrtv] # Single-character escapes
)'''.format(_HEXCHAR*8, _HEXCHAR*4, _HEXCHAR*2), re.UNICODE | re.VERBOSE)
def _decode_escapes(s):
def decode_match(match):
return codecs.decode(match.group(0), 'unicode-escape')
return _ESCAPE_SEQUENCE_RE.sub(decode_match, s)
def parse_kv(args, check_raw=False):
    '''
    Convert a string of key/value items to a dict. If any free-form params
    are found and the check_raw option is set to True, they will be added
    to a new parameter called '_raw_params'. If check_raw is not enabled,
    they will simply be ignored.

    :param args: the raw argument string (or None)
    :param check_raw: collect non key=value tokens under '_raw_params'
    :returns: dict of parsed options
    :raises AnsibleParserError: when the string has unbalanced quoting
    '''
    args = to_text(args, nonstring='passthru')
    options = {}
    if args is not None:
        try:
            vargs = split_args(args)
        except ValueError as ve:
            if 'no closing quotation' in str(ve).lower():
                raise AnsibleParserError("error parsing argument string, try quoting the entire line.")
            else:
                raise
        raw_params = []
        for orig_x in vargs:
            x = _decode_escapes(orig_x)
            if "=" in x:
                pos = 0
                try:
                    # Find the first '=' that is not escaped with a backslash;
                    # that one splits the key from the value.
                    while True:
                        pos = x.index('=', pos + 1)
                        if pos > 0 and x[pos - 1] != '\\':
                            break
                except ValueError:
                    # ran out of string, but we must have some escaped equals,
                    # so replace those and append this to the list of raw params
                    raw_params.append(x.replace('\\=', '='))
                    continue
                k = x[:pos]
                v = x[pos + 1:]
                # FIXME: make the retrieval of this list of shell/command
                # options a function, so the list is centralized
                if check_raw and k not in ('creates', 'removes', 'chdir', 'executable', 'warn'):
                    raw_params.append(orig_x)
                else:
                    options[k.strip()] = unquote(v.strip())
            else:
                raw_params.append(orig_x)
        # recombine the free-form params, if any were found, and assign
        # them to a special option for use later by the shell/command module
        if len(raw_params) > 0:
            options[u'_raw_params'] = ' '.join(raw_params)
    return options
def _get_quote_state(token, quote_char):
'''
the goal of this block is to determine if the quoted string
is unterminated in which case it needs to be put back together
'''
# the char before the current one, used to see if
# the current character is escaped
prev_char = None
for idx, cur_char in enumerate(token):
if idx > 0:
prev_char = token[idx-1]
if cur_char in '"\'' and prev_char != '\\':
if quote_char:
if cur_char == quote_char:
quote_char = None
else:
quote_char = cur_char
return quote_char
def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
'''
this function counts the number of opening/closing blocks for a
given opening/closing type and adjusts the current depth for that
block based on the difference
'''
num_open = token.count(open_token)
num_close = token.count(close_token)
if num_open != num_close:
cur_depth += (num_open - num_close)
if cur_depth < 0:
cur_depth = 0
return cur_depth
def split_args(args):
    '''
    Splits args on whitespace, but intelligently reassembles
    those that may have been split over a jinja2 block or quotes.

    When used in a remote module, we won't ever have to be concerned about
    jinja2 blocks, however this function is/will be used in the
    core portions as well before the args are templated.

    example input: a=b c="foo bar"
    example output: ['a=b', 'c="foo bar"']

    Basically this is a variation shlex that has some more intelligence for
    how Ansible needs to use it.

    :param args: the raw argument string
    :returns: list of argument tokens
    :raises AnsibleParserError: on unbalanced quotes or jinja2 blocks
    '''
    # the list of params parsed out of the arg string
    # this is going to be the result value when we are done
    params = []
    # Initial split on white space
    args = args.strip()
    items = args.strip().split('\n')
    # iterate over the tokens, and reassemble any that may have been
    # split on a space inside a jinja2 block.
    # ex if tokens are "{{", "foo", "}}" these go together
    # These variables are used
    # to keep track of the state of the parsing, since blocks and quotes
    # may be nested within each other.
    quote_char = None
    inside_quotes = False
    print_depth = 0  # used to count nested jinja2 {{ }} blocks
    block_depth = 0  # used to count nested jinja2 {% %} blocks
    comment_depth = 0  # used to count nested jinja2 {# #} blocks
    # now we loop over each split chunk, coalescing tokens if the white space
    # split occurred within quotes or a jinja2 block of some kind
    for itemidx,item in enumerate(items):
        # we split on spaces and newlines separately, so that we
        # can tell which character we split on for reassembly
        # inside quotation characters
        tokens = item.strip().split(' ')
        line_continuation = False
        for idx,token in enumerate(tokens):
            # if we hit a line continuation character, but
            # we're not inside quotes, ignore it and continue
            # on to the next token while setting a flag
            if token == '\\' and not inside_quotes:
                line_continuation = True
                continue
            # store the previous quoting state for checking later
            was_inside_quotes = inside_quotes
            quote_char = _get_quote_state(token, quote_char)
            inside_quotes = quote_char is not None
            # multiple conditions may append a token to the list of params,
            # so we keep track with this flag to make sure it only happens once
            # append means add to the end of the list, don't append means concatenate
            # it to the end of the last token
            appended = False
            # if we're inside quotes now, but weren't before, append the token
            # to the end of the list, since we'll tack on more to it later
            # otherwise, if we're inside any jinja2 block, inside quotes, or we were
            # inside quotes (but aren't now) concat this token to the last param
            if inside_quotes and not was_inside_quotes and not(print_depth or block_depth or comment_depth):
                params.append(token)
                appended = True
            elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
                if idx == 0 and was_inside_quotes:
                    # continuation of a quoted token at the start of a line:
                    # glue it directly (no separator was consumed).
                    params[-1] = "%s%s" % (params[-1], token)
                elif len(tokens) > 1:
                    # we split this line on spaces, so restore a space between
                    # the pieces (except before the very first one).
                    spacer = ''
                    if idx > 0:
                        spacer = ' '
                    params[-1] = "%s%s%s" % (params[-1], spacer, token)
                else:
                    # single token on this line: the split character was a
                    # newline, so restore that instead.
                    params[-1] = "%s\n%s" % (params[-1], token)
                appended = True
            # if the number of paired block tags is not the same, the depth has changed, so we calculate that here
            # and may append the current token to the params (if we haven't previously done so)
            prev_print_depth = print_depth
            print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
            if print_depth != prev_print_depth and not appended:
                params.append(token)
                appended = True
            prev_block_depth = block_depth
            block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
            if block_depth != prev_block_depth and not appended:
                params.append(token)
                appended = True
            prev_comment_depth = comment_depth
            comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}")
            if comment_depth != prev_comment_depth and not appended:
                params.append(token)
                appended = True
            # finally, if we're at zero depth for all blocks and not inside quotes, and have not
            # yet appended anything to the list of params, we do so now
            if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '':
                params.append(token)
        # if this was the last token in the list, and we have more than
        # one item (meaning we split on newlines), add a newline back here
        # to preserve the original structure
        if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation:
            params[-1] += '\n'
        # always clear the line continuation flag
        line_continuation = False
    # If we're done and things are not at zero depth or we're still inside quotes,
    # raise an error to indicate that the args were unbalanced
    if print_depth or block_depth or comment_depth or inside_quotes:
        raise AnsibleParserError("failed at splitting arguments, either an unbalanced jinja2 block or quotes: {}".format(args))
    return params
| gpl-3.0 |
saurabhbajaj207/CarpeDiem | venv/Lib/site-packages/Crypto/Hash/SHA512.py | 124 | 2850 | # -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""SHA-512 cryptographic hash algorithm.
SHA-512 belongs to the SHA-2_ family of cryptographic hashes.
It produces the 512 bit digest of a message.
>>> from Crypto.Hash import SHA512
>>>
>>> h = SHA512.new()
>>> h.update(b'Hello')
>>> print h.hexdigest()
*SHA* stands for Secure Hash Algorithm.
.. _SHA-2: http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf
"""
# NOTE(review): the original spelled this "_revision__" — an obvious typo
# for the conventional "__revision__" module marker used throughout pycrypto.
__revision__ = "$Id$"

__all__ = ['new', 'digest_size', 'SHA512Hash' ]
from Crypto.Util.py3compat import *
from Crypto.Hash.hashalgo import HashAlgo
try:
import hashlib
hashFactory = hashlib.sha512
except ImportError:
from Crypto.Hash import _SHA512
hashFactory = _SHA512
class SHA512Hash(HashAlgo):
    """Class that implements a SHA-512 hash

    :undocumented: block_size
    """

    #: ASN.1 Object identifier (OID)::
    #:
    #:  id-sha512    OBJECT IDENTIFIER ::= {
    #:      joint-iso-itu-t(2)
    #:      country(16) us(840) organization(1) gov(101) csor(3) nistalgorithm(4) hashalgs(2) 3
    #:  }
    #:
    #: This value uniquely identifies the SHA-512 algorithm.
    oid = b('\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x03')

    # Digest is 512 bits = 64 bytes; SHA-512 processes 1024-bit (128-byte) blocks.
    digest_size = 64
    block_size = 128

    def __init__(self, data=None):
        # Delegate state handling to HashAlgo, backed by hashlib.sha512
        # (or the bundled _SHA512 fallback) selected at import time.
        HashAlgo.__init__(self, hashFactory, data)

    def new(self, data=None):
        # Return a fresh, independent SHA512Hash primed with `data`.
        return SHA512Hash(data)
def new(data=None):
    """Return a fresh instance of the hash object.

    :Parameters:
       data : byte string
        The very first chunk of the message to hash.
        It is equivalent to an early call to `SHA512Hash.update()`.
        Optional.

    :Return: A `SHA512Hash` object
    """
    return SHA512Hash().new(data)

#: The size of the resulting hash in bytes.
digest_size = SHA512Hash.digest_size

#: The internal block size of the hash algorithm in bytes.
block_size = SHA512Hash.block_size
| mit |
40223211/cadb_g7_w18test | static/Brython3.1.1-20150328-091302/Lib/linecache.py | 785 | 3864 | """Cache lines from files.
This is intended to read lines from modules imported -- hence if a filename
is not found, it will look down the module search path for a file by
that name.
"""
import sys
import os
import tokenize
__all__ = ["getline", "clearcache", "checkcache"]
def getline(filename, lineno, module_globals=None):
    """Return line *lineno* (1-based) of *filename*, or '' when the line
    number is out of range."""
    lines = getlines(filename, module_globals)
    if not 1 <= lineno <= len(lines):
        return ''
    return lines[lineno - 1]
# The cache: maps filename -> (size, mtime, list_of_lines, fullname).
cache = {}


def clearcache():
    """Clear the cache entirely."""
    global cache
    cache = {}
def getlines(filename, module_globals=None):
    """Get the lines for a file from the cache.

    Update the cache if it doesn't contain an entry for this file already."""
    entry = cache.get(filename)
    if entry is not None:
        return entry[2]
    return updatecache(filename, module_globals)
def checkcache(filename=None):
    """Discard cache entries that are out of date.

    (This is not checked upon each call!)"""
    if filename is None:
        targets = list(cache.keys())
    elif filename in cache:
        targets = [filename]
    else:
        return

    for fname in targets:
        size, mtime, lines, fullname = cache[fname]
        if mtime is None:
            # Loaded via a __loader__: nothing on disk to compare against.
            continue
        try:
            stat = os.stat(fullname)
        except os.error:
            # File vanished; drop the stale entry.
            del cache[fname]
            continue
        if size != stat.st_size or mtime != stat.st_mtime:
            del cache[fname]
def updatecache(filename, module_globals=None):
    """Update a cache entry and return its list of lines.

    If something's wrong, print a message, discard the cache entry,
    and return an empty list."""

    if filename in cache:
        del cache[filename]
    if not filename or (filename.startswith('<') and filename.endswith('>')):
        # Pseudo-filenames like '<stdin>' can never be read from disk.
        return []

    fullname = filename
    try:
        stat = os.stat(fullname)
    except OSError:
        basename = filename

        # Try for a __loader__, if available
        if module_globals and '__loader__' in module_globals:
            name = module_globals.get('__name__')
            loader = module_globals['__loader__']
            get_source = getattr(loader, 'get_source', None)

            if name and get_source:
                try:
                    data = get_source(name)
                except (ImportError, IOError):
                    pass
                else:
                    if data is None:
                        # No luck, the PEP302 loader cannot find the source
                        # for this module.
                        return []
                    # Cache loader-provided source with mtime None so
                    # checkcache() knows not to stat it.
                    cache[filename] = (
                        len(data), None,
                        [line+'\n' for line in data.splitlines()], fullname
                    )
                    return cache[filename][2]

        # Try looking through the module search path, which is only useful
        # when handling a relative filename.
        if os.path.isabs(filename):
            return []

        for dirname in sys.path:
            try:
                fullname = os.path.join(dirname, basename)
            except (TypeError, AttributeError):
                # Not sufficiently string-like to do anything useful with.
                continue
            try:
                stat = os.stat(fullname)
                break
            except os.error:
                pass
        else:
            return []
    try:
        # tokenize.open honours any PEP 263 coding cookie in the file.
        with tokenize.open(fullname) as fp:
            lines = fp.readlines()
    except IOError:
        return []
    if lines and not lines[-1].endswith('\n'):
        lines[-1] += '\n'
    size, mtime = stat.st_size, stat.st_mtime
    cache[filename] = size, mtime, lines, fullname
    return lines
| gpl-3.0 |
bottompawn/kbengine | kbe/src/lib/python/Parser/asdl.py | 37 | 11756 | """An implementation of the Zephyr Abstract Syntax Definition Language.
See http://asdl.sourceforge.net/ and
http://www.cs.princeton.edu/research/techreps/TR-554-97
Only supports top level module decl, not view. I'm guessing that view
is intended to support the browser and I'm not interested in the
browser.
Changes for Python: Add support for module versions
"""
import os
import sys
import traceback
import spark
def output(*strings):
    """Write each argument to stdout on its own line."""
    for item in strings:
        sys.stdout.write("%s\n" % item)
class Token(object):
    """Base lexer token.

    spark seems to dispatch in the parser based on a token's ``type``
    attribute, so subclasses must keep it meaningful.
    """

    def __init__(self, type, lineno):
        self.type = type
        self.lineno = lineno

    def __str__(self):
        return self.type

    def __repr__(self):
        # Delegate so subclasses overriding __str__ get a matching repr.
        return str(self)


class Id(Token):
    """An identifier token; ``value`` holds the identifier text."""

    def __init__(self, value, lineno):
        self.type = 'Id'
        self.value = value
        self.lineno = lineno

    def __str__(self):
        return self.value


class String(Token):
    """A quoted string token; ``value`` holds the raw quoted text."""

    def __init__(self, value, lineno):
        self.type = 'String'
        self.value = value
        self.lineno = lineno
class ASDLSyntaxError(Exception):
    """Raised when the ASDL source cannot be parsed."""

    def __init__(self, lineno, token=None, msg=None):
        self.lineno = lineno
        self.token = token
        self.msg = msg

    def __str__(self):
        if self.msg is not None:
            return "%s, line %d" % (self.msg, self.lineno)
        return "Error at '%s', line %d" % (self.token, self.lineno)
class ASDLScanner(spark.GenericScanner, object):
    """Tokenizer for ASDL source.

    NOTE: spark.GenericScanner reads each ``t_*`` method's docstring as the
    regular expression for that token class — the raw-string docstrings
    below are functional, not documentation, and must not be edited.
    """

    def tokenize(self, input):
        # Collect tokens into self.rv while tracking the current line number.
        self.rv = []
        self.lineno = 1
        super(ASDLScanner, self).tokenize(input)
        return self.rv

    def t_id(self, s):
        r"[\w\.]+"
        # XXX doesn't distinguish upper vs. lower, which is
        # significant for ASDL.
        self.rv.append(Id(s, self.lineno))

    def t_string(self, s):
        r'"[^"]*"'
        self.rv.append(String(s, self.lineno))

    def t_xxx(self, s):  # not sure what this production means
        r"<="
        self.rv.append(Token(s, self.lineno))

    def t_punctuation(self, s):
        r"[\{\}\*\=\|\(\)\,\?\:]"
        self.rv.append(Token(s, self.lineno))

    def t_comment(self, s):
        r"\-\-[^\n]*"
        # ASDL comments run from '--' to end of line and are discarded.
        pass

    def t_newline(self, s):
        r"\n"
        self.lineno += 1

    def t_whitespace(self, s):
        r"[ \t]+"
        pass

    def t_default(self, s):
        r" . +"
        # Anything unmatched is a hard error.
        raise ValueError("unmatched input: %r" % s)
class ASDLParser(spark.GenericParser, object):
    """Grammar-directed parser for ASDL.

    NOTE: spark.GenericParser reads each ``p_*`` method's docstring as the
    grammar production it implements — those docstrings are functional and
    must not be edited.
    """

    def __init__(self):
        super(ASDLParser, self).__init__("module")

    def typestring(self, tok):
        # spark hook: map a token object to the terminal name used in rules.
        return tok.type

    def error(self, tok):
        raise ASDLSyntaxError(tok.lineno, tok)

    def p_module_0(self, info):
        " module ::= Id Id { } "
        module, name, _0, _1 = info
        if module.value != "module":
            raise ASDLSyntaxError(module.lineno,
                                  msg="expected 'module', found %s" % module)
        return Module(name, None)

    def p_module(self, info):
        " module ::= Id Id { definitions } "
        module, name, _0, definitions, _1 = info
        if module.value != "module":
            raise ASDLSyntaxError(module.lineno,
                                  msg="expected 'module', found %s" % module)
        return Module(name, definitions)

    def p_definition_0(self, definition):
        " definitions ::= definition "
        return definition[0]

    def p_definition_1(self, definitions):
        " definitions ::= definition definitions "
        return definitions[0] + definitions[1]

    def p_definition(self, info):
        " definition ::= Id = type "
        id, _, type = info
        return [Type(id, type)]

    def p_type_0(self, product):
        " type ::= product "
        return product[0]

    def p_type_1(self, sum):
        " type ::= sum "
        return Sum(sum[0])

    def p_type_2(self, info):
        " type ::= sum Id ( fields ) "
        sum, id, _0, attributes, _1 = info
        if id.value != "attributes":
            raise ASDLSyntaxError(id.lineno,
                                  msg="expected attributes, found %s" % id)
        return Sum(sum, attributes)

    def p_product_0(self, info):
        " product ::= ( fields ) "
        _0, fields, _1 = info
        return Product(fields)

    def p_product_1(self, info):
        " product ::= ( fields ) Id ( fields ) "
        _0, fields, _1, id, _2, attributes, _3 = info
        if id.value != "attributes":
            raise ASDLSyntaxError(id.lineno,
                                  msg="expected attributes, found %s" % id)
        return Product(fields, attributes)

    def p_sum_0(self, constructor):
        " sum ::= constructor "
        return [constructor[0]]

    def p_sum_1(self, info):
        " sum ::= constructor | sum "
        constructor, _, sum = info
        return [constructor] + sum

    def p_sum_2(self, info):
        " sum ::= constructor | sum "
        constructor, _, sum = info
        return [constructor] + sum

    def p_constructor_0(self, id):
        " constructor ::= Id "
        return Constructor(id[0])

    def p_constructor_1(self, info):
        " constructor ::= Id ( fields ) "
        id, _0, fields, _1 = info
        return Constructor(id, fields)

    def p_fields_0(self, field):
        " fields ::= field "
        return [field[0]]

    def p_fields_1(self, info):
        " fields ::= fields , field "
        fields, _, field = info
        return fields + [field]

    def p_field_0(self, type_):
        " field ::= Id "
        return Field(type_[0])

    def p_field_1(self, info):
        " field ::= Id Id "
        type, name = info
        return Field(type, name)

    def p_field_2(self, info):
        " field ::= Id * Id "
        type, _, name = info
        return Field(type, name, seq=True)

    def p_field_3(self, info):
        " field ::= Id ? Id "
        type, _, name = info
        return Field(type, name, opt=True)

    def p_field_4(self, type_):
        " field ::= Id * "
        return Field(type_[0], seq=True)

    def p_field_5(self, type_):
        " field ::= Id ? "
        # BUG FIX: the original subscripted the *builtin* ``type`` instead of
        # the ``type_`` parameter (``Field(type[0], opt=True)``), which raises
        # TypeError whenever an optional unnamed field like "int?" is parsed.
        return Field(type_[0], opt=True)
# Primitive types ASDL treats as built in; check() does not require a module
# to define these before using them.
builtin_types = ("identifier", "string", "bytes", "int", "object", "singleton")
# below is a collection of classes to capture the AST of an AST :-)
# not sure if any of the methods are useful yet, but I'm adding them
# piecemeal as they seem helpful
# below is a collection of classes to capture the AST of an AST :-)
class AST(object):
    """Marker base class shared by every node of the ASDL meta-AST."""
    pass


class Module(AST):
    """Top-level ``module Name { definitions }`` node."""

    def __init__(self, name, dfns):
        self.name = name
        # BUG FIX: accept None for an empty module body (the
        # `module Id { }` production builds Module(name, None)); the
        # original crashed iterating over None.
        self.dfns = dfns or []
        self.types = {}  # maps type name to value (from dfns)
        for dfn in self.dfns:
            self.types[dfn.name.value] = dfn.value

    def __repr__(self):
        return "Module(%s, %s)" % (self.name, self.dfns)


class Type(AST):
    """A single ``name = type`` definition."""

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __repr__(self):
        return "Type(%s, %s)" % (self.name, self.value)


class Constructor(AST):
    """One alternative of a Sum, with optional fields."""

    def __init__(self, name, fields=None):
        self.name = name
        self.fields = fields or []

    def __repr__(self):
        return "Constructor(%s, %s)" % (self.name, self.fields)


class Field(AST):
    """A typed field; ``seq`` marks ``*`` fields, ``opt`` marks ``?``."""

    def __init__(self, type, name=None, seq=False, opt=False):
        self.type = type
        self.name = name
        self.seq = seq
        self.opt = opt

    def __repr__(self):
        if self.seq:
            extra = ", seq=True"
        elif self.opt:
            extra = ", opt=True"
        else:
            extra = ""
        if self.name is None:
            return "Field(%s%s)" % (self.type, extra)
        else:
            return "Field(%s, %s%s)" % (self.type, self.name, extra)


class Sum(AST):
    """A sum type: one or more constructors plus optional attributes."""

    def __init__(self, types, attributes=None):
        self.types = types
        self.attributes = attributes or []

    def __repr__(self):
        # BUG FIX: __init__ normalizes attributes to a list, so the original
        # `is None` test could never fire and the short form was unreachable.
        if not self.attributes:
            return "Sum(%s)" % self.types
        else:
            return "Sum(%s, %s)" % (self.types, self.attributes)


class Product(AST):
    """A product type: a field tuple plus optional attributes."""

    def __init__(self, fields, attributes=None):
        self.fields = fields
        self.attributes = attributes or []

    def __repr__(self):
        # Same dead-branch fix as Sum.__repr__.
        if not self.attributes:
            return "Product(%s)" % self.fields
        else:
            return "Product(%s, %s)" % (self.fields, self.attributes)
class VisitorBase(object):
    """Dispatch-by-node-class visitor over the ASDL meta-AST.

    With skip=True, nodes that have no matching visitXxx method are silently
    ignored; otherwise a missing method is an error.
    """

    def __init__(self, skip=False):
        # Per-class method cache so getattr lookup happens once per node type.
        self.cache = {}
        self.skip = skip

    def visit(self, object, *args):
        meth = self._dispatch(object)
        if meth is None:
            return
        try:
            meth(object, *args)
        except Exception:
            # Report and abort hard: the generators driven by this visitor
            # must not continue after a partial visit.
            output("Error visiting" + repr(object))
            output(str(sys.exc_info()[1]))
            traceback.print_exc()
            # XXX hack
            if hasattr(self, 'file'):
                self.file.flush()
            os._exit(1)

    def _dispatch(self, object):
        # Resolve (and memoize) the visitXxx method for this node's class.
        assert isinstance(object, AST), repr(object)
        klass = object.__class__
        meth = self.cache.get(klass)
        if meth is None:
            methname = "visit" + klass.__name__
            if self.skip:
                meth = getattr(self, methname, None)
            else:
                meth = getattr(self, methname)
            self.cache[klass] = meth
        return meth
class Check(VisitorBase):
    """Semantic checker: detects duplicate constructor names and records
    every type referenced by a field for later undefined-type detection."""

    def __init__(self):
        super(Check, self).__init__(skip=True)
        self.cons = {}    # constructor name -> defining type name
        self.errors = 0   # running count of problems found
        self.types = {}   # referenced type name -> list of users

    def visitModule(self, mod):
        for dfn in mod.dfns:
            self.visit(dfn)

    def visitType(self, type):
        self.visit(type.value, str(type.name))

    def visitSum(self, sum, name):
        for t in sum.types:
            self.visit(t, name)

    def visitConstructor(self, cons, name):
        # Constructor names must be globally unique across all sum types.
        key = str(cons.name)
        conflict = self.cons.get(key)
        if conflict is None:
            self.cons[key] = name
        else:
            output("Redefinition of constructor %s" % key)
            output("Defined in %s and %s" % (conflict, name))
            self.errors += 1
        for f in cons.fields:
            self.visit(f, key)

    def visitField(self, field, name):
        key = str(field.type)
        l = self.types.setdefault(key, [])
        l.append(name)

    def visitProduct(self, prod, name):
        for f in prod.fields:
            self.visit(f, name)
def check(mod):
    """Run semantic checks over *mod*; return True when no errors found."""
    visitor = Check()
    visitor.visit(mod)

    for name in visitor.types:
        if name in mod.types or name in builtin_types:
            continue
        visitor.errors += 1
        uses = ", ".join(visitor.types[name])
        output("Undefined type %s, used in %s" % (name, uses))
    return not visitor.errors
def parse(file):
    """Tokenize and parse the ASDL file at *file*.

    Returns the resulting Module, or None (after printing the error and the
    offending source line) when the input has a syntax error.
    """
    scanner = ASDLScanner()
    parser = ASDLParser()

    # `with` replaces the original try/finally close; same guarantee, clearer.
    with open(file) as f:
        buf = f.read()
    tokens = scanner.tokenize(buf)
    try:
        return parser.parse(tokens)
    except ASDLSyntaxError as err:
        output(str(err))
        lines = buf.split("\n")
        output(lines[err.lineno - 1])  # lines starts at 0, files at 1
if __name__ == "__main__":
    # Self-test driver: parse and check each .asdl file named on the command
    # line (or every tests/*.asdl by default) and dump its definitions.
    import glob
    import sys
    if len(sys.argv) > 1:
        files = sys.argv[1:]
    else:
        testdir = "tests"
        files = glob.glob(testdir + "/*.asdl")
    for file in files:
        output(file)
        mod = parse(file)
        if not mod:
            break
        output("module", mod.name)
        output(len(mod.dfns), "definitions")
        if not check(mod):
            output("Check failed")
        else:
            for dfn in mod.dfns:
                output(dfn.name, dfn.value)
| lgpl-3.0 |
MoamerEncsConcordiaCa/tensorflow | tensorflow/python/kernel_tests/sparse_xent_op_test.py | 103 | 13989 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseSoftmaxCrossEntropyWithLogits op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import time
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import app
from tensorflow.python.platform import test
class SparseXentTest(test.TestCase):
def _npXent(self, features, labels):
features = np.reshape(features, [-1, features.shape[-1]])
labels = np.reshape(labels, [-1])
batch_dim = 0
class_dim = 1
batch_size = features.shape[batch_dim]
e = np.exp(features - np.reshape(
np.amax(
features, axis=class_dim), [batch_size, 1]))
probs = e / np.reshape(np.sum(e, axis=class_dim), [batch_size, 1])
labels_mat = np.zeros_like(probs).astype(probs.dtype)
labels_mat[np.arange(batch_size), labels] = 1.0
bp = (probs - labels_mat)
l = -np.sum(labels_mat * np.log(probs + 1.0e-20), axis=1)
return l, bp
def _testXent(self, np_features, np_labels):
np_loss, np_backprop = self._npXent(np_features, np_labels)
with self.test_session(use_gpu=True) as sess:
loss, backprop = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
np_features, np_labels)
tf_loss, tf_backprop = sess.run([loss, backprop])
self.assertAllCloseAccordingToType(np_loss, tf_loss)
self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
def testSingleClass(self):
for label_dtype in np.int32, np.int64:
with self.test_session(use_gpu=True) as sess:
loss, backprop = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
np.array([[1.], [-1.], [0.]]).astype(np.float32),
np.array([0, 0, 0]).astype(label_dtype))
tf_loss, tf_backprop = sess.run([loss, backprop])
self.assertAllClose([0.0, 0.0, 0.0], tf_loss)
self.assertAllClose([[0.0], [0.0], [0.0]], tf_backprop)
def testInvalidLabel(self):
features = [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 2., 3., 4.],
[1., 2., 3., 4.]]
labels = [4, 3, 0, -1]
if test.is_built_with_cuda() and test.is_gpu_available():
with self.test_session(use_gpu=True) as sess:
loss, backprop = (gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
features, labels))
tf_loss, tf_backprop = sess.run([loss, backprop])
self.assertAllClose(
[[np.nan] * 4, [0.25, 0.25, 0.25, -0.75],
[-0.968, 0.087, 0.237, 0.6439], [np.nan] * 4],
tf_backprop,
rtol=1e-3,
atol=1e-3)
self.assertAllClose(
[np.nan, 1.3862, 3.4420, np.nan], tf_loss, rtol=1e-3, atol=1e-3)
with self.test_session(use_gpu=False) as sess:
loss, backprop = (gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
features, labels))
with self.assertRaisesOpError("Received a label value of"):
sess.run([loss, backprop])
def testNpXent(self):
# We create 2 batches of logits for testing.
# batch 0 is the boring uniform distribution: 1, 1, 1, 1, with target 3.
# batch 1 has a bit of difference: 1, 2, 3, 4, with target 0.
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
labels = [3, 0]
# For batch 0, we expect the uniform distribution: 0.25, 0.25, 0.25, 0.25
# With a hard target 3, the backprop is [0.25, 0.25, 0.25, -0.75]
# The loss for this batch is -log(0.25) = 1.386
#
# For batch 1, we have:
# exp(0) = 1
# exp(1) = 2.718
# exp(2) = 7.389
# exp(3) = 20.085
# SUM = 31.192
# So we have as probabilities:
# exp(0) / SUM = 0.032
# exp(1) / SUM = 0.087
# exp(2) / SUM = 0.237
# exp(3) / SUM = 0.644
# With a hard 1, the backprop is [0.032 - 1.0 = -0.968, 0.087, 0.237, 0.644]
# The loss for this batch is [1.0 * -log(0.25), 1.0 * -log(0.032)]
# = [1.3862, 3.4420]
np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
self.assertAllClose(
np.array([[0.25, 0.25, 0.25, -0.75], [-0.968, 0.087, 0.237, 0.6439]]),
np_backprop,
rtol=1.e-3,
atol=1.e-3)
self.assertAllClose(
np.array([1.3862, 3.4420]), np_loss, rtol=1.e-3, atol=1.e-3)
def testShapeMismatch(self):
with self.test_session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, ".*Rank mismatch:*"):
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=[[0, 2]], logits=[[0., 1.], [2., 3.], [2., 3.]])
def testScalar(self):
with self.test_session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, ".*Logits cannot be scalars*"):
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=constant_op.constant(0), logits=constant_op.constant(1.0))
def testLabelsPlaceholderScalar(self):
with self.test_session(use_gpu=True):
labels = array_ops.placeholder(np.int32)
y = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=[[7.]])
with self.assertRaisesOpError("labels must be 1-D"):
y.eval(feed_dict={labels: 0})
def testVector(self):
with self.test_session(use_gpu=True):
loss = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=constant_op.constant(0), logits=constant_op.constant([1.0]))
self.assertAllClose(0.0, loss.eval())
def testFloat(self):
for label_dtype in np.int32, np.int64:
self._testXent(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32),
np.array([3, 0]).astype(label_dtype))
def testDouble(self):
for label_dtype in np.int32, np.int64:
self._testXent(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64),
np.array([0, 3]).astype(label_dtype))
def testHalf(self):
for label_dtype in np.int32, np.int64:
self._testXent(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16),
np.array([3, 0]).astype(label_dtype))
  def testEmpty(self):
    # A zero-row batch must produce a zero-length loss without errors.
    self._testXent(np.zeros((0, 3)), np.zeros((0,), dtype=np.int32))
  def testGradient(self):
    # Numerically verifies d(loss)/d(logits) for float64 logits against the
    # registered analytic gradient.
    with self.test_session(use_gpu=True):
      l = constant_op.constant([3, 0, 1], name="l")
      f = constant_op.constant(
          [0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
          shape=[3, 4],
          dtype=dtypes.float64,
          name="f")
      x = nn_ops.sparse_softmax_cross_entropy_with_logits(
          labels=l, logits=f, name="xent")
      err = gradient_checker.compute_gradient_error(f, [3, 4], x, [3])
      print("cross entropy gradient err = ", err)
      self.assertLess(err, 5e-8)
  def testSecondGradient(self):
    # Builds a small model whose loss goes through the sparse xent op and
    # checks that requesting a Hessian raises, since a second-order gradient
    # is explicitly disabled for this op.
    images_placeholder = array_ops.placeholder(dtypes.float32, shape=(3, 2))
    labels_placeholder = array_ops.placeholder(dtypes.int32, shape=(3))
    weights = variables.Variable(random_ops.truncated_normal([2], stddev=1.0))
    weights_with_zeros = array_ops.stack([array_ops.zeros([2]), weights],
                                         axis=1)
    logits = math_ops.matmul(images_placeholder, weights_with_zeros)
    cross_entropy = nn_ops.sparse_softmax_cross_entropy_with_logits(
        labels=labels_placeholder, logits=logits)
    loss = math_ops.reduce_mean(cross_entropy)
    # Taking the second gradient should fail, since it is not
    # yet supported.
    with self.assertRaisesRegexp(LookupError,
                                 "explicitly disabled"):
      _ = gradients_impl.hessians(loss, [weights])
  def _testHighDim(self, features, labels):
    # Compares the op against the numpy reference when labels have rank > 1.
    np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
    # manually reshape loss: the reference flattens to 2-D, while the op
    # preserves the label shape.
    np_loss = np.reshape(np_loss, np.array(labels).shape)
    with self.test_session(use_gpu=True) as sess:
      loss = nn_ops.sparse_softmax_cross_entropy_with_logits(
          labels=labels, logits=features)
      # Digs the kernel's backprop tensor out of the graph; assumes `loss`
      # is a reshape of the fused op's first output -- TODO confirm.
      backprop = loss.op.inputs[0].op.outputs[1]
      tf_loss, tf_backprop = sess.run([loss, backprop])
      self.assertAllCloseAccordingToType(np_loss, tf_loss)
      self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
def testHighDim(self):
features = [[[1., 1., 1., 1.]], [[1., 2., 3., 4.]]]
labels = [[3], [0]]
self._testHighDim(features, labels)
def testHighDim2(self):
features = [[[1., 1., 1., 1.], [2., 2., 2., 2.]],
[[1., 2., 3., 4.], [5., 6., 7., 8.]]]
labels = [[3, 2], [0, 3]]
self._testHighDim(features, labels)
  def testScalarHandling(self):
    # Squeezing shape-(1, 1) labels yields a 0-D tensor at run time, which
    # the CPU kernel must reject with "labels must be 1-D".
    with self.test_session(use_gpu=False) as sess:
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   ".*labels must be 1-D.*"):
        labels = array_ops.placeholder(dtypes.int32, shape=[None, 1])
        logits = array_ops.placeholder(dtypes.float32, shape=[None, 3])
        ce = nn_ops.sparse_softmax_cross_entropy_with_logits(
            labels=array_ops.squeeze(labels), logits=logits)
        labels_v2 = np.zeros((1, 1), dtype=np.int32)
        logits_v2 = np.random.randn(1, 3)
        sess.run([ce], feed_dict={labels: labels_v2, logits: logits_v2})
def _sparse_vs_dense_xent_benchmark_dense(labels, logits):
  """Benchmark path: densify labels, then use softmax_cross_entropy_with_logits.

  Returns:
    A (crossent_sum, grads) pair of graph ops.
  """
  labels = array_ops.identity(labels)
  logits = array_ops.identity(logits)
  with ops_lib.device("/cpu:0"):  # Sparse-to-dense must be on CPU
    batch_size = array_ops.shape(logits)[0]
    num_entries = array_ops.shape(logits)[1]
    length = batch_size * num_entries
    # Offset each label into the flattened [batch * entries] index space so a
    # single sparse_to_dense builds one-hot rows for the whole batch at once.
    labels += num_entries * math_ops.range(batch_size)
    target = sparse_ops.sparse_to_dense(labels,
                                        array_ops.stack([length]), 1.0, 0.0)
    # Fold the flat one-hot vector back to [batch, entries].
    target = array_ops.reshape(target, array_ops.stack([-1, num_entries]))
  crossent = nn_ops.softmax_cross_entropy_with_logits(
      labels=target, logits=logits, name="SequenceLoss/CrossEntropy")
  crossent_sum = math_ops.reduce_sum(crossent)
  grads = gradients_impl.gradients([crossent_sum], [logits])[0]
  return (crossent_sum, grads)
def _sparse_vs_dense_xent_benchmark_sparse(labels, logits):
  """Benchmark path using sparse_softmax_cross_entropy_with_logits directly.

  Args:
    labels: 1-D integer numpy array of class ids, one per row of `logits`.
    logits: 2-D float numpy array of unnormalized log-probabilities.

  Returns:
    A (crossent_sum, grads) pair of graph ops: the summed cross entropy and
    its gradient with respect to `logits`.
  """
  # The sparse op requires int32/int64 labels.
  labels = labels.astype(np.int64)
  labels = array_ops.identity(labels)
  logits = array_ops.identity(logits)
  # BUG FIX: the two tensors were previously passed positionally as
  # (logits, labels), but the op's parameter order is (labels, logits), so
  # the arguments were swapped (or rejected by the keyword-only sentinel).
  # Pass them by keyword, consistent with every other call site in this file.
  crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name="SequenceLoss/CrossEntropy")
  crossent_sum = math_ops.reduce_sum(crossent)
  grads = gradients_impl.gradients([crossent_sum], [logits])[0]
  return (crossent_sum, grads)
def sparse_vs_dense_xent_benchmark(batch_size, num_entries, use_gpu):
  """Times the dense vs. sparse xent paths for one (batch, depth, device)
  configuration and prints one row of the comparison table."""
  config = config_pb2.ConfigProto()
  config.allow_soft_placement = True
  config.gpu_options.per_process_gpu_memory_fraction = 0.3
  labels = np.random.randint(num_entries, size=batch_size).astype(np.int32)
  logits = np.random.randn(batch_size, num_entries).astype(np.float32)
  def _timer(sess, ops):
    # Warm in
    for _ in range(20):
      sess.run(ops)
    # Timing run
    start = time.time()
    for _ in range(20):
      sess.run(ops)
    end = time.time()
    return (end - start) / 20.0  # Average runtime per iteration
  # Using sparse_to_dense and softmax_cross_entropy_with_logits
  with session.Session(config=config) as sess:
    if not use_gpu:
      with ops_lib.device("/cpu:0"):
        ops = _sparse_vs_dense_xent_benchmark_dense(labels, logits)
    else:
      ops = _sparse_vs_dense_xent_benchmark_dense(labels, logits)
    delta_dense = _timer(sess, ops)
  # Using sparse_softmax_cross_entropy_with_logits
  with session.Session(config=config) as sess:
    if not use_gpu:
      with ops_lib.device("/cpu:0"):
        ops = _sparse_vs_dense_xent_benchmark_sparse(labels, logits)
    else:
      ops = _sparse_vs_dense_xent_benchmark_sparse(labels, logits)
    delta_sparse = _timer(sess, ops)
  # Last column is the sparse/dense runtime ratio (< 1 means sparse wins).
  print("%d \t %d \t %s \t %f \t %f \t %f" % (batch_size, num_entries, use_gpu,
                                              delta_dense, delta_sparse,
                                              delta_sparse / delta_dense))
def main(_):
  """Runs the benchmark grid for both devices and prints a result table."""
  print("Sparse Xent vs. SparseToDense + Xent")
  print("batch \t depth \t gpu \t dt(dense) \t dt(sparse) "
        "\t dt(sparse)/dt(dense)")
  # Standard grid plus two very wide configurations, in the original order.
  configs = [(b, d) for b in (32, 64, 128) for d in (100, 1000, 10000)]
  configs += [(32, 100000), (8, 1000000)]
  for use_gpu in (False, True):
    for batch_size, num_entries in configs:
      sparse_vs_dense_xent_benchmark(batch_size, num_entries, use_gpu)
if __name__ == "__main__":
  # With --benchmarks, run the timing comparison via app.run() (which calls
  # main); otherwise run the unit tests.
  if "--benchmarks" in sys.argv:
    sys.argv.remove("--benchmarks")
    app.run()
  else:
    test.main()
| apache-2.0 |
jmesteve/medical | openerp/addons/l10n_es_account_invoice_sequence/__init__.py | 6 | 1270 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2011 NaN Projectes de Programari Lliure, S.L. All Rights Reserved.
# http://www.NaN-tic.com
# Copyright (c) 2013 Serv. Tecnol. Avanzados (http://www.serviciosbaeza.com)
# Pedro Manuel Baeza <pedro.baeza@serviciosbaeza.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_invoice
import account_journal
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
adobecs5/urp2015 | lib/python3.4/site-packages/pip/_vendor/requests/packages/urllib3/exceptions.py | 214 | 4109 |
## Base Exceptions
class HTTPError(Exception):
    """Base exception used by this module."""
    pass
class HTTPWarning(Warning):
    """Base warning used by this module."""
    pass
class PoolError(HTTPError):
    """Base exception for errors caused within a pool."""

    def __init__(self, pool, message):
        self.pool = pool
        formatted = "%s: %s" % (pool, message)
        HTTPError.__init__(self, formatted)

    def __reduce__(self):
        # Pools may hold unpicklable resources; replace the constructor
        # arguments with None so the exception itself can be pickled.
        return self.__class__, (None, None)
class RequestError(PoolError):
    """Base exception for PoolErrors that have associated URLs."""
    def __init__(self, pool, url, message):
        self.url = url
        PoolError.__init__(self, pool, message)
    def __reduce__(self):
        # For pickling purposes: the pool is dropped (see PoolError), but the
        # URL is preserved.
        return self.__class__, (None, self.url, None)
class SSLError(HTTPError):
    """Raised when SSL certificate fails in an HTTPS connection."""
    pass
class ProxyError(HTTPError):
    """Raised when the connection to a proxy fails."""
    pass
class DecodeError(HTTPError):
    """Raised when automatic decoding based on Content-Type fails."""
    pass
class ProtocolError(HTTPError):
    """Raised when something unexpected happens mid-request/response."""
    pass
#: Renamed to ProtocolError but aliased for backwards compatibility.
#: Note: on Python 3 this name shadows the builtin ``ConnectionError`` for
#: anyone importing it from this module.
ConnectionError = ProtocolError
## Leaf Exceptions
class MaxRetryError(RequestError):
    """Raised when the maximum number of retries is exceeded.

    :param pool: The connection pool
    :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
    :param string url: The requested Url
    :param exceptions.Exception reason: The underlying error
    """

    def __init__(self, pool, url, reason=None):
        self.reason = reason
        RequestError.__init__(
            self, pool, url,
            "Max retries exceeded with url: %s (Caused by %r)" % (url, reason))
class HostChangedError(RequestError):
    """Raised when an existing pool gets a request for a foreign host."""

    def __init__(self, pool, url, retries=3):
        self.retries = retries
        RequestError.__init__(
            self, pool, url, "Tried to open a foreign host with url: %s" % url)
class TimeoutStateError(HTTPError):
    """Raised when passing an invalid state to a timeout."""
    pass
class TimeoutError(HTTPError):
    """Raised when a socket timeout error occurs.
    Catching this error will catch both :exc:`ReadTimeoutErrors
    <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
    """
    pass
class ReadTimeoutError(TimeoutError, RequestError):
    """Raised when a socket timeout occurs while receiving data from a server."""
    pass
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
    """Raised when a socket timeout occurs while connecting to a server."""
    pass
class EmptyPoolError(PoolError):
    """Raised when a pool runs out of connections and no more are allowed."""
    pass
class ClosedPoolError(PoolError):
    """Raised when a request enters a pool after the pool has been closed."""
    pass
class LocationValueError(ValueError, HTTPError):
    """Raised when there is something wrong with a given URL input."""
    pass
class LocationParseError(LocationValueError):
    """Raised when get_host or similar fails to parse the URL input."""

    def __init__(self, location):
        self.location = location
        HTTPError.__init__(self, "Failed to parse: %s" % location)
class ResponseError(HTTPError):
    """Used as a container for an error reason supplied in a MaxRetryError."""
    # Message templates; SPECIFIC_ERROR is meant to be formatted with a
    # ``status_code`` value.
    GENERIC_ERROR = 'too many error responses'
    SPECIFIC_ERROR = 'too many {status_code} error responses'
class SecurityWarning(HTTPWarning):
    """Warned when performing security reducing actions."""
    pass
class InsecureRequestWarning(SecurityWarning):
    """Warned when making an unverified HTTPS request."""
    pass
class SystemTimeWarning(SecurityWarning):
    """Warned when system time is suspected to be wrong."""
    pass
| apache-2.0 |
popazerty/enigma2cuberevo | lib/python/Plugins/SystemPlugins/SkinSelector/plugin.py | 7 | 4037 | # -*- coding: iso-8859-1 -*-
# (c) 2006 Stephan Reichholf
# This Software is Free, use it where you want, when you want for whatever you want and modify it if you want but don't remove my copyright!
from Screens.Screen import Screen
from Screens.Standby import TryQuitMainloop
from Screens.MessageBox import MessageBox
from Components.ActionMap import NumberActionMap
from Components.Pixmap import Pixmap
from Components.Sources.StaticText import StaticText
from Components.MenuList import MenuList
from Plugins.Plugin import PluginDescriptor
from Components.config import config
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
from os import path, walk
from enigma import eEnv
class SkinSelector(Screen):
# for i18n:
# _("Choose your Skin")
skinlist = []
root = eEnv.resolve("${datadir}/enigma2/")
def __init__(self, session, args = None):
Screen.__init__(self, session)
self.skinlist = []
self.previewPath = ""
path.walk(self.root, self.find, "")
self["key_red"] = StaticText(_("Close"))
self["introduction"] = StaticText(_("Press OK to activate the selected skin."))
self.skinlist.sort()
self["SkinList"] = MenuList(self.skinlist)
self["Preview"] = Pixmap()
self["actions"] = NumberActionMap(["WizardActions", "InputActions", "EPGSelectActions"],
{
"ok": self.ok,
"back": self.close,
"red": self.close,
"up": self.up,
"down": self.down,
"left": self.left,
"right": self.right,
"info": self.info,
}, -1)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
tmp = config.skin.primary_skin.value.find('/skin.xml')
if tmp != -1:
tmp = config.skin.primary_skin.value[:tmp]
idx = 0
for skin in self.skinlist:
if skin == tmp:
break
idx += 1
if idx < len(self.skinlist):
self["SkinList"].moveToIndex(idx)
self.loadPreview()
def up(self):
self["SkinList"].up()
self.loadPreview()
def down(self):
self["SkinList"].down()
self.loadPreview()
def left(self):
self["SkinList"].pageUp()
self.loadPreview()
def right(self):
self["SkinList"].pageDown()
self.loadPreview()
def info(self):
aboutbox = self.session.open(MessageBox,_("Enigma2 Skinselector\n\nIf you experience any problems please contact\nstephan@reichholf.net\n\n\xA9 2006 - Stephan Reichholf"), MessageBox.TYPE_INFO)
aboutbox.setTitle(_("About..."))
def find(self, arg, dirname, names):
for x in names:
if x == "skin.xml":
if dirname <> self.root:
subdir = dirname[19:]
self.skinlist.append(subdir)
else:
subdir = "Default Skin"
self.skinlist.append(subdir)
def ok(self):
if self["SkinList"].getCurrent() == "Default Skin":
skinfile = "skin.xml"
else:
skinfile = self["SkinList"].getCurrent()+"/skin.xml"
print "Skinselector: Selected Skin: "+self.root+skinfile
config.skin.primary_skin.value = skinfile
config.skin.primary_skin.save()
restartbox = self.session.openWithCallback(self.restartGUI,MessageBox,_("GUI needs a restart to apply a new skin\nDo you want to Restart the GUI now?"), MessageBox.TYPE_YESNO)
restartbox.setTitle(_("Restart GUI now?"))
def loadPreview(self):
if self["SkinList"].getCurrent() == "Default Skin":
pngpath = self.root+"/prev.png"
else:
pngpath = self.root+self["SkinList"].getCurrent()+"/prev.png"
if not path.exists(pngpath):
pngpath = resolveFilename(SCOPE_PLUGINS, "SystemPlugins/SkinSelector/noprev.png")
if self.previewPath != pngpath:
self.previewPath = pngpath
self["Preview"].instance.setPixmapFromFile(self.previewPath)
def restartGUI(self, answer):
if answer is True:
self.session.open(TryQuitMainloop, 3)
def SkinSelMain(session, **kwargs):
    # Plugin entry point: open the skin selection screen.
    session.open(SkinSelector)
def SkinSelSetup(menuid, **kwargs):
    """Menu hook: contribute the Skin entry to the "system" setup menu only."""
    if menuid != "system":
        return []
    return [(_("Skin"), SkinSelMain, "skin_selector", None)]
def Plugins(**kwargs):
    # Register the selector in the plugin menu; installation does not need a
    # GUI restart.
    return PluginDescriptor(name="Skinselector", description="Select Your Skin", where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc=SkinSelSetup)
| gpl-2.0 |
jeremymcintyre/jeremymcintyre.github.io | node_modules/bootstrap/node_modules/npm-shrinkwrap/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSToolFile.py | 2736 | 1804 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
  """Visual Studio XML tool file writer."""

  def __init__(self, tool_file_path, name):
    """Initializes the tool file.

    Args:
      tool_file_path: Path to the tool file.
      name: Name of the tool file.
    """
    self.tool_file_path = tool_file_path
    self.name = name
    # Accumulates rules in easy_xml's nested-list format; the leading
    # string is the XML tag name.
    self.rules_section = ['Rules']

  def AddCustomBuildRule(self, name, cmd, description,
                         additional_dependencies, outputs, extensions):
    """Adds a rule to the tool file.

    Args:
      name: Name of the rule.
      description: Description of the rule.
      cmd: Command line of the rule.
      additional_dependencies: other files which may trigger the rule.
      outputs: outputs of the rule.
      extensions: extensions handled by the rule.
    """
    attributes = {
        'Name': name,
        'ExecutionDescription': description,
        'CommandLine': cmd,
        'Outputs': ';'.join(outputs),
        'FileExtensions': ';'.join(extensions),
        'AdditionalDependencies': ';'.join(additional_dependencies),
    }
    self.rules_section.append(['CustomBuildRule', attributes])

  def WriteIfChanged(self):
    """Writes the tool file (only touching disk when the content changed)."""
    content = ['VisualStudioToolFile',
               {'Version': '8.00',
                'Name': self.name},
               self.rules_section]
    easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
                               encoding="Windows-1252")
| mit |
divya-csekar/flask-microblog-server | flask/Lib/site-packages/whoosh/filedb/filestore.py | 73 | 21402 | # Copyright 2009 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import with_statement
import errno, os, sys, tempfile
from threading import Lock
from whoosh.compat import BytesIO, memoryview_
from whoosh.filedb.structfile import BufferFile, StructFile
from whoosh.index import _DEF_INDEX_NAME, EmptyIndexError
from whoosh.util import random_name
from whoosh.util.filelock import FileLock
# Exceptions
class StorageError(Exception):
    """Base class for exceptions raised by storage implementations."""
    pass
class ReadOnlyError(StorageError):
    """Raised when a mutating operation is attempted on a read-only storage."""
    pass
# Base class
class Storage(object):
    """Abstract base class for storage objects.
    A storage object is a virtual flat filesystem, allowing the creation and
    retrieval of file-like objects
    (:class:`~whoosh.filedb.structfile.StructFile` objects). The default
    implementation (:class:`FileStorage`) uses actual files in a directory.
    All access to files in Whoosh goes through this object. This allows more
    different forms of storage (for example, in RAM, in a database, in a single
    file) to be used transparently.
    For example, to create a :class:`FileStorage` object::
        # Create a storage object
        st = FileStorage("indexdir")
        # Create the directory if it doesn't already exist
        st.create()
    The :meth:`Storage.create` method makes it slightly easier to swap storage
    implementations. The ``create()`` method handles set-up of the storage
    object. For example, ``FileStorage.create()`` creates the directory. A
    database implementation might create tables. This is designed to let you
    avoid putting implementation-specific setup code in your application.
    """
    # Subclasses override these capability/mode flags as appropriate.
    readonly = False
    supports_mmap = False
    def __iter__(self):
        return iter(self.list())
    def __enter__(self):
        # Context-manager support: entering creates the storage.
        self.create()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
    def create(self):
        """Creates any required implementation-specific resources. For example,
        a filesystem-based implementation might create a directory, while a
        database implementation might create tables. For example::
            from whoosh.filedb.filestore import FileStorage
            # Create a storage object
            st = FileStorage("indexdir")
            # Create any necessary resources
            st.create()
        This method returns ``self`` so you can also say::
            st = FileStorage("indexdir").create()
        Storage implementations should be written so that calling create() a
        second time on the same storage is safe and does not destroy existing
        resources (for example, ``FileStorage.create`` simply ignores an
        already-existing directory).
        :return: a :class:`Storage` instance.
        """
        return self
    def destroy(self, *args, **kwargs):
        """Removes any implementation-specific resources related to this storage
        object. For example, a filesystem-based implementation might delete a
        directory, and a database implementation might drop tables.
        The arguments are implementation-specific.
        """
        pass
    def create_index(self, schema, indexname=_DEF_INDEX_NAME, indexclass=None):
        """Creates a new index in this storage.
        >>> from whoosh import fields
        >>> from whoosh.filedb.filestore import FileStorage
        >>> schema = fields.Schema(content=fields.TEXT)
        >>> # Create the storage directory
        >>> st = FileStorage.create("indexdir")
        >>> # Create an index in the storage
        >>> ix = st.create_index(schema)
        :param schema: the :class:`whoosh.fields.Schema` object to use for the
            new index.
        :param indexname: the name of the index within the storage object. You
            can use this option to store multiple indexes in the same storage.
        :param indexclass: an optional custom ``Index`` sub-class to use to
            create the index files. The default is
            :class:`whoosh.index.FileIndex`. This method will call the
            ``create`` class method on the given class to create the index.
        :return: a :class:`whoosh.index.Index` instance.
        """
        if self.readonly:
            raise ReadOnlyError
        if indexclass is None:
            import whoosh.index
            indexclass = whoosh.index.FileIndex
        return indexclass.create(self, schema, indexname)
    def open_index(self, indexname=_DEF_INDEX_NAME, schema=None, indexclass=None):
        """Opens an existing index (created using :meth:`create_index`) in this
        storage.
        >>> from whoosh.filedb.filestore import FileStorage
        >>> st = FileStorage("indexdir")
        >>> # Open an index in the storage
        >>> ix = st.open_index()
        :param indexname: the name of the index within the storage object. You
            can use this option to store multiple indexes in the same storage.
        :param schema: if you pass in a :class:`whoosh.fields.Schema` object
            using this argument, it will override the schema that was stored
            with the index.
        :param indexclass: an optional custom ``Index`` sub-class to use to
            open the index files. The default is
            :class:`whoosh.index.FileIndex`. This method will instantiate the
            class with this storage object.
        :return: a :class:`whoosh.index.Index` instance.
        """
        if indexclass is None:
            import whoosh.index
            indexclass = whoosh.index.FileIndex
        return indexclass(self, schema=schema, indexname=indexname)
    def index_exists(self, indexname=None):
        """Returns True if a non-empty index exists in this storage.
        :param indexname: the name of the index within the storage object. You
            can use this option to store multiple indexes in the same storage.
        :rtype: bool
        """
        if indexname is None:
            indexname = _DEF_INDEX_NAME
        try:
            ix = self.open_index(indexname)
            gen = ix.latest_generation()
            ix.close()
            # A generation of -1 means the index has never been written to.
            return gen > -1
        except EmptyIndexError:
            pass
        return False
    def create_file(self, name):
        """Creates a file with the given name in this storage.
        :param name: the name for the new file.
        :return: a :class:`whoosh.filedb.structfile.StructFile` instance.
        """
        raise NotImplementedError
    def open_file(self, name, *args, **kwargs):
        """Opens a file with the given name in this storage.
        :param name: the name for the new file.
        :return: a :class:`whoosh.filedb.structfile.StructFile` instance.
        """
        raise NotImplementedError
    def list(self):
        """Returns a list of file names in this storage.
        :return: a list of strings
        """
        raise NotImplementedError
    def file_exists(self, name):
        """Returns True if the given file exists in this storage.
        :param name: the name to check.
        :rtype: bool
        """
        raise NotImplementedError
    def file_modified(self, name):
        """Returns the last-modified time of the given file in this storage (as
        a "ctime" UNIX timestamp).
        :param name: the name to check.
        :return: a "ctime" number.
        """
        raise NotImplementedError
    def file_length(self, name):
        """Returns the size (in bytes) of the given file in this storage.
        :param name: the name to check.
        :rtype: int
        """
        raise NotImplementedError
    def delete_file(self, name):
        """Removes the given file from this storage.
        :param name: the name to delete.
        """
        raise NotImplementedError
    def rename_file(self, frm, to, safe=False):
        """Renames a file in this storage.
        :param frm: The current name of the file.
        :param to: The new name for the file.
        :param safe: if True, raise an exception if a file with the new name
            already exists.
        """
        raise NotImplementedError
    def lock(self, name):
        """Return a named lock object (implementing ``.acquire()`` and
        ``.release()`` methods). Different storage implementations may use
        different lock types with different guarantees. For example, the
        RamStorage object uses Python thread locks, while the FileStorage
        object uses filesystem-based locks that are valid across different
        processes.
        :param name: a name for the lock.
        :return: a lock-like object.
        """
        raise NotImplementedError
    def close(self):
        """Closes any resources opened by this storage object. For some storage
        implementations this will be a no-op, but for others it is necessary
        to release locks and/or prevent leaks, so it's a good idea to call it
        when you're done with a storage object.
        """
        pass
    def optimize(self):
        """Optimizes the storage object. The meaning and cost of "optimizing"
        will vary by implementation. For example, a database implementation
        might run a garbage collection procedure on the underlying database.
        """
        pass
    def temp_storage(self, name=None):
        """Creates a new storage object for temporary files. You can call
        :meth:`Storage.destroy` on the new storage when you're finished with
        it.
        :param name: a name for the new storage. This may be optional or
            required depending on the storage implementation.
        :rtype: :class:`Storage`
        """
        raise NotImplementedError
class OverlayStorage(Storage):
    """Overlays two storage objects. Reads are processed from the first if it
    has the named file, otherwise the second. Writes always go to the second.
    """

    def __init__(self, a, b):
        # a: read-preferred storage; b: write storage and read fallback.
        self.a = a
        self.b = b

    def create_index(self, *args, **kwargs):
        # BUG FIX: propagate the result. Storage.create_index returns the new
        # Index object; previously the return value was silently discarded.
        return self.b.create_index(*args, **kwargs)

    def open_index(self, *args, **kwargs):
        # BUG FIX: return the opened index. Previously this method always
        # returned None, so callers never received the Index object.
        return self.a.open_index(*args, **kwargs)

    def create_file(self, *args, **kwargs):
        # Writes always go to the second storage.
        return self.b.create_file(*args, **kwargs)

    def open_file(self, name, *args, **kwargs):
        # Reads prefer the first storage, falling back to the second.
        if self.a.file_exists(name):
            return self.a.open_file(name, *args, **kwargs)
        else:
            return self.b.open_file(name, *args, **kwargs)

    def list(self):
        # Union of the two storages' file lists (duplicates removed).
        return list(set(self.a.list()) | set(self.b.list()))

    def file_exists(self, name):
        return self.a.file_exists(name) or self.b.file_exists(name)

    def file_modified(self, name):
        if self.a.file_exists(name):
            return self.a.file_modified(name)
        else:
            return self.b.file_modified(name)

    def file_length(self, name):
        if self.a.file_exists(name):
            return self.a.file_length(name)
        else:
            return self.b.file_length(name)

    def delete_file(self, name):
        # Deletions only affect the writable (second) storage.
        return self.b.delete_file(name)

    def rename_file(self, *args, **kwargs):
        # Renaming across an overlay is ambiguous and unsupported.
        raise NotImplementedError

    def lock(self, name):
        return self.b.lock(name)

    def close(self):
        self.a.close()
        self.b.close()

    def optimize(self):
        self.a.optimize()
        self.b.optimize()

    def temp_storage(self, name=None):
        return self.b.temp_storage(name=name)
class FileStorage(Storage):
    """Storage object that stores the index as files in a directory on disk.
    Prior to version 3, the initializer would raise an IOError if the directory
    did not exist. As of version 3, the object does not check if the
    directory exists at initialization. This change is to support using the
    :meth:`FileStorage.create` method.
    """
    supports_mmap = True
    def __init__(self, path, supports_mmap=True, readonly=False, debug=False):
        """
        :param path: a path to a directory.
        :param supports_mmap: if True (the default), use the ``mmap`` module to
            open memory mapped files. You can open the storage object with
            ``supports_mmap=False`` to force Whoosh to open files normally
            instead of with ``mmap``.
        :param readonly: If ``True``, the object will raise an exception if you
            attempt to create or rename a file.
        :param debug: stored on the instance as ``_debug``; not otherwise
            consulted by the methods of this class.
        """
        self.folder = path
        self.supports_mmap = supports_mmap
        self.readonly = readonly
        self._debug = debug
        # NOTE(review): ``locks`` is initialized here but never used by this
        # class (``lock()`` returns a fresh FileLock) -- possibly kept for
        # subclasses; confirm before removing.
        self.locks = {}
    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self.folder)
    def create(self):
        """Creates this storage object's directory path using ``os.makedirs`` if
        it doesn't already exist.
        >>> from whoosh.filedb.filestore import FileStorage
        >>> st = FileStorage("indexdir")
        >>> st.create()
        This method returns ``self``, you can say::
            st = FileStorage("indexdir").create()
        Note that you can simply create handle the creation of the directory
        yourself and open the storage object using the initializer::
            dirname = "indexdir"
            os.mkdir(dirname)
            st = FileStorage(dirname)
        However, using the ``create()`` method allows you to potentially swap in
        other storage implementations more easily.
        :return: a :class:`Storage` instance.
        """
        dirpath = os.path.abspath(self.folder)
        # If the given directory does not already exist, try to create it
        try:
            os.makedirs(dirpath)
        except OSError:
            # This is necessary for compatibility between Py2 and Py3
            e = sys.exc_info()[1]
            # If we get an error because the path already exists, ignore it
            if e.errno != errno.EEXIST:
                raise
        # Raise an exception if the given path is not a directory
        if not os.path.isdir(dirpath):
            e = IOError("%r is not a directory" % dirpath)
            e.errno = errno.ENOTDIR
            raise e
        return self
    def destroy(self):
        """Removes any files in this storage object and then removes the
        storage object's directory. What happens if any of the files or the
        directory are in use depends on the underlying platform.
        """
        # Remove all files
        self.clean()
        # Try to remove the directory
        os.rmdir(self.folder)
    def create_file(self, name, excl=False, mode="wb", **kwargs):
        """Creates a file with the given name in this storage.
        :param name: the name for the new file.
        :param excl: if True, try to open the file in "exclusive" mode.
        :param mode: the mode flags with which to open the file. The default is
            ``"wb"``.
        :return: a :class:`whoosh.filedb.structfile.StructFile` instance.
        """
        if self.readonly:
            raise ReadOnlyError
        path = self._fpath(name)
        if excl:
            # O_EXCL makes creation fail if the file already exists.
            flags = os.O_CREAT | os.O_EXCL | os.O_RDWR
            if hasattr(os, "O_BINARY"):
                flags |= os.O_BINARY
            fd = os.open(path, flags)
            fileobj = os.fdopen(fd, mode)
        else:
            fileobj = open(path, mode)
        f = StructFile(fileobj, name=name, **kwargs)
        return f
    def open_file(self, name, **kwargs):
        """Opens an existing file in this storage.
        :param name: the name of the file to open.
        :param kwargs: additional keyword arguments are passed through to the
            :class:`~whoosh.filedb.structfile.StructFile` initializer.
        :return: a :class:`whoosh.filedb.structfile.StructFile` instance.
        """
        f = StructFile(open(self._fpath(name), "rb"), name=name, **kwargs)
        return f
    def _fpath(self, fname):
        # Maps a storage-relative file name to an absolute filesystem path.
        return os.path.abspath(os.path.join(self.folder, fname))
    def clean(self, ignore=False):
        # Deletes every file in the storage directory; with ignore=True,
        # per-file OS errors are swallowed.
        if self.readonly:
            raise ReadOnlyError
        path = self.folder
        files = self.list()
        for fname in files:
            try:
                os.remove(os.path.join(path, fname))
            except OSError:
                if not ignore:
                    raise
    def list(self):
        # Errors listing the directory yield an empty list (note: IOError is
        # an alias of OSError on Python 3, so this also covers OSError there).
        try:
            files = os.listdir(self.folder)
        except IOError:
            files = []
        return files
    def file_exists(self, name):
        return os.path.exists(self._fpath(name))
    def file_modified(self, name):
        return os.path.getmtime(self._fpath(name))
    def file_length(self, name):
        return os.path.getsize(self._fpath(name))
    def delete_file(self, name):
        if self.readonly:
            raise ReadOnlyError
        os.remove(self._fpath(name))
    def rename_file(self, oldname, newname, safe=False):
        if self.readonly:
            raise ReadOnlyError
        if os.path.exists(self._fpath(newname)):
            if safe:
                raise NameError("File %r exists" % newname)
            else:
                # Overwrite: remove the target first (required on Windows).
                os.remove(self._fpath(newname))
        os.rename(self._fpath(oldname), self._fpath(newname))
    def lock(self, name):
        # Cross-process, filesystem-based lock.
        return FileLock(self._fpath(name))
    def temp_storage(self, name=None):
        # Creates (and initializes) a sub-directory storage for temp files.
        name = name or "%s.tmp" % random_name()
        path = os.path.join(self.folder, name)
        tempstore = FileStorage(path)
        return tempstore.create()
class RamStorage(Storage):
    """Storage implementation that keeps the whole index in memory, as a
    dict mapping file names to byte strings.
    """

    # In-memory buffers cannot be memory-mapped.
    supports_mmap = False

    def __init__(self):
        self.files = {}
        self.locks = {}
        self.folder = ''

    def destroy(self):
        # Drop the backing dicts; the instance is unusable afterwards.
        del self.files
        del self.locks

    def list(self):
        return list(self.files.keys())

    def clean(self):
        self.files = {}

    def total_size(self):
        # Combined size of all stored files, in bytes.
        return sum(self.file_length(fname) for fname in self.list())

    def file_exists(self, name):
        return name in self.files

    def file_length(self, name):
        if name not in self.files:
            raise NameError(name)
        return len(self.files[name])

    def file_modified(self, name):
        # Modification times are not tracked for in-memory files.
        return -1

    def delete_file(self, name):
        if name not in self.files:
            raise NameError(name)
        del self.files[name]

    def rename_file(self, name, newname, safe=False):
        if name not in self.files:
            raise NameError(name)
        if safe and newname in self.files:
            raise NameError("File %r exists" % newname)
        self.files[newname] = self.files.pop(name)

    def create_file(self, name, **kwargs):
        # The content only becomes visible in self.files once the returned
        # StructFile is closed.
        def _capture(sfile):
            self.files[name] = sfile.file.getvalue()

        return StructFile(BytesIO(), name=name, onclose=_capture)

    def open_file(self, name, **kwargs):
        if name not in self.files:
            raise NameError(name)
        view = memoryview_(self.files[name])
        return BufferFile(view, name=name, **kwargs)

    def lock(self, name):
        # Lazily create one shared in-process lock object per name.
        if name not in self.locks:
            self.locks[name] = Lock()
        return self.locks[name]

    def temp_storage(self, name=None):
        if not name:
            name = "%s.tmp" % random_name()
        path = os.path.join(tempfile.gettempdir(), name)
        return FileStorage(path).create()
def copy_storage(sourcestore, deststore):
    """Copy every file from ``sourcestore`` into ``deststore``.

    File contents are streamed with ``shutil.copyfileobj``, so individual
    files are never loaded into memory whole.
    """
    from shutil import copyfileobj

    for fname in sourcestore.list():
        with sourcestore.open_file(fname) as src:
            with deststore.create_file(fname) as dst:
                copyfileobj(src, dst)
def copy_to_ram(storage):
    """Return a new RamStorage holding a copy of every file in ``storage``.

    :rtype: :class:`RamStorage`
    """
    memstore = RamStorage()
    copy_storage(storage, memstore)
    return memstore
| bsd-3-clause |
liorvh/Empire | lib/modules/situational_awareness/host/computerdetails.py | 19 | 4385 | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Get-ComputerDetails',
'Author': ['@JosephBialek'],
'Description': ('Enumerates useful information on the system. By default, all checks are run.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : True,
'OpsecSafe' : True,
'MinPSVersion' : '2',
'Comments': [
'https://github.com/mattifestation/PowerSploit/blob/master/Recon/Get-ComputerDetails.ps1'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'4648' : {
'Description' : 'Switch. Only return 4648 logon information (RDP to another machine).',
'Required' : False,
'Value' : ''
},
'4624' : {
'Description' : 'Switch. Only return 4624 logon information (logons to this machine).',
'Required' : False,
'Value' : ''
},
'AppLocker' : {
'Description' : 'Switch. Only return AppLocker logs.',
'Required' : False,
'Value' : ''
},
'PSScripts' : {
'Description' : 'Switch. Only return PowerShell scripts run from operational log.',
'Required' : False,
'Value' : ''
},
'SavedRDP' : {
'Description' : 'Switch. Only return saved RDP connections.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/host/Get-ComputerDetails.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if option == "4648":
script += "$SecurityLog = Get-EventLog -LogName Security;$Filtered4624 = Find-4624Logons $SecurityLog;Write-Output $Filtered4624.Values | Format-List"
return script
if option == "4624":
script += "$SecurityLog = Get-EventLog -LogName Security;$Filtered4648 = Find-4648Logons $SecurityLog;Write-Output $Filtered4648.Values | Format-List"
return script
if option == "AppLocker":
script += "$AppLockerLogs = Find-AppLockerLogs;Write-Output $AppLockerLogs.Values | Format-List"
return script
if option == "PSLogs":
script += "$PSLogs = Find-PSScriptsInPSAppLog;Write-Output $PSLogs.Values | Format-List"
return script
if option == "SavedRDP":
script += "$RdpClientData = Find-RDPClientConnections;Write-Output $RdpClientData.Values | Format-List"
return script
# if we get to this point, no switched were specified
return script + "Get-ComputerDetails -ToString"
| bsd-3-clause |
aminert/scikit-learn | examples/neighbors/plot_species_kde.py | 282 | 4059 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in
   Peru, Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
    from mpl_toolkits.basemap import Basemap
    basemap = True
except ImportError:
    basemap = False

# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']

Xtrain = np.vstack([data['train']['dd lat'],
                    data['train']['dd long']]).T
# Label is 1 for Microryzomys minutus records, 0 for Bradypus variegatus.
ytrain = np.array([d.decode('ascii').startswith('micro')
                   for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180.  # Convert lat/long to radians

# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
# NOTE(review): coverage layer 6 is used here only as a land/sea reference
# (-9999 marks ocean cells) -- confirm against the dataset documentation.
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()

# Flatten the grid to (lat, lon) points, keep only land cells, and
# convert to radians for the haversine metric.
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.

# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)

for i in range(2):
    plt.subplot(1, 2, i + 1)

    # construct a kernel density estimate of the distribution
    print(" - computing KDE in spherical coordinates")
    kde = KernelDensity(bandwidth=0.04, metric='haversine',
                        kernel='gaussian', algorithm='ball_tree')
    kde.fit(Xtrain[ytrain == i])

    # evaluate only on the land: -9999 indicates ocean
    Z = -9999 + np.zeros(land_mask.shape[0])
    Z[land_mask] = np.exp(kde.score_samples(xy))
    Z = Z.reshape(X.shape)

    # plot contours of the density
    levels = np.linspace(0, Z.max(), 25)
    plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)

    if basemap:
        print(" - plot coastlines using basemap")
        m = Basemap(projection='cyl', llcrnrlat=Y.min(),
                    urcrnrlat=Y.max(), llcrnrlon=X.min(),
                    urcrnrlon=X.max(), resolution='c')
        m.drawcoastlines()
        m.drawcountries()
    else:
        print(" - plot coastlines from coverage")
        plt.contour(X, Y, land_reference,
                    levels=[-9999], colors="k",
                    linestyles="solid")
        plt.xticks([])
        plt.yticks([])

    plt.title(species_names[i])

plt.show()
| bsd-3-clause |
mezz64/home-assistant | homeassistant/components/amcrest/sensor.py | 10 | 4366 | """Support for Amcrest IP camera sensors."""
from datetime import timedelta
import logging
from amcrest import AmcrestError
from homeassistant.const import CONF_NAME, CONF_SENSORS, PERCENTAGE
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from .const import DATA_AMCREST, DEVICES, SENSOR_SCAN_INTERVAL_SECS, SERVICE_UPDATE
from .helpers import log_update_error, service_signal
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=SENSOR_SCAN_INTERVAL_SECS)
SENSOR_PTZ_PRESET = "ptz_preset"
SENSOR_SDCARD = "sdcard"
# Sensor types are defined like: Name, units, icon
SENSORS = {
SENSOR_PTZ_PRESET: ["PTZ Preset", None, "mdi:camera-iris"],
SENSOR_SDCARD: ["SD Used", PERCENTAGE, "mdi:sd"],
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up a sensor for an Amcrest IP Camera."""
    if discovery_info is None:
        return

    name = discovery_info[CONF_NAME]
    device = hass.data[DATA_AMCREST][DEVICES][name]
    sensors = [
        AmcrestSensor(name, device, sensor_type)
        for sensor_type in discovery_info[CONF_SENSORS]
    ]
    # Second argument requests an immediate first update for each entity.
    async_add_entities(sensors, True)
class AmcrestSensor(Entity):
    """A sensor implementation for Amcrest IP camera."""

    def __init__(self, name, device, sensor_type):
        """Initialize a sensor for Amcrest camera."""
        self._name = f"{name} {SENSORS[sensor_type][0]}"
        self._signal_name = name
        self._api = device.api
        self._sensor_type = sensor_type
        self._state = None
        self._attrs = {}
        self._unit_of_measurement = SENSORS[sensor_type][1]
        self._icon = SENSORS[sensor_type][2]
        # Dispatcher unsubscribe callback; set in async_added_to_hass.
        self._unsub_dispatcher = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._attrs

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return self._icon

    @property
    def unit_of_measurement(self):
        """Return the units of measurement."""
        return self._unit_of_measurement

    @property
    def available(self):
        """Return True if entity is available."""
        return self._api.available

    @staticmethod
    def _format_size(value, unit):
        """Render a storage figure as "<value> <unit>".

        Numeric values are formatted with two decimals; non-numeric values
        (e.g. the API reporting a placeholder string) are passed through
        unchanged, matching the previous inline try/except blocks.
        """
        try:
            return f"{value:.2f} {unit}"
        except ValueError:
            return f"{value} {unit}"

    def update(self):
        """Get the latest data and updates the state."""
        if not self.available:
            return
        _LOGGER.debug("Updating %s sensor", self._name)

        try:
            if self._sensor_type == SENSOR_PTZ_PRESET:
                self._state = self._api.ptz_presets_count
            elif self._sensor_type == SENSOR_SDCARD:
                storage = self._api.storage_all
                # The duplicated formatting logic for Total/Used is
                # factored into _format_size.
                self._attrs["Total"] = self._format_size(
                    storage["total"][0], storage["total"][1]
                )
                self._attrs["Used"] = self._format_size(
                    storage["used"][0], storage["used"][1]
                )
                try:
                    self._state = f"{storage['used_percent']:.2f}"
                except ValueError:
                    self._state = storage["used_percent"]
        except AmcrestError as error:
            log_update_error(_LOGGER, "update", self.name, "sensor", error)

    async def async_on_demand_update(self):
        """Update state."""
        self.async_schedule_update_ha_state(True)

    async def async_added_to_hass(self):
        """Subscribe to update signal."""
        self._unsub_dispatcher = async_dispatcher_connect(
            self.hass,
            service_signal(SERVICE_UPDATE, self._signal_name),
            self.async_on_demand_update,
        )

    async def async_will_remove_from_hass(self):
        """Disconnect from update signal."""
        # BUGFIX: guard against being removed before async_added_to_hass
        # ever ran, in which case the callback is still None.
        if self._unsub_dispatcher:
            self._unsub_dispatcher()
            self._unsub_dispatcher = None
| apache-2.0 |
xkmato/youtube-dl | youtube_dl/extractor/thisav.py | 164 | 1595 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import determine_ext
class ThisAVIE(InfoExtractor):
    """Extractor for thisav.com video pages."""

    _VALID_URL = r'https?://(?:www\.)?thisav\.com/video/(?P<id>[0-9]+)/.*'
    _TEST = {
        'url': 'http://www.thisav.com/video/47734/%98%26sup1%3B%83%9E%83%82---just-fit.html',
        'md5': '0480f1ef3932d901f0e0e719f188f19b',
        'info_dict': {
            'id': '47734',
            'ext': 'flv',
            'title': '高樹マリア - Just fit',
            'uploader': 'dj7970',
            'uploader_id': 'dj7970'
        }
    }

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group('id')
        webpage = self._download_webpage(url, video_id)

        # Page metadata: title and the player's media URL.
        title = self._html_search_regex(r'<h1>([^<]*)</h1>', webpage, 'title')
        video_url = self._html_search_regex(
            r"addVariable\('file','([^']+)'\);", webpage, 'video url')

        # Uploader details are optional (fatal=False -> None when absent).
        uploader = self._html_search_regex(
            r': <a href="http://www.thisav.com/user/[0-9]+/(?:[^"]+)">([^<]+)</a>',
            webpage, 'uploader name', fatal=False)
        uploader_id = self._html_search_regex(
            r': <a href="http://www.thisav.com/user/[0-9]+/([^"]+)">(?:[^<]+)</a>',
            webpage, 'uploader id', fatal=False)

        info = {
            'id': video_id,
            'url': video_url,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'title': title,
        }
        info['ext'] = determine_ext(video_url)
        return info
| unlicense |
yrapop01/fable | legacy/back/http_server.py | 1 | 1725 | import os
import glob
from fable import front
from fable import config
from fable.logs import log, handler, shutdown
from fable.back.end import main, bye
from sanic import Sanic
from sanic import log as sanic_log
# Module-level application and logger singletons shared by the handlers
# and listeners defined below.
_app = Sanic()
_log = log('backend')
def add_static(app, root, path):
    """Register every static asset found under ``path`` on ``app``.

    Files are served below ``root`` using their path relative to ``path``.
    Only files whose extension is a known web-asset type are registered,
    and ``fable.html`` is additionally mounted at the bare root URL.
    """
    allowed = {'png', 'woff', 'woff2', 'css', 'map', 'js', 'html', 'ico',
               'eot', 'ttf', 'svg'}

    pattern = os.path.join(path, '**')
    for full in glob.glob(pattern, recursive=True):
        rel = full[len(path) + 1:]
        _log.debug('Adding static file:', full)
        if full.rsplit('.', 2)[-1] not in allowed:
            continue
        app.static(root + '/' + rel, full)
        if rel == 'fable.html':
            # Also serve the SPA entry point at the root URL itself.
            app.static(root + '/', full)
@_app.websocket(config.root + '/ws')
async def serve(request, ws):
    # WebSocket endpoint: hand the connection to the backend main loop,
    # which drives the whole session until the socket closes.
    await main(ws)
@_app.listener('after_server_start')
def after_start(app, loop):
    # Announce the server URL both in the log and on stdout so the user
    # can find the UI address immediately.
    url = 'http://{0}:{1}/{2}'.format(config.host, config.port, config.root)
    _log.info('Started Fable on address ' + url)
    print('Welcome to Fable. URL:', url, flush=True)
@_app.listener('before_server_stop')
def before_stop(app, loop):
    """Shut down the backend cleanly before the HTTP server stops.

    BUGFIX: this handler was previously also named ``after_end``, which
    both mis-described when it runs and was silently shadowed at module
    level by the later ``after_end`` definition. The listener itself was
    unaffected (sanic registers the function at decoration time), but the
    duplicate name made the module misleading to read and debug.
    """
    bye()
@_app.listener('after_server_stop')
def after_end(app, loop):
    # Flush and close the logging machinery once the server has fully
    # stopped.
    shutdown()
def redirect_sanic_logs():
    """Route sanic's own loggers through fable's logging handlers."""
    for logger, channel in ((sanic_log.log, 'sanic'),
                            (sanic_log.netlog, 'sanic.network')):
        # Drop sanic's default handlers, then attach ours.
        logger.handlers = []
        logger.addHandler(handler(channel))
def run():
    """Wire up static assets and log redirection, then start the server."""
    static_root = os.path.dirname(front.__file__)
    add_static(_app, config.root, static_root)
    redirect_sanic_logs()
    # log_config=None: logging was already configured via our handlers.
    _app.run(host=config.host, port=config.port, log_config=None)
# Allow running this module directly as a script.
if __name__ == '__main__':
    run()
| mit |
JianyuWang/nova | nova/api/openstack/compute/flavor_manage.py | 6 | 3946 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute.schemas import flavor_manage
from nova.api.openstack.compute.views import flavors as flavors_view
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import flavors
from nova import exception
# Extension alias; also used to derive the policy rule name below.
ALIAS = "os-flavor-manage"
# Policy check helper: raises when the caller is not authorized to
# manage flavors.
authorize = extensions.os_compute_authorizer(ALIAS)
class FlavorManageController(wsgi.Controller):
    """The Flavor Lifecycle API controller for the OpenStack API."""
    _view_builder_class = flavors_view.ViewBuilderV21

    def __init__(self):
        super(FlavorManageController, self).__init__()

    # NOTE(oomichi): Return 202 for backwards compatibility but should be
    # 204 as this operation complete the deletion of aggregate resource and
    # return no response body.
    @wsgi.response(202)
    @extensions.expected_errors((404))
    @wsgi.action("delete")
    def _delete(self, req, id):
        """Delete the flavor identified by the given flavor id.

        Responds 404 when no non-deleted flavor matches ``id``.
        """
        context = req.environ['nova.context']
        authorize(context)

        try:
            flavor = flavors.get_flavor_by_flavor_id(
                id, ctxt=context, read_deleted="no")
        except exception.FlavorNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        flavors.destroy(flavor['name'])

    # NOTE(oomichi): Return 200 for backwards compatibility but should be 201
    # as this operation complete the creation of flavor resource.
    @wsgi.action("create")
    @extensions.expected_errors((400, 409, 500))
    @validation.schema(flavor_manage.create)
    def _create(self, req, body):
        """Create a new flavor from the (schema-validated) request body.

        Responds 409 when the flavor name or id already exists and 500
        when the flavor cannot be created.
        """
        context = req.environ['nova.context']
        authorize(context)

        vals = body['flavor']

        name = vals['name']
        flavorid = vals.get('id')
        memory = vals['ram']
        vcpus = vals['vcpus']
        root_gb = vals['disk']
        ephemeral_gb = vals.get('OS-FLV-EXT-DATA:ephemeral', 0)
        swap = vals.get('swap', 0)
        rxtx_factor = vals.get('rxtx_factor', 1.0)
        is_public = vals.get('os-flavor-access:is_public', True)

        try:
            flavor = flavors.create(name, memory, vcpus, root_gb,
                                    ephemeral_gb=ephemeral_gb,
                                    flavorid=flavorid, swap=swap,
                                    rxtx_factor=rxtx_factor,
                                    is_public=is_public)
            # NOTE(gmann): For backward compatibility, non public flavor
            # access is not being added for created tenant. Ref -bug/1209101
            req.cache_db_flavor(flavor)
        except (exception.FlavorExists,
                exception.FlavorIdExists) as err:
            raise webob.exc.HTTPConflict(explanation=err.format_message())
        except exception.FlavorCreateFailed as err:
            raise webob.exc.HTTPInternalServerError(explanation=
                                                    err.format_message())

        return self._view_builder.show(req, flavor)
class FlavorManage(extensions.V21APIExtensionBase):
    """Flavor create/delete API support."""

    name = "FlavorManage"
    alias = ALIAS
    version = 1

    def get_controller_extensions(self):
        """Attach the flavor-manage controller to the 'flavors' resource."""
        ext = extensions.ControllerExtension(
            self, 'flavors', FlavorManageController())
        return [ext]

    def get_resources(self):
        """This extension adds no new top-level resources."""
        return []
| apache-2.0 |
takeshineshiro/cinder | cinder/volume/drivers/windows/vhdutils.py | 17 | 17323 | # Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VHD related operations.
Official VHD format specs can be retrieved at:
http://technet.microsoft.com/en-us/library/bb676673.aspx
See "Download the Specifications Without Registering"
Official VHDX format specs can be retrieved at:
http://www.microsoft.com/en-us/download/details.aspx?id=34750
VHD related Win32 API reference:
http://msdn.microsoft.com/en-us/library/windows/desktop/dd323700.aspx
"""
import ctypes
import os
# Bind the Win32 DLLs only on Windows so this module can still be imported
# (e.g. by the test suite or docs build) on other platforms.
if os.name == 'nt':
    from ctypes import wintypes
    kernel32 = ctypes.windll.kernel32
    virtdisk = ctypes.windll.virtdisk
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.windows import constants
LOG = logging.getLogger(__name__)
# ctypes definitions mirroring the Win32 virtdisk structures. They are only
# defined on Windows, where ctypes.wintypes is usable.
if os.name == 'nt':
    class Win32_GUID(ctypes.Structure):
        _fields_ = [("Data1", wintypes.DWORD),
                    ("Data2", wintypes.WORD),
                    ("Data3", wintypes.WORD),
                    ("Data4", wintypes.BYTE * 8)]

    class Win32_VIRTUAL_STORAGE_TYPE(ctypes.Structure):
        _fields_ = [
            ('DeviceId', wintypes.ULONG),
            ('VendorId', Win32_GUID)
        ]

    class Win32_RESIZE_VIRTUAL_DISK_PARAMETERS(ctypes.Structure):
        _fields_ = [
            ('Version', wintypes.DWORD),
            ('NewSize', ctypes.c_ulonglong)
        ]

    class Win32_OPEN_VIRTUAL_DISK_PARAMETERS_V1(ctypes.Structure):
        _fields_ = [
            ('Version', wintypes.DWORD),
            ('RWDepth', ctypes.c_ulong),
        ]

    class Win32_OPEN_VIRTUAL_DISK_PARAMETERS_V2(ctypes.Structure):
        _fields_ = [
            ('Version', wintypes.DWORD),
            ('GetInfoOnly', wintypes.BOOL),
            ('ReadOnly', wintypes.BOOL),
            ('ResiliencyGuid', Win32_GUID)
        ]

    class Win32_MERGE_VIRTUAL_DISK_PARAMETERS(ctypes.Structure):
        _fields_ = [
            ('Version', wintypes.DWORD),
            ('MergeDepth', ctypes.c_ulong)
        ]

    class Win32_CREATE_VIRTUAL_DISK_PARAMETERS(ctypes.Structure):
        _fields_ = [
            ('Version', wintypes.DWORD),
            ('UniqueId', Win32_GUID),
            ('MaximumSize', ctypes.c_ulonglong),
            ('BlockSizeInBytes', wintypes.ULONG),
            ('SectorSizeInBytes', wintypes.ULONG),
            ('PhysicalSectorSizeInBytes', wintypes.ULONG),
            ('ParentPath', wintypes.LPCWSTR),
            ('SourcePath', wintypes.LPCWSTR),
            ('OpenFlags', wintypes.DWORD),
            ('ParentVirtualStorageType', Win32_VIRTUAL_STORAGE_TYPE),
            ('SourceVirtualStorageType', Win32_VIRTUAL_STORAGE_TYPE),
            ('ResiliencyGuid', Win32_GUID)
        ]

    class Win32_SIZE(ctypes.Structure):
        _fields_ = [("VirtualSize", wintypes.ULARGE_INTEGER),
                    ("PhysicalSize", wintypes.ULARGE_INTEGER),
                    ("BlockSize", wintypes.ULONG),
                    ("SectorSize", wintypes.ULONG)]

    class Win32_PARENT_LOCATION(ctypes.Structure):
        _fields_ = [('ParentResolved', wintypes.BOOL),
                    ('ParentLocationBuffer', wintypes.WCHAR * 512)]

    class Win32_PHYSICAL_DISK(ctypes.Structure):
        _fields_ = [("LogicalSectorSize", wintypes.ULONG),
                    ("PhysicalSectorSize", wintypes.ULONG),
                    ("IsRemote", wintypes.BOOL)]

    # Union: which member is meaningful depends on the info code that was
    # requested via GET_VIRTUAL_DISK_INFO_*.
    class Win32_VHD_INFO(ctypes.Union):
        _fields_ = [("Size", Win32_SIZE),
                    ("Identifier", Win32_GUID),
                    ("ParentLocation", Win32_PARENT_LOCATION),
                    ("ParentIdentifier", Win32_GUID),
                    ("ParentTimestamp", wintypes.ULONG),
                    ("VirtualStorageType", Win32_VIRTUAL_STORAGE_TYPE),
                    ("ProviderSubtype", wintypes.ULONG),
                    ("Is4kAligned", wintypes.BOOL),
                    ("PhysicalDisk", Win32_PHYSICAL_DISK),
                    ("VhdPhysicalSectorSize", wintypes.ULONG),
                    ("SmallestSafeVirtualSize",
                     wintypes.ULARGE_INTEGER),
                    ("FragmentationPercentage", wintypes.ULONG)]

    class Win32_GET_VIRTUAL_DISK_INFO_PARAMETERS(ctypes.Structure):
        _fields_ = [("VERSION", wintypes.UINT),
                    ("VhdInfo", Win32_VHD_INFO)]

    class Win32_SET_VIRTUAL_DISK_INFO_PARAMETERS(ctypes.Structure):
        _fields_ = [
            ('Version', wintypes.DWORD),
            ('ParentFilePath', wintypes.LPCWSTR)
        ]

# Constants mirrored from the virtdisk / winerror headers.
VIRTUAL_STORAGE_TYPE_DEVICE_ISO = 1
VIRTUAL_STORAGE_TYPE_DEVICE_VHD = 2
VIRTUAL_STORAGE_TYPE_DEVICE_VHDX = 3
VIRTUAL_DISK_ACCESS_NONE = 0
VIRTUAL_DISK_ACCESS_ALL = 0x003f0000
VIRTUAL_DISK_ACCESS_CREATE = 0x00100000
VIRTUAL_DISK_ACCESS_GET_INFO = 0x80000
OPEN_VIRTUAL_DISK_FLAG_NONE = 0
OPEN_VIRTUAL_DISK_FLAG_NO_PARENTS = 1
OPEN_VIRTUAL_DISK_VERSION_1 = 1
OPEN_VIRTUAL_DISK_VERSION_2 = 2
RESIZE_VIRTUAL_DISK_FLAG_NONE = 0
RESIZE_VIRTUAL_DISK_VERSION_1 = 1
CREATE_VIRTUAL_DISK_VERSION_2 = 2
CREATE_VHD_PARAMS_DEFAULT_BLOCK_SIZE = 0
CREATE_VIRTUAL_DISK_FLAG_NONE = 0
CREATE_VIRTUAL_DISK_FLAG_FULL_PHYSICAL_ALLOCATION = 1
MERGE_VIRTUAL_DISK_VERSION_1 = 1
MERGE_VIRTUAL_DISK_FLAG_NONE = 0x00000000
GET_VIRTUAL_DISK_INFO_SIZE = 1
GET_VIRTUAL_DISK_INFO_PARENT_LOCATION = 3
GET_VIRTUAL_DISK_INFO_VIRTUAL_STORAGE_TYPE = 6
GET_VIRTUAL_DISK_INFO_PROVIDER_SUBTYPE = 7
SET_VIRTUAL_DISK_INFO_PARENT_PATH = 1
FORMAT_MESSAGE_FROM_SYSTEM = 0x00001000
FORMAT_MESSAGE_ALLOCATE_BUFFER = 0x00000100
FORMAT_MESSAGE_IGNORE_INSERTS = 0x00000200
ERROR_VHD_INVALID_TYPE = 0xC03A001B
class VHDUtils(object):
def __init__(self):
self._ext_device_id_map = {
'vhd': VIRTUAL_STORAGE_TYPE_DEVICE_VHD,
'vhdx': VIRTUAL_STORAGE_TYPE_DEVICE_VHDX}
self.create_virtual_disk_flags = {
constants.VHD_TYPE_FIXED: (
CREATE_VIRTUAL_DISK_FLAG_FULL_PHYSICAL_ALLOCATION),
constants.VHD_TYPE_DYNAMIC: CREATE_VIRTUAL_DISK_FLAG_NONE
}
self._vhd_info_members = {
GET_VIRTUAL_DISK_INFO_SIZE: 'Size',
GET_VIRTUAL_DISK_INFO_PARENT_LOCATION: 'ParentLocation',
GET_VIRTUAL_DISK_INFO_VIRTUAL_STORAGE_TYPE:
'VirtualStorageType',
GET_VIRTUAL_DISK_INFO_PROVIDER_SUBTYPE: 'ProviderSubtype',
}
if os.name == 'nt':
self._msft_vendor_id = (
self.get_WIN32_VIRTUAL_STORAGE_TYPE_VENDOR_MSFT())
def _run_and_check_output(self, func, *args, **kwargs):
"""Convenience helper method for running Win32 API methods."""
ignored_error_codes = kwargs.pop('ignored_error_codes', [])
ret_val = func(*args, **kwargs)
# The VHD Win32 API functions return non-zero error codes
# in case of failure.
if ret_val and ret_val not in ignored_error_codes:
error_message = self._get_error_message(ret_val)
func_name = getattr(func, '__name__', '')
err = (_("Executing Win32 API function %(func_name)s failed. "
"Error code: %(error_code)s. "
"Error message: %(error_message)s") %
{'func_name': func_name,
'error_code': ret_val,
'error_message': error_message})
LOG.exception(err)
raise exception.VolumeBackendAPIException(err)
@staticmethod
def _get_error_message(error_code):
message_buffer = ctypes.c_char_p()
kernel32.FormatMessageA(
FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER |
FORMAT_MESSAGE_IGNORE_INSERTS,
None, error_code, 0, ctypes.byref(message_buffer), 0, None)
error_message = message_buffer.value
kernel32.LocalFree(message_buffer)
return error_message
@staticmethod
def get_WIN32_VIRTUAL_STORAGE_TYPE_VENDOR_MSFT():
guid = Win32_GUID()
guid.Data1 = 0xec984aec
guid.Data2 = 0xa0f9
guid.Data3 = 0x47e9
ByteArray8 = wintypes.BYTE * 8
guid.Data4 = ByteArray8(0x90, 0x1f, 0x71, 0x41, 0x5a, 0x66, 0x34, 0x5b)
return guid
def _open(self, vhd_path, open_flag=OPEN_VIRTUAL_DISK_FLAG_NONE,
open_access_mask=VIRTUAL_DISK_ACCESS_ALL,
open_params=0):
device_id = self._get_device_id_by_path(vhd_path)
vst = Win32_VIRTUAL_STORAGE_TYPE()
vst.DeviceId = device_id
vst.VendorId = self._msft_vendor_id
handle = wintypes.HANDLE()
self._run_and_check_output(virtdisk.OpenVirtualDisk,
ctypes.byref(vst),
ctypes.c_wchar_p(vhd_path),
open_access_mask,
open_flag,
open_params,
ctypes.byref(handle))
return handle
def _close(self, handle):
kernel32.CloseHandle(handle)
def _get_device_id_by_path(self, vhd_path):
ext = os.path.splitext(vhd_path)[1][1:].lower()
device_id = self._ext_device_id_map.get(ext)
if not device_id:
raise exception.VolumeBackendAPIException(
_("Unsupported virtual disk extension: %s") % ext)
return device_id
def resize_vhd(self, vhd_path, new_max_size):
handle = self._open(vhd_path)
params = Win32_RESIZE_VIRTUAL_DISK_PARAMETERS()
params.Version = RESIZE_VIRTUAL_DISK_VERSION_1
params.NewSize = new_max_size
try:
self._run_and_check_output(virtdisk.ResizeVirtualDisk,
handle,
RESIZE_VIRTUAL_DISK_FLAG_NONE,
ctypes.byref(params),
None)
finally:
self._close(handle)
def merge_vhd(self, vhd_path):
open_params = Win32_OPEN_VIRTUAL_DISK_PARAMETERS_V1()
open_params.Version = OPEN_VIRTUAL_DISK_VERSION_1
open_params.RWDepth = 2
handle = self._open(vhd_path,
open_params=ctypes.byref(open_params))
params = Win32_MERGE_VIRTUAL_DISK_PARAMETERS()
params.Version = MERGE_VIRTUAL_DISK_VERSION_1
params.MergeDepth = 1
try:
self._run_and_check_output(virtdisk.MergeVirtualDisk,
handle,
MERGE_VIRTUAL_DISK_FLAG_NONE,
ctypes.byref(params),
None)
finally:
self._close(handle)
def _create_vhd(self, new_vhd_path, new_vhd_type, src_path=None,
max_internal_size=0, parent_path=None):
new_device_id = self._get_device_id_by_path(new_vhd_path)
vst = Win32_VIRTUAL_STORAGE_TYPE()
vst.DeviceId = new_device_id
vst.VendorId = self._msft_vendor_id
params = Win32_CREATE_VIRTUAL_DISK_PARAMETERS()
params.Version = CREATE_VIRTUAL_DISK_VERSION_2
params.UniqueId = Win32_GUID()
params.BlockSizeInBytes = CREATE_VHD_PARAMS_DEFAULT_BLOCK_SIZE
params.SectorSizeInBytes = 0x200
params.PhysicalSectorSizeInBytes = 0x200
params.OpenFlags = OPEN_VIRTUAL_DISK_FLAG_NONE
params.ResiliencyGuid = Win32_GUID()
params.MaximumSize = max_internal_size
params.ParentPath = parent_path
params.ParentVirtualStorageType = Win32_VIRTUAL_STORAGE_TYPE()
if src_path:
src_device_id = self._get_device_id_by_path(src_path)
params.SourcePath = src_path
params.SourceVirtualStorageType = Win32_VIRTUAL_STORAGE_TYPE()
params.SourceVirtualStorageType.DeviceId = src_device_id
params.SourceVirtualStorageType.VendorId = self._msft_vendor_id
handle = wintypes.HANDLE()
create_virtual_disk_flag = self.create_virtual_disk_flags.get(
new_vhd_type)
try:
self._run_and_check_output(virtdisk.CreateVirtualDisk,
ctypes.byref(vst),
ctypes.c_wchar_p(new_vhd_path),
VIRTUAL_DISK_ACCESS_NONE,
None,
create_virtual_disk_flag,
0,
ctypes.byref(params),
None,
ctypes.byref(handle))
finally:
self._close(handle)
def get_vhd_info(self, vhd_path, info_members=None):
vhd_info = {}
info_members = info_members or self._vhd_info_members
handle = self._open(vhd_path,
open_access_mask=VIRTUAL_DISK_ACCESS_GET_INFO)
try:
for member in info_members:
info = self._get_vhd_info_member(handle, member)
vhd_info.update(info)
finally:
self._close(handle)
return vhd_info
def _get_vhd_info_member(self, vhd_file, info_member):
virt_disk_info = Win32_GET_VIRTUAL_DISK_INFO_PARAMETERS()
virt_disk_info.VERSION = ctypes.c_uint(info_member)
infoSize = ctypes.sizeof(virt_disk_info)
virtdisk.GetVirtualDiskInformation.restype = wintypes.DWORD
# Note(lpetrut): If the vhd has no parent image, this will
# return an error. No need to raise an exception in this case.
ignored_error_codes = []
if info_member == GET_VIRTUAL_DISK_INFO_PARENT_LOCATION:
ignored_error_codes.append(ERROR_VHD_INVALID_TYPE)
self._run_and_check_output(virtdisk.GetVirtualDiskInformation,
vhd_file,
ctypes.byref(ctypes.c_ulong(infoSize)),
ctypes.byref(virt_disk_info),
0,
ignored_error_codes=ignored_error_codes)
return self._parse_vhd_info(virt_disk_info, info_member)
def _parse_vhd_info(self, virt_disk_info, info_member):
vhd_info = {}
vhd_info_member = self._vhd_info_members[info_member]
info = getattr(virt_disk_info.VhdInfo, vhd_info_member)
if hasattr(info, '_fields_'):
for field in info._fields_:
vhd_info[field[0]] = getattr(info, field[0])
else:
vhd_info[vhd_info_member] = info
return vhd_info
def get_vhd_size(self, vhd_path):
"""Return vhd size.
Returns a dict containing the virtual size, physical size,
block size and sector size of the vhd.
"""
size = self.get_vhd_info(vhd_path,
[GET_VIRTUAL_DISK_INFO_SIZE])
return size
def get_vhd_parent_path(self, vhd_path):
vhd_info = self.get_vhd_info(vhd_path,
[GET_VIRTUAL_DISK_INFO_PARENT_LOCATION])
parent_path = vhd_info['ParentLocationBuffer']
if len(parent_path) > 0:
return parent_path
return None
def create_dynamic_vhd(self, path, max_internal_size):
self._create_vhd(path,
constants.VHD_TYPE_DYNAMIC,
max_internal_size=max_internal_size)
def convert_vhd(self, src, dest,
vhd_type=constants.VHD_TYPE_DYNAMIC):
self._create_vhd(dest, vhd_type, src_path=src)
def create_differencing_vhd(self, path, parent_path):
self._create_vhd(path,
constants.VHD_TYPE_DIFFERENCING,
parent_path=parent_path)
    def reconnect_parent(self, child_path, parent_path):
        """Point the differencing image at ``child_path`` to a new parent.

        :param child_path: path of the differencing VHD/VHDX to update.
        :param parent_path: new parent image path to record in its metadata.
        """
        open_params = Win32_OPEN_VIRTUAL_DISK_PARAMETERS_V2()
        open_params.Version = OPEN_VIRTUAL_DISK_VERSION_2
        # GetInfoOnly must be False so the handle permits metadata updates.
        open_params.GetInfoOnly = False
        # NO_PARENTS: open the child without resolving its (possibly broken)
        # parent chain, which is exactly the case this method repairs.
        handle = self._open(
            child_path,
            open_flag=OPEN_VIRTUAL_DISK_FLAG_NO_PARENTS,
            open_access_mask=VIRTUAL_DISK_ACCESS_NONE,
            open_params=ctypes.byref(open_params))
        params = Win32_SET_VIRTUAL_DISK_INFO_PARAMETERS()
        params.Version = SET_VIRTUAL_DISK_INFO_PARENT_PATH
        params.ParentFilePath = parent_path
        try:
            self._run_and_check_output(virtdisk.SetVirtualDiskInformation,
                                       handle,
                                       ctypes.byref(params))
        finally:
            # Always release the handle, even if the update fails.
            self._close(handle)
| apache-2.0 |
arborh/tensorflow | tensorflow/python/training/saving/saveable_object_util.py | 7 | 14161 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for working with and creating SaveableObjects."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.eager import context
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.training.saving import saveable_object
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
# Op names which identify variable reads which should be saved.
_VARIABLE_OPS = set(["Variable",
"VariableV2",
"AutoReloadVariable",
"VarHandleOp",
"ReadVariableOp"])
def set_cpu0(device_string):
  """Creates a new device string based on `device_string` but using /CPU:0.

  If the device is already on /CPU:0, this is a no-op.

  Args:
    device_string: A device string.

  Returns:
    A device string.
  """
  spec = pydev.DeviceSpec.from_string(device_string)
  return spec.replace(device_type="CPU", device_index=0).to_string()
class ReferenceVariableSaveable(saveable_object.SaveableObject):
  """SaveableObject implementation that handles reference variables."""

  def __init__(self, var, slice_spec, name):
    save_spec = saveable_object.SaveSpec(var, slice_spec, name,
                                         dtype=var.dtype)
    super(ReferenceVariableSaveable, self).__init__(var, [save_spec], name)

  def restore(self, restored_tensors, restored_shapes):
    tensor = restored_tensors[0]
    if restored_shapes is None:
      # Shape validation is only meaningful when no reshape was requested and
      # the variable's static shape is fully known.
      validate = self.op.get_shape().is_fully_defined()
    else:
      tensor = array_ops.reshape(tensor, restored_shapes[0])
      validate = False
    return state_ops.assign(self.op, tensor, validate_shape=validate)
class ResourceVariableSaveable(saveable_object.SaveableObject):
  """SaveableObject implementation that handles ResourceVariables."""
  def __init__(self, var, slice_spec, name):
    # Remember where the variable lives so restore() can place the
    # restored tensor back on the same device.
    self._var_device = var.device
    self._var_shape = var.shape
    if isinstance(var, ops.Tensor):
      # `var` is a read of a resource variable; its op's first input is
      # the variable handle.
      self.handle_op = var.op.inputs[0]
      tensor = var
    elif resource_variable_ops.is_resource_variable(var):
      def _read_variable_closure(v):
        # Defer the read until save time so the checkpoint sees the
        # variable's current value.
        def f():
          with ops.device(v.device):
            x = v.read_value()
            # To allow variables placed on non-CPU devices to be checkpointed,
            # we copy them to CPU on the same machine first.
            with ops.device("/device:CPU:0"):
              return array_ops.identity(x)
        return f
      self.handle_op = var.handle
      tensor = _read_variable_closure(var)
    else:
      raise ValueError(
          "Saveable is neither a resource variable nor a read operation."
          " Got: %s" % repr(var))
    spec = saveable_object.SaveSpec(tensor, slice_spec, name,
                                    dtype=var.dtype, device=var.device)
    super(ResourceVariableSaveable, self).__init__(var, [spec], name)
  def restore(self, restored_tensors, restored_shapes):
    restored_tensor = restored_tensors[0]
    if restored_shapes is not None:
      restored_tensor = array_ops.reshape(restored_tensor, restored_shapes[0])
    # Copy the restored tensor to the variable's device.
    with ops.device(self._var_device):
      restored_tensor = array_ops.identity(restored_tensor)
    return resource_variable_ops.shape_safe_assign_variable_handle(
        self.handle_op, self._var_shape, restored_tensor)
def _tensor_comes_from_variable(v):
  """Returns True if `v` is a Tensor produced by a variable-related op."""
  if not isinstance(v, ops.Tensor):
    return False
  return v.op.type in _VARIABLE_OPS
def saveable_objects_for_op(op, name):
  """Create `SaveableObject`s from an operation.

  Args:
    op: A variable, operation, or SaveableObject to coerce into a
      SaveableObject.
    name: A string name for the SaveableObject.

  Yields:
    `SaveableObject`s which together save/restore `op`.

  Raises:
    TypeError: If `name` is not a string.
    ValueError: For operations with no known conversion to SaveableObject.
  """
  if not isinstance(name, six.string_types):
    raise TypeError(
        "names_to_saveables must be a dict mapping string names to "
        "trackable operations. Name is not a string: %s" % name)
  if isinstance(op, saveable_object.SaveableObject):
    yield op
  elif isinstance(op, (list, tuple, variables.PartitionedVariable)):
    if isinstance(op, variables.PartitionedVariable):
      op = list(op)
    # A set of slices: all must be slices of the same underlying tensor.
    slice_name = None
    # pylint: disable=protected-access
    for variable in op:
      if isinstance(variable, saveable_object.SaveableObject):
        yield variable
        continue
      if not isinstance(variable, variables.Variable):
        raise ValueError("Slices must all be Variables: %s" % variable)
      if not variable._save_slice_info:
        raise ValueError("Slices must all be slices: %s" % variable)
      if slice_name is None:
        slice_name = variable._save_slice_info.full_name
      elif slice_name != variable._save_slice_info.full_name:
        raise ValueError(
            "Slices must all be from the same tensor: %s != %s" %
            (slice_name, variable._save_slice_info.full_name))
      if variable.op.type in ["Variable", "VariableV2",
                              "AutoReloadVariable"]:
        yield ReferenceVariableSaveable(
            variable, variable._save_slice_info.spec, name)
      else:
        yield ResourceVariableSaveable(
            variable, variable._save_slice_info.spec, name)
    # pylint: enable=protected-access
  elif isinstance(op, trackable.Trackable) and not isinstance(
      op, variables.Variable):
    # pylint: disable=protected-access
    for attr, factory in op._gather_saveables_for_checkpoint().items():
      if attr == trackable.VARIABLE_VALUE_KEY:
        # Keep original name for classes masquerading as variables.
        full_name = name
      else:
        full_name = name + "_" + attr
      # Bind the factory's product to a fresh name instead of rebinding `op`:
      # the original code shadowed the function argument (and the recursion's
      # loop target) with `op`, which is error-prone during iteration.
      saveable_op = factory(full_name) if callable(factory) else factory
      for saveable in saveable_objects_for_op(saveable_op, saveable_op.name):
        yield saveable
    # pylint: enable=protected-access
  else:
    # A variable or tensor.
    if isinstance(op, resource_variable_ops.BaseResourceVariable):
      # pylint: disable=protected-access
      if op._in_graph_mode:
        variable = op._graph_element
      else:
        variable = op
      # pylint: enable=protected-access
      yield ResourceVariableSaveable(variable, "", name)
    else:
      if context.executing_eagerly():
        raise ValueError("Can only save/restore ResourceVariables when "
                         "executing eagerly, got type: %s." % type(op))
      variable = ops.convert_to_tensor(op, as_ref=True)
      if not _tensor_comes_from_variable(variable):
        raise TypeError("names_to_saveables must be a dict mapping string "
                        "names to Tensors/Variables. Not a variable: %s" %
                        variable)
      if variable.op.type in ["Variable", "VariableV2",
                              "AutoReloadVariable"]:
        yield ReferenceVariableSaveable(variable, "", name)
      else:
        yield ResourceVariableSaveable(
            variable, "", name)
def op_list_to_dict(op_list, convert_variable_to_tensor=True):
  """Create a dictionary of names to operation lists.
  Args:
    op_list: A (nested) list, tuple, or set of Variables or SaveableObjects.
    convert_variable_to_tensor: Whether or not to convert single Variables
      with no slice info into Tensors.
  Returns:
    A dictionary of names to the operations that must be saved under
    that name. Variables with save_slice_info are grouped together under the
    same key in no particular order.
  Raises:
    TypeError: If the type of op_list or its elements is not supported.
    ValueError: If at least two saveables share the same name.
  """
  if not isinstance(op_list, (list, tuple, set)):
    raise TypeError("Variables to save should be passed in a dict or a "
                    "list: %s" % op_list)
  # List casting is necessary to support sets.
  op_list = nest.flatten(list(op_list))
  # When ResourceVariables are converted to Tensors, read ops are added to the
  # graph. Sorting the op_list ensures that the resulting graph is always
  # constructed in a deterministic way:
  op_list = sorted(op_list, key=lambda x: x.name)
  names_to_saveables = {}
  # pylint: disable=protected-access
  for var in op_list:
    resource_or_ref_variable = (
        isinstance(var, resource_variable_ops.BaseResourceVariable) or
        isinstance(var, variables.RefVariable))
    if isinstance(var, saveable_object.SaveableObject):
      names_to_saveables[var.name] = var
    elif isinstance(var, variables.PartitionedVariable):
      if var.name in names_to_saveables:
        raise ValueError("At least two variables have the same name: %s" %
                         var.name)
      names_to_saveables[var.name] = var
    elif isinstance(var, variables.Variable) and var._save_slice_info:
      # Partitioned variable slice: group all slices under the full name.
      name = var._save_slice_info.full_name
      if name in names_to_saveables:
        if not isinstance(names_to_saveables[name], list):
          raise ValueError("Mixing slices and non-slices with the same name: "
                           "%s" % name)
        names_to_saveables[name].append(var)
      else:
        names_to_saveables[name] = [var]
    elif isinstance(var, trackable.Trackable) and not resource_or_ref_variable:
      # Generic trackable object: recurse on the saveables it exposes.
      trackable_saveables = [
          (factory() if callable(factory) else factory)
          for factory in var._gather_saveables_for_checkpoint().values()]
      names_to_saveables.update(
          op_list_to_dict(trackable_saveables))
    else:
      # Variables (reference and resource) have an _in_graph_mode property
      # indicating whether they were created in a graph building context. We
      # also get Tensors when graph building, which do not have this property.
      if not getattr(var, "_in_graph_mode", True):
        if not isinstance(var, resource_variable_ops.BaseResourceVariable):
          raise ValueError(
              "Can only save/restore ResourceVariables when eager execution "
              "is enabled, type: %s." % type(var))
        set_var = names_to_saveables.setdefault(var._shared_name, var)
        if set_var is not var:
          raise ValueError(
              ("Two different ResourceVariable objects with the same "
               "shared_name '%s' were passed to the Saver. This likely means "
               "that they were created in different Graphs or isolation "
               "contexts, and may not be checkpointed together.") %
              (var._shared_name,))
      else:
        if convert_variable_to_tensor:
          if isinstance(var, resource_variable_ops.BaseResourceVariable):
            var = var._graph_element  # pylint: disable=protected-access
          else:
            var = ops.convert_to_tensor(var, as_ref=True)
          if not _tensor_comes_from_variable(var):
            raise TypeError("Variable to save is not a Variable: %s" % var)
        # For resource variable reads, key by the variable's own op name.
        if var.op.type == "ReadVariableOp":
          name = var.op.inputs[0].op.name
        else:
          name = var.op.name
        if name in names_to_saveables:
          raise ValueError("At least two variables have the same name: %s" %
                           name)
        names_to_saveables[name] = var
  # pylint: enable=protected-access
  return names_to_saveables
def _add_saveable(saveables, seen_ops, saveable):
"""Adds the saveable to the saveables list.
Args:
saveables: List to append the SaveableObject to.
seen_ops: Set of the ops of the saveables already processed. Used to
check that each saveable is only saved once.
saveable: The saveable.
Raises:
ValueError: If the saveable has already been processed.
"""
if saveable.op in seen_ops:
raise ValueError("The same saveable will be restored with two names: %s" %
saveable.name)
saveables.append(saveable)
seen_ops.add(saveable.op)
def validate_and_slice_inputs(names_to_saveables):
  """Returns the variables and names that will be used for a Saver.

  Args:
    names_to_saveables: A dict (k, v) where k is the name of an operation and
      v is an operation to save or a BaseSaverBuilder.Saver.

  Returns:
    A list of SaveableObjects.

  Raises:
    TypeError: If any of the keys are not strings or any of the
      values are not one of Tensor or Variable or a trackable operation.
    ValueError: If the same operation is given in more than one value
      (this also applies to slices of SlicedVariables).
  """
  if not isinstance(names_to_saveables, dict):
    names_to_saveables = op_list_to_dict(names_to_saveables)
  saveables = []
  seen_ops = object_identity.ObjectIdentitySet()
  # Iterate names in sorted order; ops themselves must not be compared.
  for name in sorted(names_to_saveables):
    for converted in saveable_objects_for_op(names_to_saveables[name], name):
      _add_saveable(saveables, seen_ops, converted)
  return saveables
| apache-2.0 |
TOC-Shard/moul-scripts | Python/Negilahn.py | 13 | 2134 | # -*- coding: utf-8 -*-
""" *==LICENSE==*
CyanWorlds.com Engine - MMOG client, server and tools
Copyright (C) 2011 Cyan Worlds, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Additional permissions under GNU GPL version 3 section 7
If you modify this Program, or any covered work, by linking or
combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK,
NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent
JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK
(or a modified version of those libraries),
containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA,
PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG
JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the
licensors of this Program grant you additional
permission to convey the resulting work. Corresponding Source for a
non-source form of such a combination shall include the source code for
the parts of OpenSSL and IJG JPEG Library used as well as that of the covered
work.
You can contact Cyan Worlds, Inc. by email legal@cyan.com
or by snail mail at:
Cyan Worlds, Inc.
14617 N Newport Hwy
Mead, WA 99021
*==LICENSE==* """
"""
Module: Negilahn.py
Age: Negilahn
Date: October 2003
AgeSDL hooks for Negilahn
"""
from Plasma import *
from PlasmaTypes import *
class Negilahn(ptResponder):
    """AgeSDL responder script for the Negilahn age.

    Registers the script's id/version with the Plasma engine; handles no
    notifications itself.
    """
    def __init__(self):
        ptResponder.__init__(self)
        # Script identifiers used by the Plasma responder registry.
        self.id = 5239
        self.version = 1
    def OnNotify(self,state,id,events):
        """Engine notification callback; intentionally a no-op here."""
        pass
| gpl-3.0 |
ywangd/stash | bin/rm.py | 1 | 2852 | # -*- coding: utf-8 -*-
"""
Remove (delete) files and directories.
usage: rm.py [-h] [-r] [-i] [-f] [-v] paths [paths ...]
positional arguments:
paths files or directories to delete
optional arguments:
-h, --help show this help message and exit
-r, --recursive remove directory and its contents recursively
-i, --interactive prompt before every removal
-f, --force attempt to delete without confirmation or warning due to
permission or file existence (override -i)
-v, --verbose explain what is being done
"""
from __future__ import print_function
import os
import sys
import shutil
from argparse import ArgumentParser
from six.moves import input
def main(args):
    """Parse rm-style arguments and delete the listed paths.

    Files are removed directly; directories only with -r/--recursive.
    -i prompts before each removal, -f suppresses prompts and error
    messages, -v reports each successful deletion.

    :param args: argv-style list of arguments (without the program name).
    """
    ap = ArgumentParser()
    ap.add_argument(
        '-r',
        '--recursive',
        action="store_true",
        default=False,
        help='remove directory and its contents recursively'
    )
    ap.add_argument('-i', '--interactive', action="store_true", default=False, help='prompt before every removal')
    ap.add_argument(
        '-f',
        '--force',
        action='store_true',
        default=False,
        help='attempt to delete without confirmation or warning due to permission or file existence (override -i)'
    )
    ap.add_argument('-v', '--verbose', action="store_true", default=False, help='explain what is being done')
    ap.add_argument('paths', action="store", nargs='+', help='files or directories to delete')
    ns = ap.parse_args(args)
    # Set up the verbosity-dependent reporting function.
    if ns.verbose:
        def printp(text):
            print(text)
    else:
        def printp(text):
            pass
    # -f overrides -i: never prompt when force is requested.
    if ns.interactive and not ns.force:
        def prompt(target):
            result = input('Delete %s? [Y,n]: ' % target)
            return result in ('Y', 'y')
    else:
        def prompt(target):
            return True
    for path in ns.paths:
        if os.path.isfile(path):
            if prompt(path):
                try:
                    os.remove(path)
                    printp('%s has been deleted' % path)
                except OSError:
                    # Narrowed from a bare `except:`: only filesystem errors
                    # are expected here, and a bare except would also swallow
                    # KeyboardInterrupt/SystemExit.
                    if not ns.force:
                        print('%s: unable to remove' % path)
        elif os.path.isdir(path) and ns.recursive:
            if prompt(path):
                try:
                    shutil.rmtree(path)
                    printp('%s has been deleted' % path)
                except OSError:
                    if not ns.force:
                        print('%s: unable to remove' % path)
        elif os.path.isdir(path):
            print('%s: is a directory' % path)
        else:
            if not ns.force:
                print('%s: does not exist' % path)
if __name__ == '__main__':
main(sys.argv[1:])
| mit |
charany1/Bookie | bookie/tests/test_utils/test_readable.py | 6 | 6931 | """Test the fulltext implementation"""
import logging
import os
import transaction
import urllib
from pyramid import testing
from unittest import TestCase
from bookie.lib.readable import ReadContent
from bookie.lib.readable import ReadUrl
from bookie.models import DBSession
from bookie.tests import empty_db
LOG = logging.getLogger(__file__)
API_KEY = None
class TestReadable(TestCase):
    """Test that our fulltext classes function"""
    # NOTE(review): most of these tests fetch live external URLs
    # (lococast.net, ndftz.com, wordpress.com) and will fail without network
    # access or if those sites change — consider mocking; TODO confirm.
    def test_url_content(self):
        """Test that we set the correct status"""
        url = 'http://lococast.net/archives/475'
        read = ReadUrl.parse(url)
        self.assertTrue(
            read.status == 200, "The status is 200" + str(read.status))
        self.assertTrue(not read.is_image(), "The content is not an image")
        self.assertTrue(read.content is not None, "Content should not be none")
        self.assertTrue(
            'Lococast' in read.content,
            "The word Lococast is in the content: " + str(read.content))
    def test_404_url(self):
        """Test that we get the proper errors in a missing url"""
        url = 'http://lococast.net/archives/001'
        read = ReadUrl.parse(url)
        self.assertTrue(
            read.status == 404, "The status is 404: " + str(read.status))
        self.assertTrue(
            not read.is_image(), "The content is not an image")
        self.assertTrue(
            read.content is None, "Content should be none")
    def test_given_content(self):
        """Test that we can parse out given html content ahead of time"""
        file_path = os.path.dirname(__file__)
        # NOTE(review): the file handle is never closed explicitly; tolerable
        # in a test, relies on interpreter cleanup.
        html_content = open(os.path.join(file_path, 'readable_sample.html'))
        read = ReadContent.parse(html_content)
        self.assertTrue(
            read.status == 1, "The status is 1: " + str(read.status))
        self.assertTrue(not read.is_image(), "The content is not an image")
        self.assertTrue(read.content is not None, "Content should not be none")
        self.assertTrue(
            'Bookie' in read.content,
            u"The word Bookie is in the content: " + unicode(read.content))
    def test_non_net_url(self):
        """I might be bookmarking something internal bookie can't access"""
        test_url = "http://r2"
        read = ReadUrl.parse(test_url)
        # 901 is the library's "could not reach host" pseudo-status.
        self.assertTrue(
            read.status == 901,
            "The status is 901: " + str(read.status))
        self.assertTrue(not read.is_image(), "The content is not an image")
        self.assertTrue(
            read.content is None,
            "Content should be none: " + str(read.content))
    def test_image_url(self):
        """Verify we don't store, but just tag an image url"""
        img_url = 'http://www.ndftz.com/nickelanddime.png'
        read = ReadUrl.parse(img_url)
        self.assertTrue(
            read.status == 200, "The status is 200: " + str(read.status))
        self.assertTrue(
            read.content is None, "Content should be none: ")
    def test_nonworking_url(self):
        """Testing some urls we know we had issues with initially"""
        urls = {
            'CouchSurfing': ('http://allthatiswrong.wordpress.com/2010/01'
                             '/24/a-criticism-of-couchsurfing-and-review-o'
                             'f-alternatives/#problems'),
            # 'Electronic': ('https://www.fbo.gov/index?s=opportunity&mode='
            #                'form&tab=core&id=dd11f27254c796f80f2aadcbe415'
            #                '8407'),
        }
        for key, url in urls.iteritems():
            read = ReadUrl.parse(url)
            self.assertTrue(
                read.status == 200, "The status is 200: " + str(read.status))
            self.assertTrue(
                read.content is not None, "Content should not be none: ")
class TestReadableFulltext(TestCase):
    """Test that our fulltext index function"""
    # NOTE(review): these tests depend on a configured test database/WSGI app
    # (BOOKIE_TEST_INI) and on module-global API_KEY state shared across tests.
    def setUp(self):
        """Setup Tests"""
        from pyramid.paster import get_app
        from bookie.tests import BOOKIE_TEST_INI
        app = get_app(BOOKIE_TEST_INI, 'bookie')
        from webtest import TestApp
        self.testapp = TestApp(app)
        testing.setUp()
        # Cache the admin api key once for the whole module.
        global API_KEY
        if API_KEY is None:
            res = DBSession.execute(
                "SELECT api_key FROM users WHERE username = 'admin'").\
                fetchone()
            API_KEY = res['api_key']
    def tearDown(self):
        """Tear down each test"""
        testing.tearDown()
        empty_db()
    def _get_good_request(self):
        """Return the basics for a good add bookmark request"""
        session = DBSession()
        prms = {
            'url': u'http://google.com',
            'description': u'This is my google desc',
            'extended': u'And some extended notes about it in full form',
            'tags': u'python search',
            'api_key': API_KEY,
            'content': 'bmark content is the best kind of content man',
        }
        req_params = urllib.urlencode(prms)
        res = self.testapp.post('/api/v1/admin/bmark',
                                params=req_params)
        session.flush()
        transaction.commit()
        # Rebuild the fulltext index synchronously so searches below see
        # the bookmark we just added.
        from bookie.bcelery import tasks
        tasks.reindex_fulltext_allbookmarks(sync=True)
        return res
    def test_restlike_search(self):
        """Verify that our search still works in a restful url method"""
        # first let's add a bookmark we can search on
        self._get_good_request()
        search_res = self.testapp.get(
            '/api/v1/admin/bmarks/search/search?search_content=True',
            params={'api_key': API_KEY})
        self.assertTrue(
            search_res.status == '200 OK',
            "Status is 200: " + search_res.status)
        self.assertTrue(
            'python' in search_res.body,
            "We should find the python tag in the results: " + search_res.body)
    def test_fulltext_schema(self):
        """Verify the fulltext schema"""
        from bookie.models.fulltext import WIX
        schema = WIX.schema
        self.assertTrue(
            'bid' in schema,
            "We should find bid in schema: " + str(schema))
        self.assertTrue(
            'description' in schema,
            "We should find description in schema: " + str(schema))
        self.assertTrue(
            'extended' in schema,
            "We should find extended in schema: " + str(schema))
        self.assertTrue(
            'is_private' in schema,
            "We should find is_private in schema: " + str(schema))
        self.assertTrue(
            'readable' in schema,
            "We should find readable in schema: " + str(schema))
        self.assertTrue(
            'tags' in schema,
            "We should find tags in schema: " + str(schema))
        self.assertTrue(
            'username' in schema,
            "We should find username in schema: " + str(schema))
| agpl-3.0 |
hammerlab/mhctools | mhctools/mhcflurry.py | 1 | 3784 | # Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import logging
from numpy import nan
from .base_predictor import BasePredictor
from .binding_prediction import BindingPrediction
from .binding_prediction_collection import BindingPredictionCollection
from .unsupported_allele import UnsupportedAllele
logger = logging.getLogger(__name__)
class MHCflurry(BasePredictor):
    """
    Wrapper around MHCflurry. Users will need to download MHCflurry models
    first.

    See https://github.com/hammerlab/mhcflurry
    """
    def __init__(
            self,
            alleles,
            default_peptide_lengths=[9],
            predictor=None,
            models_path=None):
        """
        Parameters
        -----------
        alleles : list of str

        default_peptide_lengths : list of int

        predictor : mhcflurry.Class1AffinityPredictor (optional)
            MHCflurry predictor to use

        models_path : string
            Models dir to use if predictor argument is None

        Raises
        ------
        UnsupportedAllele
            If any requested allele is not supported by the loaded models.
        """
        # moving import here since the mhcflurry package imports
        # Keras and its backend (either Theano or TF) which end up
        # slowing down responsive for any CLI application using MHCtools
        from mhcflurry import Class1AffinityPredictor
        BasePredictor.__init__(
            self,
            alleles=alleles,
            default_peptide_lengths=default_peptide_lengths,
            min_peptide_length=8,
            max_peptide_length=15)
        if predictor:
            self.predictor = predictor
        elif models_path:
            # Use the module-level `logger` (not the root logger via
            # logging.info) for consistency with the rest of this module,
            # and pass %-style args for lazy formatting.
            logger.info("Loading MHCflurry models from %s", models_path)
            self.predictor = Class1AffinityPredictor.load(models_path)
        else:
            self.predictor = Class1AffinityPredictor.load()
        # relying on BasePredictor and MHCflurry to both normalize
        # allele names the same way using mhcnames
        for allele in self.alleles:
            if allele not in self.predictor.supported_alleles:
                raise UnsupportedAllele(allele)

    def predict_peptides(self, peptides):
        """
        Predict MHC affinity for peptides.

        Returns a BindingPredictionCollection with one BindingPrediction
        per (allele, peptide) pair.
        """
        # importing locally to avoid slowing down CLI applications which
        # don't use MHCflurry
        from mhcflurry.encodable_sequences import EncodableSequences
        binding_predictions = []
        encodable_sequences = EncodableSequences.create(peptides)
        for allele in self.alleles:
            predictions_df = self.predictor.predict_to_dataframe(
                encodable_sequences, allele=allele)
            for (_, row) in predictions_df.iterrows():
                binding_prediction = BindingPrediction(
                    allele=allele,
                    peptide=row.peptide,
                    affinity=row.prediction,
                    percentile_rank=(
                        row.prediction_percentile
                        if 'prediction_percentile' in row else nan),
                    prediction_method_name="mhcflurry")
                binding_predictions.append(binding_prediction)
        return BindingPredictionCollection(binding_predictions)
| apache-2.0 |
onceuponatimeforever/oh-mainline | vendor/packages/twisted/twisted/internet/test/test_endpoints.py | 18 | 41223 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test the C{I...Endpoint} implementations that wrap the L{IReactorTCP},
L{IReactorSSL}, and L{IReactorUNIX} interfaces found in
L{twisted.internet.endpoints}.
"""
from errno import EPERM
from zope.interface import implements
from twisted.trial import unittest
from twisted.internet import error, interfaces
from twisted.internet import endpoints
from twisted.internet.address import IPv4Address, UNIXAddress
from twisted.internet.protocol import ClientFactory, Protocol
from twisted.test.proto_helpers import MemoryReactor, RaisingMemoryReactor
from twisted.python.failure import Failure
from twisted import plugins
from twisted.python.modules import getModule
from twisted.python.filepath import FilePath
pemPath = getModule("twisted.test").filePath.sibling("server.pem")
casPath = getModule(__name__).filePath.sibling("fake_CAs")
escapedPEMPathName = endpoints.quoteStringArgument(pemPath.path)
escapedCAsPathName = endpoints.quoteStringArgument(casPath.path)
try:
from twisted.test.test_sslverify import makeCertificate
from twisted.internet.ssl import CertificateOptions, Certificate, \
KeyPair, PrivateCertificate
from OpenSSL.SSL import ContextType
testCertificate = Certificate.loadPEM(pemPath.getContent())
testPrivateCertificate = PrivateCertificate.loadPEM(pemPath.getContent())
skipSSL = False
except ImportError:
skipSSL = "OpenSSL is required to construct SSL Endpoints"
class TestProtocol(Protocol):
    """
    Protocol whose only function is to callback deferreds on the
    factory when it is connected or disconnected.
    """
    def __init__(self):
        # Chunks received via dataReceived, in order.
        self.data = []
        # Failure/reason objects passed to connectionLost, in order.
        self.connectionsLost = []
        # Number of times connectionMade fired (should be exactly one).
        self.connectionMadeCalls = 0
    def connectionMade(self):
        self.connectionMadeCalls += 1
    def dataReceived(self, data):
        self.data.append(data)
    def connectionLost(self, reason):
        self.connectionsLost.append(reason)
class TestHalfCloseableProtocol(TestProtocol):
    """
    A Protocol that implements L{IHalfCloseableProtocol} and records that
    its C{readConnectionLost} and {writeConnectionLost} methods.
    """
    implements(interfaces.IHalfCloseableProtocol)
    def __init__(self):
        TestProtocol.__init__(self)
        # Flags recording which half-close callbacks have fired.
        self.readLost = False
        self.writeLost = False
    def readConnectionLost(self):
        self.readLost = True
    def writeConnectionLost(self):
        self.writeLost = True
class TestFactory(ClientFactory):
    """
    Simple factory to be used both when connecting and listening. It contains
    two deferreds which are called back when my protocol connects and
    disconnects.
    """
    # Protocol class instantiated for each connection.
    protocol = TestProtocol
class WrappingFactoryTests(unittest.TestCase):
    """
    Test the behaviour of our ugly implementation detail C{_WrappingFactory}.
    """
    def test_failedBuildProtocol(self):
        """
        An exception raised in C{buildProtocol} of our wrappedFactory
        results in our C{onConnection} errback being fired.
        """
        class BogusFactory(ClientFactory):
            """
            A one off factory whose C{buildProtocol} raises an C{Exception}.
            """
            def buildProtocol(self, addr):
                raise ValueError("My protocol is poorly defined.")
        wf = endpoints._WrappingFactory(BogusFactory(), None)
        wf.buildProtocol(None)
        d = self.assertFailure(wf._onConnection, ValueError)
        d.addCallback(lambda e: self.assertEquals(
            e.args,
            ("My protocol is poorly defined.",)))
        return d
    def test_wrappedProtocolDataReceived(self):
        """
        The wrapped C{Protocol}'s C{dataReceived} will get called when our
        C{_WrappingProtocol}'s C{dataReceived} gets called.
        """
        wf = endpoints._WrappingFactory(TestFactory(), None)
        p = wf.buildProtocol(None)
        p.makeConnection(None)
        p.dataReceived('foo')
        self.assertEquals(p._wrappedProtocol.data, ['foo'])
        p.dataReceived('bar')
        self.assertEquals(p._wrappedProtocol.data, ['foo', 'bar'])
    def test_wrappedProtocolTransport(self):
        """
        Our transport is properly hooked up to the wrappedProtocol when a
        connection is made.
        """
        wf = endpoints._WrappingFactory(TestFactory(), None)
        p = wf.buildProtocol(None)
        # Any object will do; only identity is checked below.
        dummyTransport = object()
        p.makeConnection(dummyTransport)
        self.assertEquals(p.transport, dummyTransport)
        self.assertEquals(p._wrappedProtocol.transport, dummyTransport)
    def test_wrappedProtocolConnectionLost(self):
        """
        Our wrappedProtocol's connectionLost method is called when
        L{_WrappingProtocol.connectionLost} is called.
        """
        tf = TestFactory()
        wf = endpoints._WrappingFactory(tf, None)
        p = wf.buildProtocol(None)
        p.connectionLost("fail")
        self.assertEquals(p._wrappedProtocol.connectionsLost, ["fail"])
    def test_clientConnectionFailed(self):
        """
        Calls to L{_WrappingFactory.clientConnectionLost} should errback the
        L{_WrappingFactory._onConnection} L{Deferred}
        """
        wf = endpoints._WrappingFactory(TestFactory(), None)
        expectedFailure = Failure(error.ConnectError(string="fail"))
        wf.clientConnectionFailed(
            None,
            expectedFailure)
        errors = []
        def gotError(f):
            errors.append(f)
        wf._onConnection.addErrback(gotError)
        self.assertEquals(errors, [expectedFailure])
    def test_wrappingProtocolHalfCloseable(self):
        """
        Our L{_WrappingProtocol} should be an L{IHalfCloseableProtocol} if
        the C{wrappedProtocol} is.
        """
        cd = object()
        hcp = TestHalfCloseableProtocol()
        p = endpoints._WrappingProtocol(cd, hcp)
        self.assertEquals(
            interfaces.IHalfCloseableProtocol.providedBy(p), True)
    def test_wrappingProtocolNotHalfCloseable(self):
        """
        Our L{_WrappingProtocol} should not provide L{IHalfCloseableProtocol}
        if the C{WrappedProtocol} doesn't.
        """
        tp = TestProtocol()
        p = endpoints._WrappingProtocol(None, tp)
        self.assertEquals(
            interfaces.IHalfCloseableProtocol.providedBy(p), False)
    def test_wrappedProtocolReadConnectionLost(self):
        """
        L{_WrappingProtocol.readConnectionLost} should proxy to the wrapped
        protocol's C{readConnectionLost}
        """
        hcp = TestHalfCloseableProtocol()
        p = endpoints._WrappingProtocol(None, hcp)
        p.readConnectionLost()
        self.assertEquals(hcp.readLost, True)
    def test_wrappedProtocolWriteConnectionLost(self):
        """
        L{_WrappingProtocol.writeConnectionLost} should proxy to the wrapped
        protocol's C{writeConnectionLost}
        """
        hcp = TestHalfCloseableProtocol()
        p = endpoints._WrappingProtocol(None, hcp)
        p.writeConnectionLost()
        self.assertEquals(hcp.writeLost, True)
class EndpointTestCaseMixin(object):
    """
    Generic test methods to be mixed into all endpoint test classes.
    Concrete subclasses supply C{createClientEndpoint},
    C{createServerEndpoint}, C{expectedClients}, C{expectedServers},
    C{assertConnectArgs}, C{connectArgs} and C{listenArgs} (all called
    through C{self} below).
    """
    def retrieveConnectedFactory(self, reactor):
        """
        Retrieve a single factory that has connected using the given reactor.
        (This behavior is valid for TCP and SSL but needs to be overridden for
        UNIX.)
        @param reactor: a L{MemoryReactor}
        """
        # For connectTCP/connectSSL the factory is recorded as the third
        # positional argument.
        return self.expectedClients(reactor)[0][2]
    def test_endpointConnectSuccess(self):
        """
        A client endpoint can connect and returns a deferred which gets
        called back with a protocol instance.
        """
        proto = object()
        mreactor = MemoryReactor()
        clientFactory = object()
        ep, expectedArgs, ignoredDest = self.createClientEndpoint(
            mreactor, clientFactory)
        d = ep.connect(clientFactory)
        receivedProtos = []
        def checkProto(p):
            receivedProtos.append(p)
        d.addCallback(checkProto)
        factory = self.retrieveConnectedFactory(mreactor)
        # Simulate a successful connection attempt.
        factory._onConnection.callback(proto)
        self.assertEquals(receivedProtos, [proto])
        expectedClients = self.expectedClients(mreactor)
        self.assertEquals(len(expectedClients), 1)
        self.assertConnectArgs(expectedClients[0], expectedArgs)
    def test_endpointConnectFailure(self):
        """
        If an endpoint tries to connect to a non-listening port it gets
        a C{ConnectError} failure.
        """
        expectedError = error.ConnectError(string="Connection Failed")
        mreactor = RaisingMemoryReactor(connectException=expectedError)
        clientFactory = object()
        ep, ignoredArgs, ignoredDest = self.createClientEndpoint(
            mreactor, clientFactory)
        d = ep.connect(clientFactory)
        receivedExceptions = []
        def checkFailure(f):
            receivedExceptions.append(f.value)
        d.addErrback(checkFailure)
        self.assertEquals(receivedExceptions, [expectedError])
    def test_endpointConnectingCancelled(self):
        """
        Calling L{Deferred.cancel} on the L{Deferred} returned from
        L{IStreamClientEndpoint.connect} is errbacked with an expected
        L{ConnectingCancelledError} exception.
        """
        mreactor = MemoryReactor()
        clientFactory = object()
        ep, ignoredArgs, address = self.createClientEndpoint(
            mreactor, clientFactory)
        d = ep.connect(clientFactory)
        receivedFailures = []
        def checkFailure(f):
            receivedFailures.append(f)
        d.addErrback(checkFailure)
        d.cancel()
        self.assertEquals(len(receivedFailures), 1)
        failure = receivedFailures[0]
        self.assertIsInstance(failure.value, error.ConnectingCancelledError)
        # The failure carries the address we attempted to connect to.
        self.assertEquals(failure.value.address, address)
    def test_endpointListenSuccess(self):
        """
        An endpoint can listen and returns a deferred that gets called back
        with a port instance.
        """
        mreactor = MemoryReactor()
        factory = object()
        ep, expectedArgs, expectedHost = self.createServerEndpoint(
            mreactor, factory)
        d = ep.listen(factory)
        receivedHosts = []
        def checkPortAndServer(port):
            receivedHosts.append(port.getHost())
        d.addCallback(checkPortAndServer)
        self.assertEquals(receivedHosts, [expectedHost])
        self.assertEquals(self.expectedServers(mreactor), [expectedArgs])
    def test_endpointListenFailure(self):
        """
        When an endpoint tries to listen on an already listening port, a
        C{CannotListenError} failure is errbacked.
        """
        factory = object()
        exception = error.CannotListenError('', 80, factory)
        mreactor = RaisingMemoryReactor(listenException=exception)
        ep, ignoredArgs, ignoredDest = self.createServerEndpoint(
            mreactor, factory)
        d = ep.listen(object())
        receivedExceptions = []
        def checkFailure(f):
            receivedExceptions.append(f.value)
        d.addErrback(checkFailure)
        self.assertEquals(receivedExceptions, [exception])
    def test_endpointConnectNonDefaultArgs(self):
        """
        The endpoint should pass its connectArgs parameter to the reactor's
        connect method.
        """
        factory = object()
        mreactor = MemoryReactor()
        ep, expectedArgs, ignoredHost = self.createClientEndpoint(
            mreactor, factory,
            **self.connectArgs())
        ep.connect(factory)
        expectedClients = self.expectedClients(mreactor)
        self.assertEquals(len(expectedClients), 1)
        self.assertConnectArgs(expectedClients[0], expectedArgs)
    def test_endpointListenNonDefaultArgs(self):
        """
        The endpoint should pass its listenArgs parameter to the reactor's
        listen method.
        """
        factory = object()
        mreactor = MemoryReactor()
        ep, expectedArgs, ignoredHost = self.createServerEndpoint(
            mreactor, factory,
            **self.listenArgs())
        ep.listen(factory)
        expectedServers = self.expectedServers(mreactor)
        self.assertEquals(expectedServers, [expectedArgs])
class TCP4EndpointsTestCase(EndpointTestCaseMixin,
                            unittest.TestCase):
    """
    Tests for TCP Endpoints.
    """
    def expectedServers(self, reactor):
        """
        @return: List of calls to L{IReactorTCP.listenTCP}
        """
        return reactor.tcpServers
    def expectedClients(self, reactor):
        """
        @return: List of calls to L{IReactorTCP.connectTCP}
        """
        return reactor.tcpClients
    def assertConnectArgs(self, receivedArgs, expectedArgs):
        """
        Compare host, port, timeout, and bindAddress in C{receivedArgs}
        to C{expectedArgs}.  The factory is ignored because we only care
        what protocol comes out of the C{IStreamClientEndpoint.connect}
        call.
        @param receivedArgs: C{tuple} of (C{host}, C{port}, C{factory},
            C{timeout}, C{bindAddress}) that was passed to
            L{IReactorTCP.connectTCP}.
        @param expectedArgs: C{tuple} of (C{host}, C{port}, C{factory},
            C{timeout}, C{bindAddress}) that we expect to have been passed
            to L{IReactorTCP.connectTCP}.
        """
        (host, port, ignoredFactory, timeout, bindAddress) = receivedArgs
        (expectedHost, expectedPort, _ignoredFactory,
         expectedTimeout, expectedBindAddress) = expectedArgs
        self.assertEquals(host, expectedHost)
        self.assertEquals(port, expectedPort)
        self.assertEquals(timeout, expectedTimeout)
        self.assertEquals(bindAddress, expectedBindAddress)
    def connectArgs(self):
        """
        @return: C{dict} of keyword arguments to pass to connect.
        """
        return {'timeout': 10, 'bindAddress': ('localhost', 49595)}
    def listenArgs(self):
        """
        @return: C{dict} of keyword arguments to pass to listen
        """
        return {'backlog': 100, 'interface': '127.0.0.1'}
    def createServerEndpoint(self, reactor, factory, **listenArgs):
        """
        Create an L{TCP4ServerEndpoint} and return the values needed to verify
        its behaviour.
        @param reactor: A fake L{IReactorTCP} that L{TCP4ServerEndpoint} can
            call L{IReactorTCP.listenTCP} on.
        @param factory: The thing that we expect to be passed to our
            L{IStreamServerEndpoint.listen} implementation.
        @param listenArgs: Optional dictionary of arguments to
            L{IReactorTCP.listenTCP}.
        """
        address = IPv4Address("TCP", "0.0.0.0", 0)
        # listenArgs is collected with ** and is therefore always a dict;
        # the former "if listenArgs is None" guard was unreachable and has
        # been removed.
        return (endpoints.TCP4ServerEndpoint(reactor,
                                             address.port,
                                             **listenArgs),
                (address.port, factory,
                 listenArgs.get('backlog', 50),
                 listenArgs.get('interface', '')),
                address)
    def createClientEndpoint(self, reactor, clientFactory, **connectArgs):
        """
        Create an L{TCP4ClientEndpoint} and return the values needed to verify
        its behavior.
        @param reactor: A fake L{IReactorTCP} that L{TCP4ClientEndpoint} can
            call L{IReactorTCP.connectTCP} on.
        @param clientFactory: The thing that we expect to be passed to our
            L{IStreamClientEndpoint.connect} implementation.
        @param connectArgs: Optional dictionary of arguments to
            L{IReactorTCP.connectTCP}
        """
        address = IPv4Address("TCP", "localhost", 80)
        return (endpoints.TCP4ClientEndpoint(reactor,
                                             address.host,
                                             address.port,
                                             **connectArgs),
                (address.host, address.port, clientFactory,
                 connectArgs.get('timeout', 30),
                 connectArgs.get('bindAddress', None)),
                address)
class SSL4EndpointsTestCase(EndpointTestCaseMixin,
                            unittest.TestCase):
    """
    Tests for SSL Endpoints.
    """
    if skipSSL:
        skip = skipSSL
    def expectedServers(self, reactor):
        """
        @return: List of calls to L{IReactorSSL.listenSSL}
        """
        return reactor.sslServers
    def expectedClients(self, reactor):
        """
        @return: List of calls to L{IReactorSSL.connectSSL}
        """
        return reactor.sslClients
    def assertConnectArgs(self, receivedArgs, expectedArgs):
        """
        Compare host, port, contextFactory, timeout, and bindAddress in
        C{receivedArgs} to C{expectedArgs}.  The factory is ignored because
        we only care what protocol comes out of the
        C{IStreamClientEndpoint.connect} call.
        @param receivedArgs: C{tuple} of (C{host}, C{port}, C{factory},
            C{contextFactory}, C{timeout}, C{bindAddress}) that was passed to
            L{IReactorSSL.connectSSL}.
        @param expectedArgs: C{tuple} of (C{host}, C{port}, C{factory},
            C{contextFactory}, C{timeout}, C{bindAddress}) that we expect to
            have been passed to L{IReactorSSL.connectSSL}.
        """
        (host, port, ignoredFactory, contextFactory, timeout,
         bindAddress) = receivedArgs
        (expectedHost, expectedPort, _ignoredFactory, expectedContextFactory,
         expectedTimeout, expectedBindAddress) = expectedArgs
        self.assertEquals(host, expectedHost)
        self.assertEquals(port, expectedPort)
        self.assertEquals(contextFactory, expectedContextFactory)
        self.assertEquals(timeout, expectedTimeout)
        self.assertEquals(bindAddress, expectedBindAddress)
    def connectArgs(self):
        """
        @return: C{dict} of keyword arguments to pass to connect.
        """
        return {'timeout': 10, 'bindAddress': ('localhost', 49595)}
    def listenArgs(self):
        """
        @return: C{dict} of keyword arguments to pass to listen
        """
        return {'backlog': 100, 'interface': '127.0.0.1'}
    def setUp(self):
        """
        Set up client and server SSL contexts for use later.
        """
        self.sKey, self.sCert = makeCertificate(
            O="Server Test Certificate",
            CN="server")
        self.cKey, self.cCert = makeCertificate(
            O="Client Test Certificate",
            CN="client")
        self.serverSSLContext = CertificateOptions(
            privateKey=self.sKey,
            certificate=self.sCert,
            requireCertificate=False)
        self.clientSSLContext = CertificateOptions(
            requireCertificate=False)
    def createServerEndpoint(self, reactor, factory, **listenArgs):
        """
        Create an L{SSL4ServerEndpoint} and return the tools to verify its
        behaviour.
        @param reactor: A fake L{IReactorSSL} that L{SSL4ServerEndpoint} can
            call L{IReactorSSL.listenSSL} on.
        @param factory: The thing that we expect to be passed to our
            L{IStreamServerEndpoint.listen} implementation.
        @param listenArgs: Optional dictionary of arguments to
            L{IReactorSSL.listenSSL}.
        """
        address = IPv4Address("TCP", "0.0.0.0", 0)
        return (endpoints.SSL4ServerEndpoint(reactor,
                                             address.port,
                                             self.serverSSLContext,
                                             **listenArgs),
                (address.port, factory, self.serverSSLContext,
                 listenArgs.get('backlog', 50),
                 listenArgs.get('interface', '')),
                address)
    def createClientEndpoint(self, reactor, clientFactory, **connectArgs):
        """
        Create an L{SSL4ClientEndpoint} and return the values needed to verify
        its behaviour.
        @param reactor: A fake L{IReactorSSL} that L{SSL4ClientEndpoint} can
            call L{IReactorSSL.connectSSL} on.
        @param clientFactory: The thing that we expect to be passed to our
            L{IStreamClientEndpoint.connect} implementation.
        @param connectArgs: Optional dictionary of arguments to
            L{IReactorSSL.connectSSL}
        """
        address = IPv4Address("TCP", "localhost", 80)
        # connectArgs is collected with ** and is therefore always a dict;
        # the former "if connectArgs is None" guard was unreachable and has
        # been removed.
        return (endpoints.SSL4ClientEndpoint(reactor,
                                             address.host,
                                             address.port,
                                             self.clientSSLContext,
                                             **connectArgs),
                (address.host, address.port, clientFactory,
                 self.clientSSLContext,
                 connectArgs.get('timeout', 30),
                 connectArgs.get('bindAddress', None)),
                address)
class UNIXEndpointsTestCase(EndpointTestCaseMixin,
                            unittest.TestCase):
    """
    Tests for UnixSocket Endpoints.
    """
    def retrieveConnectedFactory(self, reactor):
        """
        Override L{EndpointTestCaseMixin.retrieveConnectedFactory} to account
        for different index of 'factory' in C{connectUNIX} args.
        """
        # connectUNIX records (path, factory, timeout, checkPID), so the
        # factory is at index 1 rather than 2.
        return self.expectedClients(reactor)[0][1]
    def expectedServers(self, reactor):
        """
        @return: List of calls to L{IReactorUNIX.listenUNIX}
        """
        return reactor.unixServers
    def expectedClients(self, reactor):
        """
        @return: List of calls to L{IReactorUNIX.connectUNIX}
        """
        return reactor.unixClients
    def assertConnectArgs(self, receivedArgs, expectedArgs):
        """
        Compare path, timeout, checkPID in C{receivedArgs} to C{expectedArgs}.
        The factory is ignored because we only care what protocol comes out
        of the C{IStreamClientEndpoint.connect} call.
        @param receivedArgs: C{tuple} of (C{path}, C{timeout}, C{checkPID})
            that was passed to L{IReactorUNIX.connectUNIX}.
        @param expectedArgs: C{tuple} of (C{path}, C{timeout}, C{checkPID})
            that we expect to have been passed to L{IReactorUNIX.connectUNIX}.
        """
        (path, ignoredFactory, timeout, checkPID) = receivedArgs
        (expectedPath, _ignoredFactory, expectedTimeout,
         expectedCheckPID) = expectedArgs
        self.assertEquals(path, expectedPath)
        self.assertEquals(timeout, expectedTimeout)
        self.assertEquals(checkPID, expectedCheckPID)
    def connectArgs(self):
        """
        @return: C{dict} of keyword arguments to pass to connect.
        """
        return {'timeout': 10, 'checkPID': 1}
    def listenArgs(self):
        """
        @return: C{dict} of keyword arguments to pass to listen
        """
        # 0600 is a Python 2 octal literal (owner read/write only).
        return {'backlog': 100, 'mode': 0600, 'wantPID': 1}
    def createServerEndpoint(self, reactor, factory, **listenArgs):
        """
        Create an L{UNIXServerEndpoint} and return the tools to verify its
        behaviour.
        @param reactor: A fake L{IReactorUNIX} that L{UNIXServerEndpoint} can
            call L{IReactorUNIX.listenUNIX} on.
        @param factory: The thing that we expect to be passed to our
            L{IStreamServerEndpoint.listen} implementation.
        @param listenArgs: Optional dictionary of arguments to
            L{IReactorUNIX.listenUNIX}.
        """
        address = UNIXAddress(self.mktemp())
        return (endpoints.UNIXServerEndpoint(reactor, address.name,
                                             **listenArgs),
                (address.name, factory,
                 listenArgs.get('backlog', 50),
                 listenArgs.get('mode', 0666),
                 listenArgs.get('wantPID', 0)),
                address)
    def createClientEndpoint(self, reactor, clientFactory, **connectArgs):
        """
        Create an L{UNIXClientEndpoint} and return the values needed to verify
        its behaviour.
        @param reactor: A fake L{IReactorUNIX} that L{UNIXClientEndpoint} can
            call L{IReactorUNIX.connectUNIX} on.
        @param clientFactory: The thing that we expect to be passed to our
            L{IStreamClientEndpoint.connect} implementation.
        @param connectArgs: Optional dictionary of arguments to
            L{IReactorUNIX.connectUNIX}
        """
        address = UNIXAddress(self.mktemp())
        return (endpoints.UNIXClientEndpoint(reactor, address.name,
                                             **connectArgs),
                (address.name, clientFactory,
                 connectArgs.get('timeout', 30),
                 connectArgs.get('checkPID', 0)),
                address)
class ParserTestCase(unittest.TestCase):
    """
    Tests for L{endpoints._parseServer}, the low-level parsing logic.
    """
    # Stand-in factory object; the parser passes it through untouched.
    f = "Factory"
    def parse(self, *a, **kw):
        """
        Provide a hook for test_strports to substitute the deprecated API.
        """
        return endpoints._parseServer(*a, **kw)
    def test_simpleTCP(self):
        """
        Simple strings with a 'tcp:' prefix should be parsed as TCP.
        """
        self.assertEquals(self.parse('tcp:80', self.f),
                          ('TCP', (80, self.f), {'interface':'', 'backlog':50}))
    def test_interfaceTCP(self):
        """
        TCP port descriptions parse their 'interface' argument as a string.
        """
        self.assertEquals(
            self.parse('tcp:80:interface=127.0.0.1', self.f),
            ('TCP', (80, self.f), {'interface':'127.0.0.1', 'backlog':50}))
    def test_backlogTCP(self):
        """
        TCP port descriptions parse their 'backlog' argument as an integer.
        """
        self.assertEquals(self.parse('tcp:80:backlog=6', self.f),
                          ('TCP', (80, self.f),
                           {'interface':'', 'backlog':6}))
    def test_simpleUNIX(self):
        """
        L{endpoints._parseServer} returns a C{'UNIX'} port description with
        defaults for C{'mode'}, C{'backlog'}, and C{'wantPID'} when passed a
        string with the C{'unix:'} prefix and no other parameter values.
        """
        self.assertEquals(
            self.parse('unix:/var/run/finger', self.f),
            ('UNIX', ('/var/run/finger', self.f),
             {'mode': 0666, 'backlog': 50, 'wantPID': True}))
    def test_modeUNIX(self):
        """
        C{mode} can be set by including C{"mode=<some integer>"}.
        """
        self.assertEquals(
            self.parse('unix:/var/run/finger:mode=0660', self.f),
            ('UNIX', ('/var/run/finger', self.f),
             {'mode': 0660, 'backlog': 50, 'wantPID': True}))
    def test_wantPIDUNIX(self):
        """
        C{wantPID} can be set to false by including C{"lockfile=0"}.
        """
        self.assertEquals(
            self.parse('unix:/var/run/finger:lockfile=0', self.f),
            ('UNIX', ('/var/run/finger', self.f),
             {'mode': 0666, 'backlog': 50, 'wantPID': False}))
    def test_escape(self):
        """
        Backslash can be used to escape colons and backslashes in port
        descriptions.
        """
        self.assertEquals(
            self.parse(r'unix:foo\:bar\=baz\:qux\\', self.f),
            ('UNIX', ('foo:bar=baz:qux\\', self.f),
             {'mode': 0666, 'backlog': 50, 'wantPID': True}))
    def test_quoteStringArgument(self):
        """
        L{endpoints.quoteStringArgument} should quote backslashes and colons
        for interpolation into L{endpoints.serverFromString} and
        L{endpoints.clientFactory} arguments.
        """
        self.assertEquals(endpoints.quoteStringArgument("some : stuff \\"),
                          "some \\: stuff \\\\")
    def test_impliedEscape(self):
        """
        In strports descriptions, '=' in a parameter value does not need to be
        quoted; it will simply be parsed as part of the value.
        """
        self.assertEquals(
            self.parse(r'unix:address=foo=bar', self.f),
            ('UNIX', ('foo=bar', self.f),
             {'mode': 0666, 'backlog': 50, 'wantPID': True}))
    def test_nonstandardDefault(self):
        """
        For compatibility with the old L{twisted.application.strports.parse},
        the third 'mode' argument may be specified to L{endpoints.parse} to
        indicate a default other than TCP.
        """
        self.assertEquals(
            self.parse('filename', self.f, 'unix'),
            ('UNIX', ('filename', self.f),
             {'mode': 0666, 'backlog': 50, 'wantPID': True}))
    def test_unknownType(self):
        """
        L{strports.parse} raises C{ValueError} when given an unknown endpoint
        type.
        """
        self.assertRaises(ValueError, self.parse, "bogus-type:nothing", self.f)
class ServerStringTests(unittest.TestCase):
    """
    Tests for L{twisted.internet.endpoints.serverFromString}.
    """
    def test_tcp(self):
        """
        When passed a TCP strports description, L{endpoints.serverFromString}
        returns a L{TCP4ServerEndpoint} instance initialized with the values
        from the string.
        """
        reactor = object()
        server = endpoints.serverFromString(
            reactor, "tcp:1234:backlog=12:interface=10.0.0.1")
        self.assertIsInstance(server, endpoints.TCP4ServerEndpoint)
        self.assertIdentical(server._reactor, reactor)
        self.assertEquals(server._port, 1234)
        self.assertEquals(server._backlog, 12)
        self.assertEquals(server._interface, "10.0.0.1")
    def test_ssl(self):
        """
        When passed an SSL strports description, L{endpoints.serverFromString}
        returns a L{SSL4ServerEndpoint} instance initialized with the values
        from the string.
        """
        reactor = object()
        server = endpoints.serverFromString(
            reactor,
            "ssl:1234:backlog=12:privateKey=%s:"
            "certKey=%s:interface=10.0.0.1" % (escapedPEMPathName,
                                               escapedPEMPathName))
        self.assertIsInstance(server, endpoints.SSL4ServerEndpoint)
        self.assertIdentical(server._reactor, reactor)
        self.assertEquals(server._port, 1234)
        self.assertEquals(server._backlog, 12)
        self.assertEquals(server._interface, "10.0.0.1")
        # The parsed key/cert must yield a real OpenSSL context object.
        ctx = server._sslContextFactory.getContext()
        self.assertIsInstance(ctx, ContextType)
    if skipSSL:
        test_ssl.skip = skipSSL
    def test_unix(self):
        """
        When passed a UNIX strports description, L{endpoints.serverFromString}
        returns a L{UNIXServerEndpoint} instance initialized with the values
        from the string.
        """
        reactor = object()
        endpoint = endpoints.serverFromString(
            reactor,
            "unix:/var/foo/bar:backlog=7:mode=0123:lockfile=1")
        self.assertIsInstance(endpoint, endpoints.UNIXServerEndpoint)
        self.assertIdentical(endpoint._reactor, reactor)
        self.assertEquals(endpoint._address, "/var/foo/bar")
        self.assertEquals(endpoint._backlog, 7)
        # 0123 is a Python 2 octal literal.
        self.assertEquals(endpoint._mode, 0123)
        self.assertEquals(endpoint._wantPID, True)
    def test_implicitDefaultNotAllowed(self):
        """
        The older service-based API (L{twisted.internet.strports.service})
        allowed an implicit default of 'tcp' so that TCP ports could be
        specified as a simple integer, but we've since decided that's a bad
        idea, and the new API does not accept an implicit default argument; you
        have to say 'tcp:' now. If you try passing an old implicit port number
        to the new API, you'll get a C{ValueError}.
        """
        value = self.assertRaises(
            ValueError, endpoints.serverFromString, None, "4321")
        self.assertEquals(
            str(value),
            "Unqualified strport description passed to 'service'."
            "Use qualified endpoint descriptions; for example, 'tcp:4321'.")
    def test_unknownType(self):
        """
        L{endpoints.serverFromString} raises C{ValueError} when given an
        unknown endpoint type.
        """
        value = self.assertRaises(
            # faster-than-light communication not supported
            ValueError, endpoints.serverFromString, None,
            "ftl:andromeda/carcosa/hali/2387")
        self.assertEquals(
            str(value),
            "Unknown endpoint type: 'ftl'")
    def test_typeFromPlugin(self):
        """
        L{endpoints.serverFromString} looks up plugins of type
        L{IStreamServerEndpoint} and constructs endpoints from them.
        """
        # Set up a plugin which will only be accessible for the duration of
        # this test.
        addFakePlugin(self)
        # Plugin is set up: now actually test.
        notAReactor = object()
        fakeEndpoint = endpoints.serverFromString(
            notAReactor, "fake:hello:world:yes=no:up=down")
        from twisted.plugins.fakeendpoint import fake
        self.assertIdentical(fakeEndpoint.parser, fake)
        self.assertEquals(fakeEndpoint.args, (notAReactor, 'hello', 'world'))
        self.assertEquals(fakeEndpoint.kwargs, dict(yes='no', up='down'))
def addFakePlugin(testCase, dropinSource="fakeendpoint.py"):
    """
    For the duration of C{testCase}, add a fake plugin to twisted.plugins which
    contains some sample endpoint parsers.
    @param testCase: the test case whose C{addCleanup} restores the plugin
        state afterwards.
    @param dropinSource: filename (a sibling of this module) of the plugin
        source to copy into the temporary plugin directory.
    """
    import sys
    savedModules = sys.modules.copy()
    # Snapshot the *contents* of the plugin search path.  Saving a bare
    # reference (the previous behavior) meant the cleanup below wrote the
    # already-mutated list back onto itself, leaving the fake plugin path
    # installed after the test finished.
    savedPluginPath = plugins.__path__[:]
    def cleanup():
        sys.modules.clear()
        sys.modules.update(savedModules)
        plugins.__path__[:] = savedPluginPath
    testCase.addCleanup(cleanup)
    fp = FilePath(testCase.mktemp())
    fp.createDirectory()
    getModule(__name__).filePath.sibling(dropinSource).copyTo(
        fp.child(dropinSource))
    plugins.__path__.append(fp.path)
class ClientStringTests(unittest.TestCase):
    """
    Tests for L{twisted.internet.endpoints.clientFromString}.
    """
    def test_tcp(self):
        """
        When passed a TCP strports description,
        L{endpoints.clientFromString} returns a L{TCP4ClientEndpoint}
        instance initialized with the values from the string.
        """
        reactor = object()
        client = endpoints.clientFromString(
            reactor,
            "tcp:host=example.com:port=1234:timeout=7:bindAddress=10.0.0.2")
        self.assertIsInstance(client, endpoints.TCP4ClientEndpoint)
        self.assertIdentical(client._reactor, reactor)
        self.assertEquals(client._host, "example.com")
        self.assertEquals(client._port, 1234)
        self.assertEquals(client._timeout, 7)
        self.assertEquals(client._bindAddress, "10.0.0.2")
    def test_tcpDefaults(self):
        """
        A TCP strports description may omit I{timeout} or I{bindAddress} to
        allow the default to be used.
        """
        reactor = object()
        client = endpoints.clientFromString(
            reactor,
            "tcp:host=example.com:port=1234")
        self.assertEquals(client._timeout, 30)
        self.assertEquals(client._bindAddress, None)
    def test_unix(self):
        """
        When passed a UNIX strports description,
        L{endpoints.clientFromString} returns a L{UNIXClientEndpoint}
        instance initialized with the values from the string.
        """
        reactor = object()
        client = endpoints.clientFromString(
            reactor,
            "unix:path=/var/foo/bar:lockfile=1:timeout=9")
        self.assertIsInstance(client, endpoints.UNIXClientEndpoint)
        self.assertIdentical(client._reactor, reactor)
        self.assertEquals(client._path, "/var/foo/bar")
        self.assertEquals(client._timeout, 9)
        self.assertEquals(client._checkPID, True)
    def test_unixDefaults(self):
        """
        A UNIX strports description may omit I{lockfile} or I{timeout} to allow
        the defaults to be used.
        """
        client = endpoints.clientFromString(object(), "unix:path=/var/foo/bar")
        self.assertEquals(client._timeout, 30)
        self.assertEquals(client._checkPID, False)
    def test_typeFromPlugin(self):
        """
        L{endpoints.clientFromString} looks up plugins of type
        L{IStreamClientEndpoint} and constructs endpoints from them.
        """
        addFakePlugin(self)
        notAReactor = object()
        clientEndpoint = endpoints.clientFromString(
            notAReactor, "cfake:alpha:beta:cee=dee:num=1")
        from twisted.plugins.fakeendpoint import fakeClient
        self.assertIdentical(clientEndpoint.parser, fakeClient)
        self.assertEquals(clientEndpoint.args, ('alpha', 'beta'))
        self.assertEquals(clientEndpoint.kwargs, dict(cee='dee', num='1'))
    def test_unknownType(self):
        """
        L{endpoints.clientFromString} raises C{ValueError} when given an
        unknown endpoint type.
        """
        value = self.assertRaises(
            # faster-than-light communication not supported
            ValueError, endpoints.clientFromString, None,
            "ftl:andromeda/carcosa/hali/2387")
        self.assertEquals(
            str(value),
            "Unknown endpoint type: 'ftl'")
class SSLClientStringTests(unittest.TestCase):
    """
    Tests for L{twisted.internet.endpoints.clientFromString} which require SSL.
    """
    if skipSSL:
        skip = skipSSL
    def test_ssl(self):
        """
        When passed an SSL strports description, L{clientFromString} returns a
        L{SSL4ClientEndpoint} instance initialized with the values from the
        string.
        """
        reactor = object()
        client = endpoints.clientFromString(
            reactor,
            "ssl:host=example.net:port=4321:privateKey=%s:"
            "certKey=%s:bindAddress=10.0.0.3:timeout=3:caCertsDir=%s" %
            (escapedPEMPathName,
             escapedPEMPathName,
             escapedCAsPathName))
        self.assertIsInstance(client, endpoints.SSL4ClientEndpoint)
        self.assertIdentical(client._reactor, reactor)
        self.assertEquals(client._host, "example.net")
        self.assertEquals(client._port, 4321)
        self.assertEquals(client._timeout, 3)
        self.assertEquals(client._bindAddress, "10.0.0.3")
        certOptions = client._sslContextFactory
        self.assertIsInstance(certOptions, CertificateOptions)
        ctx = certOptions.getContext()
        self.assertIsInstance(ctx, ContextType)
        self.assertEquals(Certificate(certOptions.certificate),
                          testCertificate)
        privateCert = PrivateCertificate(certOptions.certificate)
        privateCert._setPrivateKey(KeyPair(certOptions.privateKey))
        self.assertEquals(privateCert, testPrivateCertificate)
        # Every .pem file in the caCertsDir should have been loaded.
        expectedCerts = [
            Certificate.loadPEM(x.getContent()) for x in
            [casPath.child("thing1.pem"), casPath.child("thing2.pem")]
            if x.basename().lower().endswith('.pem')
        ]
        self.assertEquals([Certificate(x) for x in certOptions.caCerts],
                          expectedCerts)
    def test_unreadableCertificate(self):
        """
        If a certificate in the directory is unreadable,
        L{endpoints._loadCAsFromDir} will ignore that certificate.
        """
        class UnreadableFilePath(FilePath):
            # FilePath subclass whose getContent raises EPERM for one
            # specific file, simulating an unreadable certificate.
            def getContent(self):
                data = FilePath.getContent(self)
                # There is a duplicate of thing2.pem, so ignore anything that
                # looks like it.
                if data == casPath.child("thing2.pem").getContent():
                    raise IOError(EPERM)
                else:
                    return data
        casPathClone = casPath.child("ignored").parent()
        casPathClone.clonePath = UnreadableFilePath
        self.assertEquals(
            [Certificate(x) for x in endpoints._loadCAsFromDir(casPathClone)],
            [Certificate.loadPEM(casPath.child("thing1.pem").getContent())])
    def test_sslSimple(self):
        """
        When passed an SSL strports description without any extra parameters,
        L{clientFromString} returns a simple non-verifying endpoint that will
        speak SSL.
        """
        reactor = object()
        client = endpoints.clientFromString(
            reactor, "ssl:host=simple.example.org:port=4321")
        certOptions = client._sslContextFactory
        self.assertIsInstance(certOptions, CertificateOptions)
        self.assertEquals(certOptions.verify, False)
        ctx = certOptions.getContext()
        self.assertIsInstance(ctx, ContextType)
| agpl-3.0 |
camradal/ansible | lib/ansible/modules/windows/win_owner.py | 14 | 1759 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2015, Hans-Joachim Kliemeck <git@kliemeck.de>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = r'''
---
module: win_owner
version_added: "2.1"
short_description: Set owner
description:
- Set owner of files or directories
options:
path:
description:
- Path to be used for changing owner
required: true
user:
description:
- Name to be used for changing owner
required: true
recurse:
description:
- Indicates if the owner should be changed recursively
required: false
choices:
- no
- yes
default: no
author: Hans-Joachim Kliemeck (@h0nIg)
'''
EXAMPLES = r'''
- name: Change owner of Path
win_owner:
path: C:\apache
user: apache
recurse: True
- name: Set the owner of root directory
win_owner:
path: C:\apache
user: SYSTEM
recurse: False
'''
RETURN = r'''
'''
| gpl-3.0 |
mojolab/LivingData | lib/livdatscenario.py | 1 | 3138 | import os,sys
sys.path.append("/opt/livingdata/lib")
from livdatbender import *
from libsoma import *
import pygsheets
from bs4 import BeautifulSoup
def get_color_json(dictionary):
    """
    Render *dictionary* as pretty-printed JSON with ANSI terminal colours.
    (Python 2: the text is decoded to unicode before highlighting.)
    """
    plain_text = get_formatted_json(dictionary)
    decoded = unicode(plain_text, 'UTF-8')
    return highlight(decoded, lexers.JsonLexer(), formatters.TerminalFormatter())
def get_formatted_json(dictionary):
    """
    Serialize *dictionary* to a pretty-printed JSON string with sorted keys
    and a four-space indent.
    """
    return json.dumps(dictionary, sort_keys=True, indent=4)
class ScenarioGenerator(DataBender):
    # Drives scenario runs backed by a Google Sheets workbook (via the
    # inherited `self.gc` gspread client and `self.config` ConfigParser).
    # The workbook is expected to contain three worksheets:
    #   "Scenario"  - definition table with columns Name/Type/Cell/Unit
    #   "RunSheet"  - the sheet whose cells are read/written per scenario
    #   "Scenarios" - accumulated results, mirrored in self.scenariosdf
    def __init__(self,*args, **kwargs):
        super(ScenarioGenerator,self).__init__(*args, **kwargs)
        # Workbook key comes from the [Scenario] section of the config.
        self.scenariobookkey=self.config.get("Scenario","bookkey")
        try:
            print "Trying to get scenario book..."
            self.scenariobook=self.gc.open_by_key(self.scenariobookkey)
            self.scenariodef=self.scenariobook.worksheet_by_title("Scenario").get_as_df()
            self.runsheet=self.scenariobook.worksheet_by_title("RunSheet")
            self.scenariossheet=self.scenariobook.worksheet_by_title("Scenarios")
            self.scenariosdf=self.scenariobook.worksheet_by_title("Scenarios").get_as_df()
            self.name=self.config.get("Scenario","name")
            print sorted(list(self.scenariodef.Name))
            # Header row of "Scenarios" is rewritten with the sorted field names.
            self.scenariossheet.update_row(1,sorted(list(self.scenariodef.Name)))
        # NOTE(review): bare except swallows every failure (bad key, auth
        # error, missing worksheet) and leaves the instance half-built;
        # later method calls will then fail with AttributeError.
        except:
            print "Failed to open scenario book by key " + self.scenariobookkey
    def blank_scenario(self):
        # Build a scenario dict from the definition table: "read" fields are
        # pulled from the RunSheet now, "variable"/"result" fields start as None.
        scenario={}
        for row in self.scenariodef.itertuples():
            if row.Type=="read":
                print "Reading cell " + str(row.Cell) + " from RunSheet for field " + row.Name
                scenario[row.Name]=self.scenariobook.worksheet_by_title("RunSheet").cell(str(row.Cell)).value
            if row.Type=="variable":
                print row.Name + " is a variable to be put in " + row.Cell
                scenario[row.Name]=None
            if row.Type=="result":
                print row.Name + " is a result to be put in " + row.Cell
                scenario[row.Name]=None
        return scenario
    def get_scenario(self):
        # Read every defined field's cell from the RunSheet, regardless of Type.
        scenario={}
        for row in self.scenariodef.itertuples():
            print "Reading cell " + str(row.Cell) + " from RunSheet for field " + row.Name +"..."
            scenario[row.Name]=self.scenariobook.worksheet_by_title("RunSheet").cell(str(row.Cell)).value
            print scenario[row.Name]
        return scenario
    def lookup_cell_for_key(self,key):
        # Cell address (e.g. "B2") for a field name in the definition table.
        return str(self.scenariodef[self.scenariodef.Name==key].Cell.iloc[0])
    def lookup_type_for_key(self,key):
        # Field Type ("read"/"variable"/"result") for a field name.
        return str(self.scenariodef[self.scenariodef.Name==key].Type.iloc[0])
    def lookup_unit_for_key(self,key):
        # Unit string for a field name.
        return str(self.scenariodef[self.scenariodef.Name==key].Unit.iloc[0])
    def put_scenario(self,scenario):
        # Write only the "variable" fields of *scenario* back to the RunSheet.
        for key in scenario.keys():
            if self.lookup_type_for_key(key)=="variable":
                # NOTE(review): missing space before "to" yields e.g. "B2to 5"
                # in the log message.
                print "Setting variable "+ key + " in cell " + self.lookup_cell_for_key(key) + "to " +str(scenario[key])
                self.runsheet.update_cell(self.lookup_cell_for_key(key),scenario[key])
    def append_scenarios(self,scenarios):
        # Append rows to the in-memory dataframe, dropping exact duplicates.
        self.scenariosdf=self.scenariosdf.append(scenarios,ignore_index=True).drop_duplicates()
    def put_scenarios(self):
        # Push the in-memory dataframe to the "Scenarios" sheet at A1.
        self.scenariossheet.set_dataframe(self.scenariosdf,(1,1))
    def get_scenarios(self):
        # Refresh the in-memory dataframe from the "Scenarios" sheet.
        self.scenariosdf=self.scenariossheet.get_as_df()
| apache-2.0 |
zuku1985/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 33 | 20167 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
    """Assorted error-handling and equivalence checks for linkage."""
    random_state = np.random.RandomState(42)
    data = random_state.normal(size=(5, 5))

    # Invalid linkage names and mis-shaped connectivity must be rejected.
    assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, data)
    assert_raises(ValueError, linkage_tree, data, linkage='foo')
    assert_raises(ValueError, linkage_tree, data, connectivity=np.ones((4, 4)))

    # Smoke test FeatureAgglomeration.
    FeatureAgglomeration().fit(data)

    # A precomputed distance matrix must give the same tree as the
    # corresponding affinity string.
    distances = cosine_distances(data)
    precomputed_result = linkage_tree(distances, affinity="precomputed")
    assert_array_equal(precomputed_result[0],
                       linkage_tree(data, affinity="cosine")[0])

    # A callable affinity must match its string equivalent as well.
    callable_result = linkage_tree(data, affinity=manhattan_distances)
    assert_array_equal(callable_result[0],
                       linkage_tree(data, affinity="manhattan")[0])
def test_structured_linkage_tree():
    """Structured linkage trees should have consistent node bookkeeping."""
    random_state = np.random.RandomState(0)
    mask = np.ones([10, 10], dtype=np.bool)
    # Avoid a mask made only of 'True' entries.
    mask[4:7, 4:7] = 0
    data = random_state.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    for tree_builder in _TREE_BUILDERS.values():
        children, n_components, n_leaves, parent = \
            tree_builder(data.T, connectivity)
        # A full binary merge tree over m features has 2 * m - 1 nodes.
        expected_nodes = 2 * data.shape[1] - 1
        assert_true(len(children) + n_leaves == expected_nodes)
        # A connectivity matrix of the wrong shape must raise.
        assert_raises(ValueError,
                      tree_builder, data.T, np.ones((4, 4)))
        # Building a tree with zero samples must raise as well.
        assert_raises(ValueError,
                      tree_builder, data.T[:0], connectivity)
def test_unstructured_linkage_tree():
    """Unstructured linkage trees should have consistent node bookkeeping."""
    random_state = np.random.RandomState(0)
    data = random_state.randn(50, 100)
    for sample in (data, data[0]):
        # Passing n_clusters without connectivity triggers a UserWarning;
        # exercise that path and still check the tree shape.
        with ignore_warnings():
            children, n_nodes, n_leaves, parent = assert_warns(
                UserWarning, ward_tree, sample.T, n_clusters=10)
        expected_nodes = 2 * data.shape[1] - 1
        assert_equal(len(children) + n_leaves, expected_nodes)
    for tree_builder in _TREE_BUILDERS.values():
        for sample in (data, data[0]):
            with ignore_warnings():
                children, n_nodes, n_leaves, parent = assert_warns(
                    UserWarning, tree_builder, sample.T, n_clusters=10)
            expected_nodes = 2 * data.shape[1] - 1
            assert_equal(len(children) + n_leaves, expected_nodes)
def test_height_linkage_tree():
    """Every tree builder should produce a tree with 2 * m - 1 nodes."""
    random_state = np.random.RandomState(0)
    mask = np.ones([10, 10], dtype=np.bool)
    data = random_state.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    for builder in _TREE_BUILDERS.values():
        children, n_nodes, n_leaves, parent = builder(data.T, connectivity)
        expected_nodes = 2 * data.shape[1] - 1
        assert_true(len(children) + n_leaves == expected_nodes)
def test_agglomerative_clustering_wrong_arg_memory():
    """`memory` must be a string or a joblib.Memory; anything else raises."""
    random_state = np.random.RandomState(0)
    data = random_state.randn(100, 50)
    # An int is neither a cache path nor a Memory instance.
    bad_memory = 5
    model = AgglomerativeClustering(memory=bad_memory)
    assert_raises(ValueError, model.fit, data)
def test_agglomerative_clustering():
    # Check that we obtain the correct number of clusters with
    # agglomerative clustering.
    rng = np.random.RandomState(0)
    mask = np.ones([10, 10], dtype=np.bool)
    n_samples = 100
    X = rng.randn(n_samples, 50)
    connectivity = grid_to_graph(*mask.shape)
    # Exercise every linkage criterion against the same data/connectivity.
    for linkage in ("ward", "complete", "average"):
        clustering = AgglomerativeClustering(n_clusters=10,
                                             connectivity=connectivity,
                                             linkage=linkage)
        clustering.fit(X)
        # test caching
        try:
            tempdir = mkdtemp()
            clustering = AgglomerativeClustering(
                n_clusters=10, connectivity=connectivity,
                memory=tempdir,
                linkage=linkage)
            clustering.fit(X)
            labels = clustering.labels_
            assert_true(np.size(np.unique(labels)) == 10)
        finally:
            # Always drop the joblib cache directory, even if the fit failed.
            shutil.rmtree(tempdir)
        # Turn caching off now
        clustering = AgglomerativeClustering(
            n_clusters=10, connectivity=connectivity, linkage=linkage)
        # Check that we obtain the same solution with early-stopping of the
        # tree building
        clustering.compute_full_tree = False
        clustering.fit(X)
        # NMI of 1 means identical partitions up to label permutation.
        assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
                                                         labels), 1)
        clustering.connectivity = None
        clustering.fit(X)
        assert_true(np.size(np.unique(clustering.labels_)) == 10)
        # Check that we raise a TypeError on dense matrices
        clustering = AgglomerativeClustering(
            n_clusters=10,
            connectivity=sparse.lil_matrix(
                connectivity.toarray()[:10, :10]),
            linkage=linkage)
        assert_raises(ValueError, clustering.fit, X)
    # Test that using ward with another metric than euclidean raises an
    # exception
    clustering = AgglomerativeClustering(
        n_clusters=10,
        connectivity=connectivity.toarray(),
        affinity="manhattan",
        linkage="ward")
    assert_raises(ValueError, clustering.fit, X)
    # Test using another metric than euclidean works with linkage complete
    for affinity in PAIRED_DISTANCES.keys():
        # Compare our (structured) implementation to scipy
        clustering = AgglomerativeClustering(
            n_clusters=10,
            connectivity=np.ones((n_samples, n_samples)),
            affinity=affinity,
            linkage="complete")
        clustering.fit(X)
        clustering2 = AgglomerativeClustering(
            n_clusters=10,
            connectivity=None,
            affinity=affinity,
            linkage="complete")
        clustering2.fit(X)
        # Fully-connected structured clustering must match the
        # unstructured result (up to label permutation).
        assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
                                                         clustering.labels_),
                            1)
    # Test that using a distance matrix (affinity = 'precomputed') has same
    # results (with connectivity constraints)
    clustering = AgglomerativeClustering(n_clusters=10,
                                         connectivity=connectivity,
                                         linkage="complete")
    clustering.fit(X)
    X_dist = pairwise_distances(X)
    clustering2 = AgglomerativeClustering(n_clusters=10,
                                          connectivity=connectivity,
                                          affinity='precomputed',
                                          linkage="complete")
    clustering2.fit(X_dist)
    assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
    """FeatureAgglomeration should reduce and then reconstruct features."""
    random_state = np.random.RandomState(0)
    mask = np.ones([10, 10], dtype=np.bool)
    data = random_state.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    agglomerator = FeatureAgglomeration(n_clusters=5,
                                        connectivity=connectivity)
    agglomerator.fit(data)
    assert_true(np.size(np.unique(agglomerator.labels_)) == 5)

    reduced = agglomerator.transform(data)
    assert_true(reduced.shape[1] == 5)

    # Inverse transform broadcasts each cluster value back to its features,
    # so each row contains exactly five distinct values.
    reconstructed = agglomerator.inverse_transform(reduced)
    assert_true(np.unique(reconstructed[0]).size == 5)
    assert_array_almost_equal(agglomerator.transform(reconstructed), reduced)

    # Fitting with no samples must raise a ValueError.
    assert_raises(ValueError, agglomerator.fit, data[:0])
def assess_same_labelling(cut1, cut2):
    """Check two labelings agree up to permutation via co-clustering."""
    co_association = []
    for labels in (cut1, cut2):
        n_points = len(labels)
        n_clusters = labels.max() + 1
        # One-hot indicator matrix: row i marks the cluster of point i.
        indicator = np.zeros((n_points, n_clusters))
        indicator[np.arange(n_points), labels] = 1
        # indicator . indicator.T is 1 where two points share a cluster.
        co_association.append(np.dot(indicator, indicator.T))
    assert_true((co_association[0] == co_association[1]).all())
def test_scikit_vs_scipy():
    """Fully-connected scikit-learn linkage should match scipy's linkage."""
    n, p, k = 10, 5, 3
    random_state = np.random.RandomState(0)
    # A dense ndarray (rather than a lil_matrix) checks that non-sparse
    # connectivity matrices are handled correctly too.
    connectivity = np.ones((n, n))
    for linkage in _TREE_BUILDERS.keys():
        for _ in range(5):
            data = .1 * random_state.normal(size=(n, p))
            data -= 4. * np.arange(n)[:, np.newaxis]
            data -= data.mean(axis=1)[:, np.newaxis]
            scipy_out = hierarchy.linkage(data, method=linkage)
            scipy_children = scipy_out[:, :2].astype(np.int)
            children, _, n_leaves, _ = _TREE_BUILDERS[linkage](data,
                                                               connectivity)
            cut = _hc_cut(k, children, n_leaves)
            scipy_cut = _hc_cut(k, scipy_children, n_leaves)
            assess_same_labelling(cut, scipy_cut)
    # Error management in _hc_cut: more clusters than leaves must raise.
    assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
    """Connectivity must be propagated correctly while merging clusters."""
    data = np.array([(.014, .120), (.014, .099), (.014, .097),
                     (.017, .153), (.017, .153), (.018, .153),
                     (.018, .153), (.018, .153), (.018, .153),
                     (.018, .153), (.018, .153), (.018, .153),
                     (.018, .152), (.018, .149), (.018, .144)])
    connectivity = kneighbors_graph(data, 10, include_self=False)
    model = AgglomerativeClustering(
        n_clusters=4, connectivity=connectivity, linkage='ward')
    # If connectivity updates were not propagated during merging, this
    # fit would crash with an IndexError.
    model.fit(data)
def test_ward_tree_children_order():
    """Structured and unstructured ward trees order children identically."""
    # Five random datasets, all with a fully-connected structure.
    n, p = 10, 5
    random_state = np.random.RandomState(0)
    connectivity = np.ones((n, n))
    for _ in range(5):
        data = .1 * random_state.normal(size=(n, p))
        data -= 4. * np.arange(n)[:, np.newaxis]
        data -= data.mean(axis=1)[:, np.newaxis]
        unstructured_tree = ward_tree(data)
        structured_tree = ward_tree(data, connectivity=connectivity)
        assert_array_equal(unstructured_tree[0], structured_tree[0])
def test_ward_linkage_tree_return_distance():
    # Test return_distance option on linkage and ward trees
    # test that return_distance when set true, gives same
    # output on both structured and unstructured clustering.
    n, p = 10, 5
    rng = np.random.RandomState(0)
    connectivity = np.ones((n, n))
    for i in range(5):
        X = .1 * rng.normal(size=(n, p))
        X -= 4. * np.arange(n)[:, np.newaxis]
        X -= X.mean(axis=1)[:, np.newaxis]
        out_unstructured = ward_tree(X, return_distance=True)
        out_structured = ward_tree(X, connectivity=connectivity,
                                   return_distance=True)
        # get children
        children_unstructured = out_unstructured[0]
        children_structured = out_structured[0]
        # check if we got the same clusters
        assert_array_equal(children_unstructured, children_structured)
        # check if the distances are the same
        dist_unstructured = out_unstructured[-1]
        dist_structured = out_structured[-1]
        assert_array_almost_equal(dist_unstructured, dist_structured)
        for linkage in ['average', 'complete']:
            # NOTE(review): the trailing [-1] selects only the last element
            # of linkage_tree's return tuple, yet the code below indexes it
            # again with [0]/[-1] as if it were the full tuple — confirm
            # this is the intended behavior.
            structured_items = linkage_tree(
                X, connectivity=connectivity, linkage=linkage,
                return_distance=True)[-1]
            unstructured_items = linkage_tree(
                X, linkage=linkage, return_distance=True)[-1]
            structured_dist = structured_items[-1]
            unstructured_dist = unstructured_items[-1]
            structured_children = structured_items[0]
            unstructured_children = unstructured_items[0]
            assert_array_almost_equal(structured_dist, unstructured_dist)
            assert_array_almost_equal(
                structured_children, unstructured_children)
    # test on the following dataset where we know the truth
    # taken from scipy/cluster/tests/hierarchy_test_data.py
    X = np.array([[1.43054825, -7.5693489],
                  [6.95887839, 6.82293382],
                  [2.87137846, -9.68248579],
                  [7.87974764, -6.05485803],
                  [8.24018364, -6.09495602],
                  [7.39020262, 8.54004355]])
    # truth
    # Each row of the expected linkage matrices is
    # [child_a, child_b, merge_distance, cluster_size].
    linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
                               [1., 5., 1.77045373, 2.],
                               [0., 2., 2.55760419, 2.],
                               [6., 8., 9.10208346, 4.],
                               [7., 9., 24.7784379, 6.]])
    linkage_X_complete = np.array(
        [[3., 4., 0.36265956, 2.],
         [1., 5., 1.77045373, 2.],
         [0., 2., 2.55760419, 2.],
         [6., 8., 6.96742194, 4.],
         [7., 9., 18.77445997, 6.]])
    linkage_X_average = np.array(
        [[3., 4., 0.36265956, 2.],
         [1., 5., 1.77045373, 2.],
         [0., 2., 2.55760419, 2.],
         [6., 8., 6.55832839, 4.],
         [7., 9., 15.44089605, 6.]])
    n_samples, n_features = np.shape(X)
    connectivity_X = np.ones((n_samples, n_samples))
    out_X_unstructured = ward_tree(X, return_distance=True)
    out_X_structured = ward_tree(X, connectivity=connectivity_X,
                                 return_distance=True)
    # check that the labels are the same
    assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
    assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
    # check that the distances are correct
    assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
    assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
    linkage_options = ['complete', 'average']
    X_linkage_truth = [linkage_X_complete, linkage_X_average]
    for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
        out_X_unstructured = linkage_tree(
            X, return_distance=True, linkage=linkage)
        out_X_structured = linkage_tree(
            X, connectivity=connectivity_X, linkage=linkage,
            return_distance=True)
        # check that the labels are the same
        assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
        assert_array_equal(X_truth[:, :2], out_X_structured[0])
        # check that the distances are correct
        assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
        assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
    """Non item-assignable connectivity with several components must work.

    Non-regression test: connectivity fixing used to fail on matrices
    that do not support item assignment.
    """
    points = np.array([[0, 0], [1, 1]])
    # A mask with two components forces the connectivity-fixing code path.
    component_mask = np.array([[True, False], [False, True]])
    graph = grid_to_graph(n_x=2, n_y=2, mask=component_mask)
    model = AgglomerativeClustering(connectivity=graph, linkage='ward')
    assert_warns(UserWarning, model.fit, points)
def test_int_float_dict():
    """IntFloatDict should store values and support the C merge helpers."""
    random_state = np.random.RandomState(0)
    keys = np.unique(random_state.randint(100, size=10).astype(np.intp))
    values = random_state.rand(len(keys))
    mapping = IntFloatDict(keys, values)
    for key, value in zip(keys, values):
        assert mapping[key] == value
    second_keys = np.arange(50).astype(np.intp)[::2]
    second_values = 0.5 * np.ones(50)[::2]
    second = IntFloatDict(second_keys, second_values)
    # Complete smoke test of the merge helpers.
    max_merge(mapping, second, mask=np.ones(100, dtype=np.intp),
              n_a=1, n_b=1)
    average_merge(mapping, second, mask=np.ones(100, dtype=np.intp),
                  n_a=1, n_b=1)
def test_connectivity_callable():
    """A callable connectivity should behave like a precomputed graph."""
    random_state = np.random.RandomState(0)
    data = random_state.rand(20, 5)
    graph = kneighbors_graph(data, 3, include_self=False)
    from_graph = AgglomerativeClustering(connectivity=graph)
    from_callable = AgglomerativeClustering(
        connectivity=partial(kneighbors_graph, n_neighbors=3,
                             include_self=False))
    from_graph.fit(data)
    from_callable.fit(data)
    assert_array_equal(from_graph.labels_, from_callable.labels_)
def test_connectivity_ignores_diagonal():
    """Self-edges on the connectivity diagonal should not change labels."""
    random_state = np.random.RandomState(0)
    data = random_state.rand(20, 5)
    without_self = kneighbors_graph(data, 3, include_self=False)
    with_self = kneighbors_graph(data, 3, include_self=True)
    model_without = AgglomerativeClustering(connectivity=without_self)
    model_with = AgglomerativeClustering(connectivity=with_self)
    model_without.fit(data)
    model_with.fit(data)
    assert_array_equal(model_without.labels_, model_with.labels_)
def test_compute_full_tree():
    """The full merge tree is built exactly when n_clusters is small."""
    random_state = np.random.RandomState(0)
    data = random_state.randn(10, 2)
    connectivity = kneighbors_graph(data, 5, include_self=False)

    # With a small n_clusters the full tree should be built, i.e. the
    # number of merges is n_samples - 1.
    model = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
    model.fit(data)
    n_samples = data.shape[0]
    n_merges = model.children_.shape[0]
    assert_equal(n_merges, n_samples - 1)

    # When n_clusters exceeds max(100, 0.02 * n_samples), merging should
    # stop early, once n_clusters clusters remain.
    n_clusters = 101
    data = random_state.randn(200, 2)
    connectivity = kneighbors_graph(data, 10, include_self=False)
    model = AgglomerativeClustering(n_clusters=n_clusters,
                                    connectivity=connectivity)
    model.fit(data)
    n_samples = data.shape[0]
    n_merges = model.children_.shape[0]
    assert_equal(n_merges, n_samples - n_clusters)
def test_n_components():
    """Each tree builder should report the connectivity's component count."""
    random_state = np.random.RandomState(0)
    data = random_state.rand(5, 5)
    # An identity connectivity matrix has five disconnected components.
    connectivity = np.eye(5)
    for builder in _TREE_BUILDERS.values():
        assert_equal(ignore_warnings(builder)(data, connectivity)[1], 5)
def test_agg_n_clusters():
    """Non-positive n_clusters values should raise a descriptive error."""
    random_state = np.random.RandomState(0)
    data = random_state.rand(20, 10)
    for bad_n_clusters in [-1, 0]:
        model = AgglomerativeClustering(n_clusters=bad_n_clusters)
        expected_message = ("n_clusters should be an integer greater than 0."
                            " %s was provided." % str(model.n_clusters))
        assert_raise_message(ValueError, expected_message, model.fit, data)
| bsd-3-clause |
Itxaka/st2 | st2reactor/st2reactor/rules/datatransform.py | 7 | 2116 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from st2common.constants.rules import TRIGGER_PAYLOAD_PREFIX
from st2common.constants.system import SYSTEM_KV_PREFIX
from st2common.services.keyvalues import KeyValueLookup
from st2common.util import jinja as jinja_utils
class Jinja2BasedTransformer(object):
    # Callable that renders Jinja templates in a mapping against a trigger
    # payload plus the datastore (system key-value) lookup.
    def __init__(self, payload):
        # Pre-build the payload portion of the render context once; the
        # datastore lookup is added fresh on every call.
        self._payload_context = Jinja2BasedTransformer.\
            _construct_context(TRIGGER_PAYLOAD_PREFIX, payload, {})
    def __call__(self, mapping):
        # Shallow-copy so per-call additions don't leak into the cached context.
        context = copy.copy(self._payload_context)
        context[SYSTEM_KV_PREFIX] = KeyValueLookup()
        return jinja_utils.render_values(mapping=mapping, context=context)
    @staticmethod
    def _construct_context(prefix, data, context):
        # Returns *context* untouched when there is no payload data.
        if data is None:
            return context
        # NOTE(review): the passed-in `context` argument is discarded here
        # and replaced with a fresh dict whenever `data` is not None —
        # confirm callers never rely on merging into an existing context.
        context = {SYSTEM_KV_PREFIX: KeyValueLookup()}
        # add in the data in the context without any processing. Payload may
        # contain renderable keys however those are often due to nature of the
        # events being posted e.g. ActionTrigger with template variables. Rendering
        # these values would lead to bugs in the data so best to avoid.
        if prefix not in context:
            context[prefix] = {}
        context[prefix].update(data)
        return context
def get_transformer(payload):
    """Return a callable transformer primed with the given trigger payload."""
    return Jinja2BasedTransformer(payload)
| apache-2.0 |
kbsezginel/lammps-data-file | tests/test_extrapolate_bonds.py | 1 | 2374 | from lammps_data.bonds import extrapolate_bonds
def test_atoms_too_close_should_not_be_bonded():
    # 0.159 A is just below the minimum separation that counts as a bond.
    positions = [(0.0, 0.0, 0.0, 1), (0.0, 0.0, 0.159, 1)]
    assert len(extrapolate_bonds(positions)) == 0
def test_atoms_at_0_16_angstrom_should_be_bonded():
    # 0.16 A is the shortest separation that still produces a bond.
    positions = [(0.0, 0.0, 0.0, 1), (0.0, 0.0, 0.16, 1)]
    result = extrapolate_bonds(positions)
    assert len(result) == 1
    assert result == [(0, 1)]
def test_h_atoms_at_lte_1_09_angstrom_should_be_bonded():
    # Hydrogen pairs (atomic number 1) bond up to and including 1.09 A.
    positions = [(0.0, 0.0, 0.0, 1), (0.0, 0.0, 1.09, 1)]
    result = extrapolate_bonds(positions)
    assert len(result) == 1
    assert result == [(0, 1)]
def test_h_atoms_at_gt_1_09_angstrom_should_not_be_bonded():
    # Just past the 1.09 A hydrogen cutoff no bond should be found.
    positions = [(0.0, 0.0, 0.0, 1), (0.0, 0.0, 1.10, 1)]
    assert len(extrapolate_bonds(positions)) == 0
def test_si_atoms_at_lte_2_77_angstrom_should_be_bonded():
    # Silicon pairs (atomic number 14) bond up to and including 2.77 A.
    positions = [(0.0, 0.0, 0.0, 14), (0.0, 0.0, 2.77, 14)]
    result = extrapolate_bonds(positions)
    assert len(result) == 1
    assert result == [(0, 1)]
def test_si_atoms_at_gt_2_77_angstrom_should_not_be_bonded():
    # Just past the 2.77 A silicon cutoff no bond should be found.
    positions = [(0.0, 0.0, 0.0, 14), (0.0, 0.0, 2.78, 14)]
    assert len(extrapolate_bonds(positions)) == 0
def test_bond_tuples_should_be_sorted_by_atom_index():
    # Regardless of input ordering, each bond tuple lists the lower atom
    # index first.
    for positions in ([(0.0, 0.0, 0.0, 1), (0.0, 0.0, 0.16, 1)],
                      [(0.0, 0.0, 0.16, 1), (0.0, 0.0, 0.0, 1)]):
        assert extrapolate_bonds(positions) == [(0, 1)]
def test_ethane_should_have_seven_bonds():
    # Ethane: two carbons (6), each bonded to three hydrogens (1), plus
    # the central C-C bond — seven bonds in total.
    positions = [( 1.185080, -0.003838,  0.987524, 1),
                 ( 0.751621, -0.022441, -0.020839, 6),
                 ( 1.166929,  0.833015, -0.569312, 1),
                 ( 1.115519, -0.932892, -0.514525, 1),
                 (-0.751587,  0.022496,  0.020891, 6),
                 (-1.166882, -0.833372,  0.568699, 1),
                 (-1.115691,  0.932608,  0.515082, 1),
                 (-1.184988,  0.004424, -0.987522, 1)]
    found = extrapolate_bonds(positions)
    assert len(found) == 7
    expected = [(0, 1), (1, 2), (1, 3), (1, 4), (4, 5), (4, 6), (4, 7)]
    # Compare as sets of unordered pairs so tuple direction is irrelevant.
    assert ({frozenset(pair) for pair in found}
            == {frozenset(pair) for pair in expected})
| mit |
igboyes/virtool | virtool/software/utils.py | 2 | 2626 | import logging
import os
import shutil
import sys
from typing import List
import semver
logger = logging.getLogger(__name__)
INSTALL_PATH = sys.path[0]
SOFTWARE_REPO = "virtool/virtool"
RELEASE_KEYS = [
"name",
"body",
"prerelease",
"published_at",
"html_url"
]
def check_software_files(path):
    """Return ``True`` if *path* looks like a valid unpacked software release.

    A valid release contains ``client``, ``run``, and ``VERSION`` entries,
    and the ``client`` directory holds ``favicon.ico``, ``index.html``, and
    at least one bundled ``app.*.js`` file.
    """
    entries = set(os.listdir(path))

    if not {"client", "run", "VERSION"} <= entries:
        return False

    client_files = os.listdir(os.path.join(path, "client"))

    if not ("favicon.ico" in client_files and "index.html" in client_files):
        return False

    return any("app." in name and ".js" in name for name in client_files)
def copy_software_files(src, dest):
    """Replace the software files under *dest* with those from *src*.

    Known release directories under *dest* are removed first, then every
    entry of *src* is copied over, overwriting any existing counterpart.
    """
    # Clear out the directories that are replaced wholesale on upgrade.
    for subdir in ("templates", "lib", "client", "assets"):
        shutil.rmtree(os.path.join(dest, subdir), ignore_errors=True)

    for entry in os.listdir(src):
        source_path = os.path.join(src, entry)
        target_path = os.path.join(dest, entry)

        # Remove whatever already sits at the target location before
        # copying, so files can replace directories and vice versa.
        if os.path.isfile(target_path):
            os.remove(target_path)

        if os.path.isfile(source_path):
            shutil.copy(source_path, target_path)

        if os.path.isdir(target_path):
            shutil.rmtree(target_path)

        if os.path.isdir(source_path):
            shutil.copytree(source_path, target_path)
def filter_releases_by_channel(releases: List[dict], channel: str) -> List[dict]:
    """
    Filter releases by channel (stable, beta, or alpha).

    :param releases: a list of releases
    :param channel: a software channel, one of "stable", "beta", or "alpha"
    :return: a filtered list of releases
    :raises ValueError: if `channel` is not a recognized channel name
    """
    if channel not in ("stable", "beta", "alpha"):
        raise ValueError("Channel must be one of 'stable', 'beta', 'alpha'")

    if channel == "stable":
        # Stable excludes every prerelease build.
        return [r for r in releases if "alpha" not in r["name"] and "beta" not in r["name"]]

    if channel == "beta":
        # Beta excludes only alpha builds.
        return [r for r in releases if "alpha" not in r["name"]]

    # Alpha includes everything; return a copy so callers cannot mutate
    # the input list through the return value.
    return list(releases)
def filter_releases_by_newer(releases: List[dict], version: str) -> List[dict]:
    """
    Returns a list containing only releases with versions later than the passed `version`.

    The passed `releases` are assumed to be sorted by descending version.

    :param releases: a list of releases
    :param version: the version the returned releases must be later than
    :return: filtered releases
    """
    # Strip the leading "v" once so semver can parse the string. The
    # original code re-stripped `stripped` at every comparison, which was
    # redundant since it is already normalized here.
    stripped = version.replace("v", "")

    newer = list()

    for release in releases:
        # Releases are sorted newest-first, so stop at the first release
        # that is not strictly newer than the reference version.
        if semver.compare(release["name"].replace("v", ""), stripped) < 1:
            return newer

        newer.append(release)

    return newer
| mit |
polyfunc/flask-todolist | tests/test_basics.py | 1 | 8434 | # -*- coding: utf-8 -*-
import unittest
from flask import current_app
from app import create_app, db
from app.models import User, Todo, TodoList
class TodolistTestCase(unittest.TestCase):
    """Model-level tests for User, Todo, and TodoList.

    Each test runs against a freshly created database inside a pushed
    application context (see setUp/tearDown).
    """
    def setUp(self):
        # Create a testing app, push its context, and build a clean schema.
        self.app = create_app('testing')
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()
        # Shared fixture values used throughout the tests.
        self.username_adam = 'adam'
        self.shopping_list_title = 'shopping list'
        self.read_todo_description = 'Read a book about TDD'
    def tearDown(self):
        # Drop all tables and pop the context so tests stay isolated.
        db.session.remove()
        db.drop_all()
        self.app_context.pop()
    @staticmethod
    def add_user(username):
        # Create a user via the model's dict constructor, then read it
        # back from the database to verify persistence.
        user_data = {
            'email': username + '@example.com',
            'username': username,
            'password': 'correcthorsebatterystaple'
        }
        user = User.from_dict(user_data)
        return User.query.filter_by(username=user.username).first()
    @staticmethod
    def add_todo(description, user, todolist_id=None):
        # Create a todo attached to the given list (or a fresh list when
        # todolist_id is None) and return the persisted instance.
        todo_data = {
            'description': description,
            'todolist_id': todolist_id or TodoList().save().id,
            'creator': user.username
        }
        read_todo = Todo.from_dict(todo_data)
        return Todo.query.filter_by(id=read_todo.id).first()
    def test_app_exists(self):
        self.assertTrue(current_app is not None)
    def test_app_is_testing(self):
        self.assertTrue(current_app.config['TESTING'])
    def test_password_setter(self):
        u = User(password='correcthorsebatterystaple')
        self.assertTrue(u.password_hash is not None)
    def test_no_password_getter(self):
        # Reading the plain password must be impossible.
        u = User(password='correcthorsebatterystaple')
        with self.assertRaises(AttributeError):
            u.password
    def test_password_verification(self):
        u = User(password='correcthorsebatterystaple')
        self.assertTrue(u.verify_password('correcthorsebatterystaple'))
        self.assertFalse(u.verify_password('incorrecthorsebatterystaple'))
    def test_password_salts_are_random(self):
        # The same password must hash differently for different users.
        u = User(password='correcthorsebatterystaple')
        u2 = User(password='correcthorsebatterystaple')
        self.assertNotEqual(u.password_hash, u2.password_hash)
    def test_adding_new_user(self):
        new_user = self.add_user(self.username_adam)
        self.assertEqual(new_user.username, self.username_adam)
        self.assertEqual(new_user.email, self.username_adam + '@example.com')
    def test_adding_new_todo_without_user(self):
        # Anonymous todos are allowed; creator stays None.
        todo = Todo(description=self.read_todo_description,
                    todolist_id=TodoList().save().id).save()
        todo_from_db = Todo.query.filter_by(id=todo.id).first()
        self.assertEqual(todo_from_db.description, self.read_todo_description)
        self.assertIsNone(todo_from_db.creator)
    def test_adding_new_todo_with_user(self):
        some_user = self.add_user(self.username_adam)
        new_todo = self.add_todo(self.read_todo_description, some_user)
        self.assertEqual(new_todo.description, self.read_todo_description)
        self.assertEqual(new_todo.creator, some_user.username)
    def test_closing_todo(self):
        some_user = self.add_user(self.username_adam)
        new_todo = self.add_todo(self.read_todo_description, some_user)
        self.assertFalse(new_todo.is_finished)
        new_todo.finished()
        self.assertTrue(new_todo.is_finished)
        self.assertEqual(new_todo.description, self.read_todo_description)
        self.assertEqual(new_todo.creator, some_user.username)
    def test_reopen_closed_todo(self):
        some_user = self.add_user(self.username_adam)
        new_todo = self.add_todo(self.read_todo_description, some_user)
        self.assertFalse(new_todo.is_finished)
        new_todo.finished()
        self.assertTrue(new_todo.is_finished)
        new_todo.reopen()
        self.assertFalse(new_todo.is_finished)
        self.assertEqual(new_todo.description, self.read_todo_description)
        self.assertEqual(new_todo.creator, some_user.username)
    def test_adding_two_todos_with_the_same_description(self):
        # Descriptions are not unique; ids must still differ.
        some_user = self.add_user(self.username_adam)
        first_todo = self.add_todo(self.read_todo_description, some_user)
        second_todo = self.add_todo(self.read_todo_description, some_user)
        self.assertEqual(first_todo.description, second_todo.description)
        self.assertEqual(first_todo.creator, second_todo.creator)
        self.assertNotEqual(first_todo.id, second_todo.id)
    def test_adding_new_todolist_without_user(self):
        todolist = TodoList(self.shopping_list_title).save()
        todolist_from_db = TodoList.query.filter_by(id=todolist.id).first()
        self.assertEqual(todolist_from_db.title, self.shopping_list_title)
        self.assertIsNone(todolist_from_db.creator)
    def test_adding_new_todolist_with_user(self):
        user = self.add_user(self.username_adam)
        todolist = TodoList(title=self.shopping_list_title,
                            creator=user.username).save()
        todolist_from_db = TodoList.query.filter_by(id=todolist.id).first()
        self.assertEqual(todolist_from_db.title, self.shopping_list_title)
        self.assertEqual(todolist_from_db.creator, user.username)
    def test_adding_two_todolists_with_the_same_title(self):
        # Titles are not unique; ids must still differ.
        user = self.add_user(self.username_adam)
        ftodolist = TodoList(title=self.shopping_list_title,
                             creator=user.username).save()
        first_todolist = TodoList.query.filter_by(id=ftodolist.id).first()
        stodolist = TodoList(title=self.shopping_list_title,
                             creator=user.username).save()
        second_todolist = TodoList.query.filter_by(id=stodolist.id).first()
        self.assertEqual(first_todolist.title,
                         second_todolist.title)
        self.assertEqual(first_todolist.creator, second_todolist.creator)
        self.assertNotEqual(first_todolist.id, second_todolist.id)
    def test_adding_todo_to_todolist(self):
        user = self.add_user(self.username_adam)
        todolist = TodoList(title=self.shopping_list_title,
                            creator=user.username).save()
        todolist_from_db = TodoList.query.filter_by(id=todolist.id).first()
        todo_description = 'A book about TDD'
        todo = self.add_todo(todo_description, user, todolist_from_db.id)
        self.assertEqual(todolist_from_db.todo_count, 1)
        self.assertEqual(todolist.title, self.shopping_list_title)
        self.assertEqual(todolist.creator, user.username)
        self.assertEqual(todo.todolist_id, todolist_from_db.id)
        self.assertEqual(todolist.todos.first(), todo)
    def test_counting_todos_of_todolist(self):
        # open_count/finished_count must track the todo's state changes.
        user = self.add_user(self.username_adam)
        todolist = TodoList(title=self.shopping_list_title,
                            creator=user.username).save()
        todolist_from_db = TodoList.query.filter_by(id=todolist.id).first()
        todo_description = 'A book about TDD'
        todo = self.add_todo(todo_description, user, todolist_from_db.id)
        self.assertEqual(todolist.title, self.shopping_list_title)
        self.assertEqual(todolist.creator, user.username)
        self.assertEqual(todo.todolist_id, todolist_from_db.id)
        self.assertEqual(todolist.todos.first(), todo)
        self.assertEqual(todolist_from_db.finished_count, 0)
        self.assertEqual(todolist_from_db.open_count, 1)
        todo.finished()
        self.assertEqual(todolist_from_db.finished_count, 1)
        self.assertEqual(todolist_from_db.open_count, 0)
    # test delete functions
    def test_delete_user(self):
        user = self.add_user(self.username_adam)
        user_id = user.id
        user.delete()
        self.assertIsNone(User.query.get(user_id))
    def test_delete_todolist(self):
        todolist = TodoList(self.shopping_list_title).save()
        todolist_id = todolist.id
        todolist.delete()
        self.assertIsNone(TodoList.query.get(todolist_id))
    def test_delete_todo(self):
        todolist = TodoList(self.shopping_list_title).save()
        todo = Todo('A book about TDD', todolist.id).save()
        self.assertEqual(todolist.todo_count, 1)
        todo_id = todo.id
        todo.delete()
        self.assertIsNone(Todo.query.get(todo_id))
        self.assertEqual(todolist.todo_count, 0)
| mit |
refnode/python-cycle | bootstrap.py | 172 | 6501 | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
# Scratch directory that receives the freshly downloaded setuptools and
# zc.buildout eggs; removed again at the end of the script.
tmpeggs = tempfile.mkdtemp()

usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''

# Command-line interface: all options are optional; unknown positional
# arguments are forwarded to buildout's own main() later in the script.
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", help="use a specific zc.buildout version")
parser.add_option("-t", "--accept-buildout-test-releases",
                  dest='accept_buildout_test_releases',
                  action="store_true", default=False,
                  help=("Normally, if you do not specify a --version, the "
                        "bootstrap script and buildout gets the newest "
                        "*final* versions of zc.buildout and its recipes and "
                        "extensions for you. If you use this flag, "
                        "bootstrap and buildout will get the newest releases "
                        "even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
                  help=("Specify the path to the buildout configuration "
                        "file to be used."))
parser.add_option("-f", "--find-links",
                  help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
                  action="store_true", default=False,
                  help=("Let bootstrap.py use existing site packages"))
parser.add_option("--setuptools-version",
                  help="use a specific setuptools version")

options, args = parser.parse_args()
######################################################################
# load/install setuptools

try:
    # When site packages are allowed, pre-import setuptools/pkg_resources
    # so ez_setup can reuse an existing installation instead of
    # downloading a fresh copy.
    if options.allow_site_packages:
        import setuptools
        import pkg_resources
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib2 import urlopen  # Python 2 fallback

ez = {}
# NOTE(review): executes code fetched over the network; this is the
# long-standing upstream bootstrap mechanism, kept as-is.
exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)

if not options.allow_site_packages:
    # ez_setup imports site, which adds site packages
    # this will remove them from the path to ensure that incompatible versions
    # of setuptools are not in the path
    import site
    # inside a virtualenv, there is no 'getsitepackages'.
    # We can't remove these reliably
    if hasattr(site, 'getsitepackages'):
        for sitepackage_path in site.getsitepackages():
            sys.path[:] = [x for x in sys.path if sitepackage_path not in x]

setup_args = dict(to_dir=tmpeggs, download_delay=0)

if options.setuptools_version is not None:
    setup_args['version'] = options.setuptools_version

ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources

# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
    if path not in pkg_resources.working_set.entries:
        pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout

ws = pkg_resources.working_set

# easy_install invocation that will install zc.buildout into tmpeggs.
cmd = [sys.executable, '-c',
       'from setuptools.command.easy_install import main; main()',
       '-mZqNxd', tmpeggs]

find_links = os.environ.get(
    'bootstrap-testing-find-links',
    options.find_links or
    ('http://downloads.buildout.org/'
     if options.accept_buildout_test_releases else None)
    )
if find_links:
    cmd.extend(['-f', find_links])

setuptools_path = ws.find(
    pkg_resources.Requirement.parse('setuptools')).location

requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
    # Figure out the most recent final version of zc.buildout.
    import setuptools.package_index
    _final_parts = '*final-', '*final'

    def _final_version(parsed_version):
        """Return True if *parsed_version* denotes a final (non-pre) release."""
        try:
            return not parsed_version.is_prerelease
        except AttributeError:
            # Older setuptools: parsed versions are tuples of string parts.
            for part in parsed_version:
                if (part[:1] == '*') and (part not in _final_parts):
                    return False
            return True

    index = setuptools.package_index.PackageIndex(
        search_path=[setuptools_path])
    if find_links:
        index.add_find_links((find_links,))
    req = pkg_resources.Requirement.parse(requirement)
    if index.obtain(req) is not None:
        best = []
        bestv = None
        for dist in index[req.project_name]:
            distv = dist.parsed_version
            if _final_version(distv):
                if bestv is None or distv > bestv:
                    best = [dist]
                    bestv = distv
                elif distv == bestv:
                    best.append(dist)
        if best:
            best.sort()
            version = best[-1].version

if version:
    requirement = '=='.join((requirement, version))
cmd.append(requirement)

import subprocess
if subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=setuptools_path)) != 0:
    raise Exception(
        "Failed to execute command:\n%s" % repr(cmd)[1:-1])

######################################################################
# Import and run buildout

ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout

if not [a for a in args if '=' not in a]:
    args.append('bootstrap')

# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
    args[0:0] = ['-c', options.config_file]

# FIX: run buildout inside try/finally so the temporary egg directory is
# removed even when main() raises or calls sys.exit(); previously a failed
# bootstrap leaked tmpeggs on disk.
try:
    zc.buildout.buildout.main(args)
finally:
    shutil.rmtree(tmpeggs)
| apache-2.0 |
jameskdev/lge-kernel-msm8960-common | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
implausible/pangyp | gyp/pylib/gyp/MSVSUserFile.py | 2710 | 5094 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
  """Visual Studio XML user user file writer."""

  def __init__(self, user_file_path, version, name):
    """Initializes the user file.

    Args:
      user_file_path: Path to the user file.
      version: Version info.
      name: Name of the user file.
    """
    self.user_file_path = user_file_path
    self.version = version
    self.name = name
    # Maps configuration name -> easy_xml node ['Configuration', {...}, ...].
    self.configurations = {}

  def AddConfig(self, name):
    """Adds a configuration to the project.

    Args:
      name: Configuration name.
    """
    self.configurations[name] = ['Configuration', {'Name': name}]

  def AddDebugSettings(self, config_name, command, environment=None,
                       working_directory=""):
    """Adds a DebugSettings node to the user file for a particular config.

    Args:
      config_name: Name of the configuration the settings are attached to.
      command: command line to run. First element in the list is the
        executable. All elements of the command will be quoted if
        necessary.
      environment: dict of environment variables for the debug session.
        (optional)
      working_directory: working directory for the debug session. (optional)
    """
    # FIX: 'environment' previously defaulted to a shared mutable {};
    # None keeps the same falsy behavior without the shared-default pitfall.
    command = _QuoteWin32CommandLineArgs(command)

    abs_command = _FindCommandInPath(command[0])

    if environment and isinstance(environment, dict):
      # Render the environment as MSVS expects: space-separated KEY="value".
      # FIX: .items() instead of Python-2-only .iteritems().
      env_list = ['%s="%s"' % (key, val)
                  for (key, val) in environment.items()]
      environment = ' '.join(env_list)
    else:
      environment = ''

    n_cmd = ['DebugSettings',
             {'Command': abs_command,
              'WorkingDirectory': working_directory,
              'CommandArguments': " ".join(command[1:]),
              'RemoteMachine': socket.gethostname(),
              'Environment': environment,
              'EnvironmentMerge': 'true',
              # Currently these are all "dummy" values that we're just setting
              # in the default manner that MSVS does it. We could use some of
              # these to add additional capabilities, I suppose, but they might
              # not have parity with other platforms then.
              'Attach': 'false',
              'DebuggerType': '3',  # 'auto' debugger
              'Remote': '1',
              'RemoteCommand': '',
              'HttpUrl': '',
              'PDBPath': '',
              'SQLDebugging': '',
              'DebuggerFlavor': '0',
              'MPIRunCommand': '',
              'MPIRunArguments': '',
              'MPIRunWorkingDirectory': '',
              'ApplicationCommand': '',
              'ApplicationArguments': '',
              'ShimCommand': '',
              'MPIAcceptMode': '',
              'MPIAcceptFilter': ''
             }]

    # Find the config, and add it if it doesn't exist.
    if config_name not in self.configurations:
      self.AddConfig(config_name)

    # Add the DebugSettings onto the appropriate config.
    self.configurations[config_name].append(n_cmd)

  def WriteIfChanged(self):
    """Writes the user file."""
    configs = ['Configurations']
    # FIX: sorted(...items()) instead of .iteritems() -- deterministic output
    # on both Python 2 and Python 3.
    for config, spec in sorted(self.configurations.items()):
      configs.append(spec)

    content = ['VisualStudioUserFile',
               {'Version': self.version.ProjectVersion(),
                'Name': self.name
               },
               configs]
    easy_xml.WriteXmlIfChanged(content, self.user_file_path,
                               encoding="Windows-1252")
| mit |
geonition/django_opensocial_mediaitems | opensocial_mediaitems/views.py | 1 | 1576 | from geonition_utils.HttpResponseExtenders import HttpResponseNotImplemented
from geonition_utils.HttpResponseExtenders import HttpResponseCreated
from geonition_utils.HttpResponseExtenders import HttpResponseUnauthorized
from django.core.urlresolvers import reverse
from models import MediaItem
import json
def media_items(request, *args, **kwargs):
    """
    Handle the OpenSocial mediaItems endpoint.

    albums are extended with a @all that can be used to request
    mediaitems when the albums service is not in use
    """
    # Only uploads (POST) are supported; everything else is unimplemented.
    if request.method != 'POST':
        return media_items_not_implemented()

    if not request.user.is_authenticated():
        return HttpResponseUnauthorized("To save files you have to sign in first")

    item = MediaItem(owner_id=request.user,
                     media_file=request.FILES.get('mediaitem'))
    item.save()

    payload = {
        "msg": "The file was uploaded and saved",
        "mediaitem-id": item.id,
        "location": "%s/%s/@self/@all/%s" % (reverse('mediaItems'),
                                             request.user,
                                             item.id),
    }
    return HttpResponseCreated(json.dumps(payload))
def media_items_not_implemented():
    """Return a 501 response for the unimplemented mediaItems operations."""
    return HttpResponseNotImplemented(
        "This part of mediaItems service has not been implemented")
nemesisdesign/django | tests/test_client/tests.py | 11 | 33177 | # -*- coding: utf-8 -*-
"""
Testing using the Test Client
The test client is a class that can act like a simple
browser for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
``Client`` objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the ``Client`` instance.
This is not intended as a replacement for Twill, Selenium, or
other browser automation frameworks - it is here to allow
testing against the contexts and templates produced by a view,
rather than the HTML rendered to the end-user.
"""
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.core import mail
from django.http import HttpResponse
from django.test import (
Client, RequestFactory, SimpleTestCase, TestCase, override_settings,
)
from django.urls import reverse_lazy
from .views import get_view, post_view, trace_view
@override_settings(ROOT_URLCONF='test_client.urls')
class ClientTest(TestCase):
@classmethod
def setUpTestData(cls):
    # Shared fixtures for the whole class: one active and one inactive
    # user, reused by the login/logout/force_login tests below.
    cls.u1 = User.objects.create_user(username='testclient', password='password')
    cls.u2 = User.objects.create_user(username='inactive', password='password', is_active=False)
def test_get_view(self):
    "GET a view"
    # The data is ignored, but let's check it doesn't crash the system
    # anyway.
    data = {'var': '\xf2'}
    response = self.client.get('/get_view/', data)

    # Check some response details
    self.assertContains(response, 'This is a test')
    self.assertEqual(response.context['var'], '\xf2')
    self.assertEqual(response.templates[0].name, 'GET Template')

def test_get_post_view(self):
    "GET a view that normally expects POSTs"
    response = self.client.get('/post_view/', {})

    # Check some response details
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.templates[0].name, 'Empty GET Template')
    self.assertTemplateUsed(response, 'Empty GET Template')
    self.assertTemplateNotUsed(response, 'Empty POST Template')

def test_empty_post(self):
    "POST an empty dictionary to a view"
    response = self.client.post('/post_view/', {})

    # Check some response details
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.templates[0].name, 'Empty POST Template')
    self.assertTemplateNotUsed(response, 'Empty GET Template')
    self.assertTemplateUsed(response, 'Empty POST Template')

def test_post(self):
    "POST some data to a view"
    post_data = {
        'value': 37
    }
    response = self.client.post('/post_view/', post_data)

    # Check some response details
    self.assertEqual(response.status_code, 200)
    # Form data is transmitted urlencoded, so the int arrives as the
    # string '37' in the view.
    self.assertEqual(response.context['data'], '37')
    self.assertEqual(response.templates[0].name, 'POST Template')
    self.assertContains(response, 'Data received')

def test_trace(self):
    """TRACE a view"""
    response = self.client.trace('/trace_view/')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.context['method'], 'TRACE')
    self.assertEqual(response.templates[0].name, 'TRACE Template')
def test_response_headers(self):
    "Check the value of HTTP headers returned in a response"
    response = self.client.get("/header_view/")

    self.assertEqual(response['X-DJANGO-TEST'], 'Slartibartfast')

def test_response_attached_request(self):
    """
    Check that the returned response has a ``request`` attribute with the
    originating environ dict and a ``wsgi_request`` with the originating
    ``WSGIRequest`` instance.
    """
    response = self.client.get("/header_view/")
    self.assertTrue(hasattr(response, 'request'))
    self.assertTrue(hasattr(response, 'wsgi_request'))
    # Every environ key sent with the request must appear, unchanged, on
    # the attached WSGIRequest.
    for key, value in response.request.items():
        self.assertIn(key, response.wsgi_request.environ)
        self.assertEqual(response.wsgi_request.environ[key], value)

def test_response_resolver_match(self):
    """
    The response contains a ResolverMatch instance.
    """
    response = self.client.get('/header_view/')
    self.assertTrue(hasattr(response, 'resolver_match'))

def test_response_resolver_match_redirect_follow(self):
    """
    The response ResolverMatch instance contains the correct
    information when following redirects.
    """
    response = self.client.get('/redirect_view/', follow=True)
    self.assertEqual(response.resolver_match.url_name, 'get_view')

def test_response_resolver_match_regular_view(self):
    """
    The response ResolverMatch instance contains the correct
    information when accessing a regular view.
    """
    response = self.client.get('/get_view/')
    self.assertEqual(response.resolver_match.url_name, 'get_view')
def test_raw_post(self):
    "POST raw data (with a content type) to a view"
    test_doc = """<?xml version="1.0" encoding="utf-8"?>
    <library><book><title>Blink</title><author>Malcolm Gladwell</author></book></library>
    """
    response = self.client.post("/raw_post_view/", test_doc,
                                content_type="text/xml")
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.templates[0].name, "Book template")
    self.assertEqual(response.content, b"Blink - Malcolm Gladwell")

def test_insecure(self):
    "GET a URL through http"
    response = self.client.get('/secure_view/', secure=False)
    self.assertFalse(response.test_was_secure_request)
    self.assertEqual(response.test_server_port, '80')

def test_secure(self):
    "GET a URL through https"
    response = self.client.get('/secure_view/', secure=True)
    self.assertTrue(response.test_was_secure_request)
    self.assertEqual(response.test_server_port, '443')
def test_redirect(self):
    "GET a URL that redirects elsewhere"
    response = self.client.get('/redirect_view/')
    # Check that the response was a 302 (redirect)
    self.assertRedirects(response, '/get_view/')

def test_redirect_with_query(self):
    "GET a URL that redirects with given GET parameters"
    response = self.client.get('/redirect_view/', {'var': 'value'})
    # Check if parameters are intact
    self.assertRedirects(response, '/get_view/?var=value')

def test_permanent_redirect(self):
    "GET a URL that redirects permanently elsewhere"
    response = self.client.get('/permanent_redirect_view/')
    # Check that the response was a 301 (permanent redirect)
    self.assertRedirects(response, '/get_view/', status_code=301)

def test_temporary_redirect(self):
    "GET a URL that does a non-permanent redirect"
    response = self.client.get('/temporary_redirect_view/')
    # Check that the response was a 302 (non-permanent redirect)
    self.assertRedirects(response, '/get_view/', status_code=302)

def test_redirect_to_strange_location(self):
    "GET a URL that redirects to a non-200 page"
    response = self.client.get('/double_redirect_view/')
    # Check that the response was a 302, and that
    # the attempt to get the redirection location returned 301 when retrieved
    self.assertRedirects(response, '/permanent_redirect_view/', target_status_code=301)

def test_follow_redirect(self):
    "A URL that redirects can be followed to termination."
    response = self.client.get('/double_redirect_view/', follow=True)
    self.assertRedirects(response, '/get_view/', status_code=302, target_status_code=200)
    # Two hops were recorded: double_redirect -> redirect -> get_view.
    self.assertEqual(len(response.redirect_chain), 2)

def test_follow_relative_redirect(self):
    "A URL with a relative redirect can be followed."
    response = self.client.get('/accounts/', follow=True)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.request['PATH_INFO'], '/accounts/login/')

def test_follow_relative_redirect_no_trailing_slash(self):
    "A URL with a relative redirect with no trailing slash can be followed."
    response = self.client.get('/accounts/no_trailing_slash', follow=True)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.request['PATH_INFO'], '/accounts/login/')

def test_redirect_http(self):
    "GET a URL that redirects to an http URI"
    response = self.client.get('/http_redirect_view/', follow=True)
    self.assertFalse(response.test_was_secure_request)

def test_redirect_https(self):
    "GET a URL that redirects to an https URI"
    response = self.client.get('/https_redirect_view/', follow=True)
    self.assertTrue(response.test_was_secure_request)

def test_notfound_response(self):
    "GET a URL that responds as '404:Not Found'"
    response = self.client.get('/bad_view/')
    # Check that the response was a 404, and that the content contains MAGIC
    self.assertContains(response, 'MAGIC', status_code=404)
def test_valid_form(self):
    "POST valid data to a form"
    post_data = {
        'text': 'Hello World',
        'email': 'foo@example.com',
        'value': 37,
        'single': 'b',
        'multi': ('b', 'c', 'e')
    }
    response = self.client.post('/form_view/', post_data)
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, "Valid POST Template")

def test_valid_form_with_hints(self):
    "GET a form, providing hints in the GET data"
    hints = {
        'text': 'Hello World',
        'multi': ('b', 'c', 'e')
    }
    response = self.client.get('/form_view/', data=hints)
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, "Form GET Template")
    # Check that the multi-value data has been rolled out ok
    self.assertContains(response, 'Select a valid choice.', 0)

def test_incomplete_data_form(self):
    "POST incomplete data to a form"
    post_data = {
        'text': 'Hello World',
        'value': 37
    }
    response = self.client.post('/form_view/', post_data)
    # Three fields are missing, so three "required" errors are rendered.
    self.assertContains(response, 'This field is required.', 3)
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, "Invalid POST Template")

    self.assertFormError(response, 'form', 'email', 'This field is required.')
    self.assertFormError(response, 'form', 'single', 'This field is required.')
    self.assertFormError(response, 'form', 'multi', 'This field is required.')

def test_form_error(self):
    "POST erroneous data to a form"
    post_data = {
        'text': 'Hello World',
        'email': 'not an email address',
        'value': 37,
        'single': 'b',
        'multi': ('b', 'c', 'e')
    }
    response = self.client.post('/form_view/', post_data)
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, "Invalid POST Template")

    self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')

def test_valid_form_with_template(self):
    "POST valid data to a form using multiple templates"
    post_data = {
        'text': 'Hello World',
        'email': 'foo@example.com',
        'value': 37,
        'single': 'b',
        'multi': ('b', 'c', 'e')
    }
    response = self.client.post('/form_view_with_template/', post_data)
    self.assertContains(response, 'POST data OK')
    self.assertTemplateUsed(response, "form_view.html")
    self.assertTemplateUsed(response, 'base.html')
    self.assertTemplateNotUsed(response, "Valid POST Template")

def test_incomplete_data_form_with_template(self):
    "POST incomplete data to a form using multiple templates"
    post_data = {
        'text': 'Hello World',
        'value': 37
    }
    response = self.client.post('/form_view_with_template/', post_data)
    self.assertContains(response, 'POST data has errors')
    self.assertTemplateUsed(response, 'form_view.html')
    self.assertTemplateUsed(response, 'base.html')
    self.assertTemplateNotUsed(response, "Invalid POST Template")

    self.assertFormError(response, 'form', 'email', 'This field is required.')
    self.assertFormError(response, 'form', 'single', 'This field is required.')
    self.assertFormError(response, 'form', 'multi', 'This field is required.')

def test_form_error_with_template(self):
    "POST erroneous data to a form using multiple templates"
    post_data = {
        'text': 'Hello World',
        'email': 'not an email address',
        'value': 37,
        'single': 'b',
        'multi': ('b', 'c', 'e')
    }
    response = self.client.post('/form_view_with_template/', post_data)
    self.assertContains(response, 'POST data has errors')
    self.assertTemplateUsed(response, "form_view.html")
    self.assertTemplateUsed(response, 'base.html')
    self.assertTemplateNotUsed(response, "Invalid POST Template")

    self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')
def test_unknown_page(self):
    "GET an invalid URL"
    response = self.client.get('/unknown_view/')

    # Check that the response was a 404
    self.assertEqual(response.status_code, 404)

def test_url_parameters(self):
    "Make sure that URL ;-parameters are not stripped."
    response = self.client.get('/unknown_view/;some-parameter')

    # Check that the path in the response includes it (ignore that it's a 404)
    self.assertEqual(response.request['PATH_INFO'], '/unknown_view/;some-parameter')
def test_view_with_login(self):
    "Request a page that is protected with @login_required"

    # Get the page without logging in. Should result in 302.
    response = self.client.get('/login_protected_view/')
    self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')

    # Log in
    login = self.client.login(username='testclient', password='password')
    self.assertTrue(login, 'Could not log in')

    # Request a page that requires a login
    response = self.client.get('/login_protected_view/')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.context['user'].username, 'testclient')

@override_settings(
    INSTALLED_APPS=['django.contrib.auth'],
    SESSION_ENGINE='django.contrib.sessions.backends.file',
)
def test_view_with_login_when_sessions_app_is_not_installed(self):
    # Same scenario, but with the sessions app removed and file-backed
    # sessions instead.
    self.test_view_with_login()

def test_view_with_force_login(self):
    "Request a page that is protected with @login_required"

    # Get the page without logging in. Should result in 302.
    response = self.client.get('/login_protected_view/')
    self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')

    # Log in
    self.client.force_login(self.u1)

    # Request a page that requires a login
    response = self.client.get('/login_protected_view/')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.context['user'].username, 'testclient')

def test_view_with_method_login(self):
    "Request a page that is protected with a @login_required method"

    # Get the page without logging in. Should result in 302.
    response = self.client.get('/login_protected_method_view/')
    self.assertRedirects(response, '/accounts/login/?next=/login_protected_method_view/')

    # Log in
    login = self.client.login(username='testclient', password='password')
    self.assertTrue(login, 'Could not log in')

    # Request a page that requires a login
    response = self.client.get('/login_protected_method_view/')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.context['user'].username, 'testclient')

def test_view_with_method_force_login(self):
    "Request a page that is protected with a @login_required method"

    # Get the page without logging in. Should result in 302.
    response = self.client.get('/login_protected_method_view/')
    self.assertRedirects(response, '/accounts/login/?next=/login_protected_method_view/')

    # Log in
    self.client.force_login(self.u1)

    # Request a page that requires a login
    response = self.client.get('/login_protected_method_view/')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.context['user'].username, 'testclient')

def test_view_with_login_and_custom_redirect(self):
    "Request a page that is protected with @login_required(redirect_field_name='redirect_to')"

    # Get the page without logging in. Should result in 302.
    response = self.client.get('/login_protected_view_custom_redirect/')
    self.assertRedirects(response, '/accounts/login/?redirect_to=/login_protected_view_custom_redirect/')

    # Log in
    login = self.client.login(username='testclient', password='password')
    self.assertTrue(login, 'Could not log in')

    # Request a page that requires a login
    response = self.client.get('/login_protected_view_custom_redirect/')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.context['user'].username, 'testclient')

def test_view_with_force_login_and_custom_redirect(self):
    """
    Request a page that is protected with
    @login_required(redirect_field_name='redirect_to')
    """

    # Get the page without logging in. Should result in 302.
    response = self.client.get('/login_protected_view_custom_redirect/')
    self.assertRedirects(response, '/accounts/login/?redirect_to=/login_protected_view_custom_redirect/')

    # Log in
    self.client.force_login(self.u1)

    # Request a page that requires a login
    response = self.client.get('/login_protected_view_custom_redirect/')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_bad_login(self):
"Request a page that is protected with @login, but use bad credentials"
login = self.client.login(username='otheruser', password='nopassword')
self.assertFalse(login)
def test_view_with_inactive_login(self):
"""
An inactive user may login if the authenticate backend allows it.
"""
credentials = {'username': 'inactive', 'password': 'password'}
self.assertFalse(self.client.login(**credentials))
with self.settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.AllowAllUsersModelBackend']):
self.assertTrue(self.client.login(**credentials))
@override_settings(
AUTHENTICATION_BACKENDS=[
'django.contrib.auth.backends.ModelBackend',
'django.contrib.auth.backends.AllowAllUsersModelBackend',
]
)
def test_view_with_inactive_force_login(self):
"Request a page that is protected with @login, but use an inactive login"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
# Log in
self.client.force_login(self.u2, backend='django.contrib.auth.backends.AllowAllUsersModelBackend')
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'inactive')
def test_logout(self):
"Request a logout after logging in"
# Log in
self.client.login(username='testclient', password='password')
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
# Log out
self.client.logout()
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
def test_logout_with_force_login(self):
"Request a logout after logging in"
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
# Log out
self.client.logout()
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
@override_settings(
AUTHENTICATION_BACKENDS=[
'django.contrib.auth.backends.ModelBackend',
'test_client.auth_backends.TestClientBackend',
],
)
def test_force_login_with_backend(self):
"""
Request a page that is protected with @login_required when using
force_login() and passing a backend.
"""
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
# Log in
self.client.force_login(self.u1, backend='test_client.auth_backends.TestClientBackend')
self.assertEqual(self.u1.backend, 'test_client.auth_backends.TestClientBackend')
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
@override_settings(
AUTHENTICATION_BACKENDS=[
'django.contrib.auth.backends.ModelBackend',
'test_client.auth_backends.TestClientBackend',
],
)
def test_force_login_without_backend(self):
"""
force_login() without passing a backend and with multiple backends
configured should automatically use the first backend.
"""
self.client.force_login(self.u1)
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
self.assertEqual(self.u1.backend, 'django.contrib.auth.backends.ModelBackend')
    @override_settings(SESSION_ENGINE="django.contrib.sessions.backends.signed_cookies")
    def test_logout_cookie_sessions(self):
        """Logging out also works when sessions are stored in signed cookies."""
        self.test_logout()
    def test_view_with_permissions(self):
        "Request a page that is protected with @permission_required"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/permission_protected_view/')
        self.assertRedirects(response, '/accounts/login/?next=/permission_protected_view/')
        # Log in
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Logged in but lacking the required permission: still redirected (302).
        response = self.client.get('/permission_protected_view/')
        self.assertRedirects(response, '/accounts/login/?next=/permission_protected_view/')
        # TODO: Log in with right permissions and request the page again
    def test_view_with_permissions_exception(self):
        "Request a page that is protected with @permission_required but raises an exception"
        # Get the page without logging in. Should result in 403.
        response = self.client.get('/permission_protected_view_exception/')
        self.assertEqual(response.status_code, 403)
        # Log in
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Logged in but lacking the permission: raise_exception gives 403, not a redirect.
        response = self.client.get('/permission_protected_view_exception/')
        self.assertEqual(response.status_code, 403)
    def test_view_with_method_permissions(self):
        "Request a page that is protected with a @permission_required method"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/permission_protected_method_view/')
        self.assertRedirects(response, '/accounts/login/?next=/permission_protected_method_view/')
        # Log in
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Logged in but lacking the required permission: still redirected (302).
        response = self.client.get('/permission_protected_method_view/')
        self.assertRedirects(response, '/accounts/login/?next=/permission_protected_method_view/')
        # TODO: Log in with right permissions and request the page again
    def test_external_redirect(self):
        """A redirect to an external URL can be asserted without fetching it."""
        response = self.client.get('/django_project_redirect/')
        self.assertRedirects(response, 'https://www.djangoproject.com/', fetch_redirect_response=False)
    def test_external_redirect_with_fetch_error_msg(self):
        """
        Check that assertRedirects without fetch_redirect_response=False raises
        a relevant ValueError rather than a non-descript AssertionError.
        """
        response = self.client.get('/django_project_redirect/')
        # Exact guidance message shown to the developer.
        msg = (
            "The test client is unable to fetch remote URLs (got "
            "https://www.djangoproject.com/). If the host is served by Django, "
            "add 'www.djangoproject.com' to ALLOWED_HOSTS. "
            "Otherwise, use assertRedirects(..., fetch_redirect_response=False)."
        )
        with self.assertRaisesMessage(ValueError, msg):
            self.assertRedirects(response, 'https://www.djangoproject.com/')
    def test_session_modifying_view(self):
        "Request a page that modifies the session"
        # Session value isn't set initially; reading it raises KeyError.
        with self.assertRaises(KeyError):
            self.client.session['tobacconist']
        self.client.post('/session_view/')
        # Check that the session was modified by the view.
        self.assertEqual(self.client.session['tobacconist'], 'hovercraft')
    @override_settings(
        INSTALLED_APPS=[],
        SESSION_ENGINE='django.contrib.sessions.backends.file',
    )
    def test_sessions_app_is_not_installed(self):
        """Sessions still work (file backend) with no apps installed."""
        self.test_session_modifying_view()
    @override_settings(
        INSTALLED_APPS=[],
        SESSION_ENGINE='django.contrib.sessions.backends.nonexistent',
    )
    def test_session_engine_is_invalid(self):
        """A bogus SESSION_ENGINE path raises ImportError naming the module."""
        with self.assertRaisesMessage(ImportError, 'nonexistent'):
            self.test_session_modifying_view()
    def test_view_with_exception(self):
        "Request a page that is known to throw an error"
        # The view's exception propagates out of the test client.
        with self.assertRaises(KeyError):
            self.client.get("/broken_view/")
    def test_mail_sending(self):
        "Test that mail is redirected to a dummy outbox during test setup"
        response = self.client.get('/mail_sending_view/')
        self.assertEqual(response.status_code, 200)
        # Exactly one message was captured, with all fields intact.
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Test message')
        self.assertEqual(mail.outbox[0].body, 'This is a test email')
        self.assertEqual(mail.outbox[0].from_email, 'from@example.com')
        self.assertEqual(mail.outbox[0].to[0], 'first@example.com')
        self.assertEqual(mail.outbox[0].to[1], 'second@example.com')
    def test_reverse_lazy_decodes(self):
        "Ensure reverse_lazy works in the test client"
        data = {'var': 'data'}
        # The lazy URL object is resolved by the client like a plain string.
        response = self.client.get(reverse_lazy('get_view'), data)
        # Check some response details
        self.assertContains(response, 'This is a test')
    def test_relative_redirect(self):
        """A relative redirect is followed and asserted correctly."""
        response = self.client.get('/accounts/')
        self.assertRedirects(response, '/accounts/login/')
    def test_relative_redirect_no_trailing_slash(self):
        """A relative redirect from a slash-less URL is handled too."""
        response = self.client.get('/accounts/no_trailing_slash')
        self.assertRedirects(response, '/accounts/login/')
    def test_mass_mail_sending(self):
        "Test that mass mail is redirected to a dummy outbox during test setup"
        response = self.client.get('/mass_mail_sending_view/')
        self.assertEqual(response.status_code, 200)
        # Both messages were captured in order, with all fields intact.
        self.assertEqual(len(mail.outbox), 2)
        self.assertEqual(mail.outbox[0].subject, 'First Test message')
        self.assertEqual(mail.outbox[0].body, 'This is the first test email')
        self.assertEqual(mail.outbox[0].from_email, 'from@example.com')
        self.assertEqual(mail.outbox[0].to[0], 'first@example.com')
        self.assertEqual(mail.outbox[0].to[1], 'second@example.com')
        self.assertEqual(mail.outbox[1].subject, 'Second Test message')
        self.assertEqual(mail.outbox[1].body, 'This is the second test email')
        self.assertEqual(mail.outbox[1].from_email, 'from@example.com')
        self.assertEqual(mail.outbox[1].to[0], 'second@example.com')
        self.assertEqual(mail.outbox[1].to[1], 'third@example.com')
    def test_exception_following_nested_client_request(self):
        """
        A nested test client request shouldn't clobber exception signals from
        the outer client request.
        """
        # The outer view makes its own client request, then raises.
        with self.assertRaisesMessage(Exception, 'exception message'):
            self.client.get('/nesting_exception_view/')
@override_settings(
    MIDDLEWARE=['django.middleware.csrf.CsrfViewMiddleware'],
    ROOT_URLCONF='test_client.urls',
)
class CSRFEnabledClientTests(SimpleTestCase):
    def test_csrf_enabled_client(self):
        "A client can be instantiated with CSRF checks enabled"
        csrf_client = Client(enforce_csrf_checks=True)
        # The normal client allows the post
        response = self.client.post('/post_view/', {})
        self.assertEqual(response.status_code, 200)
        # The CSRF-enabled client rejects it (no CSRF token in the request).
        response = csrf_client.post('/post_view/', {})
        self.assertEqual(response.status_code, 403)
class CustomTestClient(Client):
    # Marker attribute used to verify that a custom client_class is honored.
    i_am_customized = "Yes"
class CustomTestClientTest(SimpleTestCase):
    client_class = CustomTestClient

    def test_custom_test_client(self):
        """A test case can specify a custom class for self.client."""
        self.assertIs(hasattr(self.client, "i_am_customized"), True)
def _generic_view(request):
    # Minimal view for HTTP methods that need no dedicated test view.
    return HttpResponse(status=200)
@override_settings(ROOT_URLCONF='test_client.urls')
class RequestFactoryTest(SimpleTestCase):
    """Tests for the request factory."""

    # A mapping between names of HTTP/1.1 methods and their test views.
    http_methods_and_views = (
        ('get', get_view),
        ('post', post_view),
        ('put', _generic_view),
        ('patch', _generic_view),
        ('delete', _generic_view),
        ('head', _generic_view),
        ('options', _generic_view),
        ('trace', trace_view),
    )

    def setUp(self):
        # Fresh factory per test; the factory itself carries no state we reuse.
        self.request_factory = RequestFactory()

    def test_request_factory(self):
        """The request factory implements all the HTTP/1.1 methods."""
        for method_name, view in self.http_methods_and_views:
            method = getattr(self.request_factory, method_name)
            request = method('/somewhere/')
            response = view(request)
            self.assertEqual(response.status_code, 200)

    def test_get_request_from_factory(self):
        """
        The request factory returns a templated response for a GET request.
        """
        request = self.request_factory.get('/somewhere/')
        response = get_view(request)
        self.assertContains(response, 'This is a test')

    def test_trace_request_from_factory(self):
        """The request factory returns an echo response for a TRACE request."""
        url_path = '/somewhere/'
        request = self.request_factory.trace(url_path)
        response = trace_view(request)
        # TRACE responses echo the request line back to the client.
        protocol = request.META["SERVER_PROTOCOL"]
        echoed_request_line = "TRACE {} {}".format(url_path, protocol)
        self.assertContains(response, echoed_request_line)
| bsd-3-clause |
h4ck3rm1k3/FEC-Field-Documentation | fec/version/v1/SH1.py | 1 | 2453 | import fechbase
class Records(fechbase.RecordsBase):
    """Field layout for FEC electronic-filing record type SH1
    (allocation ratios for federal/non-federal activity)."""

    def __init__(self):
        fechbase.RecordsBase.__init__(self)
        # Ordered field descriptors: 'name' is the column label,
        # 'number' its position code in the FEC record layout.
        self.fields = [
            {'name': 'FORM TYPE', 'number': '1'},
            {'name': 'FEC COMMITTEE ID NUMBER', 'number': '2'},
            {'name': 'NAT PARTY COMMITTEES %', 'number': '3'},
            {'name': 'HSE/SEN PTY COMMITTEES MINIMUM FED %', 'number': '4'},
            {'name': 'HSE/SEN PTY COMMITTEES PERCENTAGE ESTIMATED FEDERAL CAN SUPPORT', 'number': '5'},
            {'name': 'HSE/SEN PTY COMMITTEES PERCENTAGE ESTIMATED NON-FEDERAL CAN SUPPORT', 'number': '6'},
            {'name': 'HSE/SEN PTY COMMITTEES ACTUAL FEDERAL CAN SUPPORT', 'number': '7'},
            {'name': 'HSE/SEN PTY COMMITTEES ACTUAL NON-FEDERAL CAN SUPPORT', 'number': '8'},
            {'name': 'HSE/SEN PTY COMMITTEES PERCENTAGE ACTUAL FEDERAL', 'number': '9'},
            {'name': 'SEP SEG FUNDS & PERCENTAGE NON-CONNECTED COMMITTEES ESTIMATED FEDERAL CANDIDATE SUPPORT', 'number': '10'},
            {'name': 'SEP SEG FUNDS & PERCENTAGE NON-CONNECTED COMMITTEES ESTIMATED NON-FEDERAL CANDIDATE SUPPORT', 'number': '11'},
            {'name': 'SEP SEG FUNDS & NON-CONNECTED COMMITTEES ACTUAL FEDERAL CANDIDATE SUPPORT', 'number': '12'},
            {'name': 'SEP SEG FUNDS & NON-CONNECTED COMMITTEES ACTUAL NON-FEDERAL CANDIDATE SUPPORT', 'number': '13'},
            {'name': 'SEP SEG FUNDS & PERCENTAGE NON-CONNECTED COMMITTEES ACTUAL FEDERAL CANDIDATE SUPPORT', 'number': '14'},
            {'name': 'BALLOT COMP PRES BLANK OR 1', 'number': '15-1'},
            {'name': 'BALLOT COMP SEN BLANK OR 1', 'number': '16-2'},
            {'name': 'BALLOT COMP HSE BLANK OR 1', 'number': '17-3'},
            {'name': 'SUBTOTAL-FED', 'number': '18-4'},
            {'name': 'BALLOT COMP GOV BLANK OR 1', 'number': '19-5'},
            {'name': 'OTHER STATEWIDE', 'number': '20-6'},
            {'name': 'STATE SENATE', 'number': '21-7'},
            {'name': 'STATE REP', 'number': '22-8'},
            {'name': 'LOCAL CANDIDATES BLANK, 1 OR 2', 'number': '23-9'},
            {'name': 'EXTRA NON-FED POINT BLANK OR 1', 'number': '24-10'},
            {'name': 'SUBTOTAL', 'number': '25-11'},
            {'name': 'TOTAL POINTS', 'number': '26-12'},
            {'name': 'FEDERAL ALLOCATION PERCENTAGE', 'number': '27'},
            {'name': 'AMENDED', 'number': '28'},
        ]
        # Name -> field lookup built by the base class helper.
        self.fields_names = self.hash_names(self.fields)
| unlicense |
lpawluczuk/summar.pl | Summarizer/summarization/summarizer.py | 1 | 2690 | #-*- coding: utf-8 -*-
import features
from model import NeuralNetworkModel, SVMModel, RandomModel, MaximumModel, FannModel
import datetime
# from helpers import normalize
try:
from Summarizer.helpers import normalize
except ImportError:
from helpers import normalize
try:
from Summarizer.models import Singleton, Summary
except ImportError:
from models import Singleton,Summary
from enum import Enum
class ModelType(Enum):
    # Identifiers for the selectable summarization models; the configuration
    # stores them by name, which is why __str__ returns self.name
    # (compared against config.get_model_type() in Summarizer.set_model).
    Random = 1
    First = 2
    NN = 3
    SVM = 4
    Fann = 5

    def __str__(self):
        return self.name
@Singleton
class Summarizer:
def __init__(self):
self.model = NeuralNetworkModel()
def set_model(self, model_path=None, features_path=None, config=None):
model_type = config.get_model_type()
if model_type == str(ModelType.Random):
self.model = RandomModel()
elif model_type == str(ModelType.First):
self.model = MaximumModel()
elif model_type == str(ModelType.NN):
self.model = NeuralNetworkModel()
self.model.set_model(model_path, features_path, config.get_stop_list_path())
elif model_type == str(ModelType.SVM):
self.model = SVMModel()
self.model.set_model(model_path, features_path, config.get_stop_list_path())
elif model_type == str(ModelType.Fann):
self.model = FannModel()
self.model.set_model(model_path, features_path, config.get_stop_list_path())
else:
print "Wrong model!"
def set_features(self, features_path, stop_list):
self.model.set_model(None, features_path, stop_list)
def get_features(self):
return self.model.features
def create_summary(self, document, length=10):
summary = Summary(self.score_document(document), length)
# for s in summary.document.get_sentences():
# for i, feature in enumerate(s.scores.keys()):
# s.scores[feature] = normalize(s.scores[feature], self.model.normalization_values[i][0], self.model.normalization_values[i][1])
summary.set_scores({i: self.model.activate(s.scores.values())[0] for i, s in enumerate(document.get_sentences())})
return summary
def score_document(self, document):
[f.init(document) for f in self.get_features()]
for paragraph_number, paragraph in enumerate(document.paragraphs):
for sentence_number, sentence in enumerate(paragraph.sentences):
[sentence.update_scores(f.name, f.process(sentence, paragraph, paragraph_number, sentence_number)) for f in self.get_features()]
document.scored = True
return document | mit |
weka-io/boto | boto/rds/dbinstance.py | 167 | 17366 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.rds.dbsecuritygroup import DBSecurityGroup
from boto.rds.parametergroup import ParameterGroup
from boto.rds.statusinfo import StatusInfo
from boto.rds.dbsubnetgroup import DBSubnetGroup
from boto.rds.vpcsecuritygroupmembership import VPCSecurityGroupMembership
from boto.resultset import ResultSet
class DBInstance(object):
    """
    Represents a RDS DBInstance

    Properties reference available from the AWS documentation at
    http://goo.gl/sC2Kn

    :ivar connection: connection
    :ivar id: The name and identifier of the DBInstance
    :ivar create_time: The date and time of creation
    :ivar engine: The database engine being used
    :ivar status: The status of the database in a string. e.g. "available"
    :ivar allocated_storage: The size of the disk in gigabytes (int).
    :ivar auto_minor_version_upgrade: Indicates that minor version patches
        are applied automatically.
    :ivar endpoint: A tuple that describes the hostname and port of
        the instance. This is only available when the database is
        in status "available".
    :ivar instance_class: Contains the name of the compute and memory
        capacity class of the DB Instance.
    :ivar master_username: The username that is set as master username
        at creation time.
    :ivar parameter_groups: Provides the list of DB Parameter Groups
        applied to this DB Instance.
    :ivar security_groups: Provides List of DB Security Group elements
        containing only DBSecurityGroup.Name and DBSecurityGroup.Status
        subelements.
    :ivar availability_zone: Specifies the name of the Availability Zone
        the DB Instance is located in.
    :ivar backup_retention_period: Specifies the number of days for
        which automatic DB Snapshots are retained.
    :ivar preferred_backup_window: Specifies the daily time range during
        which automated backups are created if automated backups are
        enabled, as determined by the backup_retention_period.
    :ivar preferred_maintenance_window: Specifies the weekly time
        range (in UTC) during which system maintenance can occur. (string)
    :ivar latest_restorable_time: Specifies the latest time to which
        a database can be restored with point-in-time restore. (string)
    :ivar multi_az: Boolean that specifies if the DB Instance is a
        Multi-AZ deployment.
    :ivar iops: The current number of provisioned IOPS for the DB Instance.
        Can be None if this is a standard instance.
    :ivar vpc_security_groups: List of VPC Security Group Membership elements
        containing only VpcSecurityGroupMembership.VpcSecurityGroupId and
        VpcSecurityGroupMembership.Status subelements.
    :ivar pending_modified_values: Specifies that changes to the
        DB Instance are pending. This element is only included when changes
        are pending. Specific changes are identified by subelements.
    :ivar read_replica_dbinstance_identifiers: List of read replicas
        associated with this DB instance.
    :ivar status_infos: The status of a Read Replica. If the instance is not
        a read replica, this will be blank.
    :ivar character_set_name: If present, specifies the name of the character
        set that this instance is associated with.
    :ivar subnet_group: Specifies information on the subnet group associated
        with the DB instance, including the name, description, and subnets
        in the subnet group.
    :ivar engine_version: Indicates the database engine version.
    :ivar license_model: License model information for this DB instance.
    """

    def __init__(self, connection=None, id=None):
        self.connection = connection
        self.id = id
        self.create_time = None
        self.engine = None
        self.status = None
        self.allocated_storage = None
        self.auto_minor_version_upgrade = None
        self.endpoint = None
        self.instance_class = None
        self.master_username = None
        self.parameter_groups = []
        self.security_groups = []
        self.read_replica_dbinstance_identifiers = []
        self.availability_zone = None
        self.backup_retention_period = None
        self.preferred_backup_window = None
        self.preferred_maintenance_window = None
        self.latest_restorable_time = None
        self.multi_az = False
        self.iops = None
        self.vpc_security_groups = None
        self.pending_modified_values = None
        # SAX parsing state: True while inside an <Endpoint> element.
        self._in_endpoint = False
        self._port = None
        self._address = None
        self.status_infos = None
        self.character_set_name = None
        self.subnet_group = None
        self.engine_version = None
        self.license_model = None

    def __repr__(self):
        return 'DBInstance:%s' % self.id

    def startElement(self, name, attrs, connection):
        # SAX hook: return a sub-object for container elements so nested
        # elements are routed to it; return None for leaf elements.
        if name == 'Endpoint':
            self._in_endpoint = True
        elif name == 'DBParameterGroups':
            self.parameter_groups = ResultSet([('DBParameterGroup',
                                                ParameterGroup)])
            return self.parameter_groups
        elif name == 'DBSecurityGroups':
            self.security_groups = ResultSet([('DBSecurityGroup',
                                               DBSecurityGroup)])
            return self.security_groups
        elif name == 'VpcSecurityGroups':
            self.vpc_security_groups = ResultSet([('VpcSecurityGroupMembership',
                                                   VPCSecurityGroupMembership)])
            return self.vpc_security_groups
        elif name == 'PendingModifiedValues':
            self.pending_modified_values = PendingModifiedValues()
            return self.pending_modified_values
        elif name == 'ReadReplicaDBInstanceIdentifiers':
            self.read_replica_dbinstance_identifiers = \
                ReadReplicaDBInstanceIdentifiers()
            return self.read_replica_dbinstance_identifiers
        elif name == 'StatusInfos':
            self.status_infos = ResultSet([
                ('DBInstanceStatusInfo', StatusInfo)
            ])
            return self.status_infos
        elif name == 'DBSubnetGroup':
            self.subnet_group = DBSubnetGroup()
            return self.subnet_group
        return None

    def endElement(self, name, value, connection):
        # SAX hook: store the text value of a leaf element on the matching
        # attribute; unrecognized names fall through to setattr.
        if name == 'DBInstanceIdentifier':
            self.id = value
        elif name == 'DBInstanceStatus':
            self.status = value
        elif name == 'InstanceCreateTime':
            self.create_time = value
        elif name == 'Engine':
            self.engine = value
        # (a second, unreachable 'DBInstanceStatus' branch was removed here)
        elif name == 'AllocatedStorage':
            self.allocated_storage = int(value)
        elif name == 'AutoMinorVersionUpgrade':
            self.auto_minor_version_upgrade = value.lower() == 'true'
        elif name == 'DBInstanceClass':
            self.instance_class = value
        elif name == 'MasterUsername':
            self.master_username = value
        elif name == 'Port':
            # Only meaningful inside an <Endpoint> element.
            if self._in_endpoint:
                self._port = int(value)
        elif name == 'Address':
            if self._in_endpoint:
                self._address = value
        elif name == 'Endpoint':
            self.endpoint = (self._address, self._port)
            self._in_endpoint = False
        elif name == 'AvailabilityZone':
            self.availability_zone = value
        elif name == 'BackupRetentionPeriod':
            self.backup_retention_period = int(value)
        elif name == 'LatestRestorableTime':
            self.latest_restorable_time = value
        elif name == 'PreferredMaintenanceWindow':
            self.preferred_maintenance_window = value
        elif name == 'PreferredBackupWindow':
            self.preferred_backup_window = value
        elif name == 'MultiAZ':
            if value.lower() == 'true':
                self.multi_az = True
        elif name == 'Iops':
            self.iops = int(value)
        elif name == 'CharacterSetName':
            self.character_set_name = value
        elif name == 'EngineVersion':
            self.engine_version = value
        elif name == 'LicenseModel':
            self.license_model = value
        else:
            setattr(self, name, value)

    @property
    def security_group(self):
        """
        Provide backward compatibility for previous security_group
        attribute.
        """
        if len(self.security_groups) > 0:
            return self.security_groups[-1]
        else:
            return None

    @property
    def parameter_group(self):
        """
        Provide backward compatibility for previous parameter_group
        attribute.
        """
        if len(self.parameter_groups) > 0:
            return self.parameter_groups[-1]
        else:
            return None

    def snapshot(self, snapshot_id):
        """
        Create a new DB snapshot of this DBInstance.

        :type identifier: string
        :param identifier: The identifier for the DBSnapshot

        :rtype: :class:`boto.rds.dbsnapshot.DBSnapshot`
        :return: The newly created DBSnapshot
        """
        return self.connection.create_dbsnapshot(snapshot_id, self.id)

    def reboot(self):
        """
        Reboot this DBInstance

        :rtype: :class:`boto.rds.dbsnapshot.DBSnapshot`
        :return: The newly created DBSnapshot
        """
        return self.connection.reboot_dbinstance(self.id)

    def update(self, validate=False):
        """
        Update the DB instance's status information by making a call to fetch
        the current instance attributes from the service.

        :type validate: bool
        :param validate: By default, if EC2 returns no data about the
                         instance the update method returns quietly.  If
                         the validate param is True, however, it will
                         raise a ValueError exception if no data is
                         returned from EC2.
        """
        rs = self.connection.get_all_dbinstances(self.id)
        if len(rs) > 0:
            for i in rs:
                if i.id == self.id:
                    self.__dict__.update(i.__dict__)
        elif validate:
            raise ValueError('%s is not a valid Instance ID' % self.id)
        return self.status

    def stop(self, skip_final_snapshot=False, final_snapshot_id=''):
        """
        Delete this DBInstance.

        :type skip_final_snapshot: bool
        :param skip_final_snapshot: This parameter determines whether
            a final db snapshot is created before the instance is
            deleted.  If True, no snapshot is created.  If False, a
            snapshot is created before deleting the instance.

        :type final_snapshot_id: str
        :param final_snapshot_id: If a final snapshot is requested, this
            is the identifier used for that snapshot.

        :rtype: :class:`boto.rds.dbinstance.DBInstance`
        :return: The deleted db instance.
        """
        return self.connection.delete_dbinstance(self.id,
                                                 skip_final_snapshot,
                                                 final_snapshot_id)

    def modify(self, param_group=None, security_groups=None,
               preferred_maintenance_window=None,
               master_password=None, allocated_storage=None,
               instance_class=None,
               backup_retention_period=None,
               preferred_backup_window=None,
               multi_az=False,
               iops=None,
               vpc_security_groups=None,
               apply_immediately=False,
               new_instance_id=None):
        """
        Modify this DBInstance.

        :type param_group: str
        :param param_group: Name of DBParameterGroup to associate with
                            this DBInstance.

        :type security_groups: list of str or list of DBSecurityGroup objects
        :param security_groups: List of names of DBSecurityGroup to
            authorize on this DBInstance.

        :type preferred_maintenance_window: str
        :param preferred_maintenance_window: The weekly time range (in
            UTC) during which maintenance can occur.  Default is
            Sun:05:00-Sun:09:00

        :type master_password: str
        :param master_password: Password of master user for the DBInstance.
            Must be 4-15 alphanumeric characters.

        :type allocated_storage: int
        :param allocated_storage: The new allocated storage size, in GBs.
            Valid values are [5-1024]

        :type instance_class: str
        :param instance_class: The compute and memory capacity of the
            DBInstance. Changes will be applied at next maintenance
            window unless apply_immediately is True.

            Valid values are:

            * db.m1.small
            * db.m1.large
            * db.m1.xlarge
            * db.m2.xlarge
            * db.m2.2xlarge
            * db.m2.4xlarge

        :type apply_immediately: bool
        :param apply_immediately: If true, the modifications will be
            applied as soon as possible rather than waiting for the
            next preferred maintenance window.

        :type new_instance_id: str
        :param new_instance_id: The new DB instance identifier.

        :type backup_retention_period: int
        :param backup_retention_period: The number of days for which
            automated backups are retained.  Setting this to zero
            disables automated backups.

        :type preferred_backup_window: str
        :param preferred_backup_window: The daily time range during
            which automated backups are created (if enabled).  Must be
            in h24:mi-hh24:mi format (UTC).

        :type multi_az: bool
        :param multi_az: If True, specifies the DB Instance will be
            deployed in multiple availability zones.

        :type iops: int
        :param iops: The amount of IOPS (input/output operations per
            second) to Provisioned for the DB Instance. Can be
            modified at a later date.

            Must scale linearly. For every 1000 IOPS provision, you
            must allocated 100 GB of storage space. This scales up to
            1 TB / 10 000 IOPS for MySQL and Oracle. MSSQL is limited
            to 700 GB / 7 000 IOPS.

            If you specify a value, it must be at least 1000 IOPS and
            you must allocate 100 GB of storage.

        :type vpc_security_groups: list
        :param vpc_security_groups: List of VPCSecurityGroupMembership
            that this DBInstance is a memberof.

        :rtype: :class:`boto.rds.dbinstance.DBInstance`
        :return: The modified db instance.
        """
        return self.connection.modify_dbinstance(self.id,
                                                 param_group,
                                                 security_groups,
                                                 preferred_maintenance_window,
                                                 master_password,
                                                 allocated_storage,
                                                 instance_class,
                                                 backup_retention_period,
                                                 preferred_backup_window,
                                                 multi_az,
                                                 apply_immediately,
                                                 iops,
                                                 vpc_security_groups,
                                                 new_instance_id)
class PendingModifiedValues(dict):
    """Dict of pending DBInstance changes, filled in by the SAX parser.

    Each leaf element inside <PendingModifiedValues> becomes a
    name -> value entry; the wrapper element itself is not recorded.
    """

    def startElement(self, name, attrs, connection):
        # No nested container elements to build.
        return None

    def endElement(self, name, value, connection):
        if name == 'PendingModifiedValues':
            return  # closing wrapper tag: nothing to record
        self[name] = value
class ReadReplicaDBInstanceIdentifiers(list):
    """List of read-replica instance ids, filled in by the SAX parser."""

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        # Collect only the identifier leaf elements; ignore everything else.
        if name != 'ReadReplicaDBInstanceIdentifier':
            return
        self.append(value)
| mit |
BT-fgarbely/odoo | addons/account_check_writing/__openerp__.py | 313 | 1808 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Check Writing',
'version': '1.1',
'author': 'OpenERP SA, NovaPoint Group',
'category': 'Generic Modules/Accounting',
'description': """
Module for the Check Writing and Check Printing.
================================================
""",
'website': 'https://www.odoo.com/page/accounting',
'depends' : ['account_voucher'],
'data': [
'wizard/account_check_batch_printing_view.xml',
'account_view.xml',
'account_voucher_view.xml',
'account_check_writing_data.xml',
'data/report_paperformat.xml',
'views/report_check.xml',
'account_check_writing_report.xml',
],
'demo': ['account_demo.xml'],
'test': [],
'installable': True,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nikolas/smc | salvus/sage_parsing.py | 2 | 18980 | """
sage_parser.py
Code for parsing Sage code blocks sensibly.
"""
#########################################################################################
# Copyright (C) 2013 William Stein <wstein@gmail.com> #
# #
# Distributed under the terms of the GNU General Public License (GPL), version 2+ #
# #
# http://www.gnu.org/licenses/ #
#########################################################################################
import string
import traceback
def get_input(prompt):
    """Read one (possibly multi-line) command interactively.

    Prints ``prompt`` and reads a line; if it ends with ':' keep reading
    continuation lines at a '... ' prompt (auto-indented by 4 spaces)
    until an empty line or EOF.  Returns the accumulated string, or None
    on EOF at the first prompt.

    NOTE: uses the Python 2 ``raw_input`` builtin, like the rest of
    this module.
    """
    try:
        r = raw_input(prompt)
        z = r
        if z.rstrip().endswith(':'):
            while True:
                try:
                    z = raw_input('... ')
                except EOFError:
                    # EOF ends the continuation block; keep what we have.
                    # (Previously set an unused local 'quit' here.)
                    break
                if z != '':
                    r += '\n    ' + z
                else:
                    break
        return r
    except EOFError:
        return None
#def strip_leading_prompts(code, prompts=['sage:', '....:', '...:', '>>>', '...']):
# code, literals, state = strip_string_literals(code)
# code2 = []
# for line in code.splitlines():
# line2 = line.lstrip()
# for p in prompts:
# if line2.startswith(p):
# line2 = line2[len(p):]
# if p[0] != '.':
# line2 = line2.lstrip()
# break
# code2.append(line2)
# code = ('\n'.join(code2))%literals
# return code
def preparse_code(code):
    """Run ``code`` through the Sage preparser, ignoring interactive prompts."""
    # Imported lazily so merely importing this module doesn't pull in Sage.
    import sage.all_cmdline
    return sage.all_cmdline.preparse(code, ignore_prompts=True)
def strip_string_literals(code, state=None):
    """
    Replace every string literal and comment in ``code`` with a
    ``%(Ln)s`` placeholder.

    Returns ``(new_code, literals, state)`` where ``literals`` maps each
    label to the original text (so ``new_code % literals`` restores the
    input) and ``state`` is ``(in_quote, raw)`` allowing parsing to
    resume across chunks by passing it back in.  Literal ``%`` characters
    outside strings are doubled so the later ``%`` substitution is safe.
    """
    new_code = []
    literals = {}
    counter = 0
    start = q = 0
    if state is None:
        in_quote = False
        raw = False
    else:
        in_quote, raw = state
    while True:
        # Find the next quote character; q is the current scan position.
        sig_q = code.find("'", q)
        dbl_q = code.find('"', q)
        hash_q = code.find('#', q)
        q = min(sig_q, dbl_q)
        if q == -1: q = max(sig_q, dbl_q)
        if not in_quote and hash_q != -1 and (q == -1 or hash_q < q):
            # it's a comment
            newline = code.find('\n', hash_q)
            if newline == -1: newline = len(code)
            counter += 1
            label = "L%s" % counter
            literals[label] = code[hash_q:newline]   # changed from sage: keep the comment text
            new_code.append(code[start:hash_q].replace('%','%%'))
            new_code.append("%%(%s)s" % label)
            start = q = newline
        elif q == -1:
            # No more quotes: flush the remainder (still-open string or code).
            if in_quote:
                counter += 1
                label = "L%s" % counter
                literals[label] = code[start:]
                new_code.append("%%(%s)s" % label)
            else:
                new_code.append(code[start:].replace('%','%%'))
            break
        elif in_quote:
            # Skip escaped quotes (an even number of backslashes does not escape).
            if code[q-1] == '\\':
                k = 2
                while code[q-k] == '\\':
                    k += 1
                if k % 2 == 0:
                    q += 1
            if code[q:q+len(in_quote)] == in_quote:
                counter += 1
                label = "L%s" % counter
                literals[label] = code[start:q+len(in_quote)]
                new_code.append("%%(%s)s" % label)
                q += len(in_quote)
                start = q
                in_quote = False
            else:
                q += 1
        else:
            # Opening quote: detect raw prefix and triple-quoted strings.
            raw = q>0 and code[q-1] in 'rR'
            if len(code) >= q+3 and (code[q+1] == code[q] == code[q+2]):
                in_quote = code[q]*3
            else:
                in_quote = code[q]
            new_code.append(code[start:q].replace('%', '%%'))
            start = q
            q += len(in_quote)

    return "".join(new_code), literals, (in_quote, raw)
def end_of_expr(s):
    """
    The input string s is a code expression that contains no strings (they
    have been stripped).  Return the index just past the expression that
    starts at the beginning of s: the position of the first whitespace at
    which all parentheses and brackets are balanced, or len(s) if there is
    no such position.
    """
    paren_depth = 0
    bracket_depth = 0
    for pos, ch in enumerate(s):
        if ch == '(':
            paren_depth += 1
        elif ch == ')':
            paren_depth -= 1
        elif ch == '[':
            bracket_depth += 1
        elif ch == ']':
            bracket_depth -= 1
        elif ch in ' \t' and paren_depth == 0 and bracket_depth == 0:
            # First balanced whitespace terminates the expression.
            return pos
    return len(s)
# NOTE: The dec_args dict will leak memory over time.  However, it only
# contains code that was entered, so it should never get big.  It
# seems impossible to know for sure whether a bit of code will be
# eventually needed later, so this leakiness seems necessary.

# Monotonically increasing key for new dec_args entries.
dec_counter = 0
# Maps an integer id to (decorator_expr_list, code_string), consumed by
# the salvus.execute_with_code_decorators calls emitted below.
dec_args = {}
# Divide the input code (a string) into blocks of code.
def divide_into_blocks(code):
    """
    Split ``code`` (one string) into a list of executable top-level blocks
    ``[start_line, stop_line, block_text]``.

    Steps: (1) strip string literals so parsing need not worry about quotes;
    (2) rewrite shell escapes (``!cmd``) and cell decorators (``%foo``) into
    calls to ``salvus.execute_with_code_decorators`` (arguments are stashed
    in the module-level ``dec_args`` table, keyed by ``dec_counter``);
    (3) group lines bottom-up into blocks, keeping continuation lines
    (indented lines, or lines that close an open paren/bracket/brace) with
    the statement they belong to; (4) merge blocks that cannot stand alone
    (``except``/``finally`` after ``try``, decorated ``def``, ``else``/
    ``elif`` after their introducing statement).

    On any parsing error the decorator-rewriting pass is abandoned and the
    original lines are used unchanged.  (Python 2 code: note the
    ``except Exception, mesg`` syntax and ``dict.iteritems``.)
    """
    global dec_counter
    # strip string literals from the input, so that we can parse it without having to worry about strings
    code, literals, state = strip_string_literals(code)
    # divide the code up into line lines.
    code = code.splitlines()
    # Compute the line-level code decorators.
    c = list(code)  # keep an untouched copy in case the rewrite below fails
    try:
        v = []
        for line in code:
            done = False
            # Transform shell escape into sh decorator.
            if line.lstrip().startswith('!'):
                line = line.replace('!', "%%sh ", 1)
            # Check for cell decorator
            # NOTE: strip_string_literals maps % to %%, because %foo is used for python string templating.
            if line.lstrip().startswith('%%'):
                i = line.find("%")
                j = end_of_expr(line[i+2:]) + i+2 + 1  # +1 for the space or tab delimiter
                expr = line[j:]%literals
                # Special case -- if % starts line *and* expr is empty (or a comment),
                # then code decorators impacts the rest of the code.
                sexpr = expr.strip()
                if i == 0 and (len(sexpr) == 0 or sexpr.startswith('#')):
                    new_line = '%ssalvus.execute_with_code_decorators(*_salvus_parsing.dec_args[%s])'%(line[:i], dec_counter)
                    # The decorator governs everything after this line.
                    expr = ('\n'.join(code[len(v)+1:]))%literals
                    done = True
                else:
                    # Expr is nonempty -- code decorator only impacts this line
                    new_line = '%ssalvus.execute_with_code_decorators(*_salvus_parsing.dec_args[%s])'%(line[:i], dec_counter)
                dec_args[dec_counter] = ([line[i+2:j]%literals], expr)
                dec_counter += 1
            else:
                new_line = line
            v.append(new_line)
            if done:
                break
        code = v
    except Exception, mesg:
        # Rewrite failed; fall back to the original, untransformed lines.
        code = c
    ## Tested this: Completely disable block parsing:
    ## but it requires the caller to do "exec compile(block+'\n', '', 'exec') in namespace, locals", which means no display hook,
    ## so "2+2" breaks.
    ## return [[0,len(code)-1,('\n'.join(code))%literals]]
    # take only non-empty lines now for Python code.
    code = [x for x in code if x.strip()]
    # Compute the blocks
    i = len(code)-1
    blocks = []
    while i >= 0:
        stop = i
        # Net bracket balance of the current line; a negative balance means
        # this line closes a bracket opened on an earlier line.
        paren_depth = code[i].count('(') - code[i].count(')')
        brack_depth = code[i].count('[') - code[i].count(']')
        curly_depth = code[i].count('{') - code[i].count('}')
        # Walk upward while the line is a continuation (indented, a string
        # template placeholder, or still inside an unclosed bracket).
        while i>=0 and ((len(code[i]) > 0 and (code[i][0] in string.whitespace or code[i][:2] == '%(')) or paren_depth < 0 or brack_depth < 0 or curly_depth < 0):
            i -= 1
            if i >= 0:
                paren_depth += code[i].count('(') - code[i].count(')')
                brack_depth += code[i].count('[') - code[i].count(']')
                curly_depth += code[i].count('{') - code[i].count('}')
        # remove comments
        for k, v in literals.iteritems():
            if v.startswith('#'):
                literals[k] = ''
        block = ('\n'.join(code[i:]))%literals
        bs = block.strip()
        if bs:  # has to not be only whitespace
            blocks.insert(0, [i, stop, bs])
        code = code[:i]
        i = len(code)-1
    # merge try/except/finally/decorator/else/elif blocks
    i = 1
    def merge():
        "Merge block i-1 with block i."
        blocks[i-1][-1] += '\n' + blocks[i][-1]
        blocks[i-1][1] = blocks[i][1]
        del blocks[i]
    while i < len(blocks):
        s = blocks[i][-1].lstrip()
        # finally/except lines after a try
        if (s.startswith('finally') or s.startswith('except')) and blocks[i-1][-1].lstrip().startswith('try'):
            merge()
        # function definitions
        elif s.startswith('def') and blocks[i-1][-1].splitlines()[-1].lstrip().startswith('@'):
            merge()
        # lines starting with else conditions (if *and* for *and* while!)
        elif s.startswith('else') and (blocks[i-1][-1].lstrip().startswith('if') or blocks[i-1][-1].lstrip().startswith('while') or blocks[i-1][-1].lstrip().startswith('for') or blocks[i-1][-1].lstrip().startswith('elif')):
            merge()
        # lines starting with elif
        elif s.startswith('elif') and blocks[i-1][-1].lstrip().startswith('if'):
            merge()
        # do not merge blocks -- move on to next one
        else:
            i += 1
    return blocks
############################################
# Character classes used by the completion/introspection helpers below.
CHARS0 = string.ascii_letters + string.digits + '_'  # identifier characters
CHARS = CHARS0 + '.'  # dotted-identifier characters (e.g. "a.b.c")
def guess_last_expression(obj):  # TODO: bad guess -- need to use a parser to go any further.
    """
    Return the trailing run of identifier-ish characters (letters, digits,
    underscore, dot) at the end of ``obj`` -- a crude guess at the last
    dotted expression in a line of code.
    """
    cut = len(obj)
    while cut > 0 and obj[cut - 1] in CHARS:
        cut -= 1
    return obj[cut:]
def is_valid_identifier(target):
    """
    Return True if ``target`` is a valid ASCII Python identifier.

    A valid identifier is a nonempty string whose first character is an
    ASCII letter or underscore and whose remaining characters are ASCII
    letters, digits, or underscores.  This intentionally rejects
    non-ASCII letters, matching the hand-rolled character-class loop it
    replaces (and Python 2 identifier rules).
    """
    import re  # local import keeps this helper self-contained
    # \Z anchors at the true end of the string ($ would also match before
    # a trailing newline, which must be rejected).
    return re.match(r'[A-Za-z_][A-Za-z0-9_]*\Z', target) is not None
# Keywords from http://docs.python.org/release/2.7.2/reference/lexical_analysis.html
# Names that are always offered as completions: every builtin plus the
# Python 2.7 keywords.  (Python 2: __builtins__.keys() returns a list, so
# the `+` concatenation below is valid; it would fail on Python 3.)
_builtin_completions = __builtins__.keys() + ['and', 'del', 'from', 'not', 'while', 'as', 'elif', 'global', 'or', 'with', 'assert', 'else', 'if', 'pass', 'yield', 'break', 'except', 'import', 'print', 'class', 'exec', 'in', 'raise', 'continue', 'finally', 'is', 'return', 'def', 'for', 'lambda', 'try']
def introspect(code, namespace, preparse=True):
    """
    Introspect the last expression in a string of Sage/Python code.

    INPUT:
    - code -- a string containing Sage (if preparse=True) or Python code.
    - namespace -- a dictionary to complete in (we also complete using
    builtins such as 'def', 'for', etc.
    - preparse -- a boolean

    OUTPUT:
    An object: {'result':, 'target':, 'expr':, 'status':, 'get_help':, 'get_completions':, 'get_source':}
    Exactly one of get_help ('?' or trailing '('), get_source ('??'), or
    get_completions (anything else) is True on return; 'result' is then the
    docstring, the source code, or the (sorted) list of completions.
    """
    # result: the docstring, source code, or list of completions (at
    # return, it might thus be either a list or a string)
    result = []
    # expr: the part of code that is used to do the completion, e.g.,
    # for 'a = n.m.foo', expr would be 'n.m.foo'. It can be more complicated,
    # e.g., for '(2+3).foo.bar' it would be '(2+3).foo'.
    expr = ''
    # target: for completions, target is the part of the code that we
    # complete on in the namespace defined by the object right before
    # it, e.g., for n.m.foo, the target is "foo". target is the empty
    # string for source code and docstrings.
    target = ''
    # When returning, exactly one of the following will be true:
    get_help = False  # getting docstring of something
    get_source = False  # getting source code of a function
    get_completions = True  # getting completions of an identifier in some namespace
    try:
        # Strip all strings from the code, replacing them by template
        # symbols; this makes parsing much easier.
        code0, literals, state = strip_string_literals(code.strip())  # we strip, since trailing space could cause confusion below
        # Move i so that it points to the start of the last expression in the code.
        # (TODO: this should probably be replaced by using ast on preparsed version. Not easy.)
        i = max([code0.rfind(t) for t in '\n;='])+1
        while i<len(code0) and code0[i] in string.whitespace:
            i += 1
        # Break the line in two pieces: before_expr | expr; we may
        # need before_expr in order to evaluate and make sense of
        # expr. We also put the string literals back in, so that
        # evaluation works.
        expr = code0[i:]%literals
        before_expr = code0[:i]%literals
        if '.' not in expr and '(' not in expr and ')' not in expr and '?' not in expr:
            # Easy case: this is just completion on a simple identifier in the namespace.
            get_help = False; get_completions = True; get_source = False
            target = expr
        else:
            # Now for all of the other harder cases.
            i = max([expr.rfind(s) for s in '?('])
            if i >= 1 and i == len(expr)-1 and expr[i-1] == '?':  # expr ends in two ?? -- source code
                get_source = True; get_completions = False; get_help = False
                target = ""
                obj = expr[:i-1]
            elif i == len(expr)-1:  # ends in ( or ? (but not ??) -- docstring
                get_help = True; get_completions = False; get_source = False
                target = ""
                obj = expr[:i]
            else:  # completions (not docstrings or source)
                get_help = False; get_completions = True; get_source = False
                i = expr.rfind('.')
                target = expr[i+1:]
                if target == '' or is_valid_identifier(target):
                    obj = expr[:i]
                else:
                    # target is not a plain identifier; re-guess the
                    # trailing expression and try the split again.
                    expr = guess_last_expression(target)
                    i = expr.rfind('.')
                    if i != -1:
                        target = expr[i+1:]
                        obj = expr[:i]
                    else:
                        target = expr
        if get_completions and target == expr:
            # No dot: complete directly against the namespace and builtins.
            j = len(expr)
            v = [x[j:] for x in (namespace.keys() + _builtin_completions) if x.startswith(expr)]
        else:
            # We will try to evaluate
            # obj. This is danerous and a priori could take
            # forever, so we spend at most 1 second doing this --
            # if it takes longer a signal kills the evaluation.
            # Obviously, this could in fact lock if
            # non-interruptable code is called, which should be rare.
            O = None
            try:
                import signal
                def mysig(*args): raise KeyboardInterrupt
                signal.signal(signal.SIGALRM, mysig)
                signal.alarm(1)
                import sage.all_cmdline
                if before_expr.strip():
                    try:
                        # Best effort: evaluation errors in the prefix are ignored.
                        exec (before_expr if not preparse else preparse_code(before_expr)) in namespace
                    except Exception, msg:
                        pass
                        # uncomment for debugging only
                        # traceback.print_exc()
                # We first try to evaluate the part of the expression before the name
                try:
                    O = eval(obj if not preparse else preparse_code(obj), namespace)
                except SyntaxError:
                    # If that fails, we try on a subexpression.
                    # TODO: This will not be needed when
                    # this code is re-written to parse using an
                    # AST, instead of using this lame hack.
                    obj = guess_last_expression(obj)
                    O = eval(obj if not preparse else preparse_code(obj), namespace)
            finally:
                # Always disarm the alarm handler, even on timeout/error.
                signal.signal(signal.SIGALRM, signal.SIG_IGN)
            def get_file():
                # One-line " File: ..." header for help/source output.
                try:
                    import sage.misc.sageinspect
                    return " File: " + eval('getdoc(O)', {'getdoc':sage.misc.sageinspect.sage_getfile, 'O':O}) + "\n"
                except Exception, err:
                    return "Unable to read source filename (%s)"%err
            if get_help:
                import sage.misc.sageinspect
                result = get_file()
                try:
                    def f(s):
                        # Render " Signature : obj(args)" + " Docstring :" text
                        # by reconstructing the argspec with its defaults.
                        x = sage.misc.sageinspect.sage_getargspec(s)
                        defaults = list(x.defaults) if x.defaults else []
                        args = list(x.args) if x.defaults else []
                        v = []
                        if x.keywords:
                            v.insert(0,'**kwds')
                        if x.varargs:
                            v.insert(0,'*args')
                        while defaults:
                            d = defaults.pop()
                            k = args.pop()
                            v.insert(0,'%s=%r'%(k,d))
                        v = args + v
                        t = " Signature : %s(%s)\n"%(obj, ', '.join(v))
                        t += " Docstring :\n" + sage.misc.sageinspect.sage_getdoc(s).strip()
                        return t
                    result += eval('getdoc(O)', {'getdoc':f, 'O':O})
                except Exception, err:
                    result += "Unable to read docstring (%s)"%err
                result = result.lstrip().replace('\n ','\n')  # Get rid of the 3 spaces in front of everything.
            elif get_source:
                import sage.misc.sageinspect
                result = get_file()
                try:
                    result += " Source:\n " + eval('getsource(O)', {'getsource':sage.misc.sageinspect.sage_getsource, 'O':O})
                except Exception, err:
                    result += "Unable to read source code (%s)"%err
            elif get_completions:
                if O is not None:
                    v = dir(O)
                    if hasattr(O, 'trait_names'):
                        v += O.trait_names()
                    if not target.startswith('_'):
                        # Hide private attributes unless explicitly requested.
                        v = [x for x in v if x and not x.startswith('_')]
                    j = len(target)
                    v = [x[j:] for x in v if x.startswith(target)]
                else:
                    v = []
        if get_completions:
            # Case-insensitive sort of the deduplicated completions (Py2 cmp).
            result = list(sorted(set(v), lambda x,y:cmp(x.lower(),y.lower())))
    except Exception, msg:
        traceback.print_exc()
        result = []
        status = 'ok'
    else:
        status = 'ok'
    return {'result':result, 'target':target, 'expr':expr, 'status':status, 'get_help':get_help, 'get_completions':get_completions, 'get_source':get_source} | gpl-3.0 |
pizzathief/numpy | numpy/lib/polynomial.py | 4 | 40727 | """
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import functools
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.core import overrides
from numpy.core.overrides import set_module
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
# Bind module='numpy' once, so every dispatcher created below reports
# 'numpy' as its public module in __array_function__ error messages.
array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')
@set_module('numpy')
class RankWarning(UserWarning):
    """Warning issued by `polyfit` when the Vandermonde matrix is rank
    deficient.

    See the `polyfit` documentation for more information, for an example
    of the warning being issued, and for how to suppress it.
    """
def _poly_dispatcher(seq_of_zeros):
    # Dispatcher for `poly`: expose the lone argument to __array_function__.
    return seq_of_zeros
@array_function_dispatch(_poly_dispatcher)
def poly(seq_of_zeros):
    """
    Find the coefficients of a polynomial with the given sequence of roots.

    Returns the coefficients of the monic polynomial whose zeros are the
    given sequence (multiple roots must appear as many times as their
    multiplicity).  A square array is treated as a matrix and the
    coefficients of its characteristic polynomial are returned.

    Parameters
    ----------
    seq_of_zeros : array_like, shape (N,) or (N, N)
        A sequence of polynomial roots, or a square array or matrix object.

    Returns
    -------
    c : ndarray
        1D array of polynomial coefficients from highest to lowest degree,
        with ``c[0]`` always equal to 1.

    Raises
    ------
    ValueError
        If input is neither 1-D nor a non-empty square 2-D array.

    See Also
    --------
    polyval, roots, polyfit, poly1d
    """
    seq_of_zeros = atleast_1d(seq_of_zeros)
    shape = seq_of_zeros.shape
    if len(shape) == 2 and shape[0] == shape[1] and shape[0] != 0:
        # Square matrix: its eigenvalues are the roots of the
        # characteristic polynomial.
        seq_of_zeros = eigvals(seq_of_zeros)
    elif len(shape) == 1:
        dtype = seq_of_zeros.dtype
        # Let object arrays slip through, e.g. for arbitrary precision.
        if dtype != object:
            seq_of_zeros = seq_of_zeros.astype(mintypecode(dtype.char))
    else:
        raise ValueError("input must be 1d or non-empty square 2d array.")

    if len(seq_of_zeros) == 0:
        return 1.0
    dtype = seq_of_zeros.dtype
    coeffs = ones((1,), dtype=dtype)
    for zero in seq_of_zeros:
        # Multiply the running polynomial by the factor (x - zero).
        coeffs = NX.convolve(coeffs, array([1, -zero], dtype=dtype),
                             mode='full')

    if issubclass(coeffs.dtype.type, NX.complexfloating):
        # If the complex roots all come in conjugate pairs, the
        # coefficients are actually real.
        candidates = NX.asarray(seq_of_zeros, complex)
        if NX.all(NX.sort(candidates) == NX.sort(candidates.conjugate())):
            coeffs = coeffs.real.copy()

    return coeffs
def _roots_dispatcher(p):
    # Dispatcher for `roots`: expose the lone argument to __array_function__.
    return p
@array_function_dispatch(_roots_dispatcher)
def roots(p):
    """
    Return the roots of a polynomial with coefficients given in p.

    If the length of `p` is n+1 then the polynomial is described by::

        p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]

    The roots are computed as the eigenvalues of the companion matrix.

    Parameters
    ----------
    p : array_like
        Rank-1 array of polynomial coefficients.

    Returns
    -------
    out : ndarray
        An array containing the roots of the polynomial.

    Raises
    ------
    ValueError
        When `p` cannot be converted to a rank-1 array.

    See Also
    --------
    poly, polyval, polyfit, poly1d
    """
    # If input is scalar, this makes it an array.
    p = atleast_1d(p)
    if p.ndim != 1:
        raise ValueError("Input must be a rank-1 array.")

    # Indices of the non-zero coefficients.
    nonzero = NX.nonzero(NX.ravel(p))[0]

    # The zero polynomial has no roots.
    if len(nonzero) == 0:
        return NX.array([])

    # Each trailing zero coefficient contributes one root at the origin.
    num_zero_roots = len(p) - nonzero[-1] - 1

    # Strip leading and trailing zeros.
    p = p[int(nonzero[0]):int(nonzero[-1])+1]

    # Promote to floating point if necessary.
    if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
        p = p.astype(float)

    degree = len(p)
    if degree > 1:
        # Build the companion matrix and take its eigenvalues.
        companion = diag(NX.ones((degree-2,), p.dtype), -1)
        companion[0,:] = -p[1:] / p[0]
        found = eigvals(companion)
    else:
        found = NX.array([])

    # Append the roots at zero.
    return hstack((found, NX.zeros(num_zero_roots, found.dtype)))
def _polyint_dispatcher(p, m=None, k=None):
    # Dispatcher for `polyint`: only `p` participates in dispatch.
    return (p,)
@array_function_dispatch(_polyint_dispatcher)
def polyint(p, m=1, k=None):
    """
    Return an antiderivative (indefinite integral) of a polynomial.

    The returned order-`m` antiderivative `P` of `p` satisfies
    :math:`\\frac{d^m}{dx^m}P(x) = p(x)`, with the `m` integration
    constants taken from `k` (highest order first) so that
    :math:`P^{(j)}(0) = k_{m-j-1}`.

    Parameters
    ----------
    p : array_like or poly1d
        Polynomial to integrate; a sequence is interpreted as polynomial
        coefficients from highest to lowest degree.
    m : int, optional
        Order of the antiderivative. (Default: 1)
    k : list of `m` scalars or scalar, optional
        Integration constants, given in the order of integration.  If
        ``None`` (default), all constants are zero.  If ``m = 1``, a
        single scalar may be given instead of a list.

    Returns
    -------
    ndarray or poly1d
        Coefficients of the antiderivative; a `poly1d` if `p` was one.

    See Also
    --------
    polyder : derivative of a polynomial
    poly1d.integ : equivalent method
    """
    m = int(m)
    if m < 0:
        raise ValueError("Order of integral must be positive (see polyder)")
    if k is None:
        k = NX.zeros(m, float)
    k = atleast_1d(k)
    if len(k) == 1 and m > 1:
        # One scalar constant is reused for every integration step.
        k = k[0]*NX.ones(m, float)
    if len(k) < m:
        raise ValueError(
            "k must be a scalar or a rank-1 array of length 1 or >m.")

    was_poly1d = isinstance(p, poly1d)
    p = NX.asarray(p)
    if m == 0:
        return poly1d(p) if was_poly1d else p
    # One integration step: divide by the new exponents and append the
    # constant of integration.
    # Note: this must work also with object and integer arrays.
    stepped = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
    result = polyint(stepped, m - 1, k=k[1:])
    return poly1d(result) if was_poly1d else result
def _polyder_dispatcher(p, m=None):
    # Dispatcher for `polyder`: only `p` participates in dispatch.
    return (p,)
@array_function_dispatch(_polyder_dispatcher)
def polyder(p, m=1):
    """
    Return the derivative of the specified order of a polynomial.

    Parameters
    ----------
    p : poly1d or sequence
        Polynomial to differentiate; a sequence is interpreted as
        polynomial coefficients from highest to lowest degree.
    m : int, optional
        Order of differentiation (default: 1).

    Returns
    -------
    der : poly1d or ndarray
        A new polynomial representing the derivative; a `poly1d` if `p`
        was one.

    See Also
    --------
    polyint : Anti-derivative of a polynomial.
    poly1d : Class for one-dimensional polynomials.
    """
    m = int(m)
    if m < 0:
        raise ValueError("Order of derivative must be positive (see polyint)")

    was_poly1d = isinstance(p, poly1d)
    p = NX.asarray(p)
    n = len(p) - 1
    # One differentiation step: drop the constant term and scale each
    # remaining coefficient by its exponent.
    stepped = p[:-1] * NX.arange(n, 0, -1)
    result = p if m == 0 else polyder(stepped, m - 1)
    if was_poly1d:
        result = poly1d(result)
    return result
def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None):
    # Dispatcher for `polyfit`: the array-like arguments x, y, w
    # participate in __array_function__ dispatch.
    return (x, y, w)
@array_function_dispatch(_polyfit_dispatcher)
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
    """
    Least squares polynomial fit.

    Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree
    `deg` to points `(x, y)`, minimising the squared error.  The
    `Polynomial.fit <numpy.polynomial.polynomial.Polynomial.fit>` class
    method is recommended for new code as it is more stable numerically.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points; a 2-D array fits one dataset
        per column.
    deg : int
        Degree of the fitting polynomial.
    rcond : float, optional
        Relative condition number of the fit; singular values smaller
        than this (relative to the largest) are ignored.  Default is
        ``len(x)*eps`` for the float type of `x`.
    full : bool, optional
        If True, also return diagnostic information from the singular
        value decomposition.
    w : array_like, shape (M,), optional
        Weights for the y-coordinates; for gaussian uncertainties use
        1/sigma (not 1/sigma**2).
    cov : bool or str, optional
        If truthy, also return the covariance matrix of the coefficients,
        scaled by chi2/sqrt(N-dof) unless ``cov='unscaled'``.

    Returns
    -------
    p : ndarray, shape (deg + 1,) or (deg + 1, K)
        Polynomial coefficients, highest power first.
    residuals, rank, singular_values, rcond
        Present only if ``full`` is True; see `linalg.lstsq`.
    V : ndarray, shape (M,M) or (M,M,K)
        Present only if ``full`` is False and `cov` is truthy; covariance
        matrix of the coefficient estimates.

    Warns
    -----
    RankWarning
        When the coefficient matrix is rank deficient (only if ``full``
        is False).

    See Also
    --------
    polyval, linalg.lstsq, scipy.interpolate.UnivariateSpline
    """
    order = int(deg) + 1
    x = NX.asarray(x) + 0.0
    y = NX.asarray(y) + 0.0

    # Validate the inputs before doing any real work.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if x.shape[0] != y.shape[0]:
        raise TypeError("expected x and y to have same length")

    # Default singular-value cutoff tied to the float precision of x.
    if rcond is None:
        rcond = len(x)*finfo(x.dtype).eps

    # Least-squares system: Vandermonde design matrix against y.
    design = vander(x, order)
    targets = y

    # Fold the weights into both sides of the system.
    if w is not None:
        w = NX.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected a 1-d array for weights")
        if w.shape[0] != y.shape[0]:
            raise TypeError("expected w and y to have the same length")
        design *= w[:, NX.newaxis]
        if targets.ndim == 2:
            targets *= w[:, NX.newaxis]
        else:
            targets *= w

    # Rescale the columns to improve the condition number, then solve.
    scale = NX.sqrt((design*design).sum(axis=0))
    design /= scale
    c, resids, rank, s = lstsq(design, targets, rcond)
    c = (c.T/scale).T  # undo the column scaling on the coefficients

    # Warn on rank reduction, which indicates an ill conditioned matrix.
    if rank != order and not full:
        warnings.warn("Polyfit may be poorly conditioned",
                      RankWarning, stacklevel=4)

    if full:
        return c, resids, rank, s, rcond
    if cov:
        Vbase = inv(dot(design.T, design))
        Vbase /= NX.outer(scale, scale)
        if cov == "unscaled":
            fac = 1
        else:
            if len(x) <= order:
                raise ValueError("the number of data points must exceed order "
                                 "to scale the covariance matrix")
            # note, this used to be: fac = resids / (len(x) - order - 2.0)
            # it was deciced that the "- 2" (originally justified by
            # "Bayesian uncertainty analysis") is not what the user
            # expects (see gh-11196 and gh-11197)
            fac = resids / (len(x) - order)
        if y.ndim == 1:
            return c, Vbase * fac
        return c, Vbase[:, :, NX.newaxis] * fac
    return c
def _polyval_dispatcher(p, x):
    # Dispatcher for `polyval`: both arguments participate in dispatch.
    return (p, x)
@array_function_dispatch(_polyval_dispatcher)
def polyval(p, x):
    """
    Evaluate a polynomial at specific values.

    If `p` is of length N, this returns
    ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``.
    If `x` is a sequence, ``p(x)`` is returned for each element; if `x`
    is another polynomial, the composite polynomial ``p(x(t))`` is
    returned.

    Parameters
    ----------
    p : array_like or poly1d object
        1D array of polynomial coefficients from highest degree to the
        constant term, or a poly1d instance.
    x : array_like or poly1d object
        A number, an array of numbers, or a poly1d, at which to
        evaluate `p`.

    Returns
    -------
    values : ndarray or poly1d
        The evaluated polynomial; a poly1d when `x` is one.

    See Also
    --------
    poly1d: A polynomial class.

    Notes
    -----
    Evaluation uses Horner's scheme; for polynomials of high degree the
    values may be inaccurate due to rounding errors.
    """
    p = NX.asarray(p)
    if isinstance(x, poly1d):
        # Composition: start from the zero polynomial.
        acc = 0
    else:
        x = NX.asanyarray(x)
        acc = NX.zeros_like(x)
    # Horner's scheme: fold in one coefficient per step.
    for coeff in p:
        acc = acc * x + coeff
    return acc
def _binary_op_dispatcher(a1, a2):
    # Shared dispatcher for the binary polynomial operations
    # (polyadd, polysub, polymul): both operands participate in dispatch.
    return (a1, a2)
@array_function_dispatch(_binary_op_dispatcher)
def polyadd(a1, a2):
    """
    Find the sum of two polynomials.

    Each input must be either a poly1d object or a 1D sequence of
    polynomial coefficients, from highest to lowest degree.

    Parameters
    ----------
    a1, a2 : array_like or poly1d object
        Input polynomials.

    Returns
    -------
    out : ndarray or poly1d object
        The sum of the inputs.  If either input is a poly1d object the
        output is one too; otherwise it is a 1D array of coefficients
        from highest to lowest degree.

    See Also
    --------
    poly1d, poly, polyder, polydiv, polyfit, polyint, polysub, polyval
    """
    wants_poly1d = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    a1 = atleast_1d(a1)
    a2 = atleast_1d(a2)
    # Left-pad the shorter coefficient array with zeros so the constant
    # terms line up.
    gap = len(a2) - len(a1)
    if gap > 0:
        a1 = NX.concatenate((NX.zeros(gap, a1.dtype), a1))
    elif gap < 0:
        a2 = NX.concatenate((NX.zeros(-gap, a2.dtype), a2))
    total = a1 + a2
    if wants_poly1d:
        total = poly1d(total)
    return total
@array_function_dispatch(_binary_op_dispatcher)
def polysub(a1, a2):
    """
    Difference (subtraction) of two polynomials.

    Given two polynomials `a1` and `a2`, returns ``a1 - a2``.  Each input
    can be an array_like sequence of coefficients (highest degree first,
    including zeros) or a `poly1d` object.

    Parameters
    ----------
    a1, a2 : array_like or poly1d
        Minuend and subtrahend polynomials, respectively.

    Returns
    -------
    out : ndarray or poly1d
        Coefficients of the difference polynomial; a `poly1d` if either
        input was one.

    See Also
    --------
    polyval, polydiv, polymul, polyadd
    """
    wants_poly1d = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    a1 = atleast_1d(a1)
    a2 = atleast_1d(a2)
    # Left-pad the shorter coefficient array with zeros so the constant
    # terms line up.
    gap = len(a2) - len(a1)
    if gap > 0:
        a1 = NX.concatenate((NX.zeros(gap, a1.dtype), a1))
    elif gap < 0:
        a2 = NX.concatenate((NX.zeros(-gap, a2.dtype), a2))
    difference = a1 - a2
    if wants_poly1d:
        difference = poly1d(difference)
    return difference
@array_function_dispatch(_binary_op_dispatcher)
def polymul(a1, a2):
    """
    Find the product of two polynomials.

    Each input may be a poly1d object or a 1-D sequence of polynomial
    coefficients, highest degree first.

    Parameters
    ----------
    a1, a2 : array_like or poly1d object
        Input polynomials.

    Returns
    -------
    out : ndarray or poly1d object
        Coefficients of the product, highest degree first.  A poly1d
        object is returned if either input was a poly1d object.

    See Also
    --------
    poly1d : A one-dimensional polynomial class.
    poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
    convolve : Array convolution. Same output as polymul, but has parameter
               for overlap mode.

    Examples
    --------
    >>> np.polymul([1, 2, 3], [9, 5, 1])
    array([ 9, 23, 38, 17,  3])
    """
    wrap = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    # Polynomial multiplication is a discrete convolution of the two
    # coefficient sequences; poly1d() normalises both inputs first.
    product = NX.convolve(poly1d(a1), poly1d(a2))
    if wrap:
        product = poly1d(product)
    return product
def _polydiv_dispatcher(u, v):
    return (u, v)


@array_function_dispatch(_polydiv_dispatcher)
def polydiv(u, v):
    """
    Returns the quotient and remainder of polynomial division.

    The input arrays are the coefficients (including any coefficients
    equal to zero) of the "numerator" (dividend) and "denominator"
    (divisor) polynomials, respectively.

    Parameters
    ----------
    u : array_like or poly1d
        Dividend polynomial's coefficients.
    v : array_like or poly1d
        Divisor polynomial's coefficients.

    Returns
    -------
    q : ndarray
        Coefficients, including those equal to zero, of the quotient.
    r : ndarray
        Coefficients, including those equal to zero, of the remainder.

    See Also
    --------
    poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub
    polyval

    Notes
    -----
    Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
    not equal `v.ndim`. In other words, all four possible combinations -
    ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
    ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.

    Examples
    --------
    .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25

    >>> x = np.array([3.0, 5.0, 2.0])
    >>> y = np.array([2.0, 1.0])
    >>> np.polydiv(x, y)
    (array([1.5 , 1.75]), array([0.25]))
    """
    # BUG FIX: the second operand was tested as `u` twice, so a poly1d
    # divisor (with an array dividend) never triggered poly1d output.
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
    # Force floating point so the synthetic-division loop below is exact
    # division rather than integer arithmetic.
    u = atleast_1d(u) + 0.0
    v = atleast_1d(v) + 0.0
    # w has the common type
    w = u[0] + v[0]
    m = len(u) - 1
    n = len(v) - 1
    scale = 1. / v[0]
    q = NX.zeros((max(m - n + 1, 1),), w.dtype)
    r = u.astype(w.dtype)
    # Classic long division: peel off the leading coefficient of the
    # running remainder at each step.
    for k in range(0, m-n+1):
        d = scale * r[k]
        q[k] = d
        r[k:k+n+1] -= d*v
    # Strip (numerically) zero leading coefficients from the remainder,
    # but always keep at least one coefficient.
    while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
        r = r[1:]
    if truepoly:
        return poly1d(q), poly1d(r)
    return q, r
# Matches the "**<digits>" exponent notation emitted by poly1d.__str__.
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
    # Convert the inline "x**N" notation in `astr` into a two-line layout
    # where each exponent sits one line above its term, e.g.
    #    2
    # 1 x + 2 x + 3
    # Pairs of lines longer than `wrap` characters are flushed to `output`
    # and a fresh pair is started.
    n = 0
    line1 = ''   # accumulates the exponents (upper line)
    line2 = ''   # accumulates the polynomial text (lower line)
    output = ' '
    while True:
        mat = _poly_mat.search(astr, n)
        if mat is None:
            break
        span = mat.span()
        power = mat.groups()[0]
        partstr = astr[n:span[0]]
        n = span[1]
        # Pad both lines so they stay in column lockstep: the exponent is
        # placed directly above the last character of its term.
        toadd2 = partstr + ' '*(len(power)-1)
        toadd1 = ' '*(len(partstr)-1) + power
        if ((len(line2) + len(toadd2) > wrap) or
                (len(line1) + len(toadd1) > wrap)):
            output += line1 + "\n" + line2 + "\n "
            line1 = toadd1
            line2 = toadd2
        else:
            line2 += partstr + ' '*(len(power)-1)
            line1 += ' '*(len(partstr)-1) + power
    output += line1 + "\n" + line2
    # Append any trailing text after the last exponent.
    return output + astr[n:]
@set_module('numpy')
class poly1d(object):
    """
    A one-dimensional polynomial class.

    A convenience class, used to encapsulate "natural" operations on
    polynomials so that said operations may take on their customary
    form in code (see Examples).

    Parameters
    ----------
    c_or_r : array_like
        The polynomial's coefficients, in decreasing powers, or if
        the value of the second parameter is True, the polynomial's
        roots (values where the polynomial evaluates to 0). For example,
        ``poly1d([1, 2, 3])`` returns an object that represents
        :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
        one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
    r : bool, optional
        If True, `c_or_r` specifies the polynomial's roots; the default
        is False.
    variable : str, optional
        Changes the variable used when printing `p` from `x` to `variable`
        (see Examples).

    Examples
    --------
    Construct the polynomial :math:`x^2 + 2x + 3`:

    >>> p = np.poly1d([1, 2, 3])
    >>> print(np.poly1d(p))
       2
    1 x + 2 x + 3

    Evaluate the polynomial at :math:`x = 0.5`:

    >>> p(0.5)
    4.25

    Find the roots:

    >>> p.r
    array([-1.+1.41421356j, -1.-1.41421356j])
    >>> p(p.r)
    array([ -4.44089210e-16+0.j,  -4.44089210e-16+0.j]) # may vary

    These numbers in the previous line represent (0, 0) to machine precision

    Show the coefficients:

    >>> p.c
    array([1, 2, 3])

    Display the order (the leading zero-coefficients are removed):

    >>> p.order
    2

    Show the coefficient of the k-th power in the polynomial
    (which is equivalent to ``p.c[-(i+1)]``):

    >>> p[1]
    2

    Polynomials can be added, subtracted, multiplied, and divided
    (returns quotient and remainder):

    >>> p * p
    poly1d([ 1,  4, 10, 12,  9])
    >>> (p**3 + 4) / p
    (poly1d([ 1.,  4., 10., 12.,  9.]), poly1d([4.]))

    ``asarray(p)`` gives the coefficient array, so polynomials can be
    used in all functions that accept arrays:

    >>> p**2 # square of polynomial
    poly1d([ 1,  4, 10, 12,  9])
    >>> np.square(p) # square of individual coefficients
    array([1, 4, 9])

    The variable used in the string representation of `p` can be modified,
    using the `variable` parameter:

    >>> p = np.poly1d([1,2,3], variable='z')
    >>> print(p)
       2
    1 z + 2 z + 3

    Construct a polynomial from its roots:

    >>> np.poly1d([1, 2], True)
    poly1d([ 1., -3.,  2.])

    This is the same polynomial as obtained by:

    >>> np.poly1d([1, -1]) * np.poly1d([1, -2])
    poly1d([ 1, -3,  2])

    """
    # Instances are mutable via __setitem__, so they are explicitly
    # unhashable.
    __hash__ = None
    @property
    def coeffs(self):
        """ The polynomial coefficients """
        return self._coeffs
    @coeffs.setter
    def coeffs(self, value):
        # allowing this makes p.coeffs *= 2 legal
        # Only re-assignment of the *same* array object is allowed (which is
        # what an augmented assignment produces); anything else is rejected.
        if value is not self._coeffs:
            raise AttributeError("Cannot set attribute")
    @property
    def variable(self):
        """ The name of the polynomial variable """
        return self._variable
    # calculated attributes
    @property
    def order(self):
        """ The order or degree of the polynomial """
        return len(self._coeffs) - 1
    @property
    def roots(self):
        """ The roots of the polynomial, where self(x) == 0 """
        return roots(self._coeffs)
    # our internal _coeffs property need to be backed by __dict__['coeffs'] for
    # scipy to work correctly.
    @property
    def _coeffs(self):
        return self.__dict__['coeffs']
    @_coeffs.setter
    def _coeffs(self, coeffs):
        self.__dict__['coeffs'] = coeffs
    # alias attributes
    r = roots
    c = coef = coefficients = coeffs
    o = order
    def __init__(self, c_or_r, r=False, variable=None):
        # Copy-constructor branch: building a poly1d from another poly1d
        # copies coefficients, variable name and (for now, with a warning)
        # any extra instance attributes.
        if isinstance(c_or_r, poly1d):
            self._variable = c_or_r._variable
            self._coeffs = c_or_r._coeffs
            if set(c_or_r.__dict__) - set(self.__dict__):
                msg = ("In the future extra properties will not be copied "
                       "across when constructing one poly1d from another")
                warnings.warn(msg, FutureWarning, stacklevel=2)
                self.__dict__.update(c_or_r.__dict__)
            if variable is not None:
                self._variable = variable
            return
        if r:
            # Interpret c_or_r as roots and convert to coefficients.
            c_or_r = poly(c_or_r)
        c_or_r = atleast_1d(c_or_r)
        if c_or_r.ndim > 1:
            raise ValueError("Polynomial must be 1d only.")
        # Drop leading (front) zero coefficients; the zero polynomial is
        # normalised to the single coefficient [0.].
        c_or_r = trim_zeros(c_or_r, trim='f')
        if len(c_or_r) == 0:
            c_or_r = NX.array([0.])
        self._coeffs = c_or_r
        if variable is None:
            variable = 'x'
        self._variable = variable
    def __array__(self, t=None):
        # asarray(p) yields the coefficient array (optionally cast to t).
        if t:
            return NX.asarray(self.coeffs, t)
        else:
            return NX.asarray(self.coeffs)
    def __repr__(self):
        vals = repr(self.coeffs)
        # Strip the surrounding "array(...)" from the ndarray repr.
        vals = vals[6:-1]
        return "poly1d(%s)" % vals
    def __len__(self):
        # len(p) is the polynomial's degree, not the coefficient count.
        return self.order
    def __str__(self):
        thestr = "0"
        var = self.variable
        # Remove leading zeros
        coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
        N = len(coeffs)-1
        def fmt_float(q):
            # Format a coefficient compactly, dropping a redundant ".0000".
            s = '%.4g' % q
            if s.endswith('.0000'):
                s = s[:-5]
            return s
        for k in range(len(coeffs)):
            if not iscomplex(coeffs[k]):
                coefstr = fmt_float(real(coeffs[k]))
            elif real(coeffs[k]) == 0:
                coefstr = '%sj' % fmt_float(imag(coeffs[k]))
            else:
                coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
                                          fmt_float(imag(coeffs[k])))
            power = (N-k)
            if power == 0:
                if coefstr != '0':
                    newstr = '%s' % (coefstr,)
                else:
                    if k == 0:
                        newstr = '0'
                    else:
                        newstr = ''
            elif power == 1:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    # NOTE(review): fmt_float never returns 'b'; this looks
                    # like a historical special case kept for compatibility.
                    newstr = var
                else:
                    newstr = '%s %s' % (coefstr, var)
            else:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    newstr = '%s**%d' % (var, power,)
                else:
                    newstr = '%s %s**%d' % (coefstr, var, power)
            if k > 0:
                if newstr != '':
                    if newstr.startswith('-'):
                        thestr = "%s - %s" % (thestr, newstr[1:])
                    else:
                        thestr = "%s + %s" % (thestr, newstr)
            else:
                thestr = newstr
        # _raise_power lifts the "**N" exponents onto the line above.
        return _raise_power(thestr)
    def __call__(self, val):
        # p(val) evaluates the polynomial at val (scalar, array or poly1d).
        return polyval(self.coeffs, val)
    def __neg__(self):
        return poly1d(-self.coeffs)
    def __pos__(self):
        return self
    def __mul__(self, other):
        if isscalar(other):
            return poly1d(self.coeffs * other)
        else:
            other = poly1d(other)
            return poly1d(polymul(self.coeffs, other.coeffs))
    def __rmul__(self, other):
        if isscalar(other):
            return poly1d(other * self.coeffs)
        else:
            other = poly1d(other)
            return poly1d(polymul(self.coeffs, other.coeffs))
    def __add__(self, other):
        other = poly1d(other)
        return poly1d(polyadd(self.coeffs, other.coeffs))
    def __radd__(self, other):
        other = poly1d(other)
        return poly1d(polyadd(self.coeffs, other.coeffs))
    def __pow__(self, val):
        # Only non-negative integral exponents make sense for polynomials.
        if not isscalar(val) or int(val) != val or val < 0:
            raise ValueError("Power to non-negative integers only.")
        # Repeated multiplication; val == 0 yields the constant poly [1].
        res = [1]
        for _ in range(val):
            res = polymul(self.coeffs, res)
        return poly1d(res)
    def __sub__(self, other):
        other = poly1d(other)
        return poly1d(polysub(self.coeffs, other.coeffs))
    def __rsub__(self, other):
        other = poly1d(other)
        return poly1d(polysub(other.coeffs, self.coeffs))
    def __div__(self, other):
        if isscalar(other):
            return poly1d(self.coeffs/other)
        else:
            other = poly1d(other)
            # Polynomial division returns a (quotient, remainder) pair.
            return polydiv(self, other)
    __truediv__ = __div__
    def __rdiv__(self, other):
        if isscalar(other):
            return poly1d(other/self.coeffs)
        else:
            other = poly1d(other)
            return polydiv(other, self)
    __rtruediv__ = __rdiv__
    def __eq__(self, other):
        if not isinstance(other, poly1d):
            return NotImplemented
        if self.coeffs.shape != other.coeffs.shape:
            return False
        return (self.coeffs == other.coeffs).all()
    def __ne__(self, other):
        if not isinstance(other, poly1d):
            return NotImplemented
        return not self.__eq__(other)
    def __getitem__(self, val):
        # p[k] is the coefficient of x**k; out-of-range powers read as 0.
        ind = self.order - val
        if val > self.order:
            return 0
        if val < 0:
            return 0
        return self.coeffs[ind]
    def __setitem__(self, key, val):
        # p[k] = c sets the coefficient of x**k, growing the coefficient
        # array with leading zeros if k exceeds the current order.
        ind = self.order - key
        if key < 0:
            raise ValueError("Does not support negative powers.")
        if key > self.order:
            zr = NX.zeros(key-self.order, self.coeffs.dtype)
            self._coeffs = NX.concatenate((zr, self.coeffs))
            ind = 0
        self._coeffs[ind] = val
        return
    def __iter__(self):
        # Iterates over coefficients, highest degree first.
        return iter(self.coeffs)
    def integ(self, m=1, k=0):
        """
        Return an antiderivative (indefinite integral) of this polynomial.

        Refer to `polyint` for full documentation.

        See Also
        --------
        polyint : equivalent function
        """
        return poly1d(polyint(self.coeffs, m=m, k=k))
    def deriv(self, m=1):
        """
        Return a derivative of this polynomial.

        Refer to `polyder` for full documentation.

        See Also
        --------
        polyder : equivalent function
        """
        return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
# Always show RankWarning (presumably emitted by polyfit elsewhere in this
# module — confirm) instead of Python's default warn-once behaviour.
warnings.simplefilter('always', RankWarning)
| bsd-3-clause |
ABcDexter/cython | Cython/Plex/Errors.py | 33 | 1169 | #=======================================================================
#
# Python Lexical Analyser
#
# Exception classes
#
#=======================================================================
class PlexError(Exception):
    """Base class for all errors raised by the Plex lexical analyser."""
    # Subclasses may override with a fixed message (see AmbiguousAction).
    message = ""
class PlexTypeError(PlexError, TypeError):
    """Plex error that is also catchable as a TypeError."""
    pass
class PlexValueError(PlexError, ValueError):
    """Plex error that is also catchable as a ValueError."""
    pass
class InvalidRegex(PlexError):
    """Raised for an invalid Plex regular expression."""
    pass
class InvalidToken(PlexError):
    """Raised for a bad token definition, identified by its number."""
    def __init__(self, token_number, message):
        # Prefix the message with the token's position for easier diagnosis.
        PlexError.__init__(self, "Token number %d: %s" % (token_number, message))
class InvalidScanner(PlexError):
    """Raised for improper construction or use of a Scanner."""
    pass
class AmbiguousAction(PlexError):
    """Raised when two tokens with different actions can match the same string."""
    message = "Two tokens with different actions can match the same string"
    def __init__(self):
        # The class-level message is the whole story; no arguments needed.
        pass
class UnrecognizedInput(PlexError):
    """Raised when the scanner reads input that matches no token in the
    current state."""
    scanner = None      # the Scanner that hit the unrecognised input
    position = None     # (name, line, char) from scanner.get_position()
    state_name = None   # name of the scanner state at the time
    def __init__(self, scanner, state_name):
        self.scanner = scanner
        self.position = scanner.get_position()
        self.state_name = state_name
    def __str__(self):
        return ("'%s', line %d, char %d: Token not recognised in state %r" % (
            self.position + (self.state_name,)))
| apache-2.0 |
tkralphs/GrUMPy | test/test_bb.py | 2 | 2626 | '''
Tests correctness of branch and bound algorithm. Optimal values of problems are
hard-coded. This will be valid until python decides to change its random number
generator. See test_bb_pulp.py for comparing branch and bound results with
optimal values given by PuLP.
Script raises exceptions if the bb solution is not integer feasible or optimal
value is not right.
'''
from grumpy import BBTree
import sys
import math
# Tolerance used when checking a variable value for integrality.
EPSILON = 1e-10
# test problem, (num_vars,num_cons,seed)
problem = [(10,10,0),
           (10,10,1),
           (20,10,2),
           (20,10,3),
           (30,20,4),
           (30,20,5),
           (40,20,6),
           (40,20,7),
           (40,30,8),
           ]
# optimal values of problems, keyed by the same (num_vars,num_cons,seed)
# tuples; valid only as long as Python's random number generator is stable.
pre_computed_opt_val= {(10,10,0):50,
                       (10,10,1):45,
                       (20,10,2):104,
                       (20,10,3):93,
                       (30,20,4):139,
                       (30,20,5):136,
                       (40,20,6):192,
                       (40,20,7):164,
                       (40,30,8):179,
                       }
if __name__=='__main__':
for p in problem:
bt = BBTree()
var, con, seed = p
CONSTRAINTS, VARIABLES, OBJ, MAT, RHS = bt.GenerateRandomMIP(numVars=var,
numCons=con,
rand_seed=seed)
solution, opt_value = bt.BranchAndBound(CONSTRAINTS, VARIABLES, OBJ,
MAT, RHS)
# test solution.
#= test integer feasibility
for v in solution:
diff = solution[v]-math.floor(solution[v])
if (diff>EPSILON and diff<(1-EPSILON)):
raise Exception('Integer infeasible variable %s, value %f ' %(v, solution[v]))
#= test feasibility of constraints
Ax = []
num_cons = len(CONSTRAINTS)
#== for each constraint
for c in range(num_cons):
_sum = 0.0
for v in VARIABLES:
_sum += MAT[v][c]*solution[v]
Ax.append(_sum)
for c in range(num_cons):
if Ax[c] > RHS[c]:
raise Exception('Solution does not satisfy constraint ' + CONSTRAINTS[c])
#= test optimal value
if opt_value!=pre_computed_opt_val[p]:
raise Exception('Optimality is not acheived for problem %s. BB: %f, OPT: %f ' %(str(p), opt_value[p], pre_computed_opt_val[p]))
print '***************************************************'
print '* No exceptions raised, BB solutions are correct. *'
print '***************************************************'
| epl-1.0 |
gototem/qemu | scripts/tracetool/format/events_h.py | 95 | 1183 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Generate .h for event description.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@linux.vnet.ibm.com"
from tracetool import out
def begin(events):
    """Emit the generated-events header for *events* via tracetool's `out`:
    include guards, the TraceEventID enum, and one TRACE_<NAME>_ENABLED
    macro per event."""
    out('/* This file is autogenerated by tracetool, do not edit. */',
        '',
        '#ifndef TRACE__GENERATED_EVENTS_H',
        '#define TRACE__GENERATED_EVENTS_H',
        '',
        '#include <stdbool.h>',
        ''
        )
    # event identifiers
    out('typedef enum {')
    for e in events:
        out(' TRACE_%s,' % e.name.upper())
    out(' TRACE_EVENT_COUNT',
        '} TraceEventID;',
        )
    # static state
    for e in events:
        # Events carrying the 'disable' property compile to a 0 macro so the
        # tracepoint is compiled out.
        if 'disable' in e.properties:
            enabled = 0
        else:
            enabled = 1
        out('#define TRACE_%s_ENABLED %d' % (e.name.upper(), enabled))
    out('#include "trace/event-internal.h"',
        '',
        '#endif /* TRACE__GENERATED_EVENTS_H */',
        )
| gpl-2.0 |
APCVSRepo/sdl_core_v4.0_winceport | src/3rd_party-static/jsoncpp/devtools/fixeol.py | 247 | 1941 | import os.path
def fix_source_eol( path, is_dry_run = True, verbose = True, eol = '\n' ):
"""Makes sure that all sources have the specified eol sequence (default: unix)."""
if not os.path.isfile( path ):
raise ValueError( 'Path "%s" is not a file' % path )
try:
f = open(path, 'rb')
except IOError, msg:
print >> sys.stderr, "%s: I/O Error: %s" % (file, str(msg))
return False
try:
raw_lines = f.readlines()
finally:
f.close()
fixed_lines = [line.rstrip('\r\n') + eol for line in raw_lines]
if raw_lines != fixed_lines:
print '%s =>' % path,
if not is_dry_run:
f = open(path, "wb")
try:
f.writelines(fixed_lines)
finally:
f.close()
if verbose:
print is_dry_run and ' NEED FIX' or ' FIXED'
return True
##
##
##
##def _do_fix( is_dry_run = True ):
## from waftools import antglob
## python_sources = antglob.glob( '.',
## includes = '**/*.py **/wscript **/wscript_build',
## excludes = antglob.default_excludes + './waf.py',
## prune_dirs = antglob.prune_dirs + 'waf-* ./build' )
## for path in python_sources:
## _fix_python_source( path, is_dry_run )
##
## cpp_sources = antglob.glob( '.',
## includes = '**/*.cpp **/*.h **/*.inl',
## prune_dirs = antglob.prune_dirs + 'waf-* ./build' )
## for path in cpp_sources:
## _fix_source_eol( path, is_dry_run )
##
##
##def dry_fix(context):
## _do_fix( is_dry_run = True )
##
##def fix(context):
## _do_fix( is_dry_run = False )
##
##def shutdown():
## pass
##
##def check(context):
## # Unit tests are run when "check" target is used
## ut = UnitTest.unit_test()
## ut.change_to_testfile_dir = True
## ut.want_to_see_test_output = True
## ut.want_to_see_test_error = True
## ut.run()
## ut.print_results()
| bsd-3-clause |
scottbelden/coala | tests/output/printers/LogPrinterTest.py | 15 | 4740 | import unittest
from datetime import datetime
from pyprint.NullPrinter import NullPrinter
from pyprint.Printer import Printer
from pyprint.StringPrinter import StringPrinter
from coalib.misc import Constants
from coalib.output.printers.LogPrinter import LogPrinter
from coalib.processes.communication.LogMessage import LOG_LEVEL, LogMessage
class LogPrinterTest(unittest.TestCase):
    """Tests for LogPrinter: message formatting with timestamps, log-level
    filtering, exception logging, and argument type checking."""
    # Shared fixture: a single ERROR-level message with a fixed timestamp.
    timestamp = datetime.today()
    log_message = LogMessage(LOG_LEVEL.ERROR,
                             Constants.COMPLEX_TEST_STRING,
                             timestamp=timestamp)
    def test_interface(self):
        # The plain Printer base class must not accept log messages.
        uut = LogPrinter(Printer())
        self.assertRaises(NotImplementedError,
                          uut.log_message,
                          self.log_message)
    def test_get_printer(self):
        self.assertIs(LogPrinter(None).printer, None)
        printer = Printer()
        self.assertIs(LogPrinter(printer).printer, printer)
    def test_logging(self):
        # With an empty timestamp format the raw message string is printed.
        uut = LogPrinter(StringPrinter(), timestamp_format="")
        uut.log_message(self.log_message, end="")
        self.assertEqual(uut.printer.string, str(self.log_message))
        uut = LogPrinter(StringPrinter(), log_level=LOG_LEVEL.DEBUG)
        uut.log_message(self.log_message, end="")
        self.assertEqual(
            uut.printer.string,
            "[ERROR][" + self.timestamp.strftime("%X") + "] " +
            Constants.COMPLEX_TEST_STRING)
        uut.printer.clear()
        uut.log(LOG_LEVEL.ERROR,
                Constants.COMPLEX_TEST_STRING,
                timestamp=self.timestamp,
                end="")
        self.assertEqual(
            uut.printer.string,
            "[ERROR][" + self.timestamp.strftime("%X") + "] " +
            Constants.COMPLEX_TEST_STRING)
        uut.printer.clear()
        uut.debug(Constants.COMPLEX_TEST_STRING,
                  "d",
                  timestamp=self.timestamp,
                  end="")
        self.assertEqual(
            uut.printer.string,
            "[DEBUG][" + self.timestamp.strftime("%X") + "] " +
            Constants.COMPLEX_TEST_STRING + " d")
        uut.printer.clear()
        # Raising the log level must silently drop lower-severity messages.
        uut.log_level = LOG_LEVEL.INFO
        uut.debug(Constants.COMPLEX_TEST_STRING,
                  timestamp=self.timestamp,
                  end="")
        self.assertEqual(uut.printer.string, "")
        uut.printer.clear()
        uut.info(Constants.COMPLEX_TEST_STRING,
                 "d",
                 timestamp=self.timestamp,
                 end="")
        self.assertEqual(
            uut.printer.string,
            "[INFO][" + self.timestamp.strftime("%X") + "] " +
            Constants.COMPLEX_TEST_STRING + " d")
        uut.log_level = LOG_LEVEL.WARNING
        uut.printer.clear()
        uut.debug(Constants.COMPLEX_TEST_STRING,
                  timestamp=self.timestamp,
                  end="")
        self.assertEqual(uut.printer.string, "")
        uut.printer.clear()
        uut.warn(Constants.COMPLEX_TEST_STRING,
                 "d",
                 timestamp=self.timestamp,
                 end="")
        self.assertEqual(
            uut.printer.string,
            "[WARNING][" + self.timestamp.strftime("%X") + "] " +
            Constants.COMPLEX_TEST_STRING + " d")
        uut.printer.clear()
        uut.err(Constants.COMPLEX_TEST_STRING,
                "d",
                timestamp=self.timestamp,
                end="")
        self.assertEqual(
            uut.printer.string,
            "[ERROR][" + self.timestamp.strftime("%X") + "] " +
            Constants.COMPLEX_TEST_STRING + " d")
        # At DEBUG level an exception log includes the traceback details.
        uut.log_level = LOG_LEVEL.DEBUG
        uut.printer.clear()
        uut.log_exception(
            "Something failed.",
            NotImplementedError(Constants.COMPLEX_TEST_STRING),
            timestamp=self.timestamp)
        self.assertTrue(uut.printer.string.startswith(
            "[ERROR][" + self.timestamp.strftime("%X") +
            "] Something failed.\n" +
            "[DEBUG][" + self.timestamp.strftime("%X") +
            "] Exception was:"))
        uut.log_level = LOG_LEVEL.INFO
        uut.printer.clear()
        logged = uut.log_exception(
            "Something failed.",
            NotImplementedError(Constants.COMPLEX_TEST_STRING),
            timestamp=self.timestamp,
            end="")
        self.assertTrue(uut.printer.string.startswith(
            "[ERROR][" + self.timestamp.strftime("%X") +
            "] Something failed."))
    def test_raises(self):
        # Non-message arguments must be rejected with TypeError.
        uut = LogPrinter(NullPrinter())
        self.assertRaises(TypeError, uut.log, 5)
        self.assertRaises(TypeError, uut.log_exception, "message", 5)
        self.assertRaises(TypeError, uut.log_message, 5)
Voluntarynet/BitmessageKit | BitmessageKit/Vendor/static-python/Lib/test/test_linecache.py | 96 | 4079 | """ Tests for the linecache module """
import linecache
import unittest
import os.path
from test import test_support as support
FILENAME = linecache.__file__
INVALID_NAME = '!@$)(!@#_1'
EMPTY = ''
TESTS = 'inspect_fodder inspect_fodder2 mapping_tests'
TESTS = TESTS.split()
TEST_PATH = os.path.dirname(support.__file__)
MODULES = "linecache abc".split()
MODULE_PATH = os.path.dirname(FILENAME)
SOURCE_1 = '''
" Docstring "
def function():
return result
'''
SOURCE_2 = '''
def f():
return 1 + 1
a = f()
'''
SOURCE_3 = '''
def f():
return 3''' # No ending newline
class LineCacheTests(unittest.TestCase):
    """Behavioural tests for linecache: getline bounds, cache clearing and
    cache invalidation via checkcache."""
    def test_getline(self):
        getline = linecache.getline
        # Bad values for line number should return an empty string
        self.assertEqual(getline(FILENAME, 2**15), EMPTY)
        self.assertEqual(getline(FILENAME, -1), EMPTY)
        # Float values currently raise TypeError, should it?
        self.assertRaises(TypeError, getline, FILENAME, 1.1)
        # Bad filenames should return an empty string
        self.assertEqual(getline(EMPTY, 1), EMPTY)
        self.assertEqual(getline(INVALID_NAME, 1), EMPTY)
        # Check whether lines correspond to those from file iteration
        for entry in TESTS:
            filename = os.path.join(TEST_PATH, entry) + '.py'
            for index, line in enumerate(open(filename)):
                self.assertEqual(line, getline(filename, index + 1))
        # Check module loading
        for entry in MODULES:
            filename = os.path.join(MODULE_PATH, entry) + '.py'
            for index, line in enumerate(open(filename)):
                self.assertEqual(line, getline(filename, index + 1))
        # Check that bogus data isn't returned (issue #1309567)
        empty = linecache.getlines('a/b/c/__init__.py')
        self.assertEqual(empty, [])
    def test_no_ending_newline(self):
        # A missing trailing newline must still yield a newline-terminated
        # final cached line.
        self.addCleanup(support.unlink, support.TESTFN)
        with open(support.TESTFN, "w") as fp:
            fp.write(SOURCE_3)
        lines = linecache.getlines(support.TESTFN)
        self.assertEqual(lines, ["\n", "def f():\n", "    return 3\n"])
    def test_clearcache(self):
        cached = []
        for entry in TESTS:
            filename = os.path.join(TEST_PATH, entry) + '.py'
            cached.append(filename)
            linecache.getline(filename, 1)
        # Are all files cached?
        cached_empty = [fn for fn in cached if fn not in linecache.cache]
        self.assertEqual(cached_empty, [])
        # Can we clear the cache?
        linecache.clearcache()
        cached_empty = [fn for fn in cached if fn in linecache.cache]
        self.assertEqual(cached_empty, [])
    def test_checkcache(self):
        getline = linecache.getline
        # Create a source file and cache its contents
        source_name = support.TESTFN + '.py'
        self.addCleanup(support.unlink, source_name)
        with open(source_name, 'w') as source:
            source.write(SOURCE_1)
        getline(source_name, 1)
        # Keep a copy of the old contents
        source_list = []
        with open(source_name) as source:
            for index, line in enumerate(source):
                self.assertEqual(line, getline(source_name, index + 1))
                source_list.append(line)
        with open(source_name, 'w') as source:
            source.write(SOURCE_2)
        # Try to update a bogus cache entry
        linecache.checkcache('dummy')
        # Check that the cache matches the old contents
        for index, line in enumerate(source_list):
            self.assertEqual(line, getline(source_name, index + 1))
        # Update the cache and check whether it matches the new source file
        linecache.checkcache(source_name)
        with open(source_name) as source:
            for index, line in enumerate(source):
                self.assertEqual(line, getline(source_name, index + 1))
                source_list.append(line)
def test_main():
    """Entry point used by the regression-test framework."""
    support.run_unittest(LineCacheTests)
if __name__ == "__main__":
    test_main()
| mit |
ShiYw/Sigil | 3rdparty/python/Lib/encodings/euc_kr.py | 816 | 1027 | #
# euc_kr.py: Python Unicode Codec for EUC_KR
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_kr, codecs
import _multibytecodec as mbc
# The underlying C codec implementation for EUC-KR.
codec = _codecs_kr.getcodec('euc_kr')
class Codec(codecs.Codec):
    """Stateless EUC-KR encoder/decoder delegating to the C codec."""
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    """Incremental EUC-KR encoder backed by the multibyte codec machinery."""
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    """Incremental EUC-KR decoder backed by the multibyte codec machinery."""
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    """Stream reader for EUC-KR."""
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    """Stream writer for EUC-KR."""
    codec = codec
def getregentry():
    """Return the CodecInfo used by the codec registry for 'euc_kr'."""
    return codecs.CodecInfo(
        name='euc_kr',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
alanamarzoev/ray | src/plasma/setup.py | 3 | 1287 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import setup, find_packages
import setuptools.command.install as _install
import subprocess
class install(_install.install):
    """Custom install command that builds the native plasma binaries and
    copies them into the Python package before installing."""
    def run(self):
        # Build the native components and place the executables inside the
        # package directory so they ship with the installed package.
        subprocess.check_call(["make"])
        subprocess.check_call(["cp", "build/plasma_store",
                               "plasma/plasma_store"])
        subprocess.check_call(["cp", "build/plasma_manager",
                               "plasma/plasma_manager"])
        subprocess.check_call(["cmake", ".."], cwd="./build")
        subprocess.check_call(["make", "install"], cwd="./build")
        # Calling _install.install.run(self) does not fetch required packages
        # and instead performs an old-style install. See command/install.py in
        # setuptools. So, calling do_egg_install() manually here.
        self.do_egg_install()
# Package metadata; package_data ships the prebuilt plasma binaries and the
# shared library produced by the custom install command above.
setup(name="Plasma",
      version="0.0.1",
      description="Plasma client for Python",
      packages=find_packages(),
      package_data={"plasma": ["plasma_store",
                               "plasma_manager",
                               "libplasma.so"]},
      cmdclass={"install": install},
      include_package_data=True,
      zip_safe=False)
jbornschein/mca-genmodel | pulp/em/__init__.py | 1 | 6141 | #
# Author: Jorg Bornschein <bornschein@fias.uni-frankfurt.de)
# Lincense: Academic Free License (AFL) v3.0
#
"""
Expectation Maximization Framework
==================================
Usage example::
import pulp.em as em
from pulp.em.LinCA import LinCA
from pulp.utils.dlog import dlog
# Prepare Model
model = LinCA()
anneal = em.LinearAnneal([ (200, 2, 50) ])
# Prepare training data
# Instantiate and run EM
em = em.EM()
em.model = model
em.anneal = anneal
em.set_data(y)
em.run()
"""
from __future__ import division
import numpy as np
from mpi4py import MPI
try:
from abc import ABCMeta, abstractmethod
except ImportError, e:
from pulp.utils.py25_compatibility import _py25_ABCMeta as ABCMeta
from pulp.utils.py25_compatibility import _py25_abstractmethod as abstractmethod
import pulp.utils.tracing as tracing
import pulp.utils.parallel as parallel
from pulp.utils.datalog import dlog
#=============================================================================
# General EM Model Base Class
class Model():
    """ Model Base Class.

    Includes knowledge about parameters, data generation, model specific
    functions, E and M step.

    Specific models will be subclasses of this abstract base class.
    """
    __metaclass__ = ABCMeta
    def __init__(self, comm=MPI.COMM_WORLD):
        self.comm = comm
        # Maps parameter name -> (low_bound, up_bound, absify); consumed by
        # noisify_params below.
        self.noise_policy = {}
    @abstractmethod
    def generate_data(self, model_params, N):
        """ Generate datapoints according to the model.

        Given the model parameters *model_params* return a dataset
        of *N* datapoints.
        """
        return data  # as dictionary
    @abstractmethod
    def step(self, anneal, model_params, my_data):
        """ Perform a combined E and M step and return updated parameters. """
        pass
    @abstractmethod
    def standard_init(self, data):
        """ Initialize a set of model parameters in some
        sane way.

        Return value is model_parameter dictionary
        """
        pass
    @tracing.traced
    def noisify_params(self, model_params, anneal):
        """ Noisify model params.

        Noisify the given model parameters according to self.noise_policy
        and the annealing object provided. The noise_policy of some model
        parameter PARAM will only be applied if the annealing object
        provides a noise strength via PARAM_noise.
        """
        H, D = self.H, self.D
        normal = np.random.normal
        comm = self.comm
        for param, policy in self.noise_policy.items():
            pvalue = model_params[param]
            if anneal[param+"_noise"] != 0.0:
                if np.isscalar(pvalue):  # Param to be noisified is scalar
                    new_pvalue = 0
                    if comm.rank == 0:
                        scale = anneal[param+"_noise"]
                        new_pvalue = pvalue + normal(scale=scale)
                        # BUG FIX: the clamped value was assigned to the
                        # unused name `new_value`, so scalar parameters were
                        # never actually clamped to the policy bounds.
                        if new_pvalue < policy[0]:
                            new_pvalue = policy[0]
                        if new_pvalue >= policy[1]:
                            new_pvalue = policy[1]
                        if policy[2]:
                            new_pvalue = np.abs(new_pvalue)
                    # Root noisifies; everyone receives the same value.
                    pvalue = comm.bcast(new_pvalue)
                else:  # Param to be noisified is an ndarray
                    if comm.rank == 0:
                        scale = anneal[param+"_noise"]
                        shape = pvalue.shape
                        new_pvalue = pvalue + normal(scale=scale, size=shape)
                        low_bound, up_bound, absify = policy
                        new_pvalue = np.maximum(low_bound, new_pvalue)
                        new_pvalue = np.minimum(up_bound, new_pvalue)
                        if absify:
                            new_pvalue = np.abs(new_pvalue)
                        pvalue = new_pvalue
                    comm.Bcast([pvalue, MPI.DOUBLE])
            model_params[param] = pvalue
        return model_params
    def gain(self, old_params, new_params):
        # Default gain for dynamic annealing schemes; subclasses may override.
        return 0.
#=============================================================================#
# EM Class
class EM():
    """ This class drives the EM algorithm.

    It uses instances of the following classes:

     *model*   Instance of a Model class.
     *anneal*  Instance of an Annealing class.
     *data*    Training data in a dictionary. The required content is model,
               but usually data['y'] should contain the trainig data.
     *lparams* Initial set of model parameters in a dictionary. Exact content
               is model dependent.
    """
    def __init__(self, model=None, anneal=None, data=None,
                 lparams=None, mpi_comm=None):
        self.model = model;
        self.anneal = anneal
        self.data = data
        self.lparams = lparams
        self.mpi_comm = mpi_comm
    def step(self):
        """ Execute a single EM-Step """
        model = self.model
        anneal = self.anneal
        my_data = self.data
        model_params = self.lparams
        # Do an complete EM-step
        # NOTE(review): the result is discarded here and self.lparams is not
        # updated; run() below is the loop that keeps accepted parameters.
        new_model_params = model.step(anneal, model_params, my_data)
    def run(self, verbose=False):
        """ Run a complete cooling-cycle

        When *verbose* is True a progress message is printed for every step
        via dlog.progress(...)
        """
        model = self.model
        anneal = self.anneal
        my_data = self.data
        model_params = self.lparams
        while not anneal.finished:
            # Progress message
            if verbose:
                dlog.progress("EM step %d of %d" % (anneal['step']+1, anneal['max_step']), anneal['position'])
            # Do E and M step
            new_model_params = model.step(anneal, model_params, my_data)
            # Calculate the gain so that dynamic annealing schemes can be implemented
            gain = model.gain(model_params, new_model_params)
            anneal.next(gain)
            # Only keep the new parameters if the annealing schedule accepts
            # this step.
            if anneal.accept:
                model_params = new_model_params
        self.lparams = model_params
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.