repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
SyamGadde/cython | pyximport/test/test_pyximport.py | 13 | 2070 | from __future__ import absolute_import, print_function
from pyximport import pyximport; pyximport.install(reload_support=True)
import os, sys
import time, shutil
import tempfile
def make_tempdir():
tempdir = os.path.join(tempfile.gettempdir(), "pyrex_temp")
if os.path.exists(tempdir):
remove_tempdir(tempdir)
os.mkdir(tempdir)
return tempdir
def remove_tempdir(tempdir):
shutil.rmtree(tempdir, 0, on_remove_file_error)
def on_remove_file_error(func, path, excinfo):
print("Sorry! Could not remove a temp file:", path)
print("Extra information.")
print(func, excinfo)
print("You may want to delete this yourself when you get a chance.")
def test():
pyximport._test_files = []
tempdir = make_tempdir()
sys.path.append(tempdir)
filename = os.path.join(tempdir, "dummy.pyx")
open(filename, "w").write("print 'Hello world from the Pyrex install hook'")
import dummy
reload(dummy)
depend_filename = os.path.join(tempdir, "dummy.pyxdep")
depend_file = open(depend_filename, "w")
depend_file.write("*.txt\nfoo.bar")
depend_file.close()
build_filename = os.path.join(tempdir, "dummy.pyxbld")
build_file = open(build_filename, "w")
build_file.write("""
from distutils.extension import Extension
def make_ext(name, filename):
return Extension(name=name, sources=[filename])
""")
build_file.close()
open(os.path.join(tempdir, "foo.bar"), "w").write(" ")
open(os.path.join(tempdir, "1.txt"), "w").write(" ")
open(os.path.join(tempdir, "abc.txt"), "w").write(" ")
reload(dummy)
assert len(pyximport._test_files)==1, pyximport._test_files
reload(dummy)
time.sleep(1) # sleep a second to get safer mtimes
open(os.path.join(tempdir, "abc.txt"), "w").write(" ")
print("Here goes the reolad")
reload(dummy)
assert len(pyximport._test_files) == 1, pyximport._test_files
reload(dummy)
assert len(pyximport._test_files) == 0, pyximport._test_files
remove_tempdir(tempdir)
if __name__=="__main__":
test()
| apache-2.0 |
yize/grunt-tps | tasks/lib/python/Lib/python2.7/test/test_contextlib.py | 125 | 9103 | """Unit tests for contextlib.py, and other context managers."""
import sys
import tempfile
import unittest
from contextlib import * # Tests __all__
from test import test_support
try:
import threading
except ImportError:
threading = None
class ContextManagerTestCase(unittest.TestCase):
def test_contextmanager_plain(self):
state = []
@contextmanager
def woohoo():
state.append(1)
yield 42
state.append(999)
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
self.assertEqual(state, [1, 42, 999])
def test_contextmanager_finally(self):
state = []
@contextmanager
def woohoo():
state.append(1)
try:
yield 42
finally:
state.append(999)
with self.assertRaises(ZeroDivisionError):
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
raise ZeroDivisionError()
self.assertEqual(state, [1, 42, 999])
def test_contextmanager_no_reraise(self):
@contextmanager
def whee():
yield
ctx = whee()
ctx.__enter__()
# Calling __exit__ should not result in an exception
self.assertFalse(ctx.__exit__(TypeError, TypeError("foo"), None))
def test_contextmanager_trap_yield_after_throw(self):
@contextmanager
def whoo():
try:
yield
except:
yield
ctx = whoo()
ctx.__enter__()
self.assertRaises(
RuntimeError, ctx.__exit__, TypeError, TypeError("foo"), None
)
def test_contextmanager_except(self):
state = []
@contextmanager
def woohoo():
state.append(1)
try:
yield 42
except ZeroDivisionError, e:
state.append(e.args[0])
self.assertEqual(state, [1, 42, 999])
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
raise ZeroDivisionError(999)
self.assertEqual(state, [1, 42, 999])
def _create_contextmanager_attribs(self):
def attribs(**kw):
def decorate(func):
for k,v in kw.items():
setattr(func,k,v)
return func
return decorate
@contextmanager
@attribs(foo='bar')
def baz(spam):
"""Whee!"""
return baz
def test_contextmanager_attribs(self):
baz = self._create_contextmanager_attribs()
self.assertEqual(baz.__name__,'baz')
self.assertEqual(baz.foo, 'bar')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_contextmanager_doc_attrib(self):
baz = self._create_contextmanager_attribs()
self.assertEqual(baz.__doc__, "Whee!")
class NestedTestCase(unittest.TestCase):
# XXX This needs more work
def test_nested(self):
@contextmanager
def a():
yield 1
@contextmanager
def b():
yield 2
@contextmanager
def c():
yield 3
with nested(a(), b(), c()) as (x, y, z):
self.assertEqual(x, 1)
self.assertEqual(y, 2)
self.assertEqual(z, 3)
def test_nested_cleanup(self):
state = []
@contextmanager
def a():
state.append(1)
try:
yield 2
finally:
state.append(3)
@contextmanager
def b():
state.append(4)
try:
yield 5
finally:
state.append(6)
with self.assertRaises(ZeroDivisionError):
with nested(a(), b()) as (x, y):
state.append(x)
state.append(y)
1 // 0
self.assertEqual(state, [1, 4, 2, 5, 6, 3])
def test_nested_right_exception(self):
@contextmanager
def a():
yield 1
class b(object):
def __enter__(self):
return 2
def __exit__(self, *exc_info):
try:
raise Exception()
except:
pass
with self.assertRaises(ZeroDivisionError):
with nested(a(), b()) as (x, y):
1 // 0
self.assertEqual((x, y), (1, 2))
def test_nested_b_swallows(self):
@contextmanager
def a():
yield
@contextmanager
def b():
try:
yield
except:
# Swallow the exception
pass
try:
with nested(a(), b()):
1 // 0
except ZeroDivisionError:
self.fail("Didn't swallow ZeroDivisionError")
def test_nested_break(self):
@contextmanager
def a():
yield
state = 0
while True:
state += 1
with nested(a(), a()):
break
state += 10
self.assertEqual(state, 1)
def test_nested_continue(self):
@contextmanager
def a():
yield
state = 0
while state < 3:
state += 1
with nested(a(), a()):
continue
state += 10
self.assertEqual(state, 3)
def test_nested_return(self):
@contextmanager
def a():
try:
yield
except:
pass
def foo():
with nested(a(), a()):
return 1
return 10
self.assertEqual(foo(), 1)
class ClosingTestCase(unittest.TestCase):
# XXX This needs more work
def test_closing(self):
state = []
class C:
def close(self):
state.append(1)
x = C()
self.assertEqual(state, [])
with closing(x) as y:
self.assertEqual(x, y)
self.assertEqual(state, [1])
def test_closing_error(self):
state = []
class C:
def close(self):
state.append(1)
x = C()
self.assertEqual(state, [])
with self.assertRaises(ZeroDivisionError):
with closing(x) as y:
self.assertEqual(x, y)
1 // 0
self.assertEqual(state, [1])
class FileContextTestCase(unittest.TestCase):
def testWithOpen(self):
tfn = tempfile.mktemp()
try:
f = None
with open(tfn, "w") as f:
self.assertFalse(f.closed)
f.write("Booh\n")
self.assertTrue(f.closed)
f = None
with self.assertRaises(ZeroDivisionError):
with open(tfn, "r") as f:
self.assertFalse(f.closed)
self.assertEqual(f.read(), "Booh\n")
1 // 0
self.assertTrue(f.closed)
finally:
test_support.unlink(tfn)
@unittest.skipUnless(threading, 'Threading required for this test.')
class LockContextTestCase(unittest.TestCase):
def boilerPlate(self, lock, locked):
self.assertFalse(locked())
with lock:
self.assertTrue(locked())
self.assertFalse(locked())
with self.assertRaises(ZeroDivisionError):
with lock:
self.assertTrue(locked())
1 // 0
self.assertFalse(locked())
def testWithLock(self):
lock = threading.Lock()
self.boilerPlate(lock, lock.locked)
def testWithRLock(self):
lock = threading.RLock()
self.boilerPlate(lock, lock._is_owned)
def testWithCondition(self):
lock = threading.Condition()
def locked():
return lock._is_owned()
self.boilerPlate(lock, locked)
def testWithSemaphore(self):
lock = threading.Semaphore()
def locked():
if lock.acquire(False):
lock.release()
return False
else:
return True
self.boilerPlate(lock, locked)
def testWithBoundedSemaphore(self):
lock = threading.BoundedSemaphore()
def locked():
if lock.acquire(False):
lock.release()
return False
else:
return True
self.boilerPlate(lock, locked)
# This is needed to make the test actually run under regrtest.py!
def test_main():
with test_support.check_warnings(("With-statements now directly support "
"multiple context managers",
DeprecationWarning)):
test_support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| mit |
afrolov1/nova | nova/virt/baremetal/db/sqlalchemy/api.py | 3 | 11612 | # Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
import uuid
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import literal_column
import nova.context
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.gettextutils import _
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova.virt.baremetal.db.sqlalchemy import models
from nova.virt.baremetal.db.sqlalchemy import session as db_session
def model_query(context, *args, **kwargs):
"""Query helper that accounts for context's `read_deleted` field.
:param context: context to query under
:param session: if present, the session to use
:param read_deleted: if present, overrides context's read_deleted field.
:param project_only: if present and context is user-type, then restrict
query to match the context's project_id.
"""
session = kwargs.get('session') or db_session.get_session()
read_deleted = kwargs.get('read_deleted') or context.read_deleted
project_only = kwargs.get('project_only')
query = session.query(*args)
if read_deleted == 'no':
query = query.filter_by(deleted=False)
elif read_deleted == 'yes':
pass # omit the filter to include deleted and active
elif read_deleted == 'only':
query = query.filter_by(deleted=True)
else:
raise Exception(
_("Unrecognized read_deleted value '%s'") % read_deleted)
if project_only and nova.context.is_user_context(context):
query = query.filter_by(project_id=context.project_id)
return query
def _save(ref, session=None):
if not session:
session = db_session.get_session()
# We must not call ref.save() with session=None, otherwise NovaBase
# uses nova-db's session, which cannot access bm-db.
ref.save(session=session)
def _build_node_order_by(query):
query = query.order_by(asc(models.BareMetalNode.memory_mb))
query = query.order_by(asc(models.BareMetalNode.cpus))
query = query.order_by(asc(models.BareMetalNode.local_gb))
return query
@sqlalchemy_api.require_admin_context
def bm_node_get_all(context, service_host=None):
query = model_query(context, models.BareMetalNode, read_deleted="no")
if service_host:
query = query.filter_by(service_host=service_host)
return query.all()
@sqlalchemy_api.require_admin_context
def bm_node_get_associated(context, service_host=None):
query = model_query(context, models.BareMetalNode, read_deleted="no").\
filter(models.BareMetalNode.instance_uuid != None)
if service_host:
query = query.filter_by(service_host=service_host)
return query.all()
@sqlalchemy_api.require_admin_context
def bm_node_get_unassociated(context, service_host=None):
query = model_query(context, models.BareMetalNode, read_deleted="no").\
filter(models.BareMetalNode.instance_uuid == None)
if service_host:
query = query.filter_by(service_host=service_host)
return query.all()
@sqlalchemy_api.require_admin_context
def bm_node_find_free(context, service_host=None,
cpus=None, memory_mb=None, local_gb=None):
query = model_query(context, models.BareMetalNode, read_deleted="no")
query = query.filter(models.BareMetalNode.instance_uuid == None)
if service_host:
query = query.filter_by(service_host=service_host)
if cpus is not None:
query = query.filter(models.BareMetalNode.cpus >= cpus)
if memory_mb is not None:
query = query.filter(models.BareMetalNode.memory_mb >= memory_mb)
if local_gb is not None:
query = query.filter(models.BareMetalNode.local_gb >= local_gb)
query = _build_node_order_by(query)
return query.first()
@sqlalchemy_api.require_admin_context
def bm_node_get(context, bm_node_id):
# bm_node_id may be passed as a string. Convert to INT to improve DB perf.
bm_node_id = int(bm_node_id)
result = model_query(context, models.BareMetalNode, read_deleted="no").\
filter_by(id=bm_node_id).\
first()
if not result:
raise exception.NodeNotFound(node_id=bm_node_id)
return result
@sqlalchemy_api.require_admin_context
def bm_node_get_by_instance_uuid(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InstanceNotFound(instance_id=instance_uuid)
result = model_query(context, models.BareMetalNode, read_deleted="no").\
filter_by(instance_uuid=instance_uuid).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_uuid)
return result
@sqlalchemy_api.require_admin_context
def bm_node_get_by_node_uuid(context, bm_node_uuid):
result = model_query(context, models.BareMetalNode, read_deleted="no").\
filter_by(uuid=bm_node_uuid).\
first()
if not result:
raise exception.NodeNotFoundByUUID(node_uuid=bm_node_uuid)
return result
@sqlalchemy_api.require_admin_context
def bm_node_create(context, values):
if not values.get('uuid'):
values['uuid'] = str(uuid.uuid4())
bm_node_ref = models.BareMetalNode()
bm_node_ref.update(values)
_save(bm_node_ref)
return bm_node_ref
@sqlalchemy_api.require_admin_context
def bm_node_update(context, bm_node_id, values):
rows = model_query(context, models.BareMetalNode, read_deleted="no").\
filter_by(id=bm_node_id).\
update(values)
if not rows:
raise exception.NodeNotFound(node_id=bm_node_id)
@sqlalchemy_api.require_admin_context
def bm_node_associate_and_update(context, node_uuid, values):
"""Associate an instance to a node safely
Associate an instance to a node only if that node is not yet associated.
Allow the caller to set any other fields they require in the same
operation. For example, this is used to set the node's task_state to
BUILDING at the beginning of driver.spawn().
"""
if 'instance_uuid' not in values:
raise exception.NovaException(_(
"instance_uuid must be supplied to bm_node_associate_and_update"))
session = db_session.get_session()
with session.begin():
query = model_query(context, models.BareMetalNode,
session=session, read_deleted="no").\
filter_by(uuid=node_uuid)
count = query.filter_by(instance_uuid=None).\
update(values, synchronize_session=False)
if count != 1:
raise exception.NovaException(_(
"Failed to associate instance %(i_uuid)s to baremetal node "
"%(n_uuid)s.") % {'i_uuid': values['instance_uuid'],
'n_uuid': node_uuid})
ref = query.first()
return ref
@sqlalchemy_api.require_admin_context
def bm_node_destroy(context, bm_node_id):
# First, delete all interfaces belonging to the node.
# Delete physically since these have unique columns.
session = db_session.get_session()
with session.begin():
model_query(context, models.BareMetalInterface, read_deleted="no").\
filter_by(bm_node_id=bm_node_id).\
delete()
rows = model_query(context, models.BareMetalNode, read_deleted="no").\
filter_by(id=bm_node_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
if not rows:
raise exception.NodeNotFound(node_id=bm_node_id)
@sqlalchemy_api.require_admin_context
def bm_interface_get(context, if_id):
result = model_query(context, models.BareMetalInterface,
read_deleted="no").\
filter_by(id=if_id).\
first()
if not result:
raise exception.NovaException(_("Baremetal interface %s "
"not found") % if_id)
return result
@sqlalchemy_api.require_admin_context
def bm_interface_get_all(context):
query = model_query(context, models.BareMetalInterface,
read_deleted="no")
return query.all()
@sqlalchemy_api.require_admin_context
def bm_interface_destroy(context, if_id):
# Delete physically since it has unique columns
model_query(context, models.BareMetalInterface, read_deleted="no").\
filter_by(id=if_id).\
delete()
@sqlalchemy_api.require_admin_context
def bm_interface_create(context, bm_node_id, address, datapath_id, port_no):
ref = models.BareMetalInterface()
ref.bm_node_id = bm_node_id
ref.address = address
ref.datapath_id = datapath_id
ref.port_no = port_no
_save(ref)
return ref.id
@sqlalchemy_api.require_admin_context
def bm_interface_set_vif_uuid(context, if_id, vif_uuid):
session = db_session.get_session()
with session.begin():
bm_interface = model_query(context, models.BareMetalInterface,
read_deleted="no", session=session).\
filter_by(id=if_id).\
with_lockmode('update').\
first()
if not bm_interface:
raise exception.NovaException(_("Baremetal interface %s "
"not found") % if_id)
bm_interface.vif_uuid = vif_uuid
try:
session.add(bm_interface)
session.flush()
except db_exc.DBError as e:
# TODO(deva): clean up when db layer raises DuplicateKeyError
if str(e).find('IntegrityError') != -1:
raise exception.NovaException(_("Baremetal interface %s "
"already in use") % vif_uuid)
raise
@sqlalchemy_api.require_admin_context
def bm_interface_get_by_vif_uuid(context, vif_uuid):
result = model_query(context, models.BareMetalInterface,
read_deleted="no").\
filter_by(vif_uuid=vif_uuid).\
first()
if not result:
raise exception.NovaException(_("Baremetal virtual interface %s "
"not found") % vif_uuid)
return result
@sqlalchemy_api.require_admin_context
def bm_interface_get_all_by_bm_node_id(context, bm_node_id):
result = model_query(context, models.BareMetalInterface,
read_deleted="no").\
filter_by(bm_node_id=bm_node_id).\
all()
if not result:
raise exception.NodeNotFound(node_id=bm_node_id)
return result
| apache-2.0 |
lthurlow/Network-Grapher | proj/external/networkx-1.7/build/lib.linux-i686-2.7/networkx/algorithms/traversal/breadth_first_search.py | 14 | 1422 | """
====================
Breadth-first search
====================
Basic algorithms for breadth-first searching.
"""
__author__ = """\n""".join(['Aric Hagberg <hagberg@lanl.gov>'])
__all__ = ['bfs_edges', 'bfs_tree',
'bfs_predecessors', 'bfs_successors']
import networkx as nx
from collections import defaultdict
def bfs_edges(G,source):
"""Produce edges in a breadth-first-search starting at source."""
# Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
# by D. Eppstein, July 2004.
visited=set([source])
stack = [(source,iter(G[source]))]
while stack:
parent,children = stack[0]
try:
child = next(children)
if child not in visited:
yield parent,child
visited.add(child)
stack.append((child,iter(G[child])))
except StopIteration:
stack.pop(0)
def bfs_tree(G, source):
"""Return directed tree of breadth-first-search from source."""
return nx.DiGraph(bfs_edges(G,source))
def bfs_predecessors(G, source):
"""Return dictionary of predecessors in breadth-first-search from source."""
return dict((t,s) for s,t in bfs_edges(G,source))
def bfs_successors(G, source):
"""Return dictionary of successors in breadth-first-search from source."""
d=defaultdict(list)
for s,t in bfs_edges(G,source):
d[s].append(t)
return dict(d)
| mit |
emilydolson/forestcat | pyrobot/brain/psom/visvector.py | 2 | 7394 | #A set of model vector visualization tools.
#Each visualizer should be a subclass of VisVector, which is an abstract class.
#If a visualizer is added, it should be added to the if statement in
#VisVectorFactory. Each new class should follow the nameing convention
#XVisVector, where X is the string that gets passed to VisVectorFactory
from pyrobot.brain.psom import *
from Tkinter import *
import struct
from types import *
from PIL import ImageTk
import pyrobot.gui.plot.hinton
import pyrobot.gui.plot.matrix
class VisVector:
"""
This is an abstract base class for a set of vector visualizers.
Each subclass should be made specifically for a type of vector.
"""
def __init__(self, vector, title="", opts = (0, 0)):
raise "This is an abstract function."
def close(self):
"""Close my Window"""
self.win.destroy()
class IRVisVector(VisVector):
"""
Display a vector representing a list of IR sensor values as
floats.
"""
def __init__(self, vector, title="", opts = (0, 0)):
self.vector = vector.get_elts()
self.length = len(self.vector)
self.app = Tk()
self.app.wm_state('withdrawn')
self.win = Toplevel()
self.win.title(title)
Lframe = Frame(self.win)
Rframe = Frame(self.win)
b = Button(self.win, text="Close", command=self.close)
b.pack(side=BOTTOM)
Lframe.pack(side=LEFT, fill=Y)
Rframe.pack(side=RIGHT, fill=Y)
for i in range(self.length):
ll = Label(Lframe, text="IR" + str(i))
ll.pack(anchor=W)
rl = Label(Rframe, text="= " + str(self.vector[i]))
rl.pack(anchor=W)
class GenericVisVector(VisVector):
"""
Display a generic vector of floats.
"""
def __init__(self, vector, title="", opts = (0, 0)):
self.vector = vector.get_elts()
self.length = len(self.vector)
self.win = Toplevel()
self.win.title(title)
Lframe = Frame(self.win)
Rframe = Frame(self.win)
b = Button(self.win, text="Close", command=self.close)
b.pack(side=BOTTOM)
Lframe.pack(side=LEFT, fill=Y)
Rframe.pack(side=RIGHT, fill=Y)
for i in range(self.length):
ll = Label(Lframe, text=str(i))
ll.pack(anchor=W)
rl = Label(Rframe, text="= " + str(self.vector[i]))
rl.pack(anchor=W)
class GrayscaleVisVector(VisVector):
"""
Display a vector of floats representing a grayscale image
"""
def __init__(self, vector, title="", opts = (0, 0)):
self.vector = vector.get_elts()
self.length = len(self.vector)
width, height = opts
self.win = Toplevel()
self.win.title(title)
self.canvas = Canvas(self.win, width=width, height=height)
self.canvas.pack()
#turn unit-valued floats into 8-bit grayscale values...
grayvec = map(lambda x: struct.pack("B", int(x*255)), self.vector)
#...and pack them into a string.
self.graystr = ""
for pix in grayvec:
self.graystr += pix
img = ImageTk.Image.fromstring("L", (width, height), self.graystr)
photo = ImageTk.PhotoImage(img)
i = self.canvas.create_image(1,1,anchor=NW,image=photo)
b = Button(self.win, text="Close", command=self.close)
b.pack(side=BOTTOM)
self.win.mainloop()
class HintonVisVector(VisVector, pyrobot.gui.plot.hinton.Hinton):
"""
Use the Hinton plot to display the vector
"""
def __init__(self, vector, title="", opts = (None,)):
maxval = opts[0]
if not maxval:
maxval = max(vector)
pyrobot.gui.plot.hinton.Hinton.__init__(self, data=vector.get_elts(),
maxvalue=maxval, title=title)
b = Button(self.win, text="Close", command=self.close)
b.pack()
class MatrixVisVector(VisVector, pyrobot.gui.plot.matrix.Matrix):
"""
Use the Matrix plot to display the vector
"""
def __init__(self, vector, title="", opts = (None,)):
if len(opts) == 3:
maxval = opts[2]
else:
maxval = max(vector)
cols, rows = opts[0], opts[1]
pyrobot.gui.plot.matrix.Matrix.__init__(self, data=vector.get_elts(),
rows = rows, cols = cols,
maxvalue=maxval, title=title)
b = Button(self.win, text="Close", command=self.close)
b.pack()
class SOMVisVector(VisVector, pyrobot.gui.plot.matrix.Matrix):
"""
Use the SOM plot to display the vector as a SOM
"""
def __init__(self, vector, title="", opts = (None,)):
maxval = opts[0]
if not maxval:
maxval = max(vector)
cols, rows = opts
print "Title %s" % title
pyrobot.gui.plot.matrix.Matrix.__init__(self, data=vector.get_elts(),
rows = rows, cols = cols,
maxvalue=255, title=title,
type = 'som')
b = Button(self.win, text="Close", command=self.close)
b.pack()
class BarGraphVisVector(VisVector):
"""
Display a vector as a series of horizonal bars
"""
def __init__(self, vector, title="", opts = (None, None)):
"""
Min and max can either be scalar minima and maxima for the
entire vector, or it can be a list of the same length as the vector,
each element corresponding to an element of the vector
"""
self.vector = vector.get_elts()
self.length = len(self.vector)
minval, maxval = opts
#if the min and max are given as scalar values,
#convert them to vectors of the length of the
#vector
if type(minval) is ListType:
self.min = minval
else:
if minval == None:
self.min = [min(self.vector)] * self.length
else:
self.min = [minval] * self.length
if type(maxval) is ListType:
self.max = maxval
else:
if maxval == None:
self.max = [max(self.vector)] * self.length
else:
self.max = [maxval] * self.length
self.win = Toplevel()
self.win.title(title)
for i in range(len(self.vector)):
c = Canvas(self.win, height= 20, width = 100)
c.create_text(2, 10, anchor=W, text=str(i))
vec_len = abs(int((self.vector[i] / (self.max[i] - self.min[i])) * 85))
c.create_rectangle(15, 3, vec_len + 15, 17, fill="red")
c.pack()
b = Button(self.win, text="Close", command=self.close)
b.pack(side=BOTTOM)
def getVisVectorByName(type):
"""
Given a type of VisVector as a string, create and initialize an
instance of that type, and return a reference.
"""
if type == "IR":
return IRVisVector
elif type == "Generic":
return GenericVisVector
elif type =="Grayscale" or type =="Greyscale":
return GrayscaleVisVector
elif type == "BarGraph":
return BarGraphVisVector
elif type =="Hinton":
return HintonVisVector
elif type =="Matrix":
return MatrixVisVector
elif type =="SOM":
return SOMVisVector
else:
raise "VisVector type not supported"
#if __name__=="__main__":
# from pyrobot.brain.psom import *
# grayvis = getVisVectorByName("BarGraph")
# piclist = [float(x)/16.0 for x in range(16)]
# grayvis(vector(piclist))
# print "Done"
| agpl-3.0 |
mantidproject/mantid | scripts/Muon/GUI/Common/home_runinfo_widget/home_runinfo_widget_presenter.py | 3 | 2415 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from Muon.GUI.Common.home_tab.home_tab_presenter import HomeTabSubWidget
class HomeRunInfoWidgetPresenter(HomeTabSubWidget):
def __init__(self, view, model):
self._view = view
self._model = model
def show(self):
self._view.show()
def update_view_from_model(self):
self._view.clear()
run = self._model.get_run_number()
instrument = self._model.get_instrument_name()
counts = self._model.get_total_counts()
self._view.add_text_line("Instrument : " + str(instrument))
self._view.add_text_line("Run : " + run)
self._view.add_text_line(self.create_text_line("Title ", "run_title"))
self._view.add_text_line("Comment : " + str(self._model.get_workspace_comment()))
self._view.add_text_line(self.create_text_line("Start ", "run_start"))
self._view.add_text_line(self.create_text_line("End ", "run_end"))
self._view.add_text_line("Counts (MEv) : " + str(self._model.get_counts_in_MeV(counts)))
self._view.add_text_line(self.create_text_line("Good Frames ", "goodfrm"))
self._view.add_text_line("Counts per Good Frame : " + str(self._model.get_counts_per_good_frame(counts)))
self._view.add_text_line("Counts per Good Frame per det : "
+ str(self._model.get_counts_per_good_frame_per_detector(counts)))
self._view.add_text_line("Average Temperature (K) : "+str(self._model.get_average_temperature()))
self._view.add_text_line(self.create_text_line("Sample Temperature (K) ", "sample_temp"))
self._view.add_text_line(self.create_text_line("Sample Magnetic Field (G)", "sample_magn_field"))
self._view.add_text_line("Number of DAQ Periods : " + str(self._model.get_periods()))
def create_text_line(self, name, log_name):
log = self._model.get_log_value(log_name)
text = str(name) + " : " + str(log)
return text
| gpl-3.0 |
DavidLP/home-assistant | homeassistant/components/point/__init__.py | 4 | 11084 | """Support for Minut Point."""
import asyncio
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_TOKEN, CONF_WEBHOOK_ID
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect, async_dispatcher_send)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util.dt import as_local, parse_datetime, utc_from_timestamp
from . import config_flow # noqa pylint_disable=unused-import
from .const import (
CONF_WEBHOOK_URL, DOMAIN, EVENT_RECEIVED, POINT_DISCOVERY_NEW,
SCAN_INTERVAL, SIGNAL_UPDATE_ENTITY, SIGNAL_WEBHOOK)
_LOGGER = logging.getLogger(__name__)
CONF_CLIENT_ID = 'client_id'
CONF_CLIENT_SECRET = 'client_secret'
DATA_CONFIG_ENTRY_LOCK = 'point_config_entry_lock'
CONFIG_ENTRY_IS_SETUP = 'point_config_entry_is_setup'
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN:
vol.Schema({
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
})
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up the Minut Point component."""
if DOMAIN not in config:
return True
conf = config[DOMAIN]
config_flow.register_flow_implementation(
hass, DOMAIN, conf[CONF_CLIENT_ID],
conf[CONF_CLIENT_SECRET])
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={'source': config_entries.SOURCE_IMPORT},
))
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Set up Point from a config entry."""
from pypoint import PointSession
def token_saver(token):
_LOGGER.debug('Saving updated token')
entry.data[CONF_TOKEN] = token
hass.config_entries.async_update_entry(entry, data={**entry.data})
# Force token update.
entry.data[CONF_TOKEN]['expires_in'] = -1
session = PointSession(
entry.data['refresh_args']['client_id'],
token=entry.data[CONF_TOKEN],
auto_refresh_kwargs=entry.data['refresh_args'],
token_saver=token_saver,
)
if not session.is_authorized:
_LOGGER.error('Authentication Error')
return False
hass.data[DATA_CONFIG_ENTRY_LOCK] = asyncio.Lock()
hass.data[CONFIG_ENTRY_IS_SETUP] = set()
await async_setup_webhook(hass, entry, session)
client = MinutPointClient(hass, entry, session)
hass.data.setdefault(DOMAIN, {}).update({entry.entry_id: client})
hass.async_create_task(client.update())
return True
async def async_setup_webhook(hass: HomeAssistantType, entry: ConfigEntry,
session):
"""Set up a webhook to handle binary sensor events."""
if CONF_WEBHOOK_ID not in entry.data:
entry.data[CONF_WEBHOOK_ID] = \
hass.components.webhook.async_generate_id()
entry.data[CONF_WEBHOOK_URL] = \
hass.components.webhook.async_generate_url(
entry.data[CONF_WEBHOOK_ID])
_LOGGER.info('Registering new webhook at: %s',
entry.data[CONF_WEBHOOK_URL])
hass.config_entries.async_update_entry(
entry, data={
**entry.data,
})
await hass.async_add_executor_job(
session.update_webhook,
entry.data[CONF_WEBHOOK_URL],
entry.data[CONF_WEBHOOK_ID],
['*'])
hass.components.webhook.async_register(
DOMAIN, 'Point', entry.data[CONF_WEBHOOK_ID], handle_webhook)
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Unload a config entry."""
hass.components.webhook.async_unregister(entry.data[CONF_WEBHOOK_ID])
session = hass.data[DOMAIN].pop(entry.entry_id)
await hass.async_add_executor_job(session.remove_webhook)
if not hass.data[DOMAIN]:
hass.data.pop(DOMAIN)
for component in ('binary_sensor', 'sensor'):
await hass.config_entries.async_forward_entry_unload(
entry, component)
return True
async def handle_webhook(hass, webhook_id, request):
    """Handle webhook callback."""
    try:
        payload = await request.json()
        _LOGGER.debug("Webhook %s: %s", webhook_id, payload)
    except ValueError:
        # Request body was not valid JSON; nothing to dispatch.
        return None

    if isinstance(payload, dict):
        payload['webhook_id'] = webhook_id

    # Fan the event out to listening entities and onto the event bus.
    async_dispatcher_send(hass, SIGNAL_WEBHOOK, payload,
                          payload.get('hook_id'))
    hass.bus.async_fire(EVENT_RECEIVED, payload)
class MinutPointClient():
    """Get the latest data and update the states."""

    def __init__(self, hass: HomeAssistantType, config_entry: ConfigEntry,
                 session):
        """Initialize the Minut data object."""
        # Ids already forwarded to platforms, so each entity is only
        # discovered once.
        self._known_devices = set()
        self._known_homes = set()
        self._hass = hass
        self._config_entry = config_entry
        # Tracks cloud reachability; lets us log "unavailable" only once.
        self._is_available = True
        self._client = session
        # Poll the cloud on a fixed interval.
        async_track_time_interval(self._hass, self.update, SCAN_INTERVAL)

    async def update(self, *args):
        """Periodically poll the cloud for current state."""
        await self._sync()

    async def _sync(self):
        """Update local list of devices."""
        # The client update is blocking I/O; run it off the event loop.
        # NOTE(review): if the update fails while already marked
        # unavailable, the condition is False and execution falls through
        # to the discovery/sync below — confirm this is intended.
        if not await self._hass.async_add_executor_job(
                self._client.update) and self._is_available:
            self._is_available = False
            _LOGGER.warning("Device is unavailable")
            return

        async def new_device(device_id, component):
            """Load new device."""
            config_entries_key = '{}.{}'.format(component, DOMAIN)
            async with self._hass.data[DATA_CONFIG_ENTRY_LOCK]:
                # Forward the config entry to the platform only the first
                # time a device of this component type shows up.
                if config_entries_key not in self._hass.data[
                        CONFIG_ENTRY_IS_SETUP]:
                    await self._hass.config_entries.async_forward_entry_setup(
                        self._config_entry, component)
                    self._hass.data[CONFIG_ENTRY_IS_SETUP].add(
                        config_entries_key)

            # Announce the new device id to the platform.
            async_dispatcher_send(
                self._hass, POINT_DISCOVERY_NEW.format(component, DOMAIN),
                device_id)

        self._is_available = True
        for home_id in self._client.homes:
            if home_id not in self._known_homes:
                await new_device(home_id, 'alarm_control_panel')
                self._known_homes.add(home_id)
        for device in self._client.devices:
            if device.device_id not in self._known_devices:
                for component in ('sensor', 'binary_sensor'):
                    await new_device(device.device_id, component)
                self._known_devices.add(device.device_id)
        # Tell all existing entities to refresh their state.
        async_dispatcher_send(self._hass, SIGNAL_UPDATE_ENTITY)

    def device(self, device_id):
        """Return device representation."""
        return self._client.device(device_id)

    def is_available(self, device_id):
        """Return device availability."""
        return device_id in self._client.device_ids

    def remove_webhook(self):
        """Remove the session webhook."""
        return self._client.remove_webhook()

    @property
    def homes(self):
        """Return known homes."""
        return self._client.homes

    def alarm_disarm(self, home_id):
        """Send alarm disarm command."""
        return self._client.alarm_disarm(home_id)

    def alarm_arm(self, home_id):
        """Send alarm arm command."""
        return self._client.alarm_arm(home_id)
class MinutPointEntity(Entity):
    """Base Entity used by the sensors."""

    def __init__(self, point_client, device_id, device_class):
        """Initialize the entity.

        point_client: the shared MinutPointClient for this config entry.
        device_id: id of the Point device backing this entity.
        device_class: Home Assistant device class string.
        """
        self._async_unsub_dispatcher_connect = None
        self._client = point_client
        self._id = device_id
        self._name = self.device.name
        self._device_class = device_class
        # Epoch start, so the first received update is always "newer".
        self._updated = utc_from_timestamp(0)
        self._value = None

    def __str__(self):
        """Return string representation of device."""
        return "MinutPoint {}".format(self.name)

    async def async_added_to_hass(self):
        """Call when entity is added to hass."""
        _LOGGER.debug('Created device %s', self)
        self._async_unsub_dispatcher_connect = async_dispatcher_connect(
            self.hass, SIGNAL_UPDATE_ENTITY, self._update_callback)
        await self._update_callback()

    async def async_will_remove_from_hass(self):
        """Disconnect dispatcher listener when removed."""
        if self._async_unsub_dispatcher_connect:
            self._async_unsub_dispatcher_connect()

    async def _update_callback(self):
        """Update the value of the sensor."""
        # Subclasses override this to refresh their state.
        pass

    @property
    def available(self):
        """Return true if device is not offline."""
        return self._client.is_available(self.device_id)

    @property
    def device(self):
        """Return the representation of the device."""
        return self._client.device(self.device_id)

    @property
    def device_class(self):
        """Return the device class."""
        return self._device_class

    @property
    def device_id(self):
        """Return the id of the device."""
        return self._id

    @property
    def device_state_attributes(self):
        """Return status of device."""
        attrs = self.device.device_status
        attrs['last_heard_from'] = \
            as_local(self.last_update).strftime("%Y-%m-%d %H:%M:%S")
        return attrs

    @property
    def device_info(self):
        """Return a device description for device registry."""
        device = self.device.device
        return {
            'connections': {('mac', device['device_mac'])},
            # Fixed key: was misspelled 'identifieres', which the device
            # registry does not recognise.
            'identifiers': device['device_id'],
            'manufacturer': 'Minut',
            'model': 'Point v{}'.format(device['hardware_version']),
            'name': device['description'],
            'sw_version': device['firmware']['installed'],
            'via_hub': (DOMAIN, device['home']),
        }

    @property
    def name(self):
        """Return the display name of this device."""
        return "{} {}".format(self._name, self.device_class.capitalize())

    @property
    def is_updated(self):
        """Return true if sensor have been updated."""
        return self.last_update > self._updated

    @property
    def last_update(self):
        """Return the last_update time for the device."""
        last_update = parse_datetime(self.device.last_update)
        return last_update

    @property
    def should_poll(self):
        """No polling needed for point."""
        return False

    @property
    def unique_id(self):
        """Return the unique id of the sensor."""
        return 'point.{}-{}'.format(self._id, self.device_class)

    @property
    def value(self):
        """Return the sensor value."""
        return self._value
| apache-2.0 |
vmindru/ansible-modules-core | network/basics/uri.py | 4 | 17344 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Romeo Theriault <romeot () hawaii.edu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# see examples/playbooks/uri.yml
DOCUMENTATION = '''
---
module: uri
short_description: Interacts with webservices
description:
- Interacts with HTTP and HTTPS web services and supports Digest, Basic and WSSE
HTTP authentication mechanisms.
version_added: "1.1"
options:
url:
description:
- HTTP or HTTPS URL in the form (http|https)://host.domain[:port]/path
required: true
default: null
dest:
description:
- path of where to download the file to (if desired). If I(dest) is a
directory, the basename of the file on the remote server will be used.
required: false
default: null
user:
description:
- username for the module to use for Digest, Basic or WSSE authentication.
required: false
default: null
password:
description:
- password for the module to use for Digest, Basic or WSSE authentication.
required: false
default: null
body:
description:
- The body of the http request/response to the web service. If C(body_format) is set
to 'json' it will take an already formatted JSON string or convert a data structure
into JSON.
required: false
default: null
body_format:
description:
- The serialization format of the body. When set to json, encodes the
body argument, if needed, and automatically sets the Content-Type header accordingly.
required: false
choices: [ "raw", "json" ]
default: raw
version_added: "2.0"
method:
description:
- The HTTP method of the request or response. It MUST be uppercase.
required: false
choices: [ "GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS", "PATCH", "TRACE", "CONNECT", "REFRESH" ]
default: "GET"
return_content:
description:
- Whether or not to return the body of the request as a "content" key in
the dictionary result. If the reported Content-type is
"application/json", then the JSON is additionally loaded into a key
called C(json) in the dictionary results.
required: false
choices: [ "yes", "no" ]
default: "no"
force_basic_auth:
description:
- The library used by the uri module only sends authentication information when a webservice
responds to an initial request with a 401 status. Since some basic auth services do not properly
send a 401, logins will fail. This option forces the sending of the Basic authentication header
upon initial request.
required: false
choices: [ "yes", "no" ]
default: "no"
follow_redirects:
description:
- Whether or not the URI module should follow redirects. C(all) will follow all redirects.
C(safe) will follow only "safe" redirects, where "safe" means that the client is only
doing a GET or HEAD on the URI to which it is being redirected. C(none) will not follow
any redirects. Note that C(yes) and C(no) choices are accepted for backwards compatibility,
where C(yes) is the equivalent of C(all) and C(no) is the equivalent of C(safe). C(yes) and C(no)
are deprecated and will be removed in some future version of Ansible.
required: false
choices: [ "all", "safe", "none" ]
default: "safe"
creates:
description:
- a filename, when it already exists, this step will not be run.
required: false
removes:
description:
- a filename, when it does not exist, this step will not be run.
required: false
status_code:
description:
- A valid, numeric, HTTP status code that signifies success of the
request. Can also be comma separated list of status codes.
required: false
default: 200
timeout:
description:
- The socket level timeout in seconds
required: false
default: 30
HEADER_:
description:
- Any parameter starting with "HEADER_" is a sent with your request as a header.
For example, HEADER_Content-Type="application/json" would send the header
"Content-Type" along with your request with a value of "application/json".
This option is deprecated as of C(2.1) and may be removed in a future
release. Use I(headers) instead.
required: false
default: null
headers:
description:
- Add custom HTTP headers to a request in the format of a YAML hash
required: false
default: null
version_added: '2.1'
others:
description:
- all arguments accepted by the M(file) module also work here
required: false
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only
set to C(no) used on personally controlled sites using self-signed
certificates. Prior to 1.9.2 the code defaulted to C(no).
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: '1.9.2'
notes:
- The dependency on httplib2 was removed in Ansible 2.1
author: "Romeo Theriault (@romeotheriault)"
'''
EXAMPLES = '''
# Check that you can connect (GET) to a page and it returns a status 200
- uri: url=http://www.example.com
# Check that a page returns a status 200 and fail if the word AWESOME is not
# in the page contents.
- action: uri url=http://www.example.com return_content=yes
register: webpage
- action: fail
when: "'AWESOME' not in webpage.content"
# Create a JIRA issue
- uri:
url: https://your.jira.example.com/rest/api/2/issue/
method: POST
user: your_username
password: your_pass
body: "{{ lookup('file','issue.json') }}"
force_basic_auth: yes
status_code: 201
body_format: json
# Login to a form based webpage, then use the returned cookie to
# access the app in later tasks
- uri:
url: https://your.form.based.auth.example.com/index.php
method: POST
body: "name=your_username&password=your_password&enter=Sign%20in"
status_code: 302
HEADER_Content-Type: "application/x-www-form-urlencoded"
register: login
- uri:
url: https://your.form.based.auth.example.com/dashboard.php
method: GET
return_content: yes
HEADER_Cookie: "{{login.set_cookie}}"
# Queue build of a project in Jenkins:
- uri:
url: "http://{{ jenkins.host }}/job/{{ jenkins.job }}/build?token={{ jenkins.token }}"
method: GET
user: "{{ jenkins.user }}"
password: "{{ jenkins.password }}"
force_basic_auth: yes
status_code: 201
'''
import cgi
import datetime
import os
import shutil
import tempfile
try:
import json
except ImportError:
import simplejson as json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
import ansible.module_utils.six as six
from ansible.module_utils._text import to_text
from ansible.module_utils.urls import fetch_url, url_argument_spec
def write_file(module, url, dest, content):
    """Write *content* to *dest* via a temporary file.

    The content goes to a temp file first; it is only copied over *dest*
    when the SHA1 checksums differ (so an unchanged destination is left
    untouched). On any error the temp file is removed and
    ``module.fail_json`` is invoked.
    """
    fd, tmpsrc = tempfile.mkstemp()
    # mkstemp hands back an open descriptor; close it since we re-open the
    # file by path below (the original leaked this fd).
    os.close(fd)
    f = open(tmpsrc, 'wb')
    try:
        f.write(content)
    except Exception:
        err = get_exception()
        f.close()
        os.remove(tmpsrc)
        module.fail_json(msg="failed to create temporary content file: %s" % str(err))
    f.close()

    checksum_src = None
    checksum_dest = None

    # raise an error if there is no tmpsrc file
    if not os.path.exists(tmpsrc):
        # Bug fix: do NOT os.remove() here -- the file does not exist, so
        # removing it would raise OSError and mask the real failure.
        module.fail_json(msg="Source %s does not exist" % (tmpsrc))
    if not os.access(tmpsrc, os.R_OK):
        os.remove(tmpsrc)
        module.fail_json(msg="Source %s not readable" % (tmpsrc))
    checksum_src = module.sha1(tmpsrc)

    # check if there is no dest file
    if os.path.exists(dest):
        # raise an error if copy has no permission on dest
        if not os.access(dest, os.W_OK):
            os.remove(tmpsrc)
            module.fail_json(msg="Destination %s not writable" % (dest))
        if not os.access(dest, os.R_OK):
            os.remove(tmpsrc)
            module.fail_json(msg="Destination %s not readable" % (dest))
        checksum_dest = module.sha1(dest)
    else:
        if not os.access(os.path.dirname(dest), os.W_OK):
            os.remove(tmpsrc)
            module.fail_json(msg="Destination dir %s not writable" % (os.path.dirname(dest)))

    # Only copy when content actually changed.
    if checksum_src != checksum_dest:
        try:
            shutil.copyfile(tmpsrc, dest)
        except Exception:
            err = get_exception()
            os.remove(tmpsrc)
            module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err)))
    os.remove(tmpsrc)
def url_filename(url):
    """Derive a download filename from *url*'s path component."""
    path = six.moves.urllib.parse.urlsplit(url)[2]
    basename = os.path.basename(path)
    # URLs whose path ends in '/' have no basename; fall back to a default.
    return basename if basename else 'index.html'
def absolute_location(url, location):
    """Attempts to create an absolute URL based on initial URL, and
    next URL, specifically in the case of a ``Location`` header.
    """
    if '://' in location:
        # Already absolute.
        return location
    if location.startswith('/'):
        # Host-relative: keep the scheme+netloc of the original URL.
        # NOTE(review): str.replace removes the *first* occurrence of the
        # path, which could also match inside the netloc — confirm.
        parts = six.moves.urllib.parse.urlsplit(url)
        base = url.replace(parts[2], '')
        return '%s%s' % (base, location)
    # Path-relative: resolve against the original URL's directory.
    # (The former trailing ``else: return location`` was unreachable: a
    # string either starts with '/' or it does not.)
    base = os.path.dirname(url)
    return '%s/%s' % (base, location)
def uri(module, url, dest, body, body_format, method, headers, socket_timeout):
    """Perform the request and return ``(response_info, content, dest)``.

    When *dest* is a directory, a preliminary request (with redirects
    disabled) is made to discover the final filename to download to.
    """
    # is dest is set and is a directory, let's check if we get redirected and
    # set the filename from that url
    redirected = False
    redir_info = {}
    r = {}
    if dest is not None:
        # Stash follow_redirects, in this block we don't want to follow
        # we'll reset back to the supplied value soon
        follow_redirects = module.params['follow_redirects']
        module.params['follow_redirects'] = False
        dest = os.path.expanduser(dest)
        if os.path.isdir(dest):
            # first check if we are redirected to a file download
            _, redir_info = fetch_url(module, url, data=body,
                                      headers=headers,
                                      method=method,
                                      timeout=socket_timeout)
            # if we are redirected, update the url with the location header,
            # and update dest with the new url filename
            if redir_info['status'] in (301, 302, 303, 307):
                url = redir_info['location']
                redirected = True
            dest = os.path.join(dest, url_filename(url))
        # if destination file already exist, only download if file newer
        if os.path.exists(dest):
            t = datetime.datetime.utcfromtimestamp(os.path.getmtime(dest))
            tstamp = t.strftime('%a, %d %b %Y %H:%M:%S +0000')
            headers['If-Modified-Since'] = tstamp
        # Reset follow_redirects back to the stashed value
        module.params['follow_redirects'] = follow_redirects

    resp, info = fetch_url(module, url, data=body, headers=headers,
                           method=method, timeout=socket_timeout)

    try:
        content = resp.read()
    except AttributeError:
        # there was no content, but the error read()
        # may have been stored in the info as 'body'
        content = info.pop('body', '')

    # Also report as redirected when the final URL differs from the request.
    r['redirected'] = redirected or info['url'] != url
    r.update(redir_info)
    r.update(info)
    return r, content, dest
def main():
    """Module entry point: parse args, run the request, report the result."""
    argument_spec = url_argument_spec()
    argument_spec.update(dict(
        dest = dict(required=False, default=None, type='path'),
        url_username = dict(required=False, default=None, aliases=['user']),
        url_password = dict(required=False, default=None, aliases=['password']),
        body = dict(required=False, default=None, type='raw'),
        body_format = dict(required=False, default='raw', choices=['raw', 'json']),
        method = dict(required=False, default='GET', choices=['GET', 'POST', 'PUT', 'HEAD', 'DELETE', 'OPTIONS', 'PATCH', 'TRACE', 'CONNECT', 'REFRESH']),
        return_content = dict(required=False, default='no', type='bool'),
        follow_redirects = dict(required=False, default='safe', choices=['all', 'safe', 'none', 'yes', 'no']),
        creates = dict(required=False, default=None, type='path'),
        removes = dict(required=False, default=None, type='path'),
        status_code = dict(required=False, default=[200], type='list'),
        timeout = dict(required=False, default=30, type='int'),
        headers = dict(required=False, type='dict', default={})
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        # Accept unknown args so the legacy HEADER_* parameters keep working.
        check_invalid_arguments=False,
        add_file_common_args=True
    )

    url = module.params['url']
    body = module.params['body']
    body_format = module.params['body_format'].lower()
    method = module.params['method']
    dest = module.params['dest']
    return_content = module.params['return_content']
    creates = module.params['creates']
    removes = module.params['removes']
    status_code = [int(x) for x in list(module.params['status_code'])]
    socket_timeout = module.params['timeout']

    dict_headers = module.params['headers']

    if body_format == 'json':
        # Encode the body unless its a string, then assume it is pre-formatted JSON.
        # Bug fix: `basestring` is Python 2 only (NameError on Python 3);
        # use six.string_types, consistent with the rest of this module.
        if not isinstance(body, six.string_types):
            body = json.dumps(body)
        dict_headers['Content-Type'] = 'application/json'

    # Grab all the http headers. Need this hack since passing multi-values is
    # currently a bit ugly. (e.g. headers='{"Content-Type":"application/json"}')
    for key, value in six.iteritems(module.params):
        if key.startswith("HEADER_"):
            skey = key.replace("HEADER_", "")
            dict_headers[skey] = value

    if creates is not None:
        # do not run the command if the line contains creates=filename
        # and the filename already exists. This allows idempotence
        # of uri executions.
        if os.path.exists(creates):
            module.exit_json(stdout="skipped, since %s exists" % creates,
                             changed=False, stderr=False, rc=0)

    if removes is not None:
        # do not run the command if the line contains removes=filename
        # and the filename do not exists. This allows idempotence
        # of uri executions.
        if not os.path.exists(removes):
            module.exit_json(stdout="skipped, since %s does not exist" % removes, changed=False, stderr=False, rc=0)

    # Make the request
    resp, content, dest = uri(module, url, dest, body, body_format, method,
                              dict_headers, socket_timeout)
    resp['status'] = int(resp['status'])

    # Write the file out if requested
    if dest is not None:
        if resp['status'] == 304:
            # Not modified: nothing to write.
            changed = False
        else:
            write_file(module, url, dest, content)
            # allow file attribute changes
            changed = True
            module.params['path'] = dest
            file_args = module.load_file_common_arguments(module.params)
            file_args['path'] = dest
            changed = module.set_fs_attributes_if_different(file_args, changed)
        resp['path'] = dest
    else:
        changed = False

    # Transmogrify the headers, replacing '-' with '_', since variables dont
    # work with dashes.
    # In python3, the headers are title cased. Lowercase them to be
    # compatible with the python2 behaviour.
    uresp = {}
    for key, value in six.iteritems(resp):
        ukey = key.replace("-", "_").lower()
        uresp[ukey] = value

    try:
        uresp['location'] = absolute_location(url, uresp['location'])
    except KeyError:
        pass

    # Default content_encoding to try
    content_encoding = 'utf-8'
    if 'content_type' in uresp:
        content_type, params = cgi.parse_header(uresp['content_type'])
        if 'charset' in params:
            content_encoding = params['charset']
        u_content = to_text(content, encoding=content_encoding)
        if 'application/json' in content_type or 'text/json' in content_type:
            try:
                js = json.loads(u_content)
                uresp['json'] = js
            except:
                pass
    else:
        u_content = to_text(content, encoding=content_encoding)

    if resp['status'] not in status_code:
        uresp['msg'] = 'Status code was not %s: %s' % (status_code, uresp.get('msg', ''))
        module.fail_json(content=u_content, **uresp)
    elif return_content:
        module.exit_json(changed=changed, content=u_content, **uresp)
    else:
        module.exit_json(changed=changed, **uresp)
if __name__ == '__main__':
main()
| gpl-3.0 |
paran0ids0ul/infernal-twin | build/pip/pip/index.py | 237 | 47847 | """Routines related to PyPI, indexes"""
from __future__ import absolute_import
import logging
import cgi
from collections import namedtuple
import itertools
import sys
import os
import re
import mimetypes
import posixpath
import warnings
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.compat import ipaddress
from pip.utils import (
Inf, cached_property, normalize_name, splitext, normalize_path,
ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS)
from pip.utils.deprecation import RemovedInPip8Warning
from pip.utils.logging import indent_log
from pip.exceptions import (
DistributionNotFound, BestVersionAlreadyInstalled, InvalidWheelFilename,
UnsupportedWheel,
)
from pip.download import HAS_TLS, url_to_path, path_to_url
from pip.models import PyPI
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import supported_tags, supported_tags_noarch, get_platform
from pip._vendor import html5lib, requests, pkg_resources, six
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.requests.exceptions import SSLError
__all__ = ['FormatControl', 'fmt_ctl_handle_mutual_exclude', 'PackageFinder']
# Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
SECURE_ORIGINS = [
# protocol, hostname, port
("https", "*", "*"),
("*", "localhost", "*"),
("*", "127.0.0.0/8", "*"),
("*", "::1/128", "*"),
("file", "*", None),
]
logger = logging.getLogger(__name__)
class InstallationCandidate(object):
    """A (project, version, location) triple found on an index.

    Candidates hash and order by their ``(project, parsed-version,
    location)`` key.
    """

    def __init__(self, project, version, location):
        self.project = project
        self.version = parse_version(version)
        self.location = location
        self._key = (self.project, self.version, self.location)

    def __repr__(self):
        return "<InstallationCandidate({0!r}, {1!r}, {2!r})>".format(
            self.project, self.version, self.location,
        )

    def __hash__(self):
        return hash(self._key)

    def __lt__(self, other):
        return self._compare(other, lambda a, b: a < b)

    def __le__(self, other):
        return self._compare(other, lambda a, b: a <= b)

    def __eq__(self, other):
        return self._compare(other, lambda a, b: a == b)

    def __ge__(self, other):
        return self._compare(other, lambda a, b: a >= b)

    def __gt__(self, other):
        return self._compare(other, lambda a, b: a > b)

    def __ne__(self, other):
        return self._compare(other, lambda a, b: a != b)

    def _compare(self, other, op):
        # Comparisons are only defined against other candidates.
        if not isinstance(other, InstallationCandidate):
            return NotImplemented
        return op(self._key, other._key)
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links.
"""
    def __init__(self, find_links, index_urls,
                 allow_external=(), allow_unverified=(),
                 allow_all_external=False, allow_all_prereleases=False,
                 trusted_hosts=None, process_dependency_links=False,
                 session=None, format_control=None):
        """Create a PackageFinder.

        :param session: a requests-compatible session; required (as a
            keyword argument).
        :param format_control: A FormatControl object or None. Used to control
            the selection of source packages / binary packages when consulting
            the index and links.
        """
        if session is None:
            raise TypeError(
                "PackageFinder() missing 1 required keyword argument: "
                "'session'"
            )

        # Build find_links. If an argument starts with ~, it may be
        # a local file relative to a home directory. So try normalizing
        # it and if it exists, use the normalized version.
        # This is deliberately conservative - it might be fine just to
        # blindly normalize anything starting with a ~...
        self.find_links = []
        for link in find_links:
            if link.startswith('~'):
                new_link = normalize_path(link)
                if os.path.exists(new_link):
                    link = new_link
            self.find_links.append(link)

        self.index_urls = index_urls
        self.dependency_links = []

        # These are boring links that have already been logged somehow:
        self.logged_links = set()

        self.format_control = format_control or FormatControl(set(), set())

        # Do we allow (safe and verifiable) externally hosted files?
        self.allow_external = set(normalize_name(n) for n in allow_external)

        # Which names are allowed to install insecure and unverifiable files?
        self.allow_unverified = set(
            normalize_name(n) for n in allow_unverified
        )

        # Anything that is allowed unverified is also allowed external
        self.allow_external |= self.allow_unverified

        # Do we allow all (safe and verifiable) externally hosted files?
        self.allow_all_external = allow_all_external

        # Domains that we won't emit warnings for when not using HTTPS
        self.secure_origins = [
            ("*", host, "*")
            for host in (trusted_hosts if trusted_hosts else [])
        ]

        # Stores if we ignored any external links so that we can instruct
        # end users how to install them if no distributions are available
        self.need_warn_external = False

        # Stores if we ignored any unsafe links so that we can instruct
        # end users how to install them if no distributions are available
        self.need_warn_unverified = False

        # Do we want to allow _all_ pre-releases?
        self.allow_all_prereleases = allow_all_prereleases

        # Do we process dependency links?
        self.process_dependency_links = process_dependency_links

        # The Session we'll use to make requests
        self.session = session

        # If we don't have TLS enabled, then WARN if anyplace we're looking
        # relies on TLS.
        if not HAS_TLS:
            for link in itertools.chain(self.index_urls, self.find_links):
                parsed = urllib_parse.urlparse(link)
                if parsed.scheme == "https":
                    logger.warning(
                        "pip is configured with locations that require "
                        "TLS/SSL, however the ssl module in Python is not "
                        "available."
                    )
                    break
def add_dependency_links(self, links):
# # FIXME: this shouldn't be global list this, it should only
# # apply to requirements of the package that specifies the
# # dependency_links value
# # FIXME: also, we should track comes_from (i.e., use Link)
if self.process_dependency_links:
warnings.warn(
"Dependency Links processing has been deprecated and will be "
"removed in a future release.",
RemovedInPip8Warning,
)
self.dependency_links.extend(links)
    @staticmethod
    def _sort_locations(locations, expand_dir=False):
        """
        Sort locations into "files" (archives) and "urls", and return
        a pair of lists (files,urls)
        """
        files = []
        urls = []

        # puts the url for the given file path into the appropriate list
        def sort_path(path):
            url = path_to_url(path)
            if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
                urls.append(url)
            else:
                files.append(url)

        for url in locations:

            is_local_path = os.path.exists(url)
            is_file_url = url.startswith('file:')

            if is_local_path or is_file_url:
                if is_local_path:
                    path = url
                else:
                    path = url_to_path(url)
                if os.path.isdir(path):
                    if expand_dir:
                        # Treat every entry of the directory as a location.
                        path = os.path.realpath(path)
                        for item in os.listdir(path):
                            sort_path(os.path.join(path, item))
                    elif is_file_url:
                        # A file: URL to a directory serves as an index page.
                        urls.append(url)
                elif os.path.isfile(path):
                    sort_path(path)
            else:
                # Not a local path and not a file: URL -> treat as an index URL.
                urls.append(url)

        return files, urls
    def _candidate_sort_key(self, candidate):
        """
        Function used to generate link sort key for link tuples.
        The greater the return value, the more preferred it is.
        If not finding wheels, then sorted by version only.
        If finding wheels, then the sort order is by version, then:
          1. existing installs
          2. wheels ordered via Wheel.support_index_min()
          3. source archives
        Note: it was considered to embed this logic into the Link
              comparison operators, but then different sdist links
              with the same version, would have to be considered equal
        """
        support_num = len(supported_tags)
        if candidate.location == INSTALLED_VERSION:
            # An existing install outranks any downloadable artifact.
            pri = 1
        elif candidate.location.is_wheel:
            # can raise InvalidWheelFilename
            wheel = Wheel(candidate.location.filename)
            if not wheel.supported():
                raise UnsupportedWheel(
                    "%s is not a supported wheel for this platform. It "
                    "can't be sorted." % wheel.filename
                )
            # Better-matching wheels (lower support index) sort higher.
            pri = -(wheel.support_index_min())
        else:  # sdist
            # Source archives sort below every supported wheel.
            pri = -(support_num)
        return (candidate.version, pri)
def _sort_versions(self, applicable_versions):
"""
Bring the latest version (and wheels) to the front, but maintain the
existing ordering as secondary. See the docstring for `_link_sort_key`
for details. This function is isolated for easier unit testing.
"""
return sorted(
applicable_versions,
key=self._candidate_sort_key,
reverse=True
)
    def _validate_secure_origin(self, logger, location):
        """Return True if *location* is served from a trusted/secure origin.

        Insecure origins are rejected (False) after logging a warning.
        """
        # Determine if this url used a secure transport mechanism
        parsed = urllib_parse.urlparse(str(location))
        origin = (parsed.scheme, parsed.hostname, parsed.port)

        # Determine if our origin is a secure origin by looking through our
        # hardcoded list of secure origins, as well as any additional ones
        # configured on this PackageFinder instance.
        for secure_origin in (SECURE_ORIGINS + self.secure_origins):
            # Check to see if the protocol matches
            if origin[0] != secure_origin[0] and secure_origin[0] != "*":
                continue

            try:
                # We need to do this decode dance to ensure that we have a
                # unicode object, even on Python 2.x.
                addr = ipaddress.ip_address(
                    origin[1]
                    if (
                        isinstance(origin[1], six.text_type) or
                        origin[1] is None
                    )
                    else origin[1].decode("utf8")
                )
                network = ipaddress.ip_network(
                    secure_origin[1]
                    if isinstance(secure_origin[1], six.text_type)
                    else secure_origin[1].decode("utf8")
                )
            except ValueError:
                # We don't have both a valid address or a valid network, so
                # we'll check this origin against hostnames.
                if origin[1] != secure_origin[1] and secure_origin[1] != "*":
                    continue
            else:
                # We have a valid address and network, so see if the address
                # is contained within the network.
                if addr not in network:
                    continue

            # Check to see if the port matches
            if (origin[2] != secure_origin[2] and
                    secure_origin[2] != "*" and
                    secure_origin[2] is not None):
                continue

            # If we've gotten here, then this origin matches the current
            # secure origin and we should return True
            return True

        # If we've gotten to this point, then the origin isn't secure and we
        # will not accept it as a valid location to search. We will however
        # log a warning that we are ignoring it.
        logger.warning(
            "The repository located at %s is not a trusted or secure host and "
            "is being ignored. If this repository is available via HTTPS it "
            "is recommended to use HTTPS instead, otherwise you may silence "
            "this warning and allow it anyways with '--trusted-host %s'.",
            parsed.hostname,
            parsed.hostname,
        )

        return False
    def _get_index_urls_locations(self, project_name):
        """Returns the locations found via self.index_urls

        Checks the url_name on the main (first in the list) index and
        use this url_name to produce all locations
        """

        def mkurl_pypi_url(url):
            loc = posixpath.join(url, project_url_name)
            # For maximum compatibility with easy_install, ensure the path
            # ends in a trailing slash. Although this isn't in the spec
            # (and PyPI can handle it without the slash) some other index
            # implementations might break if they relied on easy_install's
            # behavior.
            if not loc.endswith('/'):
                loc = loc + '/'
            return loc

        project_url_name = urllib_parse.quote(project_name.lower())

        if self.index_urls:
            # Check that we have the url_name correctly spelled:

            # Only check main index if index URL is given
            main_index_url = Link(
                mkurl_pypi_url(self.index_urls[0]),
                trusted=True,
            )

            page = self._get_page(main_index_url)
            if page is None and PyPI.netloc not in str(main_index_url):
                warnings.warn(
                    "Failed to find %r at %s. It is suggested to upgrade "
                    "your index to support normalized names as the name in "
                    "/simple/{name}." % (project_name, main_index_url),
                    RemovedInPip8Warning,
                )

            # Fall back to the quoted name when the index lookup fails.
            project_url_name = self._find_url_name(
                Link(self.index_urls[0], trusted=True),
                project_url_name,
            ) or project_url_name

        if project_url_name is not None:
            return [mkurl_pypi_url(url) for url in self.index_urls]
        return []
    def _find_all_versions(self, project_name):
        """Find all available versions for project_name

        This checks index_urls, find_links and dependency_links.
        All versions found are returned as an unsorted list of
        InstallationCandidate objects.

        See _link_package_versions for details on which files are accepted
        """
        # Each source of locations is split into local files vs. remote URLs.
        index_locations = self._get_index_urls_locations(project_name)
        index_file_loc, index_url_loc = self._sort_locations(index_locations)
        fl_file_loc, fl_url_loc = self._sort_locations(
            self.find_links, expand_dir=True)
        dep_file_loc, dep_url_loc = self._sort_locations(self.dependency_links)
        # Lazy generator; consumed once below by _package_versions.
        file_locations = (
            Link(url) for url in itertools.chain(
                index_file_loc, fl_file_loc, dep_file_loc)
        )
        # We trust every url that the user has given us whether it was given
        # via --index-url or --find-links
        # We explicitly do not trust links that came from dependency_links
        # We want to filter out any thing which does not have a secure origin.
        url_locations = [
            link for link in itertools.chain(
                (Link(url, trusted=True) for url in index_url_loc),
                (Link(url, trusted=True) for url in fl_url_loc),
                (Link(url) for url in dep_url_loc),
            )
            if self._validate_secure_origin(logger, link)
        ]
        logger.debug('%d location(s) to search for versions of %s:',
                     len(url_locations), project_name)
        for location in url_locations:
            logger.debug('* %s', location)
        # Allowed formats (binary/source) can differ per project via
        # --no-binary / --only-binary.
        canonical_name = pkg_resources.safe_name(project_name).lower()
        formats = fmt_ctl_formats(self.format_control, canonical_name)
        search = Search(project_name.lower(), canonical_name, formats)
        find_links_versions = self._package_versions(
            # We trust every directly linked archive in find_links
            (Link(url, '-f', trusted=True) for url in self.find_links),
            search
        )
        page_versions = []
        for page in self._get_pages(url_locations, project_name):
            logger.debug('Analyzing links from page %s', page.url)
            with indent_log():
                page_versions.extend(
                    self._package_versions(page.links, search)
                )
        dependency_versions = self._package_versions(
            (Link(url) for url in self.dependency_links), search
        )
        if dependency_versions:
            logger.debug(
                'dependency_links found: %s',
                ', '.join([
                    version.location.url for version in dependency_versions
                ])
            )
        file_versions = self._package_versions(file_locations, search)
        if file_versions:
            file_versions.sort(reverse=True)
            logger.debug(
                'Local files found: %s',
                ', '.join([
                    url_to_path(candidate.location.url)
                    for candidate in file_versions
                ])
            )
        # This is an intentional priority ordering
        return (
            file_versions + find_links_versions + page_versions +
            dependency_versions
        )
    def find_requirement(self, req, upgrade):
        """Try to find an InstallationCandidate for req

        Expects req, an InstallRequirement and upgrade, a boolean.
        Returns an InstallationCandidate or None.
        May raise DistributionNotFound or BestVersionAlreadyInstalled.
        """
        all_versions = self._find_all_versions(req.name)
        # Filter out anything which doesn't match our specifier
        _versions = set(
            req.specifier.filter(
                # We turn the version object into a str here because otherwise
                # when we're debundled but setuptools isn't, Python will see
                # packaging.version.Version and
                # pkg_resources._vendor.packaging.version.Version as different
                # types. This way we'll use a str as a common data interchange
                # format. If we stop using the pkg_resources provided specifier
                # and start using our own, we can drop the cast to str().
                [str(x.version) for x in all_versions],
                prereleases=(
                    self.allow_all_prereleases
                    if self.allow_all_prereleases else None
                ),
            )
        )
        applicable_versions = [
            # Again, converting to str to deal with debundling.
            x for x in all_versions if str(x.version) in _versions
        ]
        if req.satisfied_by is not None:
            # Finally add our existing versions to the front of our versions.
            applicable_versions.insert(
                0,
                InstallationCandidate(
                    req.name,
                    req.satisfied_by.version,
                    INSTALLED_VERSION,
                )
            )
            existing_applicable = True
        else:
            existing_applicable = False
        applicable_versions = self._sort_versions(applicable_versions)
        # Without --upgrade, an already-installed satisfying version wins.
        if not upgrade and existing_applicable:
            if applicable_versions[0].location is INSTALLED_VERSION:
                logger.debug(
                    'Existing installed version (%s) is most up-to-date and '
                    'satisfies requirement',
                    req.satisfied_by.version,
                )
            else:
                logger.debug(
                    'Existing installed version (%s) satisfies requirement '
                    '(most up-to-date version is %s)',
                    req.satisfied_by.version,
                    applicable_versions[0][2],
                )
            return None
        if not applicable_versions:
            logger.critical(
                'Could not find a version that satisfies the requirement %s '
                '(from versions: %s)',
                req,
                ', '.join(
                    sorted(
                        set(str(i.version) for i in all_versions),
                        key=parse_version,
                    )
                )
            )
            if self.need_warn_external:
                logger.warning(
                    "Some externally hosted files were ignored as access to "
                    "them may be unreliable (use --allow-external %s to "
                    "allow).",
                    req.name,
                )
            if self.need_warn_unverified:
                logger.warning(
                    "Some insecure and unverifiable files were ignored"
                    " (use --allow-unverified %s to allow).",
                    req.name,
                )
            raise DistributionNotFound(
                'No matching distribution found for %s' % req
            )
        if applicable_versions[0].location is INSTALLED_VERSION:
            # We have an existing version, and its the best version
            logger.debug(
                'Installed version (%s) is most up-to-date (past versions: '
                '%s)',
                req.satisfied_by.version,
                ', '.join(str(i.version) for i in applicable_versions[1:]) or
                "none",
            )
            raise BestVersionAlreadyInstalled
        if len(applicable_versions) > 1:
            logger.debug(
                'Using version %s (newest of versions: %s)',
                applicable_versions[0].version,
                ', '.join(str(i.version) for i in applicable_versions)
            )
        selected_version = applicable_versions[0].location
        # Warn (but still return the candidate) when the file cannot be
        # verified after download; see Link.verifiable.
        if (selected_version.verifiable is not None and not
                selected_version.verifiable):
            logger.warning(
                "%s is potentially insecure and unverifiable.", req.name,
            )
        return selected_version
def _find_url_name(self, index_url, url_name):
"""
Finds the true URL name of a package, when the given name isn't quite
correct.
This is usually used to implement case-insensitivity.
"""
if not index_url.url.endswith('/'):
# Vaguely part of the PyPI API... weird but true.
# FIXME: bad to modify this?
index_url.url += '/'
page = self._get_page(index_url)
if page is None:
logger.critical('Cannot fetch index base URL %s', index_url)
return
norm_name = normalize_name(url_name)
for link in page.links:
base = posixpath.basename(link.path.rstrip('/'))
if norm_name == normalize_name(base):
logger.debug(
'Real name of requirement %s is %s', url_name, base,
)
return base
return None
    def _get_pages(self, locations, project_name):
        """
        Yields HTMLPage objects fetched from the given locations, skipping
        locations that have errors, and following each page's
        download/homepage rel-links (subject to the --allow-external /
        --allow-unverified policy for *project_name*).
        """
        # Breadth-first walk: rel-links discovered on a page are appended to
        # the work queue; ``seen`` prevents refetching duplicates.
        all_locations = list(locations)
        seen = set()
        normalized = normalize_name(project_name)
        while all_locations:
            location = all_locations.pop(0)
            if location in seen:
                continue
            seen.add(location)
            page = self._get_page(location)
            if page is None:
                continue
            yield page
            for link in page.rel_links():
                # rel-links point off-index; only follow them when external
                # URLs are allowed for this project.
                if (normalized not in self.allow_external and not
                        self.allow_all_external):
                    self.need_warn_external = True
                    logger.debug(
                        "Not searching %s for files because external "
                        "urls are disallowed.",
                        link,
                    )
                    continue
                if (link.trusted is not None and not
                        link.trusted and
                        normalized not in self.allow_unverified):
                    logger.debug(
                        "Not searching %s for urls, it is an "
                        "untrusted link and cannot produce safe or "
                        "verifiable files.",
                        link,
                    )
                    self.need_warn_unverified = True
                    continue
                all_locations.append(link)
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"""
Returns elements of links in order, non-egg links first, egg links
second, while eliminating duplicates
"""
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search):
result = []
for link in self._sort_links(links):
v = self._link_package_versions(link, search)
if v is not None:
result.append(v)
return result
def _log_skipped_link(self, link, reason):
if link not in self.logged_links:
logger.debug('Skipping link %s; %s', link, reason)
self.logged_links.add(link)
    def _link_package_versions(self, link, search):
        """Return an InstallationCandidate for *link*, or None if rejected.

        A link is rejected (with a debug log entry explaining why) when its
        extension is unsupported, its format is excluded by search.formats,
        the wheel is incompatible or misnamed, no version can be parsed,
        the link is external/unverified without being allowed, or the file
        targets a different Python version.
        """
        platform = get_platform()
        version = None
        if link.egg_fragment:
            # An explicit "#egg=name-version" fragment overrides filename
            # parsing.
            egg_info = link.egg_fragment
            ext = link.ext
        else:
            egg_info, ext = link.splitext()
            if not ext:
                self._log_skipped_link(link, 'not a file')
                return
            if ext not in SUPPORTED_EXTENSIONS:
                self._log_skipped_link(
                    link, 'unsupported archive format: %s' % ext)
                return
            if "binary" not in search.formats and ext == wheel_ext:
                self._log_skipped_link(
                    link, 'No binaries permitted for %s' % search.supplied)
                return
            if "macosx10" in link.path and ext == '.zip':
                self._log_skipped_link(link, 'macosx10 one')
                return
            if ext == wheel_ext:
                try:
                    wheel = Wheel(link.filename)
                except InvalidWheelFilename:
                    self._log_skipped_link(link, 'invalid wheel filename')
                    return
                if (pkg_resources.safe_name(wheel.name).lower() !=
                        search.canonical):
                    self._log_skipped_link(
                        link, 'wrong project name (not %s)' % search.supplied)
                    return
                if not wheel.supported():
                    self._log_skipped_link(
                        link, 'it is not compatible with this Python')
                    return
                # This is a dirty hack to prevent installing Binary Wheels from
                # PyPI unless it is a Windows or Mac Binary Wheel. This is
                # paired with a change to PyPI disabling uploads for the
                # same. Once we have a mechanism for enabling support for
                # binary wheels on linux that deals with the inherent problems
                # of binary distribution this can be removed.
                comes_from = getattr(link, "comes_from", None)
                if (
                        (
                            not platform.startswith('win') and not
                            platform.startswith('macosx') and not
                            platform == 'cli'
                        ) and
                        comes_from is not None and
                        urllib_parse.urlparse(
                            comes_from.url
                        ).netloc.endswith(PyPI.netloc)):
                    if not wheel.supported(tags=supported_tags_noarch):
                        self._log_skipped_link(
                            link,
                            "it is a pypi-hosted binary "
                            "Wheel on an unsupported platform",
                        )
                        return
                version = wheel.version
        # This should be up by the search.ok_binary check, but see issue 2700.
        if "source" not in search.formats and ext != wheel_ext:
            self._log_skipped_link(
                link, 'No sources permitted for %s' % search.supplied)
            return
        if not version:
            # Fall back to parsing "name-version" out of the filename/egg
            # fragment.
            version = egg_info_matches(egg_info, search.supplied, link)
        if version is None:
            self._log_skipped_link(
                link, 'wrong project name (not %s)' % search.supplied)
            return
        if (link.internal is not None and not
                link.internal and not
                normalize_name(search.supplied).lower()
                in self.allow_external and not
                self.allow_all_external):
            # We have a link that we are sure is external, so we should skip
            # it unless we are allowing externals
            self._log_skipped_link(link, 'it is externally hosted')
            self.need_warn_external = True
            return
        if (link.verifiable is not None and not
                link.verifiable and not
                (normalize_name(search.supplied).lower()
                 in self.allow_unverified)):
            # We have a link that we are sure we cannot verify its integrity,
            # so we should skip it unless we are allowing unsafe installs
            # for this requirement.
            self._log_skipped_link(
                link, 'it is an insecure and unverifiable file')
            self.need_warn_unverified = True
            return
        # Strip a trailing "-pyX.Y" marker and reject mismatched Pythons.
        match = self._py_version_re.search(version)
        if match:
            version = version[:match.start()]
            py_version = match.group(1)
            if py_version != sys.version[:3]:
                self._log_skipped_link(
                    link, 'Python version is incorrect')
                return
        logger.debug('Found link %s, version: %s', link, version)
        return InstallationCandidate(search.supplied, version, link)
    def _get_page(self, link):
        # Fetch and parse ``link``; returns an HTMLPage, or None when the
        # page cannot (or should not) be fetched — see HTMLPage.get_page.
        return HTMLPage.get_page(link, session=self.session)
def egg_info_matches(
        egg_info, search_name, link,
        _egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)):
    """Pull the version part out of a string.

    :param egg_info: The string to parse. E.g. foo-2.1
    :param search_name: The name of the package this belongs to. None to
        infer the name. Note that this cannot unambiguously parse strings
        like foo-2-2 which might be foo, 2-2 or foo-2, 2.
    :param link: The link the string came from, for logging on failure.
    """
    match = _egg_info_re.search(egg_info)
    if match is None:
        logger.debug('Could not parse version from link: %s', link)
        return None
    matched_text = match.group(0)
    if search_name is None:
        # No package name to anchor on: return everything from the first
        # dash onwards (the dash itself included).
        return matched_text[matched_text.index('-'):]
    # To match the "safe" name that pkg_resources creates:
    normalized = matched_text.lower().replace('_', '-')
    # project name and version must be separated by a dash
    prefix = search_name.lower() + "-"
    if normalized.startswith(prefix):
        # Slice the original (un-normalized) text so the version keeps its
        # original characters.
        return matched_text[len(prefix):]
    return None
class HTMLPage(object):
    """Represents one page, along with its URL"""
    def __init__(self, content, url, headers=None, trusted=None):
        """Parse *content* (raw HTML) with html5lib, honoring any charset
        declared in the Content-Type response header."""
        # Determine if we have any encoding information in our headers
        encoding = None
        if headers and "Content-Type" in headers:
            content_type, params = cgi.parse_header(headers["Content-Type"])
            if "charset" in params:
                encoding = params['charset']
        self.content = content
        self.parsed = html5lib.parse(
            self.content,
            encoding=encoding,
            namespaceHTMLElements=False,
        )
        self.url = url
        self.headers = headers
        self.trusted = trusted
    def __str__(self):
        return self.url
    @classmethod
    def get_page(cls, link, skip_archives=True, session=None):
        """Fetch *link* and return it as an HTMLPage.

        Returns None for VCS URLs, archive/non-HTML content (when
        *skip_archives* is true), and any fetch error (which is logged).
        """
        if session is None:
            raise TypeError(
                "get_page() missing 1 required keyword argument: 'session'"
            )
        url = link.url
        url = url.split('#', 1)[0]
        # Check for VCS schemes that do not support lookup as web pages.
        from pip.vcs import VcsSupport
        for scheme in VcsSupport.schemes:
            if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
                logger.debug('Cannot look at %s URL %s', scheme, link)
                return None
        try:
            if skip_archives:
                # Cheap filter: if the filename looks like an archive, do a
                # HEAD request and only proceed when it is actually HTML.
                filename = link.filename
                for bad_ext in ARCHIVE_EXTENSIONS:
                    if filename.endswith(bad_ext):
                        content_type = cls._get_content_type(
                            url, session=session,
                        )
                        if content_type.lower().startswith('text/html'):
                            break
                        else:
                            logger.debug(
                                'Skipping page %s because of Content-Type: %s',
                                link,
                                content_type,
                            )
                            return
            logger.debug('Getting page %s', url)
            # Tack index.html onto file:// URLs that point to directories
            (scheme, netloc, path, params, query, fragment) = \
                urllib_parse.urlparse(url)
            if (scheme == 'file' and
                    os.path.isdir(urllib_request.url2pathname(path))):
                # add trailing slash if not present so urljoin doesn't trim
                # final segment
                if not url.endswith('/'):
                    url += '/'
                url = urllib_parse.urljoin(url, 'index.html')
                logger.debug(' file: URL is directory, getting %s', url)
            resp = session.get(
                url,
                headers={
                    "Accept": "text/html",
                    "Cache-Control": "max-age=600",
                },
            )
            resp.raise_for_status()
            # The check for archives above only works if the url ends with
            # something that looks like an archive. However that is not a
            # requirement of an url. Unless we issue a HEAD request on every
            # url we cannot know ahead of time for sure if something is HTML
            # or not. However we can check after we've downloaded it.
            content_type = resp.headers.get('Content-Type', 'unknown')
            if not content_type.lower().startswith("text/html"):
                logger.debug(
                    'Skipping page %s because of Content-Type: %s',
                    link,
                    content_type,
                )
                return
            inst = cls(
                resp.content, resp.url, resp.headers,
                trusted=link.trusted,
            )
        except requests.HTTPError as exc:
            level = 2 if exc.response.status_code == 404 else 1
            cls._handle_fail(link, exc, url, level=level)
        except requests.ConnectionError as exc:
            cls._handle_fail(link, "connection error: %s" % exc, url)
        except requests.Timeout:
            cls._handle_fail(link, "timed out", url)
        except SSLError as exc:
            reason = ("There was a problem confirming the ssl certificate: "
                      "%s" % exc)
            cls._handle_fail(link, reason, url, level=2, meth=logger.info)
        else:
            # Only reached when no exception occurred above.
            return inst
    @staticmethod
    def _handle_fail(link, reason, url, level=1, meth=None):
        # Log-and-skip helper for the fetch errors caught in get_page.
        if meth is None:
            meth = logger.debug
        meth("Could not fetch URL %s: %s - skipping", link, reason)
    @staticmethod
    def _get_content_type(url, session):
        """Get the Content-Type of the given url, using a HEAD request"""
        scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
        if scheme not in ('http', 'https'):
            # FIXME: some warning or something?
            # assertion error?
            return ''
        resp = session.head(url, allow_redirects=True)
        resp.raise_for_status()
        return resp.headers.get("Content-Type", "")
    @cached_property
    def api_version(self):
        """The index API version declared via a <meta name="api-version">
        tag, or None when absent/unparsable."""
        metas = [
            x for x in self.parsed.findall(".//meta")
            if x.get("name", "").lower() == "api-version"
        ]
        if metas:
            try:
                return int(metas[0].get("value", None))
            except (TypeError, ValueError):
                pass
        return None
    @cached_property
    def base_url(self):
        """The base URL for resolving relative links: the page's first
        <base href="..."> if present, otherwise the page's own URL."""
        bases = [
            x for x in self.parsed.findall(".//base")
            if x.get("href") is not None
        ]
        if bases and bases[0].get("href"):
            return bases[0].get("href")
        else:
            return self.url
    @property
    def links(self):
        """Yields all links in the page"""
        for anchor in self.parsed.findall(".//a"):
            if anchor.get("href"):
                href = anchor.get("href")
                url = self.clean_link(
                    urllib_parse.urljoin(self.base_url, href)
                )
                # Determine if this link is internal. If that distinction
                # doesn't make sense in this context, then we don't make
                # any distinction.
                internal = None
                if self.api_version and self.api_version >= 2:
                    # Only api_versions >= 2 have a distinction between
                    # external and internal links
                    internal = bool(
                        anchor.get("rel") and
                        "internal" in anchor.get("rel").split()
                    )
                yield Link(url, self, internal=internal)
    def rel_links(self, rels=('homepage', 'download')):
        """Yields all links with the given relations"""
        rels = set(rels)
        for anchor in self.parsed.findall(".//a"):
            if anchor.get("rel") and anchor.get("href"):
                found_rels = set(anchor.get("rel").split())
                # Determine the intersection between what rels were found and
                # what rels were being looked for
                if found_rels & rels:
                    href = anchor.get("href")
                    url = self.clean_link(
                        urllib_parse.urljoin(self.base_url, href)
                    )
                    yield Link(url, self, trusted=False)
    # Any character outside this set gets percent-encoded by clean_link.
    _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
    def clean_link(self, url):
        """Makes sure a link is fully encoded. That is, if a ' ' shows up in
        the link, it will be rewritten to %20 (while not over-quoting
        % or other characters)."""
        return self._clean_re.sub(
            lambda match: '%%%2x' % ord(match.group(0)), url)
class Link(object):
    """A URL (to a project page, archive, etc.) together with metadata
    about where it came from and how much it can be trusted.
    """
    def __init__(self, url, comes_from=None, internal=None, trusted=None):
        # url can be a UNC windows share
        if url != Inf and url.startswith('\\\\'):
            url = path_to_url(url)
        self.url = url
        self.comes_from = comes_from
        self.internal = internal
        self.trusted = trusted
    def __str__(self):
        if not self.comes_from:
            return str(self.url)
        return '%s (from %s)' % (self.url, self.comes_from)
    def __repr__(self):
        return '<Link %s>' % self
    # Links compare, order and hash purely by URL.
    def __eq__(self, other):
        if isinstance(other, Link):
            return self.url == other.url
        return NotImplemented
    def __ne__(self, other):
        if isinstance(other, Link):
            return self.url != other.url
        return NotImplemented
    def __lt__(self, other):
        if isinstance(other, Link):
            return self.url < other.url
        return NotImplemented
    def __le__(self, other):
        if isinstance(other, Link):
            return self.url <= other.url
        return NotImplemented
    def __gt__(self, other):
        if isinstance(other, Link):
            return self.url > other.url
        return NotImplemented
    def __ge__(self, other):
        if isinstance(other, Link):
            return self.url >= other.url
        return NotImplemented
    def __hash__(self):
        return hash(self.url)
    @property
    def filename(self):
        """The file name implied by the URL's last path segment (or, for a
        bare host URL, the network location)."""
        _, netloc, url_path, _, _ = urllib_parse.urlsplit(self.url)
        name = posixpath.basename(url_path.rstrip('/')) or netloc
        name = urllib_parse.unquote(name)
        assert name, ('URL %r produced no filename' % self.url)
        return name
    @property
    def scheme(self):
        return urllib_parse.urlsplit(self.url).scheme
    @property
    def netloc(self):
        return urllib_parse.urlsplit(self.url).netloc
    @property
    def path(self):
        return urllib_parse.unquote(urllib_parse.urlsplit(self.url).path)
    def splitext(self):
        return splitext(posixpath.basename(self.path.rstrip('/')))
    @property
    def ext(self):
        return self.splitext()[1]
    @property
    def url_without_fragment(self):
        parts = urllib_parse.urlsplit(self.url)
        return urllib_parse.urlunsplit(
            (parts.scheme, parts.netloc, parts.path, parts.query, None))
    _egg_fragment_re = re.compile(r'#egg=([^&]*)')
    @property
    def egg_fragment(self):
        """The project name from a '#egg=...' fragment, or None."""
        match = self._egg_fragment_re.search(self.url)
        return match.group(1) if match else None
    _hash_re = re.compile(
        r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)'
    )
    @property
    def hash(self):
        """The hex digest embedded in the URL, or None."""
        match = self._hash_re.search(self.url)
        return match.group(2) if match else None
    @property
    def hash_name(self):
        """The hash algorithm name embedded in the URL, or None."""
        match = self._hash_re.search(self.url)
        return match.group(1) if match else None
    @property
    def show_url(self):
        base = self.url.split('#', 1)[0]
        return posixpath.basename(base.split('?', 1)[0])
    @property
    def verifiable(self):
        """
        Returns True if this link can be verified after download, False if it
        cannot, and None if we cannot determine.
        """
        trusted = self.trusted or getattr(self.comes_from, "trusted", None)
        if trusted is None:
            # We know nothing about the origin of this link.
            return None
        if not trusted:
            # This link came from an untrusted source and we cannot trust it
            return False
        # This link came from a trusted source. It *may* be verifiable but
        # first we need to see if this page is operating under the new
        # API version.
        try:
            api_version = int(getattr(self.comes_from, "api_version", None))
        except (ValueError, TypeError):
            api_version = None
        if api_version is None or api_version <= 1:
            # Trusted, but not operating under API version 2, so we can't
            # make any claims about whether it's safe or not.
            return None
        # Under API version 2 a trusted link is verifiable exactly when it
        # carries a hash.
        return bool(self.hash)
    @property
    def is_wheel(self):
        return self.ext == wheel_ext
    @property
    def is_artifact(self):
        """
        Determines if this points to an actual artifact (e.g. a tarball) or if
        it points to an "abstract" thing like a path or a VCS location.
        """
        from pip.vcs import vcs
        return self.scheme not in vcs.all_schemes
# An object to represent the "link" for the installed version of a requirement.
# Using Inf as the url makes it sort higher.
INSTALLED_VERSION = Link(Inf)
# Records which distribution formats are disallowed, per package or globally
# (populated from --no-binary / --only-binary).
FormatControl = namedtuple('FormatControl', 'no_binary only_binary')
"""This object has two fields, no_binary and only_binary.
If a field is falsy, it isn't set. If it is {':all:'}, it should match all
packages except those listed in the other field. Only one field can be set
to {':all:'} at a time. The rest of the time exact package name matches
are listed, with any given package only showing up in one field at a time.
"""
def fmt_ctl_handle_mutual_exclude(value, target, other):
    """Merge the comma-separated *value* into the *target* set, keeping
    *target* and *other* mutually exclusive.

    ':all:' wipes both sets and marks *target* as matching everything;
    ':none:' resets *target*; every remaining entry is normalized and moved
    out of *other* (if present) into *target*.
    """
    entries = value.split(',')
    while ':all:' in entries:
        other.clear()
        target.clear()
        target.add(':all:')
        # Drop everything up to and including the ':all:' marker.
        del entries[:entries.index(':all:') + 1]
        if ':none:' not in entries:
            # Without a none, we want to discard everything as :all: covers it
            return
    for entry in entries:
        if entry == ':none:':
            target.clear()
            continue
        normalized = pkg_resources.safe_name(entry).lower()
        other.discard(normalized)
        target.add(normalized)
def fmt_ctl_formats(fmt_ctl, canonical_name):
    """Return the frozenset of formats ('binary'/'source') permitted for
    *canonical_name* under *fmt_ctl*.

    An exact package-name entry takes precedence over a ':all:' wildcard in
    the opposite field.
    """
    allowed = {"binary", "source"}
    if canonical_name in fmt_ctl.only_binary:
        allowed.discard('source')
    elif canonical_name in fmt_ctl.no_binary:
        allowed.discard('binary')
    elif ':all:' in fmt_ctl.only_binary:
        allowed.discard('source')
    elif ':all:' in fmt_ctl.no_binary:
        allowed.discard('binary')
    return frozenset(allowed)
def fmt_ctl_no_binary(fmt_ctl):
    """Disallow binary distributions for every package (--no-binary :all:)."""
    target, other = fmt_ctl.no_binary, fmt_ctl.only_binary
    fmt_ctl_handle_mutual_exclude(':all:', target, other)
def fmt_ctl_no_use_wheel(fmt_ctl):
    """Deprecated spelling of fmt_ctl_no_binary; warns the caller."""
    fmt_ctl_no_binary(fmt_ctl)
    warnings.warn(
        '--no-use-wheel is deprecated and will be removed in the future. '
        ' Please use --no-binary :all: instead.',
        DeprecationWarning,
        stacklevel=2,
    )
Search = namedtuple('Search', 'supplied canonical formats')
"""Capture key aspects of a search.
:attribute supplied: The user supplied package.
:attribute canonical: The canonical package name.
:attribute formats: The formats allowed for this package. Should be a set
with 'binary' or 'source' or both in it.
"""
| gpl-3.0 |
github-account-because-they-want-it/django | django/contrib/contenttypes/views.py | 380 | 3608 | from __future__ import unicode_literals
from django import http
from django.apps import apps
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.requests import RequestSite
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext as _
def shortcut(request, content_type_id, object_id):
    """
    Redirect to an object's page based on a content-type ID and an object ID.

    Raises Http404 when the content type, the object, or its
    get_absolute_url() method cannot be found.
    """
    # Look up the object, making sure it's got a get_absolute_url() function.
    try:
        content_type = ContentType.objects.get(pk=content_type_id)
        if not content_type.model_class():
            raise http.Http404(_("Content type %(ct_id)s object has no associated model") %
                               {'ct_id': content_type_id})
        obj = content_type.get_object_for_this_type(pk=object_id)
    except (ObjectDoesNotExist, ValueError):
        raise http.Http404(_("Content type %(ct_id)s object %(obj_id)s doesn't exist") %
                           {'ct_id': content_type_id, 'obj_id': object_id})
    try:
        get_absolute_url = obj.get_absolute_url
    except AttributeError:
        raise http.Http404(_("%(ct_name)s objects don't have a get_absolute_url() method") %
                           {'ct_name': content_type.name})
    absurl = get_absolute_url()
    # Try to figure out the object's domain, so we can do a cross-site redirect
    # if necessary.
    # If the object actually defines a domain, we're done.
    if absurl.startswith(('http://', 'https://', '//')):
        return http.HttpResponseRedirect(absurl)
    # Otherwise, we need to introspect the object's relationships for a
    # relation to the Site object
    object_domain = None
    if apps.is_installed('django.contrib.sites'):
        Site = apps.get_model('sites.Site')
        opts = obj._meta
        # First, look for an many-to-many relationship to Site.
        for field in opts.many_to_many:
            if field.remote_field.model is Site:
                try:
                    # Caveat: In the case of multiple related Sites, this just
                    # selects the *first* one, which is arbitrary.
                    object_domain = getattr(obj, field.name).all()[0].domain
                except IndexError:
                    pass
                if object_domain is not None:
                    break
        # Next, look for a many-to-one relationship to Site.
        if object_domain is None:
            for field in obj._meta.fields:
                if field.remote_field and field.remote_field.model is Site:
                    try:
                        object_domain = getattr(obj, field.name).domain
                    except Site.DoesNotExist:
                        pass
                    if object_domain is not None:
                        break
        # Fall back to the current site (if possible).
        if object_domain is None:
            try:
                object_domain = Site.objects.get_current(request).domain
            except Site.DoesNotExist:
                pass
    else:
        # Fall back to the current request's site.
        object_domain = RequestSite(request).domain
    # If all that malarkey found an object domain, use it. Otherwise, fall back
    # to whatever get_absolute_url() returned.
    if object_domain is not None:
        protocol = request.scheme
        return http.HttpResponseRedirect('%s://%s%s'
                                         % (protocol, object_domain, absurl))
    else:
        return http.HttpResponseRedirect(absurl)
| bsd-3-clause |
StevenBlack/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/test/finder_unittest.py | 124 | 5471 | # Copyright (C) 2012 Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import unittest2 as unittest
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.test.finder import Finder
class FinderTest(unittest.TestCase):
    """Tests for the test-file Finder, using an in-memory mock filesystem
    with two registered trees (/foo with namespace 'bar', and /foo2)."""
    def setUp(self):
        files = {
            '/foo/bar/baz.py': '',
            '/foo/bar/baz_unittest.py': '',
            '/foo2/bar2/baz2.py': '',
            '/foo2/bar2/baz2.pyc': '',
            '/foo2/bar2/baz2_integrationtest.py': '',
            '/foo2/bar2/missing.pyc': '',
            '/tmp/another_unittest.py': '',
        }
        self.fs = MockFileSystem(files)
        self.finder = Finder(self.fs)
        self.finder.add_tree('/foo', 'bar')
        self.finder.add_tree('/foo2')
        # Here we have to jump through a hoop to make sure test-webkitpy doesn't log
        # any messages from these tests :(.
        self.root_logger = logging.getLogger()
        self.log_levels = []
        self.log_handlers = self.root_logger.handlers[:]
        for handler in self.log_handlers:
            self.log_levels.append(handler.level)
            handler.level = logging.CRITICAL
    def tearDown(self):
        # Restore the handler levels silenced in setUp, in the same order.
        for handler in self.log_handlers:
            handler.level = self.log_levels.pop(0)
    def test_additional_system_paths(self):
        self.assertEqual(self.finder.additional_paths(['/usr']),
                         ['/foo', '/foo2'])
    def test_is_module(self):
        self.assertTrue(self.finder.is_module('bar.baz'))
        self.assertTrue(self.finder.is_module('bar2.baz2'))
        self.assertTrue(self.finder.is_module('bar2.baz2_integrationtest'))
        # Missing the proper namespace.
        self.assertFalse(self.finder.is_module('baz'))
    def test_to_module(self):
        self.assertEqual(self.finder.to_module('/foo/test.py'), 'test')
        self.assertEqual(self.finder.to_module('/foo/bar/test.py'), 'bar.test')
        self.assertEqual(self.finder.to_module('/foo/bar/pytest.py'), 'bar.pytest')
    def test_clean(self):
        # An orphaned .pyc (no matching .py) should be removed.
        self.assertTrue(self.fs.exists('/foo2/bar2/missing.pyc'))
        self.finder.clean_trees()
        self.assertFalse(self.fs.exists('/foo2/bar2/missing.pyc'))
    def check_names(self, names, expected_names, find_all=True):
        # Shared assertion helper for the find_names() tests below.
        self.assertEqual(self.finder.find_names(names, find_all), expected_names)
    def test_default_names(self):
        self.check_names([], ['bar.baz_unittest', 'bar2.baz2_integrationtest'], find_all=True)
        self.check_names([], ['bar.baz_unittest', 'bar2.baz2_integrationtest'], find_all=False)
        # Should return the names given it, even if they don't exist.
        self.check_names(['foobar'], ['foobar'], find_all=False)
    def test_paths(self):
        self.fs.chdir('/foo/bar')
        self.check_names(['baz_unittest.py'], ['bar.baz_unittest'])
        self.check_names(['./baz_unittest.py'], ['bar.baz_unittest'])
        self.check_names(['/foo/bar/baz_unittest.py'], ['bar.baz_unittest'])
        self.check_names(['.'], ['bar.baz_unittest'])
        self.check_names(['../../foo2/bar2'], ['bar2.baz2_integrationtest'])
        self.fs.chdir('/')
        self.check_names(['bar'], ['bar.baz_unittest'])
        self.check_names(['/foo/bar/'], ['bar.baz_unittest'])
        # This works 'by accident' since it maps onto a package.
        self.check_names(['bar/'], ['bar.baz_unittest'])
        # This should log an error, since it's outside the trees.
        oc = OutputCapture()
        oc.set_log_level(logging.ERROR)
        oc.capture_output()
        try:
            self.check_names(['/tmp/another_unittest.py'], [])
        finally:
            _, _, logs = oc.restore_output()
            self.assertIn('another_unittest.py', logs)
        # Paths that don't exist are errors.
        oc.capture_output()
        try:
            self.check_names(['/foo/bar/notexist_unittest.py'], [])
        finally:
            _, _, logs = oc.restore_output()
            self.assertIn('notexist_unittest.py', logs)
        # Names that don't exist are caught later, at load time.
        self.check_names(['bar.notexist_unittest'], ['bar.notexist_unittest'])
| bsd-3-clause |
addition-it-solutions/project-all | addons/crm/wizard/crm_phonecall_to_phonecall.py | 8 | 4319 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import time
class crm_phonecall2phonecall(osv.osv_memory):
    """Transient wizard that schedules a follow-up call (or logs a call)
    for the phonecalls selected in ``context['active_ids']``."""
    _name = 'crm.phonecall2phonecall'
    _description = 'Phonecall To Phonecall'

    _columns = {
        'name' : fields.char('Call summary', required=True, select=1),
        'user_id' : fields.many2one('res.users',"Assign To"),
        'contact_name':fields.char('Contact'),
        'phone':fields.char('Phone'),
        'categ_id': fields.many2one('crm.phonecall.category', 'Category'),
        'date': fields.datetime('Date'),
        'team_id':fields.many2one('crm.team','Sales Team', oldname='section_id'),
        'action': fields.selection([('schedule','Schedule a call'), ('log','Log a call')], 'Action', required=True),
        'partner_id' : fields.many2one('res.partner', "Partner"),
        'note':fields.text('Note')
    }

    def action_cancel(self, cr, uid, ids, context=None):
        """
        Closes Phonecall to Phonecall form
        """
        return {'type':'ir.actions.act_window_close'}

    def action_schedule(self, cr, uid, ids, context=None):
        """Create the follow-up/logged call(s) described by this wizard for
        every phonecall in ``context['active_ids']``, then redirect the
        client to the call created for the first selected phonecall."""
        value = {}  # NOTE(review): unused local, kept as-is
        if context is None:
            context = {}
        phonecall = self.pool.get('crm.phonecall')
        phonecall_ids = context and context.get('active_ids') or []
        for this in self.browse(cr, uid, ids, context=context):
            # schedule_another_phonecall maps each original phonecall id to
            # the id of the newly created call.
            phocall_ids = phonecall.schedule_another_phonecall(cr, uid, phonecall_ids, this.date, this.name, \
                            this.user_id and this.user_id.id or False, \
                            this.team_id and this.team_id.id or False, \
                            this.categ_id and this.categ_id.id or False, \
                            action=this.action, context=context)

        # Open the form view of the call created for the first phonecall.
        return phonecall.redirect_phonecall_view(cr, uid, phocall_ids[phonecall_ids[0]], context=context)

    def default_get(self, cr, uid, fields, context=None):
        """
        This function gets default values
        """
        res = super(crm_phonecall2phonecall, self).default_get(cr, uid, fields, context=context)
        record_id = context and context.get('active_id', False) or False
        res.update({'action': 'schedule', 'date': time.strftime('%Y-%m-%d %H:%M:%S')})
        if record_id:
            phonecall = self.pool.get('crm.phonecall').browse(cr, uid, record_id, context=context)

            categ_id = False
            data_obj = self.pool.get('ir.model.data')
            try:
                # Default category: the predefined 'categ_phone2' record
                # (may be absent, in which case categ_id stays False).
                res_id = data_obj._get_id(cr, uid, 'crm', 'categ_phone2')
                categ_id = data_obj.browse(cr, uid, res_id, context=context).res_id
            except ValueError:
                pass

            # Pre-fill the wizard from the originating phonecall, but only
            # for the fields the client actually requested.
            if 'name' in fields:
                res.update({'name': phonecall.name})
            if 'user_id' in fields:
                res.update({'user_id': phonecall.user_id and phonecall.user_id.id or False})
            if 'date' in fields:
                res.update({'date': False})
            if 'team_id' in fields:
                res.update({'team_id': phonecall.team_id and phonecall.team_id.id or False})
            if 'categ_id' in fields:
                res.update({'categ_id': categ_id})
            if 'partner_id' in fields:
                res.update({'partner_id': phonecall.partner_id and phonecall.partner_id.id or False})
        return res
| agpl-3.0 |
talavis/Protein-Data-Visualisation | project.py | 1 | 13480 | #!/usr/bin/env python2
import sys
# check input parameters, generate INDATA list
# code placed here to avoid unneccesary vtk loading
# Build INDATA from the command line (or fall back to the bundled data set).
# INDATA layout: (pdb file, connection file, (protein name, data file), ...)
if len(sys.argv) == 1 :
    # No arguments: use the bundled ADH demo data.
    sys.stderr.write('Using standard data set, to use other data:\n')
    sys.stderr.write('Usage: {0} <pdb file> <connection file> |<protname> <data file>| [repeat || for multiple proteins]\n'.format(sys.argv[0]))
    PREFIX = 'data/'
    INDATA = (PREFIX + '1U3W.pdb', PREFIX + 'adh_connections.txt',
              (PREFIX + 'ADH1', PREFIX + 'adh1_conservation.txt'),
              (PREFIX + 'ADH2', PREFIX + 'adh2_conservation.txt'),
              (PREFIX + 'ADH3', PREFIX + 'adh3_conservation.txt'),
              (PREFIX + 'ADH4', PREFIX + 'adh4_conservation.txt'),
              (PREFIX + 'ADH5', PREFIX + 'adh5_conservation.txt'))
    # alternative data set
    # INDATA = (PREFIX + '1U3W.pdb', PREFIX + 'adh_connections.txt',
    #          (PREFIX + 'ADH1', PREFIX + 'adh1_scores.txt'),
    #          (PREFIX + 'ADH2', PREFIX + 'adh2_scores.txt'),
    #          (PREFIX + 'ADH3', PREFIX + 'adh3_scores.txt'),
    #          (PREFIX + 'ADH4', PREFIX + 'adh4_scores.txt'),
    #          (PREFIX + 'ADH5', PREFIX + 'adh5_scores.txt'))
elif len(sys.argv) < 3 or len(sys.argv) % 2 != 1 :
    # BUG FIX: the old condition (len > 3 and len % 2 != 1) let a call with
    # exactly one argument fall through and crash on sys.argv[2]; require at
    # least <pdb> <connections> and an even number of trailing name/file pairs.
    sys.stderr.write('Usage: {0} <pdb file> <connection file> |<protname> <data file>| [repeat || for multiple proteins]\n'.format(sys.argv[0]))
    sys.exit()
else :
    INDATA = list()
    INDATA.append(sys.argv[1])
    INDATA.append(sys.argv[2])
    for i in range(3, len(sys.argv), 2) :
        INDATA.append((sys.argv[i], sys.argv[i+1]))
import Tkinter
import vtk
from vtk.tk.vtkTkRenderWidget import vtkTkRenderWidget
import read_data
class Protein :
    '''Container pairing a protein's name with its vtk data set and the
    raw per-residue scalar values used for colouring.'''

    def __init__(self, name, data, scalars) :
        self._name = name
        self._data = data
        self._scalars = scalars

    def _getName(self) :
        return self._name

    def _setName(self, newName) :
        self._name = newName

    # 'name' is read/write; 'data' and 'scalars' are read-only.
    name = property(_getName, _setName, doc = 'The protein name')
    data = property(lambda self : self._data, doc = 'The data set')
    scalars = property(lambda self : self._scalars, doc = 'The list of scalars')
class Interface :
    '''Tk + VTK user interface for visualising per-residue protein data.'''

    def __init__(self, indata) :
        '''indata: (pdb file, connection file, (name, data file), ...)'''
        self._proteins = list()
        self._protStruct = self.initVtkProtein(indata[0])
        self._protCoord = read_data.read_points(indata[0])
        self._protConnections = read_data.read_connections(indata[1])
        self.initVtkColors()
        for i in range(2, len(indata)) :
            self.addProtein(*indata[i])
        self.initUI()

    def addProtein(self, protName, dataFile) :
        '''Add a protein'''
        data, scalars = self.readData(dataFile)
        self._proteins.append(Protein(protName, data, scalars))

    def doMenuFileExit(self):
        '''Quit the application'''
        sys.exit(0)

    def getScoreRange(self) :
        '''Get the score range for the loaded protein data sets'''
        maxScore = 0
        # minScore will be 0
        minScore = 0
        for i in range(len(self._proteins)) :
            # BUG FIX: the loop body previously read
            # self._proteins[self._currentData1.get()] on every iteration,
            # so the loop had no effect; use the loop index so every
            # protein's range is considered.
            low, high = self._proteins[i].data.GetScalarRange()
            if maxScore == 0 or high > maxScore :
                maxScore = high
        return minScore, maxScore

    def initUI(self) :
        '''Initialize the UI'''
        # initialise tkinter
        self._root = Tkinter.Tk()
        self._root.title('Protein data visualisation')

        # tkinter ui
        # vtk
        self.renderWidget = vtkTkRenderWidget(self._root, width=1000, height=800)
        self.renderWidget.pack(expand='true', fill='both', side = Tkinter.RIGHT)
        wMain = self.renderWidget.GetRenderWindow()
        wMain.AddRenderer(self.initVtk())

        # toggle protein structure
        settingsManager = Tkinter.Frame(self._root)
        settingsManager.pack(side = Tkinter.LEFT)
        self._proteinStructureVisible = Tkinter.IntVar()
        self._proteinStructureVisible.set(1)
        self._toggleProteinStructureBox = Tkinter.Checkbutton(settingsManager, text = 'Show protein structure',
                                                              command = self.toggleProteinStructure,
                                                              var = self._proteinStructureVisible)
        self._toggleProteinStructureBox.pack()

        # toggle current data set
        dataManager = Tkinter.Frame(settingsManager)
        dataManager.pack()
        groupData1 = Tkinter.LabelFrame(dataManager, text = 'Data', padx = 5, pady = 5)
        groupData1.pack(padx = 10, pady = 10, side = Tkinter.LEFT, anchor = Tkinter.N)
        self._currentData1 = Tkinter.IntVar()
        self._currentData1.set(0)
        for i in range(len(self._proteins)) :
            Tkinter.Radiobutton(groupData1, text = self._proteins[i].name,
                                command = self.toggleProteinData,
                                var = self._currentData1,
                                value = i).pack(anchor = Tkinter.W)
        groupData2 = Tkinter.LabelFrame(dataManager, text='Compare with', padx = 5, pady = 5)
        groupData2.pack(padx = 10, pady = 10, side = Tkinter.RIGHT)
        self._currentData2 = Tkinter.IntVar()
        self._currentData2.set(0)
        for i in range(len(self._proteins)) :
            Tkinter.Radiobutton(groupData2, text = self._proteins[i].name,
                                command = self.toggleProteinData,
                                var = self._currentData2,
                                value = i).pack(anchor = Tkinter.W)
        # make sure the correct data set is shown
        self.toggleProteinData()

        # color scaling
        colorManager = Tkinter.LabelFrame(settingsManager, text = 'Color scaling', padx = 5, pady = 5)
        colorManager.pack()
        colorManagerLow = Tkinter.LabelFrame(colorManager, text = 'Lower limit', padx = 5, pady = 5)
        colorManagerLow.pack(side = Tkinter.LEFT)
        self._colorLow = Tkinter.StringVar()
        self._colorLow.set("0")
        self._colorScalerLow = Tkinter.Spinbox(colorManagerLow, from_ = 0, to = self.getScoreRange()[1],
                                               textvariable = self._colorLow, width = 8,
                                               command = self.updateColorScale, increment = 0.1)
        self._colorScalerLow.pack()
        colorManagerHigh = Tkinter.LabelFrame(colorManager, text = 'Upper limit', padx = 5, pady = 5)
        colorManagerHigh.pack(side = Tkinter.RIGHT)
        self._colorHigh = Tkinter.StringVar()
        self._colorHigh.set(str(self.getScoreRange()[1]))
        self._colorScalerHigh = Tkinter.Spinbox(colorManagerHigh, from_ = 0, to = self.getScoreRange()[1],
                                                textvariable = self._colorHigh, width = 8,
                                                command = self.updateColorScale, increment = 0.1)
        self._colorScalerHigh.pack(side = Tkinter.RIGHT)

        self._root.mainloop()

    def initVtk(self) :
        '''Initialize the VTK renderer'''
        main = vtk.vtkRenderer()
        main.SetBackground(0.2, 0.2, 0.2)
        main.AddActor(self._protStruct)
        self._dataVisualiserAtoms = self.initVtkAtoms(self._proteins[0].data)
        self._dataVisualiserBonds = self.initVtkBonds(self._proteins[0].data)
        # [0] : actor, [1] : datamanager
        main.AddActor(self._dataVisualiserAtoms[0])
        main.AddActor(self._dataVisualiserBonds[0])
        self._aSBar = self.initVtkBar()
        main.AddActor(self._aSBar)
        main.ResetCamera()
        return main

    def initVtkAtoms(self, data) :
        '''Initialize the residue representations'''
        sAtom = vtk.vtkSphereSource()
        sAtom.SetRadius(0.5)
        sAtom.SetThetaResolution(15)
        sAtom.SetPhiResolution(15)
        gAtom = vtk.vtkGlyph3D()
        gAtom.SetInputData(data)
        gAtom.SetSourceConnection(sAtom.GetOutputPort())
        gAtom.SetScaleModeToDataScalingOff()
        mAtom = vtk.vtkPolyDataMapper()
        mAtom.SetInputConnection(gAtom.GetOutputPort())
        mAtom.SetLookupTable(self._lut)
        mAtom.SetScalarRange(*data.GetScalarRange())
        aAtom = vtk.vtkActor()
        aAtom.SetMapper(mAtom)
        return aAtom, gAtom

    def initVtkBar(self) :
        '''Initialize the scalar bar for color levels'''
        aSBar = vtk.vtkScalarBarActor()
        aSBar.SetOrientationToVertical()
        aSBar.SetLookupTable(self._lut)
        aSBar.SetTitle("Residue score")
        aSBar.GetLabelTextProperty().SetColor(0.8, 0.8, 0.8)
        aSBar.GetTitleTextProperty().SetColor(0.8, 0.8, 0.8)
        aSBar.SetWidth(0.1)
        aSBar.SetHeight(0.9)
        spc = aSBar.GetPositionCoordinate()
        spc.SetCoordinateSystemToNormalizedViewport()
        spc.SetValue(0.05, 0.05)
        return aSBar

    def initVtkBonds(self, data) :
        '''Initialize the bond connectors'''
        bond = vtk.vtkTubeFilter()
        bond.SetNumberOfSides(6)
        bond.SetInputData(data)
        bond.SetRadius(0.15)
        bond.SetVaryRadiusToVaryRadiusOff()
        mBond = vtk.vtkPolyDataMapper()
        mBond.SetLookupTable(self._lut)
        mBond.SetInputConnection(bond.GetOutputPort())
        mBond.SetScalarRange(*data.GetScalarRange())
        aBond = vtk.vtkActor()
        aBond.SetMapper(mBond)
        return aBond, bond

    def initVtkColors(self) :
        '''Initialize the blue-to-red lookup table used for all colouring'''
        self._lut = vtk.vtkLookupTable()
        self._lut.SetHueRange(0.5, 0.0)
        self._lut.SetValueRange(1.0, 1.0)
        self._lut.SetSaturationRange(1.0, 1.0)
        self._lut.SetTableRange(0.0, 1.0)

    def initVtkProtein(self, pdbFile) :
        '''Read the PDB file and build a ribbon actor for the structure'''
        reader = vtk.vtkPDBReader()
        reader.SetFileName(pdbFile)
        protein = vtk.vtkProteinRibbonFilter()
        protein.SetInputConnection(reader.GetOutputPort())
        protein.SetDrawSmallMoleculesAsSpheres(False)
        protein.Update()
        mProtein = vtk.vtkPolyDataMapper()
        mProtein.SetInputData(protein.GetOutput())
        mProtein.Update()
        aProtein = vtk.vtkActor()
        aProtein.SetMapper(mProtein)
        return aProtein

    def joinData(self, prot1, prot2) :
        '''Build a data set whose scalars are the absolute per-residue
        difference between the two given proteins'''
        newScal = list()
        for i in range(len(self._proteins[prot1].scalars)) :
            newScal.append(abs(self._proteins[prot1].scalars[i]-self._proteins[prot2].scalars[i]))
        newData = vtk.vtkPolyData()
        newData.SetPoints(self._proteins[prot1].data.GetPoints())
        newData.GetPointData().SetScalars(self.packScalars(newScal))
        newData.SetLines(self._proteins[prot1].data.GetLines())
        return newData

    def packScalars(self, scalars) :
        '''Pack a python list of numbers into a vtkFloatArray'''
        outScalars = vtk.vtkFloatArray()
        for i in range(len(scalars)) :
            outScalars.InsertNextValue(scalars[i])
        return outScalars

    def readData(self, scoreFile) :
        '''Read a score file and combine it with the shared coordinates
        and connections into a vtkPolyData'''
        data = vtk.vtkPolyData()
        data.SetPoints(self._protCoord)
        scalars = read_data.read_scalars(scoreFile)
        data.GetPointData().SetScalars(self.packScalars(scalars))
        data.SetLines(self._protConnections)
        return data, scalars

    def toggleProteinData(self) :
        '''Check which proteins are active and make them visible'''
        if self._currentData1.get() == self._currentData2.get() :
            newData = self._proteins[self._currentData1.get()].data
        else :
            newData = self.joinData(self._currentData1.get(),
                                    self._currentData2.get())
        self._dataVisualiserAtoms[1].SetInputData(newData)
        self._dataVisualiserAtoms[1].Modified()
        self._dataVisualiserAtoms[1].Update()
        self._dataVisualiserBonds[1].SetInputData(newData)
        self._dataVisualiserBonds[1].Modified()
        self._dataVisualiserBonds[1].Update()
        self.renderWidget.GetRenderWindow().Render()

    def toggleProteinStructure(self) :
        '''Toggle the protein structure'''
        self._protStruct.SetVisibility(self._proteinStructureVisible.get())
        self.renderWidget.GetRenderWindow().Render()

    def updateVtkColors(self, minValue, maxValue) :
        '''Update the value range of the color table'''
        self._lut.SetTableRange(minValue, maxValue)
        self._dataVisualiserBonds[0].GetMapper().SetScalarRange(minValue, maxValue)
        self._dataVisualiserBonds[0].GetMapper().Modified()
        self._dataVisualiserBonds[0].GetMapper().Update()
        self._dataVisualiserAtoms[0].GetMapper().SetScalarRange(minValue, maxValue)
        self._dataVisualiserAtoms[0].GetMapper().Modified()
        self._dataVisualiserAtoms[0].GetMapper().Update()

    def updateColorScale(self) :
        '''Keep the two limit spinboxes consistent and re-render'''
        self._colorScalerHigh.config(from_ = self._colorLow.get())
        self._colorScalerLow.config(to = self._colorHigh.get())
        if float(self._colorHigh.get()) < float(self._colorLow.get()) :
            # BUG FIX: this previously did
            #   self._colorHigh = self._colorLow.get()
            # which replaced the StringVar with a plain string and crashed
            # on the next .get(); update the variable's value instead.
            self._colorHigh.set(self._colorLow.get())
        self.updateVtkColors(float(self._colorLow.get()), float(self._colorHigh.get()))
        self.renderWidget.GetRenderWindow().Render()
if __name__ == '__main__' :
    # Build the UI and enter the Tk main loop (blocks until the window closes).
    interface = Interface(INDATA)
| bsd-3-clause |
fbradyirl/home-assistant | homeassistant/components/braviatv/media_player.py | 2 | 11127 | """Support for interface with a Sony Bravia TV."""
import ipaddress
import logging
from getmac import get_mac_address
import voluptuous as vol
from homeassistant.components.media_player import MediaPlayerDevice, PLATFORM_SCHEMA
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import CONF_HOST, CONF_NAME, STATE_OFF, STATE_ON
import homeassistant.helpers.config_validation as cv
from homeassistant.util.json import load_json, save_json
# JSON file (in the HA config dir) holding the paired-TV credentials.
BRAVIA_CONFIG_FILE = "bravia.conf"

# Client id / nickname presented to the TV during PIN registration.
CLIENTID_PREFIX = "HomeAssistant"
DEFAULT_NAME = "Sony Bravia TV"
NICKNAME = "Home Assistant"

# Map ip to request id for configuring
_CONFIGURING = {}

_LOGGER = logging.getLogger(__name__)

# Feature bitmask returned by BraviaTVDevice.supported_features.
SUPPORT_BRAVIA = (
    SUPPORT_PAUSE
    | SUPPORT_VOLUME_STEP
    | SUPPORT_VOLUME_MUTE
    | SUPPORT_VOLUME_SET
    | SUPPORT_PREVIOUS_TRACK
    | SUPPORT_NEXT_TRACK
    | SUPPORT_TURN_ON
    | SUPPORT_TURN_OFF
    | SUPPORT_SELECT_SOURCE
    | SUPPORT_PLAY
)

# 'host' is required; 'name' falls back to DEFAULT_NAME.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Sony Bravia TV platform."""
    host = config.get(CONF_HOST)
    if host is None:
        return

    known_hosts = load_json(hass.config.path(BRAVIA_CONFIG_FILE))
    for host_ip, host_config in known_hosts.items():
        if host_ip != host:
            continue
        # Already-paired TV: restore it straight from the stored credentials.
        device = BraviaTVDevice(
            host, host_config["mac"], config.get(CONF_NAME), host_config["pin"]
        )
        add_entities([device])
        return

    # Unknown TV: no PIN yet, start the interactive pairing flow.
    setup_bravia(config, None, hass, add_entities)
def setup_bravia(config, pin, hass, add_entities):
    """Set up a Sony Bravia TV based on host parameter."""
    host = config.get(CONF_HOST)
    name = config.get(CONF_NAME)
    if pin is None:
        # No PIN yet: ask the user for one through the configurator.
        request_configuration(config, hass, add_entities)
        return

    try:
        # Choose the right keyword for get_mac_address depending on whether
        # 'host' is an IPv6 literal, an IPv4 literal, or a hostname.
        if ipaddress.ip_address(host).version == 6:
            mode = "ip6"
        else:
            mode = "ip"
    except ValueError:
        mode = "hostname"
    mac = get_mac_address(**{mode: host})

    # If we came here and configuring this host, mark as done
    if host in _CONFIGURING:
        request_id = _CONFIGURING.pop(host)
        configurator = hass.components.configurator
        configurator.request_done(request_id)
        _LOGGER.info("Discovery configuration done")

    # Save config
    # NOTE(review): this writes a dict containing only the current host, so
    # it appears to replace any previously stored TVs — confirm intended.
    save_json(
        hass.config.path(BRAVIA_CONFIG_FILE),
        {host: {"pin": pin, "host": host, "mac": mac}},
    )

    add_entities([BraviaTVDevice(host, mac, name, pin)])
def request_configuration(config, hass, add_entities):
    """Request configuration steps from the user."""
    host = config.get(CONF_HOST)
    name = config.get(CONF_NAME)

    configurator = hass.components.configurator

    # We got an error if this method is called while we are configuring
    if host in _CONFIGURING:
        configurator.notify_errors(
            _CONFIGURING[host], "Failed to register, please try again."
        )
        return

    def bravia_configuration_callback(data):
        """Handle the entry of user PIN."""
        from braviarc import braviarc

        pin = data.get("pin")
        # NOTE: rebinds the local name 'braviarc' from module to instance.
        braviarc = braviarc.BraviaRC(host)
        braviarc.connect(pin, CLIENTID_PREFIX, NICKNAME)
        if braviarc.is_connected():
            # PIN accepted: finish the setup (also clears _CONFIGURING).
            setup_bravia(config, pin, hass, add_entities)
        else:
            # Wrong PIN: show the configuration card again.
            request_configuration(config, hass, add_entities)

    # Remember the pending request so repeated calls report an error above.
    _CONFIGURING[host] = configurator.request_config(
        name,
        bravia_configuration_callback,
        description="Enter the Pin shown on your Sony Bravia TV."
        + "If no Pin is shown, enter 0000 to let TV show you a Pin.",
        description_image="/static/images/smart-tv.png",
        submit_caption="Confirm",
        fields=[{"id": "pin", "name": "Enter the pin", "type": ""}],
    )
class BraviaTVDevice(MediaPlayerDevice):
    """Representation of a Sony Bravia TV."""

    def __init__(self, host, mac, name, pin):
        """Initialize the Sony Bravia device."""
        from braviarc import braviarc

        self._pin = pin
        self._braviarc = braviarc.BraviaRC(host, mac)
        self._name = name
        # Assume off until a successful poll says otherwise.
        self._state = STATE_OFF
        self._muted = False
        # Playing-info fields, refreshed on every update().
        self._program_name = None
        self._channel_name = None
        self._channel_number = None
        self._source = None
        self._source_list = []
        self._original_content_list = []
        self._content_mapping = {}
        self._duration = None
        self._content_uri = None
        self._id = None
        self._playing = False
        self._start_date_time = None
        self._program_media_type = None
        # Volume info, refreshed by _refresh_volume().
        self._min_volume = None
        self._max_volume = None
        self._volume = None

        self._braviarc.connect(pin, CLIENTID_PREFIX, NICKNAME)
        if self._braviarc.is_connected():
            self.update()
        else:
            self._state = STATE_OFF

    def update(self):
        """Update TV info."""
        if not self._braviarc.is_connected():
            # Only attempt a reconnect while the TV reports it is powered.
            if self._braviarc.get_power_status() != "off":
                self._braviarc.connect(self._pin, CLIENTID_PREFIX, NICKNAME)
            if not self._braviarc.is_connected():
                return

        # Retrieve the latest data.
        try:
            if self._state == STATE_ON:
                # refresh volume info:
                self._refresh_volume()
                self._refresh_channels()

            power_status = self._braviarc.get_power_status()
            if power_status == "active":
                self._state = STATE_ON
                playing_info = self._braviarc.get_playing_info()
                self._reset_playing_info()
                if playing_info is None or not playing_info:
                    # No media metadata available: an app is in the foreground.
                    self._channel_name = "App"
                else:
                    self._program_name = playing_info.get("programTitle")
                    self._channel_name = playing_info.get("title")
                    self._program_media_type = playing_info.get("programMediaType")
                    self._channel_number = playing_info.get("dispNum")
                    self._source = playing_info.get("source")
                    self._content_uri = playing_info.get("uri")
                    self._duration = playing_info.get("durationSec")
                    self._start_date_time = playing_info.get("startDateTime")
            else:
                self._state = STATE_OFF

        except Exception as exception_instance:  # pylint: disable=broad-except
            # Any API failure is treated as the TV being off.
            _LOGGER.error(exception_instance)
            self._state = STATE_OFF

    def _reset_playing_info(self):
        # Clear all playing-info fields before re-populating them.
        self._program_name = None
        self._channel_name = None
        self._program_media_type = None
        self._channel_number = None
        self._source = None
        self._content_uri = None
        self._duration = None
        self._start_date_time = None

    def _refresh_volume(self):
        """Refresh volume information."""
        volume_info = self._braviarc.get_volume_info()
        if volume_info is not None:
            self._volume = volume_info.get("volume")
            self._min_volume = volume_info.get("minVolume")
            self._max_volume = volume_info.get("maxVolume")
            self._muted = volume_info.get("mute")

    def _refresh_channels(self):
        # Source list is fetched once and cached for the entity's lifetime.
        if not self._source_list:
            self._content_mapping = self._braviarc.load_source_list()
            self._source_list = []
            for key in self._content_mapping:
                self._source_list.append(key)

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def source(self):
        """Return the current input source."""
        return self._source

    @property
    def source_list(self):
        """List of available input sources."""
        return self._source_list

    @property
    def volume_level(self):
        """Volume level of the media player (0..1)."""
        if self._volume is not None:
            return self._volume / 100
        return None

    @property
    def is_volume_muted(self):
        """Boolean if volume is currently muted."""
        return self._muted

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_BRAVIA

    @property
    def media_title(self):
        """Title of current playing media."""
        # "<channel>: <program>" when both are known, else just the channel.
        return_value = None
        if self._channel_name is not None:
            return_value = self._channel_name
            if self._program_name is not None:
                return_value = return_value + ": " + self._program_name
        return return_value

    @property
    def media_content_id(self):
        """Content ID of current playing media."""
        return self._channel_name

    @property
    def media_duration(self):
        """Duration of current playing media in seconds."""
        return self._duration

    def set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        self._braviarc.set_volume_level(volume)

    def turn_on(self):
        """Turn the media player on."""
        self._braviarc.turn_on()

    def turn_off(self):
        """Turn off media player."""
        self._braviarc.turn_off()

    def volume_up(self):
        """Volume up the media player."""
        self._braviarc.volume_up()

    def volume_down(self):
        """Volume down media player."""
        self._braviarc.volume_down()

    def mute_volume(self, mute):
        """Send mute command."""
        self._braviarc.mute_volume(mute)

    def select_source(self, source):
        """Set the input source."""
        if source in self._content_mapping:
            uri = self._content_mapping[source]
            self._braviarc.play_content(uri)

    def media_play_pause(self):
        """Simulate play pause media player."""
        if self._playing:
            self.media_pause()
        else:
            self.media_play()

    def media_play(self):
        """Send play command."""
        self._playing = True
        self._braviarc.media_play()

    def media_pause(self):
        """Send media pause command to media player."""
        self._playing = False
        self._braviarc.media_pause()

    def media_next_track(self):
        """Send next track command."""
        self._braviarc.media_next_track()

    def media_previous_track(self):
        """Send the previous track command."""
        self._braviarc.media_previous_track()
| apache-2.0 |
Effi-01/Testing | script.video.F4mProxy/lib/f4mUtils/asn1parser.py | 206 | 1191 | # Author: Trevor Perrin
# Patch from Google adding getChildBytes()
#
# See the LICENSE file for legal information regarding use of this file.
"""Class for parsing ASN.1"""
from .compat import *
from .codec import *
#Takes a byte array which has a DER TLV field at its head
class ASN1Parser(object):
    """Parser for a single DER TLV element.

    The constructor consumes the Type and Length octets of the element at
    the head of *bytes* and keeps the Value; children of a SEQUENCE value
    can then be extracted by index.
    """

    def __init__(self, bytes):
        parser = Parser(bytes)
        parser.get(1)  # skip the Type octet
        # Decode the Length field, then capture exactly that many Value octets.
        self.length = self._getASN1Length(parser)
        self.value = parser.getFixBytes(self.length)

    def getChild(self, which):
        """Return child number *which* wrapped in a new ASN1Parser."""
        return ASN1Parser(self.getChildBytes(which))

    def getChildBytes(self, which):
        """Return the raw TLV bytes of child number *which*.

        Assumes the value is a SEQUENCE of TLV elements.
        """
        parser = Parser(self.value)
        for _ in range(which + 1):
            start = parser.index
            parser.get(1)  # skip the Type octet
            parser.getFixBytes(self._getASN1Length(parser))
        return parser.bytes[start : parser.index]

    def _getASN1Length(self, parser):
        """Decode a DER definite-length field."""
        head = parser.get(1)
        if head <= 127:
            # Short form: the octet itself is the length.
            return head
        # Long form: the low 7 bits give the number of length octets.
        return parser.get(head & 0x7F)
| gpl-2.0 |
gsobczyk/hamster | src/hamster/lib/dbus.py | 1 | 3725 | import dbus
from dbus.mainloop.glib import DBusGMainLoop as DBusMainLoop
from json import dumps, loads
from calendar import timegm
from hamster.lib import datetime as dt
from hamster.lib.fact import Fact
"""D-Bus communication utilities."""
# file layout: functions sorted in alphabetical order,
# not taking into account the "to_" and "from_" prefixes.
# So back and forth conversions are close to one another.
# dates
def from_dbus_date(dbus_date):
    """Convert a D-Bus timestamp (seconds since epoch) to a date.

    A falsy timestamp (0 or None) stands for "no date".
    """
    if not dbus_date:
        return None
    return dt.date.fromtimestamp(dbus_date)
def to_dbus_date(date):
    """Convert a date to a D-Bus timestamp (seconds since epoch).

    None (or any falsy date) is encoded as 0.
    """
    if not date:
        return 0
    return timegm(date.timetuple())
# facts
def from_dbus_fact_json(dbus_fact):
    """Convert D-Bus JSON (str) back to a Fact."""
    kwargs = loads(dbus_fact)
    raw_range = kwargs['range']
    # hamster's own parser instead of datetime.fromisoformat, which only
    # appeared in python 3.7.
    start_s = raw_range['start']
    end_s = raw_range['end']
    kwargs['range'] = dt.Range(
        start=dt.datetime.parse(start_s) if start_s else None,
        end=dt.datetime.parse(end_s) if end_s else None)
    return Fact(**kwargs)
def to_dbus_fact_json(fact):
    """Convert Fact to D-Bus JSON (str)."""
    keys = ('activity', 'category', 'description', 'tags', 'id', 'activity_id', 'exported')
    payload = {key: getattr(fact, key) for key in keys}
    # str() gives hamster's own datetime format; isoformat with a timespec
    # would need python 3.6+ and fromisoformat is unavailable anyway.
    start = fact.range.start
    end = fact.range.end
    payload['range'] = {'start': str(start) if start else None,
                        'end': str(end) if end else None}
    return dumps(payload)
# Range
def from_dbus_range(dbus_range):
    """Convert from D-Bus string to dt.Range."""
    parsed, _rest = dt.Range.parse(dbus_range, position="exact")
    return parsed
def to_dbus_range(range):
    """Convert dt.Range to D-Bus string."""
    # No default_day: always emit the full, context-independent format.
    formatted = range.format(default_day=None)
    return formatted
# Legacy functions:

"""
old dbus_fact signature (types matching the to_dbus_fact output)
i  id
i  start_time
i  end_time
s  description
s  activity name
i  activity id
s  category name
as List of fact tags
i  date
i  delta
b  exported
"""
# D-Bus struct signature for the legacy fact tuple documented above.
fact_signature = '(iiissisasiib)'
def from_dbus_fact(dbus_fact):
    """Unpack the struct into a proper dict.

    Legacy: to besuperceded by from_dbus_fact_json at some point.
    """
    # Field order matches fact_signature; 'date' and 'delta' are derived
    # values and are not needed to reconstruct the Fact.
    (fact_id, start_ts, end_ts, description, activity, activity_id,
     category, tags, _date, _delta, exported) = dbus_fact
    return Fact(activity=activity,
                start_time=dt.datetime.utcfromtimestamp(start_ts),
                end_time=dt.datetime.utcfromtimestamp(end_ts) if end_ts else None,
                description=description,
                activity_id=activity_id,
                category=category,
                tags=tags,
                exported=exported,
                id=fact_id
                )
def to_dbus_fact(fact):
    """Perform Fact conversion to D-Bus.

    Return the corresponding dbus structure, with supported data types.
    Legacy: to besuperceded by to_dbus_fact_json at some point.
    """
    start = timegm(fact.start_time.timetuple())
    # An open-ended fact is encoded with end_time 0.
    end = timegm(fact.end_time.timetuple()) if fact.end_time else 0
    delta_seconds = fact.delta.days * 24 * 60 * 60 + fact.delta.seconds
    return (fact.id or 0,
            start,
            end,
            fact.description or '',
            fact.activity or '',
            fact.activity_id or 0,
            fact.category or '',
            dbus.Array(fact.tags, signature='s'),
            to_dbus_date(fact.date),
            delta_seconds,
            fact.exported)
| gpl-3.0 |
himanshuo/osf.io | website/addons/github/views/hooks.py | 36 | 3114 | # -*- coding: utf-8 -*-
import os # noqa
from dateutil.parser import parse as dateparse
from flask import request
from website import models
from website.project.decorators import must_be_valid_project
from website.project.decorators import must_not_be_registration
from website.project.decorators import must_have_addon
from website.addons.github import utils
# TODO: Refactor using NodeLogger
def add_hook_log(node, github, action, path, date, committer, include_urls=False,
                 sha=None, save=False):
    """Record one log event for a commit received via the GitHub webhook.

    :param node: Node to add logs to
    :param github: GitHub node settings record
    :param path: Path to file
    :param date: Date of commit
    :param committer: Committer name
    :param include_urls: Include view/download URLs in `params`
    :param sha: SHA of updated file
    :param save: Save changes
    """
    urls = {}
    if include_urls:
        # TODO: Move to helper function
        base_url = node.web_url_for('addon_view_or_download_file', path=path, provider='github')
        urls = {
            'view': '{0}?ref={1}'.format(base_url, sha),
            'download': '{0}?action=download&ref={1}'.format(base_url, sha)
        }

    node.add_log(
        action=action,
        params={
            'project': node.parent_id,
            'node': node._id,
            'path': path,
            'github': {
                'user': github.user,
                'repo': github.repo,
            },
            'urls': urls,
        },
        auth=None,
        foreign_user=committer,
        log_date=date,
        save=save,
    )
@must_be_valid_project
@must_not_be_registration
@must_have_addon('github', 'node')
def github_hook_callback(node_addon, **kwargs):
    """Add logs for commits from outside OSF.
    """
    if request.json is None:
        return {}

    # Fail if hook signature is invalid
    utils.verify_hook_signature(
        node_addon,
        request.data,
        request.headers,
    )

    node = kwargs['node'] or kwargs['project']

    payload = request.json
    for commit in payload.get('commits', []):

        # TODO: Look up OSF user by commit

        # Skip if pushed by OSF
        if commit['message'] and commit['message'] in utils.MESSAGES.values():
            continue

        _id = commit['id']
        date = dateparse(commit['timestamp'])
        committer = commit['committer']['name']

        # Add logs: one entry per added / modified / removed path.
        for path in commit.get('added', []):
            add_hook_log(
                node, node_addon, 'github_' + models.NodeLog.FILE_ADDED,
                path, date, committer, include_urls=True, sha=_id,
            )
        for path in commit.get('modified', []):
            add_hook_log(
                node, node_addon, 'github_' + models.NodeLog.FILE_UPDATED,
                path, date, committer, include_urls=True, sha=_id,
            )
        for path in commit.get('removed', []):
            add_hook_log(
                node, node_addon, 'github_' + models.NodeLog.FILE_REMOVED,
                path, date, committer,
            )

    # Persist all log entries added above in a single save.
    node.save()
| apache-2.0 |
thewangcj/MyFlasky | app/__init__.py | 1 | 1626 | from flask import Flask
from flask_bootstrap import Bootstrap
from flask_mail import Mail
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from config import config # 导入配置
from flask_login import LoginManager
from flask_pagedown import PageDown
# Initialise flask-login.
login_manager = LoginManager()
# With 'strong', Flask-Login records the client IP address and browser
# user-agent and logs the user out if either changes.
login_manager.session_protection = 'strong'
# Endpoint of the login page; must be prefixed with the blueprint name.
login_manager.login_view = 'auth.login'

bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
db = SQLAlchemy()
pagedown = PageDown()
# Application factory: lets each environment create its own app instance
# explicitly, which also improves test coverage.
def create_app(config_name):
    """Create and configure a Flask application for *config_name*."""
    app = Flask(__name__)
    app.config.from_object(config[config_name])  # load settings straight from the config class
    config[config_name].init_app(app)

    # Wire up the extensions.
    bootstrap.init_app(app)
    mail.init_app(app)
    moment.init_app(app)
    db.init_app(app)  # initialise the database
    login_manager.init_app(app)
    pagedown.init_app(app)

    # Register the blueprints.
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)

    from .auth import auth as auth_blueprint
    # Every route in this blueprint is prefixed with /auth.
    app.register_blueprint(auth_blueprint, url_prefix='/auth')

    from .api_1_0 import api as api_1_0_blueprint
    app.register_blueprint(api_1_0_blueprint, url_prefix='/api/V1.0')

    return app
| mit |
jagill/treeano | treeano/sandbox/nodes/paired_conv.py | 2 | 2082 | """
node for 2 conv's paired together, which allows more flexible combinations of
filter size and padding - specifically even filter sizes can have "same"
padding
"""
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
import canopy
fX = theano.config.floatX
@treeano.register_node("paired_conv")
class PairedConvNode(treeano.WrapperNodeImpl):
    """Applies the "conv" child twice in sequence, with the "separator"
    child between the two applications, splitting the requested padding
    across the two convolutions.  This allows "same" padding even for
    even filter sizes, which a single conv cannot provide.
    """
    hyperparameter_names = ("inits",
                            "filter_size",
                            "num_filters",
                            "conv_pad",
                            "pad")
    # Two required children: the conv node to repeat and a separator node
    # placed between the two copies.
    children_container = treeano.core.DictChildrenContainerSchema(
        conv=treeano.core.ChildContainer,
        separator=treeano.core.ChildContainer,
    )
    def architecture_children(self):
        # Instantiate the conv child twice; the "_1"/"_2" suffixes keep
        # node names unique within the network graph.
        children = self.raw_children()
        conv_node = children["conv"]
        separator_node = children["separator"]
        return [tn.SequentialNode(
            self.name + "_sequential",
            [canopy.node_utils.suffix_node(conv_node, "_1"),
             separator_node,
             canopy.node_utils.suffix_node(conv_node, "_2")])]
    def init_state(self, network):
        super(PairedConvNode, self).init_state(network)
        filter_size = network.find_hyperparameter(["filter_size"])
        # calculate effective total filter size of the two stacked convs
        total_filter_size = tuple([fs * 2 - 1 for fs in filter_size])
        # by default, do same padding
        pad = network.find_hyperparameter(["conv_pad", "pad"], "same")
        total_pad = tn.conv.conv_parse_pad(total_filter_size, pad)
        # Split the total padding between the two convs; when the split is
        # uneven the first conv gets the larger share (p - p//2 >= p//2).
        second_pad = tuple([p // 2 for p in total_pad])
        first_pad = tuple([p - p2 for p, p2 in zip(total_pad, second_pad)])
        conv_node_name = self.raw_children()["conv"].name
        network.set_hyperparameter(conv_node_name + "_1",
                                   "pad",
                                   first_pad)
        network.set_hyperparameter(conv_node_name + "_2",
                                   "pad",
                                   second_pad)
| apache-2.0 |
Bedrock02/digital-tribe-site | lib/werkzeug/contrib/cache.py | 84 | 30341 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.cache
~~~~~~~~~~~~~~~~~~~~~~
The main problem with dynamic Web sites is, well, they're dynamic. Each
time a user requests a page, the webserver executes a lot of code, queries
the database, renders templates until the visitor gets the page he sees.
This is a lot more expensive than just loading a file from the file system
and sending it to the visitor.
For most Web applications, this overhead isn't a big deal but once it
becomes, you will be glad to have a cache system in place.
How Caching Works
=================
Caching is pretty simple. Basically you have a cache object lurking around
somewhere that is connected to a remote cache or the file system or
something else. When the request comes in you check if the current page
is already in the cache and if so, you're returning it from the cache.
Otherwise you generate the page and put it into the cache. (Or a fragment
of the page, you don't have to cache the full thing)
Here is a simple example of how to cache a sidebar for 5 minutes::
def get_sidebar(user):
identifier = 'sidebar_for/user%d' % user.id
value = cache.get(identifier)
if value is not None:
return value
value = generate_sidebar_for(user=user)
cache.set(identifier, value, timeout=60 * 5)
return value
Creating a Cache Object
=======================
To create a cache object you just import the cache system of your choice
from the cache module and instantiate it. Then you can start working
with that object:
>>> from werkzeug.contrib.cache import SimpleCache
>>> c = SimpleCache()
>>> c.set("foo", "value")
>>> c.get("foo")
'value'
>>> c.get("missing") is None
True
Please keep in mind that you have to create the cache and put it somewhere
you have access to it (either as a module global you can import or you just
put it into your WSGI application).
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import re
import errno
import tempfile
import platform
from hashlib import md5
from time import time
try:
import cPickle as pickle
except ImportError: # pragma: no cover
import pickle
from werkzeug._compat import iteritems, string_types, text_type, \
integer_types, to_native
from werkzeug.posixemulation import rename
def _items(mappingorseq):
"""Wrapper for efficient iteration over mappings represented by dicts
or sequences::
>>> for k, v in _items((i, i*i) for i in xrange(5)):
... assert k*k == v
>>> for k, v in _items(dict((i, i*i) for i in xrange(5))):
... assert k*k == v
"""
if hasattr(mappingorseq, 'items'):
return iteritems(mappingorseq)
return mappingorseq
class BaseCache(object):
    """Baseclass for the cache systems.  All the cache systems implement this
    API or a superset of it.

    :param default_timeout: the default timeout (in seconds) that is used if
                            no timeout is specified on :meth:`set`. A timeout
                            of 0 indicates that the cache never expires.
    """
    def __init__(self, default_timeout=300):
        self.default_timeout = default_timeout
    def _normalize_timeout(self, timeout):
        # Substitute the instance-wide default when no timeout was given.
        if timeout is None:
            timeout = self.default_timeout
        return timeout
    def get(self, key):
        """Look up key in the cache and return the value for it.

        :param key: the key to be looked up.
        :returns: The value if it exists and is readable, else ``None``.
        """
        return None
    def delete(self, key):
        """Delete `key` from the cache.

        :param key: the key to delete.
        :returns: Whether the key existed and has been deleted.
        :rtype: boolean
        """
        return True
    def get_many(self, *keys):
        """Returns a list of values for the given keys.
        For each key a item in the list is created::

            foo, bar = cache.get_many("foo", "bar")

        Has the same error handling as :meth:`get`.

        :param keys: The function accepts multiple keys as positional
                     arguments.
        """
        # Build a real list instead of returning ``map()``: on Python 3
        # ``map`` is a one-shot iterator, which breaks the documented
        # "list" contract (e.g. len() or iterating the result twice).
        return [self.get(key) for key in keys]
    def get_dict(self, *keys):
        """Like :meth:`get_many` but return a dict::

            d = cache.get_dict("foo", "bar")
            foo = d["foo"]
            bar = d["bar"]

        :param keys: The function accepts multiple keys as positional
                     arguments.
        """
        return dict(zip(keys, self.get_many(*keys)))
    def set(self, key, value, timeout=None):
        """Add a new key/value to the cache (overwrites value, if key already
        exists in the cache).

        :param key: the key to set
        :param value: the value for the key
        :param timeout: the cache timeout for the key in seconds (if not
                        specified, it uses the default timeout). A timeout of
                        0 indicates that the cache never expires.
        :returns: ``True`` if key has been updated, ``False`` for backend
                  errors. Pickling errors, however, will raise a subclass of
                  ``pickle.PickleError``.
        :rtype: boolean
        """
        return True
    def add(self, key, value, timeout=None):
        """Works like :meth:`set` but does not overwrite the values of already
        existing keys.

        :param key: the key to set
        :param value: the value for the key
        :param timeout: the cache timeout for the key in seconds (if not
                        specified, it uses the default timeout). A timeout of
                        0 indicates that the cache never expires.
        :returns: Same as :meth:`set`, but also ``False`` for already
                  existing keys.
        :rtype: boolean
        """
        return True
    def set_many(self, mapping, timeout=None):
        """Sets multiple keys and values from a mapping.

        :param mapping: a mapping with the keys/values to set.
        :param timeout: the cache timeout for the key in seconds (if not
                        specified, it uses the default timeout). A timeout of
                        0 indicates that the cache never expires.
        :returns: Whether all given keys have been set.
        :rtype: boolean
        """
        rv = True
        for key, value in _items(mapping):
            if not self.set(key, value, timeout):
                rv = False
        return rv
    def delete_many(self, *keys):
        """Deletes multiple keys at once.

        :param keys: The function accepts multiple keys as positional
                     arguments.
        :returns: Whether all given keys have been deleted.
        :rtype: boolean
        """
        return all(self.delete(key) for key in keys)
    def has(self, key):
        """Checks if a key exists in the cache without returning it. This is a
        cheap operation that bypasses loading the actual data on the backend.

        This method is optional and may not be implemented on all caches.

        :param key: the key to check
        """
        raise NotImplementedError(
            '%s doesn\'t have an efficient implementation of `has`. That '
            'means it is impossible to check whether a key exists without '
            'fully loading the key\'s data. Consider using `self.get` '
            'explicitly if you don\'t care about performance.'
        )
    def clear(self):
        """Clears the cache.  Keep in mind that not all caches support
        completely clearing the cache.

        :returns: Whether the cache has been cleared.
        :rtype: boolean
        """
        return True
    def inc(self, key, delta=1):
        """Increments the value of a key by `delta`.  If the key does
        not yet exist it is initialized with `delta`.

        For supporting caches this is an atomic operation.

        :param key: the key to increment.
        :param delta: the delta to add.
        :returns: The new value or ``None`` for backend errors.
        """
        value = (self.get(key) or 0) + delta
        return value if self.set(key, value) else None
    def dec(self, key, delta=1):
        """Decrements the value of a key by `delta`.  If the key does
        not yet exist it is initialized with `-delta`.

        For supporting caches this is an atomic operation.

        :param key: the key to increment.
        :param delta: the delta to subtract.
        :returns: The new value or `None` for backend errors.
        """
        value = (self.get(key) or 0) - delta
        return value if self.set(key, value) else None
class NullCache(BaseCache):
    """A cache that doesn't cache.  This can be useful for unit testing.

    All operations are the no-op defaults inherited from :class:`BaseCache`
    (``get`` always returns ``None``, ``set`` always reports success).

    :param default_timeout: a dummy parameter that is ignored but exists
                            for API compatibility with other caches.
    """
class SimpleCache(BaseCache):
    """Simple memory cache for single process environments.  This class exists
    mainly for the development server and is not 100% thread safe.  It tries
    to use as many atomic operations as possible and no locks for simplicity
    but it could happen under heavy load that keys are added multiple times.

    :param threshold: the maximum number of items the cache stores before
                      it starts deleting some.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`. A timeout of
                            0 indicates that the cache never expires.
    """
    def __init__(self, threshold=500, default_timeout=300):
        BaseCache.__init__(self, default_timeout)
        # Maps key -> (absolute expiry timestamp, pickled value).
        self._cache = {}
        # Expose the dict's own clear() as the cache-level clear operation.
        self.clear = self._cache.clear
        self._threshold = threshold
    def _prune(self):
        # Once over the threshold, evict every expired entry plus
        # (arbitrarily) every third entry to bound the cache size.
        if len(self._cache) > self._threshold:
            now = time()
            toremove = []
            for idx, (key, (expires, _)) in enumerate(self._cache.items()):
                if (expires != 0 and expires <= now) or idx % 3 == 0:
                    toremove.append(key)
            for key in toremove:
                self._cache.pop(key, None)
    def _normalize_timeout(self, timeout):
        # Convert a relative timeout into an absolute expiry timestamp;
        # 0 ("never expire") is passed through unchanged.
        timeout = BaseCache._normalize_timeout(self, timeout)
        if timeout > 0:
            timeout = time() + timeout
        return timeout
    def get(self, key):
        # An expired entry falls through and returns None implicitly
        # without being removed; _prune() reclaims it later.
        try:
            expires, value = self._cache[key]
            if expires == 0 or expires > time():
                return pickle.loads(value)
        except (KeyError, pickle.PickleError):
            return None
    def set(self, key, value, timeout=None):
        expires = self._normalize_timeout(timeout)
        self._prune()
        self._cache[key] = (expires, pickle.dumps(value,
                                                  pickle.HIGHEST_PROTOCOL))
        return True
    def add(self, key, value, timeout=None):
        expires = self._normalize_timeout(timeout)
        self._prune()
        item = (expires, pickle.dumps(value,
                                      pickle.HIGHEST_PROTOCOL))
        if key in self._cache:
            return False
        # setdefault keeps the check-and-insert as close to atomic as
        # possible without taking a lock.
        self._cache.setdefault(key, item)
        return True
    def delete(self, key):
        return self._cache.pop(key, None) is not None
    def has(self, key):
        try:
            expires, value = self._cache[key]
            return expires == 0 or expires > time()
        except KeyError:
            return False
# memcached rejects keys longer than 250 characters or containing bytes in
# \x00-\x21 (control characters and space) or \xff; this predicate matches
# only keys memcached will accept.
_test_memcached_key = re.compile(r'[^\x00-\x21\xff]{1,250}$').match
class MemcachedCache(BaseCache):
    """A cache that uses memcached as backend.

    The first argument can either be an object that resembles the API of a
    :class:`memcache.Client` or a tuple/list of server addresses. In the
    event that a tuple/list is passed, Werkzeug tries to import the best
    available memcache library.

    This cache looks into the following packages/modules to find bindings for
    memcached:

        - ``pylibmc``
        - ``google.appengine.api.memcached``
        - ``memcached``

    Implementation notes: This cache backend works around some limitations in
    memcached to simplify the interface. For example unicode keys are encoded
    to utf-8 on the fly. Methods such as :meth:`~BaseCache.get_dict` return
    the keys in the same format as passed. Furthermore all get methods
    silently ignore key errors to not cause problems when untrusted user data
    is passed to the get methods which is often the case in web applications.

    :param servers: a list or tuple of server addresses or alternatively
                    a :class:`memcache.Client` or a compatible client.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`. A timeout of
                            0 indicates that the cache never expires.
    :param key_prefix: a prefix that is added before all keys. This makes it
                       possible to use the same memcached server for different
                       applications. Keep in mind that
                       :meth:`~BaseCache.clear` will also clear keys with a
                       different prefix.
    """
    def __init__(self, servers=None, default_timeout=300, key_prefix=None):
        BaseCache.__init__(self, default_timeout)
        if servers is None or isinstance(servers, (list, tuple)):
            if servers is None:
                servers = ['127.0.0.1:11211']
            self._client = self.import_preferred_memcache_lib(servers)
            if self._client is None:
                raise RuntimeError('no memcache module found')
        else:
            # NOTE: servers is actually an already initialized memcache
            # client.
            self._client = servers
        self.key_prefix = to_native(key_prefix)
    def _normalize_key(self, key):
        # Keys are handled as native utf-8 strings with the configured
        # prefix prepended.
        key = to_native(key, 'utf-8')
        if self.key_prefix:
            key = self.key_prefix + key
        return key
    def _normalize_timeout(self, timeout):
        # memcached expects an absolute unix timestamp in whole seconds;
        # 0 ("never expire") is passed through unchanged.
        timeout = BaseCache._normalize_timeout(self, timeout)
        if timeout > 0:
            timeout = int(time()) + timeout
        return timeout
    def get(self, key):
        key = self._normalize_key(key)
        # memcached doesn't support keys longer than that. Because often
        # checks for so long keys can occur because it's tested from user
        # submitted data etc we fail silently for getting.
        if _test_memcached_key(key):
            return self._client.get(key)
    def get_dict(self, *keys):
        key_mapping = {}
        have_encoded_keys = False
        for key in keys:
            encoded_key = self._normalize_key(key)
            if not isinstance(key, str):
                have_encoded_keys = True
            # NOTE(review): the validity check runs on the raw key rather
            # than the normalized/prefixed one -- confirm this is intended
            # before relying on it for prefixed keys.
            if _test_memcached_key(key):
                key_mapping[encoded_key] = key
        d = rv = self._client.get_multi(key_mapping.keys())
        if have_encoded_keys or self.key_prefix:
            # Translate the backend's encoded keys back to the caller's
            # original key objects.
            rv = {}
            for key, value in iteritems(d):
                rv[key_mapping[key]] = value
        if len(rv) < len(keys):
            # Fill in None for keys the server did not return.
            for key in keys:
                if key not in rv:
                    rv[key] = None
        return rv
    def add(self, key, value, timeout=None):
        key = self._normalize_key(key)
        timeout = self._normalize_timeout(timeout)
        return self._client.add(key, value, timeout)
    def set(self, key, value, timeout=None):
        key = self._normalize_key(key)
        timeout = self._normalize_timeout(timeout)
        return self._client.set(key, value, timeout)
    def get_many(self, *keys):
        d = self.get_dict(*keys)
        return [d[key] for key in keys]
    def set_many(self, mapping, timeout=None):
        new_mapping = {}
        for key, value in _items(mapping):
            key = self._normalize_key(key)
            new_mapping[key] = value
        timeout = self._normalize_timeout(timeout)
        failed_keys = self._client.set_multi(new_mapping, timeout)
        return not failed_keys
    def delete(self, key):
        key = self._normalize_key(key)
        if _test_memcached_key(key):
            return self._client.delete(key)
    def delete_many(self, *keys):
        new_keys = []
        for key in keys:
            key = self._normalize_key(key)
            if _test_memcached_key(key):
                new_keys.append(key)
        return self._client.delete_multi(new_keys)
    def has(self, key):
        key = self._normalize_key(key)
        if _test_memcached_key(key):
            # Appending an empty string only succeeds when the key exists,
            # without transferring the stored value.
            return self._client.append(key, '')
        return False
    def clear(self):
        return self._client.flush_all()
    def inc(self, key, delta=1):
        key = self._normalize_key(key)
        return self._client.incr(key, delta)
    def dec(self, key, delta=1):
        key = self._normalize_key(key)
        return self._client.decr(key, delta)
    def import_preferred_memcache_lib(self, servers):
        """Returns an initialized memcache client.  Used by the constructor."""
        # Preference order: pylibmc, App Engine's memcache, python-memcached.
        try:
            import pylibmc
        except ImportError:
            pass
        else:
            return pylibmc.Client(servers)
        try:
            from google.appengine.api import memcache
        except ImportError:
            pass
        else:
            return memcache.Client()
        try:
            import memcache
        except ImportError:
            pass
        else:
            return memcache.Client(servers)
# backwards compatibility
GAEMemcachedCache = MemcachedCache
class RedisCache(BaseCache):
    """Uses the Redis key-value store as a cache backend.

    The first argument can be either a string denoting address of the Redis
    server or an object resembling an instance of a redis.Redis class.

    Note: Python Redis API already takes care of encoding unicode strings on
    the fly.

    .. versionadded:: 0.7

    .. versionadded:: 0.8
       `key_prefix` was added.

    .. versionchanged:: 0.8
       This cache backend now properly serializes objects.

    .. versionchanged:: 0.8.3
       This cache backend now supports password authentication.

    .. versionchanged:: 0.10
        ``**kwargs`` is now passed to the redis object.

    :param host: address of the Redis server or an object which API is
                 compatible with the official Python Redis client (redis-py).
    :param port: port number on which Redis server listens for connections.
    :param password: password authentication for the Redis server.
    :param db: db (zero-based numeric index) on Redis Server to connect.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`. A timeout of
                            0 indicates that the cache never expires.
    :param key_prefix: A prefix that should be added to all keys.

    Any additional keyword arguments will be passed to ``redis.Redis``.
    """
    def __init__(self, host='localhost', port=6379, password=None,
                 db=0, default_timeout=300, key_prefix=None, **kwargs):
        BaseCache.__init__(self, default_timeout)
        if isinstance(host, string_types):
            try:
                import redis
            except ImportError:
                raise RuntimeError('no redis module found')
            if kwargs.get('decode_responses', None):
                # load_object needs raw bytes back from the server.
                raise ValueError('decode_responses is not supported by '
                                 'RedisCache.')
            self._client = redis.Redis(host=host, port=port, password=password,
                                       db=db, **kwargs)
        else:
            # host is an already configured client object; use it directly.
            self._client = host
        self.key_prefix = key_prefix or ''
    def _normalize_timeout(self, timeout):
        # -1 is used internally as the "never expire" sentinel where this
        # API uses 0.
        timeout = BaseCache._normalize_timeout(self, timeout)
        if timeout == 0:
            timeout = -1
        return timeout
    def dump_object(self, value):
        """Dumps an object into a string for redis.  By default it serializes
        integers as regular string and pickle dumps everything else.
        """
        t = type(value)
        if t in integer_types:
            return str(value).encode('ascii')
        # A leading '!' marks the payload as pickled data.
        return b'!' + pickle.dumps(value)
    def load_object(self, value):
        """The reversal of :meth:`dump_object`.  This might be called with
        None.
        """
        if value is None:
            return None
        if value.startswith(b'!'):
            try:
                return pickle.loads(value[1:])
            except pickle.PickleError:
                return None
        try:
            return int(value)
        except ValueError:
            # before 0.8 we did not have serialization. Still support that.
            return value
    def get(self, key):
        return self.load_object(self._client.get(self.key_prefix + key))
    def get_many(self, *keys):
        if self.key_prefix:
            keys = [self.key_prefix + key for key in keys]
        return [self.load_object(x) for x in self._client.mget(keys)]
    def set(self, key, value, timeout=None):
        timeout = self._normalize_timeout(timeout)
        dump = self.dump_object(value)
        if timeout == -1:
            # No expiry: plain SET instead of SETEX.
            result = self._client.set(name=self.key_prefix + key,
                                      value=dump)
        else:
            result = self._client.setex(name=self.key_prefix + key,
                                        value=dump, time=timeout)
        return result
    def add(self, key, value, timeout=None):
        timeout = self._normalize_timeout(timeout)
        dump = self.dump_object(value)
        # SETNX followed by EXPIRE: two round trips, so the expiry is not
        # applied atomically with the insert.
        return (
            self._client.setnx(name=self.key_prefix + key, value=dump) and
            self._client.expire(name=self.key_prefix + key, time=timeout)
        )
    def set_many(self, mapping, timeout=None):
        timeout = self._normalize_timeout(timeout)
        # Use transaction=False to batch without calling redis MULTI
        # which is not supported by twemproxy
        pipe = self._client.pipeline(transaction=False)
        for key, value in _items(mapping):
            dump = self.dump_object(value)
            if timeout == -1:
                pipe.set(name=self.key_prefix + key, value=dump)
            else:
                pipe.setex(name=self.key_prefix + key, value=dump,
                           time=timeout)
        return pipe.execute()
    def delete(self, key):
        return self._client.delete(self.key_prefix + key)
    def delete_many(self, *keys):
        # NOTE: returns None (not a boolean) when called with no keys.
        if not keys:
            return
        if self.key_prefix:
            keys = [self.key_prefix + key for key in keys]
        return self._client.delete(*keys)
    def has(self, key):
        return self._client.exists(self.key_prefix + key)
    def clear(self):
        status = False
        if self.key_prefix:
            # Only delete keys under our prefix (KEYS scans the whole db).
            keys = self._client.keys(self.key_prefix + '*')
            if keys:
                status = self._client.delete(*keys)
        else:
            status = self._client.flushdb()
        return status
    def inc(self, key, delta=1):
        return self._client.incr(name=self.key_prefix + key, amount=delta)
    def dec(self, key, delta=1):
        return self._client.decr(name=self.key_prefix + key, amount=delta)
class FileSystemCache(BaseCache):
    """A cache that stores the items on the file system.  This cache depends
    on being the only user of the `cache_dir`.  Make absolutely sure that
    nobody but this cache stores files there or otherwise the cache will
    randomly delete files therein.

    Each entry is one file named by the md5 of its key, containing a pickled
    expiry timestamp followed by the pickled value.

    :param cache_dir: the directory where cache files are stored.
    :param threshold: the maximum number of items the cache stores before
                      it starts deleting some.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`. A timeout of
                            0 indicates that the cache never expires.
    :param mode: the file mode wanted for the cache files, default 0600
    """
    #: used for temporary files by the FileSystemCache
    _fs_transaction_suffix = '.__wz_cache'
    def __init__(self, cache_dir, threshold=500, default_timeout=300,
                 mode=0o600):
        BaseCache.__init__(self, default_timeout)
        self._path = cache_dir
        self._threshold = threshold
        self._mode = mode
        try:
            os.makedirs(self._path)
        except OSError as ex:
            # An already existing directory is fine; anything else is not.
            if ex.errno != errno.EEXIST:
                raise
    def _normalize_timeout(self, timeout):
        # Store an absolute integer expiry timestamp; 0 means never expire.
        timeout = BaseCache._normalize_timeout(self, timeout)
        if timeout != 0:
            timeout = time() + timeout
        return int(timeout)
    def _list_dir(self):
        """return a list of (fully qualified) cache filenames
        """
        # Skip in-flight temporary files created by set().
        return [os.path.join(self._path, fn) for fn in os.listdir(self._path)
                if not fn.endswith(self._fs_transaction_suffix)]
    def _prune(self):
        # Once over the threshold, drop every expired entry plus
        # (arbitrarily) every third file to bound the cache size.
        entries = self._list_dir()
        if len(entries) > self._threshold:
            now = time()
            for idx, fname in enumerate(entries):
                try:
                    remove = False
                    with open(fname, 'rb') as f:
                        expires = pickle.load(f)
                    remove = (expires != 0 and expires <= now) or idx % 3 == 0
                    if remove:
                        os.remove(fname)
                except (IOError, OSError):
                    pass
    def clear(self):
        for fname in self._list_dir():
            try:
                os.remove(fname)
            except (IOError, OSError):
                return False
        return True
    def _get_filename(self, key):
        # Cache files are named by the md5 hex digest of the utf-8 key.
        if isinstance(key, text_type):
            key = key.encode('utf-8')  # XXX unicode review
        hash = md5(key).hexdigest()
        return os.path.join(self._path, hash)
    def get(self, key):
        # The file holds the pickled expiry first, then the pickled value;
        # expired files are removed eagerly here.
        filename = self._get_filename(key)
        try:
            with open(filename, 'rb') as f:
                pickle_time = pickle.load(f)
                if pickle_time == 0 or pickle_time >= time():
                    return pickle.load(f)
                else:
                    os.remove(filename)
                    return None
        except (IOError, OSError, pickle.PickleError):
            return None
    def add(self, key, value, timeout=None):
        # NOTE: exists-then-set is not atomic; concurrent writers can race.
        filename = self._get_filename(key)
        if not os.path.exists(filename):
            return self.set(key, value, timeout)
        return False
    def set(self, key, value, timeout=None):
        timeout = self._normalize_timeout(timeout)
        filename = self._get_filename(key)
        self._prune()
        try:
            # Write to a temporary file and rename it into place so readers
            # never observe a partially written entry.
            fd, tmp = tempfile.mkstemp(suffix=self._fs_transaction_suffix,
                                       dir=self._path)
            with os.fdopen(fd, 'wb') as f:
                pickle.dump(timeout, f, 1)
                pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
            rename(tmp, filename)
            os.chmod(filename, self._mode)
        except (IOError, OSError):
            return False
        else:
            return True
    def delete(self, key):
        try:
            os.remove(self._get_filename(key))
        except (IOError, OSError):
            return False
        else:
            return True
    def has(self, key):
        # Same expiry handling as get(), but without unpickling the value.
        filename = self._get_filename(key)
        try:
            with open(filename, 'rb') as f:
                pickle_time = pickle.load(f)
                if pickle_time == 0 or pickle_time >= time():
                    return True
                else:
                    os.remove(filename)
                    return False
        except (IOError, OSError, pickle.PickleError):
            return False
class UWSGICache(BaseCache):
    """Implements the cache using uWSGI's caching framework.

    .. note::
        This class cannot be used when running under PyPy, because the uWSGI
        API implementation for PyPy is lacking the needed functionality.

    :param default_timeout: The default timeout in seconds.
    :param cache: The name of the caching instance to connect to, for
        example: mycache@localhost:3031, defaults to an empty string, which
        means uWSGI will cache in the local instance. If the cache is in the
        same instance as the werkzeug app, you only have to provide the name of
        the cache.
    """
    def __init__(self, default_timeout=300, cache=''):
        BaseCache.__init__(self, default_timeout)
        if platform.python_implementation() == 'PyPy':
            raise RuntimeError("uWSGI caching does not work under PyPy, see "
                               "the docs for more details.")
        try:
            # Only importable when actually running inside uWSGI.
            import uwsgi
            self._uwsgi = uwsgi
        except ImportError:
            raise RuntimeError("uWSGI could not be imported, are you "
                               "running under uWSGI?")
        self.cache = cache
    def get(self, key):
        rv = self._uwsgi.cache_get(key, self.cache)
        if rv is None:
            return
        return pickle.loads(rv)
    def delete(self, key):
        return self._uwsgi.cache_del(key, self.cache)
    def set(self, key, value, timeout=None):
        # cache_update overwrites an existing key.
        return self._uwsgi.cache_update(key, pickle.dumps(value),
                                        self._normalize_timeout(timeout),
                                        self.cache)
    def add(self, key, value, timeout=None):
        # cache_set only succeeds when the key does not exist yet.
        return self._uwsgi.cache_set(key, pickle.dumps(value),
                                     self._normalize_timeout(timeout),
                                     self.cache)
    def clear(self):
        return self._uwsgi.cache_clear(self.cache)
    def has(self, key):
        return self._uwsgi.cache_exists(key, self.cache) is not None
| mit |
babble/babble | include/jython/Lib/test/test_binop.py | 87 | 10683 | """Tests for binary operators on subtypes of built-in types."""
import unittest
from test import test_support
def gcd(a, b):
    """Greatest common divisor using Euclid's algorithm."""
    # Recursive formulation of the original loop: gcd(a, b) == gcd(b % a, a)
    # until a reaches 0, at which point b is the answer.  Because of
    # Python's modulo semantics the result keeps the sign behaviour of the
    # iterative version (e.g. gcd(-10, 2) == -2).
    if not a:
        return b
    return gcd(b % a, a)
def isint(x):
    """Test whether an object is an instance of int or long."""
    # Check int first so the common case never evaluates the long branch
    # (preserving the original's short-circuit behaviour).
    return True if isinstance(x, int) else isinstance(x, long)
def isnum(x):
    """Test whether an object is an instance of a built-in numeric type."""
    # Probe each numeric type in the original order, reporting 1/0
    # (not True/False) exactly as the hand-written loop did.
    if any(isinstance(x, T) for T in (int, long, float, complex)):
        return 1
    return 0
def isRat(x):
    """Test whether an object is an instance of the Rat class."""
    return isinstance(x, Rat)
class Rat(object):
    """Rational number implemented as a normalized pair of longs."""
    # The slot names are spelled pre-mangled because the attributes are
    # accessed as self.__num / self.__den inside the class body.
    __slots__ = ['_Rat__num', '_Rat__den']
    def __init__(self, num=0L, den=1L):
        """Constructor: Rat([num[, den]]).

        The arguments must be ints or longs, and default to (0, 1)."""
        if not isint(num):
            raise TypeError, "Rat numerator must be int or long (%r)" % num
        if not isint(den):
            raise TypeError, "Rat denominator must be int or long (%r)" % den
        # A zero denominator can never be normalized away.
        if den == 0:
            raise ZeroDivisionError, "zero denominator"
        # gcd(den, num) takes the sign of den, so the floor divisions below
        # always leave the denominator positive.
        g = gcd(den, num)
        self.__num = long(num//g)
        self.__den = long(den//g)
    def _get_num(self):
        """Accessor function for read-only 'num' attribute of Rat."""
        return self.__num
    num = property(_get_num, None)
    def _get_den(self):
        """Accessor function for read-only 'den' attribute of Rat."""
        return self.__den
    den = property(_get_den, None)
    def __repr__(self):
        """Convert a Rat to a string resembling a Rat constructor call."""
        return "Rat(%d, %d)" % (self.__num, self.__den)
    def __str__(self):
        """Convert a Rat to a string resembling a decimal numeric value."""
        return str(float(self))
    def __float__(self):
        """Convert a Rat to a float."""
        return self.__num*1.0/self.__den
    def __int__(self):
        """Convert a Rat to an int; self.den must be 1."""
        if self.__den == 1:
            try:
                return int(self.__num)
            except OverflowError:
                raise OverflowError, ("%s too large to convert to int" %
                                      repr(self))
        raise ValueError, "can't convert %s to int" % repr(self)
    def __long__(self):
        """Convert a Rat to a long; self.den must be 1."""
        if self.__den == 1:
            return long(self.__num)
        raise ValueError, "can't convert %s to long" % repr(self)
    def __add__(self, other):
        """Add two Rats, or a Rat and a number."""
        if isint(other):
            other = Rat(other)
        if isRat(other):
            # a/b + c/d == (a*d + c*b)/(b*d); Rat() renormalizes the result.
            return Rat(self.__num*other.__den + other.__num*self.__den,
                       self.__den*other.__den)
        if isnum(other):
            # Mixed arithmetic with other numeric types degrades to float.
            return float(self) + other
        return NotImplemented
    __radd__ = __add__
    def __sub__(self, other):
        """Subtract two Rats, or a Rat and a number."""
        if isint(other):
            other = Rat(other)
        if isRat(other):
            return Rat(self.__num*other.__den - other.__num*self.__den,
                       self.__den*other.__den)
        if isnum(other):
            return float(self) - other
        return NotImplemented
    def __rsub__(self, other):
        """Subtract two Rats, or a Rat and a number (reversed args)."""
        if isint(other):
            other = Rat(other)
        if isRat(other):
            return Rat(other.__num*self.__den - self.__num*other.__den,
                       self.__den*other.__den)
        if isnum(other):
            return other - float(self)
        return NotImplemented
    def __mul__(self, other):
        """Multiply two Rats, or a Rat and a number."""
        if isRat(other):
            return Rat(self.__num*other.__num, self.__den*other.__den)
        if isint(other):
            return Rat(self.__num*other, self.__den)
        if isnum(other):
            return float(self)*other
        return NotImplemented
    __rmul__ = __mul__
    def __truediv__(self, other):
        """Divide two Rats, or a Rat and a number."""
        if isRat(other):
            return Rat(self.__num*other.__den, self.__den*other.__num)
        if isint(other):
            return Rat(self.__num, self.__den*other)
        if isnum(other):
            return float(self) / other
        return NotImplemented
    # Classic (pre-__future__) division uses the same implementation.
    __div__ = __truediv__
    def __rtruediv__(self, other):
        """Divide two Rats, or a Rat and a number (reversed args)."""
        if isRat(other):
            return Rat(other.__num*self.__den, other.__den*self.__num)
        if isint(other):
            return Rat(other*self.__den, self.__num)
        if isnum(other):
            return other / float(self)
        return NotImplemented
    __rdiv__ = __rtruediv__
    def __floordiv__(self, other):
        """Divide two Rats, returning the floored result."""
        if isint(other):
            other = Rat(other)
        elif not isRat(other):
            return NotImplemented
        x = self/other
        # Floor of the exact quotient (den is positive by construction).
        return x.__num // x.__den
    def __rfloordiv__(self, other):
        """Divide two Rats, returning the floored result (reversed args)."""
        x = other/self
        return x.__num // x.__den
    def __divmod__(self, other):
        """Divide two Rats, returning quotient and remainder."""
        if isint(other):
            other = Rat(other)
        elif not isRat(other):
            return NotImplemented
        x = self//other
        return (x, self - other * x)
    def __rdivmod__(self, other):
        """Divide two Rats, returning quotient and remainder (reversed args)."""
        if isint(other):
            other = Rat(other)
        elif not isRat(other):
            return NotImplemented
        return divmod(other, self)
    def __mod__(self, other):
        """Take one Rat modulo another."""
        return divmod(self, other)[1]
    def __rmod__(self, other):
        """Take one Rat modulo another (reversed args)."""
        return divmod(other, self)[1]
    def __eq__(self, other):
        """Compare two Rats for equality."""
        if isint(other):
            # Normalization guarantees an integral Rat has den == 1.
            return self.__den == 1 and self.__num == other
        if isRat(other):
            return self.__num == other.__num and self.__den == other.__den
        if isnum(other):
            return float(self) == other
        return NotImplemented
    def __ne__(self, other):
        """Compare two Rats for inequality."""
        return not self == other
class RatTestCase(unittest.TestCase):
"""Unit tests for Rat class and its support utilities."""
def test_gcd(self):
self.assertEqual(gcd(10, 12), 2)
self.assertEqual(gcd(10, 15), 5)
self.assertEqual(gcd(10, 11), 1)
self.assertEqual(gcd(100, 15), 5)
self.assertEqual(gcd(-10, 2), -2)
self.assertEqual(gcd(10, -2), 2)
self.assertEqual(gcd(-10, -2), -2)
for i in range(1, 20):
for j in range(1, 20):
self.assert_(gcd(i, j) > 0)
self.assert_(gcd(-i, j) < 0)
self.assert_(gcd(i, -j) > 0)
self.assert_(gcd(-i, -j) < 0)
def test_constructor(self):
a = Rat(10, 15)
self.assertEqual(a.num, 2)
self.assertEqual(a.den, 3)
a = Rat(10L, 15L)
self.assertEqual(a.num, 2)
self.assertEqual(a.den, 3)
a = Rat(10, -15)
self.assertEqual(a.num, -2)
self.assertEqual(a.den, 3)
a = Rat(-10, 15)
self.assertEqual(a.num, -2)
self.assertEqual(a.den, 3)
a = Rat(-10, -15)
self.assertEqual(a.num, 2)
self.assertEqual(a.den, 3)
a = Rat(7)
self.assertEqual(a.num, 7)
self.assertEqual(a.den, 1)
try:
a = Rat(1, 0)
except ZeroDivisionError:
pass
else:
self.fail("Rat(1, 0) didn't raise ZeroDivisionError")
for bad in "0", 0.0, 0j, (), [], {}, None, Rat, unittest:
try:
a = Rat(bad)
except TypeError:
pass
else:
self.fail("Rat(%r) didn't raise TypeError" % bad)
try:
a = Rat(1, bad)
except TypeError:
pass
else:
self.fail("Rat(1, %r) didn't raise TypeError" % bad)
def test_add(self):
self.assertEqual(Rat(2, 3) + Rat(1, 3), 1)
self.assertEqual(Rat(2, 3) + 1, Rat(5, 3))
self.assertEqual(1 + Rat(2, 3), Rat(5, 3))
self.assertEqual(1.0 + Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) + 1.0, 1.5)
def test_sub(self):
self.assertEqual(Rat(7, 2) - Rat(7, 5), Rat(21, 10))
self.assertEqual(Rat(7, 5) - 1, Rat(2, 5))
self.assertEqual(1 - Rat(3, 5), Rat(2, 5))
self.assertEqual(Rat(3, 2) - 1.0, 0.5)
self.assertEqual(1.0 - Rat(1, 2), 0.5)
def test_mul(self):
self.assertEqual(Rat(2, 3) * Rat(5, 7), Rat(10, 21))
self.assertEqual(Rat(10, 3) * 3, 10)
self.assertEqual(3 * Rat(10, 3), 10)
self.assertEqual(Rat(10, 5) * 0.5, 1.0)
self.assertEqual(0.5 * Rat(10, 5), 1.0)
def test_div(self):
self.assertEqual(Rat(10, 3) / Rat(5, 7), Rat(14, 3))
self.assertEqual(Rat(10, 3) / 3, Rat(10, 9))
self.assertEqual(2 / Rat(5), Rat(2, 5))
self.assertEqual(3.0 * Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) * 3.0, 1.5)
def test_floordiv(self):
    """Floor division between Rats, and between Rats and plain ints."""
    for dividend, divisor in ((Rat(10), Rat(4)),
                              (Rat(10, 3), Rat(4, 3)),
                              (Rat(10), 4),
                              (10, Rat(4))):
        self.assertEqual(dividend // divisor, 2)
def test_eq(self):
    """Equality must hold across Rat/int/float in both operand orders."""
    self.assertEqual(Rat(10), Rat(20, 2))
    for other in (10, 10.0):
        self.assertEqual(Rat(10), other)
        self.assertEqual(other, Rat(10))
def test_future_div(self):
    """Re-run the division checks under ``from __future__ import division``.

    ``future_test`` (a module-level source string) is exec'd so the future
    statement takes effect only inside that compiled snippet (Python 2
    semantics); the snippet sees this method's ``self`` via the local scope.
    """
    exec future_test
# XXX Ran out of steam; TO DO: divmod, div, future division
# Source snippet exec'd by RatTestCase.test_future_div; kept as text so the
# __future__ import applies only to this snippet, not the whole module.
future_test = """
from __future__ import division
self.assertEqual(Rat(10, 3) / Rat(5, 7), Rat(14, 3))
self.assertEqual(Rat(10, 3) / 3, Rat(10, 9))
self.assertEqual(2 / Rat(5), Rat(2, 5))
self.assertEqual(3.0 * Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) * 3.0, 1.5)
self.assertEqual(eval('1/2'), 0.5)
"""
def test_main():
    """Entry point used by Python's regrtest framework: run the Rat tests."""
    test_support.run_unittest(RatTestCase)

if __name__ == "__main__":
    test_main()
| apache-2.0 |
danielfrg/word2vec | word2vec/wordclusters.py | 1 | 1041 | import numpy as np
class WordClusters(object):
    """Maps a word2vec vocabulary to the cluster id assigned to each word.

    `vocab` and `clusters` are parallel numpy arrays: `clusters[i]` is the
    cluster number of the word `vocab[i]`.
    """

    def __init__(self, vocab, clusters):
        self.vocab = vocab
        self.clusters = clusters

    def ix(self, word):
        """
        Returns the index on self.vocab and self.clusters for 'word'
        """
        matches = np.where(self.vocab == word)[0]
        if matches.size == 0:
            raise KeyError("Word not in vocabulary")
        return matches[0]

    def __getitem__(self, word):
        # Dictionary-style access delegates to get_cluster()
        return self.get_cluster(word)

    def get_cluster(self, word):
        """
        Returns the cluster number for a word in the vocabulary
        """
        return self.clusters[self.ix(word)]

    def get_words_on_cluster(self, cluster):
        """Returns every vocabulary word assigned to `cluster`."""
        return self.vocab[self.clusters == cluster]

    @classmethod
    def from_text(cls, fname):
        """Builds a WordClusters from a space-delimited 'word cluster' file."""
        vocab = np.genfromtxt(fname, dtype=str, delimiter=" ", usecols=0)
        clusters = np.genfromtxt(fname, dtype=int, delimiter=" ", usecols=1)
        return cls(vocab=vocab, clusters=clusters)
| apache-2.0 |
brokenjacobs/ansible | lib/ansible/modules/network/cloudengine/ce_snmp_traps.py | 39 | 19559 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Standard Ansible module metadata: community-supported, interface still
# subject to change ('preview').
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: ce_snmp_traps
version_added: "2.4"
short_description: Manages SNMP traps configuration on HUAWEI CloudEngine switches.
description:
- Manages SNMP traps configurations on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
feature_name:
description:
- Alarm feature name.
required: false
default: null
choices: ['aaa', 'arp', 'bfd', 'bgp', 'cfg', 'configuration', 'dad', 'devm',
'dhcpsnp', 'dldp', 'driver', 'efm', 'erps', 'error-down', 'fcoe',
'fei', 'fei_comm', 'fm', 'ifnet', 'info', 'ipsg', 'ipv6', 'isis',
'l3vpn', 'lacp', 'lcs', 'ldm', 'ldp', 'ldt', 'lldp', 'mpls_lspm',
'msdp', 'mstp', 'nd', 'netconf', 'nqa', 'nvo3', 'openflow', 'ospf',
'ospfv3', 'pim', 'pim-std', 'qos', 'radius', 'rm', 'rmon', 'securitytrap',
'smlktrap', 'snmp', 'ssh', 'stackmng', 'sysclock', 'sysom', 'system',
'tcp', 'telnet', 'trill', 'trunk', 'tty', 'vbst', 'vfs', 'virtual-perception',
'vrrp', 'vstm', 'all']
trap_name:
description:
- Alarm trap name.
required: false
default: null
interface_type:
description:
- Interface type.
required: false
default: null
choices: ['Ethernet', 'Eth-Trunk', 'Tunnel', 'NULL', 'LoopBack', 'Vlanif', '100GE',
'40GE', 'MTunnel', '10GE', 'GE', 'MEth', 'Vbdif', 'Nve']
interface_number:
description:
- Interface number.
required: false
default: null
port_number:
description:
- Source port number.
required: false
default: null
'''
EXAMPLES = '''
- name: CloudEngine snmp traps test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config SNMP trap all enable"
ce_snmp_traps:
state: present
feature_name: all
provider: "{{ cli }}"
- name: "Config SNMP trap interface"
ce_snmp_traps:
state: present
interface_type: 40GE
interface_number: 2/0/1
provider: "{{ cli }}"
- name: "Config SNMP trap port"
ce_snmp_traps:
state: present
port_number: 2222
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"feature_name": "all",
"state": "present"}
existing:
description: k/v pairs of existing snmp-agent trap configuration
returned: always
type: dict
sample: {"snmp-agent trap": [],
"undo snmp-agent trap": []}
end_state:
description: k/v pairs of snmp-agent trap params after module execution
returned: always
type: dict
sample: {"snmp-agent trap": ["enable"],
"undo snmp-agent trap": []}
updates:
description: command sent to the device
returned: always
type: list
sample: ["snmp-agent trap enable"]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_config, load_config, ce_argument_spec, run_commands
class SnmpTraps(object):
    """ Manages SNMP trap configuration on CloudEngine devices.

    Reads the running configuration over the CLI, compares it with the
    requested state, and issues 'snmp-agent trap ...' / 'undo snmp-agent
    trap ...' commands only when the device differs from the request.
    """

    def __init__(self, **kwargs):
        """ Class init """

        # module
        argument_spec = kwargs["argument_spec"]
        self.spec = argument_spec
        self.module = AnsibleModule(
            argument_spec=self.spec,
            # a trap source interface needs both its type and its number
            required_together=[("interface_type", "interface_number")],
            supports_check_mode=True
        )

        # config: trap-related lines found in the running configuration
        self.cur_cfg = dict()
        self.cur_cfg["snmp-agent trap"] = []
        self.cur_cfg["undo snmp-agent trap"] = []

        # module args
        self.state = self.module.params['state']
        self.feature_name = self.module.params['feature_name']
        self.trap_name = self.module.params['trap_name']
        self.interface_type = self.module.params['interface_type']
        self.interface_number = self.module.params['interface_number']
        self.port_number = self.module.params['port_number']

        # state reported back to the user
        self.changed = False
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.existing["snmp-agent trap"] = []
        self.existing["undo snmp-agent trap"] = []
        self.end_state = dict()
        self.end_state["snmp-agent trap"] = []
        self.end_state["undo snmp-agent trap"] = []

        # cache the interface summary so check_args() can verify that a
        # requested trap source interface actually exists on the device
        commands = list()
        cmd1 = 'display interface brief'
        commands.append(cmd1)
        self.interface = run_commands(self.module, commands)

    def check_args(self):
        """ Check invalid args: port range, port format, interface existence """

        if self.port_number:
            if self.port_number.isdigit():
                # only non-well-known ports are accepted for the trap source
                if int(self.port_number) < 1025 or int(self.port_number) > 65535:
                    self.module.fail_json(
                        msg='Error: The value of port_number is out of [1025 - 65535].')
            else:
                self.module.fail_json(
                    msg='Error: The port_number is not digit.')

        if self.interface_type and self.interface_number:
            tmp_interface = self.interface_type + self.interface_number
            # self.interface[0] is the raw 'display interface brief' output
            if tmp_interface not in self.interface[0]:
                self.module.fail_json(
                    msg='Error: The interface %s is not in the device.' % tmp_interface)

    def get_proposed(self):
        """ Build the 'proposed' dict from the (non-empty) module params """

        self.proposed["state"] = self.state

        if self.feature_name:
            self.proposed["feature_name"] = self.feature_name
        if self.trap_name:
            self.proposed["trap_name"] = self.trap_name
        if self.interface_type:
            self.proposed["interface_type"] = self.interface_type
        if self.interface_number:
            self.proposed["interface_number"] = self.interface_number
        if self.port_number:
            self.proposed["port_number"] = self.port_number

    def get_existing(self):
        """ Parse the current trap configuration into cur_cfg/existing """

        tmp_cfg = self.cli_get_config()
        if tmp_cfg:
            # lower-cased copy for feature/trap-name matching; the original
            # case is kept for interface names (e.g. '40GE2/0/1')
            temp_cfg_lower = tmp_cfg.lower()
            temp_data = tmp_cfg.split("\n")
            temp_data_lower = temp_cfg_lower.split("\n")

            for item in temp_data:
                if "snmp-agent trap source-port " in item:
                    if self.port_number:
                        item_tmp = item.split("snmp-agent trap source-port ")
                        self.cur_cfg["trap source-port"] = item_tmp[1]
                        self.existing["trap source-port"] = item_tmp[1]
                elif "snmp-agent trap source " in item:
                    if self.interface_type:
                        item_tmp = item.split("snmp-agent trap source ")
                        self.cur_cfg["trap source interface"] = item_tmp[1]
                        self.existing["trap source interface"] = item_tmp[1]

            if self.feature_name:
                for item in temp_data_lower:
                    if item == "snmp-agent trap enable":
                        self.cur_cfg["snmp-agent trap"].append("enable")
                        self.existing["snmp-agent trap"].append("enable")
                    elif item == "snmp-agent trap disable":
                        self.cur_cfg["snmp-agent trap"].append("disable")
                        self.existing["snmp-agent trap"].append("disable")
                    # 'undo ...' must be tested before the plain prefix,
                    # because the plain prefix is a substring of it
                    elif "undo snmp-agent trap enable " in item:
                        item_tmp = item.split("undo snmp-agent trap enable ")
                        self.cur_cfg[
                            "undo snmp-agent trap"].append(item_tmp[1])
                        self.existing[
                            "undo snmp-agent trap"].append(item_tmp[1])
                    elif "snmp-agent trap enable " in item:
                        item_tmp = item.split("snmp-agent trap enable ")
                        self.cur_cfg["snmp-agent trap"].append(item_tmp[1])
                        self.existing["snmp-agent trap"].append(item_tmp[1])
            else:
                # no feature filter requested: drop the list keys so the
                # reported 'existing' dict stays minimal
                del self.existing["snmp-agent trap"]
                del self.existing["undo snmp-agent trap"]

    def get_end_state(self):
        """ Re-parse the device config after changes into end_state """

        tmp_cfg = self.cli_get_config()
        if tmp_cfg:
            temp_cfg_lower = tmp_cfg.lower()
            temp_data = tmp_cfg.split("\n")
            temp_data_lower = temp_cfg_lower.split("\n")

            for item in temp_data:
                if "snmp-agent trap source-port " in item:
                    if self.port_number:
                        item_tmp = item.split("snmp-agent trap source-port ")
                        self.end_state["trap source-port"] = item_tmp[1]
                elif "snmp-agent trap source " in item:
                    if self.interface_type:
                        item_tmp = item.split("snmp-agent trap source ")
                        self.end_state["trap source interface"] = item_tmp[1]

            if self.feature_name:
                for item in temp_data_lower:
                    if item == "snmp-agent trap enable":
                        self.end_state["snmp-agent trap"].append("enable")
                    elif item == "snmp-agent trap disable":
                        self.end_state["snmp-agent trap"].append("disable")
                    elif "undo snmp-agent trap enable " in item:
                        item_tmp = item.split("undo snmp-agent trap enable ")
                        self.end_state[
                            "undo snmp-agent trap"].append(item_tmp[1])
                    elif "snmp-agent trap enable " in item:
                        item_tmp = item.split("snmp-agent trap enable ")
                        self.end_state["snmp-agent trap"].append(item_tmp[1])
            else:
                del self.end_state["snmp-agent trap"]
                del self.end_state["undo snmp-agent trap"]

    def cli_load_config(self, commands):
        """ Load configure through cli (no-op in check mode) """

        if not self.module.check_mode:
            load_config(self.module, commands)

    def cli_get_config(self):
        """ Get the trap-related running configuration through cli """

        regular = "| include snmp | include trap"
        flags = list()
        flags.append(regular)
        tmp_cfg = get_config(self.module, flags)

        return tmp_cfg

    def set_trap_feature_name(self):
        """ Enable traps for a feature (or all features) """

        if self.feature_name == "all":
            cmd = "snmp-agent trap enable"
        else:
            cmd = "snmp-agent trap enable feature-name %s" % self.feature_name
            if self.trap_name:
                cmd += " trap-name %s" % self.trap_name
        self.updates_cmd.append(cmd)

        cmds = list()
        cmds.append(cmd)

        self.cli_load_config(cmds)
        self.changed = True

    def undo_trap_feature_name(self):
        """ Disable traps for a feature (or all features) """

        if self.feature_name == "all":
            cmd = "undo snmp-agent trap enable"
        else:
            cmd = "undo snmp-agent trap enable feature-name %s" % self.feature_name
            if self.trap_name:
                cmd += " trap-name %s" % self.trap_name
        self.updates_cmd.append(cmd)

        cmds = list()
        cmds.append(cmd)

        self.cli_load_config(cmds)
        self.changed = True

    def set_trap_source_interface(self):
        """ Set the source interface for traps """

        cmd = "snmp-agent trap source %s %s" % (
            self.interface_type, self.interface_number)
        self.updates_cmd.append(cmd)

        cmds = list()
        cmds.append(cmd)

        self.cli_load_config(cmds)
        self.changed = True

    def undo_trap_source_interface(self):
        """ Remove the configured trap source interface """

        cmd = "undo snmp-agent trap source"
        self.updates_cmd.append(cmd)

        cmds = list()
        cmds.append(cmd)

        self.cli_load_config(cmds)
        self.changed = True

    def set_trap_source_port(self):
        """ Set the source port for traps """

        cmd = "snmp-agent trap source-port %s" % self.port_number
        self.updates_cmd.append(cmd)

        cmds = list()
        cmds.append(cmd)

        self.cli_load_config(cmds)
        self.changed = True

    def undo_trap_source_port(self):
        """ Remove the configured trap source port """

        cmd = "undo snmp-agent trap source-port"
        self.updates_cmd.append(cmd)

        cmds = list()
        cmds.append(cmd)

        self.cli_load_config(cmds)
        self.changed = True

    def work(self):
        """ The work function: diff desired vs. current state and apply """

        self.check_args()
        self.get_proposed()
        self.get_existing()

        find_flag = False
        find_undo_flag = False
        tmp_interface = None

        if self.state == "present":
            if self.feature_name:
                # tmp_cfg is the suffix of the CLI line we expect to find;
                # trap names echo back from the device in lower case
                if self.trap_name:
                    tmp_cfg = "feature-name %s trap-name %s" % (
                        self.feature_name, self.trap_name.lower())
                else:
                    tmp_cfg = "feature-name %s" % self.feature_name

                # an explicit 'undo' line for this feature means the trap is
                # disabled and must be re-enabled
                find_undo_flag = False
                if self.cur_cfg["undo snmp-agent trap"]:
                    for item in self.cur_cfg["undo snmp-agent trap"]:
                        if item == tmp_cfg:
                            find_undo_flag = True
                        elif tmp_cfg in item:
                            find_undo_flag = True
                        elif self.feature_name == "all":
                            find_undo_flag = True
                if find_undo_flag:
                    self.set_trap_feature_name()

                if not find_undo_flag:
                    # otherwise enable only if not already enabled
                    # (globally via 'enable' or for this specific feature)
                    find_flag = False
                    if self.cur_cfg["snmp-agent trap"]:
                        for item in self.cur_cfg["snmp-agent trap"]:
                            if item == "enable":
                                find_flag = True
                            elif item == tmp_cfg:
                                find_flag = True
                    if not find_flag:
                        self.set_trap_feature_name()

            if self.interface_type:
                find_flag = False
                tmp_interface = self.interface_type + self.interface_number

                if "trap source interface" in self.cur_cfg.keys():
                    if self.cur_cfg["trap source interface"] == tmp_interface:
                        find_flag = True

                if not find_flag:
                    self.set_trap_source_interface()

            if self.port_number:
                find_flag = False

                if "trap source-port" in self.cur_cfg.keys():
                    if self.cur_cfg["trap source-port"] == self.port_number:
                        find_flag = True

                if not find_flag:
                    self.set_trap_source_port()

        else:
            # state == "absent": remove only what is actually configured
            if self.feature_name:
                if self.trap_name:
                    tmp_cfg = "feature-name %s trap-name %s" % (
                        self.feature_name, self.trap_name.lower())
                else:
                    tmp_cfg = "feature-name %s" % self.feature_name

                find_flag = False
                if self.cur_cfg["snmp-agent trap"]:
                    for item in self.cur_cfg["snmp-agent trap"]:
                        if item == tmp_cfg:
                            find_flag = True
                        elif item == "enable":
                            find_flag = True
                        elif tmp_cfg in item:
                            find_flag = True
                else:
                    # no explicit trap lines: traps follow the device default,
                    # so issue the undo to be safe
                    find_flag = True

                find_undo_flag = False
                if self.cur_cfg["undo snmp-agent trap"]:
                    for item in self.cur_cfg["undo snmp-agent trap"]:
                        if item == tmp_cfg:
                            find_undo_flag = True
                        elif tmp_cfg in item:
                            find_undo_flag = True

                if find_undo_flag:
                    # already explicitly disabled: nothing to do
                    pass
                elif find_flag:
                    self.undo_trap_feature_name()

            if self.interface_type:
                if "trap source interface" in self.cur_cfg.keys():
                    self.undo_trap_source_interface()

            if self.port_number:
                if "trap source-port" in self.cur_cfg.keys():
                    self.undo_trap_source_port()

        self.get_end_state()

        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        self.results['updates'] = self.updates_cmd

        self.module.exit_json(**self.results)
def main():
    """ Module main: build the argument spec and run the SnmpTraps worker """

    argument_spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        feature_name=dict(choices=['aaa', 'arp', 'bfd', 'bgp', 'cfg', 'configuration', 'dad',
                                   'devm', 'dhcpsnp', 'dldp', 'driver', 'efm', 'erps', 'error-down',
                                   'fcoe', 'fei', 'fei_comm', 'fm', 'ifnet', 'info', 'ipsg', 'ipv6',
                                   'isis', 'l3vpn', 'lacp', 'lcs', 'ldm', 'ldp', 'ldt', 'lldp',
                                   'mpls_lspm', 'msdp', 'mstp', 'nd', 'netconf', 'nqa', 'nvo3',
                                   'openflow', 'ospf', 'ospfv3', 'pim', 'pim-std', 'qos', 'radius',
                                   'rm', 'rmon', 'securitytrap', 'smlktrap', 'snmp', 'ssh', 'stackmng',
                                   'sysclock', 'sysom', 'system', 'tcp', 'telnet', 'trill', 'trunk',
                                   'tty', 'vbst', 'vfs', 'virtual-perception', 'vrrp', 'vstm', 'all']),
        trap_name=dict(type='str'),
        interface_type=dict(choices=['Ethernet', 'Eth-Trunk', 'Tunnel', 'NULL', 'LoopBack', 'Vlanif',
                                     '100GE', '40GE', 'MTunnel', '10GE', 'GE', 'MEth', 'Vbdif', 'Nve']),
        interface_number=dict(type='str'),
        port_number=dict(type='str')
    )

    # add the common CloudEngine connection arguments (host, port, ...)
    argument_spec.update(ce_argument_spec)
    module = SnmpTraps(argument_spec=argument_spec)
    module.work()


if __name__ == '__main__':
    main()
| gpl-3.0 |
flan/media-storage | clients/python/media_storage/compression.py | 2 | 6538 | """
media-storage : compression
===========================
Offers efficient handlers for compressing and decompressing data, using
file-like objects (often tempfiles).
This module is shared by every Python facet of the media-storage project and
changes to one instance should be reflected in all.
Usage
-----
(De)compressors may be called explicitly or retrieved with one of the getter
functions.
Legal
-----
This file is part of the LGPLed subset of the media-storage project.
This module is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU General Public License and
GNU Lesser General Public License along with this program. If not, see
<http://www.gnu.org/licenses/>.
(C) Neil Tallim, 2011
"""
import bz2
import logging
import tempfile
import zlib
try:
import lzma
except ImportError:
lzma = None
#Compression type constants
COMPRESS_NONE = None
COMPRESS_BZ2 = 'bz2'
COMPRESS_GZ = 'gz'
COMPRESS_LZMA = 'lzma'

#Formats usable on this host; lzma is only offered when its module imported
SUPPORTED_FORMATS = (COMPRESS_GZ, COMPRESS_BZ2) + ((COMPRESS_LZMA,) if lzma else ())

_MAX_SPOOLED_FILESIZE = 1024 * 256 #Allow up to 256k in memory
_BUFFER_SIZE = 1024 * 32 #Work with 32k chunks

_logger = logging.getLogger('media_storage-compression')

def get_compressor(format):
    """
    Resolves `format` (one of the compression type constants) to a callable
    that takes a file-like object and returns its compressed contents as a
    new file-like object. COMPRESS_NONE maps to the identity function.
    Raises ValueError for unknown or unavailable formats.
    """
    if format is COMPRESS_NONE:
        return lambda data: data
    if format == COMPRESS_GZ:
        return compress_gz
    if format == COMPRESS_BZ2:
        return compress_bz2
    if lzma and format == COMPRESS_LZMA:
        return compress_lzma
    raise ValueError(format + " is unsupported")

def get_decompressor(format):
    """
    Resolves `format` (one of the compression type constants) to a callable
    that takes a file-like object and returns its decompressed contents as a
    new file-like object. COMPRESS_NONE maps to the identity function.
    Raises ValueError for unknown or unavailable formats.
    """
    if format is COMPRESS_NONE:
        return lambda data: data
    if format == COMPRESS_GZ:
        return decompress_gz
    if format == COMPRESS_BZ2:
        return decompress_bz2
    if lzma and format == COMPRESS_LZMA:
        return decompress_lzma
    raise ValueError(format + " is unsupported")
def _process(data, handler, flush_handler):
    """
    Iterates over the given `data`, reading a reasonable number of bytes, passing them through the
    given (de)compression `handler`, and writing the output to a temporary file, which is ultimately
    returned (seeked to 0).

    `flush_handler`, when not None, is called once at EOF to drain whatever
    the handler still buffers internally.

    If an exception occurs, it is logged and raised directly.
    """
    try:
        #Spool small results in memory; spill to disk past the threshold
        temp = tempfile.SpooledTemporaryFile(_MAX_SPOOLED_FILESIZE)
        while True:
            chunk = data.read(_BUFFER_SIZE)
            if chunk:
                #A (de)compressor may buffer input and emit nothing yet
                chunk = handler(chunk)
                if chunk:
                    temp.write(chunk)
            else:
                #EOF reached: flush any remaining buffered output
                if flush_handler:
                    chunk = flush_handler()
                    if chunk:
                        temp.write(chunk)
                break
        temp.flush()
        temp.seek(0)
        return temp
    except Exception as e:
        #NOTE(review): `temp` is not explicitly closed on failure; it is
        #left for garbage collection to reclaim
        _logger.error("A problem occurred during (de)compression: %(error)s" % {
         'error': str(e),
        })
        raise
def compress_bz2(data):
    """
    Compresses the given file-like object `data` with the bz2 algorithm,
    returning the compressed contents as a file-like object seeked to 0.
    (The previous docstring claimed a (file, size) tuple; `_process` returns
    only the file-like object.)

    Any exceptions are raised directly.
    """
    _logger.debug("Compressing data with bz2...")
    compressor = bz2.BZ2Compressor()
    return _process(data, compressor.compress, compressor.flush)
def decompress_bz2(data):
    """
    Decompresses the given file-like object `data` with the bz2 algorithm,
    returning the decompressed contents as a file-like object seeked to 0.

    Any exceptions are raised directly.
    """
    _logger.debug("Decompressing data with bz2...")
    #BZ2Decompressor has no flush() method, hence no flush handler
    decompressor = bz2.BZ2Decompressor()
    return _process(data, decompressor.decompress, None)
def compress_gz(data):
    """
    Compresses the given file-like object `data` with the zlib/deflate
    algorithm, returning the compressed contents as a file-like object
    seeked to 0.

    Any exceptions are raised directly.
    """
    _logger.debug("Compressing data with gz...")
    compressor = zlib.compressobj()
    return _process(data, compressor.compress, compressor.flush)
def decompress_gz(data):
    """
    Decompresses the given file-like object `data` with the zlib/deflate
    algorithm, returning the decompressed contents as a file-like object
    seeked to 0.

    Any exceptions are raised directly.
    """
    _logger.debug("Decompressing data with gz...")
    decompressor = zlib.decompressobj()
    return _process(data, decompressor.decompress, decompressor.flush)
if lzma: #If the module is unavailable, don't even define the functions
    def compress_lzma(data):
        """
        Compresses the given file-like object `data` with the lzma algorithm,
        returning the compressed contents as a file-like object seeked to 0.

        Any exceptions are raised directly.

        This function is not available if no LZMA library is present.
        """
        _logger.debug("Compressing data with lzma...")
        compressor = lzma.LZMACompressor()
        return _process(data, compressor.compress, compressor.flush)

    def decompress_lzma(data):
        """
        Decompresses the given file-like object `data` with the lzma
        algorithm, returning the decompressed contents as a file-like object
        seeked to 0.

        Any exceptions are raised directly.

        This function is not available if no LZMA library is present.
        """
        _logger.debug("Decompressing data with lzma...")
        decompressor = lzma.LZMADecompressor()
        return _process(data, decompressor.decompress, decompressor.flush)
| lgpl-3.0 |
ahb0327/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/tests/geometries.py | 90 | 33343 | import re
# Matches the leading geometry keyword of a WKT string, e.g. 'POINT (...'
wkt_regex = re.compile(r'^(?P<type>[A-Z]+) ?\(')

class TestGeom:
    """Container for one test geometry: its WKT, parsed type, and any
    extra expected attributes supplied as keyword arguments."""

    def __init__(self, wkt, **kwargs):
        self.wkt = wkt
        # 'bad' marks deliberately malformed WKT that should not be parsed
        self.bad = kwargs.pop('bad', False)
        if not self.bad:
            match = wkt_regex.match(wkt)
            if match is None:
                raise Exception('Improper WKT: "%s"' % wkt)
            self.geo_type = match.group('type')
        for attr, value in kwargs.items():
            setattr(self, attr, value)
# For the old tests
# Two simple polygons (the second with one interior ring); `ncoords` is the
# expected total coordinate count for each geometry.
swig_geoms = (TestGeom('POLYGON ((0 0, 0 100, 100 100, 100 0, 0 0))', ncoords=5),
TestGeom('POLYGON ((0 0, 0 100, 100 100, 100 0, 0 0), (10 10, 10 90, 90 90, 90 10, 10 10) ))', ncoords=10),
)
# Testing WKT & HEX
hex_wkt = (TestGeom('POINT(0 1)', hex='01010000000000000000000000000000000000F03F'),
TestGeom('LINESTRING(0 1, 2 3, 4 5)', hex='0102000000030000000000000000000000000000000000F03F0000000000000040000000000000084000000000000010400000000000001440'),
TestGeom('POLYGON((0 0, 10 0, 10 10, 0 10, 0 0))', hex='010300000001000000050000000000000000000000000000000000000000000000000024400000000000000000000000000000244000000000000024400000000000000000000000000000244000000000000000000000000000000000'),
TestGeom('MULTIPOINT(0 0, 10 0, 10 10, 0 10, 0 0)', hex='010400000005000000010100000000000000000000000000000000000000010100000000000000000024400000000000000000010100000000000000000024400000000000002440010100000000000000000000000000000000002440010100000000000000000000000000000000000000'),
TestGeom('MULTILINESTRING((0 0, 10 0, 10 10, 0 10),(20 20, 30 20))', hex='01050000000200000001020000000400000000000000000000000000000000000000000000000000244000000000000000000000000000002440000000000000244000000000000000000000000000002440010200000002000000000000000000344000000000000034400000000000003E400000000000003440'),
TestGeom('MULTIPOLYGON(((0 0, 10 0, 10 10, 0 10, 0 0)),((20 20, 20 30, 30 30, 30 20, 20 20),(25 25, 25 26, 26 26, 26 25, 25 25)))', hex='010600000002000000010300000001000000050000000000000000000000000000000000000000000000000024400000000000000000000000000000244000000000000024400000000000000000000000000000244000000000000000000000000000000000010300000002000000050000000000000000003440000000000000344000000000000034400000000000003E400000000000003E400000000000003E400000000000003E40000000000000344000000000000034400000000000003440050000000000000000003940000000000000394000000000000039400000000000003A400000000000003A400000000000003A400000000000003A40000000000000394000000000000039400000000000003940'),
TestGeom('GEOMETRYCOLLECTION(MULTIPOLYGON(((0 0, 10 0, 10 10, 0 10, 0 0)),((20 20, 20 30, 30 30, 30 20, 20 20),(25 25, 25 26, 26 26, 26 25, 25 25))),MULTILINESTRING((0 0, 10 0, 10 10, 0 10),(20 20, 30 20)),MULTIPOINT(0 0, 10 0, 10 10, 0 10, 0 0))', hex='010700000003000000010600000002000000010300000001000000050000000000000000000000000000000000000000000000000024400000000000000000000000000000244000000000000024400000000000000000000000000000244000000000000000000000000000000000010300000002000000050000000000000000003440000000000000344000000000000034400000000000003E400000000000003E400000000000003E400000000000003E40000000000000344000000000000034400000000000003440050000000000000000003940000000000000394000000000000039400000000000003A400000000000003A400000000000003A400000000000003A4000000000000039400000000000003940000000000000394001050000000200000001020000000400000000000000000000000000000000000000000000000000244000000000000000000000000000002440000000000000244000000000000000000000000000002440010200000002000000000000000000344000000000000034400000000000003E400000000000003440010400000005000000010100000000000000000000000000000000000000010100000000000000000024400000000000000000010100000000000000000024400000000000002440010100000000000000000000000000000000002440010100000000000000000000000000000000000000'),
)
# WKT, GML, KML output
wkt_out = (TestGeom('POINT (110 130)', ewkt='POINT (110.0000000000000000 130.0000000000000000)', kml='<Point><coordinates>110.0,130.0,0</coordinates></Point>', gml='<gml:Point><gml:coordinates>110,130</gml:coordinates></gml:Point>'),
TestGeom('LINESTRING (40 40,50 130,130 130)', ewkt='LINESTRING (40.0000000000000000 40.0000000000000000, 50.0000000000000000 130.0000000000000000, 130.0000000000000000 130.0000000000000000)', kml='<LineString><coordinates>40.0,40.0,0 50.0,130.0,0 130.0,130.0,0</coordinates></LineString>', gml='<gml:LineString><gml:coordinates>40,40 50,130 130,130</gml:coordinates></gml:LineString>'),
TestGeom('POLYGON ((150 150,410 150,280 20,20 20,150 150),(170 120,330 120,260 50,100 50,170 120))', ewkt='POLYGON ((150.0000000000000000 150.0000000000000000, 410.0000000000000000 150.0000000000000000, 280.0000000000000000 20.0000000000000000, 20.0000000000000000 20.0000000000000000, 150.0000000000000000 150.0000000000000000), (170.0000000000000000 120.0000000000000000, 330.0000000000000000 120.0000000000000000, 260.0000000000000000 50.0000000000000000, 100.0000000000000000 50.0000000000000000, 170.0000000000000000 120.0000000000000000))', kml='<Polygon><outerBoundaryIs><LinearRing><coordinates>150.0,150.0,0 410.0,150.0,0 280.0,20.0,0 20.0,20.0,0 150.0,150.0,0</coordinates></LinearRing></outerBoundaryIs><innerBoundaryIs><LinearRing><coordinates>170.0,120.0,0 330.0,120.0,0 260.0,50.0,0 100.0,50.0,0 170.0,120.0,0</coordinates></LinearRing></innerBoundaryIs></Polygon>', gml='<gml:Polygon><gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>150,150 410,150 280,20 20,20 150,150</gml:coordinates></gml:LinearRing></gml:outerBoundaryIs><gml:innerBoundaryIs><gml:LinearRing><gml:coordinates>170,120 330,120 260,50 100,50 170,120</gml:coordinates></gml:LinearRing></gml:innerBoundaryIs></gml:Polygon>'),
TestGeom('MULTIPOINT (10 80,110 170,110 120)', ewkt='MULTIPOINT (10.0000000000000000 80.0000000000000000, 110.0000000000000000 170.0000000000000000, 110.0000000000000000 120.0000000000000000)', kml='<MultiGeometry><Point><coordinates>10.0,80.0,0</coordinates></Point><Point><coordinates>110.0,170.0,0</coordinates></Point><Point><coordinates>110.0,120.0,0</coordinates></Point></MultiGeometry>', gml='<gml:MultiPoint><gml:pointMember><gml:Point><gml:coordinates>10,80</gml:coordinates></gml:Point></gml:pointMember><gml:pointMember><gml:Point><gml:coordinates>110,170</gml:coordinates></gml:Point></gml:pointMember><gml:pointMember><gml:Point><gml:coordinates>110,120</gml:coordinates></gml:Point></gml:pointMember></gml:MultiPoint>'),
TestGeom('MULTILINESTRING ((110 100,40 30,180 30),(170 30,110 90,50 30))', ewkt='MULTILINESTRING ((110.0000000000000000 100.0000000000000000, 40.0000000000000000 30.0000000000000000, 180.0000000000000000 30.0000000000000000), (170.0000000000000000 30.0000000000000000, 110.0000000000000000 90.0000000000000000, 50.0000000000000000 30.0000000000000000))', kml='<MultiGeometry><LineString><coordinates>110.0,100.0,0 40.0,30.0,0 180.0,30.0,0</coordinates></LineString><LineString><coordinates>170.0,30.0,0 110.0,90.0,0 50.0,30.0,0</coordinates></LineString></MultiGeometry>', gml='<gml:MultiLineString><gml:lineStringMember><gml:LineString><gml:coordinates>110,100 40,30 180,30</gml:coordinates></gml:LineString></gml:lineStringMember><gml:lineStringMember><gml:LineString><gml:coordinates>170,30 110,90 50,30</gml:coordinates></gml:LineString></gml:lineStringMember></gml:MultiLineString>'),
TestGeom('MULTIPOLYGON (((110 110,70 200,150 200,110 110),(110 110,100 180,120 180,110 110)),((110 110,150 20,70 20,110 110),(110 110,120 40,100 40,110 110)))', ewkt='MULTIPOLYGON (((110.0000000000000000 110.0000000000000000, 70.0000000000000000 200.0000000000000000, 150.0000000000000000 200.0000000000000000, 110.0000000000000000 110.0000000000000000), (110.0000000000000000 110.0000000000000000, 100.0000000000000000 180.0000000000000000, 120.0000000000000000 180.0000000000000000, 110.0000000000000000 110.0000000000000000)), ((110.0000000000000000 110.0000000000000000, 150.0000000000000000 20.0000000000000000, 70.0000000000000000 20.0000000000000000, 110.0000000000000000 110.0000000000000000), (110.0000000000000000 110.0000000000000000, 120.0000000000000000 40.0000000000000000, 100.0000000000000000 40.0000000000000000, 110.0000000000000000 110.0000000000000000)))', kml='<MultiGeometry><Polygon><outerBoundaryIs><LinearRing><coordinates>110.0,110.0,0 70.0,200.0,0 150.0,200.0,0 110.0,110.0,0</coordinates></LinearRing></outerBoundaryIs><innerBoundaryIs><LinearRing><coordinates>110.0,110.0,0 100.0,180.0,0 120.0,180.0,0 110.0,110.0,0</coordinates></LinearRing></innerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>110.0,110.0,0 150.0,20.0,0 70.0,20.0,0 110.0,110.0,0</coordinates></LinearRing></outerBoundaryIs><innerBoundaryIs><LinearRing><coordinates>110.0,110.0,0 120.0,40.0,0 100.0,40.0,0 110.0,110.0,0</coordinates></LinearRing></innerBoundaryIs></Polygon></MultiGeometry>', gml='<gml:MultiPolygon><gml:polygonMember><gml:Polygon><gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>110,110 70,200 150,200 110,110</gml:coordinates></gml:LinearRing></gml:outerBoundaryIs><gml:innerBoundaryIs><gml:LinearRing><gml:coordinates>110,110 100,180 120,180 110,110</gml:coordinates></gml:LinearRing></gml:innerBoundaryIs></gml:Polygon></gml:polygonMember><gml:polygonMember><gml:Polygon><gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>110,110 150,20 70,20 
110,110</gml:coordinates></gml:LinearRing></gml:outerBoundaryIs><gml:innerBoundaryIs><gml:LinearRing><gml:coordinates>110,110 120,40 100,40 110,110</gml:coordinates></gml:LinearRing></gml:innerBoundaryIs></gml:Polygon></gml:polygonMember></gml:MultiPolygon>'),
TestGeom('GEOMETRYCOLLECTION (POINT (110 260),LINESTRING (110 0,110 60))', ewkt='GEOMETRYCOLLECTION (POINT (110.0000000000000000 260.0000000000000000), LINESTRING (110.0000000000000000 0.0000000000000000, 110.0000000000000000 60.0000000000000000))', kml='<MultiGeometry><Point><coordinates>110.0,260.0,0</coordinates></Point><LineString><coordinates>110.0,0.0,0 110.0,60.0,0</coordinates></LineString></MultiGeometry>', gml='<gml:GeometryCollection><gml:geometryMember><gml:Point><gml:coordinates>110,260</gml:coordinates></gml:Point></gml:geometryMember><gml:geometryMember><gml:LineString><gml:coordinates>110,0 110,60</gml:coordinates></gml:LineString></gml:geometryMember></gml:GeometryCollection>'),
)
# Errors
# Deliberately malformed inputs (bad=True suppresses WKT-type parsing);
# `hex` flags whether the string should be fed to the HEX path instead of WKT.
errors = (TestGeom('GEOMETR##!@#%#............a32515', bad=True, hex=False),
TestGeom('Foo.Bar', bad=True, hex=False),
TestGeom('POINT (5, 23)', bad=True, hex=False),
TestGeom('AAABBBDDDAAD##@#1113511111-098111111111111111533333333333333', bad=True, hex=True),
TestGeom('FFFFFFFFFFFFFFFFF1355555555555555555565111', bad=True, hex=True),
TestGeom('', bad=True, hex=False),
)
# Polygons
polygons = (TestGeom('POLYGON ((0 0, 0 100, 100 100, 100 0, 0 0), (10 10, 10 90, 90 90, 90 10, 10 10))',
n_i=1, ext_ring_cs=((0, 0), (0, 100), (100, 100), (100, 0), (0, 0)), n_p=10, area=3600.0, centroid=(50., 50.),
),
TestGeom('POLYGON ((0 0, 0 100, 100 100, 100 0, 0 0), (10 10, 10 20, 20 20, 20 10, 10 10), (80 80, 80 90, 90 90, 90 80, 80 80))',
n_i=2, ext_ring_cs=((0, 0), (0, 100), (100, 100), (100, 0), (0, 0)), n_p=15, area=9800.0, centroid=(50., 50.),
),
TestGeom('POLYGON ((0 0, 0 100, 100 100, 100 0, 0 0))',
n_i=0, ext_ring_cs=((0, 0), (0, 100), (100, 100), (100, 0), (0, 0)), n_p=5, area=10000.0, centroid=(50., 50.),
),
TestGeom('POLYGON ((-95.3848703124799471 29.7056021479768511, -95.3851905195191847 29.7046588196500281, -95.3859356966379011 29.7025053545605502, -95.3860723000647539 29.7020963367038391, -95.3871517697222089 29.6989779021280995, -95.3865578518265522 29.6990856888057202, -95.3862634205175226 29.6999471753441782, -95.3861991779541967 29.6999591988978615, -95.3856773799358137 29.6998323107113578, -95.3856209915427229 29.6998005235473741, -95.3855833545501639 29.6996619391729801, -95.3855776331865002 29.6996232659570047, -95.3850162731712885 29.6997236706530536, -95.3831047357410284 29.7000847603095082, -95.3829800724914776 29.7000676365023502, -95.3828084594470909 29.6999969684031200, -95.3828131504821499 29.6999090511531065, -95.3828022942979601 29.6998152117366025, -95.3827893930918833 29.6997790953076759, -95.3825174668099862 29.6998267772748825, -95.3823521544804862 29.7000451723151606, -95.3820491918785223 29.6999682034582335, -95.3817932841505893 29.6999640407204772, -95.3815438924600443 29.7005983712500630, -95.3807812390843424 29.7007538492921590, -95.3778578936435935 29.7012966201172048, -95.3770817300034679 29.7010555145969093, -95.3772763716395957 29.7004995005932031, -95.3769891024414420 29.7005797730360186, -95.3759855007185990 29.7007754783987821, -95.3759516423090474 29.7007305400669388, -95.3765252155960042 29.6989549173240874, -95.3766842746727832 29.6985134987163164, -95.3768510987262914 29.6980530300744938, -95.3769198676258014 29.6977137204527573, -95.3769616670751930 29.6973351617272172, -95.3770309229297766 29.6969821084304186, -95.3772352596880637 29.6959751305871613, -95.3776232419333354 29.6945439060847463, -95.3776849628727064 29.6943364710766069, -95.3779699491714723 29.6926548349458947, -95.3781945479573494 29.6920088336742545, -95.3785807118394189 29.6908279316076005, -95.3787441368896651 29.6908846275832197, -95.3787903214163890 29.6907152912461640, -95.3791765069353659 29.6893335376821526, -95.3794935959513026 29.6884781789101595, 
-95.3796592071232112 29.6880066681407619, -95.3799788182090111 29.6873687353035081, -95.3801545516183893 29.6868782380716993, -95.3801258908302145 29.6867756621337762, -95.3801104284899566 29.6867229678809572, -95.3803803523746154 29.6863753372986459, -95.3821028558287622 29.6837392961470421, -95.3827289584682205 29.6828097375216160, -95.3827494698109035 29.6790739156259278, -95.3826022014838486 29.6776502228345507, -95.3825047356438063 29.6765773006280753, -95.3823473035336917 29.6750405250369127, -95.3824540163482055 29.6750076408228587, -95.3838984230304305 29.6745679207378679, -95.3916547074937426 29.6722459226508377, -95.3926154662749468 29.6719609085105489, -95.3967246645118081 29.6707316485589736, -95.3974588054406780 29.6705065336410989, -95.3978523748756828 29.6703795547846845, -95.3988598162279970 29.6700874981900853, -95.3995628600665952 29.6698505300412414, -95.4134721665944170 29.6656841279906232, -95.4143262068232616 29.6654291174019278, -95.4159685142480214 29.6649750989232288, -95.4180067396277565 29.6643253024318021, -95.4185886692196590 29.6641482768691063, -95.4234155309609662 29.6626925393704788, -95.4287785503196346 29.6611023620959706, -95.4310287312749352 29.6604222580752648, -95.4320295629628959 29.6603361318136720, -95.4332899683975739 29.6600560661713608, -95.4342675748811047 29.6598454934599900, -95.4343110414310871 29.6598411486215490, -95.4345576779282538 29.6598147020668499, -95.4348823041721630 29.6597875803673112, -95.4352827715209457 29.6597762346946681, -95.4355290431309982 29.6597827926562374, -95.4359197997999331 29.6598014511782715, -95.4361907884752156 29.6598444333523368, -95.4364608955807228 29.6598901433108217, -95.4367250147512323 29.6599494499910712, -95.4364898759758091 29.6601880616540186, -95.4354501111810691 29.6616378572201107, -95.4381459623171224 29.6631265631655126, -95.4367852490863129 29.6642266600024023, -95.4370040894557263 29.6643425389568769, -95.4367078350812648 29.6645492592343238, -95.4366081749871285 
29.6646291473027297, -95.4358539359938192 29.6652308742342932, -95.4350327668927889 29.6658995989314462, -95.4350580905272921 29.6678812477895271, -95.4349710541447536 29.6680054925936965, -95.4349500440473548 29.6671410080890006, -95.4341492724148850 29.6678790545191688, -95.4340248868274728 29.6680353198492135, -95.4333227845797438 29.6689245624945990, -95.4331325652123326 29.6691616138940901, -95.4321314741096955 29.6704473333237253, -95.4320435792664341 29.6702578985411982, -95.4320147929883547 29.6701800936425109, -95.4319764538662980 29.6683246590817085, -95.4317490976340679 29.6684974372577166, -95.4305958185342718 29.6694049049170374, -95.4296600735653016 29.6701723430938493, -95.4284928989940937 29.6710931793380972, -95.4274630532378580 29.6719378813640091, -95.4273056811974811 29.6720684984625791, -95.4260554084574864 29.6730668861566969, -95.4253558063699643 29.6736342467365724, -95.4249278826026028 29.6739557343648919, -95.4248648873821423 29.6745400910786152, -95.4260016131471929 29.6750987014005858, -95.4258567183010911 29.6753452063069929, -95.4260238081486847 29.6754322077221353, -95.4258707374502393 29.6756647377294307, -95.4257951755816691 29.6756407098663360, -95.4257701599566985 29.6761077719536068, -95.4257726684792260 29.6761711204603955, -95.4257980187195614 29.6770219651929423, -95.4252712669032519 29.6770161558853758, -95.4249234392992065 29.6770068683962300, -95.4249574272905789 29.6779707498635759, -95.4244725881033702 29.6779825646764159, -95.4222269476429545 29.6780711474441716, -95.4223032371999267 29.6796029391538809, -95.4239133706588945 29.6795331493690355, -95.4224579084327331 29.6813706893847780, -95.4224290108823965 29.6821953228763924, -95.4230916478977349 29.6822130268724109, -95.4222928279595521 29.6832041816675343, -95.4228763710016352 29.6832087677714505, -95.4223401691637179 29.6838987872753748, -95.4211655906087088 29.6838784024852984, -95.4201984153205558 29.6851319258758082, -95.4206156387716362 29.6851623398125319, 
-95.4213438084897660 29.6851763011334739, -95.4212071118618752 29.6853679931624974, -95.4202651399651245 29.6865313962980508, -95.4172061157659783 29.6865816431043932, -95.4182217951255183 29.6872251197301544, -95.4178664826439160 29.6876750901471631, -95.4180678442928780 29.6877960336377207, -95.4188763472917572 29.6882826379510938, -95.4185374500596311 29.6887137897831934, -95.4182121713132290 29.6885097429738813, -95.4179857231741551 29.6888118367840086, -95.4183106010563620 29.6890048676118212, -95.4179489865331334 29.6894546700979056, -95.4175581746284820 29.6892323606815438, -95.4173439957341571 29.6894990139807007, -95.4177411199311081 29.6897435034738422, -95.4175789200209721 29.6899207529979208, -95.4170598559864800 29.6896042165807508, -95.4166733682539814 29.6900891174451367, -95.4165941362704331 29.6900347214235047, -95.4163537218065301 29.6903529467753238, -95.4126843270708775 29.6881086357212780, -95.4126604121378392 29.6880942378803496, -95.4126672298953338 29.6885951670109982, -95.4126680884821923 29.6887052446594275, -95.4158080137241882 29.6906382377959339, -95.4152061403821961 29.6910871045531586, -95.4155842583188161 29.6917382915894308, -95.4157426793520358 29.6920726941677096, -95.4154520563662203 29.6922052332446427, -95.4151389936167078 29.6923261661269571, -95.4148649784384872 29.6924343866430256, -95.4144051352401590 29.6925623927348106, -95.4146792019416665 29.6926770338507744, -95.4148824479948985 29.6928117893696388, -95.4149851734360226 29.6929823719519774, -95.4140436551925291 29.6929626643100946, -95.4140465993023241 29.6926545917254892, -95.4137269186733334 29.6927395764256090, -95.4137372859685513 29.6935432485666624, -95.4135702836218655 29.6933186678088283, -95.4133925235973237 29.6930415229852152, -95.4133017035615580 29.6928685062036166, -95.4129588921634593 29.6929391128977862, -95.4125107395559695 29.6930481664661485, -95.4102647423187307 29.6935850183258019, -95.4081931340840157 29.6940907430947760, -95.4078783596459772 
29.6941703429951609, -95.4049213975000043 29.6948723732981961, -95.4045944244127071 29.6949626434239207, -95.4045865139788134 29.6954109019001358, -95.4045953345484037 29.6956972800496963, -95.4038879332535146 29.6958296089365490, -95.4040366394459340 29.6964389004769842, -95.4032774779020798 29.6965643341263892, -95.4026066501239853 29.6966646227683881, -95.4024991226393837 29.6961389766619703, -95.4011781398631911 29.6963566063186377, -95.4011524097636112 29.6962596176762190, -95.4018184046368276 29.6961399466727336, -95.4016995838361908 29.6956442609415099, -95.4007100753964608 29.6958900524002978, -95.4008032469935188 29.6962639900781404, -95.3995660267125487 29.6965636449370329, -95.3996140564775601 29.6967877962763644, -95.3996364430014410 29.6968901984825280, -95.3984003269631842 29.6968679634805746, -95.3981442026887265 29.6983660679730335, -95.3980178461957706 29.6990890276252415, -95.3977097967130163 29.7008526152273049, -95.3962347157626027 29.7009697553607630, -95.3951949050136250 29.7004740386619019, -95.3957564950617183 29.6990281830553187, -95.3965927101519924 29.6968771129030706, -95.3957496517238184 29.6970800358387095, -95.3957720559467361 29.6972264611230727, -95.3957391586571788 29.6973548894558732, -95.3956286413405365 29.6974949857280883, -95.3955111053256957 29.6975661086270186, -95.3953215342724121 29.6976022763384790, -95.3951795558443365 29.6975846977491038, -95.3950369632041060 29.6975175779330200, -95.3949401089966500 29.6974269267953304, -95.3948740281415581 29.6972903308506346, -95.3946650813866910 29.6973397326847923, -95.3947654059391112 29.6974882560192022, -95.3949627316619768 29.6980355864961858, -95.3933200807862249 29.6984590863712796, -95.3932606497523494 29.6984464798710839, -95.3932983699113350 29.6983154306484352, -95.3933058014696655 29.6982165816983610, -95.3932946347785133 29.6981089778195759, -95.3931780601756287 29.6977068906794841, -95.3929928222970602 29.6977541771878180, -95.3930873169846478 29.6980676264932946, 
-95.3932743746374570 29.6981249406449663, -95.3929512584706316 29.6989526513922222, -95.3919850280655197 29.7014358632108646, -95.3918950918929056 29.7014169320765724, -95.3916928317890296 29.7019232352846423, -95.3915424614970959 29.7022988712928289, -95.3901530441668939 29.7058519502930061, -95.3899656322116698 29.7059156823562418, -95.3897628748670883 29.7059900058266777, -95.3896062677805787 29.7060738276384946, -95.3893941800512266 29.7061891695242046, -95.3892150365492455 29.7062641292949436, -95.3890502563035199 29.7063339729630940, -95.3888717930715586 29.7063896908080736, -95.3886925428988945 29.7064453871994978, -95.3885376849411983 29.7064797304524149, -95.3883284158984139 29.7065153575050189, -95.3881046767627794 29.7065368368267357, -95.3878809284696132 29.7065363048447537, -95.3876046356120924 29.7065288525102424, -95.3873060894974714 29.7064822806001452, -95.3869851943158409 29.7063993367575350, -95.3865967896568065 29.7062870572919202, -95.3861785624983156 29.7061492099008184, -95.3857375009733488 29.7059887337478798, -95.3854573290902152 29.7058683664514618, -95.3848703124799471 29.7056021479768511))',
n_i=0, ext_ring_cs=False, n_p=264, area=0.00129917360654, centroid=(-95.403569179437341, 29.681772571690402),
),
)
# MultiPolygons
# `valid` is the expected validity flag; the second entry duplicates its
# component polygon, which is why it is expected to be invalid.
# `num_geom` / `n_p` are the expected geometry and point counts.
multipolygons = (TestGeom('MULTIPOLYGON (((100 20, 180 20, 180 100, 100 100, 100 20)), ((20 100, 100 100, 100 180, 20 180, 20 100)), ((100 180, 180 180, 180 260, 100 260, 100 180)), ((180 100, 260 100, 260 180, 180 180, 180 100)))', valid=True, num_geom=4, n_p=20),
TestGeom('MULTIPOLYGON (((60 300, 320 220, 260 60, 60 100, 60 300)), ((60 300, 320 220, 260 60, 60 100, 60 300)))', valid=False),
TestGeom('MULTIPOLYGON (((180 60, 240 160, 300 60, 180 60)), ((80 80, 180 60, 160 140, 240 160, 360 140, 300 60, 420 100, 320 280, 120 260, 80 80)))', valid=True, num_geom=2, n_p=14),
)
# Points
# Expected x/y (and z for the 3D case) coordinates plus the centroid, which
# for a point is the point itself.
points = (TestGeom('POINT (5 23)', x=5.0, y=23.0, centroid=(5.0, 23.0)),
TestGeom('POINT (-95.338492 29.723893)', x=-95.338492, y=29.723893, centroid=(-95.338492, 29.723893)),
TestGeom('POINT(1.234 5.678)', x=1.234, y=5.678, centroid=(1.234, 5.678)),
TestGeom('POINT(4.321 8.765)', x=4.321, y=8.765, centroid=(4.321, 8.765)),
TestGeom('POINT(10 10)', x=10, y=10, centroid=(10., 10.)),
TestGeom('POINT (5 23 8)', x=5.0, y=23.0, z=8.0, centroid=(5.0, 23.0)),
)
# MultiPoints
# `n_p` is the expected point count, `points` the expected coordinate tuples,
# `centroid` the expected centroid of the collection.
multipoints = (TestGeom('MULTIPOINT(10 10, 20 20 )', n_p=2, points=((10., 10.), (20., 20.)), centroid=(15., 15.)),
TestGeom('MULTIPOINT(10 10, 20 20, 10 20, 20 10)',
n_p=4, points=((10., 10.), (20., 20.), (10., 20.), (20., 10.)),
centroid=(15., 15.)),
)
# LineStrings
# `tup` is the expected coordinate tuple representation of each line.
linestrings = (TestGeom('LINESTRING (60 180, 120 100, 180 180)', n_p=3, centroid=(120, 140), tup=((60, 180), (120, 100), (180, 180))),
TestGeom('LINESTRING (0 0, 5 5, 10 5, 10 10)', n_p=4, centroid=(6.1611652351681556, 4.6966991411008934), tup=((0, 0), (5, 5), (10, 5), (10, 10)),),
)
# Linear Rings
# A single closed ring (first and last coordinates identical) with the
# expected point count `n_p`.
linearrings = (TestGeom('LINEARRING (649899.3065171393100172 4176512.3807915160432458, 649902.7294133581453934 4176512.7834989596158266, 649906.5550170192727819 4176514.3942507002502680, 649910.5820134161040187 4176516.0050024418160319, 649914.4076170771149918 4176518.0184616246260703, 649917.2264131171396002 4176519.4278986593708396, 649920.0452871860470623 4176521.6427505780011415, 649922.0587463703704998 4176522.8507948759943247, 649924.2735982896992937 4176524.4616246484220028, 649926.2870574744883925 4176525.4683542405255139, 649927.8978092158213258 4176526.8777912775985897, 649929.3072462501004338 4176528.0858355751261115, 649930.1126611357321963 4176529.4952726080082357, 649927.4951798024121672 4176506.9444361114874482, 649899.3065171393100172 4176512.3807915160432458)', n_p=15),
)
# MultiLineStrings
# `n_p` is the expected total point count across all component lines and
# `tup` the expected nested coordinate-tuple representation.
multilinestrings = (TestGeom('MULTILINESTRING ((0 0, 0 100), (100 0, 100 100))', n_p=4, centroid=(50, 50), tup=(((0, 0), (0, 100)), ((100, 0), (100, 100)))),
TestGeom('MULTILINESTRING ((20 20, 60 60), (20 -20, 60 -60), (-20 -20, -60 -60), (-20 20, -60 60), (-80 0, 0 80, 80 0, 0 -80, -80 0), (-40 20, -40 -20), (-20 40, 20 40), (40 20, 40 -20), (20 -40, -20 -40))',
n_p=21, centroid=(0, 0), tup=(((20., 20.), (60., 60.)), ((20., -20.), (60., -60.)), ((-20., -20.), (-60., -60.)), ((-20., 20.), (-60., 60.)), ((-80., 0.), (0., 80.), (80., 0.), (0., -80.), (-80., 0.)), ((-40., 20.), (-40., -20.)), ((-20., 40.), (20., 40.)), ((40., 20.), (40., -20.)), ((20., -40.), (-20., -40.))))
)
# ====================================================
# Topology Operations
# `topology_geoms` holds pairs of input geometries; the result fixtures below
# (intersect/union/diff/sdiff) appear to be index-aligned with these pairs --
# presumably entry i is the expected result for pair i; verify against the
# tests that consume them.
topology_geoms = ( (TestGeom('POLYGON ((-5.0 0.0, -5.0 10.0, 5.0 10.0, 5.0 0.0, -5.0 0.0))'),
TestGeom('POLYGON ((0.0 -5.0, 0.0 5.0, 10.0 5.0, 10.0 -5.0, 0.0 -5.0))')
),
(TestGeom('POLYGON ((2 0, 18 0, 18 15, 2 15, 2 0))'),
TestGeom('POLYGON ((10 1, 11 3, 13 4, 15 6, 16 8, 16 10, 15 12, 13 13, 11 12, 10 10, 9 12, 7 13, 5 12, 4 10, 4 8, 5 6, 7 4, 9 3, 10 1))'),
),
)
# Expected intersection results.
intersect_geoms = ( TestGeom('POLYGON ((5 5,5 0,0 0,0 5,5 5))'),
TestGeom('POLYGON ((10 1, 9 3, 7 4, 5 6, 4 8, 4 10, 5 12, 7 13, 9 12, 10 10, 11 12, 13 13, 15 12, 16 10, 16 8, 15 6, 13 4, 11 3, 10 1))'),
)
# Expected union results.
union_geoms = ( TestGeom('POLYGON ((-5 0,-5 10,5 10,5 5,10 5,10 -5,0 -5,0 0,-5 0))'),
TestGeom('POLYGON ((2 0, 2 15, 18 15, 18 0, 2 0))'),
)
# Expected difference results.
diff_geoms = ( TestGeom('POLYGON ((-5 0,-5 10,5 10,5 5,0 5,0 0,-5 0))'),
TestGeom('POLYGON ((2 0, 2 15, 18 15, 18 0, 2 0), (10 1, 11 3, 13 4, 15 6, 16 8, 16 10, 15 12, 13 13, 11 12, 10 10, 9 12, 7 13, 5 12, 4 10, 4 8, 5 6, 7 4, 9 3, 10 1))'),
)
# Expected symmetric-difference results.
sdiff_geoms = ( TestGeom('MULTIPOLYGON (((-5 0,-5 10,5 10,5 5,0 5,0 0,-5 0)),((0 0,5 0,5 5,10 5,10 -5,0 -5,0 0)))'),
TestGeom('POLYGON ((2 0, 2 15, 18 15, 18 0, 2 0), (10 1, 11 3, 13 4, 15 6, 16 8, 16 10, 15 12, 13 13, 11 12, 10 10, 9 12, 7 13, 5 12, 4 10, 4 8, 5 6, 7 4, 9 3, 10 1))'),
)
# Each entry: (geometry A, geometry B, pattern string, expected boolean).
# The 9-character strings look like DE-9IM intersection-matrix patterns for
# a relate() test -- confirm against the consuming test code.
relate_geoms = ( (TestGeom('MULTIPOINT(80 70, 20 20, 200 170, 140 120)'),
TestGeom('MULTIPOINT(80 170, 140 120, 200 80, 80 70)'),
'0F0FFF0F2', True,),
(TestGeom('POINT(20 20)'), TestGeom('POINT(40 60)'),
'FF0FFF0F2', True,),
(TestGeom('POINT(110 110)'), TestGeom('LINESTRING(200 200, 110 110, 200 20, 20 20, 110 110, 20 200, 200 200)'),
'0FFFFF1F2', True,),
(TestGeom('MULTILINESTRING((20 20, 90 20, 170 20), (90 20, 90 80, 90 140))'),
TestGeom('MULTILINESTRING((90 20, 170 100, 170 140), (130 140, 130 60, 90 20, 20 90, 90 20))'),
'FF10F0102', True,),
)
buffer_geoms = ( (TestGeom('POINT(0 0)'),
TestGeom('POLYGON ((5 0,4.903926402016153 -0.97545161008064,4.619397662556435 -1.913417161825447,4.157348061512728 -2.777851165098009,3.53553390593274 -3.535533905932735,2.777851165098015 -4.157348061512724,1.913417161825454 -4.619397662556431,0.975451610080648 -4.903926402016151,0.000000000000008 -5.0,-0.975451610080632 -4.903926402016154,-1.913417161825439 -4.619397662556437,-2.777851165098002 -4.157348061512732,-3.53553390593273 -3.535533905932746,-4.157348061512719 -2.777851165098022,-4.619397662556429 -1.913417161825462,-4.903926402016149 -0.975451610080656,-5.0 -0.000000000000016,-4.903926402016156 0.975451610080624,-4.619397662556441 1.913417161825432,-4.157348061512737 2.777851165097995,-3.535533905932752 3.535533905932723,-2.777851165098029 4.157348061512714,-1.913417161825468 4.619397662556426,-0.975451610080661 4.903926402016149,-0.000000000000019 5.0,0.975451610080624 4.903926402016156,1.913417161825434 4.61939766255644,2.777851165097998 4.157348061512735,3.535533905932727 3.535533905932748,4.157348061512719 2.777851165098022,4.619397662556429 1.91341716182546,4.90392640201615 0.975451610080652,5 0))'),
5.0, 8),
(TestGeom('POLYGON((0 0, 10 0, 10 10, 0 10, 0 0))'),
TestGeom('POLYGON ((-2 0,-2 10,-1.961570560806461 10.390180644032258,-1.847759065022573 10.765366864730179,-1.662939224605091 11.111140466039204,-1.414213562373095 11.414213562373096,-1.111140466039204 11.662939224605092,-0.765366864730179 11.847759065022574,-0.390180644032256 11.961570560806461,0 12,10 12,10.390180644032256 11.961570560806461,10.765366864730179 11.847759065022574,11.111140466039204 11.66293922460509,11.414213562373096 11.414213562373096,11.66293922460509 11.111140466039204,11.847759065022574 10.765366864730179,11.961570560806461 10.390180644032256,12 10,12 0,11.961570560806461 -0.390180644032256,11.847759065022574 -0.76536686473018,11.66293922460509 -1.111140466039204,11.414213562373096 -1.414213562373095,11.111140466039204 -1.66293922460509,10.765366864730179 -1.847759065022573,10.390180644032256 -1.961570560806461,10 -2,0.0 -2.0,-0.390180644032255 -1.961570560806461,-0.765366864730177 -1.847759065022575,-1.1111404660392 -1.662939224605093,-1.41421356237309 -1.4142135623731,-1.662939224605086 -1.111140466039211,-1.84775906502257 -0.765366864730189,-1.961570560806459 -0.390180644032268,-2 0))'),
2.0, 8),
)
json_geoms = (TestGeom('POINT(100 0)', json='{ "type": "Point", "coordinates": [ 100.000000, 0.000000 ] }'),
TestGeom('POLYGON((0 0, -10 0, -10 -10, 0 -10, 0 0))', json='{ "type": "Polygon", "coordinates": [ [ [ 0.000000, 0.000000 ], [ -10.000000, 0.000000 ], [ -10.000000, -10.000000 ], [ 0.000000, -10.000000 ], [ 0.000000, 0.000000 ] ] ] }'),
TestGeom('MULTIPOLYGON(((102 2, 103 2, 103 3, 102 3, 102 2)), ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0), (100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8, 100.2 0.2)))', json='{ "type": "MultiPolygon", "coordinates": [ [ [ [ 102.000000, 2.000000 ], [ 103.000000, 2.000000 ], [ 103.000000, 3.000000 ], [ 102.000000, 3.000000 ], [ 102.000000, 2.000000 ] ] ], [ [ [ 100.000000, 0.000000 ], [ 101.000000, 0.000000 ], [ 101.000000, 1.000000 ], [ 100.000000, 1.000000 ], [ 100.000000, 0.000000 ] ], [ [ 100.200000, 0.200000 ], [ 100.800000, 0.200000 ], [ 100.800000, 0.800000 ], [ 100.200000, 0.800000 ], [ 100.200000, 0.200000 ] ] ] ] }'),
TestGeom('GEOMETRYCOLLECTION(POINT(100 0),LINESTRING(101.0 0.0, 102.0 1.0))',
json='{ "type": "GeometryCollection", "geometries": [ { "type": "Point", "coordinates": [ 100.000000, 0.000000 ] }, { "type": "LineString", "coordinates": [ [ 101.000000, 0.000000 ], [ 102.000000, 1.000000 ] ] } ] }',
),
TestGeom('MULTILINESTRING((100.0 0.0, 101.0 1.0),(102.0 2.0, 103.0 3.0))',
json="""
{ "type": "MultiLineString",
"coordinates": [
[ [100.0, 0.0], [101.0, 1.0] ],
[ [102.0, 2.0], [103.0, 3.0] ]
]
}
""",
not_equal=True,
),
)
# For testing HEX(EWKB).
# Plain OGC WKB hex (no SRID) for POINT(0 1).
ogc_hex = '01010000000000000000000000000000000000F03F'
# `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
# Same point as EWKB with an embedded SRID of 4326 (flag byte 0x20 set).
hexewkb_2d = '0101000020E61000000000000000000000000000000000F03F'
# `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
# 3D variant: SRID 4326 plus a Z ordinate (flag byte 0xA0 = SRID + Z).
hexewkb_3d = '01010000A0E61000000000000000000000000000000000F03F0000000000000040'
| apache-2.0 |
nitin-cherian/LifeLongLearning | Web_Development_Python/RealPython/real-python-test/env/lib/python3.5/site-packages/pkg_resources/_vendor/packaging/requirements.py | 454 | 4355 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import string
import re
from pkg_resources.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
from pkg_resources.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
from pkg_resources.extern.pyparsing import Literal as L # noqa
from pkg_resources.extern.six.moves.urllib import parse as urlparse
from .markers import MARKER_EXPR, Marker
from .specifiers import LegacySpecifier, Specifier, SpecifierSet
class InvalidRequirement(ValueError):
    """Raised when a requirement string is malformed.
    Users should refer to PEP 508 for the expected grammar.
    """
# ---------------------------------------------------------------------------
# PEP 508 requirement grammar, built with the vendored pyparsing.
# Order matters: each terminal below is referenced by the ones after it.
# ---------------------------------------------------------------------------
ALPHANUM = Word(string.ascii_letters + string.digits)
# Punctuation tokens are suppressed: they delimit the parse but do not
# appear in the parse results.
LBRACKET = L("[").suppress()
RBRACKET = L("]").suppress()
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
COMMA = L(",").suppress()
SEMICOLON = L(";").suppress()
AT = L("@").suppress()
# Project names: alphanumeric runs joined by '-', '_' or '.', and must
# end on an alphanumeric character.
PUNCTUATION = Word("-_.")
IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
NAME = IDENTIFIER("name")
EXTRA = IDENTIFIER
# Direct-reference URLs ("name @ url"): any run of non-space characters.
URI = Regex(r'[^ ]+')("url")
URL = (AT + URI)
# Optional bracketed extras list: "name[extra1,extra2]".
EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
# Version specifiers accept both PEP 440 operators and the legacy ones.
VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE),
                       joinString=",", adjacent=False)("_raw_spec")
# The specifier may optionally be parenthesized: "name (>=1.0)".
_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '')
VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
# Environment marker: everything after ';', rebuilt as a Marker object
# from the original source text so whitespace is preserved.
MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
MARKER_EXPR.setParseAction(
    lambda s, l, t: Marker(s[t._original_start:t._original_end])
)
MARKER_SEPERATOR = SEMICOLON
MARKER = MARKER_SEPERATOR + MARKER_EXPR
VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
URL_AND_MARKER = URL + Optional(MARKER)
# A complete requirement: name, optional extras, then either a URL or a
# version specifier, each optionally followed by a marker.
NAMED_REQUIREMENT = \
    NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
# Anchored at both ends so trailing garbage is a parse error.
REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
class Requirement(object):
    """Parse a requirement.
    Parse a given requirement string into its parts, such as name, specifier,
    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
    string.
    """
    # TODO: Can we test whether something is contained within a requirement?
    #       If so how do we do that? Do we need to test against the _name_ of
    #       the thing as well as the version? What about the markers?
    # TODO: Can we normalize the name and extra name?
    def __init__(self, requirement_string):
        """Parse requirement_string and populate name/url/extras/specifier/marker.
        raises: InvalidRequirement if the string does not match the PEP 508
            grammar or carries an unusable URL.
        """
        try:
            req = REQUIREMENT.parseString(requirement_string)
        except ParseException as e:
            # NOTE(review): "{0!r}" inside an already-quoted string yields
            # doubled quoting in the message (e.g. "'foo'") — cosmetic only.
            raise InvalidRequirement(
                "Invalid requirement, parse error at \"{0!r}\"".format(
                    requirement_string[e.loc:e.loc + 8]))
        self.name = req.name
        if req.url:
            parsed_url = urlparse.urlparse(req.url)
            # NOTE(review): the second clause is implied by the first
            # (not (a and b) is already true when both are missing), so the
            # check reduces to "require both scheme and netloc" — this
            # rejects netloc-less URLs such as file:///path; confirm intent
            # before tightening/loosening.
            if not (parsed_url.scheme and parsed_url.netloc) or (
                    not parsed_url.scheme and not parsed_url.netloc):
                raise InvalidRequirement("Invalid URL given")
            self.url = req.url
        else:
            self.url = None
        # extras come back as a pyparsing ParseResults; coerce to a set
        self.extras = set(req.extras.asList() if req.extras else [])
        self.specifier = SpecifierSet(req.specifier)
        self.marker = req.marker if req.marker else None
    def __str__(self):
        """Reassemble a canonical PEP 508 string: name[extras]spec @ url ; marker."""
        parts = [self.name]
        if self.extras:
            # sorted for a deterministic, canonical rendering
            parts.append("[{0}]".format(",".join(sorted(self.extras))))
        if self.specifier:
            parts.append(str(self.specifier))
        if self.url:
            parts.append("@ {0}".format(self.url))
        if self.marker:
            parts.append("; {0}".format(self.marker))
        return "".join(parts)
    def __repr__(self):
        return "<Requirement({0!r})>".format(str(self))
| mit |
echa/gearman-go | example/py/client.py | 7 | 1499 | #!/usr/bin/python
import gearman
def check_request_status(job_request):
if job_request.complete:
print "Job %s finished! Result: %s - %s" % (job_request.job.unique, job_request.state, job_request.result)
elif job_request.timed_out:
print "Job %s timed out!" % job_request.unique
elif job_request.state == JOB_UNKNOWN:
print "Job %s connection failed!" % job_request.unique
def main():
client = gearman.GearmanClient(['localhost:4730', 'otherhost:4730'])
try:
completed_job_request = client.submit_job("ToUpper", "arbitrary binary data")
check_request_status(completed_job_request)
except Exception as e:
print type(e)
try:
completed_job_request = client.submit_job("ToUpperTimeOut5", "arbitrary binary data")
check_request_status(completed_job_request)
except Exception as e:
print type(e)
try:
completed_job_request = client.submit_job("ToUpperTimeOut20", "arbitrary binary data")
check_request_status(completed_job_request)
except Exception as e:
print type(e)
try:
completed_job_request = client.submit_job("SysInfo", "")
check_request_status(completed_job_request)
except Exception as e:
print type(e)
try:
completed_job_request = client.submit_job("MemInfo", "")
check_request_status(completed_job_request)
except Exception as e:
print type(e)
if __name__ == '__main__':
main()
| mit |
borosnborea/SwordGO_app | example/kivymap/.buildozer/venv/lib/python2.7/site-packages/pip/commands/list.py | 168 | 7412 | from __future__ import absolute_import
import logging
import warnings
from pip.basecommand import Command
from pip.exceptions import CommandError
from pip.index import PackageFinder
from pip.utils import (
get_installed_distributions, dist_is_editable)
from pip.utils.deprecation import RemovedInPip10Warning
from pip.cmdoptions import make_option_group, index_group
logger = logging.getLogger(__name__)
class ListCommand(Command):
    """
    List installed packages, including editables.
    Packages are listed in a case-insensitive sorted order.
    """
    name = 'list'
    usage = """
      %prog [options]"""
    summary = 'List installed packages.'
    def __init__(self, *args, **kw):
        """Register the command-line options for `pip list`."""
        super(ListCommand, self).__init__(*args, **kw)
        cmd_opts = self.cmd_opts
        # --outdated and --uptodate are mutually exclusive (checked in run())
        cmd_opts.add_option(
            '-o', '--outdated',
            action='store_true',
            default=False,
            help='List outdated packages')
        cmd_opts.add_option(
            '-u', '--uptodate',
            action='store_true',
            default=False,
            help='List uptodate packages')
        cmd_opts.add_option(
            '-e', '--editable',
            action='store_true',
            default=False,
            help='List editable projects.')
        cmd_opts.add_option(
            '-l', '--local',
            action='store_true',
            default=False,
            help=('If in a virtualenv that has global access, do not list '
                  'globally-installed packages.'),
        )
        self.cmd_opts.add_option(
            '--user',
            dest='user',
            action='store_true',
            default=False,
            help='Only output packages installed in user-site.')
        cmd_opts.add_option(
            '--pre',
            action='store_true',
            default=False,
            help=("Include pre-release and development versions. By default, "
                  "pip only finds stable versions."),
        )
        # index options (e.g. --index-url) are shared with other commands
        index_opts = make_option_group(index_group, self.parser)
        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)
    def _build_package_finder(self, options, index_urls, session):
        """
        Create a package finder appropriate to this list command.
        """
        return PackageFinder(
            find_links=options.find_links,
            index_urls=index_urls,
            allow_all_prereleases=options.pre,
            trusted_hosts=options.trusted_hosts,
            process_dependency_links=options.process_dependency_links,
            session=session,
        )
    def run(self, options, args):
        """Dispatch to the outdated/uptodate/plain listing, after warning
        about deprecated --allow-* flags that no longer have any effect."""
        if options.allow_external:
            warnings.warn(
                "--allow-external has been deprecated and will be removed in "
                "the future. Due to changes in the repository protocol, it no "
                "longer has any effect.",
                RemovedInPip10Warning,
            )
        if options.allow_all_external:
            warnings.warn(
                "--allow-all-external has been deprecated and will be removed "
                "in the future. Due to changes in the repository protocol, it "
                "no longer has any effect.",
                RemovedInPip10Warning,
            )
        if options.allow_unverified:
            warnings.warn(
                "--allow-unverified has been deprecated and will be removed "
                "in the future. Due to changes in the repository protocol, it "
                "no longer has any effect.",
                RemovedInPip10Warning,
            )
        if options.outdated and options.uptodate:
            raise CommandError(
                "Options --outdated and --uptodate cannot be combined.")
        if options.outdated:
            self.run_outdated(options)
        elif options.uptodate:
            self.run_uptodate(options)
        else:
            self.run_listing(options)
    def run_outdated(self, options):
        """Log every installed package whose index version is newer."""
        for dist, latest_version, typ in sorted(
                self.find_packages_latest_versions(options),
                key=lambda p: p[0].project_name.lower()):
            if latest_version > dist.parsed_version:
                logger.info(
                    '%s - Latest: %s [%s]',
                    self.output_package(dist), latest_version, typ,
                )
    def find_packages_latest_versions(self, options):
        """Yield (dist, latest_remote_version, 'wheel'|'sdist') for each
        installed distribution that has at least one index candidate."""
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            logger.info('Ignoring indexes: %s', ','.join(index_urls))
            index_urls = []
        # collect dependency links declared by installed distributions
        dependency_links = []
        for dist in get_installed_distributions(
                local_only=options.local,
                user_only=options.user,
                editables_only=options.editable):
            if dist.has_metadata('dependency_links.txt'):
                dependency_links.extend(
                    dist.get_metadata_lines('dependency_links.txt'),
                )
        with self._build_session(options) as session:
            finder = self._build_package_finder(options, index_urls, session)
            finder.add_dependency_links(dependency_links)
            installed_packages = get_installed_distributions(
                local_only=options.local,
                user_only=options.user,
                editables_only=options.editable,
            )
            for dist in installed_packages:
                typ = 'unknown'
                all_candidates = finder.find_all_candidates(dist.key)
                if not options.pre:
                    # Remove prereleases
                    all_candidates = [candidate for candidate in all_candidates
                                      if not candidate.version.is_prerelease]
                if not all_candidates:
                    continue
                # pick the best candidate by pip's own ranking
                best_candidate = max(all_candidates,
                                     key=finder._candidate_sort_key)
                remote_version = best_candidate.version
                if best_candidate.location.is_wheel:
                    typ = 'wheel'
                else:
                    typ = 'sdist'
                yield dist, remote_version, typ
    def run_listing(self, options):
        """Default mode: list every matching installed package."""
        installed_packages = get_installed_distributions(
            local_only=options.local,
            user_only=options.user,
            editables_only=options.editable,
        )
        self.output_package_listing(installed_packages)
    def output_package(self, dist):
        """Format one distribution; editables also show their location."""
        if dist_is_editable(dist):
            return '%s (%s, %s)' % (
                dist.project_name,
                dist.version,
                dist.location,
            )
        else:
            return '%s (%s)' % (dist.project_name, dist.version)
    def output_package_listing(self, installed_packages):
        """Log the packages in case-insensitive name order."""
        installed_packages = sorted(
            installed_packages,
            key=lambda dist: dist.project_name.lower(),
        )
        for dist in installed_packages:
            logger.info(self.output_package(dist))
    def run_uptodate(self, options):
        """List only the packages whose installed version matches the index."""
        uptodate = []
        for dist, version, typ in self.find_packages_latest_versions(options):
            if dist.parsed_version == version:
                uptodate.append(dist)
        self.output_package_listing(uptodate)
| gpl-3.0 |
micolous/metrodroid | extra/mct_to_mfcdump.py | 1 | 2747 | #!/usr/bin/env python3
# -*- mode: python; indent-tabs-mode: nil; tab-width: 2 -*-
"""
mct_to_mfcdump.py - Converts a dump from MIFARE Classic Tool to mfoc/mfcuk .mfc
format (raw data)
Copyright 2015-2018 Michael Farrell <micolous+git@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from argparse import ArgumentParser, FileType
from base64 import b16decode
# https://github.com/ikarus23/MifareClassicTool/blob/master/tools/example-files/example-dump-file.txt
# What an unreadable block looks like from MCT
UNREADABLE_BLOCK = '--------------------------------'
SECTOR_HEADER = '+Sector: '
# What to replace unreadable blocks with
DUMMY_REGULAR_BLOCK = '00000000000000000000000000000000'
DUMMY_TRAILER_BLOCK = 'FFFFFFFFFFFFFF078069FFFFFFFFFFFF'
def mct_to_mfc(input_f, output_f):
# File format:
# +Sector: 0
# repeated base16 encoded block data
# +Sector: 1
# ...
sector = -1
block = 0
for line in input_f:
line = line.strip()
if line.startswith(SECTOR_HEADER):
new_sector = int(line[len(SECTOR_HEADER):])
if new_sector != sector + 1:
raise Exception('Sectors missing from dump (expected %d, got %d)' % (sector + 1, new_sector))
sector = new_sector
block = 0
continue
if block > 15 or (sector < 31 and block > 3):
raise Exception("Excess blocks (%d) in sector %d" % (block, sector))
if line == UNREADABLE_BLOCK:
if block < 3 or (sector >= 31 and block < 15):
line = DUMMY_REGULAR_BLOCK
else:
line = DUMMY_TRAILER_BLOCK
if '--' in line:
line = line.replace('--', '00')
print("Replaced placeholder bytes at sector %d block %d with NULL" % (
sector, block))
line = b16decode(line)
output_f.write(line)
block += 1
output_f.flush()
output_f.close()
def main():
  """Command-line entry point: parse arguments and run the conversion."""
  parser = ArgumentParser()
  parser.add_argument(
    'input_mct', nargs=1, type=FileType('r'),
    help='MIFARE Classic Tool dump file to read')
  parser.add_argument(
    '-o', '--output', type=FileType('wb'),
    help='Output mfc dump')
  opts = parser.parse_args()
  mct_to_mfc(opts.input_mct[0], opts.output)
main()
| gpl-3.0 |
migua1204/jikexueyuan | 极客学院/BusStopBoard/node_modules/.3.4.0@node-gyp/gyp/pylib/gyp/flock_tool.py | 1835 | 1748 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""These functions are executed via gyp-flock-tool when using the Makefile
generator. Used on systems that don't have a built-in flock."""
import fcntl
import os
import struct
import subprocess
import sys
def main(args):
  """Entry point: hand the argument list to a fresh FlockTool dispatcher."""
  FlockTool().Dispatch(args)
class FlockTool(object):
  """This class emulates the 'flock' command on systems without it.
  Invoked via gyp-flock-tool as: flock <lockfile> <cmd> [args...]
  """
  def Dispatch(self, args):
    """Dispatches a string command to a method.
    args[0] is the dashed command name (e.g. 'flock'); the rest are
    forwarded as positional arguments to the Exec* method.
    """
    if len(args) < 1:
      raise Exception("Not enough arguments")
    method = "Exec%s" % self._CommandifyName(args[0])
    getattr(self, method)(*args[1:])
  def _CommandifyName(self, name_string):
    """Transforms a tool name like copy-info-plist to CopyInfoPlist."""
    return name_string.title().replace('-', '')
  def ExecFlock(self, lockfile, *cmd_list):
    """Emulates the most basic behavior of Linux's flock(1).
    Opens/creates lockfile, takes an exclusive write lock on it, then
    runs cmd_list and returns its exit status.
    """
    # Rely on exception handling to report errors.
    # Note that the stock python on SunOS has a bug
    # where fcntl.flock(fd, LOCK_EX) always fails
    # with EBADF, that's why we use this F_SETLK
    # hack instead.
    # NOTE: 0666 is a Python 2 octal literal (0o666 in Python 3); this
    # file targets Python 2.
    fd = os.open(lockfile, os.O_WRONLY|os.O_NOCTTY|os.O_CREAT, 0666)
    if sys.platform.startswith('aix'):
      # Python on AIX is compiled with LARGEFILE support, which changes the
      # struct flock layout, so a different pack format is required.
      op = struct.pack('hhIllqq', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
    else:
      op = struct.pack('hhllhhl', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
    fcntl.fcntl(fd, fcntl.F_SETLK, op)
    return subprocess.call(cmd_list)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| apache-2.0 |
chewable/django | django/contrib/localflavor/jp/jp_prefectures.py | 543 | 2089 | from django.utils.translation import ugettext_lazy
# (value, lazily-translated label) pairs for the 47 Japanese prefectures,
# presumably consumed as form/model field choices — labels are resolved
# at render time via ugettext_lazy.
JP_PREFECTURES = (
    ('hokkaido', ugettext_lazy('Hokkaido'),),
    ('aomori', ugettext_lazy('Aomori'),),
    ('iwate', ugettext_lazy('Iwate'),),
    ('miyagi', ugettext_lazy('Miyagi'),),
    ('akita', ugettext_lazy('Akita'),),
    ('yamagata', ugettext_lazy('Yamagata'),),
    ('fukushima', ugettext_lazy('Fukushima'),),
    ('ibaraki', ugettext_lazy('Ibaraki'),),
    ('tochigi', ugettext_lazy('Tochigi'),),
    ('gunma', ugettext_lazy('Gunma'),),
    ('saitama', ugettext_lazy('Saitama'),),
    ('chiba', ugettext_lazy('Chiba'),),
    ('tokyo', ugettext_lazy('Tokyo'),),
    ('kanagawa', ugettext_lazy('Kanagawa'),),
    ('yamanashi', ugettext_lazy('Yamanashi'),),
    ('nagano', ugettext_lazy('Nagano'),),
    ('niigata', ugettext_lazy('Niigata'),),
    ('toyama', ugettext_lazy('Toyama'),),
    ('ishikawa', ugettext_lazy('Ishikawa'),),
    ('fukui', ugettext_lazy('Fukui'),),
    ('gifu', ugettext_lazy('Gifu'),),
    ('shizuoka', ugettext_lazy('Shizuoka'),),
    ('aichi', ugettext_lazy('Aichi'),),
    ('mie', ugettext_lazy('Mie'),),
    ('shiga', ugettext_lazy('Shiga'),),
    ('kyoto', ugettext_lazy('Kyoto'),),
    ('osaka', ugettext_lazy('Osaka'),),
    ('hyogo', ugettext_lazy('Hyogo'),),
    ('nara', ugettext_lazy('Nara'),),
    ('wakayama', ugettext_lazy('Wakayama'),),
    ('tottori', ugettext_lazy('Tottori'),),
    ('shimane', ugettext_lazy('Shimane'),),
    ('okayama', ugettext_lazy('Okayama'),),
    ('hiroshima', ugettext_lazy('Hiroshima'),),
    ('yamaguchi', ugettext_lazy('Yamaguchi'),),
    ('tokushima', ugettext_lazy('Tokushima'),),
    ('kagawa', ugettext_lazy('Kagawa'),),
    ('ehime', ugettext_lazy('Ehime'),),
    ('kochi', ugettext_lazy('Kochi'),),
    ('fukuoka', ugettext_lazy('Fukuoka'),),
    ('saga', ugettext_lazy('Saga'),),
    ('nagasaki', ugettext_lazy('Nagasaki'),),
    ('kumamoto', ugettext_lazy('Kumamoto'),),
    ('oita', ugettext_lazy('Oita'),),
    ('miyazaki', ugettext_lazy('Miyazaki'),),
    ('kagoshima', ugettext_lazy('Kagoshima'),),
    ('okinawa', ugettext_lazy('Okinawa'),),
)
| bsd-3-clause |
fivejjs/GPy | GPy/models/bcgplvm.py | 4 | 1639 | # Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ..core import GP
from ..models import GPLVM
from ..mappings import *
class BCGPLVM(GPLVM):
    """
    Back constrained Gaussian Process Latent Variable Model
    :param Y: observed data
    :type Y: np.ndarray
    :param input_dim: latent dimensionality
    :type input_dim: int
    :param init: initialisation method for the latent space
    :type init: 'PCA'|'random'
    :param mapping: mapping for back constraint
    :type mapping: GPy.core.Mapping object
    """
    def __init__(self, Y, input_dim, init='PCA', X=None, kernel=None, normalize_Y=False, mapping=None):
        # Default back constraint: a kernel-based mapping from data space Y
        # into the input_dim-dimensional latent space.
        if mapping is None:
            mapping = Kernel(X=Y, output_dim=input_dim)
        self.mapping = mapping
        GPLVM.__init__(self, Y, input_dim, init, X, kernel, normalize_Y)
        # The latent positions are constrained to be the mapping's image of
        # the (possibly normalized) data, overriding GPLVM's free X.
        self.X = self.mapping.f(self.likelihood.Y)
    def _get_param_names(self):
        # Parameter vector layout: mapping parameters first, then GP ones —
        # must stay consistent with _get_params/_set_params below.
        return self.mapping._get_param_names() + GP._get_param_names(self)
    def _get_params(self):
        return np.hstack((self.mapping._get_params(), GP._get_params(self)))
    def _set_params(self, x):
        # Split x at mapping.num_params, matching the layout above; the
        # latent X is recomputed whenever the mapping changes.
        self.mapping._set_params(x[:self.mapping.num_params])
        self.X = self.mapping.f(self.likelihood.Y)
        GP._set_params(self, x[self.mapping.num_params:])
    def _log_likelihood_gradients(self):
        # Chain rule: dL/dX from the GP, pushed back through the mapping to
        # get gradients w.r.t. the mapping's own parameters.
        dL_df = self.kern.gradients_X(self.dL_dK, self.X)
        dL_dtheta = self.mapping.df_dtheta(dL_df, self.likelihood.Y)
        return np.hstack((dL_dtheta.flatten(), GP._log_likelihood_gradients(self)))
| bsd-3-clause |
aloverso/SoftwareSystems | hw04/wave3/thinkdsp.py | 23 | 31996 | """This file contains code used in "Think DSP",
by Allen B. Downey, available from greenteapress.com
Copyright 2013 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import array
import math
import numpy
import random
import scipy
import scipy.stats
import struct
import subprocess
import thinkplot
from fractions import gcd
from wave import open as open_wave
import matplotlib.pyplot as pyplot
PI2 = math.pi * 2
def random_seed(x):
    """Seed both the stdlib and NumPy random generators for reproducibility.
    x: int seed
    """
    numpy.random.seed(x)
    random.seed(x)
class UnimplementedMethodException(Exception):
    """Raised when a subclass fails to override a method it must provide."""
class WavFileWriter(object):
    """Writes wav files (16-bit mono PCM)."""
    def __init__(self, filename='sound.wav', framerate=11025):
        """Opens the file and sets parameters.
        filename: string
        framerate: samples per second
        """
        self.filename = filename
        self.framerate = framerate
        self.nchannels = 1
        self.sampwidth = 2
        self.bits = self.sampwidth * 8
        # largest positive sample value for 16-bit audio (32767)
        self.bound = 2**(self.bits-1) - 1
        self.fmt = 'h'
        self.dtype = numpy.int16
        self.fp = open_wave(self.filename, 'w')
        self.fp.setnchannels(self.nchannels)
        self.fp.setsampwidth(self.sampwidth)
        self.fp.setframerate(self.framerate)
    def write(self, wave):
        """Writes a wave.
        wave: Wave
        """
        # quantize the float samples into int16 frames before writing
        zs = wave.quantize(self.bound, self.dtype)
        self.fp.writeframes(zs.tostring())
    def close(self, duration=0):
        """Closes the file.
        duration: how many seconds of silence to append
        """
        if duration:
            # rest() is assumed to build a silent Wave — defined elsewhere
            # in this module; confirm before relying on it here.
            self.write(rest(duration))
        self.fp.close()
def read_wave(filename='sound.wav'):
    """Read a wav file from disk.
    filename: string path of the file to read
    returns: Wave holding the raw samples
    """
    reader = open_wave(filename, 'r')
    nchannels = reader.getnchannels()
    nframes = reader.getnframes()
    sampwidth = reader.getsampwidth()
    framerate = reader.getframerate()
    frames = reader.readframes(nframes)
    reader.close()
    # only 8-bit and 16-bit PCM are supported
    dtype_map = {1: numpy.int8, 2: numpy.int16}
    assert sampwidth in dtype_map
    samples = numpy.fromstring(frames, dtype=dtype_map[sampwidth])
    return Wave(samples, framerate)
def play_wave(filename='sound.wav', player='aplay'):
    """Play a wav file through an external player program.
    filename: string
    player: string name of executable that plays wav files
    """
    proc = subprocess.Popen('%s %s' % (player, filename), shell=True)
    proc.communicate()
class _SpectrumParent(object):
    """Contains code common to Spectrum and DCT.
    Subclasses provide self.framerate, self.fs, self.amps and self.power.
    """
    @property
    def max_freq(self):
        # Nyquist frequency
        return self.framerate / 2.0
    @property
    def freq_res(self):
        # spacing between adjacent frequency bins in Hz
        return self.max_freq / (len(self.fs) - 1)
    def plot(self, low=0, high=None, **options):
        """Plots amplitude vs frequency.
        low: int index to start at
        high: int index to end at
        """
        thinkplot.plot(self.fs[low:high], self.amps[low:high], **options)
    def plot_power(self, low=0, high=None, **options):
        """Plots power vs frequency.
        low: int index to start at
        high: int index to end at
        """
        thinkplot.plot(self.fs[low:high], self.power[low:high], **options)
    def estimate_slope(self):
        """Runs linear regression on log power vs log frequency.
        Skips index 0 to avoid log(0) at the DC component.
        returns: slope, inter, r2, p, stderr
        """
        x = numpy.log(self.fs[1:])
        y = numpy.log(self.power[1:])
        t = scipy.stats.linregress(x,y)
        return t
    def peaks(self):
        """Finds the highest peaks and their frequencies.
        NOTE: relies on Python 2 semantics — zip() returning a list with a
        .sort() method; under Python 3 this would fail.
        returns: sorted list of (amplitude, frequency) pairs
        """
        t = zip(self.amps, self.fs)
        t.sort(reverse=True)
        return t
class Spectrum(_SpectrumParent):
    """Represents the spectrum of a signal.
    hs: complex FFT coefficients; fs: matching frequencies in Hz.
    """
    def __init__(self, hs, framerate):
        self.hs = hs
        self.framerate = framerate
        n = len(hs)
        self.fs = numpy.linspace(0, self.max_freq, n)
    def __add__(self, other):
        """Adds two spectra elementwise; 0 is supported so sum() works."""
        if other == 0:
            return self
        assert self.framerate == other.framerate
        hs = self.hs + other.hs
        return Spectrum(hs, self.framerate)
    __radd__ = __add__
    @property
    def real(self):
        """Returns the real part of the hs (read-only property)."""
        return numpy.real(self.hs)
    @property
    def imag(self):
        """Returns the imaginary part of the hs (read-only property)."""
        return numpy.imag(self.hs)
    @property
    def amps(self):
        """Returns a sequence of amplitudes (read-only property)."""
        return numpy.absolute(self.hs)
    @property
    def power(self):
        """Returns a sequence of powers (read-only property)."""
        return self.amps ** 2
    def low_pass(self, cutoff, factor=0):
        """Attenuate frequencies above the cutoff.
        cutoff: frequency in Hz
        factor: what to multiply the magnitude by
        """
        for i in xrange(len(self.hs)):
            if self.fs[i] > cutoff:
                self.hs[i] *= factor
    def high_pass(self, cutoff, factor=0):
        """Attenuate frequencies below the cutoff.
        cutoff: frequency in Hz
        factor: what to multiply the magnitude by
        """
        for i in xrange(len(self.hs)):
            if self.fs[i] < cutoff:
                self.hs[i] *= factor
    def band_stop(self, low_cutoff, high_cutoff, factor=0):
        """Attenuate frequencies between the cutoffs.
        low_cutoff: frequency in Hz
        high_cutoff: frequency in Hz
        factor: what to multiply the magnitude by
        """
        for i in xrange(len(self.hs)):
            if low_cutoff < self.fs[i] < high_cutoff:
                # BUG FIX: previously assigned 0 unconditionally, silently
                # ignoring factor; now multiplies like low_pass/high_pass.
                # Identical result for the default factor=0.
                self.hs[i] *= factor
    def pink_filter(self, beta=1):
        """Apply a filter that would make white noise pink.
        beta: exponent of the pink noise
        """
        denom = self.fs ** (beta/2.0)
        # avoid dividing the DC component by 0
        denom[0] = 1
        self.hs /= denom
    def angles(self, i):
        """Computes phase angles in radians.
        NOTE: parameter i is unused; kept for interface compatibility.
        returns: list of phase angles
        """
        return numpy.angle(self.hs)
    def make_integrated_spectrum(self):
        """Makes an integrated spectrum (cumulative power, normalized to 1).
        """
        cs = numpy.cumsum(self.power)
        cs /= cs[-1]
        return IntegratedSpectrum(cs, self.fs)
    def make_wave(self):
        """Transforms to the time domain.
        returns: Wave
        """
        ys = numpy.fft.irfft(self.hs)
        return Wave(ys, self.framerate)
class IntegratedSpectrum(object):
    """Represents the integral (cumulative sum) of a spectrum."""
    def __init__(self, cs, fs):
        """Initializes an integrated spectrum:
        cs: sequence of cumulative amplitudes
        fs: sequence of frequences
        """
        self.cs = cs
        self.fs = fs
    def plot_power(self, low=0, high=None, expo=False, **options):
        """Plots the integrated spectrum.
        low: int index to start at
        high: int index to end at
        expo: if True, plot exp(cs) instead of cs
        """
        cs = self.cs[low:high]
        fs = self.fs[low:high]
        if expo:
            cs = numpy.exp(cs)
        thinkplot.Plot(fs, cs, **options)
    def estimate_slope(self, low=1, high=-12000):
        """Runs linear regression on log cumulative power vs log frequency.
        low/high: slice bounds; the defaults skip the DC bin and trim the
        top of the range (the -12000 default presumably matches the book's
        example data — confirm before reusing elsewhere).
        returns: slope, inter, r2, p, stderr
        """
        x = numpy.log(self.fs[low:high])
        y = numpy.log(self.cs[low:high])
        t = scipy.stats.linregress(x,y)
        return t
class Dct(_SpectrumParent):
    """Represents the DCT spectrum of a signal."""
    def __init__(self, amps, framerate):
        self.amps = amps
        self.framerate = framerate
        n = len(amps)
        # evenly spaced frequencies from 0 up to (but excluding) max_freq
        self.fs = numpy.arange(n) / float(n) * self.max_freq
    def make_wave(self):
        """Transforms to the time domain.
        Uses a type-3 DCT scaled by 1/2 — presumably the inverse of the
        type-2 DCT used to build these amps (see Wave.make_dct).
        returns: Wave
        """
        ys = scipy.fftpack.dct(self.amps, type=3) / 2
        return Wave(ys, self.framerate)
class Spectrogram(object):
    """Represents a short-time spectrum of a signal: a map from segment
    midpoint times to Spectrum objects.
    NOTE: uses Python 2 dict iterators (itervalues/iterkeys/iteritems).
    """
    def __init__(self, spec_map, seg_length, window_func=None):
        """Initialize the spectrogram.
        spec_map: map from float time to Spectrum
        seg_length: number of samples in each segment
        window_func: function that computes the window
        """
        self.spec_map = spec_map
        self.seg_length = seg_length
        self.window_func = window_func
    def any_spectrum(self):
        """Returns an arbitrary spectrum from the spectrogram."""
        return self.spec_map.itervalues().next()
    @property
    def time_res(self):
        """Time resolution in seconds."""
        spectrum = self.any_spectrum()
        return float(self.seg_length) / spectrum.framerate
    @property
    def freq_res(self):
        """Frequency resolution in Hz."""
        return self.any_spectrum().freq_res
    def times(self):
        """Sorted sequence of times.
        returns: sequence of float times in seconds
        """
        ts = sorted(self.spec_map.iterkeys())
        return ts
    def frequencies(self):
        """Sequence of frequencies.
        returns: sequence of float freqencies in Hz.
        """
        fs = self.any_spectrum().fs
        return fs
    def plot(self, low=0, high=None, **options):
        """Make a pseudocolor plot.
        low: index of the lowest frequency component to plot
        high: index of the highest frequency component to plot
        """
        ts = self.times()
        fs = self.frequencies()[low:high]
        # make the array
        size = len(fs), len(ts)
        array = numpy.zeros(size, dtype=numpy.float)
        # copy amplitude from each spectrum into a column of the array
        for i, t in enumerate(ts):
            spectrum = self.spec_map[t]
            array[:,i] = spectrum.amps[low:high]
        thinkplot.pcolor(ts, fs, array, **options)
    def make_wave(self):
        """Inverts the spectrogram and returns a Wave.
        Each segment is inverted, un-windowed, and written back at its
        original position; later segments overwrite overlapping samples.
        returns: Wave
        """
        res = []
        for t, spectrum in sorted(self.spec_map.iteritems()):
            wave = spectrum.make_wave()
            n = len(wave)
            if self.window_func:
                # undo the analysis window (assumes it is nonzero everywhere)
                window = 1 / self.window_func(n)
                wave.window(window)
            # place the segment so its center lands at time t
            # (n / 2 is integer division under Python 2)
            i = int(round(t * wave.framerate))
            start = i - n / 2
            end = start + n
            res.append((start, end, wave))
        starts, ends, waves = zip(*res)
        low = min(starts)
        high = max(ends)
        ys = numpy.zeros(high-low, numpy.float)
        for start, end, wave in res:
            ys[start:end] = wave.ys
        return Wave(ys, wave.framerate)
class Wave(object):
    """Represents a discrete-time waveform.
    Note: the ys attribute is a "wave array" which is a numpy
    array of floats.
    """
    def __init__(self, ys, framerate, start=0):
        """Initializes the wave.
        ys: wave array
        framerate: samples per second
        start: float start time in seconds
        """
        self.ys = ys
        self.framerate = framerate
        self.start = start
    def __len__(self):
        return len(self.ys)
    @property
    def duration(self):
        """Duration (property).
        returns: float duration in seconds
        """
        return len(self.ys) / float(self.framerate)
    def __or__(self, other):
        """Concatenates two waves.
        other: Wave
        returns: Wave
        """
        if self.framerate != other.framerate:
            raise ValueError('Wave.__or__: framerates do not agree')
        ys = numpy.concatenate((self.ys, other.ys))
        return Wave(ys, self.framerate)
    def quantize(self, bound, dtype):
        """Maps the waveform to quanta.
        bound: maximum amplitude
        dtype: numpy data type or string
        returns: quantized signal
        """
        return quantize(self.ys, bound, dtype)
    def apodize(self, denom=20, duration=0.1):
        """Tapers the amplitude at the beginning and end of the signal.
        Tapers either the given duration of time or the given
        fraction of the total duration, whichever is less.
        denom: float fraction of the segment to taper
        duration: float duration of the taper in seconds
        """
        self.ys = apodize(self.ys, self.framerate, denom, duration)
    def hamming(self):
        """Apply a Hamming window to the wave (in place).
        """
        self.ys *= numpy.hamming(len(self.ys))
    def window(self, window):
        """Apply a window to the wave (in place).
        window: sequence of multipliers, same length as self.ys
        """
        self.ys *= window
    def normalize(self, amp=1.0):
        """Normalizes the signal to the given amplitude (in place).
        amp: float amplitude
        """
        self.ys = normalize(self.ys, amp=amp)
    def unbias(self):
        """Unbiases the signal (in place): shifts it to mean 0.
        """
        self.ys = unbias(self.ys)
    def segment(self, start=0, duration=None):
        """Extracts a segment.
        start: float start time in seconds
        duration: float duration in seconds; None means "to the end"
        returns: Wave
        """
        # NOTE(review): start * framerate may be a float index under
        # Python 3; this code assumes Python 2 int semantics.
        i = start * self.framerate
        if duration is None:
            j = None
        else:
            j = i + duration * self.framerate
        ys = self.ys[i:j]
        return Wave(ys, self.framerate)
    def make_spectrum(self):
        """Computes the spectrum using FFT.
        returns: Spectrum
        """
        hs = numpy.fft.rfft(self.ys)
        return Spectrum(hs, self.framerate)
    def make_dct(self):
        # type-2 DCT; inverted by Dct.make_wave
        amps = scipy.fftpack.dct(self.ys, type=2)
        return Dct(amps, self.framerate)
    def make_spectrogram(self, seg_length, window_func=numpy.hamming):
        """Computes the spectrogram of the wave.
        Segments overlap by half (step = seg_length / 2, integer division
        under Python 2); each is windowed and FFT'd, keyed by its
        midpoint time.
        seg_length: number of samples in each segment
        window_func: function used to compute the window
        returns: Spectrogram
        """
        n = len(self.ys)
        window = window_func(seg_length)
        start, end, step = 0, seg_length, seg_length / 2
        spec_map = {}
        while end < n:
            ys = self.ys[start:end] * window
            hs = numpy.fft.rfft(ys)
            t = (start + end) / 2.0 / self.framerate
            spec_map[t] = Spectrum(hs, self.framerate)
            start += step
            end += step
        return Spectrogram(spec_map, seg_length, window_func)
    def plot(self, **options):
        """Plots the wave.
        """
        n = len(self.ys)
        ts = numpy.linspace(0, self.duration, n)
        thinkplot.plot(ts, self.ys, **options)
    def corr(self, other):
        """Correlation coefficient of two waves.
        other: Wave
        returns: float correlation coefficient
        """
        mat = self.cov_mat(other)
        corr = mat[0][1] / math.sqrt(mat[0][0] * mat[1][1])
        return corr
    def cov_mat(self, other):
        """Covariance matrix of two waves.
        other: Wave
        returns: 2x2 covariance matrix
        """
        return numpy.cov(self.ys, other.ys)
    def cov(self, other):
        """Covariance of two unbiased waves.
        other: Wave
        returns: float
        """
        total = sum(self.ys * other.ys) / len(self.ys)
        return total
    def cos_cov(self, k):
        """Covariance with a cosine signal of harmonic k.
        k: int frequency index of the cosine basis function
        returns: float covariance
        """
        n = len(self.ys)
        factor = math.pi * k / n
        ys = [math.cos(factor * (i+0.5)) for i in range(n)]
        total = 2 * sum(self.ys * ys)
        return total
    def cos_transform(self):
        """Discrete cosine transform, computed term by term.
        returns: list of (frequency index, cov) pairs
        """
        n = len(self.ys)
        res = []
        for k in range(n):
            cov = self.cos_cov(k)
            res.append((k, cov))
        return res
    def write(self, filename='sound.wav'):
        """Write a wave file.
        NOTE: Python 2 print statement below; this module targets Python 2.
        filename: string
        """
        print 'Writing', filename
        wfile = WavFileWriter(filename, self.framerate)
        wfile.write(self)
        wfile.close()
    def play(self, filename='sound.wav'):
        """Writes the wave to a file and plays it with an external player.
        filename: string
        """
        self.write(filename)
        play_wave(filename)
def unbias(ys):
    """Shifts a wave array so it has mean 0.
    ys: wave array
    returns: wave array
    """
    mean = ys.mean()
    return ys - mean
def normalize(ys, amp=1.0):
    """Scales a wave array so its extreme amplitude is +amp or -amp.
    ys: wave array
    amp: max amplitude (pos or neg) in result
    returns: wave array
    """
    # largest magnitude over both the positive and negative extremes
    peak = max(abs(max(ys)), abs(min(ys)))
    return amp * ys / peak
def quantize(ys, bound, dtype):
    """Maps the waveform to quanta.

    ys: wave array
    bound: maximum amplitude
    dtype: numpy data type of the result

    returns: quantized signal
    """
    if max(ys) > 1 or min(ys) < -1:
        # Print via a single string so this runs under Python 2 and 3.
        print('Warning: normalizing before quantizing.')
        ys = normalize(ys)

    zs = (ys * bound).astype(dtype)
    return zs
def apodize(ys, framerate, denom=20, duration=0.1):
    """Tapers the amplitude at the beginning and end of the signal.

    Tapers either the given duration of time or the given
    fraction of the total duration, whichever is less.

    ys: wave array
    framerate: int frames per second
    denom: int denominator; 1/denom is the fraction of the segment to taper
    duration: float duration of the taper in seconds

    returns: wave array
    """
    n = len(ys)

    # Taper length as a fixed fraction of the segment.  Bug fix: use floor
    # division so the count stays an integer under Python 3 as well
    # (numpy.linspace requires an integral sample count).
    k1 = n // denom

    # Taper length as a fixed duration of time.
    k2 = int(duration * framerate)

    k = min(k1, k2)

    # Ramp up, hold at full amplitude, ramp down.
    w1 = numpy.linspace(0, 1, k)
    w2 = numpy.ones(n - 2*k)
    w3 = numpy.linspace(1, 0, k)

    window = numpy.concatenate((w1, w2, w3))
    return ys * window
class Signal(object):
    """Represents a time-varying signal."""

    def __add__(self, other):
        """Adds two signals.

        other: Signal

        returns: Signal
        """
        # sum() starts from 0, so treat 0 as the additive identity.
        if other == 0:
            return self
        return SumSignal(self, other)

    __radd__ = __add__

    @property
    def period(self):
        """Period of the signal in seconds (property).

        For non-periodic signals, use the default, 0.1 seconds.

        returns: float seconds
        """
        return 0.1

    def plot(self, framerate=11025):
        """Plots three periods of the signal.

        framerate: samples per second
        """
        span = self.period * 3
        self.make_wave(span, start=0, framerate=framerate).plot()

    def make_wave(self, duration=1, start=0, framerate=11025):
        """Makes a Wave object by sampling the signal.

        duration: float seconds
        start: float seconds
        framerate: int frames per second

        returns: Wave
        """
        step = 1.0 / framerate
        ts = numpy.arange(start, duration, step)
        return Wave(self.evaluate(ts), framerate=framerate, start=start)
def infer_framerate(ts):
    """Given ts, find the framerate.

    Assumes that the ts are equally spaced.

    ts: sequence of times in seconds

    returns: frames per second
    """
    spacing = ts[1] - ts[0]
    return 1.0 / spacing
class SumSignal(Signal):
    """Represents the sum of signals."""

    def __init__(self, *args):
        """Initializes the sum.

        args: tuple of signals
        """
        self.signals = args

    @property
    def period(self):
        """Period of the signal in seconds.

        Note: this is not correct; it's mostly a placekeeper.
        But it is correct for a harmonic sequence where all
        component frequencies are multiples of the fundamental.

        returns: float seconds
        """
        periods = [sig.period for sig in self.signals]
        return max(periods)

    def evaluate(self, ts):
        """Evaluates the signal at the given times.

        ts: float array of times

        returns: float wave array
        """
        total = 0
        for sig in self.signals:
            total = total + sig.evaluate(ts)
        return total
class Sinusoid(Signal):
    """Represents a sinusoidal signal."""

    def __init__(self, freq=440, amp=1.0, offset=0, func=numpy.sin):
        """Initializes a sinusoidal signal.

        freq: float frequency in Hz
        amp: float amplitude, 1.0 is nominal max
        offset: float phase offset in radians
        func: function that maps phase to amplitude
        """
        self.freq = freq
        self.amp = amp
        self.offset = offset
        self.func = func

    @property
    def period(self):
        """Period of the signal in seconds.

        returns: float seconds
        """
        return 1.0 / self.freq

    def evaluate(self, ts):
        """Evaluates the signal at the given times.

        ts: float array of times

        returns: float wave array
        """
        # Convert each time to a phase in radians, then map to amplitude.
        phases = PI2 * self.freq * ts + self.offset
        return self.amp * self.func(phases)
def CosSignal(freq=440, amp=1.0, offset=0):
    """Makes a cosine Sinusoid.

    freq: float frequency in Hz
    amp: float amplitude, 1.0 is nominal max
    offset: float phase offset in radians

    returns: Sinusoid object
    """
    return Sinusoid(freq=freq, amp=amp, offset=offset, func=numpy.cos)
def SinSignal(freq=440, amp=1.0, offset=0):
    """Makes a sine Sinusoid.

    freq: float frequency in Hz
    amp: float amplitude, 1.0 is nominal max
    offset: float phase offset in radians

    returns: Sinusoid object
    """
    return Sinusoid(freq=freq, amp=amp, offset=offset, func=numpy.sin)
class SquareSignal(Sinusoid):
    """Represents a square signal."""

    def evaluate(self, ts):
        """Evaluates the signal at the given times.

        ts: float array of times

        returns: float wave array
        """
        # The fractional part of the cycle count is the phase in [0, 1).
        cycles = self.freq * ts + self.offset / PI2
        frac, _ = numpy.modf(cycles)
        # Center around zero and keep only the sign: +amp or -amp.
        return self.amp * numpy.sign(unbias(frac))
class SawtoothSignal(Sinusoid):
    """Represents a sawtooth signal."""

    def evaluate(self, ts):
        """Evaluates the signal at the given times.

        ts: float array of times

        returns: float wave array
        """
        # The fractional part of the cycle count ramps linearly in [0, 1).
        cycles = self.freq * ts + self.offset / PI2
        frac, _ = numpy.modf(cycles)
        return normalize(unbias(frac), self.amp)
class ParabolicSignal(Sinusoid):
    """Represents a parabolic signal."""

    def evaluate(self, ts):
        """Evaluates the signal at the given times.

        ts: float array of times

        returns: float wave array
        """
        # Square the phase ramp to get one parabola per cycle.
        cycles = self.freq * ts + self.offset / PI2
        frac, _ = numpy.modf(cycles)
        return normalize(unbias(frac**2), self.amp)
class GlottalSignal(Sinusoid):
    """Represents a periodic signal that resembles a glottal signal."""

    def evaluate(self, ts):
        """Evaluates the signal at the given times.

        ts: float array of times

        returns: float wave array
        """
        # frac**4 * (1-frac) gives a skewed pulse shape per cycle.
        cycles = self.freq * ts + self.offset / PI2
        frac, _ = numpy.modf(cycles)
        return normalize(unbias(frac**4 * (1-frac)), self.amp)
class TriangleSignal(Sinusoid):
    """Represents a triangle signal."""

    def evaluate(self, ts):
        """Evaluates the signal at the given times.

        ts: float array of times

        returns: float wave array
        """
        # Distance of the phase from mid-cycle gives a triangle shape.
        cycles = self.freq * ts + self.offset / PI2
        frac, _ = numpy.modf(cycles)
        return normalize(unbias(numpy.abs(frac - 0.5)), self.amp)
class Chirp(Signal):
    """Represents a signal with variable frequency."""

    def __init__(self, start=440, end=880, amp=1.0):
        """Initializes a linear chirp.

        start: float frequency in Hz
        end: float frequency in Hz
        amp: float amplitude, 1.0 is nominal max
        """
        self.start = start
        self.end = end
        self.amp = amp

    @property
    def period(self):
        """Period of the signal in seconds.

        A chirp is not periodic, so asking for its period is an error.

        raises: ValueError
        """
        # Bug fix: the exception was previously *returned* instead of
        # raised, so callers failed later with a confusing TypeError.
        raise ValueError('Non-periodic signal.')

    def evaluate(self, ts):
        """Evaluates the signal at the given times.

        ts: float array of times

        returns: float wave array
        """
        # One frequency per interval between samples, swept linearly.
        freqs = numpy.linspace(self.start, self.end, len(ts)-1)
        return self._evaluate(ts, freqs)

    def _evaluate(self, ts, freqs):
        """Helper function that evaluates the signal.

        ts: float array of times
        freqs: float array of frequencies during each interval
        """
        # Integrate frequency over time to get phase: each interval
        # contributes 2*pi*f*dt radians; prepend phase 0 for the first
        # sample so the output has the same length as ts.
        dts = numpy.diff(ts)
        dps = PI2 * freqs * dts
        phases = numpy.cumsum(dps)
        phases = numpy.insert(phases, 0, 0)
        ys = self.amp * numpy.cos(phases)
        return ys
class ExpoChirp(Chirp):
    """Represents a signal whose frequency varies exponentially."""

    def evaluate(self, ts):
        """Evaluates the signal at the given times.

        ts: float array of times

        returns: float wave array
        """
        # Space the swept frequencies evenly on a log scale.
        lo, hi = math.log10(self.start), math.log10(self.end)
        freqs = numpy.logspace(lo, hi, len(ts)-1)
        return self._evaluate(ts, freqs)
class SilentSignal(Signal):
    """Represents silence."""

    def evaluate(self, ts):
        """Evaluates the signal at the given times.

        ts: float array of times

        returns: float wave array (all zeros)
        """
        n = len(ts)
        return numpy.zeros(n)
class _Noise(Signal):
    """Represents a noise signal (abstract parent class)."""

    def __init__(self, amp=1.0):
        """Initializes a white noise signal.

        amp: float amplitude, 1.0 is nominal max
        """
        self.amp = amp

    @property
    def period(self):
        """Period of the signal in seconds.

        Noise is not periodic, so asking for its period is an error.

        raises: ValueError
        """
        # Bug fix: raise the exception rather than returning the
        # exception object (which callers would then try to use).
        raise ValueError('Non-periodic signal.')
class UncorrelatedUniformNoise(_Noise):
    """Represents uncorrelated uniform noise."""

    def evaluate(self, ts):
        """Evaluates the signal at the given times.

        ts: float array of times

        returns: float wave array
        """
        # One independent uniform draw in [-amp, amp) per sample.
        n = len(ts)
        return numpy.random.uniform(-self.amp, self.amp, n)
class UncorrelatedGaussianNoise(_Noise):
    """Represents uncorrelated gaussian noise."""

    def evaluate(self, ts):
        """Evaluates the signal at the given times.

        ts: float array of times

        returns: float wave array
        """
        # Standard normal draws, rescaled so the peak equals amp.
        samples = numpy.random.normal(0, 1, len(ts))
        return normalize(samples, self.amp)
class BrownianNoise(_Noise):
    """Represents Brownian noise, aka red noise."""

    def evaluate(self, ts):
        """Evaluates the signal at the given times.

        Computes Brownian noise by integrating a uniform random series.

        ts: float array of times

        returns: float wave array
        """
        steps = numpy.random.uniform(-1, 1, len(ts))
        # NOTE: cumtrapz returns one fewer sample than its input.
        walk = scipy.integrate.cumtrapz(steps, ts)
        return normalize(unbias(walk), self.amp)
class PinkNoise(_Noise):
    """Represents pink noise: power density falls off as 1/f**beta."""
    def __init__(self, amp=1.0, beta=1.0):
        """Initializes a pink noise signal.

        amp: float amplitude, 1.0 is nominal max
        beta: float exponent of the 1/f**beta power spectrum
        """
        self.amp = amp
        self.beta = beta
    def make_wave(self, duration=1, start=0, framerate=11025):
        """Makes a Wave object.

        Generates white noise, then filters its spectrum to give it
        a pink power distribution.

        duration: float seconds
        start: float seconds
        framerate: int frames per second

        returns: Wave
        """
        signal = UncorrelatedUniformNoise()
        wave = signal.make_wave(duration, start, framerate)
        spectrum = wave.make_spectrum()
        # Attenuate high frequencies according to beta.
        spectrum.pink_filter(beta=self.beta)
        wave2 = spectrum.make_wave()
        wave2.unbias()
        wave2.normalize(self.amp)
        return wave2
def rest(duration):
    """Makes a rest (silence) of the given duration.

    duration: float seconds

    returns: Wave
    """
    return SilentSignal().make_wave(duration)
def make_note(midi_num, duration, sig_cons=CosSignal, framerate=11025):
    """Make a MIDI note with the given duration.

    midi_num: int MIDI note number
    duration: float seconds
    sig_cons: Signal constructor function
    framerate: int frames per second

    returns: Wave
    """
    signal = sig_cons(midi_to_freq(midi_num))
    wave = signal.make_wave(duration, framerate=framerate)
    wave.apodize()
    return wave
def make_chord(midi_nums, duration, sig_cons=CosSignal, framerate=11025):
    """Make a chord with the given duration.

    midi_nums: sequence of int MIDI note numbers
    duration: float seconds
    sig_cons: Signal constructor function
    framerate: int frames per second

    returns: Wave
    """
    # Sum one sinusoid per note; Signal.__add__ absorbs sum()'s 0 start.
    signal = sum(sig_cons(midi_to_freq(num)) for num in midi_nums)
    wave = signal.make_wave(duration, framerate=framerate)
    wave.apodize()
    return wave
def midi_to_freq(midi_num):
    """Converts MIDI note number to frequency.

    midi_num: int MIDI note number

    returns: float frequency in Hz
    """
    # A4 (MIDI 69) is 440 Hz; each octave of 12 semitones doubles it.
    octaves = (midi_num - 69) / 12.0
    return 440.0 * 2**octaves
def sin_wave(freq, duration=1, offset=0):
    """Makes a sine wave with the given parameters.

    freq: float cycles per second
    duration: float seconds
    offset: float radians

    returns: Wave
    """
    return SinSignal(freq, offset=offset).make_wave(duration)
def cos_wave(freq, duration=1, offset=0):
    """Makes a cosine wave with the given parameters.

    freq: float cycles per second
    duration: float seconds
    offset: float radians

    returns: Wave
    """
    return CosSignal(freq, offset=offset).make_wave(duration)
def mag(a):
    """Computes the magnitude of a numpy array.

    a: numpy array

    returns: float
    """
    squared = numpy.dot(a, a)
    return numpy.sqrt(squared)
def main():
    """Exercises the sound classes interactively.

    The early ``return`` statements act as a crude switch: only the
    first section runs; the sections below are kept as scratch code
    for manual experimentation.
    """
    cos_basis = cos_wave(440)
    sin_basis = sin_wave(440)

    wave = cos_wave(440, offset=math.pi/2)

    cos_cov = cos_basis.cov(wave)
    sin_cov = sin_basis.cov(wave)
    # Print a single formatted string so the output is identical under
    # Python 2 (print statement) and Python 3 (print function).
    print('%s %s %s' % (cos_cov, sin_cov, mag((cos_cov, sin_cov))))
    return

    # --- unreachable scratch sections below ---
    wfile = WavFileWriter()
    for sig_cons in [SinSignal, TriangleSignal, SawtoothSignal,
                     GlottalSignal, ParabolicSignal, SquareSignal]:
        print('%s' % (sig_cons,))
        sig = sig_cons(440)
        wave = sig.make_wave(1)
        wave.apodize()
        wfile.write(wave)
    wfile.close()
    return

    signal = GlottalSignal(440)
    signal.plot()
    pyplot.show()
    return

    wfile = WavFileWriter()
    for m in range(60, 0, -1):
        wfile.write(make_note(m, 0.25))
    wfile.close()
    return

    wave1 = make_note(69, 1)
    wave2 = make_chord([69, 72, 76], 1)
    wave = wave1 | wave2

    wfile = WavFileWriter()
    wfile.write(wave)
    wfile.close()
    return

    sig1 = CosSignal(freq=440)
    sig2 = CosSignal(freq=523.25)
    sig3 = CosSignal(freq=660)
    sig4 = CosSignal(freq=880)
    sig5 = CosSignal(freq=987)
    sig = sig1 + sig2 + sig3 + sig4

    #wave = Wave(sig, duration=0.02)
    #wave.plot()

    wave = sig.make_wave(duration=1)
    #wave.normalize()

    wfile = WavFileWriter(wave)
    wfile.write()
    wfile.close()
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| gpl-3.0 |
a10networks/a10-openstack-lib | a10_openstack_lib/tests/test_resources.py | 1 | 1779 | # Copyright (C) 2016 A10 Networks Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import unittest
import a10_openstack_lib.resources.a10_certificate as a10_certificate
import a10_openstack_lib.resources.a10_device_instance as a10_device_instance
import a10_openstack_lib.resources.a10_scaling_group as a10_scaling_group
import a10_openstack_lib.resources.template as template
class TestResources(unittest.TestCase):
    """Smoke-tests the resource attribute maps against the template."""

    def check_resource_attribute_map(self, resources):
        """Applies the template to resources; any exception fails the test."""
        supported = [
            'convert_to_int',
            'convert_kvp_list_to_dict',
            'convert_to_list',
            'convert_kvp_to_list',
            'ATTR_NOT_SPECIFIED',
        ]
        fake_attributes = mock.Mock(spec=supported)

        # This shouldn't blow up:
        template.apply_template(resources, fake_attributes)

    def test_a10_certificate(self):
        self.check_resource_attribute_map(a10_certificate.RESOURCE_ATTRIBUTE_MAP)

    def test_a10_device_instance(self):
        self.check_resource_attribute_map(a10_device_instance.RESOURCE_ATTRIBUTE_MAP)

    def test_a10_scaling_group(self):
        self.check_resource_attribute_map(a10_scaling_group.RESOURCE_ATTRIBUTE_MAP)
| apache-2.0 |
yongshengwang/builthue | desktop/core/ext-py/requests-2.0.0/requests/packages/bs4/tests/test_html5lib.py | 293 | 2929 | """Tests to ensure that the html5lib tree builder generates good trees."""
import warnings
# html5lib is optional; record its absence so the tests can be skipped.
try:
    from bs4.builder import HTML5TreeBuilder
    HTML5LIB_PRESENT = True
except ImportError:
    # Python 3 compatible form ('except E, e' is Python 2 only); the
    # bound exception object was never used.
    HTML5LIB_PRESENT = False
from bs4.element import SoupStrainer
from bs4.testing import (
HTML5TreeBuilderSmokeTest,
SoupTest,
skipIf,
)
@skipIf(
    not HTML5LIB_PRESENT,
    "html5lib seems not to be present, not testing its tree builder.")
class HTML5LibBuilderSmokeTest(SoupTest, HTML5TreeBuilderSmokeTest):
    """See ``HTML5TreeBuilderSmokeTest``."""

    @property
    def default_builder(self):
        # The builder under test; each soup() call gets a fresh one.
        return HTML5TreeBuilder()

    def test_soupstrainer(self):
        # The html5lib tree builder does not support SoupStrainers.
        strainer = SoupStrainer("b")
        markup = "<p>A <b>bold</b> statement.</p>"
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup(markup, parse_only=strainer)
        # The strainer is ignored: the whole document is parsed anyway.
        self.assertEqual(
            soup.decode(), self.document_for(markup))
        # And a warning explaining the limitation was issued.
        self.assertTrue(
            "the html5lib tree builder doesn't support parse_only" in
            str(w[0].message))

    def test_correctly_nested_tables(self):
        """html5lib inserts <tbody> tags where other parsers don't."""
        markup = ('<table id="1">'
                  '<tr>'
                  "<td>Here's another table:"
                  '<table id="2">'
                  '<tr><td>foo</td></tr>'
                  '</table></td>')
        self.assertSoupEquals(
            markup,
            '<table id="1"><tbody><tr><td>Here\'s another table:'
            '<table id="2"><tbody><tr><td>foo</td></tr></tbody></table>'
            '</td></tr></tbody></table>')
        self.assertSoupEquals(
            "<table><thead><tr><td>Foo</td></tr></thead>"
            "<tbody><tr><td>Bar</td></tr></tbody>"
            "<tfoot><tr><td>Baz</td></tr></tfoot></table>")

    def test_xml_declaration_followed_by_doctype(self):
        markup = '''<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html>
<html>
  <head>
  </head>
  <body>
   <p>foo</p>
  </body>
</html>'''
        soup = self.soup(markup)
        # Verify that we can reach the <p> tag; this means the tree is connected.
        self.assertEqual(b"<p>foo</p>", soup.p.encode())

    def test_reparented_markup(self):
        # html5lib moves the mis-nested <em> content into a valid tree.
        markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>'
        soup = self.soup(markup)
        self.assertEqual(u"<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p></body>", soup.body.decode())
        self.assertEqual(2, len(soup.find_all('p')))

    def test_reparented_markup_ends_with_whitespace(self):
        # Same as above, but the trailing newline must survive reparenting.
        markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>\n'
        soup = self.soup(markup)
        self.assertEqual(u"<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p>\n</body>", soup.body.decode())
        self.assertEqual(2, len(soup.find_all('p')))
highco-groupe/odoo | addons/delivery/stock.py | 8 | 8971 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
# Overloaded stock_picking to manage carriers :
# Overloaded stock_picking to manage carriers:
class stock_picking(osv.osv):
    """stock.picking extended with carrier data, computed weights, and
    automatic invoicing of shipping costs."""
    _inherit = 'stock.picking'

    def _cal_weight(self, cr, uid, ids, name, args, context=None):
        """Computes gross and net weight by summing over the moves.

        returns: dict picking id -> {'weight': float, 'weight_net': float}
        """
        res = {}
        for picking in self.browse(cr, uid, ids, context=context):
            total_weight = total_weight_net = 0.00
            for move in picking.move_lines:
                total_weight += move.weight
                total_weight_net += move.weight_net
            res[picking.id] = {
                'weight': total_weight,
                'weight_net': total_weight_net,
            }
        return res

    def _get_picking_line(self, cr, uid, ids, context=None):
        """Store trigger: maps changed stock.move ids to their picking ids."""
        result = {}
        for line in self.pool.get('stock.move').browse(cr, uid, ids, context=context):
            result[line.picking_id.id] = True
        return result.keys()

    _columns = {
        'carrier_id': fields.many2one("delivery.carrier", "Carrier"),
        'volume': fields.float('Volume'),
        'weight': fields.function(_cal_weight, type='float', string='Weight', digits_compute=dp.get_precision('Stock Weight'), multi='_cal_weight',
            store={
                'stock.picking': (lambda self, cr, uid, ids, c={}: ids, ['move_lines'], 20),
                'stock.move': (_get_picking_line, ['product_id', 'product_qty', 'product_uom', 'product_uos_qty'], 20),
            }),
        'weight_net': fields.function(_cal_weight, type='float', string='Net Weight', digits_compute=dp.get_precision('Stock Weight'), multi='_cal_weight',
            store={
                'stock.picking': (lambda self, cr, uid, ids, c={}: ids, ['move_lines'], 20),
                'stock.move': (_get_picking_line, ['product_id', 'product_qty', 'product_uom', 'product_uos_qty'], 20),
            }),
        'carrier_tracking_ref': fields.char('Carrier Tracking Ref'),
        'number_of_packages': fields.integer('Number of Packages'),
        'weight_uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True, readonly="1", help="Unit of measurement for Weight",),
    }

    def _prepare_shipping_invoice_line(self, cr, uid, picking, invoice, context=None):
        """Prepare the invoice line to add to the shipping costs to the shipping's
        invoice.

        :param browse_record picking: the stock picking being invoiced
        :param browse_record invoice: the stock picking's invoice
        :return: dict containing the values to create the invoice line,
                 or None to create nothing
        """
        carrier_obj = self.pool.get('delivery.carrier')
        grid_obj = self.pool.get('delivery.grid')
        # Nothing to add without a carrier, or if the carrier product
        # is already present on the invoice.
        if not picking.carrier_id or \
            any(inv_line.product_id.id == picking.carrier_id.product_id.id
                for inv_line in invoice.invoice_line):
            return None
        grid_id = carrier_obj.grid_get(cr, uid, [picking.carrier_id.id],
                picking.partner_id.id, context=context)
        if not grid_id:
            raise osv.except_osv(_('Warning!'),
                    _('The carrier %s (id: %d) has no delivery grid!') \
                            % (picking.carrier_id.name,
                               picking.carrier_id.id))
        quantity = sum([line.product_uom_qty for line in picking.move_lines])
        price = grid_obj.get_price_from_picking(cr, uid, grid_id,
                invoice.amount_untaxed, picking.weight, picking.volume,
                quantity, context=context)
        # Income account: product's own account first, category fallback.
        account_id = picking.carrier_id.product_id.property_account_income.id
        if not account_id:
            account_id = picking.carrier_id.product_id.categ_id\
                    .property_account_income_categ.id
        taxes = picking.carrier_id.product_id.taxes_id
        partner = picking.partner_id or False
        if partner:
            # Remap account and taxes through the partner's fiscal position.
            account_id = self.pool.get('account.fiscal.position').map_account(cr, uid, partner.property_account_position, account_id)
            taxes_ids = self.pool.get('account.fiscal.position').map_tax(cr, uid, partner.property_account_position, taxes)
        else:
            taxes_ids = [x.id for x in taxes]
        return {
            'name': picking.carrier_id.name,
            'invoice_id': invoice.id,
            'uos_id': picking.carrier_id.product_id.uos_id.id,
            'product_id': picking.carrier_id.product_id.id,
            'account_id': account_id,
            'price_unit': price,
            'quantity': 1,
            'invoice_line_tax_id': [(6, 0, taxes_ids)],
        }

    def _create_invoice_from_picking(self, cr, uid, picking, vals, context=None):
        """Creates the invoice, then appends the shipping-cost line to it."""
        invoice_line_obj = self.pool.get('account.invoice.line')
        invoice_id = super(stock_picking, self)._create_invoice_from_picking(cr, uid, picking, vals, context=context)
        # Bug fix: the returned id is an account.invoice id, so it must be
        # browsed on the invoice model, not on stock.picking -- the helper
        # above reads invoice fields (amount_untaxed, invoice_line).
        invoice = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context=context)
        invoice_line = self._prepare_shipping_invoice_line(cr, uid, picking, invoice, context=context)
        if invoice_line:
            invoice_line_obj.create(cr, uid, invoice_line)
        return invoice_id

    def _get_default_uom(self, cr, uid, context=None):
        """Returns the reference (factor == 1) uom of the weight category."""
        uom_categ_id = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'product.product_uom_categ_kgm')
        return self.pool.get('product.uom').search(cr, uid, [('category_id', '=', uom_categ_id), ('factor', '=', 1)])[0]

    _defaults = {
        'weight_uom_id': lambda self, cr, uid, c: self._get_default_uom(cr, uid, c),
    }
class stock_move(osv.osv):
    """stock.move extended with computed gross/net weight."""
    _inherit = 'stock.move'

    def _cal_move_weight(self, cr, uid, ids, name, args, context=None):
        """Computes move weight from the product weight and quantity.

        returns: dict move id -> {'weight': float, 'weight_net': float}
        """
        res = {}
        uom_obj = self.pool.get('product.uom')
        for move in self.browse(cr, uid, ids, context=context):
            weight = weight_net = 0.00
            if move.product_id.weight > 0.00:
                converted_qty = move.product_qty
                # Convert to the product's reference uom when they differ.
                # Bug fix: Python-2-only '<>' replaced with '!='
                # (identical semantics, also valid under Python 3).
                if move.product_uom.id != move.product_id.uom_id.id:
                    converted_qty = uom_obj._compute_qty(cr, uid, move.product_uom.id, move.product_qty, move.product_id.uom_id.id)
                weight = (converted_qty * move.product_id.weight)
                if move.product_id.weight_net > 0.00:
                    weight_net = (converted_qty * move.product_id.weight_net)
            res[move.id] = {
                'weight': weight,
                'weight_net': weight_net,
            }
        return res

    _columns = {
        'weight': fields.function(_cal_move_weight, type='float', string='Weight', digits_compute=dp.get_precision('Stock Weight'), multi='_cal_move_weight',
            store={
                'stock.move': (lambda self, cr, uid, ids, c=None: ids, ['product_id', 'product_qty', 'product_uom'], 20),
            }),
        'weight_net': fields.function(_cal_move_weight, type='float', string='Net weight', digits_compute=dp.get_precision('Stock Weight'), multi='_cal_move_weight',
            store={
                'stock.move': (lambda self, cr, uid, ids, c=None: ids, ['product_id', 'product_qty', 'product_uom'], 20),
            }),
        'weight_uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True, readonly="1", help="Unit of Measure (Unit of Measure) is the unit of measurement for Weight",),
    }

    def _get_default_uom(self, cr, uid, context=None):
        """Returns the reference (factor == 1) uom of the weight category."""
        uom_categ_id = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'product.product_uom_categ_kgm')
        return self.pool.get('product.uom').search(cr, uid, [('category_id', '=', uom_categ_id), ('factor', '=', 1)])[0]

    _defaults = {
        'weight_uom_id': lambda self, cr, uid, c: self._get_default_uom(cr, uid, c),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
foobarbazblarg/stayclean | stayclean-2017-december/display-during-signup.py | 1 | 6725 | #!/usr/bin/python
# TODO: issues with new oauth2 stuff. Keep using older version of Python for now.
# #!/usr/bin/env python
from participantCollection import ParticipantCollection
import string
import re
import datetime
import pyperclip
# Edit Me!
# Remember, this is during signup, so current month is not March, it's February.
# Total days in the *current* month (the one that is ending).
currentMonthTotalDays = 30
# URL of the current month's running update thread.
currentMonthURL = "https://www.reddit.com/r/pornfree/comments/7a37f4/stay_clean_november_this_thread_updated_daily/"
currentMonthIndex = datetime.date.today().month
currentMonthPenultimateDayIndex = currentMonthTotalDays - 1
currentMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[currentMonthIndex]
# Wrap around December -> January.
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[nextMonthIndex]
uppercaseMonth = string.upper(nextMonthName)
currentDayOfMonthIndex = datetime.date.today().day
currentDayOfMonthName = {1: 'first', 2: 'second', 3: 'third', 4: 'fourth', 5: 'fifth', 6: 'sixth', 7: 'seventh', 8: 'eighth', 9: 'ninth', 10: 'tenth', 11: 'eleventh', 12: 'twelfth', 13: 'thirteenth', 14: 'fourteenth', 15: 'fifteenth', 16: 'sixteenth', 17: 'seventeenth', 18: 'eighteenth', 19: 'nineteenth', 20: 'twentieth', 21: 'twenty-first', 22: 'twenty-second', 23: 'twenty-third', 24: 'twenty-fourth', 25: 'twenty-fifth', 26: 'twenty-sixth', 27: 'twenty-seventh', 28: 'twenty-eighth', 29: 'twenty-ninth', 30: 'thirtieth', 31: 'thirty-first'}[currentDayOfMonthIndex]
currentDayOfWeekName = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}[datetime.date.today().weekday()]
# TODO: testing
# currentDayOfMonthIndex = 28
participants = ParticipantCollection()
initialNumber = participants.size()
def templateForParticipants():
    """Returns a markdown section listing everyone signed up so far."""
    pieces = ["Here are the **INITIAL_NUMBER participants** who have already signed up:\n\n"]
    for participant in participants.participants:
        pieces.append("/u/" + participant.name)
        pieces.append("\n\n")
    return "".join(pieces)
def templateForTooEarly():
    """Returns the placeholder shown before the signup window opens."""
    open_day = currentMonthTotalDays - 6
    return "(Too early. Come back on CURRENT_MONTH_NAME " + str(open_day) + ")\n"
def templateForFirstSignupDay():
    """Returns the post body for the day the signup window opens."""
    pieces = [
        "STAY CLEAN UPPERCASE_MONTH! Sign up here! (CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX)\n",
        "Hey everybody, we had a great turnout for [Stay Clean CURRENT_MONTH_NAME](CURRENT_MONTH_URL) - let's see if we can knock it out of the park for NEXT_MONTH_NAME. Have you been clean for the month of CURRENT_MONTH_NAME? Great! Join us here, and let's keep our streak going. Did you slip in CURRENT_MONTH_NAME? Then NEXT_MONTH_NAME is your month to shine, and we will gladly fight the good fight along with you. Did you miss out on the CURRENT_MONTH_NAME challenge? Well then here is your opportunity to join us.\n",
        "\n",
        "If you would like to be included in this challenge, please post a brief comment to this thread, and I will include you. After midnight, NEXT_MONTH_NAME 1, the sign up window will close, and the challenge will begin.",
    ]
    return "".join(pieces)
def templateForMiddleSignupDays():
    """Returns the post body for the ordinary days of the signup window."""
    pieces = [
        "STAY CLEAN UPPERCASE_MONTH! Sign up here! (CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX)\n",
        "Hey everybody, so far **INITIAL_NUMBER participants** have signed up. Have you been clean for **[the month of CURRENT_MONTH_NAME](CURRENT_MONTH_URL)**? Great! Join us here, and let's keep our streak going. Did you slip in CURRENT_MONTH_NAME? Then NEXT_MONTH_NAME is your month to shine, and we will gladly fight the good fight along with you. Did you miss out on the CURRENT_MONTH_NAME challenge? Well then here is your opportunity to join us.\n",
        "\n",
        "If you would like to be included in this challenge, please post a brief comment to this thread (if you haven't already done so on an earlier signup thread), and I will include you. After midnight, NEXT_MONTH_NAME 1, the sign up window will close, and the challenge will begin.\n",
        "\n",
        templateForParticipants(),
    ]
    return "".join(pieces)
def templateForLastSignupDay():
    """Returns the post body for the final day of the signup window."""
    pieces = [
        "LAST CHANCE TO SIGN UP FOR STAY CLEAN UPPERCASE_MONTH! Sign up here!\n",
        "The Stay Clean NEXT_MONTH_NAME challenge **begins tomorrow**! So far, we have **INITIAL_NUMBER participants** signed up. If you would like to be included in the challenge, please post a brief comment to this thread (if you haven't already done so on an earlier signup thread), and we will include you. After midnight tonight, we will not be accepting any more participants. I will create the official update post tomorrow.\n",
        "\n",
        templateForParticipants(),
    ]
    return "".join(pieces)
def templateToUse():
    """Picks the right template for today's position in the month."""
    first_signup_day = currentMonthTotalDays - 6
    if currentDayOfMonthIndex < first_signup_day:
        return templateForTooEarly()
    if currentDayOfMonthIndex == first_signup_day:
        return templateForFirstSignupDay()
    if currentDayOfMonthIndex < currentMonthTotalDays:
        return templateForMiddleSignupDays()
    if currentDayOfMonthIndex == currentMonthTotalDays:
        return templateForLastSignupDay()
def stringToPrint():
    """Expands every placeholder in the chosen template.

    Substitution order matters and is preserved from the original
    hand-written chain of re.sub calls.
    """
    substitutions = [
        ('INITIAL_NUMBER', str(initialNumber)),
        ('CURRENT_MONTH_INDEX', str(currentMonthIndex)),
        ('CURRENT_MONTH_TOTAL_DAYS', str(currentMonthTotalDays)),
        ('CURRENT_MONTH_PENULTIMATE_DAY_INDEX', str(currentMonthPenultimateDayIndex)),
        ('CURRENT_MONTH_NAME', currentMonthName),
        ('CURRENT_MONTH_URL', currentMonthURL),
        ('NEXT_MONTH_INDEX', str(nextMonthIndex)),
        ('NEXT_MONTH_NAME', nextMonthName),
        ('CURRENT_DAY_OF_MONTH_INDEX', str(currentDayOfMonthIndex)),
        ('CURRENT_DAY_OF_MONTH_NAME', currentDayOfMonthName),
        ('CURRENT_DAY_OF_WEEK_NAME', currentDayOfWeekName),
        ('UPPERCASE_MONTH', uppercaseMonth),
    ]
    answer = templateToUse()
    for placeholder, value in substitutions:
        answer = re.sub(placeholder, value, answer)
    return answer
# Render the post, show it on screen, and copy it to the clipboard.
outputString = stringToPrint()
# Single-argument print() behaves identically under Python 2 and 3.
print("=============================================================")
print(outputString)
print("=============================================================")
pyperclip.copy(outputString)
| mit |
ModdedPA/android_external_chromium_org | chrome/browser/resources/web_dev_style/js_checker.py | 59 | 8940 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium JS resources.
See chrome/browser/resources/PRESUBMIT.py
"""
class JSChecker(object):
  def __init__(self, input_api, output_api, file_filter=None):
    # Presubmit API objects supplied by the check framework; file_filter
    # optionally restricts which files are examined.
    self.input_api = input_api
    self.output_api = output_api
    self.file_filter = file_filter
def RegexCheck(self, line_number, line, regex, message):
"""Searches for |regex| in |line| to check for a particular style
violation, returning a message like the one below if the regex matches.
The |regex| must have exactly one capturing group so that the relevant
part of |line| can be highlighted. If more groups are needed, use
"(?:...)" to make a non-capturing group. Sample message:
line 6: Use var instead of const.
const foo = bar();
^^^^^
"""
match = self.input_api.re.search(regex, line)
if match:
assert len(match.groups()) == 1
start = match.start(1)
length = match.end(1) - start
return ' line %d: %s\n%s\n%s' % (
line_number,
message,
line,
self.error_highlight(start, length))
return ''
def ChromeSendCheck(self, i, line):
"""Checks for a particular misuse of 'chrome.send'."""
return self.RegexCheck(i, line, r"chrome\.send\('[^']+'\s*(, \[\])\)",
'Passing an empty array to chrome.send is unnecessary')
def ConstCheck(self, i, line):
    """Flags use of the 'const' keyword outside JsDoc annotations."""
    jsdoc_const = self.input_api.re.search(r'\*\s+@const', line)
    if jsdoc_const:
        # Probably a JsDoc line; @const is fine there.
        return ''
    return self.RegexCheck(i, line, r'(?:^|\s|\()(const)\s',
        'Use /** @const */ var varName; instead of const varName;')
def GetElementByIdCheck(self, i, line):
    """Flags document.getElementById in favor of the $() helper."""
    message = ("Use $('id'), from chrome://resources/js/util.js, instead of "
               "document.getElementById('id')")
    return self.RegexCheck(i, line, r"(document\.getElementById)\('", message)
def InheritDocCheck(self, i, line):
    """Flags the deprecated @inheritDoc JsDoc tag."""
    message = "@inheritDoc is deprecated, use @override instead"
    return self.RegexCheck(i, line, r"\* (@inheritDoc)", message)
def WrapperTypeCheck(self, i, line):
    """Check for wrappers (new String()) instead of builtins (string)."""
    # Matches a JsDoc @param/@return/@type annotation whose {...} type
    # expression names a wrapper type (String/Boolean/Number).
    return self.RegexCheck(i, line,
        r"(?:/\*)?\*.*?@(?:param|return|type) ?"  # /** @param/@return/@type
        r"{[^}]*\b(String|Boolean|Number)\b[^}]*}",  # {(Boolean|Number|String)}
        "Don't use wrapper types (i.e. new String() or @type {String})")
def VarNameCheck(self, i, line):
    """See the style guide. http://goo.gl/uKir6"""
    # Flags 'var' names containing '_' or '$' (i.e. not namesLikeThis).
    # The g_ prefix is exempted via the lookahead, and the trailing
    # lookbehind keeps a bare ' $' reference from being flagged.
    return self.RegexCheck(i, line,
        r"var (?!g_\w+)([a-z]*[_$][\w_$]*)(?<! \$)",
        "Please use var namesLikeThis <http://goo.gl/uKir6>")
def error_highlight(self, start, length):
    """Return a caret line: |start| spaces followed by |length| '^'s,
    used to underline the offending part of a source line.
    """
    return ' ' * start + '^' * length
def _makeErrorOrWarning(self, error_text, filename):
    """Takes a few lines of text indicating a style violation and turns it into
    a PresubmitError (if |filename| is in a directory where we've already
    taken out all the style guide violations) or a PresubmitPromptWarning
    (if it's in a directory where we haven't done that yet).
    """
    # TODO(tbreisacher): Once we've cleaned up the style nits in all of
    # resources/ we can get rid of this function.
    path = self.input_api.os_path
    resources = self.input_api.PresubmitLocalPath()
    # Directories already known to be style-clean; violations there are
    # hard errors rather than prompt warnings.
    dirs = (
        path.join(resources, 'bookmark_manager'),
        path.join(resources, 'extensions'),
        path.join(resources, 'file_manager'),
        path.join(resources, 'help'),
        path.join(resources, 'history'),
        path.join(resources, 'memory_internals'),
        path.join(resources, 'net_export'),
        path.join(resources, 'net_internals'),
        path.join(resources, 'network_action_predictor'),
        path.join(resources, 'ntp4'),
        path.join(resources, 'options'),
        path.join(resources, 'print_preview'),
        path.join(resources, 'profiler'),
        path.join(resources, 'sync_promo'),
        path.join(resources, 'tracing'),
        path.join(resources, 'uber'),
    )
    if filename.startswith(dirs):
        return self.output_api.PresubmitError(error_text)
    else:
        return self.output_api.PresubmitPromptWarning(error_text)
def RunChecks(self):
    """Check for violations of the Chromium JavaScript style guide. See
    http://chromium.org/developers/web-development-style-guide#TOC-JavaScript
    """
    import sys
    import warnings
    # closure_linter lives in third_party; temporarily extend sys.path (and
    # silence its DeprecationWarnings) for the import, then restore both.
    old_path = sys.path
    old_filters = warnings.filters
    try:
        closure_linter_path = self.input_api.os_path.join(
            self.input_api.change.RepositoryRoot(),
            "third_party",
            "closure_linter")
        gflags_path = self.input_api.os_path.join(
            self.input_api.change.RepositoryRoot(),
            "third_party",
            "python_gflags")
        sys.path.insert(0, closure_linter_path)
        sys.path.insert(0, gflags_path)
        warnings.filterwarnings('ignore', category=DeprecationWarning)
        from closure_linter import checker, errors
        from closure_linter.common import errorhandler
    finally:
        sys.path = old_path
        warnings.filters = old_filters

    class ErrorHandlerImpl(errorhandler.ErrorHandler):
        """Filters out errors that don't apply to Chromium JavaScript code."""
        def __init__(self, re):
            self._errors = []
            self.re = re

        def HandleFile(self, filename, first_token):
            self._filename = filename

        def HandleError(self, error):
            if (self._valid(error)):
                error.filename = self._filename
                self._errors.append(error)

        def GetErrors(self):
            return self._errors

        def HasErrors(self):
            return bool(self._errors)

        def _valid(self, error):
            """Check whether an error is valid. Most errors are valid, with a few
            exceptions which are listed here.
            """
            # Lines carrying grit directives (<include>, <if>) are exempt.
            is_grit_statement = bool(
                self.re.search("</?(include|if)", error.token.line))
            return not is_grit_statement and error.code not in [
                errors.COMMA_AT_END_OF_LITERAL,
                errors.JSDOC_ILLEGAL_QUESTION_WITH_PIPE,
                errors.JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER,
                errors.LINE_TOO_LONG,
                errors.MISSING_JSDOC_TAG_THIS,
            ]

    results = []
    affected_files = self.input_api.change.AffectedFiles(
        file_filter=self.file_filter,
        include_deletes=False)
    affected_js_files = filter(lambda f: f.LocalPath().endswith('.js'),
                               affected_files)
    for f in affected_js_files:
        error_lines = []
        # Check for the following:
        # * document.getElementById()
        # * the 'const' keyword
        # * Passing an empty array to 'chrome.send()'
        for i, line in enumerate(f.NewContents(), start=1):
            error_lines += filter(None, [
                self.ChromeSendCheck(i, line),
                self.ConstCheck(i, line),
                self.GetElementByIdCheck(i, line),
                self.InheritDocCheck(i, line),
                self.WrapperTypeCheck(i, line),
                self.VarNameCheck(i, line),
            ])
        # Use closure_linter to check for several different errors
        error_handler = ErrorHandlerImpl(self.input_api.re)
        js_checker = checker.JavaScriptStyleChecker(error_handler)
        js_checker.Check(self.input_api.os_path.join(
            self.input_api.change.RepositoryRoot(),
            f.LocalPath()))
        for error in error_handler.GetErrors():
            highlight = self.error_highlight(
                error.token.start_index, error.token.length)
            error_msg = ' line %d: E%04d: %s\n%s\n%s' % (
                error.token.line_number,
                error.code,
                error.message,
                error.token.line.rstrip(),
                highlight)
            error_lines.append(error_msg)
        if error_lines:
            error_lines = [
                'Found JavaScript style violations in %s:' %
                f.LocalPath()] + error_lines
            results.append(self._makeErrorOrWarning(
                '\n'.join(error_lines), f.AbsoluteLocalPath()))
    if results:
        results.append(self.output_api.PresubmitNotifyResult(
            'See the JavaScript style guide at '
            'http://www.chromium.org/developers/web-development-style-guide'
            '#TOC-JavaScript and if you have any feedback about the JavaScript '
            'PRESUBMIT check, contact tbreisacher@chromium.org or '
            'dbeam@chromium.org'))
    return results
| bsd-3-clause |
hogarthj/ansible | test/units/modules/cloud/amazon/test_elb_application_lb.py | 33 | 4551 | #
# (c) 2017 Michael Tinning
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
import json
from copy import deepcopy
import pytest
from ansible.module_utils._text import to_bytes
from ansible.module_utils import basic
from ansible.module_utils.ec2 import HAS_BOTO3
if not HAS_BOTO3:
pytestmark = pytest.mark.skip("test_elb_application_lb.py requires the `boto3` and `botocore` modules")
import ansible.modules.cloud.amazon.elb_application_lb as elb_module
@pytest.fixture
def listener():
    # Canonical listener spec as handed to the elb_application_lb module:
    # HTTP on port 80, a default forward action, plus one host-header rule.
    return {
        'Protocol': 'HTTP',
        'Port': 80,
        'DefaultActions': [{
            'Type': 'forward',
            'TargetGroupName': 'target-group'
        }],
        'Rules': [{
            'Conditions': [{
                'Field': 'host-header',
                'Values': [
                    'www.example.com'
                ]
            }],
            'Priority': 1,
            'Actions': [{
                'TargetGroupName': 'other-target-group',
                'Type': 'forward'
            }]
        }]
    }


@pytest.fixture
def compare_listeners(mocker):
    return mocker.Mock()


@pytest.fixture
def ensure_listeners(mocker):
    # Stub reporting no listeners needing ARN fix-ups.
    ensure_listeners_mock = mocker.Mock()
    ensure_listeners_mock.return_value = []
    return ensure_listeners_mock


@pytest.fixture
def compare_rules(mocker):
    # (to_add, to_modify, to_delete) -> nothing to do by default.
    compare_rules_mock = mocker.Mock()
    compare_rules_mock.return_value = ([], [], [])
    return compare_rules_mock


@pytest.fixture
def get_elb_listeners(mocker):
    get_elb_listeners_mock = mocker.Mock()
    get_elb_listeners_mock.return_value = []
    return get_elb_listeners_mock


@pytest.fixture
def elb(mocker, monkeypatch, compare_listeners, ensure_listeners, compare_rules, get_elb_listeners):
    # The module under test with all of its AWS-touching helpers patched out.
    monkeypatch.setattr(elb_module, "ensure_listeners_default_action_has_arn", ensure_listeners)
    monkeypatch.setattr(elb_module, "get_elb_listeners", get_elb_listeners)
    monkeypatch.setattr(elb_module, "ensure_rules_action_has_arn", mocker.Mock())
    monkeypatch.setattr(elb_module, "get_listener", mocker.Mock())
    monkeypatch.setattr(elb_module, "compare_rules", compare_rules)
    monkeypatch.setattr(elb_module, "compare_listeners", compare_listeners)
    return elb_module


@pytest.fixture
def created_listener(mocker, listener):
    # Shape of the listener AWS reports back after creation.
    return {
        'Port': listener['Port'],
        'ListenerArn': 'new-listener-arn'
    }


@pytest.fixture
def connection(mocker, created_listener):
    connection_mock = mocker.Mock()
    connection_mock.create_listener.return_value = {
        'Listeners': [created_listener]
    }
    return connection_mock


@pytest.fixture
def existing_elb():
    return {'LoadBalancerArn': 'fake'}
def test_create_listeners_called_with_correct_args(mocker, connection, listener, elb, compare_listeners, existing_elb):
    # One listener to add, none to modify or remove.
    compare_listeners.return_value = ([listener], [], [])
    elb.create_or_update_elb_listeners(connection, mocker.Mock(), existing_elb)
    connection.create_listener.assert_called_once_with(
        Protocol=listener['Protocol'],
        Port=listener['Port'],
        DefaultActions=listener['DefaultActions'],
        LoadBalancerArn=existing_elb['LoadBalancerArn']
    )


def test_modify_listeners_called_with_correct_args(mocker, connection, listener, elb, compare_listeners, existing_elb):
    # In the case of modify listener, LoadBalancerArn is set in compare_listeners
    listener['LoadBalancerArn'] = existing_elb['LoadBalancerArn']
    compare_listeners.return_value = ([], [listener], [])
    elb.create_or_update_elb_listeners(connection, mocker.Mock(), existing_elb)
    connection.modify_listener.assert_called_once_with(
        Protocol=listener['Protocol'],
        Port=listener['Port'],
        DefaultActions=listener['DefaultActions'],
        LoadBalancerArn=existing_elb['LoadBalancerArn']
    )


def test_compare_rules_called_with_new_listener(
    mocker,
    connection,
    listener,
    elb,
    compare_listeners,
    ensure_listeners,
    compare_rules,
    existing_elb,
    created_listener
):
    # After creating a listener, compare_rules must receive the freshly
    # created listener (carrying its new ARN), not just the input spec.
    compare_listeners.return_value = ([listener], [], [])
    listener_from_ensure_listeners = deepcopy(listener)
    ensure_listeners.return_value = [listener_from_ensure_listeners]
    elb.create_or_update_elb_listeners(connection, mocker.Mock(), existing_elb)
    (_conn, _module, current_listeners, _listener), _kwargs = compare_rules.call_args
    assert created_listener in current_listeners
| gpl-3.0 |
0jpq0/kbengine | kbe/res/scripts/common/Lib/email/_encoded_words.py | 85 | 7913 | """ Routines for manipulating RFC2047 encoded words.
This is currently a package-private API, but will be considered for promotion
to a public API if there is demand.
"""
# An ecoded word looks like this:
#
# =?charset[*lang]?cte?encoded_string?=
#
# for more information about charset see the charset module. Here it is one
# of the preferred MIME charset names (hopefully; you never know when parsing).
# cte (Content Transfer Encoding) is either 'q' or 'b' (ignoring case). In
# theory other letters could be used for other encodings, but in practice this
# (almost?) never happens. There could be a public API for adding entries
# to the CTE tables, but YAGNI for now. 'q' is Quoted Printable, 'b' is
# Base64. The meaning of encoded_string should be obvious. 'lang' is optional
# as indicated by the brackets (they are not part of the syntax) but is almost
# never encountered in practice.
#
# The general interface for a CTE decoder is that it takes the encoded_string
# as its argument, and returns a tuple (cte_decoded_string, defects). The
# cte_decoded_string is the original binary that was encoded using the
# specified cte. 'defects' is a list of MessageDefect instances indicating any
# problems encountered during conversion. 'charset' and 'lang' are the
# corresponding strings extracted from the EW, case preserved.
#
# The general interface for a CTE encoder is that it takes a binary sequence
# as input and returns the cte_encoded_string, which is an ascii-only string.
#
# Each decoder must also supply a length function that takes the binary
# sequence as its argument and returns the length of the resulting encoded
# string.
#
# The main API functions for the module are decode, which calls the decoder
# referenced by the cte specifier, and encode, which adds the appropriate
# RFC 2047 "chrome" to the encoded string, and can optionally automatically
# select the shortest possible encoding. See their docstrings below for
# details.
import re
import base64
import binascii
import functools
from string import ascii_letters, digits
from email import errors
__all__ = ['decode_q',
'encode_q',
'decode_b',
'encode_b',
'len_q',
'len_b',
'decode',
'encode',
]
#
# Quoted Printable
#
# regex based decoder.
# Regex-based decoder: rewrites each '=XX' hex escape to its byte value.
_q_byte_subber = functools.partial(
    re.compile(br'=([a-fA-F0-9]{2})').sub,
    lambda match: bytes((int(match.group(1), 16),)))


def decode_q(encoded):
    """Decode a Q-encoded (RFC 2047 quoted-printable) byte string.

    Returns a (decoded_bytes, defects) tuple; Q decoding cannot fail, so
    the defect list is always empty.
    """
    with_spaces = encoded.replace(b'_', b' ')
    return _q_byte_subber(with_spaces), []
# dict mapping bytes to their encoded form
class _QByteMap(dict):
safe = b'-!*+/' + ascii_letters.encode('ascii') + digits.encode('ascii')
def __missing__(self, key):
if key in self.safe:
self[key] = chr(key)
else:
self[key] = "={:02X}".format(key)
return self[key]
_q_byte_map = _QByteMap()
# In headers spaces are mapped to '_'.
_q_byte_map[ord(' ')] = '_'
def encode_q(bstring):
return ''.join(_q_byte_map[x] for x in bstring)
def len_q(bstring):
return sum(len(_q_byte_map[x]) for x in bstring)
#
# Base64
#
def decode_b(encoded):
    """Decode a Base64 encoded-word payload.

    Returns a (decoded_bytes, defects) tuple.  Missing padding is repaired
    and recorded as an InvalidBase64PaddingDefect; non-alphabet characters
    are recorded as an InvalidBase64CharactersDefect and ignored.
    """
    defects = []
    pad_err = len(encoded) % 4
    if pad_err:
        # Padding is short; top it up to a multiple of 4 and note the defect.
        defects.append(errors.InvalidBase64PaddingDefect())
        padded_encoded = encoded + b'==='[:4-pad_err]
    else:
        padded_encoded = encoded
    try:
        return base64.b64decode(padded_encoded, validate=True), defects
    except binascii.Error:
        # Since we had correct padding, this must be an invalid char error.
        defects = [errors.InvalidBase64CharactersDefect()]
        # The non-alphabet characters are ignored as far as padding
        # goes, but we don't know how many there are.  So we'll just
        # try various padding lengths until something works.
        for i in 0, 1, 2, 3:
            try:
                return base64.b64decode(encoded+b'='*i, validate=False), defects
            except binascii.Error:
                if i==0:
                    defects.append(errors.InvalidBase64PaddingDefect())
                else:
                    # This should never happen.
                    # NOTE(review): this assumption looks wrong -- input whose
                    # de-junked length is 1 (mod 4) fails for every padding
                    # length, so malformed input can raise here instead of
                    # returning defects.  Later CPython rewrote decode_b to
                    # return the raw bytes with a length defect; confirm
                    # before relying on this path.
                    raise AssertionError("unexpected binascii.Error")
def encode_b(bstring):
    """Return *bstring* Base64-encoded, as an ASCII str."""
    encoded_bytes = base64.b64encode(bstring)
    return encoded_bytes.decode('ascii')
def len_b(bstring):
    """Return the length *bstring* would have once Base64-encoded."""
    # One 4-character output group per 3 input bytes, rounding up.
    return 4 * ((len(bstring) + 2) // 3)
# Dispatch table keyed by the (lower-cased) cte specifier from the encoded
# word: 'q' -> Quoted Printable, 'b' -> Base64.
_cte_decoders = {
    'q': decode_q,
    'b': decode_b,
}
def decode(ew):
    """Decode encoded word and return (string, charset, lang, defects) tuple.

    An RFC 2047/2243 encoded word has the form:

        =?charset*lang?cte?encoded_string?=

    where '*lang' may be omitted but the other parts may not be.

    This function expects exactly such a string (that is, it does not check the
    syntax and may raise errors if the string is not well formed), and returns
    the encoded_string decoded first from its Content Transfer Encoding and
    then from the resulting bytes into unicode using the specified charset.  If
    the cte-decoded string does not successfully decode using the specified
    character set, a defect is added to the defects list and the unknown octets
    are replaced by the unicode 'unknown' character \uFFFD.

    The specified charset and language are returned.  The default for language,
    which is rarely if ever encountered, is the empty string.
    """
    _, charset, cte, cte_string, _ = ew.split('?')
    charset, _, lang = charset.partition('*')
    cte = cte.lower()
    # Recover the original bytes and do CTE decoding.
    bstring = cte_string.encode('ascii', 'surrogateescape')
    bstring, defects = _cte_decoders[cte](bstring)
    # Turn the CTE decoded bytes into unicode.
    try:
        string = bstring.decode(charset)
    except UnicodeError:
        defects.append(errors.UndecodableBytesDefect("Encoded word "
            "contains bytes not decodable using {} charset".format(charset)))
        # surrogateescape keeps the raw bytes recoverable as lone surrogates.
        string = bstring.decode(charset, 'surrogateescape')
    except LookupError:
        # Unknown charset: fall back to ascii, noting the problem unless the
        # sender already declared the charset as unknown-8bit.
        string = bstring.decode('ascii', 'surrogateescape')
        if charset.lower() != 'unknown-8bit':
            defects.append(errors.CharsetError("Unknown charset {} "
                "in encoded word; decoded as unknown bytes".format(charset)))
    return string, charset, lang, defects
# Encoder dispatch table, mirroring _cte_decoders.
_cte_encoders = {
    'q': encode_q,
    'b': encode_b,
}

# Length estimators used by encode() to pick the shorter encoding.
_cte_encode_length = {
    'q': len_q,
    'b': len_b,
}
def encode(string, charset='utf-8', encoding=None, lang=''):
    """Encode string using the CTE encoding that produces the shorter result.

    Produces an RFC 2047/2243 encoded word of the form:

        =?charset*lang?cte?encoded_string?=

    where '*lang' is omitted unless the 'lang' parameter is given a value.
    Optional argument charset (defaults to utf-8) specifies the charset to use
    to encode the string to binary before CTE encoding it.  Optional argument
    'encoding' is the cte specifier for the encoding that should be used ('q'
    or 'b'); if it is None (the default) the encoding which produces the
    shortest encoded sequence is used, except that 'q' is preferred if it is up
    to five characters longer.  Optional argument 'lang' (default '') gives the
    RFC 2243 language string to specify in the encoded word.
    """
    # unknown-8bit payloads carry raw bytes smuggled as surrogates.
    if charset == 'unknown-8bit':
        bstring = string.encode('ascii', 'surrogateescape')
    else:
        bstring = string.encode(charset)
    if encoding is None:
        # Auto-select: prefer 'q' unless 'b' is more than 4 chars shorter.
        qlen = _cte_encode_length['q'](bstring)
        blen = _cte_encode_length['b'](bstring)
        encoding = 'q' if qlen - blen < 5 else 'b'
    encoded = _cte_encoders[encoding](bstring)
    lang_suffix = '*' + lang if lang else lang
    return "=?{}{}?{}?{}?=".format(charset, lang_suffix, encoding, encoded)
| lgpl-3.0 |
peterwilletts24/Python-Scripts | plot_scripts/EMBRACE/heat_flux/plot_from_pp_3234_diff_8km.py | 2 | 5598 | """
Load pp, plot and save
"""
import os, sys
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from mpl_toolkits.basemap import Basemap
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import iris.analysis.cartography
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import scipy.interpolate
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
# Model runs to plot and where to write the figures.
experiment_ids = ['dklyu']
save_path='/nfs/a90/eepdw/Figures/EMBRACE/'
# Helper modules loaded straight from the scripts directory.
model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/model_name_convert_title.py')
unrotate = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/unrotate_pole.py')
# Diagnostic file stem to load from each run's pp_files directory.
pp_file = '3234_mean'
# Degrees trimmed from each edge of the plotted domain.
degs_crop_top = 3.7
degs_crop_bottom = 3.5
degs_crop_left = 2
degs_crop_right = 3
# Colour-scale limits and tick spacing for the difference plot.
min_contour = -50
max_contour = 50
tick_interval=20
#
# cmap= cm.s3pcpn_l
divisor=10  # for lat/lon rounding
def main():
    """Plot each experiment's mean field minus the dkmbq reference run for
    diagnostic 3234 and save PNGs with and without a title.
    (Python 2 script: uses print statements.)
    """
    # Load diff cube
    gl = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/dkmb/dkmbq/%s.pp' % pp_file
    glob = iris.load_cube(gl)
    for experiment_id in experiment_ids:
        expmin1 = experiment_id[:-1]
        pfile = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/%s/%s/%s.pp' % (expmin1, experiment_id, pp_file)
        pcube = iris.load_cube(pfile)
        lat = pcube.coord('grid_latitude').points
        lon = pcube.coord('grid_longitude').points
        cs = pcube.coord_system('CoordSystem')
        if isinstance(cs, iris.coord_systems.RotatedGeogCS):
            # Convert rotated-pole coordinates back to true lat/lon.
            print ' %s - Unrotate pole %s' % (experiment_id,cs)
            lons, lats = np.meshgrid(lon, lat)
            lons,lats = iris.analysis.cartography.unrotate_pole(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
            lon=lons[0]
            lat=lats[:,0]
            #pcube.remove_coord('grid_latitude')
            #pcube.remove_coord('grid_longitude')
            #pcube.add_dim_coord(iris.coords.DimCoord(points=lat, standard_name='grid_latitude', units='degrees', coord_system=csur), lat_dim_coord)
            #pcube.add_dim_coord(iris.coords.DimCoord(points=lon, standard_name='grid_longitude', units='degrees', coord_system=csur), lon_dim_coord)
        # Domain extents and tick bounds rounded to the nearest 'divisor'.
        lon_min=np.min(lon)
        lon_max=np.max(lon)
        lon_low_tick=lon_min -(lon_min%divisor)
        lon_high_tick=math.ceil(lon_max/divisor)*divisor
        lat_min=np.min(lat)
        lat_max=np.max(lat)
        lat_low_tick=lat_min - (lat_min%divisor)
        lat_high_tick=math.ceil(lat_max/divisor)*divisor
        pcubediff=pcube-glob
        plt.figure(figsize=(8,8))
        # NOTE(review): doubled 'cmap= cmap=' is a harmless redundant assignment.
        cmap= cmap=plt.cm.RdBu_r
        ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lon_min+degs_crop_left,lon_max-degs_crop_right,lat_min+degs_crop_bottom,lat_max-degs_crop_top))
        clevs = np.linspace(min_contour, max_contour,9)
        cont = iplt.contourf(pcubediff, clevs, cmap=cmap, extend='both')
        #plt.clabel(cont, fmt='%d')
        #ax.stock_img()
        ax.coastlines(resolution='110m', color='#262626')
        gl = ax.gridlines(draw_labels=True,linewidth=0.5, color='#262626', alpha=0.5, linestyle='--')
        gl.xlabels_top = False
        gl.ylabels_right = False
        #gl.xlines = False
        dx, dy = 10, 10
        gl.xlocator = mticker.FixedLocator(range(int(lon_low_tick),int(lon_high_tick)+dx,dx))
        gl.ylocator = mticker.FixedLocator(range(int(lat_low_tick),int(lat_high_tick)+dy,dy))
        gl.xformatter = LONGITUDE_FORMATTER
        gl.yformatter = LATITUDE_FORMATTER
        gl.xlabel_style = {'size': 12, 'color':'#262626'}
        #gl.xlabel_style = {'color': '#262626', 'weight': 'bold'}
        gl.ylabel_style = {'size': 12, 'color':'#262626'}
        cbar = plt.colorbar(cont, orientation='horizontal', pad=0.05, extend='both', format = '%d')
        #cbar.set_label('')
        cbar.set_label(pcube.units, fontsize=10, color='#262626')
        cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
        ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
        cbar.set_ticklabels(['%d' % i for i in ticks])
        main_title='%s - Difference' % pcube.standard_name.title().replace('_',' ')
        # Wrap/clean the model description for use as a figure title.
        model_info=re.sub('(.{68} )', '\\1\n', str(model_name_convert_title.main(experiment_id)), 0, re.DOTALL)
        model_info = re.sub(r'[(\']', ' ', model_info)
        model_info = re.sub(r'[\',)]', ' ', model_info)
        print model_info
        # Save an untitled version first, then add the title and save again.
        if not os.path.exists('%s%s/%s' % (save_path, experiment_id, pp_file)): os.makedirs('%s%s/%s' % (save_path, experiment_id, pp_file))
        plt.savefig('%s%s/%s/%s_%s_notitle_diff_8km.png' % (save_path, experiment_id, pp_file, experiment_id, pp_file), format='png', bbox_inches='tight')
        plt.title('\n'.join(wrap('%s\n%s' % (main_title, model_info), 1000,replace_whitespace=False)), fontsize=16)
        #plt.show()
        plt.savefig('%s%s/%s/%s_%s_diff_8km.png' % (save_path, experiment_id, pp_file, experiment_id, pp_file), format='png', bbox_inches='tight')
        plt.close()


if __name__ == '__main__':
    main()
| mit |
111pontes/ydk-py | cisco-ios-xe/ydk/models/cisco_ios_xe/Cisco_IOS_XE_environment_oper.py | 1 | 4896 | """ Cisco_IOS_XE_environment_oper
This module contains a collection of YANG definitions for
monitoring Environment of a Network Element.Copyright (c) 2016\-2017 by Cisco Systems, Inc.All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class SensorUnitsTypeEnum(Enum):
    """
    SensorUnitsTypeEnum

    Units in which a sensor's current reading is reported.

    .. data:: Watts = 0

    .. data:: Celsius = 1

    .. data:: milliVolts = 2

    .. data:: Amperes = 3

    .. data:: Volts_DC = 4

    .. data:: Volts_AC = 5

    .. data:: milliAmperes = 6

    """

    Watts = 0
    Celsius = 1
    milliVolts = 2
    Amperes = 3
    Volts_DC = 4
    Volts_AC = 5
    milliAmperes = 6

    @staticmethod
    def _meta_info():
        # Meta tables are generated alongside this module by the YDK bindings.
        from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_environment_oper as meta
        return meta._meta_table['SensorUnitsTypeEnum']
class EnvironmentSensors(object):
    """
    Data nodes for Environmental Monitoring.

    .. attribute:: environment_sensor

        The list of components on the device chasis

        **type**\: list of :py:class:`EnvironmentSensor <ydk.models.cisco_ios_xe.Cisco_IOS_XE_environment_oper.EnvironmentSensors.EnvironmentSensor>`
    """

    _prefix = 'environment-ios-xe-oper'
    _revision = '2017-02-07'

    def __init__(self):
        # YList carries the parent/name metadata YDK needs for serialization.
        self.environment_sensor = YList()
        self.environment_sensor.parent = self
        self.environment_sensor.name = 'environment_sensor'

    class EnvironmentSensor(object):
        """
        The list of components on the device chasis.

        .. attribute:: name  <key>

            The name of the sensor component. This includes all physical
            components of the chasis \- both fixed and pluggable

            **type**\: str

        .. attribute:: location  <key>

            The name of the location where this slot is present

            **type**\: str

        .. attribute:: current_reading

            Numerical value of current sensor reading in sensor\-units

            **type**\: int

            **range:** 0..4294967295

        .. attribute:: sensor_units

            Units corresponding to current\-reading value

            **type**\: :py:class:`SensorUnitsTypeEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_environment_oper.SensorUnitsTypeEnum>`

        .. attribute:: state

            A description of current state of the sensor

            **type**\: str
        """

        _prefix = 'environment-ios-xe-oper'
        _revision = '2017-02-07'

        def __init__(self):
            self.parent = None
            self.name = None
            self.location = None
            self.current_reading = None
            self.sensor_units = None
            self.state = None

        @property
        def _common_path(self):
            # Both list keys must be populated before a YANG path can be built.
            if self.name is None:
                raise YPYModelError('Key property name is None')
            if self.location is None:
                raise YPYModelError('Key property location is None')

            return '/Cisco-IOS-XE-environment-oper:environment-sensors/Cisco-IOS-XE-environment-oper:environment-sensor[Cisco-IOS-XE-environment-oper:name = ' + str(self.name) + '][Cisco-IOS-XE-environment-oper:location = ' + str(self.location) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            # True when any leaf (key or value) has been set.
            if self.name is not None:
                return True
            if self.location is not None:
                return True
            if self.current_reading is not None:
                return True
            if self.sensor_units is not None:
                return True
            if self.state is not None:
                return True
            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_environment_oper as meta
            return meta._meta_table['EnvironmentSensors.EnvironmentSensor']['meta_info']

    @property
    def _common_path(self):
        return '/Cisco-IOS-XE-environment-oper:environment-sensors'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # True when any child sensor entry carries data.
        if self.environment_sensor is not None:
            for child_ref in self.environment_sensor:
                if child_ref._has_data():
                    return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_environment_oper as meta
        return meta._meta_table['EnvironmentSensors']['meta_info']
| apache-2.0 |
IsaacHaze/tweepy | tweepy/binder.py | 40 | 10796 | # Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from __future__ import print_function
import time
import re
from six.moves.urllib.parse import quote
import requests
import logging
from tweepy.error import TweepError, RateLimitError, is_rate_limit_error_message
from tweepy.utils import convert_to_utf8_str
from tweepy.models import Model
# Matches '{name}' path-template variables, e.g. '/users/{id}.json'.
re_path_template = re.compile('{\w+}')

log = logging.getLogger('tweepy.binder')
def bind_api(**config):
class APIMethod(object):
api = config['api']
path = config['path']
payload_type = config.get('payload_type', None)
payload_list = config.get('payload_list', False)
allowed_param = config.get('allowed_param', [])
method = config.get('method', 'GET')
require_auth = config.get('require_auth', False)
search_api = config.get('search_api', False)
upload_api = config.get('upload_api', False)
use_cache = config.get('use_cache', True)
session = requests.Session()
def __init__(self, args, kwargs):
api = self.api
# If authentication is required and no credentials
# are provided, throw an error.
if self.require_auth and not api.auth:
raise TweepError('Authentication required!')
self.post_data = kwargs.pop('post_data', None)
self.retry_count = kwargs.pop('retry_count',
api.retry_count)
self.retry_delay = kwargs.pop('retry_delay',
api.retry_delay)
self.retry_errors = kwargs.pop('retry_errors',
api.retry_errors)
self.wait_on_rate_limit = kwargs.pop('wait_on_rate_limit',
api.wait_on_rate_limit)
self.wait_on_rate_limit_notify = kwargs.pop('wait_on_rate_limit_notify',
api.wait_on_rate_limit_notify)
self.parser = kwargs.pop('parser', api.parser)
self.session.headers = kwargs.pop('headers', {})
self.build_parameters(args, kwargs)
# Pick correct URL root to use
if self.search_api:
self.api_root = api.search_root
elif self.upload_api:
self.api_root = api.upload_root
else:
self.api_root = api.api_root
# Perform any path variable substitution
self.build_path()
if self.search_api:
self.host = api.search_host
elif self.upload_api:
self.host = api.upload_host
else:
self.host = api.host
# Manually set Host header to fix an issue in python 2.5
# or older where Host is set including the 443 port.
# This causes Twitter to issue 301 redirect.
# See Issue https://github.com/tweepy/tweepy/issues/12
self.session.headers['Host'] = self.host
# Monitoring rate limits
self._remaining_calls = None
self._reset_time = None
def build_parameters(self, args, kwargs):
self.session.params = {}
for idx, arg in enumerate(args):
if arg is None:
continue
try:
self.session.params[self.allowed_param[idx]] = convert_to_utf8_str(arg)
except IndexError:
raise TweepError('Too many parameters supplied!')
for k, arg in kwargs.items():
if arg is None:
continue
if k in self.session.params:
raise TweepError('Multiple values for parameter %s supplied!' % k)
self.session.params[k] = convert_to_utf8_str(arg)
log.info("PARAMS: %r", self.session.params)
def build_path(self):
for variable in re_path_template.findall(self.path):
name = variable.strip('{}')
if name == 'user' and 'user' not in self.session.params and self.api.auth:
# No 'user' parameter provided, fetch it from Auth instead.
value = self.api.auth.get_username()
else:
try:
value = quote(self.session.params[name])
except KeyError:
raise TweepError('No parameter value found for path variable: %s' % name)
del self.session.params[name]
self.path = self.path.replace(variable, value)
def execute(self):
self.api.cached_result = False
# Build the request URL
url = self.api_root + self.path
full_url = 'https://' + self.host + url
# Query the cache if one is available
# and this request uses a GET method.
if self.use_cache and self.api.cache and self.method == 'GET':
cache_result = self.api.cache.get(url)
# if cache result found and not expired, return it
if cache_result:
# must restore api reference
if isinstance(cache_result, list):
for result in cache_result:
if isinstance(result, Model):
result._api = self.api
else:
if isinstance(cache_result, Model):
cache_result._api = self.api
self.api.cached_result = True
return cache_result
# Continue attempting request until successful
# or maximum number of retries is reached.
retries_performed = 0
while retries_performed < self.retry_count + 1:
# handle running out of api calls
if self.wait_on_rate_limit:
if self._reset_time is not None:
if self._remaining_calls is not None:
if self._remaining_calls < 1:
sleep_time = self._reset_time - int(time.time())
if sleep_time > 0:
if self.wait_on_rate_limit_notify:
print("Rate limit reached. Sleeping for:", sleep_time)
time.sleep(sleep_time + 5) # sleep for few extra sec
# if self.wait_on_rate_limit and self._reset_time is not None and \
# self._remaining_calls is not None and self._remaining_calls < 1:
# sleep_time = self._reset_time - int(time.time())
# if sleep_time > 0:
# if self.wait_on_rate_limit_notify:
# print("Rate limit reached. Sleeping for: " + str(sleep_time))
# time.sleep(sleep_time + 5) # sleep for few extra sec
# Apply authentication
if self.api.auth:
auth = self.api.auth.apply_auth()
# Request compression if configured
if self.api.compression:
self.session.headers['Accept-encoding'] = 'gzip'
# Execute request
try:
resp = self.session.request(self.method,
full_url,
data=self.post_data,
timeout=self.api.timeout,
auth=auth,
proxies=self.api.proxy)
except Exception as e:
raise TweepError('Failed to send request: %s' % e)
rem_calls = resp.headers.get('x-rate-limit-remaining')
if rem_calls is not None:
self._remaining_calls = int(rem_calls)
elif isinstance(self._remaining_calls, int):
self._remaining_calls -= 1
reset_time = resp.headers.get('x-rate-limit-reset')
if reset_time is not None:
self._reset_time = int(reset_time)
if self.wait_on_rate_limit and self._remaining_calls == 0 and (
# if ran out of calls before waiting switching retry last call
resp.status_code == 429 or resp.status_code == 420):
continue
retry_delay = self.retry_delay
# Exit request loop if non-retry error code
if resp.status_code == 200:
break
elif (resp.status_code == 429 or resp.status_code == 420) and self.wait_on_rate_limit:
if 'retry-after' in resp.headers:
retry_delay = float(resp.headers['retry-after'])
elif self.retry_errors and resp.status_code not in self.retry_errors:
break
# Sleep before retrying request again
time.sleep(retry_delay)
retries_performed += 1
# If an error was returned, throw an exception
self.api.last_response = resp
if resp.status_code and not 200 <= resp.status_code < 300:
try:
error_msg = self.parser.parse_error(resp.text)
except Exception:
error_msg = "Twitter error response: status code = %s" % resp.status_code
if is_rate_limit_error_message(error_msg):
raise RateLimitError(error_msg, resp)
else:
raise TweepError(error_msg, resp)
# Parse the response payload
result = self.parser.parse(self, resp.text)
# Store result into cache if one is available.
if self.use_cache and self.api.cache and self.method == 'GET' and result:
self.api.cache.store(url, result)
return result
    def _call(*args, **kwargs):
        # Build an APIMethod bound to this invocation's arguments.
        # With create=True the caller receives the un-executed method
        # object (used by cursor/pagination machinery); otherwise the
        # HTTP request is executed immediately and its result returned.
        method = APIMethod(args, kwargs)
        if kwargs.get('create'):
            return method
        else:
            return method.execute()
# Set pagination mode
if 'cursor' in APIMethod.allowed_param:
_call.pagination_mode = 'cursor'
elif 'max_id' in APIMethod.allowed_param:
if 'since_id' in APIMethod.allowed_param:
_call.pagination_mode = 'id'
elif 'page' in APIMethod.allowed_param:
_call.pagination_mode = 'page'
return _call
| mit |
shashank971/edx-platform | common/lib/xmodule/xmodule/video_module/bumper_utils.py | 86 | 4238 | """
Utils for video bumper
"""
import copy
import json
import pytz
import logging
from collections import OrderedDict
from datetime import datetime, timedelta
from django.conf import settings
from .video_utils import set_query_parameter
try:
import edxval.api as edxval_api
except ImportError:
edxval_api = None
log = logging.getLogger(__name__)
def get_bumper_settings(video):
    """
    Return a deep copy of the video's bumper settings.

    Transcript URLs are normalized by stripping any "/static/" prefix so
    they can be resolved relative to course content. The copy ensures the
    caller never mutates the settings stored on the video instance.
    """
    settings_copy = copy.deepcopy(getattr(video, 'video_bumper', {}))
    transcripts = settings_copy.get('transcripts', {})
    for language in transcripts:
        transcripts[language] = transcripts[language].replace("/static/", "")
    return settings_copy
def is_bumper_enabled(video):
    """
    Decide whether the pre-roll bumper should be shown for ``video``.

    All of the following must hold:
    - not rendering in Studio (author mode);
    - the ENABLE_VIDEO_BUMPER feature flag is set to True;
    - bumper settings are configured for the video;
    - the edxval API is importable;
    - the user has neither clicked "do not show again" nor viewed the
      bumper within the configured periodicity window.

    Returns:
        bool.
    """
    now_utc = datetime.utcnow().replace(tzinfo=pytz.utc)
    period_seconds = settings.FEATURES.get('SHOW_BUMPER_PERIODICITY', 0)
    last_viewed = getattr(video, 'bumper_last_view_date', None)
    recently_viewed = bool(
        last_viewed and last_viewed + timedelta(seconds=period_seconds) > now_utc
    )
    # NOTE: no default here on purpose — the attribute is expected to exist.
    opted_out = getattr(video, 'bumper_do_not_show_again')
    in_studio = getattr(video.system, "is_author_mode", False)
    if in_studio or opted_out or recently_viewed:
        return False
    return bool(
        settings.FEATURES.get('ENABLE_VIDEO_BUMPER') and
        get_bumper_settings(video) and
        edxval_api
    )
def bumperize(video):
    """
    Attach bumper state to ``video.bumper``.

    The attribute is always initialized with disabled defaults; the video
    id, transcripts and player metadata are filled in only when the bumper
    is enabled and its course settings are complete.
    """
    video.bumper = {
        'enabled': False,
        'edx_video_id': "",
        'transcripts': {},
        'metadata': None,
    }
    if not is_bumper_enabled(video):
        return
    settings_for_bumper = get_bumper_settings(video)
    try:
        video.bumper['edx_video_id'] = settings_for_bumper['video_id']
        video.bumper['transcripts'] = settings_for_bumper['transcripts']
    except (TypeError, KeyError):
        # Settings are malformed: not a dict, or a required key is missing.
        log.warning(
            "Could not retrieve video bumper information from course settings"
        )
        return
    sources = get_bumper_sources(video)
    if not sources:
        return
    video.bumper['metadata'] = bumper_metadata(video, sources)
    video.bumper['enabled'] = True  # Video poster needs this.
def get_bumper_sources(video):
    """
    Look up bumper video source URLs in edxval.

    Queries edxval for the desktop webm/mp4 profile URLs of the bumper's
    edx_video_id, dropping any profile for which no URL is returned.

    Returns:
        list of source URL strings; empty list when the edxval lookup fails.
    """
    try:
        val_profiles = ["desktop_webm", "desktop_mp4"]
        val_video_urls = edxval_api.get_urls_for_profiles(video.bumper['edx_video_id'], val_profiles)
        # Materialize a real list: on Python 3, `filter(...)` is a lazy,
        # always-truthy iterator, which would both break the documented
        # "list" contract and defeat the `if not sources` emptiness check
        # in bumperize().
        bumper_sources = [val_video_urls[profile] for profile in val_profiles if val_video_urls[profile]]
    except edxval_api.ValInternalError:
        # if no bumper sources, nothing will be showed
        log.warning(
            "Could not retrieve information from VAL for Bumper edx Video ID: %s.", video.bumper['edx_video_id']
        )
        return []
    return bumper_sources
def bumper_metadata(video, sources):
    """
    Generate bumper metadata for the video player.

    Arguments:
        video: video module whose runtime handlers are used to build
            transcript URLs.
        sources: list of bumper source URLs (see get_bumper_sources).

    Returns:
        OrderedDict of player configuration values.
    """
    transcripts = video.get_transcripts_info(is_bumper=True)
    unused_track_url, bumper_transcript_language, bumper_languages = video.get_transcripts_for_student(transcripts)
    # Build from a sequence of pairs: passing a plain dict literal to
    # OrderedDict does not guarantee insertion order before Python 3.7.
    metadata = OrderedDict([
        ('saveStateUrl', video.system.ajax_url + '/save_user_state'),
        ('showCaptions', json.dumps(video.show_captions)),
        ('sources', sources),
        ('streams', ''),
        ('transcriptLanguage', bumper_transcript_language),
        ('transcriptLanguages', bumper_languages),
        ('transcriptTranslationUrl', set_query_parameter(
            video.runtime.handler_url(video, 'transcript', 'translation/__lang__').rstrip('/?'), 'is_bumper', 1
        )),
        ('transcriptAvailableTranslationsUrl', set_query_parameter(
            video.runtime.handler_url(video, 'transcript', 'available_translations').rstrip('/?'), 'is_bumper', 1
        )),
    ])
    return metadata
| agpl-3.0 |
esemin83/python_training | test/test_db_matches_ui.py | 1 | 1134 | from model.group import Group
from timeit import timeit
from model.group_address import Address_data
from test.test_string_value import merge_emails
from test.test_string_value import merge_phones_like_on_homepage
#def test_group_list(app, db):
# print(timeit(lambda: app.group.get_group_list(), number=1))
# def clean(group):
# return Group(id=group.id, name=group.name.strip())
# print(timeit(lambda: map(clean, db.get_group_list()), number=1000))
# assert False
def test_contact_list_db(app, db):
    """Check that the contacts shown in the UI match the contacts stored in the DB."""
    def clean(address_data):
        # Normalize a DB contact record for comparison with UI rows:
        # strip whitespace from the address and collapse phones/emails
        # the way the application's home page renders them.
        return Address_data(id=address_data.id, firstname=address_data.firstname, lastname=address_data.lastname,
                            address=address_data.address.strip(),
                            all_phone_from_home_page=merge_phones_like_on_homepage(address_data),
                            all_emails=merge_emails(address_data))
    ui_contacts = app.group.get_contact_rows()
    db_contacts = map(clean, db.get_contact_list())
    #db_contacts = db.get_contact_list()
    # Compare order-independently by sorting both sides on id.
    assert sorted(ui_contacts, key=Group.id_or_max) == sorted(db_contacts, key=Group.id_or_max)
| apache-2.0 |
smlbiobot/SML-Cogs | figlet/figlet.py | 1 | 2994 | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2017 SML
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import random
import discord
from discord import Message
from discord import Server
from discord.ext import commands
from discord.ext.commands import Context
from __main__ import send_cmd_help
from cogs.utils.chat_formatting import box
from cogs.utils.chat_formatting import pagify
try:
from pyfiglet import Figlet
from pyfiglet import FigletFont
from pyfiglet import FontNotFound
except ImportError:
raise ImportError("Please install the pyfiglet package.") from None
class FigletCog:
    """Ascii art generator."""
    def __init__(self, bot):
        """Init."""
        self.bot = bot
    @commands.command(pass_context=True, no_pm=True)
    async def figletfonts(self, ctx: Context):
        """List all fonts."""
        await self.bot.say("List of supported fonts:")
        out = FigletFont.getFonts()
        # Pagify to stay under Discord's message-length limit; shorten_by
        # leaves room for the code-block markers added by box().
        for page in pagify(', '.join(out), shorten_by=24):
            await self.bot.say(box(page))
    @commands.command(pass_context=True, no_pm=True)
    async def figlet(self, ctx: Context, text: str, font=None):
        """Convert text to ascii art."""
        # Default to the 'slant' font; the literal value 'random' selects
        # any installed figlet font.
        if font is None:
            font = 'slant'
        if font == 'random':
            fonts = FigletFont.getFonts()
            font = random.choice(fonts)
        f = Figlet(font=font)
        out = f.renderText(text)
        for page in pagify(out, shorten_by=24):
            await self.bot.say(box(page))
    @commands.command(pass_context=True, no_pm=True)
    async def figletrandom(self, ctx: Context, text: str):
        """Convert text to ascii art using random font."""
        font = random.choice(FigletFont.getFonts())
        f = Figlet(font=font)
        out = f.renderText(text)
        for page in pagify(out, shorten_by=24):
            await self.bot.say(box(page))
        # Tell the user which font was picked so they can reuse it.
        await self.bot.say("Font: {}".format(font))
def setup(bot):
    # Red-bot cog entry point: register this cog with the bot instance.
    bot.add_cog(FigletCog(bot))
| mit |
agry/NGECore2 | scripts/mobiles/naboo/kaadu_female.py | 2 | 1593 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
    """Register the female kaadu creature template with the spawn service."""
    mobileTemplate = MobileTemplate()
    mobileTemplate.setCreatureName('kaadu_female')
    mobileTemplate.setLevel(12)
    mobileTemplate.setDifficulty(Difficulty.NORMAL)
    # Spawn placement radius around the spawn point.
    mobileTemplate.setMinSpawnDistance(4)
    mobileTemplate.setMaxSpawnDistance(8)
    mobileTemplate.setDeathblow(False)
    mobileTemplate.setScale(1)
    # Harvestable resources yielded by this creature.
    mobileTemplate.setMeatType("Avian Meat")
    mobileTemplate.setMeatAmount(120)
    mobileTemplate.setHideType("Leathery Hide")
    mobileTemplate.setHideAmount(85)
    mobileTemplate.setBoneType("Avian Bones")
    mobileTemplate.setBoneAmount(70)
    mobileTemplate.setSocialGroup("self")
    mobileTemplate.setAssistRange(6)
    mobileTemplate.setStalker(False)
    mobileTemplate.setOptionsBitmask(Options.ATTACKABLE)
    # Client object template(s) used to render this mobile.
    templates = Vector()
    templates.add('object/mobile/shared_kaadu_hue.iff')
    mobileTemplate.setTemplates(templates)
    weaponTemplates = Vector()
    weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
    weaponTemplates.add(weapontemplate)
    mobileTemplate.setWeaponTemplateVector(weaponTemplates)
    attacks = Vector()
    attacks.add('bm_bite_2')
    attacks.add('bm_claw_2')
    mobileTemplate.setDefaultAttack('creatureMeleeAttack')
    mobileTemplate.setAttacks(attacks)
    core.spawnService.addMobileTemplate('kaadu_female', mobileTemplate)
    return
timothycrosley/WebBot | instant_templates/create_webbot_appengine/pygments/lexers/hdl.py | 27 | 16181 | # -*- coding: utf-8 -*-
"""
pygments.lexers.hdl
~~~~~~~~~~~~~~~~~~~
Lexers for hardware descriptor languages.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, include, using, this
from pygments.token import \
Text, Comment, Operator, Keyword, Name, String, Number, Punctuation, \
Error
__all__ = ['VerilogLexer', 'SystemVerilogLexer', 'VhdlLexer']
class VerilogLexer(RegexLexer):
    """
    For verilog source code with preprocessor directives.
    *New in Pygments 1.4.*
    """
    name = 'verilog'
    aliases = ['v']
    filenames = ['*.v']
    mimetypes = ['text/x-verilog']
    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
    tokens = {
        'root': [
            (r'^\s*`define', Comment.Preproc, 'macro'),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text), # line continuation
            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'[{}#@]', Punctuation),
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            # NOTE(review): the "([0-9]+)|('h)..." alternations below also
            # match bare decimal digits and tag them Hex/Oct — looks
            # unintended; confirm against upstream pygments fixes.
            (r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex),
            (r'([0-9]+)|(\'b)[0-1]+', Number.Hex), # should be binary
            (r'([0-9]+)|(\'d)[0-9]+', Number.Integer),
            (r'([0-9]+)|(\'o)[0-7]+', Number.Oct),
            (r'\'[01xz]', Number),
            (r'\d+[Ll]?', Number.Integer),
            (r'\*/', Error),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.;\']', Punctuation),
            (r'`[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant),
            (r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)),
            (r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text),
             'import'),
            (r'(always|always_comb|always_ff|always_latch|and|assign|automatic|'
             r'begin|break|buf|bufif0|bufif1|case|casex|casez|cmos|const|'
             r'continue|deassign|default|defparam|disable|do|edge|else|end|endcase|'
             r'endfunction|endgenerate|endmodule|endpackage|endprimitive|endspecify|'
             r'endtable|endtask|enum|event|final|for|force|forever|fork|function|'
             r'generate|genvar|highz0|highz1|if|initial|inout|input|'
             r'integer|join|large|localparam|macromodule|medium|module|'
             r'nand|negedge|nmos|nor|not|notif0|notif1|or|output|packed|'
             r'parameter|pmos|posedge|primitive|pull0|pull1|pulldown|pullup|rcmos|'
             r'ref|release|repeat|return|rnmos|rpmos|rtran|rtranif0|'
             r'rtranif1|scalared|signed|small|specify|specparam|strength|'
             r'string|strong0|strong1|struct|table|task|'
             r'tran|tranif0|tranif1|type|typedef|'
             r'unsigned|var|vectored|void|wait|weak0|weak1|while|'
             r'xnor|xor)\b', Keyword),
            (r'`(accelerate|autoexpand_vectornets|celldefine|default_nettype|'
             r'else|elsif|endcelldefine|endif|endprotect|endprotected|'
             r'expand_vectornets|ifdef|ifndef|include|noaccelerate|noexpand_vectornets|'
             r'noremove_gatenames|noremove_netnames|nounconnected_drive|'
             r'protect|protected|remove_gatenames|remove_netnames|resetall|'
             r'timescale|unconnected_drive|undef)\b', Comment.Preproc),
            (r'\$(bits|bitstoreal|bitstoshortreal|countdrivers|display|fclose|'
             r'fdisplay|finish|floor|fmonitor|fopen|fstrobe|fwrite|'
             r'getpattern|history|incsave|input|itor|key|list|log|'
             r'monitor|monitoroff|monitoron|nokey|nolog|printtimescale|'
             r'random|readmemb|readmemh|realtime|realtobits|reset|reset_count|'
             r'reset_value|restart|rtoi|save|scale|scope|shortrealtobits|'
             r'showscopes|showvariables|showvars|sreadmemb|sreadmemh|'
             r'stime|stop|strobe|time|timeformat|write)\b', Name.Builtin),
            # NOTE(review): a '|' is missing between 'wor' and 'shortreal'
            # below (adjacent raw strings concatenate to "worshortreal") —
            # confirm; upstream later fixed this.
            (r'(byte|shortint|int|longint|integer|time|'
             r'bit|logic|reg|'
             r'supply0|supply1|tri|triand|trior|tri0|tri1|trireg|uwire|wire|wand|wor'
             r'shortreal|real|realtime)\b', Keyword.Type),
            ('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label),
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String), # all other characters
            (r'\\\n', String), # line continuation
            (r'\\', String), # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'import': [
            (r'[a-zA-Z0-9_:]+\*?', Name.Namespace, '#pop')
        ]
    }
    def get_tokens_unprocessed(self, text):
        """Post-process the stream: all-uppercase names become constants."""
        for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text):
            # Convention: mark all upper case names as constants
            if token is Name:
                if value.isupper():
                    token = Name.Constant
            yield index, token, value
class SystemVerilogLexer(RegexLexer):
    """
    Extends verilog lexer to recognise all SystemVerilog keywords from IEEE
    1800-2009 standard.
    *New in Pygments 1.5.*
    """
    name = 'systemverilog'
    aliases = ['sv']
    filenames = ['*.sv', '*.svh']
    mimetypes = ['text/x-systemverilog']
    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
    tokens = {
        'root': [
            (r'^\s*`define', Comment.Preproc, 'macro'),
            (r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)),
            (r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text), 'import'),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text), # line continuation
            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'[{}#@]', Punctuation),
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            # NOTE(review): same suspicious "([0-9]+)|(...)" alternations as
            # in VerilogLexer — bare decimals match as Hex/Oct; confirm.
            (r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex),
            (r'([0-9]+)|(\'b)[0-1]+', Number.Hex), # should be binary
            (r'([0-9]+)|(\'d)[0-9]+', Number.Integer),
            (r'([0-9]+)|(\'o)[0-7]+', Number.Oct),
            (r'\'[01xz]', Number),
            (r'\d+[Ll]?', Number.Integer),
            (r'\*/', Error),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.;\']', Punctuation),
            (r'`[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant),
            (r'(accept_on|alias|always|always_comb|always_ff|always_latch|'
             r'and|assert|assign|assume|automatic|before|begin|bind|bins|'
             r'binsof|bit|break|buf|bufif0|bufif1|byte|case|casex|casez|'
             r'cell|chandle|checker|class|clocking|cmos|config|const|constraint|'
             r'context|continue|cover|covergroup|coverpoint|cross|deassign|'
             r'default|defparam|design|disable|dist|do|edge|else|end|endcase|'
             r'endchecker|endclass|endclocking|endconfig|endfunction|endgenerate|'
             r'endgroup|endinterface|endmodule|endpackage|endprimitive|'
             r'endprogram|endproperty|endsequence|endspecify|endtable|'
             r'endtask|enum|event|eventually|expect|export|extends|extern|'
             r'final|first_match|for|force|foreach|forever|fork|forkjoin|'
             r'function|generate|genvar|global|highz0|highz1|if|iff|ifnone|'
             r'ignore_bins|illegal_bins|implies|import|incdir|include|'
             r'initial|inout|input|inside|instance|int|integer|interface|'
             r'intersect|join|join_any|join_none|large|let|liblist|library|'
             r'local|localparam|logic|longint|macromodule|matches|medium|'
             r'modport|module|nand|negedge|new|nexttime|nmos|nor|noshowcancelled|'
             r'not|notif0|notif1|null|or|output|package|packed|parameter|'
             r'pmos|posedge|primitive|priority|program|property|protected|'
             r'pull0|pull1|pulldown|pullup|pulsestyle_ondetect|pulsestyle_onevent|'
             r'pure|rand|randc|randcase|randsequence|rcmos|real|realtime|'
             r'ref|reg|reject_on|release|repeat|restrict|return|rnmos|'
             r'rpmos|rtran|rtranif0|rtranif1|s_always|s_eventually|s_nexttime|'
             r's_until|s_until_with|scalared|sequence|shortint|shortreal|'
             r'showcancelled|signed|small|solve|specify|specparam|static|'
             r'string|strong|strong0|strong1|struct|super|supply0|supply1|'
             r'sync_accept_on|sync_reject_on|table|tagged|task|this|throughout|'
             r'time|timeprecision|timeunit|tran|tranif0|tranif1|tri|tri0|'
             r'tri1|triand|trior|trireg|type|typedef|union|unique|unique0|'
             r'unsigned|until|until_with|untyped|use|uwire|var|vectored|'
             r'virtual|void|wait|wait_order|wand|weak|weak0|weak1|while|'
             r'wildcard|wire|with|within|wor|xnor|xor)\b', Keyword ),
            (r'(`__FILE__|`__LINE__|`begin_keywords|`celldefine|`default_nettype|'
             r'`define|`else|`elsif|`end_keywords|`endcelldefine|`endif|'
             r'`ifdef|`ifndef|`include|`line|`nounconnected_drive|`pragma|'
             r'`resetall|`timescale|`unconnected_drive|`undef|`undefineall)\b',
             Comment.Preproc ),
            (r'(\$display|\$displayb|\$displayh|\$displayo|\$dumpall|\$dumpfile|'
             r'\$dumpflush|\$dumplimit|\$dumpoff|\$dumpon|\$dumpports|'
             r'\$dumpportsall|\$dumpportsflush|\$dumpportslimit|\$dumpportsoff|'
             r'\$dumpportson|\$dumpvars|\$fclose|\$fdisplay|\$fdisplayb|'
             r'\$fdisplayh|\$fdisplayo|\$feof|\$ferror|\$fflush|\$fgetc|'
             r'\$fgets|\$fmonitor|\$fmonitorb|\$fmonitorh|\$fmonitoro|'
             r'\$fopen|\$fread|\$fscanf|\$fseek|\$fstrobe|\$fstrobeb|\$fstrobeh|'
             r'\$fstrobeo|\$ftell|\$fwrite|\$fwriteb|\$fwriteh|\$fwriteo|'
             r'\$monitor|\$monitorb|\$monitorh|\$monitoro|\$monitoroff|'
             r'\$monitoron|\$plusargs|\$readmemb|\$readmemh|\$rewind|\$sformat|'
             r'\$sformatf|\$sscanf|\$strobe|\$strobeb|\$strobeh|\$strobeo|'
             r'\$swrite|\$swriteb|\$swriteh|\$swriteo|\$test|\$ungetc|'
             r'\$value\$plusargs|\$write|\$writeb|\$writeh|\$writememb|'
             r'\$writememh|\$writeo)\b' , Name.Builtin ),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            # NOTE(review): missing '|' between 'wor' and 'shortreal'
            # (raw-string concatenation yields "worshortreal") — confirm.
            (r'(byte|shortint|int|longint|integer|time|'
             r'bit|logic|reg|'
             r'supply0|supply1|tri|triand|trior|tri0|tri1|trireg|uwire|wire|wand|wor'
             r'shortreal|real|realtime)\b', Keyword.Type),
            ('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label),
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'classname': [
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String), # all other characters
            (r'\\\n', String), # line continuation
            (r'\\', String), # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'import': [
            (r'[a-zA-Z0-9_:]+\*?', Name.Namespace, '#pop')
        ]
    }
    def get_tokens_unprocessed(self, text):
        """Post-process the stream: all-uppercase names become constants."""
        for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text):
            # Convention: mark all upper case names as constants
            if token is Name:
                if value.isupper():
                    token = Name.Constant
            yield index, token, value
    def analyse_text(text):
        # Heuristic: a file opening with a C-style comment is plausibly
        # (System)Verilog; returns None (no opinion) otherwise.
        if text.startswith('//') or text.startswith('/*'):
            return 0.5
class VhdlLexer(RegexLexer):
    """
    For VHDL source code.
    *New in Pygments 1.5.*
    """
    name = 'vhdl'
    aliases = ['vhdl']
    filenames = ['*.vhdl', '*.vhd']
    mimetypes = ['text/x-vhdl']
    # VHDL is case-insensitive; anchors must work per line.
    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text), # line continuation
            (r'--(?![!#$%&*+./<=>?@\^|_~]).*?$', Comment.Single),
            (r"'(U|X|0|1|Z|W|L|H|-)'", String.Char),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r"'[a-zA-Z_][a-zA-Z0-9_]*", Name.Attribute),
            (r'[()\[\],.;\']', Punctuation),
            (r'"[^\n\\]*"', String),
            (r'(library)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Keyword, Text, Name.Namespace)),
            (r'(use)(\s+)(entity)', bygroups(Keyword, Text, Keyword)),
            (r'(use)(\s+)([a-zA-Z_][\.a-zA-Z0-9_]*)',
             bygroups(Keyword, Text, Name.Namespace)),
            (r'(entity|component)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Keyword, Text, Name.Class)),
            (r'(architecture|configuration)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)(\s+)'
             r'(of)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)(\s+)(is)',
             bygroups(Keyword, Text, Name.Class, Text, Keyword, Text,
                      Name.Class, Text, Keyword)),
            (r'(end)(\s+)', bygroups(using(this), Text), 'endblock'),
            include('types'),
            include('keywords'),
            include('numbers'),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'endblock': [
            include('keywords'),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class),
            (r'(\s+)', Text),
            (r';', Punctuation, '#pop'),
        ],
        'types': [
            (r'(boolean|bit|character|severity_level|integer|time|delay_length|'
             r'natural|positive|string|bit_vector|file_open_kind|'
             r'file_open_status|std_ulogic|std_ulogic_vector|std_logic|'
             r'std_logic_vector)\b', Keyword.Type),
        ],
        'keywords': [
            (r'(abs|access|after|alias|all|and|'
             r'architecture|array|assert|attribute|begin|block|'
             r'body|buffer|bus|case|component|configuration|'
             r'constant|disconnect|downto|else|elsif|end|'
             r'entity|exit|file|for|function|generate|'
             r'generic|group|guarded|if|impure|in|'
             r'inertial|inout|is|label|library|linkage|'
             r'literal|loop|map|mod|nand|new|'
             r'next|nor|not|null|of|on|'
             r'open|or|others|out|package|port|'
             r'postponed|procedure|process|pure|range|record|'
             r'register|reject|return|rol|ror|select|'
             r'severity|signal|shared|sla|sli|sra|'
             r'srl|subtype|then|to|transport|type|'
             r'units|until|use|variable|wait|when|'
             r'while|with|xnor|xor)\b', Keyword),
        ],
        'numbers': [
            (r'\d{1,2}#[0-9a-fA-F_]+#?', Number.Integer),
            (r'[0-1_]+(\.[0-1_])', Number.Integer),
            (r'\d+', Number.Integer),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
            # NOTE(review): hex (H"...") and binary (B"...") bit-string
            # literals are tagged Number.Oct — looks copy-pasted from the
            # octal rule; confirm intended token types.
            (r'H"[0-9a-fA-F_]+"', Number.Oct),
            (r'O"[0-7_]+"', Number.Oct),
            (r'B"[0-1_]+"', Number.Oct),
        ],
    }
| gpl-2.0 |
glatard/nipype | nipype/workflows/smri/freesurfer/utils.py | 15 | 13485 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import nipype.pipeline.engine as pe
import nipype.interfaces.fsl as fsl
import nipype.interfaces.freesurfer as fs
import nipype.interfaces.meshfix as mf
import nipype.interfaces.io as nio
import nipype.interfaces.utility as niu
import nipype.algorithms.misc as misc
from nipype.interfaces.utility import Function
from nipype.workflows.misc.utils import region_list_from_volume, id_list_from_lookup_table
import os, os.path as op
def get_aparc_aseg(files):
    """Return the first filename containing 'aparc+aseg'.

    Raises:
        ValueError: if no matching file is present in ``files``.
    """
    matches = (name for name in files if 'aparc+aseg' in name)
    try:
        return next(matches)
    except StopIteration:
        raise ValueError('aparc+aseg.mgz not found')
def create_getmask_flow(name='getmask', dilate_mask=True):
    """Registers a source file to freesurfer space and create a brain mask in
    source space
    Requires fsl tools for initializing registration
    Parameters
    ----------
    name : string
        name of workflow
    dilate_mask : boolean
        indicates whether to dilate mask or not
    Example
    -------
    >>> getmask = create_getmask_flow()
    >>> getmask.inputs.inputspec.source_file = 'mean.nii'
    >>> getmask.inputs.inputspec.subject_id = 's1'
    >>> getmask.inputs.inputspec.subjects_dir = '.'
    >>> getmask.inputs.inputspec.contrast_type = 't2'
    Inputs::
        inputspec.source_file : reference image for mask generation
        inputspec.subject_id : freesurfer subject id
        inputspec.subjects_dir : freesurfer subjects directory
        inputspec.contrast_type : MR contrast of reference image
    Outputs::
        outputspec.mask_file : binary mask file in reference image space
        outputspec.reg_file : registration file that maps reference image to
        freesurfer space
        outputspec.reg_cost : cost of registration (useful for detecting misalignment)
    """
    """
    Initialize the workflow
    """
    getmask = pe.Workflow(name=name)
    """
    Define the inputs to the workflow.
    """
    inputnode = pe.Node(niu.IdentityInterface(fields=['source_file',
                                                      'subject_id',
                                                      'subjects_dir',
                                                      'contrast_type']),
                        name='inputspec')
    """
    Define all the nodes of the workflow:
      fssource: used to retrieve aseg.mgz
      threshold : binarize aseg
      register : coregister source file to freesurfer space
      voltransform: convert binarized aseg to source file space
    """
    fssource = pe.Node(nio.FreeSurferSource(),
                       name = 'fssource')
    threshold = pe.Node(fs.Binarize(min=0.5, out_type='nii'),
                        name='threshold')
    # MapNodes: one registration / transform per source_file entry.
    register = pe.MapNode(fs.BBRegister(init='fsl'),
                          iterfield=['source_file'],
                          name='register')
    voltransform = pe.MapNode(fs.ApplyVolTransform(inverse=True),
                              iterfield=['source_file', 'reg_file'],
                              name='transform')
    """
    Connect the nodes
    """
    getmask.connect([
        (inputnode, fssource, [('subject_id','subject_id'),
                               ('subjects_dir','subjects_dir')]),
        (inputnode, register, [('source_file', 'source_file'),
                               ('subject_id', 'subject_id'),
                               ('subjects_dir', 'subjects_dir'),
                               ('contrast_type', 'contrast_type')]),
        (inputnode, voltransform, [('subjects_dir', 'subjects_dir'),
                                   ('source_file', 'source_file')]),
        (fssource, threshold, [(('aparc_aseg', get_aparc_aseg), 'in_file')]),
        (register, voltransform, [('out_reg_file','reg_file')]),
        (threshold, voltransform, [('binary_file','target_file')])
        ])
    """
    Add remaining nodes and connections
      dilate : dilate the transformed file in source space
      threshold2 : binarize transformed file
    """
    threshold2 = pe.MapNode(fs.Binarize(min=0.5, out_type='nii'),
                            iterfield=['in_file'],
                            name='threshold2')
    if dilate_mask:
        # Dilate by one voxel to be conservative about brain coverage.
        threshold2.inputs.dilate = 1
    getmask.connect([
        (voltransform, threshold2, [('transformed_file', 'in_file')])
        ])
    """
    Setup an outputnode that defines relevant inputs of the workflow.
    """
    outputnode = pe.Node(niu.IdentityInterface(fields=["mask_file",
                                                       "reg_file",
                                                       "reg_cost"
                                                       ]),
                         name="outputspec")
    getmask.connect([
        (register, outputnode, [("out_reg_file", "reg_file")]),
        (register, outputnode, [("min_cost_file", "reg_cost")]),
        (threshold2, outputnode, [("binary_file", "mask_file")]),
        ])
    return getmask
def create_get_stats_flow(name='getstats', withreg=False):
    """Retrieves stats from labels
    Parameters
    ----------
    name : string
        name of workflow
    withreg : boolean
        indicates whether to register source to label
    Example
    -------
    Inputs::
        inputspec.source_file : reference image for mask generation
        inputspec.label_file : label file from which to get ROIs
        (optionally with registration)
        inputspec.reg_file : bbreg file (assumes reg from source to label
        inputspec.inverse : boolean whether to invert the registration
        inputspec.subjects_dir : freesurfer subjects directory
    Outputs::
        outputspec.stats_file : stats file
    """
    """
    Initialize the workflow
    """
    getstats = pe.Workflow(name=name)
    """
    Define the inputs to the workflow.
    """
    # With registration enabled, the workflow also needs the bbregister
    # file and the freesurfer subjects directory.
    if withreg:
        inputnode = pe.Node(niu.IdentityInterface(fields=['source_file',
                                                          'label_file',
                                                          'reg_file',
                                                          'subjects_dir']),
                            name='inputspec')
    else:
        inputnode = pe.Node(niu.IdentityInterface(fields=['source_file',
                                                          'label_file']),
                            name='inputspec')
    statnode = pe.MapNode(fs.SegStats(),
                          iterfield=['segmentation_file','in_file'],
                          name='segstats')
    """
    Convert between source and label spaces if registration info is provided
    """
    if withreg:
        voltransform = pe.MapNode(fs.ApplyVolTransform(inverse=True),
                                  iterfield=['source_file', 'reg_file'],
                                  name='transform')
        getstats.connect(inputnode, 'reg_file', voltransform, 'reg_file')
        getstats.connect(inputnode, 'source_file', voltransform, 'source_file')
        getstats.connect(inputnode, 'label_file', voltransform, 'target_file')
        getstats.connect(inputnode, 'subjects_dir', voltransform, 'subjects_dir')
        def switch_labels(inverse, transform_output, source_file, label_file):
            # Pick which file serves as the segmentation and which as the
            # input volume, depending on the transform direction.
            if inverse:
                return transform_output, source_file
            else:
                return label_file, transform_output
        chooser = pe.MapNode(niu.Function(input_names = ['inverse',
                                                         'transform_output',
                                                         'source_file',
                                                         'label_file'],
                                          output_names = ['label_file',
                                                          'source_file'],
                                          function=switch_labels),
                             iterfield=['transform_output','source_file'],
                             name='chooser')
        getstats.connect(inputnode,'source_file', chooser, 'source_file')
        getstats.connect(inputnode,'label_file', chooser, 'label_file')
        getstats.connect(inputnode,'inverse', chooser, 'inverse')
        getstats.connect(voltransform, 'transformed_file', chooser, 'transform_output')
        getstats.connect(chooser, 'label_file', statnode, 'segmentation_file')
        getstats.connect(chooser, 'source_file', statnode, 'in_file')
    else:
        getstats.connect(inputnode, 'label_file', statnode, 'segmentation_file')
        getstats.connect(inputnode, 'source_file', statnode, 'in_file')
    """
    Setup an outputnode that defines relevant inputs of the workflow.
    """
    outputnode = pe.Node(niu.IdentityInterface(fields=["stats_file"
                                                       ]),
                         name="outputspec")
    getstats.connect([
        (statnode, outputnode, [("summary_file", "stats_file")]),
        ])
    return getstats
def create_tessellation_flow(name='tessellate', out_format='stl'):
    """Tessellates the input subject's aseg.mgz volume and returns
    the surfaces for each region in stereolithic (.stl) format
    Example
    -------
    >>> from nipype.workflows.smri.freesurfer import create_tessellation_flow
    >>> tessflow = create_tessellation_flow()
    >>> tessflow.inputs.inputspec.subject_id = 'subj1'
    >>> tessflow.inputs.inputspec.subjects_dir = '.'
    >>> tessflow.inputs.inputspec.lookup_file = 'FreeSurferColorLUT.txt' # doctest: +SKIP
    >>> tessflow.run() # doctest: +SKIP
    Inputs::
        inputspec.subject_id : freesurfer subject id
        inputspec.subjects_dir : freesurfer subjects directory
        inputspec.lookup_file : lookup file from freesurfer directory
    Outputs::
        outputspec.meshes : output region meshes in (by default) stereolithographic (.stl) format
    """
    """
    Initialize the workflow
    """
    tessflow = pe.Workflow(name=name)
    """
    Define the inputs to the workflow.
    """
    inputnode = pe.Node(niu.IdentityInterface(fields=['subject_id',
                                                      'subjects_dir',
                                                      'lookup_file']),
                        name='inputspec')
    """
    Define all the nodes of the workflow:
      fssource: used to retrieve aseg.mgz
      mri_convert : converts aseg.mgz to aseg.nii
      tessellate : tessellates regions in aseg.mgz
      surfconvert : converts regions to stereolithographic (.stl) format
      smoother: smooths the tessellated regions
    """
    fssource = pe.Node(nio.FreeSurferSource(),
                       name = 'fssource')
    volconvert = pe.Node(fs.MRIConvert(out_type='nii'),
                         name = 'volconvert')
    tessellate = pe.MapNode(fs.MRIMarchingCubes(),
                            iterfield=['label_value','out_file'],
                            name='tessellate')
    surfconvert = pe.MapNode(fs.MRIsConvert(out_datatype='stl'),
                             iterfield=['in_file'],
                             name='surfconvert')
    smoother = pe.MapNode(mf.MeshFix(),
                          iterfield=['in_file1'],
                          name='smoother')
    # Optional extra conversion stage when GIFTI output is requested.
    if out_format == 'gii':
        stl_to_gifti = pe.MapNode(fs.MRIsConvert(out_datatype=out_format),
                                  iterfield=['in_file'],
                                  name='stl_to_gifti')
    smoother.inputs.save_as_stl = True
    smoother.inputs.laplacian_smoothing_steps = 1
    region_list_from_volume_interface = Function(input_names=["in_file"],
                                                 output_names=["region_list"],
                                                 function=region_list_from_volume)
    id_list_from_lookup_table_interface = Function(input_names=["lookup_file", "region_list"],
                                                   output_names=["id_list"],
                                                   function=id_list_from_lookup_table)
    region_list_from_volume_node = pe.Node(interface=region_list_from_volume_interface, name='region_list_from_volume_node')
    id_list_from_lookup_table_node = pe.Node(interface=id_list_from_lookup_table_interface, name='id_list_from_lookup_table_node')
    """
    Connect the nodes
    """
    tessflow.connect([
        (inputnode, fssource, [('subject_id','subject_id'),
                               ('subjects_dir','subjects_dir')]),
        (fssource, volconvert, [('aseg', 'in_file')]),
        (volconvert, region_list_from_volume_node, [('out_file', 'in_file')]),
        (region_list_from_volume_node, tessellate, [('region_list', 'label_value')]),
        (region_list_from_volume_node, id_list_from_lookup_table_node, [('region_list', 'region_list')]),
        (inputnode, id_list_from_lookup_table_node, [('lookup_file', 'lookup_file')]),
        (id_list_from_lookup_table_node, tessellate, [('id_list', 'out_file')]),
        (fssource, tessellate, [('aseg', 'in_file')]),
        (tessellate, surfconvert, [('surface','in_file')]),
        (surfconvert, smoother, [('converted','in_file1')]),
        ])
    """
    Setup an outputnode that defines relevant inputs of the workflow.
    """
    outputnode = pe.Node(niu.IdentityInterface(fields=["meshes"]),
                         name="outputspec")
    if out_format == 'gii':
        tessflow.connect([
            (smoother, stl_to_gifti, [("mesh_file", "in_file")]),
            ])
        tessflow.connect([
            (stl_to_gifti, outputnode, [("converted", "meshes")]),
            ])
    else:
        tessflow.connect([
            (smoother, outputnode, [("mesh_file", "meshes")]),
            ])
    return tessflow
| bsd-3-clause |
steebchen/youtube-dl | youtube_dl/extractor/foxsports.py | 31 | 1391 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
smuggle_url,
update_url_query,
)
class FoxSportsIE(InfoExtractor):
    """Extractor for foxsports.com video pages.

    The page embeds a ThePlatform player configuration as a JSON blob;
    extraction is delegated to the ThePlatform extractor via its release URL.
    """
    _VALID_URL = r'https?://(?:www\.)?foxsports\.com/(?:[^/]+/)*(?P<id>[^/]+)'

    _TEST = {
        'url': 'http://www.foxsports.com/tennessee/video/432609859715',
        'md5': 'b49050e955bebe32c301972e4012ac17',
        'info_dict': {
            'id': 'bwduI3X_TgUB',
            'ext': 'mp4',
            'title': 'Courtney Lee on going up 2-0 in series vs. Blazers',
            'description': 'Courtney Lee talks about Memphis being focused.',
            'upload_date': '20150423',
            'timestamp': 1429761109,
            'uploader': 'NEWA-FNG-FOXSPORTS',
        },
        'add_ie': ['ThePlatform'],
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The player configuration lives in a data-player-config attribute.
        raw_config = self._html_search_regex(
            r"""class="[^"]*(?:fs-player|platformPlayer-wrapper)[^"]*".+?data-player-config='([^']+)'""",
            webpage, 'data player config')
        config = self._parse_json(raw_config, video_id)

        # Hand off to ThePlatform through the (query-augmented) release URL.
        release_url = update_url_query(config['releaseURL'], {
            'mbr': 'true',
            'switch': 'http',
        })
        return self.url_result(
            smuggle_url(release_url, {'force_smil_url': True}))
| unlicense |
SiriusBizniss/evetowerthing | requests-master/requests/packages/chardet/constants.py | 3008 | 1335 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Non-zero enables extra debug output in the detectors.
_debug = 0

# Detection outcomes reported by the probers / universal detector.
eDetecting = 0
eFoundIt = 1
eNotMe = 2

# Coding state-machine states (presumably start / error / positive-match,
# judging by the names — confirm against the state-machine module).
eStart = 0
eError = 1
eItsMe = 2

# Confidence level above which detection may stop early ("shortcut").
SHORTCUT_THRESHOLD = 0.95
| mit |
feinno-tang/framework | node_modules/node-gyp/gyp/tools/graphviz.py | 2679 | 2878 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
import collections
import json
import sys
def ParseTarget(target):
  """Split a 'path:target#suffix' spec into its (path, target, suffix) parts.

  Missing pieces come back as empty strings, mirroring str.partition.
  """
  spec, _, suffix = target.partition('#')
  filename, _, name = spec.partition(':')
  return filename, name, suffix
def LoadEdges(filename, targets):
  """Load the edges map from the JSON dump file |filename|, and filter it to
  only show targets in |targets| and their dependents.

  Args:
    filename: path of the JSON file written by the dump-dependency-json
        generator.
    targets: list of fully-qualified target names to keep (together with
        everything they transitively depend on).

  Returns:
    dict mapping each reachable target name to the list of targets it
    depends on.
  """
  # Bug fix: the previous version ignored |filename| and always opened
  # 'dump.json'.  A context manager also guarantees the file is closed.
  with open(filename) as f:
    edges = json.load(f)

  # Copy out only the edges we're interested in from the full edge list.
  target_edges = {}
  to_visit = targets[:]
  while to_visit:
    src = to_visit.pop()
    if src in target_edges:
      continue
    target_edges[src] = edges[src]
    to_visit.extend(edges[src])
  return target_edges
def WriteGraph(edges):
  """Print a graphviz graph to stdout.
  |edges| is a map of target to a list of other targets it depends on.

  Fix: the original used Python-2-only 'print' statements, which are a
  syntax error under Python 3.  All prints here pass a single argument, so
  print(...) behaves identically on both Python 2 and 3.
  """
  # Bucket targets by file.
  files = collections.defaultdict(list)
  for src in edges:
    build_file, _, _ = ParseTarget(src)
    files[build_file].append(src)

  print('digraph D {')
  print('  fontsize=8')  # Used by subgraphs.
  print('  node [fontsize=8]')

  # Output nodes by file.  We must first write out each node within
  # its file grouping before writing out any edges that may refer
  # to those nodes.
  for filename, targets in files.items():
    if len(targets) == 1:
      # If there's only one node for this file, simplify
      # the display by making it a box without an internal node.
      target = targets[0]
      _, target_name, _ = ParseTarget(target)
      print('  "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
                                                     target_name))
    else:
      # Group multiple nodes together in a subgraph.
      print('  subgraph "cluster_%s" {' % filename)
      print('    label = "%s"' % filename)
      for target in targets:
        _, target_name, _ = ParseTarget(target)
        print('    "%s" [label="%s"]' % (target, target_name))
      print('  }')

  # Now that we've placed all the nodes within subgraphs, output all
  # the edges between nodes.
  for src, dsts in edges.items():
    for dst in dsts:
      print('  "%s" -> "%s"' % (src, dst))
  print('}')
def main():
  """Entry point: render a dependency graph for the targets named in argv.

  Returns:
    Process exit code: 0 on success, 1 on usage error.
  """
  if len(sys.argv) < 2:
    # sys.stderr.write works under both Python 2 and 3; the previous
    # 'print >>sys.stderr' statement form is Python-2-only syntax.
    # Guard against __doc__ being None (e.g. when run with -OO).
    sys.stderr.write((__doc__ or '') + '\n')
    sys.stderr.write('\n')
    sys.stderr.write('usage: %s target1 target2...\n' % (sys.argv[0]))
    return 1

  edges = LoadEdges('dump.json', sys.argv[1:])

  WriteGraph(edges)
  return 0


if __name__ == '__main__':
  sys.exit(main())
| apache-2.0 |
marcwebbie/youtube-dl | youtube_dl/extractor/vube.py | 26 | 6929 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
)
from ..utils import (
int_or_none,
ExtractorError,
)
class VubeIE(InfoExtractor):
    """Extractor for Vube.com video pages (metadata via the t-api endpoint)."""
    IE_NAME = 'vube'
    IE_DESC = 'Vube.com'
    _VALID_URL = r'https?://vube\.com/(?:[^/]+/)+(?P<id>[\da-zA-Z]{10})\b'

    _TESTS = [
        {
            'url': 'http://vube.com/trending/William+Wei/Y8NUZ69Tf7?t=s',
            'md5': 'e7aabe1f8f1aa826b9e4735e1f9cee42',
            'info_dict': {
                'id': 'Y8NUZ69Tf7',
                'ext': 'mp4',
                'title': 'Best Drummer Ever [HD]',
                'description': 'md5:2d63c4b277b85c2277761c2cf7337d71',
                'thumbnail': 're:^https?://.*\.jpg',
                'uploader': 'William',
                'timestamp': 1406876915,
                'upload_date': '20140801',
                'duration': 258.051,
                'like_count': int,
                'dislike_count': int,
                'comment_count': int,
                'categories': ['amazing', 'hd', 'best drummer ever', 'william wei', 'bucket drumming', 'street drummer', 'epic street drumming'],
            },
            'skip': 'Not accessible from Travis CI server',
        }, {
            'url': 'http://vube.com/Chiara+Grispo+Video+Channel/YL2qNPkqon',
            'md5': 'db7aba89d4603dadd627e9d1973946fe',
            'info_dict': {
                'id': 'YL2qNPkqon',
                'ext': 'mp4',
                'title': 'Chiara Grispo - Price Tag by Jessie J',
                'description': 'md5:8ea652a1f36818352428cb5134933313',
                'thumbnail': 're:^http://frame\.thestaticvube\.com/snap/[0-9x]+/102e7e63057-5ebc-4f5c-4065-6ce4ebde131f\.jpg$',
                'uploader': 'Chiara.Grispo',
                'timestamp': 1388743358,
                'upload_date': '20140103',
                'duration': 170.56,
                'like_count': int,
                'dislike_count': int,
                'comment_count': int,
                'categories': ['pop', 'music', 'cover', 'singing', 'jessie j', 'price tag', 'chiara grispo'],
            },
            'skip': 'Removed due to DMCA',
        },
        {
            'url': 'http://vube.com/SerainaMusic/my-7-year-old-sister-and-i-singing-alive-by-krewella/UeBhTudbfS?t=s&n=1',
            'md5': '5d4a52492d76f72712117ce6b0d98d08',
            'info_dict': {
                'id': 'UeBhTudbfS',
                'ext': 'mp4',
                'title': 'My 7 year old Sister and I singing "Alive" by Krewella',
                'description': 'md5:40bcacb97796339f1690642c21d56f4a',
                'thumbnail': 're:^http://frame\.thestaticvube\.com/snap/[0-9x]+/102265d5a9f-0f17-4f6b-5753-adf08484ee1e\.jpg$',
                'uploader': 'Seraina',
                'timestamp': 1396492438,
                'upload_date': '20140403',
                'duration': 240.107,
                'like_count': int,
                'dislike_count': int,
                'comment_count': int,
                'categories': ['seraina', 'jessica', 'krewella', 'alive'],
            },
            'skip': 'Removed due to DMCA',
        }, {
            'url': 'http://vube.com/vote/Siren+Gene/0nmsMY5vEq?n=2&t=s',
            'md5': '0584fc13b50f887127d9d1007589d27f',
            'info_dict': {
                'id': '0nmsMY5vEq',
                'ext': 'mp4',
                'title': 'Frozen - Let It Go Cover by Siren Gene',
                'description': 'My rendition of "Let It Go" originally sung by Idina Menzel.',
                'thumbnail': 're:^http://frame\.thestaticvube\.com/snap/[0-9x]+/10283ab622a-86c9-4681-51f2-30d1f65774af\.jpg$',
                'uploader': 'Siren',
                'timestamp': 1395448018,
                'upload_date': '20140322',
                'duration': 221.788,
                'like_count': int,
                'dislike_count': int,
                'comment_count': int,
                'categories': ['let it go', 'cover', 'idina menzel', 'frozen', 'singing', 'disney', 'siren gene'],
            },
            'skip': 'Removed due to DMCA',
        }
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        # All metadata (title, media renditions, counters, tags) comes from
        # the t-api JSON endpoint.
        video = self._download_json(
            'http://vube.com/t-api/v1/video/%s' % video_id, video_id, 'Downloading video JSON')

        public_id = video['public_id']

        formats = []

        # Video and audio renditions are listed separately under 'media';
        # both are handled by the same loop.
        for media in video['media'].get('video', []) + video['media'].get('audio', []):
            if media['transcoding_status'] != 'processed':
                continue
            fmt = {
                'url': 'http://video.thestaticvube.com/video/%s/%s.mp4' % (media['media_resolution_id'], public_id),
                'abr': int(media['audio_bitrate']),
                'format_id': compat_str(media['media_resolution_id']),
            }
            vbr = int(media['video_bitrate'])
            if vbr:
                # A zero video bitrate indicates an audio-only rendition, so
                # vbr/height are only attached when present.
                fmt.update({
                    'vbr': vbr,
                    'height': int(media['height']),
                })
            formats.append(fmt)

        self._sort_formats(formats)

        # No playable formats plus vst == 'dmca' means the video was taken
        # down; surface a user-facing (expected) error instead of crashing.
        if not formats and video.get('vst') == 'dmca':
            raise ExtractorError(
                'This video has been removed in response to a complaint received under the US Digital Millennium Copyright Act.',
                expected=True)

        title = video['title']
        description = video.get('description')
        thumbnail = self._proto_relative_url(video.get('thumbnail_src'), scheme='http:')
        uploader = video.get('user_alias') or video.get('channel')
        timestamp = int_or_none(video.get('upload_time'))
        duration = video['duration']
        view_count = video.get('raw_view_count')
        like_count = video.get('total_likes')
        dislike_count = video.get('total_hates')

        comments = video.get('comments')
        comment_count = None
        if comments is None:
            # Comments were not inlined in the video JSON; fetch the total
            # from the comment endpoint (best effort: fatal=False).
            comment_data = self._download_json(
                'http://vube.com/api/video/%s/comment' % video_id,
                video_id, 'Downloading video comment JSON', fatal=False)
            if comment_data is not None:
                comment_count = int_or_none(comment_data.get('total'))
        else:
            comment_count = len(comments)

        categories = [tag['text'] for tag in video['tags']]

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'timestamp': timestamp,
            'duration': duration,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'comment_count': comment_count,
            'categories': categories,
        }
| unlicense |
zbqf109/goodo | openerp/addons/project_timesheet/__openerp__.py | 23 | 1210 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Bill Time on Tasks',
'version': '1.0',
'category': 'Project Management',
'description': """
Synchronization of project task work entries with timesheet entries.
====================================================================
This module lets you transfer the entries under tasks defined for Project
Management to the Timesheet line entries for particular date and particular user
with the effect of creating, editing and deleting either ways.
""",
'website': 'https://www.odoo.com/page/project-management',
'images': ['images/invoice_task_work.jpeg', 'images/my_timesheet.jpeg', 'images/working_hour.jpeg'],
'depends': ['resource', 'project', 'sale_timesheet'],
'data': [
'security/ir.model.access.csv',
'security/project_timesheet_security.xml',
'report/project_report_view.xml',
'project_timesheet_view.xml',
'res_config_view.xml',
],
'demo': ['project_timesheet_demo.xml'],
'test': [
'test/worktask_entry_to_timesheetline_entry.yml',
],
'installable': True,
'auto_install': True,
}
| gpl-3.0 |
frt-arch/taiga-back | taiga/projects/issues/permissions.py | 6 | 2290 | # Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from taiga.base.api.permissions import (TaigaResourcePermission, HasProjectPerm,
IsProjectOwner, PermissionComponent,
AllowAny, IsAuthenticated, IsSuperUser)
class IssuePermission(TaigaResourcePermission):
    """Per-action permission rules for the issue API resource.

    Each ``<action>_perms`` attribute declares the permission expression a
    requester must satisfy to perform that action.
    """
    # NOTE: 'enought' is a long-standing upstream misspelling; renaming it
    # would break the TaigaResourcePermission contract.
    enought_perms = IsProjectOwner() | IsSuperUser()
    global_perms = None
    retrieve_perms = HasProjectPerm('view_issues')
    create_perms = HasProjectPerm('add_issue')
    update_perms = HasProjectPerm('modify_issue')
    destroy_perms = HasProjectPerm('delete_issue')
    # Listing and CSV export are open to anyone here; presumably results are
    # filtered per-project elsewhere — confirm against the viewset.
    list_perms = AllowAny()
    csv_perms = AllowAny()
    # Voting requires an authenticated user holding the vote permission.
    upvote_perms = IsAuthenticated() & HasProjectPerm('vote_issues')
    downvote_perms = IsAuthenticated() & HasProjectPerm('vote_issues')
    bulk_create_perms = HasProjectPerm('add_issue')
    delete_comment_perms= HasProjectPerm('modify_issue')
class HasIssueIdUrlParam(PermissionComponent):
    """Grants access only when the view was resolved with an 'issue_id' kwarg."""

    def check_permissions(self, request, view, obj=None):
        # Truthiness check mirrors the original: a missing or empty value
        # is treated as absent.
        return bool(view.kwargs.get('issue_id', None))
class IssueVotersPermission(TaigaResourcePermission):
    """Permission rules for the issue-voters sub-resource.

    Listing voters requires view access (unlike IssuePermission.list_perms,
    which is AllowAny).
    """
    enought_perms = IsProjectOwner() | IsSuperUser()
    global_perms = None
    retrieve_perms = HasProjectPerm('view_issues')
    create_perms = HasProjectPerm('add_issue')
    update_perms = HasProjectPerm('modify_issue')
    destroy_perms = HasProjectPerm('delete_issue')
    list_perms = HasProjectPerm('view_issues')
| agpl-3.0 |
benoitsteiner/tensorflow-xsmm | tensorflow/contrib/distributions/python/kernel_tests/seed_stream_test.py | 39 | 3229 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the SeedStream class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import seed_stream
from tensorflow.python.platform import test
class SeedStreamTest(test.TestCase):
  """Checks SeedStream's determinism, distinctness, and nesting guarantees."""

  def assertAllUnique(self, items):
    """Assert that *items* contains no duplicate values."""
    self.assertEqual(len(items), len(set(items)))

  def testNonRepetition(self):
    # The probability of repetitions in a short stream from a correct
    # PRNG is negligible; this test catches bugs that prevent state
    # updates.
    stream = seed_stream.SeedStream(seed=4, salt="salt")
    draws = [stream() for _ in range(50)]
    self.assertEqual(sorted(draws), sorted(set(draws)))

  def testReproducibility(self):
    # Streams built from the same (seed, salt) must emit identical output.
    stream_a = seed_stream.SeedStream(seed=4, salt="salt")
    stream_b = seed_stream.SeedStream(seed=4, salt="salt")
    stream_c = seed_stream.SeedStream(seed=4, salt="salt")
    expected = [stream_a() for _ in range(50)]
    self.assertEqual(expected, [stream_b() for _ in range(50)])
    self.assertEqual(expected, [stream_c() for _ in range(50)])

  def testSeededDistinctness(self):
    # Different seeds with the same salt must not collide.
    stream_a = seed_stream.SeedStream(seed=4, salt="salt")
    stream_b = seed_stream.SeedStream(seed=5, salt="salt")
    combined = [stream_a() for _ in range(50)] + [stream_b() for _ in range(50)]
    self.assertAllUnique(combined)

  def testSaltedDistinctness(self):
    # Same seed but different salts must not collide either.
    stream_a = seed_stream.SeedStream(seed=4, salt="salt")
    stream_b = seed_stream.SeedStream(seed=4, salt="another salt")
    combined = [stream_a() for _ in range(50)] + [stream_b() for _ in range(50)]
    self.assertAllUnique(combined)

  def testNestingRobustness(self):
    # SeedStreams started from generated seeds should not collide with
    # the master or with each other, even if the salts are the same.
    master = seed_stream.SeedStream(seed=4, salt="salt")
    child_a = seed_stream.SeedStream(master(), salt="salt")
    child_b = seed_stream.SeedStream(master(), salt="salt")
    master_draws = [master() for _ in range(50)]
    self.assertAllUnique(
        master_draws +
        [child_a() for _ in range(50)] +
        [child_b() for _ in range(50)])

  def testInitFromOtherSeedStream(self):
    # A SeedStream may be seeded directly with another SeedStream object;
    # equal salts reproduce it, different salts diverge.
    base = seed_stream.SeedStream(seed=4, salt="salt")
    same_salt = seed_stream.SeedStream(base, salt="salt")
    other_salt = seed_stream.SeedStream(base, salt="another salt")
    base_draws = [base() for _ in range(50)]
    same_draws = [same_salt() for _ in range(50)]
    other_draws = [other_salt() for _ in range(50)]
    self.assertAllEqual(base_draws, same_draws)
    self.assertAllUnique(base_draws + other_draws)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
dpetzold/django | tests/file_storage/models.py | 219 | 1603 | """
Storing files according to a custom storage system
``FileField`` and its variations can take a ``storage`` argument to specify how
and where files should be stored.
"""
import random
import tempfile
from django.core.files.storage import FileSystemStorage
from django.db import models
class CustomValidNameStorage(FileSystemStorage):
    """Storage whose get_valid_name tags the name so tests can detect the call."""

    def get_valid_name(self, name):
        # Append a marker to show that this override was invoked.
        return '%s_valid' % name
# Module-level scratch directory shared by every storage/field below; created
# once at import time (no cleanup here — presumably the test harness removes
# it — confirm).
temp_storage_location = tempfile.mkdtemp()
temp_storage = FileSystemStorage(location=temp_storage_location)
class Storage(models.Model):
    """Model exercising FileField storage / upload_to / max_length variations."""

    def custom_upload_to(self, filename):
        # Fixed target name, ignoring the uploaded filename entirely.
        return 'foo'

    def random_upload_to(self, filename):
        # This returns a different result each time,
        # to make sure it only gets called once.
        return '%s/%s' % (random.randint(100, 999), filename)

    normal = models.FileField(storage=temp_storage, upload_to='tests')
    custom = models.FileField(storage=temp_storage, upload_to=custom_upload_to)
    # NOTE: this field name shadows the imported 'random' module inside the
    # class body (random_upload_to still resolves it at call time).
    random = models.FileField(storage=temp_storage, upload_to=random_upload_to)
    custom_valid_name = models.FileField(
        storage=CustomValidNameStorage(location=temp_storage_location),
        upload_to=random_upload_to,
    )
    default = models.FileField(storage=temp_storage, upload_to='tests', default='tests/default.txt')
    empty = models.FileField(storage=temp_storage)
    # Deliberately small / large max_length values for filename-truncation tests.
    limited_length = models.FileField(storage=temp_storage, upload_to='tests', max_length=20)
    extended_length = models.FileField(storage=temp_storage, upload_to='tests', max_length=300)
| bsd-3-clause |
rs2/pandas | pandas/core/arrays/sparse/dtype.py | 1 | 12178 | """Sparse Dtype"""
import re
from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type
import warnings
import numpy as np
from pandas._typing import Dtype, DtypeObj
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.base import ExtensionDtype, register_extension_dtype
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.dtypes.common import (
is_bool_dtype,
is_extension_array_dtype,
is_object_dtype,
is_scalar,
is_string_dtype,
pandas_dtype,
)
from pandas.core.dtypes.missing import isna, na_value_for_dtype
if TYPE_CHECKING:
from pandas.core.arrays.sparse.array import SparseArray # noqa: F401
@register_extension_dtype
class SparseDtype(ExtensionDtype):
    """
    Dtype for data stored in :class:`SparseArray`.

    This dtype implements the pandas ExtensionDtype interface.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64
        The dtype of the underlying array storing the non-fill value values.
    fill_value : scalar, optional
        The scalar value not stored in the SparseArray. By default, this
        depends on `dtype`.

        =========== ==========
        dtype       na_value
        =========== ==========
        float       ``np.nan``
        int         ``0``
        bool        ``False``
        datetime64  ``pd.NaT``
        timedelta64 ``pd.NaT``
        =========== ==========

        The default value may be overridden by specifying a `fill_value`.

    Attributes
    ----------
    None

    Methods
    -------
    None
    """

    # We include `_is_na_fill_value` in the metadata to avoid hash collisions
    # between SparseDtype(float, 0.0) and SparseDtype(float, nan).
    # Without is_na_fill_value in the comparison, those would be equal since
    # hash(nan) is (sometimes?) 0.
    _metadata = ("_dtype", "_fill_value", "_is_na_fill_value")

    def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None):
        # Unwrap another SparseDtype: reuse its subtype, and its fill_value
        # unless one was explicitly provided.
        if isinstance(dtype, type(self)):
            if fill_value is None:
                fill_value = dtype.fill_value
            dtype = dtype.subtype

        dtype = pandas_dtype(dtype)
        if is_string_dtype(dtype):
            # Strings are stored in object-dtype arrays.
            dtype = np.dtype("object")

        if fill_value is None:
            fill_value = na_value_for_dtype(dtype)

        if not is_scalar(fill_value):
            raise ValueError(f"fill_value must be a scalar. Got {fill_value} instead")
        self._dtype = dtype
        self._fill_value = fill_value

    def __hash__(self):
        # Python3 doesn't inherit __hash__ when a base class overrides
        # __eq__, so we explicitly do it here.
        return super().__hash__()

    def __eq__(self, other: Any) -> bool:
        # We have to override __eq__ to handle NA values in _metadata.
        # The base class does simple == checks, which fail for NA.
        if isinstance(other, str):
            try:
                other = self.construct_from_string(other)
            except TypeError:
                return False

        if isinstance(other, type(self)):
            subtype = self.subtype == other.subtype
            if self._is_na_fill_value:
                # this case is complicated by two things:
                # SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)
                # SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT)
                # i.e. we want to treat any floating-point NaN as equal, but
                # not a floating-point NaN and a datetime NaT.
                # NOTE(review): 'and' binds tighter than 'or', so this reads
                # as (other-is-NA and same-type) or isinstance(other_fv,
                # type(self_fv)); it appears intentional given the examples
                # above, but worth confirming against the test suite.
                fill_value = (
                    other._is_na_fill_value
                    and isinstance(self.fill_value, type(other.fill_value))
                    or isinstance(other.fill_value, type(self.fill_value))
                )
            else:
                fill_value = self.fill_value == other.fill_value

            return subtype and fill_value
        return False

    @property
    def fill_value(self):
        """
        The fill value of the array.

        Converting the SparseArray to a dense ndarray will fill the
        array with this value.

        .. warning::

           It's possible to end up with a SparseArray that has ``fill_value``
           values in ``sp_values``. This can occur, for example, when setting
           ``SparseArray.fill_value`` directly.
        """
        return self._fill_value

    @property
    def _is_na_fill_value(self):
        # True when the fill value is a missing-value sentinel (NaN/NaT/None).
        return isna(self.fill_value)

    @property
    def _is_numeric(self) -> bool:
        return not is_object_dtype(self.subtype)

    @property
    def _is_boolean(self) -> bool:
        return is_bool_dtype(self.subtype)

    @property
    def kind(self):
        """
        The sparse kind. Either 'integer', or 'block'.
        """
        return self.subtype.kind

    @property
    def type(self):
        # Scalar type of the underlying (dense) values.
        return self.subtype.type

    @property
    def subtype(self):
        # The numpy dtype of the non-fill values.
        return self._dtype

    @property
    def name(self):
        return f"Sparse[{self.subtype.name}, {repr(self.fill_value)}]"

    def __repr__(self) -> str:
        return self.name

    @classmethod
    def construct_array_type(cls) -> Type["SparseArray"]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        from pandas.core.arrays.sparse.array import SparseArray  # noqa: F811

        return SparseArray

    @classmethod
    def construct_from_string(cls, string: str) -> "SparseDtype":
        """
        Construct a SparseDtype from a string form.

        Parameters
        ----------
        string : str
            Can take the following forms.

            string           dtype
            ================ ============================
            'int'            SparseDtype[np.int64, 0]
            'Sparse'         SparseDtype[np.float64, nan]
            'Sparse[int]'    SparseDtype[np.int64, 0]
            'Sparse[int, 0]' SparseDtype[np.int64, 0]
            ================ ============================

            It is not possible to specify non-default fill values
            with a string. An argument like ``'Sparse[int, 1]'``
            will raise a ``TypeError`` because the default fill value
            for integers is 0.

        Returns
        -------
        SparseDtype
        """
        if not isinstance(string, str):
            raise TypeError(
                f"'construct_from_string' expects a string, got {type(string)}"
            )
        msg = f"Cannot construct a 'SparseDtype' from '{string}'"
        if string.startswith("Sparse"):
            try:
                sub_type, has_fill_value = cls._parse_subtype(string)
            except ValueError as err:
                raise TypeError(msg) from err
            else:
                result = SparseDtype(sub_type)
                msg = (
                    f"Cannot construct a 'SparseDtype' from '{string}'.\n\nIt "
                    "looks like the fill_value in the string is not "
                    "the default for the dtype. Non-default fill_values "
                    "are not supported. Use the 'SparseDtype()' "
                    "constructor instead."
                )
                # Round-trip check: a fill value in the string must match the
                # subtype's default, since strings can't carry custom fills.
                if has_fill_value and str(result) != string:
                    raise TypeError(msg)
                return result
        else:
            raise TypeError(msg)

    @staticmethod
    def _parse_subtype(dtype: str) -> Tuple[str, bool]:
        """
        Parse a string to get the subtype

        Parameters
        ----------
        dtype : str
            A string like

            * Sparse[subtype]
            * Sparse[subtype, fill_value]

        Returns
        -------
        subtype : str

        Raises
        ------
        ValueError
            When the subtype cannot be extracted.
        """
        xpr = re.compile(r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$")
        m = xpr.match(dtype)
        has_fill_value = False
        if m:
            subtype = m.groupdict()["subtype"]
            has_fill_value = bool(m.groupdict()["fill_value"])
        elif dtype == "Sparse":
            # Bare "Sparse" defaults to float64.
            subtype = "float64"
        else:
            raise ValueError(f"Cannot parse {dtype}")
        return subtype, has_fill_value

    @classmethod
    def is_dtype(cls, dtype: object) -> bool:
        dtype = getattr(dtype, "dtype", dtype)
        if isinstance(dtype, str) and dtype.startswith("Sparse"):
            sub_type, _ = cls._parse_subtype(dtype)
            dtype = np.dtype(sub_type)
        elif isinstance(dtype, cls):
            return True
        return isinstance(dtype, np.dtype) or dtype == "Sparse"

    def update_dtype(self, dtype):
        """
        Convert the SparseDtype to a new dtype.

        This takes care of converting the ``fill_value``.

        Parameters
        ----------
        dtype : Union[str, numpy.dtype, SparseDtype]
            The new dtype to use.

            * For a SparseDtype, it is simply returned
            * For a NumPy dtype (or str), the current fill value
              is converted to the new dtype, and a SparseDtype
              with `dtype` and the new fill value is returned.

        Returns
        -------
        SparseDtype
            A new SparseDtype with the correct `dtype` and fill value
            for that `dtype`.

        Raises
        ------
        ValueError
            When the current fill value cannot be converted to the
            new `dtype` (e.g. trying to convert ``np.nan`` to an
            integer dtype).

        Examples
        --------
        >>> SparseDtype(int, 0).update_dtype(float)
        Sparse[float64, 0.0]

        >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))
        Sparse[float64, nan]
        """
        cls = type(self)
        dtype = pandas_dtype(dtype)

        if not isinstance(dtype, cls):
            if is_extension_array_dtype(dtype):
                raise TypeError("sparse arrays of extension dtypes not supported")

            # astype_nansafe raises if the fill value can't be represented in
            # the target dtype (e.g. NaN -> int).
            fill_value = astype_nansafe(np.array(self.fill_value), dtype).item()
            dtype = cls(dtype, fill_value=fill_value)

        return dtype

    @property
    def _subtype_with_str(self):
        """
        Whether the SparseDtype's subtype should be considered ``str``.

        Typically, pandas will store string data in an object-dtype array.
        When converting values to a dtype, e.g. in ``.astype``, we need to
        be more specific, we need the actual underlying type.

        Returns
        -------
        >>> SparseDtype(int, 1)._subtype_with_str
        dtype('int64')

        >>> SparseDtype(object, 1)._subtype_with_str
        dtype('O')

        >>> dtype = SparseDtype(str, '')
        >>> dtype.subtype
        dtype('O')

        >>> dtype._subtype_with_str
        <class 'str'>
        """
        if isinstance(self.fill_value, str):
            return type(self.fill_value)
        return self.subtype

    def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]:
        # TODO for now only handle SparseDtypes and numpy dtypes => extend
        # with other compatible extension dtypes
        if any(
            isinstance(x, ExtensionDtype) and not isinstance(x, SparseDtype)
            for x in dtypes
        ):
            return None

        fill_values = [x.fill_value for x in dtypes if isinstance(x, SparseDtype)]
        fill_value = fill_values[0]

        # np.nan isn't a singleton, so we may end up with multiple
        # NaNs here, so we ignore the all-NA case too.
        if not (len(set(fill_values)) == 1 or isna(fill_values).all()):
            warnings.warn(
                "Concatenating sparse arrays with multiple fill "
                f"values: '{fill_values}'. Picking the first and "
                "converting the rest.",
                PerformanceWarning,
                stacklevel=6,
            )

        np_dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes]
        return SparseDtype(np.find_common_type(np_dtypes, []), fill_value=fill_value)
| bsd-3-clause |
davidcusatis/horizon | openstack_dashboard/dashboards/project/volumes/volumes/forms.py | 10 | 35894 | # Copyright 2012 Nebula, Inc.
# All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing volumes.
"""
from django.conf import settings
from django.core.urlresolvers import reverse
from django.forms import ValidationError # noqa
from django import http
from django.template.defaultfilters import filesizeformat # noqa
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import functions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.api import glance
from openstack_dashboard.api import nova
from openstack_dashboard.dashboards.project.images import utils
from openstack_dashboard.dashboards.project.instances import tables
from openstack_dashboard.usage import quotas
# Image-backend configuration from Django settings; empty dict when the
# OPENSTACK_IMAGE_BACKEND setting is absent.
IMAGE_BACKEND_SETTINGS = getattr(settings, 'OPENSTACK_IMAGE_BACKEND', {})
IMAGE_FORMAT_CHOICES = IMAGE_BACKEND_SETTINGS.get('image_formats', [])
# Presumably the disk formats accepted when creating a volume from an image —
# confirm against the form/view code that consumes these constants.
VALID_DISK_FORMATS = ('raw', 'vmdk', 'vdi', 'qcow2')
DEFAULT_CONTAINER_FORMAT = 'bare'
# Determine whether the extension for Cinder AZs is enabled
# Determine whether the extension for Cinder AZs is enabled
def cinder_az_supported(request):
    """Return True when Cinder's AvailabilityZones extension is enabled.

    Falls back to False (after surfacing an error to the user) when the
    extension list cannot be queried.
    """
    try:
        return cinder.extension_supported(request, 'AvailabilityZones')
    except Exception:
        exceptions.handle(request, _('Unable to determine if availability '
                                     'zones extension is supported.'))
        return False
def availability_zones(request):
    """Build the (value, label) choice list of available Cinder zones.

    A sentinel entry is prepended: "No availability zones found" when the
    list is empty, or "Any Availability Zone" when there is more than one
    real choice.
    """
    choices = []
    if cinder_az_supported(request):
        try:
            zones = api.cinder.availability_zone_list(request)
            # Only zones currently marked available are offered, sorted by name.
            choices = sorted(
                (zone.zoneName, zone.zoneName)
                for zone in zones
                if zone.zoneState['available'])
        except Exception:
            exceptions.handle(request, _('Unable to retrieve availability '
                                         'zones.'))
    if not choices:
        choices.insert(0, ("", _("No availability zones found")))
    elif len(choices) > 1:
        choices.insert(0, ("", _("Any Availability Zone")))

    return choices
class CreateForm(forms.SelfHandlingForm):
    """Form for creating a Cinder volume.

    A volume may be empty or sourced from a snapshot, an image or another
    volume.  The ``prepare_source_fields_*`` helpers, dispatched from
    ``__init__`` based on the GET parameters, prune the fields that do not
    apply to the chosen source and pre-populate the rest.
    """
    name = forms.CharField(max_length=255, label=_("Volume Name"),
                           required=False)
    description = forms.CharField(max_length=255, widget=forms.Textarea(
        attrs={'rows': 4}),
        label=_("Description"), required=False)
    volume_source_type = forms.ChoiceField(label=_("Volume Source"),
                                           required=False,
                                           widget=forms.Select(attrs={
                                               'class': 'switchable',
                                               'data-slug': 'source'}))
    snapshot_source = forms.ChoiceField(
        label=_("Use snapshot as a source"),
        widget=forms.SelectWidget(
            attrs={'class': 'snapshot-selector'},
            data_attrs=('size', 'name'),
            transform=lambda x: "%s (%s GiB)" % (x.name, x.size)),
        required=False)
    image_source = forms.ChoiceField(
        label=_("Use image as a source"),
        widget=forms.SelectWidget(
            attrs={'class': 'image-selector'},
            data_attrs=('size', 'name', 'min_disk'),
            transform=lambda x: "%s (%s)" % (x.name, filesizeformat(x.bytes))),
        required=False)
    volume_source = forms.ChoiceField(
        label=_("Use a volume as source"),
        widget=forms.SelectWidget(
            attrs={'class': 'image-selector'},
            data_attrs=('size', 'name'),
            transform=lambda x: "%s (%s GiB)" % (x.name, x.size)),
        required=False)
    type = forms.ChoiceField(
        label=_("Type"),
        required=False,
        widget=forms.Select(
            attrs={'class': 'switched',
                   'data-switch-on': 'source',
                   'data-source-no_source_type': _('Type'),
                   'data-source-image_source': _('Type')}))
    size = forms.IntegerField(min_value=1, initial=1, label=_("Size (GiB)"))
    availability_zone = forms.ChoiceField(
        label=_("Availability Zone"),
        required=False,
        widget=forms.Select(
            attrs={'class': 'switched',
                   'data-switch-on': 'source',
                   'data-source-no_source_type': _('Availability Zone'),
                   'data-source-image_source': _('Availability Zone')}))
    def prepare_source_fields_if_snapshot_specified(self, request):
        """Pre-fill the form for the "create from snapshot" flow.

        Pins the snapshot choice to the one given in the URL and removes
        the fields that do not apply to snapshot-sourced volumes.
        """
        try:
            snapshot = self.get_snapshot(request,
                                         request.GET["snapshot_id"])
            self.fields['name'].initial = snapshot.name
            self.fields['size'].initial = snapshot.size
            self.fields['snapshot_source'].choices = ((snapshot.id,
                                                       snapshot),)
            try:
                # Set the volume type from the original volume
                orig_volume = cinder.volume_get(request,
                                                snapshot.volume_id)
                self.fields['type'].initial = orig_volume.volume_type
            except Exception:
                # Best effort only -- the new volume can still be created
                # without inheriting the original volume's type.
                pass
            self.fields['size'].help_text = (
                _('Volume size must be equal to or greater than the '
                  'snapshot size (%sGiB)') % snapshot.size)
            del self.fields['image_source']
            del self.fields['volume_source']
            del self.fields['volume_source_type']
            del self.fields['availability_zone']
        except Exception:
            exceptions.handle(request,
                              _('Unable to load the specified snapshot.'))
    def prepare_source_fields_if_image_specified(self, request):
        """Pre-fill the form for the "create from image" flow.

        The minimum volume size is the larger of the image size and the
        image's declared minimum disk size.
        """
        self.fields['availability_zone'].choices = \
            availability_zones(request)
        try:
            image = self.get_image(request,
                                   request.GET["image_id"])
            # Keep the raw byte count; image.size is later shown in GiB.
            image.bytes = image.size
            self.fields['name'].initial = image.name
            min_vol_size = functions.bytes_to_gigabytes(
                image.size)
            size_help_text = (_('Volume size must be equal to or greater '
                                'than the image size (%s)')
                              % filesizeformat(image.size))
            properties = getattr(image, 'properties', {})
            # min_disk may live either on the image itself or in its
            # properties dict depending on the Glance API version.
            min_disk_size = (getattr(image, 'min_disk', 0) or
                             properties.get('min_disk', 0))
            if (min_disk_size > min_vol_size):
                min_vol_size = min_disk_size
                size_help_text = (_('Volume size must be equal to or '
                                    'greater than the image minimum '
                                    'disk size (%sGiB)')
                                  % min_disk_size)
            self.fields['size'].initial = min_vol_size
            self.fields['size'].help_text = size_help_text
            self.fields['image_source'].choices = ((image.id, image),)
            del self.fields['snapshot_source']
            del self.fields['volume_source']
            del self.fields['volume_source_type']
        except Exception:
            msg = _('Unable to load the specified image. %s')
            exceptions.handle(request, msg % request.GET['image_id'])
    def prepare_source_fields_if_volume_specified(self, request):
        """Pre-fill the form for the "clone an existing volume" flow."""
        self.fields['availability_zone'].choices = \
            availability_zones(request)
        volume = None
        try:
            volume = self.get_volume(request, request.GET["volume_id"])
        except Exception:
            msg = _('Unable to load the specified volume. %s')
            exceptions.handle(request, msg % request.GET['volume_id'])
        if volume is not None:
            self.fields['name'].initial = volume.name
            self.fields['description'].initial = volume.description
            min_vol_size = volume.size
            size_help_text = (_('Volume size must be equal to or greater '
                                'than the origin volume size (%sGiB)')
                              % volume.size)
            self.fields['size'].initial = min_vol_size
            self.fields['size'].help_text = size_help_text
            self.fields['volume_source'].choices = ((volume.id, volume),)
            self.fields['type'].initial = volume.type
            del self.fields['snapshot_source']
            del self.fields['image_source']
            del self.fields['volume_source_type']
    def prepare_source_fields_default(self, request):
        """Populate the source choices when no source was given in the URL.

        Each source field is offered only if at least one candidate
        (snapshot, image or volume) exists; otherwise it is removed.
        """
        source_type_choices = []
        self.fields['availability_zone'].choices = \
            availability_zones(request)
        try:
            available = api.cinder.VOLUME_STATE_AVAILABLE
            snapshots = cinder.volume_snapshot_list(
                request, search_opts=dict(status=available))
            if snapshots:
                source_type_choices.append(("snapshot_source",
                                            _("Snapshot")))
                choices = [('', _("Choose a snapshot"))] + \
                          [(s.id, s) for s in snapshots]
                self.fields['snapshot_source'].choices = choices
            else:
                del self.fields['snapshot_source']
        except Exception:
            exceptions.handle(request,
                              _("Unable to retrieve volume snapshots."))
        images = utils.get_available_images(request,
                                            request.user.tenant_id)
        if images:
            source_type_choices.append(("image_source", _("Image")))
            choices = [('', _("Choose an image"))]
            for image in images:
                # Preserve the byte count, then express size in GiB for
                # the widget's transform/display.
                image.bytes = image.size
                image.size = functions.bytes_to_gigabytes(image.bytes)
                choices.append((image.id, image))
            self.fields['image_source'].choices = choices
        else:
            del self.fields['image_source']
        volumes = self.get_volumes(request)
        if volumes:
            source_type_choices.append(("volume_source", _("Volume")))
            choices = [('', _("Choose a volume"))]
            for volume in volumes:
                choices.append((volume.id, volume))
            self.fields['volume_source'].choices = choices
        else:
            del self.fields['volume_source']
        if source_type_choices:
            choices = ([('no_source_type',
                         _("No source, empty volume"))] +
                       source_type_choices)
            self.fields['volume_source_type'].choices = choices
        else:
            del self.fields['volume_source_type']
    def __init__(self, request, *args, **kwargs):
        super(CreateForm, self).__init__(request, *args, **kwargs)
        volume_types = cinder.volume_type_list(request)
        # "no_type" is a sentinel translated to '' (no type) in handle().
        self.fields['type'].choices = [("no_type", _("No volume type"))] + \
                                      [(type.name, type.name)
                                       for type in volume_types]
        if 'initial' in kwargs and 'type' in kwargs['initial']:
            # if there is a default volume type to select, then remove
            # the first "No volume type" entry
            self.fields['type'].choices.pop(0)
        if "snapshot_id" in request.GET:
            self.prepare_source_fields_if_snapshot_specified(request)
        elif 'image_id' in request.GET:
            self.prepare_source_fields_if_image_specified(request)
        elif 'volume_id' in request.GET:
            self.prepare_source_fields_if_volume_specified(request)
        else:
            self.prepare_source_fields_default(request)
    def clean(self):
        """Require a concrete source choice matching the selected type."""
        cleaned_data = super(CreateForm, self).clean()
        source_type = self.cleaned_data.get('volume_source_type')
        if (source_type == 'image_source' and
                not cleaned_data.get('image_source')):
            msg = _('Image source must be specified')
            self._errors['image_source'] = self.error_class([msg])
        elif (source_type == 'snapshot_source' and
                not cleaned_data.get('snapshot_source')):
            msg = _('Snapshot source must be specified')
            self._errors['snapshot_source'] = self.error_class([msg])
        elif (source_type == 'volume_source' and
                not cleaned_data.get('volume_source')):
            msg = _('Volume source must be specified')
            self._errors['volume_source'] = self.error_class([msg])
        return cleaned_data
    def get_volumes(self, request):
        """Return the volumes in the "available" state (or [] on error)."""
        volumes = []
        try:
            available = api.cinder.VOLUME_STATE_AVAILABLE
            volumes = cinder.volume_list(self.request,
                                         search_opts=dict(status=available))
        except Exception:
            exceptions.handle(request,
                              _('Unable to retrieve list of volumes.'))
        return volumes
    def handle(self, request, data):
        """Validate sizes/quota and create the volume via Cinder.

        Returns the new volume on success, False on a validation error,
        and redirects (via ``exceptions.handle``) on an API failure.
        """
        try:
            usages = quotas.tenant_limit_usages(self.request)
            availableGB = usages['maxTotalVolumeGigabytes'] - \
                usages['gigabytesUsed']
            availableVol = usages['maxTotalVolumes'] - usages['volumesUsed']
            snapshot_id = None
            image_id = None
            volume_id = None
            source_type = data.get('volume_source_type', None)
            az = data.get('availability_zone', None) or None
            if (data.get("snapshot_source", None) and
                    source_type in ['', None, 'snapshot_source']):
                # Create from Snapshot
                snapshot = self.get_snapshot(request,
                                             data["snapshot_source"])
                snapshot_id = snapshot.id
                if (data['size'] < snapshot.size):
                    error_message = (_('The volume size cannot be less than '
                                       'the snapshot size (%sGiB)')
                                     % snapshot.size)
                    raise ValidationError(error_message)
                # AZ is inherited from the snapshot's volume.
                az = None
            elif (data.get("image_source", None) and
                    source_type in ['', None, 'image_source']):
                # Create from image
                image = self.get_image(request,
                                       data["image_source"])
                image_id = image.id
                image_size = functions.bytes_to_gigabytes(image.size)
                if (data['size'] < image_size):
                    error_message = (_('The volume size cannot be less than '
                                       'the image size (%s)')
                                     % filesizeformat(image.size))
                    raise ValidationError(error_message)
                properties = getattr(image, 'properties', {})
                min_disk_size = (getattr(image, 'min_disk', 0) or
                                 properties.get('min_disk', 0))
                if (min_disk_size > 0 and data['size'] < min_disk_size):
                    error_message = (_('The volume size cannot be less than '
                                       'the image minimum disk size (%sGiB)')
                                     % min_disk_size)
                    raise ValidationError(error_message)
            elif (data.get("volume_source", None) and
                    source_type in ['', None, 'volume_source']):
                # Create from volume
                volume = self.get_volume(request, data["volume_source"])
                volume_id = volume.id
                if data['size'] < volume.size:
                    error_message = (_('The volume size cannot be less than '
                                       'the source volume size (%sGiB)')
                                     % volume.size)
                    raise ValidationError(error_message)
            else:
                if type(data['size']) is str:
                    data['size'] = int(data['size'])
            if availableGB < data['size']:
                error_message = _('A volume of %(req)iGiB cannot be created '
                                  'as you only have %(avail)iGiB of your '
                                  'quota available.')
                params = {'req': data['size'],
                          'avail': availableGB}
                raise ValidationError(error_message % params)
            elif availableVol <= 0:
                error_message = _('You are already using all of your available'
                                  ' volumes.')
                raise ValidationError(error_message)
            # No metadata is collected by this form today.
            metadata = {}
            if data['type'] == 'no_type':
                data['type'] = ''
            volume = cinder.volume_create(request,
                                          data['size'],
                                          data['name'],
                                          data['description'],
                                          data['type'],
                                          snapshot_id=snapshot_id,
                                          image_id=image_id,
                                          metadata=metadata,
                                          availability_zone=az,
                                          source_volid=volume_id)
            message = _('Creating volume "%s"') % data['name']
            messages.info(request, message)
            return volume
        except ValidationError as e:
            self.api_error(e.messages[0])
            return False
        except Exception:
            redirect = reverse("horizon:project:volumes:index")
            exceptions.handle(request,
                              _("Unable to create volume."),
                              redirect=redirect)
    @memoized
    def get_snapshot(self, request, id):
        """Fetch a snapshot by id (memoized per form instance)."""
        return cinder.volume_snapshot_get(request, id)
    @memoized
    def get_image(self, request, id):
        """Fetch an image by id (memoized per form instance)."""
        return glance.image_get(request, id)
    @memoized
    def get_volume(self, request, id):
        """Fetch a volume by id (memoized per form instance)."""
        return cinder.volume_get(request, id)
class AttachForm(forms.SelfHandlingForm):
    """Form for attaching a volume to a Nova instance."""
    instance = forms.ChoiceField(label=_("Attach to Instance"),
                                 help_text=_("Select an instance to "
                                             "attach to."))
    device = forms.CharField(label=_("Device Name"),
                             widget=forms.TextInput(attrs={'placeholder':
                                                           '/dev/vdc'}),
                             required=False,
                             help_text=_("Actual device name may differ due "
                                         "to hypervisor settings. If not "
                                         "specified, then hypervisor will "
                                         "select a device name."))
    def __init__(self, *args, **kwargs):
        super(AttachForm, self).__init__(*args, **kwargs)
        # Hide the device field if the hypervisor doesn't support it.
        if not nova.can_set_mount_point():
            self.fields['device'].widget = forms.widgets.HiddenInput()
        # populate volume_id
        volume = kwargs.get('initial', {}).get("volume", None)
        if volume:
            volume_id = volume.id
        else:
            volume_id = None
        self.fields['volume_id'] = forms.CharField(widget=forms.HiddenInput(),
                                                   initial=volume_id)
        # Populate instance choices
        # NOTE(review): instances already holding this volume are filtered
        # out; this assumes `volume` is not None whenever instance_list is
        # non-empty -- confirm against callers.
        instance_list = kwargs.get('initial', {}).get('instances', [])
        instances = []
        for instance in instance_list:
            if instance.status in tables.VOLUME_ATTACH_READY_STATES and \
                    not any(instance.id == att["server_id"]
                            for att in volume.attachments):
                instances.append((instance.id, '%s (%s)' % (instance.name,
                                                            instance.id)))
        if instances:
            instances.insert(0, ("", _("Select an instance")))
        else:
            instances = (("", _("No instances available")),)
        self.fields['instance'].choices = instances
    def handle(self, request, data):
        """Attach the volume; returns True or redirects on failure."""
        instance_choices = dict(self.fields['instance'].choices)
        instance_name = instance_choices.get(data['instance'],
                                             _("Unknown instance (None)"))
        # The name of the instance in the choices list has the ID appended to
        # it, so let's slice that off...
        instance_name = instance_name.rsplit(" (")[0]
        # api requires non-empty device name or None
        device = data.get('device') or None
        try:
            attach = api.nova.instance_volume_attach(request,
                                                     data['volume_id'],
                                                     data['instance'],
                                                     device)
            volume = cinder.volume_get(request, data['volume_id'])
            message = _('Attaching volume %(vol)s to instance '
                        '%(inst)s on %(dev)s.') % {"vol": volume.name,
                                                   "inst": instance_name,
                                                   "dev": attach.device}
            messages.info(request, message)
            return True
        except Exception:
            redirect = reverse("horizon:project:volumes:index")
            exceptions.handle(request,
                              _('Unable to attach volume.'),
                              redirect=redirect)
class CreateSnapshotForm(forms.SelfHandlingForm):
    """Form for creating a snapshot of an existing volume."""
    name = forms.CharField(max_length=255, label=_("Snapshot Name"))
    description = forms.CharField(max_length=255,
                                  widget=forms.Textarea(attrs={'rows': 4}),
                                  label=_("Description"),
                                  required=False)
    def __init__(self, request, *args, **kwargs):
        super(CreateSnapshotForm, self).__init__(request, *args, **kwargs)
        # populate volume_id
        volume_id = kwargs.get('initial', {}).get('volume_id', [])
        self.fields['volume_id'] = forms.CharField(widget=forms.HiddenInput(),
                                                   initial=volume_id)
    def handle(self, request, data):
        """Create the snapshot, forcing it if the volume is in use.

        Returns the new snapshot on success; redirects on failure.
        """
        try:
            volume = cinder.volume_get(request,
                                       data['volume_id'])
            force = False
            message = _('Creating volume snapshot "%s".') % data['name']
            if volume.status == 'in-use':
                # Snapshotting an attached volume requires force=True.
                force = True
                message = _('Forcing to create snapshot "%s" '
                            'from attached volume.') % data['name']
            snapshot = cinder.volume_snapshot_create(request,
                                                     data['volume_id'],
                                                     data['name'],
                                                     data['description'],
                                                     force=force)
            messages.info(request, message)
            return snapshot
        except Exception as e:
            redirect = reverse("horizon:project:volumes:index")
            msg = _('Unable to create volume snapshot.')
            # Only HTTP client exceptions carry a ``code`` attribute; a
            # plain ``e.code`` would raise AttributeError for any other
            # exception type and mask the original failure.
            if getattr(e, 'code', None) == 413:
                msg = _('Requested snapshot would exceed the allowed quota.')
            exceptions.handle(request,
                              msg,
                              redirect=redirect)
class CreateTransferForm(forms.SelfHandlingForm):
    """Form for starting an ownership transfer of a volume."""
    name = forms.CharField(max_length=255, label=_("Transfer Name"),
                           required=False)
    def handle(self, request, data):
        """Create the transfer and redirect to the auth-key display page."""
        try:
            volume_id = self.initial['volume_id']
            transfer = cinder.transfer_create(request, volume_id, data['name'])
            if data['name']:
                msg = _('Created volume transfer: "%s".') % data['name']
            else:
                msg = _('Created volume transfer.')
            messages.success(request, msg)
            # Redirect so the (one-time) authorization key can be shown.
            response = http.HttpResponseRedirect(
                reverse("horizon:project:volumes:volumes:show_transfer",
                        args=(transfer.id, transfer.auth_key)))
            return response
        except Exception:
            redirect = reverse("horizon:project:volumes:index")
            exceptions.handle(request, _('Unable to create volume transfer.'),
                              redirect=redirect)
class AcceptTransferForm(forms.SelfHandlingForm):
    """Form for accepting a volume transfer using its ID and auth key."""
    # These max lengths correspond to the sizes in cinder
    transfer_id = forms.CharField(max_length=36, label=_("Transfer ID"))
    auth_key = forms.CharField(max_length=16, label=_("Authorization Key"))
    def handle(self, request, data):
        """Accept the transfer; returns it on success, redirects on error."""
        try:
            transfer = cinder.transfer_accept(request,
                                              data['transfer_id'],
                                              data['auth_key'])
            msg = (_('Successfully accepted volume transfer: "%s"')
                   % data['transfer_id'])
            messages.success(request, msg)
            return transfer
        except Exception:
            redirect = reverse("horizon:project:volumes:index")
            exceptions.handle(request, _('Unable to accept volume transfer.'),
                              redirect=redirect)
class ShowTransferForm(forms.SelfHandlingForm):
    """Read-only form displaying a transfer's name, ID and auth key."""
    name = forms.CharField(
        label=_("Transfer Name"),
        widget=forms.TextInput(attrs={'readonly': 'readonly'}),
        required=False)
    id = forms.CharField(
        label=_("Transfer ID"),
        widget=forms.TextInput(attrs={'readonly': 'readonly'}),
        required=False)
    auth_key = forms.CharField(
        label=_("Authorization Key"),
        widget=forms.TextInput(attrs={'readonly': 'readonly'}),
        required=False)
    def handle(self, request, data):
        # Display-only form: nothing to submit.
        pass
class UpdateForm(forms.SelfHandlingForm):
    """Form for editing a volume's name, description and bootable flag."""
    name = forms.CharField(max_length=255,
                           label=_("Volume Name"),
                           required=False)
    description = forms.CharField(max_length=255,
                                  widget=forms.Textarea(attrs={'rows': 4}),
                                  label=_("Description"),
                                  required=False)
    bootable = forms.BooleanField(label=_("Bootable"),
                                  required=False,
                                  help_text=_("Specifies that the volume can "
                                              "be used to launch an instance"))
    def handle(self, request, data):
        """Apply the edits; ``exceptions.handle`` redirects on failure."""
        volume_id = self.initial['volume_id']
        try:
            cinder.volume_update(request, volume_id, data['name'],
                                 data['description'])
        except Exception:
            redirect = reverse("horizon:project:volumes:index")
            exceptions.handle(request,
                              _('Unable to update volume.'),
                              redirect=redirect)
        # only update bootable flag if modified
        make_bootable = data['bootable']
        if make_bootable != self.initial['bootable']:
            try:
                cinder.volume_set_bootable(request, volume_id, make_bootable)
            except Exception:
                redirect = reverse("horizon:project:volumes:index")
                exceptions.handle(request,
                                  _('Unable to set bootable flag on volume.'),
                                  redirect=redirect)
        message = _('Updating volume "%s"') % data['name']
        messages.info(request, message)
        return True
class UploadToImageForm(forms.SelfHandlingForm):
    """Form for uploading a volume's contents to Glance as an image."""
    name = forms.CharField(label=_('Volume Name'),
                           widget=forms.TextInput(
                               attrs={'readonly': 'readonly'}))
    image_name = forms.CharField(max_length=255, label=_('Image Name'))
    disk_format = forms.ChoiceField(label=_('Disk Format'),
                                    widget=forms.Select(),
                                    required=False)
    force = forms.BooleanField(
        label=pgettext_lazy("Force upload volume in in-use status to image",
                            u"Force"),
        widget=forms.CheckboxInput(),
        required=False)
    def __init__(self, request, *args, **kwargs):
        super(UploadToImageForm, self).__init__(request, *args, **kwargs)
        # 'vhd','iso','aki','ari' and 'ami' disk formats are supported by
        # glance, but not by qemu-img. qemu-img supports 'vpc', 'cloop', 'cow'
        # and 'qcow' which are not supported by glance.
        # I can only use 'raw', 'vmdk', 'vdi' or 'qcow2' so qemu-img will not
        # have issues when processes image request from cinder.
        disk_format_choices = [(value, name) for value, name
                               in IMAGE_FORMAT_CHOICES
                               if value in VALID_DISK_FORMATS]
        self.fields['disk_format'].choices = disk_format_choices
        self.fields['disk_format'].initial = 'raw'
        # "Force" is only meaningful for volumes that are attached.
        if self.initial['status'] != 'in-use':
            self.fields['force'].widget = forms.widgets.HiddenInput()
    def handle(self, request, data):
        """Kick off the volume-to-image upload; redirects on failure."""
        volume_id = self.initial['id']
        try:
            # 'aki','ari','ami' container formats are supported by glance,
            # but they need matching disk format to use.
            # Glance usually uses 'bare' for other disk formats except
            # amazon's. Please check the comment in CreateImageForm class
            cinder.volume_upload_to_image(request,
                                          volume_id,
                                          data['force'],
                                          data['image_name'],
                                          DEFAULT_CONTAINER_FORMAT,
                                          data['disk_format'])
            message = _(
                'Successfully sent the request to upload volume to image '
                'for volume: "%s"') % data['name']
            messages.info(request, message)
            return True
        except Exception:
            redirect = reverse("horizon:project:volumes:index")
            error_message = _(
                'Unable to upload volume to image for volume: "%s"') \
                % data['name']
            exceptions.handle(request, error_message, redirect=redirect)
class ExtendForm(forms.SelfHandlingForm):
    """Form for growing an existing volume to a larger size."""
    name = forms.CharField(
        label=_("Volume Name"),
        widget=forms.TextInput(attrs={'readonly': 'readonly'}),
        required=False,
    )
    orig_size = forms.IntegerField(
        label=_("Current Size (GiB)"),
        widget=forms.TextInput(attrs={'readonly': 'readonly'}),
        required=False,
    )
    new_size = forms.IntegerField(label=_("New Size (GiB)"))
    def clean(self):
        """Check that the new size grows the volume and fits the quota."""
        cleaned_data = super(ExtendForm, self).clean()
        new_size = cleaned_data.get('new_size')
        # If the field itself failed validation (missing / not an integer)
        # it is absent from cleaned_data; skip the cross-field checks
        # instead of comparing None against an int (TypeError on Python 3).
        # The field error is already reported by Django.
        if new_size is None:
            return cleaned_data
        orig_size = self.initial['orig_size']
        if new_size <= orig_size:
            error_msg = _("New size must be greater than current size.")
            self._errors['new_size'] = self.error_class([error_msg])
            return cleaned_data
        usages = quotas.tenant_limit_usages(self.request)
        availableGB = usages['maxTotalVolumeGigabytes'] - \
            usages['gigabytesUsed']
        if availableGB < (new_size - orig_size):
            message = _('Volume cannot be extended to %(req)iGiB as '
                        'you only have %(avail)iGiB of your quota '
                        'available.')
            params = {'req': new_size, 'avail': availableGB}
            self._errors["new_size"] = self.error_class([message % params])
        return cleaned_data
    def handle(self, request, data):
        """Issue the extend call; returns the volume or redirects on error."""
        volume_id = self.initial['id']
        try:
            volume = cinder.volume_extend(request,
                                          volume_id,
                                          data['new_size'])
            message = _('Extending volume: "%s"') % data['name']
            messages.info(request, message)
            return volume
        except Exception:
            redirect = reverse("horizon:project:volumes:index")
            exceptions.handle(request,
                              _('Unable to extend volume.'),
                              redirect=redirect)
class RetypeForm(forms.SelfHandlingForm):
    """Form for changing a volume's type, with an optional migration."""
    name = forms.CharField(label=_('Volume Name'),
                           widget=forms.TextInput(
                               attrs={'readonly': 'readonly'}))
    volume_type = forms.ChoiceField(label=_('Type'))
    MIGRATION_POLICY_CHOICES = [('never', _('Never')),
                                ('on-demand', _('On Demand'))]
    migration_policy = forms.ChoiceField(label=_('Migration Policy'),
                                         widget=forms.Select(),
                                         choices=(MIGRATION_POLICY_CHOICES),
                                         initial='never',
                                         required=False)
    def __init__(self, request, *args, **kwargs):
        super(RetypeForm, self).__init__(request, *args, **kwargs)
        try:
            volume_types = cinder.volume_type_list(request)
        except Exception:
            # exceptions.handle with redirect raises, aborting __init__.
            redirect_url = reverse("horizon:project:volumes:index")
            error_message = _('Unable to retrieve the volume type list.')
            exceptions.handle(request, error_message, redirect=redirect_url)
        # Offer every type except the volume's current one.
        origin_type = self.initial['volume_type']
        types_list = [(t.name, t.name)
                      for t in volume_types
                      if t.name != origin_type]
        if types_list:
            types_list.insert(0, ("", _("Select a new volume type")))
        else:
            types_list.insert(0, ("", _("No other volume types available")))
        self.fields['volume_type'].choices = sorted(types_list)
    def handle(self, request, data):
        """Request the retype; returns True or redirects on failure."""
        volume_id = self.initial['id']
        try:
            cinder.volume_retype(request,
                                 volume_id,
                                 data['volume_type'],
                                 data['migration_policy'])
            message = _(
                'Successfully sent the request to change the volume '
                'type to "%(vtype)s" for volume: "%(name)s"')
            params = {'name': data['name'],
                      'vtype': data['volume_type']}
            messages.info(request, message % params)
            return True
        except Exception:
            redirect = reverse("horizon:project:volumes:index")
            error_message = _(
                'Unable to change the volume type for volume: "%s"') \
                % data['name']
            exceptions.handle(request, error_message, redirect=redirect)
| apache-2.0 |
benjaminjkraft/django | tests/select_for_update/tests.py | 123 | 9449 | from __future__ import unicode_literals
import threading
import time
from multiple_database.routers import TestRouter
from django.db import DatabaseError, connection, router, transaction
from django.test import (
TransactionTestCase, override_settings, skipIfDBFeature,
skipUnlessDBFeature,
)
from .models import Person
# We need to set settings.DEBUG to True so we can capture the output SQL
# to examine.
@override_settings(DEBUG=True)
class SelectForUpdateTests(TransactionTestCase):
    """Tests for QuerySet.select_for_update() locking behaviour.

    A second, independent database connection holds a blocking
    SELECT ... FOR UPDATE transaction while worker threads attempt the
    same rows, to verify blocking, NOWAIT and error semantics.
    """
    available_apps = ['select_for_update']
    def setUp(self):
        # This is executed in autocommit mode so that code in
        # run_select_for_update can see this data.
        self.person = Person.objects.create(name='Reinhardt')
        # We need another database connection in transaction to test that one
        # connection issuing a SELECT ... FOR UPDATE will block.
        self.new_connection = connection.copy()
    def tearDown(self):
        try:
            self.end_blocking_transaction()
        except (DatabaseError, AttributeError):
            # No blocking transaction was started, or it already ended.
            pass
        self.new_connection.close()
    def start_blocking_transaction(self):
        """Open a FOR UPDATE transaction on the second connection."""
        self.new_connection.set_autocommit(False)
        # Start a blocking transaction. At some point,
        # end_blocking_transaction() should be called.
        self.cursor = self.new_connection.cursor()
        sql = 'SELECT * FROM %(db_table)s %(for_update)s;' % {
            'db_table': Person._meta.db_table,
            'for_update': self.new_connection.ops.for_update_sql(),
        }
        self.cursor.execute(sql, ())
        self.cursor.fetchone()
    def end_blocking_transaction(self):
        # Roll back the blocking transaction.
        self.new_connection.rollback()
        self.new_connection.set_autocommit(True)
    def has_for_update_sql(self, tested_connection, nowait=False):
        """Return True if the last query used the FOR UPDATE stanza."""
        # Examine the SQL that was executed to determine whether it
        # contains the 'SELECT..FOR UPDATE' stanza.
        for_update_sql = tested_connection.ops.for_update_sql(nowait)
        sql = tested_connection.queries[-1]['sql']
        return bool(sql.find(for_update_sql) > -1)
    @skipUnlessDBFeature('has_select_for_update')
    def test_for_update_sql_generated(self):
        """
        Test that the backend's FOR UPDATE variant appears in
        generated SQL when select_for_update is invoked.
        """
        with transaction.atomic():
            list(Person.objects.all().select_for_update())
        self.assertTrue(self.has_for_update_sql(connection))
    @skipUnlessDBFeature('has_select_for_update_nowait')
    def test_for_update_sql_generated_nowait(self):
        """
        Test that the backend's FOR UPDATE NOWAIT variant appears in
        generated SQL when select_for_update is invoked.
        """
        with transaction.atomic():
            list(Person.objects.all().select_for_update(nowait=True))
        self.assertTrue(self.has_for_update_sql(connection, nowait=True))
    @skipUnlessDBFeature('has_select_for_update_nowait')
    def test_nowait_raises_error_on_block(self):
        """
        If nowait is specified, we expect an error to be raised rather
        than blocking.
        """
        self.start_blocking_transaction()
        status = []
        thread = threading.Thread(
            target=self.run_select_for_update,
            args=(status,),
            kwargs={'nowait': True},
        )
        thread.start()
        time.sleep(1)
        thread.join()
        self.end_blocking_transaction()
        self.assertIsInstance(status[-1], DatabaseError)
    @skipIfDBFeature('has_select_for_update_nowait')
    @skipUnlessDBFeature('has_select_for_update')
    def test_unsupported_nowait_raises_error(self):
        """
        If a SELECT...FOR UPDATE NOWAIT is run on a database backend
        that supports FOR UPDATE but not NOWAIT, then we should find
        that a DatabaseError is raised.
        """
        self.assertRaises(
            DatabaseError,
            list,
            Person.objects.all().select_for_update(nowait=True)
        )
    @skipUnlessDBFeature('has_select_for_update')
    def test_for_update_requires_transaction(self):
        """
        Test that a TransactionManagementError is raised
        when a select_for_update query is executed outside of a transaction.
        """
        with self.assertRaises(transaction.TransactionManagementError):
            list(Person.objects.all().select_for_update())
    @skipUnlessDBFeature('has_select_for_update')
    def test_for_update_requires_transaction_only_in_execution(self):
        """
        Test that no TransactionManagementError is raised
        when select_for_update is invoked outside of a transaction -
        only when the query is executed.
        """
        people = Person.objects.all().select_for_update()
        with self.assertRaises(transaction.TransactionManagementError):
            list(people)
    def run_select_for_update(self, status, nowait=False):
        """
        Utility method that runs a SELECT FOR UPDATE against all
        Person instances. After the select_for_update, it attempts
        to update the name of the only record, save, and commit.
        This function expects to run in a separate thread.
        """
        status.append('started')
        try:
            # We need to enter transaction management again, as this is done on
            # per-thread basis
            with transaction.atomic():
                people = list(
                    Person.objects.all().select_for_update(nowait=nowait)
                )
                people[0].name = 'Fred'
                people[0].save()
        except DatabaseError as e:
            status.append(e)
        finally:
            # This method is run in a separate thread. It uses its own
            # database connection. Close it without waiting for the GC.
            connection.close()
    @skipUnlessDBFeature('has_select_for_update')
    @skipUnlessDBFeature('supports_transactions')
    def test_block(self):
        """
        Check that a thread running a select_for_update that
        accesses rows being touched by a similar operation
        on another connection blocks correctly.
        """
        # First, let's start the transaction in our thread.
        self.start_blocking_transaction()
        # Now, try it again using the ORM's select_for_update
        # facility. Do this in a separate thread.
        status = []
        thread = threading.Thread(
            target=self.run_select_for_update, args=(status,)
        )
        # The thread should immediately block, but we'll sleep
        # for a bit to make sure.
        thread.start()
        sanity_count = 0
        while len(status) != 1 and sanity_count < 10:
            sanity_count += 1
            time.sleep(1)
        if sanity_count >= 10:
            raise ValueError('Thread did not run and block')
        # Check the person hasn't been updated. Since this isn't
        # using FOR UPDATE, it won't block.
        p = Person.objects.get(pk=self.person.pk)
        self.assertEqual('Reinhardt', p.name)
        # When we end our blocking transaction, our thread should
        # be able to continue.
        self.end_blocking_transaction()
        thread.join(5.0)
        # Check the thread has finished. Assuming it has, we should
        # find that it has updated the person's name.
        # Thread.is_alive() is the portable spelling; the camelCase
        # isAlive() alias was removed in Python 3.9.
        self.assertFalse(thread.is_alive())
        # We must commit the transaction to ensure that MySQL gets a fresh read,
        # since by default it runs in REPEATABLE READ mode
        transaction.commit()
        p = Person.objects.get(pk=self.person.pk)
        self.assertEqual('Fred', p.name)
    @skipUnlessDBFeature('has_select_for_update')
    def test_raw_lock_not_available(self):
        """
        Check that running a raw query which can't obtain a FOR UPDATE lock
        raises the correct exception
        """
        self.start_blocking_transaction()
        def raw(status):
            try:
                list(
                    Person.objects.raw(
                        'SELECT * FROM %s %s' % (
                            Person._meta.db_table,
                            connection.ops.for_update_sql(nowait=True)
                        )
                    )
                )
            except DatabaseError as e:
                status.append(e)
            finally:
                # This method is run in a separate thread. It uses its own
                # database connection. Close it without waiting for the GC.
                connection.close()
        status = []
        thread = threading.Thread(target=raw, kwargs={'status': status})
        thread.start()
        time.sleep(1)
        thread.join()
        self.end_blocking_transaction()
        self.assertIsInstance(status[-1], DatabaseError)
    @skipUnlessDBFeature('has_select_for_update')
    @override_settings(DATABASE_ROUTERS=[TestRouter()])
    def test_select_for_update_on_multidb(self):
        query = Person.objects.select_for_update()
        self.assertEqual(router.db_for_write(Person), query.db)
    @skipUnlessDBFeature('has_select_for_update')
    def test_select_for_update_with_get(self):
        with transaction.atomic():
            person = Person.objects.select_for_update().get(name='Reinhardt')
        self.assertEqual(person.name, 'Reinhardt')
| bsd-3-clause |
rve/pracmcm | ca/final.py | 1 | 10402 | import random
# --- Simulation parameters for the two-lane cellular-automaton traffic model ---
lane_num = 2          # number of lanes
lane = [[], []]       # per-lane cell grids, filled with '.' in main()
max_car_num = 10000   # number of cars placed on the road
road_len = 1000       # circular road length, in cells
h = 6                 # time-headway cap: t_s = min(v, h)
p_b = 0.94            # brake probability when the leader brakes within t_s
p_0 = 0.5             # slow-to-start probability (applies when v == 0)
p_d = 0.1             # default random-deceleration probability
v_max = [6, 10]       # maximum speed per size class (index 0: slow vehicles)
gap = 7               # anticipation offset in the effective-gap rule
p_car = 1             # presumably a car-injection probability; unused in the visible code -- TODO confirm
p_crash = 0           # accident probability (the accident branch is commented out in main)
time_period = 200     # number of simulation time steps
class Car:
    """A vehicle on the circular two-lane road."""
    # Count of Car instances ever created; doubles as the next car id.
    car_cnt = 0
    def __init__(self, v = 1, lane = 1):
        """Create a car with randomized size, speed and lane.

        NOTE(review): the *v* and *lane* arguments are accepted but then
        overwritten by random values below -- confirm whether callers
        expect them to be honoured.
        """
        # size 1 = fast vehicle (v_max 10); ~10% are slow (size 0, v_max 6).
        self.size = 1
        if random.random() < 0.1:
            self.size = 0
        self.c_id = Car.car_cnt
        Car.car_cnt += 1
        #print(Car.car_cnt, max_car_num)
        #self.v = 0
        self.v = random.randint(0, v_max[self.size])
        self.lane = random.randint(0, 1)
        #self.pos = random.randint(0, road_len)
        # NOTE(review): self.pos is assigned by the caller (see main), not here.
        # b is the "brake light" flag used by the anticipation rule.
        self.b = 0
    def __lt__(self, other):
        # Order cars by road position so the fleet can be kept sorted.
        return self.pos < other.pos
def mmod(a, b):
    """Distance from *b* forward to *a* along the circular road.

    Python's ``%`` already yields a result in ``[0, road_len)`` for a
    positive modulus, so no explicit ``+ road_len`` correction is needed.
    """
    return (a - b) % road_len
def rand(v, b, t_h, t_s):
    """Select the random-deceleration probability for a car's state.

    Returns p_b when the leader's brake light is on within the safe
    time headway, p_0 for a standing car (slow-to-start), and p_d
    otherwise.
    """
    if b == 1 and t_h < t_s:
        return p_b
    # The first branch already returned, so its negation holds here and
    # a standing car always gets the slow-to-start probability.
    if v == 0:
        return p_0
    return p_d
def main():
Car.car_cnt = 0
lane_cnt = [0] * lane_num
car_num = 0
flow = [0.0] * lane_num
lane_change_cnt = 0
car = [0] * max_car_num
'''
for i in range(max_car_num):
car[i] = Car()
'''
for i in range(2):
for j in range(road_len):
lane[i].append('.')
v_now = 0
#adding new cars
cur = 0
vis = [[0]*road_len, [0]*road_len]
for i in range(max_car_num):
car[car_num] = Car()
car[car_num].pos = int(cur / lane_num)
cur += float(road_len) * lane_num / max_car_num
car[car_num].lane = random.randint(0,1)
if vis[car[car_num].lane][car[car_num].pos] == 0:
vis[car[car_num].lane][car[car_num].pos] = 1
else:
car[car_num].lane = 1 - car[car_num].lane
vis[car[car_num].lane][car[car_num].pos] = 1
#print (car[car_num].pos, car[car_num].lane)
#print car[car_num].pos, car[car_num].lane
# random lane
#car[car_num].lane = random.randint(0,1)
lane_cnt[car[car_num].lane] += 1
car_num += 1
#print car_num,lane_cnt
sum_v = 0
sum_vs = [0] * lane_num
speed_1 = []
for i in range(time_period):
v_succ = [2 ** 31] * lane_num
if time_period-1 == i:
for j in range(car_num):
sum_v += car[j].v
sum_vs[car[j].lane] += car[j].v
for j in list(reversed(range(car_num))):
#for j in (range(car_num)):
if time_period-1 == i:
speed_1.append(car[j].v)
v_succ_cur = v_succ
k = j + 1
while k < car_num and car[k].lane != car[j].lane:
k += 1
if k >= car_num:
k = 0
while k < car_num and car[k].lane != car[j].lane:
k += 1
kk = k + 1
while kk < car_num and car[kk].lane != car[j].lane:
kk += 1
if kk >= car_num:
kk = 0
while kk < car_num and car[kk].lane != car[j].lane:
kk += 1
#0 Determine p
t_s = min(car[j].v, h)
d = mmod(car[k].pos, car[j].pos)
if k >= car_num - 1:
d = 2 ** 31
if int(car[j].v) > 0 and d != 2 ** 31:
t_h = d / car[j].v
else:
t_h = t_s
p = rand(car[j].v, car[k].b, t_h, t_s)
b_now = car[j].b
car[j].b = 0
if j > 0:
v_succ[car[j - 1].lane] = v_now
else:
v_succ[0] = [2**31] * lane_num
v_now = car[j].v
#1 accelerate
if k >= car_num or (b_now == 0 and car[j].b == 0) or t_h >= t_s:
car[j].v = min(car[j].v + 1, v_max[car[j].size])
#2 braking
if kk < car_num:
v_anti = min(mmod(car[kk].pos, car[k].pos), car[k].v)
else:
v_anti = car[k].v
d_eff = d - 1 + max(v_anti - gap, 0)
car[j].v = max(min(d_eff, car[j].v), 0)
#car[j].v = max(min(d_eff, v_now), 0)
if car[j].v < v_now:
car[j].b = 1
#3 random brake
if random.random() < p:
car[j].v = max(car[j].v - 1, 0)
if p == p_b:
car[j].b = 1
#traffic accident
#if random.random() < p_crash:
#car[j].v /= 3
#lane changing
k = j + 1
l = j - 1
while (k < car_num and car[k].lane == car[j].lane):
k += 1
if k >= car_num:
k = 0
while (k < car_num and car[k].lane == car[j].lane):
k += 1
kk = k + 1
while (kk < car_num and car[kk].lane == car[j].lane):
kk += 1
if kk >= car_num:
kk = 0
while (kk < car_num and car[kk].lane == car[j].lane):
kk += 1
if k < car_num:
v_anti = car[k].v
if kk < car_num:
v_anti = min(mmod(car[kk].pos, car[k].pos), car[k].v)
d_p = mmod(car[k].pos, car[j].pos)
d_p_eff = mmod(car[k].pos, car[j].pos) + max(car[k].v - gap, 0)
else:
d_p = 2 ** 31
d_p_eff = 2 ** 31
while (l > 0 and car[l].lane == car[j].lane):
l -= 1
if l < 0:
l = car_num - 1
while (l > 0 and car[l].lane == car[j].lane):
l -= 1
dst = 1 - car[j].lane
#if ((dst == 1 and v_now <= 7) or v_now > d) and b_now == 0: # velocity based rule
#if (car[j].b == 0 and (\
#v_now > d or\
#(dst == 1 and v_max[car[j].size] <= 7))): # slow car right rule
#if (car[j].b == 0 and (\
#v_now > d or\
#dst == 1\
#)):
'''
if v_now > 0:
t_p_h = float(d_p) / v_now
else:
t_p_h = 4
if (dst == 0 and b_now == 0 and v_now > d) or\
(dst == 1 and v_now <= 7 and b_now == 0 and t_p_h > 3.0 and (t_h > 6.0 or v_now > d)): # velocity-based with paper
if v_now > 0:
t_p_h = float(d_p) / v_now
else:
t_p_h = 4
if (dst == 0 and b_now == 0 and v_now > d) or\
(dst == 1 and b_now == 0 and t_p_h > 3.0 and (t_h > 6.0 or v_now > d)): # right priority rule in paper
'''
if b_now == 0 and v_now > d: # symmetric rule
if (d_p_eff >= v_now and
(j == 0 or l < 0 or (mmod(car[j].pos, car[l].pos)) >=
v_succ_cur[dst])):
lane_cnt[dst] += 1
lane_cnt[1-dst] -= 1
car[j].lane = dst
car[j].v = v_now
lane_change_cnt += 1
#4 car motion
car[j].pos = (car[j].pos + car[j].v) % road_len
if i > int(time_period * 0.618):
# calculate flow
for m in range(road_len/10):
pinp = m * 10
for k in range(car_num):
if car[k].pos <= pinp and car[k].pos + car[k].v >= pinp and car[k].v > 0:
flow[car[k].lane] += 1
flow = [float(i)*10/road_len for i in flow]
car[:car_num] = sorted(car[:car_num])
for i in reversed(range(car_num)):
if car[i].pos > road_len:
del car[i]
car_num -= 1
'''
line = '.' * road_len
for j in range(car_num):
if car[j].lane == 0:
#line = line[:car[j].pos] + str((car[j].v + 1) / 2) + line[(car[j].pos + 1):]
line = line[:car[j].pos] + str((car[j].v)) + line[(car[j].pos + 1):]
print "car %d: id=%d pos=%d v=%d" % (j, car[j].c_id, car[j].pos, car[j].v)
print line
line = '.' * road_len
for j in range(car_num):
if car[j].lane == 1:
#line = line[:car[j].pos] + str((car[j].v + 1) / 2) + line[(car[j].pos + 1):]
line = line[:car[j].pos] + str((car[j].v)) + line[(car[j].pos + 1):]
print line
print
#'''
avg_vs = [float(sum_vs[i])/lane_cnt[i] if lane_cnt[i] != 0 else 0 for i in range(lane_num)]
return lane_cnt, car_num, float(sum_v)/car_num, flow,\
lane_change_cnt,avg_vs, speed_1
if __name__ == "__main__":
    # Driver: sweep the total car count from 100 to 2000 in steps of 100,
    # run `rep` independent simulations per density, and print per-density
    # averages (lane occupancy, mean speed, flow per lane, lane changes).
    #print main()
    #'''
    # NOTE(review): 'lanchge' in the header is a typo for 'lanchange'/'lane
    # change'; left as-is since it is runtime output text.
    print('rolen car_l car_r car_n avg_v flow_l flow_r flow lanchge v_l v_r')
    for i in range(0,20):
        #p_car = float(i + 1) / 10
        max_car_num = (i+1)*100
        # accumulators across the `rep` repetitions
        lane_cnt = [0] * lane_num
        flow_sum = [0] * lane_num
        rep = 20
        avg_vs = []
        lane_change_sum = 0
        avg_v_lrs = [[],[]]
        speed_1 = []
        for j in range(rep):
            ret, car_num, avg_v_instance, flow, lane_change_cnt, avg_v_lr,\
                speed_1 = main()
            #print(' '.join(map(str,speed_1)))
            lane_cnt[0] += ret[0]
            lane_cnt[1] += ret[1]
            flow_sum[0] += flow[0]
            flow_sum[1] += flow[1]
            lane_change_sum += lane_change_cnt
            avg_vs.append(avg_v_instance)
            # Python 3 comprehension scope keeps this 'i' from clobbering
            # the outer density-sweep loop variable.
            [avg_v_lrs[i].append(avg_v_lr[i]) for i in
                range(lane_num)]
        lane_cnt = [i / rep for i in lane_cnt]
        avg_v = sum(avg_vs)/float(len(avg_vs))
        avg_v_lr = [sum(i)/float(len(i)) for i in avg_v_lrs]
        flow_l = flow_sum[0]/rep
        # tiny epsilon avoids printing -0.00 / dividing by zero downstream
        flow_r = flow_sum[1]/rep + 0.00001
        print('%4d %5d %5d %5d %6.2f %6.2f %6.2f %5.2f %8d %7.2f %7.2f'
            % (road_len,lane_cnt[0], lane_cnt[1],
            car_num, avg_v, flow_l,
            flow_r,
            flow_l+flow_r,
            lane_change_sum / rep,\
            avg_v_lr[0],\
            avg_v_lr[1]
            ))
        #print("="*10, " "*5, "%.2f" % (avg_v*car_num/road_len))
        #print(", ".join(map(lambda x:"%.2f" % x,avg_vs[:5])))
        #print("="*10)
    #'''
| mit |
zorroz/microblog | flask/lib/python2.7/site.py | 306 | 27543 | """Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
In earlier versions of Python (up to 1.5a3), scripts or modules that
needed to use site-specific modules would place ``import site''
somewhere near the top of their code. Because of the automatic
import, this is no longer necessary (but code that does it still
works).
This will append site-specific paths to the module search path. On
Unix, it starts with sys.prefix and sys.exec_prefix (if different) and
appends lib/python<version>/site-packages as well as lib/site-python.
It also supports the Debian convention of
lib/python<version>/dist-packages. On other platforms (mainly Mac and
Windows), it uses just sys.prefix (and sys.exec_prefix, if different,
but this is unlikely). The resulting directories, if they exist, are
appended to sys.path, and also inspected for path configuration files.
FOR DEBIAN, this sys.path is augmented with directories in /usr/local.
Local addons go into /usr/local/lib/python<version>/site-packages
(resp. /usr/local/lib/site-python), Debian addons install into
/usr/{lib,share}/python<version>/dist-packages.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.X/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.X/site-packages/bar
/usr/local/lib/python2.X/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
try:
import __builtin__ as builtins
except ImportError:
import builtins
try:
set
except NameError:
from sets import Set as set
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
ENABLE_USER_SITE = None
# for distutils.commands.install
USER_SITE = None
USER_BASE = None
_is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32
_is_pypy = hasattr(sys, 'pypy_version_info')
_is_jython = sys.platform[:4] == 'java'
if _is_jython:
ModuleType = type(os)
def makepath(*paths):
    """Join *paths* and return ``(absolute_path, comparison_key)``.

    The second element is the case-normalized form used for duplicate
    detection in sys.path.  Jython's magic classpath entries are passed
    through untouched because they are not real filesystem paths.
    """
    joined = os.path.join(*paths)
    is_magic = (joined == '__classpath__'
                or joined.startswith('__pyclasspath__'))
    if _is_jython and is_magic:
        return joined, joined
    absolute = os.path.abspath(joined)
    return absolute, os.path.normcase(absolute)
def abs__file__():
    """Make every loaded module's __file__ attribute an absolute path."""
    for m in sys.modules.values():
        if ((_is_jython and not isinstance(m, ModuleType)) or
            hasattr(m, '__loader__')):
            # only modules need the abspath in Jython. and don't mess
            # with a PEP 302-supplied __file__
            continue
        f = getattr(m, '__file__', None)
        if f is None:
            # builtin modules have no backing file to absolutize
            continue
        m.__file__ = os.path.abspath(f)
def removeduppaths():
    """Absolutize sys.path entries and drop duplicates, in place.

    Returns the set of case-normalized paths that remain so callers can
    keep extending it without re-scanning sys.path.  This ensures the
    initial path provided by the interpreter contains only absolute
    pathnames, even when running from the build directory.
    """
    seen = set()
    deduped = []
    for entry in sys.path:
        # makepath() yields the absolute path plus a case-normalized key,
        # so entries differing only in case collapse on case-insensitive
        # filesystems.
        absolute, key = makepath(entry)
        if key not in seen:
            seen.add(key)
            deduped.append(absolute)
    sys.path[:] = deduped
    return seen
# XXX This should not be part of site.py, since it is needed even when
# using the -S option for Python. See http://www.python.org/sf/586680
def addbuilddir():
    """Append ./build/lib.<platform> in case we're running in the build dir
    (especially for Guido :-)"""
    # Imported lazily: distutils is only needed in this build-tree case.
    from distutils.util import get_platform
    s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
    if hasattr(sys, 'gettotalrefcount'):
        # pydebug interpreters build into a '-pydebug' suffixed directory
        s += '-pydebug'
    s = os.path.join(os.path.dirname(sys.path[-1]), s)
    sys.path.append(s)
def _init_pathinfo():
    """Return the case-normalized keys of all existing sys.path directories."""
    known = set()
    for entry in sys.path:
        # Non-string entries (e.g. objects placed by import hooks) make
        # os.path.isdir() raise TypeError; simply skip them.
        try:
            if not os.path.isdir(entry):
                continue
        except TypeError:
            continue
        _, key = makepath(entry)
        known.add(key)
    return known
def addpackage(sitedir, name, known_paths):
    """Process a .pth file 'name' inside 'sitedir'.

    Each line is either combined with sitedir into a path that is appended
    to sys.path (and recorded in known_paths), or executed verbatim if it
    starts with 'import'.  Returns known_paths, or None when the caller
    passed None (matching the stdlib contract).
    """
    if known_paths is None:
        # Bug fix: the freshly built set must be captured; the original
        # discarded _init_pathinfo()'s result, so every membership test
        # below was made against None.
        known_paths = _init_pathinfo()
        reset = 1
    else:
        reset = 0
    fullname = os.path.join(sitedir, name)
    try:
        f = open(fullname, "rU")
    except IOError:
        # Unreadable or vanished .pth files are silently ignored.
        return
    try:
        for line in f:
            if line.startswith("#"):
                continue
            if line.startswith("import"):
                # 'import ...' lines in .pth files are executed verbatim;
                # this is how packages register path hooks at startup.
                exec(line)
                continue
            line = line.rstrip()
            dir, dircase = makepath(sitedir, line)
            # Only add directories that exist and are not already present.
            if not dircase in known_paths and os.path.exists(dir):
                sys.path.append(dir)
                known_paths.add(dircase)
    finally:
        f.close()
    if reset:
        # The caller did not supply a set, so do not leak ours back.
        known_paths = None
    return known_paths
def addsitedir(sitedir, known_paths=None):
    """Add 'sitedir' argument to sys.path if missing and handle .pth files in
    'sitedir'"""
    if known_paths is None:
        known_paths = _init_pathinfo()
        reset = 1
    else:
        reset = 0
    sitedir, sitedircase = makepath(sitedir)
    if not sitedircase in known_paths:
        sys.path.append(sitedir)        # Add path component
    try:
        names = os.listdir(sitedir)
    except os.error:
        # Directory unreadable or missing: nothing to process.
        return
    # Sort so .pth files are processed in a deterministic (alphabetical)
    # order; the order determines the order of added sys.path entries.
    names.sort()
    for name in names:
        if name.endswith(os.extsep + "pth"):
            addpackage(sitedir, name, known_paths)
    if reset:
        known_paths = None
    return known_paths
def addsitepackages(known_paths, sys_prefix=sys.prefix, exec_prefix=sys.exec_prefix):
    """Add site-packages (and possibly site-python) to sys.path

    The candidate directory list depends on platform (Jython/PyPy/OS X/
    POSIX/Windows) and, on Debian-family systems, includes the
    dist-packages variants.  Only directories that exist are added.
    """
    # Debian convention: /usr/local is scanned before the main prefix.
    prefixes = [os.path.join(sys_prefix, "local"), sys_prefix]
    if exec_prefix != sys_prefix:
        prefixes.append(os.path.join(exec_prefix, "local"))
    for prefix in prefixes:
        if prefix:
            if sys.platform in ('os2emx', 'riscos') or _is_jython:
                sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
            elif _is_pypy:
                sitedirs = [os.path.join(prefix, 'site-packages')]
            elif sys.platform == 'darwin' and prefix == sys_prefix:
                if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python
                    sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
                                os.path.join(prefix, "Extras", "lib", "python")]
                else: # any other Python distros on OSX work this way
                    sitedirs = [os.path.join(prefix, "lib",
                                             "python" + sys.version[:3], "site-packages")]
            elif os.sep == '/':
                sitedirs = [os.path.join(prefix,
                                         "lib",
                                         "python" + sys.version[:3],
                                         "site-packages"),
                            os.path.join(prefix, "lib", "site-python"),
                            os.path.join(prefix, "python" + sys.version[:3], "lib-dynload")]
                # lib64 gets priority on 64-bit builds, unless it is just a
                # symlink back to a directory already listed.
                lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
                if (os.path.exists(lib64_dir) and
                    os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
                    if _is_64bit:
                        sitedirs.insert(0, lib64_dir)
                    else:
                        sitedirs.append(lib64_dir)
                try:
                    # sys.getobjects only available in --with-pydebug build
                    sys.getobjects
                    sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
                except AttributeError:
                    pass
                # Debian-specific dist-packages directories:
                if sys.version[0] == '2':
                    sitedirs.append(os.path.join(prefix, "lib",
                                                 "python" + sys.version[:3],
                                                 "dist-packages"))
                else:
                    sitedirs.append(os.path.join(prefix, "lib",
                                                 "python" + sys.version[0],
                                                 "dist-packages"))
                sitedirs.append(os.path.join(prefix, "local/lib",
                                             "python" + sys.version[:3],
                                             "dist-packages"))
                sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
            else:
                sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
            if sys.platform == 'darwin':
                # for framework builds *only* we add the standard Apple
                # locations. Currently only per-user, but /Library and
                # /Network/Library could be added too
                if 'Python.framework' in prefix:
                    home = os.environ.get('HOME')
                    if home:
                        sitedirs.append(
                            os.path.join(home,
                                         'Library',
                                         'Python',
                                         sys.version[:3],
                                         'site-packages'))
            for sitedir in sitedirs:
                if os.path.isdir(sitedir):
                    addsitedir(sitedir, known_paths)
    return None
def check_enableusersite():
    """Decide whether the per-user site-packages directory may be used.

    Returns:
        None:  disabled for security reasons (effective uid/gid differs
               from the real uid/gid, i.e. a set-id process)
        False: disabled by the user (command line flag / environment)
        True:  safe and enabled
    """
    flags = getattr(sys, 'flags', None)
    if flags is not None and getattr(flags, 'no_user_site', False):
        return False
    # A set-uid or set-gid process must not trust user-writable paths.
    if hasattr(os, "getuid") and hasattr(os, "geteuid"):
        if os.geteuid() != os.getuid():
            return None
    if hasattr(os, "getgid") and hasattr(os, "getegid"):
        if os.getegid() != os.getgid():
            return None
    return True
def addusersitepackages(known_paths):
    """Add a per user site-package to sys.path

    Each user has its own python directory with site-packages in the
    home directory.

    USER_BASE is the root directory for all Python versions
    USER_SITE is the user specific site-packages directory
    USER_SITE/.. can be used for data.
    """
    global USER_BASE, USER_SITE, ENABLE_USER_SITE
    # PYTHONUSERBASE overrides the platform default root.
    env_base = os.environ.get("PYTHONUSERBASE", None)
    def joinuser(*args):
        return os.path.expanduser(os.path.join(*args))
    #if sys.platform in ('os2emx', 'riscos'):
    #    # Don't know what to put here
    #    USER_BASE = ''
    #    USER_SITE = ''
    if os.name == "nt":
        base = os.environ.get("APPDATA") or "~"
        if env_base:
            USER_BASE = env_base
        else:
            USER_BASE = joinuser(base, "Python")
        USER_SITE = os.path.join(USER_BASE,
                                 "Python" + sys.version[0] + sys.version[2],
                                 "site-packages")
    else:
        if env_base:
            USER_BASE = env_base
        else:
            USER_BASE = joinuser("~", ".local")
        USER_SITE = os.path.join(USER_BASE, "lib",
                                 "python" + sys.version[:3],
                                 "site-packages")
    if ENABLE_USER_SITE and os.path.isdir(USER_SITE):
        addsitedir(USER_SITE, known_paths)
    if ENABLE_USER_SITE:
        # Debian-style per-user dist-packages directories.
        for dist_libdir in ("lib", "local/lib"):
            user_site = os.path.join(USER_BASE, dist_libdir,
                                     "python" + sys.version[:3],
                                     "dist-packages")
            if os.path.isdir(user_site):
                addsitedir(user_site, known_paths)
    return known_paths
def setBEGINLIBPATH():
    """The OS/2 EMX port has optional extension modules that do double duty
    as DLLs (and must use the .DLL file extension) for other extensions.
    The library search path needs to be amended so these will be found
    during module import. Use BEGINLIBPATH so that these are at the start
    of the library search path.
    """
    dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
    libpath = os.environ['BEGINLIBPATH'].split(';')
    if libpath[-1]:
        libpath.append(dllpath)
    else:
        # A trailing ';' left an empty final element: overwrite it rather
        # than append, so we don't emit a double separator.
        libpath[-1] = dllpath
    os.environ['BEGINLIBPATH'] = ';'.join(libpath)
def setquit():
    """Install interactive 'quit' and 'exit' built-ins.

    Each built-in is an object whose repr() tells the user how to leave
    the interpreter, and which raises SystemExit when actually called.
    """
    eof = {
        ':': 'Cmd-Q',
        '\\': 'Ctrl-Z plus Return',
    }.get(os.sep, 'Ctrl-D (i.e. EOF)')
    class Quitter(object):
        def __init__(self, name):
            self.name = name
        def __repr__(self):
            return 'Use %s() or %s to exit' % (self.name, eof)
        def __call__(self, code=None):
            # Shells like IDLE catch the SystemExit, but listen when their
            # stdin wrapper is closed — so close it before raising.
            try:
                sys.stdin.close()
            except:
                pass
            raise SystemExit(code)
    for label in ('quit', 'exit'):
        setattr(builtins, label, Quitter(label))
class _Printer(object):
    """interactive prompt objects for printing the license text, a list of
    contributors and the copyright notice."""
    # Number of lines shown per page when the object is called.
    MAXLINES = 23
    def __init__(self, name, data, files=(), dirs=()):
        # `files`/`dirs` are optional on-disk sources tried before falling
        # back to the inline `data` text.
        self.__name = name
        self.__data = data
        self.__files = files
        self.__dirs = dirs
        self.__lines = None
    def __setup(self):
        # Lazily load and split the text exactly once.
        if self.__lines:
            return
        data = None
        for dir in self.__dirs:
            for filename in self.__files:
                filename = os.path.join(dir, filename)
                try:
                    fp = open(filename, "rU")
                    data = fp.read()
                    fp.close()
                    break
                except IOError:
                    pass
            if data:
                break
        if not data:
            data = self.__data
        self.__lines = data.split('\n')
        self.__linecnt = len(self.__lines)
    def __repr__(self):
        self.__setup()
        # Short texts are shown directly; long ones tell the user to call
        # the object to page through them.
        if len(self.__lines) <= self.MAXLINES:
            return "\n".join(self.__lines)
        else:
            return "Type %s() to see the full %s text" % ((self.__name,)*2)
    def __call__(self):
        self.__setup()
        prompt = 'Hit Return for more, or q (and Return) to quit: '
        lineno = 0
        while 1:
            try:
                for i in range(lineno, lineno + self.MAXLINES):
                    print(self.__lines[i])
            except IndexError:
                break
            else:
                lineno += self.MAXLINES
                key = None
                while key is None:
                    try:
                        # Python 2 spelling first; NameError means we are on
                        # Python 3, where input() is the equivalent.
                        key = raw_input(prompt)
                    except NameError:
                        key = input(prompt)
                    if key not in ('', 'q'):
                        key = None
                if key == 'q':
                    break
def setcopyright():
    """Set 'copyright' and 'credits' in __builtin__"""
    builtins.copyright = _Printer("copyright", sys.copyright)
    # The credits text depends on the interpreter implementation.
    if _is_jython:
        builtins.credits = _Printer(
            "credits",
            "Jython is maintained by the Jython developers (www.jython.org).")
    elif _is_pypy:
        builtins.credits = _Printer(
            "credits",
            "PyPy is maintained by the PyPy developers: http://pypy.org/")
    else:
        builtins.credits = _Printer("credits", """\
Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information.""")
    # The license _Printer is also given candidate file names/directories,
    # so the on-disk LICENSE text is shown when available.
    here = os.path.dirname(os.__file__)
    builtins.license = _Printer(
        "license", "See http://www.python.org/%.3s/license.html" % sys.version,
        ["LICENSE.txt", "LICENSE"],
        [os.path.join(here, os.pardir), here, os.curdir])
class _Helper(object):
"""Define the built-in 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
    """Install the interactive 'help' built-in."""
    builtins.help = _Helper()
def aliasmbcs():
    """On Windows, some default encodings are not provided by Python,
    while they are always available as "mbcs" in each locale. Make
    them usable by aliasing to "mbcs" in such a case.

    No-op on every platform other than win32.
    """
    if sys.platform != 'win32':
        return
    import locale, codecs
    enc = locale.getdefaultlocale()[1]
    # Bug fix: getdefaultlocale() may return None for the encoding, which
    # would make the startswith() check below raise AttributeError.
    if not enc or not enc.startswith('cp'):            # "cp***" ?
        return
    try:
        codecs.lookup(enc)
    except LookupError:
        # Encoding unknown to Python: route it to the always-available
        # "mbcs" codec.
        import encodings
        encodings._cache[enc] = encodings._unknown
        encodings.aliases.aliases[enc] = 'mbcs'
def setencoding():
    """Set the string encoding used by the Unicode implementation.  The
    default is 'ascii', but if you're willing to experiment, you can
    change this."""
    encoding = "ascii" # Default value set by _PyUnicode_Init()
    # The two `if 0:` blocks below are deliberate opt-in switches: flip
    # one to 1 to experiment with a different default encoding.
    if 0:
        # Enable to support locale aware default string encodings.
        import locale
        loc = locale.getdefaultlocale()
        if loc[1]:
            encoding = loc[1]
    if 0:
        # Enable to switch off string to Unicode coercion and implicit
        # Unicode to string conversion.
        encoding = "undefined"
    if encoding != "ascii":
        # On Non-Unicode builds this will raise an AttributeError...
        sys.setdefaultencoding(encoding) # Needs Python Unicode build !
def execsitecustomize():
    """Run custom site specific code, if available."""
    try:
        # Importing the module runs it; absence is not an error.
        import sitecustomize
    except ImportError:
        pass
def virtual_install_main_packages():
    """Extend sys.path with the real (non-virtualenv) interpreter's
    standard-library directories, read from the orig-prefix.txt file that
    virtualenv drops next to this module."""
    f = open(os.path.join(os.path.dirname(__file__), 'orig-prefix.txt'))
    sys.real_prefix = f.read().strip()
    f.close()
    pos = 2
    # NOTE(review): 'pos' is computed but never used below; it looks
    # vestigial from an older sys.path.insert()-based implementation.
    hardcoded_relative_dirs = []
    if sys.path[0] == '':
        pos += 1
    if _is_jython:
        paths = [os.path.join(sys.real_prefix, 'Lib')]
    elif _is_pypy:
        # PyPy's stdlib layout depends on both the CPython compatibility
        # version and the PyPy release.
        if sys.version_info > (3, 2):
            cpyver = '%d' % sys.version_info[0]
        elif sys.pypy_version_info >= (1, 5):
            cpyver = '%d.%d' % sys.version_info[:2]
        else:
            cpyver = '%d.%d.%d' % sys.version_info[:3]
        paths = [os.path.join(sys.real_prefix, 'lib_pypy'),
                 os.path.join(sys.real_prefix, 'lib-python', cpyver)]
        if sys.pypy_version_info < (1, 9):
            paths.insert(1, os.path.join(sys.real_prefix,
                                         'lib-python', 'modified-%s' % cpyver))
        hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below
        #
        # This is hardcoded in the Python executable, but relative to sys.prefix:
        for path in paths[:]:
            plat_path = os.path.join(path, 'plat-%s' % sys.platform)
            if os.path.exists(plat_path):
                paths.append(plat_path)
    elif sys.platform == 'win32':
        paths = [os.path.join(sys.real_prefix, 'Lib'), os.path.join(sys.real_prefix, 'DLLs')]
    else:
        paths = [os.path.join(sys.real_prefix, 'lib', 'python'+sys.version[:3])]
        hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below
        lib64_path = os.path.join(sys.real_prefix, 'lib64', 'python'+sys.version[:3])
        if os.path.exists(lib64_path):
            if _is_64bit:
                paths.insert(0, lib64_path)
            else:
                paths.append(lib64_path)
        # This is hardcoded in the Python executable, but relative to
        # sys.prefix. Debian change: we need to add the multiarch triplet
        # here, which is where the real stuff lives. As per PEP 421, in
        # Python 3.3+, this lives in sys.implementation, while in Python 2.7
        # it lives in sys.
        try:
            arch = getattr(sys, 'implementation', sys)._multiarch
        except AttributeError:
            # This is a non-multiarch aware Python. Fallback to the old way.
            arch = sys.platform
        plat_path = os.path.join(sys.real_prefix, 'lib',
                                 'python'+sys.version[:3],
                                 'plat-%s' % arch)
        if os.path.exists(plat_path):
            paths.append(plat_path)
    # This is hardcoded in the Python executable, but
    # relative to sys.prefix, so we have to fix up:
    for path in list(paths):
        tk_dir = os.path.join(path, 'lib-tk')
        if os.path.exists(tk_dir):
            paths.append(tk_dir)
    # These are hardcoded in the Apple's Python executable,
    # but relative to sys.prefix, so we have to fix them up:
    if sys.platform == 'darwin':
        hardcoded_paths = [os.path.join(relative_dir, module)
                           for relative_dir in hardcoded_relative_dirs
                           for module in ('plat-darwin', 'plat-mac', 'plat-mac/lib-scriptpackages')]
        for path in hardcoded_paths:
            if os.path.exists(path):
                paths.append(path)
    sys.path.extend(paths)
def force_global_eggs_after_local_site_packages():
    """
    Force easy_installed eggs in the global environment to get placed
    in sys.path after all packages inside the virtualenv.  This
    maintains the "least surprise" result that packages in the
    virtualenv always mask global packages, never the other way
    around.
    """
    marker = getattr(sys, '__egginsert', 0)
    for index, entry in enumerate(sys.path):
        # Remember the last global (sys.prefix-rooted) entry past the
        # current insertion marker.
        is_global = entry.startswith(sys.prefix)
        if index > marker and is_global:
            marker = index
    sys.__egginsert = marker + 1
def virtual_addsitepackages(known_paths):
    # Bump the egg-insertion marker first so global site-packages land
    # after the virtualenv's own entries and never shadow them.
    force_global_eggs_after_local_site_packages()
    return addsitepackages(known_paths, sys_prefix=sys.real_prefix)
def fixclasspath():
    """Move Jython's special classpath entries ('__classpath__' and
    '__pyclasspath__...') to the end of sys.path so they follow the base
    virtualenv lib directories.
    """
    magic = []
    regular = []
    for entry in sys.path:
        is_magic = (entry == '__classpath__'
                    or entry.startswith('__pyclasspath__'))
        (magic if is_magic else regular).append(entry)
    sys.path = regular + magic
def execusercustomize():
    """Run custom user specific code, if available."""
    try:
        # Importing the module runs it; absence is not an error.
        import usercustomize
    except ImportError:
        pass
def main():
    """Bootstrap the virtualenv-aware sys.path and interactive built-ins.

    Order matters: real-prefix stdlib paths first, then dedup, then
    site-packages (virtualenv before global), then the interactive
    niceties (quit/exit, copyright, help, encodings, customize hooks).
    """
    global ENABLE_USER_SITE
    virtual_install_main_packages()
    abs__file__()
    paths_in_sys = removeduppaths()
    if (os.name == "posix" and sys.path and
        os.path.basename(sys.path[-1]) == "Modules"):
        addbuilddir()
    if _is_jython:
        fixclasspath()
    # virtualenv marker file: its presence means --no-site-packages.
    GLOBAL_SITE_PACKAGES = not os.path.exists(os.path.join(os.path.dirname(__file__), 'no-global-site-packages.txt'))
    if not GLOBAL_SITE_PACKAGES:
        ENABLE_USER_SITE = False
    if ENABLE_USER_SITE is None:
        ENABLE_USER_SITE = check_enableusersite()
    paths_in_sys = addsitepackages(paths_in_sys)
    paths_in_sys = addusersitepackages(paths_in_sys)
    if GLOBAL_SITE_PACKAGES:
        paths_in_sys = virtual_addsitepackages(paths_in_sys)
    if sys.platform == 'os2emx':
        setBEGINLIBPATH()
    setquit()
    setcopyright()
    sethelper()
    aliasmbcs()
    setencoding()
    execsitecustomize()
    if ENABLE_USER_SITE:
        execusercustomize()
    # Remove sys.setdefaultencoding() so that users cannot change the
    # encoding after initialization. The test for presence is needed when
    # this module is run as a script, because this code is executed twice.
    if hasattr(sys, "setdefaultencoding"):
        del sys.setdefaultencoding
main()
def _script():
    """Command-line interface for this module.

    Without arguments, print sys.path and the user site settings.  With
    --user-base / --user-site, print the requested value(s) and exit with
    a status code describing whether the user site directory is enabled.
    """
    help = """\
    %s [--user-base] [--user-site]

    Without arguments print some useful information
    With arguments print the value of USER_BASE and/or USER_SITE separated
    by '%s'.

    Exit codes with --user-base or --user-site:
      0 - user site directory is enabled
      1 - user site directory is disabled by user
      2 - uses site directory is disabled by super user
          or for security reasons
     >2 - unknown error
    """
    args = sys.argv[1:]
    if not args:
        print("sys.path = [")
        for dir in sys.path:
            print("    %r," % (dir,))
        print("]")
        def exists(path):
            if os.path.isdir(path):
                return "exists"
            else:
                return "doesn't exist"
        print("USER_BASE: %r (%s)" % (USER_BASE, exists(USER_BASE)))
        # Bug fix: report the existence of USER_SITE itself; the original
        # tested exists(USER_BASE) twice.
        print("USER_SITE: %r (%s)" % (USER_SITE, exists(USER_SITE)))
        print("ENABLE_USER_SITE: %r" % ENABLE_USER_SITE)
        sys.exit(0)
    buffer = []
    if '--user-base' in args:
        buffer.append(USER_BASE)
    if '--user-site' in args:
        buffer.append(USER_SITE)
    if buffer:
        print(os.pathsep.join(buffer))
        # Exit status encodes ENABLE_USER_SITE (see help text above).
        if ENABLE_USER_SITE:
            sys.exit(0)
        elif ENABLE_USER_SITE is False:
            sys.exit(1)
        elif ENABLE_USER_SITE is None:
            sys.exit(2)
        else:
            sys.exit(3)
    else:
        import textwrap
        print(textwrap.dedent(help % (sys.argv[0], os.pathsep)))
        sys.exit(10)
if __name__ == '__main__':
_script()
| bsd-3-clause |
basinilya/openwrt | target/linux/x86/image/mkimg_bifferboard.py | 561 | 1265 | #!/usr/bin/env python
"""
Create firmware for 4/8MB Bifferboards, suitable for uploading using
either bb_upload8.py or bb_eth_upload8.py
"""
import struct, sys
# Increase the kmax value if the script gives errors about the kernel being
# too large. You need to set the Biffboot kmax value to the same value you
# use here.
kmax = 0x10
# No need to change this for 4MB devices, it's only used to tell you if
# the firmware is too large!
flash_size = 0x800000
# This is always the same, for 1MB, 4MB and 8MB devices
config_extent = 0x6000
kernel_extent = kmax * 0x10000
if __name__ == "__main__":
if len(sys.argv) != 4:
print "usage: mkimg_bifferboard.py <kernel> <rootfs> <output file>"
sys.exit(-1)
bzimage = sys.argv[1]
rootfs = sys.argv[2]
target = sys.argv[3]
# Kernel first
fw = file(bzimage).read()
if len(fw) > (kernel_extent - config_extent):
raise IOError("Kernel too large")
# Pad up to end of kernel partition
while len(fw) < (kernel_extent - config_extent):
fw += "\xff"
fw += file(rootfs).read()
# Check length of total
if len(fw) > (flash_size - 0x10000 - config_extent):
raise IOError("Rootfs too large")
file(target,"wb").write(fw)
print "Firmware written to '%s'" % target
| gpl-2.0 |
dut3062796s/PTVS | Python/Tests/TestData/VirtualEnv/env/Lib/encodings/rot_13.py | 88 | 2697 | #!/usr/bin/env python
""" Python Character Mapping Codec for ROT13.
See http://ucsub.colorado.edu/~kominek/rot13/ for details.
Written by Marc-Andre Lemburg (mal@lemburg.com).
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless ROT13 codec; both directions are 256-entry charmap lookups."""
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; ROT13 keeps no state between chunks."""
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; ROT13 keeps no state between chunks."""
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_map)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer: inherits all behaviour from Codec."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader: inherits all behaviour from Codec."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry used by the codecs registry lookup."""
    return codecs.CodecInfo(
        name='rot-13',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
### Decoding Map
# ROT13 shifts each ASCII letter 13 places within its own case and leaves
# the other 230 code points untouched.  Deriving the table instead of
# hand-writing 52 literal entries removes any risk of transcription
# errors; the mapping is an involution, so it is its own inverse.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    c: (c - base + 13) % 26 + base
    for base in (0x41, 0x61)   # ord('A'), ord('a')
    for c in range(base, base + 26)
})
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
### Filter API
def rot13(infile, outfile):
    """Copy *infile* to *outfile*, ROT13-transforming the text.

    NOTE(review): relies on Python 2 semantics — on Python 3,
    str.encode('rot-13') fails because the codec returns text, not
    bytes; confirm the intended interpreter before reuse.
    """
    outfile.write(infile.read().encode('rot-13'))
if __name__ == '__main__':
    # Act as a command-line filter: stdin -> ROT13 -> stdout.
    import sys
    rot13(sys.stdin, sys.stdout)
| apache-2.0 |
zhouzhenghui/python-for-android | python3-alpha/python3-src/Doc/includes/test.py | 139 | 3744 | """Test module for the noddy examples
Noddy 1:
>>> import noddy
>>> n1 = noddy.Noddy()
>>> n2 = noddy.Noddy()
>>> del n1
>>> del n2
Noddy 2
>>> import noddy2
>>> n1 = noddy2.Noddy('jim', 'fulton', 42)
>>> n1.first
'jim'
>>> n1.last
'fulton'
>>> n1.number
42
>>> n1.name()
'jim fulton'
>>> n1.first = 'will'
>>> n1.name()
'will fulton'
>>> n1.last = 'tell'
>>> n1.name()
'will tell'
>>> del n1.first
>>> n1.name()
Traceback (most recent call last):
...
AttributeError: first
>>> n1.first
Traceback (most recent call last):
...
AttributeError: first
>>> n1.first = 'drew'
>>> n1.first
'drew'
>>> del n1.number
Traceback (most recent call last):
...
TypeError: can't delete numeric/char attribute
>>> n1.number=2
>>> n1.number
2
>>> n1.first = 42
>>> n1.name()
'42 tell'
>>> n2 = noddy2.Noddy()
>>> n2.name()
' '
>>> n2.first
''
>>> n2.last
''
>>> del n2.first
>>> n2.first
Traceback (most recent call last):
...
AttributeError: first
>>> n2.first
Traceback (most recent call last):
...
AttributeError: first
>>> n2.name()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
AttributeError: first
>>> n2.number
0
>>> n3 = noddy2.Noddy('jim', 'fulton', 'waaa')
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: an integer is required
>>> del n1
>>> del n2
Noddy 3
>>> import noddy3
>>> n1 = noddy3.Noddy('jim', 'fulton', 42)
>>> n1 = noddy3.Noddy('jim', 'fulton', 42)
>>> n1.name()
'jim fulton'
>>> del n1.first
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: Cannot delete the first attribute
>>> n1.first = 42
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: The first attribute value must be a string
>>> n1.first = 'will'
>>> n1.name()
'will fulton'
>>> n2 = noddy3.Noddy()
>>> n2 = noddy3.Noddy()
>>> n2 = noddy3.Noddy()
>>> n3 = noddy3.Noddy('jim', 'fulton', 'waaa')
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: an integer is required
>>> del n1
>>> del n2
Noddy 4
>>> import noddy4
>>> n1 = noddy4.Noddy('jim', 'fulton', 42)
>>> n1.first
'jim'
>>> n1.last
'fulton'
>>> n1.number
42
>>> n1.name()
'jim fulton'
>>> n1.first = 'will'
>>> n1.name()
'will fulton'
>>> n1.last = 'tell'
>>> n1.name()
'will tell'
>>> del n1.first
>>> n1.name()
Traceback (most recent call last):
...
AttributeError: first
>>> n1.first
Traceback (most recent call last):
...
AttributeError: first
>>> n1.first = 'drew'
>>> n1.first
'drew'
>>> del n1.number
Traceback (most recent call last):
...
TypeError: can't delete numeric/char attribute
>>> n1.number=2
>>> n1.number
2
>>> n1.first = 42
>>> n1.name()
'42 tell'
>>> n2 = noddy4.Noddy()
>>> n2 = noddy4.Noddy()
>>> n2 = noddy4.Noddy()
>>> n2 = noddy4.Noddy()
>>> n2.name()
' '
>>> n2.first
''
>>> n2.last
''
>>> del n2.first
>>> n2.first
Traceback (most recent call last):
...
AttributeError: first
>>> n2.first
Traceback (most recent call last):
...
AttributeError: first
>>> n2.name()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
AttributeError: first
>>> n2.number
0
>>> n3 = noddy4.Noddy('jim', 'fulton', 'waaa')
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: an integer is required
Test cyclic gc(?)
>>> import gc
>>> gc.disable()
>>> x = []
>>> l = [x]
>>> n2.first = l
>>> n2.first
[[]]
>>> l.append(n2)
>>> del l
>>> del n1
>>> del n2
>>> sys.getrefcount(x)
3
>>> ignore = gc.collect()
>>> sys.getrefcount(x)
2
>>> gc.enable()
"""
# Make the extension modules exercised by the doctests above importable:
# distutils builds them under build/lib.<platform>-<pyversion>, so compute
# that directory name and add it to sys.path.
import os
import sys
from distutils.util import get_platform
PLAT_SPEC = "%s-%s" % (get_platform(), sys.version[0:3])
src = os.path.join("build", "lib.%s" % PLAT_SPEC)
sys.path.append(src)
if __name__ == "__main__":
    # Run all doctests contained in this module's docstring.
    import doctest, __main__
    doctest.testmod(__main__)
| apache-2.0 |
rouxbuciu/Handle-My-Music-Business | menu_services.py | 2 | 1960 | import os
import menu_main
import fr_functions
import cfg
# =================================
# SERVICES MENU
# =================================
def view_services():
    """Display every service with its hourly price, then return to the
    Services Management menu after the user presses return."""
    os.system('clear')
    print("\n\nService".ljust(40) + " Price")
    print("="*60)
    # Iterate items() to avoid a second dict lookup per service.
    for service, price in cfg.SERVICES.items():
        print("| " + str(service).ljust(38) + "| " + str(price))
    print("\n\nPress [return] to go back to Services Management.")
    # input() is only used to pause; the typed value is irrelevant, so the
    # previously unused `choice` local has been removed.
    input("")
    menu_main.execute_menu('s')
def add_service():
    """Prompt for a new service name and its hourly price, persist the
    database, and return to the Services Management menu."""
    service_name = input("\n\nEnter a service to add:\n\n >> ").title()
    print("\nHow much does this cost per hour?\n\n")
    hourly_price = fr_functions.check_if_number()
    cfg.SERVICES[service_name] = hourly_price
    fr_functions.save_database()
    menu_main.execute_menu('s')
def remove_service():
    """Permanently delete a service from the database, after confirmation."""
    os.system('clear')
    print("\n\nServices currently offered:\n")
    for n in cfg.SERVICES:
        print(n)
    print("""
    *** WARNING ***
    Removing a service deletes it permanently from the database.""")
    # lookup() returns the (normalised) name, whether it exists, and its
    # index; only the first two are used here.
    name, item_exists, item_index = fr_functions.lookup(
        'service', cfg.SERVICES)
    if item_exists is True:
        # Only delete after an explicit 'y' confirmation.
        if fr_functions.verification() == 'y':
            del cfg.SERVICES[name]
            fr_functions.save_database()
    else:
        fr_functions.alert(
            "%s does not exist in the database." % name.title())
    # NOTE(review): save_database() is also called unconditionally here,
    # even when nothing changed -- presumably harmless; confirm.
    fr_functions.save_database()
    menu_main.execute_menu('s')
def edit_price():
    """Update the hourly price of an existing service, then save and
    return to the Services Management menu."""
    os.system('clear')
    # The original loop always broke after one pass, so a single lookup
    # followed by an if/else is behaviorally identical.
    name, item_exists, item_index = fr_functions.lookup(
        'service', cfg.SERVICES)
    if item_exists is True:
        print("\nEnter new price per hour for %s." % name)
        new_price = fr_functions.check_if_number()
        cfg.SERVICES[name] = new_price
    else:
        fr_functions.alert(
            "%s does not exist in current Services database." % name)
    fr_functions.save_database()
    menu_main.execute_menu('s')
| gpl-2.0 |
terryshi96/script | repo.py | 1 | 2429 | #! /usr/bin/python
# -*- encoding: utf-8 -*-
import sys
import os
import commands
import datetime
def post_dingtalk(msg):
    # Post a plain-text notification to the DingTalk webhook via curl.
    # NOTE(review): relies on the module-global `dingtalk_url`, which is only
    # assigned (and left empty) inside the __main__ block -- confirm a real
    # webhook URL is configured before use. `msg` is interpolated unquoted
    # into a shell command, so it must not contain shell metacharacters.
    print('sending dingtalk message.....')
    os.system("curl %s -H 'Content-Type: application/json' \
    -d '{\"msgtype\": \"text\",\"text\": {\"content\": \" %s \"}}'"%(dingtalk_url, msg))
if __name__ == '__main__':
    # Usage: repo.py <project> <create|finish> <tag|branch>
    # Drives "git flow release" for ../<project> and posts a DingTalk
    # notification on success.
    dingtalk_url = ''
    arg = sys.argv
    if len(arg) != 4:
        print('lack arguments')
        exit(1)
    base_path = '../'
    project = arg[1]
    project_path = base_path + project
    # Action to perform: create / finish
    action = arg[2]
    # What to keep after finishing: tag / branch
    keep = arg[3]
    print(project, action, keep)
    # Ask git flow for the currently open release branch (if any).
    current_tmp = commands.getstatusoutput('cd %s && git flow release list' % project_path)
    current = current_tmp[1].strip().strip('*').strip()
    # Candidate tag name for a new release, derived from the timestamp.
    tag = 'v0.1.' + datetime.datetime.now().strftime("%Y%m%d%H%M")
    flag = 1
    # Only operate on git-flow enabled repositories.
    if current.find('Not a gitflow-enabled repo yet') == -1:
        print('current release: \n' + current + '\n')
        if action == 'create':
            # Refuse to open a second release branch.
            if current.find('No release branches exist') == -1:
                print('please finish the release first')
                exit(1)
            else:
                print('release ' + tag + ' will be created')
                flag = os.system('cd %s && git checkout develop && git pull && git flow release start %s && git flow release publish %s' % (project_path, tag, tag))
                if flag == 0:
                    post_dingtalk("Project %s release branch has been created\n release/%s" % (project, tag))
        elif action == 'finish':
            print('release ' + current + ' will be finished')
            if keep != 'tag':
                flag = os.system('cd %s && git checkout release/%s && git pull && \
                git flow release finish -k -n -m %s -p %s && git push && git checkout develop && git branch -d release/%s' % (project_path, current, current, current, current))
            else:
                flag = os.system('cd %s && git checkout release/%s && git pull && \
                git flow release finish -m %s -p %s' % (project_path, current, current, current))
            if flag == 0:
                # Bug fix: report the branch that was actually finished
                # (`current`), not the unrelated freshly generated `tag`.
                post_dingtalk("Project %s release branch release/%s has been finished\n " % (project, current))
    else:
        os.system('cd %s && pwd && git status' % project_path)
    print('\nDone')
| mit |
determinedcheetahs/cheetah_juniper | hadoop/contrib/hod/hodlib/GridServices/service.py | 182 | 8174 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""defines Service as abstract interface"""
# -*- python -*-
import random, socket
class Service:
  """Abstract base class for all grid services.

  Holds the service descriptor and working directories; concrete services
  must override the methods that raise NotImplementedError below.
  """
  def __init__(self, serviceDesc, workDirs):
    # Descriptor describing the service, and the directories it may use
    # for its working files.
    self.serviceDesc = serviceDesc
    self.workDirs = workDirs

  def getName(self):
    """Return the service name taken from its descriptor."""
    return self.serviceDesc.getName()

  def getInfoAddrs(self):
    """Return a list of addresses that provide information about the
    service; the base implementation knows of none."""
    return []

  def isLost(self):
    """True if the service is down (abstract)."""
    raise NotImplementedError

  def addNodes(self, nodeList):
    """Add a node set to the service (abstract)."""
    raise NotImplementedError

  def removeNodes(self, nodeList):
    """Remove a node set from the service (abstract)."""
    raise NotImplementedError

  def getWorkers(self):
    """Return the service's workers (abstract)."""
    raise NotImplementedError

  def needsMore(self):
    """Return the number of nodes the service wants to add (abstract)."""
    raise NotImplementedError

  def needsLess(self):
    """Return the number of nodes the service wants to remove (abstract)."""
    raise NotImplementedError
class MasterSlave(Service):
  """ the base class for a master slave
  service architecture. """
  def __init__(self, serviceDesc, workDirs,requiredNode):
    Service.__init__(self, serviceDesc, workDirs)
    # Lifecycle flags for the (single) master process.
    self.launchedMaster = False
    self.masterInitialized = False
    self.masterAddress = 'none'
    # Node on which the master must be scheduled, if any.
    self.requiredNode = requiredNode
    # Last master failure message and the number of failures so far.
    self.failedMsg = None
    self.masterFailureCount = 0
  def getRequiredNode(self):
    """Return the node the master is required to run on (may be None)."""
    return self.requiredNode
  def getMasterRequest(self):
    """ the number of master you need
    to run for this service. """
    raise NotImplementedError
  def isLaunchable(self, serviceDict):
    """ if your service does not depend on
    other services. is set to true by default. """
    return True
  def getMasterCommands(self, serviceDict):
    """ a list of master commands you
    want to run for this service. """
    raise NotImplementedError
  def getAdminCommands(self, serviceDict):
    """ a list of admin commands you
    want to run for this service. """
    raise NotImplementedError
  def getWorkerCommands(self, serviceDict):
    """ a list of worker commands you want to
    run for this service. """
    raise NotImplementedError
  def setMasterNodes(self, list):
    """ set the status of master nodes
    after they start running on a node cluster. """
    raise NotImplementedError
  def addNodes(self, list):
    """ add nodes to a service. Not implemented
    currently. """
    raise NotImplementedError
  def getMasterAddrs(self):
    """ return the addresses of master. the
    hostname:port to which worker nodes should
    connect. """
    raise NotImplementedError
  def setMasterParams(self, list):
    """ set the various master params
    depending on what each hodring set
    the master params to. """
    raise NotImplementedError
  def setlaunchedMaster(self):
    """ set the status of master launched
    to true. """
    self.launchedMaster = True
  def isMasterLaunched(self):
    """ return if a master has been launched
    for the service or not. """
    return self.launchedMaster
  def isMasterInitialized(self):
    """ return if a master if launched
    has been initialized or not. """
    return self.masterInitialized
  def setMasterInitialized(self):
    """ set the master initialized to
    true. """
    self.masterInitialized = True
    # Reset failure related variables, as master is initialized successfully.
    self.masterFailureCount = 0
    self.failedMsg = None
  def getMasterAddress(self):
    """ it needs to change to reflect
    more that one masters. Currently it
    keeps a knowledge of where the master
    was launched and to keep track if it was actually
    up or not. """
    return self.masterAddress
  def setMasterAddress(self, addr):
    """Record where the master was launched (hostname:port string)."""
    self.masterAddress = addr
  def isExternal(self):
    """True when the service is managed outside of this cluster."""
    return self.serviceDesc.isExternal()
  def setMasterFailed(self, err):
    """Sets variables related to Master failure"""
    self.masterFailureCount += 1
    self.failedMsg = err
    # When command is sent to HodRings, this would have been set to True.
    # Reset it to reflect the correct status.
    self.launchedMaster = False
  def getMasterFailed(self):
    """Return the last master failure message (None if none)."""
    return self.failedMsg
  def getMasterFailureCount(self):
    """Return how many times the master has failed so far."""
    return self.masterFailureCount
class NodeRequest:
  """A request for a number of nodes, with optional required/preferred
  placement lists and a preemption flag."""
  def __init__(self, n, required=None, preferred=None, isPreemptee=True):
    # Bug fix: the original used mutable default arguments ([]), so every
    # NodeRequest built with defaults shared the same list objects. Use
    # None sentinels and create a fresh list per instance instead; passing
    # explicit lists behaves exactly as before.
    self.numNodes = n
    self.preferred = [] if preferred is None else preferred
    self.isPreemptee = isPreemptee
    self.required = [] if required is None else required
  def setNumNodes(self, n):
    """Set the number of nodes requested."""
    self.numNodes = n
  def setPreferredList(self, list):
    """Replace the list of preferred nodes."""
    self.preferred = list
  def setIsPreemptee(self, flag):
    """Set whether this request may be preempted."""
    self.isPreemptee = flag
class ServiceUtil:
  """ this class should be moved out of
  service.py to a util file"""
  # Ports handed out so far by this process; never reused.
  localPortUsed = {}
  def getUniqRandomPort(h=None, low=50000, high=60000, retry=900, log=None):
    """This allocates a random free port between low and high"""
    # We use a default value of 900 retries, which takes an agreeable
    # time limit of ~ 6.2 seconds to check 900 ports, in the worst case
    # of no available port in those 900.
    # NOTE(review): retry is only decremented when bind() fails, so if
    # every candidate is already in localPortUsed this loops forever --
    # confirm callers cannot exhaust the range.
    while retry > 0:
      n = random.randint(low, high)
      if n in ServiceUtil.localPortUsed:
        continue
      s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
      if not h:
        h = socket.gethostname()
      avail = False
      if log: log.debug("Trying to see if port %s is available"% n)
      try:
        s.bind((h, n))
        if log: log.debug("Yes, port %s is available" % n)
        avail = True
      except socket.error,e:
        if log: log.debug("Could not bind to the port %s. Reason %s" % (n,e))
        retry -= 1
        pass
      # The earlier code that used to be here had syntax errors. The code path
      # couldn't be followed anytime, so the error remained uncaught.
      # This time I stumbled upon the error
      s.close()
      if avail:
        ServiceUtil.localPortUsed[n] = True
        return n
    raise ValueError, "Can't find unique local port between %d and %d" % (low, high)
  getUniqRandomPort = staticmethod(getUniqRandomPort)
  def getUniqPort(h=None, low=40000, high=60000, retry=900, log=None):
    """get unique port on a host that can be used by service
    This and its consumer code should disappear when master
    nodes get allocatet by nodepool"""
    # We use a default value of 900 retries, which takes an agreeable
    # time limit of ~ 6.2 seconds to check 900 ports, in the worst case
    # of no available port in those 900.
    # Sequential scan upward from `low` (contrast with the random probe
    # above). Same caveat: retry only decrements on bind failure.
    n = low
    while retry > 0:
      n = n + 1
      if n in ServiceUtil.localPortUsed:
        continue
      s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
      if not h:
        h = socket.gethostname()
      avail = False
      if log: log.debug("Trying to see if port %s is available"% n)
      try:
        s.bind((h, n))
        if log: log.debug("Yes, port %s is available" % n)
        avail = True
      except socket.error,e:
        if log: log.debug("Could not bind to the port %s. Reason %s" % (n,e))
        retry -= 1
        pass
      s.close()
      if avail:
        ServiceUtil.localPortUsed[n] = True
        return n
    raise ValueError, "Can't find unique local port between %d and %d" % (low, high)
  getUniqPort = staticmethod(getUniqPort)
| apache-2.0 |
brianparry/elastalert | tests/config_test.py | 6 | 7403 | # -*- coding: utf-8 -*-
import copy
import datetime
import mock
import pytest
import elastalert.alerts
import elastalert.ruletypes
from elastalert.config import get_file_paths
from elastalert.config import load_configuration
from elastalert.config import load_options
from elastalert.config import load_rules
from elastalert.util import EAException
# Minimal global ElastAlert configuration returned by the mocked yaml
# loader in place of the real config file.
test_config = {'rules_folder': 'test_folder',
               'run_every': {'minutes': 10},
               'buffer_time': {'minutes': 10},
               'es_host': 'elasticsearch.test',
               'es_port': 12345,
               'writeback_index': 'test_index'}

# A fully populated spike rule; individual tests deepcopy and tweak it.
test_rule = {'es_host': 'test_host',
             'es_port': 12345,
             'name': 'testrule',
             'type': 'spike',
             'spike_height': 2,
             'spike_type': 'up',
             'timeframe': {'minutes': 10},
             'index': 'test_index',
             'query_key': 'testkey',
             'compare_key': 'comparekey',
             'filter': [{'term': {'key': 'value'}}],
             'alert': 'email',
             'use_count_query': True,
             'doc_type': 'blsh',
             'email': 'test@test.test',
             'aggregation': {'hours': 2},
             'include': ['comparekey', '@timestamp']}

# Stand-in for parsed command-line arguments passed to load_rules().
test_args = mock.Mock()
test_args.config = 'test_config'
test_args.rule = None
def test_import_rules():
    """Rule types and alerters given as dotted paths are imported."""
    test_rule_copy = copy.deepcopy(test_rule)
    test_rule_copy['type'] = 'testing.test.RuleType'
    with mock.patch('elastalert.config.yaml_loader') as mock_open:
        mock_open.return_value = test_rule_copy
        # Test that type is imported
        with mock.patch('__builtin__.__import__') as mock_import:
            mock_import.return_value = elastalert.ruletypes
            load_configuration('test_config', test_config)
        # __import__(name, globals, locals, fromlist): check the module path
        # and the class name requested from it.
        assert mock_import.call_args_list[0][0][0] == 'testing.test'
        assert mock_import.call_args_list[0][0][3] == ['RuleType']
        # Test that alerts are imported
        test_rule_copy = copy.deepcopy(test_rule)
        mock_open.return_value = test_rule_copy
        test_rule_copy['alert'] = 'testing2.test2.Alerter'
        with mock.patch('__builtin__.__import__') as mock_import:
            mock_import.return_value = elastalert.alerts
            load_configuration('test_config', test_config)
        assert mock_import.call_args_list[0][0][0] == 'testing2.test2'
        assert mock_import.call_args_list[0][0][3] == ['Alerter']
def test_load_rules():
    """load_rules turns yaml dicts into instantiated rule objects, with
    parsed timedeltas and a de-duplicated 'include' list."""
    test_rule_copy = copy.deepcopy(test_rule)
    test_config_copy = copy.deepcopy(test_config)
    with mock.patch('elastalert.config.yaml_loader') as mock_open:
        # First load returns the global config, second the rule file.
        mock_open.side_effect = [test_config_copy, test_rule_copy]
        with mock.patch('os.listdir') as mock_ls:
            mock_ls.return_value = ['testrule.yaml']
            rules = load_rules(test_args)
    assert isinstance(rules['rules'][0]['type'], elastalert.ruletypes.RuleType)
    assert isinstance(rules['rules'][0]['alert'][0], elastalert.alerts.Alerter)
    assert isinstance(rules['rules'][0]['timeframe'], datetime.timedelta)
    assert isinstance(rules['run_every'], datetime.timedelta)
    for included_key in ['comparekey', 'testkey', '@timestamp']:
        assert included_key in rules['rules'][0]['include']
    # Assert include doesn't contain duplicates
    assert rules['rules'][0]['include'].count('@timestamp') == 1
    assert rules['rules'][0]['include'].count('comparekey') == 1
def test_load_default_host_port():
    """A rule without es_host/es_port inherits them from the global config."""
    rule_conf = copy.deepcopy(test_rule)
    for key in ('es_host', 'es_port'):
        rule_conf.pop(key)
    global_conf = copy.deepcopy(test_config)
    with mock.patch('elastalert.config.yaml_loader') as loader, \
            mock.patch('os.listdir') as listdir:
        loader.side_effect = [global_conf, rule_conf]
        listdir.return_value = ['testrule.yaml']
        rules = load_rules(test_args)
    # The values come from the global test_config fixture.
    assert rules['es_port'] == 12345
    assert rules['es_host'] == 'elasticsearch.test'
def test_compound_query_key():
    """A list-valued query_key is flattened to a comma-joined compound key
    and its component fields are added to 'include'."""
    rule_conf = copy.deepcopy(test_rule)
    rule_conf.pop('use_count_query')
    rule_conf['query_key'] = ['field1', 'field2']
    load_options(rule_conf, test_config)
    for field in ('field1', 'field2'):
        assert field in rule_conf['include']
    assert rule_conf['query_key'] == 'field1,field2'
    assert rule_conf['compound_query_key'] == ['field1', 'field2']
def test_raises_on_missing_config():
    """Removing any required rule option makes load_rules raise EAException;
    keys listed in optional_keys may be absent without error."""
    optional_keys = ('aggregation', 'use_count_query', 'query_key', 'compare_key', 'filter', 'include', 'es_host', 'es_port')
    test_rule_copy = copy.deepcopy(test_rule)
    # Iterate over the key list of the first copy; the loop body rebinds
    # test_rule_copy to a fresh copy on every pass, so the iterated dict
    # itself is never mutated.
    for key in test_rule_copy.keys():
        test_rule_copy = copy.deepcopy(test_rule)
        test_config_copy = copy.deepcopy(test_config)
        test_rule_copy.pop(key)
        # Non required keys
        if key in optional_keys:
            continue
        with mock.patch('elastalert.config.yaml_loader') as mock_open:
            mock_open.side_effect = [test_config_copy, test_rule_copy]
            with mock.patch('os.listdir') as mock_ls:
                mock_ls.return_value = ['testrule.yaml']
                with pytest.raises(EAException):
                    load_rules(test_args)
def test_raises_on_bad_generate_kibana_filters():
    """With generate_kibana_link enabled, filters Kibana cannot render
    raise EAException, while supported filters load cleanly."""
    bad_filters = [[{'not': {'terms': {'blah': 'blah'}}}],
                   [{'terms': {'blah': 'blah'}}],
                   [{'query': {'not_querystring': 'this:that'}}],
                   [{'query': {'wildcard': 'this*that'}}],
                   [{'blah': 'blah'}]]
    good_filters = [[{'term': {'field': 'value'}}],
                    [{'not': {'term': {'this': 'that'}}}],
                    [{'not': {'query': {'query_string': {'query': 'this:that'}}}}],
                    [{'query': {'query_string': {'query': 'this:that'}}}],
                    [{'range': {'blah': {'from': 'a', 'to': 'b'}}}],
                    [{'not': {'range': {'blah': {'from': 'a', 'to': 'b'}}}}]]
    # Test that all the good filters work, but fail with a bad filter added
    for good in good_filters:
        test_rule_copy = copy.deepcopy(test_rule)
        # Bug fix: set the flag on the per-test copy instead of mutating the
        # shared module-level test_rule fixture, which leaked
        # 'generate_kibana_link' into every test that ran afterwards.
        test_rule_copy['generate_kibana_link'] = True
        test_rule_copy['filter'] = good
        with mock.patch('elastalert.config.yaml_loader') as mock_open:
            mock_open.return_value = test_rule_copy
            load_configuration('blah', test_config)
            for bad in bad_filters:
                test_rule_copy['filter'] = good + bad
                with pytest.raises(EAException):
                    load_configuration('blah', test_config)
def test_get_file_paths():
    """get_file_paths walks rules_folder recursively and returns every
    yaml file found in it and its subdirectories."""
    conf = {'scan_subdirectories': True, 'rules_folder': 'root'}
    walk_paths = (('root', ('folder_a', 'folder_b'), ('rule.yaml', )),
                  ('root/folder_a', (), ('a.yaml', 'ab.yaml')),
                  ('root/folder_b', (), ('b.yaml', )))
    with mock.patch('os.walk') as mock_walk:
        mock_walk.return_value = walk_paths
        paths = get_file_paths(conf)
    expected = {
        'root/rule.yaml',
        'root/folder_a/a.yaml',
        'root/folder_a/ab.yaml',
        'root/folder_b/b.yaml',
    }
    assert len(paths) == 4
    assert set(paths) == expected
| apache-2.0 |
zbitmanis/cmanager | scalerd/zba.py | 1 | 3189 | #!/usr/bin/env python3
"""
Copyright 2017 Andris Zbitkovskis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
import argparse
import daemon
import signal
import logging
import configparser
from daemon import pidfile
from weigherstate import WeigherState
from scaler import Scaler
# Module-wide debug flag; switches the logger to DEBUG level and enables
# extra prints inside start_daemon().
debug =True
def start_daemon(pidf, logf, workd, rndf, fork ,amqp_url):
    """Configure logging, build a Scaler and run it, either in the
    foreground or daemonized.

    pidf/logf/workd/rndf: pid-file, result-log, working-directory and
    random-table paths; fork: 'y'/'Y' daemonizes, anything else runs
    interactively; amqp_url: AMQP broker connection string.
    """
    global debug
    # fork == 'y'/'Y' means daemonize; anything else runs interactively.
    interactive = not fork in ['y', 'Y']
    LOG_FILE='/var/log/scaler/daemon.log'
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG if debug else logging.INFO)
    fmt = logging.Formatter("%(asctime)s %(levelname)s %(message)s","%Y-%m-%d %H:%M:%S")
    # Interactive runs log to stderr; daemonized runs log to LOG_FILE.
    fh = logging.StreamHandler() if interactive else logging.FileHandler(LOG_FILE)
    fh.setFormatter(fmt)
    logger.addHandler(fh)
    result_file=open(logf,'a')
    scaler = Scaler(result_file , rndf,logger,amqp_url= amqp_url)
    if not interactive :
        if debug:
            print("scaler: entered run()")
            print("scaler: pidf:{} logf:{} workd: {} rndf: {} fork:{} ".format(pidf, logf,workd,rndf, fork))
            print("scaler: about to start daemonization")
        scaler.daemon=True
        ctx=daemon.DaemonContext(
            working_directory=workd ,
            umask=0o002,
            pidfile=pidfile.TimeoutPIDLockFile(pidf),
        )
        # Keep the log handler's and result file's descriptors open across
        # the daemon double-fork; SIGHUP terminates the daemon.
        ctx.files_preserve=[fh.stream,result_file ]
        ctx.signal_map = {
            signal.SIGHUP: 'terminate',
        }
        ctx.stdout=fh.stream
        ctx.stderr=fh.stream
        with ctx:
            scaler.run()
    else:
        scaler.run()
def main(argv):
    """Unused stub; the real entry point is the __main__ block (its call
    to main() is commented out)."""
    pass
if __name__ == '__main__':
    # Command-line interface: daemon file paths plus whether to fork.
    parser = argparse.ArgumentParser(description="Ceph Scaler daemon")
    parser.add_argument('-p', '--pid-file', default='/var/run/scaler/scaler.pid')
    parser.add_argument('-l', '--log-file', default='/var/log/scaler/scaler.log')
    parser.add_argument('-w', '--working-dir', default='/var/lib/scaler')
    parser.add_argument('-r', '--rnd-table', default='/var/lib/scaler/rndtable.txt')
    parser.add_argument('-f', '--fork', choices=['y','n'] , default='y', help='Fork' )
    args = parser.parse_args()
    # AMQP broker credentials are read from the [amqp] section of
    # /etc/scaler.cfg.
    configParser = configparser.RawConfigParser()
    configFilePath = '/etc/scaler.cfg'
    configParser.read(configFilePath)
    host=configParser.get('amqp', 'host')
    user=configParser.get('amqp', 'user')
    password=configParser.get('amqp', 'password')
    # %2F is the url-encoded default vhost "/".
    amqp_url="amqp://{}:{}@{}:5672/%2F".format(user,password,host)
    start_daemon(pidf=args.pid_file, logf=args.log_file,workd = args.working_dir,rndf = args.rnd_table , fork=args.fork , amqp_url=amqp_url)
    # main(sys.argv)
| apache-2.0 |
ovnicraft/openerp-restaurant | account/wizard/account_unreconcile.py | 385 | 2086 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class account_unreconcile(osv.osv_memory):
    _name = "account.unreconcile"
    _description = "Account Unreconcile"

    def trans_unrec(self, cr, uid, ids, context=None):
        """Drop the reconciliation on the journal items selected in the
        client (context['active_ids']), then close the wizard window."""
        move_line_obj = self.pool.get('account.move.line')
        if context is None:
            context = {}
        active_ids = context.get('active_ids', False)
        if active_ids:
            move_line_obj._remove_move_reconcile(
                cr, uid, active_ids, context=context)
        return {'type': 'ir.actions.act_window_close'}
class account_unreconcile_reconcile(osv.osv_memory):
    _name = "account.unreconcile.reconcile"
    _description = "Account Unreconcile Reconcile"

    def trans_unrec_reconcile(self, cr, uid, ids, context=None):
        """Delete the reconciliation records selected in the client
        (context['active_ids']), then close the wizard window."""
        obj_move_reconcile = self.pool.get('account.move.reconcile')
        if context is None:
            context = {}
        # Robustness/consistency fix: the sibling account_unreconcile wizard
        # uses context.get(); the direct context['active_ids'] indexing here
        # raised KeyError when the wizard was invoked without a selection.
        rec_ids = context.get('active_ids', False)
        if rec_ids:
            obj_move_reconcile.unlink(cr, uid, rec_ids, context=context)
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
iemejia/beam | sdks/python/apache_beam/transforms/util.py | 2 | 45927 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple utility PTransforms.
"""
# pytype: skip-file
import collections
import contextlib
import random
import re
import threading
import time
import uuid
from typing import TYPE_CHECKING
from typing import Any
from typing import Iterable
from typing import List
from typing import Tuple
from typing import TypeVar
from typing import Union
from apache_beam import coders
from apache_beam import typehints
from apache_beam.metrics import Metrics
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.transforms import window
from apache_beam.transforms.combiners import CountCombineFn
from apache_beam.transforms.core import CombinePerKey
from apache_beam.transforms.core import DoFn
from apache_beam.transforms.core import FlatMap
from apache_beam.transforms.core import Flatten
from apache_beam.transforms.core import GroupByKey
from apache_beam.transforms.core import Map
from apache_beam.transforms.core import MapTuple
from apache_beam.transforms.core import ParDo
from apache_beam.transforms.core import Windowing
from apache_beam.transforms.ptransform import PTransform
from apache_beam.transforms.ptransform import ptransform_fn
from apache_beam.transforms.timeutil import TimeDomain
from apache_beam.transforms.trigger import AccumulationMode
from apache_beam.transforms.trigger import Always
from apache_beam.transforms.userstate import BagStateSpec
from apache_beam.transforms.userstate import CombiningValueStateSpec
from apache_beam.transforms.userstate import TimerSpec
from apache_beam.transforms.userstate import on_timer
from apache_beam.transforms.window import NonMergingWindowFn
from apache_beam.transforms.window import TimestampCombiner
from apache_beam.transforms.window import TimestampedValue
from apache_beam.typehints.sharded_key_type import ShardedKeyType
from apache_beam.utils import windowed_value
from apache_beam.utils.annotations import deprecated
from apache_beam.utils.annotations import experimental
from apache_beam.utils.sharded_key import ShardedKey
if TYPE_CHECKING:
from apache_beam import pvalue
from apache_beam.runners.pipeline_context import PipelineContext
# Public API of this module.
__all__ = [
    'BatchElements',
    'CoGroupByKey',
    'Distinct',
    'Keys',
    'KvSwap',
    'Regex',
    'Reify',
    'RemoveDuplicates',
    'Reshuffle',
    'ToString',
    'Values',
    'WithKeys',
    'GroupIntoBatches'
]

# Generic type variables used in the transform type hints below.
K = TypeVar('K')
V = TypeVar('V')
T = TypeVar('T')
class CoGroupByKey(PTransform):
  """Groups results across several PCollections by key.

  Given an input dict of serializable keys (called "tags") to 0 or more
  PCollections of (key, value) tuples, it creates a single output PCollection
  of (key, value) tuples whose keys are the unique input keys from all inputs,
  and whose values are dicts mapping each tag to an iterable of whatever values
  were under the key in the corresponding PCollection, in this manner::

      ('some key', {'tag1': ['value 1 under "some key" in pcoll1',
                             'value 2 under "some key" in pcoll1',
                             ...],
                    'tag2': ... ,
                    ... })

  For example, given::

      {'tag1': pc1, 'tag2': pc2, 333: pc3}

  where::

      pc1 = [(k1, v1)]
      pc2 = []
      pc3 = [(k1, v31), (k1, v32), (k2, v33)]

  The output PCollection would be::

      [(k1, {'tag1': [v1], 'tag2': [], 333: [v31, v32]}),
       (k2, {'tag1': [], 'tag2': [], 333: [v33]})]

  CoGroupByKey also works for tuples, lists, or other flat iterables of
  PCollections, in which case the values of the resulting PCollections
  will be tuples whose nth value is the list of values from the nth
  PCollection---conceptually, the "tags" are the indices into the input.
  Thus, for this input::

      (pc1, pc2, pc3)

  the output would be::

      [(k1, ([v1], [], [v31, v32]),
       (k2, ([], [], [v33]))]

  Attributes:
    **kwargs: Accepts a single named argument "pipeline", which specifies the
      pipeline that "owns" this PTransform. Ordinarily CoGroupByKey can obtain
      this information from one of the input PCollections, but if there are none
      (or if there's a chance there may be none), this argument is the only way
      to provide pipeline information, and should be considered mandatory.
  """
  def __init__(self, **kwargs):
    super(CoGroupByKey, self).__init__()
    # Optional explicit pipeline; needed when no input PCollection can
    # supply one (see the class docstring).
    self.pipeline = kwargs.pop('pipeline', None)
    if kwargs:
      raise ValueError('Unexpected keyword arguments: %s' % list(kwargs.keys()))

  def _extract_input_pvalues(self, pvalueish):
    # Accept either a dict of tag -> PCollection, or a flat iterable.
    try:
      # If this works, it's a dict.
      return pvalueish, tuple(pvalueish.values())
    except AttributeError:
      pcolls = tuple(pvalueish)
      return pcolls, pcolls

  def expand(self, pcolls):
    """Performs CoGroupByKey on argument pcolls; see class docstring."""

    # For associating values in K-V pairs with the PCollections they came from.
    def _pair_tag_with_value(key_value, tag):
      (key, value) = key_value
      return (key, (tag, value))

    # Creates the key, value pairs for the output PCollection. Values are either
    # lists or dicts (per the class docstring), initialized by the result of
    # result_ctor(result_ctor_arg).
    def _merge_tagged_vals_under_key(key_grouped, result_ctor, result_ctor_arg):
      (key, grouped) = key_grouped
      result_value = result_ctor(result_ctor_arg)
      for tag, value in grouped:
        result_value[tag].append(value)
      return (key, result_value)

    try:
      # If pcolls is a dict, we turn it into (tag, pcoll) pairs for use in the
      # general-purpose code below. The result value constructor creates dicts
      # whose keys are the tags.
      result_ctor_arg = list(pcolls)
      result_ctor = lambda tags: dict((tag, []) for tag in tags)
      pcolls = pcolls.items()
    except AttributeError:
      # Otherwise, pcolls is a list/tuple, so we turn it into (index, pcoll)
      # pairs. The result value constructor makes tuples with len(pcolls) slots.
      pcolls = list(enumerate(pcolls))
      result_ctor_arg = len(pcolls)
      result_ctor = lambda size: tuple([] for _ in range(size))

    # Check input PCollections for PCollection-ness, and that they all belong
    # to the same pipeline.
    for _, pcoll in pcolls:
      self._check_pcollection(pcoll)
      if self.pipeline:
        assert pcoll.pipeline == self.pipeline

    # Tag each value with its source, flatten all inputs, group by key, and
    # merge each key's (tag, value) pairs into the result container.
    return ([
        pcoll | 'pair_with_%s' % tag >> Map(_pair_tag_with_value, tag) for tag,
        pcoll in pcolls
    ]
            | Flatten(pipeline=self.pipeline)
            | GroupByKey()
            | Map(_merge_tagged_vals_under_key, result_ctor, result_ctor_arg))
@ptransform_fn
@typehints.with_input_types(Tuple[K, V])
@typehints.with_output_types(K)
def Keys(pcoll, label='Keys'):  # pylint: disable=invalid-name
  """Extracts the key from each (key, value) pair in a PCollection."""
  return pcoll | label >> Map(lambda pair: pair[0])
@ptransform_fn
@typehints.with_input_types(Tuple[K, V])
@typehints.with_output_types(V)
def Values(pcoll, label='Values'):  # pylint: disable=invalid-name
  """Extracts the value from each (key, value) pair in a PCollection."""
  return pcoll | label >> Map(lambda pair: pair[1])
@ptransform_fn
@typehints.with_input_types(Tuple[K, V])
@typehints.with_output_types(Tuple[V, K])
def KvSwap(pcoll, label='KvSwap'):  # pylint: disable=invalid-name
  """Turns each (key, value) pair into a (value, key) pair."""
  return pcoll | label >> Map(lambda pair: (pair[1], pair[0]))
@ptransform_fn
@typehints.with_input_types(T)
@typehints.with_output_types(T)
def Distinct(pcoll):  # pylint: disable=invalid-name
  """Returns a PCollection with duplicate elements removed."""
  # Pair each element with a dummy value, collapse per key, keep the keys.
  paired = pcoll | 'ToPairs' >> Map(lambda element: (element, None))
  deduped = paired | 'Group' >> CombinePerKey(lambda values: None)
  return deduped | 'Distinct' >> Keys()
@deprecated(since='2.12', current='Distinct')
@ptransform_fn
@typehints.with_input_types(T)
@typehints.with_output_types(T)
def RemoveDuplicates(pcoll):
  """Produces a PCollection containing distinct elements of a PCollection.

  Deprecated since 2.12; use Distinct instead. Kept as a thin alias so
  existing pipelines keep their 'RemoveDuplicates' transform label.
  """
  return pcoll | 'RemoveDuplicates' >> Distinct()
class _BatchSizeEstimator(object):
  """Estimates the best size for batches given historical timing.
  """
  # Cap on stored (batch_size, elapsed) samples; _thin_data() prunes
  # older points once this is reached.
  _MAX_DATA_POINTS = 100
  # Growth-limiting factor for the batch size (applied outside this view).
  _MAX_GROWTH_FACTOR = 2
  def __init__(
      self,
      min_batch_size=1,
      max_batch_size=10000,
      target_batch_overhead=.05,
      target_batch_duration_secs=1,
      variance=0.25,
      clock=time.time,
      ignore_first_n_seen_per_batch_size=0):
    # Validate that the configured bounds and targets are coherent before
    # storing anything.
    if min_batch_size > max_batch_size:
      raise ValueError(
          "Minimum (%s) must not be greater than maximum (%s)" %
          (min_batch_size, max_batch_size))
    if target_batch_overhead and not 0 < target_batch_overhead <= 1:
      raise ValueError(
          "target_batch_overhead (%s) must be between 0 and 1" %
          (target_batch_overhead))
    if target_batch_duration_secs and target_batch_duration_secs <= 0:
      raise ValueError(
          "target_batch_duration_secs (%s) must be positive" %
          (target_batch_duration_secs))
    if not (target_batch_overhead or target_batch_duration_secs):
      raise ValueError(
          "At least one of target_batch_overhead or "
          "target_batch_duration_secs must be positive.")
    if ignore_first_n_seen_per_batch_size < 0:
      raise ValueError(
          'ignore_first_n_seen_per_batch_size (%s) must be non '
          'negative' % (ignore_first_n_seen_per_batch_size))
    self._min_batch_size = min_batch_size
    self._max_batch_size = max_batch_size
    self._target_batch_overhead = target_batch_overhead
    self._target_batch_duration_secs = target_batch_duration_secs
    self._variance = variance
    # Injectable clock, primarily to make timing deterministic in tests.
    self._clock = clock
    # Collected (batch_size, elapsed_seconds) samples.
    self._data = []
    self._ignore_next_timing = False
    self._ignore_first_n_seen_per_batch_size = (
        ignore_first_n_seen_per_batch_size)
    self._batch_size_num_seen = {}
    self._replay_last_batch_size = None
    self._size_distribution = Metrics.distribution(
        'BatchElements', 'batch_size')
    self._time_distribution = Metrics.distribution(
        'BatchElements', 'msec_per_batch')
    # Beam distributions only accept integer values, so we use this to
    # accumulate under-reported values until they add up to whole milliseconds.
    # (Milliseconds are chosen because that's conventionally used elsewhere in
    # profiling-style counters.)
    self._remainder_msecs = 0
def ignore_next_timing(self):
"""Call to indicate the next timing should be ignored.
For example, the first emit of a ParDo operation is known to be anomalous
due to setup that may occur.
"""
self._ignore_next_timing = True
@contextlib.contextmanager
def record_time(self, batch_size):
start = self._clock()
yield
elapsed = self._clock() - start
elapsed_msec = 1e3 * elapsed + self._remainder_msecs
self._size_distribution.update(batch_size)
self._time_distribution.update(int(elapsed_msec))
self._remainder_msecs = elapsed_msec - int(elapsed_msec)
# If we ignore the next timing, replay the batch size to get accurate
# timing.
if self._ignore_next_timing:
self._ignore_next_timing = False
self._replay_last_batch_size = batch_size
else:
self._data.append((batch_size, elapsed))
if len(self._data) >= self._MAX_DATA_POINTS:
self._thin_data()
def _thin_data(self):
# Make sure we don't change the parity of len(self._data)
# As it's used below to alternate jitter.
self._data.pop(random.randrange(len(self._data) // 4))
self._data.pop(random.randrange(len(self._data) // 2))
@staticmethod
def linear_regression_no_numpy(xs, ys):
# Least squares fit for y = a + bx over all points.
n = float(len(xs))
xbar = sum(xs) / n
ybar = sum(ys) / n
if xbar == 0:
return ybar, 0
if all(xs[0] == x for x in xs):
# Simply use the mean if all values in xs are same.
return 0, ybar / xbar
b = (
sum([(x - xbar) * (y - ybar)
for x, y in zip(xs, ys)]) / sum([(x - xbar)**2 for x in xs]))
a = ybar - b * xbar
return a, b
@staticmethod
def linear_regression_numpy(xs, ys):
# pylint: disable=wrong-import-order, wrong-import-position
import numpy as np
from numpy import sum
n = len(xs)
if all(xs[0] == x for x in xs):
# If all values of xs are same then fallback to linear_regression_no_numpy
return _BatchSizeEstimator.linear_regression_no_numpy(xs, ys)
xs = np.asarray(xs, dtype=float)
ys = np.asarray(ys, dtype=float)
# First do a simple least squares fit for y = a + bx over all points.
b, a = np.polyfit(xs, ys, 1)
if n < 10:
return a, b
else:
# Refine this by throwing out outliers, according to Cook's distance.
# https://en.wikipedia.org/wiki/Cook%27s_distance
sum_x = sum(xs)
sum_x2 = sum(xs**2)
errs = a + b * xs - ys
s2 = sum(errs**2) / (n - 2)
if s2 == 0:
# It's an exact fit!
return a, b
h = (sum_x2 - 2 * sum_x * xs + n * xs**2) / (n * sum_x2 - sum_x**2)
cook_ds = 0.5 / s2 * errs**2 * (h / (1 - h)**2)
# Re-compute the regression, excluding those points with Cook's distance
# greater than 0.5, and weighting by the inverse of x to give a more
# stable y-intercept (as small batches have relatively more information
# about the fixed overhead).
weight = (cook_ds <= 0.5) / xs
b, a = np.polyfit(xs, ys, 1, w=weight)
return a, b
try:
# pylint: disable=wrong-import-order, wrong-import-position
import numpy as np
linear_regression = linear_regression_numpy
except ImportError:
linear_regression = linear_regression_no_numpy
def _calculate_next_batch_size(self):
if self._min_batch_size == self._max_batch_size:
return self._min_batch_size
elif len(self._data) < 1:
return self._min_batch_size
elif len(self._data) < 2:
# Force some variety so we have distinct batch sizes on which to do
# linear regression below.
return int(
max(
min(
self._max_batch_size,
self._min_batch_size * self._MAX_GROWTH_FACTOR),
self._min_batch_size + 1))
# There tends to be a lot of noise in the top quantile, which also
# has outsided influence in the regression. If we have enough data,
# Simply declare the top 20% to be outliers.
trimmed_data = sorted(self._data)[:max(20, len(self._data) * 4 // 5)]
# Linear regression for y = a + bx, where x is batch size and y is time.
xs, ys = zip(*trimmed_data)
a, b = self.linear_regression(xs, ys)
# Avoid nonsensical or division-by-zero errors below due to noise.
a = max(a, 1e-10)
b = max(b, 1e-20)
last_batch_size = self._data[-1][0]
cap = min(last_batch_size * self._MAX_GROWTH_FACTOR, self._max_batch_size)
target = self._max_batch_size
if self._target_batch_duration_secs:
# Solution to a + b*x = self._target_batch_duration_secs.
target = min(target, (self._target_batch_duration_secs - a) / b)
if self._target_batch_overhead:
# Solution to a / (a + b*x) = self._target_batch_overhead.
target = min(target, (a / b) * (1 / self._target_batch_overhead - 1))
# Avoid getting stuck at a single batch size (especially the minimal
# batch size) which would not allow us to extrapolate to other batch
# sizes.
# Jitter alternates between 0 and 1.
jitter = len(self._data) % 2
# Smear our samples across a range centered at the target.
if len(self._data) > 10:
target += int(target * self._variance * 2 * (random.random() - .5))
return int(max(self._min_batch_size + jitter, min(target, cap)))
def next_batch_size(self):
# Check if we should replay a previous batch size due to it not being
# recorded.
if self._replay_last_batch_size:
result = self._replay_last_batch_size
self._replay_last_batch_size = None
else:
result = self._calculate_next_batch_size()
seen_count = self._batch_size_num_seen.get(result, 0) + 1
if seen_count <= self._ignore_first_n_seen_per_batch_size:
self.ignore_next_timing()
self._batch_size_num_seen[result] = seen_count
return result
class _GlobalWindowsBatchingDoFn(DoFn):
  """Batches elements into lists of an adaptively-chosen size.

  Only suitable for globally-windowed input: `process` yields the batch as a
  plain list, while `finish_bundle` must yield an explicitly windowed value
  and uses the global window to do so.
  """
  def __init__(self, batch_size_estimator):
    # Estimator that picks the next batch size from recorded timings.
    self._batch_size_estimator = batch_size_estimator

  def start_bundle(self):
    self._batch = []
    self._batch_size = self._batch_size_estimator.next_batch_size()
    # The first emit often involves non-trivial setup.
    self._batch_size_estimator.ignore_next_timing()

  def process(self, element):
    self._batch.append(element)
    if len(self._batch) >= self._batch_size:
      # Time the (fused) downstream processing of the batch so the estimator
      # can tune future batch sizes.
      with self._batch_size_estimator.record_time(self._batch_size):
        yield self._batch
      self._batch = []
      self._batch_size = self._batch_size_estimator.next_batch_size()

  def finish_bundle(self):
    # Flush any partially-filled batch at the end of the bundle.
    if self._batch:
      with self._batch_size_estimator.record_time(self._batch_size):
        yield window.GlobalWindows.windowed_value(self._batch)
      self._batch = None
      self._batch_size = self._batch_size_estimator.next_batch_size()
class _WindowAwareBatchingDoFn(DoFn):
  """Batches elements into lists, keeping one batch per window.

  At most `_MAX_LIVE_WINDOWS` windows are buffered simultaneously; when that
  limit is exceeded, the window holding the most elements is flushed early.
  """

  # Upper bound on the number of concurrently-buffered windows.
  _MAX_LIVE_WINDOWS = 10

  def __init__(self, batch_size_estimator):
    # Estimator that picks the next batch size from recorded timings.
    self._batch_size_estimator = batch_size_estimator

  def start_bundle(self):
    # Maps window -> list of elements buffered for that window.
    self._batches = collections.defaultdict(list)
    self._batch_size = self._batch_size_estimator.next_batch_size()
    # The first emit often involves non-trivial setup.
    self._batch_size_estimator.ignore_next_timing()

  def process(self, element, window=DoFn.WindowParam):
    self._batches[window].append(element)
    if len(self._batches[window]) >= self._batch_size:
      # This window's batch is full: emit it, timestamped at the end of its
      # window.
      with self._batch_size_estimator.record_time(self._batch_size):
        yield windowed_value.WindowedValue(
            self._batches[window], window.max_timestamp(), (window, ))
      del self._batches[window]
      self._batch_size = self._batch_size_estimator.next_batch_size()
    elif len(self._batches) > self._MAX_LIVE_WINDOWS:
      # Too many live windows: evict the fullest one.  Note that this rebinds
      # the local `window` to the evicted window's key.
      window, _ = sorted(
          self._batches.items(),
          key=lambda window_batch: len(window_batch[1]),
          reverse=True)[0]
      with self._batch_size_estimator.record_time(self._batch_size):
        yield windowed_value.WindowedValue(
            self._batches[window], window.max_timestamp(), (window, ))
      del self._batches[window]
      self._batch_size = self._batch_size_estimator.next_batch_size()

  def finish_bundle(self):
    # Flush every remaining (partial) batch, window by window.
    for window, batch in self._batches.items():
      if batch:
        with self._batch_size_estimator.record_time(self._batch_size):
          yield windowed_value.WindowedValue(
              batch, window.max_timestamp(), (window, ))
    self._batches = None
    self._batch_size = self._batch_size_estimator.next_batch_size()
@typehints.with_input_types(T)
@typehints.with_output_types(List[T])
class BatchElements(PTransform):
  """A Transform that batches elements for amortized processing.

  This transform is designed to precede operations whose processing cost
  is of the form

      time = fixed_cost + num_elements * per_element_cost

  where the per element cost is (often significantly) smaller than the fixed
  cost and could be amortized over multiple elements.  It consumes a
  PCollection of element type T and produces a PCollection of element type
  List[T].

  This transform attempts to find the best batch size between the minimum
  and maximum parameters by profiling the time taken by (fused) downstream
  operations. For a fixed batch size, set the min and max to be equal.

  Elements are batched per-window and batches emitted in the window
  corresponding to its contents.

  Args:
    min_batch_size: (optional) the smallest number of elements per batch
    max_batch_size: (optional) the largest number of elements per batch
    target_batch_overhead: (optional) a target for fixed_cost / time,
        as used in the formula above
    target_batch_duration_secs: (optional) a target for total time per bundle,
        in seconds
    variance: (optional) the permitted (relative) amount of deviation from the
        (estimated) ideal batch size used to produce a wider base for
        linear interpolation
    clock: (optional) an alternative to time.time for measuring the cost of
        downstream operations (mostly for testing)
  """
  def __init__(
      self,
      min_batch_size=1,
      max_batch_size=10000,
      target_batch_overhead=.05,
      target_batch_duration_secs=1,
      variance=0.25,
      clock=time.time):
    # All tuning parameters are delegated to a shared estimator instance.
    self._batch_size_estimator = _BatchSizeEstimator(
        min_batch_size=min_batch_size,
        max_batch_size=max_batch_size,
        target_batch_overhead=target_batch_overhead,
        target_batch_duration_secs=target_batch_duration_secs,
        variance=variance,
        clock=clock)

  def expand(self, pcoll):
    if getattr(pcoll.pipeline.runner, 'is_streaming', False):
      # Streaming would require stateful processing, which is not implemented.
      raise NotImplementedError("Requires stateful processing (BEAM-2687)")
    elif pcoll.windowing.is_default():
      # Same logic as the window-aware DoFn below, but optimized
      # for the common default (global) windowing case.
      return pcoll | ParDo(
          _GlobalWindowsBatchingDoFn(self._batch_size_estimator))
    else:
      return pcoll | ParDo(_WindowAwareBatchingDoFn(self._batch_size_estimator))
class _IdentityWindowFn(NonMergingWindowFn):
  """Windowing function that preserves existing windows.

  To be used internally with the Reshuffle transform.
  Will raise an exception when used after DoFns that return TimestampedValue
  elements.
  """
  def __init__(self, window_coder):
    """Create a new WindowFn with compatible coder.

    To be applied to PCollections with windows that are compatible with the
    given coder.

    Arguments:
      window_coder: coders.Coder object to be used on windows.
    """
    super(_IdentityWindowFn, self).__init__()
    if window_coder is None:
      raise ValueError('window_coder should not be None')
    self._window_coder = window_coder

  def assign(self, assign_context):
    # Keep each element in the window it already occupies; a missing window
    # indicates an upstream DoFn emitted a bare TimestampedValue.
    existing_window = assign_context.window
    if existing_window is not None:
      return [existing_window]
    raise ValueError(
        'assign_context.window should not be None. '
        'This might be due to a DoFn returning a TimestampedValue.')

  def get_window_coder(self):
    # Coder supplied at construction time; windows are passed through as-is.
    return self._window_coder
@typehints.with_input_types(Tuple[K, V])
@typehints.with_output_types(Tuple[K, V])
class ReshufflePerKey(PTransform):
  """PTransform that returns a PCollection equivalent to its input,
  but operationally provides some of the side effects of a GroupByKey,
  in particular checkpointing, and preventing fusion of the surrounding
  transforms.

  ReshufflePerKey is experimental. No backwards compatibility guarantees.
  """
  def expand(self, pcoll):
    # The input's windowing is saved and restored on the output; internally
    # everything is rewindowed into the global window for the GroupByKey.
    windowing_saved = pcoll.windowing
    if windowing_saved.is_default():
      # In this (common) case we can use a trivial trigger driver
      # and avoid the (expensive) window param.
      globally_windowed = window.GlobalWindows.windowed_value(None)
      MIN_TIMESTAMP = window.MIN_TIMESTAMP

      def reify_timestamps(element, timestamp=DoFn.TimestampParam):
        # Fold the element's timestamp into the value.  MIN_TIMESTAMP is
        # encoded as None so it can be restored via the cheaper
        # globally_windowed path below.
        key, value = element
        if timestamp == MIN_TIMESTAMP:
          timestamp = None
        return key, (value, timestamp)

      def restore_timestamps(element):
        # Re-attach each value's original timestamp (None means the default
        # MIN_TIMESTAMP global-window value).
        key, values = element
        return [
            globally_windowed.with_value((key, value)) if timestamp is None else
            window.GlobalWindows.windowed_value((key, value), timestamp)
            for (value, timestamp) in values
        ]
    else:
      # typing: All conditional function variants must have identical signatures
      def reify_timestamps(  # type: ignore[misc]
          element, timestamp=DoFn.TimestampParam, window=DoFn.WindowParam):
        key, value = element
        # Transport the window as part of the value and restore it later.
        return key, windowed_value.WindowedValue(value, timestamp, [window])

      def restore_timestamps(element):
        # Each grouped value is a WindowedValue carrying its original
        # timestamp and window; re-wrap the (key, value) pair with them.
        key, windowed_values = element
        return [wv.with_value((key, wv.value)) for wv in windowed_values]

    ungrouped = pcoll | Map(reify_timestamps).with_output_types(Any)

    # TODO(BEAM-8104) Using global window as one of the standard window.
    # This is to mitigate the Dataflow Java Runner Harness limitation to
    # accept only standard coders.
    ungrouped._windowing = Windowing(
        window.GlobalWindows(),
        triggerfn=Always(),
        accumulation_mode=AccumulationMode.DISCARDING,
        timestamp_combiner=TimestampCombiner.OUTPUT_AT_EARLIEST)
    result = (
        ungrouped
        | GroupByKey()
        | FlatMap(restore_timestamps).with_output_types(Any))
    # Restore the caller's original windowing on the output.
    result._windowing = windowing_saved
    return result
@typehints.with_input_types(T)
@typehints.with_output_types(T)
class Reshuffle(PTransform):
  """PTransform that returns a PCollection equivalent to its input,
  but operationally provides some of the side effects of a GroupByKey,
  in particular checkpointing, and preventing fusion of the surrounding
  transforms.

  Reshuffle adds a temporary random key to each element, performs a
  ReshufflePerKey, and finally removes the temporary key.

  Reshuffle is experimental. No backwards compatibility guarantees.
  """
  def expand(self, pcoll):
    # type: (pvalue.PValue) -> pvalue.PCollection
    # Random 32-bit keys spread the elements across workers; the keys are
    # stripped again after the per-key reshuffle.
    return (
        pcoll
        | 'AddRandomKeys' >> Map(lambda t: (random.getrandbits(32), t)).
        with_input_types(T).with_output_types(Tuple[int, T])
        | ReshufflePerKey()
        | 'RemoveRandomKeys' >> Map(lambda t: t[1]).with_input_types(
            Tuple[int, T]).with_output_types(T))

  def to_runner_api_parameter(self, unused_context):
    # type: (PipelineContext) -> Tuple[str, None]
    # Reshuffle is a well-known composite: serialize as its URN with no
    # payload.
    return common_urns.composites.RESHUFFLE.urn, None

  @staticmethod
  @PTransform.register_urn(common_urns.composites.RESHUFFLE.urn, None)
  def from_runner_api_parameter(
      unused_ptransform, unused_parameter, unused_context):
    return Reshuffle()
@ptransform_fn
def WithKeys(pcoll, k):
  """PTransform that pairs every element of a PCollection with a key.

  `k` may be either a constant, in which case each value is paired with that
  same key, or a callable, in which case the key is computed from the value.
  The result is a PCollection of (K, V) pairs.
  """
  key_fn = k if callable(k) else (lambda unused_value: k)
  return pcoll | Map(lambda v: (key_fn(v), v))
@experimental()
@typehints.with_input_types(Tuple[K, V])
@typehints.with_output_types(Tuple[K, Iterable[V]])
class GroupIntoBatches(PTransform):
  """PTransform that batches the input into desired batch size.  Elements are
  buffered until they are equal to batch size provided in the argument at
  which point they are output to the output Pcollection.

  Windows are preserved (batches will contain elements from the same window).

  GroupIntoBatches is experimental. Its use case will depend on the runner if
  it has support of States and Timers.
  """
  def __init__(
      self, batch_size, max_buffering_duration_secs=None, clock=time.time):
    """Create a new GroupIntoBatches.

    Arguments:
      batch_size: (required) How many elements should be in a batch
      max_buffering_duration_secs: (optional) How long in seconds at most an
        incomplete batch of elements is allowed to be buffered in the states.
        The duration must be a positive second duration and should be given as
        an int or float. Setting this parameter to zero effectively means no
        buffering limit.
      clock: (optional) an alternative to time.time (mostly for testing)
    """
    self.params = _GroupIntoBatchesParams(
        batch_size, max_buffering_duration_secs)
    self.clock = clock

  def expand(self, pcoll):
    # The heavy lifting happens in a stateful DoFn parameterized by the
    # input's coder and the batching parameters.
    input_coder = coders.registry.get_coder(pcoll)
    return pcoll | ParDo(
        _pardo_group_into_batches(
            input_coder,
            self.params.batch_size,
            self.params.max_buffering_duration_secs,
            self.clock))

  def to_runner_api_parameter(
      self,
      unused_context  # type: PipelineContext
  ):  # type: (...) -> Tuple[str, beam_runner_api_pb2.GroupIntoBatchesPayload]
    return (
        common_urns.group_into_batches_components.GROUP_INTO_BATCHES.urn,
        self.params.get_payload())

  @staticmethod
  @PTransform.register_urn(
      common_urns.group_into_batches_components.GROUP_INTO_BATCHES.urn,
      beam_runner_api_pb2.GroupIntoBatchesPayload)
  def from_runner_api_parameter(unused_ptransform, proto, unused_context):
    return GroupIntoBatches(*_GroupIntoBatchesParams.parse_payload(proto))

  @typehints.with_input_types(Tuple[K, V])
  @typehints.with_output_types(
      typehints.Tuple[
          ShardedKeyType[typehints.TypeVariable(K)],  # type: ignore[misc]
          typehints.Iterable[typehints.TypeVariable(V)]])
  class WithShardedKey(PTransform):
    """A GroupIntoBatches transform that outputs batched elements associated
    with sharded input keys.

    By default, keys are sharded such that the input elements with the same
    key are spread to all available threads executing the transform. Runners
    may override the default sharding to do a better load balancing during
    the execution time.
    """
    def __init__(
        self, batch_size, max_buffering_duration_secs=None, clock=time.time):
      """Create a new GroupIntoBatches with sharded output.

      See ``GroupIntoBatches`` transform for a description of input
      parameters.
      """
      self.params = _GroupIntoBatchesParams(
          batch_size, max_buffering_duration_secs)
      self.clock = clock

    # Process-wide random prefix so shard ids from different processes do not
    # collide.
    _shard_id_prefix = uuid.uuid4().bytes

    def expand(self, pcoll):
      key_type, value_type = pcoll.element_type.tuple_types
      # Re-key each element with a ShardedKey before delegating to the
      # ordinary GroupIntoBatches.
      sharded_pcoll = pcoll | Map(
          lambda key_value: (
              ShardedKey(
                  key_value[0],
                  # Use [uuid, thread id] as the shard id.
                  GroupIntoBatches.WithShardedKey._shard_id_prefix + bytes(
                      threading.get_ident().to_bytes(8, 'big'))),
              key_value[1])).with_output_types(
                  typehints.Tuple[
                      ShardedKeyType[key_type],  # type: ignore[misc]
                      value_type])
      return (
          sharded_pcoll
          | GroupIntoBatches(
              self.params.batch_size,
              self.params.max_buffering_duration_secs,
              self.clock))

    def to_runner_api_parameter(
        self,
        unused_context  # type: PipelineContext
    ):  # type: (...) -> Tuple[str, beam_runner_api_pb2.GroupIntoBatchesPayload]
      return (
          common_urns.composites.GROUP_INTO_BATCHES_WITH_SHARDED_KEY.urn,
          self.params.get_payload())

    @staticmethod
    @PTransform.register_urn(
        common_urns.composites.GROUP_INTO_BATCHES_WITH_SHARDED_KEY.urn,
        beam_runner_api_pb2.GroupIntoBatchesPayload)
    def from_runner_api_parameter(unused_ptransform, proto, unused_context):
      return GroupIntoBatches.WithShardedKey(
          *_GroupIntoBatchesParams.parse_payload(proto))
class _GroupIntoBatchesParams:
"""This class represents the parameters for
:class:`apache_beam.utils.GroupIntoBatches` transform, used to define how
elements should be batched.
"""
def __init__(self, batch_size, max_buffering_duration_secs):
self.batch_size = batch_size
self.max_buffering_duration_secs = (
0
if max_buffering_duration_secs is None else max_buffering_duration_secs)
self._validate()
def __eq__(self, other):
if other is None or not isinstance(other, _GroupIntoBatchesParams):
return False
return (
self.batch_size == other.batch_size and
self.max_buffering_duration_secs == other.max_buffering_duration_secs)
def _validate(self):
assert self.batch_size is not None and self.batch_size > 0, (
'batch_size must be a positive value')
assert (
self.max_buffering_duration_secs is not None and
self.max_buffering_duration_secs >= 0), (
'max_buffering_duration must be a non-negative value')
def get_payload(self):
return beam_runner_api_pb2.GroupIntoBatchesPayload(
batch_size=self.batch_size,
max_buffering_duration_millis=int(
self.max_buffering_duration_secs * 1000))
@staticmethod
def parse_payload(
proto # type: beam_runner_api_pb2.GroupIntoBatchesPayload
):
return proto.batch_size, proto.max_buffering_duration_millis / 1000
def _pardo_group_into_batches(
    input_coder, batch_size, max_buffering_duration_secs, clock=time.time):
  """Returns a stateful DoFn that buffers keyed elements into batches.

  A batch is flushed when it reaches `batch_size`, when the element's window
  expires (watermark timer), or when `max_buffering_duration_secs` elapses
  after the first buffered element (real-time timer, if the limit is > 0).
  """
  # Bag of buffered (key, value) elements, plus a combining count so the
  # batch size can be checked without reading the whole bag.
  ELEMENT_STATE = BagStateSpec('values', input_coder)
  COUNT_STATE = CombiningValueStateSpec('count', input_coder, CountCombineFn())
  WINDOW_TIMER = TimerSpec('window_end', TimeDomain.WATERMARK)
  BUFFERING_TIMER = TimerSpec('buffering_end', TimeDomain.REAL_TIME)

  class _GroupIntoBatchesDoFn(DoFn):
    def process(
        self,
        element,
        window=DoFn.WindowParam,
        element_state=DoFn.StateParam(ELEMENT_STATE),
        count_state=DoFn.StateParam(COUNT_STATE),
        window_timer=DoFn.TimerParam(WINDOW_TIMER),
        buffering_timer=DoFn.TimerParam(BUFFERING_TIMER)):
      # Allowed lateness not supported in Python SDK
      # https://beam.apache.org/documentation/programming-guide/#watermarks-and-late-data
      window_timer.set(window.end)
      element_state.add(element)
      count_state.add(1)
      count = count_state.read()
      if count == 1 and max_buffering_duration_secs > 0:
        # This is the first element in batch. Start counting buffering time if a
        # limit was set.
        buffering_timer.set(clock() + max_buffering_duration_secs)
      if count >= batch_size:
        return self.flush_batch(element_state, count_state, buffering_timer)

    @on_timer(WINDOW_TIMER)
    def on_window_timer(
        self,
        element_state=DoFn.StateParam(ELEMENT_STATE),
        count_state=DoFn.StateParam(COUNT_STATE),
        buffering_timer=DoFn.TimerParam(BUFFERING_TIMER)):
      # The window has expired: emit whatever is buffered.
      return self.flush_batch(element_state, count_state, buffering_timer)

    @on_timer(BUFFERING_TIMER)
    def on_buffering_timer(
        self,
        element_state=DoFn.StateParam(ELEMENT_STATE),
        count_state=DoFn.StateParam(COUNT_STATE),
        buffering_timer=DoFn.TimerParam(BUFFERING_TIMER)):
      # The maximum buffering duration elapsed: emit the partial batch.
      return self.flush_batch(element_state, count_state, buffering_timer)

    def flush_batch(self, element_state, count_state, buffering_timer):
      """Emits the buffered (key, [values]) batch and clears all state."""
      batch = [element for element in element_state.read()]
      if not batch:
        return
      # All buffered elements share a key (state is per-key), so the first
      # element's key is the batch key.
      key, _ = batch[0]
      batch_values = [v for (k, v) in batch]
      element_state.clear()
      count_state.clear()
      buffering_timer.clear()
      yield key, batch_values

  return _GroupIntoBatchesDoFn()
class ToString(object):
  """
  PTransform for converting a PCollection element, KV or PCollection Iterable
  to string.
  """

  # pylint: disable=invalid-name

  @staticmethod
  def Element():
    """
    Transforms each element of the PCollection to a string.
    """
    return 'ElementToString' >> Map(str)

  @staticmethod
  def Iterables(delimiter=None):
    """
    Transforms each item in the iterable of the input of PCollection to a
    string. There is no trailing delimiter.
    """
    delimiter = ',' if delimiter is None else delimiter
    join_items = Map(lambda xs: delimiter.join(str(x) for x in xs))
    typed = join_items.with_input_types(Iterable[Any]).with_output_types(str)
    return 'IterablesToString' >> typed

  # An alias for Iterables.
  Kvs = Iterables
class Reify(object):
  """PTransforms for converting between explicit and implicit form of various
  Beam values."""
  @typehints.with_input_types(T)
  @typehints.with_output_types(T)
  class Timestamp(PTransform):
    """PTransform to wrap a value in a TimestampedValue with its
    associated timestamp."""
    @staticmethod
    def add_timestamp_info(element, timestamp=DoFn.TimestampParam):
      # The element's implicit timestamp is made explicit on the value.
      yield TimestampedValue(element, timestamp)

    def expand(self, pcoll):
      return pcoll | ParDo(self.add_timestamp_info)

  @typehints.with_input_types(T)
  @typehints.with_output_types(T)
  class Window(PTransform):
    """PTransform to convert an element in a PCollection into a tuple of
    (element, timestamp, window), wrapped in a TimestampedValue with its
    associated timestamp."""
    @staticmethod
    def add_window_info(
        element, timestamp=DoFn.TimestampParam, window=DoFn.WindowParam):
      # Both the timestamp and the window become part of the value itself.
      yield TimestampedValue((element, timestamp, window), timestamp)

    def expand(self, pcoll):
      return pcoll | ParDo(self.add_window_info)

  @typehints.with_input_types(Tuple[K, V])
  @typehints.with_output_types(Tuple[K, V])
  class TimestampInValue(PTransform):
    """PTransform to wrap the Value in a KV pair in a TimestampedValue with
    the element's associated timestamp."""
    @staticmethod
    def add_timestamp_info(element, timestamp=DoFn.TimestampParam):
      # Only the value half of the pair is wrapped; the key is untouched.
      key, value = element
      yield (key, TimestampedValue(value, timestamp))

    def expand(self, pcoll):
      return pcoll | ParDo(self.add_timestamp_info)

  @typehints.with_input_types(Tuple[K, V])
  @typehints.with_output_types(Tuple[K, V])
  class WindowInValue(PTransform):
    """PTransform to convert the Value in a KV pair into a tuple of
    (value, timestamp, window), with the whole element being wrapped inside a
    TimestampedValue."""
    @staticmethod
    def add_window_info(
        element, timestamp=DoFn.TimestampParam, window=DoFn.WindowParam):
      key, value = element
      yield TimestampedValue((key, (value, timestamp, window)), timestamp)

    def expand(self, pcoll):
      return pcoll | ParDo(self.add_window_info)
class Regex(object):
  """
  PTransform to use Regular Expression to process the elements in a
  PCollection.
  """

  # Sentinel passed as `group` to find_all() to request all groups.
  ALL = "__regex_all_groups"

  @staticmethod
  def _regex_compile(regex):
    """Return re.compile if the regex has a string value"""
    if isinstance(regex, str):
      regex = re.compile(regex)
    return regex

  @staticmethod
  @typehints.with_input_types(str)
  @typehints.with_output_types(str)
  @ptransform_fn
  def matches(pcoll, regex, group=0):
    """
    Returns the matches (group 0 by default) if zero or more characters at the
    beginning of string match the regular expression. To match the entire
    string, add "$" sign at the end of regex expression.

    Group can be integer value or a string value.

    Args:
      regex: the regular expression string or (re.compile) pattern.
      group: (optional) name/number of the group, it can be integer or a string
        value. Defaults to 0, meaning the entire matched string will be
        returned.
    """
    regex = Regex._regex_compile(regex)

    def _process(element):
      m = regex.match(element)
      if m:
        yield m.group(group)

    return pcoll | FlatMap(_process)

  @staticmethod
  @typehints.with_input_types(str)
  @typehints.with_output_types(List[str])
  @ptransform_fn
  def all_matches(pcoll, regex):
    """
    Returns all matches (groups) if zero or more characters at the beginning
    of string match the regular expression.

    Args:
      regex: the regular expression string or (re.compile) pattern.
    """
    regex = Regex._regex_compile(regex)

    def _process(element):
      m = regex.match(element)
      if m:
        # NOTE(review): m.lastindex is None when the pattern contains no
        # matched groups, which would raise TypeError here -- looks like the
        # pattern is expected to contain groups; confirm against callers.
        yield [m.group(ix) for ix in range(m.lastindex + 1)]

    return pcoll | FlatMap(_process)

  @staticmethod
  @typehints.with_input_types(str)
  @typehints.with_output_types(Tuple[str, str])
  @ptransform_fn
  def matches_kv(pcoll, regex, keyGroup, valueGroup=0):
    """
    Returns the KV pairs if the string matches the regular expression, deriving
    the key & value from the specified group of the regular expression.

    Args:
      regex: the regular expression string or (re.compile) pattern.
      keyGroup: The Regex group to use as the key. Can be int or str.
      valueGroup: (optional) Regex group to use the value. Can be int or str.
        The default value "0" returns entire matched string.
    """
    regex = Regex._regex_compile(regex)

    def _process(element):
      match = regex.match(element)
      if match:
        yield (match.group(keyGroup), match.group(valueGroup))

    return pcoll | FlatMap(_process)

  @staticmethod
  @typehints.with_input_types(str)
  @typehints.with_output_types(str)
  @ptransform_fn
  def find(pcoll, regex, group=0):
    """
    Returns the matches if a portion of the line matches the Regex. Returns
    the entire group (group 0 by default). Group can be integer value or a
    string value.

    Args:
      regex: the regular expression string or (re.compile) pattern.
      group: (optional) name of the group, it can be integer or a string value.
    """
    regex = Regex._regex_compile(regex)

    def _process(element):
      r = regex.search(element)
      if r:
        yield r.group(group)

    return pcoll | FlatMap(_process)

  @staticmethod
  @typehints.with_input_types(str)
  @typehints.with_output_types(Union[List[str], List[Tuple[str, str]]])
  @ptransform_fn
  def find_all(pcoll, regex, group=0, outputEmpty=True):
    """
    Returns the matches if a portion of the line matches the Regex. By default,
    a list of group 0 matches is returned, including empty items. To get all
    groups, pass the `Regex.ALL` flag in the `group` parameter, which returns
    all the groups in tuple format.

    Args:
      regex: the regular expression string or (re.compile) pattern.
      group: (optional) name of the group, it can be integer or a string value.
      outputEmpty: (optional) Should empty be output. True to output empties
        and false if not.
    """
    regex = Regex._regex_compile(regex)

    def _process(element):
      matches = regex.finditer(element)
      if group == Regex.ALL:
        # NOTE(review): m.groups()[0] raises IndexError for patterns with no
        # groups -- this branch appears to assume at least one group; confirm.
        yield [(m.group(), m.groups()[0]) for m in matches
               if outputEmpty or m.groups()[0]]
      else:
        yield [m.group(group) for m in matches if outputEmpty or m.group(group)]

    return pcoll | FlatMap(_process)

  @staticmethod
  @typehints.with_input_types(str)
  @typehints.with_output_types(Tuple[str, str])
  @ptransform_fn
  def find_kv(pcoll, regex, keyGroup, valueGroup=0):
    """
    Returns the matches if a portion of the line matches the Regex. Returns the
    specified groups as the key and value pair.

    Args:
      regex: the regular expression string or (re.compile) pattern.
      keyGroup: The Regex group to use as the key. Can be int or str.
      valueGroup: (optional) Regex group to use the value. Can be int or str.
        The default value "0" returns entire matched string.
    """
    regex = Regex._regex_compile(regex)

    def _process(element):
      matches = regex.finditer(element)
      if matches:
        for match in matches:
          yield (match.group(keyGroup), match.group(valueGroup))

    return pcoll | FlatMap(_process)

  @staticmethod
  @typehints.with_input_types(str)
  @typehints.with_output_types(str)
  @ptransform_fn
  def replace_all(pcoll, regex, replacement):
    """
    Returns the matches if a portion of the line matches the regex and
    replaces all matches with the replacement string.

    Args:
      regex: the regular expression string or (re.compile) pattern.
      replacement: the string to be substituted for each match.
    """
    regex = Regex._regex_compile(regex)
    return pcoll | Map(lambda elem: regex.sub(replacement, elem))

  @staticmethod
  @typehints.with_input_types(str)
  @typehints.with_output_types(str)
  @ptransform_fn
  def replace_first(pcoll, regex, replacement):
    """
    Returns the matches if a portion of the line matches the regex and replaces
    the first match with the replacement string.

    Args:
      regex: the regular expression string or (re.compile) pattern.
      replacement: the string to be substituted for each match.
    """
    regex = Regex._regex_compile(regex)
    return pcoll | Map(lambda elem: regex.sub(replacement, elem, 1))

  @staticmethod
  @typehints.with_input_types(str)
  @typehints.with_output_types(List[str])
  @ptransform_fn
  def split(pcoll, regex, outputEmpty=False):
    """
    Returns the list of strings produced by splitting each element on the
    regular expression. Empty items are dropped by default.

    Args:
      regex: the regular expression string or (re.compile) pattern.
      outputEmpty: (optional) Should empty be output. True to output empties
        and false if not.
    """
    regex = Regex._regex_compile(regex)
    outputEmpty = bool(outputEmpty)

    def _process(element):
      r = regex.split(element)
      if r and not outputEmpty:
        # Drop empty fragments produced by the split.
        r = list(filter(None, r))
      yield r

    return pcoll | FlatMap(_process)
| apache-2.0 |
sdgdsffdsfff/jumpserver | apps/tickets/views.py | 2 | 1446 | from django.views.generic import TemplateView, DetailView
from django.utils.translation import ugettext as _
from common.permissions import PermissionsMixin, IsValidUser
from .models import Ticket
from . import mixins
class TicketListView(PermissionsMixin, TemplateView):
    """Renders the ticket list page for any valid (authenticated) user."""
    template_name = 'tickets/ticket_list.html'
    permission_classes = (IsValidUser,)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Number of still-open tickets assigned to the current user, shown as
        # a badge/counter in the template.
        open_assigned = Ticket.get_assigned_tickets(self.request.user) \
            .filter(status=Ticket.STATUS_OPEN).count()
        context.update({
            'app': _("Tickets"),
            'action': _("Ticket list"),
            # ?assign=1 switches the page to the "assigned to me" view.
            'assign': self.request.GET.get('assign', '0') == '1',
            'assigned_open_count': open_assigned,
        })
        return context
class TicketDetailView(PermissionsMixin, mixins.TicketMixin, DetailView):
    """Renders a single ticket's detail page for any valid user."""
    template_name = 'tickets/ticket_detail.html'
    permission_classes = (IsValidUser,)
    queryset = Ticket.objects.all()

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Only assignees of the ticket may perform actions on it.
        ticket = self.get_object()
        context.update({
            'app': _("Tickets"),
            'action': _("Ticket detail"),
            'has_action_perm': ticket.is_assignee(self.request.user),
        })
        return context
| gpl-2.0 |
jeeftor/alfredToday | src/lib/dateutil/tz/tz.py | 41 | 33867 | # -*- coding: utf-8 -*-
"""
This module offers timezone implementations subclassing the abstract
:py:class:`datetime.tzinfo` type. There are classes to handle tzfile format
files (usually found in :file:`/etc/localtime`, :file:`/usr/share/zoneinfo`,
etc.), TZ environment string (in all known formats), given ranges (with help
from relative deltas), local machine timezone, fixed offset timezone, and UTC
timezone.
"""
import datetime
import struct
import time
import sys
import os
from six import string_types, PY3
from ._common import tzname_in_python2
try:
from .win import tzwin, tzwinlocal
except ImportError:
tzwin = tzwinlocal = None
relativedelta = None
parser = None
rrule = None
ZERO = datetime.timedelta(0)
EPOCHORDINAL = datetime.datetime.utcfromtimestamp(0).toordinal()
class tzutc(datetime.tzinfo):
    """tzinfo subclass representing UTC: zero offset and no DST."""

    def utcoffset(self, dt):
        # UTC never deviates from itself.
        return ZERO

    def dst(self, dt):
        # UTC has no daylight saving time.
        return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        return "UTC"

    def __eq__(self, other):
        # Equal to any other tzutc, and to a tzoffset of exactly zero.
        if isinstance(other, tzutc):
            return True
        return isinstance(other, tzoffset) and other._offset == ZERO

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "%s()" % self.__class__.__name__

    __reduce__ = object.__reduce__
class tzoffset(datetime.tzinfo):
    """tzinfo with a fixed name and a fixed offset given in seconds
    east of UTC; no DST component."""

    def __init__(self, name, offset):
        self._name = name
        self._offset = datetime.timedelta(seconds=offset)

    def utcoffset(self, dt):
        return self._offset

    def dst(self, dt):
        # A fixed-offset zone never observes DST.
        return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        return self._name

    def __eq__(self, other):
        if not isinstance(other, tzoffset):
            return False
        return self._offset == other._offset

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        total_seconds = self._offset.days * 86400 + self._offset.seconds
        return "%s(%s, %s)" % (self.__class__.__name__,
                               repr(self._name), total_seconds)

    __reduce__ = object.__reduce__
class tzlocal(datetime.tzinfo):
    """Timezone of the local machine, derived from the ``time`` module's
    ``timezone``/``altzone``/``daylight`` globals (captured once, at
    construction time)."""

    def __init__(self):
        # time.timezone/altzone are seconds *west* of UTC, hence the
        # sign flip when building the offsets.
        self._std_offset = datetime.timedelta(seconds=-time.timezone)
        if time.daylight:
            self._dst_offset = datetime.timedelta(seconds=-time.altzone)
        else:
            # Zone without DST: both offsets are identical.
            self._dst_offset = self._std_offset

    def utcoffset(self, dt):
        if dt is None:
            return dt
        if self._isdst(dt):
            return self._dst_offset
        else:
            return self._std_offset

    def dst(self, dt):
        if self._isdst(dt):
            return self._dst_offset-self._std_offset
        else:
            return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        # time.tzname is a (std_name, dst_name) pair; _isdst() indexes it.
        return time.tzname[self._isdst(dt)]

    def _isdst(self, dt):
        # We can't use mktime here. It is unstable when deciding if
        # the hour near to a change is DST or not.
        #
        # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
        #                          dt.minute, dt.second, dt.weekday(), 0, -1))
        # return time.localtime(timestamp).tm_isdst
        #
        # The code above yields inconsistent answers for the same wall
        # time near a transition (the same 23:00 is reported as both
        # BRDT and BRST on consecutive calls).
        #
        # Here is a more stable implementation: build the POSIX
        # timestamp by hand from the naive datetime and let localtime()
        # decide.
        timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
                     + dt.hour * 3600
                     + dt.minute * 60
                     + dt.second)
        return time.localtime(timestamp+time.timezone).tm_isdst

    def __eq__(self, other):
        return (isinstance(other, tzlocal) and
                (self._std_offset == other._std_offset and
                 self._dst_offset == other._dst_offset))

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "%s()" % self.__class__.__name__

    __reduce__ = object.__reduce__
class _ttinfo(object):
__slots__ = ["offset", "delta", "isdst", "abbr", "isstd", "isgmt"]
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def __repr__(self):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
def __eq__(self, other):
if not isinstance(other, _ttinfo):
return False
return (self.offset == other.offset and
self.delta == other.delta and
self.isdst == other.isdst and
self.abbr == other.abbr and
self.isstd == other.isstd and
self.isgmt == other.isgmt)
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
state = {}
for name in self.__slots__:
state[name] = getattr(self, name, None)
return state
def __setstate__(self, state):
for name in self.__slots__:
if name in state:
setattr(self, name, state[name])
class tzfile(datetime.tzinfo):
    """Timezone read from a binary tzfile(5)-format database file (the
    format of /etc/localtime and the files under /usr/share/zoneinfo).

    ``fileobj`` is either a path string or an open binary file object;
    the optional ``filename`` overrides the name used in repr() and for
    pickling.

    References:
      http://www.twinsun.com/tz/tz-link.htm
      ftp://ftp.iana.org/tz/tz*.tar.gz
    """

    def __init__(self, fileobj, filename=None):
        file_opened_here = False
        if isinstance(fileobj, string_types):
            # Got a path: open it ourselves (and close it in the
            # finally block below).
            self._filename = fileobj
            fileobj = open(fileobj, 'rb')
            file_opened_here = True
        elif filename is not None:
            self._filename = filename
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = repr(fileobj)

        # From tzfile(5):
        #
        #   The time zone information files used by tzset(3) begin with
        #   the magic characters "TZif" to identify them as time zone
        #   information files, followed by sixteen bytes reserved for
        #   future use, followed by six four-byte values of type long,
        #   written in a ``standard'' byte order (the high-order byte
        #   of the value is written first).
        try:
            if fileobj.read(4).decode() != "TZif":
                raise ValueError("magic not found")

            fileobj.read(16)

            (
                # The number of UTC/local indicators stored in the file.
                ttisgmtcnt,

                # The number of standard/wall indicators stored in the file.
                ttisstdcnt,

                # The number of leap seconds for which data is
                # stored in the file.
                leapcnt,

                # The number of "transition times" for which data
                # is stored in the file.
                timecnt,

                # The number of "local time types" for which data
                # is stored in the file (must not be zero).
                typecnt,

                # The number of characters of "time zone
                # abbreviation strings" stored in the file.
                charcnt,

            ) = struct.unpack(">6l", fileobj.read(24))

            # tzh_timecnt four-byte signed transition times, sorted in
            # ascending order, written in standard byte order.  Each is
            # a time (as returned by time(2)) at which the rules for
            # computing local time change.
            if timecnt:
                self._trans_list = struct.unpack(">%dl" % timecnt,
                                                 fileobj.read(timecnt*4))
            else:
                self._trans_list = []

            # tzh_timecnt one-byte unsigned indices: which of the local
            # time types described below is in effect starting at the
            # same-indexed transition time.
            if timecnt:
                self._trans_idx = struct.unpack(">%dB" % timecnt,
                                                fileobj.read(timecnt))
            else:
                self._trans_idx = []

            # typecnt ttinfo structures, each written as a four-byte
            # tt_gmtoff (seconds to add to UTC), a one-byte tt_isdst
            # and a one-byte tt_abbrind (index into the abbreviation
            # character array read just after).
            ttinfo = []

            for i in range(typecnt):
                ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

            abbr = fileobj.read(charcnt).decode()

            # tzh_leapcnt pairs of four-byte values: (time at which a
            # leap second occurs, total leap seconds to apply after it),
            # sorted ascending by time.
            # Not used, for now (but read anyway for correct file position)
            if leapcnt:
                leap = struct.unpack(">%dl" % (leapcnt*2),
                                     fileobj.read(leapcnt*8))

            # tzh_ttisstdcnt standard/wall indicators, one byte each:
            # whether the transition times were specified as standard
            # or wall-clock time; used for POSIX-style TZ environment
            # variable handling.
            if ttisstdcnt:
                isstd = struct.unpack(">%db" % ttisstdcnt,
                                      fileobj.read(ttisstdcnt))

            # tzh_ttisgmtcnt UTC/local indicators, one byte each:
            # whether the transition times were specified as UTC or
            # local time; also used for POSIX-style TZ handling.
            if ttisgmtcnt:
                isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                      fileobj.read(ttisgmtcnt))

            # ** Everything has been read **
        finally:
            if file_opened_here:
                fileobj.close()

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind = ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            # Abbreviations are NUL-terminated strings inside `abbr`.
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                # Walk transitions backwards to find the most recent
                # standard and DST local time types.
                for i in range(timecnt-1, -1, -1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

    def _find_ttinfo(self, dt, laststd=0):
        """Return the _ttinfo in effect at naive datetime *dt*.

        With *laststd* true, return the last standard-time ttinfo at or
        before *dt* instead (used so that utcoffset()-dst() stays
        constant, as the tzinfo documentation requires)."""
        timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
                     + dt.hour * 3600
                     + dt.minute * 60
                     + dt.second)
        idx = 0
        for trans in self._trans_list:
            if timestamp < trans:
                break
            idx += 1
        else:
            # After the last known transition.
            return self._ttinfo_std
        if idx == 0:
            # Before the first known transition.
            return self._ttinfo_before
        if laststd:
            while idx > 0:
                tti = self._trans_idx[idx-1]
                if not tti.isdst:
                    return tti
                idx -= 1
            else:
                return self._ttinfo_std
        else:
            return self._trans_idx[idx-1]

    def utcoffset(self, dt):
        if dt is None:
            return None
        if not self._ttinfo_std:
            return ZERO
        return self._find_ttinfo(dt).delta

    def dst(self, dt):
        if not self._ttinfo_dst:
            return ZERO
        tti = self._find_ttinfo(dt)
        if not tti.isdst:
            return ZERO

        # The documentation says that utcoffset()-dst() must
        # be constant for every dt.
        return tti.delta-self._find_ttinfo(dt, laststd=1).delta

        # An alternative for that would be:
        #
        # return self._ttinfo_dst.offset-self._ttinfo_std.offset
        #
        # However, this class stores historical changes in the
        # dst offset, so I believe that this wouldn't be the right
        # way to implement this.

    @tzname_in_python2
    def tzname(self, dt):
        if not self._ttinfo_std:
            return None
        return self._find_ttinfo(dt).abbr

    def __eq__(self, other):
        if not isinstance(other, tzfile):
            return False
        return (self._trans_list == other._trans_list and
                self._trans_idx == other._trans_idx and
                self._ttinfo_list == other._ttinfo_list)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self._filename))

    def __reduce__(self):
        # Pickling works by re-reading the file, so the path must still
        # exist at unpickle time.
        if not os.path.isfile(self._filename):
            raise ValueError("Unpickable %s class" % self.__class__.__name__)
        return (self.__class__, (self._filename,))
class tzrange(datetime.tzinfo):
    """Timezone described by abbreviations, fixed offsets (in seconds)
    and optional DST start/end rules given as relativedeltas.

    ``stdabbr``/``stdoffset`` describe standard time, ``dstabbr``/
    ``dstoffset`` daylight saving time.  If *dstabbr* is given but
    *dstoffset* is not, DST is assumed to be standard time plus one
    hour.  If *dstabbr* is given but *start*/*end* are not, US-style
    defaults apply: DST starts the first Sunday of April at 02:00 wall
    time and ends the last Sunday of October at 01:00 standard time.
    """

    def __init__(self, stdabbr, stdoffset=None,
                 dstabbr=None, dstoffset=None,
                 start=None, end=None):
        # relativedelta is imported lazily to avoid a circular import
        # at module load time.
        global relativedelta
        if not relativedelta:
            from dateutil import relativedelta
        self._std_abbr = stdabbr
        self._dst_abbr = dstabbr
        if stdoffset is not None:
            self._std_offset = datetime.timedelta(seconds=stdoffset)
        else:
            self._std_offset = ZERO
        if dstoffset is not None:
            self._dst_offset = datetime.timedelta(seconds=dstoffset)
        elif dstabbr and stdoffset is not None:
            # Default DST shift: one hour ahead of standard time.
            self._dst_offset = self._std_offset+datetime.timedelta(hours=+1)
        else:
            self._dst_offset = ZERO
        if dstabbr and start is None:
            self._start_delta = relativedelta.relativedelta(
                hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
        else:
            self._start_delta = start
        if dstabbr and end is None:
            self._end_delta = relativedelta.relativedelta(
                hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
        else:
            self._end_delta = end

    def utcoffset(self, dt):
        if dt is None:
            return None
        if self._isdst(dt):
            return self._dst_offset
        else:
            return self._std_offset

    def dst(self, dt):
        if self._isdst(dt):
            return self._dst_offset-self._std_offset
        else:
            return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        if self._isdst(dt):
            return self._dst_abbr
        else:
            return self._std_abbr

    def _isdst(self, dt):
        # No start rule means this zone never observes DST.
        if not self._start_delta:
            return False
        year = datetime.datetime(dt.year, 1, 1)
        start = year+self._start_delta
        end = year+self._end_delta
        dt = dt.replace(tzinfo=None)
        # When start > end the DST period wraps around the new year
        # (southern hemisphere).
        if start < end:
            return dt >= start and dt < end
        else:
            return dt >= start or dt < end

    def __eq__(self, other):
        if not isinstance(other, tzrange):
            return False
        return (self._std_abbr == other._std_abbr and
                self._dst_abbr == other._dst_abbr and
                self._std_offset == other._std_offset and
                self._dst_offset == other._dst_offset and
                self._start_delta == other._start_delta and
                self._end_delta == other._end_delta)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "%s(...)" % self.__class__.__name__

    __reduce__ = object.__reduce__
class tzstr(tzrange):
    """Timezone built from a TZ-environment-variable style string (e.g.
    "EST5EDT"), parsed with dateutil.parser._parsetz."""

    def __init__(self, s):
        # parser is imported lazily to avoid a circular import.
        global parser
        if not parser:
            from dateutil import parser
        self._s = s

        res = parser._parsetz(s)
        if res is None:
            raise ValueError("unknown string format")

        # Here we break the compatibility with the TZ variable handling.
        # GMT-3 actually *means* the timezone -3.
        if res.stdabbr in ("GMT", "UTC"):
            res.stdoffset *= -1

        # We must initialize it first, since _delta() needs
        # _std_offset and _dst_offset set. Use False in start/end
        # to avoid building it two times.
        tzrange.__init__(self, res.stdabbr, res.stdoffset,
                         res.dstabbr, res.dstoffset,
                         start=False, end=False)

        if not res.dstabbr:
            self._start_delta = None
            self._end_delta = None
        else:
            self._start_delta = self._delta(res.start)
            if self._start_delta:
                self._end_delta = self._delta(res.end, isend=1)

    def _delta(self, x, isend=0):
        """Convert a parsed DST boundary *x* into a relativedelta.

        *isend* marks the DST->STD boundary, whose time is converted
        back to standard time (see the tzinfo documentation)."""
        kwargs = {}
        if x.month is not None:
            kwargs["month"] = x.month
            if x.weekday is not None:
                kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
                if x.week > 0:
                    kwargs["day"] = 1
                else:
                    kwargs["day"] = 31
            elif x.day:
                kwargs["day"] = x.day
        elif x.yday is not None:
            kwargs["yearday"] = x.yday
        elif x.jyday is not None:
            kwargs["nlyearday"] = x.jyday
        if not kwargs:
            # Default is to start on first sunday of april, and end
            # on last sunday of october.
            if not isend:
                kwargs["month"] = 4
                kwargs["day"] = 1
                kwargs["weekday"] = relativedelta.SU(+1)
            else:
                kwargs["month"] = 10
                kwargs["day"] = 31
                kwargs["weekday"] = relativedelta.SU(-1)
        if x.time is not None:
            kwargs["seconds"] = x.time
        else:
            # Default is 2AM.
            kwargs["seconds"] = 7200
        if isend:
            # Convert to standard time, to follow the documented way
            # of working with the extra hour. See the documentation
            # of the tzinfo class.
            delta = self._dst_offset-self._std_offset
            kwargs["seconds"] -= delta.seconds+delta.days*86400
        return relativedelta.relativedelta(**kwargs)

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self._s))
class _tzicalvtzcomp(object):
def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
tzname=None, rrule=None):
self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
self.tzoffsetdiff = self.tzoffsetto-self.tzoffsetfrom
self.isdst = isdst
self.tzname = tzname
self.rrule = rrule
class _tzicalvtz(datetime.tzinfo):
    """tzinfo built from the components of a single iCalendar VTIMEZONE.

    *comps* is a list of _tzicalvtzcomp objects; the component in effect
    at a given datetime is found via each component's recurrence rule.
    """

    def __init__(self, tzid, comps=None):
        self._tzid = tzid
        # BUGFIX: previously `comps=[]` — a mutable default argument
        # shared across instances.  None-sentinel keeps the same API.
        self._comps = [] if comps is None else comps
        # Small MRU caches mapping naive datetimes to their component.
        self._cachedate = []
        self._cachecomp = []

    def _find_comp(self, dt):
        """Return the component in effect at (naive) datetime *dt*."""
        if len(self._comps) == 1:
            return self._comps[0]

        dt = dt.replace(tzinfo=None)

        try:
            return self._cachecomp[self._cachedate.index(dt)]
        except ValueError:
            pass

        lastcompdt = None
        lastcomp = None

        # The component in effect is the one with the most recent onset
        # at or before dt.
        for comp in self._comps:
            if not comp.isdst:
                # Handle the extra hour in the DST -> STD transition.
                compdt = comp.rrule.before(dt-comp.tzoffsetdiff, inc=True)
            else:
                compdt = comp.rrule.before(dt, inc=True)

            if compdt and (not lastcompdt or lastcompdt < compdt):
                lastcompdt = compdt
                lastcomp = comp

        if not lastcomp:
            # RFC says nothing about what to do when a given
            # time is before the first onset date. We'll look for the
            # first standard component, or the first component, if
            # none is found.
            for comp in self._comps:
                if not comp.isdst:
                    lastcomp = comp
                    break
            else:
                # BUGFIX: was `lastcomp = comp[0]`, which indexes a
                # component object and raises TypeError; the intent is
                # the first component of the list.
                lastcomp = self._comps[0]

        # Maintain the bounded MRU cache.
        self._cachedate.insert(0, dt)
        self._cachecomp.insert(0, lastcomp)

        if len(self._cachedate) > 10:
            self._cachedate.pop()
            self._cachecomp.pop()

        return lastcomp

    def utcoffset(self, dt):
        if dt is None:
            return None
        return self._find_comp(dt).tzoffsetto

    def dst(self, dt):
        comp = self._find_comp(dt)
        if comp.isdst:
            return comp.tzoffsetdiff
        else:
            return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        return self._find_comp(dt).tzname

    def __repr__(self):
        return "<tzicalvtz %s>" % repr(self._tzid)

    __reduce__ = object.__reduce__
class tzical(object):
    """Parse an iCalendar (RFC 5545) file or string containing VTIMEZONE
    definitions and expose them as tzinfo objects via get()/keys().

    *fileobj* is either a path string or an open text file object.
    """

    def __init__(self, fileobj):
        # rrule is imported lazily to avoid a circular import.
        global rrule
        if not rrule:
            from dateutil import rrule

        if isinstance(fileobj, string_types):
            self._s = fileobj
            # ical should be encoded in UTF-8 with CRLF
            fileobj = open(fileobj, 'r')
        elif hasattr(fileobj, "name"):
            self._s = fileobj.name
        else:
            self._s = repr(fileobj)

        self._vtz = {}

        self._parse_rfc(fileobj.read())

    def keys(self):
        """Return the list of TZIDs found in the parsed data."""
        return list(self._vtz.keys())

    def get(self, tzid=None):
        """Return the tzinfo for *tzid*, or the only timezone defined
        when *tzid* is omitted.

        Raises ValueError when *tzid* is omitted and zero or more than
        one timezone is defined."""
        if tzid is None:
            keys = list(self._vtz.keys())
            if len(keys) == 0:
                raise ValueError("no timezones defined")
            elif len(keys) > 1:
                raise ValueError("more than one timezone available")
            tzid = keys[0]
        return self._vtz.get(tzid)

    def _parse_offset(self, s):
        """Parse a UTC offset string like '+0200' or '-051030' into a
        signed number of seconds."""
        s = s.strip()
        if not s:
            raise ValueError("empty offset")
        if s[0] in ('+', '-'):
            signal = (-1, +1)[s[0] == '+']
            s = s[1:]
        else:
            signal = +1
        if len(s) == 4:
            # HHMM
            return (int(s[:2])*3600+int(s[2:])*60)*signal
        elif len(s) == 6:
            # HHMMSS
            return (int(s[:2])*3600+int(s[2:4])*60+int(s[4:]))*signal
        else:
            raise ValueError("invalid offset: "+s)

    def _parse_rfc(self, s):
        """Parse iCalendar text *s* and populate self._vtz with one
        _tzicalvtz per VTIMEZONE block."""
        lines = s.splitlines()
        if not lines:
            raise ValueError("empty string")

        # Unfold continuation lines (leading space) and drop blank ones.
        i = 0
        while i < len(lines):
            line = lines[i].rstrip()
            if not line:
                del lines[i]
            elif i > 0 and line[0] == " ":
                lines[i-1] += line[1:]
                del lines[i]
            else:
                i += 1

        tzid = None
        comps = []
        invtz = False
        comptype = None
        for line in lines:
            if not line:
                continue
            # Property lines look like NAME[;PARM...]:VALUE
            name, value = line.split(':', 1)
            parms = name.split(';')
            if not parms:
                raise ValueError("empty property name")
            name = parms[0].upper()
            parms = parms[1:]
            if invtz:
                if name == "BEGIN":
                    if value in ("STANDARD", "DAYLIGHT"):
                        # Process component
                        pass
                    else:
                        raise ValueError("unknown component: "+value)
                    comptype = value
                    founddtstart = False
                    tzoffsetfrom = None
                    tzoffsetto = None
                    rrulelines = []
                    tzname = None
                elif name == "END":
                    if value == "VTIMEZONE":
                        if comptype:
                            raise ValueError("component not closed: "+comptype)
                        if not tzid:
                            raise ValueError("mandatory TZID not found")
                        if not comps:
                            raise ValueError(
                                "at least one component is needed")
                        # Process vtimezone
                        self._vtz[tzid] = _tzicalvtz(tzid, comps)
                        invtz = False
                    elif value == comptype:
                        if not founddtstart:
                            raise ValueError("mandatory DTSTART not found")
                        if tzoffsetfrom is None:
                            raise ValueError(
                                "mandatory TZOFFSETFROM not found")
                        if tzoffsetto is None:
                            # BUGFIX: the message previously claimed
                            # TZOFFSETFROM was missing (copy/paste slip).
                            raise ValueError(
                                "mandatory TZOFFSETTO not found")
                        # Process component
                        rr = None
                        if rrulelines:
                            rr = rrule.rrulestr("\n".join(rrulelines),
                                                compatible=True,
                                                ignoretz=True,
                                                cache=True)
                        comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
                                              (comptype == "DAYLIGHT"),
                                              tzname, rr)
                        comps.append(comp)
                        comptype = None
                    else:
                        raise ValueError("invalid component end: "+value)
                elif comptype:
                    if name == "DTSTART":
                        rrulelines.append(line)
                        founddtstart = True
                    elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
                        rrulelines.append(line)
                    elif name == "TZOFFSETFROM":
                        if parms:
                            raise ValueError(
                                "unsupported %s parm: %s " % (name, parms[0]))
                        tzoffsetfrom = self._parse_offset(value)
                    elif name == "TZOFFSETTO":
                        if parms:
                            raise ValueError(
                                "unsupported TZOFFSETTO parm: "+parms[0])
                        tzoffsetto = self._parse_offset(value)
                    elif name == "TZNAME":
                        if parms:
                            raise ValueError(
                                "unsupported TZNAME parm: "+parms[0])
                        tzname = value
                    elif name == "COMMENT":
                        pass
                    else:
                        raise ValueError("unsupported property: "+name)
                else:
                    if name == "TZID":
                        if parms:
                            raise ValueError(
                                "unsupported TZID parm: "+parms[0])
                        tzid = value
                    elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
                        pass
                    else:
                        raise ValueError("unsupported property: "+name)
            elif name == "BEGIN" and value == "VTIMEZONE":
                tzid = None
                comps = []
                invtz = True

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self._s))
# Candidate locations of the system timezone database.  On Windows there
# is no tzfile database, so both lists are empty and gettz() falls back
# to tzwin/tzlocal instead.
if sys.platform != "win32":
    TZFILES = ["/etc/localtime", "localtime"]
    TZPATHS = ["/usr/share/zoneinfo", "/usr/lib/zoneinfo", "/etc/zoneinfo"]
else:
    TZFILES = []
    TZPATHS = []
def gettz(name=None):
    """Resolve *name* (or $TZ, or the system default) to a tzinfo object.

    Resolution order:
      1. no name / ":"   -> system tzfile (/etc/localtime, ...) or tzlocal()
      2. absolute path   -> tzfile
      3. zoneinfo search paths, then tzwin (Windows), then the bundled
         dateutil.zoneinfo database
      4. "GMT"/"UTC", names in time.tzname, or a TZ-style offset string

    Returns None when nothing matches.
    """
    tz = None
    if not name:
        try:
            name = os.environ["TZ"]
        except KeyError:
            pass
    if name is None or name == ":":
        for filepath in TZFILES:
            if not os.path.isabs(filepath):
                # Relative entries are searched for in the zoneinfo paths.
                filename = filepath
                for path in TZPATHS:
                    filepath = os.path.join(path, filename)
                    if os.path.isfile(filepath):
                        break
                else:
                    continue
            if os.path.isfile(filepath):
                try:
                    tz = tzfile(filepath)
                    break
                except (IOError, OSError, ValueError):
                    pass
        else:
            # No usable tzfile found: fall back to the C library's idea
            # of local time.
            tz = tzlocal()
    else:
        if name.startswith(":"):
            # POSIX allows TZ values of the form ":Area/City"; strip the
            # leading colon.  BUGFIX: this used to be `name[:-1]`, which
            # chopped off the *last* character instead of the colon.
            name = name[1:]
        if os.path.isabs(name):
            if os.path.isfile(name):
                tz = tzfile(name)
            else:
                tz = None
        else:
            for path in TZPATHS:
                filepath = os.path.join(path, name)
                if not os.path.isfile(filepath):
                    # Zone names may use '_' where the given name has ' '.
                    filepath = filepath.replace(' ', '_')
                    if not os.path.isfile(filepath):
                        continue
                try:
                    tz = tzfile(filepath)
                    break
                except (IOError, OSError, ValueError):
                    pass
            else:
                tz = None
                if tzwin is not None:
                    try:
                        tz = tzwin(name)
                    except WindowsError:
                        tz = None
                if not tz:
                    # Fall back to the timezone database bundled with
                    # dateutil (shadows this function's name on purpose).
                    from dateutil.zoneinfo import gettz
                    tz = gettz(name)
                if not tz:
                    for c in name:
                        # name must have at least one offset to be a tzstr
                        if c in "0123456789":
                            try:
                                tz = tzstr(name)
                            except ValueError:
                                pass
                            break
                    else:
                        if name in ("GMT", "UTC"):
                            tz = tzutc()
                        elif name in time.tzname:
                            tz = tzlocal()
    return tz
# vim:ts=4:sw=4:et
| mit |
Changaco/oh-mainline | vendor/packages/Django/django/contrib/staticfiles/management/commands/findstatic.py | 101 | 1185 | from __future__ import unicode_literals
import os
from optparse import make_option
from django.core.management.base import LabelCommand
from django.utils.encoding import smart_text
from django.contrib.staticfiles import finders
class Command(LabelCommand):
    """Management command that reports where static files resolve to."""

    help = "Finds the absolute paths for the given static file(s)."
    args = "[file ...]"
    label = 'static file'
    option_list = LabelCommand.option_list + (
        make_option('--first', action='store_false', dest='all', default=True,
                    help="Only return the first match for each static file."),
    )

    def handle_label(self, path, **options):
        """Print every location the configured finders resolve *path* to."""
        verbosity = int(options.get('verbosity', 1))
        result = finders.find(path, all=options['all'])
        path = smart_text(path)
        if not result:
            # Nothing matched; stay quiet when verbosity is 0.
            if verbosity >= 1:
                self.stderr.write("No matching file found for '%s'." % path)
            return
        matches = result if isinstance(result, (list, tuple)) else [result]
        output = '\n '.join(smart_text(os.path.realpath(match))
                            for match in matches)
        self.stdout.write("Found '%s' here:\n %s" % (path, output))
| agpl-3.0 |
ipfire/pakfire | src/pakfire/logger.py | 1 | 2606 | #!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
# Copyright (C) 2011 Pakfire development team #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import logging
import sys
import time
class BuildFormatter(logging.Formatter):
    """Log formatter that timestamps records relative to the build start.

    Timestamps are rendered as HH:MM:SS,mmm of elapsed time since this
    formatter was created.
    """

    def __init__(self):
        self._fmt = "[%(asctime)s] %(message)s"
        self.datefmt = None
        # Reference point: when this formatter (i.e. the build) started.
        self.starttime = time.time()

    def converter(self, recordtime):
        """Map an absolute timestamp to a struct_time of elapsed time
        since the build started."""
        return time.gmtime(recordtime - self.starttime)

    def formatTime(self, record, datefmt=None):
        elapsed = self.converter(record.created)
        return "%s,%03d" % (time.strftime("%H:%M:%S", elapsed),
                            record.msecs)
class ConsoleHandler(logging.Handler):
    """Logging handler that writes every record straight to the console.

    DEBUG and ERROR-or-worse records go to stderr, everything else to
    stdout; both streams are flushed immediately after each record.
    """

    def emit(self, record):
        try:
            msg = self.format(record)

            # Select output file: DEBUG and ERROR (or worse) go to
            # stderr, the rest to stdout.
            use_stderr = (record.levelno == logging.DEBUG
                          or record.levelno >= logging.ERROR)
            stream = sys.stderr if use_stderr else sys.stdout

            # Write the output
            stream.write(msg)

            # Immediately flush
            self.flush()
        except Exception:
            self.handleError(record)

    def flush(self):
        sys.stdout.flush()
        sys.stderr.flush()
| gpl-3.0 |
armijnhemel/cleanup-for-discogs | cleanup-discogs-continuous.py | 1 | 63272 | #!/usr/bin/env python3
# Tool to discover 'smells' in the Discogs data via the API. It downloads
# release data and flags releases that need to be fixed.
#
# The checks are (nearly) identical to cleanup-discogs.py
#
# The results that are printed by this script are by no means complete
# or accurate.
#
# Licensed under the terms of the General Public License version 3
#
# SPDX-License-Identifier: GPL-3.0-only
#
# Copyright 2017 - 2019 - Armijn Hemel for Tjaldur Software Governance Solutions
import sys
import os
import re
import datetime
import time
import json
import subprocess
import argparse
import configparser
import tempfile
import requests
import discogssmells
# grab the current year. Make sure to set the clock of your machine
# to the correct date or use NTP!
currentyear = datetime.datetime.utcnow().year
# grab the latest release from the API. Results tend to get cached
# by the Discogs nginx instance for some reason.
def get_latest_release(headers):
    """Return the id of the most recently added release on Discogs.

    Queries the Discogs search API sorted by date_added and returns the
    id of the first result, or None when the request fails or the
    response is unusable.  Results tend to get cached by the Discogs
    nginx instance for some reason.
    """
    latest = 'https://api.discogs.com/database/search?type=release&sort=date_added'
    # A timeout prevents this poller from hanging forever on a stalled
    # connection: requests has *no* default timeout.
    try:
        r = requests.get(latest, headers=headers, timeout=60)
    except requests.exceptions.RequestException:
        return None
    if r.status_code != 200:
        return None
    # now parse the response
    try:
        responsejson = r.json()
    except ValueError:
        # body was not valid JSON
        return None
    if 'results' not in responsejson:
        return None
    if not responsejson['results']:
        # defensive: an empty result list would raise IndexError
        return None
    return responsejson['results'][0]['id']
# convenience method to check if roles are valid
def checkrole(artist, release_id, credits):
    """Return the roles of *artist* that are not valid Discogs credits.

    artist -- dict with a 'role' field: comma separated role names,
              possibly with free-text qualifiers between [ and ]
    release_id -- id of the release (unused; kept for interface
                  stability with the other check helpers)
    credits -- collection of valid role names

    The bracket handling is best-effort and definitely does not catch
    everything: free text can appear halfway through a role name.
    """
    invalidroles = []
    role_field = artist['role']
    if '[' not in role_field:
        for role in (r.strip() for r in role_field.split(',')):
            if role == '':
                continue
            if role not in credits:
                invalidroles.append(role)
    else:
        # Sometimes there is an additional description in the role in
        # between [ and ]; drop those qualifiers and validate whatever
        # remains after the closing bracket.
        for part in role_field.split('['):
            if ']' not in part:
                continue
            # keep only the text after the *last* ']' in this piece
            remainder = part.rsplit(']', 1)[1]
            for role in (r.strip() for r in remainder.split(',')):
                if role == '':
                    continue
                # ugly hack because sometimes the extra data between
                # [ and ] appears halfway the words in a role, leaving
                # a dangling 'By' fragment, sigh.
                if role == 'By':
                    continue
                if role not in credits:
                    invalidroles.append(role)
    return invalidroles
# process the contents of a release
def processrelease(release, config_settings, count, credits, ibuddy, favourites):
    """Run all configured sanity checks on a single Discogs release.

    release -- dict with release data as returned by the Discogs API
    config_settings -- dict with settings read from the configuration file
    count -- running error counter; the updated value is returned
    credits -- collection of valid credit role names
    ibuddy -- optional iBuddy device object for notifications, or None
    favourites -- collection of favourite artist names

    Every detected issue is appended to a message list which is printed
    at the end (and optionally sent via notify-send). Returns the
    updated error counter.

    Fixes over the previous version: undefined name 'attrvalue' in the
    Indian PKD description branch (now 'v'), typo 'reales' (now
    'release'), a stray 'break' outside any loop in the Creative
    Commons notes check, several messages that interpolated the whole
    release dict into the URL instead of the release id, and a missing
    counter increment in the Label Code spelling check.
    """
    # only process entries that have a status of 'Accepted'
    if release['status'] == 'Rejected':
        return count
    elif release['status'] == 'Draft':
        return count
    elif release['status'] == 'Deleted':
        return count
    errormsgs = []
    # store some data that is used by multiple checks
    founddeposito = False
    year = None
    release_id = release['id']
    # check for favourite artist, if defined
    for artist in release['artists']:
        if artist['name'] in favourites:
            if ibuddy is not None:
                ibuddy.executecommand('HEART:WINGSHIGH:RED:GO:SHORTSLEEP:NOHEART:WINGSLOW:GO:SHORTSLEEP:HEART:LEFT::WINGSHIGH::GO:SHORTSLEEP:NOHEART:RIGHT:GO:HEART:GO:BLUE:SHORTSLEEP:WINGSLOW:GO:SHORTSLEEP:RESET')
                ibuddy.reset()
            if config_settings['use_notify_send']:
                count += 1
                errormsgs.append('%8d -- Favourite Artist (%s): https://www.discogs.com/release/%s' % (count, artist['name'], str(release_id)))
    # check for misspellings of Czechoslovak and Czech releases
    # People use 0x115 instead of 0x11B, which look very similar but 0x115
    # is not valid in the Czech alphabet. Check for all data except
    # the YouTube playlist.
    # https://www.discogs.com/group/thread/757556
    # This is important for the following elements:
    # * tracklist (title, subtracks not supported yet)
    # * artist and extraartists (including extraartists in tracklist)
    # * notes
    # * BaOI identifiers (both value and description)
    if config_settings['check_spelling_cs']:
        if 'country' in release:
            if release['country'] == 'Czechoslovakia' or release['country'] == 'Czech Republic':
                for t in release['tracklist']:
                    if chr(0x115) in t['title']:
                        count += 1
                        errormsgs.append('%8d -- Czech character (0x115, tracklist: %s): https://www.discogs.com/release/%s' % (count, t['position'], str(release_id)))
                    if 'extraartists' in t:
                        for artist in t['extraartists']:
                            if chr(0x115) in artist['name']:
                                count += 1
                                errormsgs.append('%8d -- Czech character (0x115, artist name at: %s): https://www.discogs.com/release/%s' % (count, t['position'], str(release_id)))
                if 'artists' in release:
                    for artist in release['artists']:
                        if chr(0x115) in artist['name']:
                            count += 1
                            errormsgs.append('%8d -- Czech character (0x115, artist name: %s): https://www.discogs.com/release/%s' % (count, artist['name'], str(release_id)))
                if 'extraartists' in release:
                    for artist in release['extraartists']:
                        if chr(0x115) in artist['name']:
                            count += 1
                            errormsgs.append('%8d -- Czech character (0x115, artist name: %s): https://www.discogs.com/release/%s' % (count, artist['name'], str(release_id)))
                for i in release['identifiers']:
                    if chr(0x115) in i['value']:
                        count += 1
                        errormsgs.append('%8d -- Czech character (0x115, BaOI): https://www.discogs.com/release/%s' % (count, str(release_id)))
                    if 'description' in i:
                        if chr(0x115) in i['description']:
                            count += 1
                            errormsgs.append('%8d -- Czech character (0x115, BaOI): https://www.discogs.com/release/%s' % (count, str(release_id)))
                if 'notes' in release:
                    if chr(0x115) in release['notes']:
                        count += 1
                        errormsgs.append('%8d -- Czech character (0x115, Notes): https://www.discogs.com/release/%s' % (count, str(release_id)))
    # check credit roles in three places:
    # 1. artists
    # 2. extraartists (release level)
    # 3. extraartists (track level)
    if 'check_credits' in config_settings:
        if config_settings['check_credits']:
            if 'artists' in release:
                for artist in release['artists']:
                    if 'role' in artist:
                        invalidroles = checkrole(artist, release_id, credits)
                        for role in invalidroles:
                            count += 1
                            errormsgs.append('%8d -- Role \'%s\' invalid: https://www.discogs.com/release/%s' % (count, role, str(release_id)))
            if 'extraartists' in release:
                for artist in release['extraartists']:
                    if 'role' in artist:
                        invalidroles = checkrole(artist, release_id, credits)
                        for role in invalidroles:
                            count += 1
                            errormsgs.append('%8d -- Role \'%s\' invalid: https://www.discogs.com/release/%s' % (count, role, str(release_id)))
            for t in release['tracklist']:
                if 'extraartists' in t:
                    for artist in t['extraartists']:
                        if 'role' in artist:
                            invalidroles = checkrole(artist, release_id, credits)
                            for role in invalidroles:
                                count += 1
                                errormsgs.append('%8d -- Role \'%s\' invalid: https://www.discogs.com/release/%s' % (count, role, str(release_id)))
    # check release month and year
    if 'released' in release:
        if config_settings['check_month']:
            if '-' in release['released']:
                monthres = re.search(r'-(\d+)-', release['released'])
                if monthres is not None:
                    monthnr = int(monthres.groups()[0])
                    if monthnr == 0:
                        count += 1
                        errormsgs.append('%8d -- Month 00: https://www.discogs.com/release/%s' % (count, str(release_id)))
                    elif monthnr > 12:
                        count += 1
                        errormsgs.append('%8d -- Month impossible (%d): https://www.discogs.com/release/%s' % (count, monthnr, str(release_id)))
        try:
            year = int(release['released'].split('-', 1)[0])
            # TODO: check for implausible old years
        except ValueError:
            if config_settings['check_year']:
                count += 1
                errormsgs.append('%8d -- Year \'%s\' invalid: https://www.discogs.com/release/%s' % (count, release['released'], str(release_id)))
    # check the tracklist: sequential formats (vinyl, cassette, ...) should
    # not use plain integer positions, and positions should not be reused
    # on single quantity releases.
    tracklistcorrect = True
    tracklistpositions = set()
    formattexts = set()
    if config_settings['check_tracklisting'] and len(release['formats']) == 1:
        formattext = release['formats'][0]['name']
        formattexts.add(formattext)
        formatqty = int(release['formats'][0]['qty'])
        for t in release['tracklist']:
            if tracklistcorrect:
                if formattext in ['Vinyl', 'Cassette', 'Shellac', '8-Track Cartridge']:
                    try:
                        # a purely numeric position is wrong for these formats
                        int(t['position'])
                        count += 1
                        errormsgs.append('%8d -- Tracklisting (%s): https://www.discogs.com/release/%s' % (count, formattext, str(release_id)))
                        tracklistcorrect = False
                        break
                    except Exception:
                        pass
                if formatqty == 1:
                    if t['position'].strip() != '' and t['position'].strip() != '-' and t['type_'] != 'heading' and t['position'] in tracklistpositions:
                        count += 1
                        errormsgs.append('%8d -- Tracklisting reuse (%s, %s): https://www.discogs.com/release/%s' % (count, formattext, t['position'], str(release_id)))
                    tracklistpositions.add(t['position'])
    # various checks for labels
    for l in release['labels']:
        # check for several identifiers being used as catalog numbers
        if 'catno' in l:
            if config_settings['check_label_code']:
                if l['catno'].lower().startswith('lc'):
                    falsepositive = False
                    # American releases on Epic (label 1005 in Discogs)
                    # sometimes start with LC
                    if l['id'] == 1005:
                        falsepositive = True
                    if not falsepositive:
                        if discogssmells.labelcodere.match(l['catno'].lower()) is not None:
                            count += 1
                            errormsgs.append('%8d -- Possible Label Code (in Catalogue Number): https://www.discogs.com/release/%s' % (count, str(release_id)))
            if config_settings['check_deposito']:
                # now check for D.L.
                dlfound = False
                for d in discogssmells.depositores:
                    result = d.search(l['catno'])
                    if result is not None:
                        for depositovalre in discogssmells.depositovalres:
                            if depositovalre.search(l['catno']) is not None:
                                dlfound = True
                                break
                if dlfound:
                    count += 1
                    errormsgs.append('%8d -- Possible Depósito Legal (in Catalogue Number): https://www.discogs.com/release/%s' % (count, str(release_id)))
        if 'name' in l:
            if config_settings['check_label_name']:
                if l['name'] == 'London' and l['id'] == 26905:
                    count += 1
                    errormsgs.append('%8d -- Wrong label (London): https://www.discogs.com/release/%s' % (count, str(release_id)))
    # various checks for the formats
    formattexts = set()
    for f in release['formats']:
        if 'descriptions' in f:
            if 'Styrene' in f['descriptions']:
                pass
        # store the names of the formats. This is useful later for SID code checks
        if 'name' in f:
            formattexts.add(f['name'])
        if 'text' in f:
            if f['text'] != '':
                if config_settings['check_spars_code']:
                    tmpspars = f['text'].lower().strip()
                    for s in ['.', ' ', '•', '·', '[', ']', '-', '|', '/']:
                        tmpspars = tmpspars.replace(s, '')
                    if tmpspars in discogssmells.validsparscodes:
                        count += 1
                        errormsgs.append('%8d -- Possible SPARS Code (in Format): https://www.discogs.com/release/%s' % (count, str(release_id)))
                if config_settings['check_label_code']:
                    if f['text'].lower().startswith('lc'):
                        if discogssmells.labelcodere.match(f['text'].lower()) is not None:
                            count += 1
                            errormsgs.append('%8d -- Possible Label Code (in Format): https://www.discogs.com/release/%s' % (count, str(release_id)))
    # walk through the BaOI identifiers
    for identifier in release['identifiers']:
        v = identifier['value']
        if config_settings['check_creative_commons']:
            if 'creative commons' in v.lower():
                count += 1
                errormsgs.append('%8d -- Creative Commons reference: https://www.discogs.com/release/%s' % (count, str(release_id)))
            if 'description' in identifier:
                if 'creative commons' in identifier['description'].lower():
                    count += 1
                    errormsgs.append('%8d -- Creative Commons reference: https://www.discogs.com/release/%s' % (count, str(release_id)))
        if config_settings['check_spars_code']:
            if identifier['type'] == 'SPARS Code':
                if v.lower() != "none":
                    # Sony format codes
                    # https://www.discogs.com/forum/thread/339244
                    # https://www.discogs.com/forum/thread/358285
                    if v == 'CDC' or v == 'CDM':
                        count += 1
                        errormsgs.append('%8d -- Sony Format Code in SPARS: https://www.discogs.com/release/%s' % (count, str(release_id)))
                    else:
                        tmpspars = v.lower().strip()
                        for s in ['.', ' ', '•', '·', '[', ']', '-', '|', '/']:
                            tmpspars = tmpspars.replace(s, '')
                        if tmpspars not in discogssmells.validsparscodes:
                            count += 1
                            errormsgs.append('%8d -- SPARS Code (format): https://www.discogs.com/release/%s' % (count, str(release_id)))
            else:
                # first check the description free text field
                sparsfound = False
                if 'description' in identifier:
                    for spars in discogssmells.spars_ftf:
                        if spars in identifier['description'].lower():
                            sparsfound = True
                # then also check the value to see if there is a valid SPARS
                if v.lower() in discogssmells.validsparscodes:
                    sparsfound = True
                else:
                    if 'd' in v.lower():
                        tmpspars = v.strip()
                        for s in ['.', ' ', '•', '·', '[', ']', '-', '|', '/']:
                            tmpspars = tmpspars.replace(s, '')
                        if tmpspars in discogssmells.validsparscodes:
                            sparsfound = True
                # print error if some SPARS code reference was found
                if sparsfound:
                    count += 1
                    errormsgs.append('%8d -- SPARS Code (BaOI): https://www.discogs.com/release/%s' % (count, str(release_id)))
        if config_settings['check_label_code']:
            if identifier['type'] == 'Label Code':
                # check how many people use 'O' instead of '0'
                if v.lower().startswith('lc'):
                    if 'O' in identifier['value']:
                        count += 1
                        errormsgs.append('%8d -- Spelling error in Label Code: https://www.discogs.com/release/%s' % (count, str(release_id)))
                        sys.stdout.flush()
                    if discogssmells.labelcodere.match(v.lower()) is None:
                        count += 1
                        errormsgs.append('%8d -- Label Code (value): https://www.discogs.com/release/%s' % (count, str(release_id)))
            else:
                if identifier['type'] == 'Rights Society':
                    if v.lower().startswith('lc'):
                        if discogssmells.labelcodere.match(v.lower()) is not None:
                            count += 1
                            errormsgs.append('%8d -- Label Code (in Rights Society): https://www.discogs.com/release/%s' % (count, str(release_id)))
                elif identifier['type'] == 'Barcode':
                    if v.lower().startswith('lc'):
                        if discogssmells.labelcodere.match(v.lower()) is not None:
                            count += 1
                            errormsgs.append('%8d -- Label Code (in Barcode): https://www.discogs.com/release/%s' % (count, str(release_id)))
                else:
                    if 'description' in identifier:
                        if identifier['description'].lower() in discogssmells.label_code_ftf:
                            count += 1
                            errormsgs.append('%8d -- Label Code: https://www.discogs.com/release/%s' % (count, str(release_id)))
        if config_settings['check_rights_society']:
            if identifier['type'] != 'Rights Society':
                foundrightssociety = False
                for r in discogssmells.rights_societies:
                    if v.replace('.', '') == r or v.replace(' ', '') == r:
                        count += 1
                        foundrightssociety = True
                        if identifier['type'] == 'Barcode':
                            errormsgs.append('%8d -- Rights Society (Barcode): https://www.discogs.com/release/%s' % (count, str(release_id)))
                        else:
                            errormsgs.append('%8d -- Rights Society (BaOI): https://www.discogs.com/release/%s' % (count, str(release_id)))
                        break
                if not foundrightssociety and 'description' in identifier:
                    if identifier['description'].lower() in discogssmells.rights_societies_ftf:
                        count += 1
                        errormsgs.append('%8d -- Rights Society: https://www.discogs.com/release/%s' % (count, str(release_id)))
        # temporary hack, move to own configuration option
        asinstrict = False
        if config_settings['check_asin']:
            if identifier['type'] == 'ASIN':
                if not asinstrict:
                    tmpasin = v.strip().replace('-', '')
                else:
                    tmpasin = v
                # a valid ASIN is 10 characters long
                if not len(tmpasin.split(':')[-1].strip()) == 10:
                    count += 1
                    errormsgs.append('%8d -- ASIN (wrong length): https://www.discogs.com/release/%s' % (count, str(release_id)))
            else:
                if 'description' in identifier:
                    if identifier['description'].lower().startswith('asin'):
                        count += 1
                        errormsgs.append('%8d -- ASIN (BaOI): https://www.discogs.com/release/%s' % (count, str(release_id)))
        if config_settings['check_isrc']:
            if identifier['type'] == 'ISRC':
                # Check the length of ISRC fields. According to the
                # specifications these should be 12 in length. Some ISRC
                # identifiers that have been recorded in the database
                # span a range of tracks. These will be reported as wrong ISRC
                # codes. It is unclear what needs to be done with those.
                # first get rid of cruft
                isrc_tmp = v.strip().upper()
                if isrc_tmp.startswith('ISRC'):
                    isrc_tmp = isrc_tmp.split('ISRC')[-1].strip()
                if isrc_tmp.startswith('CODE'):
                    isrc_tmp = isrc_tmp.split('CODE')[-1].strip()
                # replace a few characters
                isrc_tmp = isrc_tmp.replace('-', '')
                isrc_tmp = isrc_tmp.replace(' ', '')
                isrc_tmp = isrc_tmp.replace('.', '')
                isrc_tmp = isrc_tmp.replace(':', '')
                isrc_tmp = isrc_tmp.replace('–', '')
                if not len(isrc_tmp) == 12:
                    count += 1
                    errormsgs.append('%8d -- ISRC (wrong length): https://www.discogs.com/release/%s' % (count, str(release_id)))
            else:
                if 'description' in identifier:
                    if identifier['description'].lower().startswith('isrc'):
                        count += 1
                        errormsgs.append('%8d -- ISRC Code (BaOI): https://www.discogs.com/release/%s' % (count, str(release_id)))
                    elif identifier['description'].lower().startswith('issrc'):
                        count += 1
                        errormsgs.append('%8d -- ISRC Code (BaOI): https://www.discogs.com/release/%s' % (count, str(release_id)))
                    else:
                        for isrc in discogssmells.isrc_ftf:
                            if isrc in identifier['description'].lower():
                                count += 1
                                errormsgs.append('%8d -- ISRC Code (BaOI): https://www.discogs.com/release/%s' % (count, str(release_id)))
        if identifier['type'] == 'Barcode':
            pass
        # check depósito legal in BaOI
        if config_settings['check_deposito']:
            if 'country' in release:
                if release['country'] == 'Spain':
                    if identifier['type'] == 'Depósito Legal':
                        founddeposito = True
                        if v.strip().endswith('.'):
                            count += 1
                            errormsgs.append('%8d -- Depósito Legal (formatting): https://www.discogs.com/release/%s' % (count, str(release_id)))
                        if year is not None:
                            # now try to find the year
                            depositoyear = None
                            if v.strip().endswith('℗'):
                                count += 1
                                errormsgs.append('%8d -- Depósito Legal (formatting, has ℗): https://www.discogs.com/release/%s' % (count, str(release_id)))
                                # ugly hack, remove ℗ to make at least be able to do some sort of check
                                v = v.strip().rsplit('℗', 1)[0]
                            # several separators, including some Unicode ones
                            for sep in ['-', '–', '/', '.', ' ', '\'', '_']:
                                try:
                                    depositoyeartext = v.strip().rsplit(sep, 1)[-1]
                                    if sep == '.' and len(depositoyeartext) == 3:
                                        continue
                                    if '.' in depositoyeartext:
                                        depositoyeartext = depositoyeartext.replace('.', '')
                                    depositoyear = int(depositoyeartext)
                                    if depositoyear < 100:
                                        # correct the year. This won't work correctly after 2099.
                                        if depositoyear <= currentyear - 2000:
                                            depositoyear += 2000
                                        else:
                                            depositoyear += 1900
                                    break
                                except Exception:
                                    pass
                            # TODO, also allow (year), example: https://www.discogs.com/release/265497
                            if depositoyear is not None:
                                if depositoyear < 1900:
                                    count += 1
                                    errormsgs.append("%8d -- Depósito Legal (impossible year): https://www.discogs.com/release/%s" % (count, str(release_id)))
                                elif depositoyear > currentyear:
                                    count += 1
                                    errormsgs.append("%8d -- Depósito Legal (impossible year): https://www.discogs.com/release/%s" % (count, str(release_id)))
                                elif year < depositoyear:
                                    count += 1
                                    errormsgs.append("%8d -- Depósito Legal (release date earlier): https://www.discogs.com/release/%s" % (count, str(release_id)))
                            else:
                                count += 1
                                errormsgs.append("%8d -- Depósito Legal (year not found): https://www.discogs.com/release/%s" % (count, str(release_id)))
                    elif identifier['type'] == 'Barcode':
                        for depositovalre in discogssmells.depositovalres:
                            if depositovalre.match(v.lower()) is not None:
                                founddeposito = True
                                count += 1
                                errormsgs.append('%8d -- Depósito Legal (in Barcode): https://www.discogs.com/release/%s' % (count, str(release_id)))
                                break
                    else:
                        if v.startswith("Depósito"):
                            founddeposito = True
                            count += 1
                            errormsgs.append('%8d -- Depósito Legal (BaOI): https://www.discogs.com/release/%s' % (count, str(release_id)))
                        elif v.startswith("D.L."):
                            founddeposito = True
                            count += 1
                            errormsgs.append('%8d -- Depósito Legal (BaOI): https://www.discogs.com/release/%s' % (count, str(release_id)))
                        else:
                            if 'description' in identifier:
                                found = False
                                for d in discogssmells.depositores:
                                    result = d.search(identifier['description'].lower())
                                    if result is not None:
                                        found = True
                                        break
                                # sometimes the depósito value itself can be found in the free text field
                                if not found:
                                    for depositovalre in discogssmells.depositovalres:
                                        deposres = depositovalre.match(identifier['description'].lower())
                                        if deposres is not None:
                                            found = True
                                            break
                                if found:
                                    founddeposito = True
                                    count += 1
                                    errormsgs.append('%8d -- Depósito Legal (BaOI): https://www.discogs.com/release/%s' % (count, str(release_id)))
        # temporary hack, move to own configuration option
        mould_sid_strict = False
        if config_settings['check_mould_sid']:
            if identifier['type'] == 'Mould SID Code':
                if v.strip() != 'none':
                    # cleanup first for not so heavy formatting booboos
                    mould_tmp = v.strip().lower().replace(' ', '')
                    mould_tmp = mould_tmp.replace('-', '')
                    # some people insist on using ƒ instead of f
                    mould_tmp = mould_tmp.replace('ƒ', 'f')
                    res = discogssmells.mouldsidre.match(mould_tmp)
                    if res is None:
                        count += 1
                        errormsgs.append('%8d -- Mould SID Code (value): https://www.discogs.com/release/%s' % (count, str(release_id)))
                    else:
                        if mould_sid_strict:
                            mould_split = mould_tmp.split('ifpi', 1)[-1]
                            for ch in ['i', 'o', 's', 'q']:
                                if ch in mould_split[-2:]:
                                    count += 1
                                    errormsgs.append('%8d -- Mould SID Code (strict value): https://www.discogs.com/release/%s' % (count, str(release_id)))
                # rough check to find SID codes for formats other than CD/CD-like
                if len(formattexts) == 1:
                    for fmt in {'Vinyl', 'Cassette', 'Shellac', 'File', 'VHS', 'DCC', 'Memory Stick', 'Edison Disc'}:
                        if fmt in formattexts:
                            count += 1
                            errormsgs.append('%8d -- Mould SID Code (Wrong Format: %s): https://www.discogs.com/release/%s' % (count, fmt, str(release_id)))
                            break
                if year is not None:
                    # SID codes were only introduced in 1994, so earlier
                    # years are certainly wrong
                    if year < 1993:
                        count += 1
                        errormsgs.append('%8d -- SID Code (wrong year): https://www.discogs.com/release/%s' % (count, str(release_id)))
            else:
                if 'description' in identifier:
                    description = identifier['description'].lower()
                    # squash repeated spaces
                    description = re.sub(r'\s+', ' ', description)
                    description = description.strip()
                    if description in ['source identification code', 'sid', 'sid code', 'sid-code']:
                        count += 1
                        errormsgs.append('%8d -- Unspecified SID Code: https://www.discogs.com/release/%s' % (count, str(release_id)))
                    elif description in discogssmells.mouldsids:
                        count += 1
                        errormsgs.append('%8d -- Mould SID Code: https://www.discogs.com/release/%s' % (count, str(release_id)))
        if config_settings['check_mastering_sid']:
            if identifier['type'] == 'Mastering SID Code':
                if v.strip() != 'none':
                    # cleanup first for not so heavy formatting booboos
                    master_tmp = v.strip().lower().replace(' ', '')
                    master_tmp = master_tmp.replace('-', '')
                    # some people insist on using ƒ instead of f
                    master_tmp = master_tmp.replace('ƒ', 'f')
                    res = discogssmells.masteringsidre.match(master_tmp)
                    if res is None:
                        count += 1
                        errormsgs.append('%8d -- Mastering SID Code (value): https://www.discogs.com/release/%s' % (count, str(release_id)))
                    else:
                        # rough check to find SID codes for formats other than CD/CD-like
                        if len(formattexts) == 1:
                            for fmt in {'Vinyl', 'Cassette', 'Shellac', 'File', 'VHS', 'DCC', 'Memory Stick', 'Edison Disc'}:
                                if fmt in formattexts:
                                    count += 1
                                    errormsgs.append('%8d -- Mastering SID Code (Wrong Format: %s): https://www.discogs.com/release/%s' % (count, fmt, str(release_id)))
                if year is not None:
                    if year < 1993:
                        count += 1
                        errormsgs.append('%8d -- SID Code (wrong year): https://www.discogs.com/release/%s' % (count, str(release_id)))
            else:
                if 'description' in identifier:
                    description = identifier['description'].lower()
                    # squash repeated spaces
                    description = re.sub(r'\s+', ' ', description)
                    description = description.strip()
                    if description in ['source identification code', 'sid', 'sid code', 'sid-code']:
                        count += 1
                        errormsgs.append('%8d -- Unspecified SID Code: https://www.discogs.com/release/%s' % (count, str(release_id)))
                    elif description in discogssmells.masteringsids:
                        count += 1
                        errormsgs.append('%8d -- Mastering SID Code: https://www.discogs.com/release/%s' % (count, str(release_id)))
                    elif description in ['sid code matrix', 'sid code - matrix', 'sid code (matrix)', 'sid-code, matrix', 'sid-code matrix', 'sid code (matrix ring)', 'sid code, matrix ring', 'sid code: matrix ring']:
                        count += 1
                        errormsgs.append('%8d -- Possible Mastering SID Code: https://www.discogs.com/release/%s' % (count, str(release_id)))
        if config_settings['check_pkd']:
            if 'country' in release:
                if release['country'] == 'India':
                    if 'pkd' in v.lower() or "production date" in v.lower():
                        if year is not None:
                            # try a few variants
                            pkdres = re.search(r"\d{1,2}/((?:19|20)?\d{2})", v)
                            if pkdres is not None:
                                pkdyear = int(pkdres.groups()[0])
                                if pkdyear < 100:
                                    # correct the year. This won't work correctly after 2099.
                                    if pkdyear <= currentyear - 2000:
                                        pkdyear += 2000
                                    else:
                                        pkdyear += 1900
                                if pkdyear < 1900:
                                    count += 1
                                    errormsgs.append("%8d -- Indian PKD (impossible year): https://www.discogs.com/release/%s" % (count, str(release_id)))
                                elif pkdyear > currentyear:
                                    count += 1
                                    errormsgs.append("%8d -- Indian PKD (impossible year): https://www.discogs.com/release/%s" % (count, str(release_id)))
                                elif year < pkdyear:
                                    count += 1
                                    errormsgs.append("%8d -- Indian PKD (release date earlier): https://www.discogs.com/release/%s" % (count, str(release_id)))
                        else:
                            count += 1
                            errormsgs.append('%8d -- India PKD code (no year): https://www.discogs.com/release/%s' % (count, str(release_id)))
                    else:
                        # now check the description
                        if 'description' in identifier:
                            description = identifier['description'].lower()
                            if 'pkd' in description or "production date" in description:
                                if year is not None:
                                    # try a few variants. NOTE: the original
                                    # code searched an undefined name
                                    # 'attrvalue' here; the identifier value
                                    # is what is meant.
                                    pkdres = re.search(r"\d{1,2}/((?:19|20)?\d{2})", v)
                                    if pkdres is not None:
                                        pkdyear = int(pkdres.groups()[0])
                                        if pkdyear < 100:
                                            # correct the year. This won't work correctly after 2099.
                                            if pkdyear <= currentyear - 2000:
                                                pkdyear += 2000
                                            else:
                                                pkdyear += 1900
                                        if pkdyear < 1900:
                                            count += 1
                                            errormsgs.append("%8d -- Indian PKD (impossible year): https://www.discogs.com/release/%s" % (count, str(release_id)))
                                        elif pkdyear > currentyear:
                                            count += 1
                                            errormsgs.append("%8d -- Indian PKD (impossible year): https://www.discogs.com/release/%s" % (count, str(release_id)))
                                        elif year < pkdyear:
                                            count += 1
                                            errormsgs.append("%8d -- Indian PKD (release date earlier): https://www.discogs.com/release/%s" % (count, str(release_id)))
                                else:
                                    count += 1
                                    errormsgs.append('%8d -- India PKD code (no year): https://www.discogs.com/release/%s' % (count, str(release_id)))
        # check Czechoslovak manufacturing dates
        if config_settings['check_manufacturing_date_cs']:
            # config hack, needs to be in its own configuration option
            strict_cs = True
            if 'country' in release:
                if release['country'] == 'Czechoslovakia':
                    if 'description' in identifier:
                        description = identifier['description'].lower()
                        if 'date' in description:
                            if year is not None:
                                manufacturing_date_res = re.search(r"(\d{2})\s+\d$", identifier['value'].rstrip())
                                if manufacturing_date_res is not None:
                                    manufacturing_year = int(manufacturing_date_res.groups()[0])
                                    if manufacturing_year < 100:
                                        manufacturing_year += 1900
                                    if manufacturing_year > year:
                                        count += 1
                                        errormsgs.append("%8d -- Czechoslovak manufacturing date (release year wrong): https://www.discogs.com/release/%s" % (count, str(release_id)))
                                    # possibly this check makes sense, but not always
                                    elif manufacturing_year < year and strict_cs:
                                        count += 1
                                        errormsgs.append("%8d -- Czechoslovak manufacturing date (release year possibly wrong): https://www.discogs.com/release/%s" % (count, str(release_id)))
    # finally check the notes for some errors
    if 'notes' in release:
        if '카지노' in release['notes']:
            # Korean casino spam that pops up every once in a while
            errormsgs.append('Spam: https://www.discogs.com/release/%s' % str(release_id))
        if 'country' in release:
            if release['country'] == 'Spain':
                if config_settings['check_deposito'] and not founddeposito:
                    # sometimes "deposito legal" can be found in the "notes" section
                    content_lower = release['notes'].lower()
                    for d in discogssmells.depositores:
                        result = d.search(content_lower)
                        if result is not None:
                            count += 1
                            errormsgs.append('%8d -- Depósito Legal (Notes): https://www.discogs.com/release/%s' % (count, str(release_id)))
                            break
        if config_settings['check_html']:
            # see https://support.discogs.com/en/support/solutions/articles/13000014661-how-can-i-format-text-
            if '<a href="http://www.discogs.com/release/' in release['notes'].lower():
                count += 1
                errormsgs.append('%8d -- old link (Notes): https://www.discogs.com/release/%s' % (count, str(release_id)))
        if config_settings['check_creative_commons']:
            ccfound = False
            for cc in discogssmells.creativecommons:
                if cc in release['notes']:
                    count += 1
                    errormsgs.append('%8d -- Creative Commons reference (%s): https://www.discogs.com/release/%s' % (count, cc, str(release_id)))
                    ccfound = True
                    break
            # fall back to a plain substring search if none of the known
            # Creative Commons strings matched
            if not ccfound:
                if 'creative commons' in release['notes'].lower():
                    count += 1
                    errormsgs.append('%8d -- Creative Commons reference: https://www.discogs.com/release/%s' % (count, str(release_id)))
    # report all collected messages
    for e in errormsgs:
        print(e)
        if config_settings['use_notify_send']:
            p = subprocess.Popen(['notify-send', "-t", "3000", "Error", e], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            (stanout, stanerr) = p.communicate()
    sys.stdout.flush()
    return count
def main(argv):
parser = argparse.ArgumentParser()
# the following options are provided on the commandline
parser.add_argument("-c", "--config", action="store", dest="cfg", help="path to configuration file", metavar="FILE")
parser.add_argument("-s", "--startvalue", action="store", dest="startvalue", help="start value for releases", metavar="STARTVALUE")
parser.add_argument("-l", "--latest", action="store", dest="latest_value", help="value for latest release", metavar="LATEST")
args = parser.parse_args()
# some checks for the configuration file
if args.cfg is None:
parser.error("Configuration file missing")
if not os.path.exists(args.cfg):
parser.error("Configuration file does not exist")
config = configparser.ConfigParser()
configfile = open(args.cfg, 'r')
try:
config.read_file(configfile)
except Exception:
print("Cannot read configuration file", file=sys.stderr)
sys.exit(1)
startvalue = None
# check for a startvalue
if args.startvalue != None:
try:
startvalue = int(args.startvalue)
except:
parser.error("start value is not a valid integer, exciting")
latest_release = None
# check for a startvalue
if args.latest_value != None:
try:
latest_release = int(args.latest_value)
except:
parser.error("latest value is not a valid integer, exciting")
# process the configuration file and store settings
config_settings = {}
for section in config.sections():
if section == 'cleanup':
# store settings for depósito legal checks
try:
if config.get(section, 'deposito') == 'yes':
config_settings['check_deposito'] = True
else:
config_settings['check_deposito'] = False
except Exception:
config_settings['check_deposito'] = True
# store settings for rights society checks
try:
if config.get(section, 'rights_society') == 'yes':
config_settings['check_rights_society'] = True
else:
config_settings['check_rights_society'] = False
except Exception:
config_settings['check_rights_society'] = True
# store settings for label code checks
try:
if config.get(section, 'label_code') == 'yes':
config_settings['check_label_code'] = True
else:
config_settings['check_label_code'] = False
except Exception:
config_settings['check_label_code'] = True
# store settings for label name checks
try:
if config.get(section, 'label_name') == 'yes':
config_settings['check_label_name'] = True
else:
config_settings['check_label_name'] = False
except Exception:
config_settings['check_label_name'] = True
# store settings for ISRC checks
try:
if config.get(section, 'isrc') == 'yes':
config_settings['check_isrc'] = True
else:
config_settings['check_isrc'] = False
except Exception:
config_settings['check_isrc'] = True
# store settings for ASIN checks
try:
if config.get(section, 'asin') == 'yes':
config_settings['check_asin'] = True
else:
config_settings['check_asin'] = False
except Exception:
config_settings['check_asin'] = True
# store settings for mastering SID checks
try:
if config.get(section, 'mastering_sid') == 'yes':
config_settings['check_mastering_sid'] = True
else:
config_settings['check_mastering_sid'] = False
except Exception:
config_settings['check_mastering_sid'] = True
# store settings for mould SID checks
try:
if config.get(section, 'mould_sid') == 'yes':
config_settings['check_mould_sid'] = True
else:
config_settings['check_mould_sid'] = False
except Exception:
config_settings['check_mould_sid'] = True
# store settings for SPARS Code checks
try:
if config.get(section, 'spars') == 'yes':
config_settings['check_spars_code'] = True
else:
config_settings['check_spars_code'] = False
except Exception:
config_settings['check_spars_code'] = True
# store settings for Indian PKD checks
try:
if config.get(section, 'pkd') == 'yes':
config_settings['check_pkd'] = True
else:
config_settings['check_pkd'] = False
except Exception:
config_settings['check_pkd'] = True
# check for Czechoslovak manufacturing dates
try:
if config.get(section, 'manufacturing_date_cs') == 'yes':
config_settings['check_manufacturing_date_cs'] = True
else:
config_settings['check_manufacturing_date_cs'] = False
except Exception:
config_settings['check_manufacturing_date_cs'] = True
# check for Czechoslovak and Czech spelling (0x115 used instead of 0x11B)
try:
if config.get(section, 'spelling_cs') == 'yes':
config_settings['check_spelling_cs'] = True
else:
config_settings['check_spelling_cs'] = False
except Exception:
config_settings['check_spelling_cs'] = True
# store settings for tracklisting checks, default True
try:
if config.get(section, 'tracklisting') == 'yes':
config_settings['check_tracklisting'] = True
else:
config_settings['check_tracklisting'] = False
except Exception:
config_settings['check_tracklisting'] = True
# store settings for credits list checks
try:
if config.get(section, 'credits') == 'yes':
creditsfile = config.get(section, 'creditsfile')
if os.path.exists(creditsfile):
config_settings['creditsfile'] = creditsfile
config_settings['check_credits'] = True
else:
config_settings['check_credits'] = False
except Exception:
config_settings['check_credits'] = False
# store settings for URLs in Notes checks
try:
if config.get(section, 'html') == 'yes':
config_settings['check_html'] = True
else:
config_settings['check_html'] = False
except Exception:
config_settings['check_html'] = True
# month is 00 check: default is False
try:
if config.get(section, 'month') == 'yes':
config_settings['check_month'] = True
else:
config_settings['check_month'] = False
except Exception:
config_settings['check_month'] = False
# year is wrong check: default is False
try:
if config.get(section, 'year') == 'yes':
config_settings['check_year'] = True
else:
config_settings['check_year'] = False
except Exception:
config_settings['check_year'] = False
# reporting all: default is False
try:
if config.get(section, 'reportall') == 'yes':
config_settings['reportall'] = True
else:
config_settings['reportall'] = False
except Exception:
config_settings['reportall'] = False
# debug: default is False
try:
if config.get(section, 'debug') == 'yes':
config_settings['debug'] = True
else:
config_settings['debug'] = False
except Exception:
config_settings['debug'] = False
# report creative commons references: default is False
try:
if config.get(section, 'creative_commons') == 'yes':
config_settings['check_creative_commons'] = True
else:
config_settings['check_creative_commons'] = False
except Exception:
config_settings['check_creative_commons'] = False
elif section == 'api':
# data directory to store JSON files
try:
storedir = config.get(section, 'storedir')
if not os.path.exists(os.path.normpath(storedir)):
config_settings['storedir'] = None
else:
# test if the directory is writable
testfile = tempfile.mkstemp(dir=storedir)
os.fdopen(testfile[0]).close()
os.unlink(testfile[1])
config_settings['storedir'] = storedir
except Exception:
config_settings['storedir'] = None
break
try:
token = config.get(section, 'token')
config_settings['token'] = token
except Exception:
config_settings['token'] = None
try:
username = config.get(section, 'username')
config_settings['username'] = username
except Exception:
config_settings['username'] = None
# skipdownloaded: default is False
config_settings['skipdownloaded'] = False
try:
if config.get(section, 'skipdownloaded') == 'yes':
config_settings['skipdownloaded'] = True
except Exception:
pass
# skip404: default is True
config_settings['skip404'] = True
try:
if config.get(section, 'skip404') == 'yes':
config_settings['skip404'] = True
else:
config_settings['skip404'] = False
except Exception:
pass
# record404: default is True
config_settings['record404'] = True
try:
if config.get(section, 'record404') == 'yes':
config_settings['record404'] = True
else:
config_settings['record404'] = False
except Exception:
pass
# specify location of 404 file
try:
release404 = os.path.normpath(config.get(section, '404file'))
config_settings['404file'] = release404
except:
pass
# specify whether or not notify-send (Linux desktops
# should be used or not. Not recommended.
config_settings['use_notify_send'] = True
try:
if config.get(section, 'notify') == 'yes':
config_settings['use_notify_send'] = True
else:
config_settings['use_notify_send'] = False
except Exception:
pass
if config_settings['use_notify_send']:
try:
p = subprocess.Popen(['notify-send', "-t", "3000", "Test for notify-send"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stanout, stanerr) = p.communicate()
except Exception:
config_settings['use_notify_send'] = False
configfile.close()
if config_settings['storedir'] is None:
print("Data store directory non-existent or not writable, exiting.", file=sys.stderr)
sys.exit(1)
if config_settings['token'] is None:
print("Token not specified, exiting.", file=sys.stderr)
sys.exit(1)
if config_settings['username'] is None:
print("Discogs user name not specified, exiting.", file=sys.stderr)
sys.exit(1)
# a list of accepted roles. This is an external file, generated with extractcredits.py
# from the 'helper-scripts' directory.
credits = set()
if 'check_credits' in config_settings:
if config_settings['check_credits']:
creditsfile = open(config_settings['creditsfile'], 'r')
credits = set(map(lambda x: x.strip(), creditsfile.readlines()))
creditsfile.close()
# a file with release numbers that give a 404 error
# This needs more work
if config_settings['skip404']:
if '404file' in config_settings:
if not os.path.isabs(config_settings['404file']):
release404filename = os.path.join(config_settings['storedir'], config_settings['404file'])
if not os.path.exists(release404filename):
release404file = open(release404filename, 'w')
release404file.close()
else:
release404filename = config_settings['404file']
else:
# simply create the file
pass
# use a (somewhat) exponential backoff in case too many requests have been made
ratelimitbackoff = 5
# set the User Agent and Authorization header for each user request
useragentstring = "DiscogsCleanupForUser-%s/0.1" % config_settings['username']
headers = {'user-agent': useragentstring,
'Authorization': 'Discogs token=%s' % config_settings['token']
}
if latest_release is None:
latest_release = get_latest_release(headers)
if latest_release is None:
print("Something went wrong, try again later", file=sys.stderr)
sys.exit(1)
# if no start value has been provided start with the latest from the
# Discogs website.
if startvalue is None:
startvalue = latest_release
# populate a set with all the 404s that were found.
skip404s = set()
count = 0
if config_settings['skip404']:
release404file = open(release404filename, 'r')
for l in release404file:
# needs to be made more robust
skip404s.add(int(l.strip()))
release404file.close()
# now open again for writing, so new 404 errors can be
# stored.
release404file = open(release404filename, 'a')
# This is just something very silly: if you have an iBuddy device and
# have the corresponding Python module installed it will respond to
# data it finds (currently only favourite artists).
#
# https://github.com/armijnhemel/py3buddy
#
# Not recommended.
ibuddy_enabled = False
try:
import py3buddy
ibuddy_enabled = True
except:
pass
ibuddy = None
if ibuddy_enabled:
ibuddy_config = {}
ibuddy = py3buddy.iBuddy(ibuddy_config)
if ibuddy.dev is None:
ibuddy = None
ibuddy_enabled = False
# example:
#favourites = set(['Bob Dylan', 'Iron Maiden', 'The Beatles'])
favourites = set()
newsleep = 600
# now start a big loop
# https://www.discogs.com/developers/#page:authentication
while True:
for releasenr in range(startvalue, latest_release+1):
if startvalue == latest_release:
break
targetfilename = os.path.join(storedir, "%d" % (releasenr//1000000), "%d.json" % releasenr)
os.makedirs(os.path.join(storedir, "%d" % (releasenr//1000000)), exist_ok=True)
if config_settings['skip404']:
if releasenr in skip404s:
continue
if config_settings['skipdownloaded']:
if os.path.exists(targetfilename):
if os.stat(targetfilename).st_size != 0:
responsejsonfile = open(targetfilename, 'r')
responsejson = json.loads(responsejsonfile.read())
responsejsonfile.close()
count = processrelease(responsejson, config_settings, count, credits, ibuddy, favourites)
continue
print("downloading: %d" % releasenr, file=sys.stderr)
r = requests.get('https://api.discogs.com/releases/%d' % releasenr, headers=headers)
# now first check the headers to see if it is OK to do more requests
if r.status_code != 200:
if r.status_code == 404:
print("%d" % releasenr, file=release404file)
release404file.flush()
if r.status_code == 429:
if 'Retry-After' in r.headers:
try:
retryafter = int(r.headers['Retry-After'])
print("Rate limiting, sleeping for %d seconds" % retryafter, file=sys.stderr)
time.sleep(retryafter)
sys.stderr.flush()
except:
print("Rate limiting, sleeping for %d seconds" % 60, file=sys.stderr)
time.sleep(60)
sys.stderr.flush()
else:
print("Rate limiting, sleeping for %d seconds" % 60, file=sys.stderr)
time.sleep(60)
sys.stderr.flush()
# TODO: the current release will not have been downloaded and processed
continue
# in case there is no 429 response check the headers
if 'X-Discogs-Ratelimit-Remaining' in r.headers:
ratelimit = int(r.headers['X-Discogs-Ratelimit-Remaining'])
if ratelimit == 0:
# no more requests are allowed, so sleep for some
# time, max 60 seconds
time.sleep(ratelimitbackoff)
print("Rate limiting, sleeping for %d seconds" % ratelimitbackoff, file=sys.stderr)
sys.stderr.flush()
if ratelimitbackoff < 60:
ratelimitbackoff = min(60, ratelimitbackoff * 2)
else:
ratelimitbackoff = 5
# now process the response. This should be JSON, so decode it,
# and also write the JSON data to a separate file for offline
# processing (if necessary).
try:
responsejson = r.json()
jsonreleasefile = open(targetfilename, 'w')
jsonreleasefile.write(r.text)
jsonreleasefile.close()
except:
# response doesn't contain JSON, so something is wrong.
# sleep a bit then continue
time.sleep(2)
continue
# now process the JSON content
count = processrelease(responsejson, config_settings, count, credits, ibuddy, favourites)
# be gentle for Discogs and sleep
time.sleep(0.2)
sys.stderr.flush()
# now set startvalue to latest_release
startvalue = latest_release
# and find the newest release again
print("Grabbing new data", file=sys.stderr)
latest_release = get_latest_release(headers)
if latest_release is None:
print("Something went wrong, try again later", file=sys.stderr)
break
if latest_release < startvalue:
pass
print("Latest = %d" % latest_release, file=sys.stderr)
print("Sleeping for %d seconds" % newsleep, file=sys.stderr)
sys.stderr.flush()
# sleep for ten minutes to make sure some new things
# have been added to Discogs
time.sleep(newsleep)
release404file.close()
# Script entry point: forward the raw command-line arguments to main(),
# which is defined earlier in this file.
if __name__ == "__main__":
    main(sys.argv)
| gpl-3.0 |
mrshelly/openerp71313 | openerp/addons/marketing/__init__.py | 55 | 1073 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nydehi/google-appengine-wx-launcher | launcher/log_console_unittest.py | 28 | 2208 | #!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unittests for log_console.py"""
import unittest
import wx
import launcher
class LogConsoleTest(unittest.TestCase):
  """Tests for the show/hide/destroy lifecycle of launcher.LogConsole."""

  def ConfirmedShow(self, doshow):
    """Dropped into the Console so we can insure Show(False) has been called."""
    # Record only hide requests; Show(True) is irrelevant to these tests.
    if not doshow:
      self.did_hide = True

  def ConfirmedDestroy(self):
    """Dropped into the Console so we can insure Destroy() has been called."""
    self.did_destroy = True

  def setUp(self):
    # Must always create a wx.App first
    self.app = wx.PySimpleApp()

  def testBasics(self):
    """A LogConsole must remember the project it was created for."""
    project = launcher.Project('path', 8000, 'name')
    lc = launcher.LogConsole(project)
    self.assertEqual(project, lc.project)

  def testCloseHandler(self):
    """Test our close handler (if not force, hide window and save for later)"""
    project = launcher.Project('path', 8000, 'name')
    lc = launcher.LogConsole(project)
    self.did_hide = False
    self.did_destroy = False
    orig_show = lc.Show
    orig_destroy = lc.Destroy
    # Override some methods so we can track behavior
    lc.Show = self.ConfirmedShow
    lc.Destroy = self.ConfirmedDestroy
    try:
      # Call our wx.EVT_CLOSE handler; CAN be veto'ed -> hides, not destroyed
      lc.Close(force=False)
      self.assertTrue(self.did_hide)
      self.assertFalse(self.did_destroy)
      self.did_hide = False
      self.did_destroy = False
      # Call our wx.EVT_CLOSE handler; CANNOT be veto'ed -> destroyed
      lc.Close(force=True)
      self.assertFalse(self.did_hide)
      self.assertTrue(self.did_destroy)
    finally:
      # Restore the original methods even if an assertion above failed,
      # so the console can be torn down normally.
      lc.Show = orig_show
      lc.Destroy = orig_destroy
# Allow running this test module directly from the command line.
if __name__ == "__main__":
  unittest.main()
| apache-2.0 |
Neural-Network/TicTacToe | pybrain/supervised/evolino/population.py | 25 | 6360 | __author__ = 'Michael Isik'
from pybrain.supervised.evolino.gpopulation import Population, SimplePopulation
from pybrain.supervised.evolino.gfilter import Randomization
from pybrain.supervised.evolino.individual import EvolinoIndividual, EvolinoSubIndividual
from pybrain.tools.kwargsprocessor import KWArgsProcessor
from copy import copy
from random import randrange
class EvolinoPopulation(Population):
    """ Evolino's population class.

        EvolinoIndividuals aren't stored directly, but there is a list of
        subpopulations.
        These subpopulations are used to generate EvolinoIndividuals on demand.

        On initialization, a prototype individual must be supplied. Its genome
        should be a list of chromosomes. A chromosome should be a list of floats.
        A subpopulation of size subPopulationSize is created for each of these
        chromosomes.

        :key nCombinations: Denotes the number of times each subindividual should
                            be built into an individual. default=1
        :key valueInitializer:
    """
    def __init__(self, individual, subPopulationSize, nCombinations=1, valueInitializer=Randomization(-0.1, 0.1), **kwargs):
        """ :key individual: A prototype individual which is used to determine
                             the structure of the genome.
            :key subPopulationSize: integer describing the size of the subpopulations
        """
        # NOTE(review): the default valueInitializer is constructed once at
        # class-definition time and shared between calls -- assumed safe
        # because it only appears to be applied, not mutated; confirm.
        Population.__init__(self)

        self._subPopulations = []

        self.nCombinations = nCombinations

        ap = KWArgsProcessor(self, kwargs)
        ap.add('verbosity', default=0)

        # One subpopulation per chromosome of the prototype's genome.
        genome = individual.getGenome()
        for chromosome in genome:
            self._subPopulations.append(
                EvolinoSubPopulation(chromosome, subPopulationSize, valueInitializer))

    def getIndividuals(self):
        """ Returns a set of individuals of type EvolinoIndividual. The individuals
            are generated on the fly. Note that each subpopulation has the same size.
            So the number of resulting EvolinoIndividuals is subPopulationSize,
            since each chromosome of each subpopulation will be assembled once.

            The subpopulation container is a sequence with strict order. This
            sequence is iterated subPopulationSize times. In each iteration
            one random EvolinoSubIndividual is taken from each sub population.
            After each iteration the resulting sequence of sub individuals
            is supplied to the constructor of a new EvolinoIndividual.
            All EvolinoIndividuals are collected in a set, which is finally returned.
        """
        assert len(self._subPopulations)

        individuals = set()
        for _ in range(self.nCombinations):
            # Copy each subpopulation's individuals into fresh lists so the
            # destructive pop() below does not touch the subpopulations.
            subIndividualsList = [ list(sp.getIndividuals()) for sp in self._subPopulations ]

            nIndividuals = len(subIndividualsList[0])

            for _ in range(nIndividuals):
                subIndividualCombination = []
                # Draw one random sub-individual, without replacement, from
                # each subpopulation's remaining pool.
                for subIndividuals in subIndividualsList:
                    sub_individual = subIndividuals.pop(randrange(len(subIndividuals)))
                    subIndividualCombination.append(sub_individual)
                individuals.add(EvolinoIndividual(subIndividualCombination))

        return individuals

    def getSubPopulations(self):
        """ Returns a shallow copy of the list of subpopulation. """
        return copy(self._subPopulations)

    def setIndividualFitness(self, individual, fitness):
        """ The fitness value is not stored directly inside this population,
            but is propagated to the subpopulations of all the subindividuals
            of which the individual consists of.

            The individual's fitness value is only adjusted if its bigger than
            the old value.

            To reset these values use clearFitness().
        """
        # additive fitness distribution (alternative strategy, kept for reference)
        # subIndividuals = individual.getSubIndividuals()
        # for i,sp in enumerate(self._subPopulations):
        #     sp.addIndividualFitness( subIndividuals[i], fitness )

        # max fitness distribution: a sub-individual keeps the best fitness
        # observed for any combined individual it was part of.
        subIndividuals = individual.getSubIndividuals()
        for i, sp in enumerate(self._subPopulations):
            sub_individual = subIndividuals[i]
            old_fitness = sp.getIndividualFitness(sub_individual)
            if old_fitness < fitness:
                sp.setIndividualFitness(sub_individual, fitness)

    def clearFitness(self):
        """ Clears all fitness values of all subpopulations. """
        for sp in self._subPopulations:
            sp.clearFitness()
class EvolinoSubPopulation(SimplePopulation):
    """ A single Evolino subpopulation.

        Mostly a SimplePopulation of EvolinoSubIndividuals with a few
        extensions. A prototype individual is built from the supplied
        chromosome, cloned until the population holds maxNIndividuals
        members, and the clones' genomes are then randomized by the
        value initializer.
    """
    def __init__(self, chromosome, maxNIndividuals, valueInitializer=Randomization(-0.1, 0.1), **kwargs):
        """ :key chromosome: The prototype chromosome
            :key maxNIndividuals: The maximum allowed number of individuals
        """
        SimplePopulation.__init__(self)
        self._prototype = EvolinoSubIndividual(chromosome)
        self._maxNIndividuals = maxNIndividuals
        self._valueInitializer = valueInitializer

        self.setArgs(**kwargs)

        # Populate with clones of the prototype, then randomize their genomes.
        remaining = maxNIndividuals
        while remaining > 0:
            self.addIndividual(self._prototype.copy())
            remaining -= 1

        self._valueInitializer.apply(self)

    def setArgs(self, **kwargs):
        # Only attributes that already exist may be overridden; the getattr
        # probe raises AttributeError for unknown keyword names.
        for name in kwargs:
            getattr(self, name)
            setattr(self, name, kwargs[name])

    def getMaxNIndividuals(self):
        """ Returns the maximum allowed number of individuals """
        return self._maxNIndividuals

    def addIndividualFitness(self, individual, fitness):
        """ Add fitness to the individual's fitness value.

            :key fitness: a float value denoting the fitness
        """
        self._fitness[individual] += fitness
| bsd-3-clause |
wangyum/spark | python/pyspark/sql/window.py | 23 | 12863 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark import since, SparkContext
from pyspark.sql.column import _to_seq, _to_java_column
__all__ = ["Window", "WindowSpec"]
def _to_java_cols(cols):
    """Convert a ``*cols`` argument into a JVM ``Seq`` of Java columns.

    A single list argument is unwrapped and treated as the column list
    itself, so callers may pass either ``f(a, b)`` or ``f([a, b])``.
    """
    active_sc = SparkContext._active_spark_context
    columns = cols[0] if len(cols) == 1 and isinstance(cols[0], list) else cols
    return _to_seq(active_sc, columns, _to_java_column)
class Window(object):
    """
    Utility functions for defining window in DataFrames.

    .. versionadded:: 1.4

    Notes
    -----
    When ordering is not defined, an unbounded window frame (rowFrame,
    unboundedPreceding, unboundedFollowing) is used by default. When ordering is defined,
    a growing window frame (rangeFrame, unboundedPreceding, currentRow) is used by default.

    Examples
    --------
    >>> # ORDER BY date ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
    >>> window = Window.orderBy("date").rowsBetween(Window.unboundedPreceding, Window.currentRow)

    >>> # PARTITION BY country ORDER BY date RANGE BETWEEN 3 PRECEDING AND 3 FOLLOWING
    >>> window = Window.orderBy("date").partitionBy("country").rangeBetween(-3, 3)
    """

    # Bounds of a 64-bit signed Java long; the JVM side stores frame
    # boundaries as longs, so Python-side bounds are clamped to this range.
    _JAVA_MIN_LONG = -(1 << 63)  # -9223372036854775808
    _JAVA_MAX_LONG = (1 << 63) - 1  # 9223372036854775807
    _PRECEDING_THRESHOLD = max(-sys.maxsize, _JAVA_MIN_LONG)
    _FOLLOWING_THRESHOLD = min(sys.maxsize, _JAVA_MAX_LONG)

    # Canonical sentinel values for frame boundaries.
    unboundedPreceding = _JAVA_MIN_LONG

    unboundedFollowing = _JAVA_MAX_LONG

    currentRow = 0

    @staticmethod
    @since(1.4)
    def partitionBy(*cols):
        """
        Creates a :class:`WindowSpec` with the partitioning defined.
        """
        sc = SparkContext._active_spark_context
        jspec = sc._jvm.org.apache.spark.sql.expressions.Window.partitionBy(_to_java_cols(cols))
        return WindowSpec(jspec)

    @staticmethod
    @since(1.4)
    def orderBy(*cols):
        """
        Creates a :class:`WindowSpec` with the ordering defined.
        """
        sc = SparkContext._active_spark_context
        jspec = sc._jvm.org.apache.spark.sql.expressions.Window.orderBy(_to_java_cols(cols))
        return WindowSpec(jspec)

    @staticmethod
    def rowsBetween(start, end):
        """
        Creates a :class:`WindowSpec` with the frame boundaries defined,
        from `start` (inclusive) to `end` (inclusive).

        Both `start` and `end` are relative positions from the current row.
        For example, "0" means "current row", while "-1" means the row before
        the current row, and "5" means the fifth row after the current row.

        We recommend users use ``Window.unboundedPreceding``, ``Window.unboundedFollowing``,
        and ``Window.currentRow`` to specify special boundary values, rather than using integral
        values directly.

        A row based boundary is based on the position of the row within the partition.
        An offset indicates the number of rows above or below the current row, the frame for the
        current row starts or ends. For instance, given a row based sliding frame with a lower bound
        offset of -1 and a upper bound offset of +2. The frame for row with index 5 would range from
        index 4 to index 7.

        .. versionadded:: 2.1.0

        Parameters
        ----------
        start : int
            boundary start, inclusive.
            The frame is unbounded if this is ``Window.unboundedPreceding``, or
            any value less than or equal to -9223372036854775808.
        end : int
            boundary end, inclusive.
            The frame is unbounded if this is ``Window.unboundedFollowing``, or
            any value greater than or equal to 9223372036854775807.

        Examples
        --------
        >>> from pyspark.sql import Window
        >>> from pyspark.sql import functions as func
        >>> from pyspark.sql import SQLContext
        >>> sc = SparkContext.getOrCreate()
        >>> sqlContext = SQLContext(sc)
        >>> tup = [(1, "a"), (1, "a"), (2, "a"), (1, "b"), (2, "b"), (3, "b")]
        >>> df = sqlContext.createDataFrame(tup, ["id", "category"])
        >>> window = Window.partitionBy("category").orderBy("id").rowsBetween(Window.currentRow, 1)
        >>> df.withColumn("sum", func.sum("id").over(window)).sort("id", "category", "sum").show()
        +---+--------+---+
        | id|category|sum|
        +---+--------+---+
        |  1|       a|  2|
        |  1|       a|  3|
        |  1|       b|  3|
        |  2|       a|  2|
        |  2|       b|  5|
        |  3|       b|  3|
        +---+--------+---+
        """
        # Clamp extreme user-supplied bounds to the canonical unbounded
        # sentinels before handing them to the JVM.
        if start <= Window._PRECEDING_THRESHOLD:
            start = Window.unboundedPreceding
        if end >= Window._FOLLOWING_THRESHOLD:
            end = Window.unboundedFollowing
        sc = SparkContext._active_spark_context
        jspec = sc._jvm.org.apache.spark.sql.expressions.Window.rowsBetween(start, end)
        return WindowSpec(jspec)

    @staticmethod
    def rangeBetween(start, end):
        """
        Creates a :class:`WindowSpec` with the frame boundaries defined,
        from `start` (inclusive) to `end` (inclusive).

        Both `start` and `end` are relative from the current row. For example,
        "0" means "current row", while "-1" means one off before the current row,
        and "5" means the five off after the current row.

        We recommend users use ``Window.unboundedPreceding``, ``Window.unboundedFollowing``,
        and ``Window.currentRow`` to specify special boundary values, rather than using integral
        values directly.

        A range-based boundary is based on the actual value of the ORDER BY
        expression(s). An offset is used to alter the value of the ORDER BY expression, for
        instance if the current ORDER BY expression has a value of 10 and the lower bound offset
        is -3, the resulting lower bound for the current row will be 10 - 3 = 7. This however puts a
        number of constraints on the ORDER BY expressions: there can be only one expression and this
        expression must have a numerical data type. An exception can be made when the offset is
        unbounded, because no value modification is needed, in this case multiple and non-numeric
        ORDER BY expression are allowed.

        .. versionadded:: 2.1.0

        Parameters
        ----------
        start : int
            boundary start, inclusive.
            The frame is unbounded if this is ``Window.unboundedPreceding``, or
            any value less than or equal to max(-sys.maxsize, -9223372036854775808).
        end : int
            boundary end, inclusive.
            The frame is unbounded if this is ``Window.unboundedFollowing``, or
            any value greater than or equal to min(sys.maxsize, 9223372036854775807).

        Examples
        --------
        >>> from pyspark.sql import Window
        >>> from pyspark.sql import functions as func
        >>> from pyspark.sql import SQLContext
        >>> sc = SparkContext.getOrCreate()
        >>> sqlContext = SQLContext(sc)
        >>> tup = [(1, "a"), (1, "a"), (2, "a"), (1, "b"), (2, "b"), (3, "b")]
        >>> df = sqlContext.createDataFrame(tup, ["id", "category"])
        >>> window = Window.partitionBy("category").orderBy("id").rangeBetween(Window.currentRow, 1)
        >>> df.withColumn("sum", func.sum("id").over(window)).sort("id", "category").show()
        +---+--------+---+
        | id|category|sum|
        +---+--------+---+
        |  1|       a|  4|
        |  1|       a|  4|
        |  1|       b|  3|
        |  2|       a|  2|
        |  2|       b|  5|
        |  3|       b|  3|
        +---+--------+---+
        """
        # Same clamping as rowsBetween: extreme bounds become the sentinels.
        if start <= Window._PRECEDING_THRESHOLD:
            start = Window.unboundedPreceding
        if end >= Window._FOLLOWING_THRESHOLD:
            end = Window.unboundedFollowing
        sc = SparkContext._active_spark_context
        jspec = sc._jvm.org.apache.spark.sql.expressions.Window.rangeBetween(start, end)
        return WindowSpec(jspec)
class WindowSpec(object):
    """
    A window specification that defines the partitioning, ordering,
    and frame boundaries.

    Use the static methods in :class:`Window` to create a :class:`WindowSpec`.

    .. versionadded:: 1.4.0
    """

    def __init__(self, jspec):
        # Keep a handle on the underlying JVM WindowSpec object; every
        # builder method below wraps the JVM result in a new WindowSpec.
        self._jspec = jspec

    def partitionBy(self, *cols):
        """
        Defines the partitioning columns in a :class:`WindowSpec`.

        .. versionadded:: 1.4.0

        Parameters
        ----------
        cols : str, :class:`Column` or list
            names of columns or expressions
        """
        return WindowSpec(self._jspec.partitionBy(_to_java_cols(cols)))

    def orderBy(self, *cols):
        """
        Defines the ordering columns in a :class:`WindowSpec`.

        .. versionadded:: 1.4.0

        Parameters
        ----------
        cols : str, :class:`Column` or list
            names of columns or expressions
        """
        return WindowSpec(self._jspec.orderBy(_to_java_cols(cols)))

    def rowsBetween(self, start, end):
        """
        Defines the frame boundaries, from `start` (inclusive) to `end` (inclusive),
        expressed as row positions relative to the current row ("0" is the current
        row, "-1" the row before it, "5" the fifth row after it).

        Prefer ``Window.unboundedPreceding``, ``Window.unboundedFollowing``, and
        ``Window.currentRow`` over raw integral values for the special boundaries.

        .. versionadded:: 1.4.0

        Parameters
        ----------
        start : int
            boundary start, inclusive; unbounded when at or below
            max(-sys.maxsize, -9223372036854775808).
        end : int
            boundary end, inclusive; unbounded when at or above
            min(sys.maxsize, 9223372036854775807).
        """
        # Treat any bound beyond the threshold as unbounded.
        lower = Window.unboundedPreceding if start <= Window._PRECEDING_THRESHOLD else start
        upper = Window.unboundedFollowing if end >= Window._FOLLOWING_THRESHOLD else end
        return WindowSpec(self._jspec.rowsBetween(lower, upper))

    def rangeBetween(self, start, end):
        """
        Defines the frame boundaries, from `start` (inclusive) to `end` (inclusive),
        expressed as offsets on the ORDER BY value of the current row ("0" is the
        current row's value, "-1" one below it, "5" five above it).

        Prefer ``Window.unboundedPreceding``, ``Window.unboundedFollowing``, and
        ``Window.currentRow`` over raw integral values for the special boundaries.

        .. versionadded:: 1.4.0

        Parameters
        ----------
        start : int
            boundary start, inclusive; unbounded when at or below
            max(-sys.maxsize, -9223372036854775808).
        end : int
            boundary end, inclusive; unbounded when at or above
            min(sys.maxsize, 9223372036854775807).
        """
        # Treat any bound beyond the threshold as unbounded.
        lower = Window.unboundedPreceding if start <= Window._PRECEDING_THRESHOLD else start
        upper = Window.unboundedFollowing if end >= Window._FOLLOWING_THRESHOLD else end
        return WindowSpec(self._jspec.rangeBetween(lower, upper))
def _test():
    """Run this module's doctests against a local SparkContext."""
    import doctest
    import pyspark.sql.window
    # The doctest examples call SparkContext.getOrCreate(), so start one up front.
    SparkContext('local[4]', 'PythonTest')
    test_globals = pyspark.sql.window.__dict__.copy()
    results = doctest.testmod(
        pyspark.sql.window,
        globs=test_globals,
        optionflags=doctest.NORMALIZE_WHITESPACE,
    )
    if results.failed:
        sys.exit(-1)
# Run the doctest suite when this module is executed directly.
if __name__ == "__main__":
    _test()
| apache-2.0 |
vermouth1992/deep-learning-playground | utils/data_utils.py | 2 | 8781 | import cPickle as pickle
import numpy as np
import os
from scipy.misc import imread
def load_CIFAR_batch(filename):
    """ load single batch of cifar

    Reads one pickled CIFAR batch file and returns images and labels.

    :param filename: path to a pickled batch file containing a dict with
        'data' (N x 3072 raw pixel values, channel-major) and 'labels'
        (sequence of N class labels).
    :return: tuple (X, Y) where X is a float32 array of shape
        (N, 32, 32, 3) in HWC order and Y is an array of shape (N,).
    """
    with open(filename, 'rb') as f:
        datadict = pickle.load(f)
        X = datadict['data']
        Y = datadict['labels']
        # Infer the batch size from the data instead of hard-coding 10000,
        # so non-standard / partial batches load correctly too. Each row is
        # 3072 = 3*32*32 values; transpose channel-major -> HWC.
        X = X.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1).astype("float32")
        Y = np.array(Y)
        return X, Y
def load_CIFAR10(ROOT):
    """ load all of cifar

    Loads the five training batches and the test batch found under ROOT
    and concatenates the training batches into single arrays.
    """
    root = os.path.expanduser(ROOT)
    train_images = []
    train_labels = []
    for batch_index in range(1, 6):
        batch_path = os.path.join(root, 'data_batch_%d' % (batch_index,))
        images, labels = load_CIFAR_batch(batch_path)
        train_images.append(images)
        train_labels.append(labels)
    Xtr = np.concatenate(train_images)
    Ytr = np.concatenate(train_labels)
    Xte, Yte = load_CIFAR_batch(os.path.join(root, 'test_batch'))
    return Xtr, Ytr, Xte, Yte
def get_CIFAR10_data(cifar10_dir=os.path.expanduser('~/Documents/Deep_Learning_Resources/datasets/cifar-10-batches-py'),
                     num_training=49000, num_validation=1000, num_test=10000, subtract_mean=True):
    """
    Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
    it for classifiers. These are the same steps as we used for the SVM, but
    condensed to a single function.

    :param cifar10_dir: directory containing the pickled CIFAR-10 batches.
    :param num_training: number of rows used as the training split.
    :param num_validation: number of rows (following the training rows)
        used as the validation split.
    :param num_test: number of test rows kept.
    :param subtract_mean: if True, subtract the mean training image from
        every split.
    :return: dict with keys 'X_train'/'y_train', 'X_val'/'y_val',
        'X_test'/'y_test', 'X_dev'/'y_dev'; image arrays are transposed
        to channels-first (N, 3, 32, 32).
    """
    # Load the raw CIFAR-10 data
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)

    # Subsample the data: validation rows are taken immediately after the
    # training rows, so the two splits are disjoint.
    mask = range(num_training, num_training + num_validation)
    X_val = X_train[mask]
    y_val = y_train[mask]
    mask = range(num_training)
    X_train = X_train[mask]
    y_train = y_train[mask]
    mask = range(num_test)
    X_test = X_test[mask]
    y_test = y_test[mask]

    # Normalize the data: subtract the mean image. The mean is computed on
    # the training split only (after subsampling) and applied to all splits.
    if subtract_mean:
        mean_image = np.mean(X_train, axis=0)
        X_train -= mean_image
        X_val -= mean_image
        X_test -= mean_image

    # Transpose so that channels come first; copy() makes each array
    # contiguous after the transpose.
    X_train = X_train.transpose(0, 3, 1, 2).copy()
    X_val = X_val.transpose(0, 3, 1, 2).copy()
    X_test = X_test.transpose(0, 3, 1, 2).copy()

    # Tiny "dev" split: the first 0.1% of the training rows, handy for
    # quick gradient checks.
    dev_mask = range(int(X_train.shape[0] * 0.001))
    X_dev = X_train[dev_mask]
    y_dev = y_train[dev_mask]

    # Package data into a dictionary
    return {
        'X_train': X_train, 'y_train': y_train,
        'X_val': X_val, 'y_val': y_val,
        'X_test': X_test, 'y_test': y_test,
        'X_dev': X_dev, 'y_dev': y_dev,
    }
def load_tiny_imagenet(path, dtype=np.float32, subtract_mean=True):
    """
    Load TinyImageNet. Each of TinyImageNet-100-A, TinyImageNet-100-B, and
    TinyImageNet-200 have the same directory structure, so this can be used
    to load any of them.

    Inputs:
    - path: String giving path to the directory to load.
    - dtype: numpy datatype used to load the data.
    - subtract_mean: Whether to subtract the mean training image.

    Returns: A dictionary with the following entries:
    - class_names: A list where class_names[i] is a list of strings giving the
      WordNet names for class i in the loaded dataset.
    - X_train: (N_tr, 3, 64, 64) array of training images
    - y_train: (N_tr,) array of training labels
    - X_val: (N_val, 3, 64, 64) array of validation images
    - y_val: (N_val,) array of validation labels
    - X_test: (N_test, 3, 64, 64) array of testing images.
    - y_test: (N_test,) array of test labels; if test labels are not available
      (such as in student code) then y_test will be None.
    - mean_image: (3, 64, 64) array giving mean training image
    """
    # First load wnids
    with open(os.path.join(path, 'wnids.txt'), 'r') as f:
        wnids = [x.strip() for x in f]

    # Map wnids to integer labels
    wnid_to_label = {wnid: i for i, wnid in enumerate(wnids)}

    # Use words.txt to get names for each class
    with open(os.path.join(path, 'words.txt'), 'r') as f:
        wnid_to_words = dict(line.split('\t') for line in f)
        # .items() instead of the Python-2-only .iteritems(); only values are
        # replaced in place, which is safe while iterating on both versions.
        for wnid, words in wnid_to_words.items():
            wnid_to_words[wnid] = [w.strip() for w in words.split(',')]
    class_names = [wnid_to_words[wnid] for wnid in wnids]

    # Next load training data.
    X_train = []
    y_train = []
    for i, wnid in enumerate(wnids):
        if (i + 1) % 20 == 0:
            # print() call form is valid on both Python 2 and Python 3
            print('loading training data for synset %d / %d' % (i + 1, len(wnids)))
        # To figure out the filenames we need to open the boxes file
        boxes_file = os.path.join(path, 'train', wnid, '%s_boxes.txt' % wnid)
        with open(boxes_file, 'r') as f:
            filenames = [x.split('\t')[0] for x in f]
        num_images = len(filenames)
        X_train_block = np.zeros((num_images, 3, 64, 64), dtype=dtype)
        y_train_block = wnid_to_label[wnid] * np.ones(num_images, dtype=np.int64)
        for j, img_file in enumerate(filenames):
            img_file = os.path.join(path, 'train', wnid, 'images', img_file)
            img = imread(img_file)
            if img.ndim == 2:
                # grayscale file: add a singleton channel axis
                img.shape = (64, 64, 1)
            X_train_block[j] = img.transpose(2, 0, 1)
        X_train.append(X_train_block)
        y_train.append(y_train_block)

    # We need to concatenate all training data
    X_train = np.concatenate(X_train, axis=0)
    y_train = np.concatenate(y_train, axis=0)

    # Next load validation data
    with open(os.path.join(path, 'val', 'val_annotations.txt'), 'r') as f:
        img_files = []
        val_wnids = []
        for line in f:
            img_file, wnid = line.split('\t')[:2]
            img_files.append(img_file)
            val_wnids.append(wnid)
        num_val = len(img_files)
        y_val = np.array([wnid_to_label[wnid] for wnid in val_wnids])
        X_val = np.zeros((num_val, 3, 64, 64), dtype=dtype)
        for i, img_file in enumerate(img_files):
            img_file = os.path.join(path, 'val', 'images', img_file)
            img = imread(img_file)
            if img.ndim == 2:
                img.shape = (64, 64, 1)
            X_val[i] = img.transpose(2, 0, 1)

    # Next load test images.
    # Students won't have test labels, so we need to iterate over files in the
    # images directory.
    img_files = os.listdir(os.path.join(path, 'test', 'images'))
    X_test = np.zeros((len(img_files), 3, 64, 64), dtype=dtype)
    for i, img_file in enumerate(img_files):
        img_file = os.path.join(path, 'test', 'images', img_file)
        img = imread(img_file)
        if img.ndim == 2:
            img.shape = (64, 64, 1)
        X_test[i] = img.transpose(2, 0, 1)

    y_test = None
    y_test_file = os.path.join(path, 'test', 'test_annotations.txt')
    if os.path.isfile(y_test_file):
        with open(y_test_file, 'r') as f:
            img_file_to_wnid = {}
            for line in f:
                line = line.split('\t')
                img_file_to_wnid[line[0]] = line[1]
        y_test = [wnid_to_label[img_file_to_wnid[img_file]] for img_file in img_files]
        y_test = np.array(y_test)

    mean_image = X_train.mean(axis=0)
    if subtract_mean:
        X_train -= mean_image[None]
        X_val -= mean_image[None]
        X_test -= mean_image[None]

    return {
        # 'class_names' appeared twice in the original dict literal (same
        # value, last-one-wins); the duplicate entry has been removed.
        'class_names': class_names,
        'X_train': X_train,
        'y_train': y_train,
        'X_val': X_val,
        'y_val': y_val,
        'X_test': X_test,
        'y_test': y_test,
        'mean_image': mean_image,
    }
def load_models(models_dir):
    """
    Load saved models from disk. This will attempt to unpickle all files in a
    directory; any files that give errors on unpickling (such as README.txt) will
    be skipped.

    Inputs:
    - models_dir: String giving the path to a directory containing model files.
      Each model file is a pickled dictionary with a 'model' field.

    Returns:
    A dictionary mapping model file names to models.
    """
    loaded = {}
    for fname in os.listdir(models_dir):
        model_path = os.path.join(models_dir, fname)
        with open(model_path, 'rb') as fh:
            try:
                loaded[fname] = pickle.load(fh)['model']
            except pickle.UnpicklingError:
                # Not a pickle (e.g. README.txt) -- skip it.
                continue
    return loaded
def to_categorical(y, nb_classes=None):
    """Converts a class vector (integers) to a binary class matrix.

    E.g. for use with categorical_crossentropy.

    # Arguments
        y: class vector to be converted into a matrix
            (integers from 0 to nb_classes).
        nb_classes: total number of classes; inferred as max(y) + 1
            when omitted (or falsy).

    # Returns
        A binary (one-hot) matrix representation of the input.
    """
    labels = np.array(y, dtype='int').ravel()
    if not nb_classes:
        nb_classes = labels.max() + 1
    num_samples = labels.shape[0]
    one_hot = np.zeros((num_samples, nb_classes))
    one_hot[np.arange(num_samples), labels] = 1
    return one_hot
| apache-2.0 |
suyashphadtare/vestasi-erp-1 | erpnext/accounts/doctype/pricing_rule/pricing_rule.py | 31 | 9280 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
import copy
from frappe import throw, _
from frappe.utils import flt, cint
from frappe.model.document import Document
class MultiplePricingRuleConflict(frappe.ValidationError): pass
class PricingRule(Document):
    """Controller for the Pricing Rule doctype.

    All checks raise via frappe.throw on invalid input; validate() is the
    single entry point invoked by the framework before save.
    """

    def validate(self):
        self.validate_mandatory()
        self.validate_applicable_for_selling_or_buying()
        self.validate_min_max_qty()
        self.cleanup_fields_value()
        self.validate_price_or_discount()
        self.validate_max_discount()

    def validate_mandatory(self):
        # The option chosen in "Apply On" / "Applicable For" must have its
        # matching detail field (e.g. item_code, customer) filled in.
        for selector in ("apply_on", "applicable_for"):
            detail_field = frappe.scrub(self.get(selector) or "")
            if detail_field and not self.get(detail_field):
                throw(_("{0} is required").format(self.meta.get_label(detail_field)),
                    frappe.MandatoryError)

    def validate_applicable_for_selling_or_buying(self):
        if not (self.selling or self.buying):
            throw(_("Atleast one of the Selling or Buying must be selected"))

        selling_only = ["Customer", "Customer Group", "Territory", "Sales Partner", "Campaign"]
        buying_only = ["Supplier", "Supplier Type"]

        if self.applicable_for in selling_only and not self.selling:
            throw(_("Selling must be checked, if Applicable For is selected as {0}"
                .format(self.applicable_for)))

        if self.applicable_for in buying_only and not self.buying:
            throw(_("Buying must be checked, if Applicable For is selected as {0}"
                .format(self.applicable_for)))

    def validate_min_max_qty(self):
        if self.min_qty and self.max_qty and flt(self.min_qty) > flt(self.max_qty):
            throw(_("Min Qty can not be greater than Max Qty"))

    def cleanup_fields_value(self):
        # Blank out the detail fields belonging to the options that were NOT
        # chosen, so stale values never linger on the document.
        for selector in ("apply_on", "applicable_for", "price_or_discount"):
            active = frappe.scrub(self.get(selector) or "")
            for option in (self.meta.get_options(selector) or "").split("\n"):
                if not option:
                    continue
                scrubbed = frappe.scrub(option)
                if scrubbed != active:
                    self.set(scrubbed, None)

    def validate_price_or_discount(self):
        for label in ("Price", "Discount Percentage"):
            if flt(self.get(frappe.scrub(label))) < 0:
                throw(_("{0} can not be negative").format(label))

    def validate_max_discount(self):
        # A rule's discount may not exceed the item's configured max_discount.
        if self.price_or_discount == "Discount Percentage" and self.item_code:
            max_discount = frappe.db.get_value("Item", self.item_code, "max_discount")
            if max_discount and flt(self.discount_percentage) > flt(max_discount):
                throw(_("Max discount allowed for item: {0} is {1}%").format(self.item_code, max_discount))
#--------------------------------------------------------------------------------
@frappe.whitelist()
def apply_pricing_rule(args):
    """Resolve the applicable pricing rule for every row of a transaction.

    ``args`` may be a JSON string or a dict and carries transaction-level
    context plus an "item_list" of row dicts::

        {
            "item_list": [{"doctype": "", "name": "", "item_code": "", "brand": "", "item_group": ""}, ...],
            "customer": ..., "customer_group": ..., "territory": ...,
            "supplier": ..., "supplier_type": ...,
            "currency": ..., "conversion_rate": ..., "price_list": ...,
            "plc_conversion_rate": ..., "company": ...,
            "transaction_date": ..., "campaign": ..., "sales_partner": ...,
            "ignore_pricing_rule": ...
        }

    Returns a list with one result dict per row (see
    get_pricing_rule_for_item).
    """
    if isinstance(args, basestring):
        args = json.loads(args)
    args = frappe._dict(args)

    results = []
    # Material Requests are never priced through pricing rules.
    if args.get("parenttype") == "Material Request":
        return results

    if not args.transaction_type:
        # Documents that carry a supplier field are buying documents.
        args.transaction_type = "buying" if frappe.get_meta(args.parenttype).get_field("supplier") \
            else "selling"

    rows = args.pop("item_list")
    for row in rows:
        row_context = copy.deepcopy(args)
        row_context.update(row)
        results.append(get_pricing_rule_for_item(row_context))
    return results
def get_pricing_rule_for_item(args):
    """Find and apply the pricing rule for a single transaction row.

    Returns a frappe._dict echoing doctype/name, with the matched rule's
    name under "pricing_rule" (or None) and either a currency-converted
    price_list_rate or a discount_percentage, depending on the rule type.
    """
    if args.get("parenttype") == "Material Request":
        return {}

    item_details = frappe._dict({
        "doctype": args.doctype,
        "name": args.name,
        "pricing_rule": None
    })

    if args.ignore_pricing_rule or not args.item_code:
        return item_details

    # Backfill item group and brand from the Item master when absent.
    if not (args.item_group and args.brand):
        args.item_group, args.brand = frappe.db.get_value("Item", args.item_code,
            ["item_group", "brand"])
        if not args.item_group:
            frappe.throw(_("Item Group not mentioned in item master for item {0}").format(args.item_code))

    # Backfill the party attributes used by the rule filters.
    if args.customer and not (args.customer_group and args.territory):
        customer = frappe.db.get_value("Customer", args.customer, ["customer_group", "territory"])
        if customer:
            args.customer_group, args.territory = customer
    elif args.supplier and not args.supplier_type:
        args.supplier_type = frappe.db.get_value("Supplier", args.supplier, "supplier_type")

    matched_rule = filter_pricing_rules(args, get_pricing_rules(args))
    if matched_rule:
        item_details.pricing_rule = matched_rule.name
        if matched_rule.price_or_discount == "Price":
            # Rule prices are stored in company currency; convert to the
            # transaction currency (0.0 when no conversion rate is known).
            converted_rate = matched_rule.price / flt(args.conversion_rate) \
                if args.conversion_rate else 0.0
            item_details.update({
                "price_list_rate": converted_rate,
                "discount_percentage": 0.0
            })
        else:
            item_details.discount_percentage = matched_rule.discount_percentage
    return item_details
def get_pricing_rules(args):
 """Fetch all candidate Pricing Rule rows for the given transaction context.

 Builds a WHERE clause from the populated args (party fields, tree-based
 group/territory membership, price list, validity window) and returns the
 matching rules ordered by priority desc, name desc.
 """
 def _get_tree_conditions(parenttype, allow_blank=True):
  # Matches the document's group against the whole ancestor chain of a
  # nested-set tree (lft/rgt), optionally also allowing a blank value.
  field = frappe.scrub(parenttype)
  condition = ""
  if args.get(field):
   lft, rgt = frappe.db.get_value(parenttype, args[field], ["lft", "rgt"])
   parent_groups = frappe.db.sql_list("""select name from `tab%s`
    where lft<=%s and rgt>=%s""" % (parenttype, '%s', '%s'), (lft, rgt))
   if parent_groups:
    if allow_blank: parent_groups.append('')
    # Group names are inlined into the SQL string; quotes are escaped here.
    condition = " ifnull("+field+", '') in ('" + \
     "', '".join([d.replace("'", "\\'").replace('"', '\\"') for d in parent_groups])+"')"
  return condition
 conditions = ""
 # Flat (non-tree) party/context fields: rule must either match or be blank.
 for field in ["company", "customer", "supplier", "supplier_type", "campaign", "sales_partner"]:
  if args.get(field):
   conditions += " and ifnull("+field+", '') in (%("+field+")s, '')"
  else:
   conditions += " and ifnull("+field+", '') = ''"
 for parenttype in ["Customer Group", "Territory"]:
  group_condition = _get_tree_conditions(parenttype)
  if group_condition:
   conditions += " and " + group_condition
 if not args.price_list: args.price_list = None
 conditions += " and ifnull(for_price_list, '') in (%(price_list)s, '')"
 if args.get("transaction_date"):
  # Open-ended validity windows default to a very wide date range.
  conditions += """ and %(transaction_date)s between ifnull(valid_from, '2000-01-01')
   and ifnull(valid_upto, '2500-12-31')"""
 # Item Group matching is an OR alternative to item_code/brand (no blanks).
 item_group_condition = _get_tree_conditions("Item Group", False)
 if item_group_condition: item_group_condition = " or " + item_group_condition
 return frappe.db.sql("""select * from `tabPricing Rule`
  where (item_code=%(item_code)s {item_group_condition} or brand=%(brand)s)
  and docstatus < 2 and ifnull(disable, 0) = 0
  and ifnull({transaction_type}, 0) = 1 {conditions}
  order by priority desc, name desc""".format(
   item_group_condition=item_group_condition,
   transaction_type=args.transaction_type, conditions=conditions), args, as_dict=1)
def filter_pricing_rules(args, pricing_rules):
    """Reduce candidate pricing rules to the single applicable rule.

    Filtering stages:
    1. qty window (min_qty/max_qty),
    2. highest explicit `priority`,
    3. internal priority (most specific field match) when the rules agree
       on every other dimension,
    4. price-list preference among discount rules.

    Returns the winning rule, or None when nothing matches; raises
    MultiplePricingRuleConflict when several rules survive all stages.

    The Python-2-only list-returning filter() calls were replaced by list
    comprehensions (identical behavior on Python 2) so that the len() and
    `or` fallbacks below also work on Python 3, where filter() returns a
    lazy, always-truthy iterator.
    """
    # filter for qty
    if pricing_rules and args.get("qty"):
        pricing_rules = [r for r in pricing_rules
            if args.qty >= flt(r.min_qty) and (args.qty <= r.max_qty if r.max_qty else True)]

    # find pricing rules with the highest priority
    if pricing_rules:
        max_priority = max(cint(r.priority) for r in pricing_rules)
        if max_priority:
            pricing_rules = [r for r in pricing_rules if cint(r.priority) == max_priority]

    # apply internal priority: prefer a more specific match within one field
    # group, provided the rules agree on every other field
    all_fields = ["item_code", "item_group", "brand", "customer", "customer_group", "territory",
        "supplier", "supplier_type", "campaign", "sales_partner"]

    if len(pricing_rules) > 1:
        for field_set in [["item_code", "item_group", "brand"],
            ["customer", "customer_group", "territory"], ["supplier", "supplier_type"]]:
                remaining_fields = list(set(all_fields) - set(field_set))
                if if_all_rules_same(pricing_rules, remaining_fields):
                    pricing_rules = apply_internal_priority(pricing_rules, field_set, args)
                    break

    if len(pricing_rules) > 1:
        # Among discount rules, prefer those bound to the transaction's
        # price list; fall back to the full set if none is.
        price_or_discount = list(set(d.price_or_discount for d in pricing_rules))
        if len(price_or_discount) == 1 and price_or_discount[0] == "Discount Percentage":
            pricing_rules = [r for r in pricing_rules
                if r.for_price_list == args.price_list] or pricing_rules

    if len(pricing_rules) > 1:
        frappe.throw(_("Multiple Price Rule exists with same criteria, please resolve \
  conflict by assigning priority. Price Rules: {0}")
            .format("\n".join([d.name for d in pricing_rules])), MultiplePricingRuleConflict)
    elif pricing_rules:
        return pricing_rules[0]
def if_all_rules_same(pricing_rules, fields):
    """Return True when every rule carries identical values for `fields`.

    The first rule serves as the reference; an empty tail (single rule)
    is trivially all-same.
    """
    reference = [pricing_rules[0][field] for field in fields]
    return all(
        [rule[field] for field in fields] == reference
        for rule in pricing_rules[1:]
    )
def apply_internal_priority(pricing_rules, field_set, args):
    """Prefer rules that explicitly match the most specific populated field.

    Walks `field_set` in order (most specific first); the first field that
    is populated in `args` AND matched by at least one rule wins, and only
    the matching rules are returned. Falls back to the original list when
    no field produces a match.

    The Python-2-only filter() call is replaced by a list comprehension:
    on Python 3 filter() returns a lazy, always-truthy iterator, which
    broke both the truthiness check and the list fallback. Python 2
    behavior is unchanged.
    """
    for field in field_set:
        if not args.get(field):
            continue
        matching = [rule for rule in pricing_rules if rule[field] == args[field]]
        if matching:
            return matching
    return pricing_rules
| agpl-3.0 |
VaishnoReddy/Aminator- | aminator/plugins/provisioner/apt.py | 5 | 7258 | # -*- coding: utf-8 -*-
#
#
# Copyright 2013 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""
aminator.plugins.provisioner.apt
================================
basic apt provisioner
"""
import logging
import os
from aminator.exceptions import ProvisionException
from aminator.plugins.provisioner.base import BaseProvisionerPlugin
from aminator.util import retry
from aminator.util.linux import monitor_command, result_to_dict
from aminator.util.metrics import cmdsucceeds, cmdfails, timer, lapse
__all__ = ('AptProvisionerPlugin',)
log = logging.getLogger(__name__)
class AptProvisionerUpdateException(ProvisionException):
 """Raised when `apt-get update` fails; drives the @retry decorator on
 AptProvisionerPlugin.apt_get_update and, once retries are exhausted,
 fails the bake."""
 pass
class AptProvisionerPlugin(BaseProvisionerPlugin):
 """
 AptProvisionerPlugin takes the majority of its behavior from BaseProvisionerPlugin
 See BaseProvisionerPlugin for details

 All shell interaction goes through monitor_command(), whose result object
 exposes .success, .result.command and .result.std_err.
 """
 _name = 'apt'
 @cmdsucceeds("aminator.provisioner.apt.provision_package.count")
 @cmdfails("aminator.provisioner.apt.provision_package.error")
 @lapse("aminator.provisioner.apt.provision_package.duration")
 def _provision_package(self):
  # Entry point used by the base plugin: installs context.package.arg,
  # emitting success/failure/duration metrics via the decorators above.
  context = self._config.context
  # apt/dpkg must never prompt during an automated bake
  os.environ['DEBIAN_FRONTEND'] = 'noninteractive'
  return self.install(context.package.arg,
      local_install=context.package.get('local_install', False))
 def _store_package_metadata(self):
  # Query the installed package and massage the metadata so deb versions
  # look like rpm version/release pairs for downstream consumers.
  context = self._config.context
  config = self._config.plugins[self.full_name]
  metadata = self.deb_package_metadata(context.package.arg, config.get('pkg_query_format', ''), context.package.get('local_install', False))
  for x in config.pkg_attributes:
   if x == 'version' and x in metadata:
    if ':' in metadata[x]:
     # strip epoch element from version
     vers = metadata[x]
     metadata[x] = vers[vers.index(':') + 1:]
    if '-' in metadata[x]:
     # debs include release in version so split
     # version into version-release to compat w/rpm
     vers, rel = metadata[x].split('-', 1)
     metadata[x] = vers
     metadata['release'] = rel
    else:
     metadata['release'] = 0
   # this is probably not necessary given above
   metadata.setdefault(x, None)
  context.package.attributes = metadata
 @staticmethod
 def dpkg_install(package):
  # Raw `dpkg -i`; dependency resolution is handled by the caller.
  dpkg_result = monitor_command(['dpkg', '-i', package])
  if not dpkg_result.success:
   log.debug('failure:{0.command} :{0.std_err}'.format(dpkg_result.result))
  return dpkg_result
 def _fix_localinstall_deps(self, package):
  # use apt-get to resolve dependencies after a dpkg -i
  fix_deps_result = self.apt_get_install('--fix-missing')
  if not fix_deps_result.success:
   log.debug('failure:{0.command} :{0.std_err}'.format(fix_deps_result.result))
  return fix_deps_result
 def _localinstall(self, package):
  """install deb file with dpkg then resolve dependencies
  """
  dpkg_ret = self.dpkg_install(package)
  if not dpkg_ret.success:
   # expected when package has dependencies that are not installed
   update_metadata_result = self.apt_get_update()
   if not update_metadata_result.success:
    errmsg = 'Repo metadata refresh failed: {0.std_err}'
    errmsg = errmsg.format(update_metadata_result.result)
    return update_metadata_result
   log.info("Installing dependencies for package {0}".format(package))
   fix_deps_result = self._fix_localinstall_deps(package)
   if not fix_deps_result.success:
    log.critical("Error encountered installing dependencies: "
        "{0.std_err}".format(fix_deps_result.result))
    return fix_deps_result
  # NOTE(review): returns the original dpkg result even after the
  # dependency fix-up succeeded -- callers see the dpkg failure status.
  return dpkg_ret
 @staticmethod
 def deb_query(package, queryformat, local=False):
  # `local` selects dpkg-deb (query a .deb file on disk) over dpkg-query
  # (query the installed database); note the differing format flags.
  if local:
   cmd = 'dpkg-deb -W'.split()
   cmd.append('--showformat={0}'.format(queryformat))
  else:
   cmd = 'dpkg-query -W'.split()
   cmd.append('-f={0}'.format(queryformat))
  cmd.append(package)
  deb_query_result = monitor_command(cmd)
  if not deb_query_result.success:
   log.debug('failure:{0.command} :{0.std_err}'.format(deb_query_result.result))
  return deb_query_result
 @cmdsucceeds("aminator.provisioner.apt.apt_get_update.count")
 @cmdfails("aminator.provisioner.apt.apt_get_update.error")
 @timer("aminator.provisioner.apt.apt_get_update.duration")
 @retry(ExceptionToCheck=AptProvisionerUpdateException, tries=5, delay=1, backoff=0.5, logger=log)
 def apt_get_update(self):
  # Clean first, then refresh the package lists; failures raise so the
  # @retry decorator above can re-attempt with backoff.
  self.apt_get_clean()
  dpkg_update = monitor_command(['apt-get', 'update'])
  if not dpkg_update.success:
   log.debug('failure: {0.command} :{0.std_err}'.format(dpkg_update.result))
   # trigger retry. expiring retries should fail the bake as this
   # exception will propagate out to the provisioning context handler
   raise AptProvisionerUpdateException('apt-get update failed')
  return dpkg_update
 @staticmethod
 def apt_get_clean():
  # Clears the local archive cache before refreshing metadata.
  return monitor_command(['apt-get', 'clean'])
 @staticmethod
 def apt_get_install(*options):
  # Non-interactive `apt-get -y install` with arbitrary extra arguments.
  cmd = ['apt-get', '-y', 'install']
  cmd.extend(options)
  install_result = monitor_command(cmd)
  if not install_result.success:
   log.debug('failure:{0.command} :{0.std_err}'.format(install_result.result))
  return install_result
 def _install(self, package):
  # Repo-based install path (vs _localinstall for on-disk .deb files).
  return self.apt_get_install(package)
 def install(self, package, local_install=False):
  # Dispatch between a local .deb install and a repository install; the
  # repo path refreshes metadata first and aborts early on failure.
  if local_install:
   install_result = self._localinstall(package)
  else:
   update_metadata_result = self.apt_get_update()
   if not update_metadata_result.success:
    errmsg = 'Repo metadata refresh failed: {0.std_err}'
    errmsg = errmsg.format(update_metadata_result.result)
    return update_metadata_result
   install_result = self._install(package)
  if not install_result.success:
   errmsg = 'Error installing package {0}: {1.std_err}'
   errmsg = errmsg.format(package, install_result.result)
   log.critical(errmsg)
  return install_result
 @classmethod
 def deb_package_metadata(cls, package, queryformat, local=False):
  # Converts the raw deb_query output into a dict of package attributes.
  return result_to_dict(cls.deb_query(package, queryformat, local))
| apache-2.0 |
class Solution:
    # @param words, a list of strings
    # @param L, an integer
    # @return a list of strings
    def fullJustify(self, words, L):
        """Text justification (LeetCode 68).

        Greedily packs as many words as fit into each line of width L,
        fully justifying every line except the last (which is
        left-justified and right-padded).

        Fixes over the previous revision: a leftover debug `print sub`
        (a Python-2-only print statement) was removed, and integer
        division is spelled `//` so the code also runs on Python 3
        (identical results on Python 2 for non-negative ints).
        """
        result = []
        length = len(words)
        i = 0
        c = 0          # running width: words so far + one space after each
        sublength = 0  # total characters of the words on the current line
        sub = []       # words collected for the current line
        while i < length:
            lc = len(words[i])
            c += lc + 1
            sublength += lc
            if c < L:
                sub.append(words[i])
            elif c == L or c - 1 == L:
                # word fits exactly (with or without its trailing space):
                # close out the line
                sub.append(words[i])
                result.append(self.joinWords(sub, L, sublength, i == length - 1))
                sub = []
                c = 0
                sublength = 0
            else:
                # word overflows: flush the current line, start a new one
                result.append(self.joinWords(sub, L, sublength - lc, False))
                sub = [words[i]]
                c = lc + 1
                sublength = lc
            i += 1
        if c > 0:
            # flush the final, left-justified line
            result.append(self.joinWords(sub, L, sublength, True))
        return result

    def joinWords(self, words, L, sublength, isLast):
        """Join one line's words into a string of exactly width L.

        sublength is the character total of `words`. Non-last lines get
        an even gap between words with the leftover spaces distributed
        one-per-gap from the left; the last line (isLast) uses single
        spaces and right-padding instead.
        """
        count = len(words)
        if count == 1:
            return words[0] + (' ' * (L - sublength))
        avg = (L - sublength) // (count - 1)
        right = (L - sublength) % (count - 1)
        avgs = ' ' * avg
        rights = ''
        if isLast:
            avgs = ' '
            rights = (L - sublength - count + 1) * ' '
        result = words[0] + avgs
        if not isLast and right > 0:
            result += ' '
            right -= 1
        for i in range(1, count - 1):
            result += words[i] + avgs
            if not isLast and right > 0:
                result += ' '
                right -= 1
        result += words[count - 1]
        if isLast:
            result += rights
        return result
# Ad-hoc smoke tests (Python 2 print statements); the expected layouts are
# inspected by eye rather than asserted.
s = Solution()
print s.fullJustify([""], 2)
print s.fullJustify(["0"], 2)
print s.fullJustify(["This", "is", "an", "example", "of", "text", "justification."], 16)
print s.fullJustify(["What","must","be","shall","be."], 12)
print s.fullJustify(["Don't","go","around","saying","the","world","owes","you","a","living;","the","world","owes","you","nothing;","it","was","here","first."], 30)
print s.fullJustify(["My","momma","always","said,","\"Life","was","like","a","box","of","chocolates.","You","never","know","what","you're","gonna","get."], 20)
| apache-2.0 |
from __future__ import division, absolute_import, print_function

import timeit
from functools import reduce

import numpy as np
from numpy import float_
# The previous revision imported through the nonexistent package name ``np``
# (``import np.core.fromnumeric`` / ``from np.testing.utils import ...``),
# which raises ImportError at module load; use the canonical numpy names.
import numpy.core.fromnumeric as fromnumeric
from numpy.testing.utils import build_err_msg

# Fixme: this does not look right.
np.seterr(all='ignore')

pi = np.pi
class moduletester(object):
 def __init__(self, module):
  """Bind the public API of *module* (a masked-array implementation) onto
  this tester, so every test body runs against whichever implementation
  was passed in without per-call module attribute lookups.
  """
  self.module = module
  self.allequal = module.allequal
  self.arange = module.arange
  self.array = module.array
#        self.average =  module.average
  self.concatenate = module.concatenate
  self.count = module.count
  self.equal = module.equal
  self.filled = module.filled
  self.getmask = module.getmask
  self.getmaskarray = module.getmaskarray
  self.id = id
  self.inner = module.inner
  self.make_mask = module.make_mask
  self.masked = module.masked
  self.masked_array = module.masked_array
  self.masked_values = module.masked_values
  self.mask_or = module.mask_or
  self.nomask = module.nomask
  self.ones = module.ones
  self.outer = module.outer
  self.repeat = module.repeat
  self.resize = module.resize
  self.sort = module.sort
  self.take = module.take
  self.transpose = module.transpose
  self.zeros = module.zeros
  self.MaskType = module.MaskType
  # Older implementations expose umath at module.core.umath instead.
  try:
   self.umath = module.umath
  except AttributeError:
   self.umath = module.core.umath
  self.testnames = []
 def assert_array_compare(self, comparison, x, y, err_msg='', header='',
       fill_value=True):
  """Asserts that a comparison relation between two masked arrays is satisfied
  elementwise.

  The union of both masks is applied to both operands first, so only
  jointly-unmasked entries take part in the comparison.
  """
  xf = self.filled(x)
  yf = self.filled(y)
  # Fill both operands under the combined mask so masked slots compare equal.
  m = self.mask_or(self.getmask(x), self.getmask(y))
  x = self.filled(self.masked_array(xf, mask=m), fill_value)
  y = self.filled(self.masked_array(yf, mask=m), fill_value)
  # Zero out NaNs (non-object dtypes only) so equality-style comparisons
  # treat them as matching rather than always-unequal.
  if (x.dtype.char != "O"):
   x = x.astype(float_)
   if isinstance(x, np.ndarray) and x.size > 1:
    x[np.isnan(x)] = 0
   elif np.isnan(x):
    x = 0
  if (y.dtype.char != "O"):
   y = y.astype(float_)
   if isinstance(y, np.ndarray) and y.size > 1:
    y[np.isnan(y)] = 0
   elif np.isnan(y):
    y = 0
  try:
   # Shapes must match unless one side is a scalar (broadcastable).
   cond = (x.shape==() or y.shape==()) or x.shape == y.shape
   if not cond:
    msg = build_err_msg([x, y],
         err_msg
         + '\n(shapes %s, %s mismatch)' % (x.shape,
                y.shape),
         header=header,
         names=('x', 'y'))
    assert cond, msg
   val = comparison(x, y)
   if m is not self.nomask and fill_value:
    val = self.masked_array(val, mask=m)
   if isinstance(val, bool):
    cond = val
    reduced = [0]
   else:
    reduced = val.ravel()
    cond = reduced.all()
    reduced = reduced.tolist()
   if not cond:
    # Report the percentage of mismatching elements.
    match = 100-100.0*reduced.count(1)/len(reduced)
    msg = build_err_msg([x, y],
         err_msg
         + '\n(mismatch %s%%)' % (match,),
         header=header,
         names=('x', 'y'))
    assert cond, msg
  except ValueError:
   msg = build_err_msg([x, y], err_msg, header=header, names=('x', 'y'))
   raise ValueError(msg)
def assert_array_equal(self, x, y, err_msg=''):
"""Checks the elementwise equality of two masked arrays."""
self.assert_array_compare(self.equal, x, y, err_msg=err_msg,
header='Arrays are not equal')
def test_0(self):
"Tests creation"
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
xm = self.masked_array(x, mask=m)
xm[0]
 def test_1(self):
  """Benchmark: creation, subtraction, count/size checks and reshaping."""
  x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
  y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
  a10 = 10.
  m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
  m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
  xm = self.masked_array(x, mask=m1)
  ym = self.masked_array(y, mask=m2)
  z = np.array([-.5, 0., .5, .8])
  zm = self.masked_array(z, mask=[0, 1, 0, 0])
  xf = np.where(m1, 1.e+20, x)
  xm.set_fill_value(1.e+20)
  assert((xm-ym).filled(0).any())
  #fail_if_equal(xm.mask.astype(int_), ym.mask.astype(int_))
  s = x.shape
  # size counts all entries; count() only the unmasked ones
  assert(xm.size == reduce(lambda x, y:x*y, s))
  assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1))
  # reshaping must preserve the unmasked count
  for s in [(4, 3), (6, 2)]:
   x.shape = s
   y.shape = s
   xm.shape = s
   ym.shape = s
   xf.shape = s
   assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1))
 def test_2(self):
  """Benchmark: conversions, indexing, slice assignment, mask creation."""
  x1 = np.array([1, 2, 4, 3])
  x2 = self.array(x1, mask=[1, 0, 0, 0])
  x3 = self.array(x1, mask=[0, 1, 0, 1])
  x4 = self.array(x1)
  # test conversion to strings
  junk, garbage = str(x2), repr(x2)
  # assert_equal(np.sort(x1), self.sort(x2, fill_value=0))
  # tests of indexing
  assert type(x2[1]) is type(x1[1])
  assert x1[1] == x2[1]
  # assert self.allequal(x1[2],x2[2])
  # assert self.allequal(x1[2:5],x2[2:5])
  # assert self.allequal(x1[:],x2[:])
  # assert self.allequal(x1[1:], x3[1:])
  x1[2] = 9
  x2[2] = 9
  self.assert_array_equal(x1, x2)
  x1[1:3] = 99
  x2[1:3] = 99
  # assert self.allequal(x1,x2)
  x2[1] = self.masked
  # assert self.allequal(x1,x2)
  x2[1:3] = self.masked
  # assert self.allequal(x1,x2)
  x2[:] = x1
  x2[1] = self.masked
  # assert self.allequal(self.getmask(x2),self.array([0,1,0,0]))
  x3[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0])
  # assert self.allequal(self.getmask(x3), self.array([0,1,1,0]))
  x4[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0])
  # assert self.allequal(self.getmask(x4), self.array([0,1,1,0]))
  # assert self.allequal(x4, self.array([1,2,3,4]))
  x1 = np.arange(5)*1.0
  x2 = self.masked_values(x1, 3.0)
  # assert self.allequal(x1,x2)
  # assert self.allequal(self.array([0,0,0,1,0], self.MaskType), x2.mask)
  # object-dtype arrays must index like their plain-numpy counterparts
  x1 = self.array([1, 'hello', 2, 3], object)
  x2 = np.array([1, 'hello', 2, 3], object)
  s1 = x1[1]
  s2 = x2[1]
  assert x1[1:1].shape == (0,)
  # Tests copy-size
  n = [0, 0, 1, 0, 0]
  m = self.make_mask(n)
  m2 = self.make_mask(m)
  # make_mask on an existing mask returns it unchanged unless copy=1
  assert(m is m2)
  m3 = self.make_mask(m, copy=1)
  assert(m is not m3)
 def test_3(self):
  """Benchmark: resize and the function/method forms of repeat."""
  x4 = self.arange(4)
  x4[2] = self.masked
  y4 = self.resize(x4, (8,))
  # resize tiles the data and the mask
  assert self.allequal(self.concatenate([x4, x4]), y4)
  assert self.allequal(self.getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])
  y5 = self.repeat(x4, (2, 2, 2, 2), axis=0)
  self.assert_array_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3])
  # the four repeat spellings below must all agree
  y6 = self.repeat(x4, 2, axis=0)
  assert self.allequal(y5, y6)
  y7 = x4.repeat((2, 2, 2, 2), axis=0)
  assert self.allequal(y5, y7)
  y8 = x4.repeat(2, 0)
  assert self.allequal(y5, y8)
#----------------------------------
 def test_4(self):
  "Test of take, transpose, inner, outer products"
  x = self.arange(24)
  y = np.arange(24)
  x[5:6] = self.masked
  x = x.reshape(2, 3, 4)
  y = y.reshape(2, 3, 4)
  # masked results must agree with plain-numpy results on the filled data
  assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1)))
  assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1))
  assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)),
        self.inner(x, y))
  assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)),
        self.outer(x, y))
  # take() on object-dtype data must skip over the masked slot
  y = self.array(['abc', 1, 'def', 2, 3], object)
  y[2] = self.masked
  t = self.take(y, [0, 3, 4])
  assert t[0] == 'abc'
  assert t[1] == 2
  assert t[2] == 3
#----------------------------------
 def test_5(self):
  """Benchmark: in-place arithmetic with a scalar operand.

  Masked and unmasked arrays must track the same values under
  +=, -=, *= and /= with scalars.
  """
  x = self.arange(10)
  y = self.arange(10)
  xm = self.arange(10)
  xm[2] = self.masked
  x += 1
  assert self.allequal(x, y+1)
  xm += 1
  assert self.allequal(xm, y+1)
  x = self.arange(10)
  xm = self.arange(10)
  xm[2] = self.masked
  x -= 1
  assert self.allequal(x, y-1)
  xm -= 1
  assert self.allequal(xm, y-1)
  x = self.arange(10)*1.0
  xm = self.arange(10)*1.0
  xm[2] = self.masked
  x *= 2.0
  assert self.allequal(x, y*2)
  xm *= 2.0
  assert self.allequal(xm, y*2)
  x = self.arange(10)*2
  xm = self.arange(10)*2
  xm[2] = self.masked
  x /= 2
  assert self.allequal(x, y)
  xm /= 2
  assert self.allequal(xm, y)
  x = self.arange(10)*1.0
  xm = self.arange(10)*1.0
  xm[2] = self.masked
  x /= 2.0
  assert self.allequal(x, y/2.0)
  # elementwise in-place divide by an array
  xm /= self.arange(10)
  self.assert_array_equal(xm, self.ones((10,)))
  x = self.arange(10).astype(float_)
  xm = self.arange(10)
  xm[2] = self.masked
  id1 = self.id(x.raw_data())
  x += 1.
  #assert id1 == self.id(x.raw_data())
  assert self.allequal(x, y+1.)
 def test_6(self):
  """Benchmark: in-place arithmetic with a (masked) array operand.

  For each of +=, -= and *=, the result mask must be the OR of the two
  operand masks; the final /= pair is executed for timing only.
  """
  x = self.arange(10, dtype=float_)
  y = self.arange(10)
  xm = self.arange(10, dtype=float_)
  xm[2] = self.masked
  m = xm.mask
  a = self.arange(10, dtype=float_)
  a[-1] = self.masked
  x += a
  xm += a
  assert self.allequal(x, y+a)
  assert self.allequal(xm, y+a)
  assert self.allequal(xm.mask, self.mask_or(m, a.mask))
  x = self.arange(10, dtype=float_)
  xm = self.arange(10, dtype=float_)
  xm[2] = self.masked
  m = xm.mask
  a = self.arange(10, dtype=float_)
  a[-1] = self.masked
  x -= a
  xm -= a
  assert self.allequal(x, y-a)
  assert self.allequal(xm, y-a)
  assert self.allequal(xm.mask, self.mask_or(m, a.mask))
  x = self.arange(10, dtype=float_)
  xm = self.arange(10, dtype=float_)
  xm[2] = self.masked
  m = xm.mask
  a = self.arange(10, dtype=float_)
  a[-1] = self.masked
  x *= a
  xm *= a
  assert self.allequal(x, y*a)
  assert self.allequal(xm, y*a)
  assert self.allequal(xm.mask, self.mask_or(m, a.mask))
  x = self.arange(10, dtype=float_)
  xm = self.arange(10, dtype=float_)
  xm[2] = self.masked
  m = xm.mask
  a = self.arange(10, dtype=float_)
  a[-1] = self.masked
  x /= a
  xm /= a
#----------------------------------
 def test_7(self):
  """Benchmark: unary/binary ufuncs applied through both the module under
  test and the raw umath implementation; results and masks must agree.
  """
  d = (self.array([1.0, 0, -1, pi/2]*2, mask=[0, 1]+[0]*6),
    self.array([1.0, 0, -1, pi/2]*2, mask=[1, 0]+[0]*6),)
  for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
#                  'sin', 'cos', 'tan',
#                  'arcsin', 'arccos', 'arctan',
#                  'sinh', 'cosh', 'tanh',
#                  'arcsinh',
#                  'arccosh',
#                  'arctanh',
#                  'absolute', 'fabs', 'negative',
#                  # 'nonzero', 'around',
#                  'floor', 'ceil',
#                  # 'sometrue', 'alltrue',
#                  'logical_not',
#                  'add', 'subtract', 'multiply',
#                  'divide', 'true_divide', 'floor_divide',
#                  'remainder', 'fmod', 'hypot', 'arctan2',
#                  'equal', 'not_equal', 'less_equal', 'greater_equal',
#                  'less', 'greater',
#                  'logical_and', 'logical_or', 'logical_xor',
    ]:
   #print f
   # some ufuncs live on umath, others on fromnumeric
   try:
    uf = getattr(self.umath, f)
   except AttributeError:
    uf = getattr(fromnumeric, f)
   mf = getattr(self.module, f)
   # feed the ufunc only as many operands as it accepts
   args = d[:uf.nin]
   ur = uf(*args)
   mr = mf(*args)
   self.assert_array_equal(ur.filled(0), mr.filled(0), f)
   self.assert_array_equal(ur._mask, mr._mask)
    def test_99(self):
        # test average
        # 1-D masked average, with and without weights / returned counts.
        ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
        self.assert_array_equal(2.0, self.average(ott, axis=0))
        self.assert_array_equal(2.0, self.average(ott, weights=[1., 1., 2., 1.]))
        result, wts = self.average(ott, weights=[1., 1., 2., 1.], returned=1)
        self.assert_array_equal(2.0, result)
        assert(wts == 4.0)
        # Fully masked input must yield the masked singleton.
        ott[:] = self.masked
        assert(self.average(ott, axis=0) is self.masked)
        # 2-D case with a fully masked column.
        ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
        ott = ott.reshape(2, 2)
        ott[:, 1] = self.masked
        self.assert_array_equal(self.average(ott, axis=0), [2.0, 0.0])
        assert(self.average(ott, axis=1)[0] is self.masked)
        self.assert_array_equal([2., 0.], self.average(ott, axis=0))
        result, wts = self.average(ott, axis=0, returned=1)
        self.assert_array_equal(wts, [1., 0.])
        # Weighted averages over rows/columns of a 2xN array.
        w1 = [0, 1, 1, 1, 1, 0]
        w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
        x = self.arange(6)
        self.assert_array_equal(self.average(x, axis=0), 2.5)
        self.assert_array_equal(self.average(x, axis=0, weights=w1), 2.5)
        y = self.array([self.arange(6), 2.0*self.arange(6)])
        self.assert_array_equal(self.average(y, None), np.add.reduce(np.arange(6))*3./12.)
        self.assert_array_equal(self.average(y, axis=0), np.arange(6) * 3./2.)
        self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0])
        self.assert_array_equal(self.average(y, None, weights=w2), 20./6.)
        self.assert_array_equal(self.average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.])
        self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0])
        # Explicit mask combinations (all-clear, partial, all-set).
        m1 = self.zeros(6)
        m2 = [0, 0, 1, 1, 0, 0]
        m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
        m4 = self.ones(6)
        m5 = [0, 1, 1, 1, 1, 1]
        self.assert_array_equal(self.average(self.masked_array(x, m1), axis=0), 2.5)
        self.assert_array_equal(self.average(self.masked_array(x, m2), axis=0), 2.5)
#        assert(self.average(masked_array(x, m4),axis=0) is masked)
        self.assert_array_equal(self.average(self.masked_array(x, m5), axis=0), 0.0)
        self.assert_array_equal(self.count(self.average(self.masked_array(x, m4), axis=0)), 0)
        z = self.masked_array(y, m3)
        self.assert_array_equal(self.average(z, None), 20./6.)
        self.assert_array_equal(self.average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5])
        self.assert_array_equal(self.average(z, axis=1), [2.5, 5.0])
        self.assert_array_equal(self.average(z, axis=0, weights=w2), [0., 1., 99., 99., 4.0, 10.0])
#------------------------
    def test_A(self):
        # Masked reshape smoke test.  NOTE(review): `y` is unused and the
        # reshaped `x` is never exercised — the benchmark body looks
        # unfinished.
        x = self.arange(24)
        y = np.arange(24)
        x[5:6] = self.masked
        x = x.reshape(2, 3, 4)
################################################################################
if __name__ == '__main__':
    # Benchmark driver: time each moduletester.test_N() against the current
    # np.ma.core implementation.  Alternative implementations (core_ini,
    # core_alt, core_tmp) are left commented out.  Assumes `timeit`, `np`
    # and `moduletester` are defined at module level — confirm.
    setup_base = "from __main__ import moduletester \n"\
                 "import numpy\n" \
                 "tester = moduletester(module)\n"
#    setup_new = "import np.ma.core_ini as module\n"+setup_base
    setup_cur = "import np.ma.core as module\n"+setup_base
#    setup_alt = "import np.ma.core_alt as module\n"+setup_base
#    setup_tmp = "import np.ma.core_tmp as module\n"+setup_base
    (nrepeat, nloop) = (10, 10)
    if 1:
        for i in range(1, 8):
            func = 'tester.test_%i()' % i
#            new = timeit.Timer(func, setup_new).repeat(nrepeat, nloop*10)
            cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10)
#            alt = timeit.Timer(func, setup_alt).repeat(nrepeat, nloop*10)
#            tmp = timeit.Timer(func, setup_tmp).repeat(nrepeat, nloop*10)
#            new = np.sort(new)
            cur = np.sort(cur)
#            alt = np.sort(alt)
#            tmp = np.sort(tmp)
            # Report the two best timings out of `nrepeat` runs.
            print("#%i" % i +50*'.')
            print(eval("moduletester.test_%i.__doc__" % i))
#            print "core_ini : %.3f - %.3f" % (new[0], new[1])
            print("core_current : %.3f - %.3f" % (cur[0], cur[1]))
#            print "core_alt : %.3f - %.3f" % (alt[0], alt[1])
#            print "core_tmp : %.3f - %.3f" % (tmp[0], tmp[1])
| mit |
cognitiveclass/edx-platform | common/djangoapps/service_status/views.py | 51 | 1304 | """
Django Views for service status app
"""
import json
import time
from django.http import HttpResponse
from dogapi import dog_stats_api
from service_status import tasks
from djcelery import celery
from celery.exceptions import TimeoutError
def index(_):
    """
    An empty view

    Returns an empty HTTP 200 response; the request argument is unused.
    """
    return HttpResponse()
@dog_stats_api.timed('status.service.celery.status')
def celery_status(_):
    """
    A view that returns Celery stats

    Returns worker statistics from ``celery.control.inspect().stats()``
    as pretty-printed JSON; an empty JSON object when no worker responds.
    """
    stats = celery.control.inspect().stats() or {}
    return HttpResponse(json.dumps(stats, indent=4),
                        content_type="application/json")
@dog_stats_api.timed('status.service.celery.ping')
def celery_ping(_):
    """
    A Simple view that checks if Celery can process a simple task

    Dispatches a ``delayed_ping`` task and waits up to 4 seconds for its
    result, reporting success/failure, the task id, the returned value and
    the elapsed wall-clock time as JSON.
    """
    started = time.time()
    async_result = tasks.delayed_ping.apply_async(('ping', 0.1))
    ping_id = async_result.id

    # Block (bounded) until the worker answers; a timeout means failure.
    try:
        outcome = async_result.get(timeout=4.0)
        ok = True
    except TimeoutError:
        outcome = None
        ok = False

    payload = {
        'success': ok,
        'task_id': ping_id,
        'value': outcome,
        'time': time.time() - started,
    }
    return HttpResponse(json.dumps(payload, indent=4),
                        content_type="application/json")
| agpl-3.0 |
brandond/ansible | lib/ansible/modules/windows/win_pagefile.py | 52 | 3997 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Liran Nisanov <lirannis@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_pagefile
version_added: "2.4"
short_description: Query or change pagefile configuration
description:
- Query current pagefile configuration.
- Enable/Disable AutomaticManagedPagefile.
- Create new or override pagefile configuration.
options:
drive:
description:
- The drive of the pagefile.
type: str
initial_size:
description:
- The initial size of the pagefile in megabytes.
type: int
maximum_size:
description:
- The maximum size of the pagefile in megabytes.
type: int
override:
description:
- Override the current pagefile on the drive.
type: bool
default: yes
system_managed:
description:
- Configures current pagefile to be managed by the system.
type: bool
default: no
automatic:
description:
- Configures AutomaticManagedPagefile for the entire system.
type: bool
remove_all:
description:
- Remove all pagefiles in the system, not including automatic managed.
type: bool
default: no
test_path:
description:
- Use Test-Path on the drive to make sure the drive is accessible before creating the pagefile.
type: bool
default: yes
state:
description:
- State of the pagefile.
type: str
choices: [ absent, present, query ]
default: query
notes:
- There is difference between automatic managed pagefiles that configured once for the entire system and system managed pagefile that configured per pagefile.
- InitialSize 0 and MaximumSize 0 means the pagefile is managed by the system.
- Value out of range exception may be caused by several different issues, two common problems - No such drive, Pagefile size is too small.
- Setting a pagefile when AutomaticManagedPagefile is on will disable the AutomaticManagedPagefile.
author:
- Liran Nisanov (@LiranNis)
'''
EXAMPLES = r'''
- name: Query pagefiles configuration
win_pagefile:
- name: Query C pagefile
win_pagefile:
drive: C
- name: Set C pagefile, don't override if exists
win_pagefile:
drive: C
initial_size: 1024
maximum_size: 1024
override: no
state: present
- name: Set C pagefile, override if exists
win_pagefile:
drive: C
initial_size: 1024
maximum_size: 1024
state: present
- name: Remove C pagefile
win_pagefile:
drive: C
state: absent
- name: Remove all current pagefiles, enable AutomaticManagedPagefile and query at the end
win_pagefile:
remove_all: yes
automatic: yes
- name: Remove all pagefiles disable AutomaticManagedPagefile and set C pagefile
win_pagefile:
drive: C
initial_size: 2048
maximum_size: 2048
remove_all: yes
automatic: no
state: present
- name: Set D pagefile, override if exists
win_pagefile:
drive: d
initial_size: 1024
maximum_size: 1024
state: present
'''
RETURN = r'''
automatic_managed_pagefiles:
description: Whether the pagefiles is automatically managed.
returned: When state is query.
type: bool
sample: true
pagefiles:
description: Contains caption, description, initial_size, maximum_size and name for each pagefile in the system.
returned: When state is query.
type: list
sample:
[{"caption": "c:\\ 'pagefile.sys'", "description": "'pagefile.sys' @ c:\\", "initial_size": 2048, "maximum_size": 2048, "name": "c:\\pagefile.sys"},
{"caption": "d:\\ 'pagefile.sys'", "description": "'pagefile.sys' @ d:\\", "initial_size": 1024, "maximum_size": 1024, "name": "d:\\pagefile.sys"}]
'''
| gpl-3.0 |
smx-smx/dsl-n55u-bender | release/src/router/libxml2/doc/index.py | 87 | 32908 | #!/usr/bin/python -u
#
# imports the API description and fills up a database with
# name relevance to modules, functions or web pages
#
# Operation needed:
# =================
#
# install mysqld, the python wrappers for mysql and libxml2, start mysqld
# Change the root passwd of mysql:
# mysqladmin -u root password new_password
# Create the new database xmlsoft
# mysqladmin -p create xmlsoft
# Create a database user 'veillard' and give him passord access
# change veillard and abcde with the right user name and passwd
# mysql -p
# password:
# mysql> GRANT ALL PRIVILEGES ON xmlsoft TO veillard@localhost
# IDENTIFIED BY 'abcde' WITH GRANT OPTION;
#
# As the user check the access:
# mysql -p xmlsoft
# Enter password:
# Welcome to the MySQL monitor....
# mysql> use xmlsoft
# Database changed
# mysql> quit
# Bye
#
# Then run the script in the doc subdir, it will create the symbols and
# word tables and populate them with informations extracted from
# the libxml2-api.xml API description, and make then accessible read-only
# by nobody@loaclhost the user expected to be Apache's one
#
# On the Apache configuration, make sure you have php support enabled
#
import MySQLdb
import libxml2
import sys
import string
import os
#
# We are not interested in parsing errors here
#
def callback(ctx, str):
    """Silently discard libxml2 parser errors (we only index, not validate)."""
    return

# Install the no-op handler so parse errors do not clutter the output.
libxml2.registerErrorHandler(callback, None)
#
# The dictionnary of tables required and the SQL command needed
# to create them
#
TABLES={
"symbols" : """CREATE TABLE symbols (
name varchar(255) BINARY NOT NULL,
module varchar(255) BINARY NOT NULL,
type varchar(25) NOT NULL,
descr varchar(255),
UNIQUE KEY name (name),
KEY module (module))""",
"words" : """CREATE TABLE words (
name varchar(50) BINARY NOT NULL,
symbol varchar(255) BINARY NOT NULL,
relevance int,
KEY name (name),
KEY symbol (symbol),
UNIQUE KEY ID (name, symbol))""",
"wordsHTML" : """CREATE TABLE wordsHTML (
name varchar(50) BINARY NOT NULL,
resource varchar(255) BINARY NOT NULL,
section varchar(255),
id varchar(50),
relevance int,
KEY name (name),
KEY resource (resource),
UNIQUE KEY ref (name, resource))""",
"wordsArchive" : """CREATE TABLE wordsArchive (
name varchar(50) BINARY NOT NULL,
ID int(11) NOT NULL,
relevance int,
KEY name (name),
UNIQUE KEY ref (name, ID))""",
"pages" : """CREATE TABLE pages (
resource varchar(255) BINARY NOT NULL,
title varchar(255) BINARY NOT NULL,
UNIQUE KEY name (resource))""",
"archives" : """CREATE TABLE archives (
ID int(11) NOT NULL auto_increment,
resource varchar(255) BINARY NOT NULL,
title varchar(255) BINARY NOT NULL,
UNIQUE KEY id (ID,resource(255)),
INDEX (ID),
INDEX (resource))""",
"Queries" : """CREATE TABLE Queries (
ID int(11) NOT NULL auto_increment,
Value varchar(50) NOT NULL,
Count int(11) NOT NULL,
UNIQUE KEY id (ID,Value(35)),
INDEX (ID))""",
"AllQueries" : """CREATE TABLE AllQueries (
ID int(11) NOT NULL auto_increment,
Value varchar(50) NOT NULL,
Count int(11) NOT NULL,
UNIQUE KEY id (ID,Value(35)),
INDEX (ID))""",
}
#
# The XML API description file to parse
#
API="libxml2-api.xml"
DB=None
#########################################################################
# #
# MySQL database interfaces #
# #
#########################################################################
def createTable(db, name):
    """Drop and recreate table *name* using the SQL stored in TABLES.

    Returns the cursor result on success, -1 on bad arguments or failure.
    """
    global TABLES

    if db == None:
        return -1
    if name == None:
        return -1
    c = db.cursor()

    # Drop any stale version first so the CREATE below starts clean.
    ret = c.execute("DROP TABLE IF EXISTS %s" % (name))
    if ret == 1:
        print "Removed table %s" % (name)
    print "Creating table %s" % (name)
    try:
        ret = c.execute(TABLES[name])
    except:
        print "Failed to create table %s" % (name)
        return -1
    return ret
def checkTables(db, verbose = 1):
    """Verify that every table in TABLES exists and is readable.

    Missing tables are created, broken ones repaired, and read-only access
    is granted to the Apache user (nobody@localhost).  Returns 0.
    """
    global TABLES

    if db == None:
        return -1
    c = db.cursor()
    nbtables = c.execute("show tables")
    if verbose:
        print "Found %d tables" % (nbtables)
    tables = {}
    i = 0
    while i < nbtables:
        l = c.fetchone()
        name = l[0]
        tables[name] = {}
        i = i + 1

    for table in TABLES.keys():
        if not tables.has_key(table):
            print "table %s missing" % (table)
            createTable(db, table)
        # A failing COUNT(*) is taken as sign of corruption -> repair.
        try:
            ret = c.execute("SELECT count(*) from %s" % table);
            row = c.fetchone()
            if verbose:
                print "Table %s contains %d records" % (table, row[0])
        except:
            print "Troubles with table %s : repairing" % (table)
            ret = c.execute("repair table %s" % table);
            print "repairing returned %d" % (ret)
            ret = c.execute("SELECT count(*) from %s" % table);
            row = c.fetchone()
            print "Table %s contains %d records" % (table, row[0])
    if verbose:
        print "checkTables finished"

    # make sure apache can access the tables read-only
    try:
        ret = c.execute("GRANT SELECT ON xmlsoft.* TO nobody@localhost")
        ret = c.execute("GRANT INSERT,SELECT,UPDATE ON xmlsoft.Queries TO nobody@localhost")
    except:
        pass
    return 0
def openMySQL(db="xmlsoft", passwd=None, verbose = 1):
    """Open the shared MySQL connection (global DB) and sanity-check tables.

    The password is taken from the MySQL_PASS environment variable when not
    given; the process exits if no password is available.
    """
    global DB

    if passwd == None:
        try:
            passwd = os.environ["MySQL_PASS"]
        except:
            print "No password available, set environment MySQL_PASS"
            sys.exit(1)

    DB = MySQLdb.connect(passwd=passwd, db=db)
    if DB == None:
        return -1
    ret = checkTables(DB, verbose)
    return ret
def updateWord(name, symbol, relevance):
    """Insert or update one (word, symbol, relevance) row in `words`.

    Tries an INSERT first and falls back to an UPDATE when the row already
    exists (unique key violation).  Returns the cursor result or -1.
    """
    global DB

    if DB == None:
        openMySQL()
    if DB == None:
        return -1
    if name == None:
        return -1
    if symbol == None:
        return -1

    c = DB.cursor()
    try:
        ret = c.execute(
            """INSERT INTO words (name, symbol, relevance) VALUES ('%s','%s', %d)""" %
            (name, symbol, relevance))
    except:
        try:
            ret = c.execute(
                """UPDATE words SET relevance = %d where name = '%s' and symbol = '%s'""" %
                (relevance, name, symbol))
        except:
            print "Update word (%s, %s, %s) failed command" % (name, symbol, relevance)
            print "UPDATE words SET relevance = %d where name = '%s' and symbol = '%s'" % (relevance, name, symbol)
            print sys.exc_type, sys.exc_value
            return -1
    return ret
def updateSymbol(name, module, type, desc):
    """Register an API symbol in the `symbols` table (insert or update).

    Also indexes the symbol name itself as a word with high relevance.
    Only the first sentence of *desc* (truncated to 99 chars, quotes
    stripped) is stored.  Returns the cursor result or -1.
    """
    global DB

    updateWord(name, name, 50)
    if DB == None:
        openMySQL()
    if DB == None:
        return -1
    if name == None:
        return -1
    if module == None:
        return -1
    if type == None:
        return -1

    # Keep only the first sentence, quote-escaped and length-capped.
    try:
        desc = string.replace(desc, "'", " ")
        l = string.split(desc, ".")
        desc = l[0]
        desc = desc[0:99]
    except:
        desc = ""

    c = DB.cursor()
    try:
        ret = c.execute(
            """INSERT INTO symbols (name, module, type, descr) VALUES ('%s','%s', '%s', '%s')""" %
            (name, module, type, desc))
    except:
        try:
            ret = c.execute(
                """UPDATE symbols SET module='%s', type='%s', descr='%s' where name='%s'""" %
                (module, type, desc, name))
        except:
            print "Update symbol (%s, %s, %s) failed command" % (name, module, type)
            print """UPDATE symbols SET module='%s', type='%s', descr='%s' where name='%s'""" % (module, type, desc, name)
            print sys.exc_type, sys.exc_value
            return -1
    return ret
# Thin wrappers registering a symbol of a specific kind in the `symbols`
# table; the kind string ends up in the `type` column.
def addFunction(name, module, desc = ""):
    return updateSymbol(name, module, 'function', desc)

def addMacro(name, module, desc = ""):
    return updateSymbol(name, module, 'macro', desc)

def addEnum(name, module, desc = ""):
    return updateSymbol(name, module, 'enum', desc)

def addStruct(name, module, desc = ""):
    return updateSymbol(name, module, 'struct', desc)

def addConst(name, module, desc = ""):
    return updateSymbol(name, module, 'const', desc)

def addType(name, module, desc = ""):
    return updateSymbol(name, module, 'type', desc)

def addFunctype(name, module, desc = ""):
    return updateSymbol(name, module, 'functype', desc)
def addPage(resource, title):
global DB
if DB == None:
openMySQL()
if DB == None:
return -1
if resource == None:
return -1
c = DB.cursor()
try:
ret = c.execute(
"""INSERT INTO pages (resource, title) VALUES ('%s','%s')""" %
(resource, title))
except:
try:
ret = c.execute(
"""UPDATE pages SET title='%s' WHERE resource='%s'""" %
(title, resource))
except:
print "Update symbol (%s, %s, %s) failed command" % (name, module, type)
print """UPDATE pages SET title='%s' WHERE resource='%s'""" % (title, resource)
print sys.exc_type, sys.exc_value
return -1
return ret
def updateWordHTML(name, resource, desc, id, relevance):
    """Insert or update one word occurrence on an HTML page in `wordsHTML`.

    *desc* is the section title, *id* the anchor within the page.  Returns
    the cursor result or -1 on failure.
    """
    global DB

    if DB == None:
        openMySQL()
    if DB == None:
        return -1
    if name == None:
        return -1
    if resource == None:
        return -1
    if id == None:
        id = ""
    # Section titles are quote-escaped and capped at 99 characters.
    if desc == None:
        desc = ""
    else:
        try:
            desc = string.replace(desc, "'", " ")
            desc = desc[0:99]
        except:
            desc = ""

    c = DB.cursor()
    try:
        ret = c.execute(
            """INSERT INTO wordsHTML (name, resource, section, id, relevance) VALUES ('%s','%s', '%s', '%s', '%d')""" %
            (name, resource, desc, id, relevance))
    except:
        try:
            ret = c.execute(
                """UPDATE wordsHTML SET section='%s', id='%s', relevance='%d' where name='%s' and resource='%s'""" %
                (desc, id, relevance, name, resource))
        except:
            print "Update symbol (%s, %s, %d) failed command" % (name, resource, relevance)
            print """UPDATE wordsHTML SET section='%s', id='%s', relevance='%d' where name='%s' and resource='%s'""" % (desc, id, relevance, name, resource)
            print sys.exc_type, sys.exc_value
            return -1
    return ret
def checkXMLMsgArchive(url):
    """Return the archives.ID for *url*, or -1 if it is not yet indexed."""
    global DB

    if DB == None:
        openMySQL()
    if DB == None:
        return -1
    if url == None:
        return -1

    c = DB.cursor()
    try:
        ret = c.execute(
            """SELECT ID FROM archives WHERE resource='%s'""" % (url))
        row = c.fetchone()
        if row == None:
            return -1
    except:
        return -1
    return row[0]
def addXMLMsgArchive(url, title):
    """Register a mailing-list message in `archives` and return its new ID.

    The title is quote-escaped and capped at 99 characters.  Returns -1 on
    any failure.
    """
    global DB

    if DB == None:
        openMySQL()
    if DB == None:
        return -1
    if url == None:
        return -1
    if title == None:
        title = ""
    else:
        title = string.replace(title, "'", " ")
        title = title[0:99]

    c = DB.cursor()
    try:
        cmd = """INSERT INTO archives (resource, title) VALUES ('%s','%s')""" % (url, title)
        ret = c.execute(cmd)
        # Read back the auto_increment ID just assigned to this resource.
        cmd = """SELECT ID FROM archives WHERE resource='%s'""" % (url)
        ret = c.execute(cmd)
        row = c.fetchone()
        if row == None:
            print "addXMLMsgArchive failed to get the ID: %s" % (url)
            return -1
    except:
        print "addXMLMsgArchive failed command: %s" % (cmd)
        return -1
    return((int)(row[0]))
def updateWordArchive(name, id, relevance):
    """Insert or update one word occurrence in an archived message.

    *id* is the archives.ID of the message.  Returns the cursor result or
    -1 on failure.
    """
    global DB

    if DB == None:
        openMySQL()
    if DB == None:
        return -1
    if name == None:
        return -1
    if id == None:
        return -1

    c = DB.cursor()
    try:
        ret = c.execute(
            """INSERT INTO wordsArchive (name, id, relevance) VALUES ('%s', '%d', '%d')""" %
            (name, id, relevance))
    except:
        try:
            ret = c.execute(
                """UPDATE wordsArchive SET relevance='%d' where name='%s' and ID='%d'""" %
                (relevance, name, id))
        except:
            print "Update word archive (%s, %d, %d) failed command" % (name, id, relevance)
            print """UPDATE wordsArchive SET relevance='%d' where name='%s' and ID='%d'""" % (relevance, name, id)
            print sys.exc_type, sys.exc_value
            return -1
    return ret
#########################################################################
# #
# Word dictionnary and analysis routines #
# #
#########################################################################
#
# top 100 english word without the one len < 3 + own set
#
# Stop-word set: common English words (top-100 minus those shorter than 3
# letters) plus a few domain-specific noise tokens, never indexed.
dropWords = {
    'the':0, 'this':0, 'can':0, 'man':0, 'had':0, 'him':0, 'only':0,
    'and':0, 'not':0, 'been':0, 'other':0, 'even':0, 'are':0, 'was':0,
    'new':0, 'most':0, 'but':0, 'when':0, 'some':0, 'made':0, 'from':0,
    'who':0, 'could':0, 'after':0, 'that':0, 'will':0, 'time':0, 'also':0,
    'have':0, 'more':0, 'these':0, 'did':0, 'was':0, 'two':0, 'many':0,
    'they':0, 'may':0, 'before':0, 'for':0, 'which':0, 'out':0, 'then':0,
    'must':0, 'one':0, 'through':0, 'with':0, 'you':0, 'said':0,
    'first':0, 'back':0, 'were':0, 'what':0, 'any':0, 'years':0, 'his':0,
    'her':0, 'where':0, 'all':0, 'its':0, 'now':0, 'much':0, 'she':0,
    'about':0, 'such':0, 'your':0, 'there':0, 'into':0, 'like':0, 'may':0,
    'would':0, 'than':0, 'our':0, 'well':0, 'their':0, 'them':0, 'over':0,
    'down':0,
    'net':0, 'www':0, 'bad':0, 'Okay':0, 'bin':0, 'cur':0,
}

# In-memory accumulators: word -> {(module, symbol): relevance},
# word -> {resource: (relevance, id, section)}, word -> {archiveID: relevance}.
wordsDict = {}
wordsDictHTML = {}
wordsDictArchive = {}
def cleanupWordsString(str):
    """Prepare free text for word splitting.

    Every punctuation or markup character that could glue words together
    is mapped to a space, including the two bytes of a UTF-8 encoded
    non-breaking space (0xC2 0xA0).
    """
    for separator in (".", "!", "?", ",", "'", '"', ";",
                      "(", ")", "{", "}", "<", ">", "=",
                      "/", "*", ":", "#", "\\",
                      "\n", "\r", "\xc2", "\xa0"):
        str = str.replace(separator, " ")
    return str
def cleanupDescrString(str):
    """Normalize a description string for storage.

    Strips quotes/newlines/non-breaking-space bytes and collapses runs of
    whitespace into single spaces.
    """
    for separator in ("'", "\n", "\r", "\xc2", "\xa0"):
        str = str.replace(separator, " ")
    # Bug fix: the original called string.join(str) on the *string*, which
    # re-joined its individual characters with spaces and discarded the
    # split word list `l` it had just computed.  Join the words instead.
    str = " ".join(str.split())
    return str
def splitIdentifier(str):
    """Split a camelCase identifier into its lowercase word fragments.

    Example: "xmlParseFile" -> ['xml', 'parse', 'file'].  Non-letter
    characters are skipped and trailing digits of a word are discarded.
    """
    words = []
    rest = str
    while rest != "":
        token = rest[0].lower()
        rest = rest[1:]
        # Skip anything that is not a letter (digits, underscores, ...).
        if token < 'a' or token > 'z':
            continue
        # Absorb a following run of uppercase letters (acronyms), lowercased.
        while rest != "" and 'A' <= rest[0] <= 'Z':
            token = token + rest[0].lower()
            rest = rest[1:]
        # Then the lowercase tail of the word.
        while rest != "" and 'a' <= rest[0] <= 'z':
            token = token + rest[0]
            rest = rest[1:]
        # Trailing digits are consumed but not kept.
        while rest != "" and '0' <= rest[0] <= '9':
            rest = rest[1:]
        words.append(token)
    return words
def addWord(word, module, symbol, relevance):
    """Accumulate *relevance* for (word -> module, symbol) in wordsDict.

    Words shorter than 3 chars, stop words and non-ASCII-leading words are
    ignored.  A word referenced by more than 500 symbols is considered too
    common and blacklisted (its entry is set to None).  Returns the new
    accumulated relevance, 0 when skipped, -1 on bad arguments.
    """
    global wordsDict

    if word == None or len(word) < 3:
        return -1
    if module == None or symbol == None:
        return -1
    if dropWords.has_key(word):
        return 0
    if ord(word[0]) > 0x80:
        return 0

    if wordsDict.has_key(word):
        d = wordsDict[word]
        if d == None:
            # Previously blacklisted as too common.
            return 0
        if len(d) > 500:
            # Too frequent to be a useful search term: blacklist it.
            wordsDict[word] = None
            return 0
        try:
            relevance = relevance + d[(module, symbol)]
        except:
            pass
    else:
        wordsDict[word] = {}
    wordsDict[word][(module, symbol)] = relevance
    return relevance
def addString(str, module, symbol, relevance):
    """Index every word of *str* against (module, symbol).

    Returns the sum of the accumulated relevances, or -1 for empty input.
    """
    if str == None or len(str) < 3:
        return -1
    ret = 0
    str = cleanupWordsString(str)
    l = string.split(str)
    for word in l:
        if len(word) > 2:
            # Bug fix: the relevance argument used to be ignored and a
            # hard-coded 5 passed instead; addStringHTML() and
            # addStringArchive() honour their relevance parameter, and
            # callers pass distinct values (e.g. 7 for return-value docs).
            ret = ret + addWord(word, module, symbol, relevance)
    return ret
def addWordHTML(word, resource, id, section, relevance):
    """Accumulate *relevance* for a word occurring on an HTML page.

    Stores (relevance, anchor id, section title) per resource in
    wordsDictHTML.  The first id/section recorded for a (word, resource)
    pair wins on subsequent hits.  Returns the accumulated relevance,
    0 when skipped, -1 on bad arguments.
    """
    global wordsDictHTML

    if word == None or len(word) < 3:
        return -1
    if resource == None or section == None:
        return -1
    if dropWords.has_key(word):
        return 0
    if ord(word[0]) > 0x80:
        return 0

    section = cleanupDescrString(section)

    if wordsDictHTML.has_key(word):
        d = wordsDictHTML[word]
        if d == None:
            # Entry was blacklisted earlier.
            print "skipped %s" % (word)
            return 0
        try:
            (r,i,s) = d[resource]
            # Keep the first anchor/section seen, add up the relevance.
            if i != None:
                id = i
            if s != None:
                section = s
            relevance = relevance + r
        except:
            pass
    else:
        wordsDictHTML[word] = {}
        d = wordsDictHTML[word];
    d[resource] = (relevance, id, section)
    return relevance
def addStringHTML(str, resource, id, section, relevance):
    """Index every word of an HTML text fragment against *resource*.

    Returns the summed relevances or -1 for empty input.  NOTE(review):
    unlike addStringArchive(), a negative return from addWordHTML() is
    still added into `ret` after the failure is printed — confirm whether
    that is intended.
    """
    if str == None or len(str) < 3:
        return -1
    ret = 0
    str = cleanupWordsString(str)
    l = string.split(str)
    for word in l:
        if len(word) > 2:
            try:
                r = addWordHTML(word, resource, id, section, relevance)
                if r < 0:
                    print "addWordHTML failed: %s %s" % (word, resource)
                ret = ret + r
            except:
                print "addWordHTML failed: %s %s %d" % (word, resource, relevance)
                print sys.exc_type, sys.exc_value
    return ret
def addWordArchive(word, id, relevance):
    """Accumulate *relevance* for a word in archived message *id*.

    Mirrors addWord()/addWordHTML() for the mailing-list archive index.
    Returns the accumulated relevance, 0 when skipped, -1 on bad input.
    """
    global wordsDictArchive

    if word == None or len(word) < 3:
        return -1
    if id == None or id == -1:
        return -1
    if dropWords.has_key(word):
        return 0
    if ord(word[0]) > 0x80:
        return 0

    if wordsDictArchive.has_key(word):
        d = wordsDictArchive[word]
        if d == None:
            # Entry was blacklisted earlier.
            print "skipped %s" % (word)
            return 0
        try:
            r = d[id]
            relevance = relevance + r
        except:
            pass
    else:
        wordsDictArchive[word] = {}
        d = wordsDictArchive[word];
    d[id] = relevance
    return relevance
def addStringArchive(str, id, relevance):
    """Index every word of a message fragment against archive entry *id*.

    Returns the summed relevances or -1 for empty input.
    """
    if str == None or len(str) < 3:
        return -1
    ret = 0
    str = cleanupWordsString(str)
    l = string.split(str)
    for word in l:
        i = len(word)
        if i > 2:
            try:
                r = addWordArchive(word, id, relevance)
                if r < 0:
                    print "addWordArchive failed: %s %s" % (word, id)
                else:
                    ret = ret + r
            except:
                print "addWordArchive failed: %s %s %d" % (word, id, relevance)
                print sys.exc_type, sys.exc_value
    return ret
#########################################################################
# #
# XML API description analysis #
# #
#########################################################################
def loadAPI(filename):
    """Parse the XML API description file and return the libxml2 document."""
    doc = libxml2.parseFile(filename)
    print "loaded %s" % (filename)
    return doc
def foundExport(file, symbol):
    """Register an exported symbol of *file* and index its name fragments.

    Returns 1 when the symbol was processed, 0 on missing arguments.
    """
    if file == None:
        return 0
    if symbol == None:
        return 0
    addFunction(symbol, file)
    l = splitIdentifier(symbol)
    for word in l:
        addWord(word, file, symbol, 10)
    return 1
def analyzeAPIFile(top):
count = 0
name = top.prop("name")
cur = top.children
while cur != None:
if cur.type == 'text':
cur = cur.next
continue
if cur.name == "exports":
count = count + foundExport(name, cur.prop("symbol"))
else:
print "unexpected element %s in API doc <file name='%s'>" % (name)
cur = cur.next
return count
def analyzeAPIFiles(top):
    """Walk the <files> element and analyze each <file> child.

    Returns the total number of exports registered.
    """
    count = 0
    cur = top.children

    while cur != None:
        if cur.type == 'text':
            cur = cur.next
            continue
        if cur.name == "file":
            count = count + analyzeAPIFile(cur)
        else:
            print "unexpected element %s in API doc <files>" % (cur.name)
        cur = cur.next
    return count
def _analyzeAPISimple(top, register):
    # Shared worker for the enum/const/typedef/functype analyzers: register
    # the symbol under its owning module file and index the lowercase
    # fragments of its identifier.  Returns 1 on success, 0 when the
    # element lacks a file or name attribute.
    file = top.prop("file")
    if file == None:
        return 0
    symbol = top.prop("name")
    if symbol == None:
        return 0
    register(symbol, file)
    l = splitIdentifier(symbol)
    for word in l:
        addWord(word, file, symbol, 10)
    return 1

def analyzeAPIEnum(top):
    """Index an <enum> element of the API description."""
    return _analyzeAPISimple(top, addEnum)

def analyzeAPIConst(top):
    """Index a <const> element of the API description."""
    return _analyzeAPISimple(top, addConst)

def analyzeAPIType(top):
    """Index a <typedef> element of the API description."""
    return _analyzeAPISimple(top, addType)

def analyzeAPIFunctype(top):
    """Index a <functype> element of the API description."""
    return _analyzeAPISimple(top, addFunctype)
def analyzeAPIStruct(top):
    """Index a <struct> element: the symbol itself plus its info text.

    Returns 1 on success, 0 when file or name attributes are missing.
    """
    file = top.prop("file")
    if file == None:
        return 0
    symbol = top.prop("name")
    if symbol == None:
        return 0
    addStruct(symbol, file)
    l = splitIdentifier(symbol)
    for word in l:
        addWord(word, file, symbol, 10)

    # Words of the description carry a lower relevance than the name.
    info = top.prop("info")
    if info != None:
        info = string.replace(info, "'", " ")
        info = string.strip(info)
        l = string.split(info)
        for word in l:
            if len(word) > 2:
                addWord(word, file, symbol, 5)
    return 1
def analyzeAPIMacro(top):
    """Index a <macro> element: its name fragments plus its <info> child.

    Returns 1 on success, 0 when required data is missing.
    """
    file = top.prop("file")
    if file == None:
        return 0
    symbol = top.prop("name")
    if symbol == None:
        return 0
    symbol = string.replace(symbol, "'", " ")
    symbol = string.strip(symbol)

    # Find the first <info> child holding the macro description.
    info = None
    cur = top.children
    while cur != None:
        if cur.type == 'text':
            cur = cur.next
            continue
        if cur.name == "info":
            info = cur.content
            break
        cur = cur.next

    l = splitIdentifier(symbol)
    for word in l:
        addWord(word, file, symbol, 10)

    if info == None:
        addMacro(symbol, file)
        print "Macro %s description has no <info>" % (symbol)
        return 0

    info = string.replace(info, "'", " ")
    info = string.strip(info)
    addMacro(symbol, file, info)
    l = string.split(info)
    for word in l:
        if len(word) > 2:
            addWord(word, file, symbol, 5)
    return 1
def analyzeAPIFunction(top):
    """Index a <function> element: description, return info, arguments.

    Return-value descriptions are weighted 7, argument descriptions 5 and
    argument names 7; the function's own identifier fragments 10.
    Returns 1 on success, 0 when file or name attributes are missing.
    """
    file = top.prop("file")
    if file == None:
        return 0
    symbol = top.prop("name")
    if symbol == None:
        return 0

    symbol = string.replace(symbol, "'", " ")
    symbol = string.strip(symbol)
    info = None
    cur = top.children
    while cur != None:
        if cur.type == 'text':
            cur = cur.next
            continue
        if cur.name == "info":
            info = cur.content
        elif cur.name == "return":
            rinfo = cur.prop("info")
            if rinfo != None:
                rinfo = string.replace(rinfo, "'", " ")
                rinfo = string.strip(rinfo)
                addString(rinfo, file, symbol, 7)
        elif cur.name == "arg":
            ainfo = cur.prop("info")
            if ainfo != None:
                ainfo = string.replace(ainfo, "'", " ")
                ainfo = string.strip(ainfo)
                addString(ainfo, file, symbol, 5)
            name = cur.prop("name")
            if name != None:
                name = string.replace(name, "'", " ")
                name = string.strip(name)
                addWord(name, file, symbol, 7)
        cur = cur.next
    if info == None:
        print "Function %s description has no <info>" % (symbol)
        addFunction(symbol, file, "")
    else:
        info = string.replace(info, "'", " ")
        info = string.strip(info)
        addFunction(symbol, file, info)
        addString(info, file, symbol, 5)

    l = splitIdentifier(symbol)
    for word in l:
        addWord(word, file, symbol, 10)
    return 1
def analyzeAPISymbols(top):
    """Dispatch every child of <symbols> to the matching analyzer.

    Returns the total number of symbols successfully indexed.
    """
    count = 0
    cur = top.children

    while cur != None:
        if cur.type == 'text':
            cur = cur.next
            continue
        if cur.name == "macro":
            count = count + analyzeAPIMacro(cur)
        elif cur.name == "function":
            count = count + analyzeAPIFunction(cur)
        elif cur.name == "const":
            count = count + analyzeAPIConst(cur)
        elif cur.name == "typedef":
            count = count + analyzeAPIType(cur)
        elif cur.name == "struct":
            count = count + analyzeAPIStruct(cur)
        elif cur.name == "enum":
            count = count + analyzeAPIEnum(cur)
        elif cur.name == "functype":
            count = count + analyzeAPIFunctype(cur)
        else:
            print "unexpected element %s in API doc <files>" % (cur.name)
        cur = cur.next
    return count
def analyzeAPI(doc):
    """Walk the parsed API document (root <api>) and index its symbols.

    The <files> section is currently skipped; only <symbols> is indexed.
    Returns the number of symbols processed, or -1 on a bad document.
    """
    count = 0
    if doc == None:
        return -1
    root = doc.getRootElement()
    if root.name != "api":
        print "Unexpected root name"
        return -1
    cur = root.children
    while cur != None:
        if cur.type == 'text':
            cur = cur.next
            continue
        if cur.name == "files":
            pass
            # count = count + analyzeAPIFiles(cur)
        elif cur.name == "symbols":
            count = count + analyzeAPISymbols(cur)
        else:
            print "unexpected element %s in API doc" % (cur.name)
        cur = cur.next
    return count
#########################################################################
# #
# Web pages parsing and analysis #
# #
#########################################################################
import glob
def _analyzeHTMLContent(doc, resource, node, section, id):
    # Shared worker for the per-node HTML analyzers below: index the
    # node's textual content against the page, returning the summed word
    # relevance or -1 when the content cannot be read.
    words = 0
    try:
        content = node.content
        words = words + addStringHTML(content, resource, id, section, 5)
    except:
        return -1
    return words

def analyzeHTMLText(doc, resource, p, section, id):
    """Index the words of a text node of an HTML page."""
    return _analyzeHTMLContent(doc, resource, p, section, id)

def analyzeHTMLPara(doc, resource, p, section, id):
    """Index the words of a <p> element of an HTML page."""
    return _analyzeHTMLContent(doc, resource, p, section, id)

def analyzeHTMLPre(doc, resource, p, section, id):
    """Index the words of a <pre> element of an HTML page."""
    return _analyzeHTMLContent(doc, resource, p, section, id)
# NOTE(review): this definition is immediately shadowed by the
# analyzeHTML(doc, resource) defined right below it, so it is dead code
# and is never called.
def analyzeHTML(doc, resource, p, section, id):
    words = 0
    try:
        content = p.content
        words = words + addStringHTML(content, resource, id, section, 5)
    except:
        return -1
    return words
def analyzeHTML(doc, resource):
    """Index a parsed HTML page: register its title and walk its content.

    Headings (h1-h3) update the current section/anchor context; text, <p>
    and <pre> nodes are indexed under that context.  Returns the number of
    content nodes processed.
    """
    para = 0;
    ctxt = doc.xpathNewContext()
    # Use the <title> as the page label, with a fallback on the resource.
    try:
        res = ctxt.xpathEval("//head/title")
        title = res[0].content
    except:
        title = "Page %s" % (resource)
    addPage(resource, title)
    try:
        items = ctxt.xpathEval("//h1 | //h2 | //h3 | //text()")
        section = title
        id = ""
        for item in items:
            if item.name == 'h1' or item.name == 'h2' or item.name == 'h3':
                # New section: remember its heading and anchor.
                section = item.content
                if item.prop("id"):
                    id = item.prop("id")
                elif item.prop("name"):
                    id = item.prop("name")
            elif item.type == 'text':
                analyzeHTMLText(doc, resource, item, section, id)
                para = para + 1
            elif item.name == 'p':
                analyzeHTMLPara(doc, resource, item, section, id)
                para = para + 1
            elif item.name == 'pre':
                analyzeHTMLPre(doc, resource, item, section, id)
                para = para + 1
            else:
                print "Page %s, unexpected %s element" % (resource, item.name)
    except:
        print "Page %s: problem analyzing" % (resource)
        print sys.exc_type, sys.exc_value

    return para
def analyzeHTMLPages():
    """Index every HTML page in the current dir and tutorial/ subdir.

    API* pages and xml.html are skipped.  Pages that fail XML parsing are
    retried with the more lenient HTML parser.  Returns the number of
    pages successfully analyzed.
    """
    ret = 0
    HTMLfiles = glob.glob("*.html") + glob.glob("tutorial/*.html")
    for html in HTMLfiles:
        if html[0:3] == "API":
            continue
        if html == "xml.html":
            continue
        try:
            doc = libxml2.parseFile(html)
        except:
            doc = libxml2.htmlParseFile(html, None)
        try:
            res = analyzeHTML(doc, html)
            print "Parsed %s : %d paragraphs" % (html, res)
            ret = ret + 1
        except:
            print "could not parse %s" % (html)
    return ret
#########################################################################
# #
# Mail archives parsing and analysis #
# #
#########################################################################
import time
def getXMLDateArchive(t = None):
    """Return the URL of the xml mailing-list date index for the month
    containing time t (seconds since the epoch, interpreted as UTC).
    Defaults to the current time when t is None."""
    if t is None:
        # was "t == None": identity is the correct None test and keeps
        # t=0 (the epoch) as a valid explicit argument
        t = time.time()
    T = time.gmtime(t)
    month = time.strftime("%B", T)
    year = T[0]
    url = "http://mail.gnome.org/archives/xml/%d-%s/date.html" % (year, month)
    return url
def scanXMLMsgArchive(url, title, force = 0):
    """Fetch one archived mail message page and index its title (weight 20)
    and the text inside <pre> blocks (weight 5).

    Skips messages already present in the archive DB unless force is set.
    Returns 1 if the message was indexed, 0 otherwise.
    """
    if url == None or title == None:
        return 0
    ID = checkXMLMsgArchive(url)
    if force == 0 and ID != -1:
        # Already known and no re-index requested.
        return 0
    if ID == -1:
        ID = addXMLMsgArchive(url, title)
        if ID == -1:
            return 0
    try:
        print "Loading %s" % (url)
        doc = libxml2.htmlParseFile(url, None);
    except:
        doc = None
    if doc == None:
        print "Failed to parse %s" % (url)
        return 0
    addStringArchive(title, ID, 20)
    ctxt = doc.xpathNewContext()
    texts = ctxt.xpathEval("//pre//text()")
    for text in texts:
        addStringArchive(text.content, ID, 5)
    return 1
def scanXMLDateArchive(t = None, force = 0):
    """Scan the mailing-list date index for the month containing t and index
    every linked msg* page via scanXMLMsgArchive().

    Resets the global wordsDictArchive accumulator first.
    Returns the number of newly indexed messages, or -1 if the index page
    could not be parsed.
    """
    global wordsDictArchive

    wordsDictArchive = {}

    url = getXMLDateArchive(t)
    print "loading %s" % (url)
    try:
        doc = libxml2.htmlParseFile(url, None);
    except:
        doc = None
    if doc == None:
        print "Failed to parse %s" % (url)
        return -1

    ctxt = doc.xpathNewContext()
    anchors = ctxt.xpathEval("//a[@href]")
    links = 0
    newmsg = 0
    for anchor in anchors:
        href = anchor.prop("href")
        # Only per-message links (msg00001.html etc.) are interesting.
        if href == None or href[0:3] != "msg":
            continue
        try:
            links = links + 1

            msg = libxml2.buildURI(href, url)
            title = anchor.content
            # Strip the reply and list-tag prefixes from the subject.
            if title != None and title[0:4] == 'Re: ':
                title = title[4:]
            if title != None and title[0:6] == '[xml] ':
                title = title[6:]
            newmsg = newmsg + scanXMLMsgArchive(msg, title, force)

        except:
            pass

    return newmsg
#########################################################################
# #
# Main code: open the DB, the API XML and analyze it #
# #
#########################################################################
def analyzeArchives(t = None, force = 0):
    """Scan one month of list archives, then flush the accumulated
    word/message associations from wordsDictArchive into the database
    via updateWordArchive()."""
    global wordsDictArchive

    ret = scanXMLDateArchive(t, force)
    print "Indexed %d words in %d archive pages" % (len(wordsDictArchive), ret)
    i = 0
    skipped = 0
    for word in wordsDictArchive.keys():
        refs = wordsDictArchive[word]
        if refs == None:
            # Entry deliberately nulled out (e.g. over-frequent word).
            skipped = skipped + 1
            continue;
        for id in refs.keys():
            relevance = refs[id]
            updateWordArchive(word, id, relevance)
            i = i + 1
    print "Found %d associations in HTML pages" % (i)
def analyzeHTMLTop():
    """Index all local HTML documentation pages, then flush the accumulated
    word/resource associations from wordsDictHTML into the database via
    updateWordHTML()."""
    global wordsDictHTML

    ret = analyzeHTMLPages()
    print "Indexed %d words in %d HTML pages" % (len(wordsDictHTML), ret)

    i = 0
    skipped = 0
    for word in wordsDictHTML.keys():
        refs = wordsDictHTML[word]
        if refs == None:
            # Entry deliberately nulled out (e.g. over-frequent word).
            skipped = skipped + 1
            continue;
        for resource in refs.keys():
            (relevance, id, section) = refs[resource]
            updateWordHTML(word, resource, section, id, relevance)
            i = i + 1
    print "Found %d associations in HTML pages" % (i)
def analyzeAPITop():
    """Load and analyze the API description document (global API), then flush
    the word/symbol associations accumulated in wordsDict into the database
    via updateWord().  Exits the process on parse failure."""
    global wordsDict
    global API

    try:
        doc = loadAPI(API)
        ret = analyzeAPI(doc)
        print "Analyzed %d blocs" % (ret)
        doc.freeDoc()
    except:
        print "Failed to parse and analyze %s" % (API)
        print sys.exc_type, sys.exc_value
        sys.exit(1)

    print "Indexed %d words" % (len(wordsDict))
    i = 0
    skipped = 0
    for word in wordsDict.keys():
        refs = wordsDict[word]
        if refs == None:
            # Entry deliberately nulled out (e.g. over-frequent word).
            skipped = skipped + 1
            continue;
        for (module, symbol) in refs.keys():
            updateWord(word, symbol, refs[(module, symbol)])
            i = i + 1
    print "Found %d associations, skipped %d words" % (i, skipped)
def usage():
    # Print the command-line synopsis and exit with a failure status.
    print "Usage index.py [--force] [--archive] [--archive-year year] [--archive-month month] [--API] [--docs]"
    sys.exit(1)
def main():
    """Command-line entry point: open the database, then process the flags
    in order.  Flags are cumulative; later flags see force=1 once --force
    has been seen.  With no arguments, prints usage and exits."""
    try:
        openMySQL()
    except:
        print "Failed to open the database"
        print sys.exc_type, sys.exc_value
        sys.exit(1)
    args = sys.argv[1:]
    force = 0
    if args:
        i = 0
        while i < len(args):
            if args[i] == '--force':
                force = 1
            elif args[i] == '--archive':
                analyzeArchives(None, force)
            elif args[i] == '--archive-year':
                # NOTE(review): reads args[i] after incrementing without a
                # bounds check -- "--archive-year" as the last argument
                # raises IndexError instead of printing usage.
                i = i + 1;
                year = args[i]
                months = ["January" , "February", "March", "April", "May",
                          "June", "July", "August", "September", "October",
                          "November", "December"];
                for month in months:
                    try:
                        # NOTE(review): "str" shadows the builtin for the
                        # rest of this scope.
                        str = "%s-%s" % (year, month)
                        T = time.strptime(str, "%Y-%B")
                        # +10 days so the timestamp falls safely inside the
                        # target month.
                        t = time.mktime(T) + 3600 * 24 * 10;
                        analyzeArchives(t, force)
                    except:
                        print "Failed to index month archive:"
                        print sys.exc_type, sys.exc_value
            elif args[i] == '--archive-month':
                # Same unchecked args[i+1] read as --archive-year above.
                i = i + 1;
                month = args[i]
                try:
                    T = time.strptime(month, "%Y-%B")
                    t = time.mktime(T) + 3600 * 24 * 10;
                    analyzeArchives(t, force)
                except:
                    print "Failed to index month archive:"
                    print sys.exc_type, sys.exc_value
            elif args[i] == '--API':
                analyzeAPITop()
            elif args[i] == '--docs':
                analyzeHTMLTop()
            else:
                usage()
            i = i + 1
    else:
        usage()
# Script entry point.
if __name__ == "__main__":
    main()
| gpl-2.0 |
sho-h/ruby_env | devkit/mingw/bin/lib/posixfile.py | 67 | 8240 | """Extended file operations available in POSIX.
f = posixfile.open(filename, [mode, [bufsize]])
will create a new posixfile object
f = posixfile.fileopen(fileobject)
will create a posixfile object from a builtin file object
f.file()
will return the original builtin file object
f.dup()
will return a new file object based on a new filedescriptor
f.dup2(fd)
will return a new file object based on the given filedescriptor
f.flags(mode)
will turn on the associated flag (merge)
mode can contain the following characters:
(character representing a flag)
a append only flag
c close on exec flag
n no delay flag
s synchronization flag
(modifiers)
! turn flags 'off' instead of default 'on'
= copy flags 'as is' instead of default 'merge'
? return a string in which the characters represent the flags
that are set
note: - the '!' and '=' modifiers are mutually exclusive.
- the '?' modifier will return the status of the flags after they
have been changed by other characters in the mode string
f.lock(mode [, len [, start [, whence]]])
will (un)lock a region
mode can contain the following characters:
(character representing type of lock)
u unlock
r read lock
w write lock
(modifiers)
| wait until the lock can be granted
? return the first lock conflicting with the requested lock
or 'None' if there is no conflict. The lock returned is in the
format (mode, len, start, whence, pid) where mode is a
character representing the type of lock ('r' or 'w')
note: - the '?' modifier prevents a region from being locked; it is
query only
"""
import warnings
# Emitted at import time; stacklevel 2 points the warning at the importer.
warnings.warn("The posixfile module is deprecated; "
              "fcntl.lockf() provides better locking", DeprecationWarning, 2)
class _posixfile_:
    """File wrapper class that provides extra POSIX file routines."""

    # Index 0/1 by file.closed to get a human-readable state for __repr__.
    states = ['open', 'closed']

    #
    # Internal routines
    #

    def __repr__(self):
        file = self._file_
        return "<%s posixfile '%s', mode '%s' at %s>" % \
               (self.states[file.closed], file.name, file.mode, \
                hex(id(self))[2:])

    #
    # Initialization routines
    #

    def open(self, name, mode='r', bufsize=-1):
        # Open a file by name and wrap it (delegates to fileopen()).
        import __builtin__
        return self.fileopen(__builtin__.open(name, mode, bufsize))

    def fileopen(self, file):
        # Wrap an existing builtin file object.  Its built-in methods
        # (read, write, ...) are copied onto this instance so they can be
        # called directly on the wrapper.
        import types
        if repr(type(file)) != "<type 'file'>":
            raise TypeError, 'posixfile.fileopen() arg must be file object'
        self._file_  = file
        # Copy basic file methods
        for maybemethod in dir(file):
            if not maybemethod.startswith('_'):
                attr = getattr(file, maybemethod)
                if isinstance(attr, types.BuiltinMethodType):
                    setattr(self, maybemethod, attr)
        return self

    #
    # New methods
    #

    def file(self):
        # Return the wrapped builtin file object.
        return self._file_

    def dup(self):
        # Return a new builtin file object on a dup()ed descriptor.
        import posix
        if not hasattr(posix, 'fdopen'):
            raise AttributeError, 'dup() method unavailable'
        return posix.fdopen(posix.dup(self._file_.fileno()), self._file_.mode)

    def dup2(self, fd):
        # Duplicate the wrapped descriptor onto fd; return a builtin file
        # object opened on fd with the same mode.
        import posix
        if not hasattr(posix, 'fdopen'):
            raise AttributeError, 'dup() method unavailable'
        posix.dup2(self._file_.fileno(), fd)
        return posix.fdopen(fd, self._file_.mode)

    def flags(self, *which):
        # Get/set fcntl status flags; see the module docstring for the
        # mode mini-language ('a','c','n','s' plus modifiers '!','=','?').
        import fcntl, os
        if which:
            if len(which) > 1:
                raise TypeError, 'Too many arguments'
            which = which[0]
        else: which = '?'

        l_flags = 0
        if 'n' in which: l_flags = l_flags | os.O_NDELAY
        if 'a' in which: l_flags = l_flags | os.O_APPEND
        if 's' in which: l_flags = l_flags | os.O_SYNC

        file = self._file_

        if '=' not in which:
            # Merge with (or, with '!', clear from) the current flags.
            cur_fl = fcntl.fcntl(file.fileno(), fcntl.F_GETFL, 0)
            if '!' in which: l_flags = cur_fl & ~ l_flags
            else: l_flags = cur_fl | l_flags

            l_flags = fcntl.fcntl(file.fileno(), fcntl.F_SETFL, l_flags)

        if 'c' in which:
            arg = ('!' not in which) # 0 is don't, 1 is do close on exec
            l_flags = fcntl.fcntl(file.fileno(), fcntl.F_SETFD, arg)

        if '?' in which:
            # Query mode: rebuild the character representation of the
            # flags as they are now set.
            which = '' # Return current flags
            l_flags = fcntl.fcntl(file.fileno(), fcntl.F_GETFL, 0)
            if os.O_APPEND & l_flags: which = which + 'a'
            if fcntl.fcntl(file.fileno(), fcntl.F_GETFD, 0) & 1:
                which = which + 'c'
            if os.O_NDELAY & l_flags: which = which + 'n'
            if os.O_SYNC & l_flags: which = which + 's'
            return which

    def lock(self, how, *args):
        # (Un)lock a region; see the module docstring for the mode string
        # ('r'/'w'/'u' plus '|' to wait and '?' to query only).
        import struct, fcntl

        if 'w' in how: l_type = fcntl.F_WRLCK
        elif 'r' in how: l_type = fcntl.F_RDLCK
        elif 'u' in how: l_type = fcntl.F_UNLCK
        else: raise TypeError, 'no type of lock specified'

        if '|' in how: cmd = fcntl.F_SETLKW
        elif '?' in how: cmd = fcntl.F_GETLK
        else: cmd = fcntl.F_SETLK

        l_whence = 0
        l_start = 0
        l_len = 0

        # Optional positional args: (len[, start[, whence]]).
        if len(args) == 1:
            l_len = args[0]
        elif len(args) == 2:
            l_len, l_start = args
        elif len(args) == 3:
            l_len, l_start, l_whence = args
        elif len(args) > 3:
            raise TypeError, 'too many arguments'

        # Hack by davem@magnet.com to get locking to go on freebsd;
        # additions for AIX by Vladimir.Marangozov@imag.fr
        import sys, os
        if sys.platform in ('netbsd1',
                            'openbsd2',
                            'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5',
                            'freebsd6', 'freebsd7', 'freebsd8',
                            'bsdos2', 'bsdos3', 'bsdos4'):
            flock = struct.pack('lxxxxlxxxxlhh', \
                  l_start, l_len, os.getpid(), l_type, l_whence)
        elif sys.platform in ('aix3', 'aix4'):
            flock = struct.pack('hhlllii', \
                  l_type, l_whence, l_start, l_len, 0, 0, 0)
        else:
            flock = struct.pack('hhllhh', \
                  l_type, l_whence, l_start, l_len, 0, 0)

        flock = fcntl.fcntl(self._file_.fileno(), cmd, flock)

        if '?' in how:
            # Decode the struct flock the kernel handed back; the layout is
            # platform specific.  NOTE(review): this BSD platform list lacks
            # the freebsd6-8 entries present in the pack branch above --
            # confirm whether that asymmetry is intentional.
            if sys.platform in ('netbsd1',
                                'openbsd2',
                                'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5',
                                'bsdos2', 'bsdos3', 'bsdos4'):
                l_start, l_len, l_pid, l_type, l_whence = \
                    struct.unpack('lxxxxlxxxxlhh', flock)
            elif sys.platform in ('aix3', 'aix4'):
                l_type, l_whence, l_start, l_len, l_sysid, l_pid, l_vfs = \
                    struct.unpack('hhlllii', flock)
            elif sys.platform == "linux2":
                l_type, l_whence, l_start, l_len, l_pid, l_sysid = \
                    struct.unpack('hhllhh', flock)
            else:
                l_type, l_whence, l_start, l_len, l_sysid, l_pid = \
                    struct.unpack('hhllhh', flock)

            # Only report a conflict; no conflicting lock yields None.
            if l_type != fcntl.F_UNLCK:
                if l_type == fcntl.F_RDLCK:
                    return 'r', l_len, l_start, l_whence, l_pid
                else:
                    return 'w', l_len, l_start, l_whence, l_pid
def open(name, mode='r', bufsize=-1):
    """Public routine to open a file as a posixfile object."""
    wrapper = _posixfile_()
    return wrapper.open(name, mode, bufsize)
def fileopen(file):
    """Public routine to get a posixfile object from a Python file object."""
    wrapper = _posixfile_()
    return wrapper.fileopen(file)
#
# Constants
#

# Seek "whence" values, mirroring os.SEEK_SET / os.SEEK_CUR / os.SEEK_END.
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2

#
# End of posixfile.py
#
| mit |
ch1nh5/tizen_rt | external/iotivity/iotivity_1.2-rel/extlibs/gtest/gtest-1.7.0/test/gtest_test_utils.py | 1100 | 10812 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
# Alias so Main() can delegate to a different test framework if needed.
_test_module = unittest

# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
  import subprocess
  _SUBPROCESS_MODULE_AVAILABLE = True
except:
  # Pre-2.4 Pythons: fall back to popen2 (used by Subprocess below).
  import popen2
  _SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204

GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'

IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]

# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'

# Snapshot of the environment, mutated via SetEnvVar() for child processes.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
  """Sets/unsets an environment variable to a given value."""

  if value is None:
    # Removing an absent variable is a no-op.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase  # pylint: disable-msg=C6409

# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
             'build_dir': os.path.dirname(sys.argv[0])}
# Guard so the argv parsing/stripping only ever happens once per process.
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
  """Parses and strips Google Test flags from argv.  This is idempotent.

  For each flag in _flag_map, the value is taken (in increasing priority)
  from the default, the upper-cased environment variable of the same name,
  or a --flag=value argument, which is removed from argv in place.
  """

  # Suppresses the lint complaint about a global variable since we need it
  # here to maintain module-wide state.
  global _gtest_flags_are_parsed  # pylint: disable-msg=W0603
  if _gtest_flags_are_parsed:
    return

  _gtest_flags_are_parsed = True
  for flag in _flag_map:
    # The environment variable overrides the default value.
    if flag.upper() in os.environ:
      _flag_map[flag] = os.environ[flag.upper()]

    # The command line flag overrides the environment variable.
    i = 1  # Skips the program name.
    while i < len(argv):
      prefix = '--' + flag + '='
      if argv[i].startswith(prefix):
        _flag_map[flag] = argv[i][len(prefix):]
        del argv[i]
        break
      else:
        # We don't increment i in case we just found a --gtest_* flag
        # and removed it from argv.
        i += 1
def GetFlag(flag):
  """Returns the value of the given flag.  Raises KeyError for names not
  present in _flag_map."""

  # In case GetFlag() is called before Main(), we always call
  # _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
  # are parsed.
  _ParseAndStripGTestFlags(sys.argv)

  return _flag_map[flag]
def GetSourceDir():
  """Returns the absolute path of the directory where the .py files are."""

  # 'source_dir' defaults to this script's own directory (see _flag_map).
  return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
  """Returns the absolute path of the directory where the test binaries are."""

  # Overridable via --build_dir or the BUILD_DIR environment variable.
  return os.path.abspath(GetFlag('build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
  """Returns a directory for temporary files.

  Created lazily, shared for the whole process, and cleaned up by the
  _RemoveTempDir atexit hook."""

  global _temp_dir
  if not _temp_dir:
    _temp_dir = tempfile.mkdtemp()
  return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
  """Returns the absolute path of the test binary given its name.

  The function will print a message and abort the program if the resulting file
  doesn't exist.

  Args:
    executable_name: name of the test binary that the test script runs.
    build_dir:       directory where to look for executables, by default
                     the result of GetBuildDir().

  Returns:
    The absolute path of the test binary.
  """

  path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
                                      executable_name))
  # Windows and Cygwin binaries carry an .exe suffix.
  if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
    path += '.exe'

  if not os.path.exists(path):
    message = (
        'Unable to find the test binary. Please make sure to provide path\n'
        'to the binary via the --build_dir flag or the BUILD_DIR\n'
        'environment variable.')
    print >> sys.stderr, message
    sys.exit(1)

  return path
def GetExitStatus(exit_code):
  """Returns the argument to exit(), or -1 if exit() wasn't called.

  Args:
    exit_code: the result value of os.system(command).
  """

  if os.name == 'nt':
    # On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
    # the argument to exit() directly.
    return exit_code

  # On Unix, the status word must be unpacked with os.WEXITSTATUS(); a
  # process that did not exit normally is reported as -1.
  if not os.WIFEXITED(exit_code):
    return -1
  return os.WEXITSTATUS(exit_code)
class Subprocess:
  """Runs a child command and records its combined output and exit state."""

  def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
    """Changes into a specified directory, if provided, and executes a command.

    Restores the old directory afterwards.

    Args:
      command:        The command to run, in the form of sys.argv.
      working_dir:    The directory to change into.
      capture_stderr: Determines whether to capture stderr in the output member
                      or to discard it.
      env:            Dictionary with environment to pass to the subprocess.

    Returns:
      An object that represents outcome of the executed process. It has the
      following attributes:
        terminated_by_signal   True iff the child process has been terminated
                               by a signal.
        signal                 Sygnal that terminated the child process.
        exited                 True iff the child process exited normally.
        exit_code              The code with which the child process exited.
        output                 Child process's stdout and stderr output
                               combined in a string.
    """

    # The subprocess module is the preferrable way of running programs
    # since it is available and behaves consistently on all platforms,
    # including Windows. But it is only available starting in python 2.4.
    # In earlier python versions, we revert to the popen2 module, which is
    # available in python 2.0 and later but doesn't provide required
    # functionality (Popen4) under Windows. This allows us to support Mac
    # OS X 10.4 Tiger, which has python 2.3 installed.
    if _SUBPROCESS_MODULE_AVAILABLE:
      if capture_stderr:
        stderr = subprocess.STDOUT
      else:
        stderr = subprocess.PIPE

      p = subprocess.Popen(command,
                           stdout=subprocess.PIPE, stderr=stderr,
                           cwd=working_dir, universal_newlines=True, env=env)
      # communicate returns a tuple with the file obect for the child's
      # output.
      self.output = p.communicate()[0]
      self._return_code = p.returncode
    else:
      old_dir = os.getcwd()

      def _ReplaceEnvDict(dest, src):
        # Changes made by os.environ.clear are not inheritable by child
        # processes until Python 2.6. To produce inheritable changes we have
        # to delete environment items with the del statement.
        for key in dest.keys():
          del dest[key]
        dest.update(src)

      # When 'env' is not None, backup the environment variables and replace
      # them with the passed 'env'. When 'env' is None, we simply use the
      # current 'os.environ' for compatibility with the subprocess.Popen
      # semantics used above.
      if env is not None:
        old_environ = os.environ.copy()
        _ReplaceEnvDict(os.environ, env)

      try:
        if working_dir is not None:
          os.chdir(working_dir)
        if capture_stderr:
          p = popen2.Popen4(command)
        else:
          p = popen2.Popen3(command)
        p.tochild.close()
        self.output = p.fromchild.read()
        ret_code = p.wait()
      finally:
        # Always restore the working directory (and environment, if
        # replaced), even when the child failed.
        os.chdir(old_dir)

        # Restore the old environment variables
        # if they were replaced.
        if env is not None:
          _ReplaceEnvDict(os.environ, old_environ)

      # Converts ret_code to match the semantics of
      # subprocess.Popen.returncode.
      if os.WIFSIGNALED(ret_code):
        self._return_code = -os.WTERMSIG(ret_code)
      else:  # os.WIFEXITED(ret_code) should return True here.
        self._return_code = os.WEXITSTATUS(ret_code)

    # A negative return code means death by signal (subprocess semantics).
    if self._return_code < 0:
      self.terminated_by_signal = True
      self.exited = False
      self.signal = -self._return_code
    else:
      self.terminated_by_signal = False
      self.exited = True
      self.exit_code = self._return_code
def Main():
  """Runs the unit test."""

  # We must call _ParseAndStripGTestFlags() before calling
  # unittest.main().  Otherwise the latter will be confused by the
  # --gtest_* flags.
  _ParseAndStripGTestFlags(sys.argv)
  # The tested binaries should not be writing XML output files unless the
  # script explicitly instructs them to.
  # TODO(vladl@google.com): Move this into Subprocess when we implement
  # passing environment into it as a parameter.
  if GTEST_OUTPUT_VAR_NAME in os.environ:
    del os.environ[GTEST_OUTPUT_VAR_NAME]

  _test_module.main()
| apache-2.0 |
espadrine/opera | chromium/src/chrome/test/chromedriver/archive.py | 3 | 2295 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Downloads items from the Chromium continuous archive."""
import os
import platform
import urllib
import util
# Pinned Chromium revisions corresponding to the Chrome 27/28 branch points.
CHROME_27_REVISION = '190466'
CHROME_28_REVISION = '198276'

_SITE = 'http://commondatastorage.googleapis.com'


class Site(object):
  """Base URLs of the two Chromium build archive buckets."""
  CONTINUOUS = _SITE + '/chromium-browser-continuous'
  SNAPSHOT = _SITE + '/chromium-browser-snapshots'
def GetLatestRevision(site=Site.CONTINUOUS):
  """Returns the latest revision (as a string) available for this platform.

  Args:
    site: the archive site to check against, default to the continuous one.
  """
  # NOTE(review): urllib.urlopen is Python 2 only; a port would need
  # urllib.request.urlopen.
  url = site + '/%s/LAST_CHANGE'
  return urllib.urlopen(url % _GetDownloadPlatform()).read()
def DownloadChrome(revision, dest_dir, site=Site.CONTINUOUS):
  """Downloads the packaged Chrome from the archive to the given directory.

  Args:
    revision: the revision of Chrome to download.
    dest_dir: the directory to download Chrome to.
    site: the archive site to download from, default to the continuous one.

  Returns:
    The path to the unzipped Chrome binary.
  """
  def GetZipName():
    # Archive zip name by platform (no return for other platforms).
    if util.IsWindows():
      return 'chrome-win32'
    elif util.IsMac():
      return 'chrome-mac'
    elif util.IsLinux():
      return 'chrome-linux'

  def GetChromePathFromPackage():
    # Path of the browser binary inside the unzipped package.
    if util.IsWindows():
      return 'chrome.exe'
    elif util.IsMac():
      return 'Chromium.app/Contents/MacOS/Chromium'
    elif util.IsLinux():
      return 'chrome'

  zip_path = os.path.join(dest_dir, 'chrome-%s.zip' % revision)
  if not os.path.exists(zip_path):
    # Cached zip not present: fetch it from the archive site.
    url = site + '/%s/%s/%s.zip' % (_GetDownloadPlatform(), revision,
                                    GetZipName())
    print 'Downloading', url, '...'
    urllib.urlretrieve(url, zip_path)
  util.Unzip(zip_path, dest_dir)
  return os.path.join(dest_dir, GetZipName(), GetChromePathFromPackage())
def _GetDownloadPlatform():
  """Returns the name for this platform on the archive site."""
  if util.IsWindows():
    return 'Win'
  if util.IsMac():
    return 'Mac'
  if util.IsLinux():
    # 64-bit Linux builds live under a separate directory on the site.
    return 'Linux_x64' if platform.architecture()[0] == '64bit' else 'Linux'
| bsd-3-clause |
akashsinghal/Speech-Memorization-App | Python_Backend/env/lib/python3.6/site-packages/pip/_vendor/cachecontrol/adapter.py | 327 | 4608 | import types
import functools
from pip._vendor.requests.adapters import HTTPAdapter
from .controller import CacheController
from .cache import DictCache
from .filewrapper import CallbackFileWrapper
class CacheControlAdapter(HTTPAdapter):
    """requests transport adapter that adds HTTP caching around send()."""

    # Responses to these methods invalidate any cached entry for the URL.
    invalidating_methods = set(['PUT', 'DELETE'])

    def __init__(self, cache=None,
                 cache_etags=True,
                 controller_class=None,
                 serializer=None,
                 heuristic=None,
                 *args, **kw):
        super(CacheControlAdapter, self).__init__(*args, **kw)
        self.cache = cache or DictCache()
        self.heuristic = heuristic

        # The controller implements the actual caching policy; it can be
        # swapped out via controller_class.
        controller_factory = controller_class or CacheController

        self.controller = controller_factory(
            self.cache,
            cache_etags=cache_etags,
            serializer=serializer,
        )

    def send(self, request, **kw):
        """
        Send a request. Use the request information to see if it
        exists in the cache and cache the response if we need to and can.
        """
        if request.method == 'GET':
            cached_response = self.controller.cached_request(request)
            if cached_response:
                # Cache hit: short-circuit without touching the network.
                return self.build_response(request, cached_response,
                                           from_cache=True)

            # check for etags and add headers if appropriate
            request.headers.update(
                self.controller.conditional_headers(request)
            )

        resp = super(CacheControlAdapter, self).send(request, **kw)

        return resp

    def build_response(self, request, response, from_cache=False):
        """
        Build a response by making a request or using the cache.

        This will end up calling send and returning a potentially
        cached response
        """
        if not from_cache and request.method == 'GET':

            # Check for any heuristics that might update headers
            # before trying to cache.
            if self.heuristic:
                response = self.heuristic.apply(response)

            # apply any expiration heuristics
            if response.status == 304:
                # We must have sent an ETag request. This could mean
                # that we've been expired already or that we simply
                # have an etag. In either case, we want to try and
                # update the cache if that is the case.
                cached_response = self.controller.update_cached_response(
                    request, response
                )

                if cached_response is not response:
                    from_cache = True

                # We are done with the server response, read a
                # possible response body (compliant servers will
                # not return one, but we cannot be 100% sure) and
                # release the connection back to the pool.
                response.read(decode_content=False)
                response.release_conn()

                response = cached_response

            # We always cache the 301 responses
            elif response.status == 301:
                self.controller.cache_response(request, response)
            else:
                # Wrap the response file with a wrapper that will cache the
                #   response when the stream has been consumed.
                response._fp = CallbackFileWrapper(
                    response._fp,
                    functools.partial(
                        self.controller.cache_response,
                        request,
                        response,
                    )
                )
                if response.chunked:
                    # For chunked bodies the wrapper's close callback never
                    # fires on its own; hook the chunk bookkeeping so the
                    # cache write happens when the last chunk is read.
                    super_update_chunk_length = response._update_chunk_length

                    def _update_chunk_length(self):
                        super_update_chunk_length()
                        if self.chunk_left == 0:
                            self._fp._close()
                    response._update_chunk_length = types.MethodType(_update_chunk_length, response)

        resp = super(CacheControlAdapter, self).build_response(
            request, response
        )

        # See if we should invalidate the cache.
        if request.method in self.invalidating_methods and resp.ok:
            cache_url = self.controller.cache_url(request.url)
            self.cache.delete(cache_url)

        # Give the request a from_cache attr to let people use it
        resp.from_cache = from_cache

        return resp

    def close(self):
        self.cache.close()
        super(CacheControlAdapter, self).close()
| apache-2.0 |
ekamioka/gpss-research | experiments/2014-01-09-radio.py | 4 | 1466 | Experiment(description='Trying to recreate old results using latest code',
data_dir='../data/radio/',
max_depth=4,
random_order=False,
k=1,
debug=False,
local_computation=False,
n_rand=9,
sd=2,
jitter_sd=0.1,
max_jobs=200,
verbose=False,
make_predictions=False,
skip_complete=True,
results_dir='../results/2014-01-09-radio/',
iters=250,
base_kernels='SE,Per,Lin,Const,Noise',
random_seed=1,
period_heuristic=3,
period_heuristic_type='min',
max_period_heuristic=1.5, # Encourage it to see periodicity
subset=True,
subset_size=250,
full_iters=10,
bundle_size=5,
additive_form=True,
mean='ff.MeanZero()', # Starting mean
kernel='ff.SumKernel(operands=[ff.NoiseKernel(), ff.ConstKernel(), ff.SqExpKernel(dimension=0), ff.ProductKernel(operands=[ff.PeriodicKernel(dimension=0, lengthscale=0.334902, period=0.000316), ff.PeriodicKernel(dimension=0, lengthscale=1.108831, period=2.296433), ff.SqExpKernel(dimension=0)])])', # Starting kernel
lik='ff.LikGauss(sf=-np.Inf)', # Starting likelihood
score='bic',
search_operators=[('A', 'B', {'A': 'kernel', 'B': 'base'}),
('A', ('None',), {'A': 'kernel'})]) | mit |
nadley/Sick-Beard | lib/hachoir_parser/archive/ace.py | 90 | 9964 | """
ACE parser
From wotsit.org and the SDK header (bitflags)
Partial study of a new block type (5) I've called "new_recovery", as its
syntax is very close to the former one (of type 2).
Status: can only read totally file and header blocks.
Author: Christophe Gisquet <christophe.gisquet@free.fr>
Creation date: 19 january 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (StaticFieldSet, FieldSet,
Bit, Bits, NullBits, RawBytes, Enum,
UInt8, UInt16, UInt32,
PascalString8, PascalString16, String,
TimeDateMSDOS32)
from lib.hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_parser.common.msdos import MSDOSFileAttr32
# Signature string found in ACE archive headers.
MAGIC = "**ACE**"

# Host OS identifiers for the 'host_os' header field.
OS_MSDOS = 0
OS_WIN32 = 2
HOST_OS = {
    0: "MS-DOS",
    1: "OS/2",
    2: "Win32",
    3: "Unix",
    4: "MAC-OS",
    5: "Win NT",
    6: "Primos",
    7: "APPLE GS",
    8: "ATARI",
    9: "VAX VMS",
    10: "AMIGA",
    11: "NEXT",
}

# Values of the per-file 'compression_type' field.
COMPRESSION_TYPE = {
    0: "Store",
    1: "Lempel-Ziv 77",
    2: "ACE v2.0",
}

# Values of the per-file 'compression_mode' (quality) field.
COMPRESSION_MODE = {
    0: "fastest",
    1: "fast",
    2: "normal",
    3: "good",
    4: "best",
}
# TODO: Computing the CRC16 would also prove useful
#def markerValidate(self):
# return not self["extend"].value and self["signature"].value == MAGIC and \
# self["host_os"].value<12
class MarkerFlags(StaticFieldSet):
    """Bit flags of the main (marker) archive header."""
    format = (
        (Bit, "extend", "Whether the header is extended"),
        (Bit, "has_comment", "Whether the archive has a comment"),
        (NullBits, "unused", 7, "Reserved bits"),
        (Bit, "sfx", "SFX"),
        (Bit, "limited_dict", "Junior SFX with 256K dictionary"),
        (Bit, "multi_volume", "Part of a set of ACE archives"),
        (Bit, "has_av_string", "This header holds an AV-string"),
        (Bit, "recovery_record", "Recovery record preset"),
        (Bit, "locked", "Archive is locked"),
        (Bit, "solid", "Archive uses solid compression")
    )
def markerFlags(self):
    # Yield the flag field of a marker (main archive) header.
    yield MarkerFlags(self, "flags", "Marker flags")
def markerHeader(self):
    """Parse the fields of the main archive (marker) header, after its
    flags; the optional AV string and compressed comment depend on the
    flag bits already parsed into self["flags"]."""
    yield String(self, "signature", 7, "Signature")
    yield UInt8(self, "ver_extract", "Version needed to extract archive")
    yield UInt8(self, "ver_created", "Version used to create archive")
    yield Enum(UInt8(self, "host_os", "OS where the files were compressed"), HOST_OS)
    yield UInt8(self, "vol_num", "Volume number")
    yield TimeDateMSDOS32(self, "time", "Date and time (MS DOS format)")
    yield Bits(self, "reserved", 64, "Reserved size for future extensions")
    flags = self["flags"]
    if flags["has_av_string"].value:
        yield PascalString8(self, "av_string", "AV String")
    if flags["has_comment"].value:
        size = filesizeHandler(UInt16(self, "comment_size", "Comment size"))
        yield size
        if size.value > 0:
            yield RawBytes(self, "compressed_comment", size.value, \
                           "Compressed comment")
class FileFlags(StaticFieldSet):
    """Bit flags of a file entry header."""
    format = (
        (Bit, "extend", "Whether the header is extended"),
        (Bit, "has_comment", "Presence of file comment"),
        (Bits, "unused", 10, "Unused bit flags"),
        (Bit, "encrypted", "File encrypted with password"),
        (Bit, "previous", "File continued from previous volume"),
        (Bit, "next", "File continues on the next volume"),
        (Bit, "solid", "File compressed using previously archived files")
    )
def fileFlags(self):
    # Yield the flag field of a file entry header.
    yield FileFlags(self, "flags", "File flags")
def fileHeader(self):
    """Parse the fields of a file entry header (after its flags):
    sizes, timestamp, attributes, CRC32, compression info, filename and
    an optional compressed comment."""
    yield filesizeHandler(UInt32(self, "compressed_size", "Size of the compressed file"))
    yield filesizeHandler(UInt32(self, "uncompressed_size", "Uncompressed file size"))
    yield TimeDateMSDOS32(self, "ftime", "Date and time (MS DOS format)")
    # DOS/Win32 archives use the MS-DOS attribute bit layout.
    if self["/header/host_os"].value in (OS_MSDOS, OS_WIN32):
        yield MSDOSFileAttr32(self, "file_attr", "File attributes")
    else:
        yield textHandler(UInt32(self, "file_attr", "File attributes"), hexadecimal)
    yield textHandler(UInt32(self, "file_crc32", "CRC32 checksum over the compressed file)"), hexadecimal)
    yield Enum(UInt8(self, "compression_type", "Type of compression"), COMPRESSION_TYPE)
    yield Enum(UInt8(self, "compression_mode", "Quality of compression"), COMPRESSION_MODE)
    yield textHandler(UInt16(self, "parameters", "Compression parameters"), hexadecimal)
    yield textHandler(UInt16(self, "reserved", "Reserved data"), hexadecimal)
    # Filename
    yield PascalString16(self, "filename", "Filename")
    # Comment
    if self["flags/has_comment"].value:
        yield filesizeHandler(UInt16(self, "comment_size", "Size of the compressed comment"))
        if self["comment_size"].value > 0:
            yield RawBytes(self, "comment_data", self["comment_size"].value, "Comment data")
def fileBody(self):
    """Yield the compressed payload field of a file entry, if any."""
    nbytes = self["compressed_size"].value
    if nbytes <= 0:
        return
    yield RawBytes(self, "compressed_data", nbytes, "Compressed data")
def fileDesc(self):
    """One-line description of a file entry: name plus compressed size."""
    name = self["filename"].value
    size = self["compressed_size"].display
    return "File entry: %s (%s)" % (name, size)
def recoveryHeader(self):
    """Header parser for ACE recovery blocks.

    Yields the recovery-record metadata followed by the recovery data blocks
    and their final XOR block.
    """
    yield filesizeHandler(UInt32(self, "rec_blk_size", "Size of recovery data"))
    # NOTE(review): this stores the field's *bit size* (.size), not its
    # parsed value (.value) -- looks suspicious, confirm before changing.
    self.body_size = self["rec_blk_size"].size
    yield String(self, "signature", 7, "Signature, normally '**ACE**'")
    # Fixed description typo: "mode of" -> "made of".
    yield textHandler(UInt32(self, "relative_start",
        "Relative start (to this block) of the data this block is made of"),
        hexadecimal)
    yield UInt32(self, "num_blocks", "Number of blocks the data is split into")
    yield UInt32(self, "size_blocks", "Size of these blocks")
    yield UInt16(self, "crc16_blocks", "CRC16 over recovery data")
    # size_blocks blocks of size size_blocks follow
    # The ultimate data is the xor data of all those blocks
    size = self["size_blocks"].value
    for index in xrange(self["num_blocks"].value):
        yield RawBytes(self, "data[]", size, "Recovery block %i" % index)
    yield RawBytes(self, "xor_data", size, "The XOR value of the above data blocks")
def recoveryDesc(self):
    """Short description of a recovery block.

    Fixed: the original used ``%u``, which requires a number, but ``.display``
    is a human-readable string (cf. ``%s`` usage in fileDesc), so formatting
    raised TypeError. ``%s`` works for both.
    """
    # NOTE(review): recoveryHeader yields "rec_blk_size", not "body_size" --
    # confirm the intended field name.
    return "Recovery block, size=%s" % self["body_size"].display
def newRecoveryHeader(self):
    """
    Header parser for "new recovery" (type 5) blocks.

    This header is described nowhere; the field meanings below are best-effort
    guesses, hence the "unknown" names.
    """
    # Optional extended body: only present when the "extend" flag is set.
    if self["flags/extend"].value:
        yield filesizeHandler(UInt32(self, "body_size", "Size of the unknown body following"))
        self.body_size = self["body_size"].value
    yield textHandler(UInt32(self, "unknown[]", "Unknown field, probably 0"),
        hexadecimal)
    yield String(self, "signature", 7, "Signature, normally '**ACE**'")
    yield textHandler(UInt32(self, "relative_start",
        "Offset (=crc16's) of this block in the file"), hexadecimal)
    yield textHandler(UInt32(self, "unknown[]",
        "Unknown field, probably 0"), hexadecimal)
class BaseFlags(StaticFieldSet):
    """Generic 16-bit flag set used by blocks without a specific flag layout."""
    format = (
        (Bit, "extend", "Whether the header is extended"),
        (NullBits, "unused", 15, "Unused bit flags")
    )
def parseFlags(self):
    # Default flag parser used when TAG_INFO provides none (see Block.__init__).
    yield BaseFlags(self, "flags", "Unknown flags")
def parseHeader(self):
    """Default header parser: an optional extended-body size field."""
    if not self["flags/extend"].value:
        return
    yield filesizeHandler(UInt32(self, "body_size", "Size of the unknown body following"))
    self.body_size = self["body_size"].value
def parseBody(self):
    """Default body parser: dump any unhandled body bytes as raw data."""
    remaining = self.body_size
    if remaining <= 0:
        return
    yield RawBytes(self, "body_data", remaining, "Body data, unhandled")
class Block(FieldSet):
    """One ACE archive block: a common header (CRC16, size, type) followed by
    a type-specific flag set, header and body."""

    # block_type -> (field name, description (str or callable), flag parser,
    # header parser, body parser). None slots fall back to the generic
    # parseFlags/parseHeader/parseBody defined at module level.
    TAG_INFO = {
        0: ("header", "Archiver header", markerFlags, markerHeader, None),
        1: ("file[]", fileDesc, fileFlags, fileHeader, fileBody),
        # NOTE(review): for type 2 the third slot (flag parser position)
        # holds recoveryHeader -- confirm this layout is intentional.
        2: ("recovery[]", recoveryDesc, recoveryHeader, None, None),
        5: ("new_recovery[]", None, None, newRecoveryHeader, None)
    }

    def __init__(self, parent, name, description=None):
        FieldSet.__init__(self, parent, name, description)
        self.body_size = 0
        self.desc_func = None
        # Renamed local: `type` shadowed the builtin.
        block_type = self["block_type"].value
        if block_type in self.TAG_INFO:
            self._name, desc, self.parseFlags, self.parseHeader, self.parseBody = self.TAG_INFO[block_type]
            if desc:
                if isinstance(desc, str):
                    self._description = desc
                else:
                    self.desc_func = desc
        else:
            # Fixed doubled word ("block block") in the warning message.
            self.warning("Processing as unknown block of type %u" % block_type)
        # Fall back to the generic module-level parsers for missing slots.
        if not self.parseFlags:
            self.parseFlags = parseFlags
        if not self.parseHeader:
            self.parseHeader = parseHeader
        if not self.parseBody:
            self.parseBody = parseBody

    def createFields(self):
        """Generate fields: common header, flags, type-specific header,
        unhandled extra header data, then the body."""
        yield textHandler(UInt16(self, "crc16", "Archive CRC16 (from byte 4 on)"), hexadecimal)
        yield filesizeHandler(UInt16(self, "head_size", "Block size (from byte 4 on)"))
        yield UInt8(self, "block_type", "Block type")
        # Flags
        for flag in self.parseFlags(self):
            yield flag
        # Rest of the header
        for field in self.parseHeader(self):
            yield field
        # head_size counts from byte 4 on, hence the (2+2) byte correction.
        size = self["head_size"].value - (self.current_size//8) + (2+2)
        if size > 0:
            yield RawBytes(self, "extra_data", size, "Extra header data, unhandled")
        # Body in itself
        for field in self.parseBody(self):
            yield field

    def createDescription(self):
        if self.desc_func:
            return self.desc_func(self)
        else:
            # Fixed: the original read self["type"], but the field yielded in
            # createFields is named "block_type", so the lookup always failed.
            return "Block: %s" % self["block_type"].display
class AceFile(Parser):
    """Top-level parser for ACE archives: a flat sequence of Block fields."""
    endian = LITTLE_ENDIAN
    PARSER_TAGS = {
        "id": "ace",
        "category": "archive",
        "file_ext": ("ace",),
        "mime": (u"application/x-ace-compressed",),
        "min_size": 50*8,
        "description": "ACE archive"
    }
    def validate(self):
        # The '**ACE**' signature (MAGIC, defined at module level) lives at
        # byte offset 7; readBytes takes a *bit* address, hence 7*8.
        if self.stream.readBytes(7*8, len(MAGIC)) != MAGIC:
            return "Invalid magic"
        return True
    def createFields(self):
        # An ACE archive is just consecutive blocks until end of stream.
        while not self.eof:
            yield Block(self, "block[]")
| gpl-3.0 |
ibmdb/db2pythonbuildpack | vendor/pip-1.3.1/pip/basecommand.py | 63 | 7160 | """Base Command class, and related routines"""
import os
import socket
import sys
import tempfile
import traceback
import time
import optparse
from pip.log import logger
from pip.download import urlopen
from pip.exceptions import (BadCommand, InstallationError, UninstallationError,
CommandError)
from pip.backwardcompat import StringIO, ssl
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.status_codes import SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND
from pip.util import get_prog
__all__ = ['Command']
# for backwards compatibiliy
get_proxy = urlopen.get_proxy
class Command(object):
    """Base class for all pip commands.

    Subclasses set ``name``/``usage`` and implement ``run(options, args)``;
    ``main`` drives option parsing, logging setup, environment propagation
    and error handling, and returns a status code.
    """
    name = None
    usage = None
    hidden = False

    def __init__(self, main_parser):
        """Build this command's own option parser from the main pip parser."""
        parser_kw = {
            'usage': self.usage,
            'prog': '%s %s' % (get_prog(), self.name),
            'formatter': UpdatingDefaultsHelpFormatter(),
            'add_help_option': False,
            'name': self.name,
            'description': self.__doc__,
        }

        self.main_parser = main_parser
        self.parser = ConfigOptionParser(**parser_kw)

        # Commands should add options to this option group
        optgroup_name = '%s Options' % self.name.capitalize()
        self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)

        # Re-add all options and option groups.
        for group in main_parser.option_groups:
            self._copy_option_group(self.parser, group)

        # Copies all general options from the main parser.
        self._copy_options(self.parser, main_parser.option_list)

    def _copy_options(self, parser, options):
        """Populate an option parser or group with options."""
        for option in options:
            # Options without a destination (e.g. pure help flags) are skipped.
            if not option.dest:
                continue
            parser.add_option(option)

    def _copy_option_group(self, parser, group):
        """Copy option group (including options) to another parser."""
        new_group = optparse.OptionGroup(parser, group.title)
        self._copy_options(new_group, group.option_list)
        parser.add_option_group(new_group)

    def merge_options(self, initial_options, options):
        """Fold global (pre-command) option values into *options* in place."""
        # Make sure we have all global options carried over
        attrs = ['log', 'proxy', 'require_venv',
                 'log_explicit_levels', 'log_file',
                 'timeout', 'default_vcs',
                 'skip_requirements_regex',
                 'no_input', 'exists_action',
                 'cert']
        # --insecure only exists when the ssl module is unavailable.
        if not ssl:
            attrs.append('insecure')
        for attr in attrs:
            # Global value wins only when set (falsy values fall through).
            setattr(options, attr, getattr(initial_options, attr) or getattr(options, attr))
        options.quiet += initial_options.quiet
        options.verbose += initial_options.verbose

    def setup_logging(self):
        """Hook for subclasses to configure extra logging; default is a no-op."""
        pass

    def main(self, args, initial_options):
        """Parse *args*, set up logging and environment, run the command and
        return its exit status (storing a complete debug log on failure)."""
        options, args = self.parser.parse_args(args)
        self.merge_options(initial_options, options)

        # Console verbosity: each -v raises, each -q lowers the log level.
        level = 1 # Notify
        level += options.verbose
        level -= options.quiet
        level = logger.level_for_integer(4 - level)
        complete_log = []
        # Console gets `level`; everything (DEBUG) also goes to the in-memory
        # log so it can be dumped to a file if the command fails.
        logger.consumers.extend(
            [(level, sys.stdout),
             (logger.DEBUG, complete_log.append)])
        if options.log_explicit_levels:
            logger.explicit_levels = True

        self.setup_logging()

        #TODO: try to get these passing down from the command?
        #      without resorting to os.environ to hold these.

        if options.no_input:
            os.environ['PIP_NO_INPUT'] = '1'

        if options.exists_action:
            os.environ['PIP_EXISTS_ACTION'] = ''.join(options.exists_action)

        if not ssl and options.insecure:
            os.environ['PIP_INSECURE'] = '1'

        if options.cert:
            os.environ['PIP_CERT'] = options.cert

        if options.require_venv:
            # If a venv is required check if it can really be found
            if not os.environ.get('VIRTUAL_ENV'):
                logger.fatal('Could not find an activated virtualenv (required).')
                sys.exit(VIRTUALENV_NOT_FOUND)

        if options.log:
            log_fp = open_logfile(options.log, 'a')
            logger.consumers.append((logger.DEBUG, log_fp))
        else:
            log_fp = None

        socket.setdefaulttimeout(options.timeout or None)

        urlopen.setup(proxystr=options.proxy, prompting=not options.no_input)

        exit = SUCCESS
        store_log = False
        try:
            status = self.run(options, args)
            # FIXME: all commands should return an exit status
            # and when it is done, isinstance is not needed anymore
            if isinstance(status, int):
                exit = status
        except (InstallationError, UninstallationError):
            e = sys.exc_info()[1]
            logger.fatal(str(e))
            logger.info('Exception information:\n%s' % format_exc())
            store_log = True
            exit = ERROR
        except BadCommand:
            e = sys.exc_info()[1]
            logger.fatal(str(e))
            logger.info('Exception information:\n%s' % format_exc())
            store_log = True
            exit = ERROR
        except CommandError:
            # User error: logged but not worth storing a debug log for.
            e = sys.exc_info()[1]
            logger.fatal('ERROR: %s' % e)
            logger.info('Exception information:\n%s' % format_exc())
            exit = ERROR
        except KeyboardInterrupt:
            logger.fatal('Operation cancelled by user')
            logger.info('Exception information:\n%s' % format_exc())
            store_log = True
            exit = ERROR
        except:
            # Catch-all: record the traceback and report an unknown error.
            logger.fatal('Exception:\n%s' % format_exc())
            store_log = True
            exit = UNKNOWN_ERROR
        if log_fp is not None:
            log_fp.close()
        if store_log:
            # Dump the complete DEBUG log to the configured log file, falling
            # back to a temporary file if that location is not writable.
            log_fn = options.log_file
            text = '\n'.join(complete_log)
            try:
                log_fp = open_logfile(log_fn, 'w')
            except IOError:
                temp = tempfile.NamedTemporaryFile(delete=False)
                log_fn = temp.name
                log_fp = open_logfile(log_fn, 'w')
                logger.fatal('Storing complete log in %s' % log_fn)
            log_fp.write(text)
            log_fp.close()
        return exit
def format_exc(exc_info=None):
    """Render the given (or current) exception info triple as a traceback
    string, exactly as the interpreter would print it."""
    info = exc_info if exc_info is not None else sys.exc_info()
    buf = StringIO()
    traceback.print_exception(info[0], info[1], info[2], file=buf)
    return buf.getvalue()
def open_logfile(filename, mode='a'):
    """Open the named log file (creating parent directories as needed).

    If the file already exists, a separator line and a timestamped header are
    written first, to separate past activity from current activity.
    """
    filename = os.path.abspath(os.path.expanduser(filename))
    parent = os.path.dirname(filename)
    if not os.path.exists(parent):
        os.makedirs(parent)
    already_there = os.path.exists(filename)

    log_fp = open(filename, mode)
    if already_there:
        separator = '-' * 60
        log_fp.write('%s\n' % separator)
        log_fp.write('%s run on %s\n' % (sys.argv[0], time.strftime('%c')))
    return log_fp
| mit |
dandygithub/kodi | addons/plugin.audio.music.yandex/requests/__init__.py | 35 | 2197 | # -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
Requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('https://www.python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post('http://httpbin.org/post', data=payload)
>>> print(r.text)
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2016 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'requests'
__version__ = '2.12.4'
__build__ = 0x021204
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2016 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
try:
    from .packages.urllib3.contrib import pyopenssl
    pyopenssl.inject_into_urllib3()
except ImportError:
    # pyOpenSSL (or one of its dependencies) is missing: fall back to the
    # stdlib ssl module; requests still works, just without injected SNI.
    pass

import warnings

# urllib3's DependencyWarnings should be silenced.
from .packages.urllib3.exceptions import DependencyWarning
warnings.simplefilter('ignore', DependencyWarning)
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError,
FileModeWarning, ConnectTimeout, ReadTimeout
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try:  # Python 2.7+
    from logging import NullHandler
except ImportError:
    # logging.NullHandler was only added in Python 2.7; provide a minimal
    # drop-in substitute for older interpreters.
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass

logging.getLogger(__name__).addHandler(NullHandler())

# FileModeWarnings go off per the default.
warnings.simplefilter('default', FileModeWarning, append=True)
| gpl-3.0 |
cr/fxos-certsuite | web-platform-tests/tests/tools/pywebsocket/src/mod_pywebsocket/handshake/hybi.py | 139 | 17070 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file provides the opening handshake processor for the WebSocket
protocol (RFC 6455).
Specification:
http://tools.ietf.org/html/rfc6455
"""
# Note: request.connection.write is used in this module, even though mod_python
# document says that it should be used only in connection handlers.
# Unfortunately, we have no other options. For example, request.write is not
# suitable because it doesn't allow direct raw bytes writing.
import base64
import logging
import os
import re
from mod_pywebsocket import common
from mod_pywebsocket.extensions import get_extension_processor
from mod_pywebsocket.extensions import is_compression_extension
from mod_pywebsocket.handshake._base import check_request_line
from mod_pywebsocket.handshake._base import format_header
from mod_pywebsocket.handshake._base import get_mandatory_header
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import parse_token_list
from mod_pywebsocket.handshake._base import validate_mandatory_header
from mod_pywebsocket.handshake._base import validate_subprotocol
from mod_pywebsocket.handshake._base import VersionException
from mod_pywebsocket.stream import Stream
from mod_pywebsocket.stream import StreamOptions
from mod_pywebsocket import util
# Used to validate the value in the Sec-WebSocket-Key header strictly. RFC 4648
# disallows non-zero padding, so the character right before == must be any of
# A, Q, g and w.
_SEC_WEBSOCKET_KEY_REGEX = re.compile('^[+/0-9A-Za-z]{21}[AQgw]==$')
# Defining aliases for values used frequently.
_VERSION_HYBI08 = common.VERSION_HYBI08
_VERSION_HYBI08_STRING = str(_VERSION_HYBI08)
_VERSION_LATEST = common.VERSION_HYBI_LATEST
_VERSION_LATEST_STRING = str(_VERSION_LATEST)
_SUPPORTED_VERSIONS = [
_VERSION_LATEST,
_VERSION_HYBI08,
]
def compute_accept(key):
    """Computes value for the Sec-WebSocket-Accept header from value of the
    Sec-WebSocket-Key header.

    Returns a (base64-encoded accept value, raw SHA-1 digest) pair.
    """
    digest = util.sha1_hash(key + common.WEBSOCKET_ACCEPT_UUID).digest()
    encoded = base64.b64encode(digest)
    return (encoded, digest)
class Handshaker(object):
    """Opening handshake processor for the WebSocket protocol (RFC 6455)."""

    def __init__(self, request, dispatcher):
        """Construct an instance.

        Args:
            request: mod_python request.
            dispatcher: Dispatcher (dispatch.Dispatcher).

        Handshaker will add attributes such as ws_resource during handshake.
        """
        self._logger = util.get_class_logger(self)

        self._request = request
        self._dispatcher = dispatcher

    def _validate_connection_header(self):
        """Check that the Connection header contains the 'Upgrade' token."""
        connection = get_mandatory_header(
            self._request, common.CONNECTION_HEADER)

        try:
            connection_tokens = parse_token_list(connection)
        except HandshakeException, e:
            raise HandshakeException(
                'Failed to parse %s: %s' % (common.CONNECTION_HEADER, e))

        connection_is_valid = False
        for token in connection_tokens:
            # Token comparison is case-insensitive per HTTP.
            if token.lower() == common.UPGRADE_CONNECTION_TYPE.lower():
                connection_is_valid = True
                break
        if not connection_is_valid:
            raise HandshakeException(
                '%s header doesn\'t contain "%s"' %
                (common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))

    def do_handshake(self):
        """Run the whole opening handshake: validate the request, negotiate
        subprotocol and extensions, then send the 101 response.

        Raises HandshakeException (mapped to an HTTP error status) or
        VersionException on failure.
        """
        self._request.ws_close_code = None
        self._request.ws_close_reason = None

        # Parsing.

        check_request_line(self._request)

        validate_mandatory_header(
            self._request,
            common.UPGRADE_HEADER,
            common.WEBSOCKET_UPGRADE_TYPE)

        self._validate_connection_header()

        self._request.ws_resource = self._request.uri

        unused_host = get_mandatory_header(self._request, common.HOST_HEADER)

        self._request.ws_version = self._check_version()

        # This handshake must be based on latest hybi. We are responsible to
        # fallback to HTTP on handshake failure as latest hybi handshake
        # specifies.
        try:
            self._get_origin()
            self._set_protocol()
            self._parse_extensions()

            # Key validation, response generation.

            key = self._get_key()
            (accept, accept_binary) = compute_accept(key)
            self._logger.debug(
                '%s: %r (%s)',
                common.SEC_WEBSOCKET_ACCEPT_HEADER,
                accept,
                util.hexify(accept_binary))

            self._logger.debug('Protocol version is RFC 6455')

            # Setup extension processors.

            processors = []
            if self._request.ws_requested_extensions is not None:
                for extension_request in self._request.ws_requested_extensions:
                    processor = get_extension_processor(extension_request)
                    # Unknown extension requests are just ignored.
                    if processor is not None:
                        processors.append(processor)
            self._request.ws_extension_processors = processors

            # List of extra headers. The extra handshake handler may add header
            # data as name/value pairs to this list and pywebsocket appends
            # them to the WebSocket handshake.
            self._request.extra_headers = []

            # Extra handshake handler may modify/remove processors.
            self._dispatcher.do_extra_handshake(self._request)
            processors = filter(lambda processor: processor is not None,
                                self._request.ws_extension_processors)

            # Ask each processor if there are extensions on the request which
            # cannot co-exist. When processor decided other processors cannot
            # co-exist with it, the processor marks them (or itself) as
            # "inactive". The first extension processor has the right to
            # make the final call.
            for processor in reversed(processors):
                if processor.is_active():
                    processor.check_consistency_with_other_processors(
                        processors)
            processors = filter(lambda processor: processor.is_active(),
                                processors)

            accepted_extensions = []

            # We need to take into account of mux extension here.
            # If mux extension exists:
            # - Remove processors of extensions for logical channel,
            #   which are processors located before the mux processor
            # - Pass extension requests for logical channel to mux processor
            # - Attach the mux processor to the request. It will be referred
            #   by dispatcher to see whether the dispatcher should use mux
            #   handler or not.
            mux_index = -1
            for i, processor in enumerate(processors):
                if processor.name() == common.MUX_EXTENSION:
                    mux_index = i
                    break
            if mux_index >= 0:
                logical_channel_extensions = []
                for processor in processors[:mux_index]:
                    logical_channel_extensions.append(processor.request())
                    processor.set_active(False)
                self._request.mux_processor = processors[mux_index]
                self._request.mux_processor.set_extensions(
                    logical_channel_extensions)
                processors = filter(lambda processor: processor.is_active(),
                                    processors)

            stream_options = StreamOptions()

            for index, processor in enumerate(processors):
                if not processor.is_active():
                    continue

                extension_response = processor.get_extension_response()
                if extension_response is None:
                    # Rejected.
                    continue

                accepted_extensions.append(extension_response)

                processor.setup_stream_options(stream_options)

                if not is_compression_extension(processor.name()):
                    continue

                # Inactivate all of the following compression extensions.
                for j in xrange(index + 1, len(processors)):
                    if is_compression_extension(processors[j].name()):
                        processors[j].set_active(False)

            if len(accepted_extensions) > 0:
                self._request.ws_extensions = accepted_extensions
                self._logger.debug(
                    'Extensions accepted: %r',
                    map(common.ExtensionParameter.name, accepted_extensions))
            else:
                self._request.ws_extensions = None

            self._request.ws_stream = self._create_stream(stream_options)

            if self._request.ws_requested_protocols is not None:
                # The extra handshake handler is responsible for picking one
                # of the client-offered subprotocols.
                if self._request.ws_protocol is None:
                    raise HandshakeException(
                        'do_extra_handshake must choose one subprotocol from '
                        'ws_requested_protocols and set it to ws_protocol')
                validate_subprotocol(self._request.ws_protocol)

                self._logger.debug(
                    'Subprotocol accepted: %r',
                    self._request.ws_protocol)
            else:
                if self._request.ws_protocol is not None:
                    raise HandshakeException(
                        'ws_protocol must be None when the client didn\'t '
                        'request any subprotocol')

            self._send_handshake(accept)
        except HandshakeException, e:
            if not e.status:
                # Fallback to 400 bad request by default.
                e.status = common.HTTP_STATUS_BAD_REQUEST
            raise e

    def _get_origin(self):
        """Store the request's origin; the header name differs in HyBi 08."""
        if self._request.ws_version is _VERSION_HYBI08:
            origin_header = common.SEC_WEBSOCKET_ORIGIN_HEADER
        else:
            origin_header = common.ORIGIN_HEADER
        origin = self._request.headers_in.get(origin_header)
        if origin is None:
            self._logger.debug('Client request does not have origin header')
        self._request.ws_origin = origin

    def _check_version(self):
        """Validate Sec-WebSocket-Version and return the numeric version."""
        version = get_mandatory_header(self._request,
                                       common.SEC_WEBSOCKET_VERSION_HEADER)
        if version == _VERSION_HYBI08_STRING:
            return _VERSION_HYBI08
        if version == _VERSION_LATEST_STRING:
            return _VERSION_LATEST

        if version.find(',') >= 0:
            raise HandshakeException(
                'Multiple versions (%r) are not allowed for header %s' %
                (version, common.SEC_WEBSOCKET_VERSION_HEADER),
                status=common.HTTP_STATUS_BAD_REQUEST)
        raise VersionException(
            'Unsupported version %r for header %s' %
            (version, common.SEC_WEBSOCKET_VERSION_HEADER),
            supported_versions=', '.join(map(str, _SUPPORTED_VERSIONS)))

    def _set_protocol(self):
        """Parse Sec-WebSocket-Protocol into ws_requested_protocols."""
        self._request.ws_protocol = None

        protocol_header = self._request.headers_in.get(
            common.SEC_WEBSOCKET_PROTOCOL_HEADER)

        if protocol_header is None:
            self._request.ws_requested_protocols = None
            return

        self._request.ws_requested_protocols = parse_token_list(
            protocol_header)
        self._logger.debug('Subprotocols requested: %r',
                           self._request.ws_requested_protocols)

    def _parse_extensions(self):
        """Parse Sec-WebSocket-Extensions into ws_requested_extensions."""
        extensions_header = self._request.headers_in.get(
            common.SEC_WEBSOCKET_EXTENSIONS_HEADER)
        if not extensions_header:
            self._request.ws_requested_extensions = None
            return

        # HyBi 08 did not allow quoted-string extension parameter values.
        if self._request.ws_version is common.VERSION_HYBI08:
            allow_quoted_string=False
        else:
            allow_quoted_string=True
        try:
            self._request.ws_requested_extensions = common.parse_extensions(
                extensions_header, allow_quoted_string=allow_quoted_string)
        except common.ExtensionParsingException, e:
            raise HandshakeException(
                'Failed to parse Sec-WebSocket-Extensions header: %r' % e)

        self._logger.debug(
            'Extensions requested: %r',
            map(common.ExtensionParameter.name,
                self._request.ws_requested_extensions))

    def _validate_key(self, key):
        """Strictly validate the Sec-WebSocket-Key value; return its decoded
        16-byte form or raise HandshakeException."""
        if key.find(',') >= 0:
            raise HandshakeException('Request has multiple %s header lines or '
                                     'contains illegal character \',\': %r' %
                                     (common.SEC_WEBSOCKET_KEY_HEADER, key))

        # Validate
        key_is_valid = False
        try:
            # Validate key by quick regex match before parsing by base64
            # module. Because base64 module skips invalid characters, we have
            # to do this in advance to make this server strictly reject illegal
            # keys.
            if _SEC_WEBSOCKET_KEY_REGEX.match(key):
                decoded_key = base64.b64decode(key)
                if len(decoded_key) == 16:
                    key_is_valid = True
        except TypeError, e:
            pass

        if not key_is_valid:
            raise HandshakeException(
                'Illegal value for header %s: %r' %
                (common.SEC_WEBSOCKET_KEY_HEADER, key))

        return decoded_key

    def _get_key(self):
        """Fetch and validate Sec-WebSocket-Key; return the raw header value."""
        key = get_mandatory_header(
            self._request, common.SEC_WEBSOCKET_KEY_HEADER)

        decoded_key = self._validate_key(key)

        self._logger.debug(
            '%s: %r (%s)',
            common.SEC_WEBSOCKET_KEY_HEADER,
            key,
            util.hexify(decoded_key))

        return key

    def _create_stream(self, stream_options):
        """Build the frame Stream used once the handshake completes."""
        return Stream(self._request, stream_options)

    def _create_handshake_response(self, accept):
        """Assemble the full '101 Switching Protocols' response as a string."""
        response = []

        response.append('HTTP/1.1 101 Switching Protocols\r\n')

        # WebSocket headers
        response.append(format_header(
            common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE))
        response.append(format_header(
            common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
        response.append(format_header(
            common.SEC_WEBSOCKET_ACCEPT_HEADER, accept))

        if self._request.ws_protocol is not None:
            response.append(format_header(
                common.SEC_WEBSOCKET_PROTOCOL_HEADER,
                self._request.ws_protocol))

        if (self._request.ws_extensions is not None and
            len(self._request.ws_extensions) != 0):
            response.append(format_header(
                common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
                common.format_extensions(self._request.ws_extensions)))

        # Headers not specific for WebSocket
        for name, value in self._request.extra_headers:
            response.append(format_header(name, value))

        response.append('\r\n')

        return ''.join(response)

    def _send_handshake(self, accept):
        """Write the handshake response directly on the raw connection."""
        raw_response = self._create_handshake_response(accept)
        self._request.connection.write(raw_response)
        self._logger.debug('Sent server\'s opening handshake: %r',
                           raw_response)
# vi:sts=4 sw=4 et
| mpl-2.0 |
VisheshHanda/production_backup | erpnext/buying/doctype/purchase_order/test_purchase_order.py | 19 | 5835 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
import frappe.defaults
from frappe.utils import flt, add_days, nowdate
from erpnext.buying.doctype.purchase_order.purchase_order import make_purchase_receipt, make_purchase_invoice
class TestPurchaseOrder(unittest.TestCase):
    """Integration tests for Purchase Order: receipts, invoices, ordered-qty
    tracking and validations.

    Consistency fix: the deprecated ``assertEquals`` alias is replaced with
    ``assertEqual`` everywhere (the class previously mixed both spellings).
    """

    def test_make_purchase_receipt(self):
        # A receipt cannot be made from a draft (unsubmitted) PO.
        po = create_purchase_order(do_not_submit=True)
        self.assertRaises(frappe.ValidationError, make_purchase_receipt, po.name)
        po.submit()

        pr = create_pr_against_po(po.name)
        self.assertEqual(len(pr.get("items")), 1)

    def test_ordered_qty(self):
        existing_ordered_qty = get_ordered_qty()

        po = create_purchase_order(do_not_submit=True)
        self.assertRaises(frappe.ValidationError, make_purchase_receipt, po.name)

        po.submit()
        # Submitting the PO reserves 10 units in the Bin.
        self.assertEqual(get_ordered_qty(), existing_ordered_qty + 10)

        create_pr_against_po(po.name)
        # Receiving 4 of 10 leaves 6 still on order.
        self.assertEqual(get_ordered_qty(), existing_ordered_qty + 6)

        po.load_from_db()
        self.assertEqual(po.get("items")[0].received_qty, 4)

        # Allow 50% over-receipt, then receive 8 more (12 total for 10 ordered).
        frappe.db.set_value('Item', '_Test Item', 'tolerance', 50)
        pr = create_pr_against_po(po.name, received_qty=8)
        self.assertEqual(get_ordered_qty(), existing_ordered_qty)

        po.load_from_db()
        self.assertEqual(po.get("items")[0].received_qty, 12)

        # Cancelling the second receipt restores the previous state.
        pr.cancel()
        self.assertEqual(get_ordered_qty(), existing_ordered_qty + 6)

        po.load_from_db()
        self.assertEqual(po.get("items")[0].received_qty, 4)

    def test_ordered_qty_against_pi_with_update_stock(self):
        existing_ordered_qty = get_ordered_qty()
        po = create_purchase_order()

        self.assertEqual(get_ordered_qty(), existing_ordered_qty + 10)

        frappe.db.set_value('Item', '_Test Item', 'tolerance', 50)

        # A purchase invoice with update_stock behaves like a receipt.
        pi = make_purchase_invoice(po.name)
        pi.update_stock = 1
        pi.items[0].qty = 12
        pi.insert()
        pi.submit()

        self.assertEqual(get_ordered_qty(), existing_ordered_qty)

        po.load_from_db()
        self.assertEqual(po.get("items")[0].received_qty, 12)

        pi.cancel()

        self.assertEqual(get_ordered_qty(), existing_ordered_qty + 10)

        po.load_from_db()
        self.assertEqual(po.get("items")[0].received_qty, 0)

    def test_make_purchase_invoice(self):
        po = create_purchase_order(do_not_submit=True)

        self.assertRaises(frappe.ValidationError, make_purchase_invoice, po.name)

        po.submit()
        pi = make_purchase_invoice(po.name)

        self.assertEqual(pi.doctype, "Purchase Invoice")
        self.assertEqual(len(pi.get("items", [])), 1)

    def test_subcontracting(self):
        # Subcontracted FG item pulls in its two BOM raw materials.
        po = create_purchase_order(item_code="_Test FG Item", is_subcontracted="Yes")
        self.assertEqual(len(po.get("supplied_items")), 2)

    def test_warehouse_company_validation(self):
        from erpnext.stock.utils import InvalidWarehouseCompany
        po = create_purchase_order(company="_Test Company 1", do_not_save=True)
        self.assertRaises(InvalidWarehouseCompany, po.insert)

    def test_uom_integer_validation(self):
        from erpnext.utilities.transaction_base import UOMMustBeIntegerError
        po = create_purchase_order(qty=3.4, do_not_save=True)
        self.assertRaises(UOMMustBeIntegerError, po.insert)

    def test_ordered_qty_for_closing_po(self):
        bin = frappe.get_all("Bin", filters={"item_code": "_Test Item", "warehouse": "_Test Warehouse - _TC"},
            fields=["ordered_qty"])

        existing_ordered_qty = bin[0].ordered_qty if bin else 0.0

        po = create_purchase_order(item_code="_Test Item", qty=1)

        self.assertEqual(get_ordered_qty(item_code="_Test Item", warehouse="_Test Warehouse - _TC"), existing_ordered_qty+1)

        # Closing a PO releases its remaining ordered quantity.
        po.update_status("Closed")

        self.assertEqual(get_ordered_qty(item_code="_Test Item", warehouse="_Test Warehouse - _TC"), existing_ordered_qty)

    def test_group_same_items(self):
        frappe.db.set_value("Buying Settings", None, "allow_multiple_items", 1)
        frappe.get_doc({
            "doctype": "Purchase Order",
            "company": "_Test Company",
            "supplier": "_Test Supplier",
            "is_subcontracted": "No",
            "currency": frappe.db.get_value("Company", "_Test Company", "default_currency"),
            "conversion_factor": 1,
            "items": get_same_items(),
            "group_same_items": 1
        }).insert(ignore_permissions=True)
def get_same_items():
    """Two line items for the same item/warehouse (differing only in qty),
    used to exercise the group_same_items feature."""
    schedule = add_days(nowdate(), 1)
    template = {
        "item_code": "_Test FG Item",
        "warehouse": "_Test Warehouse - _TC",
        "rate": 500,
        "schedule_date": schedule,
    }
    return [dict(template, qty=qty) for qty in (1, 4)]
def create_purchase_order(**args):
    """Create (and, unless told otherwise, save and submit) a test Purchase
    Order.

    Recognized args: transaction_date, company, supplier (or legacy
    `customer`), is_subcontracted, currency, conversion_factor, item /
    item_code, warehouse, qty, rate, do_not_save, do_not_submit.
    """
    po = frappe.new_doc("Purchase Order")
    args = frappe._dict(args)
    if args.transaction_date:
        po.transaction_date = args.transaction_date

    po.company = args.company or "_Test Company"
    # Fixed: honour an explicit `supplier` argument. Previously only the
    # (misnamed) `customer` key was consulted, so supplier=... was silently
    # ignored; `customer` is kept as a fallback for backward compatibility.
    po.supplier = args.supplier or args.customer or "_Test Supplier"
    po.is_subcontracted = args.is_subcontracted or "No"
    po.currency = args.currency or frappe.db.get_value("Company", po.company, "default_currency")
    po.conversion_factor = args.conversion_factor or 1

    po.append("items", {
        "item_code": args.item or args.item_code or "_Test Item",
        "warehouse": args.warehouse or "_Test Warehouse - _TC",
        "qty": args.qty or 10,
        "rate": args.rate or 500,
        "schedule_date": add_days(nowdate(), 1)
    })
    if not args.do_not_save:
        po.insert()
        if not args.do_not_submit:
            po.submit()
    return po
def create_pr_against_po(po, received_qty=4):
    """Make and submit a Purchase Receipt for *po*, receiving *received_qty*
    units of its first line item."""
    receipt = make_purchase_receipt(po)
    receipt.get("items")[0].qty = received_qty
    receipt.insert()
    receipt.submit()
    return receipt
def get_ordered_qty(item_code="_Test Item", warehouse="_Test Warehouse - _TC"):
    # Ordered quantity recorded in the stock Bin, coerced to float
    # (flt() turns a missing Bin row / None into 0.0).
    return flt(frappe.db.get_value("Bin", {"item_code": item_code, "warehouse": warehouse},
        "ordered_qty"))
test_dependencies = ["BOM", "Item Price"]
test_records = frappe.get_test_records('Purchase Order')
| gpl-3.0 |
CKehl/pylearn2 | pylearn2/models/vae/kl.py | 45 | 5415 | """
Classes implementing logic related to the analytical computation of the KL
divergence between :math:`q_\\phi(\\mathbf{z} \\mid \\mathbf{x})` and
:math:`p_\\theta(\\mathbf{z})` in the VAE framework
"""
__authors__ = "Vincent Dumoulin"
__copyright__ = "Copyright 2014, Universite de Montreal"
__credits__ = ["Vincent Dumoulin"]
__license__ = "3-clause BSD"
__maintainer__ = "Vincent Dumoulin"
__email__ = "pylearn-dev@googlegroups"
import sys
import inspect
import theano.tensor as T
from pylearn2.utils import wraps
from pylearn2.models.vae import prior, conditional
class ImpossiblePrior(prior.Prior):
    """
    A Prior that's incompatible with everything

    Used as the default `prior_class` sentinel on KLIntegrator so that
    subclasses must explicitly declare which prior they support.

    Parameters
    ----------
    See `Prior`
    """
class ImpossiblePosterior(conditional.Conditional):
    """
    A Conditional that's incompatible with everything

    Used as the default `posterior_class` sentinel on KLIntegrator so that
    subclasses must explicitly declare which posterior they support.

    Parameters
    ----------
    See `Conditional`
    """
class KLIntegrator(object):
    """
    Class responsible for computing the analytical KL divergence term in the
    VAE criterion.

    Subclasses declare which prior/posterior pair they can handle via the
    ``prior_class`` and ``posterior_class`` class attributes. The defaults
    are sentinel classes that match nothing, so the base class is never
    selected as a compatible integrator.
    """
    # Sentinel defaults: incompatible with every real prior/posterior, so
    # isinstance checks against the base class always fail.
    prior_class = ImpossiblePrior
    posterior_class = ImpossiblePosterior

    def kl_divergence(self, phi, theta, prior, posterior):
        """
        Computes the KL-divergence term of the VAE criterion.

        Parameters
        ----------
        phi : tuple of tensor_like
            Parameters of the distribution
            :math:`q_\\phi(\\mathbf{z} \\mid \\mathbf{x})`
        theta : tuple of tensor_like
            Parameters of the distribution :math:`p_\\theta(\\mathbf{z})`
        prior : pylearn2.models.vae.prior.Prior
            Prior distribution on z
        posterior : pylearn2.models.vae.conditional.Conditional
            Posterior distribution on z given x
        """
        raise NotImplementedError(str(self.__class__) + " does not " +
                                  "implement kl_divergence")

    def per_component_kl_divergence(self, phi, theta, prior, posterior):
        """
        If the prior/posterior combination allows it, computes the
        per-component KL divergence term.

        Parameters
        ----------
        phi : tuple of tensor_like
            Parameters of the distribution
            :math:`q_\\phi(\\mathbf{z} \\mid \\mathbf{x})`
        theta : tuple of tensor_like
            Parameters of the distribution :math:`p_\\theta(\\mathbf{z})`
        prior : pylearn2.models.vae.prior.Prior
            Prior distribution on z
        posterior : pylearn2.models.vae.conditional.Conditional
            Posterior distribution on z given x
        """
        raise NotImplementedError(str(self.__class__) + " does not " +
                                  "implement per_component_kl_divergence")

    def _validate_prior_posterior(self, prior, posterior):
        """
        Checks that the prior/posterior combination is what the integrator
        expects and raises an exception otherwise.

        Parameters
        ----------
        prior : pylearn2.models.vae.prior.Prior
            Prior distribution on z
        posterior : pylearn2.models.vae.posterior.Posterior
            Posterior distribution on z given x
        """
        if self.prior_class is None or self.posterior_class is None:
            raise NotImplementedError(str(self.__class__) + " has not set " +
                                      "the required 'prior_class' and " +
                                      "'posterior_class' class attributes")
        if not isinstance(prior, self.prior_class):
            # BUG FIX: the message previously printed
            # str(self.prior_class.__class__), i.e. the metaclass (`type`),
            # rather than the expected prior class itself.
            raise ValueError("prior class " + str(prior.__class__) + " is " +
                             "incompatible with expected prior class " +
                             str(self.prior_class))
        if not isinstance(posterior, self.posterior_class):
            # BUG FIX: the message previously printed
            # str(self.prior_class.__class__) -- the wrong attribute
            # (prior instead of posterior) AND the metaclass.
            raise ValueError("posterior class " + str(posterior.__class__) +
                             " is incompatible with expected posterior " +
                             "class " + str(self.posterior_class))
class DiagonalGaussianPriorPosteriorKL(KLIntegrator):
    """
    Analytical KL divergence between a diagonal-covariance gaussian prior
    and a diagonal-covariance gaussian posterior.
    """
    prior_class = prior.DiagonalGaussianPrior
    posterior_class = conditional.DiagonalGaussian

    @wraps(KLIntegrator.kl_divergence)
    def kl_divergence(self, phi, theta, prior, posterior):
        # Total KL is the sum of the per-latent-component divergences.
        per_component = self.per_component_kl_divergence(
            phi=phi,
            theta=theta,
            prior=prior,
            posterior=posterior
        )
        return per_component.sum(axis=1)

    @wraps(KLIntegrator.per_component_kl_divergence)
    def per_component_kl_divergence(self, phi, theta, prior, posterior):
        self._validate_prior_posterior(prior, posterior)
        q_mu, q_log_sigma = phi
        p_mu, p_log_sigma = theta
        # Closed-form KL between two diagonal gaussians, one latent
        # component at a time.
        log_sigma_gap = p_log_sigma - q_log_sigma
        variance_term = (
            0.5 * (T.exp(2 * q_log_sigma) + (q_mu - p_mu) ** 2) /
            T.exp(2 * p_log_sigma)
        )
        return log_sigma_gap + variance_term - 0.5
def find_integrator_for(prior, posterior):
    """
    Returns a KLIntegrator instance compatible with 'prior' and 'posterior',
    or None if nothing in this module is compatible.

    Parameters
    ----------
    prior : pylearn2.models.vae.prior.Prior
        Object representing the prior p(z)
    posterior : pylearn2.models.vae.conditional.Conditional
        Object representing the approximate posterior q(z | x)
    """
    this_module = sys.modules[__name__]
    # getmembers with the isclass predicate filters to classes only,
    # in the same (name-sorted) order as the unfiltered scan.
    for _, candidate in inspect.getmembers(this_module, inspect.isclass):
        if not issubclass(candidate, KLIntegrator):
            continue
        if (isinstance(prior, candidate.prior_class) and
                isinstance(posterior, candidate.posterior_class)):
            return candidate()
    return None
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.